Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/acpi/Kconfig8
-rw-r--r--drivers/acpi/acpi_memhotplug.c8
-rw-r--r--drivers/acpi/apei/ghes.c71
-rw-r--r--drivers/acpi/apei/hest.c5
-rw-r--r--drivers/acpi/custom_method.c2
-rw-r--r--drivers/acpi/internal.h8
-rw-r--r--drivers/acpi/osl.c26
-rw-r--r--drivers/acpi/pci_irq.c102
-rw-r--r--drivers/acpi/pci_root.c170
-rw-r--r--drivers/acpi/pci_slot.c13
-rw-r--r--drivers/acpi/power.c112
-rw-r--r--drivers/acpi/processor_driver.c2
-rw-r--r--drivers/acpi/scan.c12
-rw-r--r--drivers/ata/Kconfig23
-rw-r--r--drivers/ata/Makefile2
-rw-r--r--drivers/ata/ahci.c24
-rw-r--r--drivers/ata/ata_piix.c17
-rw-r--r--drivers/ata/libata-acpi.c177
-rw-r--r--drivers/ata/libata-core.c66
-rw-r--r--drivers/ata/libata-eh.c27
-rw-r--r--drivers/ata/libata-scsi.c20
-rw-r--r--drivers/ata/libata-zpodd.c299
-rw-r--r--drivers/ata/libata.h27
-rw-r--r--drivers/ata/pata_mpc52xx.c6
-rw-r--r--drivers/ata/pata_samsung_cf.c4
-rw-r--r--drivers/ata/sata_rcar.c910
-rw-r--r--drivers/atm/atmtcp.c6
-rw-r--r--drivers/atm/eni.c3
-rw-r--r--drivers/atm/he.c3
-rw-r--r--drivers/atm/nicstar.c25
-rw-r--r--drivers/atm/solos-pci.c3
-rw-r--r--drivers/base/devtmpfs.c3
-rw-r--r--drivers/base/dma-buf.c66
-rw-r--r--drivers/base/firmware_class.c2
-rw-r--r--drivers/base/memory.c6
-rw-r--r--drivers/base/power/runtime.c89
-rw-r--r--drivers/base/regmap/regmap-debugfs.c2
-rw-r--r--drivers/block/DAC960.c3
-rw-r--r--drivers/block/Kconfig23
-rw-r--r--drivers/block/Makefile3
-rw-r--r--drivers/block/drbd/drbd_main.c29
-rw-r--r--drivers/block/loop.c87
-rw-r--r--drivers/block/mtip32xx/Kconfig2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c431
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h48
-rw-r--r--drivers/block/nbd.c38
-rw-r--r--drivers/block/rbd.c1852
-rw-r--r--drivers/block/rsxx/Makefile2
-rw-r--r--drivers/block/rsxx/config.c213
-rw-r--r--drivers/block/rsxx/core.c649
-rw-r--r--drivers/block/rsxx/cregs.c758
-rw-r--r--drivers/block/rsxx/dev.c367
-rw-r--r--drivers/block/rsxx/dma.c998
-rw-r--r--drivers/block/rsxx/rsxx.h45
-rw-r--r--drivers/block/rsxx/rsxx_cfg.h72
-rw-r--r--drivers/block/rsxx/rsxx_priv.h399
-rw-r--r--drivers/block/swim3.c5
-rw-r--r--drivers/block/xd.c1123
-rw-r--r--drivers/block/xd.h134
-rw-r--r--drivers/block/xen-blkback/blkback.c7
-rw-r--r--drivers/block/xen-blkback/xenbus.c49
-rw-r--r--drivers/block/xen-blkfront.c13
-rw-r--r--drivers/bluetooth/Kconfig1
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/char/Kconfig7
-rw-r--r--drivers/char/agp/intel-gtt.c128
-rw-r--r--drivers/char/dsp56k.c8
-rw-r--r--drivers/char/dtlk.c4
-rw-r--r--drivers/char/hw_random/exynos-rng.c2
-rw-r--r--drivers/char/hw_random/virtio-rng.c13
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c66
-rw-r--r--drivers/char/lp.c8
-rw-r--r--drivers/char/mem.c14
-rw-r--r--drivers/char/misc.c16
-rw-r--r--drivers/char/nsc_gpio.c4
-rw-r--r--drivers/char/pcmcia/Kconfig4
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c2
-rw-r--r--drivers/char/pcmcia/synclink_cs.c707
-rw-r--r--drivers/char/ppdev.c6
-rw-r--r--drivers/char/ps3flash.c2
-rw-r--r--drivers/char/random.c6
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/sonypi.c2
-rw-r--r--drivers/char/tb0219.c4
-rw-r--r--drivers/char/virtio_console.c18
-rw-r--r--drivers/clk/Makefile2
-rw-r--r--drivers/clk/clk-bcm2835.c9
-rw-r--r--drivers/clk/clk.c59
-rw-r--r--drivers/clk/mxs/clk-imx28.c2
-rw-r--r--drivers/clk/tegra/Makefile11
-rw-r--r--drivers/clk/tegra/clk-audio-sync.c87
-rw-r--r--drivers/clk/tegra/clk-divider.c187
-rw-r--r--drivers/clk/tegra/clk-periph-gate.c179
-rw-r--r--drivers/clk/tegra/clk-periph.c218
-rw-r--r--drivers/clk/tegra/clk-pll-out.c123
-rw-r--r--drivers/clk/tegra/clk-pll.c648
-rw-r--r--drivers/clk/tegra/clk-super.c166
-rw-r--r--drivers/clk/tegra/clk-tegra20.c1355
-rw-r--r--drivers/clk/tegra/clk-tegra30.c1994
-rw-r--r--drivers/clk/tegra/clk.c85
-rw-r--r--drivers/clk/tegra/clk.h502
-rw-r--r--drivers/clk/versatile/clk-vexpress.c3
-rw-r--r--drivers/clocksource/Kconfig12
-rw-r--r--drivers/clocksource/Makefile5
-rw-r--r--drivers/clocksource/arm_arch_timer.c391
-rw-r--r--drivers/clocksource/arm_generic.c232
-rw-r--r--drivers/clocksource/bcm2835_timer.c9
-rw-r--r--drivers/clocksource/clksrc-of.c35
-rw-r--r--drivers/clocksource/cs5535-clockevt.c11
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c6
-rw-r--r--drivers/clocksource/nomadik-mtu.c45
-rw-r--r--drivers/clocksource/sunxi_timer.c17
-rw-r--r--drivers/clocksource/tcb_clksrc.c7
-rw-r--r--drivers/clocksource/tegra20_timer.c281
-rw-r--r--drivers/clocksource/time-armada-370-xp.c150
-rw-r--r--drivers/clocksource/vt8500_timer.c180
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c (renamed from drivers/cpufreq/db8500-cpufreq.c)105
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c4
-rw-r--r--drivers/cpufreq/exynos-cpufreq.h48
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c3
-rw-r--r--drivers/cpufreq/exynos4x12-cpufreq.c3
-rw-r--r--drivers/cpufreq/exynos5250-cpufreq.c3
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c2
-rw-r--r--drivers/cpuidle/Kconfig6
-rw-r--r--drivers/cpuidle/Makefile1
-rw-r--r--drivers/cpuidle/cpuidle-kirkwood.c106
-rw-r--r--drivers/crypto/atmel-aes.c2
-rw-r--r--drivers/crypto/bfin_crc.c6
-rw-r--r--drivers/crypto/omap-aes.c658
-rw-r--r--drivers/crypto/omap-sham.c915
-rw-r--r--drivers/crypto/s5p-sss.c4
-rw-r--r--drivers/dca/dca-core.c5
-rw-r--r--drivers/dca/dca-sysfs.c23
-rw-r--r--drivers/devfreq/exynos4_bus.c2
-rw-r--r--drivers/dma/Kconfig13
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/amba-pl08x.c10
-rw-r--r--drivers/dma/at_hdmac.c10
-rw-r--r--drivers/dma/at_hdmac_regs.h8
-rw-r--r--drivers/dma/bestcomm/Kconfig36
-rw-r--r--drivers/dma/bestcomm/Makefile14
-rw-r--r--drivers/dma/bestcomm/ata.c157
-rw-r--r--drivers/dma/bestcomm/bcom_ata_task.c67
-rw-r--r--drivers/dma/bestcomm/bcom_fec_rx_task.c78
-rw-r--r--drivers/dma/bestcomm/bcom_fec_tx_task.c91
-rw-r--r--drivers/dma/bestcomm/bcom_gen_bd_rx_task.c63
-rw-r--r--drivers/dma/bestcomm/bcom_gen_bd_tx_task.c69
-rw-r--r--drivers/dma/bestcomm/bestcomm.c531
-rw-r--r--drivers/dma/bestcomm/fec.c270
-rw-r--r--drivers/dma/bestcomm/gen_bd.c354
-rw-r--r--drivers/dma/bestcomm/sram.c178
-rw-r--r--drivers/dma/coh901318.c1306
-rw-r--r--drivers/dma/coh901318.h (renamed from drivers/dma/coh901318_lli.h)35
-rw-r--r--drivers/dma/coh901318_lli.c6
-rw-r--r--drivers/dma/dmaengine.c37
-rw-r--r--drivers/dma/dmatest.c22
-rw-r--r--drivers/dma/dw_dmac.c524
-rw-r--r--drivers/dma/dw_dmac_regs.h26
-rw-r--r--drivers/dma/edma.c61
-rw-r--r--drivers/dma/ep93xx_dma.c3
-rw-r--r--drivers/dma/ioat/dma.c11
-rw-r--r--drivers/dma/ioat/dma.h1
-rw-r--r--drivers/dma/ioat/dma_v2.c113
-rw-r--r--drivers/dma/ioat/dma_v3.c216
-rw-r--r--drivers/dma/ioat/hw.h11
-rw-r--r--drivers/dma/ioat/pci.c11
-rw-r--r--drivers/dma/iop-adma.c45
-rw-r--r--drivers/dma/ipu/ipu_idmac.c2
-rw-r--r--drivers/dma/ipu/ipu_irq.c1
-rw-r--r--drivers/dma/mmp_pdma.c6
-rw-r--r--drivers/dma/mv_xor.c40
-rw-r--r--drivers/dma/mxs-dma.c8
-rw-r--r--drivers/dma/of-dma.c267
-rw-r--r--drivers/dma/omap-dma.c20
-rw-r--r--drivers/dma/pch_dma.c13
-rw-r--r--drivers/dma/pl330.c101
-rw-r--r--drivers/dma/sh/shdma-base.c3
-rw-r--r--drivers/dma/sh/shdma.c2
-rw-r--r--drivers/dma/sirf-dma.c73
-rw-r--r--drivers/dma/ste_dma40.c491
-rw-r--r--drivers/dma/ste_dma40_ll.c29
-rw-r--r--drivers/dma/ste_dma40_ll.h130
-rw-r--r--drivers/dma/tegra20-apb-dma.c57
-rw-r--r--drivers/edac/Kconfig23
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/edac_core.h5
-rw-r--r--drivers/edac/edac_mc.c152
-rw-r--r--drivers/edac/edac_mc_sysfs.c36
-rw-r--r--drivers/edac/edac_module.c2
-rw-r--r--drivers/edac/edac_pci_sysfs.c2
-rw-r--r--drivers/edac/ghes_edac.c537
-rw-r--r--drivers/edac/i3200_edac.c37
-rw-r--r--drivers/edac/i5100_edac.c178
-rw-r--r--drivers/edac/i7core_edac.c8
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/extcon/Kconfig4
-rw-r--r--drivers/extcon/extcon-arizona.c810
-rw-r--r--drivers/extcon/extcon-gpio.c2
-rw-r--r--drivers/extcon/extcon-max77693.c981
-rw-r--r--drivers/extcon/extcon-max8997.c734
-rw-r--r--drivers/firewire/core-cdev.c20
-rw-r--r--drivers/firewire/core-device.c7
-rw-r--r--drivers/firewire/ohci.c2
-rw-r--r--drivers/firmware/efivars.c146
-rw-r--r--drivers/firmware/memmap.c196
-rw-r--r--drivers/gpio/Kconfig22
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/gpio-em.c3
-rw-r--r--drivers/gpio/gpio-langwell.c53
-rw-r--r--drivers/gpio/gpio-lynxpoint.c469
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c1
-rw-r--r--drivers/gpio/gpio-mxs.c31
-rw-r--r--drivers/gpio/gpio-omap.c6
-rw-r--r--drivers/gpio/gpio-palmas.c184
-rw-r--r--drivers/gpio/gpio-pca953x.c380
-rw-r--r--drivers/gpio/gpio-pl061.c2
-rw-r--r--drivers/gpio/gpio-pxa.c7
-rw-r--r--drivers/gpio/gpio-samsung.c7
-rw-r--r--drivers/gpio/gpio-twl4030.c176
-rw-r--r--drivers/gpio/gpio-vt8500.c65
-rw-r--r--drivers/gpio/gpiolib-acpi.c87
-rw-r--r--drivers/gpio/gpiolib.c770
-rw-r--r--drivers/gpu/Makefile2
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c4
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c27
-rw-r--r--drivers/gpu/drm/ast/ast_main.c12
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c27
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c12
-rw-r--r--drivers/gpu/drm/drm_context.c19
-rw-r--r--drivers/gpu/drm/drm_crtc.c826
-rw-r--r--drivers/gpu/drm/drm_drv.c1
-rw-r--r--drivers/gpu/drm/drm_edid.c843
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h774
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c63
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c95
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c310
-rw-r--r--drivers/gpu/drm/drm_fops.c1
-rw-r--r--drivers/gpu/drm/drm_gem.c40
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c21
-rw-r--r--drivers/gpu/drm/drm_hashtab.c19
-rw-r--r--drivers/gpu/drm/drm_irq.c12
-rw-r--r--drivers/gpu/drm/drm_mm.c96
-rw-r--r--drivers/gpu/drm/drm_modes.c70
-rw-r--r--drivers/gpu/drm/drm_pci.c81
-rw-r--r--drivers/gpu/drm/drm_prime.c186
-rw-r--r--drivers/gpu/drm/drm_stub.c19
-rw-r--r--drivers/gpu/drm/drm_usb.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c55
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c39
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c1035
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c34
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c1
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c43
-rw-r--r--drivers/gpu/drm/gma500/gtt.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c8
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c14
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c12
-rw-r--r--drivers/gpu/drm/i2c/Kconfig28
-rw-r--r--drivers/gpu/drm/i2c/Makefile3
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c906
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c256
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c94
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c131
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h475
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c526
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c33
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c333
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c645
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c305
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c33
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c370
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h436
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c540
-rw-r--r--drivers/gpu/drm/i915/i915_ums.c503
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c46
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c79
-rw-r--r--drivers/gpu/drm/i915/intel_display.c975
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c374
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h41
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c1
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c55
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c108
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c103
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c250
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c6
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c24
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c13
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c95
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c113
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h11
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c67
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c46
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c28
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c16
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig28
-rw-r--r--drivers/gpu/drm/nouveau/Makefile29
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/core/enum.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c106
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/base.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c346
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.h78
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c371
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c24
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c24
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c309
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c140
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c25
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c153
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c90
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c21
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c187
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c53
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c44
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h44
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/enum.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/event.h36
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h12
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/software.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h19
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bus.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h127
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/therm.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/timer.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/therm.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c76
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c95
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c112
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c105
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c101
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv04.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv10.c25
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv20.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv30.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv40.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv50.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nve0.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c140
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c131
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c279
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c154
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c481
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c18
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c143
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c149
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h32
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c285
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c124
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/base.c218
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c244
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c107
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c115
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c82
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c199
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c153
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h103
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c162
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c130
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c96
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c64
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c95
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c297
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c60
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c233
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c173
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c18
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c39
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c118
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h19
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c149
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c307
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c36
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c214
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c186
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig (renamed from drivers/staging/omapdrm/Kconfig)0
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile (renamed from drivers/staging/omapdrm/Makefile)0
-rw-r--r--drivers/gpu/drm/omapdrm/TODO23
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c (renamed from drivers/staging/omapdrm/omap_connector.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c (renamed from drivers/staging/omapdrm/omap_crtc.c)14
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c (renamed from drivers/staging/omapdrm/omap_debugfs.c)18
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_priv.h (renamed from drivers/staging/omapdrm/omap_dmm_priv.h)0
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c (renamed from drivers/staging/omapdrm/omap_dmm_tiler.c)0
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h (renamed from drivers/staging/omapdrm/omap_dmm_tiler.h)0
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c (renamed from drivers/staging/omapdrm/omap_drv.c)6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h (renamed from drivers/staging/omapdrm/omap_drv.h)4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c (renamed from drivers/staging/omapdrm/omap_encoder.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c (renamed from drivers/staging/omapdrm/omap_fb.c)18
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c (renamed from drivers/staging/omapdrm/omap_fbdev.c)34
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c (renamed from drivers/staging/omapdrm/omap_gem.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c (renamed from drivers/staging/omapdrm/omap_gem_dmabuf.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_helpers.c (renamed from drivers/staging/omapdrm/omap_gem_helpers.c)4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c (renamed from drivers/staging/omapdrm/omap_irq.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c (renamed from drivers/staging/omapdrm/omap_plane.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c (renamed from drivers/staging/omapdrm/tcm-sita.c)0
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.h (renamed from drivers/staging/omapdrm/tcm-sita.h)0
-rw-r--r--drivers/gpu/drm/omapdrm/tcm.h (renamed from drivers/staging/omapdrm/tcm.h)0
-rw-r--r--drivers/gpu/drm/radeon/Kconfig33
-rw-r--r--drivers/gpu/drm/radeon/Makefile10
-rw-r--r--drivers/gpu/drm/radeon/atom.c9
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c366
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c1149
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c85
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h54
-rw-r--r--drivers/gpu/drm/radeon/ni.c339
-rw-r--r--drivers/gpu/drm/radeon/nid.h27
-rw-r--r--drivers/gpu/drm/radeon/r100.c224
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h4
-rw-r--r--drivers/gpu/drm/radeon/r100d.h11
-rw-r--r--drivers/gpu/drm/radeon/r200.c26
-rw-r--r--drivers/gpu/drm/radeon/r300.c42
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c2
-rw-r--r--drivers/gpu/drm/radeon/r300d.h11
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/r600.c401
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c33
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c31
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c332
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c135
-rw-r--r--drivers/gpu/drm/radeon/r600d.h17
-rw-r--r--drivers/gpu/drm/radeon/radeon.h38
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c70
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h24
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c73
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c176
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c91
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h16
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c60
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_mem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c170
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h15
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c2
-rw-r--r--drivers/gpu/drm/radeon/rv515d.h11
-rw-r--r--drivers/gpu/drm/radeon/rv770.c25
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h4
-rw-r--r--drivers/gpu/drm/radeon/si.c509
-rw-r--r--drivers/gpu/drm/radeon/sid.h30
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c4
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c1
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c13
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/dc.c588
-rw-r--r--drivers/gpu/drm/tegra/dc.h14
-rw-r--r--drivers/gpu/drm/tegra/drm.c104
-rw-r--r--drivers/gpu/drm/tegra/drm.h43
-rw-r--r--drivers/gpu/drm/tegra/fb.c4
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c227
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h189
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig13
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile10
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c602
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c611
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h150
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c436
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.h26
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_regs.h154
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.c376
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.h26
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c419
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.h26
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c103
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c78
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c78
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c46
-rw-r--r--drivers/gpu/drm/via/via_map.c1
-rw-r--r--drivers/gpu/drm/via/via_mm.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c87
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c21
-rw-r--r--drivers/gpu/stub/Kconfig18
-rw-r--r--drivers/gpu/stub/Makefile1
-rw-r--r--drivers/gpu/stub/poulsbo.c64
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c3
-rw-r--r--drivers/hid/Kconfig20
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-a4tech.c13
-rw-r--r--drivers/hid/hid-apple.c19
-rw-r--r--drivers/hid/hid-aureal.c13
-rw-r--r--drivers/hid/hid-axff.c14
-rw-r--r--drivers/hid/hid-belkin.c13
-rw-r--r--drivers/hid/hid-cherry.c13
-rw-r--r--drivers/hid/hid-chicony.c13
-rw-r--r--drivers/hid/hid-core.c18
-rw-r--r--drivers/hid/hid-cypress.c13
-rw-r--r--drivers/hid/hid-dr.c13
-rw-r--r--drivers/hid/hid-elecom.c13
-rw-r--r--drivers/hid/hid-emsff.c13
-rw-r--r--drivers/hid/hid-ezkey.c13
-rw-r--r--drivers/hid/hid-gaff.c13
-rw-r--r--drivers/hid/hid-generic.c14
-rw-r--r--drivers/hid/hid-gyration.c13
-rw-r--r--drivers/hid/hid-holtek-kbd.c13
-rw-r--r--drivers/hid/hid-holtekff.c15
-rw-r--r--drivers/hid/hid-hyperv.c3
-rw-r--r--drivers/hid/hid-icade.c19
-rw-r--r--drivers/hid/hid-ids.h24
-rw-r--r--drivers/hid/hid-kensington.c13
-rw-r--r--drivers/hid/hid-keytouch.c13
-rw-r--r--drivers/hid/hid-kye.c13
-rw-r--r--drivers/hid/hid-lcpower.c13
-rw-r--r--drivers/hid/hid-lenovo-tpkbd.c14
-rw-r--r--drivers/hid/hid-lg.c212
-rw-r--r--drivers/hid/hid-lg4ff.c17
-rw-r--r--drivers/hid/hid-magicmouse.c19
-rw-r--r--drivers/hid/hid-microsoft.c13
-rw-r--r--drivers/hid/hid-monterey.c13
-rw-r--r--drivers/hid/hid-multitouch.c163
-rw-r--r--drivers/hid/hid-ntrig.c81
-rw-r--r--drivers/hid/hid-ortek.c13
-rw-r--r--drivers/hid/hid-petalynx.c13
-rw-r--r--drivers/hid/hid-picolcd_core.c13
-rw-r--r--drivers/hid/hid-pl.c26
-rw-r--r--drivers/hid/hid-primax.c13
-rw-r--r--drivers/hid/hid-prodikeys.c19
-rw-r--r--drivers/hid/hid-ps3remote.c13
-rw-r--r--drivers/hid/hid-roccat-lua.c14
-rw-r--r--drivers/hid/hid-roccat.c2
-rw-r--r--drivers/hid/hid-saitek.c13
-rw-r--r--drivers/hid/hid-samsung.c13
-rw-r--r--drivers/hid/hid-sensor-hub.c22
-rw-r--r--drivers/hid/hid-sjoy.c13
-rw-r--r--drivers/hid/hid-sony.c59
-rw-r--r--drivers/hid/hid-speedlink.c13
-rw-r--r--drivers/hid/hid-steelseries.c393
-rw-r--r--drivers/hid/hid-sunplus.c13
-rw-r--r--drivers/hid/hid-thingm.c272
-rw-r--r--drivers/hid/hid-tivo.c13
-rw-r--r--drivers/hid/hid-tmff.c13
-rw-r--r--drivers/hid/hid-topseed.c13
-rw-r--r--drivers/hid/hid-twinhan.c13
-rw-r--r--drivers/hid/hid-uclogic.c13
-rw-r--r--drivers/hid/hid-wacom.c18
-rw-r--r--drivers/hid/hid-waltop.c13
-rw-r--r--drivers/hid/hid-wiimote-core.c19
-rw-r--r--drivers/hid/hid-wiimote-debug.c2
-rw-r--r--drivers/hid/hid-wiimote-ext.c8
-rw-r--r--drivers/hid/hid-zpff.c13
-rw-r--r--drivers/hid/hid-zydacron.c13
-rw-r--r--drivers/hid/hidraw.c7
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c87
-rw-r--r--drivers/hid/uhid.c95
-rw-r--r--drivers/hsi/hsi.c2
-rw-r--r--drivers/hv/channel.c33
-rw-r--r--drivers/hv/channel_mgmt.c93
-rw-r--r--drivers/hv/connection.c232
-rw-r--r--drivers/hv/hv.c72
-rw-r--r--drivers/hv/hv_balloon.c63
-rw-r--r--drivers/hv/hv_util.c46
-rw-r--r--drivers/hv/hyperv_vmbus.h65
-rw-r--r--drivers/hv/ring_buffer.c130
-rw-r--r--drivers/hv/vmbus_drv.c54
-rw-r--r--drivers/i2c/busses/Kconfig29
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-at91.c17
-rw-r--r--drivers/i2c/busses/i2c-au1550.c1
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c342
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c2
-rw-r--r--drivers/i2c/busses/i2c-cpm.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c16
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c61
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c2
-rw-r--r--drivers/i2c/busses/i2c-highlander.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c19
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c3
-rw-r--r--drivers/i2c/busses/i2c-imx.c1
-rw-r--r--drivers/i2c/busses/i2c-intel-mid.c2
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c2
-rw-r--r--drivers/i2c/busses/i2c-isch.c17
-rw-r--r--drivers/i2c/busses/i2c-ismt.c963
-rw-r--r--drivers/i2c/busses/i2c-mpc.c2
-rw-r--r--drivers/i2c/busses/i2c-mxs.c272
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c152
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c100
-rw-r--r--drivers/i2c/busses/i2c-ocores.c3
-rw-r--r--drivers/i2c/busses/i2c-octeon.c5
-rw-r--r--drivers/i2c/busses/i2c-omap.c3
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c1
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c2
-rw-r--r--drivers/i2c/busses/i2c-pnx.c2
-rw-r--r--drivers/i2c/busses/i2c-powermac.c1
-rw-r--r--drivers/i2c/busses/i2c-puv3.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c6
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c62
-rw-r--r--drivers/i2c/busses/i2c-s6000.c1
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c1
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c152
-rw-r--r--drivers/i2c/busses/i2c-sis630.c360
-rw-r--r--drivers/i2c/busses/i2c-stu300.c1
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c82
-rw-r--r--drivers/i2c/busses/i2c-versatile.c2
-rw-r--r--drivers/i2c/busses/i2c-xiic.c2
-rw-r--r--drivers/i2c/busses/i2c-xlr.c1
-rw-r--r--drivers/i2c/busses/scx200_acb.c1
-rw-r--r--drivers/i2c/i2c-core.c69
-rw-r--r--drivers/i2c/i2c-dev.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c1
-rw-r--r--drivers/ide/Kconfig5
-rw-r--r--drivers/ide/ide-proc.c4
-rw-r--r--drivers/idle/i7300_idle.c8
-rw-r--r--drivers/infiniband/core/cm.c22
-rw-r--r--drivers/infiniband/core/cma.c27
-rw-r--r--drivers/infiniband/core/fmr_pool.c3
-rw-r--r--drivers/infiniband/core/sa_query.c18
-rw-r--r--drivers/infiniband/core/ucm.c16
-rw-r--r--drivers/infiniband/core/ucma.c32
-rw-r--r--drivers/infiniband/core/uverbs.h2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c140
-rw-r--r--drivers/infiniband/core/uverbs_main.c13
-rw-r--r--drivers/infiniband/core/verbs.c5
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c13
-rw-r--r--drivers/infiniband/hw/amso1100/c2_qp.c19
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h24
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c7
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c15
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c170
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h29
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c27
-rw-r--r--drivers/infiniband/hw/ehca/ehca_iverbs.h2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c5
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c34
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c16
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c6
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c32
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c7
-rw-r--r--drivers/infiniband/hw/mlx4/main.c22
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c87
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c49
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c19
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c14
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c21
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c19
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c14
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c9
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c8
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c42
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h1
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/misc/max8925_onkey.c3
-rw-r--r--drivers/input/serio/Kconfig1
-rw-r--r--drivers/iommu/Kconfig76
-rw-r--r--drivers/iommu/Makefile2
-rw-r--r--drivers/iommu/amd_iommu.c3
-rw-r--r--drivers/iommu/amd_iommu_init.c10
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/exynos-iommu.c2
-rw-r--r--drivers/iommu/intel-iommu.c8
-rw-r--r--drivers/iommu/iommu.c66
-rw-r--r--drivers/iommu/omap-iommu.c4
-rw-r--r--drivers/iommu/shmobile-iommu.c395
-rw-r--r--drivers/iommu/shmobile-ipmmu.c136
-rw-r--r--drivers/iommu/shmobile-ipmmu.h34
-rw-r--r--drivers/iommu/tegra-gart.c4
-rw-r--r--drivers/iommu/tegra-smmu.c91
-rw-r--r--drivers/ipack/devices/Kconfig2
-rw-r--r--drivers/ipack/devices/ipoctal.c130
-rw-r--r--drivers/irqchip/Kconfig27
-rw-r--r--drivers/irqchip/Makefile7
-rw-r--r--drivers/irqchip/exynos-combiner.c230
-rw-r--r--drivers/irqchip/irq-gic.c845
-rw-r--r--drivers/irqchip/irq-vic.c489
-rw-r--r--drivers/irqchip/irqchip.c30
-rw-r--r--drivers/irqchip/irqchip.h29
-rw-r--r--drivers/irqchip/spear-shirq.c5
-rw-r--r--drivers/isdn/Kconfig1
-rw-r--r--drivers/isdn/capi/Kconfig1
-rw-r--r--drivers/isdn/gigaset/Kconfig1
-rw-r--r--drivers/isdn/gigaset/interface.c14
-rw-r--r--drivers/isdn/hardware/eicon/divacapi.h6
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c6
-rw-r--r--drivers/isdn/hardware/eicon/pc.h4
-rw-r--r--drivers/isdn/hardware/mISDN/Kconfig1
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c4
-rw-r--r--drivers/isdn/i4l/isdn_common.c22
-rw-r--r--drivers/isdn/i4l/isdn_common.h2
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c2
-rw-r--r--drivers/isdn/i4l/isdn_tty.c59
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c2
-rw-r--r--drivers/isdn/mISDN/socket.c3
-rw-r--r--drivers/isdn/mISDN/stack.c3
-rw-r--r--drivers/leds/Kconfig12
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-88pm860x.c5
-rw-r--r--drivers/leds/leds-lm3530.c58
-rw-r--r--drivers/leds/leds-lm355x.c2
-rw-r--r--drivers/leds/leds-lm3642.c4
-rw-r--r--drivers/leds/leds-lp5521.c944
-rw-r--r--drivers/leds/leds-lp5523.c1050
-rw-r--r--drivers/leds/leds-lp55xx-common.c523
-rw-r--r--drivers/leds/leds-lp55xx-common.h134
-rw-r--r--drivers/leds/leds-lp8788.c9
-rw-r--r--drivers/leds/leds-pca9532.c6
-rw-r--r--drivers/leds/leds-pwm.c152
-rw-r--r--drivers/leds/leds-renesas-tpu.c12
-rw-r--r--drivers/leds/leds-ss4200.c3
-rw-r--r--drivers/leds/leds-sunfire.c19
-rw-r--r--drivers/leds/leds-tca6507.c72
-rw-r--r--drivers/leds/leds-wm831x-status.c2
-rw-r--r--drivers/lguest/Kconfig2
-rw-r--r--drivers/lguest/lguest_device.c2
-rw-r--r--drivers/macintosh/windfarm_pm112.c2
-rw-r--r--drivers/macintosh/windfarm_pm72.c2
-rw-r--r--drivers/macintosh/windfarm_rm31.c2
-rw-r--r--drivers/md/Kconfig55
-rw-r--r--drivers/md/Makefile6
-rw-r--r--drivers/md/bitmap.c4
-rw-r--r--drivers/md/dm-bio-prison.c158
-rw-r--r--drivers/md/dm-bio-prison.h58
-rw-r--r--drivers/md/dm-bufio.c5
-rw-r--r--drivers/md/dm-cache-block-types.h54
-rw-r--r--drivers/md/dm-cache-metadata.c1146
-rw-r--r--drivers/md/dm-cache-metadata.h142
-rw-r--r--drivers/md/dm-cache-policy-cleaner.c464
-rw-r--r--drivers/md/dm-cache-policy-internal.h124
-rw-r--r--drivers/md/dm-cache-policy-mq.c1195
-rw-r--r--drivers/md/dm-cache-policy.c161
-rw-r--r--drivers/md/dm-cache-policy.h228
-rw-r--r--drivers/md/dm-cache-target.c2584
-rw-r--r--drivers/md/dm-crypt.c45
-rw-r--r--drivers/md/dm-delay.c12
-rw-r--r--drivers/md/dm-flakey.c11
-rw-r--r--drivers/md/dm-ioctl.c166
-rw-r--r--drivers/md/dm-kcopyd.c121
-rw-r--r--drivers/md/dm-linear.c13
-rw-r--r--drivers/md/dm-mpath.c12
-rw-r--r--drivers/md/dm-raid.c10
-rw-r--r--drivers/md/dm-raid1.c17
-rw-r--r--drivers/md/dm-snap.c36
-rw-r--r--drivers/md/dm-stripe.c27
-rw-r--r--drivers/md/dm-table.c11
-rw-r--r--drivers/md/dm-target.c2
-rw-r--r--drivers/md/dm-thin-metadata.c12
-rw-r--r--drivers/md/dm-thin.c277
-rw-r--r--drivers/md/dm-verity.c8
-rw-r--r--drivers/md/dm-zero.c2
-rw-r--r--drivers/md/dm.c508
-rw-r--r--drivers/md/persistent-data/Kconfig2
-rw-r--r--drivers/md/persistent-data/Makefile2
-rw-r--r--drivers/md/persistent-data/dm-array.c808
-rw-r--r--drivers/md/persistent-data/dm-array.h166
-rw-r--r--drivers/md/persistent-data/dm-bitset.c163
-rw-r--r--drivers/md/persistent-data/dm-bitset.h165
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c1
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h1
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c7
-rw-r--r--drivers/md/persistent-data/dm-btree.c52
-rw-r--r--drivers/md/persistent-data/dm-btree.h15
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c21
-rw-r--r--drivers/md/raid5.c14
-rw-r--r--drivers/media/Kconfig19
-rw-r--r--drivers/media/common/Kconfig11
-rw-r--r--drivers/media/common/Makefile3
-rw-r--r--drivers/media/common/btcx-risc.c (renamed from drivers/media/i2c/btcx-risc.c)0
-rw-r--r--drivers/media/common/btcx-risc.h (renamed from drivers/media/i2c/btcx-risc.h)0
-rw-r--r--drivers/media/common/cx2341x.c (renamed from drivers/media/i2c/cx2341x.c)0
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c5
-rw-r--r--drivers/media/common/tveeprom.c (renamed from drivers/media/i2c/tveeprom.c)290
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h6
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c16
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c59
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h10
-rw-r--r--drivers/media/dvb-core/dvb_net.c71
-rw-r--r--drivers/media/dvb-core/dvb_net.h1
-rw-r--r--drivers/media/dvb-core/dvbdev.c2
-rw-r--r--drivers/media/dvb-frontends/Kconfig7
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/af9033.c18
-rw-r--r--drivers/media/dvb-frontends/af9033.h1
-rw-r--r--drivers/media/dvb-frontends/af9033_priv.h132
-rw-r--r--drivers/media/dvb-frontends/bcm3510.h2
-rw-r--r--drivers/media/dvb-frontends/cx22700.h2
-rw-r--r--drivers/media/dvb-frontends/cx24110.h2
-rw-r--r--drivers/media/dvb-frontends/cx24116.c2
-rw-r--r--drivers/media/dvb-frontends/dib0070.h2
-rw-r--r--drivers/media/dvb-frontends/dib0090.h2
-rw-r--r--drivers/media/dvb-frontends/dib3000.h2
-rw-r--r--drivers/media/dvb-frontends/dib8000.h2
-rw-r--r--drivers/media/dvb-frontends/dib9000.h2
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c9
-rw-r--r--drivers/media/dvb-frontends/ds3000.c261
-rw-r--r--drivers/media/dvb-frontends/ds3000.h10
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.h2
-rw-r--r--drivers/media/dvb-frontends/isl6405.h2
-rw-r--r--drivers/media/dvb-frontends/isl6421.h2
-rw-r--r--drivers/media/dvb-frontends/isl6423.h2
-rw-r--r--drivers/media/dvb-frontends/itd1000.h2
-rw-r--r--drivers/media/dvb-frontends/ix2505v.c2
-rw-r--r--drivers/media/dvb-frontends/l64781.h2
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.h2
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c422
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.h6
-rw-r--r--drivers/media/dvb-frontends/mb86a16.h2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c1790
-rw-r--r--drivers/media/dvb-frontends/mt312.h2
-rw-r--r--drivers/media/dvb-frontends/mt352.h2
-rw-r--r--drivers/media/dvb-frontends/nxt200x.h2
-rw-r--r--drivers/media/dvb-frontends/nxt6000.h2
-rw-r--r--drivers/media/dvb-frontends/or51132.h2
-rw-r--r--drivers/media/dvb-frontends/or51211.c99
-rw-r--r--drivers/media/dvb-frontends/or51211.h2
-rw-r--r--drivers/media/dvb-frontends/s5h1420.h2
-rw-r--r--drivers/media/dvb-frontends/sp8870.h2
-rw-r--r--drivers/media/dvb-frontends/sp887x.h2
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.h2
-rw-r--r--drivers/media/dvb-frontends/stb6100.h2
-rw-r--r--drivers/media/dvb-frontends/stv0297.h2
-rw-r--r--drivers/media/dvb-frontends/stv0299.c2
-rw-r--r--drivers/media/dvb-frontends/stv0299.h2
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c40
-rw-r--r--drivers/media/dvb-frontends/stv0900_reg.h3
-rw-r--r--drivers/media/dvb-frontends/stv0900_sw.c7
-rw-r--r--drivers/media/dvb-frontends/stv090x.c141
-rw-r--r--drivers/media/dvb-frontends/stv090x.h2
-rw-r--r--drivers/media/dvb-frontends/stv6110x.h2
-rw-r--r--drivers/media/dvb-frontends/tda1002x.h5
-rw-r--r--drivers/media/dvb-frontends/tda1004x.h2
-rw-r--r--drivers/media/dvb-frontends/tda10071.c22
-rw-r--r--drivers/media/dvb-frontends/tda10071.h8
-rw-r--r--drivers/media/dvb-frontends/tda10086.h2
-rw-r--r--drivers/media/dvb-frontends/tda665x.h2
-rw-r--r--drivers/media/dvb-frontends/tda8083.h2
-rw-r--r--drivers/media/dvb-frontends/tda8261.h2
-rw-r--r--drivers/media/dvb-frontends/tda8261_cfg.h2
-rw-r--r--drivers/media/dvb-frontends/tda826x.h2
-rw-r--r--drivers/media/dvb-frontends/ts2020.c373
-rw-r--r--drivers/media/dvb-frontends/ts2020.h50
-rw-r--r--drivers/media/dvb-frontends/tua6100.h2
-rw-r--r--drivers/media/dvb-frontends/ves1820.h2
-rw-r--r--drivers/media/dvb-frontends/ves1x93.h2
-rw-r--r--drivers/media/dvb-frontends/zl10353.h2
-rw-r--r--drivers/media/i2c/Kconfig42
-rw-r--r--drivers/media/i2c/Makefile5
-rw-r--r--drivers/media/i2c/adv7180.c3
-rw-r--r--drivers/media/i2c/adv7343.c45
-rw-r--r--drivers/media/i2c/cx25840/cx25840-ir.c6
-rw-r--r--drivers/media/i2c/mt9v011.c223
-rw-r--r--drivers/media/i2c/noon010pc30.c7
-rw-r--r--drivers/media/i2c/ov7670.c589
-rw-r--r--drivers/media/i2c/ov9650.c1562
-rw-r--r--drivers/media/i2c/s5c73m3/Makefile2
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c1704
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c563
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-spi.c156
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h459
-rw-r--r--drivers/media/i2c/s5k6aa.c7
-rw-r--r--drivers/media/i2c/soc_camera/imx074.c27
-rw-r--r--drivers/media/i2c/soc_camera/mt9m001.c52
-rw-r--r--drivers/media/i2c/soc_camera/mt9m111.c36
-rw-r--r--drivers/media/i2c/soc_camera/mt9t031.c36
-rw-r--r--drivers/media/i2c/soc_camera/mt9t112.c45
-rw-r--r--drivers/media/i2c/soc_camera/mt9v022.c45
-rw-r--r--drivers/media/i2c/soc_camera/ov2640.c29
-rw-r--r--drivers/media/i2c/soc_camera/ov5642.c31
-rw-r--r--drivers/media/i2c/soc_camera/ov6650.c30
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c36
-rw-r--r--drivers/media/i2c/soc_camera/ov9640.c27
-rw-r--r--drivers/media/i2c/soc_camera/ov9740.c29
-rw-r--r--drivers/media/i2c/soc_camera/rj54n1cb0c.c39
-rw-r--r--drivers/media/i2c/soc_camera/tw9910.c30
-rw-r--r--drivers/media/i2c/ths7303.c3
-rw-r--r--drivers/media/i2c/tvaudio.c238
-rw-r--r--drivers/media/i2c/tvp514x.c4
-rw-r--r--drivers/media/i2c/tvp5150.c7
-rw-r--r--drivers/media/i2c/tvp7002.c18
-rw-r--r--drivers/media/parport/Kconfig1
-rw-r--r--drivers/media/parport/bw-qcam.c165
-rw-r--r--drivers/media/pci/bt8xx/Makefile1
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-i2c.c5
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c4
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-main.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.h2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c9
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.c2
-rw-r--r--drivers/media/pci/cx23885/Kconfig3
-rw-r--r--drivers/media/pci/cx23885/Makefile1
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c114
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c66
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c9
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c20
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c6
-rw-r--r--drivers/media/pci/cx25821/Makefile1
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c2
-rw-r--r--drivers/media/pci/cx88/Kconfig2
-rw-r--r--drivers/media/pci/cx88/Makefile1
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c2
-rw-r--r--drivers/media/pci/cx88/cx88-core.c2
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c15
-rw-r--r--drivers/media/pci/cx88/cx88-i2c.c3
-rw-r--r--drivers/media/pci/cx88/cx88-vp3054-i2c.c3
-rw-r--r--drivers/media/pci/cx88/cx88-vp3054-i2c.h2
-rw-r--r--drivers/media/pci/cx88/cx88.h10
-rw-r--r--drivers/media/pci/dm1105/Kconfig1
-rw-r--r--drivers/media/pci/dm1105/dm1105.c11
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c14
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.c4
-rw-r--r--drivers/media/pci/mantis/mantis_ca.c5
-rw-r--r--drivers/media/pci/meye/meye.c286
-rw-r--r--drivers/media/pci/meye/meye.h2
-rw-r--r--drivers/media/pci/ngene/ngene-cards.c9
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c17
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c13
-rw-r--r--drivers/media/pci/saa7134/saa7134.h7
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c2
-rw-r--r--drivers/media/pci/sta2x11/Kconfig2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c1073
-rw-r--r--drivers/media/pci/ttpci/Kconfig5
-rw-r--r--drivers/media/pci/ttpci/av7110.c12
-rw-r--r--drivers/media/pci/ttpci/av7110.h2
-rw-r--r--drivers/media/pci/ttpci/av7110_av.c8
-rw-r--r--drivers/media/pci/ttpci/av7110_ca.c24
-rw-r--r--drivers/media/pci/zoran/zoran_card.c3
-rw-r--r--drivers/media/pci/zoran/zoran_device.c4
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c2
-rw-r--r--drivers/media/pci/zoran/zoran_procfs.c2
-rw-r--r--drivers/media/platform/Kconfig11
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/blackfin/Kconfig7
-rw-r--r--drivers/media/platform/blackfin/Makefile4
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c189
-rw-r--r--drivers/media/platform/blackfin/ppi.c90
-rw-r--r--drivers/media/platform/coda.c32
-rw-r--r--drivers/media/platform/davinci/Kconfig22
-rw-r--r--drivers/media/platform/davinci/Makefile4
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/vpbe.c12
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c9
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c35
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c65
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c5
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c6
-rw-r--r--drivers/media/platform/davinci/vpss.c71
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c48
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h5
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c34
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-regs.c6
-rw-r--r--drivers/media/platform/fsl-viu.c2
-rw-r--r--drivers/media/platform/m2m-deinterlace.c6
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c84
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h17
-rw-r--r--drivers/media/platform/omap/Kconfig2
-rw-r--r--drivers/media/platform/omap/omap_vout.c36
-rw-r--r--drivers/media/platform/omap24xxcam.c2
-rw-r--r--drivers/media/platform/omap3isp/isp.c92
-rw-r--r--drivers/media/platform/omap3isp/isp.h8
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c8
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c13
-rw-r--r--drivers/media/platform/omap3isp/isph3a_aewb.c28
-rw-r--r--drivers/media/platform/omap3isp/isph3a_af.c28
-rw-r--r--drivers/media/platform/omap3isp/isphist.c21
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c40
-rw-r--r--drivers/media/platform/omap3isp/ispqueue.c5
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c1
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-capture.c190
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-core.c165
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-core.h17
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-lite-reg.c16
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-lite-reg.h4
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-lite.c192
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-lite.h9
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-m2m.c136
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-mdevice.c400
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-mdevice.h14
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-reg.c82
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-reg.h10
-rw-r--r--drivers/media/platform/s5p-fimc/mipi-csis.c101
-rw-r--r--drivers/media/platform/s5p-g2d/g2d-hw.c16
-rw-r--r--drivers/media/platform/s5p-g2d/g2d-regs.h7
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c35
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.h17
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c165
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h31
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c149
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h3
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c15
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.c30
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.h5
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c197
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c148
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c2
-rw-r--r--drivers/media/platform/s5p-tv/hdmi_drv.c18
-rw-r--r--drivers/media/platform/s5p-tv/hdmiphy_drv.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h1
-rw-r--r--drivers/media/platform/s5p-tv/mixer_drv.c14
-rw-r--r--drivers/media/platform/s5p-tv/mixer_reg.c6
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c19
-rw-r--r--drivers/media/platform/s5p-tv/sdo_drv.c29
-rw-r--r--drivers/media/platform/s5p-tv/sii9234_drv.c6
-rw-r--r--drivers/media/platform/sh_veu.c1266
-rw-r--r--drivers/media/platform/sh_vou.c123
-rw-r--r--drivers/media/platform/soc_camera/Kconfig7
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c6
-rw-r--r--drivers/media/platform/soc_camera/mx1_camera.c5
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c531
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/omap1_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/pxa_camera.c73
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c44
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_csi2.c23
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c172
-rw-r--r--drivers/media/platform/soc_camera/soc_camera_platform.c6
-rw-r--r--drivers/media/platform/soc_camera/soc_mediabus.c6
-rw-r--r--drivers/media/platform/timblogiw.c2
-rw-r--r--drivers/media/platform/via-camera.c60
-rw-r--r--drivers/media/platform/vino.c11
-rw-r--r--drivers/media/platform/vivi.c224
-rw-r--r--drivers/media/radio/Kconfig14
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/radio-ma901.c460
-rw-r--r--drivers/media/radio/radio-miropcm20.c173
-rw-r--r--drivers/media/radio/radio-wl1273.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h4
-rw-r--r--drivers/media/radio/wl128x/Kconfig2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c3
-rw-r--r--drivers/media/rc/Kconfig2
-rw-r--r--drivers/media/rc/ati_remote.c27
-rw-r--r--drivers/media/rc/ene_ir.c28
-rw-r--r--drivers/media/rc/fintek-cir.c24
-rw-r--r--drivers/media/rc/gpio-ir-recv.c55
-rw-r--r--drivers/media/rc/iguanair.c26
-rw-r--r--drivers/media/rc/imon.c4
-rw-r--r--drivers/media/rc/ir-raw.c17
-rw-r--r--drivers/media/rc/ite-cir.c26
-rw-r--r--drivers/media/rc/keymaps/Makefile1
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand-02.c86
-rw-r--r--drivers/media/rc/lirc_dev.c19
-rw-r--r--drivers/media/rc/mceusb.c37
-rw-r--r--drivers/media/rc/nuvoton-cir.c41
-rw-r--r--drivers/media/rc/rc-core-priv.h16
-rw-r--r--drivers/media/rc/rc-main.c7
-rw-r--r--drivers/media/rc/redrat3.c18
-rw-r--r--drivers/media/rc/ttusbir.c10
-rw-r--r--drivers/media/rc/winbond-cir.c41
-rw-r--r--drivers/media/tuners/fc0011.c19
-rw-r--r--drivers/media/tuners/fc0012-priv.h13
-rw-r--r--drivers/media/tuners/fc0012.c113
-rw-r--r--drivers/media/tuners/fc0012.h32
-rw-r--r--drivers/media/tuners/mt2060.h2
-rw-r--r--drivers/media/tuners/mt2063.h2
-rw-r--r--drivers/media/tuners/mt20xx.h2
-rw-r--r--drivers/media/tuners/mt2131.h2
-rw-r--r--drivers/media/tuners/mt2266.h2
-rw-r--r--drivers/media/tuners/mxl5007t.h2
-rw-r--r--drivers/media/tuners/qt1010.h2
-rw-r--r--drivers/media/tuners/tda18212.c6
-rw-r--r--drivers/media/tuners/tda18218.c6
-rw-r--r--drivers/media/tuners/tda18271-fe.c2
-rw-r--r--drivers/media/tuners/tda18271-maps.c6
-rw-r--r--drivers/media/tuners/tda18271.h2
-rw-r--r--drivers/media/tuners/tda827x.h2
-rw-r--r--drivers/media/tuners/tda8290.h2
-rw-r--r--drivers/media/tuners/tda9887.h2
-rw-r--r--drivers/media/tuners/tea5761.h2
-rw-r--r--drivers/media/tuners/tea5767.h2
-rw-r--r--drivers/media/tuners/tuner-simple.h2
-rw-r--r--drivers/media/tuners/tuner-xc2028.c2
-rw-r--r--drivers/media/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/tuners/xc4000.c2
-rw-r--r--drivers/media/tuners/xc4000.h2
-rw-r--r--drivers/media/tuners/xc5000.c1
-rw-r--r--drivers/media/usb/Kconfig2
-rw-r--r--drivers/media/usb/au0828/Kconfig17
-rw-r--r--drivers/media/usb/au0828/Makefile6
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c24
-rw-r--r--drivers/media/usb/au0828/au0828-core.c13
-rw-r--r--drivers/media/usb/au0828/au0828-i2c.c13
-rw-r--r--drivers/media/usb/au0828/au0828-video.c4
-rw-r--r--drivers/media/usb/au0828/au0828.h2
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c2
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c31
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c289
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.h3
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c26
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h3
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c15
-rw-r--r--drivers/media/usb/dvb-usb-v2/it913x.c54
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c38
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c30
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig8
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c5
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c60
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c179
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c5
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c277
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c8
-rw-r--r--drivers/media/usb/em28xx/Kconfig8
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c270
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c296
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c96
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c293
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c359
-rw-r--r--drivers/media/usb/em28xx/em28xx-reg.h5
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c123
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c1689
-rw-r--r--drivers/media/usb/em28xx/em28xx.h149
-rw-r--r--drivers/media/usb/gspca/cpia1.c6
-rw-r--r--drivers/media/usb/gspca/gspca.c10
-rw-r--r--drivers/media/usb/gspca/gspca.h6
-rw-r--r--drivers/media/usb/gspca/jl2005bcd.c18
-rw-r--r--drivers/media/usb/gspca/konica.c6
-rw-r--r--drivers/media/usb/gspca/ov519.c6
-rw-r--r--drivers/media/usb/gspca/pac207.c36
-rw-r--r--drivers/media/usb/gspca/pac7302.c4
-rw-r--r--drivers/media/usb/gspca/pac7311.c4
-rw-r--r--drivers/media/usb/gspca/se401.c4
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c4
-rw-r--r--drivers/media/usb/gspca/sonixb.c6
-rw-r--r--drivers/media/usb/gspca/sonixj.c4
-rw-r--r--drivers/media/usb/gspca/spca561.c6
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx.c4
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c17
-rw-r--r--drivers/media/usb/gspca/t613.c8
-rw-r--r--drivers/media/usb/gspca/xirlink_cit.c8
-rw-r--r--drivers/media/usb/gspca/zc3xx.c4
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c6
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c5
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c3
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c5
-rw-r--r--drivers/media/usb/pwc/pwc-v4l.c7
-rw-r--r--drivers/media/usb/s2255/s2255drv.c6
-rw-r--r--drivers/media/usb/sn9c102/sn9c102_core.c9
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c59
-rw-r--r--drivers/media/usb/tlg2300/pd-video.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-core.c9
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c542
-rw-r--r--drivers/media/usb/tm6000/tm6000.h10
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c7
-rw-r--r--drivers/media/usb/usbvision/usbvision-core.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-i2c.c3
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c5
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c2
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c16
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c8
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c6
-rw-r--r--drivers/media/v4l2-core/Kconfig11
-rw-r--r--drivers/media/v4l2-core/Makefile3
-rw-r--r--drivers/media/v4l2-core/tuner-core.c17
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c14
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c179
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c32
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c4
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c15
-rw-r--r--drivers/memstick/core/memstick.c21
-rw-r--r--drivers/memstick/core/mspro_block.c17
-rw-r--r--drivers/memstick/host/r592.c3
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c7
-rw-r--r--drivers/mfd/88pm800.c10
-rw-r--r--drivers/mfd/88pm805.c4
-rw-r--r--drivers/mfd/88pm80x.c22
-rw-r--r--drivers/mfd/Kconfig72
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/ab8500-core.c61
-rw-r--r--drivers/mfd/ab8500-debugfs.c1248
-rw-r--r--drivers/mfd/ab8500-gpadc.c90
-rw-r--r--drivers/mfd/ab8500-sysctrl.c92
-rw-r--r--drivers/mfd/abx500-core.c16
-rw-r--r--drivers/mfd/arizona-core.c55
-rw-r--r--drivers/mfd/da9052-i2c.c3
-rw-r--r--drivers/mfd/db8500-prcmu.c160
-rw-r--r--drivers/mfd/lpc_ich.c144
-rw-r--r--drivers/mfd/lpc_sch.c147
-rw-r--r--drivers/mfd/max8925-core.c89
-rw-r--r--drivers/mfd/max8925-i2c.c36
-rw-r--r--drivers/mfd/omap-usb-host.c558
-rw-r--r--drivers/mfd/omap-usb-tll.c243
-rw-r--r--drivers/mfd/palmas.c14
-rw-r--r--drivers/mfd/rtl8411.c16
-rw-r--r--drivers/mfd/rts5209.c8
-rw-r--r--drivers/mfd/rts5227.c234
-rw-r--r--drivers/mfd/rts5229.c8
-rw-r--r--drivers/mfd/rtsx_pcr.c111
-rw-r--r--drivers/mfd/rtsx_pcr.h4
-rw-r--r--drivers/mfd/syscon.c1
-rw-r--r--drivers/mfd/tps6507x.c9
-rw-r--r--drivers/mfd/tps65090.c47
-rw-r--r--drivers/mfd/twl-core.c370
-rw-r--r--drivers/mfd/vexpress-sysreg.c85
-rw-r--r--drivers/mfd/wm5102-tables.c140
-rw-r--r--drivers/mfd/wm8994-core.c7
-rw-r--r--drivers/misc/Kconfig14
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/c2port/core.c22
-rw-r--r--drivers/misc/carma/carma-fpga-program.c2
-rw-r--r--drivers/misc/carma/carma-fpga.c6
-rw-r--r--drivers/misc/cb710/Kconfig2
-rw-r--r--drivers/misc/eeprom/Kconfig11
-rw-r--r--drivers/misc/kgdbts.c2
-rw-r--r--drivers/misc/lattice-ecp3-config.c243
-rw-r--r--drivers/misc/mei/Kconfig15
-rw-r--r--drivers/misc/mei/Makefile6
-rw-r--r--drivers/misc/mei/amthif.c164
-rw-r--r--drivers/misc/mei/client.c729
-rw-r--r--drivers/misc/mei/client.h102
-rw-r--r--drivers/misc/mei/hbm.c669
-rw-r--r--drivers/misc/mei/hbm.h39
-rw-r--r--drivers/misc/mei/hw-me-regs.h167
-rw-r--r--drivers/misc/mei/hw-me.c576
-rw-r--r--drivers/misc/mei/hw-me.h48
-rw-r--r--drivers/misc/mei/hw.h125
-rw-r--r--drivers/misc/mei/init.c572
-rw-r--r--drivers/misc/mei/interface.c388
-rw-r--r--drivers/misc/mei/interface.h81
-rw-r--r--drivers/misc/mei/interrupt.c656
-rw-r--r--drivers/misc/mei/iorw.c366
-rw-r--r--drivers/misc/mei/main.c536
-rw-r--r--drivers/misc/mei/mei_dev.h350
-rw-r--r--drivers/misc/mei/pci-me.c396
-rw-r--r--drivers/misc/mei/wd.c77
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c3
-rw-r--r--drivers/misc/ti-st/Kconfig2
-rw-r--r--drivers/misc/ti-st/st_core.c3
-rw-r--r--drivers/misc/tifm_core.c11
-rw-r--r--drivers/misc/vmw_vmci/Kconfig16
-rw-r--r--drivers/misc/vmw_vmci/Makefile4
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c1214
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.h182
-rw-r--r--drivers/misc/vmw_vmci/vmci_datagram.c500
-rw-r--r--drivers/misc/vmw_vmci/vmci_datagram.h52
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c601
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.h51
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c117
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.h50
-rw-r--r--drivers/misc/vmw_vmci/vmci_event.c224
-rw-r--r--drivers/misc/vmw_vmci/vmci_event.h25
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c759
-rw-r--r--drivers/misc/vmw_vmci/vmci_handle_array.c142
-rw-r--r--drivers/misc/vmw_vmci/vmci_handle_array.h52
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c1043
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c3425
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.h191
-rw-r--r--drivers/misc/vmw_vmci/vmci_resource.c227
-rw-r--r--drivers/misc/vmw_vmci/vmci_resource.h59
-rw-r--r--drivers/misc/vmw_vmci/vmci_route.c226
-rw-r--r--drivers/misc/vmw_vmci/vmci_route.h30
-rw-r--r--drivers/mmc/card/Kconfig1
-rw-r--r--drivers/mmc/card/block.c491
-rw-r--r--drivers/mmc/card/queue.c128
-rw-r--r--drivers/mmc/card/queue.h25
-rw-r--r--drivers/mmc/card/sdio_uart.c13
-rw-r--r--drivers/mmc/core/bus.c1
-rw-r--r--drivers/mmc/core/core.c246
-rw-r--r--drivers/mmc/core/core.h6
-rw-r--r--drivers/mmc/core/host.c134
-rw-r--r--drivers/mmc/core/mmc.c46
-rw-r--r--drivers/mmc/core/mmc_ops.c1
-rw-r--r--drivers/mmc/core/sd.c44
-rw-r--r--drivers/mmc/core/sdio.c54
-rw-r--r--drivers/mmc/core/slot-gpio.c57
-rw-r--r--drivers/mmc/host/Kconfig20
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/android-goldfish.c570
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c10
-rw-r--r--drivers/mmc/host/dw_mmc.c73
-rw-r--r--drivers/mmc/host/mvsdio.c131
-rw-r--r--drivers/mmc/host/mxs-mmc.c3
-rw-r--r--drivers/mmc/host/of_mmc_spi.c2
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c23
-rw-r--r--drivers/mmc/host/sdhci-bcm2835.c210
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c230
-rw-r--r--drivers/mmc/host/sdhci-pci.c4
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c8
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c11
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c117
-rw-r--r--drivers/mmc/host/sdhci-s3c.c8
-rw-r--r--drivers/mmc/host/sdhci-tegra.c123
-rw-r--r--drivers/mmc/host/sdhci.c448
-rw-r--r--drivers/mmc/host/sdhci.h14
-rw-r--r--drivers/mmc/host/sh_mmcif.c281
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c96
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c87
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c2
-rw-r--r--drivers/mtd/Kconfig4
-rw-r--r--drivers/mtd/ar7part.c6
-rw-r--r--drivers/mtd/bcm47xxpart.c51
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c217
-rw-r--r--drivers/mtd/cmdlinepart.c47
-rw-r--r--drivers/mtd/devices/Makefile4
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c55
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.h15
-rw-r--r--drivers/mtd/devices/elm.c404
-rw-r--r--drivers/mtd/devices/m25p80.c100
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/physmap_of.c9
-rw-r--r--drivers/mtd/maps/uclinux.c30
-rw-r--r--drivers/mtd/mtdcore.c9
-rw-r--r--drivers/mtd/nand/atmel_nand.c141
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h4
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/main.c14
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c24
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c233
-rw-r--r--drivers/mtd/nand/fsmc_nand.c23
-rw-r--r--drivers/mtd/nand/gpmi-nand/bch-regs.h22
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c9
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c63
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h4
-rw-r--r--drivers/mtd/nand/mxc_nand.c11
-rw-r--r--drivers/mtd/nand/nand_base.c8
-rw-r--r--drivers/mtd/nand/nand_ecc.c5
-rw-r--r--drivers/mtd/nand/nandsim.c40
-rw-r--r--drivers/mtd/nand/omap2.c587
-rw-r--r--drivers/mtd/ofpart.c7
-rw-r--r--drivers/mtd/onenand/omap2.c4
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c12
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c49
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c43
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c9
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c11
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c42
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c25
-rw-r--r--drivers/mtd/ubi/cdev.c2
-rw-r--r--drivers/mtd/ubi/debug.h6
-rw-r--r--drivers/net/Kconfig6
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/caif/Kconfig2
-rw-r--r--drivers/net/caif/caif_serial.c2
-rw-r--r--drivers/net/can/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c4
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c58
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c4
-rw-r--r--drivers/net/ethernet/cadence/Kconfig1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec.c58
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c8
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c186
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c63
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/sun/Kconfig4
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c3
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c3
-rw-r--r--drivers/net/hamradio/Kconfig4
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/irda/Kconfig2
-rw-r--r--drivers/net/irda/ali-ircc.c2
-rw-r--r--drivers/net/irda/irtty-sir.c2
-rw-r--r--drivers/net/macvlan.c6
-rw-r--r--drivers/net/macvtap.c21
-rw-r--r--drivers/net/ntb_netdev.c408
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/ppp/Kconfig3
-rw-r--r--drivers/net/ppp/ppp_generic.c33
-rw-r--r--drivers/net/slip/Kconfig1
-rw-r--r--drivers/net/tun.c15
-rw-r--r--drivers/net/usb/Kconfig4
-rw-r--r--drivers/net/usb/hso.c32
-rw-r--r--drivers/net/usb/smsc95xx.c36
-rw-r--r--drivers/net/virtio_net.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c6
-rw-r--r--drivers/net/vxlan.c15
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/cosa.c4
-rw-r--r--drivers/net/wireless/b43/main.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c4
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c21
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c9
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/zd1201.c7
-rw-r--r--drivers/ntb/Kconfig13
-rw-r--r--drivers/ntb/Makefile3
-rw-r--r--drivers/ntb/ntb_hw.c1141
-rw-r--r--drivers/ntb/ntb_hw.h181
-rw-r--r--drivers/ntb/ntb_regs.h139
-rw-r--r--drivers/ntb/ntb_transport.c1441
-rw-r--r--drivers/of/address.c2
-rw-r--r--drivers/of/platform.c1
-rw-r--r--drivers/oprofile/oprofilefs.c16
-rw-r--r--drivers/parisc/Kconfig1
-rw-r--r--drivers/parisc/dino.c13
-rw-r--r--drivers/parisc/hppb.c6
-rw-r--r--drivers/parisc/led.c2
-rw-r--r--drivers/parisc/pdc_stable.c6
-rw-r--r--drivers/parisc/superio.c2
-rw-r--r--drivers/parport/Kconfig2
-rw-r--r--drivers/parport/parport_serial.c21
-rw-r--r--drivers/pci/access.c6
-rw-r--r--drivers/pci/bus.c81
-rw-r--r--drivers/pci/hotplug/acpiphp.h14
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c23
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c266
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c29
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c57
-rw-r--r--drivers/pci/hotplug/pciehp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c44
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c60
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c63
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c36
-rw-r--r--drivers/pci/iov.c10
-rw-r--r--drivers/pci/pci-acpi.c43
-rw-r--r--drivers/pci/pci-driver.c12
-rw-r--r--drivers/pci/pci.c66
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/pcie/aspm.c8
-rw-r--r--drivers/pci/pcie/portdrv_core.c2
-rw-r--r--drivers/pci/probe.c103
-rw-r--r--drivers/pci/proc.c10
-rw-r--r--drivers/pci/remove.c4
-rw-r--r--drivers/pci/search.c10
-rw-r--r--drivers/pci/setup-bus.c2
-rw-r--r--drivers/pcmcia/cs.c37
-rw-r--r--drivers/pcmcia/i82092.c8
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c6
-rw-r--r--drivers/pcmcia/vrc4171_card.c1
-rw-r--r--drivers/pinctrl/Kconfig2
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c4
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig116
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile21
-rw-r--r--drivers/pinctrl/sh-pfc/core.c (renamed from drivers/sh/pfc/core.c)355
-rw-r--r--drivers/pinctrl/sh-pfc/core.h72
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c (renamed from drivers/sh/pfc/gpio.c)114
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c2612
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c2624
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7203.c1592
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c2131
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c2834
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7372.c1658
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c2798
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7720.c1236
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7722.c1779
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7723.c1903
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7724.c2225
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c2475
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7757.c2282
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7785.c1304
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7786.c837
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-shx3.c582
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c (renamed from drivers/sh/pfc/pinctrl.c)164
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h195
-rw-r--r--drivers/platform/x86/Kconfig15
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c21
-rw-r--r--drivers/platform/x86/asus-laptop.c85
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c76
-rw-r--r--drivers/platform/x86/asus-wmi.c108
-rw-r--r--drivers/platform/x86/asus-wmi.h9
-rw-r--r--drivers/platform/x86/chromeos_laptop.c371
-rw-r--r--drivers/platform/x86/eeepc-wmi.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c117
-rw-r--r--drivers/platform/x86/msi-laptop.c374
-rw-r--r--drivers/platform/x86/msi-wmi.c224
-rw-r--r--drivers/platform/x86/sony-laptop.c147
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c19
-rw-r--r--drivers/platform/x86/toshiba_acpi.c8
-rw-r--r--drivers/pnp/isapnp/proc.c4
-rw-r--r--drivers/pnp/pnpbios/proc.c2
-rw-r--r--drivers/power/bq2415x_charger.c11
-rw-r--r--drivers/power/bq27x00_battery.c9
-rw-r--r--drivers/power/ds2782_battery.c9
-rw-r--r--drivers/pps/clients/Kconfig2
-rw-r--r--drivers/pps/clients/pps-gpio.c6
-rw-r--r--drivers/pps/clients/pps-ldisc.c30
-rw-r--r--drivers/pps/kapi.c2
-rw-r--r--drivers/pps/pps.c83
-rw-r--r--drivers/pwm/Kconfig18
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c52
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c445
-rw-r--r--drivers/pwm/pwm-tegra.c4
-rw-r--r--drivers/pwm/pwm-tiecap.c53
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c93
-rw-r--r--drivers/pwm/pwm-twl-led.c1
-rw-r--r--drivers/pwm/pwm-twl.c10
-rw-r--r--drivers/pwm/pwm-vt8500.c87
-rw-r--r--drivers/remoteproc/Kconfig2
-rw-r--r--drivers/remoteproc/remoteproc_core.c11
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c2
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c33
-rw-r--r--drivers/rtc/Kconfig54
-rw-r--r--drivers/rtc/Makefile6
-rw-r--r--drivers/rtc/class.c4
-rw-r--r--drivers/rtc/rtc-at91rm9200.c17
-rw-r--r--drivers/rtc/rtc-cmos.c15
-rw-r--r--drivers/rtc/rtc-coh901331.c7
-rw-r--r--drivers/rtc/rtc-da9052.c18
-rw-r--r--drivers/rtc/rtc-davinci.c28
-rw-r--r--drivers/rtc/rtc-dev.c11
-rw-r--r--drivers/rtc/rtc-ds1305.c8
-rw-r--r--drivers/rtc/rtc-ds1307.c11
-rw-r--r--drivers/rtc/rtc-ds2404.c18
-rw-r--r--drivers/rtc/rtc-efi.c10
-rw-r--r--drivers/rtc/rtc-fm3130.c33
-rw-r--r--drivers/rtc/rtc-imxdi.c4
-rw-r--r--drivers/rtc/rtc-isl12022.c2
-rw-r--r--drivers/rtc/rtc-lp8788.c338
-rw-r--r--drivers/rtc/rtc-max77686.c641
-rw-r--r--drivers/rtc/rtc-max8907.c6
-rw-r--r--drivers/rtc/rtc-max8997.c552
-rw-r--r--drivers/rtc/rtc-mpc5121.c5
-rw-r--r--drivers/rtc/rtc-palmas.c339
-rw-r--r--drivers/rtc/rtc-pcf8523.c31
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-pcf8583.c4
-rw-r--r--drivers/rtc/rtc-pl031.c2
-rw-r--r--drivers/rtc/rtc-pxa.c23
-rw-r--r--drivers/rtc/rtc-rs5c313.c5
-rw-r--r--drivers/rtc/rtc-rs5c372.c10
-rw-r--r--drivers/rtc/rtc-rx4581.c314
-rw-r--r--drivers/rtc/rtc-s3c.c18
-rw-r--r--drivers/rtc/rtc-sa1100.c15
-rw-r--r--drivers/rtc/rtc-snvs.c2
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c67
-rw-r--r--drivers/rtc/rtc-sun4v.c10
-rw-r--r--drivers/rtc/rtc-tps6586x.c4
-rw-r--r--drivers/rtc/rtc-tps65910.c44
-rw-r--r--drivers/rtc/rtc-tps80031.c349
-rw-r--r--drivers/rtc/rtc-twl.c6
-rw-r--r--drivers/rtc/rtc-vr41xx.c2
-rw-r--r--drivers/rtc/rtc-vt8500.c28
-rw-r--r--drivers/rtc/rtc-wm831x.c9
-rw-r--r--drivers/s390/block/dasd.c23
-rw-r--r--drivers/s390/block/dasd_3990_erp.c8
-rw-r--r--drivers/s390/block/dasd_alias.c4
-rw-r--r--drivers/s390/block/dasd_diag.c10
-rw-r--r--drivers/s390/block/dasd_eckd.c30
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_erp.c4
-rw-r--r--drivers/s390/block/dasd_fba.c2
-rw-r--r--drivers/s390/block/scm_blk.h41
-rw-r--r--drivers/s390/char/Kconfig8
-rw-r--r--drivers/s390/char/con3215.c12
-rw-r--r--drivers/s390/char/fs3270.c33
-rw-r--r--drivers/s390/char/keyboard.h16
-rw-r--r--drivers/s390/char/raw3270.c611
-rw-r--r--drivers/s390/char/raw3270.h12
-rw-r--r--drivers/s390/char/sclp.c4
-rw-r--r--drivers/s390/char/sclp_cmd.c10
-rw-r--r--drivers/s390/char/sclp_tty.c14
-rw-r--r--drivers/s390/char/sclp_vt220.c12
-rw-r--r--drivers/s390/char/tape_char.c8
-rw-r--r--drivers/s390/char/tty3270.c191
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/char/zcore.c64
-rw-r--r--drivers/s390/cio/chsc.c68
-rw-r--r--drivers/s390/cio/chsc.h2
-rw-r--r--drivers/s390/cio/cio.c4
-rw-r--r--drivers/s390/cio/cmf.c6
-rw-r--r--drivers/s390/cio/css.c2
-rw-r--r--drivers/s390/cio/device.c10
-rw-r--r--drivers/s390/cio/device_fsm.c2
-rw-r--r--drivers/s390/cio/device_pgid.c123
-rw-r--r--drivers/s390/cio/io_sch.h5
-rw-r--r--drivers/s390/cio/qdio_debug.c4
-rw-r--r--drivers/s390/cio/qdio_main.c12
-rw-r--r--drivers/s390/kvm/Makefile2
-rw-r--r--drivers/s390/kvm/kvm_virtio.c40
-rw-r--r--drivers/s390/kvm/virtio_ccw.c926
-rw-r--r--drivers/s390/net/qeth_core.h2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c2
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/scsi/3w-9xxx.c2
-rw-r--r--drivers/scsi/3w-sas.c2
-rw-r--r--drivers/scsi/3w-xxxx.c2
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/aacraid/aacraid.h8
-rw-r--r--drivers/scsi/aacraid/comminit.c11
-rw-r--r--drivers/scsi/aacraid/src.c4
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.c15
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h27
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c277
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c29
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c8
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c95
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/ch.c21
-rw-r--r--drivers/scsi/csiostor/csio_hw.c15
-rw-r--r--drivers/scsi/csiostor/csio_init.c11
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c1
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/dpt_i2o.c4
-rw-r--r--drivers/scsi/fcoe/fcoe.c266
-rw-r--r--drivers/scsi/fcoe/fcoe.h6
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c45
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c186
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c199
-rw-r--r--drivers/scsi/fcoe/libfcoe.h20
-rw-r--r--drivers/scsi/fnic/Makefile2
-rw-r--r--drivers/scsi/fnic/fnic.h60
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c314
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c6
-rw-r--r--drivers/scsi/fnic/fnic_io.h6
-rw-r--r--drivers/scsi/fnic/fnic_main.c17
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c721
-rw-r--r--drivers/scsi/fnic/fnic_trace.c273
-rw-r--r--drivers/scsi/fnic/fnic_trace.h90
-rw-r--r--drivers/scsi/gdth.c10
-rw-r--r--drivers/scsi/hpsa.c117
-rw-r--r--drivers/scsi/ipr.c1325
-rw-r--r--drivers/scsi/ipr.h102
-rw-r--r--drivers/scsi/libfc/fc_fcp.c6
-rw-r--r--drivers/scsi/libfc/fc_libfc.h38
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c51
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c37
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h176
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c261
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c434
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c27
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c5
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c1
-rw-r--r--drivers/scsi/mvsas/mv_sas.c10
-rw-r--r--drivers/scsi/mvsas/mv_sas.h1
-rw-r--r--drivers/scsi/osd/osd_initiator.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c35
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c199
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h54
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c200
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h28
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c39
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c82
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c207
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c147
-rw-r--r--drivers/scsi/qla2xxx/qla_settings.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c169
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h19
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c55
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c16
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h10
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h6
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h6
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c17
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c68
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c67
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c14
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c160
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c14
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c12
-rw-r--r--drivers/scsi/sg.c43
-rw-r--r--drivers/scsi/sr.c46
-rw-r--r--drivers/scsi/st.c29
-rw-r--r--drivers/scsi/storvsc_drv.c149
-rw-r--r--drivers/scsi/ufs/Kconfig74
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/ufs.h44
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c211
-rw-r--r--drivers/scsi/ufs/ufshcd.c423
-rw-r--r--drivers/scsi/ufs/ufshcd.h202
-rw-r--r--drivers/scsi/ufs/ufshci.h44
-rw-r--r--drivers/sh/Kconfig1
-rw-r--r--drivers/sh/Makefile1
-rw-r--r--drivers/sh/pfc/Kconfig26
-rw-r--r--drivers/sh/pfc/Makefile3
-rw-r--r--drivers/spi/spi-tegra20-sflash.c4
-rw-r--r--drivers/spi/spi-tegra20-slink.c4
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c4
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/binder.c19
-rw-r--r--drivers/staging/android/binder.h2
-rw-r--r--drivers/staging/bcm/Misc.c2
-rw-r--r--drivers/staging/ccg/Kconfig2
-rw-r--r--drivers/staging/ccg/f_mass_storage.c2
-rw-r--r--drivers/staging/ccg/rndis.c2
-rw-r--r--drivers/staging/ccg/storage_common.c2
-rw-r--r--drivers/staging/ccg/u_serial.c13
-rw-r--r--drivers/staging/dgrp/Kconfig2
-rw-r--r--drivers/staging/dgrp/dgrp_net_ops.c13
-rw-r--r--drivers/staging/dgrp/dgrp_specproc.c4
-rw-r--r--drivers/staging/dgrp/dgrp_tty.c1
-rw-r--r--drivers/staging/fwserial/Kconfig2
-rw-r--r--drivers/staging/fwserial/fwserial.c51
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/as102/as102_usb_drv.c4
-rw-r--r--drivers/staging/media/as102/as10x_cmd_cfg.c2
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c29
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.h2
-rw-r--r--drivers/staging/media/davinci_vpfe/Kconfig9
-rw-r--r--drivers/staging/media/davinci_vpfe/Makefile3
-rw-r--r--drivers/staging/media/davinci_vpfe/TODO37
-rw-r--r--drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt154
-rw-r--r--drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h1290
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c1863
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.h179
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c1048
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h559
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c1071
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.h233
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h93
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c2104
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.h203
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif_regs.h294
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c1999
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.h244
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe.h86
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c740
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h97
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c1620
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.h155
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c2
-rw-r--r--drivers/staging/media/go7007/go7007-driver.c15
-rw-r--r--drivers/staging/media/go7007/go7007-fw.c24
-rw-r--r--drivers/staging/media/go7007/go7007-i2c.c10
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c5
-rw-r--r--drivers/staging/media/go7007/go7007-v4l2.c11
-rw-r--r--drivers/staging/media/go7007/s2250-board.c32
-rw-r--r--drivers/staging/media/go7007/s2250-loader.c38
-rw-r--r--drivers/staging/media/go7007/wis-saa7113.c2
-rw-r--r--drivers/staging/media/go7007/wis-sony-tuner.c86
-rw-r--r--drivers/staging/media/go7007/wis-tw2804.c24
-rw-r--r--drivers/staging/media/go7007/wis-tw9903.c12
-rw-r--r--drivers/staging/media/go7007/wis-uda1342.c7
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c15
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c12
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c31
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c49
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c73
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c70
-rw-r--r--drivers/staging/media/lirc/lirc_sir.c36
-rw-r--r--drivers/staging/media/solo6x10/p2m.c8
-rw-r--r--drivers/staging/media/solo6x10/v4l2-enc.c4
-rw-r--r--drivers/staging/media/solo6x10/v4l2.c5
-rw-r--r--drivers/staging/nvec/TODO4
-rw-r--r--drivers/staging/nvec/nvec.c5
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/staging/omapdrm/TODO32
-rw-r--r--drivers/staging/omapdrm/omap_drm.h123
-rw-r--r--drivers/staging/sb105x/Kconfig3
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c41
-rw-r--r--drivers/staging/speakup/selection.c1
-rw-r--r--drivers/staging/tidspbridge/Kconfig2
-rw-r--r--drivers/staging/usbip/usbip_common.c2
-rw-r--r--drivers/staging/vme/devices/vme_user.c8
-rw-r--r--drivers/staging/zcache/zbud.c2
-rw-r--r--drivers/staging/zsmalloc/zsmalloc-main.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c91
-rw-r--r--drivers/target/iscsi/iscsi_target.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c15
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c8
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c25
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c39
-rw-r--r--drivers/target/sbp/sbp_target.c4
-rw-r--r--drivers/target/target_core_alua.c35
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_device.c58
-rw-r--r--drivers/target/target_core_fabric_configfs.c12
-rw-r--r--drivers/target/target_core_file.c118
-rw-r--r--drivers/target/target_core_iblock.c48
-rw-r--r--drivers/target/target_core_internal.h3
-rw-r--r--drivers/target/target_core_pr.c27
-rw-r--r--drivers/target/target_core_pscsi.c9
-rw-r--r--drivers/target/target_core_rd.c18
-rw-r--r--drivers/target/target_core_sbc.c8
-rw-r--r--drivers/target/target_core_spc.c42
-rw-r--r--drivers/target/target_core_tmr.c12
-rw-r--r--drivers/target/target_core_tpg.c10
-rw-r--r--drivers/target/target_core_transport.c19
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c12
-rw-r--r--drivers/thermal/Kconfig55
-rw-r--r--drivers/thermal/Makefile10
-rw-r--r--drivers/thermal/cpu_cooling.c21
-rw-r--r--drivers/thermal/db8500_cpufreq_cooling.c5
-rw-r--r--drivers/thermal/db8500_thermal.c4
-rw-r--r--drivers/thermal/dove_thermal.c209
-rw-r--r--drivers/thermal/exynos_thermal.c211
-rw-r--r--drivers/thermal/intel_powerclamp.c795
-rw-r--r--drivers/thermal/kirkwood_thermal.c134
-rw-r--r--drivers/thermal/rcar_thermal.c490
-rw-r--r--drivers/thermal/spear_thermal.c7
-rw-r--r--drivers/thermal/step_wise.c122
-rw-r--r--drivers/thermal/thermal_sys.c122
-rw-r--r--drivers/tty/Kconfig32
-rw-r--r--drivers/tty/Makefile4
-rw-r--r--drivers/tty/amiserial.c18
-rw-r--r--drivers/tty/bfin_jtag_comm.c22
-rw-r--r--drivers/tty/cyclades.c297
-rw-r--r--drivers/tty/ehv_bytechan.c13
-rw-r--r--drivers/tty/goldfish.c328
-rw-r--r--drivers/tty/hvc/Kconfig3
-rw-r--r--drivers/tty/hvc/hvc_console.c6
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/hvc/hvcs.c6
-rw-r--r--drivers/tty/hvc/hvsi.c32
-rw-r--r--drivers/tty/ipwireless/hardware.c4
-rw-r--r--drivers/tty/ipwireless/tty.c12
-rw-r--r--drivers/tty/isicom.c12
-rw-r--r--drivers/tty/metag_da.c677
-rw-r--r--drivers/tty/moxa.c10
-rw-r--r--drivers/tty/mxser.c50
-rw-r--r--drivers/tty/n_gsm.c120
-rw-r--r--drivers/tty/n_tty.c59
-rw-r--r--drivers/tty/nozomi.c37
-rw-r--r--drivers/tty/pty.c28
-rw-r--r--drivers/tty/rocket.c60
-rw-r--r--drivers/tty/serial/21285.c3
-rw-r--r--drivers/tty/serial/68328serial.c19
-rw-r--r--drivers/tty/serial/8250/8250.c132
-rw-r--r--drivers/tty/serial/8250/8250.h50
-rw-r--r--drivers/tty/serial/8250/8250_dma.c216
-rw-r--r--drivers/tty/serial/8250/8250_dw.c257
-rw-r--r--drivers/tty/serial/8250/8250_early.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c342
-rw-r--r--drivers/tty/serial/8250/Kconfig27
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig51
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/altera_jtaguart.c8
-rw-r--r--drivers/tty/serial/altera_uart.c8
-rw-r--r--drivers/tty/serial/amba-pl010.c3
-rw-r--r--drivers/tty/serial/amba-pl011.c11
-rw-r--r--drivers/tty/serial/apbuart.c3
-rw-r--r--drivers/tty/serial/ar933x_uart.c15
-rw-r--r--drivers/tty/serial/arc_uart.c104
-rw-r--r--drivers/tty/serial/atmel_serial.c9
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c9
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c12
-rw-r--r--drivers/tty/serial/bfin_uart.c10
-rw-r--r--drivers/tty/serial/clps711x.c8
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c10
-rw-r--r--drivers/tty/serial/crisv10.c35
-rw-r--r--drivers/tty/serial/dz.c4
-rw-r--r--drivers/tty/serial/efm32-uart.c52
-rw-r--r--drivers/tty/serial/icom.c10
-rw-r--r--drivers/tty/serial/ifx6x60.c11
-rw-r--r--drivers/tty/serial/imx.c282
-rw-r--r--drivers/tty/serial/ioc3_serial.c11
-rw-r--r--drivers/tty/serial/ioc4_serial.c13
-rw-r--r--drivers/tty/serial/ip22zilog.c30
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c18
-rw-r--r--drivers/tty/serial/kgdb_nmi.c13
-rw-r--r--drivers/tty/serial/lantiq.c20
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c30
-rw-r--r--drivers/tty/serial/m32r_sio.c8
-rw-r--r--drivers/tty/serial/max3100.c10
-rw-r--r--drivers/tty/serial/max310x.c8
-rw-r--r--drivers/tty/serial/mcf.c73
-rw-r--r--drivers/tty/serial/mfd.c15
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c8
-rw-r--r--drivers/tty/serial/mpsc.c15
-rw-r--r--drivers/tty/serial/mrst_max3110.c19
-rw-r--r--drivers/tty/serial/msm_serial.c16
-rw-r--r--drivers/tty/serial/msm_serial_hs.c19
-rw-r--r--drivers/tty/serial/msm_smd_tty.c4
-rw-r--r--drivers/tty/serial/mux.c9
-rw-r--r--drivers/tty/serial/mxs-auart.c11
-rw-r--r--drivers/tty/serial/netx-serial.c4
-rw-r--r--drivers/tty/serial/nwpserial.c6
-rw-r--r--drivers/tty/serial/of_serial.c7
-rw-r--r--drivers/tty/serial/omap-serial.c51
-rw-r--r--drivers/tty/serial/pch_uart.c90
-rw-r--r--drivers/tty/serial/pmac_zilog.c36
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c3
-rw-r--r--drivers/tty/serial/pxa.c17
-rw-r--r--drivers/tty/serial/rp2.c885
-rw-r--r--drivers/tty/serial/sa1100.c3
-rw-r--r--drivers/tty/serial/samsung.c15
-rw-r--r--drivers/tty/serial/sb1250-duart.c2
-rw-r--r--drivers/tty/serial/sc26xx.c29
-rw-r--r--drivers/tty/serial/sccnxp.c171
-rw-r--r--drivers/tty/serial/serial-tegra.c1401
-rw-r--r--drivers/tty/serial/serial_core.c94
-rw-r--r--drivers/tty/serial/serial_ks8695.c3
-rw-r--r--drivers/tty/serial/serial_txx9.c3
-rw-r--r--drivers/tty/serial/sh-sci.c52
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c55
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h3
-rw-r--r--drivers/tty/serial/sn_console.c16
-rw-r--r--drivers/tty/serial/sunhv.c33
-rw-r--r--drivers/tty/serial/sunsab.c28
-rw-r--r--drivers/tty/serial/sunsu.c18
-rw-r--r--drivers/tty/serial/sunzilog.c39
-rw-r--r--drivers/tty/serial/timbuart.c6
-rw-r--r--drivers/tty/serial/uartlite.c119
-rw-r--r--drivers/tty/serial/ucc_uart.c10
-rw-r--r--drivers/tty/serial/vr41xx_siu.c4
-rw-r--r--drivers/tty/serial/vt8500_serial.c52
-rw-r--r--drivers/tty/serial/xilinx_uartps.c50
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/tty/synclink.c50
-rw-r--r--drivers/tty/synclink_gt.c61
-rw-r--r--drivers/tty/synclinkmp.c103
-rw-r--r--drivers/tty/sysrq.c15
-rw-r--r--drivers/tty/tty_buffer.c131
-rw-r--r--drivers/tty/tty_io.c11
-rw-r--r--drivers/tty/tty_ioctl.c12
-rw-r--r--drivers/tty/tty_ldisc.c14
-rw-r--r--drivers/tty/vt/Makefile4
-rw-r--r--drivers/tty/vt/keyboard.c25
-rw-r--r--drivers/tty/vt/vc_screen.c8
-rw-r--r--drivers/tty/vt/vt.c154
-rw-r--r--drivers/uio/Kconfig1
-rw-r--r--drivers/uio/uio.c19
-rw-r--r--drivers/usb/chipidea/debug.c2
-rw-r--r--drivers/usb/class/Kconfig2
-rw-r--r--drivers/usb/class/cdc-acm.c13
-rw-r--r--drivers/usb/core/devices.c4
-rw-r--r--drivers/usb/core/devio.c6
-rw-r--r--drivers/usb/core/hub.c13
-rw-r--r--drivers/usb/gadget/Kconfig6
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c8
-rw-r--r--drivers/usb/gadget/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/gadget/rndis.c2
-rw-r--r--drivers/usb/gadget/storage_common.c2
-rw-r--r--drivers/usb/gadget/u_serial.c15
-rw-r--r--drivers/usb/host/ehci-omap.c6
-rw-r--r--drivers/usb/host/ehci-tegra.c97
-rw-r--r--drivers/usb/phy/tegra_usb_phy.c132
-rw-r--r--drivers/usb/serial/Kconfig2
-rw-r--r--drivers/usb/serial/aircable.c17
-rw-r--r--drivers/usb/serial/ark3116.c12
-rw-r--r--drivers/usb/serial/belkin_sa.c12
-rw-r--r--drivers/usb/serial/cyberjack.c11
-rw-r--r--drivers/usb/serial/cypress_m8.c6
-rw-r--r--drivers/usb/serial/digi_acceleport.c14
-rw-r--r--drivers/usb/serial/f81232.c15
-rw-r--r--drivers/usb/serial/ftdi_sio.c21
-rw-r--r--drivers/usb/serial/garmin_gps.c9
-rw-r--r--drivers/usb/serial/generic.c12
-rw-r--r--drivers/usb/serial/io_edgeport.c39
-rw-r--r--drivers/usb/serial/io_ti.c32
-rw-r--r--drivers/usb/serial/ir-usb.c9
-rw-r--r--drivers/usb/serial/iuu_phoenix.c9
-rw-r--r--drivers/usb/serial/keyspan.c60
-rw-r--r--drivers/usb/serial/keyspan_pda.c9
-rw-r--r--drivers/usb/serial/kl5kusb105.c10
-rw-r--r--drivers/usb/serial/kobil_sct.c9
-rw-r--r--drivers/usb/serial/mct_u232.c11
-rw-r--r--drivers/usb/serial/metro-usb.c9
-rw-r--r--drivers/usb/serial/mos7720.c9
-rw-r--r--drivers/usb/serial/mos7840.c10
-rw-r--r--drivers/usb/serial/navman.c9
-rw-r--r--drivers/usb/serial/omninet.c10
-rw-r--r--drivers/usb/serial/opticon.c11
-rw-r--r--drivers/usb/serial/oti6858.c9
-rw-r--r--drivers/usb/serial/pl2303.c15
-rw-r--r--drivers/usb/serial/quatech2.c29
-rw-r--r--drivers/usb/serial/safe_serial.c15
-rw-r--r--drivers/usb/serial/sierra.c17
-rw-r--r--drivers/usb/serial/spcp8x5.c24
-rw-r--r--drivers/usb/serial/ssu100.c31
-rw-r--r--drivers/usb/serial/symbolserial.c9
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c44
-rw-r--r--drivers/usb/serial/usb_wwan.c17
-rw-r--r--drivers/vfio/pci/Kconfig10
-rw-r--r--drivers/vfio/pci/vfio_pci.c75
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c52
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h19
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c281
-rw-r--r--drivers/vfio/vfio.c52
-rw-r--r--drivers/vhost/tcm_vhost.c287
-rw-r--r--drivers/vhost/tcm_vhost.h8
-rw-r--r--drivers/video/Kconfig41
-rw-r--r--drivers/video/Makefile7
-rw-r--r--drivers/video/auo_k190x.c1
-rw-r--r--drivers/video/backlight/88pm860x_bl.c5
-rw-r--r--drivers/video/backlight/Kconfig30
-rw-r--r--drivers/video/backlight/Makefile90
-rw-r--r--drivers/video/backlight/aat2870_bl.c2
-rw-r--r--drivers/video/backlight/adp8860_bl.c2
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/ams369fg06.c106
-rw-r--r--drivers/video/backlight/as3711_bl.c380
-rw-r--r--drivers/video/backlight/corgi_lcd.c18
-rw-r--r--drivers/video/backlight/hx8357.c497
-rw-r--r--drivers/video/backlight/l4f00242t03.c36
-rw-r--r--drivers/video/backlight/ld9040.c109
-rw-r--r--drivers/video/backlight/lm3630_bl.c4
-rw-r--r--drivers/video/backlight/lm3639_bl.c5
-rw-r--r--drivers/video/backlight/lms283gf05.c4
-rw-r--r--drivers/video/backlight/lms501kf03.c441
-rw-r--r--drivers/video/backlight/lp855x_bl.c169
-rw-r--r--drivers/video/backlight/lp8788_bl.c333
-rw-r--r--drivers/video/backlight/ltv350qv.c10
-rw-r--r--drivers/video/backlight/max8925_bl.c31
-rw-r--r--drivers/video/backlight/omap1_bl.c10
-rw-r--r--drivers/video/backlight/ot200_bl.c1
-rw-r--r--drivers/video/backlight/pwm_bl.c28
-rw-r--r--drivers/video/backlight/s6e63m0.c153
-rw-r--r--drivers/video/backlight/tdo24m.c10
-rw-r--r--drivers/video/backlight/tosa_bl.c2
-rw-r--r--drivers/video/backlight/tosa_lcd.c12
-rw-r--r--drivers/video/backlight/vgg2432a4.c9
-rw-r--r--drivers/video/clps711xfb.c1
-rw-r--r--drivers/video/console/fbcon.c68
-rw-r--r--drivers/video/console/vgacon.c22
-rw-r--r--drivers/video/display_timing.c24
-rw-r--r--drivers/video/exynos/exynos_dp_core.c23
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi.c70
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_common.c1
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_lowlevel.c1
-rw-r--r--drivers/video/exynos/s6e8ax0.c14
-rw-r--r--drivers/video/fb_defio.c2
-rw-r--r--drivers/video/fbmem.c13
-rw-r--r--drivers/video/fbmon.c94
-rw-r--r--drivers/video/fbsysfs.c3
-rw-r--r--drivers/video/fsl-diu-fb.c64
-rw-r--r--drivers/video/goldfishfb.c318
-rw-r--r--drivers/video/hdmi.c308
-rw-r--r--drivers/video/mmp/Kconfig11
-rw-r--r--drivers/video/mmp/Makefile1
-rw-r--r--drivers/video/mmp/core.c258
-rw-r--r--drivers/video/mmp/fb/Kconfig13
-rw-r--r--drivers/video/mmp/fb/Makefile1
-rw-r--r--drivers/video/mmp/fb/mmpfb.c685
-rw-r--r--drivers/video/mmp/fb/mmpfb.h54
-rw-r--r--drivers/video/mmp/hw/Kconfig20
-rw-r--r--drivers/video/mmp/hw/Makefile2
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.c591
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.h1974
-rw-r--r--drivers/video/mmp/hw/mmp_spi.c180
-rw-r--r--drivers/video/mmp/panel/Kconfig6
-rw-r--r--drivers/video/mmp/panel/Makefile1
-rw-r--r--drivers/video/mmp/panel/tpo_tj032md01bw.c186
-rw-r--r--drivers/video/msm/mdp.c2
-rw-r--r--drivers/video/mx3fb.c2
-rw-r--r--drivers/video/of_display_timing.c239
-rw-r--r--drivers/video/of_videomode.c54
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c24
-rw-r--r--drivers/video/via/hw.c6
-rw-r--r--drivers/video/via/hw.h2
-rw-r--r--drivers/video/via/lcd.c2
-rw-r--r--drivers/video/via/share.h2
-rw-r--r--drivers/video/via/via_modesetting.c8
-rw-r--r--drivers/video/via/via_modesetting.h6
-rw-r--r--drivers/video/videomode.c39
-rw-r--r--drivers/virtio/virtio_balloon.c13
-rw-r--r--drivers/virtio/virtio_mmio.c4
-rw-r--r--drivers/virtio/virtio_pci.c8
-rw-r--r--drivers/vme/vme.c1
-rw-r--r--drivers/w1/masters/ds1wm.c52
-rw-r--r--drivers/w1/masters/ds2482.c51
-rw-r--r--drivers/w1/masters/mxc_w1.c58
-rw-r--r--drivers/w1/masters/w1-gpio.c2
-rw-r--r--drivers/w1/slaves/Kconfig13
-rw-r--r--drivers/w1/slaves/Makefile3
-rw-r--r--drivers/w1/slaves/w1_ds2413.c177
-rw-r--r--drivers/w1/slaves/w1_therm.c36
-rw-r--r--drivers/w1/w1_family.h1
-rw-r--r--drivers/watchdog/Kconfig47
-rw-r--r--drivers/watchdog/Makefile4
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c9
-rw-r--r--drivers/watchdog/at91sam9_wdt.c168
-rw-r--r--drivers/watchdog/ath79_wdt.c66
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c339
-rw-r--r--drivers/watchdog/booke_wdt.c185
-rw-r--r--drivers/watchdog/cpwd.c4
-rw-r--r--drivers/watchdog/davinci_wdt.c29
-rw-r--r--drivers/watchdog/gef_wdt.c1
-rw-r--r--drivers/watchdog/omap_wdt.c6
-rw-r--r--drivers/watchdog/orion_wdt.c11
-rw-r--r--drivers/watchdog/pnx4008_wdt.c7
-rw-r--r--drivers/watchdog/retu_wdt.c178
-rw-r--r--drivers/watchdog/s3c2410_wdt.c48
-rw-r--r--drivers/watchdog/sp5100_tco.c27
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c111
-rw-r--r--drivers/watchdog/stmp3xxx_wdt.c288
-rw-r--r--drivers/watchdog/ux500_wdt.c171
-rw-r--r--drivers/watchdog/watchdog_core.c66
-rw-r--r--drivers/watchdog/watchdog_dev.c3
-rw-r--r--drivers/xen/Kconfig34
-rw-r--r--drivers/xen/Makefile3
-rw-r--r--drivers/xen/events.c115
-rw-r--r--drivers/xen/evtchn.c14
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/pcpu.c35
-rw-r--r--drivers/xen/swiotlb-xen.c4
-rw-r--r--drivers/xen/tmem.c2
-rw-r--r--drivers/xen/xen-acpi-cpuhotplug.c471
-rw-r--r--drivers/xen/xen-acpi-memhotplug.c483
-rw-r--r--drivers/xen/xen-stub.c101
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c2
-rw-r--r--drivers/xen/xenfs/super.c66
-rw-r--r--drivers/zorro/proc.c4
2267 files changed, 183834 insertions, 45354 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 2b4e89ba15ad..202fa6d051b9 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -152,6 +152,8 @@ source "drivers/memory/Kconfig"
 
 source "drivers/iio/Kconfig"
 
+source "drivers/ntb/Kconfig"
+
 source "drivers/vme/Kconfig"
 
 source "drivers/pwm/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index a8d32f1094b4..dce39a95fa71 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_PNP) += pnp/
obj-y += amba/
# Many drivers will want to use DMA so this has to be made available
# really early.
-obj-$(CONFIG_DMA_ENGINE) += dma/
+obj-$(CONFIG_DMADEVICES) += dma/
obj-$(CONFIG_VIRTIO) += virtio/
obj-$(CONFIG_XEN) += xen/
@@ -147,3 +147,4 @@ obj-$(CONFIG_MEMORY) += memory/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IPACK_BUS) += ipack/
+obj-$(CONFIG_NTB) += ntb/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 1a4ed64586a7..92ed9692c47e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -266,7 +266,8 @@ config ACPI_CUSTOM_DSDT
default ACPI_CUSTOM_DSDT_FILE != ""
config ACPI_INITRD_TABLE_OVERRIDE
- bool "ACPI tables can be passed via uncompressed cpio in initrd"
+ bool "ACPI tables override via initrd"
+ depends on BLK_DEV_INITRD && X86
default n
help
This option provides functionality to override arbitrary ACPI tables
@@ -306,7 +307,7 @@ config ACPI_DEBUG_FUNC_TRACE
is about half of the penalty and is rarely useful.
config ACPI_PCI_SLOT
- tristate "PCI slot detection driver"
+ bool "PCI slot detection driver"
depends on SYSFS
default n
help
@@ -315,9 +316,6 @@ config ACPI_PCI_SLOT
i.e., segment/bus/device/function tuples, with physical slots in
the system. If you are unsure, say N.
- To compile this driver as a module, choose M here:
- the module will be called pci_slot.
-
config X86_PM_TIMER
bool "Power Management Timer Support" if EXPERT
depends on X86
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 034d3e72aa92..da1f82b445e0 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -280,9 +280,11 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
{
- int result = 0;
+ int result = 0, nid;
struct acpi_memory_info *info, *n;
+ nid = acpi_get_node(mem_device->device->handle);
+
list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
if (info->failed)
/* The kernel does not use this memory block */
@@ -295,7 +297,9 @@ static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
*/
return -EBUSY;
- result = remove_memory(info->start_addr, info->length);
+ if (nid < 0)
+ nid = memory_add_physaddr_to_nid(info->start_addr);
+ result = remove_memory(nid, info->start_addr, info->length);
if (result)
return result;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 7ae2750bb457..d668a8ae602b 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -48,8 +48,8 @@
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/aer.h>
-#include <acpi/apei.h>
-#include <acpi/hed.h>
+
+#include <acpi/ghes.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>
#include <asm/nmi.h>
@@ -84,42 +84,6 @@
((struct acpi_hest_generic_status *) \
((struct ghes_estatus_node *)(estatus_node) + 1))
-/*
- * One struct ghes is created for each generic hardware error source.
- * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
- * handler.
- *
- * estatus: memory buffer for error status block, allocated during
- * HEST parsing.
- */
-#define GHES_TO_CLEAR 0x0001
-#define GHES_EXITING 0x0002
-
-struct ghes {
- struct acpi_hest_generic *generic;
- struct acpi_hest_generic_status *estatus;
- u64 buffer_paddr;
- unsigned long flags;
- union {
- struct list_head list;
- struct timer_list timer;
- unsigned int irq;
- };
-};
-
-struct ghes_estatus_node {
- struct llist_node llnode;
- struct acpi_hest_generic *generic;
-};
-
-struct ghes_estatus_cache {
- u32 estatus_len;
- atomic_t count;
- struct acpi_hest_generic *generic;
- unsigned long long time_in;
- struct rcu_head rcu;
-};
-
bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);
@@ -333,13 +297,6 @@ static void ghes_fini(struct ghes *ghes)
apei_unmap_generic_address(&ghes->generic->error_status_address);
}
-enum {
- GHES_SEV_NO = 0x0,
- GHES_SEV_CORRECTED = 0x1,
- GHES_SEV_RECOVERABLE = 0x2,
- GHES_SEV_PANIC = 0x3,
-};
-
static inline int ghes_severity(int severity)
{
switch (severity) {
@@ -452,7 +409,8 @@ static void ghes_clear_estatus(struct ghes *ghes)
ghes->flags &= ~GHES_TO_CLEAR;
}
-static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
+static void ghes_do_proc(struct ghes *ghes,
+ const struct acpi_hest_generic_status *estatus)
{
int sev, sec_sev;
struct acpi_hest_generic_data *gdata;
@@ -464,6 +422,8 @@ static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err;
mem_err = (struct cper_sec_mem_err *)(gdata+1);
+ ghes_edac_report_mem_error(ghes, sev, mem_err);
+
#ifdef CONFIG_X86_MCE
apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
mem_err);
@@ -682,7 +642,7 @@ static int ghes_proc(struct ghes *ghes)
if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
ghes_estatus_cache_add(ghes->generic, ghes->estatus);
}
- ghes_do_proc(ghes->estatus);
+ ghes_do_proc(ghes, ghes->estatus);
out:
ghes_clear_estatus(ghes);
return 0;
@@ -775,7 +735,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
len = apei_estatus_len(estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
- ghes_do_proc(estatus);
+ ghes_do_proc(estatus_node->ghes, estatus);
if (!ghes_estatus_cached(estatus)) {
generic = estatus_node->generic;
if (ghes_print_estatus(NULL, generic, estatus))
@@ -864,6 +824,7 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
node_len);
if (estatus_node) {
+ estatus_node->ghes = ghes;
estatus_node->generic = ghes->generic;
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
memcpy(estatus, ghes->estatus, len);
@@ -942,6 +903,11 @@ static int ghes_probe(struct platform_device *ghes_dev)
ghes = NULL;
goto err;
}
+
+ rc = ghes_edac_register(ghes, &ghes_dev->dev);
+ if (rc < 0)
+ goto err;
+
switch (generic->notify.type) {
case ACPI_HEST_NOTIFY_POLLED:
ghes->timer.function = ghes_poll_func;
@@ -954,13 +920,13 @@ static int ghes_probe(struct platform_device *ghes_dev)
if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
generic->header.source_id);
- goto err;
+ goto err_edac_unreg;
}
if (request_irq(ghes->irq, ghes_irq_func,
0, "GHES IRQ", ghes)) {
pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
generic->header.source_id);
- goto err;
+ goto err_edac_unreg;
}
break;
case ACPI_HEST_NOTIFY_SCI:
@@ -986,6 +952,8 @@ static int ghes_probe(struct platform_device *ghes_dev)
platform_set_drvdata(ghes_dev, ghes);
return 0;
+err_edac_unreg:
+ ghes_edac_unregister(ghes);
err:
if (ghes) {
ghes_fini(ghes);
@@ -1038,6 +1006,9 @@ static int ghes_remove(struct platform_device *ghes_dev)
}
ghes_fini(ghes);
+
+ ghes_edac_unregister(ghes);
+
kfree(ghes);
platform_set_drvdata(ghes_dev, NULL);
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 7f00cf38098f..f5ef5d54e4ac 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -89,7 +89,7 @@ int apei_hest_parse(apei_hest_func_t func, void *data)
struct acpi_hest_header *hest_hdr;
int i, rc, len;
- if (hest_disable)
+ if (hest_disable || !hest_tab)
return -EINVAL;
hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
@@ -216,9 +216,6 @@ void __init acpi_hest_init(void)
return;
}
- if (acpi_disabled)
- goto err;
-
status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
if (status == AE_NOT_FOUND)
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index 6adfc706a1de..12b62f2cdb3f 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -66,7 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
buf = NULL;
if (ACPI_FAILURE(status))
return -EINVAL;
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
return count;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 79092328cf06..3c94a732b4b3 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -25,8 +25,14 @@
int init_acpi_device_notify(void);
int acpi_scan_init(void);
+#ifdef CONFIG_ACPI_PCI_SLOT
+void acpi_pci_slot_init(void);
+#else
+static inline void acpi_pci_slot_init(void) { }
+#endif
void acpi_pci_root_init(void);
void acpi_pci_link_init(void);
+void acpi_pci_root_hp_init(void);
void acpi_platform_init(void);
int acpi_sysfs_init(void);
void acpi_csrt_init(void);
@@ -65,7 +71,7 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list);
int acpi_add_power_resource(acpi_handle handle);
void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
-int acpi_power_min_system_level(struct list_head *list);
+int acpi_power_wakeup_list_init(struct list_head *list, int *system_level);
int acpi_device_sleep_wake(struct acpi_device *dev,
int enable, int sleep_state, int dev_state);
int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 908b02d5da1b..586e7e993d3d 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -84,8 +84,7 @@ static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
-struct workqueue_struct *kacpi_hotplug_wq;
-EXPORT_SYMBOL(kacpi_hotplug_wq);
+static struct workqueue_struct *kacpi_hotplug_wq;
/*
* This list of permanent mappings is for memory that may be accessed from
@@ -661,7 +660,7 @@ static void acpi_table_taint(struct acpi_table_header *table)
pr_warn(PREFIX
"Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
table->signature, table->oem_table_id);
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
@@ -1778,3 +1777,24 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
{
__acpi_os_prepare_sleep = func;
}
+
+void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
+ void (*func)(struct work_struct *work))
+{
+ struct acpi_hp_work *hp_work;
+ int ret;
+
+ hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
+ if (!hp_work)
+ return;
+
+ hp_work->handle = handle;
+ hp_work->type = type;
+ hp_work->context = context;
+
+ INIT_WORK(&hp_work->work, func);
+ ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
+ if (!ret)
+ kfree(hp_work);
+}
+EXPORT_SYMBOL_GPL(alloc_acpi_hp_work);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 68a921d03247..41c5e1b799ef 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -53,9 +53,6 @@ struct acpi_prt_entry {
u32 index; /* GSI, or link _CRS index */
};
-static LIST_HEAD(acpi_prt_list);
-static DEFINE_SPINLOCK(acpi_prt_lock);
-
static inline char pin_name(int pin)
{
return 'A' + pin - 1;
@@ -65,28 +62,6 @@ static inline char pin_name(int pin)
PCI IRQ Routing Table (PRT) Support
-------------------------------------------------------------------------- */
-static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(struct pci_dev *dev,
- int pin)
-{
- struct acpi_prt_entry *entry;
- int segment = pci_domain_nr(dev->bus);
- int bus = dev->bus->number;
- int device = PCI_SLOT(dev->devfn);
-
- spin_lock(&acpi_prt_lock);
- list_for_each_entry(entry, &acpi_prt_list, list) {
- if ((segment == entry->id.segment)
- && (bus == entry->id.bus)
- && (device == entry->id.device)
- && (pin == entry->pin)) {
- spin_unlock(&acpi_prt_lock);
- return entry;
- }
- }
- spin_unlock(&acpi_prt_lock);
- return NULL;
-}
-
/* http://bugzilla.kernel.org/show_bug.cgi?id=4773 */
static const struct dmi_system_id medion_md9580[] = {
{
@@ -184,11 +159,19 @@ static void do_prt_fixups(struct acpi_prt_entry *entry,
}
}
-static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
- struct acpi_pci_routing_table *prt)
+static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev,
+ int pin, struct acpi_pci_routing_table *prt,
+ struct acpi_prt_entry **entry_ptr)
{
+ int segment = pci_domain_nr(dev->bus);
+ int bus = dev->bus->number;
+ int device = PCI_SLOT(dev->devfn);
struct acpi_prt_entry *entry;
+ if (((prt->address >> 16) & 0xffff) != device ||
+ prt->pin + 1 != pin)
+ return -ENODEV;
+
entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -237,43 +220,37 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
entry->id.device, pin_name(entry->pin),
prt->source, entry->index));
- spin_lock(&acpi_prt_lock);
- list_add_tail(&entry->list, &acpi_prt_list);
- spin_unlock(&acpi_prt_lock);
+ *entry_ptr = entry;
return 0;
}
-int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
+static int acpi_pci_irq_find_prt_entry(struct pci_dev *dev,
+ int pin, struct acpi_prt_entry **entry_ptr)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_pci_routing_table *entry;
+ acpi_handle handle = NULL;
- /* 'handle' is the _PRT's parent (root bridge or PCI-PCI bridge) */
- status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- printk(KERN_DEBUG "ACPI: PCI Interrupt Routing Table [%s._PRT]\n",
- (char *) buffer.pointer);
-
- kfree(buffer.pointer);
+ if (dev->bus->bridge)
+ handle = ACPI_HANDLE(dev->bus->bridge);
- buffer.length = ACPI_ALLOCATE_BUFFER;
- buffer.pointer = NULL;
+ if (!handle)
+ return -ENODEV;
+ /* 'handle' is the _PRT's parent (root bridge or PCI-PCI bridge) */
status = acpi_get_irq_routing_table(handle, &buffer);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRT [%s]",
- acpi_format_exception(status)));
kfree(buffer.pointer);
return -ENODEV;
}
entry = buffer.pointer;
while (entry && (entry->length > 0)) {
- acpi_pci_irq_add_entry(handle, segment, bus, entry);
+ if (!acpi_pci_irq_check_entry(handle, dev, pin,
+ entry, entry_ptr))
+ break;
entry = (struct acpi_pci_routing_table *)
((unsigned long)entry + entry->length);
}
@@ -282,23 +259,6 @@ int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
return 0;
}
-void acpi_pci_irq_del_prt(int segment, int bus)
-{
- struct acpi_prt_entry *entry, *tmp;
-
- printk(KERN_DEBUG
- "ACPI: Delete PCI Interrupt Routing Table for %04x:%02x\n",
- segment, bus);
- spin_lock(&acpi_prt_lock);
- list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) {
- if (segment == entry->id.segment && bus == entry->id.bus) {
- list_del(&entry->list);
- kfree(entry);
- }
- }
- spin_unlock(&acpi_prt_lock);
-}
-
/* --------------------------------------------------------------------------
PCI Interrupt Routing Support
-------------------------------------------------------------------------- */
@@ -359,12 +319,13 @@ static int acpi_reroute_boot_interrupt(struct pci_dev *dev,
static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
{
- struct acpi_prt_entry *entry;
+ struct acpi_prt_entry *entry = NULL;
struct pci_dev *bridge;
u8 bridge_pin, orig_pin = pin;
+ int ret;
- entry = acpi_pci_irq_find_prt_entry(dev, pin);
- if (entry) {
+ ret = acpi_pci_irq_find_prt_entry(dev, pin, &entry);
+ if (!ret && entry) {
#ifdef CONFIG_X86_IO_APIC
acpi_reroute_boot_interrupt(dev, entry);
#endif /* CONFIG_X86_IO_APIC */
@@ -373,7 +334,7 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
return entry;
}
- /*
+ /*
* Attempt to derive an IRQ for this device from a parent bridge's
* PCI interrupt routing entry (eg. yenta bridge and add-in card bridge).
*/
@@ -393,8 +354,8 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
pin = bridge_pin;
}
- entry = acpi_pci_irq_find_prt_entry(bridge, pin);
- if (entry) {
+ ret = acpi_pci_irq_find_prt_entry(bridge, pin, &entry);
+ if (!ret && entry) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Derived GSI for %s INT %c from %s\n",
pci_name(dev), pin_name(orig_pin),
@@ -470,6 +431,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
pin_name(pin));
}
+
return 0;
}
@@ -477,6 +439,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
if (rc < 0) {
dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n",
pin_name(pin));
+ kfree(entry);
return rc;
}
dev->irq = rc;
@@ -491,6 +454,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
(triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
(polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
+ kfree(entry);
return 0;
}
@@ -513,6 +477,8 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
else
gsi = entry->index;
+ kfree(entry);
+
/*
* TBD: It might be worth clearing dev->irq by magic constant
* (e.g. PCI_UNDEFINED_IRQ).
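For reference, the device/pin match in acpi_pci_irq_check_entry() above relies on the ACPI _PRT address encoding (device number in bits 31:16, function in bits 15:0, normally 0xFFFF for "any function"). A standalone sketch of that check, with the helper name being illustrative:

#include <linux/acpi.h>
#include <linux/pci.h>

/* Illustrative helper mirroring the check in acpi_pci_irq_check_entry(). */
static bool prt_entry_matches(const struct acpi_pci_routing_table *prt,
			      const struct pci_dev *dev, int pin)
{
	int device = PCI_SLOT(dev->devfn);

	/* _PRT pin numbers are 0-based, Linux INT pins are 1-based (INTA == 1) */
	return ((prt->address >> 16) & 0xffff) == device &&
	       prt->pin + 1 == pin;
}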
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index b3cc69c5caf1..0ac546d5e53f 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -103,24 +103,6 @@ void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
}
EXPORT_SYMBOL(acpi_pci_unregister_driver);
-acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
-{
- struct acpi_pci_root *root;
- acpi_handle handle = NULL;
-
- mutex_lock(&acpi_pci_root_lock);
- list_for_each_entry(root, &acpi_pci_roots, node)
- if ((root->segment == (u16) seg) &&
- (root->secondary.start == (u16) bus)) {
- handle = root->device->handle;
- break;
- }
- mutex_unlock(&acpi_pci_root_lock);
- return handle;
-}
-
-EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
-
/**
* acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
* @handle - the ACPI CA node in question.
@@ -431,7 +413,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_status status;
int result;
struct acpi_pci_root *root;
- acpi_handle handle;
struct acpi_pci_driver *driver;
u32 flags, base_flags;
bool is_osc_granted = false;
@@ -486,16 +467,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary);
- /*
- * PCI Routing Table
- * -----------------
- * Evaluate and parse _PRT, if exists.
- */
- status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
- if (ACPI_SUCCESS(status))
- result = acpi_pci_irq_add_prt(device->handle, root->segment,
- root->secondary.start);
-
root->mcfg_addr = acpi_pci_root_get_mcfg_addr(device->handle);
/*
@@ -597,8 +568,10 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
- if (system_state != SYSTEM_BOOTING)
+ if (system_state != SYSTEM_BOOTING) {
+ pcibios_resource_survey_bus(root->bus);
pci_assign_unassigned_bus_resources(root->bus);
+ }
mutex_lock(&acpi_pci_root_lock);
list_for_each_entry(driver, &acpi_pci_drivers, node)
@@ -618,7 +591,6 @@ out_del_root:
list_del(&root->node);
mutex_unlock(&acpi_pci_root_lock);
- acpi_pci_irq_del_prt(root->segment, root->secondary.start);
end:
kfree(root);
return result;
@@ -626,8 +598,6 @@ end:
static void acpi_pci_root_remove(struct acpi_device *device)
{
- acpi_status status;
- acpi_handle handle;
struct acpi_pci_root *root = acpi_driver_data(device);
struct acpi_pci_driver *driver;
@@ -642,10 +612,6 @@ static void acpi_pci_root_remove(struct acpi_device *device)
device_set_run_wake(root->bus->bridge, false);
pci_acpi_remove_bus_pm_notifier(device);
- status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
- if (ACPI_SUCCESS(status))
- acpi_pci_irq_del_prt(root->segment, root->secondary.start);
-
pci_remove_root_bus(root->bus);
mutex_lock(&acpi_pci_root_lock);
@@ -663,3 +629,133 @@ void __init acpi_pci_root_init(void)
acpi_scan_add_handler(&pci_root_handler);
}
}
+/* Support root bridge hotplug */
+
+static void handle_root_bridge_insertion(acpi_handle handle)
+{
+ struct acpi_device *device;
+
+ if (!acpi_bus_get_device(handle, &device)) {
+ printk(KERN_DEBUG "acpi device exists...\n");
+ return;
+ }
+
+ if (acpi_bus_scan(handle))
+ printk(KERN_ERR "cannot add bridge to acpi list\n");
+}
+
+static void handle_root_bridge_removal(struct acpi_device *device)
+{
+ struct acpi_eject_event *ej_event;
+
+ ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
+ if (!ej_event) {
+ /* Inform firmware that the hot-remove operation failed */
+ (void) acpi_evaluate_hotplug_ost(device->handle,
+ ACPI_NOTIFY_EJECT_REQUEST,
+ ACPI_OST_SC_NON_SPECIFIC_FAILURE,
+ NULL);
+ return;
+ }
+
+ ej_event->device = device;
+ ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
+
+ acpi_bus_hot_remove_device(ej_event);
+}
+
+static void _handle_hotplug_event_root(struct work_struct *work)
+{
+ struct acpi_pci_root *root;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER };
+ struct acpi_hp_work *hp_work;
+ acpi_handle handle;
+ u32 type;
+
+ hp_work = container_of(work, struct acpi_hp_work, work);
+ handle = hp_work->handle;
+ type = hp_work->type;
+
+ root = acpi_pci_find_root(handle);
+
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+
+ switch (type) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ /* bus enumerate */
+ printk(KERN_DEBUG "%s: Bus check notify on %s\n", __func__,
+ (char *)buffer.pointer);
+ if (!root)
+ handle_root_bridge_insertion(handle);
+
+ break;
+
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ /* device check */
+ printk(KERN_DEBUG "%s: Device check notify on %s\n", __func__,
+ (char *)buffer.pointer);
+ if (!root)
+ handle_root_bridge_insertion(handle);
+ break;
+
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ /* request device eject */
+ printk(KERN_DEBUG "%s: Device eject notify on %s\n", __func__,
+ (char *)buffer.pointer);
+ if (root)
+ handle_root_bridge_removal(root->device);
+ break;
+ default:
+ printk(KERN_WARNING "notify_handler: unknown event type 0x%x for %s\n",
+ type, (char *)buffer.pointer);
+ break;
+ }
+
+ kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
+ kfree(buffer.pointer);
+}
+
+static void handle_hotplug_event_root(acpi_handle handle, u32 type,
+ void *context)
+{
+ alloc_acpi_hp_work(handle, type, context,
+ _handle_hotplug_event_root);
+}
+
+static acpi_status __init
+find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ acpi_status status;
+ char objname[64];
+ struct acpi_buffer buffer = { .length = sizeof(objname),
+ .pointer = objname };
+ int *count = (int *)context;
+
+ if (!acpi_is_root_bridge(handle))
+ return AE_OK;
+
+ (*count)++;
+
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+
+ status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ handle_hotplug_event_root, NULL);
+ if (ACPI_FAILURE(status))
+ printk(KERN_DEBUG "acpi root: %s notify handler is not installed, exit status: %u\n",
+ objname, (unsigned int)status);
+ else
+ printk(KERN_DEBUG "acpi root: %s notify handler is installed\n",
+ objname);
+
+ return AE_OK;
+}
+
+void __init acpi_pci_root_hp_init(void)
+{
+ int num = 0;
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, find_root_bridges, NULL, &num, NULL);
+
+ printk(KERN_DEBUG "Found %d acpi root devices\n", num);
+}
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index 2c630c006c2f..cd1434eb1de8 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -329,19 +329,8 @@ static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
{}
};
-static int __init
-acpi_pci_slot_init(void)
+void __init acpi_pci_slot_init(void)
{
dmi_check_system(acpi_pci_slot_dmi_table);
acpi_pci_register_driver(&acpi_pci_slot_driver);
- return 0;
}
-
-static void __exit
-acpi_pci_slot_exit(void)
-{
- acpi_pci_unregister_driver(&acpi_pci_slot_driver);
-}
-
-module_init(acpi_pci_slot_init);
-module_exit(acpi_pci_slot_exit);
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index b820528a5fa3..34f5ef11d427 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -73,6 +73,7 @@ struct acpi_power_resource {
u32 system_level;
u32 order;
unsigned int ref_count;
+ bool wakeup_enabled;
struct mutex resource_lock;
};
@@ -272,11 +273,9 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
return 0;
}
-static int acpi_power_on(struct acpi_power_resource *resource)
+static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
{
- int result = 0;;
-
- mutex_lock(&resource->resource_lock);
+ int result = 0;
if (resource->ref_count++) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -293,9 +292,16 @@ static int acpi_power_on(struct acpi_power_resource *resource)
schedule_work(&dep->work);
}
}
+ return result;
+}
- mutex_unlock(&resource->resource_lock);
+static int acpi_power_on(struct acpi_power_resource *resource)
+{
+ int result;
+ mutex_lock(&resource->resource_lock);
+ result = acpi_power_on_unlocked(resource);
+ mutex_unlock(&resource->resource_lock);
return result;
}
@@ -313,17 +319,15 @@ static int __acpi_power_off(struct acpi_power_resource *resource)
return 0;
}
-static int acpi_power_off(struct acpi_power_resource *resource)
+static int acpi_power_off_unlocked(struct acpi_power_resource *resource)
{
int result = 0;
- mutex_lock(&resource->resource_lock);
-
if (!resource->ref_count) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Power resource [%s] already off",
resource->name));
- goto unlock;
+ return 0;
}
if (--resource->ref_count) {
@@ -335,10 +339,16 @@ static int acpi_power_off(struct acpi_power_resource *resource)
if (result)
resource->ref_count++;
}
+ return result;
+}
- unlock:
- mutex_unlock(&resource->resource_lock);
+static int acpi_power_off(struct acpi_power_resource *resource)
+{
+ int result;
+ mutex_lock(&resource->resource_lock);
+ result = acpi_power_off_unlocked(resource);
+ mutex_unlock(&resource->resource_lock);
return result;
}
@@ -521,18 +531,35 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
}
}
-int acpi_power_min_system_level(struct list_head *list)
+int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
{
struct acpi_power_resource_entry *entry;
int system_level = 5;
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
+ acpi_handle handle = resource->device.handle;
+ int result;
+ int state;
+ mutex_lock(&resource->resource_lock);
+
+ result = acpi_power_get_state(handle, &state);
+ if (result) {
+ mutex_unlock(&resource->resource_lock);
+ return result;
+ }
+ if (state == ACPI_POWER_RESOURCE_STATE_ON) {
+ resource->ref_count++;
+ resource->wakeup_enabled = true;
+ }
if (system_level > resource->system_level)
system_level = resource->system_level;
+
+ mutex_unlock(&resource->resource_lock);
}
- return system_level;
+ *system_level_p = system_level;
+ return 0;
}
/* --------------------------------------------------------------------------
@@ -610,6 +637,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
*/
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
{
+ struct acpi_power_resource_entry *entry;
int err = 0;
if (!dev || !dev->wakeup.flags.valid)
@@ -620,17 +648,31 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
if (dev->wakeup.prepare_count++)
goto out;
- err = acpi_power_on_list(&dev->wakeup.resources);
- if (err) {
- dev_err(&dev->dev, "Cannot turn wakeup power resources on\n");
- dev->wakeup.flags.valid = 0;
- } else {
- /*
- * Passing 3 as the third argument below means the device may be
- * put into arbitrary power state afterward.
- */
- err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
+ list_for_each_entry(entry, &dev->wakeup.resources, node) {
+ struct acpi_power_resource *resource = entry->resource;
+
+ mutex_lock(&resource->resource_lock);
+
+ if (!resource->wakeup_enabled) {
+ err = acpi_power_on_unlocked(resource);
+ if (!err)
+ resource->wakeup_enabled = true;
+ }
+
+ mutex_unlock(&resource->resource_lock);
+
+ if (err) {
+ dev_err(&dev->dev,
+ "Cannot turn wakeup power resources on\n");
+ dev->wakeup.flags.valid = 0;
+ goto out;
+ }
}
+ /*
+ * Passing 3 as the third argument below means the device may be
+ * put into arbitrary power state afterward.
+ */
+ err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
if (err)
dev->wakeup.prepare_count = 0;
@@ -647,6 +689,7 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
*/
int acpi_disable_wakeup_device_power(struct acpi_device *dev)
{
+ struct acpi_power_resource_entry *entry;
int err = 0;
if (!dev || !dev->wakeup.flags.valid)
@@ -668,10 +711,25 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
if (err)
goto out;
- err = acpi_power_off_list(&dev->wakeup.resources);
- if (err) {
- dev_err(&dev->dev, "Cannot turn wakeup power resources off\n");
- dev->wakeup.flags.valid = 0;
+ list_for_each_entry(entry, &dev->wakeup.resources, node) {
+ struct acpi_power_resource *resource = entry->resource;
+
+ mutex_lock(&resource->resource_lock);
+
+ if (resource->wakeup_enabled) {
+ err = acpi_power_off_unlocked(resource);
+ if (!err)
+ resource->wakeup_enabled = false;
+ }
+
+ mutex_unlock(&resource->resource_lock);
+
+ if (err) {
+ dev_err(&dev->dev,
+ "Cannot turn wakeup power resources off\n");
+ dev->wakeup.flags.valid = 0;
+ break;
+ }
}
out:
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index cbf1f122666b..df34bd04ae62 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -45,6 +45,7 @@
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/memory_hotplug.h>
#include <asm/io.h>
#include <asm/cpu.h>
@@ -641,6 +642,7 @@ static int acpi_processor_remove(struct acpi_device *device)
per_cpu(processors, pr->id) = NULL;
per_cpu(processor_device_array, pr->id) = NULL;
+ try_offline_node(cpu_to_node(pr->id));
free:
free_cpumask_var(pr->throttling.shared_cpu_map);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index daee7497efd3..5e7e991717d7 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1002,7 +1002,14 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
if (!list_empty(&wakeup->resources)) {
int sleep_state;
- sleep_state = acpi_power_min_system_level(&wakeup->resources);
+ err = acpi_power_wakeup_list_init(&wakeup->resources,
+ &sleep_state);
+ if (err) {
+ acpi_handle_warn(handle, "Retrieving current states "
+ "of wakeup power resources failed\n");
+ acpi_power_resources_list_free(&wakeup->resources);
+ goto out;
+ }
if (sleep_state < wakeup->sleep_state) {
acpi_handle_warn(handle, "Overriding _PRW sleep state "
"(S%d) by S%d from power resources\n",
@@ -1783,6 +1790,7 @@ int __init acpi_scan_init(void)
acpi_platform_init();
acpi_csrt_init();
acpi_container_init();
+ acpi_pci_slot_init();
mutex_lock(&acpi_scan_lock);
/*
@@ -1804,6 +1812,8 @@ int __init acpi_scan_init(void)
acpi_update_all_gpes();
+ acpi_pci_root_hp_init();
+
out:
mutex_unlock(&acpi_scan_lock);
return result;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cdadce23e66d..3e751b74615e 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -14,7 +14,7 @@ menuconfig ATA
tristate "Serial ATA and Parallel ATA drivers"
depends on HAS_IOMEM
depends on BLOCK
- depends on !(M32R || M68K) || BROKEN
+ depends on !(M32R || M68K || S390) || BROKEN
select SCSI
---help---
If you want to use a ATA hard disk, ATA tape drive, ATA CD-ROM or
@@ -58,6 +58,19 @@ config ATA_ACPI
You can disable this at kernel boot time by using the
option libata.noacpi=1
+config SATA_ZPODD
+ bool "SATA Zero Power ODD Support"
+ depends on ATA_ACPI
+ default n
+ help
+ This option adds support for SATA Zero Power ODD (ZPODD). It
+ requires support from both the ODD and the platform; if enabled,
+ the ODD is automatically powered on/off when certain conditions
+ are satisfied. This does not affect the user's experience of the
+ ODD; power is only saved while the ODD is not in use (i.e. no disc inside).
+
+ If unsure, say N.
+
config SATA_PMP
bool "SATA Port Multiplier support"
default y
@@ -247,6 +260,14 @@ config SATA_PROMISE
If unsure, say N.
+config SATA_RCAR
+ tristate "Renesas R-Car SATA support"
+ depends on ARCH_SHMOBILE && ARCH_R8A7779
+ help
+ This option enables support for Renesas R-Car Serial ATA.
+
+ If unsure, say N.
+
config SATA_SIL
tristate "Silicon Image SATA support"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 9329dafba91b..c04d0fd038a3 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_ATA_PIIX) += ata_piix.o
obj-$(CONFIG_SATA_MV) += sata_mv.o
obj-$(CONFIG_SATA_NV) += sata_nv.o
obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
+obj-$(CONFIG_SATA_RCAR) += sata_rcar.o
obj-$(CONFIG_SATA_SIL) += sata_sil.o
obj-$(CONFIG_SATA_SIS) += sata_sis.o
obj-$(CONFIG_SATA_SVW) += sata_svw.o
@@ -107,3 +108,4 @@ libata-y := libata-core.o libata-scsi.o libata-eh.o libata-transport.o
libata-$(CONFIG_ATA_SFF) += libata-sff.o
libata-$(CONFIG_SATA_PMP) += libata-pmp.o
libata-$(CONFIG_ATA_ACPI) += libata-acpi.o
+libata-$(CONFIG_SATA_ZPODD) += libata-zpodd.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 495aeed26779..a99112cfd8b1 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -265,6 +265,30 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 174eca609b42..d2ba439cfe54 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -317,6 +317,23 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (DH89xxCC) */
{ 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+
{ } /* terminate list */
};
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 6fc67f7efb22..0ea1018280bd 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -17,6 +17,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
#include <scsi/scsi_device.h>
#include "libata.h"
@@ -835,50 +836,95 @@ void ata_acpi_on_resume(struct ata_port *ap)
}
}
-/**
- * ata_acpi_set_state - set the port power state
- * @ap: target ATA port
- * @state: state, on/off
- *
- * This function executes the _PS0/_PS3 ACPI method to set the power state.
- * ACPI spec requires _PS0 when IDE power on and _PS3 when power off
- */
-void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+static int ata_acpi_choose_suspend_state(struct ata_device *dev, bool runtime)
+{
+ int d_max_in = ACPI_STATE_D3_COLD;
+ if (!runtime)
+ goto out;
+
+ /*
+ * For ATAPI, runtime D3 cold is only allowed
+ * for ZPODD in zero power ready state
+ */
+ if (dev->class == ATA_DEV_ATAPI &&
+ !(zpodd_dev_enabled(dev) && zpodd_zpready(dev)))
+ d_max_in = ACPI_STATE_D3_HOT;
+
+out:
+ return acpi_pm_device_sleep_state(&dev->sdev->sdev_gendev,
+ NULL, d_max_in);
+}
+
+static void sata_acpi_set_state(struct ata_port *ap, pm_message_t state)
{
+ bool runtime = PMSG_IS_AUTO(state);
struct ata_device *dev;
acpi_handle handle;
int acpi_state;
- /* channel first and then drives for power on and vica versa
- for power off */
- handle = ata_ap_acpi_handle(ap);
- if (handle && state.event == PM_EVENT_ON)
- acpi_bus_set_power(handle, ACPI_STATE_D0);
-
ata_for_each_dev(dev, &ap->link, ENABLED) {
handle = ata_dev_acpi_handle(dev);
if (!handle)
continue;
- if (state.event != PM_EVENT_ON) {
- acpi_state = acpi_pm_device_sleep_state(
- &dev->sdev->sdev_gendev, NULL, ACPI_STATE_D3);
- if (acpi_state > 0)
- acpi_bus_set_power(handle, acpi_state);
- /* TBD: need to check if it's runtime pm request */
- acpi_pm_device_run_wake(
- &dev->sdev->sdev_gendev, true);
+ if (!(state.event & PM_EVENT_RESUME)) {
+ acpi_state = ata_acpi_choose_suspend_state(dev, runtime);
+ if (acpi_state == ACPI_STATE_D0)
+ continue;
+ if (runtime && zpodd_dev_enabled(dev) &&
+ acpi_state == ACPI_STATE_D3_COLD)
+ zpodd_enable_run_wake(dev);
+ acpi_bus_set_power(handle, acpi_state);
} else {
- /* Ditto */
- acpi_pm_device_run_wake(
- &dev->sdev->sdev_gendev, false);
+ if (runtime && zpodd_dev_enabled(dev))
+ zpodd_disable_run_wake(dev);
acpi_bus_set_power(handle, ACPI_STATE_D0);
}
}
+}
- handle = ata_ap_acpi_handle(ap);
- if (handle && state.event != PM_EVENT_ON)
- acpi_bus_set_power(handle, ACPI_STATE_D3);
+/* ACPI spec requires _PS0 when IDE power on and _PS3 when power off */
+static void pata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+ struct ata_device *dev;
+ acpi_handle port_handle;
+
+ port_handle = ata_ap_acpi_handle(ap);
+ if (!port_handle)
+ return;
+
+ /* channel first and then drives for power on, and vice versa
+ for power off */
+ if (state.event & PM_EVENT_RESUME)
+ acpi_bus_set_power(port_handle, ACPI_STATE_D0);
+
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ acpi_handle dev_handle = ata_dev_acpi_handle(dev);
+ if (!dev_handle)
+ continue;
+
+ acpi_bus_set_power(dev_handle, state.event & PM_EVENT_RESUME ?
+ ACPI_STATE_D0 : ACPI_STATE_D3);
+ }
+
+ if (!(state.event & PM_EVENT_RESUME))
+ acpi_bus_set_power(port_handle, ACPI_STATE_D3);
+}
+
+/**
+ * ata_acpi_set_state - set the port power state
+ * @ap: target ATA port
+ * @state: state, on/off
+ *
+ * This function sets a proper ACPI D state for the device on
+ * system and runtime PM operations.
+ */
+void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+ if (ap->flags & ATA_FLAG_ACPI_SATA)
+ sata_acpi_set_state(ap, state);
+ else
+ pata_acpi_set_state(ap, state);
}
/**
@@ -974,57 +1020,6 @@ void ata_acpi_on_disable(struct ata_device *dev)
ata_acpi_clear_gtf(dev);
}
-static void ata_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
-{
- struct ata_device *ata_dev = context;
-
- if (event == ACPI_NOTIFY_DEVICE_WAKE && ata_dev &&
- pm_runtime_suspended(&ata_dev->sdev->sdev_gendev))
- scsi_autopm_get_device(ata_dev->sdev);
-}
-
-static void ata_acpi_add_pm_notifier(struct ata_device *dev)
-{
- struct acpi_device *acpi_dev;
- acpi_handle handle;
- acpi_status status;
-
- handle = ata_dev_acpi_handle(dev);
- if (!handle)
- return;
-
- status = acpi_bus_get_device(handle, &acpi_dev);
- if (ACPI_FAILURE(status))
- return;
-
- if (dev->sdev->can_power_off) {
- acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- ata_acpi_wake_dev, dev);
- device_set_run_wake(&dev->sdev->sdev_gendev, true);
- }
-}
-
-static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
-{
- struct acpi_device *acpi_dev;
- acpi_handle handle;
- acpi_status status;
-
- handle = ata_dev_acpi_handle(dev);
- if (!handle)
- return;
-
- status = acpi_bus_get_device(handle, &acpi_dev);
- if (ACPI_FAILURE(status))
- return;
-
- if (dev->sdev->can_power_off) {
- device_set_run_wake(&dev->sdev->sdev_gendev, false);
- acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- ata_acpi_wake_dev);
- }
-}
-
static void ata_acpi_register_power_resource(struct ata_device *dev)
{
struct scsi_device *sdev = dev->sdev;
@@ -1047,13 +1042,13 @@ static void ata_acpi_unregister_power_resource(struct ata_device *dev)
void ata_acpi_bind(struct ata_device *dev)
{
- ata_acpi_add_pm_notifier(dev);
ata_acpi_register_power_resource(dev);
+ if (zpodd_dev_enabled(dev))
+ dev_pm_qos_expose_flags(&dev->sdev->sdev_gendev, 0);
}
void ata_acpi_unbind(struct ata_device *dev)
{
- ata_acpi_remove_pm_notifier(dev);
ata_acpi_unregister_power_resource(dev);
}
@@ -1095,9 +1090,6 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev,
acpi_handle *handle)
{
struct ata_device *ata_dev;
- acpi_status status;
- struct acpi_device *acpi_dev;
- struct acpi_device_power_state *states;
if (ap->flags & ATA_FLAG_ACPI_SATA) {
if (!sata_pmp_attached(ap))
@@ -1114,21 +1106,6 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev,
if (!*handle)
return -ENODEV;
- status = acpi_bus_get_device(*handle, &acpi_dev);
- if (ACPI_FAILURE(status))
- return 0;
-
- /*
- * If firmware has _PS3 or _PR3 for this device,
- * and this ata ODD device support device attention,
- * it means this device can be powered off
- */
- states = acpi_dev->power.states;
- if ((states[ACPI_STATE_D3_HOT].flags.valid ||
- states[ACPI_STATE_D3_COLD].flags.explicit_set) &&
- ata_dev->flags & ATA_DFLAG_DA)
- sdev->can_power_off = 1;
-
return 0;
}
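To summarize the policy implemented above: on runtime suspend an ATAPI device may only reach D3cold if it is a ZPODD that is currently zero-power ready; otherwise the target is capped at D3hot, and the result is then passed through acpi_pm_device_sleep_state(), which applies the platform's own constraints. A compressed, illustrative sketch of that decision (the wrapper itself is not part of the patch; zpodd_dev_enabled()/zpodd_zpready() come from this series):

#include <linux/acpi.h>
#include <linux/libata.h>
#include "libata.h"

/* Illustrative wrapper around the logic of ata_acpi_choose_suspend_state(). */
static int example_max_suspend_state(struct ata_device *dev, bool runtime)
{
	int d_max = ACPI_STATE_D3_COLD;

	/* ATAPI devices may only reach D3cold at runtime when a ZPODD
	 * is attached and currently zero-power ready. */
	if (runtime && dev->class == ATA_DEV_ATAPI &&
	    !(zpodd_dev_enabled(dev) && zpodd_zpready(dev)))
		d_max = ACPI_STATE_D3_HOT;

	return d_max;
}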
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 46cd3f4c6aaa..497adea1f0d6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2400,8 +2400,10 @@ int ata_dev_configure(struct ata_device *dev)
dma_dir_string = ", DMADIR";
}
- if (ata_id_has_da(dev->id))
+ if (ata_id_has_da(dev->id)) {
dev->flags |= ATA_DFLAG_DA;
+ zpodd_init(dev);
+ }
/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info)
@@ -5331,9 +5333,6 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
{
- unsigned int ehi_flags = ATA_EHI_QUIET;
- int rc;
-
/*
* On some hardware, device fails to respond after spun down
* for suspend. As the device won't be used before being
@@ -5342,11 +5341,9 @@ static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int
*
* http://thread.gmane.org/gmane.linux.ide/46764
*/
- if (mesg.event == PM_EVENT_SUSPEND)
- ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
-
- rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
- return rc;
+ unsigned int ehi_flags = ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
+ ATA_EHI_NO_RECOVERY;
+ return ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
}
static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
@@ -5367,40 +5364,38 @@ static int ata_port_suspend(struct device *dev)
static int ata_port_do_freeze(struct device *dev)
{
if (pm_runtime_suspended(dev))
- pm_runtime_resume(dev);
+ return 0;
return ata_port_suspend_common(dev, PMSG_FREEZE);
}
static int ata_port_poweroff(struct device *dev)
{
- if (pm_runtime_suspended(dev))
- return 0;
-
return ata_port_suspend_common(dev, PMSG_HIBERNATE);
}
-static int __ata_port_resume_common(struct ata_port *ap, int *async)
+static int __ata_port_resume_common(struct ata_port *ap, pm_message_t mesg,
+ int *async)
{
int rc;
- rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
+ rc = ata_port_request_pm(ap, mesg, ATA_EH_RESET,
ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
return rc;
}
-static int ata_port_resume_common(struct device *dev)
+static int ata_port_resume_common(struct device *dev, pm_message_t mesg)
{
struct ata_port *ap = to_ata_port(dev);
- return __ata_port_resume_common(ap, NULL);
+ return __ata_port_resume_common(ap, mesg, NULL);
}
static int ata_port_resume(struct device *dev)
{
int rc;
- rc = ata_port_resume_common(dev);
+ rc = ata_port_resume_common(dev, PMSG_RESUME);
if (!rc) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
@@ -5410,11 +5405,40 @@ static int ata_port_resume(struct device *dev)
return rc;
}
+/*
+ * For ODDs, the upper layer will poll for media change every few seconds,
+ * which makes the device enter and leave suspend state every few seconds.
+ * As each suspend causes a hard/soft reset, the gain from runtime suspend
+ * is small and the ODD may malfunction after constantly being reset, so
+ * the idle callback here will not proceed to suspend if a non-ZPODD
+ * capable ODD is attached to the port.
+ */
static int ata_port_runtime_idle(struct device *dev)
{
+ struct ata_port *ap = to_ata_port(dev);
+ struct ata_link *link;
+ struct ata_device *adev;
+
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(adev, link, ENABLED)
+ if (adev->class == ATA_DEV_ATAPI &&
+ !zpodd_dev_enabled(adev))
+ return -EBUSY;
+ }
+
return pm_runtime_suspend(dev);
}
+static int ata_port_runtime_suspend(struct device *dev)
+{
+ return ata_port_suspend_common(dev, PMSG_AUTO_SUSPEND);
+}
+
+static int ata_port_runtime_resume(struct device *dev)
+{
+ return ata_port_resume_common(dev, PMSG_AUTO_RESUME);
+}
+
static const struct dev_pm_ops ata_port_pm_ops = {
.suspend = ata_port_suspend,
.resume = ata_port_resume,
@@ -5423,8 +5447,8 @@ static const struct dev_pm_ops ata_port_pm_ops = {
.poweroff = ata_port_poweroff,
.restore = ata_port_resume,
- .runtime_suspend = ata_port_suspend,
- .runtime_resume = ata_port_resume_common,
+ .runtime_suspend = ata_port_runtime_suspend,
+ .runtime_resume = ata_port_runtime_resume,
.runtime_idle = ata_port_runtime_idle,
};
@@ -5441,7 +5465,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
int ata_sas_port_async_resume(struct ata_port *ap, int *async)
{
- return __ata_port_resume_common(ap, async);
+ return __ata_port_resume_common(ap, PMSG_RESUME, async);
}
EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index bcf4437214f5..f9476fb3ac43 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1591,7 +1591,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
* RETURNS:
* 0 on success, AC_ERR_* mask on failure.
*/
-static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
+unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
struct ata_taskfile tf;
@@ -1624,7 +1624,7 @@ static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
* RETURNS:
* 0 on success, AC_ERR_* mask on failure
*/
-static unsigned int atapi_eh_request_sense(struct ata_device *dev,
+unsigned int atapi_eh_request_sense(struct ata_device *dev,
u8 *sense_buf, u8 dfl_sense_key)
{
u8 cdb[ATAPI_CDB_LEN] =
@@ -3857,6 +3857,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
rc = atapi_eh_clear_ua(dev);
if (rc)
goto rest_fail;
+ if (zpodd_dev_enabled(dev))
+ zpodd_post_poweron(dev);
}
}
@@ -4022,11 +4024,12 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
unsigned long flags;
int rc = 0;
+ struct ata_device *dev;
/* are we suspending? */
spin_lock_irqsave(ap->lock, flags);
if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
- ap->pm_mesg.event == PM_EVENT_ON) {
+ ap->pm_mesg.event & PM_EVENT_RESUME) {
spin_unlock_irqrestore(ap->lock, flags);
return;
}
@@ -4034,6 +4037,18 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+ /*
+ * If we have a ZPODD attached, check its zero
+ * power ready status before the port is frozen.
+ * Only needed for runtime suspend.
+ */
+ if (PMSG_IS_AUTO(ap->pm_mesg)) {
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ if (zpodd_dev_enabled(dev))
+ zpodd_on_suspend(dev);
+ }
+ }
+
/* tell ACPI we're suspending */
rc = ata_acpi_on_suspend(ap);
if (rc)
@@ -4045,7 +4060,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
if (ap->ops->port_suspend)
rc = ap->ops->port_suspend(ap, ap->pm_mesg);
- ata_acpi_set_state(ap, PMSG_SUSPEND);
+ ata_acpi_set_state(ap, ap->pm_mesg);
out:
/* report result */
spin_lock_irqsave(ap->lock, flags);
@@ -4085,7 +4100,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
/* are we resuming? */
spin_lock_irqsave(ap->lock, flags);
if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
- ap->pm_mesg.event != PM_EVENT_ON) {
+ !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
spin_unlock_irqrestore(ap->lock, flags);
return;
}
@@ -4104,7 +4119,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
ata_for_each_dev(dev, link, ALL)
ata_ering_clear(&dev->ering);
- ata_acpi_set_state(ap, PMSG_ON);
+ ata_acpi_set_state(ap, ap->pm_mesg);
if (ap->ops->port_resume)
rc = ap->ops->port_resume(ap);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 7c337e754dab..318b41358187 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -933,7 +933,11 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
* block specified for the ATA pass through commands. Regardless
* of whether the command errored or not, return a sense
* block. Copy all controller registers into the sense
- * block. Clear sense key, ASC & ASCQ if there is no error.
+ * block. If there was no error, the request came from an ATA
+ * pass-through command, so we use the following sense data:
+ * sk = RECOVERED ERROR
+ * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
+ *
*
* LOCKING:
* None.
@@ -959,6 +963,10 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
&sb[1], &sb[2], &sb[3], verbose);
sb[1] &= 0x0f;
+ } else {
+ sb[1] = RECOVERED_ERROR;
+ sb[2] = 0;
+ sb[3] = 0x1D;
}
/*
@@ -1733,10 +1741,12 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
- * generate because the user forced us to, a check condition
- * is generated and the ATA register values are returned
+ * generate because the user forced us to [CK_COND = 1], a check
+ * condition is generated and the ATA register values are returned
* whether the command completed successfully or not. If there
- * was no error, SK, ASC and ASCQ will all be zero.
+ * was no error, we use the following sense data:
+ * sk = RECOVERED ERROR
+ * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
*/
if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
((cdb[2] & 0x20) || need_sense)) {
@@ -3755,6 +3765,8 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
mutex_lock(&ap->scsi_host->scan_mutex);
spin_lock_irqsave(ap->lock, flags);
+ if (zpodd_dev_enabled(dev))
+ zpodd_exit(dev);
ata_acpi_unbind(dev);
/* clearing dev->sdev is protected by host lock */
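For reference, the no-error branch added above fills the sense block with standard SAT values. An illustrative helper setting the same bytes (sb[] indices as used by ata_gen_passthru_sense(); RECOVERED_ERROR comes from scsi/scsi.h):

#include <linux/types.h>
#include <scsi/scsi.h>

/* Illustrative only: sense bytes for a successful ATA pass-through command. */
static void example_fill_good_passthru_sense(u8 *sb)
{
	sb[1] = RECOVERED_ERROR;	/* 0x01, sense key */
	sb[2] = 0;			/* ASC */
	sb[3] = 0x1D;			/* ASCQ: ATA PASS-THROUGH INFORMATION AVAILABLE */
}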
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
new file mode 100644
index 000000000000..90b159b740b3
--- /dev/null
+++ b/drivers/ata/libata-zpodd.c
@@ -0,0 +1,299 @@
+#include <linux/libata.h>
+#include <linux/cdrom.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <scsi/scsi_device.h>
+
+#include "libata.h"
+
+static int zpodd_poweroff_delay = 30; /* 30 seconds for power off delay */
+module_param(zpodd_poweroff_delay, int, 0644);
+MODULE_PARM_DESC(zpodd_poweroff_delay, "Poweroff delay for ZPODD in seconds");
+
+enum odd_mech_type {
+ ODD_MECH_TYPE_SLOT,
+ ODD_MECH_TYPE_DRAWER,
+ ODD_MECH_TYPE_UNSUPPORTED,
+};
+
+struct zpodd {
+ enum odd_mech_type mech_type; /* init during probe, RO afterwards */
+ struct ata_device *dev;
+
+ /* The following fields are synchronized by PM core. */
+ bool from_notify; /* resumed as a result of
+ * acpi wake notification */
+ bool zp_ready; /* ZP ready state */
+ unsigned long last_ready; /* last ZP ready timestamp */
+ bool zp_sampled; /* ZP ready state sampled */
+ bool powered_off; /* ODD is powered off
+ * during suspend */
+};
+
+static int eject_tray(struct ata_device *dev)
+{
+ struct ata_taskfile tf = {};
+ const char cdb[] = { GPCMD_START_STOP_UNIT,
+ 0, 0, 0,
+ 0x02, /* LoEj */
+ 0, 0, 0, 0, 0, 0, 0,
+ };
+
+ tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.command = ATA_CMD_PACKET;
+ tf.protocol = ATAPI_PROT_NODATA;
+
+ return ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
+}
+
+/* Per the spec, only slot type and drawer type ODD can be supported */
+static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
+{
+ char buf[16];
+ unsigned int ret;
+ struct rm_feature_desc *desc = (void *)(buf + 8);
+ struct ata_taskfile tf = {};
+
+ char cdb[] = { GPCMD_GET_CONFIGURATION,
+ 2, /* only 1 feature descriptor requested */
+ 0, 3, /* 3, removable medium feature */
+ 0, 0, 0,/* reserved */
+ 0, sizeof(buf),
+ 0, 0, 0,
+ };
+
+ tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.command = ATA_CMD_PACKET;
+ tf.protocol = ATAPI_PROT_PIO;
+ tf.lbam = sizeof(buf);
+
+ ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
+ buf, sizeof(buf), 0);
+ if (ret)
+ return ODD_MECH_TYPE_UNSUPPORTED;
+
+ if (be16_to_cpu(desc->feature_code) != 3)
+ return ODD_MECH_TYPE_UNSUPPORTED;
+
+ if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+ return ODD_MECH_TYPE_SLOT;
+ else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+ return ODD_MECH_TYPE_DRAWER;
+ else
+ return ODD_MECH_TYPE_UNSUPPORTED;
+}
+
+static bool odd_can_poweroff(struct ata_device *ata_dev)
+{
+ acpi_handle handle;
+ acpi_status status;
+ struct acpi_device *acpi_dev;
+
+ handle = ata_dev_acpi_handle(ata_dev);
+ if (!handle)
+ return false;
+
+ status = acpi_bus_get_device(handle, &acpi_dev);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ return acpi_device_can_poweroff(acpi_dev);
+}
+
+/* Test if ODD is zero power ready by sense code */
+static bool zpready(struct ata_device *dev)
+{
+ u8 sense_key, *sense_buf;
+ unsigned int ret, asc, ascq, add_len;
+ struct zpodd *zpodd = dev->zpodd;
+
+ ret = atapi_eh_tur(dev, &sense_key);
+
+ if (!ret || sense_key != NOT_READY)
+ return false;
+
+ sense_buf = dev->link->ap->sector_buf;
+ ret = atapi_eh_request_sense(dev, sense_buf, sense_key);
+ if (ret)
+ return false;
+
+ /* sense valid */
+ if ((sense_buf[0] & 0x7f) != 0x70)
+ return false;
+
+ add_len = sense_buf[7];
+ /* has asc and ascq */
+ if (add_len < 6)
+ return false;
+
+ asc = sense_buf[12];
+ ascq = sense_buf[13];
+
+ if (zpodd->mech_type == ODD_MECH_TYPE_SLOT)
+ /* no media inside */
+ return asc == 0x3a;
+ else
+ /* no media inside and door closed */
+ return asc == 0x3a && ascq == 0x01;
+}
+
+/*
+ * Update the zpodd->zp_ready field. This field is only set if the
+ * ODD has stayed in the ZP ready state for at least zpodd_poweroff_delay
+ * seconds, and is used to decide whether power off is allowed. If set,
+ * it is cleared during resume from the powered-off state.
+ */
+void zpodd_on_suspend(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+ unsigned long expires;
+
+ if (!zpready(dev)) {
+ zpodd->zp_sampled = false;
+ zpodd->zp_ready = false;
+ return;
+ }
+
+ if (!zpodd->zp_sampled) {
+ zpodd->zp_sampled = true;
+ zpodd->last_ready = jiffies;
+ return;
+ }
+
+ expires = zpodd->last_ready +
+ msecs_to_jiffies(zpodd_poweroff_delay * 1000);
+ if (time_before(jiffies, expires))
+ return;
+
+ zpodd->zp_ready = true;
+}
+
+bool zpodd_zpready(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+ return zpodd->zp_ready;
+}
+
+/*
+ * Enable runtime wake capability through ACPI and set the powered_off flag;
+ * this flag is used during resume to decide which operations need to be
+ * taken.
+ *
+ * Also, media polling needs to be silenced, so that it doesn't bring the
+ * ODD back to full power state every few seconds.
+ */
+void zpodd_enable_run_wake(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+
+ sdev_disable_disk_events(dev->sdev);
+
+ zpodd->powered_off = true;
+ device_set_run_wake(&dev->sdev->sdev_gendev, true);
+ acpi_pm_device_run_wake(&dev->sdev->sdev_gendev, true);
+}
+
+/* Disable runtime wake capability if it is enabled */
+void zpodd_disable_run_wake(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+
+ if (zpodd->powered_off) {
+ acpi_pm_device_run_wake(&dev->sdev->sdev_gendev, false);
+ device_set_run_wake(&dev->sdev->sdev_gendev, false);
+ }
+}
+
+/*
+ * Post power on processing after the ODD has been recovered. If the
+ * ODD wasn't powered off during suspend, this function does nothing.
+ *
+ * For a drawer type ODD that is powered on because the user pressed
+ * the eject button, the tray needs to be ejected. This can only be done
+ * after the ODD has been recovered, i.e. the link is initialized and
+ * the device is able to process a NON_DATA PIO command, since eject
+ * requires sending a command for the ODD to process.
+ *
+ * The from_notify flag, set in the wake notification handler
+ * zpodd_wake_dev, indicates whether power on was due to the user's action.
+ *
+ * For both types of ODD, several fields need to be reset.
+ */
+void zpodd_post_poweron(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+
+ if (!zpodd->powered_off)
+ return;
+
+ zpodd->powered_off = false;
+
+ if (zpodd->from_notify) {
+ zpodd->from_notify = false;
+ if (zpodd->mech_type == ODD_MECH_TYPE_DRAWER)
+ eject_tray(dev);
+ }
+
+ zpodd->zp_sampled = false;
+ zpodd->zp_ready = false;
+
+ sdev_enable_disk_events(dev->sdev);
+}
+
+static void zpodd_wake_dev(acpi_handle handle, u32 event, void *context)
+{
+ struct ata_device *ata_dev = context;
+ struct zpodd *zpodd = ata_dev->zpodd;
+ struct device *dev = &ata_dev->sdev->sdev_gendev;
+
+ if (event == ACPI_NOTIFY_DEVICE_WAKE && pm_runtime_suspended(dev)) {
+ zpodd->from_notify = true;
+ pm_runtime_resume(dev);
+ }
+}
+
+static void ata_acpi_add_pm_notifier(struct ata_device *dev)
+{
+ acpi_handle handle = ata_dev_acpi_handle(dev);
+ acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ zpodd_wake_dev, dev);
+}
+
+static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
+{
+ acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->sdev->sdev_gendev);
+ acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, zpodd_wake_dev);
+}
+
+void zpodd_init(struct ata_device *dev)
+{
+ enum odd_mech_type mech_type;
+ struct zpodd *zpodd;
+
+ if (dev->zpodd)
+ return;
+
+ if (!odd_can_poweroff(dev))
+ return;
+
+ mech_type = zpodd_get_mech_type(dev);
+ if (mech_type == ODD_MECH_TYPE_UNSUPPORTED)
+ return;
+
+ zpodd = kzalloc(sizeof(struct zpodd), GFP_KERNEL);
+ if (!zpodd)
+ return;
+
+ zpodd->mech_type = mech_type;
+
+ ata_acpi_add_pm_notifier(dev);
+ zpodd->dev = dev;
+ dev->zpodd = zpodd;
+}
+
+void zpodd_exit(struct ata_device *dev)
+{
+ ata_acpi_remove_pm_notifier(dev);
+ kfree(dev->zpodd);
+ dev->zpodd = NULL;
+}
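For reference, the zero-power-ready test in zpready() above interprets the TEST UNIT READY sense data using standard SCSI additional sense codes (ASC 0x3A: medium not present; ASCQ 0x01: tray closed). An illustrative condensation of that condition, assuming the sense buffer has already been validated as in zpready():

#include <linux/types.h>
#include <scsi/scsi.h>

/* Illustrative only: the condition zpready() treats as "zero power ready". */
static bool example_is_zp_ready(u8 sense_key, u8 asc, u8 ascq, bool drawer)
{
	if (sense_key != NOT_READY || asc != 0x3a)
		return false;			/* medium present or other state */

	/* slot: no medium is enough; drawer: the tray must also be closed */
	return drawer ? ascq == 0x01 : true;
}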
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 7148a58020b9..c949dd311b2e 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -182,6 +182,9 @@ extern void ata_eh_finish(struct ata_port *ap);
extern int ata_ering_map(struct ata_ering *ering,
int (*map_fn)(struct ata_ering_entry *, void *),
void *arg);
+extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key);
+extern unsigned int atapi_eh_request_sense(struct ata_device *dev,
+ u8 *sense_buf, u8 dfl_sense_key);
/* libata-pmp.c */
#ifdef CONFIG_SATA_PMP
@@ -230,4 +233,28 @@ static inline void ata_sff_exit(void)
{ }
#endif /* CONFIG_ATA_SFF */
+/* libata-zpodd.c */
+#ifdef CONFIG_SATA_ZPODD
+void zpodd_init(struct ata_device *dev);
+void zpodd_exit(struct ata_device *dev);
+static inline bool zpodd_dev_enabled(struct ata_device *dev)
+{
+ return dev->zpodd != NULL;
+}
+void zpodd_on_suspend(struct ata_device *dev);
+bool zpodd_zpready(struct ata_device *dev);
+void zpodd_enable_run_wake(struct ata_device *dev);
+void zpodd_disable_run_wake(struct ata_device *dev);
+void zpodd_post_poweron(struct ata_device *dev);
+#else /* CONFIG_SATA_ZPODD */
+static inline void zpodd_init(struct ata_device *dev) {}
+static inline void zpodd_exit(struct ata_device *dev) {}
+static inline bool zpodd_dev_enabled(struct ata_device *dev) { return false; }
+static inline void zpodd_on_suspend(struct ata_device *dev) {}
+static inline bool zpodd_zpready(struct ata_device *dev) { return false; }
+static inline void zpodd_enable_run_wake(struct ata_device *dev) {}
+static inline void zpodd_disable_run_wake(struct ata_device *dev) {}
+static inline void zpodd_post_poweron(struct ata_device *dev) {}
+#endif /* CONFIG_SATA_ZPODD */
+
#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 652f57e83484..3a8fb28b71f2 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -26,9 +26,9 @@
#include <asm/prom.h>
#include <asm/mpc52xx.h>
-#include <sysdev/bestcomm/bestcomm.h>
-#include <sysdev/bestcomm/bestcomm_priv.h>
-#include <sysdev/bestcomm/ata.h>
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/ata.h>
#define DRV_NAME "mpc52xx_ata"
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 63ffb002ec67..70b0e01372b3 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -512,7 +512,7 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
return -ENOMEM;
}
- info->clk = clk_get(&pdev->dev, "cfcon");
+ info->clk = devm_clk_get(&pdev->dev, "cfcon");
if (IS_ERR(info->clk)) {
dev_err(dev, "failed to get access to cf controller clock\n");
ret = PTR_ERR(info->clk);
@@ -589,7 +589,6 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
stop_clk:
clk_disable(info->clk);
- clk_put(info->clk);
return ret;
}
@@ -601,7 +600,6 @@ static int __exit pata_s3c_remove(struct platform_device *pdev)
ata_host_detach(host);
clk_disable(info->clk);
- clk_put(info->clk);
return 0;
}
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
new file mode 100644
index 000000000000..caf33f620c35
--- /dev/null
+++ b/drivers/ata/sata_rcar.c
@@ -0,0 +1,910 @@
+/*
+ * Renesas R-Car SATA driver
+ *
+ * Author: Vladimir Barinov <source@cogentembedded.com>
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define DRV_NAME "sata_rcar"
+
+/* SH-Navi2G/ATAPI-ATA compatible task registers */
+#define DATA_REG 0x100
+#define SDEVCON_REG 0x138
+
+/* SH-Navi2G/ATAPI module compatible control registers */
+#define ATAPI_CONTROL1_REG 0x180
+#define ATAPI_STATUS_REG 0x184
+#define ATAPI_INT_ENABLE_REG 0x188
+#define ATAPI_DTB_ADR_REG 0x198
+#define ATAPI_DMA_START_ADR_REG 0x19C
+#define ATAPI_DMA_TRANS_CNT_REG 0x1A0
+#define ATAPI_CONTROL2_REG 0x1A4
+#define ATAPI_SIG_ST_REG 0x1B0
+#define ATAPI_BYTE_SWAP_REG 0x1BC
+
+/* ATAPI control 1 register (ATAPI_CONTROL1) bits */
+#define ATAPI_CONTROL1_ISM BIT(16)
+#define ATAPI_CONTROL1_DTA32M BIT(11)
+#define ATAPI_CONTROL1_RESET BIT(7)
+#define ATAPI_CONTROL1_DESE BIT(3)
+#define ATAPI_CONTROL1_RW BIT(2)
+#define ATAPI_CONTROL1_STOP BIT(1)
+#define ATAPI_CONTROL1_START BIT(0)
+
+/* ATAPI status register (ATAPI_STATUS) bits */
+#define ATAPI_STATUS_SATAINT BIT(11)
+#define ATAPI_STATUS_DNEND BIT(6)
+#define ATAPI_STATUS_DEVTRM BIT(5)
+#define ATAPI_STATUS_DEVINT BIT(4)
+#define ATAPI_STATUS_ERR BIT(2)
+#define ATAPI_STATUS_NEND BIT(1)
+#define ATAPI_STATUS_ACT BIT(0)
+
+/* Interrupt enable register (ATAPI_INT_ENABLE) bits */
+#define ATAPI_INT_ENABLE_SATAINT BIT(11)
+#define ATAPI_INT_ENABLE_DNEND BIT(6)
+#define ATAPI_INT_ENABLE_DEVTRM BIT(5)
+#define ATAPI_INT_ENABLE_DEVINT BIT(4)
+#define ATAPI_INT_ENABLE_ERR BIT(2)
+#define ATAPI_INT_ENABLE_NEND BIT(1)
+#define ATAPI_INT_ENABLE_ACT BIT(0)
+
+/* Access control registers for physical layer control register */
+#define SATAPHYADDR_REG 0x200
+#define SATAPHYWDATA_REG 0x204
+#define SATAPHYACCEN_REG 0x208
+#define SATAPHYRESET_REG 0x20C
+#define SATAPHYRDATA_REG 0x210
+#define SATAPHYACK_REG 0x214
+
+/* Physical layer control address command register (SATAPHYADDR) bits */
+#define SATAPHYADDR_PHYRATEMODE BIT(10)
+#define SATAPHYADDR_PHYCMD_READ BIT(9)
+#define SATAPHYADDR_PHYCMD_WRITE BIT(8)
+
+/* Physical layer control enable register (SATAPHYACCEN) bits */
+#define SATAPHYACCEN_PHYLANE BIT(0)
+
+/* Physical layer control reset register (SATAPHYRESET) bits */
+#define SATAPHYRESET_PHYRST BIT(1)
+#define SATAPHYRESET_PHYSRES BIT(0)
+
+/* Physical layer control acknowledge register (SATAPHYACK) bits */
+#define SATAPHYACK_PHYACK BIT(0)
+
+/* Serial-ATA HOST control registers */
+#define BISTCONF_REG 0x102C
+#define SDATA_REG 0x1100
+#define SSDEVCON_REG 0x1204
+
+#define SCRSSTS_REG 0x1400
+#define SCRSERR_REG 0x1404
+#define SCRSCON_REG 0x1408
+#define SCRSACT_REG 0x140C
+
+#define SATAINTSTAT_REG 0x1508
+#define SATAINTMASK_REG 0x150C
+
+/* SATA INT status register (SATAINTSTAT) bits */
+#define SATAINTSTAT_SERR BIT(3)
+#define SATAINTSTAT_ATA BIT(0)
+
+/* SATA INT mask register (SATAINTSTAT) bits */
+#define SATAINTMASK_SERRMSK BIT(3)
+#define SATAINTMASK_ERRMSK BIT(2)
+#define SATAINTMASK_ERRCRTMSK BIT(1)
+#define SATAINTMASK_ATAMSK BIT(0)
+
+#define SATA_RCAR_INT_MASK (SATAINTMASK_SERRMSK | \
+ SATAINTMASK_ATAMSK)
+
+/* Physical Layer Control Registers */
+#define SATAPCTLR1_REG 0x43
+#define SATAPCTLR2_REG 0x52
+#define SATAPCTLR3_REG 0x5A
+#define SATAPCTLR4_REG 0x60
+
+/* Descriptor table word 0 bit (when DTA32M = 1) */
+#define SATA_RCAR_DTEND BIT(0)
+
+struct sata_rcar_priv {
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static void sata_rcar_phy_initialize(struct sata_rcar_priv *priv)
+{
+ /* idle state */
+ iowrite32(0, priv->base + SATAPHYADDR_REG);
+ /* reset */
+ iowrite32(SATAPHYRESET_PHYRST, priv->base + SATAPHYRESET_REG);
+ udelay(10);
+ /* deassert reset */
+ iowrite32(0, priv->base + SATAPHYRESET_REG);
+}
+
+static void sata_rcar_phy_write(struct sata_rcar_priv *priv, u16 reg, u32 val,
+ int group)
+{
+ int timeout;
+
+ /* deassert reset */
+ iowrite32(0, priv->base + SATAPHYRESET_REG);
+ /* lane 1 */
+ iowrite32(SATAPHYACCEN_PHYLANE, priv->base + SATAPHYACCEN_REG);
+ /* write phy register value */
+ iowrite32(val, priv->base + SATAPHYWDATA_REG);
+ /* set register group */
+ if (group)
+ reg |= SATAPHYADDR_PHYRATEMODE;
+ /* write command */
+ iowrite32(SATAPHYADDR_PHYCMD_WRITE | reg, priv->base + SATAPHYADDR_REG);
+ /* wait for ack */
+ for (timeout = 0; timeout < 100; timeout++) {
+ val = ioread32(priv->base + SATAPHYACK_REG);
+ if (val & SATAPHYACK_PHYACK)
+ break;
+ }
+ if (timeout >= 100)
+ pr_err("%s timeout\n", __func__);
+ /* idle state */
+ iowrite32(0, priv->base + SATAPHYADDR_REG);
+}
+
+static void sata_rcar_freeze(struct ata_port *ap)
+{
+ struct sata_rcar_priv *priv = ap->host->private_data;
+
+ /* mask */
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+
+ ata_sff_freeze(ap);
+}
+
+static void sata_rcar_thaw(struct ata_port *ap)
+{
+ struct sata_rcar_priv *priv = ap->host->private_data;
+
+ /* ack */
+ iowrite32(~SATA_RCAR_INT_MASK, priv->base + SATAINTSTAT_REG);
+
+ ata_sff_thaw(ap);
+
+ /* unmask */
+ iowrite32(0x7ff & ~SATA_RCAR_INT_MASK, priv->base + SATAINTMASK_REG);
+}
+
+static void sata_rcar_ioread16_rep(void __iomem *reg, void *buffer, int count)
+{
+ u16 *ptr = buffer;
+
+ while (count--) {
+ u16 data = ioread32(reg);
+
+ *ptr++ = data;
+ }
+}
+
+static void sata_rcar_iowrite16_rep(void __iomem *reg, void *buffer, int count)
+{
+ const u16 *ptr = buffer;
+
+ while (count--)
+ iowrite32(*ptr++, reg);
+}
+
+static u8 sata_rcar_check_status(struct ata_port *ap)
+{
+ return ioread32(ap->ioaddr.status_addr);
+}
+
+static u8 sata_rcar_check_altstatus(struct ata_port *ap)
+{
+ return ioread32(ap->ioaddr.altstatus_addr);
+}
+
+static void sata_rcar_set_devctl(struct ata_port *ap, u8 ctl)
+{
+ iowrite32(ctl, ap->ioaddr.ctl_addr);
+}
+
+static void sata_rcar_dev_select(struct ata_port *ap, unsigned int device)
+{
+ iowrite32(ATA_DEVICE_OBS, ap->ioaddr.device_addr);
+ ata_sff_pause(ap); /* needed; also flushes, for mmio */
+}
+
+static unsigned int sata_rcar_ata_devchk(struct ata_port *ap,
+ unsigned int device)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u8 nsect, lbal;
+
+ sata_rcar_dev_select(ap, device);
+
+ iowrite32(0x55, ioaddr->nsect_addr);
+ iowrite32(0xaa, ioaddr->lbal_addr);
+
+ iowrite32(0xaa, ioaddr->nsect_addr);
+ iowrite32(0x55, ioaddr->lbal_addr);
+
+ iowrite32(0x55, ioaddr->nsect_addr);
+ iowrite32(0xaa, ioaddr->lbal_addr);
+
+ nsect = ioread32(ioaddr->nsect_addr);
+ lbal = ioread32(ioaddr->lbal_addr);
+
+ if (nsect == 0x55 && lbal == 0xaa)
+ return 1; /* found a device */
+
+ return 0; /* nothing found */
+}
+
+static int sata_rcar_wait_after_reset(struct ata_link *link,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+
+ ata_msleep(ap, ATA_WAIT_AFTER_RESET);
+
+ return ata_sff_wait_ready(link, deadline);
+}
+
+static int sata_rcar_bus_softreset(struct ata_port *ap, unsigned long deadline)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+ /* software reset. causes dev0 to be selected */
+ iowrite32(ap->ctl, ioaddr->ctl_addr);
+ udelay(20);
+ iowrite32(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+ udelay(20);
+ iowrite32(ap->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = ap->ctl;
+
+ /* wait for the port to become ready */
+ return sata_rcar_wait_after_reset(&ap->link, deadline);
+}
+
+static int sata_rcar_softreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ unsigned int devmask = 0;
+ int rc;
+ u8 err;
+
+ /* determine if device 0 is present */
+ if (sata_rcar_ata_devchk(ap, 0))
+ devmask |= 1 << 0;
+
+ /* issue bus reset */
+ DPRINTK("about to softreset, devmask=%x\n", devmask);
+ rc = sata_rcar_bus_softreset(ap, deadline);
+ /* if link is occupied, -ENODEV too is an error */
+ if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
+ ata_link_err(link, "SRST failed (errno=%d)\n", rc);
+ return rc;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ classes[0] = ata_sff_dev_classify(&link->device[0], devmask, &err);
+
+ DPRINTK("classes[0]=%u\n", classes[0]);
+ return 0;
+}
+
+static void sata_rcar_tf_load(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ if (tf->ctl != ap->last_ctl) {
+ iowrite32(tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
+ }
+
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ iowrite32(tf->hob_feature, ioaddr->feature_addr);
+ iowrite32(tf->hob_nsect, ioaddr->nsect_addr);
+ iowrite32(tf->hob_lbal, ioaddr->lbal_addr);
+ iowrite32(tf->hob_lbam, ioaddr->lbam_addr);
+ iowrite32(tf->hob_lbah, ioaddr->lbah_addr);
+ VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+ tf->hob_feature,
+ tf->hob_nsect,
+ tf->hob_lbal,
+ tf->hob_lbam,
+ tf->hob_lbah);
+ }
+
+ if (is_addr) {
+ iowrite32(tf->feature, ioaddr->feature_addr);
+ iowrite32(tf->nsect, ioaddr->nsect_addr);
+ iowrite32(tf->lbal, ioaddr->lbal_addr);
+ iowrite32(tf->lbam, ioaddr->lbam_addr);
+ iowrite32(tf->lbah, ioaddr->lbah_addr);
+ VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+ tf->feature,
+ tf->nsect,
+ tf->lbal,
+ tf->lbam,
+ tf->lbah);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE) {
+ iowrite32(tf->device, ioaddr->device_addr);
+ VPRINTK("device 0x%X\n", tf->device);
+ }
+
+ ata_wait_idle(ap);
+}
+
+static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ tf->command = sata_rcar_check_status(ap);
+ tf->feature = ioread32(ioaddr->error_addr);
+ tf->nsect = ioread32(ioaddr->nsect_addr);
+ tf->lbal = ioread32(ioaddr->lbal_addr);
+ tf->lbam = ioread32(ioaddr->lbam_addr);
+ tf->lbah = ioread32(ioaddr->lbah_addr);
+ tf->device = ioread32(ioaddr->device_addr);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ iowrite32(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+ tf->hob_feature = ioread32(ioaddr->error_addr);
+ tf->hob_nsect = ioread32(ioaddr->nsect_addr);
+ tf->hob_lbal = ioread32(ioaddr->lbal_addr);
+ tf->hob_lbam = ioread32(ioaddr->lbam_addr);
+ tf->hob_lbah = ioread32(ioaddr->lbah_addr);
+ iowrite32(tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
+ }
+}
+
+static void sata_rcar_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+ iowrite32(tf->command, ap->ioaddr.command_addr);
+ ata_sff_pause(ap);
+}
+
+static unsigned int sata_rcar_data_xfer(struct ata_device *dev,
+ unsigned char *buf,
+ unsigned int buflen, int rw)
+{
+ struct ata_port *ap = dev->link->ap;
+ void __iomem *data_addr = ap->ioaddr.data_addr;
+ unsigned int words = buflen >> 1;
+
+ /* Transfer multiple of 2 bytes */
+ if (rw == READ)
+ sata_rcar_ioread16_rep(data_addr, buf, words);
+ else
+ sata_rcar_iowrite16_rep(data_addr, buf, words);
+
+ /* Transfer trailing byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ unsigned char pad[2] = { };
+
+ /* Point buf to the tail of buffer */
+ buf += buflen - 1;
+
+ /*
+ * Use io*16_rep() accessors here as well to avoid pointlessly
+ * swapping bytes back and forth on big-endian machines...
+ */
+ if (rw == READ) {
+ sata_rcar_ioread16_rep(data_addr, pad, 1);
+ *buf = pad[0];
+ } else {
+ pad[0] = *buf;
+ sata_rcar_iowrite16_rep(data_addr, pad, 1);
+ }
+ words++;
+ }
+
+ return words << 1;
+}
+
+static void sata_rcar_drain_fifo(struct ata_queued_cmd *qc)
+{
+ int count;
+ struct ata_port *ap;
+
+ /* We only need to flush incoming data when a command was running */
+ if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
+ return;
+
+ ap = qc->ap;
+ /* Drain up to 64K of data before we give up this recovery method */
+ for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) &&
+ count < 65536; count += 2)
+ ioread32(ap->ioaddr.data_addr);
+
+ /* Can become DEBUG later */
+ if (count)
+ ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
+}
+
+static int sata_rcar_scr_read(struct ata_link *link, unsigned int sc_reg,
+ u32 *val)
+{
+ if (sc_reg > SCR_ACTIVE)
+ return -EINVAL;
+
+ *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg << 2));
+ return 0;
+}
+
+static int sata_rcar_scr_write(struct ata_link *link, unsigned int sc_reg,
+ u32 val)
+{
+ if (sc_reg > SCR_ACTIVE)
+ return -EINVAL;
+
+ iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg << 2));
+ return 0;
+}
+
+static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_bmdma_prd *prd = ap->bmdma_prd;
+ struct scatterlist *sg;
+ unsigned int si, pi;
+
+ pi = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr, sg_len, len;
+
+ /*
+ * Note: h/w doesn't support 64-bit, so we unconditionally
+ * truncate dma_addr_t to u32.
+ */
+ addr = (u32)sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ /* H/w transfer count is only 29 bits long, let's be careful */
+ while (sg_len) {
+ len = sg_len;
+ if (len > 0x1ffffffe)
+ len = 0x1ffffffe;
+
+ prd[pi].addr = cpu_to_le32(addr);
+ prd[pi].flags_len = cpu_to_le32(len);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ /* end-of-table flag */
+ prd[pi - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
+}
+
+static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+{
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ sata_rcar_bmdma_fill_sg(qc);
+}
+
+static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int rw = qc->tf.flags & ATA_TFLAG_WRITE;
+ u32 dmactl;
+ struct sata_rcar_priv *priv = ap->host->private_data;
+
+ /* load PRD table addr. */
+ mb(); /* make sure PRD table writes are visible to controller */
+ iowrite32(ap->bmdma_prd_dma, priv->base + ATAPI_DTB_ADR_REG);
+
+ /* specify data direction, triple-check start bit is clear */
+ dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ dmactl &= ~(ATAPI_CONTROL1_RW | ATAPI_CONTROL1_STOP);
+ if (dmactl & ATAPI_CONTROL1_START) {
+ dmactl &= ~ATAPI_CONTROL1_START;
+ dmactl |= ATAPI_CONTROL1_STOP;
+ }
+ if (!rw)
+ dmactl |= ATAPI_CONTROL1_RW;
+ iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
+
+ /* issue r/w command */
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ u32 dmactl;
+ struct sata_rcar_priv *priv = ap->host->private_data;
+
+ /* start host DMA transaction */
+ dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ dmactl |= ATAPI_CONTROL1_START;
+ iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
+}
+
+static void sata_rcar_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct sata_rcar_priv *priv = ap->host->private_data;
+ u32 dmactl;
+
+ /* force termination of DMA transfer if active */
+ dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ if (dmactl & ATAPI_CONTROL1_START) {
+ dmactl &= ~ATAPI_CONTROL1_START;
+ dmactl |= ATAPI_CONTROL1_STOP;
+ iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
+ }
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap);
+}
+
+static u8 sata_rcar_bmdma_status(struct ata_port *ap)
+{
+ struct sata_rcar_priv *priv = ap->host->private_data;
+ u32 status;
+ u8 host_stat = 0;
+
+ status = ioread32(priv->base + ATAPI_STATUS_REG);
+ if (status & ATAPI_STATUS_DEVINT)
+ host_stat |= ATA_DMA_INTR;
+ if (status & ATAPI_STATUS_ACT)
+ host_stat |= ATA_DMA_ACTIVE;
+
+ return host_stat;
+}
+
+static struct scsi_host_template sata_rcar_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sata_rcar_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .freeze = sata_rcar_freeze,
+ .thaw = sata_rcar_thaw,
+ .softreset = sata_rcar_softreset,
+
+ .scr_read = sata_rcar_scr_read,
+ .scr_write = sata_rcar_scr_write,
+
+ .sff_dev_select = sata_rcar_dev_select,
+ .sff_set_devctl = sata_rcar_set_devctl,
+ .sff_check_status = sata_rcar_check_status,
+ .sff_check_altstatus = sata_rcar_check_altstatus,
+ .sff_tf_load = sata_rcar_tf_load,
+ .sff_tf_read = sata_rcar_tf_read,
+ .sff_exec_command = sata_rcar_exec_command,
+ .sff_data_xfer = sata_rcar_data_xfer,
+ .sff_drain_fifo = sata_rcar_drain_fifo,
+
+ .qc_prep = sata_rcar_qc_prep,
+
+ .bmdma_setup = sata_rcar_bmdma_setup,
+ .bmdma_start = sata_rcar_bmdma_start,
+ .bmdma_stop = sata_rcar_bmdma_stop,
+ .bmdma_status = sata_rcar_bmdma_status,
+};
+
+static int sata_rcar_serr_interrupt(struct ata_port *ap)
+{
+ struct sata_rcar_priv *priv = ap->host->private_data;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ int freeze = 0;
+ int handled = 0;
+ u32 serror;
+
+ serror = ioread32(priv->base + SCRSERR_REG);
+ if (!serror)
+ return 0;
+
+ DPRINTK("SError @host_intr: 0x%x\n", serror);
+
+ /* first, analyze and record host port events */
+ ata_ehi_clear_desc(ehi);
+
+ if (serror & (SERR_DEV_XCHG | SERR_PHYRDY_CHG)) {
+ /* Setup a soft-reset EH action */
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "%s", "hotplug");
+
+ freeze = serror & SERR_COMM_WAKE ? 0 : 1;
+ handled = 1;
+ }
+
+ /* freeze or abort */
+ if (freeze)
+ ata_port_freeze(ap);
+ else
+ ata_port_abort(ap);
+
+ return handled;
+}
+
+static int sata_rcar_ata_interrupt(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ int handled = 0;
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc)
+ handled |= ata_bmdma_port_intr(ap, qc);
+
+ return handled;
+}
+
+static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct sata_rcar_priv *priv = host->private_data;
+ struct ata_port *ap;
+ unsigned int handled = 0;
+ u32 sataintstat;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ sataintstat = ioread32(priv->base + SATAINTSTAT_REG);
+ if (!sataintstat)
+ goto done;
+ /* ack */
+ iowrite32(sataintstat & ~SATA_RCAR_INT_MASK,
+ priv->base + SATAINTSTAT_REG);
+
+ ap = host->ports[0];
+
+ if (sataintstat & SATAINTSTAT_ATA)
+ handled |= sata_rcar_ata_interrupt(ap);
+
+ if (sataintstat & SATAINTSTAT_SERR)
+ handled |= sata_rcar_serr_interrupt(ap);
+
+done:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void sata_rcar_setup_port(struct ata_host *host)
+{
+ struct ata_port *ap = host->ports[0];
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ struct sata_rcar_priv *priv = host->private_data;
+
+ ap->ops = &sata_rcar_port_ops;
+ ap->pio_mask = ATA_PIO4;
+ ap->udma_mask = ATA_UDMA6;
+ ap->flags |= ATA_FLAG_SATA;
+
+ ioaddr->cmd_addr = priv->base + SDATA_REG;
+ ioaddr->ctl_addr = priv->base + SSDEVCON_REG;
+ ioaddr->scr_addr = priv->base + SCRSSTS_REG;
+ ioaddr->altstatus_addr = ioaddr->ctl_addr;
+
+ ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2);
+ ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2);
+ ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
+ ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2);
+ ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2);
+ ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2);
+ ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2);
+ ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2);
+ ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2);
+ ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
+}
+
+static void sata_rcar_init_controller(struct ata_host *host)
+{
+ struct sata_rcar_priv *priv = host->private_data;
+ u32 val;
+
+ /* reset and setup phy */
+ sata_rcar_phy_initialize(priv);
+ sata_rcar_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 0);
+ sata_rcar_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 1);
+ sata_rcar_phy_write(priv, SATAPCTLR3_REG, 0x0000A061, 0);
+ sata_rcar_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 0);
+ sata_rcar_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 1);
+ sata_rcar_phy_write(priv, SATAPCTLR4_REG, 0x28E80000, 0);
+
+ /* SATA-IP reset state */
+ val = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ val |= ATAPI_CONTROL1_RESET;
+ iowrite32(val, priv->base + ATAPI_CONTROL1_REG);
+
+ /* ISM mode, PRD mode, DTEND flag at bit 0 */
+ val = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ val |= ATAPI_CONTROL1_ISM;
+ val |= ATAPI_CONTROL1_DESE;
+ val |= ATAPI_CONTROL1_DTA32M;
+ iowrite32(val, priv->base + ATAPI_CONTROL1_REG);
+
+ /* Release the SATA-IP from the reset state */
+ val = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ val &= ~ATAPI_CONTROL1_RESET;
+ iowrite32(val, priv->base + ATAPI_CONTROL1_REG);
+
+ /* ack and mask */
+ iowrite32(0, priv->base + SATAINTSTAT_REG);
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+ /* enable interrupts */
+ iowrite32(ATAPI_INT_ENABLE_SATAINT, priv->base + ATAPI_INT_ENABLE_REG);
+}
+
+static int sata_rcar_probe(struct platform_device *pdev)
+{
+ struct ata_host *host;
+ struct sata_rcar_priv *priv;
+ struct resource *mem;
+ int irq;
+ int ret = 0;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem == NULL)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct sata_rcar_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "failed to get access to sata clock\n");
+ return PTR_ERR(priv->clk);
+ }
+ clk_enable(priv->clk);
+
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host) {
+ dev_err(&pdev->dev, "ata_host_alloc failed\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ host->private_data = priv;
+
+ priv->base = devm_request_and_ioremap(&pdev->dev, mem);
+ if (!priv->base) {
+ ret = -EADDRNOTAVAIL;
+ goto cleanup;
+ }
+
+ /* setup port */
+ sata_rcar_setup_port(host);
+
+ /* initialize host controller */
+ sata_rcar_init_controller(host);
+
+ ret = ata_host_activate(host, irq, sata_rcar_interrupt, 0,
+ &sata_rcar_sht);
+ if (!ret)
+ return 0;
+
+cleanup:
+ clk_disable(priv->clk);
+
+ return ret;
+}
+
+static int sata_rcar_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct sata_rcar_priv *priv = host->private_data;
+
+ ata_host_detach(host);
+
+ /* disable interrupts */
+ iowrite32(0, priv->base + ATAPI_INT_ENABLE_REG);
+ /* ack and mask */
+ iowrite32(0, priv->base + SATAINTSTAT_REG);
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+
+ clk_disable(priv->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sata_rcar_suspend(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct sata_rcar_priv *priv = host->private_data;
+ int ret;
+
+ ret = ata_host_suspend(host, PMSG_SUSPEND);
+ if (!ret) {
+ /* disable interrupts */
+ iowrite32(0, priv->base + ATAPI_INT_ENABLE_REG);
+ /* mask */
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+
+ clk_disable(priv->clk);
+ }
+
+ return ret;
+}
+
+static int sata_rcar_resume(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct sata_rcar_priv *priv = host->private_data;
+
+ clk_enable(priv->clk);
+
+ /* ack and mask */
+ iowrite32(0, priv->base + SATAINTSTAT_REG);
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+ /* enable interrupts */
+ iowrite32(ATAPI_INT_ENABLE_SATAINT, priv->base + ATAPI_INT_ENABLE_REG);
+
+ ata_host_resume(host);
+
+ return 0;
+}
+
+static const struct dev_pm_ops sata_rcar_pm_ops = {
+ .suspend = sata_rcar_suspend,
+ .resume = sata_rcar_resume,
+};
+#endif
+
+static struct of_device_id sata_rcar_match[] = {
+ { .compatible = "renesas,rcar-sata", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sata_rcar_match);
+
+static struct platform_driver sata_rcar_driver = {
+ .probe = sata_rcar_probe,
+ .remove = sata_rcar_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sata_rcar_match,
+#ifdef CONFIG_PM
+ .pm = &sata_rcar_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(sata_rcar_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vladimir Barinov");
+MODULE_DESCRIPTION("Renesas R-Car SATA controller low level driver");
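
The sata_rcar driver above only ever writes PHY registers during controller setup; no read helper is provided, although SATAPHYADDR_PHYCMD_READ and SATAPHYRDATA_REG are defined. Purely as an editor's illustration, and assuming the read handshake mirrors the write path in sata_rcar_phy_write(), a read helper would look roughly like this (the function is not part of the patch):

/* hypothetical sketch -- not part of the driver */
static u32 sata_rcar_phy_read(struct sata_rcar_priv *priv, u16 reg, int group)
{
	int timeout;
	u32 val;

	/* deassert reset */
	iowrite32(0, priv->base + SATAPHYRESET_REG);
	/* lane 1 */
	iowrite32(SATAPHYACCEN_PHYLANE, priv->base + SATAPHYACCEN_REG);
	/* set register group */
	if (group)
		reg |= SATAPHYADDR_PHYRATEMODE;
	/* issue read command */
	iowrite32(SATAPHYADDR_PHYCMD_READ | reg, priv->base + SATAPHYADDR_REG);
	/* wait for ack */
	for (timeout = 0; timeout < 100; timeout++) {
		val = ioread32(priv->base + SATAPHYACK_REG);
		if (val & SATAPHYACK_PHYACK)
			break;
	}
	if (timeout >= 100)
		pr_err("%s timeout\n", __func__);
	/* latch the read data, then return to the idle state */
	val = ioread32(priv->base + SATAPHYRDATA_REG);
	iowrite32(0, priv->base + SATAPHYADDR_REG);

	return val;
}
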
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index b22d71cac54c..0e3f8f9dcd29 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -157,7 +157,6 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
{
struct atm_cirange ci;
struct atm_vcc *vcc;
- struct hlist_node *node;
struct sock *s;
int i;
@@ -171,7 +170,7 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
struct hlist_head *head = &vcc_hash[i];
- sk_for_each(s, node, head) {
+ sk_for_each(s, head) {
vcc = atm_sk(s);
if (vcc->dev != dev)
continue;
@@ -264,12 +263,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
{
struct hlist_head *head;
struct atm_vcc *vcc;
- struct hlist_node *node;
struct sock *s;
head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
- sk_for_each(s, node, head) {
+ sk_for_each(s, head) {
vcc = atm_sk(s);
if (vcc->dev == dev &&
vcc->vci == vci && vcc->vpi == vpi &&
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index c1eb6fa8ac35..b1955ba40d63 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2093,7 +2093,6 @@ static unsigned char eni_phy_get(struct atm_dev *dev,unsigned long addr)
static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
{
- struct hlist_node *node;
struct sock *s;
static const char *signal[] = { "LOST","unknown","okay" };
struct eni_dev *eni_dev = ENI_DEV(dev);
@@ -2171,7 +2170,7 @@ static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
struct hlist_head *head = &vcc_hash[i];
- sk_for_each(s, node, head) {
+ sk_for_each(s, head) {
struct eni_vcc *eni_vcc;
int length;
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 72b6960fa95f..d6891267f5bb 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -329,7 +329,6 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
{
struct hlist_head *head;
struct atm_vcc *vcc;
- struct hlist_node *node;
struct sock *s;
short vpi;
int vci;
@@ -338,7 +337,7 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
vci = cid & ((1 << he_dev->vcibits) - 1);
head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
- sk_for_each(s, node, head) {
+ sk_for_each(s, head) {
vcc = atm_sk(s);
if (vcc->dev == he_dev->atm_dev &&
vcc->vci == vci && vcc->vpi == vpi &&
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index ed1d2b7f923b..6587dc295eb0 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -251,7 +251,6 @@ static void nicstar_remove_one(struct pci_dev *pcidev)
if (card->scd2vc[j] != NULL)
free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
}
- idr_remove_all(&card->idr);
idr_destroy(&card->idr);
pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
card->rsq.org, card->rsq.dma);
@@ -950,11 +949,10 @@ static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
{
struct sk_buff *handle1, *handle2;
- u32 id1 = 0, id2 = 0;
+ int id1, id2;
u32 addr1, addr2;
u32 stat;
unsigned long flags;
- int err;
/* *BARF* */
handle2 = NULL;
@@ -1027,23 +1025,12 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
card->lbfqc += 2;
}
- do {
- if (!idr_pre_get(&card->idr, GFP_ATOMIC)) {
- printk(KERN_ERR
- "nicstar%d: no free memory for idr\n",
- card->index);
- goto out;
- }
-
- if (!id1)
- err = idr_get_new_above(&card->idr, handle1, 0, &id1);
-
- if (!id2 && err == 0)
- err = idr_get_new_above(&card->idr, handle2, 0, &id2);
-
- } while (err == -EAGAIN);
+ id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC);
+ if (id1 < 0)
+ goto out;
- if (err)
+ id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC);
+ if (id2 < 0)
goto out;
spin_lock_irqsave(&card->res_lock, flags);
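
The nicstar change above, like the drbd and loop changes further down, is a mechanical conversion from the old idr_pre_get()/idr_get_new_above() retry loop to idr_alloc(), which reserves the slot and returns the ID in one call. A minimal sketch of the new pattern; my_objects, struct my_obj and register_obj() are invented for illustration:

#include <linux/idr.h>

static DEFINE_IDR(my_objects);

struct my_obj {
	int id;
	/* ... */
};

static int register_obj(struct my_obj *obj, gfp_t gfp)
{
	int id;

	/*
	 * idr_alloc() returns the allocated id (>= start) or a negative
	 * errno; end <= 0 means "no upper limit".  Passing [n, n + 1)
	 * requests exactly id n and fails with -ENOSPC if it is taken,
	 * which is how the drbd and loop hunks below detect "exists".
	 */
	id = idr_alloc(&my_objects, obj, 0, 0, gfp);
	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */

	obj->id = id;
	return 0;
}
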
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 0474a89170b9..32784d18d1f7 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -896,12 +896,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
{
struct hlist_head *head;
struct atm_vcc *vcc = NULL;
- struct hlist_node *node;
struct sock *s;
read_lock(&vcc_sklist_lock);
head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
- sk_for_each(s, node, head) {
+ sk_for_each(s, head) {
vcc = atm_sk(s);
if (vcc->dev == dev && vcc->vci == vci &&
vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE &&
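
The four ATM hunks above (atmtcp, eni, he, solos-pci) are all fallout from the hlist iterator API change: sk_for_each() no longer takes a separate struct hlist_node cursor. A sketch of the new-style lookup, modelled on the find_vcc() routines above; lookup_vcc() itself is invented for illustration, and locking (vcc_sklist_lock) is left to the caller as in the drivers:

#include <net/sock.h>
#include <linux/atmdev.h>

static struct atm_vcc *lookup_vcc(struct hlist_head *head, short vpi, int vci)
{
	struct sock *s;
	struct atm_vcc *vcc;

	/* old style:  struct hlist_node *node;  sk_for_each(s, node, head) */
	sk_for_each(s, head) {
		vcc = atm_sk(s);
		if (vcc->vpi == vpi && vcc->vci == vci)
			return vcc;
	}
	return NULL;
}
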
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 17cf7cad601e..01fc5b07f951 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -302,7 +302,8 @@ static int handle_remove(const char *nodename, struct device *dev)
if (dentry->d_inode) {
struct kstat stat;
- err = vfs_getattr(parent.mnt, dentry, &stat);
+ struct path p = {.mnt = parent.mnt, .dentry = dentry};
+ err = vfs_getattr(&p, &stat);
if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
struct iattr newattrs;
/*
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index ff5b745c4705..2a7cb0df176b 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -39,6 +39,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
dmabuf = file->private_data;
+ BUG_ON(dmabuf->vmapping_counter);
+
dmabuf->ops->release(dmabuf);
kfree(dmabuf);
return 0;
@@ -445,6 +447,9 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap);
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
unsigned long pgoff)
{
+ struct file *oldfile;
+ int ret;
+
if (WARN_ON(!dmabuf || !vma))
return -EINVAL;
@@ -458,14 +463,22 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return -EINVAL;
/* readjust the vma */
- if (vma->vm_file)
- fput(vma->vm_file);
-
- vma->vm_file = get_file(dmabuf->file);
-
+ get_file(dmabuf->file);
+ oldfile = vma->vm_file;
+ vma->vm_file = dmabuf->file;
vma->vm_pgoff = pgoff;
- return dmabuf->ops->mmap(dmabuf, vma);
+ ret = dmabuf->ops->mmap(dmabuf, vma);
+ if (ret) {
+ /* restore old parameters on failure */
+ vma->vm_file = oldfile;
+ fput(dmabuf->file);
+ } else {
+ if (oldfile)
+ fput(oldfile);
+ }
+ return ret;
+
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
@@ -481,12 +494,34 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
*/
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
+ void *ptr;
+
if (WARN_ON(!dmabuf))
return NULL;
- if (dmabuf->ops->vmap)
- return dmabuf->ops->vmap(dmabuf);
- return NULL;
+ if (!dmabuf->ops->vmap)
+ return NULL;
+
+ mutex_lock(&dmabuf->lock);
+ if (dmabuf->vmapping_counter) {
+ dmabuf->vmapping_counter++;
+ BUG_ON(!dmabuf->vmap_ptr);
+ ptr = dmabuf->vmap_ptr;
+ goto out_unlock;
+ }
+
+ BUG_ON(dmabuf->vmap_ptr);
+
+ ptr = dmabuf->ops->vmap(dmabuf);
+ if (IS_ERR_OR_NULL(ptr))
+ goto out_unlock;
+
+ dmabuf->vmap_ptr = ptr;
+ dmabuf->vmapping_counter = 1;
+
+out_unlock:
+ mutex_unlock(&dmabuf->lock);
+ return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
@@ -500,7 +535,16 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
if (WARN_ON(!dmabuf))
return;
- if (dmabuf->ops->vunmap)
- dmabuf->ops->vunmap(dmabuf, vaddr);
+ BUG_ON(!dmabuf->vmap_ptr);
+ BUG_ON(dmabuf->vmapping_counter == 0);
+ BUG_ON(dmabuf->vmap_ptr != vaddr);
+
+ mutex_lock(&dmabuf->lock);
+ if (--dmabuf->vmapping_counter == 0) {
+ if (dmabuf->ops->vunmap)
+ dmabuf->ops->vunmap(dmabuf, vaddr);
+ dmabuf->vmap_ptr = NULL;
+ }
+ mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
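
dma_buf_vmap() and dma_buf_vunmap() are now reference counted through vmapping_counter, so nested vmaps of one buffer share a single kernel mapping and the exporter's vunmap callback only runs on the last unmap; calls still have to be balanced. A hedged usage sketch from an importer's side; touch_buffer() is invented, and cache coherency handling (dma_buf_begin/end_cpu_access) is omitted on the assumption that the exporter allows plain CPU writes:

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/string.h>

static int touch_buffer(struct dma_buf *dmabuf)
{
	void *vaddr;

	vaddr = dma_buf_vmap(dmabuf);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

	/* CPU access through the shared kernel mapping */
	memset(vaddr, 0, dmabuf->size);

	/* must balance the vmap; mapping is torn down on the last vunmap */
	dma_buf_vunmap(dmabuf, vaddr);
	return 0;
}
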
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4a223fedcd73..4b1f9265887f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -279,7 +279,7 @@ MODULE_PARM_DESC(path, "customized firmware image search path with a higher prio
static noinline_for_stack long fw_file_size(struct file *file)
{
struct kstat st;
- if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
+ if (vfs_getattr(&file->f_path, &st))
return -1;
if (!S_ISREG(st.mode))
return -1;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 83d0b17ba1c2..a51007b79032 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -693,6 +693,12 @@ int offline_memory_block(struct memory_block *mem)
return ret;
}
+/* return true if the memory block is offlined, otherwise, return false */
+bool is_memblock_offlined(struct memory_block *mem)
+{
+ return mem->state == MEM_OFFLINE;
+}
+
/*
* Initialize the sysfs support for memory devices...
*/
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3148b10dc2e5..1244930e3d7a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -124,6 +124,76 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
+static int dev_memalloc_noio(struct device *dev, void *data)
+{
+ return dev->power.memalloc_noio;
+}
+
+/*
+ * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
+ * @dev: Device to handle.
+ * @enable: True for setting the flag and False for clearing the flag.
+ *
+ * Set the flag for all devices in the path from the device to the
+ * root device in the device tree if @enable is true, otherwise clear
+ * the flag for devices in the path whose siblings don't set the flag.
+ *
+ * The function should only be called by block device or network
+ * device drivers to solve the deadlock problem during runtime
+ * resume/suspend:
+ *
+ * If a GFP_KERNEL memory allocation is made inside the runtime
+ * resume/suspend callback of any one of the device's ancestors (or
+ * the block device itself), a deadlock may be triggered inside the
+ * allocation, since it might not complete until the block device
+ * becomes active and the involved page I/O finishes. This situation
+ * was first pointed out by Alan Stern. Network devices are involved
+ * in the iSCSI kind of situation.
+ *
+ * dev_hotplug_mutex is held in this function to handle the hotplug
+ * race, because pm_runtime_set_memalloc_noio() may be called from an
+ * async probe().
+ *
+ * The function should be called between device_add() and device_del()
+ * on the affected device (block/network device).
+ */
+void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
+{
+ static DEFINE_MUTEX(dev_hotplug_mutex);
+
+ mutex_lock(&dev_hotplug_mutex);
+ for (;;) {
+ bool enabled;
+
+ /* hold power lock since bitfield is not SMP-safe. */
+ spin_lock_irq(&dev->power.lock);
+ enabled = dev->power.memalloc_noio;
+ dev->power.memalloc_noio = enable;
+ spin_unlock_irq(&dev->power.lock);
+
+ /*
+ * No need to enable the ancestors any further if the
+ * device already had the flag set.
+ */
+ if (enabled && enable)
+ break;
+
+ dev = dev->parent;
+
+ /*
+ * Clear the parent's flag only if none of its children
+ * still have the flag set, since an ancestor's flag may
+ * have been set on behalf of any one of its descendants.
+ */
+ if (!dev || (!enable &&
+ device_for_each_child(dev, NULL,
+ dev_memalloc_noio)))
+ break;
+ }
+ mutex_unlock(&dev_hotplug_mutex);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
+
/**
* rpm_check_suspend_allowed - Test whether a device may be suspended.
* @dev: Device to test.
@@ -278,7 +348,24 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
if (!cb)
return -ENOSYS;
- retval = __rpm_callback(cb, dev);
+ if (dev->power.memalloc_noio) {
+ unsigned int noio_flag;
+
+ /*
+ * Deadlock might be caused if memory allocation with
+ * GFP_KERNEL happens inside runtime_suspend and
+ * runtime_resume callbacks of one block device's
+ * ancestor or the block device itself. Network
+ * device might be thought as part of iSCSI block
+ * device, so network device and its ancestor should
+ * be marked as memalloc_noio too.
+ */
+ noio_flag = memalloc_noio_save();
+ retval = __rpm_callback(cb, dev);
+ memalloc_noio_restore(noio_flag);
+ } else {
+ retval = __rpm_callback(cb, dev);
+ }
dev->power.runtime_error = retval;
return retval != -EACCES ? retval : -EIO;
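
pm_runtime_set_memalloc_noio() is meant to be called by block (or iSCSI network) device drivers between device_add() and device_del(); rpm_callback() then brackets the runtime PM callbacks of marked devices with memalloc_noio_save()/memalloc_noio_restore(), which degrades GFP_KERNEL allocations in that path to GFP_NOIO. A sketch of the expected call sites; struct my_blk_dev and the add/del helpers are invented for illustration:

#include <linux/device.h>
#include <linux/pm_runtime.h>

struct my_blk_dev {
	struct device dev;	/* assumed already initialized elsewhere */
	/* ... */
};

static int my_blk_add(struct my_blk_dev *bdev)
{
	int ret;

	ret = device_add(&bdev->dev);
	if (ret)
		return ret;

	/* no page I/O may be needed while this device is runtime suspended */
	pm_runtime_set_memalloc_noio(&bdev->dev, true);
	return 0;
}

static void my_blk_del(struct my_blk_dev *bdev)
{
	/* clear before removal; ancestors are cleared only if unused */
	pm_runtime_set_memalloc_noio(&bdev->dev, false);
	device_del(&bdev->dev);
}
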
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 78d5f20c5f5b..81d6f605c92e 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -279,7 +279,7 @@ static ssize_t regmap_map_write_file(struct file *file,
return -EINVAL;
/* Userspace has been fiddling around behind the kernel's back */
- add_taint(TAINT_USER);
+ add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
regmap_write(map, reg, value);
return buf_size;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 9a13e889837e..5b5ee79ff236 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6547,7 +6547,7 @@ static ssize_t dac960_user_command_proc_write(struct file *file,
const char __user *Buffer,
size_t Count, loff_t *pos)
{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data;
+ DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file_inode(file))->data;
unsigned char CommandBuffer[80];
int Length;
if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
@@ -7054,6 +7054,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
else
ErrorCode = 0;
}
+ break;
default:
ErrorCode = -ENOTTY;
}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 824e09c4d0d7..5dc0daed8fac 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -63,19 +63,6 @@ config AMIGA_Z2RAM
To compile this driver as a module, choose M here: the
module will be called z2ram.
-config BLK_DEV_XD
- tristate "XT hard disk support"
- depends on ISA && ISA_DMA_API
- select CHECK_SIGNATURE
- help
- Very old 8 bit hard disk controllers used in the IBM XT computer
- will be supported if you say Y here.
-
- To compile this driver as a module, choose M here: the
- module will be called xd.
-
- It's pretty unlikely that you have one of these: say N.
-
config GDROM
tristate "SEGA Dreamcast GD-ROM drive"
depends on SH_DREAMCAST
@@ -544,4 +531,14 @@ config BLK_DEV_RBD
If unsure, say N.
+config BLK_DEV_RSXX
+ tristate "RamSam PCIe Flash SSD Device Driver"
+ depends on PCI
+ help
+ Device driver for IBM's high speed PCIe SSD
+ storage devices: RamSan-70 and RamSan-80.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rsxx.
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 17e82df3df74..a3b40232c6ab 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
-obj-$(CONFIG_BLK_DEV_XD) += xd.o
obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
@@ -41,4 +40,6 @@ obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
+obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
+
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 8c13eeb83c53..e98da675f0c1 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2660,25 +2660,24 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
mdev->read_requests = RB_ROOT;
mdev->write_requests = RB_ROOT;
- if (!idr_pre_get(&minors, GFP_KERNEL))
- goto out_no_minor_idr;
- if (idr_get_new_above(&minors, mdev, minor, &minor_got))
+ minor_got = idr_alloc(&minors, mdev, minor, minor + 1, GFP_KERNEL);
+ if (minor_got < 0) {
+ if (minor_got == -ENOSPC) {
+ err = ERR_MINOR_EXISTS;
+ drbd_msg_put_info("requested minor exists already");
+ }
goto out_no_minor_idr;
- if (minor_got != minor) {
- err = ERR_MINOR_EXISTS;
- drbd_msg_put_info("requested minor exists already");
- goto out_idr_remove_minor;
}
- if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
- goto out_idr_remove_minor;
- if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
+ vnr_got = idr_alloc(&tconn->volumes, mdev, vnr, vnr + 1, GFP_KERNEL);
+ if (vnr_got < 0) {
+ if (vnr_got == -ENOSPC) {
+ err = ERR_INVALID_REQUEST;
+ drbd_msg_put_info("requested volume exists already");
+ }
goto out_idr_remove_minor;
- if (vnr_got != vnr) {
- err = ERR_INVALID_REQUEST;
- drbd_msg_put_info("requested volume exists already");
- goto out_idr_remove_vol;
}
+
add_disk(disk);
kref_init(&mdev->kref); /* one ref for both idrs and the the add_disk */
@@ -2689,8 +2688,6 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
return NO_ERROR;
-out_idr_remove_vol:
- idr_remove(&tconn->volumes, vnr_got);
out_idr_remove_minor:
idr_remove(&minors, minor_got);
synchronize_rcu();
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ae1251270624..747bb2af69dc 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -162,12 +162,13 @@ static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
- loff_t size, loopsize;
+ loff_t loopsize;
/* Compute loopsize in bytes */
- size = i_size_read(file->f_mapping->host);
- loopsize = size - offset;
- /* offset is beyond i_size, wierd but possible */
+ loopsize = i_size_read(file->f_mapping->host);
+ if (offset > 0)
+ loopsize -= offset;
+ /* offset is beyond i_size, weird but possible */
if (loopsize < 0)
return 0;
@@ -190,6 +191,7 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
sector_t x = (sector_t)size;
+ struct block_device *bdev = lo->lo_device;
if (unlikely((loff_t)x != size))
return -EFBIG;
@@ -198,6 +200,9 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
if (lo->lo_sizelimit != sizelimit)
lo->lo_sizelimit = sizelimit;
set_capacity(lo->lo_disk, x);
+ bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
+ /* let user-space know about the new size */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
return 0;
}
@@ -1091,10 +1096,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
return err;
if (lo->lo_offset != info->lo_offset ||
- lo->lo_sizelimit != info->lo_sizelimit) {
+ lo->lo_sizelimit != info->lo_sizelimit)
if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
return -EFBIG;
- }
+
loop_config_discard(lo);
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
@@ -1139,7 +1144,7 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
if (lo->lo_state != Lo_bound)
return -ENXIO;
- error = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
+ error = vfs_getattr(&file->f_path, &stat);
if (error)
return error;
memset(info, 0, sizeof(*info));
@@ -1271,28 +1276,10 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
{
- int err;
- sector_t sec;
- loff_t sz;
-
- err = -ENXIO;
if (unlikely(lo->lo_state != Lo_bound))
- goto out;
- err = figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
- if (unlikely(err))
- goto out;
- sec = get_capacity(lo->lo_disk);
- /* the width of sector_t may be narrow for bit-shift */
- sz = sec;
- sz <<= 9;
- mutex_lock(&bdev->bd_mutex);
- bd_set_size(bdev, sz);
- /* let user-space know about the new size */
- kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
- mutex_unlock(&bdev->bd_mutex);
+ return -ENXIO;
- out:
- return err;
+ return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
}
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
@@ -1624,30 +1611,17 @@ static int loop_add(struct loop_device **l, int i)
if (!lo)
goto out;
- if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
- goto out_free_dev;
-
+ /* allocate id, if @i >= 0, we're requesting that specific id */
if (i >= 0) {
- int m;
-
- /* create specific i in the index */
- err = idr_get_new_above(&loop_index_idr, lo, i, &m);
- if (err >= 0 && i != m) {
- idr_remove(&loop_index_idr, m);
+ err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
+ if (err == -ENOSPC)
err = -EEXIST;
- }
- } else if (i == -1) {
- int m;
-
- /* get next free nr */
- err = idr_get_new(&loop_index_idr, lo, &m);
- if (err >= 0)
- i = m;
} else {
- err = -EINVAL;
+ err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
}
if (err < 0)
goto out_free_dev;
+ i = err;
lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
if (!lo->lo_queue)
@@ -1858,11 +1832,15 @@ static int __init loop_init(void)
max_part = (1UL << part_shift) - 1;
}
- if ((1UL << part_shift) > DISK_MAX_PARTS)
- return -EINVAL;
+ if ((1UL << part_shift) > DISK_MAX_PARTS) {
+ err = -EINVAL;
+ goto misc_out;
+ }
- if (max_loop > 1UL << (MINORBITS - part_shift))
- return -EINVAL;
+ if (max_loop > 1UL << (MINORBITS - part_shift)) {
+ err = -EINVAL;
+ goto misc_out;
+ }
/*
* If max_loop is specified, create that many devices upfront.
@@ -1880,8 +1858,10 @@ static int __init loop_init(void)
range = 1UL << MINORBITS;
}
- if (register_blkdev(LOOP_MAJOR, "loop"))
- return -EIO;
+ if (register_blkdev(LOOP_MAJOR, "loop")) {
+ err = -EIO;
+ goto misc_out;
+ }
blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
THIS_MODULE, loop_probe, NULL, NULL);
@@ -1894,6 +1874,10 @@ static int __init loop_init(void)
printk(KERN_INFO "loop: module loaded\n");
return 0;
+
+misc_out:
+ misc_deregister(&loop_misc);
+ return err;
}
static int loop_exit_cb(int id, void *ptr, void *data)
@@ -1911,7 +1895,6 @@ static void __exit loop_exit(void)
range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
- idr_remove_all(&loop_index_idr);
idr_destroy(&loop_index_idr);
blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig
index 0ba837fc62a8..1fca1f996b45 100644
--- a/drivers/block/mtip32xx/Kconfig
+++ b/drivers/block/mtip32xx/Kconfig
@@ -4,6 +4,6 @@
config BLK_DEV_PCIESSD_MTIP32XX
tristate "Block Device Driver for Micron PCIe SSDs"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
help
This enables the block driver for Micron PCIe SSDs.
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 3fd100990453..11cc9522cdd4 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -88,6 +88,8 @@ static int instance;
static int mtip_major;
static struct dentry *dfs_parent;
+static u32 cpu_use[NR_CPUS];
+
static DEFINE_SPINLOCK(rssd_index_lock);
static DEFINE_IDA(rssd_index_ida);
@@ -296,16 +298,17 @@ static int hba_reset_nosleep(struct driver_data *dd)
*/
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
- atomic_set(&port->commands[tag].active, 1);
+ int group = tag >> 5;
- spin_lock(&port->cmd_issue_lock);
+ atomic_set(&port->commands[tag].active, 1);
+ /* guard SACT and CI registers */
+ spin_lock(&port->cmd_issue_lock[group]);
writel((1 << MTIP_TAG_BIT(tag)),
port->s_active[MTIP_TAG_INDEX(tag)]);
writel((1 << MTIP_TAG_BIT(tag)),
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
-
- spin_unlock(&port->cmd_issue_lock);
+ spin_unlock(&port->cmd_issue_lock[group]);
/* Set the command's timeout value.*/
port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
@@ -964,56 +967,56 @@ handle_tfe_exit:
/*
* Handle a set device bits interrupt
*/
-static inline void mtip_process_sdbf(struct driver_data *dd)
+static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
+ u32 completed)
{
- struct mtip_port *port = dd->port;
- int group, tag, bit;
- u32 completed;
+ struct driver_data *dd = port->dd;
+ int tag, bit;
struct mtip_cmd *command;
- /* walk all bits in all slot groups */
- for (group = 0; group < dd->slot_groups; group++) {
- completed = readl(port->completed[group]);
- if (!completed)
- continue;
+ if (WARN_ON_ONCE(!completed))
+ return;
+ /* clear completed status register in the hardware.*/
+ writel(completed, port->completed[group]);
- /* clear completed status register in the hardware.*/
- writel(completed, port->completed[group]);
+ /* Process completed commands. */
+ for (bit = 0; (bit < 32) && completed; bit++) {
+ if (completed & 0x01) {
+ tag = (group << 5) | bit;
- /* Process completed commands. */
- for (bit = 0;
- (bit < 32) && completed;
- bit++, completed >>= 1) {
- if (completed & 0x01) {
- tag = (group << 5) | bit;
+ /* skip internal command slot. */
+ if (unlikely(tag == MTIP_TAG_INTERNAL))
+ continue;
- /* skip internal command slot. */
- if (unlikely(tag == MTIP_TAG_INTERNAL))
- continue;
+ command = &port->commands[tag];
+ /* make internal callback */
+ if (likely(command->comp_func)) {
+ command->comp_func(
+ port,
+ tag,
+ command->comp_data,
+ 0);
+ } else {
+ dev_warn(&dd->pdev->dev,
+ "Null completion "
+ "for tag %d",
+ tag);
- command = &port->commands[tag];
- /* make internal callback */
- if (likely(command->comp_func)) {
- command->comp_func(
- port,
- tag,
- command->comp_data,
- 0);
- } else {
- dev_warn(&dd->pdev->dev,
- "Null completion "
- "for tag %d",
- tag);
-
- if (mtip_check_surprise_removal(
- dd->pdev)) {
- mtip_command_cleanup(dd);
- return;
- }
+ if (mtip_check_surprise_removal(
+ dd->pdev)) {
+ mtip_command_cleanup(dd);
+ return;
}
}
}
+ completed >>= 1;
}
+
+ /* If last, re-enable interrupts */
+ if (atomic_dec_return(&dd->irq_workers_active) == 0)
+ writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
}
/*
@@ -1072,6 +1075,8 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
struct mtip_port *port = dd->port;
u32 hba_stat, port_stat;
int rv = IRQ_NONE;
+ int do_irq_enable = 1, i, workers;
+ struct mtip_work *twork;
hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
if (hba_stat) {
@@ -1082,8 +1087,42 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
writel(port_stat, port->mmio + PORT_IRQ_STAT);
/* Demux port status */
- if (likely(port_stat & PORT_IRQ_SDB_FIS))
- mtip_process_sdbf(dd);
+ if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
+ do_irq_enable = 0;
+ WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);
+
+ /* Collect completed bits for all groups; group 0 is handled inline below */
+ for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
+ i++) {
+ twork = &dd->work[i];
+ twork->completed = readl(port->completed[i]);
+ if (twork->completed)
+ workers++;
+ }
+
+ atomic_set(&dd->irq_workers_active, workers);
+ if (workers) {
+ for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
+ twork = &dd->work[i];
+ if (twork->completed)
+ queue_work_on(
+ twork->cpu_binding,
+ dd->isr_workq,
+ &twork->work);
+ }
+
+ if (likely(dd->work[0].completed))
+ mtip_workq_sdbfx(port, 0,
+ dd->work[0].completed);
+
+ } else {
+ /*
+ * Chip quirk: SDB interrupt but nothing
+ * to complete
+ */
+ do_irq_enable = 1;
+ }
+ }
if (unlikely(port_stat & PORT_IRQ_ERR)) {
if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
@@ -1103,21 +1142,13 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
}
/* acknowledge interrupt */
- writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
+ if (unlikely(do_irq_enable))
+ writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
return rv;
}
/*
- * Wrapper for mtip_handle_irq
- * (ignores return code)
- */
-static void mtip_tasklet(unsigned long data)
-{
- mtip_handle_irq((struct driver_data *) data);
-}
-
-/*
* HBA interrupt subroutine.
*
* @irq IRQ number.
@@ -1130,8 +1161,8 @@ static void mtip_tasklet(unsigned long data)
static irqreturn_t mtip_irq_handler(int irq, void *instance)
{
struct driver_data *dd = instance;
- tasklet_schedule(&dd->tasklet);
- return IRQ_HANDLED;
+
+ return mtip_handle_irq(dd);
}
static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
@@ -1489,6 +1520,12 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
}
#endif
+ /* Demux ID.DRAT & ID.RZAT to determine trim support */
+ if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
+ port->dd->trim_supp = true;
+ else
+ port->dd->trim_supp = false;
+
/* Set the identify buffer as valid. */
port->identify_valid = 1;
@@ -1676,6 +1713,81 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
}
/*
+ * Trim unused sectors
+ *
+ * @dd pointer to driver_data structure
+ * @lba starting lba
+ * @len # of 512b sectors to trim
+ *
+ * return value
+ * -ENOMEM Out of dma memory
+ * -EINVAL Invalid parameters passed in, trim not supported
+ * -EIO Error submitting trim request to hw
+ */
+static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len)
+{
+ int i, rv = 0;
+ u64 tlba, tlen, sect_left;
+ struct mtip_trim_entry *buf;
+ dma_addr_t dma_addr;
+ struct host_to_dev_fis fis;
+
+ if (!len || dd->trim_supp == false)
+ return -EINVAL;
+
+ /* Trim request too big */
+ WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));
+
+ /* Trim request not aligned on 4k boundary */
+ WARN_ON(len % 8 != 0);
+
+ /* Warn if vu_trim structure is too big */
+ WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);
+
+ /* Allocate a DMA buffer for the trim structure */
+ buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ memset(buf, 0, ATA_SECT_SIZE);
+
+ for (i = 0, sect_left = len, tlba = lba;
+ i < MTIP_MAX_TRIM_ENTRIES && sect_left;
+ i++) {
+ tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
+ MTIP_MAX_TRIM_ENTRY_LEN :
+ sect_left);
+ buf[i].lba = __force_bit2int cpu_to_le32(tlba);
+ buf[i].range = __force_bit2int cpu_to_le16(tlen);
+ tlba += tlen;
+ sect_left -= tlen;
+ }
+ WARN_ON(sect_left != 0);
+
+ /* Build the fis */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = 0xfb;
+ fis.features = 0x60;
+ fis.sect_count = 1;
+ fis.device = ATA_DEVICE_OBS;
+
+ if (mtip_exec_internal_command(dd->port,
+ &fis,
+ 5,
+ dma_addr,
+ ATA_SECT_SIZE,
+ 0,
+ GFP_KERNEL,
+ MTIP_TRIM_TIMEOUT_MS) < 0)
+ rv = -EIO;
+
+ dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
+ return rv;
+}
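
/*
 * Editor's illustration (not part of the patch): how mtip_send_trim()
 * above splits a request.  With lba = 0 and len = 131072 sectors:
 *   buf[0] = { .lba = 0,      .range = 65528 }  (MTIP_MAX_TRIM_ENTRY_LEN)
 *   buf[1] = { .lba = 65528,  .range = 65528 }
 *   buf[2] = { .lba = 131056, .range = 16 }
 * At most MTIP_MAX_TRIM_ENTRIES * MTIP_MAX_TRIM_ENTRY_LEN = 8 * 65528 =
 * 524224 sectors (~256 MiB) fit in one command, matching the
 * blk_queue_max_discard_sectors() limit set in mtip_block_initialize().
 */
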
+
+/*
* Get the drive capacity.
*
* @dd Pointer to the device data structure.
@@ -3005,20 +3117,24 @@ static int mtip_hw_init(struct driver_data *dd)
hba_setup(dd);
- tasklet_init(&dd->tasklet, mtip_tasklet, (unsigned long)dd);
-
- dd->port = kzalloc(sizeof(struct mtip_port), GFP_KERNEL);
+ dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
+ dd->numa_node);
if (!dd->port) {
dev_err(&dd->pdev->dev,
"Memory allocation: port structure\n");
return -ENOMEM;
}
+ /* Continue workqueue setup */
+ for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
+ dd->work[i].port = dd->port;
+
/* Counting semaphore to track command slot usage */
sema_init(&dd->port->cmd_slot, num_command_slots - 1);
/* Spinlock to prevent concurrent issue */
- spin_lock_init(&dd->port->cmd_issue_lock);
+ for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
+ spin_lock_init(&dd->port->cmd_issue_lock[i]);
/* Set the port mmio base address. */
dd->port->mmio = dd->mmio + PORT_OFFSET;
@@ -3165,6 +3281,7 @@ static int mtip_hw_init(struct driver_data *dd)
"Unable to allocate IRQ %d\n", dd->pdev->irq);
goto out2;
}
+ irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));
/* Enable interrupts on the HBA. */
writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
@@ -3241,7 +3358,8 @@ out3:
writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
dd->mmio + HOST_CTL);
- /*Release the IRQ. */
+ /* Release the IRQ. */
+ irq_set_affinity_hint(dd->pdev->irq, NULL);
devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
out2:
@@ -3291,11 +3409,9 @@ static int mtip_hw_exit(struct driver_data *dd)
del_timer_sync(&dd->port->cmd_timer);
/* Release the IRQ. */
+ irq_set_affinity_hint(dd->pdev->irq, NULL);
devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
- /* Stop the bottom half tasklet. */
- tasklet_kill(&dd->tasklet);
-
/* Free the command/command header memory. */
dmam_free_coherent(&dd->pdev->dev,
HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
@@ -3641,6 +3757,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
}
}
+ if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+ bio_sectors(bio)));
+ return;
+ }
+
if (unlikely(!bio_has_data(bio))) {
blk_queue_flush(queue, 0);
bio_endio(bio, 0);
@@ -3711,7 +3833,7 @@ static int mtip_block_initialize(struct driver_data *dd)
goto protocol_init_error;
}
- dd->disk = alloc_disk(MTIP_MAX_MINORS);
+ dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node);
if (dd->disk == NULL) {
dev_err(&dd->pdev->dev,
"Unable to allocate gendisk structure\n");
@@ -3755,7 +3877,7 @@ static int mtip_block_initialize(struct driver_data *dd)
skip_create_disk:
/* Allocate the request queue. */
- dd->queue = blk_alloc_queue(GFP_KERNEL);
+ dd->queue = blk_alloc_queue_node(GFP_KERNEL, dd->numa_node);
if (dd->queue == NULL) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
@@ -3783,6 +3905,15 @@ skip_create_disk:
*/
blk_queue_flush(dd->queue, 0);
+ /* Signal trim support */
+ if (dd->trim_supp == true) {
+ set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
+ dd->queue->limits.discard_granularity = 4096;
+ blk_queue_max_discard_sectors(dd->queue,
+ MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
+ dd->queue->limits.discard_zeroes_data = 0;
+ }
+
/* Set the capacity of the device in 512 byte sectors. */
if (!(mtip_hw_get_capacity(dd, &capacity))) {
dev_warn(&dd->pdev->dev,
@@ -3813,9 +3944,8 @@ skip_create_disk:
start_service_thread:
sprintf(thd_name, "mtip_svc_thd_%02d", index);
-
- dd->mtip_svc_handler = kthread_run(mtip_service_thread,
- dd, thd_name);
+ dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
+ dd, dd->numa_node, thd_name);
if (IS_ERR(dd->mtip_svc_handler)) {
dev_err(&dd->pdev->dev, "service thread failed to start\n");
@@ -3823,7 +3953,7 @@ start_service_thread:
rv = -EFAULT;
goto kthread_run_error;
}
-
+ wake_up_process(dd->mtip_svc_handler);
if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
rv = wait_for_rebuild;
@@ -3963,6 +4093,56 @@ static int mtip_block_resume(struct driver_data *dd)
return 0;
}
+static void drop_cpu(int cpu)
+{
+ cpu_use[cpu]--;
+}
+
+static int get_least_used_cpu_on_node(int node)
+{
+ int cpu, least_used_cpu, least_cnt;
+ const struct cpumask *node_mask;
+
+ node_mask = cpumask_of_node(node);
+ least_used_cpu = cpumask_first(node_mask);
+ least_cnt = cpu_use[least_used_cpu];
+ cpu = least_used_cpu;
+
+ for_each_cpu(cpu, node_mask) {
+ if (cpu_use[cpu] < least_cnt) {
+ least_used_cpu = cpu;
+ least_cnt = cpu_use[cpu];
+ }
+ }
+ cpu_use[least_used_cpu]++;
+ return least_used_cpu;
+}
+
+/* Helper for selecting a node in round robin mode */
+static inline int mtip_get_next_rr_node(void)
+{
+ static int next_node = -1;
+
+ if (next_node == -1) {
+ next_node = first_online_node;
+ return next_node;
+ }
+
+ next_node = next_online_node(next_node);
+ if (next_node == MAX_NUMNODES)
+ next_node = first_online_node;
+ return next_node;
+}
+
+static DEFINE_HANDLER(0);
+static DEFINE_HANDLER(1);
+static DEFINE_HANDLER(2);
+static DEFINE_HANDLER(3);
+static DEFINE_HANDLER(4);
+static DEFINE_HANDLER(5);
+static DEFINE_HANDLER(6);
+static DEFINE_HANDLER(7);
+
/*
* Called for each supported PCI device detected.
*
@@ -3977,9 +4157,25 @@ static int mtip_pci_probe(struct pci_dev *pdev,
{
int rv = 0;
struct driver_data *dd = NULL;
+ char cpu_list[256];
+ const struct cpumask *node_mask;
+ int cpu, i = 0, j = 0;
+ int my_node = NUMA_NO_NODE;
/* Allocate memory for this devices private data. */
- dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL);
+ my_node = pcibus_to_node(pdev->bus);
+ if (my_node != NUMA_NO_NODE) {
+ if (!node_online(my_node))
+ my_node = mtip_get_next_rr_node();
+ } else {
+ dev_info(&pdev->dev, "Kernel not reporting proximity, choosing a node\n");
+ my_node = mtip_get_next_rr_node();
+ }
+ dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
+ my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
+ cpu_to_node(smp_processor_id()), smp_processor_id());
+
+ dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
if (dd == NULL) {
dev_err(&pdev->dev,
"Unable to allocate memory for driver data\n");
@@ -4016,19 +4212,82 @@ static int mtip_pci_probe(struct pci_dev *pdev,
}
}
- pci_set_master(pdev);
+ /* Copy the info we may need later into the private data structure. */
+ dd->major = mtip_major;
+ dd->instance = instance;
+ dd->pdev = pdev;
+ dd->numa_node = my_node;
+ memset(dd->workq_name, 0, 32);
+ snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
+
+ dd->isr_workq = create_workqueue(dd->workq_name);
+ if (!dd->isr_workq) {
+ dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
+ goto block_initialize_err;
+ }
+
+ memset(cpu_list, 0, sizeof(cpu_list));
+
+ node_mask = cpumask_of_node(dd->numa_node);
+ if (!cpumask_empty(node_mask)) {
+ for_each_cpu(cpu, node_mask)
+ {
+ snprintf(&cpu_list[j], 256 - j, "%d ", cpu);
+ j = strlen(cpu_list);
+ }
+
+ dev_info(&pdev->dev, "Node %d on package %d has %d cpu(s): %s\n",
+ dd->numa_node,
+ topology_physical_package_id(cpumask_first(node_mask)),
+ nr_cpus_node(dd->numa_node),
+ cpu_list);
+ } else
+ dev_dbg(&pdev->dev, "mtip32xx: node_mask empty\n");
+
+ dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node);
+ dev_info(&pdev->dev, "Initial IRQ binding node:cpu %d:%d\n",
+ cpu_to_node(dd->isr_binding), dd->isr_binding);
+
+ /* first worker context always runs in ISR */
+ dd->work[0].cpu_binding = dd->isr_binding;
+ dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
+ dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
+ dd->work[3].cpu_binding = dd->work[0].cpu_binding;
+ dd->work[4].cpu_binding = dd->work[1].cpu_binding;
+ dd->work[5].cpu_binding = dd->work[2].cpu_binding;
+ dd->work[6].cpu_binding = dd->work[2].cpu_binding;
+ dd->work[7].cpu_binding = dd->work[1].cpu_binding;
+
+ /* Log the bindings */
+ for_each_present_cpu(cpu) {
+ memset(cpu_list, 0, sizeof(cpu_list));
+ for (i = 0, j = 0; i < MTIP_MAX_SLOT_GROUPS; i++) {
+ if (dd->work[i].cpu_binding == cpu) {
+ snprintf(&cpu_list[j], 256 - j, "%d ", i);
+ j = strlen(cpu_list);
+ }
+ }
+ if (j)
+ dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list);
+ }
+
+ INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
+ INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
+ INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
+ INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
+ INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
+ INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
+ INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
+ INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
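With each slot group's work item bound to a CPU, the interrupt path (reworked elsewhere in this patch, not shown in this hunk) is expected to hand a completed slot group to its bound CPU instead of running a tasklet. A hedged sketch of that dispatch, assuming 'group' and the 'completed' bitmap come from the ISR:

	/* Sketch only: dispatch slot group 'group' to its bound CPU. */
	dd->work[group].completed = completed;
	queue_work_on(dd->work[group].cpu_binding,
		      dd->isr_workq, &dd->work[group].work);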
+
+ pci_set_master(pdev);
if (pci_enable_msi(pdev)) {
dev_warn(&pdev->dev,
"Unable to enable MSI interrupt.\n");
goto block_initialize_err;
}
- /* Copy the info we may need later into the private data structure. */
- dd->major = mtip_major;
- dd->instance = instance;
- dd->pdev = pdev;
-
/* Initialize the block layer. */
rv = mtip_block_initialize(dd);
if (rv < 0) {
@@ -4048,7 +4307,13 @@ static int mtip_pci_probe(struct pci_dev *pdev,
block_initialize_err:
pci_disable_msi(pdev);
-
+ if (dd->isr_workq) {
+ flush_workqueue(dd->isr_workq);
+ destroy_workqueue(dd->isr_workq);
+ drop_cpu(dd->work[0].cpu_binding);
+ drop_cpu(dd->work[1].cpu_binding);
+ drop_cpu(dd->work[2].cpu_binding);
+ }
setmask_err:
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
@@ -4089,6 +4354,14 @@ static void mtip_pci_remove(struct pci_dev *pdev)
/* Clean up the block layer. */
mtip_block_remove(dd);
+ if (dd->isr_workq) {
+ flush_workqueue(dd->isr_workq);
+ destroy_workqueue(dd->isr_workq);
+ drop_cpu(dd->work[0].cpu_binding);
+ drop_cpu(dd->work[1].cpu_binding);
+ drop_cpu(dd->work[2].cpu_binding);
+ }
+
pci_disable_msi(pdev);
kfree(dd);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b1742640556a..3bffff5f670c 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -164,6 +164,35 @@ struct smart_attr {
u8 res[3];
} __packed;
+struct mtip_work {
+ struct work_struct work;
+ void *port;
+ int cpu_binding;
+ u32 completed;
+} ____cacheline_aligned_in_smp;
+
+#define DEFINE_HANDLER(group) \
+ void mtip_workq_sdbf##group(struct work_struct *work) \
+ { \
+ struct mtip_work *w = (struct mtip_work *) work; \
+ mtip_workq_sdbfx(w->port, group, w->completed); \
+ }
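For reference, DEFINE_HANDLER(0) expands to roughly the following, which is what lets the probe path pass mtip_workq_sdbf0..7 to INIT_WORK():

void mtip_workq_sdbf0(struct work_struct *work)
{
	/* Cast is valid because 'work' is the first member of struct mtip_work. */
	struct mtip_work *w = (struct mtip_work *) work;

	mtip_workq_sdbfx(w->port, 0, w->completed);
}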
+
+#define MTIP_TRIM_TIMEOUT_MS 240000
+#define MTIP_MAX_TRIM_ENTRIES 8
+#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
+
+struct mtip_trim_entry {
+ u32 lba; /* starting lba of region */
+ u16 rsvd; /* unused */
+ u16 range; /* # of 512b blocks to trim */
+} __packed;
+
+struct mtip_trim {
+ /* Array of regions to trim */
+ struct mtip_trim_entry entry[MTIP_MAX_TRIM_ENTRIES];
+} __packed;
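A TRIM payload is thus a fixed table of up to MTIP_MAX_TRIM_ENTRIES (lba, range) pairs, each covering at most MTIP_MAX_TRIM_ENTRY_LEN 512-byte blocks. A sketch of how a discard might be packed into it; the real fill logic lives in the .c file, not in this hunk, and endianness conversion is omitted here:

/* Illustrative only: pack a discard of 'sectors' 512B blocks starting at 'lba'. */
static int example_fill_trim(struct mtip_trim *buf, u32 lba, u32 sectors)
{
	int i;

	memset(buf, 0, sizeof(*buf));
	for (i = 0; sectors && i < MTIP_MAX_TRIM_ENTRIES; i++) {
		u16 chunk = min_t(u32, sectors, MTIP_MAX_TRIM_ENTRY_LEN);

		buf->entry[i].lba = lba;
		buf->entry[i].range = chunk;
		lba += chunk;
		sectors -= chunk;
	}
	return sectors ? -ERANGE : 0;	/* does not fit in a single command */
}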
+
/* Register Frame Information Structure (FIS), host to device. */
struct host_to_dev_fis {
/*
@@ -424,7 +453,7 @@ struct mtip_port {
*/
struct semaphore cmd_slot;
/* Spinlock for working around command-issue bug. */
- spinlock_t cmd_issue_lock;
+ spinlock_t cmd_issue_lock[MTIP_MAX_SLOT_GROUPS];
};
/*
@@ -447,9 +476,6 @@ struct driver_data {
struct mtip_port *port; /* Pointer to the port data structure. */
- /* Tasklet used to process the bottom half of the ISR. */
- struct tasklet_struct tasklet;
-
unsigned product_type; /* magic value declaring the product type */
unsigned slot_groups; /* number of slot groups the product supports */
@@ -461,6 +487,20 @@ struct driver_data {
struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
struct dentry *dfs_node;
+
+ bool trim_supp; /* flag indicating trim support */
+
+ int numa_node; /* NUMA support */
+
+ char workq_name[32];
+
+ struct workqueue_struct *isr_workq;
+
+ struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
+
+ atomic_t irq_workers_active;
+
+ int isr_binding;
};
#endif
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 043ddcca4abf..7fecc784be01 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -98,6 +98,7 @@ static const char *nbdcmd_to_ascii(int cmd)
case NBD_CMD_READ: return "read";
case NBD_CMD_WRITE: return "write";
case NBD_CMD_DISC: return "disconnect";
+ case NBD_CMD_FLUSH: return "flush";
case NBD_CMD_TRIM: return "trim/discard";
}
return "invalid";
@@ -244,8 +245,15 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
request.magic = htonl(NBD_REQUEST_MAGIC);
request.type = htonl(nbd_cmd(req));
- request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
- request.len = htonl(size);
+
+ if (nbd_cmd(req) == NBD_CMD_FLUSH) {
+ /* Other values are reserved for FLUSH requests. */
+ request.from = 0;
+ request.len = 0;
+ } else {
+ request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
+ request.len = htonl(size);
+ }
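For FLUSH only the fixed header travels on the wire; offset and length are zeroed because the protocol reserves them. For reference, the header being filled in here is the packed nbd_request layout from <linux/nbd.h> (all multi-byte fields big-endian):

struct nbd_request {
	__be32 magic;	/* NBD_REQUEST_MAGIC */
	__be32 type;	/* NBD_CMD_READ/WRITE/DISC/FLUSH/TRIM */
	char handle[8];	/* opaque cookie echoed back in the reply */
	__be64 from;	/* byte offset; 0 for FLUSH */
	__be32 len;	/* byte count; 0 for FLUSH */
} __attribute__((packed));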
memcpy(request.handle, &req, sizeof(req));
dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
@@ -482,6 +490,11 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
}
}
+ if (req->cmd_flags & REQ_FLUSH) {
+ BUG_ON(unlikely(blk_rq_sectors(req)));
+ nbd_cmd(req) = NBD_CMD_FLUSH;
+ }
+
req->errors = 0;
mutex_lock(&nbd->tx_lock);
@@ -551,6 +564,7 @@ static int nbd_thread(void *data)
*/
static void do_nbd_request(struct request_queue *q)
+ __releases(q->queue_lock) __acquires(q->queue_lock)
{
struct request *req;
@@ -595,12 +609,20 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
struct request sreq;
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
+ if (!nbd->sock)
+ return -EINVAL;
+ mutex_unlock(&nbd->tx_lock);
+ fsync_bdev(bdev);
+ mutex_lock(&nbd->tx_lock);
blk_rq_init(NULL, &sreq);
sreq.cmd_type = REQ_TYPE_SPECIAL;
nbd_cmd(&sreq) = NBD_CMD_DISC;
+
+ /* Check again after getting mutex back. */
if (!nbd->sock)
return -EINVAL;
+
nbd_send_req(nbd, &sreq);
return 0;
}
@@ -614,6 +636,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd_clear_que(nbd);
BUG_ON(!list_empty(&nbd->queue_head));
BUG_ON(!list_empty(&nbd->waiting_queue));
+ kill_bdev(bdev);
if (file)
fput(file);
return 0;
@@ -625,7 +648,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return -EBUSY;
file = fget(arg);
if (file) {
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
if (S_ISSOCK(inode->i_mode)) {
nbd->file = file;
nbd->sock = SOCKET_I(inode);
@@ -681,9 +704,15 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
mutex_unlock(&nbd->tx_lock);
+ if (nbd->flags & NBD_FLAG_READ_ONLY)
+ set_device_ro(bdev, true);
if (nbd->flags & NBD_FLAG_SEND_TRIM)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
nbd->disk->queue);
+ if (nbd->flags & NBD_FLAG_SEND_FLUSH)
+ blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
+ else
+ blk_queue_flush(nbd->disk->queue, 0);
thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
if (IS_ERR(thread)) {
@@ -702,9 +731,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd->file = NULL;
nbd_clear_que(nbd);
dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
+ kill_bdev(bdev);
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+ set_device_ro(bdev, false);
if (file)
fput(file);
+ nbd->flags = 0;
nbd->bytesize = 0;
bdev->bd_inode->i_size = 0;
set_capacity(nbd->disk, 0);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 89576a0b3f2e..6c81a4c040b9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -52,9 +52,12 @@
#define SECTOR_SHIFT 9
#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
-/* It might be useful to have this defined elsewhere too */
+/* It might be useful to have these defined elsewhere */
-#define U64_MAX ((u64) (~0ULL))
+#define U8_MAX ((u8) (~0U))
+#define U16_MAX ((u16) (~0U))
+#define U32_MAX ((u32) (~0U))
+#define U64_MAX ((u64) (~0ULL))
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"
@@ -66,7 +69,6 @@
(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
-#define RBD_MAX_OPT_LEN 1024
#define RBD_SNAP_HEAD_NAME "-"
@@ -93,8 +95,6 @@
#define DEV_NAME_LEN 32
#define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
-#define RBD_READ_ONLY_DEFAULT false
-
/*
* block device image metadata (in-memory version)
*/
@@ -119,16 +119,33 @@ struct rbd_image_header {
* An rbd image specification.
*
* The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
- * identify an image.
+ * identify an image. Each rbd_dev structure includes a pointer to
+ * an rbd_spec structure that encapsulates this identity.
+ *
+ * Each of the id's in an rbd_spec has an associated name. For a
+ * user-mapped image, the names are supplied and the id's associated
+ * with them are looked up. For a layered image, a parent image is
+ * defined by the tuple, and the names are looked up.
+ *
+ * An rbd_dev structure contains a parent_spec pointer which is
+ * non-null if the image it represents is a child in a layered
+ * image. This pointer will refer to the rbd_spec structure used
+ * by the parent rbd_dev for its own identity (i.e., the structure
+ * is shared between the parent and child).
+ *
+ * Since these structures are populated once, during the discovery
+ * phase of image construction, they are effectively immutable so
+ * we make no effort to synchronize access to them.
+ *
+ * Note that code herein does not assume the image name is known (it
+ * could be a null pointer).
*/
struct rbd_spec {
u64 pool_id;
char *pool_name;
char *image_id;
- size_t image_id_len;
char *image_name;
- size_t image_name_len;
u64 snap_id;
char *snap_name;
@@ -136,10 +153,6 @@ struct rbd_spec {
struct kref kref;
};
-struct rbd_options {
- bool read_only;
-};
-
/*
* an instance of the client. multiple devices may share an rbd client.
*/
@@ -149,37 +162,76 @@ struct rbd_client {
struct list_head node;
};
-/*
- * a request completion status
- */
-struct rbd_req_status {
- int done;
- int rc;
- u64 bytes;
+struct rbd_img_request;
+typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
+
+#define BAD_WHICH U32_MAX /* Good which or bad which, which? */
+
+struct rbd_obj_request;
+typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
+
+enum obj_request_type {
+ OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};
-/*
- * a collection of requests
- */
-struct rbd_req_coll {
- int total;
- int num_done;
+struct rbd_obj_request {
+ const char *object_name;
+ u64 offset; /* object start byte */
+ u64 length; /* bytes from offset */
+
+ struct rbd_img_request *img_request;
+ struct list_head links; /* img_request->obj_requests */
+ u32 which; /* position in image request list */
+
+ enum obj_request_type type;
+ union {
+ struct bio *bio_list;
+ struct {
+ struct page **pages;
+ u32 page_count;
+ };
+ };
+
+ struct ceph_osd_request *osd_req;
+
+ u64 xferred; /* bytes transferred */
+ u64 version;
+ int result;
+ atomic_t done;
+
+ rbd_obj_callback_t callback;
+ struct completion completion;
+
struct kref kref;
- struct rbd_req_status status[0];
};
-/*
- * a single io request
- */
-struct rbd_request {
- struct request *rq; /* blk layer request */
- struct bio *bio; /* cloned bio */
- struct page **pages; /* list of used pages */
- u64 len;
- int coll_index;
- struct rbd_req_coll *coll;
+struct rbd_img_request {
+ struct request *rq;
+ struct rbd_device *rbd_dev;
+ u64 offset; /* starting image byte offset */
+ u64 length; /* byte count from offset */
+ bool write_request; /* false for read */
+ union {
+ struct ceph_snap_context *snapc; /* for writes */
+ u64 snap_id; /* for reads */
+ };
+ spinlock_t completion_lock;/* protects next_completion */
+ u32 next_completion;
+ rbd_img_callback_t callback;
+
+ u32 obj_request_count;
+ struct list_head obj_requests; /* rbd_obj_request structs */
+
+ struct kref kref;
};
+#define for_each_obj_request(ireq, oreq) \
+ list_for_each_entry(oreq, &(ireq)->obj_requests, links)
+#define for_each_obj_request_from(ireq, oreq) \
+ list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
+#define for_each_obj_request_safe(ireq, oreq, n) \
+ list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
+
struct rbd_snap {
struct device dev;
const char *name;
@@ -209,16 +261,18 @@ struct rbd_device {
char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
- spinlock_t lock; /* queue lock */
+ spinlock_t lock; /* queue, flags, open_count */
struct rbd_image_header header;
- bool exists;
+ unsigned long flags; /* possibly lock protected */
struct rbd_spec *spec;
char *header_name;
+ struct ceph_file_layout layout;
+
struct ceph_osd_event *watch_event;
- struct ceph_osd_request *watch_request;
+ struct rbd_obj_request *watch_request;
struct rbd_spec *parent_spec;
u64 parent_overlap;
@@ -235,7 +289,19 @@ struct rbd_device {
/* sysfs related */
struct device dev;
- unsigned long open_count;
+ unsigned long open_count; /* protected by lock */
+};
+
+/*
+ * Flag bits for rbd_dev->flags. If atomicity is required,
+ * rbd_dev->lock is used to protect access.
+ *
+ * Currently, only the "removing" flag (which is coupled with the
+ * "open_count" field) requires atomic access.
+ */
+enum rbd_dev_flags {
+ RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
+ RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
@@ -277,6 +343,33 @@ static struct device rbd_root_dev = {
.release = rbd_root_dev_release,
};
+static __printf(2, 3)
+void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (!rbd_dev)
+ printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
+ else if (rbd_dev->disk)
+ printk(KERN_WARNING "%s: %s: %pV\n",
+ RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
+ else if (rbd_dev->spec && rbd_dev->spec->image_name)
+ printk(KERN_WARNING "%s: image %s: %pV\n",
+ RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
+ else if (rbd_dev->spec && rbd_dev->spec->image_id)
+ printk(KERN_WARNING "%s: id %s: %pV\n",
+ RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
+ else /* punt */
+ printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
+ RBD_DRV_NAME, rbd_dev, &vaf);
+ va_end(args);
+}
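rbd_warn() replaces the scattered pr_warning() calls and picks the most specific identifier it can find (disk name, image name, image id, or the raw pointer); __printf(2, 3) makes gcc type-check the format arguments, and the "%pV\n" means the helper supplies the terminating newline itself. A typical call from a hypothetical error path:

	/* Hypothetical caller: 'ret' holds a negative errno from a failed refresh. */
	if (ret)
		rbd_warn(rbd_dev, "header refresh failed: %d", ret);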
+
#ifdef RBD_DEBUG
#define rbd_assert(expr) \
if (unlikely(!(expr))) { \
@@ -296,14 +389,23 @@ static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
+ bool removing = false;
if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
return -EROFS;
+ spin_lock_irq(&rbd_dev->lock);
+ if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
+ removing = true;
+ else
+ rbd_dev->open_count++;
+ spin_unlock_irq(&rbd_dev->lock);
+ if (removing)
+ return -ENOENT;
+
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
(void) get_device(&rbd_dev->dev);
set_device_ro(bdev, rbd_dev->mapping.read_only);
- rbd_dev->open_count++;
mutex_unlock(&ctl_mutex);
return 0;
@@ -312,10 +414,14 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
static int rbd_release(struct gendisk *disk, fmode_t mode)
{
struct rbd_device *rbd_dev = disk->private_data;
+ unsigned long open_count_before;
+
+ spin_lock_irq(&rbd_dev->lock);
+ open_count_before = rbd_dev->open_count--;
+ spin_unlock_irq(&rbd_dev->lock);
+ rbd_assert(open_count_before > 0);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rbd_assert(rbd_dev->open_count > 0);
- rbd_dev->open_count--;
put_device(&rbd_dev->dev);
mutex_unlock(&ctl_mutex);
@@ -337,7 +443,7 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
struct rbd_client *rbdc;
int ret = -ENOMEM;
- dout("rbd_client_create\n");
+ dout("%s:\n", __func__);
rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
if (!rbdc)
goto out_opt;
@@ -361,8 +467,8 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
spin_unlock(&rbd_client_list_lock);
mutex_unlock(&ctl_mutex);
+ dout("%s: rbdc %p\n", __func__, rbdc);
- dout("rbd_client_create created %p\n", rbdc);
return rbdc;
out_err:
@@ -373,6 +479,8 @@ out_mutex:
out_opt:
if (ceph_opts)
ceph_destroy_options(ceph_opts);
+ dout("%s: error %d\n", __func__, ret);
+
return ERR_PTR(ret);
}
@@ -426,6 +534,12 @@ static match_table_t rbd_opts_tokens = {
{-1, NULL}
};
+struct rbd_options {
+ bool read_only;
+};
+
+#define RBD_READ_ONLY_DEFAULT false
+
static int parse_rbd_opts_token(char *c, void *private)
{
struct rbd_options *rbd_opts = private;
@@ -493,7 +607,7 @@ static void rbd_client_release(struct kref *kref)
{
struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
- dout("rbd_release_client %p\n", rbdc);
+ dout("%s: rbdc %p\n", __func__, rbdc);
spin_lock(&rbd_client_list_lock);
list_del(&rbdc->node);
spin_unlock(&rbd_client_list_lock);
@@ -512,18 +626,6 @@ static void rbd_put_client(struct rbd_client *rbdc)
kref_put(&rbdc->kref, rbd_client_release);
}
-/*
- * Destroy requests collection
- */
-static void rbd_coll_release(struct kref *kref)
-{
- struct rbd_req_coll *coll =
- container_of(kref, struct rbd_req_coll, kref);
-
- dout("rbd_coll_release %p\n", coll);
- kfree(coll);
-}
-
static bool rbd_image_format_valid(u32 image_format)
{
return image_format == 1 || image_format == 2;
@@ -707,7 +809,8 @@ static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
goto done;
rbd_dev->mapping.read_only = true;
}
- rbd_dev->exists = true;
+ set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
+
done:
return ret;
}
@@ -724,7 +827,7 @@ static void rbd_header_free(struct rbd_image_header *header)
header->snapc = NULL;
}
-static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
+static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
char *name;
u64 segment;
@@ -767,23 +870,6 @@ static u64 rbd_segment_length(struct rbd_device *rbd_dev,
return length;
}
-static int rbd_get_num_segments(struct rbd_image_header *header,
- u64 ofs, u64 len)
-{
- u64 start_seg;
- u64 end_seg;
-
- if (!len)
- return 0;
- if (len - 1 > U64_MAX - ofs)
- return -ERANGE;
-
- start_seg = ofs >> header->obj_order;
- end_seg = (ofs + len - 1) >> header->obj_order;
-
- return end_seg - start_seg + 1;
-}
-
/*
* returns the size of an object in the image
*/
@@ -949,8 +1035,10 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
unsigned int bi_size;
struct bio *bio;
- if (!bi)
+ if (!bi) {
+ rbd_warn(NULL, "bio_chain exhausted with %u left", len);
goto out_err; /* EINVAL; ran out of bio's */
+ }
bi_size = min_t(unsigned int, bi->bi_size - off, len);
bio = bio_clone_range(bi, off, bi_size, gfpmask);
if (!bio)
@@ -976,399 +1064,721 @@ out_err:
return NULL;
}
-/*
- * helpers for osd request op vectors.
- */
-static struct ceph_osd_req_op *rbd_create_rw_ops(int num_ops,
- int opcode, u32 payload_len)
+static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
- struct ceph_osd_req_op *ops;
+ dout("%s: obj %p (was %d)\n", __func__, obj_request,
+ atomic_read(&obj_request->kref.refcount));
+ kref_get(&obj_request->kref);
+}
+
+static void rbd_obj_request_destroy(struct kref *kref);
+static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
+{
+ rbd_assert(obj_request != NULL);
+ dout("%s: obj %p (was %d)\n", __func__, obj_request,
+ atomic_read(&obj_request->kref.refcount));
+ kref_put(&obj_request->kref, rbd_obj_request_destroy);
+}
+
+static void rbd_img_request_get(struct rbd_img_request *img_request)
+{
+ dout("%s: img %p (was %d)\n", __func__, img_request,
+ atomic_read(&img_request->kref.refcount));
+ kref_get(&img_request->kref);
+}
+
+static void rbd_img_request_destroy(struct kref *kref);
+static void rbd_img_request_put(struct rbd_img_request *img_request)
+{
+ rbd_assert(img_request != NULL);
+ dout("%s: img %p (was %d)\n", __func__, img_request,
+ atomic_read(&img_request->kref.refcount));
+ kref_put(&img_request->kref, rbd_img_request_destroy);
+}
+
+static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
+ struct rbd_obj_request *obj_request)
+{
+ rbd_assert(obj_request->img_request == NULL);
+
+ rbd_obj_request_get(obj_request);
+ obj_request->img_request = img_request;
+ obj_request->which = img_request->obj_request_count;
+ rbd_assert(obj_request->which != BAD_WHICH);
+ img_request->obj_request_count++;
+ list_add_tail(&obj_request->links, &img_request->obj_requests);
+ dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
+ obj_request->which);
+}
- ops = kzalloc(sizeof (*ops) * (num_ops + 1), GFP_NOIO);
- if (!ops)
+static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
+ struct rbd_obj_request *obj_request)
+{
+ rbd_assert(obj_request->which != BAD_WHICH);
+
+ dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
+ obj_request->which);
+ list_del(&obj_request->links);
+ rbd_assert(img_request->obj_request_count > 0);
+ img_request->obj_request_count--;
+ rbd_assert(obj_request->which == img_request->obj_request_count);
+ obj_request->which = BAD_WHICH;
+ rbd_assert(obj_request->img_request == img_request);
+ obj_request->img_request = NULL;
+ obj_request->callback = NULL;
+ rbd_obj_request_put(obj_request);
+}
+
+static bool obj_request_type_valid(enum obj_request_type type)
+{
+ switch (type) {
+ case OBJ_REQUEST_NODATA:
+ case OBJ_REQUEST_BIO:
+ case OBJ_REQUEST_PAGES:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct ceph_osd_req_op *rbd_osd_req_op_create(u16 opcode, ...)
+{
+ struct ceph_osd_req_op *op;
+ va_list args;
+ size_t size;
+
+ op = kzalloc(sizeof (*op), GFP_NOIO);
+ if (!op)
return NULL;
+ op->op = opcode;
+ va_start(args, opcode);
+ switch (opcode) {
+ case CEPH_OSD_OP_READ:
+ case CEPH_OSD_OP_WRITE:
+ /* rbd_osd_req_op_create(READ, offset, length) */
+ /* rbd_osd_req_op_create(WRITE, offset, length) */
+ op->extent.offset = va_arg(args, u64);
+ op->extent.length = va_arg(args, u64);
+ if (opcode == CEPH_OSD_OP_WRITE)
+ op->payload_len = op->extent.length;
+ break;
+ case CEPH_OSD_OP_STAT:
+ break;
+ case CEPH_OSD_OP_CALL:
+ /* rbd_osd_req_op_create(CALL, class, method, data, datalen) */
+ op->cls.class_name = va_arg(args, char *);
+ size = strlen(op->cls.class_name);
+ rbd_assert(size <= (size_t) U8_MAX);
+ op->cls.class_len = size;
+ op->payload_len = size;
+
+ op->cls.method_name = va_arg(args, char *);
+ size = strlen(op->cls.method_name);
+ rbd_assert(size <= (size_t) U8_MAX);
+ op->cls.method_len = size;
+ op->payload_len += size;
+
+ op->cls.argc = 0;
+ op->cls.indata = va_arg(args, void *);
+ size = va_arg(args, size_t);
+ rbd_assert(size <= (size_t) U32_MAX);
+ op->cls.indata_len = (u32) size;
+ op->payload_len += size;
+ break;
+ case CEPH_OSD_OP_NOTIFY_ACK:
+ case CEPH_OSD_OP_WATCH:
+ /* rbd_osd_req_op_create(NOTIFY_ACK, cookie, version) */
+ /* rbd_osd_req_op_create(WATCH, cookie, version, flag) */
+ op->watch.cookie = va_arg(args, u64);
+ op->watch.ver = va_arg(args, u64);
+ op->watch.ver = cpu_to_le64(op->watch.ver);
+ if (opcode == CEPH_OSD_OP_WATCH && va_arg(args, int))
+ op->watch.flag = (u8) 1;
+ break;
+ default:
+ rbd_warn(NULL, "unsupported opcode %hu\n", opcode);
+ kfree(op);
+ op = NULL;
+ break;
+ }
+ va_end(args);
- ops[0].op = opcode;
+ return op;
+}
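The variadic tail is interpreted per opcode exactly as the inline comments state, and the op is copied into the osd request by rbd_osd_req_create(), so the caller destroys its own copy afterwards. For clarity, the call forms used elsewhere in this patch look like the following (literal values are placeholders, not taken from the patch):

	struct ceph_osd_req_op *op;

	/* Read or write an object extent: (opcode, offset, length) */
	op = rbd_osd_req_op_create(CEPH_OSD_OP_WRITE, (u64) 0, (u64) 4096);
	rbd_osd_req_op_destroy(op);

	/* Object class method call: (CALL, class, method, outbound, outbound_size) */
	op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, "rbd", "get_size",
				   NULL, (size_t) 0);
	rbd_osd_req_op_destroy(op);

	/* Watch registration: (WATCH, cookie, version, start flag) */
	op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH, (u64) 1, (u64) 2, 1);
	rbd_osd_req_op_destroy(op);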
- /*
- * op extent offset and length will be set later on
- * in calc_raw_layout()
- */
- ops[0].payload_len = payload_len;
+static void rbd_osd_req_op_destroy(struct ceph_osd_req_op *op)
+{
+ kfree(op);
+}
+
+static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
+ struct rbd_obj_request *obj_request)
+{
+ dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
- return ops;
+ return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
-static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
+static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
- kfree(ops);
+ dout("%s: img %p\n", __func__, img_request);
+ if (img_request->callback)
+ img_request->callback(img_request);
+ else
+ rbd_img_request_put(img_request);
}
-static void rbd_coll_end_req_index(struct request *rq,
- struct rbd_req_coll *coll,
- int index,
- int ret, u64 len)
+/* Caller is responsible for rbd_obj_request_destroy(obj_request) */
+
+static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
- struct request_queue *q;
- int min, max, i;
+ dout("%s: obj %p\n", __func__, obj_request);
- dout("rbd_coll_end_req_index %p index %d ret %d len %llu\n",
- coll, index, ret, (unsigned long long) len);
+ return wait_for_completion_interruptible(&obj_request->completion);
+}
- if (!rq)
- return;
+static void obj_request_done_init(struct rbd_obj_request *obj_request)
+{
+ atomic_set(&obj_request->done, 0);
+ smp_wmb();
+}
- if (!coll) {
- blk_end_request(rq, ret, len);
- return;
+static void obj_request_done_set(struct rbd_obj_request *obj_request)
+{
+ int done;
+
+ done = atomic_inc_return(&obj_request->done);
+ if (done > 1) {
+ struct rbd_img_request *img_request = obj_request->img_request;
+ struct rbd_device *rbd_dev;
+
+ rbd_dev = img_request ? img_request->rbd_dev : NULL;
+ rbd_warn(rbd_dev, "obj_request %p was already done\n",
+ obj_request);
}
+}
- q = rq->q;
-
- spin_lock_irq(q->queue_lock);
- coll->status[index].done = 1;
- coll->status[index].rc = ret;
- coll->status[index].bytes = len;
- max = min = coll->num_done;
- while (max < coll->total && coll->status[max].done)
- max++;
-
- for (i = min; i<max; i++) {
- __blk_end_request(rq, coll->status[i].rc,
- coll->status[i].bytes);
- coll->num_done++;
- kref_put(&coll->kref, rbd_coll_release);
+static bool obj_request_done_test(struct rbd_obj_request *obj_request)
+{
+ smp_mb();
+ return atomic_read(&obj_request->done) != 0;
+}
+
+static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
+{
+ dout("%s: obj %p cb %p\n", __func__, obj_request,
+ obj_request->callback);
+ if (obj_request->callback)
+ obj_request->callback(obj_request);
+ else
+ complete_all(&obj_request->completion);
+}
+
+static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
+{
+ dout("%s: obj %p\n", __func__, obj_request);
+ obj_request_done_set(obj_request);
+}
+
+static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
+{
+ dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
+ obj_request->result, obj_request->xferred, obj_request->length);
+ /*
+ * ENOENT means a hole in the object. We zero-fill the
+ * entire length of the request. A short read also implies
+ * zero-fill to the end of the request. Either way we
+ * update the xferred count to indicate the whole request
+ * was satisfied.
+ */
+ if (obj_request->result == -ENOENT) {
+ zero_bio_chain(obj_request->bio_list, 0);
+ obj_request->result = 0;
+ obj_request->xferred = obj_request->length;
+ } else if (obj_request->xferred < obj_request->length &&
+ !obj_request->result) {
+ zero_bio_chain(obj_request->bio_list, obj_request->xferred);
+ obj_request->xferred = obj_request->length;
}
- spin_unlock_irq(q->queue_lock);
+ obj_request_done_set(obj_request);
}
-static void rbd_coll_end_req(struct rbd_request *req,
- int ret, u64 len)
+static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
- rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len);
+ dout("%s: obj %p result %d %llu\n", __func__, obj_request,
+ obj_request->result, obj_request->length);
+ /*
+ * There is no such thing as a successful short write.
+ * Our xferred value is the number of bytes transferred
+ * back. Set it to our originally-requested length.
+ */
+ obj_request->xferred = obj_request->length;
+ obj_request_done_set(obj_request);
}
/*
- * Send ceph osd request
+ * For a simple stat call there's nothing to do. We'll do more if
+ * this is part of a write sequence for a layered image.
*/
-static int rbd_do_request(struct request *rq,
- struct rbd_device *rbd_dev,
- struct ceph_snap_context *snapc,
- u64 snapid,
- const char *object_name, u64 ofs, u64 len,
- struct bio *bio,
- struct page **pages,
- int num_pages,
- int flags,
- struct ceph_osd_req_op *ops,
- struct rbd_req_coll *coll,
- int coll_index,
- void (*rbd_cb)(struct ceph_osd_request *req,
- struct ceph_msg *msg),
- struct ceph_osd_request **linger_req,
- u64 *ver)
-{
- struct ceph_osd_request *req;
- struct ceph_file_layout *layout;
- int ret;
- u64 bno;
- struct timespec mtime = CURRENT_TIME;
- struct rbd_request *req_data;
- struct ceph_osd_request_head *reqhead;
- struct ceph_osd_client *osdc;
+static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
+{
+ dout("%s: obj %p\n", __func__, obj_request);
+ obj_request_done_set(obj_request);
+}
- req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
- if (!req_data) {
- if (coll)
- rbd_coll_end_req_index(rq, coll, coll_index,
- -ENOMEM, len);
- return -ENOMEM;
+static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
+ struct ceph_msg *msg)
+{
+ struct rbd_obj_request *obj_request = osd_req->r_priv;
+ u16 opcode;
+
+ dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
+ rbd_assert(osd_req == obj_request->osd_req);
+ rbd_assert(!!obj_request->img_request ^
+ (obj_request->which == BAD_WHICH));
+
+ if (osd_req->r_result < 0)
+ obj_request->result = osd_req->r_result;
+ obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);
+
+ WARN_ON(osd_req->r_num_ops != 1); /* For now */
+
+ /*
+ * We support a 64-bit length, but ultimately it has to be
+ * passed to blk_end_request(), which takes an unsigned int.
+ */
+ obj_request->xferred = osd_req->r_reply_op_len[0];
+ rbd_assert(obj_request->xferred < (u64) UINT_MAX);
+ opcode = osd_req->r_request_ops[0].op;
+ switch (opcode) {
+ case CEPH_OSD_OP_READ:
+ rbd_osd_read_callback(obj_request);
+ break;
+ case CEPH_OSD_OP_WRITE:
+ rbd_osd_write_callback(obj_request);
+ break;
+ case CEPH_OSD_OP_STAT:
+ rbd_osd_stat_callback(obj_request);
+ break;
+ case CEPH_OSD_OP_CALL:
+ case CEPH_OSD_OP_NOTIFY_ACK:
+ case CEPH_OSD_OP_WATCH:
+ rbd_osd_trivial_callback(obj_request);
+ break;
+ default:
+ rbd_warn(NULL, "%s: unsupported op %hu\n",
+ obj_request->object_name, (unsigned short) opcode);
+ break;
}
- if (coll) {
- req_data->coll = coll;
- req_data->coll_index = coll_index;
+ if (obj_request_done_test(obj_request))
+ rbd_obj_request_complete(obj_request);
+}
+
+static struct ceph_osd_request *rbd_osd_req_create(
+ struct rbd_device *rbd_dev,
+ bool write_request,
+ struct rbd_obj_request *obj_request,
+ struct ceph_osd_req_op *op)
+{
+ struct rbd_img_request *img_request = obj_request->img_request;
+ struct ceph_snap_context *snapc = NULL;
+ struct ceph_osd_client *osdc;
+ struct ceph_osd_request *osd_req;
+ struct timespec now;
+ struct timespec *mtime;
+ u64 snap_id = CEPH_NOSNAP;
+ u64 offset = obj_request->offset;
+ u64 length = obj_request->length;
+
+ if (img_request) {
+ rbd_assert(img_request->write_request == write_request);
+ if (img_request->write_request)
+ snapc = img_request->snapc;
+ else
+ snap_id = img_request->snap_id;
}
- dout("rbd_do_request object_name=%s ofs=%llu len=%llu coll=%p[%d]\n",
- object_name, (unsigned long long) ofs,
- (unsigned long long) len, coll, coll_index);
+ /* Allocate and initialize the request, for the single op */
osdc = &rbd_dev->rbd_client->client->osdc;
- req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
- false, GFP_NOIO, pages, bio);
- if (!req) {
- ret = -ENOMEM;
- goto done_pages;
+ osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
+ if (!osd_req)
+ return NULL; /* ENOMEM */
+
+ rbd_assert(obj_request_type_valid(obj_request->type));
+ switch (obj_request->type) {
+ case OBJ_REQUEST_NODATA:
+ break; /* Nothing to do */
+ case OBJ_REQUEST_BIO:
+ rbd_assert(obj_request->bio_list != NULL);
+ osd_req->r_bio = obj_request->bio_list;
+ break;
+ case OBJ_REQUEST_PAGES:
+ osd_req->r_pages = obj_request->pages;
+ osd_req->r_num_pages = obj_request->page_count;
+ osd_req->r_page_alignment = offset & ~PAGE_MASK;
+ break;
}
- req->r_callback = rbd_cb;
+ if (write_request) {
+ osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
+ now = CURRENT_TIME;
+ mtime = &now;
+ } else {
+ osd_req->r_flags = CEPH_OSD_FLAG_READ;
+ mtime = NULL; /* not needed for reads */
+ offset = 0; /* These are not used... */
+ length = 0; /* ...for osd read requests */
+ }
- req_data->rq = rq;
- req_data->bio = bio;
- req_data->pages = pages;
- req_data->len = len;
+ osd_req->r_callback = rbd_osd_req_callback;
+ osd_req->r_priv = obj_request;
- req->r_priv = req_data;
+ osd_req->r_oid_len = strlen(obj_request->object_name);
+ rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
+ memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
- reqhead = req->r_request->front.iov_base;
- reqhead->snapid = cpu_to_le64(CEPH_NOSNAP);
+ osd_req->r_file_layout = rbd_dev->layout; /* struct */
- strncpy(req->r_oid, object_name, sizeof(req->r_oid));
- req->r_oid_len = strlen(req->r_oid);
+ /* osd_req will get its own reference to snapc (if non-null) */
- layout = &req->r_file_layout;
- memset(layout, 0, sizeof(*layout));
- layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- layout->fl_stripe_count = cpu_to_le32(1);
- layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- layout->fl_pg_pool = cpu_to_le32((int) rbd_dev->spec->pool_id);
- ret = ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
- req, ops);
- rbd_assert(ret == 0);
+ ceph_osdc_build_request(osd_req, offset, length, 1, op,
+ snapc, snap_id, mtime);
- ceph_osdc_build_request(req, ofs, &len,
- ops,
- snapc,
- &mtime,
- req->r_oid, req->r_oid_len);
+ return osd_req;
+}
- if (linger_req) {
- ceph_osdc_set_request_linger(osdc, req);
- *linger_req = req;
- }
+static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
+{
+ ceph_osdc_put_request(osd_req);
+}
- ret = ceph_osdc_start_request(osdc, req, false);
- if (ret < 0)
- goto done_err;
-
- if (!rbd_cb) {
- ret = ceph_osdc_wait_request(osdc, req);
- if (ver)
- *ver = le64_to_cpu(req->r_reassert_version.version);
- dout("reassert_ver=%llu\n",
- (unsigned long long)
- le64_to_cpu(req->r_reassert_version.version));
- ceph_osdc_put_request(req);
+/* object_name is assumed to be a non-null pointer and NUL-terminated */
+
+static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
+ u64 offset, u64 length,
+ enum obj_request_type type)
+{
+ struct rbd_obj_request *obj_request;
+ size_t size;
+ char *name;
+
+ rbd_assert(obj_request_type_valid(type));
+
+ size = strlen(object_name) + 1;
+ obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
+ if (!obj_request)
+ return NULL;
+
+ name = (char *)(obj_request + 1);
+ obj_request->object_name = memcpy(name, object_name, size);
+ obj_request->offset = offset;
+ obj_request->length = length;
+ obj_request->which = BAD_WHICH;
+ obj_request->type = type;
+ INIT_LIST_HEAD(&obj_request->links);
+ obj_request_done_init(obj_request);
+ init_completion(&obj_request->completion);
+ kref_init(&obj_request->kref);
+
+ dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
+ offset, length, (int)type, obj_request);
+
+ return obj_request;
+}
+
+static void rbd_obj_request_destroy(struct kref *kref)
+{
+ struct rbd_obj_request *obj_request;
+
+ obj_request = container_of(kref, struct rbd_obj_request, kref);
+
+ dout("%s: obj %p\n", __func__, obj_request);
+
+ rbd_assert(obj_request->img_request == NULL);
+ rbd_assert(obj_request->which == BAD_WHICH);
+
+ if (obj_request->osd_req)
+ rbd_osd_req_destroy(obj_request->osd_req);
+
+ rbd_assert(obj_request_type_valid(obj_request->type));
+ switch (obj_request->type) {
+ case OBJ_REQUEST_NODATA:
+ break; /* Nothing to do */
+ case OBJ_REQUEST_BIO:
+ if (obj_request->bio_list)
+ bio_chain_put(obj_request->bio_list);
+ break;
+ case OBJ_REQUEST_PAGES:
+ if (obj_request->pages)
+ ceph_release_page_vector(obj_request->pages,
+ obj_request->page_count);
+ break;
}
- return ret;
-done_err:
- bio_chain_put(req_data->bio);
- ceph_osdc_put_request(req);
-done_pages:
- rbd_coll_end_req(req_data, ret, len);
- kfree(req_data);
- return ret;
+ kfree(obj_request);
}
/*
- * Ceph osd op callback
+ * Caller is responsible for filling in the list of object requests
+ * that comprises the image request, and the Linux request pointer
+ * (if there is one).
*/
-static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
-{
- struct rbd_request *req_data = req->r_priv;
- struct ceph_osd_reply_head *replyhead;
- struct ceph_osd_op *op;
- __s32 rc;
- u64 bytes;
- int read_op;
-
- /* parse reply */
- replyhead = msg->front.iov_base;
- WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
- op = (void *)(replyhead + 1);
- rc = le32_to_cpu(replyhead->result);
- bytes = le64_to_cpu(op->extent.length);
- read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);
-
- dout("rbd_req_cb bytes=%llu readop=%d rc=%d\n",
- (unsigned long long) bytes, read_op, (int) rc);
-
- if (rc == -ENOENT && read_op) {
- zero_bio_chain(req_data->bio, 0);
- rc = 0;
- } else if (rc == 0 && read_op && bytes < req_data->len) {
- zero_bio_chain(req_data->bio, bytes);
- bytes = req_data->len;
- }
+static struct rbd_img_request *rbd_img_request_create(
+ struct rbd_device *rbd_dev,
+ u64 offset, u64 length,
+ bool write_request)
+{
+ struct rbd_img_request *img_request;
+ struct ceph_snap_context *snapc = NULL;
- rbd_coll_end_req(req_data, rc, bytes);
+ img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
+ if (!img_request)
+ return NULL;
- if (req_data->bio)
- bio_chain_put(req_data->bio);
+ if (write_request) {
+ down_read(&rbd_dev->header_rwsem);
+ snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+ up_read(&rbd_dev->header_rwsem);
+ if (WARN_ON(!snapc)) {
+ kfree(img_request);
+ return NULL; /* Shouldn't happen */
+ }
+ }
- ceph_osdc_put_request(req);
- kfree(req_data);
+ img_request->rq = NULL;
+ img_request->rbd_dev = rbd_dev;
+ img_request->offset = offset;
+ img_request->length = length;
+ img_request->write_request = write_request;
+ if (write_request)
+ img_request->snapc = snapc;
+ else
+ img_request->snap_id = rbd_dev->spec->snap_id;
+ spin_lock_init(&img_request->completion_lock);
+ img_request->next_completion = 0;
+ img_request->callback = NULL;
+ img_request->obj_request_count = 0;
+ INIT_LIST_HEAD(&img_request->obj_requests);
+ kref_init(&img_request->kref);
+
+ rbd_img_request_get(img_request); /* Avoid a warning */
+ rbd_img_request_put(img_request); /* TEMPORARY */
+
+ dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
+ write_request ? "write" : "read", offset, length,
+ img_request);
+
+ return img_request;
}
-static void rbd_simple_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
+static void rbd_img_request_destroy(struct kref *kref)
{
- ceph_osdc_put_request(req);
+ struct rbd_img_request *img_request;
+ struct rbd_obj_request *obj_request;
+ struct rbd_obj_request *next_obj_request;
+
+ img_request = container_of(kref, struct rbd_img_request, kref);
+
+ dout("%s: img %p\n", __func__, img_request);
+
+ for_each_obj_request_safe(img_request, obj_request, next_obj_request)
+ rbd_img_obj_request_del(img_request, obj_request);
+ rbd_assert(img_request->obj_request_count == 0);
+
+ if (img_request->write_request)
+ ceph_put_snap_context(img_request->snapc);
+
+ kfree(img_request);
}
-/*
- * Do a synchronous ceph osd operation
- */
-static int rbd_req_sync_op(struct rbd_device *rbd_dev,
- struct ceph_snap_context *snapc,
- u64 snapid,
- int flags,
- struct ceph_osd_req_op *ops,
- const char *object_name,
- u64 ofs, u64 inbound_size,
- char *inbound,
- struct ceph_osd_request **linger_req,
- u64 *ver)
+static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
+ struct bio *bio_list)
{
- int ret;
- struct page **pages;
- int num_pages;
-
- rbd_assert(ops != NULL);
+ struct rbd_device *rbd_dev = img_request->rbd_dev;
+ struct rbd_obj_request *obj_request = NULL;
+ struct rbd_obj_request *next_obj_request;
+ unsigned int bio_offset;
+ u64 image_offset;
+ u64 resid;
+ u16 opcode;
+
+ dout("%s: img %p bio %p\n", __func__, img_request, bio_list);
+
+ opcode = img_request->write_request ? CEPH_OSD_OP_WRITE
+ : CEPH_OSD_OP_READ;
+ bio_offset = 0;
+ image_offset = img_request->offset;
+ rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
+ resid = img_request->length;
+ rbd_assert(resid > 0);
+ while (resid) {
+ const char *object_name;
+ unsigned int clone_size;
+ struct ceph_osd_req_op *op;
+ u64 offset;
+ u64 length;
+
+ object_name = rbd_segment_name(rbd_dev, image_offset);
+ if (!object_name)
+ goto out_unwind;
+ offset = rbd_segment_offset(rbd_dev, image_offset);
+ length = rbd_segment_length(rbd_dev, image_offset, resid);
+ obj_request = rbd_obj_request_create(object_name,
+ offset, length,
+ OBJ_REQUEST_BIO);
+ kfree(object_name); /* object request has its own copy */
+ if (!obj_request)
+ goto out_unwind;
+
+ rbd_assert(length <= (u64) UINT_MAX);
+ clone_size = (unsigned int) length;
+ obj_request->bio_list = bio_chain_clone_range(&bio_list,
+ &bio_offset, clone_size,
+ GFP_ATOMIC);
+ if (!obj_request->bio_list)
+ goto out_partial;
- num_pages = calc_pages_for(ofs, inbound_size);
- pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
+ /*
+ * Build up the op to use in building the osd
+ * request. Note that the contents of the op are
+ * copied by rbd_osd_req_create().
+ */
+ op = rbd_osd_req_op_create(opcode, offset, length);
+ if (!op)
+ goto out_partial;
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev,
+ img_request->write_request,
+ obj_request, op);
+ rbd_osd_req_op_destroy(op);
+ if (!obj_request->osd_req)
+ goto out_partial;
+ /* status and version are initially zero-filled */
+
+ rbd_img_obj_request_add(img_request, obj_request);
+
+ image_offset += length;
+ resid -= length;
+ }
- ret = rbd_do_request(NULL, rbd_dev, snapc, snapid,
- object_name, ofs, inbound_size, NULL,
- pages, num_pages,
- flags,
- ops,
- NULL, 0,
- NULL,
- linger_req, ver);
- if (ret < 0)
- goto done;
+ return 0;
- if ((flags & CEPH_OSD_FLAG_READ) && inbound)
- ret = ceph_copy_from_page_vector(pages, inbound, ofs, ret);
+out_partial:
+ rbd_obj_request_put(obj_request);
+out_unwind:
+ for_each_obj_request_safe(img_request, obj_request, next_obj_request)
+ rbd_obj_request_put(obj_request);
-done:
- ceph_release_page_vector(pages, num_pages);
- return ret;
+ return -ENOMEM;
}
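The loop walks the image extent in object-sized steps, so no object request ever crosses an object boundary. As a worked example, assuming the common 4 MiB object size (obj_order 22, an assumption for illustration), a write of 8 MiB starting at image offset 5 MiB is split as follows:

/*
 * image offset 5 MiB, length 8 MiB, 4 MiB objects:
 *
 *   object 1: object offset 1 MiB, length 3 MiB  (up to the object boundary)
 *   object 2: object offset 0,     length 4 MiB  (a full object)
 *   object 3: object offset 0,     length 1 MiB  (the remainder)
 *
 * Each line becomes one rbd_obj_request carrying its own cloned bio chain.
 */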
-/*
- * Do an asynchronous ceph osd operation
- */
-static int rbd_do_op(struct request *rq,
- struct rbd_device *rbd_dev,
- struct ceph_snap_context *snapc,
- u64 ofs, u64 len,
- struct bio *bio,
- struct rbd_req_coll *coll,
- int coll_index)
-{
- char *seg_name;
- u64 seg_ofs;
- u64 seg_len;
- int ret;
- struct ceph_osd_req_op *ops;
- u32 payload_len;
- int opcode;
- int flags;
- u64 snapid;
-
- seg_name = rbd_segment_name(rbd_dev, ofs);
- if (!seg_name)
- return -ENOMEM;
- seg_len = rbd_segment_length(rbd_dev, ofs, len);
- seg_ofs = rbd_segment_offset(rbd_dev, ofs);
-
- if (rq_data_dir(rq) == WRITE) {
- opcode = CEPH_OSD_OP_WRITE;
- flags = CEPH_OSD_FLAG_WRITE|CEPH_OSD_FLAG_ONDISK;
- snapid = CEPH_NOSNAP;
- payload_len = seg_len;
- } else {
- opcode = CEPH_OSD_OP_READ;
- flags = CEPH_OSD_FLAG_READ;
- snapc = NULL;
- snapid = rbd_dev->spec->snap_id;
- payload_len = 0;
+static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request;
+ u32 which = obj_request->which;
+ bool more = true;
+
+ img_request = obj_request->img_request;
+
+ dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
+ rbd_assert(img_request != NULL);
+ rbd_assert(img_request->rq != NULL);
+ rbd_assert(img_request->obj_request_count > 0);
+ rbd_assert(which != BAD_WHICH);
+ rbd_assert(which < img_request->obj_request_count);
+ rbd_assert(which >= img_request->next_completion);
+
+ spin_lock_irq(&img_request->completion_lock);
+ if (which != img_request->next_completion)
+ goto out;
+
+ for_each_obj_request_from(img_request, obj_request) {
+ unsigned int xferred;
+ int result;
+
+ rbd_assert(more);
+ rbd_assert(which < img_request->obj_request_count);
+
+ if (!obj_request_done_test(obj_request))
+ break;
+
+ rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
+ xferred = (unsigned int) obj_request->xferred;
+ result = (int) obj_request->result;
+ if (result)
+ rbd_warn(NULL, "obj_request %s result %d xferred %u\n",
+ img_request->write_request ? "write" : "read",
+ result, xferred);
+
+ more = blk_end_request(img_request->rq, result, xferred);
+ which++;
}
- ret = -ENOMEM;
- ops = rbd_create_rw_ops(1, opcode, payload_len);
- if (!ops)
- goto done;
+ rbd_assert(more ^ (which == img_request->obj_request_count));
+ img_request->next_completion = which;
+out:
+ spin_unlock_irq(&img_request->completion_lock);
- /* we've taken care of segment sizes earlier when we
- cloned the bios. We should never have a segment
- truncated at this point */
- rbd_assert(seg_len == len);
-
- ret = rbd_do_request(rq, rbd_dev, snapc, snapid,
- seg_name, seg_ofs, seg_len,
- bio,
- NULL, 0,
- flags,
- ops,
- coll, coll_index,
- rbd_req_cb, 0, NULL);
-
- rbd_destroy_ops(ops);
-done:
- kfree(seg_name);
- return ret;
+ if (!more)
+ rbd_img_request_complete(img_request);
}
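Object requests may complete on the OSDs in any order, but blk_end_request() has to consume bytes in request order, so next_completion acts as a cursor: a completion that arrives early only marks itself done, while the one matching the cursor drains every consecutive finished entry. A short trace, assuming three object requests where the last one finishes first:

/*
 * obj requests 0, 1, 2; next_completion starts at 0:
 *
 *   obj 2 completes -> which (2) != next_completion (0): marked done, return
 *   obj 0 completes -> bytes for obj 0 ended, obj 1 not done yet, cursor = 1
 *   obj 1 completes -> bytes for obj 1 ended, then obj 2 (already done),
 *                      cursor = 3 and the whole image request completes
 */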
-/*
- * Request sync osd read
- */
-static int rbd_req_sync_read(struct rbd_device *rbd_dev,
- u64 snapid,
- const char *object_name,
- u64 ofs, u64 len,
- char *buf,
- u64 *ver)
-{
- struct ceph_osd_req_op *ops;
- int ret;
+static int rbd_img_request_submit(struct rbd_img_request *img_request)
+{
+ struct rbd_device *rbd_dev = img_request->rbd_dev;
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct rbd_obj_request *obj_request;
- ops = rbd_create_rw_ops(1, CEPH_OSD_OP_READ, 0);
- if (!ops)
- return -ENOMEM;
+ dout("%s: img %p\n", __func__, img_request);
+ for_each_obj_request(img_request, obj_request) {
+ int ret;
- ret = rbd_req_sync_op(rbd_dev, NULL,
- snapid,
- CEPH_OSD_FLAG_READ,
- ops, object_name, ofs, len, buf, NULL, ver);
- rbd_destroy_ops(ops);
+ obj_request->callback = rbd_img_obj_callback;
+ ret = rbd_obj_request_submit(osdc, obj_request);
+ if (ret)
+ return ret;
+ /*
+ * The image request has its own reference to each
+ * of its object requests, so we can safely drop the
+ * initial one here.
+ */
+ rbd_obj_request_put(obj_request);
+ }
- return ret;
+ return 0;
}
-/*
- * Request sync osd watch
- */
-static int rbd_req_sync_notify_ack(struct rbd_device *rbd_dev,
- u64 ver,
- u64 notify_id)
+static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
+ u64 ver, u64 notify_id)
{
- struct ceph_osd_req_op *ops;
+ struct rbd_obj_request *obj_request;
+ struct ceph_osd_req_op *op;
+ struct ceph_osd_client *osdc;
int ret;
- ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY_ACK, 0);
- if (!ops)
+ obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
+ OBJ_REQUEST_NODATA);
+ if (!obj_request)
return -ENOMEM;
- ops[0].watch.ver = cpu_to_le64(ver);
- ops[0].watch.cookie = notify_id;
- ops[0].watch.flag = 0;
+ ret = -ENOMEM;
+ op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver);
+ if (!op)
+ goto out;
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
+ obj_request, op);
+ rbd_osd_req_op_destroy(op);
+ if (!obj_request->osd_req)
+ goto out;
- ret = rbd_do_request(NULL, rbd_dev, NULL, CEPH_NOSNAP,
- rbd_dev->header_name, 0, 0, NULL,
- NULL, 0,
- CEPH_OSD_FLAG_READ,
- ops,
- NULL, 0,
- rbd_simple_req_cb, 0, NULL);
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ obj_request->callback = rbd_obj_request_put;
+ ret = rbd_obj_request_submit(osdc, obj_request);
+out:
+ if (ret)
+ rbd_obj_request_put(obj_request);
- rbd_destroy_ops(ops);
return ret;
}
@@ -1381,95 +1791,103 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
if (!rbd_dev)
return;
- dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
+ dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
rbd_dev->header_name, (unsigned long long) notify_id,
(unsigned int) opcode);
rc = rbd_dev_refresh(rbd_dev, &hver);
if (rc)
- pr_warning(RBD_DRV_NAME "%d got notification but failed to "
- " update snaps: %d\n", rbd_dev->major, rc);
+ rbd_warn(rbd_dev, "got notification but failed to"
+ " update snaps: %d\n", rc);
- rbd_req_sync_notify_ack(rbd_dev, hver, notify_id);
+ rbd_obj_notify_ack(rbd_dev, hver, notify_id);
}
/*
- * Request sync osd watch
+ * Request sync osd watch/unwatch. The value of "start" determines
+ * whether a watch request is being initiated or torn down.
*/
-static int rbd_req_sync_watch(struct rbd_device *rbd_dev)
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
{
- struct ceph_osd_req_op *ops;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct rbd_obj_request *obj_request;
+ struct ceph_osd_req_op *op;
int ret;
- ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
- if (!ops)
- return -ENOMEM;
+ rbd_assert(start ^ !!rbd_dev->watch_event);
+ rbd_assert(start ^ !!rbd_dev->watch_request);
- ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0,
- (void *)rbd_dev, &rbd_dev->watch_event);
- if (ret < 0)
- goto fail;
+ if (start) {
+ ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
+ &rbd_dev->watch_event);
+ if (ret < 0)
+ return ret;
+ rbd_assert(rbd_dev->watch_event != NULL);
+ }
- ops[0].watch.ver = cpu_to_le64(rbd_dev->header.obj_version);
- ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
- ops[0].watch.flag = 1;
+ ret = -ENOMEM;
+ obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
+ OBJ_REQUEST_NODATA);
+ if (!obj_request)
+ goto out_cancel;
+
+ op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH,
+ rbd_dev->watch_event->cookie,
+ rbd_dev->header.obj_version, start);
+ if (!op)
+ goto out_cancel;
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, true,
+ obj_request, op);
+ rbd_osd_req_op_destroy(op);
+ if (!obj_request->osd_req)
+ goto out_cancel;
+
+ if (start)
+ ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
+ else
+ ceph_osdc_unregister_linger_request(osdc,
+ rbd_dev->watch_request->osd_req);
+ ret = rbd_obj_request_submit(osdc, obj_request);
+ if (ret)
+ goto out_cancel;
+ ret = rbd_obj_request_wait(obj_request);
+ if (ret)
+ goto out_cancel;
+ ret = obj_request->result;
+ if (ret)
+ goto out_cancel;
- ret = rbd_req_sync_op(rbd_dev, NULL,
- CEPH_NOSNAP,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ops,
- rbd_dev->header_name,
- 0, 0, NULL,
- &rbd_dev->watch_request, NULL);
+ /*
+ * A watch request is set to linger, so the underlying osd
+ * request won't go away until we unregister it. We retain
+ * a pointer to the object request during that time (in
+ * rbd_dev->watch_request), so we'll keep a reference to
+ * it. We'll drop that reference (below) after we've
+ * unregistered it.
+ */
+ if (start) {
+ rbd_dev->watch_request = obj_request;
- if (ret < 0)
- goto fail_event;
+ return 0;
+ }
- rbd_destroy_ops(ops);
- return 0;
+ /* We have successfully torn down the watch request */
-fail_event:
+ rbd_obj_request_put(rbd_dev->watch_request);
+ rbd_dev->watch_request = NULL;
+out_cancel:
+ /* Cancel the event if we're tearing down, or on error */
ceph_osdc_cancel_event(rbd_dev->watch_event);
rbd_dev->watch_event = NULL;
-fail:
- rbd_destroy_ops(ops);
- return ret;
-}
+ if (obj_request)
+ rbd_obj_request_put(obj_request);
-/*
- * Request sync osd unwatch
- */
-static int rbd_req_sync_unwatch(struct rbd_device *rbd_dev)
-{
- struct ceph_osd_req_op *ops;
- int ret;
-
- ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
- if (!ops)
- return -ENOMEM;
-
- ops[0].watch.ver = 0;
- ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
- ops[0].watch.flag = 0;
-
- ret = rbd_req_sync_op(rbd_dev, NULL,
- CEPH_NOSNAP,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ops,
- rbd_dev->header_name,
- 0, 0, NULL, NULL, NULL);
-
-
- rbd_destroy_ops(ops);
- ceph_osdc_cancel_event(rbd_dev->watch_event);
- rbd_dev->watch_event = NULL;
return ret;
}
/*
* Synchronous osd object method call
*/
-static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
+static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
const char *object_name,
const char *class_name,
const char *method_name,
@@ -1477,169 +1895,154 @@ static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
size_t outbound_size,
char *inbound,
size_t inbound_size,
- int flags,
- u64 *ver)
+ u64 *version)
{
- struct ceph_osd_req_op *ops;
- int class_name_len = strlen(class_name);
- int method_name_len = strlen(method_name);
- int payload_size;
+ struct rbd_obj_request *obj_request;
+ struct ceph_osd_client *osdc;
+ struct ceph_osd_req_op *op;
+ struct page **pages;
+ u32 page_count;
int ret;
/*
- * Any input parameters required by the method we're calling
- * will be sent along with the class and method names as
- * part of the message payload. That data and its size are
- * supplied via the indata and indata_len fields (named from
- * the perspective of the server side) in the OSD request
- * operation.
+ * Method calls are ultimately read operations but they
+ * don't involve object data (so no offset or length).
+ * The result should be placed into the inbound buffer
+ * provided. They also supply outbound data--parameters for
+ * the object method. Currently if this is present it will
+ * be a snapshot id.
*/
- payload_size = class_name_len + method_name_len + outbound_size;
- ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL, payload_size);
- if (!ops)
- return -ENOMEM;
+ page_count = (u32) calc_pages_for(0, inbound_size);
+ pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
- ops[0].cls.class_name = class_name;
- ops[0].cls.class_len = (__u8) class_name_len;
- ops[0].cls.method_name = method_name;
- ops[0].cls.method_len = (__u8) method_name_len;
- ops[0].cls.argc = 0;
- ops[0].cls.indata = outbound;
- ops[0].cls.indata_len = outbound_size;
+ ret = -ENOMEM;
+ obj_request = rbd_obj_request_create(object_name, 0, 0,
+ OBJ_REQUEST_PAGES);
+ if (!obj_request)
+ goto out;
- ret = rbd_req_sync_op(rbd_dev, NULL,
- CEPH_NOSNAP,
- flags, ops,
- object_name, 0, inbound_size, inbound,
- NULL, ver);
+ obj_request->pages = pages;
+ obj_request->page_count = page_count;
- rbd_destroy_ops(ops);
+ op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name,
+ method_name, outbound, outbound_size);
+ if (!op)
+ goto out;
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
+ obj_request, op);
+ rbd_osd_req_op_destroy(op);
+ if (!obj_request->osd_req)
+ goto out;
- dout("cls_exec returned %d\n", ret);
- return ret;
-}
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ ret = rbd_obj_request_submit(osdc, obj_request);
+ if (ret)
+ goto out;
+ ret = rbd_obj_request_wait(obj_request);
+ if (ret)
+ goto out;
-static struct rbd_req_coll *rbd_alloc_coll(int num_reqs)
-{
- struct rbd_req_coll *coll =
- kzalloc(sizeof(struct rbd_req_coll) +
- sizeof(struct rbd_req_status) * num_reqs,
- GFP_ATOMIC);
+ ret = obj_request->result;
+ if (ret < 0)
+ goto out;
+ ret = 0;
+ ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
+ if (version)
+ *version = obj_request->version;
+out:
+ if (obj_request)
+ rbd_obj_request_put(obj_request);
+ else
+ ceph_release_page_vector(pages, page_count);
- if (!coll)
- return NULL;
- coll->total = num_reqs;
- kref_init(&coll->kref);
- return coll;
+ return ret;
}
-/*
- * block device queue callback
- */
-static void rbd_rq_fn(struct request_queue *q)
+static void rbd_request_fn(struct request_queue *q)
+ __releases(q->queue_lock) __acquires(q->queue_lock)
{
struct rbd_device *rbd_dev = q->queuedata;
+ bool read_only = rbd_dev->mapping.read_only;
struct request *rq;
+ int result;
while ((rq = blk_fetch_request(q))) {
- struct bio *bio;
- bool do_write;
- unsigned int size;
- u64 ofs;
- int num_segs, cur_seg = 0;
- struct rbd_req_coll *coll;
- struct ceph_snap_context *snapc;
- unsigned int bio_offset;
-
- dout("fetched request\n");
-
- /* filter out block requests we don't understand */
- if ((rq->cmd_type != REQ_TYPE_FS)) {
- __blk_end_request_all(rq, 0);
- continue;
- }
+ bool write_request = rq_data_dir(rq) == WRITE;
+ struct rbd_img_request *img_request;
+ u64 offset;
+ u64 length;
+
+ /* Ignore any non-FS requests that filter through. */
- /* deduce our operation (read, write) */
- do_write = (rq_data_dir(rq) == WRITE);
- if (do_write && rbd_dev->mapping.read_only) {
- __blk_end_request_all(rq, -EROFS);
+ if (rq->cmd_type != REQ_TYPE_FS) {
+ dout("%s: non-fs request type %d\n", __func__,
+ (int) rq->cmd_type);
+ __blk_end_request_all(rq, 0);
continue;
}
- spin_unlock_irq(q->queue_lock);
+ /* Ignore/skip any zero-length requests */
- down_read(&rbd_dev->header_rwsem);
+ offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
+ length = (u64) blk_rq_bytes(rq);
- if (!rbd_dev->exists) {
- rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
- up_read(&rbd_dev->header_rwsem);
- dout("request for non-existent snapshot");
- spin_lock_irq(q->queue_lock);
- __blk_end_request_all(rq, -ENXIO);
+ if (!length) {
+ dout("%s: zero-length request\n", __func__);
+ __blk_end_request_all(rq, 0);
continue;
}
- snapc = ceph_get_snap_context(rbd_dev->header.snapc);
-
- up_read(&rbd_dev->header_rwsem);
-
- size = blk_rq_bytes(rq);
- ofs = blk_rq_pos(rq) * SECTOR_SIZE;
- bio = rq->bio;
+ spin_unlock_irq(q->queue_lock);
- dout("%s 0x%x bytes at 0x%llx\n",
- do_write ? "write" : "read",
- size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
+ /* Disallow writes to a read-only device */
- num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
- if (num_segs <= 0) {
- spin_lock_irq(q->queue_lock);
- __blk_end_request_all(rq, num_segs);
- ceph_put_snap_context(snapc);
- continue;
+ if (write_request) {
+ result = -EROFS;
+ if (read_only)
+ goto end_request;
+ rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
}
- coll = rbd_alloc_coll(num_segs);
- if (!coll) {
- spin_lock_irq(q->queue_lock);
- __blk_end_request_all(rq, -ENOMEM);
- ceph_put_snap_context(snapc);
- continue;
- }
-
- bio_offset = 0;
- do {
- u64 limit = rbd_segment_length(rbd_dev, ofs, size);
- unsigned int chain_size;
- struct bio *bio_chain;
-
- BUG_ON(limit > (u64) UINT_MAX);
- chain_size = (unsigned int) limit;
- dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
- kref_get(&coll->kref);
+ /*
+ * Quit early if the mapped snapshot no longer
+ * exists. It's still possible the snapshot will
+ * have disappeared by the time our request arrives
+ * at the osd, but there's no sense in sending it if
+ * we already know.
+ */
+ if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
+ dout("request for non-existent snapshot");
+ rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
+ result = -ENXIO;
+ goto end_request;
+ }
- /* Pass a cloned bio chain via an osd request */
+ result = -EINVAL;
+ if (WARN_ON(offset && length > U64_MAX - offset + 1))
+ goto end_request; /* Shouldn't happen */
- bio_chain = bio_chain_clone_range(&bio,
- &bio_offset, chain_size,
- GFP_ATOMIC);
- if (bio_chain)
- (void) rbd_do_op(rq, rbd_dev, snapc,
- ofs, chain_size,
- bio_chain, coll, cur_seg);
- else
- rbd_coll_end_req_index(rq, coll, cur_seg,
- -ENOMEM, chain_size);
- size -= chain_size;
- ofs += chain_size;
+ result = -ENOMEM;
+ img_request = rbd_img_request_create(rbd_dev, offset, length,
+ write_request);
+ if (!img_request)
+ goto end_request;
- cur_seg++;
- } while (size > 0);
- kref_put(&coll->kref, rbd_coll_release);
+ img_request->rq = rq;
+ result = rbd_img_request_fill_bio(img_request, rq->bio);
+ if (!result)
+ result = rbd_img_request_submit(img_request);
+ if (result)
+ rbd_img_request_put(img_request);
+end_request:
spin_lock_irq(q->queue_lock);
-
- ceph_put_snap_context(snapc);
+ if (result < 0) {
+ rbd_warn(rbd_dev, "obj_request %s result %d\n",
+ write_request ? "write" : "read", result);
+ __blk_end_request_all(rq, result);
+ }
}
}
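To make the overflow guard in this function concrete: the -EINVAL check rejects any byte range whose end would wrap past U64_MAX. A minimal illustration (the helper name is hypothetical and not part of the patch):

	/* The range [offset, offset + length - 1] must not wrap, so when
	 * offset is non-zero, length may be at most U64_MAX - offset + 1.
	 */
	static bool rbd_range_wraps(u64 offset, u64 length)
	{
		return offset && length > U64_MAX - offset + 1;
	}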
@@ -1703,6 +2106,71 @@ static void rbd_free_disk(struct rbd_device *rbd_dev)
put_disk(disk);
}
+static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
+ const char *object_name,
+ u64 offset, u64 length,
+ char *buf, u64 *version)
+
+{
+ struct ceph_osd_req_op *op;
+ struct rbd_obj_request *obj_request;
+ struct ceph_osd_client *osdc;
+ struct page **pages = NULL;
+ u32 page_count;
+ size_t size;
+ int ret;
+
+ page_count = (u32) calc_pages_for(offset, length);
+ pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ ret = -ENOMEM;
+ obj_request = rbd_obj_request_create(object_name, offset, length,
+ OBJ_REQUEST_PAGES);
+ if (!obj_request)
+ goto out;
+
+ obj_request->pages = pages;
+ obj_request->page_count = page_count;
+
+ op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
+ if (!op)
+ goto out;
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
+ obj_request, op);
+ rbd_osd_req_op_destroy(op);
+ if (!obj_request->osd_req)
+ goto out;
+
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ ret = rbd_obj_request_submit(osdc, obj_request);
+ if (ret)
+ goto out;
+ ret = rbd_obj_request_wait(obj_request);
+ if (ret)
+ goto out;
+
+ ret = obj_request->result;
+ if (ret < 0)
+ goto out;
+
+ rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
+ size = (size_t) obj_request->xferred;
+ ceph_copy_from_page_vector(pages, buf, 0, size);
+ rbd_assert(size <= (size_t) INT_MAX);
+ ret = (int) size;
+ if (version)
+ *version = obj_request->version;
+out:
+ if (obj_request)
+ rbd_obj_request_put(obj_request);
+ else
+ ceph_release_page_vector(pages, page_count);
+
+ return ret;
+}
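As a worked example of the page-vector sizing above (assuming 4 KiB pages): a read of 8192 bytes starting at byte offset 512 covers bytes 512..8703, which touch three pages, so calc_pages_for(512, 8192) returns 3:

	/* Worked example, 4 KiB pages assumed */
	u32 page_count = (u32) calc_pages_for(512, 8192);	/* == 3 */
	struct page **pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);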
+
/*
* Read the complete header for the given rbd device.
*
@@ -1741,24 +2209,20 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
if (!ondisk)
return ERR_PTR(-ENOMEM);
- ret = rbd_req_sync_read(rbd_dev, CEPH_NOSNAP,
- rbd_dev->header_name,
+ ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
0, size,
(char *) ondisk, version);
-
if (ret < 0)
goto out_err;
if (WARN_ON((size_t) ret < size)) {
ret = -ENXIO;
- pr_warning("short header read for image %s"
- " (want %zd got %d)\n",
- rbd_dev->spec->image_name, size, ret);
+ rbd_warn(rbd_dev, "short header read (want %zd got %d)",
+ size, ret);
goto out_err;
}
if (!rbd_dev_ondisk_valid(ondisk)) {
ret = -ENXIO;
- pr_warning("invalid header for image %s\n",
- rbd_dev->spec->image_name);
+ rbd_warn(rbd_dev, "invalid header");
goto out_err;
}
@@ -1895,8 +2359,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
- /* init rq */
- q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);
+ q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
if (!q)
goto out_disk;
@@ -2233,7 +2696,7 @@ static void rbd_spec_free(struct kref *kref)
kfree(spec);
}
-struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
struct rbd_spec *spec)
{
struct rbd_device *rbd_dev;
@@ -2243,6 +2706,7 @@ struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
return NULL;
spin_lock_init(&rbd_dev->lock);
+ rbd_dev->flags = 0;
INIT_LIST_HEAD(&rbd_dev->node);
INIT_LIST_HEAD(&rbd_dev->snaps);
init_rwsem(&rbd_dev->header_rwsem);
@@ -2250,6 +2714,13 @@ struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
rbd_dev->spec = spec;
rbd_dev->rbd_client = rbdc;
+ /* Initialize the layout used for all rbd requests */
+
+ rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
+ rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
+ rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
+ rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
+
return rbd_dev;
}
@@ -2360,12 +2831,11 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
__le64 size;
} __attribute__ ((packed)) size_buf = { 0 };
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_size",
(char *) &snapid, sizeof (snapid),
- (char *) &size_buf, sizeof (size_buf),
- CEPH_OSD_FLAG_READ, NULL);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ (char *) &size_buf, sizeof (size_buf), NULL);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
@@ -2396,15 +2866,13 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
if (!reply_buf)
return -ENOMEM;
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_object_prefix",
NULL, 0,
- reply_buf, RBD_OBJ_PREFIX_LEN_MAX,
- CEPH_OSD_FLAG_READ, NULL);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
- ret = 0; /* rbd_req_sync_exec() can return positive */
p = reply_buf;
rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
@@ -2435,12 +2903,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 incompat;
int ret;
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_features",
(char *) &snapid, sizeof (snapid),
(char *) &features_buf, sizeof (features_buf),
- CEPH_OSD_FLAG_READ, NULL);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ NULL);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
@@ -2474,7 +2942,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
void *end;
char *image_id;
u64 overlap;
- size_t len = 0;
int ret;
parent_spec = rbd_spec_alloc();
@@ -2492,12 +2959,11 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
}
snapid = cpu_to_le64(CEPH_NOSNAP);
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_parent",
(char *) &snapid, sizeof (snapid),
- (char *) reply_buf, size,
- CEPH_OSD_FLAG_READ, NULL);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ (char *) reply_buf, size, NULL);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out_err;
@@ -2508,13 +2974,18 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
if (parent_spec->pool_id == CEPH_NOPOOL)
goto out; /* No parent? No problem. */
- image_id = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
+ /* The ceph file layout needs to fit pool id in 32 bits */
+
+ ret = -EIO;
+ if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
+ goto out;
+
+ image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(image_id)) {
ret = PTR_ERR(image_id);
goto out_err;
}
parent_spec->image_id = image_id;
- parent_spec->image_id_len = len;
ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
ceph_decode_64_safe(&p, end, overlap, out_err);
@@ -2544,26 +3015,25 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
rbd_assert(!rbd_dev->spec->image_name);
- image_id_size = sizeof (__le32) + rbd_dev->spec->image_id_len;
+ len = strlen(rbd_dev->spec->image_id);
+ image_id_size = sizeof (__le32) + len;
image_id = kmalloc(image_id_size, GFP_KERNEL);
if (!image_id)
return NULL;
p = image_id;
end = (char *) image_id + image_id_size;
- ceph_encode_string(&p, end, rbd_dev->spec->image_id,
- (u32) rbd_dev->spec->image_id_len);
+ ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);
size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
reply_buf = kmalloc(size, GFP_KERNEL);
if (!reply_buf)
goto out;
- ret = rbd_req_sync_exec(rbd_dev, RBD_DIRECTORY,
+ ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
"rbd", "dir_get_name",
image_id, image_id_size,
- (char *) reply_buf, size,
- CEPH_OSD_FLAG_READ, NULL);
+ (char *) reply_buf, size, NULL);
if (ret < 0)
goto out;
p = reply_buf;
@@ -2602,8 +3072,11 @@ static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
osdc = &rbd_dev->rbd_client->client->osdc;
name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
- if (!name)
- return -EIO; /* pool id too large (>= 2^31) */
+ if (!name) {
+ rbd_warn(rbd_dev, "there is no pool with id %llu",
+ rbd_dev->spec->pool_id); /* Really a BUG() */
+ return -EIO;
+ }
rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
if (!rbd_dev->spec->pool_name)
@@ -2612,19 +3085,17 @@ static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
/* Fetch the image name; tolerate failure here */
name = rbd_dev_image_name(rbd_dev);
- if (name) {
- rbd_dev->spec->image_name_len = strlen(name);
+ if (name)
rbd_dev->spec->image_name = (char *) name;
- } else {
- pr_warning(RBD_DRV_NAME "%d "
- "unable to get image name for image id %s\n",
- rbd_dev->major, rbd_dev->spec->image_id);
- }
+ else
+ rbd_warn(rbd_dev, "unable to get image name");
/* Look up the snapshot name. */
name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
if (!name) {
+ rbd_warn(rbd_dev, "no snapshot with id %llu",
+ rbd_dev->spec->snap_id); /* Really a BUG() */
ret = -EIO;
goto out_err;
}
@@ -2665,12 +3136,11 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
if (!reply_buf)
return -ENOMEM;
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_snapcontext",
NULL, 0,
- reply_buf, size,
- CEPH_OSD_FLAG_READ, ver);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ reply_buf, size, ver);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
@@ -2735,12 +3205,11 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
return ERR_PTR(-ENOMEM);
snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_snapshot_name",
(char *) &snap_id, sizeof (snap_id),
- reply_buf, size,
- CEPH_OSD_FLAG_READ, NULL);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ reply_buf, size, NULL);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
@@ -2766,7 +3235,7 @@ out:
static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
u64 *snap_size, u64 *snap_features)
{
- __le64 snap_id;
+ u64 snap_id;
u8 order;
int ret;
@@ -2865,10 +3334,17 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
struct list_head *next = links->next;
- /* Existing snapshot not in the new snap context */
-
+ /*
+ * A previously-existing snapshot is not in
+ * the new snap context.
+ *
+ * If the now missing snapshot is the one the
+ * image is mapped to, clear its exists flag
+ * so we can avoid sending any more requests
+ * to it.
+ */
if (rbd_dev->spec->snap_id == snap->id)
- rbd_dev->exists = false;
+ clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
rbd_remove_snap_dev(snap);
dout("%ssnap id %llu has been removed\n",
rbd_dev->spec->snap_id == snap->id ?
@@ -2942,7 +3418,7 @@ static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
struct rbd_snap *snap;
int ret = 0;
- dout("%s called\n", __func__);
+ dout("%s:\n", __func__);
if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
return -EIO;
@@ -2983,22 +3459,6 @@ static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
device_unregister(&rbd_dev->dev);
}
-static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
-{
- int ret, rc;
-
- do {
- ret = rbd_req_sync_watch(rbd_dev);
- if (ret == -ERANGE) {
- rc = rbd_dev_refresh(rbd_dev, NULL);
- if (rc < 0)
- return rc;
- }
- } while (ret == -ERANGE);
-
- return ret;
-}
-
static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
/*
@@ -3138,11 +3598,9 @@ static inline char *dup_token(const char **buf, size_t *lenp)
size_t len;
len = next_token(buf);
- dup = kmalloc(len + 1, GFP_KERNEL);
+ dup = kmemdup(*buf, len + 1, GFP_KERNEL);
if (!dup)
return NULL;
-
- memcpy(dup, *buf, len);
*(dup + len) = '\0';
*buf += len;
@@ -3210,8 +3668,10 @@ static int rbd_add_parse_args(const char *buf,
/* The first four tokens are required */
len = next_token(&buf);
- if (!len)
- return -EINVAL; /* Missing monitor address(es) */
+ if (!len) {
+ rbd_warn(NULL, "no monitor address(es) provided");
+ return -EINVAL;
+ }
mon_addrs = buf;
mon_addrs_size = len + 1;
buf += len;
@@ -3220,8 +3680,10 @@ static int rbd_add_parse_args(const char *buf,
options = dup_token(&buf, NULL);
if (!options)
return -ENOMEM;
- if (!*options)
- goto out_err; /* Missing options */
+ if (!*options) {
+ rbd_warn(NULL, "no options provided");
+ goto out_err;
+ }
spec = rbd_spec_alloc();
if (!spec)
@@ -3230,14 +3692,18 @@ static int rbd_add_parse_args(const char *buf,
spec->pool_name = dup_token(&buf, NULL);
if (!spec->pool_name)
goto out_mem;
- if (!*spec->pool_name)
- goto out_err; /* Missing pool name */
+ if (!*spec->pool_name) {
+ rbd_warn(NULL, "no pool name provided");
+ goto out_err;
+ }
- spec->image_name = dup_token(&buf, &spec->image_name_len);
+ spec->image_name = dup_token(&buf, NULL);
if (!spec->image_name)
goto out_mem;
- if (!*spec->image_name)
- goto out_err; /* Missing image name */
+ if (!*spec->image_name) {
+ rbd_warn(NULL, "no image name provided");
+ goto out_err;
+ }
/*
* Snapshot name is optional; default is to use "-"
@@ -3251,10 +3717,9 @@ static int rbd_add_parse_args(const char *buf,
ret = -ENAMETOOLONG;
goto out_err;
}
- spec->snap_name = kmalloc(len + 1, GFP_KERNEL);
+ spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
if (!spec->snap_name)
goto out_mem;
- memcpy(spec->snap_name, buf, len);
*(spec->snap_name + len) = '\0';
/* Initialize all rbd options to the defaults */
@@ -3323,7 +3788,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
* First, see if the format 2 image id file exists, and if
* so, get the image's persistent id from it.
*/
- size = sizeof (RBD_ID_PREFIX) + rbd_dev->spec->image_name_len;
+ size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
object_name = kmalloc(size, GFP_NOIO);
if (!object_name)
return -ENOMEM;
@@ -3339,21 +3804,18 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
goto out;
}
- ret = rbd_req_sync_exec(rbd_dev, object_name,
+ ret = rbd_obj_method_sync(rbd_dev, object_name,
"rbd", "get_id",
NULL, 0,
- response, RBD_IMAGE_ID_LEN_MAX,
- CEPH_OSD_FLAG_READ, NULL);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ response, RBD_IMAGE_ID_LEN_MAX, NULL);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
- ret = 0; /* rbd_req_sync_exec() can return positive */
p = response;
rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
p + RBD_IMAGE_ID_LEN_MAX,
- &rbd_dev->spec->image_id_len,
- GFP_NOIO);
+ NULL, GFP_NOIO);
if (IS_ERR(rbd_dev->spec->image_id)) {
ret = PTR_ERR(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
@@ -3377,11 +3839,10 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
if (!rbd_dev->spec->image_id)
return -ENOMEM;
- rbd_dev->spec->image_id_len = 0;
/* Record the header object name for this rbd image. */
- size = rbd_dev->spec->image_name_len + sizeof (RBD_SUFFIX);
+ size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
if (!rbd_dev->header_name) {
ret = -ENOMEM;
@@ -3427,7 +3888,7 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
* Image id was filled in by the caller. Record the header
* object name for this rbd image.
*/
- size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->spec->image_id_len;
+ size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
if (!rbd_dev->header_name)
return -ENOMEM;
@@ -3542,7 +4003,7 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
if (ret)
goto err_out_bus;
- ret = rbd_init_watch_dev(rbd_dev);
+ ret = rbd_dev_header_watch_sync(rbd_dev, 1);
if (ret)
goto err_out_bus;
@@ -3638,6 +4099,13 @@ static ssize_t rbd_add(struct bus_type *bus,
goto err_out_client;
spec->pool_id = (u64) rc;
+ /* The ceph file layout needs to fit pool id in 32 bits */
+
+ if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
+ rc = -EIO;
+ goto err_out_client;
+ }
+
rbd_dev = rbd_dev_create(rbdc, spec);
if (!rbd_dev)
goto err_out_client;
@@ -3691,15 +4159,8 @@ static void rbd_dev_release(struct device *dev)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- if (rbd_dev->watch_request) {
- struct ceph_client *client = rbd_dev->rbd_client->client;
-
- ceph_osdc_unregister_linger_request(&client->osdc,
- rbd_dev->watch_request);
- }
if (rbd_dev->watch_event)
- rbd_req_sync_unwatch(rbd_dev);
-
+ rbd_dev_header_watch_sync(rbd_dev, 0);
/* clean up and free blkdev */
rbd_free_disk(rbd_dev);
@@ -3743,10 +4204,14 @@ static ssize_t rbd_remove(struct bus_type *bus,
goto done;
}
- if (rbd_dev->open_count) {
+ spin_lock_irq(&rbd_dev->lock);
+ if (rbd_dev->open_count)
ret = -EBUSY;
+ else
+ set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
+ spin_unlock_irq(&rbd_dev->lock);
+ if (ret < 0)
goto done;
- }
rbd_remove_all_snaps(rbd_dev);
rbd_bus_del_dev(rbd_dev);
@@ -3782,10 +4247,15 @@ static void rbd_sysfs_cleanup(void)
device_unregister(&rbd_root_dev);
}
-int __init rbd_init(void)
+static int __init rbd_init(void)
{
int rc;
+ if (!libceph_compatible(NULL)) {
+ rbd_warn(NULL, "libceph incompatibility (quitting)");
+
+ return -EINVAL;
+ }
rc = rbd_sysfs_init();
if (rc)
return rc;
@@ -3793,7 +4263,7 @@ int __init rbd_init(void)
return 0;
}
-void __exit rbd_exit(void)
+static void __exit rbd_exit(void)
{
rbd_sysfs_cleanup();
}
diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile
new file mode 100644
index 000000000000..f35cd0b71f7b
--- /dev/null
+++ b/drivers/block/rsxx/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
+rsxx-y := config.o core.o cregs.o dev.o dma.o
diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c
new file mode 100644
index 000000000000..a295e7e9ee41
--- /dev/null
+++ b/drivers/block/rsxx/config.c
@@ -0,0 +1,213 @@
+/*
+* Filename: config.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/types.h>
+#include <linux/crc32.h>
+#include <linux/swab.h>
+
+#include "rsxx_priv.h"
+#include "rsxx_cfg.h"
+
+static void initialize_config(void *config)
+{
+ struct rsxx_card_cfg *cfg = config;
+
+ cfg->hdr.version = RSXX_CFG_VERSION;
+
+ cfg->data.block_size = RSXX_HW_BLK_SIZE;
+ cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
+ cfg->data.vendor_id = RSXX_VENDOR_ID_TMS_IBM;
+ cfg->data.cache_order = (-1);
+ cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED;
+ cfg->data.intr_coal.count = 0;
+ cfg->data.intr_coal.latency = 0;
+}
+
+static u32 config_data_crc32(struct rsxx_card_cfg *cfg)
+{
+ /*
+ * Return the complement of the CRC to ensure compatibility
+ * (i.e., this is how early rsxx drivers did it).
+ */
+
+ return ~crc32(~0, &cfg->data, sizeof(cfg->data));
+}
+
+
+/*----------------- Config Byte Swap Functions -------------------*/
+static void config_hdr_be_to_cpu(struct card_cfg_hdr *hdr)
+{
+ hdr->version = be32_to_cpu((__force __be32) hdr->version);
+ hdr->crc = be32_to_cpu((__force __be32) hdr->crc);
+}
+
+static void config_hdr_cpu_to_be(struct card_cfg_hdr *hdr)
+{
+ hdr->version = (__force u32) cpu_to_be32(hdr->version);
+ hdr->crc = (__force u32) cpu_to_be32(hdr->crc);
+}
+
+static void config_data_swab(struct rsxx_card_cfg *cfg)
+{
+ u32 *data = (u32 *) &cfg->data;
+ int i;
+
+ for (i = 0; i < (sizeof(cfg->data) / 4); i++)
+ data[i] = swab32(data[i]);
+}
+
+static void config_data_le_to_cpu(struct rsxx_card_cfg *cfg)
+{
+ u32 *data = (u32 *) &cfg->data;
+ int i;
+
+ for (i = 0; i < (sizeof(cfg->data) / 4); i++)
+ data[i] = le32_to_cpu((__force __le32) data[i]);
+}
+
+static void config_data_cpu_to_le(struct rsxx_card_cfg *cfg)
+{
+ u32 *data = (u32 *) &cfg->data;
+ int i;
+
+ for (i = 0; i < (sizeof(cfg->data) / 4); i++)
+ data[i] = (__force u32) cpu_to_le32(data[i]);
+}
+
+
+/*----------------- Config Operations ------------------*/
+static int rsxx_save_config(struct rsxx_cardinfo *card)
+{
+ struct rsxx_card_cfg cfg;
+ int st;
+
+ memcpy(&cfg, &card->config, sizeof(cfg));
+
+ if (unlikely(cfg.hdr.version != RSXX_CFG_VERSION)) {
+ dev_err(CARD_TO_DEV(card),
+ "Cannot save config with invalid version %d\n",
+ cfg.hdr.version);
+ return -EINVAL;
+ }
+
+ /* Convert data to little endian for the CRC calculation. */
+ config_data_cpu_to_le(&cfg);
+
+ cfg.hdr.crc = config_data_crc32(&cfg);
+
+ /*
+ * Swap the data from little endian to big endian so it can be
+ * stored.
+ */
+ config_data_swab(&cfg);
+ config_hdr_cpu_to_be(&cfg.hdr);
+
+ st = rsxx_creg_write(card, CREG_ADD_CONFIG, sizeof(cfg), &cfg, 1);
+ if (st)
+ return st;
+
+ return 0;
+}
+
+int rsxx_load_config(struct rsxx_cardinfo *card)
+{
+ int st;
+ u32 crc;
+
+ st = rsxx_creg_read(card, CREG_ADD_CONFIG, sizeof(card->config),
+ &card->config, 1);
+ if (st) {
+ dev_err(CARD_TO_DEV(card),
+ "Failed reading card config.\n");
+ return st;
+ }
+
+ config_hdr_be_to_cpu(&card->config.hdr);
+
+ if (card->config.hdr.version == RSXX_CFG_VERSION) {
+ /*
+ * We calculate the CRC with the data in little endian, because
+ * early drivers did not take big endian CPUs into account.
+ * The data is always stored in big endian, so we need to byte
+ * swap it before calculating the CRC.
+ */
+
+ config_data_swab(&card->config);
+
+ /* Check the CRC */
+ crc = config_data_crc32(&card->config);
+ if (crc != card->config.hdr.crc) {
+ dev_err(CARD_TO_DEV(card),
+ "Config corruption detected!\n");
+ dev_info(CARD_TO_DEV(card),
+ "CRC (sb x%08x is x%08x)\n",
+ card->config.hdr.crc, crc);
+ return -EIO;
+ }
+
+ /* Convert the data to CPU byteorder */
+ config_data_le_to_cpu(&card->config);
+
+ } else if (card->config.hdr.version != 0) {
+ dev_err(CARD_TO_DEV(card),
+ "Invalid config version %d.\n",
+ card->config.hdr.version);
+ /*
+ * Config version changes require special handling from the
+ * user
+ */
+ return -EINVAL;
+ } else {
+ dev_info(CARD_TO_DEV(card),
+ "Initializing card configuration.\n");
+ initialize_config(card);
+ st = rsxx_save_config(card);
+ if (st)
+ return st;
+ }
+
+ card->config_valid = 1;
+
+ dev_dbg(CARD_TO_DEV(card), "version: x%08x\n",
+ card->config.hdr.version);
+ dev_dbg(CARD_TO_DEV(card), "crc: x%08x\n",
+ card->config.hdr.crc);
+ dev_dbg(CARD_TO_DEV(card), "block_size: x%08x\n",
+ card->config.data.block_size);
+ dev_dbg(CARD_TO_DEV(card), "stripe_size: x%08x\n",
+ card->config.data.stripe_size);
+ dev_dbg(CARD_TO_DEV(card), "vendor_id: x%08x\n",
+ card->config.data.vendor_id);
+ dev_dbg(CARD_TO_DEV(card), "cache_order: x%08x\n",
+ card->config.data.cache_order);
+ dev_dbg(CARD_TO_DEV(card), "mode: x%08x\n",
+ card->config.data.intr_coal.mode);
+ dev_dbg(CARD_TO_DEV(card), "count: x%08x\n",
+ card->config.data.intr_coal.count);
+ dev_dbg(CARD_TO_DEV(card), "latency: x%08x\n",
+ card->config.data.intr_coal.latency);
+
+ return 0;
+}
+
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
new file mode 100644
index 000000000000..e5162487686a
--- /dev/null
+++ b/drivers/block/rsxx/core.c
@@ -0,0 +1,649 @@
+/*
+* Filename: core.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+
+#include <linux/genhd.h>
+#include <linux/idr.h>
+
+#include "rsxx_priv.h"
+#include "rsxx_cfg.h"
+
+#define NO_LEGACY 0
+
+MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
+MODULE_AUTHOR("IBM <support@ramsan.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+static unsigned int force_legacy = NO_LEGACY;
+module_param(force_legacy, uint, 0444);
+MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");
+
+static DEFINE_IDA(rsxx_disk_ida);
+static DEFINE_SPINLOCK(rsxx_ida_lock);
+
+/*----------------- Interrupt Control & Handling -------------------*/
+static void __enable_intr(unsigned int *mask, unsigned int intr)
+{
+ *mask |= intr;
+}
+
+static void __disable_intr(unsigned int *mask, unsigned int intr)
+{
+ *mask &= ~intr;
+}
+
+/*
+ * NOTE: Disabling the IER will disable the hardware interrupt.
+ * Disabling the ISR will disable the software handling of the ISR bit.
+ *
+ * Enable/Disable interrupt functions assume the card->irq_lock
+ * is held by the caller.
+ */
+void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
+{
+ if (unlikely(card->halt))
+ return;
+
+ __enable_intr(&card->ier_mask, intr);
+ iowrite32(card->ier_mask, card->regmap + IER);
+}
+
+void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
+{
+ __disable_intr(&card->ier_mask, intr);
+ iowrite32(card->ier_mask, card->regmap + IER);
+}
+
+void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
+ unsigned int intr)
+{
+ if (unlikely(card->halt))
+ return;
+
+ __enable_intr(&card->isr_mask, intr);
+ __enable_intr(&card->ier_mask, intr);
+ iowrite32(card->ier_mask, card->regmap + IER);
+}
+void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
+ unsigned int intr)
+{
+ __disable_intr(&card->isr_mask, intr);
+ __disable_intr(&card->ier_mask, intr);
+ iowrite32(card->ier_mask, card->regmap + IER);
+}
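As the comment above says, these helpers assume card->irq_lock is held by the caller; a minimal sketch of the expected calling pattern (the same pattern the probe and event-handler paths use later in this file):

	unsigned long flags;

	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);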
+
+static irqreturn_t rsxx_isr(int irq, void *pdata)
+{
+ struct rsxx_cardinfo *card = pdata;
+ unsigned int isr;
+ int handled = 0;
+ int reread_isr;
+ int i;
+
+ spin_lock(&card->irq_lock);
+
+ do {
+ reread_isr = 0;
+
+ isr = ioread32(card->regmap + ISR);
+ if (isr == 0xffffffff) {
+ /*
+ * A few systems seem to have an intermittent issue
+ * where PCI reads return all Fs, but retrying the read
+ * a little later will return as expected.
+ */
+ dev_info(CARD_TO_DEV(card),
+ "ISR = 0xFFFFFFFF, retrying later\n");
+ break;
+ }
+
+ isr &= card->isr_mask;
+ if (!isr)
+ break;
+
+ for (i = 0; i < card->n_targets; i++) {
+ if (isr & CR_INTR_DMA(i)) {
+ if (card->ier_mask & CR_INTR_DMA(i)) {
+ rsxx_disable_ier(card, CR_INTR_DMA(i));
+ reread_isr = 1;
+ }
+ queue_work(card->ctrl[i].done_wq,
+ &card->ctrl[i].dma_done_work);
+ handled++;
+ }
+ }
+
+ if (isr & CR_INTR_CREG) {
+ schedule_work(&card->creg_ctrl.done_work);
+ handled++;
+ }
+
+ if (isr & CR_INTR_EVENT) {
+ schedule_work(&card->event_work);
+ rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
+ handled++;
+ }
+ } while (reread_isr);
+
+ spin_unlock(&card->irq_lock);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*----------------- Card Event Handler -------------------*/
+static char *rsxx_card_state_to_str(unsigned int state)
+{
+ static char *state_strings[] = {
+ "Unknown", "Shutdown", "Starting", "Formatting",
+ "Uninitialized", "Good", "Shutting Down",
+ "Fault", "Read Only Fault", "dStroying"
+ };
+
+ return state_strings[ffs(state)];
+}
+
+static void card_state_change(struct rsxx_cardinfo *card,
+ unsigned int new_state)
+{
+ int st;
+
+ dev_info(CARD_TO_DEV(card),
+ "card state change detected.(%s -> %s)\n",
+ rsxx_card_state_to_str(card->state),
+ rsxx_card_state_to_str(new_state));
+
+ card->state = new_state;
+
+ /* Don't attach DMA interfaces if the card has an invalid config */
+ if (!card->config_valid)
+ return;
+
+ switch (new_state) {
+ case CARD_STATE_RD_ONLY_FAULT:
+ dev_crit(CARD_TO_DEV(card),
+ "Hardware has entered read-only mode!\n");
+ /*
+ * Fall through so the DMA devices can be attached and
+ * the user can attempt to pull off their data.
+ */
+ case CARD_STATE_GOOD:
+ st = rsxx_get_card_size8(card, &card->size8);
+ if (st)
+ dev_err(CARD_TO_DEV(card),
+ "Failed attaching DMA devices\n");
+
+ if (card->config_valid)
+ set_capacity(card->gendisk, card->size8 >> 9);
+ break;
+
+ case CARD_STATE_FAULT:
+ dev_crit(CARD_TO_DEV(card),
+ "Hardware Fault reported!\n");
+ /* Fall through. */
+
+ /* Everything else, detach DMA interface if it's attached. */
+ case CARD_STATE_SHUTDOWN:
+ case CARD_STATE_STARTING:
+ case CARD_STATE_FORMATTING:
+ case CARD_STATE_UNINITIALIZED:
+ case CARD_STATE_SHUTTING_DOWN:
+ /*
+ * dStroy is a term coined by marketing to represent the low level
+ * secure erase.
+ */
+ case CARD_STATE_DSTROYING:
+ set_capacity(card->gendisk, 0);
+ break;
+ }
+}
+
+static void card_event_handler(struct work_struct *work)
+{
+ struct rsxx_cardinfo *card;
+ unsigned int state;
+ unsigned long flags;
+ int st;
+
+ card = container_of(work, struct rsxx_cardinfo, event_work);
+
+ if (unlikely(card->halt))
+ return;
+
+ /*
+ * Enable the interrupt now to avoid any weird race conditions where a
+ * state change might occur while rsxx_get_card_state() is
+ * processing a returned creg cmd.
+ */
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+
+ st = rsxx_get_card_state(card, &state);
+ if (st) {
+ dev_info(CARD_TO_DEV(card),
+ "Failed reading state after event.\n");
+ return;
+ }
+
+ if (card->state != state)
+ card_state_change(card, state);
+
+ if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
+ rsxx_read_hw_log(card);
+}
+
+/*----------------- Card Operations -------------------*/
+static int card_shutdown(struct rsxx_cardinfo *card)
+{
+ unsigned int state;
+ signed long start;
+ const int timeout = msecs_to_jiffies(120000);
+ int st;
+
+ /* We can't issue a shutdown if the card is in a transition state */
+ start = jiffies;
+ do {
+ st = rsxx_get_card_state(card, &state);
+ if (st)
+ return st;
+ } while (state == CARD_STATE_STARTING &&
+ (jiffies - start < timeout));
+
+ if (state == CARD_STATE_STARTING)
+ return -ETIMEDOUT;
+
+ /* Only issue a shutdown if we need to */
+ if ((state != CARD_STATE_SHUTTING_DOWN) &&
+ (state != CARD_STATE_SHUTDOWN)) {
+ st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
+ if (st)
+ return st;
+ }
+
+ start = jiffies;
+ do {
+ st = rsxx_get_card_state(card, &state);
+ if (st)
+ return st;
+ } while (state != CARD_STATE_SHUTDOWN &&
+ (jiffies - start < timeout));
+
+ if (state != CARD_STATE_SHUTDOWN)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/*----------------- Driver Initialization & Setup -------------------*/
+/* Returns: 0 if the driver is compatible with the device
+ -1 if the driver is NOT compatible with the device */
+static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
+{
+ unsigned char pci_rev;
+
+ pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
+
+ if (pci_rev > RS70_PCI_REV_SUPPORTED)
+ return -1;
+ return 0;
+}
+
+static int rsxx_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ struct rsxx_cardinfo *card;
+ int st;
+
+ dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
+
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->dev = dev;
+ pci_set_drvdata(dev, card);
+
+ do {
+ if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) {
+ st = -ENOMEM;
+ goto failed_ida_get;
+ }
+
+ spin_lock(&rsxx_ida_lock);
+ st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
+ spin_unlock(&rsxx_ida_lock);
+ } while (st == -EAGAIN);
+
+ if (st)
+ goto failed_ida_get;
+
+ st = pci_enable_device(dev);
+ if (st)
+ goto failed_enable;
+
+ pci_set_master(dev);
+ pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);
+
+ st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
+ if (st) {
+ dev_err(CARD_TO_DEV(card),
+ "No usable DMA configuration,aborting\n");
+ goto failed_dma_mask;
+ }
+
+ st = pci_request_regions(dev, DRIVER_NAME);
+ if (st) {
+ dev_err(CARD_TO_DEV(card),
+ "Failed to request memory region\n");
+ goto failed_request_regions;
+ }
+
+ if (pci_resource_len(dev, 0) == 0) {
+ dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
+ st = -ENOMEM;
+ goto failed_iomap;
+ }
+
+ card->regmap = pci_iomap(dev, 0, 0);
+ if (!card->regmap) {
+ dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
+ st = -ENOMEM;
+ goto failed_iomap;
+ }
+
+ spin_lock_init(&card->irq_lock);
+ card->halt = 0;
+
+ spin_lock_irq(&card->irq_lock);
+ rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
+ spin_unlock_irq(&card->irq_lock);
+
+ if (!force_legacy) {
+ st = pci_enable_msi(dev);
+ if (st)
+ dev_warn(CARD_TO_DEV(card),
+ "Failed to enable MSI\n");
+ }
+
+ st = request_irq(dev->irq, rsxx_isr, IRQF_DISABLED | IRQF_SHARED,
+ DRIVER_NAME, card);
+ if (st) {
+ dev_err(CARD_TO_DEV(card),
+ "Failed requesting IRQ%d\n", dev->irq);
+ goto failed_irq;
+ }
+
+ /************* Setup Processor Command Interface *************/
+ rsxx_creg_setup(card);
+
+ spin_lock_irq(&card->irq_lock);
+ rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
+ spin_unlock_irq(&card->irq_lock);
+
+ st = rsxx_compatibility_check(card);
+ if (st) {
+ dev_warn(CARD_TO_DEV(card),
+ "Incompatible driver detected. Please update the driver.\n");
+ st = -EINVAL;
+ goto failed_compatibility_check;
+ }
+
+ /************* Load Card Config *************/
+ st = rsxx_load_config(card);
+ if (st)
+ dev_err(CARD_TO_DEV(card),
+ "Failed loading card config\n");
+
+ /************* Setup DMA Engine *************/
+ st = rsxx_get_num_targets(card, &card->n_targets);
+ if (st)
+ dev_info(CARD_TO_DEV(card),
+ "Failed reading the number of DMA targets\n");
+
+ card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL);
+ if (!card->ctrl) {
+ st = -ENOMEM;
+ goto failed_dma_setup;
+ }
+
+ st = rsxx_dma_setup(card);
+ if (st) {
+ dev_info(CARD_TO_DEV(card),
+ "Failed to setup DMA engine\n");
+ goto failed_dma_setup;
+ }
+
+ /************* Setup Card Event Handler *************/
+ INIT_WORK(&card->event_work, card_event_handler);
+
+ st = rsxx_setup_dev(card);
+ if (st)
+ goto failed_create_dev;
+
+ rsxx_get_card_state(card, &card->state);
+
+ dev_info(CARD_TO_DEV(card),
+ "card state: %s\n",
+ rsxx_card_state_to_str(card->state));
+
+ /*
+ * Now that the DMA engine and devices have been set up, we can
+ * enable the event interrupt (it kicks off actions in those layers,
+ * so we couldn't enable it right away).
+ */
+ spin_lock_irq(&card->irq_lock);
+ rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
+ spin_unlock_irq(&card->irq_lock);
+
+ if (card->state == CARD_STATE_SHUTDOWN) {
+ st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
+ if (st)
+ dev_crit(CARD_TO_DEV(card),
+ "Failed issuing card startup\n");
+ } else if (card->state == CARD_STATE_GOOD ||
+ card->state == CARD_STATE_RD_ONLY_FAULT) {
+ st = rsxx_get_card_size8(card, &card->size8);
+ if (st)
+ card->size8 = 0;
+ }
+
+ rsxx_attach_dev(card);
+
+ return 0;
+
+failed_create_dev:
+ rsxx_dma_destroy(card);
+failed_dma_setup:
+failed_compatibility_check:
+ spin_lock_irq(&card->irq_lock);
+ rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
+ spin_unlock_irq(&card->irq_lock);
+ free_irq(dev->irq, card);
+ if (!force_legacy)
+ pci_disable_msi(dev);
+failed_irq:
+ pci_iounmap(dev, card->regmap);
+failed_iomap:
+ pci_release_regions(dev);
+failed_request_regions:
+failed_dma_mask:
+ pci_disable_device(dev);
+failed_enable:
+ spin_lock(&rsxx_ida_lock);
+ ida_remove(&rsxx_disk_ida, card->disk_id);
+ spin_unlock(&rsxx_ida_lock);
+failed_ida_get:
+ kfree(card);
+
+ return st;
+}
+
+static void rsxx_pci_remove(struct pci_dev *dev)
+{
+ struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+ unsigned long flags;
+ int st;
+ int i;
+
+ if (!card)
+ return;
+
+ dev_info(CARD_TO_DEV(card),
+ "Removing PCI-Flash SSD.\n");
+
+ rsxx_detach_dev(card);
+
+ for (i = 0; i < card->n_targets; i++) {
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+ }
+
+ st = card_shutdown(card);
+ if (st)
+ dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");
+
+ /* Sync outstanding event handlers. */
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+
+ /* Prevent work_structs from re-queuing themselves. */
+ card->halt = 1;
+
+ cancel_work_sync(&card->event_work);
+
+ rsxx_destroy_dev(card);
+ rsxx_dma_destroy(card);
+
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+ free_irq(dev->irq, card);
+
+ if (!force_legacy)
+ pci_disable_msi(dev);
+
+ rsxx_creg_destroy(card);
+
+ pci_iounmap(dev, card->regmap);
+
+ pci_disable_device(dev);
+ pci_release_regions(dev);
+
+ kfree(card);
+}
+
+static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ /* We don't support suspend at this time. */
+ return -ENOSYS;
+}
+
+static void rsxx_pci_shutdown(struct pci_dev *dev)
+{
+ struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+ unsigned long flags;
+ int i;
+
+ if (!card)
+ return;
+
+ dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");
+
+ rsxx_detach_dev(card);
+
+ for (i = 0; i < card->n_targets; i++) {
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+ }
+
+ card_shutdown(card);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);
+
+static struct pci_driver rsxx_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = rsxx_pci_ids,
+ .probe = rsxx_pci_probe,
+ .remove = rsxx_pci_remove,
+ .suspend = rsxx_pci_suspend,
+ .shutdown = rsxx_pci_shutdown,
+};
+
+static int __init rsxx_core_init(void)
+{
+ int st;
+
+ st = rsxx_dev_init();
+ if (st)
+ return st;
+
+ st = rsxx_dma_init();
+ if (st)
+ goto dma_init_failed;
+
+ st = rsxx_creg_init();
+ if (st)
+ goto creg_init_failed;
+
+ return pci_register_driver(&rsxx_pci_driver);
+
+creg_init_failed:
+ rsxx_dma_cleanup();
+dma_init_failed:
+ rsxx_dev_cleanup();
+
+ return st;
+}
+
+static void __exit rsxx_core_cleanup(void)
+{
+ pci_unregister_driver(&rsxx_pci_driver);
+ rsxx_creg_cleanup();
+ rsxx_dma_cleanup();
+ rsxx_dev_cleanup();
+}
+
+module_init(rsxx_core_init);
+module_exit(rsxx_core_cleanup);
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
new file mode 100644
index 000000000000..80bbe639fccd
--- /dev/null
+++ b/drivers/block/rsxx/cregs.c
@@ -0,0 +1,758 @@
+/*
+* Filename: cregs.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/completion.h>
+#include <linux/slab.h>
+
+#include "rsxx_priv.h"
+
+#define CREG_TIMEOUT_MSEC 10000
+
+typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
+ struct creg_cmd *cmd,
+ int st);
+
+struct creg_cmd {
+ struct list_head list;
+ creg_cmd_cb cb;
+ void *cb_private;
+ unsigned int op;
+ unsigned int addr;
+ int cnt8;
+ void *buf;
+ unsigned int stream;
+ unsigned int status;
+};
+
+static struct kmem_cache *creg_cmd_pool;
+
+
+/*------------ Private Functions --------------*/
+
+#if defined(__LITTLE_ENDIAN)
+#define LITTLE_ENDIAN 1
+#elif defined(__BIG_ENDIAN)
+#define LITTLE_ENDIAN 0
+#else
+#error Unknown endianness!!! Aborting...
+#endif
+
+static void copy_to_creg_data(struct rsxx_cardinfo *card,
+ int cnt8,
+ void *buf,
+ unsigned int stream)
+{
+ int i = 0;
+ u32 *data = buf;
+
+ for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
+ /*
+ * Firmware implementation makes it necessary to byte swap on
+ * little endian processors.
+ */
+ if (LITTLE_ENDIAN && stream)
+ iowrite32be(data[i], card->regmap + CREG_DATA(i));
+ else
+ iowrite32(data[i], card->regmap + CREG_DATA(i));
+ }
+}
+
+
+static void copy_from_creg_data(struct rsxx_cardinfo *card,
+ int cnt8,
+ void *buf,
+ unsigned int stream)
+{
+ int i = 0;
+ u32 *data = buf;
+
+ for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
+ /*
+ * Firmware implementation makes it necessary to byte swap on
+ * little endian processors.
+ */
+ if (LITTLE_ENDIAN && stream)
+ data[i] = ioread32be(card->regmap + CREG_DATA(i));
+ else
+ data[i] = ioread32(card->regmap + CREG_DATA(i));
+ }
+}
+
+static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
+{
+ struct creg_cmd *cmd;
+
+ /*
+ * Spin lock is needed because this can be called in atomic/interrupt
+ * context.
+ */
+ spin_lock_bh(&card->creg_ctrl.lock);
+ cmd = card->creg_ctrl.active_cmd;
+ card->creg_ctrl.active_cmd = NULL;
+ spin_unlock_bh(&card->creg_ctrl.lock);
+
+ return cmd;
+}
+
+static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
+{
+ iowrite32(cmd->addr, card->regmap + CREG_ADD);
+ iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
+
+ if (cmd->op == CREG_OP_WRITE) {
+ if (cmd->buf)
+ copy_to_creg_data(card, cmd->cnt8,
+ cmd->buf, cmd->stream);
+ }
+
+ /*
+ * Data copy must complete before initiating the command. This is
+ * needed for weakly ordered processors (e.g. PowerPC), so that all
+ * necessary registers are written before we kick the hardware.
+ */
+ wmb();
+
+ /* Setting the valid bit will kick off the command. */
+ iowrite32(cmd->op, card->regmap + CREG_CMD);
+}
+
+static void creg_kick_queue(struct rsxx_cardinfo *card)
+{
+ if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
+ return;
+
+ card->creg_ctrl.active = 1;
+ card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
+ struct creg_cmd, list);
+ list_del(&card->creg_ctrl.active_cmd->list);
+ card->creg_ctrl.q_depth--;
+
+ /*
+ * We have to set the timer before we push the new command. Otherwise,
+ * we could create a race condition that would occur if the timer
+ * was not canceled, and expired after the new command was pushed,
+ * but before the command was issued to hardware.
+ */
+ mod_timer(&card->creg_ctrl.cmd_timer,
+ jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));
+
+ creg_issue_cmd(card, card->creg_ctrl.active_cmd);
+}
+
+static int creg_queue_cmd(struct rsxx_cardinfo *card,
+ unsigned int op,
+ unsigned int addr,
+ unsigned int cnt8,
+ void *buf,
+ int stream,
+ creg_cmd_cb callback,
+ void *cb_private)
+{
+ struct creg_cmd *cmd;
+
+ /* Don't queue stuff up if we're halted. */
+ if (unlikely(card->halt))
+ return -EINVAL;
+
+ if (card->creg_ctrl.reset)
+ return -EAGAIN;
+
+ if (cnt8 > MAX_CREG_DATA8)
+ return -EINVAL;
+
+ cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cmd->list);
+
+ cmd->op = op;
+ cmd->addr = addr;
+ cmd->cnt8 = cnt8;
+ cmd->buf = buf;
+ cmd->stream = stream;
+ cmd->cb = callback;
+ cmd->cb_private = cb_private;
+ cmd->status = 0;
+
+ spin_lock(&card->creg_ctrl.lock);
+ list_add_tail(&cmd->list, &card->creg_ctrl.queue);
+ card->creg_ctrl.q_depth++;
+ creg_kick_queue(card);
+ spin_unlock(&card->creg_ctrl.lock);
+
+ return 0;
+}
+
+static void creg_cmd_timed_out(unsigned long data)
+{
+ struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
+ struct creg_cmd *cmd;
+
+ cmd = pop_active_cmd(card);
+ if (cmd == NULL) {
+ card->creg_ctrl.creg_stats.creg_timeout++;
+ dev_warn(CARD_TO_DEV(card),
+ "No active command associated with timeout!\n");
+ return;
+ }
+
+ if (cmd->cb)
+ cmd->cb(card, cmd, -ETIMEDOUT);
+
+ kmem_cache_free(creg_cmd_pool, cmd);
+
+
+ spin_lock(&card->creg_ctrl.lock);
+ card->creg_ctrl.active = 0;
+ creg_kick_queue(card);
+ spin_unlock(&card->creg_ctrl.lock);
+}
+
+
+static void creg_cmd_done(struct work_struct *work)
+{
+ struct rsxx_cardinfo *card;
+ struct creg_cmd *cmd;
+ int st = 0;
+
+ card = container_of(work, struct rsxx_cardinfo,
+ creg_ctrl.done_work);
+
+ /*
+ * If the timer could not be cancelled (it may have already fired),
+ * race with the timeout handler to pop the active command.
+ */
+ if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
+ card->creg_ctrl.creg_stats.failed_cancel_timer++;
+
+ cmd = pop_active_cmd(card);
+ if (cmd == NULL) {
+ dev_err(CARD_TO_DEV(card),
+ "Spurious creg interrupt!\n");
+ return;
+ }
+
+ card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
+ cmd->status = card->creg_ctrl.creg_stats.stat;
+ if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
+ dev_err(CARD_TO_DEV(card),
+ "Invalid status on creg command\n");
+ /*
+ * At this point we're probably reading garbage from HW. Don't
+ * do anything else that could mess up the system and let
+ * the sync function return an error.
+ */
+ st = -EIO;
+ goto creg_done;
+ } else if (cmd->status & CREG_STAT_ERROR) {
+ st = -EIO;
+ }
+
+ if ((cmd->op == CREG_OP_READ)) {
+ unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
+
+ /* Paranoid Sanity Checks */
+ if (!cmd->buf) {
+ dev_err(CARD_TO_DEV(card),
+ "Buffer not given for read.\n");
+ st = -EIO;
+ goto creg_done;
+ }
+ if (cnt8 != cmd->cnt8) {
+ dev_err(CARD_TO_DEV(card),
+ "count mismatch\n");
+ st = -EIO;
+ goto creg_done;
+ }
+
+ copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
+ }
+
+creg_done:
+ if (cmd->cb)
+ cmd->cb(card, cmd, st);
+
+ kmem_cache_free(creg_cmd_pool, cmd);
+
+ spin_lock(&card->creg_ctrl.lock);
+ card->creg_ctrl.active = 0;
+ creg_kick_queue(card);
+ spin_unlock(&card->creg_ctrl.lock);
+}
+
+static void creg_reset(struct rsxx_cardinfo *card)
+{
+ struct creg_cmd *cmd = NULL;
+ struct creg_cmd *tmp;
+ unsigned long flags;
+
+ /*
+ * mutex_trylock is used here because if reset_lock is already taken,
+ * a reset is already in progress and we can simply return.
+ */
+ if (!mutex_trylock(&card->creg_ctrl.reset_lock))
+ return;
+
+ card->creg_ctrl.reset = 1;
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+
+ dev_warn(CARD_TO_DEV(card),
+ "Resetting creg interface for recovery\n");
+
+ /* Cancel outstanding commands */
+ spin_lock(&card->creg_ctrl.lock);
+ list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
+ list_del(&cmd->list);
+ card->creg_ctrl.q_depth--;
+ if (cmd->cb)
+ cmd->cb(card, cmd, -ECANCELED);
+ kmem_cache_free(creg_cmd_pool, cmd);
+ }
+
+ cmd = card->creg_ctrl.active_cmd;
+ card->creg_ctrl.active_cmd = NULL;
+ if (cmd) {
+ if (timer_pending(&card->creg_ctrl.cmd_timer))
+ del_timer_sync(&card->creg_ctrl.cmd_timer);
+
+ if (cmd->cb)
+ cmd->cb(card, cmd, -ECANCELED);
+ kmem_cache_free(creg_cmd_pool, cmd);
+
+ card->creg_ctrl.active = 0;
+ }
+ spin_unlock(&card->creg_ctrl.lock);
+
+ card->creg_ctrl.reset = 0;
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+
+ mutex_unlock(&card->creg_ctrl.reset_lock);
+}
+
+/* Used for synchronous accesses */
+struct creg_completion {
+ struct completion *cmd_done;
+ int st;
+ u32 creg_status;
+};
+
+static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
+ struct creg_cmd *cmd,
+ int st)
+{
+ struct creg_completion *cmd_completion;
+
+ cmd_completion = cmd->cb_private;
+ BUG_ON(!cmd_completion);
+
+ cmd_completion->st = st;
+ cmd_completion->creg_status = cmd->status;
+ complete(cmd_completion->cmd_done);
+}
+
+static int __issue_creg_rw(struct rsxx_cardinfo *card,
+ unsigned int op,
+ unsigned int addr,
+ unsigned int cnt8,
+ void *buf,
+ int stream,
+ unsigned int *hw_stat)
+{
+ DECLARE_COMPLETION_ONSTACK(cmd_done);
+ struct creg_completion completion;
+ unsigned long timeout;
+ int st;
+
+ completion.cmd_done = &cmd_done;
+ completion.st = 0;
+ completion.creg_status = 0;
+
+ st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
+ &completion);
+ if (st)
+ return st;
+
+ /*
+ * This timeout is necessary for unresponsive hardware. The additional
+ * 20 seconds is used to guarantee that each queued creg request has
+ * time to complete.
+ */
+ timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
+ card->creg_ctrl.q_depth) + 20000);
+
+ /*
+ * The creg interface is guaranteed to complete. It has a timeout
+ * mechanism that will kick in if hardware does not respond.
+ */
+ st = wait_for_completion_timeout(completion.cmd_done, timeout);
+ if (st == 0) {
+ /*
+ * This is really bad, because the kernel timer did not
+ * expire and notify us of a timeout!
+ */
+ dev_crit(CARD_TO_DEV(card),
+ "cregs timer failed\n");
+ creg_reset(card);
+ return -EIO;
+ }
+
+ *hw_stat = completion.creg_status;
+
+ if (completion.st) {
+ dev_warn(CARD_TO_DEV(card),
+ "creg command failed(%d x%08x)\n",
+ completion.st, addr);
+ return completion.st;
+ }
+
+ return 0;
+}
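To make the timeout sizing above concrete, a hypothetical example: with CREG_TIMEOUT_MSEC defined as 10000 earlier in this file and three commands already queued, the caller waits up to (10000 * 3) + 20000 = 50000 ms before creg_reset() is invoked:

	/* Hypothetical sizing with q_depth == 3 */
	unsigned long timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC * 3) + 20000);	/* 50 s */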
+
+static int issue_creg_rw(struct rsxx_cardinfo *card,
+ u32 addr,
+ unsigned int size8,
+ void *data,
+ int stream,
+ int read)
+{
+ unsigned int hw_stat;
+ unsigned int xfer;
+ unsigned int op;
+ int st;
+
+ op = read ? CREG_OP_READ : CREG_OP_WRITE;
+
+ do {
+ xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);
+
+ st = __issue_creg_rw(card, op, addr, xfer,
+ data, stream, &hw_stat);
+ if (st)
+ return st;
+
+ data = (char *)data + xfer;
+ addr += xfer;
+ size8 -= xfer;
+ } while (size8);
+
+ return 0;
+}
+
+/* ---------------------------- Public API ---------------------------------- */
+int rsxx_creg_write(struct rsxx_cardinfo *card,
+ u32 addr,
+ unsigned int size8,
+ void *data,
+ int byte_stream)
+{
+ return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
+}
+
+int rsxx_creg_read(struct rsxx_cardinfo *card,
+ u32 addr,
+ unsigned int size8,
+ void *data,
+ int byte_stream)
+{
+ return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
+}
+
+int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
+{
+ return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
+ sizeof(*state), state, 0);
+}
+
+int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
+{
+ unsigned int size;
+ int st;
+
+ st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
+ sizeof(size), &size, 0);
+ if (st)
+ return st;
+
+ *size8 = (u64)size * RSXX_HW_BLK_SIZE;
+ return 0;
+}
+
+int rsxx_get_num_targets(struct rsxx_cardinfo *card,
+ unsigned int *n_targets)
+{
+ return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
+ sizeof(*n_targets), n_targets, 0);
+}
+
+int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
+ u32 *capabilities)
+{
+ return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
+ sizeof(*capabilities), capabilities, 0);
+}
+
+int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
+{
+ return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
+ sizeof(cmd), &cmd, 0);
+}
+
+
+/*----------------- HW Log Functions -------------------*/
+static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
+{
+ static char level;
+
+ /*
+ * New messages start with "<#>", where # is the log level. Messages
+ * that extend past the log buffer will use the previous level
+ */
+ if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
+ level = str[1];
+ str += 3; /* Skip past the log level. */
+ len -= 3;
+ }
+
+ switch (level) {
+ case '0':
+ dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '1':
+ dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '2':
+ dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '3':
+ dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '4':
+ dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '5':
+ dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '6':
+ dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '7':
+ dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ default:
+ dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ }
+}
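
A userspace sketch of the "<#>" prefix handling above: a message carrying a
level prefix updates the sticky level, and a continuation without a prefix
reuses it. The sample strings and the default level are assumptions.

#include <stdio.h>

static void example_log(const char *str, int len)
{
	static char level = '6';	/* assumed default: info */

	if (len > 3 && str[0] == '<' && str[2] == '>') {
		level = str[1];		/* remember the new level */
		str += 3;		/* skip past the "<#>" prefix */
		len -= 3;
	}
	printf("level %c: %.*s\n", level, len, str);
}

int main(void)
{
	example_log("<3>flash module fault", 21);	/* printed at level 3 */
	example_log("continuation text", 17);		/* reuses level 3 */
	return 0;
}
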
+
+/*
+ * The substrncpy() function copies the src string (including the
+ * terminating '\0' character) into dest, copying at most count bytes.
+ * Returns the number of bytes copied to dest.
+ */
+static int substrncpy(char *dest, const char *src, int count)
+{
+ int max_cnt = count;
+
+ while (count) {
+ count--;
+ *dest = *src;
+ if (*dest == '\0')
+ break;
+ src++;
+ dest++;
+ }
+ return max_cnt - count;
+}
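
A minimal usage sketch of the copy semantics above, showing that the returned
count includes the terminating '\0' when one is found within count bytes.

#include <stdio.h>

/* Same logic as substrncpy(), reproduced here only to demonstrate the
 * return value. */
static int example_substrncpy(char *dest, const char *src, int count)
{
	int max_cnt = count;

	while (count) {
		count--;
		*dest = *src;
		if (*dest == '\0')
			break;
		src++;
		dest++;
	}
	return max_cnt - count;
}

int main(void)
{
	char buf[16];
	int n = example_substrncpy(buf, "abc", 8);

	printf("copied %d bytes: \"%s\"\n", n, buf);	/* copied 4 bytes: "abc" */
	return 0;
}
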
+
+
+static void read_hw_log_done(struct rsxx_cardinfo *card,
+ struct creg_cmd *cmd,
+ int st)
+{
+ char *buf;
+ char *log_str;
+ int cnt;
+ int len;
+ int off;
+
+ buf = cmd->buf;
+ off = 0;
+
+ /* Failed getting the log message */
+ if (st)
+ return;
+
+ while (off < cmd->cnt8) {
+ log_str = &card->log.buf[card->log.buf_len];
+ cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
+ len = substrncpy(log_str, &buf[off], cnt);
+
+ off += len;
+ card->log.buf_len += len;
+
+ /*
+ * Flush the log if we've hit the end of a message or if we've
+ * run out of buffer space.
+ */
+ if ((log_str[len - 1] == '\0') ||
+ (card->log.buf_len == LOG_BUF_SIZE8)) {
+ if (card->log.buf_len != 1) /* Don't log blank lines. */
+ hw_log_msg(card, card->log.buf,
+ card->log.buf_len);
+ card->log.buf_len = 0;
+ }
+
+ }
+
+ if (cmd->status & CREG_STAT_LOG_PENDING)
+ rsxx_read_hw_log(card);
+}
+
+int rsxx_read_hw_log(struct rsxx_cardinfo *card)
+{
+ int st;
+
+ st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
+ sizeof(card->log.tmp), card->log.tmp,
+ 1, read_hw_log_done, NULL);
+ if (st)
+ dev_err(CARD_TO_DEV(card),
+ "Failed getting log text\n");
+
+ return st;
+}
+
+/*-------------- IOCTL REG Access ------------------*/
+static int issue_reg_cmd(struct rsxx_cardinfo *card,
+ struct rsxx_reg_access *cmd,
+ int read)
+{
+ unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;
+
+ return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
+ cmd->stream, &cmd->stat);
+}
+
+int rsxx_reg_access(struct rsxx_cardinfo *card,
+ struct rsxx_reg_access __user *ucmd,
+ int read)
+{
+ struct rsxx_reg_access cmd;
+ int st;
+
+ st = copy_from_user(&cmd, ucmd, sizeof(cmd));
+ if (st)
+ return -EFAULT;
+
+ if (cmd.cnt > RSXX_MAX_REG_CNT)
+ return -EFAULT;
+
+ st = issue_reg_cmd(card, &cmd, read);
+ if (st)
+ return st;
+
+ st = put_user(cmd.stat, &ucmd->stat);
+ if (st)
+ return -EFAULT;
+
+ if (read) {
+ st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
+ if (st)
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*------------ Initialization & Setup --------------*/
+int rsxx_creg_setup(struct rsxx_cardinfo *card)
+{
+ card->creg_ctrl.active_cmd = NULL;
+
+ INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
+ mutex_init(&card->creg_ctrl.reset_lock);
+ INIT_LIST_HEAD(&card->creg_ctrl.queue);
+ spin_lock_init(&card->creg_ctrl.lock);
+ setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
+ (unsigned long) card);
+
+ return 0;
+}
+
+void rsxx_creg_destroy(struct rsxx_cardinfo *card)
+{
+ struct creg_cmd *cmd;
+ struct creg_cmd *tmp;
+ int cnt = 0;
+
+ /* Cancel outstanding commands */
+ spin_lock(&card->creg_ctrl.lock);
+ list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
+ list_del(&cmd->list);
+ if (cmd->cb)
+ cmd->cb(card, cmd, -ECANCELED);
+ kmem_cache_free(creg_cmd_pool, cmd);
+ cnt++;
+ }
+
+ if (cnt)
+ dev_info(CARD_TO_DEV(card),
+ "Canceled %d queue creg commands\n", cnt);
+
+ cmd = card->creg_ctrl.active_cmd;
+ card->creg_ctrl.active_cmd = NULL;
+ if (cmd) {
+ if (timer_pending(&card->creg_ctrl.cmd_timer))
+ del_timer_sync(&card->creg_ctrl.cmd_timer);
+
+ if (cmd->cb)
+ cmd->cb(card, cmd, -ECANCELED);
+ dev_info(CARD_TO_DEV(card),
+ "Canceled active creg command\n");
+ kmem_cache_free(creg_cmd_pool, cmd);
+ }
+ spin_unlock(&card->creg_ctrl.lock);
+
+ cancel_work_sync(&card->creg_ctrl.done_work);
+}
+
+
+int rsxx_creg_init(void)
+{
+ creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
+ if (!creg_cmd_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void rsxx_creg_cleanup(void)
+{
+ kmem_cache_destroy(creg_cmd_pool);
+}
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
new file mode 100644
index 000000000000..4346d17d2949
--- /dev/null
+++ b/drivers/block/rsxx/dev.c
@@ -0,0 +1,367 @@
+/*
+* Filename: dev.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+
+#include <linux/fs.h>
+
+#include "rsxx_priv.h"
+
+static unsigned int blkdev_minors = 64;
+module_param(blkdev_minors, uint, 0444);
+MODULE_PARM_DESC(blkdev_minors, "Number of minors (partitions)");
+
+/*
+ * For now I'm making this tweakable in case any applications hit this limit.
+ * If you see a "bio too big" error in the log, you will need to raise this
+ * value.
+ */
+static unsigned int blkdev_max_hw_sectors = 1024;
+module_param(blkdev_max_hw_sectors, uint, 0444);
+MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");
+
+static unsigned int enable_blkdev = 1;
+module_param(enable_blkdev, uint, 0444);
+MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");
+
+
+struct rsxx_bio_meta {
+ struct bio *bio;
+ atomic_t pending_dmas;
+ atomic_t error;
+ unsigned long start_time;
+};
+
+static struct kmem_cache *bio_meta_pool;
+
+/*----------------- Block Device Operations -----------------*/
+static int rsxx_blkdev_ioctl(struct block_device *bdev,
+ fmode_t mode,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
+
+ switch (cmd) {
+ case RSXX_GETREG:
+ return rsxx_reg_access(card, (void __user *)arg, 1);
+ case RSXX_SETREG:
+ return rsxx_reg_access(card, (void __user *)arg, 0);
+ }
+
+ return -ENOTTY;
+}
+
+static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
+ u64 blocks = card->size8 >> 9;
+
+ /*
+ * get geometry: Fake it. I haven't found any drivers that set
+ * geo->start, so we won't either.
+ */
+ if (card->size8) {
+ geo->heads = 64;
+ geo->sectors = 16;
+ do_div(blocks, (geo->heads * geo->sectors));
+ geo->cylinders = blocks;
+ } else {
+ geo->heads = 0;
+ geo->sectors = 0;
+ geo->cylinders = 0;
+ }
+ return 0;
+}
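
A standalone sketch of the faked CHS geometry above; the card size is an
assumption chosen only to make the arithmetic concrete.

#include <stdio.h>

int main(void)
{
	unsigned long long size8 = 8ULL << 30;		/* assumed 8 GiB card */
	unsigned long long blocks = size8 >> 9;		/* 512-byte sectors */
	unsigned int heads = 64, sectors = 16;		/* fixed fake geometry */
	unsigned long long cylinders = blocks / (heads * sectors);

	printf("CHS = %llu/%u/%u\n", cylinders, heads, sectors); /* 16384/64/16 */
	return 0;
}
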
+
+static const struct block_device_operations rsxx_fops = {
+ .owner = THIS_MODULE,
+ .getgeo = rsxx_getgeo,
+ .ioctl = rsxx_blkdev_ioctl,
+};
+
+static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
+{
+ struct hd_struct *part0 = &card->gendisk->part0;
+ int rw = bio_data_dir(bio);
+ int cpu;
+
+ cpu = part_stat_lock();
+
+ part_round_stats(cpu, part0);
+ part_inc_in_flight(part0, rw);
+
+ part_stat_unlock();
+}
+
+static void disk_stats_complete(struct rsxx_cardinfo *card,
+ struct bio *bio,
+ unsigned long start_time)
+{
+ struct hd_struct *part0 = &card->gendisk->part0;
+ unsigned long duration = jiffies - start_time;
+ int rw = bio_data_dir(bio);
+ int cpu;
+
+ cpu = part_stat_lock();
+
+ part_stat_add(cpu, part0, sectors[rw], bio_sectors(bio));
+ part_stat_inc(cpu, part0, ios[rw]);
+ part_stat_add(cpu, part0, ticks[rw], duration);
+
+ part_round_stats(cpu, part0);
+ part_dec_in_flight(part0, rw);
+
+ part_stat_unlock();
+}
+
+static void bio_dma_done_cb(struct rsxx_cardinfo *card,
+ void *cb_data,
+ unsigned int error)
+{
+ struct rsxx_bio_meta *meta = cb_data;
+
+ if (error)
+ atomic_set(&meta->error, 1);
+
+ if (atomic_dec_and_test(&meta->pending_dmas)) {
+ disk_stats_complete(card, meta->bio, meta->start_time);
+
+ bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0);
+ kmem_cache_free(bio_meta_pool, meta);
+ }
+}
+
+static void rsxx_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct rsxx_cardinfo *card = q->queuedata;
+ struct rsxx_bio_meta *bio_meta;
+ int st = -EINVAL;
+
+ might_sleep();
+
+ if (unlikely(card->halt)) {
+ st = -EFAULT;
+ goto req_err;
+ }
+
+ if (unlikely(card->dma_fault)) {
+ st = (-EFAULT);
+ goto req_err;
+ }
+
+ if (bio->bi_size == 0) {
+ dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
+ goto req_err;
+ }
+
+ bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
+ if (!bio_meta) {
+ st = -ENOMEM;
+ goto req_err;
+ }
+
+ bio_meta->bio = bio;
+ atomic_set(&bio_meta->error, 0);
+ atomic_set(&bio_meta->pending_dmas, 0);
+ bio_meta->start_time = jiffies;
+
+ disk_stats_start(card, bio);
+
+ dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
+ bio_data_dir(bio) ? 'W' : 'R', bio_meta,
+ (u64)bio->bi_sector << 9, bio->bi_size);
+
+ st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
+ bio_dma_done_cb, bio_meta);
+ if (st)
+ goto queue_err;
+
+ return;
+
+queue_err:
+ kmem_cache_free(bio_meta_pool, bio_meta);
+req_err:
+ bio_endio(bio, st);
+}
+
+/*----------------- Device Setup -------------------*/
+static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
+{
+ unsigned char pci_rev;
+
+ pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
+
+ return (pci_rev >= RSXX_DISCARD_SUPPORT);
+}
+
+static unsigned short rsxx_get_logical_block_size(
+ struct rsxx_cardinfo *card)
+{
+ u32 capabilities = 0;
+ int st;
+
+ st = rsxx_get_card_capabilities(card, &capabilities);
+ if (st)
+ dev_warn(CARD_TO_DEV(card),
+ "Failed reading card capabilities register\n");
+
+ /* Earlier firmware did not have support for 512 byte accesses */
+ if (capabilities & CARD_CAP_SUBPAGE_WRITES)
+ return 512;
+ else
+ return RSXX_HW_BLK_SIZE;
+}
+
+int rsxx_attach_dev(struct rsxx_cardinfo *card)
+{
+ mutex_lock(&card->dev_lock);
+
+ /* The block device requires the stripe size from the config. */
+ if (enable_blkdev) {
+ if (card->config_valid)
+ set_capacity(card->gendisk, card->size8 >> 9);
+ else
+ set_capacity(card->gendisk, 0);
+ add_disk(card->gendisk);
+
+ card->bdev_attached = 1;
+ }
+
+ mutex_unlock(&card->dev_lock);
+
+ return 0;
+}
+
+void rsxx_detach_dev(struct rsxx_cardinfo *card)
+{
+ mutex_lock(&card->dev_lock);
+
+ if (card->bdev_attached) {
+ del_gendisk(card->gendisk);
+ card->bdev_attached = 0;
+ }
+
+ mutex_unlock(&card->dev_lock);
+}
+
+int rsxx_setup_dev(struct rsxx_cardinfo *card)
+{
+ unsigned short blk_size;
+
+ mutex_init(&card->dev_lock);
+
+ if (!enable_blkdev)
+ return 0;
+
+ card->major = register_blkdev(0, DRIVER_NAME);
+ if (card->major < 0) {
+ dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
+ return -ENOMEM;
+ }
+
+ card->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!card->queue) {
+ dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
+ unregister_blkdev(card->major, DRIVER_NAME);
+ return -ENOMEM;
+ }
+
+ card->gendisk = alloc_disk(blkdev_minors);
+ if (!card->gendisk) {
+ dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
+ blk_cleanup_queue(card->queue);
+ unregister_blkdev(card->major, DRIVER_NAME);
+ return -ENOMEM;
+ }
+
+ blk_size = rsxx_get_logical_block_size(card);
+
+ blk_queue_make_request(card->queue, rsxx_make_request);
+ blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
+ blk_queue_dma_alignment(card->queue, blk_size - 1);
+ blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
+ blk_queue_logical_block_size(card->queue, blk_size);
+ blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
+
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
+ if (rsxx_discard_supported(card)) {
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
+ blk_queue_max_discard_sectors(card->queue,
+ RSXX_HW_BLK_SIZE >> 9);
+ card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
+ card->queue->limits.discard_alignment = RSXX_HW_BLK_SIZE;
+ card->queue->limits.discard_zeroes_data = 1;
+ }
+
+ card->queue->queuedata = card;
+
+ snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
+ "rsxx%d", card->disk_id);
+ card->gendisk->driverfs_dev = &card->dev->dev;
+ card->gendisk->major = card->major;
+ card->gendisk->first_minor = 0;
+ card->gendisk->fops = &rsxx_fops;
+ card->gendisk->private_data = card;
+ card->gendisk->queue = card->queue;
+
+ return 0;
+}
+
+void rsxx_destroy_dev(struct rsxx_cardinfo *card)
+{
+ if (!enable_blkdev)
+ return;
+
+ put_disk(card->gendisk);
+ card->gendisk = NULL;
+
+ blk_cleanup_queue(card->queue);
+ unregister_blkdev(card->major, DRIVER_NAME);
+}
+
+int rsxx_dev_init(void)
+{
+ bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);
+ if (!bio_meta_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void rsxx_dev_cleanup(void)
+{
+ kmem_cache_destroy(bio_meta_pool);
+}
+
+
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
new file mode 100644
index 000000000000..63176e67662f
--- /dev/null
+++ b/drivers/block/rsxx/dma.c
@@ -0,0 +1,998 @@
+/*
+* Filename: dma.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/slab.h>
+#include "rsxx_priv.h"
+
+struct rsxx_dma {
+ struct list_head list;
+ u8 cmd;
+ unsigned int laddr; /* Logical address on the ramsan */
+ struct {
+ u32 off;
+ u32 cnt;
+ } sub_page;
+ dma_addr_t dma_addr;
+ struct page *page;
+ unsigned int pg_off; /* Page Offset */
+ rsxx_dma_cb cb;
+ void *cb_data;
+};
+
+/* This timeout is used to detect a stalled DMA channel */
+#define DMA_ACTIVITY_TIMEOUT msecs_to_jiffies(10000)
+
+struct hw_status {
+ u8 status;
+ u8 tag;
+ __le16 count;
+ __le32 _rsvd2;
+ __le64 _rsvd3;
+} __packed;
+
+enum rsxx_dma_status {
+ DMA_SW_ERR = 0x1,
+ DMA_HW_FAULT = 0x2,
+ DMA_CANCELLED = 0x4,
+};
+
+struct hw_cmd {
+ u8 command;
+ u8 tag;
+ u8 _rsvd;
+ u8 sub_page; /* Bit[0:2]: 512byte offset */
+ /* Bit[4:6]: 512byte count */
+ __le32 device_addr;
+ __le64 host_addr;
+} __packed;
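
A small sketch of the sub_page packing described in the structure above: a
512-byte offset in bits [0:2] and a 512-byte count in bits [4:6]. The offset
and count values are assumptions; the same packing is performed later in
rsxx_issue_dmas().

#include <stdio.h>

int main(void)
{
	unsigned int off = 2;	/* assumed 512-byte offset within the 4K block */
	unsigned int cnt = 3;	/* assumed number of 512-byte sectors */
	unsigned char sub_page = ((cnt & 0x7) << 4) | (off & 0x7);

	printf("sub_page = 0x%02x\n", sub_page);	/* 0x32 */
	return 0;
}
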
+
+enum rsxx_hw_cmd {
+ HW_CMD_BLK_DISCARD = 0x70,
+ HW_CMD_BLK_WRITE = 0x80,
+ HW_CMD_BLK_READ = 0xC0,
+ HW_CMD_BLK_RECON_READ = 0xE0,
+};
+
+enum rsxx_hw_status {
+ HW_STATUS_CRC = 0x01,
+ HW_STATUS_HARD_ERR = 0x02,
+ HW_STATUS_SOFT_ERR = 0x04,
+ HW_STATUS_FAULT = 0x08,
+};
+
+#define STATUS_BUFFER_SIZE8 4096
+#define COMMAND_BUFFER_SIZE8 4096
+
+static struct kmem_cache *rsxx_dma_pool;
+
+struct dma_tracker {
+ int next_tag;
+ struct rsxx_dma *dma;
+};
+
+#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
+ (sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))
+
+struct dma_tracker_list {
+ spinlock_t lock;
+ int head;
+ struct dma_tracker list[0];
+};
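
A userspace sketch of the DMA_TRACKER_LIST_SIZE8 calculation above: a fixed
header followed by one tracker slot per outstanding command, allocated as a
single block. Types are simplified and the spinlock is omitted.

#include <stdio.h>
#include <stdlib.h>

struct ex_tracker { int next_tag; void *dma; };
struct ex_tracker_list { int head; struct ex_tracker list[]; };

int main(void)
{
	size_t n = 255;		/* RSXX_MAX_OUTSTANDING_CMDS in this patch */
	size_t sz = sizeof(struct ex_tracker_list) + n * sizeof(struct ex_tracker);
	struct ex_tracker_list *t = malloc(sz);

	if (!t)
		return 1;
	printf("allocated %zu bytes for %zu trackers\n", sz, n);
	free(t);
	return 0;
}
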
+
+
+/*----------------- Misc Utility Functions -------------------*/
+static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
+{
+ unsigned long long tgt_addr8;
+
+ tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
+ card->_stripe.upper_mask) |
+ ((addr8) & card->_stripe.lower_mask);
+ do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
+ return tgt_addr8;
+}
+
+static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
+{
+ unsigned int tgt;
+
+ tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;
+
+ return tgt;
+}
+
+static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
+{
+ /* Reset all DMA Command/Status Queues */
+ iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
+}
+
+static unsigned int get_dma_size(struct rsxx_dma *dma)
+{
+ if (dma->sub_page.cnt)
+ return dma->sub_page.cnt << 9;
+ else
+ return RSXX_HW_BLK_SIZE;
+}
+
+
+/*----------------- DMA Tracker -------------------*/
+static void set_tracker_dma(struct dma_tracker_list *trackers,
+ int tag,
+ struct rsxx_dma *dma)
+{
+ trackers->list[tag].dma = dma;
+}
+
+static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
+ int tag)
+{
+ return trackers->list[tag].dma;
+}
+
+static int pop_tracker(struct dma_tracker_list *trackers)
+{
+ int tag;
+
+ spin_lock(&trackers->lock);
+ tag = trackers->head;
+ if (tag != -1) {
+ trackers->head = trackers->list[tag].next_tag;
+ trackers->list[tag].next_tag = -1;
+ }
+ spin_unlock(&trackers->lock);
+
+ return tag;
+}
+
+static void push_tracker(struct dma_tracker_list *trackers, int tag)
+{
+ spin_lock(&trackers->lock);
+ trackers->list[tag].next_tag = trackers->head;
+ trackers->head = tag;
+ trackers->list[tag].dma = NULL;
+ spin_unlock(&trackers->lock);
+}
+
+
+/*----------------- Interrupt Coalescing -------------*/
+/*
+ * Interrupt Coalescing Register Format:
+ * Interrupt Timer (64ns units) [15:0]
+ * Interrupt Count [24:16]
+ * Reserved [31:25]
+ */
+#define INTR_COAL_LATENCY_MASK (0x0000ffff)
+
+#define INTR_COAL_COUNT_SHIFT 16
+#define INTR_COAL_COUNT_BITS 9
+#define INTR_COAL_COUNT_MASK (((1 << INTR_COAL_COUNT_BITS) - 1) << \
+ INTR_COAL_COUNT_SHIFT)
+#define INTR_COAL_LATENCY_UNITS_NS 64
+
+
+static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
+{
+ u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;
+
+ if (mode == RSXX_INTR_COAL_DISABLED)
+ return 0;
+
+ return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
+ (latency_units & INTR_COAL_LATENCY_MASK);
+
+}
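
A worked example of the coalescing register encoding above; the count and
latency are assumptions picked only to show the bit layout.

#include <stdio.h>

#define EX_COAL_LATENCY_MASK	 0x0000ffff
#define EX_COAL_COUNT_SHIFT	 16
#define EX_COAL_COUNT_MASK	 (((1 << 9) - 1) << EX_COAL_COUNT_SHIFT)
#define EX_COAL_LATENCY_UNITS_NS 64

int main(void)
{
	unsigned int count = 32;		/* assumed interrupts to coalesce */
	unsigned int latency_ns = 25600;	/* assumed max wait (25.6 us) */
	unsigned int units = latency_ns / EX_COAL_LATENCY_UNITS_NS;	/* 400 */
	unsigned int reg = ((count << EX_COAL_COUNT_SHIFT) & EX_COAL_COUNT_MASK) |
			   (units & EX_COAL_LATENCY_MASK);

	printf("INTR_COAL = 0x%08x\n", reg);	/* 0x00200190 */
	return 0;
}
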
+
+static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
+{
+ int i;
+ u32 q_depth = 0;
+ u32 intr_coal;
+
+ if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
+ return;
+
+ for (i = 0; i < card->n_targets; i++)
+ q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);
+
+ intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
+ q_depth / 2,
+ card->config.data.intr_coal.latency);
+ iowrite32(intr_coal, card->regmap + INTR_COAL);
+}
+
+/*----------------- RSXX DMA Handling -------------------*/
+static void rsxx_complete_dma(struct rsxx_cardinfo *card,
+ struct rsxx_dma *dma,
+ unsigned int status)
+{
+ if (status & DMA_SW_ERR)
+ printk_ratelimited(KERN_ERR
+ "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
+ dma->cmd, dma->laddr);
+ if (status & DMA_HW_FAULT)
+ printk_ratelimited(KERN_ERR
+ "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
+ dma->cmd, dma->laddr);
+ if (status & DMA_CANCELLED)
+ printk_ratelimited(KERN_ERR
+ "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
+ dma->cmd, dma->laddr);
+
+ if (dma->dma_addr)
+ pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
+ dma->cmd == HW_CMD_BLK_WRITE ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+
+ if (dma->cb)
+ dma->cb(card, dma->cb_data, status ? 1 : 0);
+
+ kmem_cache_free(rsxx_dma_pool, dma);
+}
+
+static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
+ struct rsxx_dma *dma)
+{
+ /*
+ * Requeued DMAs go to the front of the queue so they are issued
+ * first.
+ */
+ spin_lock(&ctrl->queue_lock);
+ list_add(&dma->list, &ctrl->queue);
+ spin_unlock(&ctrl->queue_lock);
+}
+
+static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
+ struct rsxx_dma *dma,
+ u8 hw_st)
+{
+ unsigned int status = 0;
+ int requeue_cmd = 0;
+
+ dev_dbg(CARD_TO_DEV(ctrl->card),
+ "Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
+ dma->cmd, dma->laddr, hw_st);
+
+ if (hw_st & HW_STATUS_CRC)
+ ctrl->stats.crc_errors++;
+ if (hw_st & HW_STATUS_HARD_ERR)
+ ctrl->stats.hard_errors++;
+ if (hw_st & HW_STATUS_SOFT_ERR)
+ ctrl->stats.soft_errors++;
+
+ switch (dma->cmd) {
+ case HW_CMD_BLK_READ:
+ if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
+ if (ctrl->card->scrub_hard) {
+ dma->cmd = HW_CMD_BLK_RECON_READ;
+ requeue_cmd = 1;
+ ctrl->stats.reads_retried++;
+ } else {
+ status |= DMA_HW_FAULT;
+ ctrl->stats.reads_failed++;
+ }
+ } else if (hw_st & HW_STATUS_FAULT) {
+ status |= DMA_HW_FAULT;
+ ctrl->stats.reads_failed++;
+ }
+
+ break;
+ case HW_CMD_BLK_RECON_READ:
+ if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
+ /* Data could not be reconstructed. */
+ status |= DMA_HW_FAULT;
+ ctrl->stats.reads_failed++;
+ }
+
+ break;
+ case HW_CMD_BLK_WRITE:
+ status |= DMA_HW_FAULT;
+ ctrl->stats.writes_failed++;
+
+ break;
+ case HW_CMD_BLK_DISCARD:
+ status |= DMA_HW_FAULT;
+ ctrl->stats.discards_failed++;
+
+ break;
+ default:
+ dev_err(CARD_TO_DEV(ctrl->card),
+ "Unknown command in DMA!(cmd: x%02x "
+ "laddr x%08x st: x%02x)\n",
+ dma->cmd, dma->laddr, hw_st);
+ status |= DMA_SW_ERR;
+
+ break;
+ }
+
+ if (requeue_cmd)
+ rsxx_requeue_dma(ctrl, dma);
+ else
+ rsxx_complete_dma(ctrl->card, dma, status);
+}
+
+static void dma_engine_stalled(unsigned long data)
+{
+ struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+
+ if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
+ return;
+
+ if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
+ /*
+ * The dma engine was stalled because the SW_CMD_IDX write
+ * was lost. Issue it again to recover.
+ */
+ dev_warn(CARD_TO_DEV(ctrl->card),
+ "SW_CMD_IDX write was lost, re-writing...\n");
+ iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+ mod_timer(&ctrl->activity_timer,
+ jiffies + DMA_ACTIVITY_TIMEOUT);
+ } else {
+ dev_warn(CARD_TO_DEV(ctrl->card),
+ "DMA channel %d has stalled, faulting interface.\n",
+ ctrl->id);
+ ctrl->card->dma_fault = 1;
+ }
+}
+
+static void rsxx_issue_dmas(struct work_struct *work)
+{
+ struct rsxx_dma_ctrl *ctrl;
+ struct rsxx_dma *dma;
+ int tag;
+ int cmds_pending = 0;
+ struct hw_cmd *hw_cmd_buf;
+
+ ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
+ hw_cmd_buf = ctrl->cmd.buf;
+
+ if (unlikely(ctrl->card->halt))
+ return;
+
+ while (1) {
+ spin_lock(&ctrl->queue_lock);
+ if (list_empty(&ctrl->queue)) {
+ spin_unlock(&ctrl->queue_lock);
+ break;
+ }
+ spin_unlock(&ctrl->queue_lock);
+
+ tag = pop_tracker(ctrl->trackers);
+ if (tag == -1)
+ break;
+
+ spin_lock(&ctrl->queue_lock);
+ dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
+ list_del(&dma->list);
+ ctrl->stats.sw_q_depth--;
+ spin_unlock(&ctrl->queue_lock);
+
+ /*
+ * This will catch any DMAs that slipped in right before the
+ * fault, but were queued after all the other DMAs were
+ * cancelled.
+ */
+ if (unlikely(ctrl->card->dma_fault)) {
+ push_tracker(ctrl->trackers, tag);
+ rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
+ continue;
+ }
+
+ set_tracker_dma(ctrl->trackers, tag, dma);
+ hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
+ hw_cmd_buf[ctrl->cmd.idx].tag = tag;
+ hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0;
+ hw_cmd_buf[ctrl->cmd.idx].sub_page =
+ ((dma->sub_page.cnt & 0x7) << 4) |
+ (dma->sub_page.off & 0x7);
+
+ hw_cmd_buf[ctrl->cmd.idx].device_addr =
+ cpu_to_le32(dma->laddr);
+
+ hw_cmd_buf[ctrl->cmd.idx].host_addr =
+ cpu_to_le64(dma->dma_addr);
+
+ dev_dbg(CARD_TO_DEV(ctrl->card),
+ "Issue DMA%d(laddr %d tag %d) to idx %d\n",
+ ctrl->id, dma->laddr, tag, ctrl->cmd.idx);
+
+ ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
+ cmds_pending++;
+
+ if (dma->cmd == HW_CMD_BLK_WRITE)
+ ctrl->stats.writes_issued++;
+ else if (dma->cmd == HW_CMD_BLK_DISCARD)
+ ctrl->stats.discards_issued++;
+ else
+ ctrl->stats.reads_issued++;
+ }
+
+ /* Let HW know we've queued commands. */
+ if (cmds_pending) {
+ /*
+ * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
+ * (which is in PCI-consistent system-memory) from the loop
+ * above make it into the coherency domain before the
+ * following PIO "trigger" updating the cmd.idx. A WMB is
+ * sufficient. We need not explicitly CPU cache-flush since
+ * the memory is a PCI-consistent (i.e. coherent) mapping.
+ */
+ wmb();
+
+ atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
+ mod_timer(&ctrl->activity_timer,
+ jiffies + DMA_ACTIVITY_TIMEOUT);
+ iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+ }
+}
+
+static void rsxx_dma_done(struct work_struct *work)
+{
+ struct rsxx_dma_ctrl *ctrl;
+ struct rsxx_dma *dma;
+ unsigned long flags;
+ u16 count;
+ u8 status;
+ u8 tag;
+ struct hw_status *hw_st_buf;
+
+ ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
+ hw_st_buf = ctrl->status.buf;
+
+ if (unlikely(ctrl->card->halt) ||
+ unlikely(ctrl->card->dma_fault))
+ return;
+
+ count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
+
+ while (count == ctrl->e_cnt) {
+ /*
+ * The read memory-barrier is necessary to keep aggressive
+ * processors/optimizers (such as the PPC Apple G5) from
+ * reordering the following status-buffer tag & status read
+ * *before* the count read on subsequent iterations of the
+ * loop!
+ */
+ rmb();
+
+ status = hw_st_buf[ctrl->status.idx].status;
+ tag = hw_st_buf[ctrl->status.idx].tag;
+
+ dma = get_tracker_dma(ctrl->trackers, tag);
+ if (dma == NULL) {
+ spin_lock_irqsave(&ctrl->card->irq_lock, flags);
+ rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
+ spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
+
+ dev_err(CARD_TO_DEV(ctrl->card),
+ "No tracker for tag %d "
+ "(idx %d id %d)\n",
+ tag, ctrl->status.idx, ctrl->id);
+ return;
+ }
+
+ dev_dbg(CARD_TO_DEV(ctrl->card),
+ "Completing DMA%d"
+ "(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
+ ctrl->id, dma->laddr, tag, status, count,
+ ctrl->status.idx);
+
+ atomic_dec(&ctrl->stats.hw_q_depth);
+
+ mod_timer(&ctrl->activity_timer,
+ jiffies + DMA_ACTIVITY_TIMEOUT);
+
+ if (status)
+ rsxx_handle_dma_error(ctrl, dma, status);
+ else
+ rsxx_complete_dma(ctrl->card, dma, 0);
+
+ push_tracker(ctrl->trackers, tag);
+
+ ctrl->status.idx = (ctrl->status.idx + 1) &
+ RSXX_CS_IDX_MASK;
+ ctrl->e_cnt++;
+
+ count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
+ }
+
+ dma_intr_coal_auto_tune(ctrl->card);
+
+ if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
+ del_timer_sync(&ctrl->activity_timer);
+
+ spin_lock_irqsave(&ctrl->card->irq_lock, flags);
+ rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
+ spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
+
+ spin_lock(&ctrl->queue_lock);
+ if (ctrl->stats.sw_q_depth)
+ queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
+ spin_unlock(&ctrl->queue_lock);
+}
+
+static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card,
+ struct list_head *q)
+{
+ struct rsxx_dma *dma;
+ struct rsxx_dma *tmp;
+ int cnt = 0;
+
+ list_for_each_entry_safe(dma, tmp, q, list) {
+ list_del(&dma->list);
+
+ if (dma->dma_addr)
+ pci_unmap_page(card->dev, dma->dma_addr,
+ get_dma_size(dma),
+ (dma->cmd == HW_CMD_BLK_WRITE) ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+ kmem_cache_free(rsxx_dma_pool, dma);
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int rsxx_queue_discard(struct rsxx_cardinfo *card,
+ struct list_head *q,
+ unsigned int laddr,
+ rsxx_dma_cb cb,
+ void *cb_data)
+{
+ struct rsxx_dma *dma;
+
+ dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->cmd = HW_CMD_BLK_DISCARD;
+ dma->laddr = laddr;
+ dma->dma_addr = 0;
+ dma->sub_page.off = 0;
+ dma->sub_page.cnt = 0;
+ dma->page = NULL;
+ dma->pg_off = 0;
+ dma->cb = cb;
+ dma->cb_data = cb_data;
+
+ dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);
+
+ list_add_tail(&dma->list, q);
+
+ return 0;
+}
+
+static int rsxx_queue_dma(struct rsxx_cardinfo *card,
+ struct list_head *q,
+ int dir,
+ unsigned int dma_off,
+ unsigned int dma_len,
+ unsigned int laddr,
+ struct page *page,
+ unsigned int pg_off,
+ rsxx_dma_cb cb,
+ void *cb_data)
+{
+ struct rsxx_dma *dma;
+
+ dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
+ dir ? PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+ if (!dma->dma_addr) {
+ kmem_cache_free(rsxx_dma_pool, dma);
+ return -ENOMEM;
+ }
+
+ dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
+ dma->laddr = laddr;
+ dma->sub_page.off = (dma_off >> 9);
+ dma->sub_page.cnt = (dma_len >> 9);
+ dma->page = page;
+ dma->pg_off = pg_off;
+ dma->cb = cb;
+ dma->cb_data = cb_data;
+
+ dev_dbg(CARD_TO_DEV(card),
+ "Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
+ dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
+ dma->sub_page.cnt, dma->page, dma->pg_off);
+
+ /* Queue the DMA */
+ list_add_tail(&dma->list, q);
+
+ return 0;
+}
+
+int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+ struct bio *bio,
+ atomic_t *n_dmas,
+ rsxx_dma_cb cb,
+ void *cb_data)
+{
+ struct list_head dma_list[RSXX_MAX_TARGETS];
+ struct bio_vec *bvec;
+ unsigned long long addr8;
+ unsigned int laddr;
+ unsigned int bv_len;
+ unsigned int bv_off;
+ unsigned int dma_off;
+ unsigned int dma_len;
+ int dma_cnt[RSXX_MAX_TARGETS];
+ int tgt;
+ int st;
+ int i;
+
+ addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+ atomic_set(n_dmas, 0);
+
+ for (i = 0; i < card->n_targets; i++) {
+ INIT_LIST_HEAD(&dma_list[i]);
+ dma_cnt[i] = 0;
+ }
+
+ if (bio->bi_rw & REQ_DISCARD) {
+ bv_len = bio->bi_size;
+
+ while (bv_len > 0) {
+ tgt = rsxx_get_dma_tgt(card, addr8);
+ laddr = rsxx_addr8_to_laddr(addr8, card);
+
+ st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
+ cb, cb_data);
+ if (st)
+ goto bvec_err;
+
+ dma_cnt[tgt]++;
+ atomic_inc(n_dmas);
+ addr8 += RSXX_HW_BLK_SIZE;
+ bv_len -= RSXX_HW_BLK_SIZE;
+ }
+ } else {
+ bio_for_each_segment(bvec, bio, i) {
+ bv_len = bvec->bv_len;
+ bv_off = bvec->bv_offset;
+
+ while (bv_len > 0) {
+ tgt = rsxx_get_dma_tgt(card, addr8);
+ laddr = rsxx_addr8_to_laddr(addr8, card);
+ dma_off = addr8 & RSXX_HW_BLK_MASK;
+ dma_len = min(bv_len,
+ RSXX_HW_BLK_SIZE - dma_off);
+
+ st = rsxx_queue_dma(card, &dma_list[tgt],
+ bio_data_dir(bio),
+ dma_off, dma_len,
+ laddr, bvec->bv_page,
+ bv_off, cb, cb_data);
+ if (st)
+ goto bvec_err;
+
+ dma_cnt[tgt]++;
+ atomic_inc(n_dmas);
+ addr8 += dma_len;
+ bv_off += dma_len;
+ bv_len -= dma_len;
+ }
+ }
+ }
+
+ for (i = 0; i < card->n_targets; i++) {
+ if (!list_empty(&dma_list[i])) {
+ spin_lock(&card->ctrl[i].queue_lock);
+ card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
+ list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
+ spin_unlock(&card->ctrl[i].queue_lock);
+
+ queue_work(card->ctrl[i].issue_wq,
+ &card->ctrl[i].issue_dma_work);
+ }
+ }
+
+ return 0;
+
+bvec_err:
+ for (i = 0; i < card->n_targets; i++)
+ rsxx_cleanup_dma_queue(card, &dma_list[i]);
+
+ return st;
+}
+
+
+/*----------------- DMA Engine Initialization & Setup -------------------*/
+static int rsxx_dma_ctrl_init(struct pci_dev *dev,
+ struct rsxx_dma_ctrl *ctrl)
+{
+ int i;
+
+ memset(&ctrl->stats, 0, sizeof(ctrl->stats));
+
+ ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
+ &ctrl->status.dma_addr);
+ ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
+ &ctrl->cmd.dma_addr);
+ if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
+ return -ENOMEM;
+
+ ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
+ if (!ctrl->trackers)
+ return -ENOMEM;
+
+ ctrl->trackers->head = 0;
+ for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
+ ctrl->trackers->list[i].next_tag = i + 1;
+ ctrl->trackers->list[i].dma = NULL;
+ }
+ ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
+ spin_lock_init(&ctrl->trackers->lock);
+
+ spin_lock_init(&ctrl->queue_lock);
+ INIT_LIST_HEAD(&ctrl->queue);
+
+ setup_timer(&ctrl->activity_timer, dma_engine_stalled,
+ (unsigned long)ctrl);
+
+ ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
+ if (!ctrl->issue_wq)
+ return -ENOMEM;
+
+ ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
+ if (!ctrl->done_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
+ INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
+
+ memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
+ iowrite32(lower_32_bits(ctrl->status.dma_addr),
+ ctrl->regmap + SB_ADD_LO);
+ iowrite32(upper_32_bits(ctrl->status.dma_addr),
+ ctrl->regmap + SB_ADD_HI);
+
+ memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
+ iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
+ iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
+
+ ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
+ if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+ dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
+ ctrl->status.idx);
+ return -EINVAL;
+ }
+ iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
+ iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
+
+ ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
+ if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+ dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
+ ctrl->cmd.idx);
+ return -EINVAL;
+ }
+ iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
+ iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+
+ wmb();
+
+ return 0;
+}
+
+static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
+ unsigned int stripe_size8)
+{
+ if (!is_power_of_2(stripe_size8)) {
+ dev_err(CARD_TO_DEV(card),
+ "stripe_size is NOT a power of 2!\n");
+ return -EINVAL;
+ }
+
+ card->_stripe.lower_mask = stripe_size8 - 1;
+
+ card->_stripe.upper_mask = ~(card->_stripe.lower_mask);
+ card->_stripe.upper_shift = ffs(card->n_targets) - 1;
+
+ card->_stripe.target_mask = card->n_targets - 1;
+ card->_stripe.target_shift = ffs(stripe_size8) - 1;
+
+ dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
+ card->_stripe.lower_mask);
+ dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
+ card->_stripe.upper_shift);
+ dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
+ card->_stripe.upper_mask);
+ dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
+ card->_stripe.target_mask);
+ dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
+ card->_stripe.target_shift);
+
+ return 0;
+}
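
A worked example tying rsxx_dma_stripe_setup() to rsxx_addr8_to_laddr() and
rsxx_get_dma_tgt() above, assuming an 8-target card with a 4096-byte stripe;
the byte address is arbitrary.

#include <stdio.h>

int main(void)
{
	unsigned long long addr8 = 0x12345000ULL;	/* assumed byte address */
	unsigned long long stripe_size8 = 4096;		/* assumed stripe size */
	unsigned int n_targets = 8;			/* assumed target count */

	unsigned long long lower_mask = stripe_size8 - 1;
	unsigned long long upper_mask = ~lower_mask;
	unsigned int upper_shift = 3;			/* ffs(n_targets) - 1 */
	unsigned int target_shift = 12;			/* ffs(stripe_size8) - 1 */
	unsigned int target_mask = n_targets - 1;

	unsigned int tgt = (addr8 >> target_shift) & target_mask;
	unsigned long long laddr = (((addr8 >> upper_shift) & upper_mask) |
				    (addr8 & lower_mask)) / 4096;

	printf("target %u, laddr 0x%llx\n", tgt, laddr);	/* target 5, laddr 0x2468 */
	return 0;
}
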
+
+static int rsxx_dma_configure(struct rsxx_cardinfo *card)
+{
+ u32 intr_coal;
+
+ intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
+ card->config.data.intr_coal.count,
+ card->config.data.intr_coal.latency);
+ iowrite32(intr_coal, card->regmap + INTR_COAL);
+
+ return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
+}
+
+int rsxx_dma_setup(struct rsxx_cardinfo *card)
+{
+ unsigned long flags;
+ int st;
+ int i;
+
+ dev_info(CARD_TO_DEV(card),
+ "Initializing %d DMA targets\n",
+ card->n_targets);
+
+ /* Regmap is divided up into 4K chunks, one for each DMA channel. */
+ for (i = 0; i < card->n_targets; i++)
+ card->ctrl[i].regmap = card->regmap + (i * 4096);
+
+ card->dma_fault = 0;
+
+ /* Reset the DMA queues */
+ rsxx_dma_queue_reset(card);
+
+ /************* Setup DMA Control *************/
+ for (i = 0; i < card->n_targets; i++) {
+ st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
+ if (st)
+ goto failed_dma_setup;
+
+ card->ctrl[i].card = card;
+ card->ctrl[i].id = i;
+ }
+
+ card->scrub_hard = 1;
+
+ if (card->config_valid)
+ rsxx_dma_configure(card);
+
+ /* Enable the interrupts after all setup has completed. */
+ for (i = 0; i < card->n_targets; i++) {
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+ }
+
+ return 0;
+
+failed_dma_setup:
+ for (i = 0; i < card->n_targets; i++) {
+ struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];
+
+ if (ctrl->issue_wq) {
+ destroy_workqueue(ctrl->issue_wq);
+ ctrl->issue_wq = NULL;
+ }
+
+ if (ctrl->done_wq) {
+ destroy_workqueue(ctrl->done_wq);
+ ctrl->done_wq = NULL;
+ }
+
+ if (ctrl->trackers)
+ vfree(ctrl->trackers);
+
+ if (ctrl->status.buf)
+ pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
+ ctrl->status.buf,
+ ctrl->status.dma_addr);
+ if (ctrl->cmd.buf)
+ pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
+ ctrl->cmd.buf, ctrl->cmd.dma_addr);
+ }
+
+ return st;
+}
+
+
+void rsxx_dma_destroy(struct rsxx_cardinfo *card)
+{
+ struct rsxx_dma_ctrl *ctrl;
+ struct rsxx_dma *dma;
+ int i, j;
+ int cnt = 0;
+
+ for (i = 0; i < card->n_targets; i++) {
+ ctrl = &card->ctrl[i];
+
+ if (ctrl->issue_wq) {
+ destroy_workqueue(ctrl->issue_wq);
+ ctrl->issue_wq = NULL;
+ }
+
+ if (ctrl->done_wq) {
+ destroy_workqueue(ctrl->done_wq);
+ ctrl->done_wq = NULL;
+ }
+
+ if (timer_pending(&ctrl->activity_timer))
+ del_timer_sync(&ctrl->activity_timer);
+
+ /* Clean up the DMA queue */
+ spin_lock(&ctrl->queue_lock);
+ cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue);
+ spin_unlock(&ctrl->queue_lock);
+
+ if (cnt)
+ dev_info(CARD_TO_DEV(card),
+ "Freed %d queued DMAs on channel %d\n",
+ cnt, i);
+
+ /* Clean up issued DMAs */
+ for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
+ dma = get_tracker_dma(ctrl->trackers, j);
+ if (dma) {
+ pci_unmap_page(card->dev, dma->dma_addr,
+ get_dma_size(dma),
+ (dma->cmd == HW_CMD_BLK_WRITE) ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+ kmem_cache_free(rsxx_dma_pool, dma);
+ cnt++;
+ }
+ }
+
+ if (cnt)
+ dev_info(CARD_TO_DEV(card),
+ "Freed %d pending DMAs on channel %d\n",
+ cnt, i);
+
+ vfree(ctrl->trackers);
+
+ pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
+ ctrl->status.buf, ctrl->status.dma_addr);
+ pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
+ ctrl->cmd.buf, ctrl->cmd.dma_addr);
+ }
+}
+
+
+int rsxx_dma_init(void)
+{
+ rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
+ if (!rsxx_dma_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+
+void rsxx_dma_cleanup(void)
+{
+ kmem_cache_destroy(rsxx_dma_pool);
+}
+
diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h
new file mode 100644
index 000000000000..2e50b65902b7
--- /dev/null
+++ b/drivers/block/rsxx/rsxx.h
@@ -0,0 +1,45 @@
+/*
+* Filename: rsxx.h
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef __RSXX_H__
+#define __RSXX_H__
+
+/*----------------- IOCTL Definitions -------------------*/
+
+struct rsxx_reg_access {
+ __u32 addr;
+ __u32 cnt;
+ __u32 stat;
+ __u32 stream;
+ __u32 data[8];
+};
+
+#define RSXX_MAX_REG_CNT (8 * (sizeof(__u32)))
+
+#define RSXX_IOC_MAGIC 'r'
+
+#define RSXX_GETREG _IOWR(RSXX_IOC_MAGIC, 0x20, struct rsxx_reg_access)
+#define RSXX_SETREG _IOWR(RSXX_IOC_MAGIC, 0x21, struct rsxx_reg_access)
+
+#endif /* __RSXX_H__ */
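
A userspace sketch of driving the register-access ioctls defined above. The
device node path is an assumption, the register address is CREG_ADD_CARD_STATE
from rsxx_priv.h in this patch, and error handling is minimal.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

struct rsxx_reg_access {
	__u32 addr;
	__u32 cnt;
	__u32 stat;
	__u32 stream;
	__u32 data[8];
};

#define RSXX_IOC_MAGIC	'r'
#define RSXX_GETREG	_IOWR(RSXX_IOC_MAGIC, 0x20, struct rsxx_reg_access)

int main(void)
{
	struct rsxx_reg_access cmd = { .addr = 0x80001004, .cnt = 4, .stream = 0 };
	int fd = open("/dev/rsxx0", O_RDONLY);	/* assumed device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, RSXX_GETREG, &cmd) == 0)
		printf("card state: 0x%08x (stat 0x%x)\n", cmd.data[0], cmd.stat);
	close(fd);
	return 0;
}
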
diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h
new file mode 100644
index 000000000000..c025fe5fdb70
--- /dev/null
+++ b/drivers/block/rsxx/rsxx_cfg.h
@@ -0,0 +1,72 @@
+/*
+* Filename: rsxx_cfg.h
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef __RSXX_CFG_H__
+#define __RSXX_CFG_H__
+
+/* NOTE: Config values will be saved in network byte order (i.e. Big endian) */
+#include <linux/types.h>
+
+/*
+ * The card config version must match the driver's expected version. If it does
+ * not, the DMA interfaces will not be attached and the user will need to
+ * initialize/upgrade the card configuration using the card config utility.
+ */
+#define RSXX_CFG_VERSION 4
+
+struct card_cfg_hdr {
+ __u32 version;
+ __u32 crc;
+};
+
+struct card_cfg_data {
+ __u32 block_size;
+ __u32 stripe_size;
+ __u32 vendor_id;
+ __u32 cache_order;
+ struct {
+ __u32 mode; /* Disabled, manual, auto-tune... */
+ __u32 count; /* Number of intr to coalesce */
+ __u32 latency; /* Max wait time (in ns) */
+ } intr_coal;
+};
+
+struct rsxx_card_cfg {
+ struct card_cfg_hdr hdr;
+ struct card_cfg_data data;
+};
+
+/* Vendor ID Values */
+#define RSXX_VENDOR_ID_TMS_IBM 0
+#define RSXX_VENDOR_ID_DSI 1
+#define RSXX_VENDOR_COUNT 2
+
+/* Interrupt Coalescing Values */
+#define RSXX_INTR_COAL_DISABLED 0
+#define RSXX_INTR_COAL_EXPLICIT 1
+#define RSXX_INTR_COAL_AUTO_TUNE 2
+
+
+#endif /* __RSXX_CFG_H__ */
+
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
new file mode 100644
index 000000000000..a1ac907d8f4c
--- /dev/null
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -0,0 +1,399 @@
+/*
+* Filename: rsxx_priv.h
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef __RSXX_PRIV_H__
+#define __RSXX_PRIV_H__
+
+#include <linux/version.h>
+#include <linux/semaphore.h>
+
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+#include <linux/bio.h>
+#include <linux/vmalloc.h>
+#include <linux/timer.h>
+#include <linux/ioctl.h>
+
+#include "rsxx.h"
+#include "rsxx_cfg.h"
+
+struct proc_cmd;
+
+#define PCI_VENDOR_ID_TMS_IBM 0x15B6
+#define PCI_DEVICE_ID_RS70_FLASH 0x0019
+#define PCI_DEVICE_ID_RS70D_FLASH 0x001A
+#define PCI_DEVICE_ID_RS80_FLASH 0x001C
+#define PCI_DEVICE_ID_RS81_FLASH 0x001E
+
+#define RS70_PCI_REV_SUPPORTED 4
+
+#define DRIVER_NAME "rsxx"
+#define DRIVER_VERSION "3.7"
+
+/* Block size is 4096 */
+#define RSXX_HW_BLK_SHIFT 12
+#define RSXX_HW_BLK_SIZE (1 << RSXX_HW_BLK_SHIFT)
+#define RSXX_HW_BLK_MASK (RSXX_HW_BLK_SIZE - 1)
+
+#define MAX_CREG_DATA8 32
+#define LOG_BUF_SIZE8 128
+
+#define RSXX_MAX_OUTSTANDING_CMDS 255
+#define RSXX_CS_IDX_MASK 0xff
+
+#define RSXX_MAX_TARGETS 8
+
+struct dma_tracker_list;
+
+/* DMA Command/Status Buffer structure */
+struct rsxx_cs_buffer {
+ dma_addr_t dma_addr;
+ void *buf;
+ u32 idx;
+};
+
+struct rsxx_dma_stats {
+ u32 crc_errors;
+ u32 hard_errors;
+ u32 soft_errors;
+ u32 writes_issued;
+ u32 writes_failed;
+ u32 reads_issued;
+ u32 reads_failed;
+ u32 reads_retried;
+ u32 discards_issued;
+ u32 discards_failed;
+ u32 done_rescheduled;
+ u32 issue_rescheduled;
+ u32 sw_q_depth; /* Number of DMAs on the SW queue. */
+ atomic_t hw_q_depth; /* Number of DMAs queued to HW. */
+};
+
+struct rsxx_dma_ctrl {
+ struct rsxx_cardinfo *card;
+ int id;
+ void __iomem *regmap;
+ struct rsxx_cs_buffer status;
+ struct rsxx_cs_buffer cmd;
+ u16 e_cnt;
+ spinlock_t queue_lock;
+ struct list_head queue;
+ struct workqueue_struct *issue_wq;
+ struct work_struct issue_dma_work;
+ struct workqueue_struct *done_wq;
+ struct work_struct dma_done_work;
+ struct timer_list activity_timer;
+ struct dma_tracker_list *trackers;
+ struct rsxx_dma_stats stats;
+};
+
+struct rsxx_cardinfo {
+ struct pci_dev *dev;
+ unsigned int halt;
+
+ void __iomem *regmap;
+ spinlock_t irq_lock;
+ unsigned int isr_mask;
+ unsigned int ier_mask;
+
+ struct rsxx_card_cfg config;
+ int config_valid;
+
+ /* Embedded CPU Communication */
+ struct {
+ spinlock_t lock;
+ bool active;
+ struct creg_cmd *active_cmd;
+ struct work_struct done_work;
+ struct list_head queue;
+ unsigned int q_depth;
+ /* Cache the creg status to prevent ioreads */
+ struct {
+ u32 stat;
+ u32 failed_cancel_timer;
+ u32 creg_timeout;
+ } creg_stats;
+ struct timer_list cmd_timer;
+ struct mutex reset_lock;
+ int reset;
+ } creg_ctrl;
+
+ struct {
+ char tmp[MAX_CREG_DATA8];
+ char buf[LOG_BUF_SIZE8]; /* terminated */
+ int buf_len;
+ } log;
+
+ struct work_struct event_work;
+ unsigned int state;
+ u64 size8;
+
+ /* Lock the device attach/detach function */
+ struct mutex dev_lock;
+
+ /* Block Device Variables */
+ bool bdev_attached;
+ int disk_id;
+ int major;
+ struct request_queue *queue;
+ struct gendisk *gendisk;
+ struct {
+ /* Used to convert a byte address to a device address. */
+ u64 lower_mask;
+ u64 upper_shift;
+ u64 upper_mask;
+ u64 target_mask;
+ u64 target_shift;
+ } _stripe;
+ unsigned int dma_fault;
+
+ int scrub_hard;
+
+ int n_targets;
+ struct rsxx_dma_ctrl *ctrl;
+};
+
+enum rsxx_pci_regmap {
+ HWID = 0x00, /* Hardware Identification Register */
+ SCRATCH = 0x04, /* Scratch/Debug Register */
+ RESET = 0x08, /* Reset Register */
+ ISR = 0x10, /* Interrupt Status Register */
+ IER = 0x14, /* Interrupt Enable Register */
+ IPR = 0x18, /* Interrupt Poll Register */
+ CB_ADD_LO = 0x20, /* Command Host Buffer Address [31:0] */
+ CB_ADD_HI = 0x24, /* Command Host Buffer Address [63:32]*/
+ HW_CMD_IDX = 0x28, /* Hardware Processed Command Index */
+ SW_CMD_IDX = 0x2C, /* Software Processed Command Index */
+ SB_ADD_LO = 0x30, /* Status Host Buffer Address [31:0] */
+ SB_ADD_HI = 0x34, /* Status Host Buffer Address [63:32] */
+ HW_STATUS_CNT = 0x38, /* Hardware Status Counter */
+ SW_STATUS_CNT = 0x3C, /* Deprecated */
+ CREG_CMD = 0x40, /* CPU Command Register */
+ CREG_ADD = 0x44, /* CPU Address Register */
+ CREG_CNT = 0x48, /* CPU Count Register */
+ CREG_STAT = 0x4C, /* CPU Status Register */
+ CREG_DATA0 = 0x50, /* CPU Data Registers */
+ CREG_DATA1 = 0x54,
+ CREG_DATA2 = 0x58,
+ CREG_DATA3 = 0x5C,
+ CREG_DATA4 = 0x60,
+ CREG_DATA5 = 0x64,
+ CREG_DATA6 = 0x68,
+ CREG_DATA7 = 0x6c,
+ INTR_COAL = 0x70, /* Interrupt Coalescing Register */
+ HW_ERROR = 0x74, /* Card Error Register */
+ PCI_DEBUG0 = 0x78, /* PCI Debug Registers */
+ PCI_DEBUG1 = 0x7C,
+ PCI_DEBUG2 = 0x80,
+ PCI_DEBUG3 = 0x84,
+ PCI_DEBUG4 = 0x88,
+ PCI_DEBUG5 = 0x8C,
+ PCI_DEBUG6 = 0x90,
+ PCI_DEBUG7 = 0x94,
+ PCI_POWER_THROTTLE = 0x98,
+ PERF_CTRL = 0x9c,
+ PERF_TIMER_LO = 0xa0,
+ PERF_TIMER_HI = 0xa4,
+ PERF_RD512_LO = 0xa8,
+ PERF_RD512_HI = 0xac,
+ PERF_WR512_LO = 0xb0,
+ PERF_WR512_HI = 0xb4,
+};
+
+enum rsxx_intr {
+ CR_INTR_DMA0 = 0x00000001,
+ CR_INTR_CREG = 0x00000002,
+ CR_INTR_DMA1 = 0x00000004,
+ CR_INTR_EVENT = 0x00000008,
+ CR_INTR_DMA2 = 0x00000010,
+ CR_INTR_DMA3 = 0x00000020,
+ CR_INTR_DMA4 = 0x00000040,
+ CR_INTR_DMA5 = 0x00000080,
+ CR_INTR_DMA6 = 0x00000100,
+ CR_INTR_DMA7 = 0x00000200,
+ CR_INTR_DMA_ALL = 0x000003f5,
+ CR_INTR_ALL = 0xffffffff,
+};
+
+static inline int CR_INTR_DMA(int N)
+{
+ static const unsigned int _CR_INTR_DMA[] = {
+ CR_INTR_DMA0, CR_INTR_DMA1, CR_INTR_DMA2, CR_INTR_DMA3,
+ CR_INTR_DMA4, CR_INTR_DMA5, CR_INTR_DMA6, CR_INTR_DMA7
+ };
+ return _CR_INTR_DMA[N];
+}
+enum rsxx_pci_reset {
+ DMA_QUEUE_RESET = 0x00000001,
+};
+
+enum rsxx_pci_revision {
+ RSXX_DISCARD_SUPPORT = 2,
+};
+
+enum rsxx_creg_cmd {
+ CREG_CMD_TAG_MASK = 0x0000FF00,
+ CREG_OP_WRITE = 0x000000C0,
+ CREG_OP_READ = 0x000000E0,
+};
+
+enum rsxx_creg_addr {
+ CREG_ADD_CARD_CMD = 0x80001000,
+ CREG_ADD_CARD_STATE = 0x80001004,
+ CREG_ADD_CARD_SIZE = 0x8000100c,
+ CREG_ADD_CAPABILITIES = 0x80001050,
+ CREG_ADD_LOG = 0x80002000,
+ CREG_ADD_NUM_TARGETS = 0x80003000,
+ CREG_ADD_CONFIG = 0xB0000000,
+};
+
+enum rsxx_creg_card_cmd {
+ CARD_CMD_STARTUP = 1,
+ CARD_CMD_SHUTDOWN = 2,
+ CARD_CMD_LOW_LEVEL_FORMAT = 3,
+ CARD_CMD_FPGA_RECONFIG_BR = 4,
+ CARD_CMD_FPGA_RECONFIG_MAIN = 5,
+ CARD_CMD_BACKUP = 6,
+ CARD_CMD_RESET = 7,
+ CARD_CMD_deprecated = 8,
+ CARD_CMD_UNINITIALIZE = 9,
+ CARD_CMD_DSTROY_EMERGENCY = 10,
+ CARD_CMD_DSTROY_NORMAL = 11,
+ CARD_CMD_DSTROY_EXTENDED = 12,
+ CARD_CMD_DSTROY_ABORT = 13,
+};
+
+enum rsxx_card_state {
+ CARD_STATE_SHUTDOWN = 0x00000001,
+ CARD_STATE_STARTING = 0x00000002,
+ CARD_STATE_FORMATTING = 0x00000004,
+ CARD_STATE_UNINITIALIZED = 0x00000008,
+ CARD_STATE_GOOD = 0x00000010,
+ CARD_STATE_SHUTTING_DOWN = 0x00000020,
+ CARD_STATE_FAULT = 0x00000040,
+ CARD_STATE_RD_ONLY_FAULT = 0x00000080,
+ CARD_STATE_DSTROYING = 0x00000100,
+};
+
+enum rsxx_led {
+ LED_DEFAULT = 0x0,
+ LED_IDENTIFY = 0x1,
+ LED_SOAK = 0x2,
+};
+
+enum rsxx_creg_flash_lock {
+ CREG_FLASH_LOCK = 1,
+ CREG_FLASH_UNLOCK = 2,
+};
+
+enum rsxx_card_capabilities {
+ CARD_CAP_SUBPAGE_WRITES = 0x00000080,
+};
+
+enum rsxx_creg_stat {
+ CREG_STAT_STATUS_MASK = 0x00000003,
+ CREG_STAT_SUCCESS = 0x1,
+ CREG_STAT_ERROR = 0x2,
+ CREG_STAT_CHAR_PENDING = 0x00000004, /* Character I/O pending bit */
+ CREG_STAT_LOG_PENDING = 0x00000008, /* HW log message pending bit */
+ CREG_STAT_TAG_MASK = 0x0000ff00,
+};
+
+static inline unsigned int CREG_DATA(int N)
+{
+ return CREG_DATA0 + (N << 2);
+}
+
+/*----------------- Convenient Log Wrappers -------------------*/
+#define CARD_TO_DEV(__CARD) (&(__CARD)->dev->dev)
+
+/***** config.c *****/
+int rsxx_load_config(struct rsxx_cardinfo *card);
+
+/***** core.c *****/
+void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr);
+void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr);
+void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
+ unsigned int intr);
+void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
+ unsigned int intr);
+
+/***** dev.c *****/
+int rsxx_attach_dev(struct rsxx_cardinfo *card);
+void rsxx_detach_dev(struct rsxx_cardinfo *card);
+int rsxx_setup_dev(struct rsxx_cardinfo *card);
+void rsxx_destroy_dev(struct rsxx_cardinfo *card);
+int rsxx_dev_init(void);
+void rsxx_dev_cleanup(void);
+
+/***** dma.c ****/
+typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
+ void *cb_data,
+ unsigned int status);
+int rsxx_dma_setup(struct rsxx_cardinfo *card);
+void rsxx_dma_destroy(struct rsxx_cardinfo *card);
+int rsxx_dma_init(void);
+void rsxx_dma_cleanup(void);
+int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+ struct bio *bio,
+ atomic_t *n_dmas,
+ rsxx_dma_cb cb,
+ void *cb_data);
+
+/***** cregs.c *****/
+int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
+ unsigned int size8,
+ void *data,
+ int byte_stream);
+int rsxx_creg_read(struct rsxx_cardinfo *card,
+ u32 addr,
+ unsigned int size8,
+ void *data,
+ int byte_stream);
+int rsxx_read_hw_log(struct rsxx_cardinfo *card);
+int rsxx_get_card_state(struct rsxx_cardinfo *card,
+ unsigned int *state);
+int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8);
+int rsxx_get_num_targets(struct rsxx_cardinfo *card,
+ unsigned int *n_targets);
+int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
+ u32 *capabilities);
+int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd);
+int rsxx_creg_setup(struct rsxx_cardinfo *card);
+void rsxx_creg_destroy(struct rsxx_cardinfo *card);
+int rsxx_creg_init(void);
+void rsxx_creg_cleanup(void);
+
+int rsxx_reg_access(struct rsxx_cardinfo *card,
+ struct rsxx_reg_access __user *ucmd,
+ int read);
+
+
+
+#endif /* __RSXX_PRIV_H__ */
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 57763c54363a..758f2ac878cf 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1090,10 +1090,13 @@ static const struct block_device_operations floppy_fops = {
static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
{
struct floppy_state *fs = macio_get_drvdata(mdev);
- struct swim3 __iomem *sw = fs->swim3;
+ struct swim3 __iomem *sw;
if (!fs)
return;
+
+ sw = fs->swim3;
+
if (mb_state != MB_FD)
return;
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
deleted file mode 100644
index ff540520bada..000000000000
--- a/drivers/block/xd.c
+++ /dev/null
@@ -1,1123 +0,0 @@
-/*
- * This file contains the driver for an XT hard disk controller
- * (at least the DTC 5150X) for Linux.
- *
- * Author: Pat Mackinlay, pat@it.com.au
- * Date: 29/09/92
- *
- * Revised: 01/01/93, ...
- *
- * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler,
- * kevinf@agora.rain.com)
- * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and
- * Wim Van Dorst.
- *
- * Revised: 04/04/94 by Risto Kankkunen
- * Moved the detection code from xd_init() to xd_geninit() as it needed
- * interrupts enabled and Linus didn't want to enable them in that first
- * phase. xd_geninit() is the place to do these kinds of things anyway,
- * he says.
- *
- * Modularized: 04/10/96 by Todd Fries, tfries@umr.edu
- *
- * Revised: 13/12/97 by Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl
- * Fixed some problems with disk initialization and module initiation.
- * Added support for manual geometry setting (except Seagate controllers)
- * in form:
- * xd_geo=<cyl_xda>,<head_xda>,<sec_xda>[,<cyl_xdb>,<head_xdb>,<sec_xdb>]
- * Recovered DMA access. Abridged messages. Added support for DTC5051CX,
- * WD1002-27X & XEBEC controllers. Driver uses now some jumper settings.
- * Extended ioctl() support.
- *
- * Bugfix: 15/02/01, Paul G. - inform queue layer of tiny xd_maxsect.
- *
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/genhd.h>
-#include <linux/hdreg.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/blkdev.h>
-#include <linux/mutex.h>
-#include <linux/blkpg.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/gfp.h>
-
-#include <asm/uaccess.h>
-#include <asm/dma.h>
-
-#include "xd.h"
-
-static DEFINE_MUTEX(xd_mutex);
-static void __init do_xd_setup (int *integers);
-#ifdef MODULE
-static int xd[5] = { -1,-1,-1,-1, };
-#endif
-
-#define XD_DONT_USE_DMA 0 /* Initial value. may be overriden using
- "nodma" module option */
-#define XD_INIT_DISK_DELAY (30) /* 30 ms delay during disk initialization */
-
-/* Above may need to be increased if a problem with the 2nd drive detection
- (ST11M controller) or resetting a controller (WD) appears */
-
-static XD_INFO xd_info[XD_MAXDRIVES];
-
-/* If you try this driver and find that your card is not detected by the driver at bootup, you need to add your BIOS
- signature and details to the following list of signatures. A BIOS signature is a string embedded into the first
- few bytes of your controller's on-board ROM BIOS. To find out what yours is, use something like MS-DOS's DEBUG
- command. Run DEBUG, and then you can examine your BIOS signature with:
-
- d xxxx:0000
-
- where xxxx is the segment of your controller (like C800 or D000 or something). On the ASCII dump at the right, you should
- be able to see a string mentioning the manufacturer's copyright etc. Add this string into the table below. The parameters
- in the table are, in order:
-
- offset ; this is the offset (in bytes) from the start of your ROM where the signature starts
- signature ; this is the actual text of the signature
- xd_?_init_controller ; this is the controller init routine used by your controller
- xd_?_init_drive ; this is the drive init routine used by your controller
-
- The controllers directly supported at the moment are: DTC 5150x, WD 1004A27X, ST11M/R and override. If your controller is
- made by the same manufacturer as one of these, try using the same init routines as they do. If that doesn't work, your
- best bet is to use the "override" routines. These routines use a "portable" method of getting the disk's geometry, and
- may work with your card. If none of these seem to work, try sending me some email and I'll see what I can do <grin>.
-
- NOTE: You can now specify your XT controller's parameters from the command line in the form xd=TYPE,IRQ,IO,DMA. The driver
- should be able to detect your drive's geometry from this info. (eg: xd=0,5,0x320,3 is the "standard"). */
-
-#include <asm/page.h>
-#define xd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,get_order(size))
-#define xd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
-static char *xd_dma_buffer;
-
-static XD_SIGNATURE xd_sigs[] __initdata = {
- { 0x0000,"Override geometry handler",NULL,xd_override_init_drive,"n unknown" }, /* Pat Mackinlay, pat@it.com.au */
- { 0x0008,"[BXD06 (C) DTC 17-MAY-1985]",xd_dtc_init_controller,xd_dtc5150cx_init_drive," DTC 5150CX" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
- { 0x000B,"CRD18A Not an IBM rom. (C) Copyright Data Technology Corp. 05/31/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Todd Fries, tfries@umr.edu */
- { 0x000B,"CXD23A Not an IBM ROM (C)Copyright Data Technology Corp 12/03/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Pat Mackinlay, pat@it.com.au */
- { 0x0008,"07/15/86(C) Copyright 1986 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. 1002-27X" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
- { 0x0008,"06/24/88(C) Copyright 1988 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. WDXT-GEN2" }, /* Dan Newcombe, newcombe@aa.csc.peachnet.edu */
- { 0x0015,"SEAGATE ST11 BIOS REVISION",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Salvador Abreu, spa@fct.unl.pt */
- { 0x0010,"ST11R BIOS",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Risto Kankkunen, risto.kankkunen@cs.helsinki.fi */
- { 0x0010,"ST11 BIOS v1.7",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11R" }, /* Alan Hourihane, alanh@fairlite.demon.co.uk */
- { 0x1000,"(c)Copyright 1987 SMS",xd_omti_init_controller,xd_omti_init_drive,"n OMTI 5520" }, /* Dirk Melchers, dirk@merlin.nbg.sub.org */
- { 0x0006,"COPYRIGHT XEBEC (C) 1984",xd_xebec_init_controller,xd_xebec_init_drive," XEBEC" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
- { 0x0008,"(C) Copyright 1984 Western Digital Corp", xd_wd_init_controller, xd_wd_init_drive," Western Dig. 1002s-wx2" },
- { 0x0008,"(C) Copyright 1986 Western Digital Corporation", xd_wd_init_controller, xd_wd_init_drive," 1986 Western Digital" }, /* jfree@sovereign.org */
-};
-
-static unsigned int xd_bases[] __initdata =
-{
- 0xC8000, 0xCA000, 0xCC000,
- 0xCE000, 0xD0000, 0xD2000,
- 0xD4000, 0xD6000, 0xD8000,
- 0xDA000, 0xDC000, 0xDE000,
- 0xE0000
-};
-
-static DEFINE_SPINLOCK(xd_lock);
-
-static struct gendisk *xd_gendisk[2];
-
-static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
-static const struct block_device_operations xd_fops = {
- .owner = THIS_MODULE,
- .ioctl = xd_ioctl,
- .getgeo = xd_getgeo,
-};
-static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
-static u_char xd_drives, xd_irq = 5, xd_dma = 3, xd_maxsectors;
-static u_char xd_override __initdata = 0, xd_type __initdata = 0;
-static u_short xd_iobase = 0x320;
-static int xd_geo[XD_MAXDRIVES*3] __initdata = { 0, };
-
-static volatile int xdc_busy;
-static struct timer_list xd_watchdog_int;
-
-static volatile u_char xd_error;
-static bool nodma = XD_DONT_USE_DMA;
-
-static struct request_queue *xd_queue;
-
-/* xd_init: register the block device number and set up pointer tables */
-static int __init xd_init(void)
-{
- u_char i,controller;
- unsigned int address;
- int err;
-
-#ifdef MODULE
- {
- u_char count = 0;
- for (i = 4; i > 0; i--)
- if (((xd[i] = xd[i-1]) >= 0) && !count)
- count = i;
- if ((xd[0] = count))
- do_xd_setup(xd);
- }
-#endif
-
- init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog;
-
- err = -EBUSY;
- if (register_blkdev(XT_DISK_MAJOR, "xd"))
- goto out1;
-
- err = -ENOMEM;
- xd_queue = blk_init_queue(do_xd_request, &xd_lock);
- if (!xd_queue)
- goto out1a;
-
- if (xd_detect(&controller,&address)) {
-
- printk("Detected a%s controller (type %d) at address %06x\n",
- xd_sigs[controller].name,controller,address);
- if (!request_region(xd_iobase,4,"xd")) {
- printk("xd: Ports at 0x%x are not available\n",
- xd_iobase);
- goto out2;
- }
- if (controller)
- xd_sigs[controller].init_controller(address);
- xd_drives = xd_initdrives(xd_sigs[controller].init_drive);
-
- printk("Detected %d hard drive%s (using IRQ%d & DMA%d)\n",
- xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma);
- }
-
- /*
- * With the drive detected, xd_maxsectors should now be known.
- * If xd_maxsectors is 0, nothing was detected and we fall through
- * to return -ENODEV
- */
- if (!xd_dma_buffer && xd_maxsectors) {
- xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
- if (!xd_dma_buffer) {
- printk(KERN_ERR "xd: Out of memory.\n");
- goto out3;
- }
- }
-
- err = -ENODEV;
- if (!xd_drives)
- goto out3;
-
- for (i = 0; i < xd_drives; i++) {
- XD_INFO *p = &xd_info[i];
- struct gendisk *disk = alloc_disk(64);
- if (!disk)
- goto Enomem;
- p->unit = i;
- disk->major = XT_DISK_MAJOR;
- disk->first_minor = i<<6;
- sprintf(disk->disk_name, "xd%c", i+'a');
- disk->fops = &xd_fops;
- disk->private_data = p;
- disk->queue = xd_queue;
- set_capacity(disk, p->heads * p->cylinders * p->sectors);
- printk(" %s: CHS=%d/%d/%d\n", disk->disk_name,
- p->cylinders, p->heads, p->sectors);
- xd_gendisk[i] = disk;
- }
-
- err = -EBUSY;
- if (request_irq(xd_irq,xd_interrupt_handler, 0, "XT hard disk", NULL)) {
- printk("xd: unable to get IRQ%d\n",xd_irq);
- goto out4;
- }
-
- if (request_dma(xd_dma,"xd")) {
- printk("xd: unable to get DMA%d\n",xd_dma);
- goto out5;
- }
-
- /* xd_maxsectors depends on controller - so set after detection */
- blk_queue_max_hw_sectors(xd_queue, xd_maxsectors);
-
- for (i = 0; i < xd_drives; i++)
- add_disk(xd_gendisk[i]);
-
- return 0;
-
-out5:
- free_irq(xd_irq, NULL);
-out4:
- for (i = 0; i < xd_drives; i++)
- put_disk(xd_gendisk[i]);
-out3:
- if (xd_maxsectors)
- release_region(xd_iobase,4);
-
- if (xd_dma_buffer)
- xd_dma_mem_free((unsigned long)xd_dma_buffer,
- xd_maxsectors * 0x200);
-out2:
- blk_cleanup_queue(xd_queue);
-out1a:
- unregister_blkdev(XT_DISK_MAJOR, "xd");
-out1:
- return err;
-Enomem:
- err = -ENOMEM;
- while (i--)
- put_disk(xd_gendisk[i]);
- goto out3;
-}
-
-/* xd_detect: scan the possible BIOS ROM locations for the signature strings */
-static u_char __init xd_detect (u_char *controller, unsigned int *address)
-{
- int i, j;
-
- if (xd_override)
- {
- *controller = xd_type;
- *address = 0;
- return(1);
- }
-
- for (i = 0; i < ARRAY_SIZE(xd_bases); i++) {
- void __iomem *p = ioremap(xd_bases[i], 0x2000);
- if (!p)
- continue;
- for (j = 1; j < ARRAY_SIZE(xd_sigs); j++) {
- const char *s = xd_sigs[j].string;
- if (check_signature(p + xd_sigs[j].offset, s, strlen(s))) {
- *controller = j;
- xd_type = j;
- *address = xd_bases[i];
- iounmap(p);
- return 1;
- }
- }
- iounmap(p);
- }
- return 0;
-}
-
-/* do_xd_request: handle an incoming request */
-static void do_xd_request (struct request_queue * q)
-{
- struct request *req;
-
- if (xdc_busy)
- return;
-
- req = blk_fetch_request(q);
- while (req) {
- unsigned block = blk_rq_pos(req);
- unsigned count = blk_rq_cur_sectors(req);
- XD_INFO *disk = req->rq_disk->private_data;
- int res = -EIO;
- int retry;
-
- if (req->cmd_type != REQ_TYPE_FS)
- goto done;
- if (block + count > get_capacity(req->rq_disk))
- goto done;
- for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
- res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
- block, count);
- done:
- /* wrap up, 0 = success, -errno = fail */
- if (!__blk_end_request_cur(req, res))
- req = blk_fetch_request(q);
- }
-}
-
-static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- XD_INFO *p = bdev->bd_disk->private_data;
-
- geo->heads = p->heads;
- geo->sectors = p->sectors;
- geo->cylinders = p->cylinders;
- return 0;
-}
-
-/* xd_ioctl: handle device ioctl's */
-static int xd_locked_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg)
-{
- switch (cmd) {
- case HDIO_SET_DMA:
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
- if (xdc_busy) return -EBUSY;
- nodma = !arg;
- if (nodma && xd_dma_buffer) {
- xd_dma_mem_free((unsigned long)xd_dma_buffer,
- xd_maxsectors * 0x200);
- xd_dma_buffer = NULL;
- } else if (!nodma && !xd_dma_buffer) {
- xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
- if (!xd_dma_buffer) {
- nodma = XD_DONT_USE_DMA;
- return -ENOMEM;
- }
- }
- return 0;
- case HDIO_GET_DMA:
- return put_user(!nodma, (long __user *) arg);
- case HDIO_GET_MULTCOUNT:
- return put_user(xd_maxsectors, (long __user *) arg);
- default:
- return -EINVAL;
- }
-}
-
-static int xd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long param)
-{
- int ret;
-
- mutex_lock(&xd_mutex);
- ret = xd_locked_ioctl(bdev, mode, cmd, param);
- mutex_unlock(&xd_mutex);
-
- return ret;
-}
-
-/* xd_readwrite: handle a read/write request */
-static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count)
-{
- int drive = p->unit;
- u_char cmdblk[6],sense[4];
- u_short track,cylinder;
- u_char head,sector,control,mode = PIO_MODE,temp;
- char **real_buffer;
- register int i;
-
-#ifdef DEBUG_READWRITE
- printk("xd_readwrite: operation = %s, drive = %d, buffer = 0x%X, block = %d, count = %d\n",operation == READ ? "read" : "write",drive,buffer,block,count);
-#endif /* DEBUG_READWRITE */
-
- spin_unlock_irq(&xd_lock);
-
- control = p->control;
- if (!xd_dma_buffer)
- xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
- while (count) {
- temp = count < xd_maxsectors ? count : xd_maxsectors;
-
- track = block / p->sectors;
- head = track % p->heads;
- cylinder = track / p->heads;
- sector = block % p->sectors;
-
-#ifdef DEBUG_READWRITE
- printk("xd_readwrite: drive = %d, head = %d, cylinder = %d, sector = %d, count = %d\n",drive,head,cylinder,sector,temp);
-#endif /* DEBUG_READWRITE */
-
- if (xd_dma_buffer) {
- mode = xd_setup_dma(operation == READ ? DMA_MODE_READ : DMA_MODE_WRITE,(u_char *)(xd_dma_buffer),temp * 0x200);
- real_buffer = &xd_dma_buffer;
- for (i=0; i < (temp * 0x200); i++)
- xd_dma_buffer[i] = buffer[i];
- }
- else
- real_buffer = &buffer;
-
- xd_build(cmdblk,operation == READ ? CMD_READ : CMD_WRITE,drive,head,cylinder,sector,temp & 0xFF,control);
-
- switch (xd_command(cmdblk,mode,(u_char *)(*real_buffer),(u_char *)(*real_buffer),sense,XD_TIMEOUT)) {
- case 1:
- printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
- xd_recalibrate(drive);
- spin_lock_irq(&xd_lock);
- return -EIO;
- case 2:
- if (sense[0] & 0x30) {
- printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
- switch ((sense[0] & 0x30) >> 4) {
- case 0: printk("drive error, code = 0x%X",sense[0] & 0x0F);
- break;
- case 1: printk("controller error, code = 0x%X",sense[0] & 0x0F);
- break;
- case 2: printk("command error, code = 0x%X",sense[0] & 0x0F);
- break;
- case 3: printk("miscellaneous error, code = 0x%X",sense[0] & 0x0F);
- break;
- }
- }
- if (sense[0] & 0x80)
- printk(" - CHS = %d/%d/%d\n",((sense[2] & 0xC0) << 2) | sense[3],sense[1] & 0x1F,sense[2] & 0x3F);
- /* reported drive number = (sense[1] & 0xE0) >> 5 */
- else
- printk(" - no valid disk address\n");
- spin_lock_irq(&xd_lock);
- return -EIO;
- }
- if (xd_dma_buffer)
- for (i=0; i < (temp * 0x200); i++)
- buffer[i] = xd_dma_buffer[i];
-
- count -= temp, buffer += temp * 0x200, block += temp;
- }
- spin_lock_irq(&xd_lock);
- return 0;
-}
-
-/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
-static void xd_recalibrate (u_char drive)
-{
- u_char cmdblk[6];
-
- xd_build(cmdblk,CMD_RECALIBRATE,drive,0,0,0,0,0);
- if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 8))
- printk("xd%c: warning! error recalibrating, controller may be unstable\n", 'a'+drive);
-}
-
-/* xd_interrupt_handler: interrupt service routine */
-static irqreturn_t xd_interrupt_handler(int irq, void *dev_id)
-{
- if (inb(XD_STATUS) & STAT_INTERRUPT) { /* check if it was our device */
-#ifdef DEBUG_OTHER
- printk("xd_interrupt_handler: interrupt detected\n");
-#endif /* DEBUG_OTHER */
- outb(0,XD_CONTROL); /* acknowledge interrupt */
- wake_up(&xd_wait_int); /* and wake up sleeping processes */
- return IRQ_HANDLED;
- }
- else
- printk("xd: unexpected interrupt\n");
- return IRQ_NONE;
-}
-
-/* xd_setup_dma: set up the DMA controller for a data transfer */
-static u_char xd_setup_dma (u_char mode,u_char *buffer,u_int count)
-{
- unsigned long f;
-
- if (nodma)
- return (PIO_MODE);
- if (((unsigned long) buffer & 0xFFFF0000) != (((unsigned long) buffer + count) & 0xFFFF0000)) {
-#ifdef DEBUG_OTHER
- printk("xd_setup_dma: using PIO, transfer overlaps 64k boundary\n");
-#endif /* DEBUG_OTHER */
- return (PIO_MODE);
- }
-
- f=claim_dma_lock();
- disable_dma(xd_dma);
- clear_dma_ff(xd_dma);
- set_dma_mode(xd_dma,mode);
- set_dma_addr(xd_dma, (unsigned long) buffer);
- set_dma_count(xd_dma,count);
-
- release_dma_lock(f);
-
- return (DMA_MODE); /* use DMA and INT */
-}
-
-/* xd_build: put stuff into an array in a format suitable for the controller */
-static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control)
-{
- cmdblk[0] = command;
- cmdblk[1] = ((drive & 0x07) << 5) | (head & 0x1F);
- cmdblk[2] = ((cylinder & 0x300) >> 2) | (sector & 0x3F);
- cmdblk[3] = cylinder & 0xFF;
- cmdblk[4] = count;
- cmdblk[5] = control;
-
- return (cmdblk);
-}
-
-static void xd_watchdog (unsigned long unused)
-{
- xd_error = 1;
- wake_up(&xd_wait_int);
-}
-
-/* xd_waitport: waits until port & mask == flags or a timeout occurs. return 1 for a timeout */
-static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout)
-{
- u_long expiry = jiffies + timeout;
- int success;
-
- xdc_busy = 1;
- while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry))
- schedule_timeout_uninterruptible(1);
- xdc_busy = 0;
- return (success);
-}
-
-static inline u_int xd_wait_for_IRQ (void)
-{
- unsigned long flags;
- xd_watchdog_int.expires = jiffies + 8 * HZ;
- add_timer(&xd_watchdog_int);
-
- flags=claim_dma_lock();
- enable_dma(xd_dma);
- release_dma_lock(flags);
-
- sleep_on(&xd_wait_int);
- del_timer(&xd_watchdog_int);
- xdc_busy = 0;
-
- flags=claim_dma_lock();
- disable_dma(xd_dma);
- release_dma_lock(flags);
-
- if (xd_error) {
- printk("xd: missed IRQ - command aborted\n");
- xd_error = 0;
- return (1);
- }
- return (0);
-}
-
-/* xd_command: handle all data transfers necessary for a single command */
-static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout)
-{
- u_char cmdblk[6],csb,complete = 0;
-
-#ifdef DEBUG_COMMAND
- printk("xd_command: command = 0x%X, mode = 0x%X, indata = 0x%X, outdata = 0x%X, sense = 0x%X\n",command,mode,indata,outdata,sense);
-#endif /* DEBUG_COMMAND */
-
- outb(0,XD_SELECT);
- outb(mode,XD_CONTROL);
-
- if (xd_waitport(XD_STATUS,STAT_SELECT,STAT_SELECT,timeout))
- return (1);
-
- while (!complete) {
- if (xd_waitport(XD_STATUS,STAT_READY,STAT_READY,timeout))
- return (1);
-
- switch (inb(XD_STATUS) & (STAT_COMMAND | STAT_INPUT)) {
- case 0:
- if (mode == DMA_MODE) {
- if (xd_wait_for_IRQ())
- return (1);
- } else
- outb(outdata ? *outdata++ : 0,XD_DATA);
- break;
- case STAT_INPUT:
- if (mode == DMA_MODE) {
- if (xd_wait_for_IRQ())
- return (1);
- } else
- if (indata)
- *indata++ = inb(XD_DATA);
- else
- inb(XD_DATA);
- break;
- case STAT_COMMAND:
- outb(command ? *command++ : 0,XD_DATA);
- break;
- case STAT_COMMAND | STAT_INPUT:
- complete = 1;
- break;
- }
- }
- csb = inb(XD_DATA);
-
- if (xd_waitport(XD_STATUS,0,STAT_SELECT,timeout)) /* wait until deselected */
- return (1);
-
- if (csb & CSB_ERROR) { /* read sense data if error */
- xd_build(cmdblk,CMD_SENSE,(csb & CSB_LUN) >> 5,0,0,0,0,0);
- if (xd_command(cmdblk,0,sense,NULL,NULL,XD_TIMEOUT))
- printk("xd: warning! sense command failed!\n");
- }
-
-#ifdef DEBUG_COMMAND
- printk("xd_command: completed with csb = 0x%X\n",csb);
-#endif /* DEBUG_COMMAND */
-
- return (csb & CSB_ERROR);
-}
-
-static u_char __init xd_initdrives (void (*init_drive)(u_char drive))
-{
- u_char cmdblk[6],i,count = 0;
-
- for (i = 0; i < XD_MAXDRIVES; i++) {
- xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0);
- if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) {
- msleep_interruptible(XD_INIT_DISK_DELAY);
-
- init_drive(count);
- count++;
-
- msleep_interruptible(XD_INIT_DISK_DELAY);
- }
- }
- return (count);
-}
-
-static void __init xd_manual_geo_set (u_char drive)
-{
- xd_info[drive].heads = (u_char)(xd_geo[3 * drive + 1]);
- xd_info[drive].cylinders = (u_short)(xd_geo[3 * drive]);
- xd_info[drive].sectors = (u_char)(xd_geo[3 * drive + 2]);
-}
-
-static void __init xd_dtc_init_controller (unsigned int address)
-{
- switch (address) {
- case 0x00000:
- case 0xC8000: break; /*initial: 0x320 */
- case 0xCA000: xd_iobase = 0x324;
- case 0xD0000: /*5150CX*/
- case 0xD8000: break; /*5150CX & 5150XL*/
- default: printk("xd_dtc_init_controller: unsupported BIOS address %06x\n",address);
- break;
- }
- xd_maxsectors = 0x01; /* my card seems to have trouble doing multi-block transfers? */
-
- outb(0,XD_RESET); /* reset the controller */
-}
-
-
-static void __init xd_dtc5150cx_init_drive (u_char drive)
-{
- /* values from controller's BIOS - BIOS chip may be removed */
- static u_short geometry_table[][4] = {
- {0x200,8,0x200,0x100},
- {0x267,2,0x267,0x267},
- {0x264,4,0x264,0x80},
- {0x132,4,0x132,0x0},
- {0x132,2,0x80, 0x132},
- {0x177,8,0x177,0x0},
- {0x132,8,0x84, 0x0},
- {}, /* not used */
- {0x132,6,0x80, 0x100},
- {0x200,6,0x100,0x100},
- {0x264,2,0x264,0x80},
- {0x280,4,0x280,0x100},
- {0x2B9,3,0x2B9,0x2B9},
- {0x2B9,5,0x2B9,0x2B9},
- {0x280,6,0x280,0x100},
- {0x132,4,0x132,0x0}};
- u_char n;
-
- n = inb(XD_JUMPER);
- n = (drive ? n : (n >> 2)) & 0x33;
- n = (n | (n >> 2)) & 0x0F;
- if (xd_geo[3*drive])
- xd_manual_geo_set(drive);
- else
- if (n != 7) {
- xd_info[drive].heads = (u_char)(geometry_table[n][1]); /* heads */
- xd_info[drive].cylinders = geometry_table[n][0]; /* cylinders */
- xd_info[drive].sectors = 17; /* sectors */
-#if 0
- xd_info[drive].rwrite = geometry_table[n][2]; /* reduced write */
- xd_info[drive].precomp = geometry_table[n][3] /* write precomp */
- xd_info[drive].ecc = 0x0B; /* ecc length */
-#endif /* 0 */
- }
- else {
- printk("xd%c: undetermined drive geometry\n",'a'+drive);
- return;
- }
- xd_info[drive].control = 5; /* control byte */
- xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
- xd_recalibrate(drive);
-}
-
-static void __init xd_dtc_init_drive (u_char drive)
-{
- u_char cmdblk[6],buf[64];
-
- xd_build(cmdblk,CMD_DTCGETGEOM,drive,0,0,0,0,0);
- if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
- xd_info[drive].heads = buf[0x0A]; /* heads */
- xd_info[drive].cylinders = ((u_short *) (buf))[0x04]; /* cylinders */
- xd_info[drive].sectors = 17; /* sectors */
- if (xd_geo[3*drive])
- xd_manual_geo_set(drive);
-#if 0
- xd_info[drive].rwrite = ((u_short *) (buf + 1))[0x05]; /* reduced write */
- xd_info[drive].precomp = ((u_short *) (buf + 1))[0x06]; /* write precomp */
- xd_info[drive].ecc = buf[0x0F]; /* ecc length */
-#endif /* 0 */
- xd_info[drive].control = 0; /* control byte */
-
- xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,((u_short *) (buf + 1))[0x05],((u_short *) (buf + 1))[0x06],buf[0x0F]);
- xd_build(cmdblk,CMD_DTCSETSTEP,drive,0,0,0,0,7);
- if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2))
- printk("xd_dtc_init_drive: error setting step rate for xd%c\n", 'a'+drive);
- }
- else
- printk("xd_dtc_init_drive: error reading geometry for xd%c\n", 'a'+drive);
-}
-
-static void __init xd_wd_init_controller (unsigned int address)
-{
- switch (address) {
- case 0x00000:
- case 0xC8000: break; /*initial: 0x320 */
- case 0xCA000: xd_iobase = 0x324; break;
- case 0xCC000: xd_iobase = 0x328; break;
- case 0xCE000: xd_iobase = 0x32C; break;
- case 0xD0000: xd_iobase = 0x328; break; /* ? */
- case 0xD8000: xd_iobase = 0x32C; break; /* ? */
- default: printk("xd_wd_init_controller: unsupported BIOS address %06x\n",address);
- break;
- }
- xd_maxsectors = 0x01; /* this one doesn't wrap properly either... */
-
- outb(0,XD_RESET); /* reset the controller */
-
- msleep(XD_INIT_DISK_DELAY);
-}
-
-static void __init xd_wd_init_drive (u_char drive)
-{
- /* values from controller's BIOS - BIOS may be disabled */
- static u_short geometry_table[][4] = {
- {0x264,4,0x1C2,0x1C2}, /* common part */
- {0x132,4,0x099,0x0},
- {0x267,2,0x1C2,0x1C2},
- {0x267,4,0x1C2,0x1C2},
-
- {0x334,6,0x335,0x335}, /* 1004 series RLL */
- {0x30E,4,0x30F,0x3DC},
- {0x30E,2,0x30F,0x30F},
- {0x267,4,0x268,0x268},
-
- {0x3D5,5,0x3D6,0x3D6}, /* 1002 series RLL */
- {0x3DB,7,0x3DC,0x3DC},
- {0x264,4,0x265,0x265},
- {0x267,4,0x268,0x268}};
-
- u_char cmdblk[6],buf[0x200];
- u_char n = 0,rll,jumper_state,use_jumper_geo;
- u_char wd_1002 = (xd_sigs[xd_type].string[7] == '6');
-
- jumper_state = ~(inb(0x322));
- if (jumper_state & 0x40)
- xd_irq = 9;
- rll = (jumper_state & 0x30) ? (0x04 << wd_1002) : 0;
- xd_build(cmdblk,CMD_READ,drive,0,0,0,1,0);
- if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
- xd_info[drive].heads = buf[0x1AF]; /* heads */
- xd_info[drive].cylinders = ((u_short *) (buf + 1))[0xD6]; /* cylinders */
- xd_info[drive].sectors = 17; /* sectors */
- if (xd_geo[3*drive])
- xd_manual_geo_set(drive);
-#if 0
- xd_info[drive].rwrite = ((u_short *) (buf))[0xD8]; /* reduced write */
- xd_info[drive].wprecomp = ((u_short *) (buf))[0xDA]; /* write precomp */
- xd_info[drive].ecc = buf[0x1B4]; /* ecc length */
-#endif /* 0 */
- xd_info[drive].control = buf[0x1B5]; /* control byte */
- use_jumper_geo = !(xd_info[drive].heads) || !(xd_info[drive].cylinders);
- if (xd_geo[3*drive]) {
- xd_manual_geo_set(drive);
- xd_info[drive].control = rll ? 7 : 5;
- }
- else if (use_jumper_geo) {
- n = (((jumper_state & 0x0F) >> (drive << 1)) & 0x03) | rll;
- xd_info[drive].cylinders = geometry_table[n][0];
- xd_info[drive].heads = (u_char)(geometry_table[n][1]);
- xd_info[drive].control = rll ? 7 : 5;
-#if 0
- xd_info[drive].rwrite = geometry_table[n][2];
- xd_info[drive].wprecomp = geometry_table[n][3];
- xd_info[drive].ecc = 0x0B;
-#endif /* 0 */
- }
- if (!wd_1002) {
- if (use_jumper_geo)
- xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
- geometry_table[n][2],geometry_table[n][3],0x0B);
- else
- xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
- ((u_short *) (buf))[0xD8],((u_short *) (buf))[0xDA],buf[0x1B4]);
- }
- /* 1002 based RLL controller requests converted addressing, but reports physical
- (physical 26 sec., logical 17 sec.)
- 1004 based ???? */
- if (rll & wd_1002) {
- if ((xd_info[drive].cylinders *= 26,
- xd_info[drive].cylinders /= 17) > 1023)
- xd_info[drive].cylinders = 1023; /* 1024 ? */
-#if 0
- xd_info[drive].rwrite *= 26;
- xd_info[drive].rwrite /= 17;
- xd_info[drive].wprecomp *= 26
- xd_info[drive].wprecomp /= 17;
-#endif /* 0 */
- }
- }
- else
- printk("xd_wd_init_drive: error reading geometry for xd%c\n",'a'+drive);
-
-}
-
-static void __init xd_seagate_init_controller (unsigned int address)
-{
- switch (address) {
- case 0x00000:
- case 0xC8000: break; /*initial: 0x320 */
- case 0xD0000: xd_iobase = 0x324; break;
- case 0xD8000: xd_iobase = 0x328; break;
- case 0xE0000: xd_iobase = 0x32C; break;
- default: printk("xd_seagate_init_controller: unsupported BIOS address %06x\n",address);
- break;
- }
- xd_maxsectors = 0x40;
-
- outb(0,XD_RESET); /* reset the controller */
-}
-
-static void __init xd_seagate_init_drive (u_char drive)
-{
- u_char cmdblk[6],buf[0x200];
-
- xd_build(cmdblk,CMD_ST11GETGEOM,drive,0,0,0,1,0);
- if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
- xd_info[drive].heads = buf[0x04]; /* heads */
- xd_info[drive].cylinders = (buf[0x02] << 8) | buf[0x03]; /* cylinders */
- xd_info[drive].sectors = buf[0x05]; /* sectors */
- xd_info[drive].control = 0; /* control byte */
- }
- else
- printk("xd_seagate_init_drive: error reading geometry from xd%c\n", 'a'+drive);
-}
-
-/* Omti support courtesy Dirk Melchers */
-static void __init xd_omti_init_controller (unsigned int address)
-{
- switch (address) {
- case 0x00000:
- case 0xC8000: break; /*initial: 0x320 */
- case 0xD0000: xd_iobase = 0x324; break;
- case 0xD8000: xd_iobase = 0x328; break;
- case 0xE0000: xd_iobase = 0x32C; break;
- default: printk("xd_omti_init_controller: unsupported BIOS address %06x\n",address);
- break;
- }
-
- xd_maxsectors = 0x40;
-
- outb(0,XD_RESET); /* reset the controller */
-}
-
-static void __init xd_omti_init_drive (u_char drive)
-{
- /* gets infos from drive */
- xd_override_init_drive(drive);
-
- /* set other parameters, Hardcoded, not that nice :-) */
- xd_info[drive].control = 2;
-}
-
-/* Xebec support (AK) */
-static void __init xd_xebec_init_controller (unsigned int address)
-{
-/* iobase may be set manually in range 0x300 - 0x33C
- irq may be set manually to 2(9),3,4,5,6,7
- dma may be set manually to 1,2,3
- (How to detect them ???)
-BIOS address may be set manually in range 0x0 - 0xF8000
-If you need non-standard settings use the xd=... command */
-
- switch (address) {
- case 0x00000:
- case 0xC8000: /* initially: xd_iobase==0x320 */
- case 0xD0000:
- case 0xD2000:
- case 0xD4000:
- case 0xD6000:
- case 0xD8000:
- case 0xDA000:
- case 0xDC000:
- case 0xDE000:
- case 0xE0000: break;
- default: printk("xd_xebec_init_controller: unsupported BIOS address %06x\n",address);
- break;
- }
-
- xd_maxsectors = 0x01;
- outb(0,XD_RESET); /* reset the controller */
-
- msleep(XD_INIT_DISK_DELAY);
-}
-
-static void __init xd_xebec_init_drive (u_char drive)
-{
- /* values from controller's BIOS - BIOS chip may be removed */
- static u_short geometry_table[][5] = {
- {0x132,4,0x080,0x080,0x7},
- {0x132,4,0x080,0x080,0x17},
- {0x264,2,0x100,0x100,0x7},
- {0x264,2,0x100,0x100,0x17},
- {0x132,8,0x080,0x080,0x7},
- {0x132,8,0x080,0x080,0x17},
- {0x264,4,0x100,0x100,0x6},
- {0x264,4,0x100,0x100,0x17},
- {0x2BC,5,0x2BC,0x12C,0x6},
- {0x3A5,4,0x3A5,0x3A5,0x7},
- {0x26C,6,0x26C,0x26C,0x7},
- {0x200,8,0x200,0x100,0x17},
- {0x400,5,0x400,0x400,0x7},
- {0x400,6,0x400,0x400,0x7},
- {0x264,8,0x264,0x200,0x17},
- {0x33E,7,0x33E,0x200,0x7}};
- u_char n;
-
- n = inb(XD_JUMPER) & 0x0F; /* BIOS's drive number: same geometry
- is assumed for BOTH drives */
- if (xd_geo[3*drive])
- xd_manual_geo_set(drive);
- else {
- xd_info[drive].heads = (u_char)(geometry_table[n][1]); /* heads */
- xd_info[drive].cylinders = geometry_table[n][0]; /* cylinders */
- xd_info[drive].sectors = 17; /* sectors */
-#if 0
- xd_info[drive].rwrite = geometry_table[n][2]; /* reduced write */
- xd_info[drive].precomp = geometry_table[n][3] /* write precomp */
- xd_info[drive].ecc = 0x0B; /* ecc length */
-#endif /* 0 */
- }
- xd_info[drive].control = geometry_table[n][4]; /* control byte */
- xd_setparam(CMD_XBSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
- xd_recalibrate(drive);
-}
-
-/* xd_override_init_drive: this finds disk geometry in a "binary search" style, narrowing in on the "correct" number of heads
- etc. by trying values until it gets the highest successful value. Idea courtesy Salvador Abreu (spa@fct.unl.pt). */
-static void __init xd_override_init_drive (u_char drive)
-{
- u_short min[] = { 0,0,0 },max[] = { 16,1024,64 },test[] = { 0,0,0 };
- u_char cmdblk[6],i;
-
- if (xd_geo[3*drive])
- xd_manual_geo_set(drive);
- else {
- for (i = 0; i < 3; i++) {
- while (min[i] != max[i] - 1) {
- test[i] = (min[i] + max[i]) / 2;
- xd_build(cmdblk,CMD_SEEK,drive,(u_char) test[0],(u_short) test[1],(u_char) test[2],0,0);
- if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2))
- min[i] = test[i];
- else
- max[i] = test[i];
- }
- test[i] = min[i];
- }
- xd_info[drive].heads = (u_char) min[0] + 1;
- xd_info[drive].cylinders = (u_short) min[1] + 1;
- xd_info[drive].sectors = (u_char) min[2] + 1;
- }
- xd_info[drive].control = 0;
-}
-
-/* xd_setup: initialise controller from command line parameters */
-static void __init do_xd_setup (int *integers)
-{
- switch (integers[0]) {
- case 4: if (integers[4] < 0)
- nodma = 1;
- else if (integers[4] < 8)
- xd_dma = integers[4];
- case 3: if ((integers[3] > 0) && (integers[3] <= 0x3FC))
- xd_iobase = integers[3];
- case 2: if ((integers[2] > 0) && (integers[2] < 16))
- xd_irq = integers[2];
- case 1: xd_override = 1;
- if ((integers[1] >= 0) && (integers[1] < ARRAY_SIZE(xd_sigs)))
- xd_type = integers[1];
- case 0: break;
- default:printk("xd: too many parameters for xd\n");
- }
- xd_maxsectors = 0x01;
-}
-
-/* xd_setparam: set the drive characteristics */
-static void __init xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc)
-{
- u_char cmdblk[14];
-
- xd_build(cmdblk,command,drive,0,0,0,0,0);
- cmdblk[6] = (u_char) (cylinders >> 8) & 0x03;
- cmdblk[7] = (u_char) (cylinders & 0xFF);
- cmdblk[8] = heads & 0x1F;
- cmdblk[9] = (u_char) (rwrite >> 8) & 0x03;
- cmdblk[10] = (u_char) (rwrite & 0xFF);
- cmdblk[11] = (u_char) (wprecomp >> 8) & 0x03;
- cmdblk[12] = (u_char) (wprecomp & 0xFF);
- cmdblk[13] = ecc;
-
- /* Some controllers require geometry info as data, not command */
-
- if (xd_command(cmdblk,PIO_MODE,NULL,&cmdblk[6],NULL,XD_TIMEOUT * 2))
- printk("xd: error setting characteristics for xd%c\n", 'a'+drive);
-}
-
-
-#ifdef MODULE
-
-module_param_array(xd, int, NULL, 0);
-module_param_array(xd_geo, int, NULL, 0);
-module_param(nodma, bool, 0);
-
-MODULE_LICENSE("GPL");
-
-void cleanup_module(void)
-{
- int i;
- unregister_blkdev(XT_DISK_MAJOR, "xd");
- for (i = 0; i < xd_drives; i++) {
- del_gendisk(xd_gendisk[i]);
- put_disk(xd_gendisk[i]);
- }
- blk_cleanup_queue(xd_queue);
- release_region(xd_iobase,4);
- if (xd_drives) {
- free_irq(xd_irq, NULL);
- free_dma(xd_dma);
- if (xd_dma_buffer)
- xd_dma_mem_free((unsigned long)xd_dma_buffer, xd_maxsectors * 0x200);
- }
-}
-#else
-
-static int __init xd_setup (char *str)
-{
- int ints[5];
- get_options (str, ARRAY_SIZE (ints), ints);
- do_xd_setup (ints);
- return 1;
-}
-
-/* xd_manual_geo_init: initialise drive geometry from command line parameters
- (used only for WD drives) */
-static int __init xd_manual_geo_init (char *str)
-{
- int i, integers[1 + 3*XD_MAXDRIVES];
-
- get_options (str, ARRAY_SIZE (integers), integers);
- if (integers[0]%3 != 0) {
- printk("xd: incorrect number of parameters for xd_geo\n");
- return 1;
- }
- for (i = 0; (i < integers[0]) && (i < 3*XD_MAXDRIVES); i++)
- xd_geo[i] = integers[i+1];
- return 1;
-}
-
-__setup ("xd=", xd_setup);
-__setup ("xd_geo=", xd_manual_geo_init);
-
-#endif /* MODULE */
-
-module_init(xd_init);
-MODULE_ALIAS_BLOCKDEV_MAJOR(XT_DISK_MAJOR);
diff --git a/drivers/block/xd.h b/drivers/block/xd.h
deleted file mode 100644
index 37cacef16e93..000000000000
--- a/drivers/block/xd.h
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef _LINUX_XD_H
-#define _LINUX_XD_H
-
-/*
- * This file contains the definitions for the IO ports and errors etc. for XT hard disk controllers (at least the DTC 5150X).
- *
- * Author: Pat Mackinlay, pat@it.com.au
- * Date: 29/09/92
- *
- * Revised: 01/01/93, ...
- *
- * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler, kevinf@agora.rain.com)
- * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and Wim Van Dorst.
- */
-
-#include <linux/interrupt.h>
-
-/* XT hard disk controller registers */
-#define XD_DATA (xd_iobase + 0x00) /* data RW register */
-#define XD_RESET (xd_iobase + 0x01) /* reset WO register */
-#define XD_STATUS (xd_iobase + 0x01) /* status RO register */
-#define XD_SELECT (xd_iobase + 0x02) /* select WO register */
-#define XD_JUMPER (xd_iobase + 0x02) /* jumper RO register */
-#define XD_CONTROL (xd_iobase + 0x03) /* DMAE/INTE WO register */
-#define XD_RESERVED (xd_iobase + 0x03) /* reserved */
-
-/* XT hard disk controller commands (incomplete list) */
-#define CMD_TESTREADY 0x00 /* test drive ready */
-#define CMD_RECALIBRATE 0x01 /* recalibrate drive */
-#define CMD_SENSE 0x03 /* request sense */
-#define CMD_FORMATDRV 0x04 /* format drive */
-#define CMD_VERIFY 0x05 /* read verify */
-#define CMD_FORMATTRK 0x06 /* format track */
-#define CMD_FORMATBAD 0x07 /* format bad track */
-#define CMD_READ 0x08 /* read */
-#define CMD_WRITE 0x0A /* write */
-#define CMD_SEEK 0x0B /* seek */
-
-/* Controller specific commands */
-#define CMD_DTCSETPARAM 0x0C /* set drive parameters (DTC 5150X & CX only?) */
-#define CMD_DTCGETECC 0x0D /* get ecc error length (DTC 5150X only?) */
-#define CMD_DTCREADBUF 0x0E /* read sector buffer (DTC 5150X only?) */
-#define CMD_DTCWRITEBUF 0x0F /* write sector buffer (DTC 5150X only?) */
-#define CMD_DTCREMAPTRK 0x11 /* assign alternate track (DTC 5150X only?) */
-#define CMD_DTCGETPARAM 0xFB /* get drive parameters (DTC 5150X only?) */
-#define CMD_DTCSETSTEP 0xFC /* set step rate (DTC 5150X only?) */
-#define CMD_DTCSETGEOM 0xFE /* set geometry data (DTC 5150X only?) */
-#define CMD_DTCGETGEOM 0xFF /* get geometry data (DTC 5150X only?) */
-#define CMD_ST11GETGEOM 0xF8 /* get geometry data (Seagate ST11R/M only?) */
-#define CMD_WDSETPARAM 0x0C /* set drive parameters (WD 1004A27X only?) */
-#define CMD_XBSETPARAM 0x0C /* set drive parameters (XEBEC only?) */
-
-/* Bits for command status byte */
-#define CSB_ERROR 0x02 /* error */
-#define CSB_LUN 0x20 /* logical Unit Number */
-
-/* XT hard disk controller status bits */
-#define STAT_READY 0x01 /* controller is ready */
-#define STAT_INPUT 0x02 /* data flowing from controller to host */
-#define STAT_COMMAND 0x04 /* controller in command phase */
-#define STAT_SELECT 0x08 /* controller is selected */
-#define STAT_REQUEST 0x10 /* controller requesting data */
-#define STAT_INTERRUPT 0x20 /* controller requesting interrupt */
-
-/* XT hard disk controller control bits */
-#define PIO_MODE 0x00 /* control bits to set for PIO */
-#define DMA_MODE 0x03 /* control bits to set for DMA & interrupt */
-
-#define XD_MAXDRIVES 2 /* maximum 2 drives */
-#define XD_TIMEOUT HZ /* 1 second timeout */
-#define XD_RETRIES 4 /* maximum 4 retries */
-
-#undef DEBUG /* define for debugging output */
-
-#ifdef DEBUG
- #define DEBUG_STARTUP /* debug driver initialisation */
- #define DEBUG_OVERRIDE /* debug override geometry detection */
- #define DEBUG_READWRITE /* debug each read/write command */
- #define DEBUG_OTHER /* debug misc. interrupt/DMA stuff */
- #define DEBUG_COMMAND /* debug each controller command */
-#endif /* DEBUG */
-
-/* this structure defines the XT drives and their types */
-typedef struct {
- u_char heads;
- u_short cylinders;
- u_char sectors;
- u_char control;
- int unit;
-} XD_INFO;
-
-/* this structure defines a ROM BIOS signature */
-typedef struct {
- unsigned int offset;
- const char *string;
- void (*init_controller)(unsigned int address);
- void (*init_drive)(u_char drive);
- const char *name;
-} XD_SIGNATURE;
-
-#ifndef MODULE
-static int xd_manual_geo_init (char *command);
-#endif /* MODULE */
-static u_char xd_detect (u_char *controller, unsigned int *address);
-static u_char xd_initdrives (void (*init_drive)(u_char drive));
-
-static void do_xd_request (struct request_queue * q);
-static int xd_ioctl (struct block_device *bdev,fmode_t mode,unsigned int cmd,unsigned long arg);
-static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count);
-static void xd_recalibrate (u_char drive);
-
-static irqreturn_t xd_interrupt_handler(int irq, void *dev_id);
-static u_char xd_setup_dma (u_char opcode,u_char *buffer,u_int count);
-static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control);
-static void xd_watchdog (unsigned long unused);
-static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout);
-static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout);
-
-/* card specific setup and geometry gathering code */
-static void xd_dtc_init_controller (unsigned int address);
-static void xd_dtc5150cx_init_drive (u_char drive);
-static void xd_dtc_init_drive (u_char drive);
-static void xd_wd_init_controller (unsigned int address);
-static void xd_wd_init_drive (u_char drive);
-static void xd_seagate_init_controller (unsigned int address);
-static void xd_seagate_init_drive (u_char drive);
-static void xd_omti_init_controller (unsigned int address);
-static void xd_omti_init_drive (u_char drive);
-static void xd_xebec_init_controller (unsigned int address);
-static void xd_xebec_init_drive (u_char drive);
-static void xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc);
-static void xd_override_init_drive (u_char drive);
-
-#endif /* _LINUX_XD_H */
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 5ac841ff6cc7..de1f319f7bd7 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -46,6 +46,7 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
+#include <xen/balloon.h>
#include "common.h"
/*
@@ -239,6 +240,7 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
ret = gnttab_unmap_refs(unmap, NULL, pages,
segs_to_unmap);
BUG_ON(ret);
+ free_xenballooned_pages(segs_to_unmap, pages);
segs_to_unmap = 0;
}
@@ -527,8 +529,8 @@ static int xen_blkbk_map(struct blkif_request *req,
GFP_KERNEL);
if (!persistent_gnt)
return -ENOMEM;
- persistent_gnt->page = alloc_page(GFP_KERNEL);
- if (!persistent_gnt->page) {
+ if (alloc_xenballooned_pages(1, &persistent_gnt->page,
+ false)) {
kfree(persistent_gnt);
return -ENOMEM;
}
@@ -879,7 +881,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
goto fail_response;
}
- preq.dev = req->u.rw.handle;
preq.sector_number = req->u.rw.sector_number;
preq.nr_sects = 0;
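
The persistent-grant pages above now come from the Xen balloon rather than alloc_page(); a minimal sketch of that alloc/free pairing, with illustrative names that are not part of this patch, looks roughly like:

	/* Sketch only: ballooned pages must go back via the matching helper. */
	struct page *page;

	if (alloc_xenballooned_pages(1, &page, false /* no highmem */))
		return -ENOMEM;
	/* ... use "page" as the backing page for a grant mapping ... */
	free_xenballooned_pages(1, &page);
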
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 63980722db41..5e237f630c47 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -367,6 +367,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
be->blkif = NULL;
}
+ kfree(be->mode);
kfree(be);
dev_set_drvdata(&dev->dev, NULL);
return 0;
@@ -502,6 +503,7 @@ static void backend_changed(struct xenbus_watch *watch,
= container_of(watch, struct backend_info, backend_watch);
struct xenbus_device *dev = be->dev;
int cdrom = 0;
+ unsigned long handle;
char *device_type;
DPRINTK("");
@@ -521,10 +523,10 @@ static void backend_changed(struct xenbus_watch *watch,
return;
}
- if ((be->major || be->minor) &&
- ((be->major != major) || (be->minor != minor))) {
- pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
- be->major, be->minor, major, minor);
+ if (be->major | be->minor) {
+ if (be->major != major || be->minor != minor)
+ pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
+ be->major, be->minor, major, minor);
return;
}
@@ -542,36 +544,33 @@ static void backend_changed(struct xenbus_watch *watch,
kfree(device_type);
}
- if (be->major == 0 && be->minor == 0) {
- /* Front end dir is a number, which is used as the handle. */
-
- char *p = strrchr(dev->otherend, '/') + 1;
- long handle;
- err = strict_strtoul(p, 0, &handle);
- if (err)
- return;
+ /* Front end dir is a number, which is used as the handle. */
+ err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
+ if (err)
+ return;
- be->major = major;
- be->minor = minor;
+ be->major = major;
+ be->minor = minor;
- err = xen_vbd_create(be->blkif, handle, major, minor,
- (NULL == strchr(be->mode, 'w')), cdrom);
- if (err) {
- be->major = 0;
- be->minor = 0;
- xenbus_dev_fatal(dev, err, "creating vbd structure");
- return;
- }
+ err = xen_vbd_create(be->blkif, handle, major, minor,
+ !strchr(be->mode, 'w'), cdrom);
+ if (err)
+ xenbus_dev_fatal(dev, err, "creating vbd structure");
+ else {
err = xenvbd_sysfs_addif(dev);
if (err) {
xen_vbd_free(&be->blkif->vbd);
- be->major = 0;
- be->minor = 0;
xenbus_dev_fatal(dev, err, "creating sysfs entries");
- return;
}
+ }
+ if (err) {
+ kfree(be->mode);
+ be->mode = NULL;
+ be->major = 0;
+ be->minor = 0;
+ } else {
/* We're potentially connected now */
xen_update_blkif_status(be->blkif);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 11043c18ac5a..c3dae2e0f290 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -791,7 +791,7 @@ static void blkif_restart_queue(struct work_struct *work)
static void blkif_free(struct blkfront_info *info, int suspend)
{
struct llist_node *all_gnts;
- struct grant *persistent_gnt;
+ struct grant *persistent_gnt, *tmp;
struct llist_node *n;
/* Prevent new requests being issued until we fix things up. */
@@ -805,10 +805,17 @@ static void blkif_free(struct blkfront_info *info, int suspend)
/* Remove all persistent grants */
if (info->persistent_gnts_c) {
all_gnts = llist_del_all(&info->persistent_gnts);
- llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
+ persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node);
+ while (persistent_gnt) {
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
__free_page(pfn_to_page(persistent_gnt->pfn));
- kfree(persistent_gnt);
+ tmp = persistent_gnt;
+ n = persistent_gnt->node.next;
+ if (n)
+ persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node);
+ else
+ persistent_gnt = NULL;
+ kfree(tmp);
}
info->persistent_gnts_c = 0;
}
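
The hunk above open-codes the walk instead of using llist_for_each_entry_safe(); a hedged sketch of the same idiom, with made-up struct and function names, is:

	/* Illustrative only: drain a lock-less list, reading ->next before
	 * the entry is freed. */
	struct item {
		struct llist_node node;
		/* payload ... */
	};

	static void drain_items(struct llist_head *head)
	{
		struct llist_node *pos = llist_del_all(head);

		while (pos) {
			struct item *it = llist_entry(pos, struct item, node);

			pos = pos->next;	/* grab next before freeing */
			kfree(it);
		}
	}
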
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index e9f203eadb1f..fdfd61a2d523 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -26,6 +26,7 @@ config BT_HCIBTSDIO
config BT_HCIUART
tristate "HCI UART driver"
+ depends on TTY
help
Bluetooth HCI UART driver.
This driver is required if you want to use Bluetooth devices with
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 33c9a44a9678..a8a41e07a221 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -349,7 +349,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
ret = ath3k_get_state(udev, &fw_state);
if (ret < 0) {
- BT_ERR("Can't get state to change to load configration err");
+ BT_ERR("Can't get state to change to load configuration err");
return -EBUSY;
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72bedad6bf8c..3bb6fa3930be 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -53,7 +53,7 @@ source "drivers/tty/serial/Kconfig"
config TTY_PRINTK
bool "TTY driver to output user messages via printk"
- depends on EXPERT
+ depends on EXPERT && TTY
default n
---help---
If you say Y here, the support for writing user messages (i.e.
@@ -159,7 +159,7 @@ source "drivers/tty/hvc/Kconfig"
config VIRTIO_CONSOLE
tristate "Virtio console"
- depends on VIRTIO
+ depends on VIRTIO && TTY
select HVC_DRIVER
help
Virtio console for use with lguest and other hypervisors.
@@ -392,6 +392,7 @@ config XILINX_HWICAP
config R3964
tristate "Siemens R3964 line discipline"
+ depends on TTY
---help---
This driver allows synchronous communication with devices using the
Siemens R3964 packet protocol. Unless you are dealing with special
@@ -439,7 +440,7 @@ source "drivers/char/pcmcia/Kconfig"
config MWAVE
tristate "ACP Modem (Mwave) support"
- depends on X86
+ depends on X86 && TTY
select SERIAL_8250
---help---
The ACP modem (Mwave) for Linux is a WinModem. It is composed of a
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index dbd901e94ea6..b8e2014cb9cb 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -60,7 +60,6 @@ struct intel_gtt_driver {
};
static struct _intel_private {
- struct intel_gtt base;
const struct intel_gtt_driver *driver;
struct pci_dev *pcidev; /* device one */
struct pci_dev *bridge_dev;
@@ -75,7 +74,18 @@ static struct _intel_private {
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
+ phys_addr_t scratch_page_dma;
int refcount;
+ /* Whether i915 needs to use the dmar apis or not. */
+ unsigned int needs_dmar : 1;
+ phys_addr_t gma_bus_addr;
+ /* Size of memory reserved for graphics by the BIOS */
+ unsigned int stolen_size;
+ /* Total number of gtt entries. */
+ unsigned int gtt_total_entries;
+ /* Part of the gtt that is mappable by the cpu, for those chips where
+ * this is not the full gtt. */
+ unsigned int gtt_mappable_entries;
} intel_private;
#define INTEL_GTT_GEN intel_private.driver->gen
@@ -291,15 +301,15 @@ static int intel_gtt_setup_scratch_page(void)
get_page(page);
set_pages_uc(page, 1);
- if (intel_private.base.needs_dmar) {
+ if (intel_private.needs_dmar) {
dma_addr = pci_map_page(intel_private.pcidev, page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
return -EINVAL;
- intel_private.base.scratch_page_dma = dma_addr;
+ intel_private.scratch_page_dma = dma_addr;
} else
- intel_private.base.scratch_page_dma = page_to_phys(page);
+ intel_private.scratch_page_dma = page_to_phys(page);
intel_private.scratch_page = page;
@@ -506,7 +516,7 @@ static unsigned int intel_gtt_total_entries(void)
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
- return intel_private.base.gtt_mappable_entries;
+ return intel_private.gtt_mappable_entries;
}
}
@@ -546,7 +556,7 @@ static unsigned int intel_gtt_mappable_entries(void)
static void intel_gtt_teardown_scratch_page(void)
{
set_pages_wb(intel_private.scratch_page, 1);
- pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
+ pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(intel_private.scratch_page);
__free_page(intel_private.scratch_page);
@@ -562,6 +572,40 @@ static void intel_gtt_cleanup(void)
intel_gtt_teardown_scratch_page();
}
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline int needs_ilk_vtd_wa(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ const unsigned short gpu_devid = intel_private.pcidev->device;
+
+ /* Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ intel_iommu_gfx_mapped)
+ return 1;
+#endif
+ return 0;
+}
+
+static bool intel_gtt_can_wc(void)
+{
+ if (INTEL_GTT_GEN <= 2)
+ return false;
+
+ if (INTEL_GTT_GEN >= 6)
+ return false;
+
+ /* Reports of major corruption with ILK vt'd enabled */
+ if (needs_ilk_vtd_wa())
+ return false;
+
+ return true;
+}
+
static int intel_gtt_init(void)
{
u32 gma_addr;
@@ -572,8 +616,8 @@ static int intel_gtt_init(void)
if (ret != 0)
return ret;
- intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
- intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+ intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
+ intel_private.gtt_total_entries = intel_gtt_total_entries();
/* save the PGETBL reg for resume */
intel_private.PGETBL_save =
@@ -585,13 +629,13 @@ static int intel_gtt_init(void)
dev_info(&intel_private.bridge_dev->dev,
"detected gtt size: %dK total, %dK mappable\n",
- intel_private.base.gtt_total_entries * 4,
- intel_private.base.gtt_mappable_entries * 4);
+ intel_private.gtt_total_entries * 4,
+ intel_private.gtt_mappable_entries * 4);
- gtt_map_size = intel_private.base.gtt_total_entries * 4;
+ gtt_map_size = intel_private.gtt_total_entries * 4;
intel_private.gtt = NULL;
- if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
+ if (intel_gtt_can_wc())
intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
gtt_map_size);
if (intel_private.gtt == NULL)
@@ -602,13 +646,12 @@ static int intel_gtt_init(void)
iounmap(intel_private.registers);
return -ENOMEM;
}
- intel_private.base.gtt = intel_private.gtt;
global_cache_flush(); /* FIXME: ? */
- intel_private.base.stolen_size = intel_gtt_stolen_size();
+ intel_private.stolen_size = intel_gtt_stolen_size();
- intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+ intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
ret = intel_gtt_setup_scratch_page();
if (ret != 0) {
@@ -623,7 +666,7 @@ static int intel_gtt_init(void)
pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
&gma_addr);
- intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+ intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
return 0;
}
@@ -634,8 +677,7 @@ static int intel_fake_agp_fetch_size(void)
unsigned int aper_size;
int i;
- aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
- / MB(1);
+ aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
for (i = 0; i < num_sizes; i++) {
if (aper_size == intel_fake_agp_sizes[i].size) {
@@ -779,7 +821,7 @@ static int intel_fake_agp_configure(void)
return -EIO;
intel_private.clear_fake_agp = true;
- agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
+ agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
return 0;
}
@@ -841,12 +883,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
{
int ret = -EINVAL;
- if (intel_private.base.do_idle_maps)
- return -ENODEV;
-
if (intel_private.clear_fake_agp) {
- int start = intel_private.base.stolen_size / PAGE_SIZE;
- int end = intel_private.base.gtt_mappable_entries;
+ int start = intel_private.stolen_size / PAGE_SIZE;
+ int end = intel_private.gtt_mappable_entries;
intel_gtt_clear_range(start, end - start);
intel_private.clear_fake_agp = false;
}
@@ -857,7 +896,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (mem->page_count == 0)
goto out;
- if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
+ if (pg_start + mem->page_count > intel_private.gtt_total_entries)
goto out_err;
if (type != mem->type)
@@ -869,7 +908,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (!mem->is_flushed)
global_cache_flush();
- if (intel_private.base.needs_dmar) {
+ if (intel_private.needs_dmar) {
struct sg_table st;
ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
@@ -895,7 +934,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
unsigned int i;
for (i = first_entry; i < (first_entry + num_entries); i++) {
- intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);
@@ -908,12 +947,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
if (mem->page_count == 0)
return 0;
- if (intel_private.base.do_idle_maps)
- return -ENODEV;
-
intel_gtt_clear_range(pg_start, mem->page_count);
- if (intel_private.base.needs_dmar) {
+ if (intel_private.needs_dmar) {
intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
mem->sg_list = NULL;
mem->num_sg = 0;
@@ -1070,25 +1106,6 @@ static void i965_write_entry(dma_addr_t addr,
writel(addr | pte_flags, intel_private.gtt + entry);
}
-/* Certain Gen5 chipsets require require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static inline int needs_idle_maps(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
- const unsigned short gpu_devid = intel_private.pcidev->device;
-
- /* Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
- */
- if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
- gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
- intel_iommu_gfx_mapped)
- return 1;
-#endif
- return 0;
-}
-
static int i9xx_setup(void)
{
u32 reg_addr, gtt_addr;
@@ -1116,9 +1133,6 @@ static int i9xx_setup(void)
break;
}
- if (needs_idle_maps())
- intel_private.base.do_idle_maps = 1;
-
intel_i9xx_setup_flush();
return 0;
@@ -1390,9 +1404,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-struct intel_gtt *intel_gtt_get(void)
+void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
+ phys_addr_t *mappable_base, unsigned long *mappable_end)
{
- return &intel_private.base;
+ *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
+ *stolen_size = intel_private.stolen_size;
+ *mappable_base = intel_private.gma_bus_addr;
+ *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);
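
With the struct intel_gtt export gone, a caller reads the same information through out-parameters; a hypothetical use of the new prototype (variable names are illustrative, not taken from this diff) would be:

	/* Sketch only: mirrors the signature added above. */
	size_t gtt_total, stolen_size;
	phys_addr_t mappable_base;
	unsigned long mappable_end;

	intel_gtt_get(&gtt_total, &stolen_size, &mappable_base, &mappable_end);
	pr_info("GTT: %zu bytes total, %zu stolen, aperture at %pa\n",
		gtt_total, stolen_size, &mappable_base);
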
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 052797b32bd3..01a5ca7425d7 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -181,7 +181,7 @@ static int dsp56k_upload(u_char __user *bin, int len)
static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int dev = iminor(inode) & 0x0f;
switch(dev)
@@ -244,7 +244,7 @@ static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count,
static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int dev = iminor(inode) & 0x0f;
switch(dev)
@@ -306,7 +306,7 @@ static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t co
static long dsp56k_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- int dev = iminor(file->f_path.dentry->d_inode) & 0x0f;
+ int dev = iminor(file_inode(file)) & 0x0f;
void __user *argp = (void __user *)arg;
switch(dev)
@@ -408,7 +408,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
#if 0
static unsigned int dsp56k_poll(struct file *file, poll_table *wait)
{
- int dev = iminor(file->f_path.dentry->d_inode) & 0x0f;
+ int dev = iminor(file_inode(file)) & 0x0f;
switch(dev)
{
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 85156dd0caee..65a8d96c0e93 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -125,7 +125,7 @@ static char dtlk_write_tts(char);
static ssize_t dtlk_read(struct file *file, char __user *buf,
size_t count, loff_t * ppos)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
char ch;
int i = 0, retries;
@@ -177,7 +177,7 @@ static ssize_t dtlk_write(struct file *file, const char __user *buf,
}
#endif
- if (iminor(file->f_path.dentry->d_inode) != DTLK_MINOR)
+ if (iminor(file_inode(file)) != DTLK_MINOR)
return -EINVAL;
while (1) {
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 4673fc4ad931..ac47631ab34f 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -163,7 +163,7 @@ static int exynos_rng_runtime_resume(struct device *dev)
}
-UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
+static UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
exynos_rng_runtime_resume, NULL);
static struct platform_driver exynos_rng_driver = {
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index b65c10395959..10fd71ccf587 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -154,18 +154,7 @@ static struct virtio_driver virtio_rng_driver = {
#endif
};
-static int __init init(void)
-{
- return register_virtio_driver(&virtio_rng_driver);
-}
-
-static void __exit fini(void)
-{
- unregister_virtio_driver(&virtio_rng_driver);
-}
-module_init(init);
-module_exit(fini);
-
+module_virtio_driver(virtio_rng_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio random number driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 1c7fdcd22a98..0ac9b45a585e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1208,6 +1208,16 @@ static int smi_num; /* Used to sequence the SMIs */
#define DEFAULT_REGSPACING 1
#define DEFAULT_REGSIZE 1
+#ifdef CONFIG_ACPI
+static bool si_tryacpi = 1;
+#endif
+#ifdef CONFIG_DMI
+static bool si_trydmi = 1;
+#endif
+static bool si_tryplatform = 1;
+#ifdef CONFIG_PCI
+static bool si_trypci = 1;
+#endif
static bool si_trydefaults = 1;
static char *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
@@ -1238,6 +1248,25 @@ MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
" Documentation/IPMI.txt in the kernel sources for the"
" gory details.");
+#ifdef CONFIG_ACPI
+module_param_named(tryacpi, si_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via ACPI");
+#endif
+#ifdef CONFIG_DMI
+module_param_named(trydmi, si_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via DMI");
+#endif
+module_param_named(tryplatform, si_tryplatform, bool, 0);
+MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via platform"
+ " interfaces like openfirmware");
+#ifdef CONFIG_PCI
+module_param_named(trypci, si_trypci, bool, 0);
+MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via pci");
+#endif
module_param_named(trydefaults, si_trydefaults, bool, 0);
MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
" default scan of the KCS and SMIC interface at the standard"
@@ -3371,13 +3400,15 @@ static int init_ipmi_si(void)
return 0;
initialized = 1;
- rv = platform_driver_register(&ipmi_driver);
- if (rv) {
- printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
- return rv;
+ if (si_tryplatform) {
+ rv = platform_driver_register(&ipmi_driver);
+ if (rv) {
+ printk(KERN_ERR PFX "Unable to register "
+ "driver: %d\n", rv);
+ return rv;
+ }
}
-
/* Parse out the si_type string into its components. */
str = si_type_str;
if (*str != '\0') {
@@ -3400,24 +3431,31 @@ static int init_ipmi_si(void)
return 0;
#ifdef CONFIG_PCI
- rv = pci_register_driver(&ipmi_pci_driver);
- if (rv)
- printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
- else
- pci_registered = 1;
+ if (si_trypci) {
+ rv = pci_register_driver(&ipmi_pci_driver);
+ if (rv)
+ printk(KERN_ERR PFX "Unable to register "
+ "PCI driver: %d\n", rv);
+ else
+ pci_registered = 1;
+ }
#endif
#ifdef CONFIG_ACPI
- pnp_register_driver(&ipmi_pnp_driver);
- pnp_registered = 1;
+ if (si_tryacpi) {
+ pnp_register_driver(&ipmi_pnp_driver);
+ pnp_registered = 1;
+ }
#endif
#ifdef CONFIG_DMI
- dmi_find_bmc();
+ if (si_trydmi)
+ dmi_find_bmc();
#endif
#ifdef CONFIG_ACPI
- spmi_find_bmc();
+ if (si_tryacpi)
+ spmi_find_bmc();
#endif
/* We prefer devices with interrupts, but in the case of a machine
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index a741e418b456..dafd9ac6428f 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -294,7 +294,7 @@ static int lp_wait_ready(int minor, int nonblock)
static ssize_t lp_write(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
struct parport *port = lp_table[minor].dev->port;
char *kbuf = lp_table[minor].lp_buffer;
ssize_t retv = 0;
@@ -413,7 +413,7 @@ static ssize_t lp_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
DEFINE_WAIT(wait);
- unsigned int minor=iminor(file->f_path.dentry->d_inode);
+ unsigned int minor=iminor(file_inode(file));
struct parport *port = lp_table[minor].dev->port;
ssize_t retval = 0;
char *kbuf = lp_table[minor].lp_buffer;
@@ -679,7 +679,7 @@ static long lp_ioctl(struct file *file, unsigned int cmd,
struct timeval par_timeout;
int ret;
- minor = iminor(file->f_path.dentry->d_inode);
+ minor = iminor(file_inode(file));
mutex_lock(&lp_mutex);
switch (cmd) {
case LPSETTIMEOUT:
@@ -707,7 +707,7 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
struct timeval par_timeout;
int ret;
- minor = iminor(file->f_path.dentry->d_inode);
+ minor = iminor(file_inode(file));
mutex_lock(&lp_mutex);
switch (cmd) {
case LPSETTIMEOUT:
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index c6fa3bc2baa8..2c644afbcdd4 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -399,7 +399,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
{
unsigned long p = *ppos;
ssize_t low_count, read, sz;
- char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+ char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
int err = 0;
read = 0;
@@ -527,7 +527,7 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
unsigned long p = *ppos;
ssize_t wrote = 0;
ssize_t virtr = 0;
- char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+ char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
int err = 0;
if (p < (unsigned long) high_memory) {
@@ -595,7 +595,7 @@ static ssize_t write_port(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long i = *ppos;
- const char __user * tmp = buf;
+ const char __user *tmp = buf;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
@@ -708,7 +708,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
loff_t ret;
- mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
+ mutex_lock(&file_inode(file)->i_mutex);
switch (orig) {
case SEEK_CUR:
offset += file->f_pos;
@@ -725,11 +725,11 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
default:
ret = -EINVAL;
}
- mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
+ mutex_unlock(&file_inode(file)->i_mutex);
return ret;
}
-static int open_port(struct inode * inode, struct file * filp)
+static int open_port(struct inode *inode, struct file *filp)
{
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
@@ -898,7 +898,7 @@ static int __init chr_dev_init(void)
continue;
/*
- * Create /dev/port?
+ * Create /dev/port?
*/
if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
continue;
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 522136d40843..190d4423653f 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -183,19 +183,12 @@ static const struct file_operations misc_fops = {
int misc_register(struct miscdevice * misc)
{
- struct miscdevice *c;
dev_t dev;
int err = 0;
INIT_LIST_HEAD(&misc->list);
mutex_lock(&misc_mtx);
- list_for_each_entry(c, &misc_list, list) {
- if (c->minor == misc->minor) {
- mutex_unlock(&misc_mtx);
- return -EBUSY;
- }
- }
if (misc->minor == MISC_DYNAMIC_MINOR) {
int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
@@ -205,6 +198,15 @@ int misc_register(struct miscdevice * misc)
}
misc->minor = DYNAMIC_MINORS - i - 1;
set_bit(i, misc_minors);
+ } else {
+ struct miscdevice *c;
+
+ list_for_each_entry(c, &misc_list, list) {
+ if (c->minor == misc->minor) {
+ mutex_unlock(&misc_mtx);
+ return -EBUSY;
+ }
+ }
}
dev = MKDEV(MISC_MAJOR, misc->minor);
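For reference, a minimal registration sketch using the dynamic-minor path that the reordered check leaves alone (names here are hypothetical, not from this patch):

	static const struct file_operations example_fops; /* assumed defined elsewhere */

	static struct miscdevice example_dev = {
		.minor = MISC_DYNAMIC_MINOR,
		.name  = "example",
		.fops  = &example_fops,
	};

	/* a fixed .minor would instead hit the duplicate check moved into the else branch */
	err = misc_register(&example_dev);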
diff --git a/drivers/char/nsc_gpio.c b/drivers/char/nsc_gpio.c
index 808d44e9a32a..b07b119ae57f 100644
--- a/drivers/char/nsc_gpio.c
+++ b/drivers/char/nsc_gpio.c
@@ -41,7 +41,7 @@ void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index)
ssize_t nsc_gpio_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
- unsigned m = iminor(file->f_path.dentry->d_inode);
+ unsigned m = iminor(file_inode(file));
struct nsc_gpio_ops *amp = file->private_data;
struct device *dev = amp->dev;
size_t i;
@@ -104,7 +104,7 @@ ssize_t nsc_gpio_write(struct file *file, const char __user *data,
ssize_t nsc_gpio_read(struct file *file, char __user * buf,
size_t len, loff_t * ppos)
{
- unsigned m = iminor(file->f_path.dentry->d_inode);
+ unsigned m = iminor(file_inode(file));
int value;
struct nsc_gpio_ops *amp = file->private_data;
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 6614416a8623..2a166d56738a 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -7,7 +7,7 @@ menu "PCMCIA character devices"
config SYNCLINK_CS
tristate "SyncLink PC Card support"
- depends on PCMCIA
+ depends on PCMCIA && TTY
help
Enable support for the SyncLink PC Card serial adapter, running
asynchronous and HDLC communications up to 512Kbps. The port is
@@ -45,7 +45,7 @@ config CARDMAN_4040
config IPWIRELESS
tristate "IPWireless 3G UMTS PCMCIA card support"
- depends on PCMCIA && NETDEVICES
+ depends on PCMCIA && NETDEVICES && TTY
select PPP
help
This is a driver for 3G UMTS PCMCIA card from IPWireless company. In
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index a7584860e9a7..c115217c79ae 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1400,7 +1400,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct cm4000_dev *dev = filp->private_data;
unsigned int iobase = dev->p_dev->resource[0]->start;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct pcmcia_device *link;
int size;
int rc;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index b66eaa04f8cb..5c5cc00ebb07 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -102,8 +102,7 @@ static MGSL_PARAMS default_params = {
ASYNC_PARITY_NONE /* unsigned char parity; */
};
-typedef struct
-{
+typedef struct {
int count;
unsigned char status;
char data[1];
@@ -210,7 +209,7 @@ typedef struct _mgslpc_info {
char testing_irq;
unsigned int init_error; /* startup error (DIAGS) */
- char flag_buf[MAX_ASYNC_BUFFER_SIZE];
+ char *flag_buf;
bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
@@ -326,10 +325,10 @@ typedef struct _mgslpc_info {
#define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg))
#define set_reg_bits(info, reg, mask) \
- write_reg(info, (reg), \
+ write_reg(info, (reg), \
(unsigned char) (read_reg(info, (reg)) | (mask)))
#define clear_reg_bits(info, reg, mask) \
- write_reg(info, (reg), \
+ write_reg(info, (reg), \
(unsigned char) (read_reg(info, (reg)) & ~(mask)))
/*
* interrupt enable/disable routines
@@ -356,10 +355,10 @@ static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short
}
#define port_irq_disable(info, mask) \
- { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
+ { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
#define port_irq_enable(info, mask) \
- { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
+ { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
static void rx_start(MGSLPC_INFO *info);
static void rx_stop(MGSLPC_INFO *info);
@@ -397,7 +396,7 @@ static int adapter_test(MGSLPC_INFO *info);
static int claim_resources(MGSLPC_INFO *info);
static void release_resources(MGSLPC_INFO *info);
-static void mgslpc_add_device(MGSLPC_INFO *info);
+static int mgslpc_add_device(MGSLPC_INFO *info);
static void mgslpc_remove_device(MGSLPC_INFO *info);
static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty);
@@ -514,49 +513,56 @@ static const struct tty_port_operations mgslpc_port_ops = {
static int mgslpc_probe(struct pcmcia_device *link)
{
- MGSLPC_INFO *info;
- int ret;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("mgslpc_attach\n");
-
- info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
- if (!info) {
- printk("Error can't allocate device instance data\n");
- return -ENOMEM;
- }
-
- info->magic = MGSLPC_MAGIC;
- tty_port_init(&info->port);
- info->port.ops = &mgslpc_port_ops;
- INIT_WORK(&info->task, bh_handler);
- info->max_frame_size = 4096;
- info->port.close_delay = 5*HZ/10;
- info->port.closing_wait = 30*HZ;
- init_waitqueue_head(&info->status_event_wait_q);
- init_waitqueue_head(&info->event_wait_q);
- spin_lock_init(&info->lock);
- spin_lock_init(&info->netlock);
- memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
- info->idle_mode = HDLC_TXIDLE_FLAGS;
- info->imra_value = 0xffff;
- info->imrb_value = 0xffff;
- info->pim_value = 0xff;
-
- info->p_dev = link;
- link->priv = info;
-
- /* Initialize the struct pcmcia_device structure */
-
- ret = mgslpc_config(link);
- if (ret) {
- tty_port_destroy(&info->port);
- return ret;
- }
-
- mgslpc_add_device(info);
-
- return 0;
+ MGSLPC_INFO *info;
+ int ret;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("mgslpc_attach\n");
+
+ info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
+ if (!info) {
+ printk("Error can't allocate device instance data\n");
+ return -ENOMEM;
+ }
+
+ info->magic = MGSLPC_MAGIC;
+ tty_port_init(&info->port);
+ info->port.ops = &mgslpc_port_ops;
+ INIT_WORK(&info->task, bh_handler);
+ info->max_frame_size = 4096;
+ info->port.close_delay = 5*HZ/10;
+ info->port.closing_wait = 30*HZ;
+ init_waitqueue_head(&info->status_event_wait_q);
+ init_waitqueue_head(&info->event_wait_q);
+ spin_lock_init(&info->lock);
+ spin_lock_init(&info->netlock);
+ memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
+ info->idle_mode = HDLC_TXIDLE_FLAGS;
+ info->imra_value = 0xffff;
+ info->imrb_value = 0xffff;
+ info->pim_value = 0xff;
+
+ info->p_dev = link;
+ link->priv = info;
+
+ /* Initialize the struct pcmcia_device structure */
+
+ ret = mgslpc_config(link);
+ if (ret != 0)
+ goto failed;
+
+ ret = mgslpc_add_device(info);
+ if (ret != 0)
+ goto failed_release;
+
+ return 0;
+
+failed_release:
+ mgslpc_release((u_long)link);
+failed:
+ tty_port_destroy(&info->port);
+ kfree(info);
+ return ret;
}
/* Card has been inserted.
@@ -569,35 +575,35 @@ static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data)
static int mgslpc_config(struct pcmcia_device *link)
{
- MGSLPC_INFO *info = link->priv;
- int ret;
+ MGSLPC_INFO *info = link->priv;
+ int ret;
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("mgslpc_config(0x%p)\n", link);
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("mgslpc_config(0x%p)\n", link);
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
- ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
- if (ret != 0)
- goto failed;
+ ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
+ if (ret != 0)
+ goto failed;
- link->config_index = 8;
- link->config_regs = PRESENT_OPTION;
+ link->config_index = 8;
+ link->config_regs = PRESENT_OPTION;
- ret = pcmcia_request_irq(link, mgslpc_isr);
- if (ret)
- goto failed;
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
+ ret = pcmcia_request_irq(link, mgslpc_isr);
+ if (ret)
+ goto failed;
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
- info->io_base = link->resource[0]->start;
- info->irq_level = link->irq;
- return 0;
+ info->io_base = link->resource[0]->start;
+ info->irq_level = link->irq;
+ return 0;
failed:
- mgslpc_release((u_long)link);
- return -ENODEV;
+ mgslpc_release((u_long)link);
+ return -ENODEV;
}
/* Card has been removed.
@@ -703,12 +709,12 @@ static void tx_pause(struct tty_struct *tty)
if (mgslpc_paranoia_check(info, tty->name, "tx_pause"))
return;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("tx_pause(%s)\n",info->device_name);
+ printk("tx_pause(%s)\n", info->device_name);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (info->tx_enabled)
- tx_stop(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ tx_stop(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
static void tx_release(struct tty_struct *tty)
@@ -719,12 +725,12 @@ static void tx_release(struct tty_struct *tty)
if (mgslpc_paranoia_check(info, tty->name, "tx_release"))
return;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("tx_release(%s)\n",info->device_name);
+ printk("tx_release(%s)\n", info->device_name);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_enabled)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock,flags);
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Return next bottom half action to perform.
@@ -735,7 +741,7 @@ static int bh_action(MGSLPC_INFO *info)
unsigned long flags;
int rc = 0;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (info->pending_bh & BH_RECEIVE) {
info->pending_bh &= ~BH_RECEIVE;
@@ -754,7 +760,7 @@ static int bh_action(MGSLPC_INFO *info)
info->bh_requested = false;
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return rc;
}
@@ -765,11 +771,8 @@ static void bh_handler(struct work_struct *work)
struct tty_struct *tty;
int action;
- if (!info)
- return;
-
if (debug_level >= DEBUG_LEVEL_BH)
- printk( "%s(%d):bh_handler(%s) entry\n",
+ printk("%s(%d):bh_handler(%s) entry\n",
__FILE__,__LINE__,info->device_name);
info->bh_running = true;
@@ -778,8 +781,8 @@ static void bh_handler(struct work_struct *work)
while((action = bh_action(info)) != 0) {
/* Process work item */
- if ( debug_level >= DEBUG_LEVEL_BH )
- printk( "%s(%d):bh_handler() work item action=%d\n",
+ if (debug_level >= DEBUG_LEVEL_BH)
+ printk("%s(%d):bh_handler() work item action=%d\n",
__FILE__,__LINE__,action);
switch (action) {
@@ -802,7 +805,7 @@ static void bh_handler(struct work_struct *work)
tty_kref_put(tty);
if (debug_level >= DEBUG_LEVEL_BH)
- printk( "%s(%d):bh_handler(%s) exit\n",
+ printk("%s(%d):bh_handler(%s) exit\n",
__FILE__,__LINE__,info->device_name);
}
@@ -831,7 +834,7 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size));
if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):rx_ready_hdlc(eom=%d)\n",__FILE__,__LINE__,eom);
+ printk("%s(%d):rx_ready_hdlc(eom=%d)\n", __FILE__, __LINE__, eom);
if (!info->rx_enabled)
return;
@@ -847,7 +850,8 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
if (eom) {
/* end of frame, get FIFO count from RBCL register */
- if (!(fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f)))
+ fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
+ if (fifo_count == 0)
fifo_count = 32;
} else
fifo_count = 32;
@@ -886,20 +890,13 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
issue_command(info, CHA, CMD_RXFIFO);
}
-static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
+static void rx_ready_async(MGSLPC_INFO *info, int tcd)
{
+ struct tty_port *port = &info->port;
unsigned char data, status, flag;
int fifo_count;
int work = 0;
- struct mgsl_icount *icount = &info->icount;
-
- if (!tty) {
- /* tty is not available anymore */
- issue_command(info, CHA, CMD_RXRESET);
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):rx_ready_async(tty=NULL)\n",__FILE__,__LINE__);
- return;
- }
+ struct mgsl_icount *icount = &info->icount;
if (tcd) {
/* early termination, get FIFO count from RBCL register */
@@ -913,7 +910,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
} else
fifo_count = 32;
- tty_buffer_request_room(tty, fifo_count);
+ tty_buffer_request_room(port, fifo_count);
/* Flush received async data to receive data buffer. */
while (fifo_count) {
data = read_reg(info, CHA + RXFIFO);
@@ -944,7 +941,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
else if (status & BIT6)
flag = TTY_FRAME;
}
- work += tty_insert_flip_char(tty, data, flag);
+ work += tty_insert_flip_char(port, data, flag);
}
issue_command(info, CHA, CMD_RXFIFO);
@@ -957,7 +954,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
}
if (work)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
@@ -1004,7 +1001,7 @@ static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty)
int c;
if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):tx_ready(%s)\n", __FILE__,__LINE__,info->device_name);
+ printk("%s(%d):tx_ready(%s)\n", __FILE__, __LINE__, info->device_name);
if (info->params.mode == MGSL_MODE_HDLC) {
if (!info->tx_active)
@@ -1217,7 +1214,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
if (info->params.mode == MGSL_MODE_HDLC)
rx_ready_hdlc(info, isr & IRQ_RXEOM);
else
- rx_ready_async(info, isr & IRQ_RXEOM, tty);
+ rx_ready_async(info, isr & IRQ_RXEOM);
}
/* transmit IRQs */
@@ -1249,7 +1246,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
*/
if (info->pending_bh && !info->bh_running && !info->bh_requested) {
- if ( debug_level >= DEBUG_LEVEL_ISR )
+ if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):%s queueing bh task.\n",
__FILE__,__LINE__,info->device_name);
schedule_work(&info->task);
@@ -1273,7 +1270,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
int retval = 0;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):startup(%s)\n",__FILE__,__LINE__,info->device_name);
+ printk("%s(%d):startup(%s)\n", __FILE__, __LINE__, info->device_name);
if (info->port.flags & ASYNC_INITIALIZED)
return 0;
@@ -1283,7 +1280,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
if (!info->tx_buf) {
printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
- __FILE__,__LINE__,info->device_name);
+ __FILE__, __LINE__, info->device_name);
return -ENOMEM;
}
}
@@ -1298,15 +1295,15 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
retval = claim_resources(info);
/* perform existence check and diagnostics */
- if ( !retval )
+ if (!retval)
retval = adapter_test(info);
- if ( retval ) {
- if (capable(CAP_SYS_ADMIN) && tty)
+ if (retval) {
+ if (capable(CAP_SYS_ADMIN) && tty)
set_bit(TTY_IO_ERROR, &tty->flags);
release_resources(info);
- return retval;
- }
+ return retval;
+ }
/* program hardware for current parameters */
mgslpc_change_params(info, tty);
@@ -1330,7 +1327,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_shutdown(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
/* clear status wait queue because status changes */
/* can't happen after shutting down the hardware */
@@ -1344,7 +1341,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
info->tx_buf = NULL;
}
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
rx_stop(info);
tx_stop(info);
@@ -1352,12 +1349,12 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
/* TODO:disable interrupts instead of reset to preserve signal states */
reset_device(info);
- if (!tty || tty->termios.c_cflag & HUPCL) {
- info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ if (!tty || tty->termios.c_cflag & HUPCL) {
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
release_resources(info);
@@ -1371,7 +1368,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
{
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
rx_stop(info);
tx_stop(info);
@@ -1396,7 +1393,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
if (info->netcount || (tty && (tty->termios.c_cflag & CREAD)))
rx_start(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Reconfigure adapter based on new parameters
@@ -1411,16 +1408,16 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_change_params(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
cflag = tty->termios.c_cflag;
- /* if B0 rate (hangup) specified then negate DTR and RTS */
- /* otherwise assert DTR and RTS */
- if (cflag & CBAUD)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ /* if B0 rate (hangup) specified then negate RTS and DTR */
+ /* otherwise assert RTS and DTR */
+ if (cflag & CBAUD)
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
@@ -1463,7 +1460,7 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
info->params.data_rate = tty_get_baud_rate(tty);
}
- if ( info->params.data_rate ) {
+ if (info->params.data_rate) {
info->timeout = (32*HZ*bits_per_char) /
info->params.data_rate;
}
@@ -1498,8 +1495,8 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO) {
- printk( "%s(%d):mgslpc_put_char(%d) on %s\n",
- __FILE__,__LINE__,ch,info->device_name);
+ printk("%s(%d):mgslpc_put_char(%d) on %s\n",
+ __FILE__, __LINE__, ch, info->device_name);
}
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
@@ -1508,7 +1505,7 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
if (!info->tx_buf)
return 0;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (info->params.mode == MGSL_MODE_ASYNC || !info->tx_active) {
if (info->tx_count < TXBUFSIZE - 1) {
@@ -1518,7 +1515,7 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
}
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 1;
}
@@ -1531,8 +1528,8 @@ static void mgslpc_flush_chars(struct tty_struct *tty)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk( "%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
- __FILE__,__LINE__,info->device_name,info->tx_count);
+ printk("%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
+ __FILE__, __LINE__, info->device_name, info->tx_count);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars"))
return;
@@ -1542,13 +1539,13 @@ static void mgslpc_flush_chars(struct tty_struct *tty)
return;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk( "%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
- __FILE__,__LINE__,info->device_name);
+ printk("%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
+ __FILE__, __LINE__, info->device_name);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_active)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock,flags);
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Send a block of data
@@ -1569,8 +1566,8 @@ static int mgslpc_write(struct tty_struct * tty,
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk( "%s(%d):mgslpc_write(%s) count=%d\n",
- __FILE__,__LINE__,info->device_name,count);
+ printk("%s(%d):mgslpc_write(%s) count=%d\n",
+ __FILE__, __LINE__, info->device_name, count);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") ||
!info->tx_buf)
@@ -1596,26 +1593,26 @@ static int mgslpc_write(struct tty_struct * tty,
memcpy(info->tx_buf + info->tx_put, buf, c);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1);
info->tx_count += c;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
buf += c;
count -= c;
ret += c;
}
start:
- if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
- spin_lock_irqsave(&info->lock,flags);
+ if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_active)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock,flags);
- }
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
+ }
cleanup:
if (debug_level >= DEBUG_LEVEL_INFO)
- printk( "%s(%d):mgslpc_write(%s) returning=%d\n",
- __FILE__,__LINE__,info->device_name,ret);
+ printk("%s(%d):mgslpc_write(%s) returning=%d\n",
+ __FILE__, __LINE__, info->device_name, ret);
return ret;
}
@@ -1643,7 +1640,7 @@ static int mgslpc_write_room(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_write_room(%s)=%d\n",
- __FILE__,__LINE__, info->device_name, ret);
+ __FILE__, __LINE__, info->device_name, ret);
return ret;
}
@@ -1656,7 +1653,7 @@ static int mgslpc_chars_in_buffer(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_chars_in_buffer(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer"))
return 0;
@@ -1668,7 +1665,7 @@ static int mgslpc_chars_in_buffer(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
- __FILE__,__LINE__, info->device_name, rc);
+ __FILE__, __LINE__, info->device_name, rc);
return rc;
}
@@ -1682,15 +1679,15 @@ static void mgslpc_flush_buffer(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_flush_buffer(%s) entry\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer"))
return;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->tx_count = info->tx_put = info->tx_get = 0;
del_timer(&info->tx_timer);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
wake_up_interruptible(&tty->write_wait);
tty_wakeup(tty);
@@ -1705,17 +1702,17 @@ static void mgslpc_send_xchar(struct tty_struct *tty, char ch)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_send_xchar(%s,%d)\n",
- __FILE__,__LINE__, info->device_name, ch );
+ __FILE__, __LINE__, info->device_name, ch);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar"))
return;
info->x_char = ch;
if (ch) {
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_enabled)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock,flags);
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
}
}
@@ -1728,7 +1725,7 @@ static void mgslpc_throttle(struct tty_struct * tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_throttle(%s) entry\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle"))
return;
@@ -1736,11 +1733,11 @@ static void mgslpc_throttle(struct tty_struct * tty)
if (I_IXOFF(tty))
mgslpc_send_xchar(tty, STOP_CHAR(tty));
- if (tty->termios.c_cflag & CRTSCTS) {
- spin_lock_irqsave(&info->lock,flags);
+ if (tty->termios.c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock, flags);
info->serial_signals &= ~SerialSignal_RTS;
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
}
@@ -1753,7 +1750,7 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_unthrottle(%s) entry\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle"))
return;
@@ -1765,11 +1762,11 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
mgslpc_send_xchar(tty, START_CHAR(tty));
}
- if (tty->termios.c_cflag & CRTSCTS) {
- spin_lock_irqsave(&info->lock,flags);
+ if (tty->termios.c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock, flags);
info->serial_signals |= SerialSignal_RTS;
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
}
@@ -1807,33 +1804,33 @@ static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params)
*
* Arguments:
*
- * info pointer to device instance data
- * new_params user buffer containing new serial params
+ * info pointer to device instance data
+ * new_params user buffer containing new serial params
*
* Returns: 0 if success, otherwise error code
*/
static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty)
{
- unsigned long flags;
+ unsigned long flags;
MGSL_PARAMS tmp_params;
int err;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):set_params %s\n", __FILE__,__LINE__,
- info->device_name );
+ info->device_name);
COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
if (err) {
- if ( debug_level >= DEBUG_LEVEL_INFO )
- printk( "%s(%d):set_params(%s) user buffer copy failed\n",
- __FILE__,__LINE__,info->device_name);
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):set_params(%s) user buffer copy failed\n",
+ __FILE__, __LINE__, info->device_name);
return -EFAULT;
}
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
- mgslpc_change_params(info, tty);
+ mgslpc_change_params(info, tty);
return 0;
}
@@ -1851,13 +1848,13 @@ static int get_txidle(MGSLPC_INFO * info, int __user *idle_mode)
static int set_txidle(MGSLPC_INFO * info, int idle_mode)
{
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("set_txidle(%s,%d)\n", info->device_name, idle_mode);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->idle_mode = idle_mode;
tx_set_idle(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
@@ -1874,11 +1871,11 @@ static int get_interface(MGSLPC_INFO * info, int __user *if_mode)
static int set_interface(MGSLPC_INFO * info, int if_mode)
{
- unsigned long flags;
+ unsigned long flags;
unsigned char val;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("set_interface(%s,%d)\n", info->device_name, if_mode);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->if_mode = if_mode;
val = read_reg(info, PVR) & 0x0f;
@@ -1890,18 +1887,18 @@ static int set_interface(MGSLPC_INFO * info, int if_mode)
}
write_reg(info, PVR, val);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
{
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("set_txenable(%s,%d)\n", info->device_name, enable);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (enable) {
if (!info->tx_enabled)
tx_start(info, tty);
@@ -1909,18 +1906,18 @@ static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
if (info->tx_enabled)
tx_stop(info);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
static int tx_abort(MGSLPC_INFO * info)
{
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("tx_abort(%s)\n", info->device_name);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (info->tx_active && info->tx_count &&
info->params.mode == MGSL_MODE_HDLC) {
/* clear data count so FIFO is not filled on next IRQ.
@@ -1929,18 +1926,18 @@ static int tx_abort(MGSLPC_INFO * info)
info->tx_count = info->tx_put = info->tx_get = 0;
info->tx_aborting = true;
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
static int set_rxenable(MGSLPC_INFO * info, int enable)
{
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("set_rxenable(%s,%d)\n", info->device_name, enable);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (enable) {
if (!info->rx_enabled)
rx_start(info);
@@ -1948,21 +1945,21 @@ static int set_rxenable(MGSLPC_INFO * info, int enable)
if (info->rx_enabled)
rx_stop(info);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
/* wait for specified event to occur
*
- * Arguments: info pointer to device instance data
- * mask pointer to bitmask of events to wait for
- * Return Value: 0 if successful and bit mask updated with
+ * Arguments: info pointer to device instance data
+ * mask pointer to bitmask of events to wait for
+ * Return Value: 0 if successful and bit mask updated with
* of events triggerred,
- * otherwise error code
+ * otherwise error code
*/
static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
{
- unsigned long flags;
+ unsigned long flags;
int s;
int rc=0;
struct mgsl_icount cprev, cnow;
@@ -1978,18 +1975,18 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("wait_events(%s,%d)\n", info->device_name, mask);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
/* return immediately if state matches requested events */
get_signals(info);
s = info->serial_signals;
events = mask &
( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
- ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
+ ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
if (events) {
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
goto exit;
}
@@ -2004,7 +2001,7 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&info->event_wait_q, &wait);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
for(;;) {
@@ -2015,11 +2012,11 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
}
/* get current irq counts */
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
cnow = info->icount;
newsigs = info->input_signal_events;
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
/* if no change, wait aborted for some reason */
if (newsigs.dsr_up == oldsigs.dsr_up &&
@@ -2058,10 +2055,10 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
set_current_state(TASK_RUNNING);
if (mask & MgslEvent_ExitHuntMode) {
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!waitqueue_active(&info->event_wait_q))
irq_disable(info, CHA, IRQ_EXITHUNT);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
}
exit:
if (rc == 0)
@@ -2071,17 +2068,17 @@ exit:
static int modem_input_wait(MGSLPC_INFO *info,int arg)
{
- unsigned long flags;
+ unsigned long flags;
int rc;
struct mgsl_icount cprev, cnow;
DECLARE_WAITQUEUE(wait, current);
/* save current irq counts */
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
cprev = info->icount;
add_wait_queue(&info->status_event_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
for(;;) {
schedule();
@@ -2091,10 +2088,10 @@ static int modem_input_wait(MGSLPC_INFO *info,int arg)
}
/* get new irq counts */
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
cnow = info->icount;
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
/* if no change, wait aborted for some reason */
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
@@ -2125,11 +2122,11 @@ static int tiocmget(struct tty_struct *tty)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
unsigned int result;
- unsigned long flags;
+ unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
@@ -2140,7 +2137,7 @@ static int tiocmget(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s tiocmget() value=%08X\n",
- __FILE__,__LINE__, info->device_name, result );
+ __FILE__, __LINE__, info->device_name, result);
return result;
}
@@ -2150,11 +2147,11 @@ static int tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s tiocmset(%x,%x)\n",
- __FILE__,__LINE__,info->device_name, set, clear);
+ __FILE__, __LINE__, info->device_name, set, clear);
if (set & TIOCM_RTS)
info->serial_signals |= SerialSignal_RTS;
@@ -2165,9 +2162,9 @@ static int tiocmset(struct tty_struct *tty,
if (clear & TIOCM_DTR)
info->serial_signals &= ~SerialSignal_DTR;
- spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
@@ -2184,17 +2181,17 @@ static int mgslpc_break(struct tty_struct *tty, int break_state)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_break(%s,%d)\n",
- __FILE__,__LINE__, info->device_name, break_state);
+ __FILE__, __LINE__, info->device_name, break_state);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break"))
return -EINVAL;
- spin_lock_irqsave(&info->lock,flags);
- if (break_state == -1)
+ spin_lock_irqsave(&info->lock, flags);
+ if (break_state == -1)
set_reg_bits(info, CHA+DAFO, BIT6);
else
clear_reg_bits(info, CHA+DAFO, BIT6);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
@@ -2205,9 +2202,9 @@ static int mgslpc_get_icount(struct tty_struct *tty,
struct mgsl_icount cnow; /* kernel counter temps */
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
cnow = info->icount;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
@@ -2228,9 +2225,9 @@ static int mgslpc_get_icount(struct tty_struct *tty,
*
* Arguments:
*
- * tty pointer to tty instance data
- * cmd IOCTL command code
- * arg command argument/context
+ * tty pointer to tty instance data
+ * cmd IOCTL command code
+ * arg command argument/context
*
* Return Value: 0 if success, otherwise error code
*/
@@ -2241,8 +2238,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
void __user *argp = (void __user *)arg;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
- info->device_name, cmd );
+ printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__, __LINE__,
+ info->device_name, cmd);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl"))
return -ENODEV;
@@ -2288,8 +2285,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
*
* Arguments:
*
- * tty pointer to tty structure
- * termios pointer to buffer to hold returned old termios
+ * tty pointer to tty structure
+ * termios pointer to buffer to hold returned old termios
*/
static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
@@ -2297,8 +2294,8 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_set_termios %s\n", __FILE__,__LINE__,
- tty->driver->name );
+ printk("%s(%d):mgslpc_set_termios %s\n", __FILE__, __LINE__,
+ tty->driver->name);
/* just return if nothing has changed */
if ((tty->termios.c_cflag == old_termios->c_cflag)
@@ -2311,23 +2308,23 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
/* Handle transition to B0 status */
if (old_termios->c_cflag & CBAUD &&
!(tty->termios.c_cflag & CBAUD)) {
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
- spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+ spin_lock_irqsave(&info->lock, flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) &&
tty->termios.c_cflag & CBAUD) {
info->serial_signals |= SerialSignal_DTR;
- if (!(tty->termios.c_cflag & CRTSCTS) ||
- !test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (!(tty->termios.c_cflag & CRTSCTS) ||
+ !test_bit(TTY_THROTTLED, &tty->flags)) {
info->serial_signals |= SerialSignal_RTS;
- }
- spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ }
+ spin_lock_irqsave(&info->lock, flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Handle turning off CRTSCTS */
@@ -2348,15 +2345,15 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
- __FILE__,__LINE__, info->device_name, port->count);
+ __FILE__, __LINE__, info->device_name, port->count);
WARN_ON(!port->count);
if (tty_port_close_start(port, tty, filp) == 0)
goto cleanup;
- if (port->flags & ASYNC_INITIALIZED)
- mgslpc_wait_until_sent(tty, info->timeout);
+ if (port->flags & ASYNC_INITIALIZED)
+ mgslpc_wait_until_sent(tty, info->timeout);
mgslpc_flush_buffer(tty);
@@ -2367,7 +2364,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
tty_port_tty_set(port, NULL);
cleanup:
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
+ printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
tty->driver->name, port->count);
}
@@ -2378,12 +2375,12 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
unsigned long orig_jiffies, char_time;
- if (!info )
+ if (!info)
return;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent"))
return;
@@ -2399,8 +2396,8 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
* Note: use tight timings here to satisfy the NIST-PCTS.
*/
- if ( info->params.data_rate ) {
- char_time = info->timeout/(32 * 5);
+ if (info->params.data_rate) {
+ char_time = info->timeout/(32 * 5);
if (!char_time)
char_time++;
} else
@@ -2431,7 +2428,7 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
exit:
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
}
/* Called by tty_hangup() when a hangup is signaled.
@@ -2443,7 +2440,7 @@ static void mgslpc_hangup(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_hangup(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup"))
return;
@@ -2458,9 +2455,9 @@ static int carrier_raised(struct tty_port *port)
MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
if (info->serial_signals & SerialSignal_DCD)
return 1;
@@ -2472,13 +2469,13 @@ static void dtr_rts(struct tty_port *port, int onoff)
MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (onoff)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
}
@@ -2486,14 +2483,14 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
{
MGSLPC_INFO *info;
struct tty_port *port;
- int retval, line;
- unsigned long flags;
+ int retval, line;
+ unsigned long flags;
/* verify range of specified line number */
line = tty->index;
if (line >= mgslpc_device_count) {
printk("%s(%d):mgslpc_open with invalid line #%d.\n",
- __FILE__,__LINE__,line);
+ __FILE__, __LINE__, line);
return -ENODEV;
}
@@ -2510,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
- __FILE__,__LINE__,tty->driver->name, port->count);
+ __FILE__, __LINE__, tty->driver->name, port->count);
/* If port is closing, signal caller to try again */
if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
@@ -2521,7 +2518,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
goto cleanup;
}
- tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
@@ -2545,13 +2542,13 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
if (retval) {
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):block_til_ready(%s) returned %d\n",
- __FILE__,__LINE__, info->device_name, retval);
+ __FILE__, __LINE__, info->device_name, retval);
goto cleanup;
}
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_open(%s) success\n",
- __FILE__,__LINE__, info->device_name);
+ __FILE__, __LINE__, info->device_name);
retval = 0;
cleanup:
@@ -2571,9 +2568,9 @@ static inline void line_info(struct seq_file *m, MGSLPC_INFO *info)
info->device_name, info->io_base, info->irq_level);
/* output current serial signal states */
- spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
stat_buf[0] = 0;
stat_buf[1] = 0;
@@ -2635,7 +2632,7 @@ static int mgslpc_proc_show(struct seq_file *m, void *v)
seq_printf(m, "synclink driver:%s\n", driver_version);
info = mgslpc_device_list;
- while( info ) {
+ while (info) {
line_info(m, info);
info = info->next_device;
}
@@ -2674,6 +2671,14 @@ static int rx_alloc_buffers(MGSLPC_INFO *info)
if (info->rx_buf == NULL)
return -ENOMEM;
+ /* unused flag buffer to satisfy receive_buf calling interface */
+ info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
+ if (!info->flag_buf) {
+ kfree(info->rx_buf);
+ info->rx_buf = NULL;
+ return -ENOMEM;
+ }
+
rx_reset_buffers(info);
return 0;
}
@@ -2682,12 +2687,14 @@ static void rx_free_buffers(MGSLPC_INFO *info)
{
kfree(info->rx_buf);
info->rx_buf = NULL;
+ kfree(info->flag_buf);
+ info->flag_buf = NULL;
}
static int claim_resources(MGSLPC_INFO *info)
{
- if (rx_alloc_buffers(info) < 0 ) {
- printk( "Can't allocate rx buffer %s\n", info->device_name);
+ if (rx_alloc_buffers(info) < 0) {
+ printk("Can't allocate rx buffer %s\n", info->device_name);
release_resources(info);
return -ENODEV;
}
@@ -2706,8 +2713,12 @@ static void release_resources(MGSLPC_INFO *info)
*
* Arguments: info pointer to device instance data
*/
-static void mgslpc_add_device(MGSLPC_INFO *info)
+static int mgslpc_add_device(MGSLPC_INFO *info)
{
+ MGSLPC_INFO *current_dev = NULL;
+ struct device *tty_dev;
+ int ret;
+
info->next_device = NULL;
info->line = mgslpc_device_count;
sprintf(info->device_name,"ttySLP%d",info->line);
@@ -2722,8 +2733,8 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
if (!mgslpc_device_list)
mgslpc_device_list = info;
else {
- MGSLPC_INFO *current_dev = mgslpc_device_list;
- while( current_dev->next_device )
+ current_dev = mgslpc_device_list;
+ while (current_dev->next_device)
current_dev = current_dev->next_device;
current_dev->next_device = info;
}
@@ -2733,14 +2744,34 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
else if (info->max_frame_size > 65535)
info->max_frame_size = 65535;
- printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n",
+ printk("SyncLink PC Card %s:IO=%04X IRQ=%d\n",
info->device_name, info->io_base, info->irq_level);
#if SYNCLINK_GENERIC_HDLC
- hdlcdev_init(info);
+ ret = hdlcdev_init(info);
+ if (ret != 0)
+ goto failed;
#endif
- tty_port_register_device(&info->port, serial_driver, info->line,
+
+ tty_dev = tty_port_register_device(&info->port, serial_driver, info->line,
&info->p_dev->dev);
+ if (IS_ERR(tty_dev)) {
+ ret = PTR_ERR(tty_dev);
+#if SYNCLINK_GENERIC_HDLC
+ hdlcdev_exit(info);
+#endif
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ if (current_dev)
+ current_dev->next_device = NULL;
+ else
+ mgslpc_device_list = NULL;
+ mgslpc_device_count--;
+ return ret;
}
static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
@@ -3262,7 +3293,7 @@ static void rx_stop(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):rx_stop(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
/* MODE:03 RAC Receiver Active, 0=inactive */
clear_reg_bits(info, CHA + MODE, BIT3);
@@ -3275,7 +3306,7 @@ static void rx_start(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):rx_start(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
rx_reset_buffers(info);
info->rx_enabled = false;
@@ -3291,7 +3322,7 @@ static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):tx_start(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (info->tx_count) {
/* If auto RTS enabled and RTS is inactive, then assert */
@@ -3329,7 +3360,7 @@ static void tx_stop(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):tx_stop(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
del_timer(&info->tx_timer);
@@ -3575,8 +3606,8 @@ static void get_signals(MGSLPC_INFO *info)
{
unsigned char status = 0;
- /* preserve DTR and RTS */
- info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
+ /* preserve RTS and DTR */
+ info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
if (read_reg(info, CHB + VSTR) & BIT7)
info->serial_signals |= SerialSignal_DCD;
@@ -3590,7 +3621,7 @@ static void get_signals(MGSLPC_INFO *info)
info->serial_signals |= SerialSignal_DSR;
}
-/* Set the state of DTR and RTS based on contents of
+/* Set the state of RTS and DTR based on contents of
* serial_signals member of device extension.
*/
static void set_signals(MGSLPC_INFO *info)
@@ -3681,7 +3712,7 @@ static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_BH)
printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n",
- __FILE__,__LINE__,info->device_name,status,framesize);
+ __FILE__, __LINE__, info->device_name, status, framesize);
if (debug_level >= DEBUG_LEVEL_DATA)
trace_block(info, buf->data, framesize, 0);
@@ -3709,13 +3740,13 @@ static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
}
}
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
buf->status = buf->count = 0;
info->rx_frame_count--;
info->rx_get++;
if (info->rx_get >= info->rx_buf_count)
info->rx_get = 0;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return true;
}
@@ -3729,7 +3760,7 @@ static bool register_test(MGSLPC_INFO *info)
bool rc = true;
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
reset_device(info);
for (i = 0; i < count; i++) {
@@ -3742,7 +3773,7 @@ static bool register_test(MGSLPC_INFO *info)
}
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return rc;
}
@@ -3751,7 +3782,7 @@ static bool irq_test(MGSLPC_INFO *info)
unsigned long end_time;
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
reset_device(info);
info->testing_irq = true;
@@ -3765,7 +3796,7 @@ static bool irq_test(MGSLPC_INFO *info)
write_reg(info, CHA + TIMR, 0); /* 512 cycles */
issue_command(info, CHA, CMD_START_TIMER);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
end_time=100;
while(end_time-- && !info->irq_occurred) {
@@ -3774,9 +3805,9 @@ static bool irq_test(MGSLPC_INFO *info)
info->testing_irq = false;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
reset_device(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return info->irq_occurred;
}
@@ -3785,21 +3816,21 @@ static int adapter_test(MGSLPC_INFO *info)
{
if (!register_test(info)) {
info->init_error = DiagStatus_AddressFailure;
- printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
- __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
+ printk("%s(%d):Register test failure for device %s Addr=%04X\n",
+ __FILE__, __LINE__, info->device_name, (unsigned short)(info->io_base));
return -ENODEV;
}
if (!irq_test(info)) {
info->init_error = DiagStatus_IrqFailure;
- printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
- __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
+ printk("%s(%d):Interrupt test failure for device %s IRQ=%d\n",
+ __FILE__, __LINE__, info->device_name, (unsigned short)(info->irq_level));
return -ENODEV;
}
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):device %s passed diagnostics\n",
- __FILE__,__LINE__,info->device_name);
+ __FILE__, __LINE__, info->device_name);
return 0;
}
@@ -3808,9 +3839,9 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
int i;
int linecount;
if (xmit)
- printk("%s tx data:\n",info->device_name);
+ printk("%s tx data:\n", info->device_name);
else
- printk("%s rx data:\n",info->device_name);
+ printk("%s rx data:\n", info->device_name);
while(count) {
if (count > 16)
@@ -3819,12 +3850,12 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
linecount = count;
for(i=0;i<linecount;i++)
- printk("%02X ",(unsigned char)data[i]);
+ printk("%02X ", (unsigned char)data[i]);
for(;i<17;i++)
printk(" ");
for(i=0;i<linecount;i++) {
if (data[i]>=040 && data[i]<=0176)
- printk("%c",data[i]);
+ printk("%c", data[i]);
else
printk(".");
}
@@ -3843,18 +3874,18 @@ static void tx_timeout(unsigned long context)
MGSLPC_INFO *info = (MGSLPC_INFO*)context;
unsigned long flags;
- if ( debug_level >= DEBUG_LEVEL_INFO )
- printk( "%s(%d):tx_timeout(%s)\n",
- __FILE__,__LINE__,info->device_name);
- if(info->tx_active &&
- info->params.mode == MGSL_MODE_HDLC) {
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):tx_timeout(%s)\n",
+ __FILE__, __LINE__, info->device_name);
+ if (info->tx_active &&
+ info->params.mode == MGSL_MODE_HDLC) {
info->icount.txtimeout++;
}
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->tx_active = false;
info->tx_count = info->tx_put = info->tx_get = 0;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
@@ -3936,7 +3967,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
+ printk(KERN_INFO "%s:hdlc_xmit(%s)\n", __FILE__, dev->name);
/* stop sending until this frame completes */
netif_stop_queue(dev);
@@ -3957,13 +3988,13 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
dev->trans_start = jiffies;
/* start hardware transmitter if necessary */
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_active) {
struct tty_struct *tty = tty_port_tty_get(&info->port);
- tx_start(info, tty);
- tty_kref_put(tty);
+ tx_start(info, tty);
+ tty_kref_put(tty);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return NETDEV_TX_OK;
}
@@ -3984,10 +4015,11 @@ static int hdlcdev_open(struct net_device *dev)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
+ printk("%s:hdlcdev_open(%s)\n", __FILE__, dev->name);
/* generic HDLC layer open processing */
- if ((rc = hdlc_open(dev)))
+ rc = hdlc_open(dev);
+ if (rc != 0)
return rc;
/* arbitrate between network and tty opens */
@@ -4002,15 +4034,16 @@ static int hdlcdev_open(struct net_device *dev)
tty = tty_port_tty_get(&info->port);
/* claim resources and init adapter */
- if ((rc = startup(info, tty)) != 0) {
+ rc = startup(info, tty);
+ if (rc != 0) {
tty_kref_put(tty);
spin_lock_irqsave(&info->netlock, flags);
info->netcount=0;
spin_unlock_irqrestore(&info->netlock, flags);
return rc;
}
- /* assert DTR and RTS, apply hardware settings */
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ /* assert RTS and DTR, apply hardware settings */
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
mgslpc_program_hw(info, tty);
tty_kref_put(tty);
@@ -4044,7 +4077,7 @@ static int hdlcdev_close(struct net_device *dev)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
+ printk("%s:hdlcdev_close(%s)\n", __FILE__, dev->name);
netif_stop_queue(dev);
@@ -4078,7 +4111,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
unsigned int flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
+ printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
/* return error if TTY interface open */
if (info->port.count)
@@ -4179,14 +4212,14 @@ static void hdlcdev_tx_timeout(struct net_device *dev)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("hdlcdev_tx_timeout(%s)\n",dev->name);
+ printk("hdlcdev_tx_timeout(%s)\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
tx_stop(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
netif_wake_queue(dev);
}
@@ -4217,7 +4250,7 @@ static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size)
struct net_device *dev = info->netdev;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("hdlcdev_rx(%s)\n",dev->name);
+ printk("hdlcdev_rx(%s)\n", dev->name);
if (skb == NULL) {
printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
@@ -4260,8 +4293,9 @@ static int hdlcdev_init(MGSLPC_INFO *info)
/* allocate and initialize network and HDLC layer objects */
- if (!(dev = alloc_hdlcdev(info))) {
- printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
+ dev = alloc_hdlcdev(info);
+ if (dev == NULL) {
+ printk(KERN_ERR "%s:hdlc device allocation failure\n", __FILE__);
return -ENOMEM;
}
@@ -4280,8 +4314,9 @@ static int hdlcdev_init(MGSLPC_INFO *info)
hdlc->xmit = hdlcdev_xmit;
/* register objects with HDLC layer */
- if ((rc = register_hdlc_device(dev))) {
- printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
+ rc = register_hdlc_device(dev);
+ if (rc) {
+ printk(KERN_WARNING "%s:unable to register hdlc device\n", __FILE__);
free_netdev(dev);
return rc;
}
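
The SerialSignal hunks in the synclink_cs.c diff above replace arithmetic '+' with bitwise '|' when building and masking the RTS/DTR bits. A minimal standalone C sketch of why that matters; the flag values below are illustrative, not the driver's actual definitions:

#include <stdio.h>

/* illustrative flag bits only; the real driver defines SerialSignal_* itself */
#define SIG_RTS 0x20
#define SIG_DTR 0x80

int main(void)
{
	unsigned char signals = SIG_RTS | SIG_DTR;	/* assert both */

	signals &= SIG_RTS | SIG_DTR;	/* preserve only RTS and DTR */

	/* '|' is idempotent, so re-asserting an already-set bit is harmless;
	 * with '+' the same operation would carry into unrelated bits */
	signals |= SIG_DTR;

	printf("signals = 0x%02x\n", signals);
	return 0;
}
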
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 1cd49241e60e..ae0b42b66e55 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -107,7 +107,7 @@ static inline void pp_enable_irq (struct pp_struct *pp)
static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
loff_t * ppos)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
char * kbuffer;
ssize_t bytes_read = 0;
@@ -189,7 +189,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
static ssize_t pp_write (struct file * file, const char __user * buf,
size_t count, loff_t * ppos)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
char * kbuffer;
ssize_t bytes_written = 0;
@@ -324,7 +324,7 @@ static enum ieee1284_phase init_phase (int mode)
static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
struct parport * port;
void __user *argp = (void __user *)arg;
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 588063ac9517..8cafa9ccd43f 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -312,7 +312,7 @@ static int ps3flash_flush(struct file *file, fl_owner_t id)
static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int err;
mutex_lock(&inode->i_mutex);
err = ps3flash_writeback(ps3flash_dev);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 85e81ec1451e..594bda9dcfc8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -445,7 +445,7 @@ static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
.name = "input",
.limit = 1,
- .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
.pool = input_pool_data
};
@@ -454,7 +454,7 @@ static struct entropy_store blocking_pool = {
.name = "blocking",
.limit = 1,
.pull = &input_pool,
- .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
.pool = blocking_pool_data
};
@@ -462,7 +462,7 @@ static struct entropy_store nonblocking_pool = {
.poolinfo = &poolinfo_table[1],
.name = "nonblocking",
.pull = &input_pool,
- .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
.pool = nonblocking_pool_data
};
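
The random.c hunks above drop the '&' because __SPIN_LOCK_UNLOCKED() expects the lock's name (which ends up in lockdep), not its address. A minimal kernel-style sketch of the static-initializer pattern, using an illustrative struct:

#include <linux/spinlock.h>

struct demo_pool {
	spinlock_t lock;
	int entropy_count;
};

/* pass the lock member itself, not a pointer to it */
static struct demo_pool demo_pool = {
	.lock = __SPIN_LOCK_UNLOCKED(demo_pool.lock),
	.entropy_count = 0,
};
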
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 54a3a6d09819..f3223aac4df1 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -80,7 +80,7 @@ static int raw_open(struct inode *inode, struct file *filp)
filp->f_flags |= O_DIRECT;
filp->f_mapping = bdev->bd_inode->i_mapping;
if (++raw_devices[minor].inuse == 1)
- filp->f_path.dentry->d_inode->i_mapping =
+ file_inode(filp)->i_mapping =
bdev->bd_inode->i_mapping;
filp->private_data = bdev;
mutex_unlock(&raw_mutex);
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 6386a98e43c1..bf2349dbbf7f 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -938,7 +938,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
}
if (ret > 0) {
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
inode->i_atime = current_fs_time(inode->i_sb);
}
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index 34c63f85104d..47b9fdfcf083 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -164,7 +164,7 @@ static ssize_t tanbac_tb0219_read(struct file *file, char __user *buf, size_t le
unsigned int minor;
char value;
- minor = iminor(file->f_path.dentry->d_inode);
+ minor = iminor(file_inode(file));
switch (minor) {
case 0:
value = get_led();
@@ -200,7 +200,7 @@ static ssize_t tanbac_tb0219_write(struct file *file, const char __user *data,
int retval = 0;
char c;
- minor = iminor(file->f_path.dentry->d_inode);
+ minor = iminor(file_inode(file));
switch (minor) {
case 0:
type = TYPE_LED;
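
The ppdev, ps3flash, raw, sonypi and tb0219 hunks above are all the same conversion: open-coded file->f_path.dentry->d_inode chains become the file_inode() helper. A minimal kernel-style sketch of the pattern in a hypothetical character-device read handler:

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	/* preferred: let the VFS helper resolve the inode */
	unsigned int minor = iminor(file_inode(file));

	/* old, now-discouraged form:
	 *   minor = iminor(file->f_path.dentry->d_inode);
	 */
	pr_debug("read on minor %u, %zu bytes requested\n", minor, count);
	return 0;	/* nothing to copy in this sketch */
}
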
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ee4dbeafb377..e905d5f53051 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -61,9 +61,6 @@ struct ports_driver_data {
/* List of all the devices we're handling */
struct list_head portdevs;
- /* Number of devices this driver is handling */
- unsigned int index;
-
/*
* This is used to keep track of the number of hvc consoles
* spawned by this driver. This number is given as the first
@@ -169,9 +166,6 @@ struct ports_device {
/* Array of per-port IO virtqueues */
struct virtqueue **in_vqs, **out_vqs;
- /* Used for numbering devices for sysfs and debugfs */
- unsigned int drv_index;
-
/* Major number for this device. Ports will be created as minors. */
int chr_major;
};
@@ -1415,7 +1409,7 @@ static int add_port(struct ports_device *portdev, u32 id)
}
port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
devt, port, "vport%up%u",
- port->portdev->drv_index, id);
+ port->portdev->vdev->index, id);
if (IS_ERR(port->dev)) {
err = PTR_ERR(port->dev);
dev_err(&port->portdev->vdev->dev,
@@ -1442,7 +1436,7 @@ static int add_port(struct ports_device *portdev, u32 id)
* rproc_serial does not want the console port, only
* the generic port implementation.
*/
- port->host_connected = true;
+ port->host_connected = port->guest_connected = true;
else if (!use_multiport(port->portdev)) {
/*
* If we're not using multiport support,
@@ -1470,7 +1464,7 @@ static int add_port(struct ports_device *portdev, u32 id)
* inspect a port's state at any time
*/
sprintf(debugfs_name, "vport%up%u",
- port->portdev->drv_index, id);
+ port->portdev->vdev->index, id);
port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
pdrvdata.debugfs_dir,
port,
@@ -1958,16 +1952,12 @@ static int virtcons_probe(struct virtio_device *vdev)
portdev->vdev = vdev;
vdev->priv = portdev;
- spin_lock_irq(&pdrvdata_lock);
- portdev->drv_index = pdrvdata.index++;
- spin_unlock_irq(&pdrvdata_lock);
-
portdev->chr_major = register_chrdev(0, "virtio-portsdev",
&portdev_fops);
if (portdev->chr_major < 0) {
dev_err(&vdev->dev,
"Error %d registering chrdev for device %u\n",
- portdev->chr_major, portdev->drv_index);
+ portdev->chr_major, vdev->index);
err = portdev->chr_major;
goto free;
}
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 14fde73ea6ff..300d4775d926 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -26,6 +26,8 @@ obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_ARCH_ZYNQ) += clk-zynq.o
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
+
obj-$(CONFIG_X86) += x86/
# Chip specific
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
index e69991aab43a..792bc57a9db7 100644
--- a/drivers/clk/clk-bcm2835.c
+++ b/drivers/clk/clk-bcm2835.c
@@ -20,6 +20,13 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/bcm2835.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+
+static const __initconst struct of_device_id clk_match[] = {
+ { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
+ { }
+};
/*
* These are fixed clocks. They're probably not all root clocks and it may
@@ -56,4 +63,6 @@ void __init bcm2835_init_clocks(void)
ret = clk_register_clkdev(clk, NULL, "20215000.uart");
if (ret)
pr_err("uart1_pclk alias not registered\n");
+
+ of_clk_init(clk_match);
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index fabbfe1a9253..ed87b2405806 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -52,31 +52,29 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
int level)
{
struct clk *child;
- struct hlist_node *tmp;
if (!c)
return;
clk_summary_show_one(s, c, level);
- hlist_for_each_entry(child, tmp, &c->children, child_node)
+ hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
}
static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk *c;
- struct hlist_node *tmp;
seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
seq_printf(s, "---------------------------------------------------------------------\n");
mutex_lock(&prepare_lock);
- hlist_for_each_entry(c, tmp, &clk_root_list, child_node)
+ hlist_for_each_entry(c, &clk_root_list, child_node)
clk_summary_show_subtree(s, c, 0);
- hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node)
+ hlist_for_each_entry(c, &clk_orphan_list, child_node)
clk_summary_show_subtree(s, c, 0);
mutex_unlock(&prepare_lock);
@@ -111,14 +109,13 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
{
struct clk *child;
- struct hlist_node *tmp;
if (!c)
return;
clk_dump_one(s, c, level);
- hlist_for_each_entry(child, tmp, &c->children, child_node) {
+ hlist_for_each_entry(child, &c->children, child_node) {
seq_printf(s, ",");
clk_dump_subtree(s, child, level + 1);
}
@@ -129,21 +126,20 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
static int clk_dump(struct seq_file *s, void *data)
{
struct clk *c;
- struct hlist_node *tmp;
bool first_node = true;
seq_printf(s, "{");
mutex_lock(&prepare_lock);
- hlist_for_each_entry(c, tmp, &clk_root_list, child_node) {
+ hlist_for_each_entry(c, &clk_root_list, child_node) {
if (!first_node)
seq_printf(s, ",");
first_node = false;
clk_dump_subtree(s, c, 0);
}
- hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) {
+ hlist_for_each_entry(c, &clk_orphan_list, child_node) {
seq_printf(s, ",");
clk_dump_subtree(s, c, 0);
}
@@ -222,7 +218,6 @@ out:
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
struct clk *child;
- struct hlist_node *tmp;
int ret = -EINVAL;;
if (!clk || !pdentry)
@@ -233,7 +228,7 @@ static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
if (ret)
goto out;
- hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node)
clk_debug_create_subtree(child, clk->dentry);
ret = 0;
@@ -299,7 +294,6 @@ out:
static int __init clk_debug_init(void)
{
struct clk *clk;
- struct hlist_node *tmp;
struct dentry *d;
rootdir = debugfs_create_dir("clk", NULL);
@@ -324,10 +318,10 @@ static int __init clk_debug_init(void)
mutex_lock(&prepare_lock);
- hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+ hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_debug_create_subtree(clk, rootdir);
- hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+ hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_debug_create_subtree(clk, orphandir);
inited = 1;
@@ -345,13 +339,12 @@ static inline int clk_debug_register(struct clk *clk) { return 0; }
static void clk_disable_unused_subtree(struct clk *clk)
{
struct clk *child;
- struct hlist_node *tmp;
unsigned long flags;
if (!clk)
goto out;
- hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node)
clk_disable_unused_subtree(child);
spin_lock_irqsave(&enable_lock, flags);
@@ -384,14 +377,13 @@ out:
static int clk_disable_unused(void)
{
struct clk *clk;
- struct hlist_node *tmp;
mutex_lock(&prepare_lock);
- hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+ hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_disable_unused_subtree(clk);
- hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+ hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_disable_unused_subtree(clk);
mutex_unlock(&prepare_lock);
@@ -484,12 +476,11 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
struct clk *child;
struct clk *ret;
- struct hlist_node *tmp;
if (!strcmp(clk->name, name))
return clk;
- hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ hlist_for_each_entry(child, &clk->children, child_node) {
ret = __clk_lookup_subtree(name, child);
if (ret)
return ret;
@@ -502,20 +493,19 @@ struct clk *__clk_lookup(const char *name)
{
struct clk *root_clk;
struct clk *ret;
- struct hlist_node *tmp;
if (!name)
return NULL;
/* search the 'proper' clk tree first */
- hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
+ hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
ret = __clk_lookup_subtree(name, root_clk);
if (ret)
return ret;
}
/* if not found, then search the orphan tree */
- hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
+ hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
ret = __clk_lookup_subtree(name, root_clk);
if (ret)
return ret;
@@ -812,7 +802,6 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
unsigned long old_rate;
unsigned long parent_rate = 0;
- struct hlist_node *tmp;
struct clk *child;
old_rate = clk->rate;
@@ -832,7 +821,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
if (clk->notifier_count && msg)
__clk_notify(clk, msg, old_rate, clk->rate);
- hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node)
__clk_recalc_rates(child, msg);
}
@@ -878,7 +867,6 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
*/
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
- struct hlist_node *tmp;
struct clk *child;
unsigned long new_rate;
int ret = NOTIFY_DONE;
@@ -895,7 +883,7 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
if (ret == NOTIFY_BAD)
goto out;
- hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ hlist_for_each_entry(child, &clk->children, child_node) {
ret = __clk_speculate_rates(child, new_rate);
if (ret == NOTIFY_BAD)
break;
@@ -908,11 +896,10 @@ out:
static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
struct clk *child;
- struct hlist_node *tmp;
clk->new_rate = new_rate;
- hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ hlist_for_each_entry(child, &clk->children, child_node) {
if (child->ops->recalc_rate)
child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
else
@@ -983,7 +970,6 @@ out:
*/
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
- struct hlist_node *tmp;
struct clk *child, *fail_clk = NULL;
int ret = NOTIFY_DONE;
@@ -996,7 +982,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
fail_clk = clk;
}
- hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ hlist_for_each_entry(child, &clk->children, child_node) {
clk = clk_propagate_rate_change(child, event);
if (clk)
fail_clk = clk;
@@ -1014,7 +1000,6 @@ static void clk_change_rate(struct clk *clk)
struct clk *child;
unsigned long old_rate;
unsigned long best_parent_rate = 0;
- struct hlist_node *tmp;
old_rate = clk->rate;
@@ -1032,7 +1017,7 @@ static void clk_change_rate(struct clk *clk)
if (clk->notifier_count && old_rate != clk->rate)
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
- hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node)
clk_change_rate(child);
}
@@ -1348,7 +1333,7 @@ int __clk_init(struct device *dev, struct clk *clk)
{
int i, ret = 0;
struct clk *orphan;
- struct hlist_node *tmp, *tmp2;
+ struct hlist_node *tmp2;
if (!clk)
return -EINVAL;
@@ -1448,7 +1433,7 @@ int __clk_init(struct device *dev, struct clk *clk)
* walk the list of orphan clocks and reparent any that are children of
* this clock
*/
- hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) {
+ hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
if (orphan->ops->get_parent) {
i = orphan->ops->get_parent(orphan->hw);
if (!strcmp(clk->name, orphan->parent_names[i]))
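
The clk.c hunks above track the updated hlist iterator API: hlist_for_each_entry() now takes only the entry cursor, the list head and the member name, with no separate struct hlist_node, and only the _safe variant still needs a temporary for the next node. A minimal kernel-style sketch with an illustrative struct:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_clk {
	const char *name;
	struct hlist_node child_node;
};

static HLIST_HEAD(demo_root_list);

static void demo_walk_and_free(void)
{
	struct demo_clk *c;
	struct hlist_node *tmp;

	/* new form: iterate directly over the containing entries */
	hlist_for_each_entry(c, &demo_root_list, child_node)
		pr_info("clk %s\n", c->name);

	/* the _safe variant keeps a cursor so entries may be deleted */
	hlist_for_each_entry_safe(c, tmp, &demo_root_list, child_node) {
		hlist_del(&c->child_node);
		kfree(c);
	}
}
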
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index 126370a62ce2..76ce6c6d1113 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -238,7 +238,7 @@ int __init mx28_clocks_init(void)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
- clk_register_clkdev(clks[clk32k], NULL, "timrot");
+ clk_register_clkdev(clks[xbus], NULL, "timrot");
clk_register_clkdev(clks[enet_out], NULL, "enet_out");
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
new file mode 100644
index 000000000000..2b41b0f4f731
--- /dev/null
+++ b/drivers/clk/tegra/Makefile
@@ -0,0 +1,11 @@
+obj-y += clk.o
+obj-y += clk-audio-sync.o
+obj-y += clk-divider.o
+obj-y += clk-periph.o
+obj-y += clk-periph-gate.o
+obj-y += clk-pll.o
+obj-y += clk-pll-out.o
+obj-y += clk-super.o
+
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
new file mode 100644
index 000000000000..c0f7843e80e6
--- /dev/null
+++ b/drivers/clk/tegra/clk-audio-sync.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "clk.h"
+
+static unsigned long clk_sync_source_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
+
+ return sync->rate;
+}
+
+static long clk_sync_source_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
+
+ if (rate > sync->max_rate)
+ return -EINVAL;
+ else
+ return rate;
+}
+
+static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
+
+ sync->rate = rate;
+ return 0;
+}
+
+const struct clk_ops tegra_clk_sync_source_ops = {
+ .round_rate = clk_sync_source_round_rate,
+ .set_rate = clk_sync_source_set_rate,
+ .recalc_rate = clk_sync_source_recalc_rate,
+};
+
+struct clk *tegra_clk_register_sync_source(const char *name,
+ unsigned long rate, unsigned long max_rate)
+{
+ struct tegra_clk_sync_source *sync;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ sync = kzalloc(sizeof(*sync), GFP_KERNEL);
+ if (!sync) {
+ pr_err("%s: could not allocate sync source clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sync->rate = rate;
+ sync->max_rate = max_rate;
+
+ init.ops = &tegra_clk_sync_source_ops;
+ init.name = name;
+ init.flags = CLK_IS_ROOT;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ sync->hw.init = &init;
+
+ clk = clk_register(NULL, &sync->hw);
+ if (IS_ERR(clk))
+ kfree(sync);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
new file mode 100644
index 000000000000..4d75b1f37e3a
--- /dev/null
+++ b/drivers/clk/tegra/clk-divider.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+#define pll_out_override(p) (BIT((p->shift - 6)))
+#define div_mask(d) ((1 << (d->width)) - 1)
+#define get_mul(d) (1 << d->frac_width)
+#define get_max_div(d) div_mask(d)
+
+#define PERIPH_CLK_UART_DIV_ENB BIT(24)
+
+static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
+ unsigned long parent_rate)
+{
+ s64 divider_ux1 = parent_rate;
+ u8 flags = divider->flags;
+ int mul;
+
+ if (!rate)
+ return 0;
+
+ mul = get_mul(divider);
+
+ if (!(flags & TEGRA_DIVIDER_INT))
+ divider_ux1 *= mul;
+
+ if (flags & TEGRA_DIVIDER_ROUND_UP)
+ divider_ux1 += rate - 1;
+
+ do_div(divider_ux1, rate);
+
+ if (flags & TEGRA_DIVIDER_INT)
+ divider_ux1 *= mul;
+
+ divider_ux1 -= mul;
+
+ if (divider_ux1 < 0)
+ return 0;
+
+ if (divider_ux1 > get_max_div(divider))
+ return -EINVAL;
+
+ return divider_ux1;
+}
+
+static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
+ u32 reg;
+ int div, mul;
+ u64 rate = parent_rate;
+
+ reg = readl_relaxed(divider->reg) >> divider->shift;
+ div = reg & div_mask(divider);
+
+ mul = get_mul(divider);
+ div += mul;
+
+ rate *= mul;
+ rate += div - 1;
+ do_div(rate, div);
+
+ return rate;
+}
+
+static long clk_frac_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
+ int div, mul;
+ unsigned long output_rate = *prate;
+
+ if (!rate)
+ return output_rate;
+
+ div = get_div(divider, rate, output_rate);
+ if (div < 0)
+ return *prate;
+
+ mul = get_mul(divider);
+
+ return DIV_ROUND_UP(output_rate * mul, div + mul);
+}
+
+static int clk_frac_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
+ int div;
+ unsigned long flags = 0;
+ u32 val;
+
+ div = get_div(divider, rate, parent_rate);
+ if (div < 0)
+ return div;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+
+ val = readl_relaxed(divider->reg);
+ val &= ~(div_mask(divider) << divider->shift);
+ val |= div << divider->shift;
+
+ if (divider->flags & TEGRA_DIVIDER_UART) {
+ if (div)
+ val |= PERIPH_CLK_UART_DIV_ENB;
+ else
+ val &= ~PERIPH_CLK_UART_DIV_ENB;
+ }
+
+ if (divider->flags & TEGRA_DIVIDER_FIXED)
+ val |= pll_out_override(divider);
+
+ writel_relaxed(val, divider->reg);
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return 0;
+}
+
+const struct clk_ops tegra_clk_frac_div_ops = {
+ .recalc_rate = clk_frac_div_recalc_rate,
+ .set_rate = clk_frac_div_set_rate,
+ .round_rate = clk_frac_div_round_rate,
+};
+
+struct clk *tegra_clk_register_divider(const char *name,
+ const char *parent_name, void __iomem *reg,
+ unsigned long flags, u8 clk_divider_flags, u8 shift, u8 width,
+ u8 frac_width, spinlock_t *lock)
+{
+ struct tegra_clk_frac_div *divider;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ divider = kzalloc(sizeof(*divider), GFP_KERNEL);
+ if (!divider) {
+ pr_err("%s: could not allocate fractional divider clk\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &tegra_clk_frac_div_ops;
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ divider->reg = reg;
+ divider->shift = shift;
+ divider->width = width;
+ divider->frac_width = frac_width;
+ divider->lock = lock;
+ divider->flags = clk_divider_flags;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ divider->hw.init = &init;
+
+ clk = clk_register(NULL, &divider->hw);
+ if (IS_ERR(clk))
+ kfree(divider);
+
+ return clk;
+}
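
As a worked example of the fractional-divider arithmetic in clk-divider.c above (get_div() and clk_frac_div_recalc_rate() with frac_width = 1, i.e. an n.1 fixed-point divisor), a standalone userspace C sketch using illustrative rates:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t parent = 408000000;	/* illustrative parent rate, Hz */
	uint64_t target = 102000000;	/* illustrative requested rate, Hz */
	unsigned int frac_width = 1;
	uint64_t mul = 1ull << frac_width;

	/* get_div(): register value n chosen so out = parent * mul / (n + mul) */
	uint64_t n = parent * mul / target - mul;

	/* clk_frac_div_recalc_rate(): rate produced for that register value
	 * (rounded up, as the driver does) */
	uint64_t out = (parent * mul + (n + mul) - 1) / (n + mul);

	printf("divider field = %llu, resulting rate = %llu Hz\n",
	       (unsigned long long)n, (unsigned long long)out);
	return 0;
}
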
diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c
new file mode 100644
index 000000000000..6dd533251e7b
--- /dev/null
+++ b/drivers/clk/tegra/clk-periph-gate.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/tegra-soc.h>
+
+#include "clk.h"
+
+static DEFINE_SPINLOCK(periph_ref_lock);
+
+/* Macros to assist peripheral gate clock */
+#define read_enb(gate) \
+ readl_relaxed(gate->clk_base + (gate->regs->enb_reg))
+#define write_enb_set(val, gate) \
+ writel_relaxed(val, gate->clk_base + (gate->regs->enb_set_reg))
+#define write_enb_clr(val, gate) \
+ writel_relaxed(val, gate->clk_base + (gate->regs->enb_clr_reg))
+
+#define read_rst(gate) \
+ readl_relaxed(gate->clk_base + (gate->regs->rst_reg))
+#define write_rst_set(val, gate) \
+ writel_relaxed(val, gate->clk_base + (gate->regs->rst_set_reg))
+#define write_rst_clr(val, gate) \
+ writel_relaxed(val, gate->clk_base + (gate->regs->rst_clr_reg))
+
+#define periph_clk_to_bit(periph) (1 << (gate->clk_num % 32))
+
+/* Peripheral gate clock ops */
+static int clk_periph_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+ int state = 1;
+
+ if (!(read_enb(gate) & periph_clk_to_bit(gate)))
+ state = 0;
+
+ if (!(gate->flags & TEGRA_PERIPH_NO_RESET))
+ if (read_rst(gate) & periph_clk_to_bit(gate))
+ state = 0;
+
+ return state;
+}
+
+static int clk_periph_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&periph_ref_lock, flags);
+
+ gate->enable_refcnt[gate->clk_num]++;
+ if (gate->enable_refcnt[gate->clk_num] > 1) {
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+ return 0;
+ }
+
+ write_enb_set(periph_clk_to_bit(gate), gate);
+ udelay(2);
+
+ if (!(gate->flags & TEGRA_PERIPH_NO_RESET) &&
+ !(gate->flags & TEGRA_PERIPH_MANUAL_RESET)) {
+ if (read_rst(gate) & periph_clk_to_bit(gate)) {
+ udelay(5); /* reset propagation delay */
+ write_rst_clr(periph_clk_to_bit(gate), gate);
+ }
+ }
+
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+
+ return 0;
+}
+
+static void clk_periph_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&periph_ref_lock, flags);
+
+ gate->enable_refcnt[gate->clk_num]--;
+ if (gate->enable_refcnt[gate->clk_num] > 0) {
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+ return;
+ }
+
+ /*
+ * If the peripheral is on the APB bus, read from the bus to
+ * flush the write operation. This avoids accessing the
+ * peripheral after its clock has been disabled.
+ */
+ if (gate->flags & TEGRA_PERIPH_ON_APB)
+ tegra_read_chipid();
+
+ write_enb_clr(periph_clk_to_bit(gate), gate);
+
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+}
+
+void tegra_periph_reset(struct tegra_clk_periph_gate *gate, bool assert)
+{
+ if (gate->flags & TEGRA_PERIPH_NO_RESET)
+ return;
+
+ if (assert) {
+ /*
+ * If the peripheral is on the APB bus, read from the bus to
+ * flush the write operation. This avoids accessing the
+ * peripheral after its clock has been disabled.
+ */
+ if (gate->flags & TEGRA_PERIPH_ON_APB)
+ tegra_read_chipid();
+
+ write_rst_set(periph_clk_to_bit(gate), gate);
+ } else {
+ write_rst_clr(periph_clk_to_bit(gate), gate);
+ }
+}
+
+const struct clk_ops tegra_clk_periph_gate_ops = {
+ .is_enabled = clk_periph_is_enabled,
+ .enable = clk_periph_enable,
+ .disable = clk_periph_disable,
+};
+
+struct clk *tegra_clk_register_periph_gate(const char *name,
+ const char *parent_name, u8 gate_flags, void __iomem *clk_base,
+ unsigned long flags, int clk_num,
+ struct tegra_clk_periph_regs *pregs, int *enable_refcnt)
+{
+ struct tegra_clk_periph_gate *gate;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ pr_err("%s: could not allocate periph gate clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ init.ops = &tegra_clk_periph_gate_ops;
+
+ gate->magic = TEGRA_CLK_PERIPH_GATE_MAGIC;
+ gate->clk_base = clk_base;
+ gate->clk_num = clk_num;
+ gate->flags = gate_flags;
+ gate->enable_refcnt = enable_refcnt;
+ gate->regs = pregs;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ gate->hw.init = &init;
+
+ clk = clk_register(NULL, &gate->hw);
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
new file mode 100644
index 000000000000..788486e6331a
--- /dev/null
+++ b/drivers/clk/tegra/clk-periph.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "clk.h"
+
+static u8 clk_periph_get_parent(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *mux_ops = periph->mux_ops;
+ struct clk_hw *mux_hw = &periph->mux.hw;
+
+ mux_hw->clk = hw->clk;
+
+ return mux_ops->get_parent(mux_hw);
+}
+
+static int clk_periph_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *mux_ops = periph->mux_ops;
+ struct clk_hw *mux_hw = &periph->mux.hw;
+
+ mux_hw->clk = hw->clk;
+
+ return mux_ops->set_parent(mux_hw, index);
+}
+
+static unsigned long clk_periph_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *div_ops = periph->div_ops;
+ struct clk_hw *div_hw = &periph->divider.hw;
+
+ div_hw->clk = hw->clk;
+
+ return div_ops->recalc_rate(div_hw, parent_rate);
+}
+
+static long clk_periph_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *div_ops = periph->div_ops;
+ struct clk_hw *div_hw = &periph->divider.hw;
+
+ div_hw->clk = hw->clk;
+
+ return div_ops->round_rate(div_hw, rate, prate);
+}
+
+static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *div_ops = periph->div_ops;
+ struct clk_hw *div_hw = &periph->divider.hw;
+
+ div_hw->clk = hw->clk;
+
+ return div_ops->set_rate(div_hw, rate, parent_rate);
+}
+
+static int clk_periph_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *gate_ops = periph->gate_ops;
+ struct clk_hw *gate_hw = &periph->gate.hw;
+
+ gate_hw->clk = hw->clk;
+
+ return gate_ops->is_enabled(gate_hw);
+}
+
+static int clk_periph_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *gate_ops = periph->gate_ops;
+ struct clk_hw *gate_hw = &periph->gate.hw;
+
+ gate_hw->clk = hw->clk;
+
+ return gate_ops->enable(gate_hw);
+}
+
+static void clk_periph_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *gate_ops = periph->gate_ops;
+ struct clk_hw *gate_hw = &periph->gate.hw;
+
+ gate_ops->disable(gate_hw);
+}
+
+void tegra_periph_reset_deassert(struct clk *c)
+{
+ struct clk_hw *hw = __clk_get_hw(c);
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ struct tegra_clk_periph_gate *gate;
+
+ if (periph->magic != TEGRA_CLK_PERIPH_MAGIC) {
+ gate = to_clk_periph_gate(hw);
+ if (gate->magic != TEGRA_CLK_PERIPH_GATE_MAGIC) {
+ WARN_ON(1);
+ return;
+ }
+ } else {
+ gate = &periph->gate;
+ }
+
+ tegra_periph_reset(gate, 0);
+}
+
+void tegra_periph_reset_assert(struct clk *c)
+{
+ struct clk_hw *hw = __clk_get_hw(c);
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ struct tegra_clk_periph_gate *gate;
+
+ if (periph->magic != TEGRA_CLK_PERIPH_MAGIC) {
+ gate = to_clk_periph_gate(hw);
+ if (gate->magic != TEGRA_CLK_PERIPH_GATE_MAGIC) {
+ WARN_ON(1);
+ return;
+ }
+ } else {
+ gate = &periph->gate;
+ }
+
+ tegra_periph_reset(gate, 1);
+}
+
+const struct clk_ops tegra_clk_periph_ops = {
+ .get_parent = clk_periph_get_parent,
+ .set_parent = clk_periph_set_parent,
+ .recalc_rate = clk_periph_recalc_rate,
+ .round_rate = clk_periph_round_rate,
+ .set_rate = clk_periph_set_rate,
+ .is_enabled = clk_periph_is_enabled,
+ .enable = clk_periph_enable,
+ .disable = clk_periph_disable,
+};
+
+const struct clk_ops tegra_clk_periph_nodiv_ops = {
+ .get_parent = clk_periph_get_parent,
+ .set_parent = clk_periph_set_parent,
+ .is_enabled = clk_periph_is_enabled,
+ .enable = clk_periph_enable,
+ .disable = clk_periph_disable,
+};
+
+static struct clk *_tegra_clk_register_periph(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph,
+ void __iomem *clk_base, u32 offset, bool div)
+{
+ struct clk *clk;
+ struct clk_init_data init;
+
+ init.name = name;
+ init.ops = div ? &tegra_clk_periph_ops : &tegra_clk_periph_nodiv_ops;
+ init.flags = div ? 0 : CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ periph->hw.init = &init;
+ periph->magic = TEGRA_CLK_PERIPH_MAGIC;
+ periph->mux.reg = clk_base + offset;
+ periph->divider.reg = div ? (clk_base + offset) : NULL;
+ periph->gate.clk_base = clk_base;
+
+ clk = clk_register(NULL, &periph->hw);
+ if (IS_ERR(clk))
+ return clk;
+
+ periph->mux.hw.clk = clk;
+ periph->divider.hw.clk = div ? clk : NULL;
+ periph->gate.hw.clk = clk;
+
+ return clk;
+}
+
+struct clk *tegra_clk_register_periph(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph, void __iomem *clk_base,
+ u32 offset)
+{
+ return _tegra_clk_register_periph(name, parent_names, num_parents,
+ periph, clk_base, offset, true);
+}
+
+struct clk *tegra_clk_register_periph_nodiv(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph, void __iomem *clk_base,
+ u32 offset)
+{
+ return _tegra_clk_register_periph(name, parent_names, num_parents,
+ periph, clk_base, offset, false);
+}
diff --git a/drivers/clk/tegra/clk-pll-out.c b/drivers/clk/tegra/clk-pll-out.c
new file mode 100644
index 000000000000..3598987a451d
--- /dev/null
+++ b/drivers/clk/tegra/clk-pll-out.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+#define pll_out_enb(p) (BIT(p->enb_bit_idx))
+#define pll_out_rst(p) (BIT(p->rst_bit_idx))
+
+static int clk_pll_out_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_pll_out *pll_out = to_clk_pll_out(hw);
+ u32 val = readl_relaxed(pll_out->reg);
+ int state;
+
+ state = (val & pll_out_enb(pll_out)) ? 1 : 0;
+ if (!(val & (pll_out_rst(pll_out))))
+ state = 0;
+ return state;
+}
+
+static int clk_pll_out_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll_out *pll_out = to_clk_pll_out(hw);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (pll_out->lock)
+ spin_lock_irqsave(pll_out->lock, flags);
+
+ val = readl_relaxed(pll_out->reg);
+
+ val |= (pll_out_enb(pll_out) | pll_out_rst(pll_out));
+
+ writel_relaxed(val, pll_out->reg);
+ udelay(2);
+
+ if (pll_out->lock)
+ spin_unlock_irqrestore(pll_out->lock, flags);
+
+ return 0;
+}
+
+static void clk_pll_out_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll_out *pll_out = to_clk_pll_out(hw);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (pll_out->lock)
+ spin_lock_irqsave(pll_out->lock, flags);
+
+ val = readl_relaxed(pll_out->reg);
+
+ val &= ~(pll_out_enb(pll_out) | pll_out_rst(pll_out));
+
+ writel_relaxed(val, pll_out->reg);
+ udelay(2);
+
+ if (pll_out->lock)
+ spin_unlock_irqrestore(pll_out->lock, flags);
+}
+
+const struct clk_ops tegra_clk_pll_out_ops = {
+ .is_enabled = clk_pll_out_is_enabled,
+ .enable = clk_pll_out_enable,
+ .disable = clk_pll_out_disable,
+};
+
+struct clk *tegra_clk_register_pll_out(const char *name,
+ const char *parent_name, void __iomem *reg, u8 enb_bit_idx,
+ u8 rst_bit_idx, unsigned long flags, u8 pll_out_flags,
+ spinlock_t *lock)
+{
+ struct tegra_clk_pll_out *pll_out;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll_out = kzalloc(sizeof(*pll_out), GFP_KERNEL);
+ if (!pll_out)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &tegra_clk_pll_out_ops;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ init.flags = flags;
+
+ pll_out->reg = reg;
+ pll_out->enb_bit_idx = enb_bit_idx;
+ pll_out->rst_bit_idx = rst_bit_idx;
+ pll_out->flags = pll_out_flags;
+ pll_out->lock = lock;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ pll_out->hw.init = &init;
+
+ clk = clk_register(NULL, &pll_out->hw);
+ if (IS_ERR(clk))
+ kfree(pll_out);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
new file mode 100644
index 000000000000..165f24734c1b
--- /dev/null
+++ b/drivers/clk/tegra/clk-pll.c
@@ -0,0 +1,648 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+#define PLL_BASE_BYPASS BIT(31)
+#define PLL_BASE_ENABLE BIT(30)
+#define PLL_BASE_REF_ENABLE BIT(29)
+#define PLL_BASE_OVERRIDE BIT(28)
+
+#define PLL_BASE_DIVP_SHIFT 20
+#define PLL_BASE_DIVP_WIDTH 3
+#define PLL_BASE_DIVN_SHIFT 8
+#define PLL_BASE_DIVN_WIDTH 10
+#define PLL_BASE_DIVM_SHIFT 0
+#define PLL_BASE_DIVM_WIDTH 5
+#define PLLU_POST_DIVP_MASK 0x1
+
+#define PLL_MISC_DCCON_SHIFT 20
+#define PLL_MISC_CPCON_SHIFT 8
+#define PLL_MISC_CPCON_WIDTH 4
+#define PLL_MISC_CPCON_MASK ((1 << PLL_MISC_CPCON_WIDTH) - 1)
+#define PLL_MISC_LFCON_SHIFT 4
+#define PLL_MISC_LFCON_WIDTH 4
+#define PLL_MISC_LFCON_MASK ((1 << PLL_MISC_LFCON_WIDTH) - 1)
+#define PLL_MISC_VCOCON_SHIFT 0
+#define PLL_MISC_VCOCON_WIDTH 4
+#define PLL_MISC_VCOCON_MASK ((1 << PLL_MISC_VCOCON_WIDTH) - 1)
+
+#define OUT_OF_TABLE_CPCON 8
+
+#define PMC_PLLP_WB0_OVERRIDE 0xf8
+#define PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE BIT(12)
+#define PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE BIT(11)
+
+#define PLL_POST_LOCK_DELAY 50
+
+#define PLLDU_LFCON_SET_DIVN 600
+
+#define PLLE_BASE_DIVCML_SHIFT 24
+#define PLLE_BASE_DIVCML_WIDTH 4
+#define PLLE_BASE_DIVP_SHIFT 16
+#define PLLE_BASE_DIVP_WIDTH 7
+#define PLLE_BASE_DIVN_SHIFT 8
+#define PLLE_BASE_DIVN_WIDTH 8
+#define PLLE_BASE_DIVM_SHIFT 0
+#define PLLE_BASE_DIVM_WIDTH 8
+
+#define PLLE_MISC_SETUP_BASE_SHIFT 16
+#define PLLE_MISC_SETUP_BASE_MASK (0xffff << PLLE_MISC_SETUP_BASE_SHIFT)
+#define PLLE_MISC_LOCK_ENABLE BIT(9)
+#define PLLE_MISC_READY BIT(15)
+#define PLLE_MISC_SETUP_EX_SHIFT 2
+#define PLLE_MISC_SETUP_EX_MASK (3 << PLLE_MISC_SETUP_EX_SHIFT)
+#define PLLE_MISC_SETUP_MASK (PLLE_MISC_SETUP_BASE_MASK | \
+ PLLE_MISC_SETUP_EX_MASK)
+#define PLLE_MISC_SETUP_VALUE (7 << PLLE_MISC_SETUP_BASE_SHIFT)
+
+#define PLLE_SS_CTRL 0x68
+#define PLLE_SS_DISABLE (7 << 10)
+
+#define PMC_SATA_PWRGT 0x1ac
+#define PMC_SATA_PWRGT_PLLE_IDDQ_VALUE BIT(5)
+#define PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL BIT(4)
+
+#define pll_readl(offset, p) readl_relaxed(p->clk_base + offset)
+#define pll_readl_base(p) pll_readl(p->params->base_reg, p)
+#define pll_readl_misc(p) pll_readl(p->params->misc_reg, p)
+
+#define pll_writel(val, offset, p) writel_relaxed(val, p->clk_base + offset)
+#define pll_writel_base(val, p) pll_writel(val, p->params->base_reg, p)
+#define pll_writel_misc(val, p) pll_writel(val, p->params->misc_reg, p)
+
+#define mask(w) ((1 << (w)) - 1)
+#define divm_mask(p) mask(p->divm_width)
+#define divn_mask(p) mask(p->divn_width)
+#define divp_mask(p) (p->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK : \
+ mask(p->divp_width))
+
+#define divm_max(p) (divm_mask(p))
+#define divn_max(p) (divn_mask(p))
+#define divp_max(p) (1 << (divp_mask(p)))
+
+static void clk_pll_enable_lock(struct tegra_clk_pll *pll)
+{
+ u32 val;
+
+ if (!(pll->flags & TEGRA_PLL_USE_LOCK))
+ return;
+
+ val = pll_readl_misc(pll);
+ val |= BIT(pll->params->lock_enable_bit_idx);
+ pll_writel_misc(val, pll);
+}
+
+static int clk_pll_wait_for_lock(struct tegra_clk_pll *pll,
+ void __iomem *lock_addr, u32 lock_bit_idx)
+{
+ int i;
+ u32 val;
+
+ if (!(pll->flags & TEGRA_PLL_USE_LOCK)) {
+ udelay(pll->params->lock_delay);
+ return 0;
+ }
+
+ for (i = 0; i < pll->params->lock_delay; i++) {
+ val = readl_relaxed(lock_addr);
+ if (val & BIT(lock_bit_idx)) {
+ udelay(PLL_POST_LOCK_DELAY);
+ return 0;
+ }
+ udelay(2); /* timeout = 2 * lock time */
+ }
+
+ pr_err("%s: Timed out waiting for pll %s lock\n", __func__,
+ __clk_get_name(pll->hw.clk));
+
+ return -1;
+}
+
+static int clk_pll_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+
+ if (pll->flags & TEGRA_PLLM) {
+ val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)
+ return val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE ? 1 : 0;
+ }
+
+ val = pll_readl_base(pll);
+
+ return val & PLL_BASE_ENABLE ? 1 : 0;
+}
+
+static int _clk_pll_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+
+ clk_pll_enable_lock(pll);
+
+ val = pll_readl_base(pll);
+ val &= ~PLL_BASE_BYPASS;
+ val |= PLL_BASE_ENABLE;
+ pll_writel_base(val, pll);
+
+ if (pll->flags & TEGRA_PLLM) {
+ val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ val |= PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
+ writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ }
+
+ clk_pll_wait_for_lock(pll, pll->clk_base + pll->params->base_reg,
+ pll->params->lock_bit_idx);
+
+ return 0;
+}
+
+static void _clk_pll_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+
+ val = pll_readl_base(pll);
+ val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
+ pll_writel_base(val, pll);
+
+ if (pll->flags & TEGRA_PLLM) {
+ val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ val &= ~PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
+ writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ }
+}
+
+static int clk_pll_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ int ret;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ ret = _clk_pll_enable(hw);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static void clk_pll_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _clk_pll_disable(hw);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+static int _get_table_rate(struct clk_hw *hw,
+ struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct tegra_clk_pll_freq_table *sel;
+
+ for (sel = pll->freq_table; sel->input_rate != 0; sel++)
+ if (sel->input_rate == parent_rate &&
+ sel->output_rate == rate)
+ break;
+
+ if (sel->input_rate == 0)
+ return -EINVAL;
+
+ BUG_ON(sel->p < 1);
+
+ cfg->input_rate = sel->input_rate;
+ cfg->output_rate = sel->output_rate;
+ cfg->m = sel->m;
+ cfg->n = sel->n;
+ cfg->p = sel->p;
+ cfg->cpcon = sel->cpcon;
+
+ return 0;
+}
+
+static int _calc_rate(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long cfreq;
+ u32 p_div = 0;
+
+ switch (parent_rate) {
+ case 12000000:
+ case 26000000:
+ cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2000000;
+ break;
+ case 13000000:
+ cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2600000;
+ break;
+ case 16800000:
+ case 19200000:
+ cfreq = (rate <= 1200000 * 1000) ? 1200000 : 2400000;
+ break;
+ case 9600000:
+ case 28800000:
+ /*
+ * PLL_P_OUT1 rate is not listed in PLLA table
+ */
+ cfreq = parent_rate/(parent_rate/1000000);
+ break;
+ default:
+ pr_err("%s Unexpected reference rate %lu\n",
+ __func__, parent_rate);
+ BUG();
+ }
+
+ /* Raise VCO to guarantee 0.5% accuracy */
+ for (cfg->output_rate = rate; cfg->output_rate < 200 * cfreq;
+ cfg->output_rate <<= 1)
+ p_div++;
+
+ cfg->p = 1 << p_div;
+ cfg->m = parent_rate / cfreq;
+ cfg->n = cfg->output_rate / cfreq;
+ cfg->cpcon = OUT_OF_TABLE_CPCON;
+
+ if (cfg->m > divm_max(pll) || cfg->n > divn_max(pll) ||
+ cfg->p > divp_max(pll) || cfg->output_rate > pll->params->vco_max) {
+ pr_err("%s: Failed to set %s rate %lu\n",
+ __func__, __clk_get_name(hw->clk), rate);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ u32 divp, val, old_base;
+ int state;
+
+ divp = __ffs(cfg->p);
+
+ if (pll->flags & TEGRA_PLLU)
+ divp ^= 1;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ old_base = val = pll_readl_base(pll);
+ val &= ~((divm_mask(pll) << pll->divm_shift) |
+ (divn_mask(pll) << pll->divn_shift) |
+ (divp_mask(pll) << pll->divp_shift));
+ val |= ((cfg->m << pll->divm_shift) |
+ (cfg->n << pll->divn_shift) |
+ (divp << pll->divp_shift));
+ if (val == old_base) {
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+ return 0;
+ }
+
+ state = clk_pll_is_enabled(hw);
+
+ if (state) {
+ _clk_pll_disable(hw);
+ val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
+ }
+ pll_writel_base(val, pll);
+
+ if (pll->flags & TEGRA_PLL_HAS_CPCON) {
+ val = pll_readl_misc(pll);
+ val &= ~(PLL_MISC_CPCON_MASK << PLL_MISC_CPCON_SHIFT);
+ val |= cfg->cpcon << PLL_MISC_CPCON_SHIFT;
+ if (pll->flags & TEGRA_PLL_SET_LFCON) {
+ val &= ~(PLL_MISC_LFCON_MASK << PLL_MISC_LFCON_SHIFT);
+ if (cfg->n >= PLLDU_LFCON_SET_DIVN)
+ val |= 0x1 << PLL_MISC_LFCON_SHIFT;
+ } else if (pll->flags & TEGRA_PLL_SET_DCCON) {
+ val &= ~(0x1 << PLL_MISC_DCCON_SHIFT);
+ if (rate >= (pll->params->vco_max >> 1))
+ val |= 0x1 << PLL_MISC_DCCON_SHIFT;
+ }
+ pll_writel_misc(val, pll);
+ }
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ if (state)
+ clk_pll_enable(hw);
+
+ return 0;
+}
+
+static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct tegra_clk_pll_freq_table cfg;
+
+ if (pll->flags & TEGRA_PLL_FIXED) {
+ if (rate != pll->fixed_rate) {
+ pr_err("%s: Can not change %s fixed rate %lu to %lu\n",
+ __func__, __clk_get_name(hw->clk),
+ pll->fixed_rate, rate);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
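+	/* Prefer an exact frequency-table match; otherwise compute dividers. */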
+ if (_get_table_rate(hw, &cfg, rate, parent_rate) &&
+ _calc_rate(hw, &cfg, rate, parent_rate))
+ return -EINVAL;
+
+ return _program_pll(hw, &cfg, rate);
+}
+
+static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct tegra_clk_pll_freq_table cfg;
+ u64 output_rate = *prate;
+
+ if (pll->flags & TEGRA_PLL_FIXED)
+ return pll->fixed_rate;
+
+ /* PLLM is used for memory; we do not change rate */
+ if (pll->flags & TEGRA_PLLM)
+ return __clk_get_rate(hw->clk);
+
+ if (_get_table_rate(hw, &cfg, rate, *prate) &&
+ _calc_rate(hw, &cfg, rate, *prate))
+ return -EINVAL;
+
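+	/* rate = parent_rate * N / (M * P) */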
+ output_rate *= cfg.n;
+ do_div(output_rate, cfg.m * cfg.p);
+
+ return output_rate;
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val = pll_readl_base(pll);
+ u32 divn = 0, divm = 0, divp = 0;
+ u64 rate = parent_rate;
+
+ if (val & PLL_BASE_BYPASS)
+ return parent_rate;
+
+ if ((pll->flags & TEGRA_PLL_FIXED) && !(val & PLL_BASE_OVERRIDE)) {
+ struct tegra_clk_pll_freq_table sel;
+ if (_get_table_rate(hw, &sel, pll->fixed_rate, parent_rate)) {
+ pr_err("Clock %s has unknown fixed frequency\n",
+ __clk_get_name(hw->clk));
+ BUG();
+ }
+ return pll->fixed_rate;
+ }
+
+ divp = (val >> pll->divp_shift) & (divp_mask(pll));
+ if (pll->flags & TEGRA_PLLU)
+ divp ^= 1;
+
+ divn = (val >> pll->divn_shift) & (divn_mask(pll));
+ divm = (val >> pll->divm_shift) & (divm_mask(pll));
+ divm *= (1 << divp);
+
+ rate *= divn;
+ do_div(rate, divm);
+ return rate;
+}
+
+static int clk_plle_training(struct tegra_clk_pll *pll)
+{
+ u32 val;
+ unsigned long timeout;
+
+ if (!pll->pmc)
+ return -ENOSYS;
+
+ /*
+ * PLLE is already disabled, and setup cleared;
+ * create falling edge on PLLE IDDQ input.
+ */
+ val = readl(pll->pmc + PMC_SATA_PWRGT);
+ val |= PMC_SATA_PWRGT_PLLE_IDDQ_VALUE;
+ writel(val, pll->pmc + PMC_SATA_PWRGT);
+
+ val = readl(pll->pmc + PMC_SATA_PWRGT);
+ val |= PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL;
+ writel(val, pll->pmc + PMC_SATA_PWRGT);
+
+ val = readl(pll->pmc + PMC_SATA_PWRGT);
+ val &= ~PMC_SATA_PWRGT_PLLE_IDDQ_VALUE;
+ writel(val, pll->pmc + PMC_SATA_PWRGT);
+
+ val = pll_readl_misc(pll);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (1) {
+ val = pll_readl_misc(pll);
+ if (val & PLLE_MISC_READY)
+ break;
+ if (time_after(jiffies, timeout)) {
+ pr_err("%s: timeout waiting for PLLE\n", __func__);
+ return -EBUSY;
+ }
+ udelay(300);
+ }
+
+ return 0;
+}
+
+static int clk_plle_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+ struct tegra_clk_pll_freq_table sel;
+ u32 val;
+ int err;
+
+ if (_get_table_rate(hw, &sel, pll->fixed_rate, input_rate))
+ return -EINVAL;
+
+ clk_pll_disable(hw);
+
+ val = pll_readl_misc(pll);
+ val &= ~(PLLE_MISC_LOCK_ENABLE | PLLE_MISC_SETUP_MASK);
+ pll_writel_misc(val, pll);
+
+ val = pll_readl_misc(pll);
+ if (!(val & PLLE_MISC_READY)) {
+ err = clk_plle_training(pll);
+ if (err)
+ return err;
+ }
+
+ if (pll->flags & TEGRA_PLLE_CONFIGURE) {
+ /* configure dividers */
+ val = pll_readl_base(pll);
+ val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
+ val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
+ val |= sel.m << pll->divm_shift;
+ val |= sel.n << pll->divn_shift;
+ val |= sel.p << pll->divp_shift;
+ val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
+ pll_writel_base(val, pll);
+ }
+
+ val = pll_readl_misc(pll);
+ val |= PLLE_MISC_SETUP_VALUE;
+ val |= PLLE_MISC_LOCK_ENABLE;
+ pll_writel_misc(val, pll);
+
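+	/* Spread spectrum is not used; keep it disabled. */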
+ val = readl(pll->clk_base + PLLE_SS_CTRL);
+ val |= PLLE_SS_DISABLE;
+ writel(val, pll->clk_base + PLLE_SS_CTRL);
+
+	val = pll_readl_base(pll);
+ val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
+ pll_writel_base(val, pll);
+
+ clk_pll_wait_for_lock(pll, pll->clk_base + pll->params->misc_reg,
+ pll->params->lock_bit_idx);
+ return 0;
+}
+
+static unsigned long clk_plle_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val = pll_readl_base(pll);
+ u32 divn = 0, divm = 0, divp = 0;
+ u64 rate = parent_rate;
+
+ divp = (val >> pll->divp_shift) & (divp_mask(pll));
+ divn = (val >> pll->divn_shift) & (divn_mask(pll));
+ divm = (val >> pll->divm_shift) & (divm_mask(pll));
+ divm *= divp;
+
+ rate *= divn;
+ do_div(rate, divm);
+ return rate;
+}
+
+const struct clk_ops tegra_clk_pll_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_pll_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
+};
+
+const struct clk_ops tegra_clk_plle_ops = {
+ .recalc_rate = clk_plle_recalc_rate,
+ .is_enabled = clk_pll_is_enabled,
+ .disable = clk_pll_disable,
+ .enable = clk_plle_enable,
+};
+
+static struct clk *_tegra_clk_register_pll(const char *name,
+ const char *parent_name, void __iomem *clk_base,
+ void __iomem *pmc, unsigned long flags,
+ unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock,
+ const struct clk_ops *ops)
+{
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ pll->clk_base = clk_base;
+ pll->pmc = pmc;
+
+ pll->freq_table = freq_table;
+ pll->params = pll_params;
+ pll->fixed_rate = fixed_rate;
+ pll->flags = pll_flags;
+ pll->lock = lock;
+
+ pll->divp_shift = PLL_BASE_DIVP_SHIFT;
+ pll->divp_width = PLL_BASE_DIVP_WIDTH;
+ pll->divn_shift = PLL_BASE_DIVN_SHIFT;
+ pll->divn_width = PLL_BASE_DIVN_WIDTH;
+ pll->divm_shift = PLL_BASE_DIVM_SHIFT;
+ pll->divm_width = PLL_BASE_DIVM_WIDTH;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
+{
+ return _tegra_clk_register_pll(name, parent_name, clk_base, pmc,
+ flags, fixed_rate, pll_params, pll_flags, freq_table,
+ lock, &tegra_clk_pll_ops);
+}
+
+struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
+{
+ return _tegra_clk_register_pll(name, parent_name, clk_base, pmc,
+ flags, fixed_rate, pll_params, pll_flags, freq_table,
+ lock, &tegra_clk_plle_ops);
+}
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
new file mode 100644
index 000000000000..2fd924d38606
--- /dev/null
+++ b/drivers/clk/tegra/clk-super.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+#define SUPER_STATE_IDLE 0
+#define SUPER_STATE_RUN 1
+#define SUPER_STATE_IRQ 2
+#define SUPER_STATE_FIQ 3
+
+#define SUPER_STATE_SHIFT 28
+#define SUPER_STATE_MASK ((BIT(SUPER_STATE_IDLE) | BIT(SUPER_STATE_RUN) | \
+ BIT(SUPER_STATE_IRQ) | BIT(SUPER_STATE_FIQ)) \
+ << SUPER_STATE_SHIFT)
+
+#define SUPER_LP_DIV2_BYPASS (1 << 16)
+
+#define super_state(s) (BIT(s) << SUPER_STATE_SHIFT)
+#define super_state_to_src_shift(m, s) ((m->width * s))
+#define super_state_to_src_mask(m) (((1 << m->width) - 1))
+
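+/*
+ * The burst policy register holds one clock-source field per CPU state
+ * (idle/run/irq/fiq); the state bits in the top nibble select which
+ * field is currently in effect.
+ */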
+static u8 clk_super_get_parent(struct clk_hw *hw)
+{
+ struct tegra_clk_super_mux *mux = to_clk_super_mux(hw);
+ u32 val, state;
+ u8 source, shift;
+
+ val = readl_relaxed(mux->reg);
+
+ state = val & SUPER_STATE_MASK;
+
+ BUG_ON((state != super_state(SUPER_STATE_RUN)) &&
+ (state != super_state(SUPER_STATE_IDLE)));
+ shift = (state == super_state(SUPER_STATE_IDLE)) ?
+ super_state_to_src_shift(mux, SUPER_STATE_IDLE) :
+ super_state_to_src_shift(mux, SUPER_STATE_RUN);
+
+ source = (val >> shift) & super_state_to_src_mask(mux);
+
+ /*
+ * If LP_DIV2_BYPASS is not set and PLLX is current parent then
+ * PLLX/2 is the input source to CCLKLP.
+ */
+ if ((mux->flags & TEGRA_DIVIDER_2) && !(val & SUPER_LP_DIV2_BYPASS) &&
+ (source == mux->pllx_index))
+ source = mux->div2_index;
+
+ return source;
+}
+
+static int clk_super_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_clk_super_mux *mux = to_clk_super_mux(hw);
+ u32 val, state;
+ int err = 0;
+ u8 parent_index, shift;
+ unsigned long flags = 0;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+
+ val = readl_relaxed(mux->reg);
+ state = val & SUPER_STATE_MASK;
+ BUG_ON((state != super_state(SUPER_STATE_RUN)) &&
+ (state != super_state(SUPER_STATE_IDLE)));
+ shift = (state == super_state(SUPER_STATE_IDLE)) ?
+ super_state_to_src_shift(mux, SUPER_STATE_IDLE) :
+ super_state_to_src_shift(mux, SUPER_STATE_RUN);
+
+ /*
+ * For LP mode super-clock switch between PLLX direct
+ * and divided-by-2 outputs is allowed only when other
+ * than PLLX clock source is current parent.
+ */
+ if ((mux->flags & TEGRA_DIVIDER_2) && ((index == mux->div2_index) ||
+ (index == mux->pllx_index))) {
+ parent_index = clk_super_get_parent(hw);
+ if ((parent_index == mux->div2_index) ||
+ (parent_index == mux->pllx_index)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ val ^= SUPER_LP_DIV2_BYPASS;
+ writel_relaxed(val, mux->reg);
+ udelay(2);
+
+ if (index == mux->div2_index)
+ index = mux->pllx_index;
+ }
+ val &= ~((super_state_to_src_mask(mux)) << shift);
+ val |= (index & (super_state_to_src_mask(mux))) << shift;
+
+ writel_relaxed(val, mux->reg);
+ udelay(2);
+
+out:
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return err;
+}
+
+const struct clk_ops tegra_clk_super_ops = {
+ .get_parent = clk_super_get_parent,
+ .set_parent = clk_super_set_parent,
+};
+
+struct clk *tegra_clk_register_super_mux(const char *name,
+ const char **parent_names, u8 num_parents,
+ unsigned long flags, void __iomem *reg, u8 clk_super_flags,
+ u8 width, u8 pllx_index, u8 div2_index, spinlock_t *lock)
+{
+ struct tegra_clk_super_mux *super;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ super = kzalloc(sizeof(*super), GFP_KERNEL);
+ if (!super) {
+ pr_err("%s: could not allocate super clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &tegra_clk_super_ops;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ super->reg = reg;
+ super->pllx_index = pllx_index;
+ super->div2_index = div2_index;
+ super->lock = lock;
+ super->width = width;
+ super->flags = clk_super_flags;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ super->hw.init = &init;
+
+ clk = clk_register(NULL, &super->hw);
+ if (IS_ERR(clk))
+ kfree(super);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
new file mode 100644
index 000000000000..143ce1f899ad
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -0,0 +1,1355 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/tegra.h>
+#include <linux/delay.h>
+
+#include "clk.h"
+
+#define RST_DEVICES_L 0x004
+#define RST_DEVICES_H 0x008
+#define RST_DEVICES_U 0x00c
+#define RST_DEVICES_SET_L 0x300
+#define RST_DEVICES_CLR_L 0x304
+#define RST_DEVICES_SET_H 0x308
+#define RST_DEVICES_CLR_H 0x30c
+#define RST_DEVICES_SET_U 0x310
+#define RST_DEVICES_CLR_U 0x314
+#define RST_DEVICES_NUM 3
+
+#define CLK_OUT_ENB_L 0x010
+#define CLK_OUT_ENB_H 0x014
+#define CLK_OUT_ENB_U 0x018
+#define CLK_OUT_ENB_SET_L 0x320
+#define CLK_OUT_ENB_CLR_L 0x324
+#define CLK_OUT_ENB_SET_H 0x328
+#define CLK_OUT_ENB_CLR_H 0x32c
+#define CLK_OUT_ENB_SET_U 0x330
+#define CLK_OUT_ENB_CLR_U 0x334
+#define CLK_OUT_ENB_NUM 3
+
+#define OSC_CTRL 0x50
+#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
+#define OSC_CTRL_OSC_FREQ_13MHZ (0<<30)
+#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30)
+#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30)
+#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30)
+#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
+
+#define OSC_CTRL_PLL_REF_DIV_MASK (3<<28)
+#define OSC_CTRL_PLL_REF_DIV_1 (0<<28)
+#define OSC_CTRL_PLL_REF_DIV_2 (1<<28)
+#define OSC_CTRL_PLL_REF_DIV_4 (2<<28)
+
+#define OSC_FREQ_DET 0x58
+#define OSC_FREQ_DET_TRIG (1<<31)
+
+#define OSC_FREQ_DET_STATUS 0x5c
+#define OSC_FREQ_DET_BUSY (1<<31)
+#define OSC_FREQ_DET_CNT_MASK 0xFFFF
+
+#define PLLS_BASE 0xf0
+#define PLLS_MISC 0xf4
+#define PLLC_BASE 0x80
+#define PLLC_MISC 0x8c
+#define PLLM_BASE 0x90
+#define PLLM_MISC 0x9c
+#define PLLP_BASE 0xa0
+#define PLLP_MISC 0xac
+#define PLLA_BASE 0xb0
+#define PLLA_MISC 0xbc
+#define PLLU_BASE 0xc0
+#define PLLU_MISC 0xcc
+#define PLLD_BASE 0xd0
+#define PLLD_MISC 0xdc
+#define PLLX_BASE 0xe0
+#define PLLX_MISC 0xe4
+#define PLLE_BASE 0xe8
+#define PLLE_MISC 0xec
+
+#define PLL_BASE_LOCK 27
+#define PLLE_MISC_LOCK 11
+
+#define PLL_MISC_LOCK_ENABLE 18
+#define PLLDU_MISC_LOCK_ENABLE 22
+#define PLLE_MISC_LOCK_ENABLE 9
+
+#define PLLC_OUT 0x84
+#define PLLM_OUT 0x94
+#define PLLP_OUTA 0xa4
+#define PLLP_OUTB 0xa8
+#define PLLA_OUT 0xb4
+
+#define CCLK_BURST_POLICY 0x20
+#define SUPER_CCLK_DIVIDER 0x24
+#define SCLK_BURST_POLICY 0x28
+#define SUPER_SCLK_DIVIDER 0x2c
+#define CLK_SYSTEM_RATE 0x30
+
+#define CCLK_BURST_POLICY_SHIFT 28
+#define CCLK_RUN_POLICY_SHIFT 4
+#define CCLK_IDLE_POLICY_SHIFT 0
+#define CCLK_IDLE_POLICY 1
+#define CCLK_RUN_POLICY 2
+#define CCLK_BURST_POLICY_PLLX 8
+
+#define CLK_SOURCE_I2S1 0x100
+#define CLK_SOURCE_I2S2 0x104
+#define CLK_SOURCE_SPDIF_OUT 0x108
+#define CLK_SOURCE_SPDIF_IN 0x10c
+#define CLK_SOURCE_PWM 0x110
+#define CLK_SOURCE_SPI 0x114
+#define CLK_SOURCE_SBC1 0x134
+#define CLK_SOURCE_SBC2 0x118
+#define CLK_SOURCE_SBC3 0x11c
+#define CLK_SOURCE_SBC4 0x1b4
+#define CLK_SOURCE_XIO 0x120
+#define CLK_SOURCE_TWC 0x12c
+#define CLK_SOURCE_IDE 0x144
+#define CLK_SOURCE_NDFLASH 0x160
+#define CLK_SOURCE_VFIR 0x168
+#define CLK_SOURCE_SDMMC1 0x150
+#define CLK_SOURCE_SDMMC2 0x154
+#define CLK_SOURCE_SDMMC3 0x1bc
+#define CLK_SOURCE_SDMMC4 0x164
+#define CLK_SOURCE_CVE 0x140
+#define CLK_SOURCE_TVO 0x188
+#define CLK_SOURCE_TVDAC 0x194
+#define CLK_SOURCE_HDMI 0x18c
+#define CLK_SOURCE_DISP1 0x138
+#define CLK_SOURCE_DISP2 0x13c
+#define CLK_SOURCE_CSITE 0x1d4
+#define CLK_SOURCE_LA 0x1f8
+#define CLK_SOURCE_OWR 0x1cc
+#define CLK_SOURCE_NOR 0x1d0
+#define CLK_SOURCE_MIPI 0x174
+#define CLK_SOURCE_I2C1 0x124
+#define CLK_SOURCE_I2C2 0x198
+#define CLK_SOURCE_I2C3 0x1b8
+#define CLK_SOURCE_DVC 0x128
+#define CLK_SOURCE_UARTA 0x178
+#define CLK_SOURCE_UARTB 0x17c
+#define CLK_SOURCE_UARTC 0x1a0
+#define CLK_SOURCE_UARTD 0x1c0
+#define CLK_SOURCE_UARTE 0x1c4
+#define CLK_SOURCE_3D 0x158
+#define CLK_SOURCE_2D 0x15c
+#define CLK_SOURCE_MPE 0x170
+#define CLK_SOURCE_EPP 0x16c
+#define CLK_SOURCE_HOST1X 0x180
+#define CLK_SOURCE_VDE 0x1c8
+#define CLK_SOURCE_VI 0x148
+#define CLK_SOURCE_VI_SENSOR 0x1a8
+#define CLK_SOURCE_EMC 0x19c
+
+#define AUDIO_SYNC_CLK 0x38
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_BLINK_ENB 7
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
+#define PMC_BLINK_TIMER 0x40
+
+/* Tegra CPU clock and reset control regs */
+#define TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX 0x4c
+#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET 0x340
+#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR 0x344
+
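+/*
+ * CPU_CLOCK selects the per-CPU clock stop bit (starting at bit 8);
+ * CPU_RESET sets one bit in each 4-bit group of the CPU complex reset
+ * register, i.e. all of the reset lines associated with that CPU.
+ */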
+#define CPU_CLOCK(cpu) (0x1 << (8 + cpu))
+#define CPU_RESET(cpu) (0x1111ul << (cpu))
+
+#ifdef CONFIG_PM_SLEEP
+static struct cpu_clk_suspend_context {
+ u32 pllx_misc;
+ u32 pllx_base;
+
+ u32 cpu_burst;
+ u32 clk_csite_src;
+ u32 cclk_divider;
+} tegra20_cpu_clk_sctx;
+#endif
+
+static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
+
+static void __iomem *clk_base;
+static void __iomem *pmc_base;
+
+static DEFINE_SPINLOCK(pll_div_lock);
+static DEFINE_SPINLOCK(sysrate_lock);
+
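+/*
+ * Convenience wrappers around TEGRA_INIT_DATA for the common peripheral
+ * clock variants used below: an 8-bit divider, an integer-only divider,
+ * a 16-bit divider, or a mux-only clock with no divider at all.
+ */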
+#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, \
+ _regs, _clk_num, periph_clk_enb_refcnt, \
+ _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+#define TEGRA_INIT_DATA_DIV16(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_width, _clk_num, _regs, \
+ _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_width, 0, 0, 0, 0, 0, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+/*
+ * IDs assigned here must be in sync with the DT bindings definition
+ * for Tegra20 clocks.
+ */
+enum tegra20_clk {
+ cpu, ac97 = 3, rtc, timer, uarta, gpio = 8, sdmmc2, i2s1 = 11, i2c1,
+ ndflash, sdmmc1, sdmmc4, twc, pwm, i2s2, epp, gr2d = 21, usbd, isp,
+ gr3d, ide, disp2, disp1, host1x, vcp, cache2 = 31, mem, ahbdma, apbdma,
+ kbc = 36, stat_mon, pmc, fuse, kfuse, sbc1, nor, spi, sbc2, xio, sbc3,
+ dvc, dsi, mipi = 50, hdmi, csi, tvdac, i2c2, uartc, emc = 57, usb2,
+ usb3, mpe, vde, bsea, bsev, speedo, uartd, uarte, i2c3, sbc4, sdmmc3,
+ pex, owr, afi, csite, pcie_xclk, avpucq = 75, la, irama = 84, iramb,
+ iramc, iramd, cram2, audio_2x, clk_d, csus = 92, cdev1, cdev2,
+ uartb = 96, vfir, spdif_in, spdif_out, vi, vi_sensor, tvo, cve,
+ osc, clk_32k, clk_m, sclk, cclk, hclk, pclk, blink, pll_a, pll_a_out0,
+ pll_c, pll_c_out1, pll_d, pll_d_out0, pll_e, pll_m, pll_m_out1,
+ pll_p, pll_p_out1, pll_p_out2, pll_p_out3, pll_p_out4, pll_s, pll_u,
+ pll_x, cop, audio, pll_ref, twd, clk_max,
+};
+
+static struct clk *clks[clk_max];
+static struct clk_onecell_data clk_data;
+
+static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
+ { 12000000, 600000000, 600, 12, 1, 8 },
+ { 13000000, 600000000, 600, 13, 1, 8 },
+ { 19200000, 600000000, 500, 16, 1, 6 },
+ { 26000000, 600000000, 600, 26, 1, 8 },
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_m_freq_table[] = {
+ { 12000000, 666000000, 666, 12, 1, 8},
+ { 13000000, 666000000, 666, 13, 1, 8},
+ { 19200000, 666000000, 555, 16, 1, 8},
+ { 26000000, 666000000, 666, 26, 1, 8},
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
+ { 12000000, 216000000, 432, 12, 2, 8},
+ { 13000000, 216000000, 432, 13, 2, 8},
+ { 19200000, 216000000, 90, 4, 2, 1},
+ { 26000000, 216000000, 432, 26, 2, 8},
+ { 12000000, 432000000, 432, 12, 1, 8},
+ { 13000000, 432000000, 432, 13, 1, 8},
+ { 19200000, 432000000, 90, 4, 1, 1},
+ { 26000000, 432000000, 432, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
+ { 28800000, 56448000, 49, 25, 1, 1},
+ { 28800000, 73728000, 64, 25, 1, 1},
+ { 28800000, 24000000, 5, 6, 1, 1},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
+ { 12000000, 216000000, 216, 12, 1, 4},
+ { 13000000, 216000000, 216, 13, 1, 4},
+ { 19200000, 216000000, 135, 12, 1, 3},
+ { 26000000, 216000000, 216, 26, 1, 4},
+
+ { 12000000, 594000000, 594, 12, 1, 8},
+ { 13000000, 594000000, 594, 13, 1, 8},
+ { 19200000, 594000000, 495, 16, 1, 8},
+ { 26000000, 594000000, 594, 26, 1, 8},
+
+ { 12000000, 1000000000, 1000, 12, 1, 12},
+ { 13000000, 1000000000, 1000, 13, 1, 12},
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 12},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
+ { 12000000, 480000000, 960, 12, 2, 0},
+ { 13000000, 480000000, 960, 13, 2, 0},
+ { 19200000, 480000000, 200, 4, 2, 0},
+ { 26000000, 480000000, 960, 26, 2, 0},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
+ /* 1 GHz */
+ { 12000000, 1000000000, 1000, 12, 1, 12},
+ { 13000000, 1000000000, 1000, 13, 1, 12},
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 12},
+
+ /* 912 MHz */
+ { 12000000, 912000000, 912, 12, 1, 12},
+ { 13000000, 912000000, 912, 13, 1, 12},
+ { 19200000, 912000000, 760, 16, 1, 8},
+ { 26000000, 912000000, 912, 26, 1, 12},
+
+ /* 816 MHz */
+ { 12000000, 816000000, 816, 12, 1, 12},
+ { 13000000, 816000000, 816, 13, 1, 12},
+ { 19200000, 816000000, 680, 16, 1, 8},
+ { 26000000, 816000000, 816, 26, 1, 12},
+
+ /* 760 MHz */
+ { 12000000, 760000000, 760, 12, 1, 12},
+ { 13000000, 760000000, 760, 13, 1, 12},
+ { 19200000, 760000000, 950, 24, 1, 8},
+ { 26000000, 760000000, 760, 26, 1, 12},
+
+ /* 750 MHz */
+ { 12000000, 750000000, 750, 12, 1, 12},
+ { 13000000, 750000000, 750, 13, 1, 12},
+ { 19200000, 750000000, 625, 16, 1, 8},
+ { 26000000, 750000000, 750, 26, 1, 12},
+
+ /* 608 MHz */
+ { 12000000, 608000000, 608, 12, 1, 12},
+ { 13000000, 608000000, 608, 13, 1, 12},
+ { 19200000, 608000000, 380, 12, 1, 8},
+ { 26000000, 608000000, 608, 26, 1, 12},
+
+ /* 456 MHz */
+ { 12000000, 456000000, 456, 12, 1, 12},
+ { 13000000, 456000000, 456, 13, 1, 12},
+ { 19200000, 456000000, 380, 16, 1, 8},
+ { 26000000, 456000000, 456, 26, 1, 12},
+
+ /* 312 MHz */
+ { 12000000, 312000000, 312, 12, 1, 12},
+ { 13000000, 312000000, 312, 13, 1, 12},
+ { 19200000, 312000000, 260, 16, 1, 8},
+ { 26000000, 312000000, 312, 26, 1, 12},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
+ { 12000000, 100000000, 200, 24, 1, 0 },
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+/* PLL parameters */
+static struct tegra_clk_pll_params pll_c_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLC_BASE,
+ .misc_reg = PLLC_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_m_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLM_BASE,
+ .misc_reg = PLLM_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_p_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLP_BASE,
+ .misc_reg = PLLP_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_a_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLA_BASE,
+ .misc_reg = PLLA_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_d_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD_BASE,
+ .misc_reg = PLLD_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_u_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 48000000,
+ .vco_max = 960000000,
+ .base_reg = PLLU_BASE,
+ .misc_reg = PLLU_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_x_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLX_BASE,
+ .misc_reg = PLLX_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_e_params = {
+ .input_min = 12000000,
+ .input_max = 12000000,
+ .cf_min = 0,
+ .cf_max = 0,
+ .vco_min = 0,
+ .vco_max = 0,
+ .base_reg = PLLE_BASE,
+ .misc_reg = PLLE_MISC,
+ .lock_bit_idx = PLLE_MISC_LOCK,
+ .lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
+ .lock_delay = 0,
+};
+
+/* Peripheral clock registers */
+static struct tegra_clk_periph_regs periph_l_regs = {
+ .enb_reg = CLK_OUT_ENB_L,
+ .enb_set_reg = CLK_OUT_ENB_SET_L,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_L,
+ .rst_reg = RST_DEVICES_L,
+ .rst_set_reg = RST_DEVICES_SET_L,
+ .rst_clr_reg = RST_DEVICES_CLR_L,
+};
+
+static struct tegra_clk_periph_regs periph_h_regs = {
+ .enb_reg = CLK_OUT_ENB_H,
+ .enb_set_reg = CLK_OUT_ENB_SET_H,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_H,
+ .rst_reg = RST_DEVICES_H,
+ .rst_set_reg = RST_DEVICES_SET_H,
+ .rst_clr_reg = RST_DEVICES_CLR_H,
+};
+
+static struct tegra_clk_periph_regs periph_u_regs = {
+ .enb_reg = CLK_OUT_ENB_U,
+ .enb_set_reg = CLK_OUT_ENB_SET_U,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_U,
+ .rst_reg = RST_DEVICES_U,
+ .rst_set_reg = RST_DEVICES_SET_U,
+ .rst_clr_reg = RST_DEVICES_CLR_U,
+};
+
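+/*
+ * Note: despite its name, this reads the boot-time oscillator frequency
+ * select field rather than running the on-chip frequency detector.
+ */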
+static unsigned long tegra20_clk_measure_input_freq(void)
+{
+ u32 osc_ctrl = readl_relaxed(clk_base + OSC_CTRL);
+ u32 auto_clk_control = osc_ctrl & OSC_CTRL_OSC_FREQ_MASK;
+ u32 pll_ref_div = osc_ctrl & OSC_CTRL_PLL_REF_DIV_MASK;
+ unsigned long input_freq;
+
+ switch (auto_clk_control) {
+ case OSC_CTRL_OSC_FREQ_12MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 12000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_13MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 13000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_19_2MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 19200000;
+ break;
+ case OSC_CTRL_OSC_FREQ_26MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 26000000;
+ break;
+ default:
+ pr_err("Unexpected clock autodetect value %d",
+ auto_clk_control);
+ BUG();
+ return 0;
+ }
+
+ return input_freq;
+}
+
+static unsigned int tegra20_get_pll_ref_div(void)
+{
+ u32 pll_ref_div = readl_relaxed(clk_base + OSC_CTRL) &
+ OSC_CTRL_PLL_REF_DIV_MASK;
+
+ switch (pll_ref_div) {
+ case OSC_CTRL_PLL_REF_DIV_1:
+ return 1;
+ case OSC_CTRL_PLL_REF_DIV_2:
+ return 2;
+ case OSC_CTRL_PLL_REF_DIV_4:
+ return 4;
+ default:
+ pr_err("Invalied pll ref divider %d\n", pll_ref_div);
+ BUG();
+ }
+ return 0;
+}
+
+static void tegra20_pll_init(void)
+{
+ struct clk *clk;
+
+ /* PLLC */
+ clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, NULL, 0,
+ 0, &pll_c_params, TEGRA_PLL_HAS_CPCON,
+ pll_c_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_c", NULL);
+ clks[pll_c] = clk;
+
+ /* PLLC_OUT1 */
+ clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
+ clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
+ clk_base + PLLC_OUT, 1, 0, CLK_SET_RATE_PARENT,
+ 0, NULL);
+ clk_register_clkdev(clk, "pll_c_out1", NULL);
+ clks[pll_c_out1] = clk;
+
+ /* PLLP */
+ clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, NULL, 0,
+ 216000000, &pll_p_params, TEGRA_PLL_FIXED |
+ TEGRA_PLL_HAS_CPCON, pll_p_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_p", NULL);
+ clks[pll_p] = clk;
+
+ /* PLLP_OUT1 */
+ clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
+ clk_base + PLLP_OUTA, 0,
+ TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
+ clk_base + PLLP_OUTA, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out1", NULL);
+ clks[pll_p_out1] = clk;
+
+ /* PLLP_OUT2 */
+ clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
+ clk_base + PLLP_OUTA, 0,
+ TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
+ 24, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
+ clk_base + PLLP_OUTA, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out2", NULL);
+ clks[pll_p_out2] = clk;
+
+ /* PLLP_OUT3 */
+ clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
+ clk_base + PLLP_OUTB, 0,
+ TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
+ clk_base + PLLP_OUTB, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out3", NULL);
+ clks[pll_p_out3] = clk;
+
+ /* PLLP_OUT4 */
+ clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
+ clk_base + PLLP_OUTB, 0,
+ TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
+ 24, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
+ clk_base + PLLP_OUTB, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out4", NULL);
+ clks[pll_p_out4] = clk;
+
+ /* PLLM */
+ clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, NULL,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
+ &pll_m_params, TEGRA_PLL_HAS_CPCON,
+ pll_m_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_m", NULL);
+ clks[pll_m] = clk;
+
+ /* PLLM_OUT1 */
+ clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
+ clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+ clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_m_out1", NULL);
+ clks[pll_m_out1] = clk;
+
+ /* PLLX */
+ clk = tegra_clk_register_pll("pll_x", "pll_ref", clk_base, NULL, 0,
+ 0, &pll_x_params, TEGRA_PLL_HAS_CPCON,
+ pll_x_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_x", NULL);
+ clks[pll_x] = clk;
+
+ /* PLLU */
+ clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, NULL, 0,
+ 0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON,
+ pll_u_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_u", NULL);
+ clks[pll_u] = clk;
+
+ /* PLLD */
+ clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, NULL, 0,
+ 0, &pll_d_params, TEGRA_PLL_HAS_CPCON,
+ pll_d_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_d", NULL);
+ clks[pll_d] = clk;
+
+ /* PLLD_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d_out0", NULL);
+ clks[pll_d_out0] = clk;
+
+ /* PLLA */
+ clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, NULL, 0,
+ 0, &pll_a_params, TEGRA_PLL_HAS_CPCON,
+ pll_a_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_a", NULL);
+ clks[pll_a] = clk;
+
+ /* PLLA_OUT0 */
+ clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
+ clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
+ clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_a_out0", NULL);
+ clks[pll_a_out0] = clk;
+
+ /* PLLE */
+ clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL,
+ 0, 100000000, &pll_e_params,
+ 0, pll_e_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_e", NULL);
+ clks[pll_e] = clk;
+}
+
+static const char *cclk_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p_cclk", "pll_p_out4_cclk",
+ "pll_p_out3_cclk", "clk_d", "pll_x" };
+static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
+ "pll_p_out3", "pll_p_out2", "clk_d",
+ "clk_32k", "pll_m_out1" };
+
+static void tegra20_super_clk_init(void)
+{
+ struct clk *clk;
+
+ /*
+ * DIV_U71 dividers for CCLK, these dividers are used only
+ * if parent clock is fixed rate.
+ */
+
+ /*
+ * Clock input to cclk divided from pll_p using
+ * U71 divider of cclk.
+ */
+ clk = tegra_clk_register_divider("pll_p_cclk", "pll_p",
+ clk_base + SUPER_CCLK_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_cclk", NULL);
+
+ /*
+ * Clock input to cclk divided from pll_p_out3 using
+ * U71 divider of cclk.
+ */
+ clk = tegra_clk_register_divider("pll_p_out3_cclk", "pll_p_out3",
+ clk_base + SUPER_CCLK_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out3_cclk", NULL);
+
+ /*
+ * Clock input to cclk divided from pll_p_out4 using
+ * U71 divider of cclk.
+ */
+ clk = tegra_clk_register_divider("pll_p_out4_cclk", "pll_p_out4",
+ clk_base + SUPER_CCLK_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out4_cclk", NULL);
+
+ /* CCLK */
+ clk = tegra_clk_register_super_mux("cclk", cclk_parents,
+ ARRAY_SIZE(cclk_parents), CLK_SET_RATE_PARENT,
+ clk_base + CCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "cclk", NULL);
+ clks[cclk] = clk;
+
+ /* SCLK */
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents), CLK_SET_RATE_PARENT,
+ clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "sclk", NULL);
+ clks[sclk] = clk;
+
+ /* HCLK */
+ clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
+ clk_base + CLK_SYSTEM_RATE, 4, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT,
+ clk_base + CLK_SYSTEM_RATE, 7,
+ CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "hclk", NULL);
+ clks[hclk] = clk;
+
+ /* PCLK */
+ clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
+ clk_base + CLK_SYSTEM_RATE, 0, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT,
+ clk_base + CLK_SYSTEM_RATE, 3,
+ CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "pclk", NULL);
+ clks[pclk] = clk;
+
+ /* twd */
+ clk = clk_register_fixed_factor(NULL, "twd", "cclk", 0, 1, 4);
+ clk_register_clkdev(clk, "twd", NULL);
+ clks[twd] = clk;
+}
+
+static const char *audio_parents[] = {"spdif_in", "i2s1", "i2s2", "unused",
+ "pll_a_out0", "unused", "unused",
+ "unused"};
+
+static void __init tegra20_audio_clk_init(void)
+{
+ struct clk *clk;
+
+ /* audio */
+ clk = clk_register_mux(NULL, "audio_mux", audio_parents,
+ ARRAY_SIZE(audio_parents), 0,
+ clk_base + AUDIO_SYNC_CLK, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio", "audio_mux", 0,
+ clk_base + AUDIO_SYNC_CLK, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio", NULL);
+ clks[audio] = clk;
+
+ /* audio_2x */
+ clk = clk_register_fixed_factor(NULL, "audio_doubler", "audio",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_periph_gate("audio_2x", "audio_doubler",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 89, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio_2x", NULL);
+ clks[audio_2x] = clk;
+}
+
+static const char *i2s1_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
+ "clk_m"};
+static const char *i2s2_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
+ "clk_m"};
+static const char *spdif_out_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
+ "clk_m"};
+static const char *spdif_in_parents[] = {"pll_p", "pll_c", "pll_m"};
+static const char *pwm_parents[] = {"pll_p", "pll_c", "audio", "clk_m",
+ "clk_32k"};
+static const char *mux_pllpcm_clkm[] = {"pll_p", "pll_c", "pll_m", "clk_m"};
+static const char *mux_pllmcpa[] = {"pll_m", "pll_c", "pll_c", "pll_a"};
+static const char *mux_pllpdc_clkm[] = {"pll_p", "pll_d_out0", "pll_c",
+ "clk_m"};
+static const char *mux_pllmcp_clkm[] = {"pll_m", "pll_c", "pll_p", "clk_m"};
+
+static struct tegra_periph_init_data tegra_periph_clk_list[] = {
+ TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra20-i2s.0", i2s1_parents, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
+ TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra20-i2s.1", i2s2_parents, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
+ TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra20-spdif", spdif_out_parents, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
+ TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra20-spdif", spdif_in_parents, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
+ TEGRA_INIT_DATA_MUX("sbc1", NULL, "spi_tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
+ TEGRA_INIT_DATA_MUX("sbc2", NULL, "spi_tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
+ TEGRA_INIT_DATA_MUX("sbc3", NULL, "spi_tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
+ TEGRA_INIT_DATA_MUX("sbc4", NULL, "spi_tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
+ TEGRA_INIT_DATA_MUX("spi", NULL, "spi", mux_pllpcm_clkm, CLK_SOURCE_SPI, 43, &periph_h_regs, TEGRA_PERIPH_ON_APB, spi),
+ TEGRA_INIT_DATA_MUX("xio", NULL, "xio", mux_pllpcm_clkm, CLK_SOURCE_XIO, 45, &periph_h_regs, 0, xio),
+ TEGRA_INIT_DATA_MUX("twc", NULL, "twc", mux_pllpcm_clkm, CLK_SOURCE_TWC, 16, &periph_l_regs, TEGRA_PERIPH_ON_APB, twc),
+ TEGRA_INIT_DATA_MUX("ide", NULL, "ide", mux_pllpcm_clkm, CLK_SOURCE_XIO, 25, &periph_l_regs, 0, ide),
+ TEGRA_INIT_DATA_MUX("ndflash", NULL, "tegra_nand", mux_pllpcm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_l_regs, 0, ndflash),
+ TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllpcm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
+ TEGRA_INIT_DATA_MUX("csite", NULL, "csite", mux_pllpcm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, 0, csite),
+ TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllpcm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, 0, la),
+ TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllpcm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
+ TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllpcm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
+ TEGRA_INIT_DATA_MUX("vde", NULL, "vde", mux_pllpcm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
+ TEGRA_INIT_DATA_MUX("vi", "vi", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
+ TEGRA_INIT_DATA_MUX("epp", NULL, "epp", mux_pllmcpa, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
+ TEGRA_INIT_DATA_MUX("mpe", NULL, "mpe", mux_pllmcpa, CLK_SOURCE_MPE, 60, &periph_h_regs, 0, mpe),
+ TEGRA_INIT_DATA_MUX("host1x", NULL, "host1x", mux_pllmcpa, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
+ TEGRA_INIT_DATA_MUX("3d", NULL, "3d", mux_pllmcpa, CLK_SOURCE_3D, 24, &periph_l_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d),
+ TEGRA_INIT_DATA_MUX("2d", NULL, "2d", mux_pllmcpa, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr2d),
+ TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllpcm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
+ TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
+ TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
+ TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
+ TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
+ TEGRA_INIT_DATA_MUX("cve", NULL, "cve", mux_pllpdc_clkm, CLK_SOURCE_CVE, 49, &periph_h_regs, 0, cve),
+ TEGRA_INIT_DATA_MUX("tvo", NULL, "tvo", mux_pllpdc_clkm, CLK_SOURCE_TVO, 49, &periph_h_regs, 0, tvo),
+ TEGRA_INIT_DATA_MUX("tvdac", NULL, "tvdac", mux_pllpdc_clkm, CLK_SOURCE_TVDAC, 53, &periph_h_regs, 0, tvdac),
+ TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
+ TEGRA_INIT_DATA_DIV16("i2c1", "div-clk", "tegra-i2c.0", mux_pllpcm_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2c1),
+ TEGRA_INIT_DATA_DIV16("i2c2", "div-clk", "tegra-i2c.1", mux_pllpcm_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c2),
+ TEGRA_INIT_DATA_DIV16("i2c3", "div-clk", "tegra-i2c.2", mux_pllpcm_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2c3),
+ TEGRA_INIT_DATA_DIV16("dvc", "div-clk", "tegra-i2c.3", mux_pllpcm_clkm, CLK_SOURCE_DVC, 47, &periph_h_regs, TEGRA_PERIPH_ON_APB, dvc),
+ TEGRA_INIT_DATA_MUX("hdmi", NULL, "hdmi", mux_pllpdc_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
+ TEGRA_INIT_DATA("pwm", NULL, "tegra-pwm", pwm_parents, CLK_SOURCE_PWM, 28, 3, 0, 0, 8, 1, 0, &periph_l_regs, 17, periph_clk_enb_refcnt, TEGRA_PERIPH_ON_APB, pwm),
+};
+
+static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
+ TEGRA_INIT_DATA_NODIV("uarta", NULL, "tegra_uart.0", mux_pllpcm_clkm, CLK_SOURCE_UARTA, 30, 2, 6, &periph_l_regs, TEGRA_PERIPH_ON_APB, uarta),
+ TEGRA_INIT_DATA_NODIV("uartb", NULL, "tegra_uart.1", mux_pllpcm_clkm, CLK_SOURCE_UARTB, 30, 2, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, uartb),
+ TEGRA_INIT_DATA_NODIV("uartc", NULL, "tegra_uart.2", mux_pllpcm_clkm, CLK_SOURCE_UARTC, 30, 2, 55, &periph_h_regs, TEGRA_PERIPH_ON_APB, uartc),
+ TEGRA_INIT_DATA_NODIV("uartd", NULL, "tegra_uart.3", mux_pllpcm_clkm, CLK_SOURCE_UARTD, 30, 2, 65, &periph_u_regs, TEGRA_PERIPH_ON_APB, uartd),
+ TEGRA_INIT_DATA_NODIV("uarte", NULL, "tegra_uart.4", mux_pllpcm_clkm, CLK_SOURCE_UARTE, 30, 2, 66, &periph_u_regs, TEGRA_PERIPH_ON_APB, uarte),
+ TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllpdc_clkm, CLK_SOURCE_DISP1, 30, 2, 27, &periph_l_regs, 0, disp1),
+ TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 26, &periph_l_regs, 0, disp2),
+};
+
+static void __init tegra20_periph_clk_init(void)
+{
+ struct tegra_periph_init_data *data;
+ struct clk *clk;
+ int i;
+
+ /* apbdma */
+ clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base,
+ 0, 34, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-apbdma");
+ clks[apbdma] = clk;
+
+ /* rtc */
+ clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
+ TEGRA_PERIPH_NO_RESET,
+ clk_base, 0, 4, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "rtc-tegra");
+ clks[rtc] = clk;
+
+ /* timer */
+ clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base,
+ 0, 5, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "timer");
+ clks[timer] = clk;
+
+ /* kbc */
+ clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
+ TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 36, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-kbc");
+ clks[kbc] = clk;
+
+ /* csus */
+ clk = tegra_clk_register_periph_gate("csus", "clk_m",
+ TEGRA_PERIPH_NO_RESET,
+ clk_base, 0, 92, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "csus", "tengra_camera");
+ clks[csus] = clk;
+
+ /* vcp */
+ clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0,
+ clk_base, 0, 29, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "vcp", "tegra-avp");
+ clks[vcp] = clk;
+
+ /* bsea */
+ clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0,
+ clk_base, 0, 62, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "bsea", "tegra-avp");
+ clks[bsea] = clk;
+
+ /* bsev */
+ clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0,
+ clk_base, 0, 63, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "bsev", "tegra-aes");
+ clks[bsev] = clk;
+
+ /* emc */
+ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
+ ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ clk_base + CLK_SOURCE_EMC,
+ 30, 2, 0, NULL);
+ clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
+ 57, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "emc", NULL);
+ clks[emc] = clk;
+
+ /* usbd */
+ clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base, 0,
+ 22, &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "fsl-tegra-udc");
+ clks[usbd] = clk;
+
+ /* usb2 */
+ clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base, 0,
+ 58, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-ehci.1");
+ clks[usb2] = clk;
+
+ /* usb3 */
+ clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base, 0,
+ 59, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-ehci.2");
+ clks[usb3] = clk;
+
+ /* dsi */
+ clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0,
+ 48, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "dsi");
+ clks[dsi] = clk;
+
+ /* csi */
+ clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
+ 0, 52, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "csi", "tegra_camera");
+ clks[csi] = clk;
+
+ /* isp */
+ clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0, 23,
+ &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "isp", "tegra_camera");
+ clks[isp] = clk;
+
+ /* pex */
+ clk = tegra_clk_register_periph_gate("pex", "clk_m", 0, clk_base, 0, 70,
+ &periph_u_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "pex", NULL);
+ clks[pex] = clk;
+
+ /* afi */
+ clk = tegra_clk_register_periph_gate("afi", "clk_m", 0, clk_base, 0, 72,
+ &periph_u_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "afi", NULL);
+ clks[afi] = clk;
+
+ /* pcie_xclk */
+ clk = tegra_clk_register_periph_gate("pcie_xclk", "clk_m", 0, clk_base,
+ 0, 74, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "pcie_xclk", NULL);
+ clks[pcie_xclk] = clk;
+
+ /* cdev1 */
+ clk = clk_register_fixed_rate(NULL, "cdev1_fixed", NULL, CLK_IS_ROOT,
+ 26000000);
+ clk = tegra_clk_register_periph_gate("cdev1", "cdev1_fixed", 0,
+ clk_base, 0, 94, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "cdev1", NULL);
+ clks[cdev1] = clk;
+
+ /* cdev2 */
+ clk = clk_register_fixed_rate(NULL, "cdev2_fixed", NULL, CLK_IS_ROOT,
+ 26000000);
+ clk = tegra_clk_register_periph_gate("cdev2", "cdev2_fixed", 0,
+ clk_base, 0, 93, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "cdev2", NULL);
+ clks[cdev2] = clk;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
+ data = &tegra_periph_clk_list[i];
+ clk = tegra_clk_register_periph(data->name, data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset);
+ clk_register_clkdev(clk, data->con_id, data->dev_id);
+ clks[data->clk_id] = clk;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
+ data = &tegra_periph_nodiv_clk_list[i];
+ clk = tegra_clk_register_periph_nodiv(data->name,
+ data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset);
+ clk_register_clkdev(clk, data->con_id, data->dev_id);
+ clks[data->clk_id] = clk;
+ }
+}
+
+static void __init tegra20_fixed_clk_init(void)
+{
+ struct clk *clk;
+
+ /* clk_32k */
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
+ 32768);
+ clk_register_clkdev(clk, "clk_32k", NULL);
+ clks[clk_32k] = clk;
+}
+
+static void __init tegra20_pmc_clk_init(void)
+{
+ struct clk *clk;
+
+ /* blink */
+ writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
+ clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
+ pmc_base + PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
+ clk = clk_register_gate(NULL, "blink", "blink_override", 0,
+ pmc_base + PMC_CTRL,
+ PMC_CTRL_BLINK_ENB, 0, NULL);
+ clk_register_clkdev(clk, "blink", NULL);
+ clks[blink] = clk;
+}
+
+static void __init tegra20_osc_clk_init(void)
+{
+ struct clk *clk;
+ unsigned long input_freq;
+ unsigned int pll_ref_div;
+
+ input_freq = tegra20_clk_measure_input_freq();
+
+ /* clk_m */
+ clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT |
+ CLK_IGNORE_UNUSED, input_freq);
+ clk_register_clkdev(clk, "clk_m", NULL);
+ clks[clk_m] = clk;
+
+ /* pll_ref */
+ pll_ref_div = tegra20_get_pll_ref_div();
+ clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
+ CLK_SET_RATE_PARENT, 1, pll_ref_div);
+ clk_register_clkdev(clk, "pll_ref", NULL);
+ clks[pll_ref] = clk;
+}
+
+/* Tegra20 CPU clock and reset control functions */
+static void tegra20_wait_cpu_in_reset(u32 cpu)
+{
+ unsigned int reg;
+
+ do {
+ reg = readl(clk_base +
+ TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+ cpu_relax();
+ } while (!(reg & (1 << cpu))); /* check CPU been reset or not */
+
+ return;
+}
+
+static void tegra20_put_cpu_in_reset(u32 cpu)
+{
+ writel(CPU_RESET(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+ dmb();
+}
+
+static void tegra20_cpu_out_of_reset(u32 cpu)
+{
+ writel(CPU_RESET(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
+ wmb();
+}
+
+static void tegra20_enable_cpu_clock(u32 cpu)
+{
+ unsigned int reg;
+
+ reg = readl(clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg & ~CPU_CLOCK(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ barrier();
+ reg = readl(clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+}
+
+static void tegra20_disable_cpu_clock(u32 cpu)
+{
+ unsigned int reg;
+
+ reg = readl(clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg | CPU_CLOCK(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static bool tegra20_cpu_rail_off_ready(void)
+{
+ unsigned int cpu_rst_status;
+
+ cpu_rst_status = readl(clk_base +
+ TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+
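+	/* The rail may be powered off only while CPU1 (bit 1) is in reset. */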
+ return !!(cpu_rst_status & 0x2);
+}
+
+static void tegra20_cpu_clock_suspend(void)
+{
+ /* switch coresite to clk_m, save off original source */
+ tegra20_cpu_clk_sctx.clk_csite_src =
+ readl(clk_base + CLK_SOURCE_CSITE);
+ writel(3<<30, clk_base + CLK_SOURCE_CSITE);
+
+ tegra20_cpu_clk_sctx.cpu_burst =
+ readl(clk_base + CCLK_BURST_POLICY);
+ tegra20_cpu_clk_sctx.pllx_base =
+ readl(clk_base + PLLX_BASE);
+ tegra20_cpu_clk_sctx.pllx_misc =
+ readl(clk_base + PLLX_MISC);
+ tegra20_cpu_clk_sctx.cclk_divider =
+ readl(clk_base + SUPER_CCLK_DIVIDER);
+}
+
+static void tegra20_cpu_clock_resume(void)
+{
+ unsigned int reg, policy;
+
+ /* Is CPU complex already running on PLLX? */
+ reg = readl(clk_base + CCLK_BURST_POLICY);
+ policy = (reg >> CCLK_BURST_POLICY_SHIFT) & 0xF;
+
+ if (policy == CCLK_IDLE_POLICY)
+ reg = (reg >> CCLK_IDLE_POLICY_SHIFT) & 0xF;
+ else if (policy == CCLK_RUN_POLICY)
+ reg = (reg >> CCLK_RUN_POLICY_SHIFT) & 0xF;
+ else
+ BUG();
+
+ if (reg != CCLK_BURST_POLICY_PLLX) {
+ /* restore PLLX settings if CPU is on different PLL */
+ writel(tegra20_cpu_clk_sctx.pllx_misc,
+ clk_base + PLLX_MISC);
+ writel(tegra20_cpu_clk_sctx.pllx_base,
+ clk_base + PLLX_BASE);
+
+ /* wait for PLL stabilization if PLLX was enabled */
+ if (tegra20_cpu_clk_sctx.pllx_base & (1 << 30))
+ udelay(300);
+ }
+
+ /*
+ * Restore original burst policy setting for calls resulting from CPU
+ * LP2 in idle or system suspend.
+ */
+ writel(tegra20_cpu_clk_sctx.cclk_divider,
+ clk_base + SUPER_CCLK_DIVIDER);
+ writel(tegra20_cpu_clk_sctx.cpu_burst,
+ clk_base + CCLK_BURST_POLICY);
+
+ writel(tegra20_cpu_clk_sctx.clk_csite_src,
+ clk_base + CLK_SOURCE_CSITE);
+}
+#endif
+
+static struct tegra_cpu_car_ops tegra20_cpu_car_ops = {
+ .wait_for_reset = tegra20_wait_cpu_in_reset,
+ .put_in_reset = tegra20_put_cpu_in_reset,
+ .out_of_reset = tegra20_cpu_out_of_reset,
+ .enable_clock = tegra20_enable_cpu_clock,
+ .disable_clock = tegra20_disable_cpu_clock,
+#ifdef CONFIG_PM_SLEEP
+ .rail_off_ready = tegra20_cpu_rail_off_ready,
+ .suspend = tegra20_cpu_clock_suspend,
+ .resume = tegra20_cpu_clock_resume,
+#endif
+};
+
+static __initdata struct tegra_clk_init_table init_table[] = {
+ {pll_p, clk_max, 216000000, 1},
+ {pll_p_out1, clk_max, 28800000, 1},
+ {pll_p_out2, clk_max, 48000000, 1},
+ {pll_p_out3, clk_max, 72000000, 1},
+ {pll_p_out4, clk_max, 24000000, 1},
+ {pll_c, clk_max, 600000000, 1},
+ {pll_c_out1, clk_max, 120000000, 1},
+ {sclk, pll_c_out1, 0, 1},
+ {hclk, clk_max, 0, 1},
+ {pclk, clk_max, 60000000, 1},
+ {csite, clk_max, 0, 1},
+ {emc, clk_max, 0, 1},
+ {cclk, clk_max, 0, 1},
+ {uarta, pll_p, 0, 0},
+ {uartb, pll_p, 0, 0},
+ {uartc, pll_p, 0, 0},
+ {uartd, pll_p, 0, 0},
+ {uarte, pll_p, 0, 0},
+ {usbd, clk_max, 12000000, 0},
+ {usb2, clk_max, 12000000, 0},
+ {usb3, clk_max, 12000000, 0},
+ {pll_a, clk_max, 56448000, 1},
+ {pll_a_out0, clk_max, 11289600, 1},
+ {cdev1, clk_max, 0, 1},
+ {blink, clk_max, 32768, 1},
+ {i2s1, pll_a_out0, 11289600, 0},
+ {i2s2, pll_a_out0, 11289600, 0},
+ {sdmmc1, pll_p, 48000000, 0},
+ {sdmmc3, pll_p, 48000000, 0},
+ {sdmmc4, pll_p, 48000000, 0},
+ {spi, pll_p, 20000000, 0},
+ {sbc1, pll_p, 100000000, 0},
+ {sbc2, pll_p, 100000000, 0},
+ {sbc3, pll_p, 100000000, 0},
+ {sbc4, pll_p, 100000000, 0},
+ {host1x, pll_c, 150000000, 0},
+ {disp1, pll_p, 600000000, 0},
+ {disp2, pll_p, 600000000, 0},
+ {clk_max, clk_max, 0, 0}, /* This MUST be the last entry */
+};
+
+/*
+ * Some clocks may be used by different drivers depending on the board
+ * configuration. List those here to register them twice in the clock lookup
+ * table under two names.
+ */
+static struct tegra_clk_duplicate tegra_clk_duplicates[] = {
+ TEGRA_CLK_DUPLICATE(usbd, "utmip-pad", NULL),
+ TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL),
+ TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL),
+ TEGRA_CLK_DUPLICATE(cclk, NULL, "cpu"),
+ TEGRA_CLK_DUPLICATE(twd, "smp_twd", NULL),
+ TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* Must be the last entry */
+};
+
+static const struct of_device_id pmc_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-pmc" },
+ {},
+};
+
+void __init tegra20_clock_init(struct device_node *np)
+{
+ int i;
+ struct device_node *node;
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("Can't map CAR registers\n");
+ BUG();
+ }
+
+ node = of_find_matching_node(NULL, pmc_match);
+ if (!node) {
+ pr_err("Failed to find pmc node\n");
+ BUG();
+ }
+
+ pmc_base = of_iomap(node, 0);
+ if (!pmc_base) {
+ pr_err("Can't map pmc registers\n");
+ BUG();
+ }
+
+ tegra20_osc_clk_init();
+ tegra20_pmc_clk_init();
+ tegra20_fixed_clk_init();
+ tegra20_pll_init();
+ tegra20_super_clk_init();
+ tegra20_periph_clk_init();
+ tegra20_audio_clk_init();
+
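+ /*
+ * Sanity-check the table: registration failures are fatal, and slots that
+ * were never filled in become ERR_PTR(-EINVAL) so that
+ * of_clk_src_onecell_get() hands an error, rather than a NULL clock, to a
+ * DT consumer referencing an unimplemented index.
+ */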
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ if (IS_ERR(clks[i])) {
+ pr_err("Tegra20 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ BUG();
+ }
+ if (!clks[i])
+ clks[i] = ERR_PTR(-EINVAL);
+ }
+
+ tegra_init_dup_clks(tegra_clk_duplicates, clks, clk_max);
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ tegra_init_from_table(init_table, clks, clk_max);
+
+ tegra_cpu_car_ops = &tegra20_cpu_car_ops;
+}
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
new file mode 100644
index 000000000000..32c61cb6d0bb
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -0,0 +1,1994 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/tegra.h>
+
+#include <mach/powergate.h>
+
+#include "clk.h"
+
+#define RST_DEVICES_L 0x004
+#define RST_DEVICES_H 0x008
+#define RST_DEVICES_U 0x00c
+#define RST_DEVICES_V 0x358
+#define RST_DEVICES_W 0x35c
+#define RST_DEVICES_SET_L 0x300
+#define RST_DEVICES_CLR_L 0x304
+#define RST_DEVICES_SET_H 0x308
+#define RST_DEVICES_CLR_H 0x30c
+#define RST_DEVICES_SET_U 0x310
+#define RST_DEVICES_CLR_U 0x314
+#define RST_DEVICES_SET_V 0x430
+#define RST_DEVICES_CLR_V 0x434
+#define RST_DEVICES_SET_W 0x438
+#define RST_DEVICES_CLR_W 0x43c
+#define RST_DEVICES_NUM 5
+
+#define CLK_OUT_ENB_L 0x010
+#define CLK_OUT_ENB_H 0x014
+#define CLK_OUT_ENB_U 0x018
+#define CLK_OUT_ENB_V 0x360
+#define CLK_OUT_ENB_W 0x364
+#define CLK_OUT_ENB_SET_L 0x320
+#define CLK_OUT_ENB_CLR_L 0x324
+#define CLK_OUT_ENB_SET_H 0x328
+#define CLK_OUT_ENB_CLR_H 0x32c
+#define CLK_OUT_ENB_SET_U 0x330
+#define CLK_OUT_ENB_CLR_U 0x334
+#define CLK_OUT_ENB_SET_V 0x440
+#define CLK_OUT_ENB_CLR_V 0x444
+#define CLK_OUT_ENB_SET_W 0x448
+#define CLK_OUT_ENB_CLR_W 0x44c
+#define CLK_OUT_ENB_NUM 5
+
+#define OSC_CTRL 0x50
+#define OSC_CTRL_OSC_FREQ_MASK (0xF<<28)
+#define OSC_CTRL_OSC_FREQ_13MHZ (0X0<<28)
+#define OSC_CTRL_OSC_FREQ_19_2MHZ (0X4<<28)
+#define OSC_CTRL_OSC_FREQ_12MHZ (0X8<<28)
+#define OSC_CTRL_OSC_FREQ_26MHZ (0XC<<28)
+#define OSC_CTRL_OSC_FREQ_16_8MHZ (0X1<<28)
+#define OSC_CTRL_OSC_FREQ_38_4MHZ (0X5<<28)
+#define OSC_CTRL_OSC_FREQ_48MHZ (0X9<<28)
+#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
+
+#define OSC_CTRL_PLL_REF_DIV_MASK (3<<26)
+#define OSC_CTRL_PLL_REF_DIV_1 (0<<26)
+#define OSC_CTRL_PLL_REF_DIV_2 (1<<26)
+#define OSC_CTRL_PLL_REF_DIV_4 (2<<26)
+
+#define OSC_FREQ_DET 0x58
+#define OSC_FREQ_DET_TRIG BIT(31)
+
+#define OSC_FREQ_DET_STATUS 0x5c
+#define OSC_FREQ_DET_BUSY BIT(31)
+#define OSC_FREQ_DET_CNT_MASK 0xffff
+
+#define CCLKG_BURST_POLICY 0x368
+#define SUPER_CCLKG_DIVIDER 0x36c
+#define CCLKLP_BURST_POLICY 0x370
+#define SUPER_CCLKLP_DIVIDER 0x374
+#define SCLK_BURST_POLICY 0x028
+#define SUPER_SCLK_DIVIDER 0x02c
+
+#define SYSTEM_CLK_RATE 0x030
+
+#define PLLC_BASE 0x80
+#define PLLC_MISC 0x8c
+#define PLLM_BASE 0x90
+#define PLLM_MISC 0x9c
+#define PLLP_BASE 0xa0
+#define PLLP_MISC 0xac
+#define PLLX_BASE 0xe0
+#define PLLX_MISC 0xe4
+#define PLLD_BASE 0xd0
+#define PLLD_MISC 0xdc
+#define PLLD2_BASE 0x4b8
+#define PLLD2_MISC 0x4bc
+#define PLLE_BASE 0xe8
+#define PLLE_MISC 0xec
+#define PLLA_BASE 0xb0
+#define PLLA_MISC 0xbc
+#define PLLU_BASE 0xc0
+#define PLLU_MISC 0xcc
+
+#define PLL_MISC_LOCK_ENABLE 18
+#define PLLDU_MISC_LOCK_ENABLE 22
+#define PLLE_MISC_LOCK_ENABLE 9
+
+#define PLL_BASE_LOCK 27
+#define PLLE_MISC_LOCK 11
+
+#define PLLE_AUX 0x48c
+#define PLLC_OUT 0x84
+#define PLLM_OUT 0x94
+#define PLLP_OUTA 0xa4
+#define PLLP_OUTB 0xa8
+#define PLLA_OUT 0xb4
+
+#define AUDIO_SYNC_CLK_I2S0 0x4a0
+#define AUDIO_SYNC_CLK_I2S1 0x4a4
+#define AUDIO_SYNC_CLK_I2S2 0x4a8
+#define AUDIO_SYNC_CLK_I2S3 0x4ac
+#define AUDIO_SYNC_CLK_I2S4 0x4b0
+#define AUDIO_SYNC_CLK_SPDIF 0x4b4
+
+#define PMC_CLK_OUT_CNTRL 0x1a8
+
+#define CLK_SOURCE_I2S0 0x1d8
+#define CLK_SOURCE_I2S1 0x100
+#define CLK_SOURCE_I2S2 0x104
+#define CLK_SOURCE_I2S3 0x3bc
+#define CLK_SOURCE_I2S4 0x3c0
+#define CLK_SOURCE_SPDIF_OUT 0x108
+#define CLK_SOURCE_SPDIF_IN 0x10c
+#define CLK_SOURCE_PWM 0x110
+#define CLK_SOURCE_D_AUDIO 0x3d0
+#define CLK_SOURCE_DAM0 0x3d8
+#define CLK_SOURCE_DAM1 0x3dc
+#define CLK_SOURCE_DAM2 0x3e0
+#define CLK_SOURCE_HDA 0x428
+#define CLK_SOURCE_HDA2CODEC_2X 0x3e4
+#define CLK_SOURCE_SBC1 0x134
+#define CLK_SOURCE_SBC2 0x118
+#define CLK_SOURCE_SBC3 0x11c
+#define CLK_SOURCE_SBC4 0x1b4
+#define CLK_SOURCE_SBC5 0x3c8
+#define CLK_SOURCE_SBC6 0x3cc
+#define CLK_SOURCE_SATA_OOB 0x420
+#define CLK_SOURCE_SATA 0x424
+#define CLK_SOURCE_NDFLASH 0x160
+#define CLK_SOURCE_NDSPEED 0x3f8
+#define CLK_SOURCE_VFIR 0x168
+#define CLK_SOURCE_SDMMC1 0x150
+#define CLK_SOURCE_SDMMC2 0x154
+#define CLK_SOURCE_SDMMC3 0x1bc
+#define CLK_SOURCE_SDMMC4 0x164
+#define CLK_SOURCE_VDE 0x1c8
+#define CLK_SOURCE_CSITE 0x1d4
+#define CLK_SOURCE_LA 0x1f8
+#define CLK_SOURCE_OWR 0x1cc
+#define CLK_SOURCE_NOR 0x1d0
+#define CLK_SOURCE_MIPI 0x174
+#define CLK_SOURCE_I2C1 0x124
+#define CLK_SOURCE_I2C2 0x198
+#define CLK_SOURCE_I2C3 0x1b8
+#define CLK_SOURCE_I2C4 0x3c4
+#define CLK_SOURCE_I2C5 0x128
+#define CLK_SOURCE_UARTA 0x178
+#define CLK_SOURCE_UARTB 0x17c
+#define CLK_SOURCE_UARTC 0x1a0
+#define CLK_SOURCE_UARTD 0x1c0
+#define CLK_SOURCE_UARTE 0x1c4
+#define CLK_SOURCE_VI 0x148
+#define CLK_SOURCE_VI_SENSOR 0x1a8
+#define CLK_SOURCE_3D 0x158
+#define CLK_SOURCE_3D2 0x3b0
+#define CLK_SOURCE_2D 0x15c
+#define CLK_SOURCE_EPP 0x16c
+#define CLK_SOURCE_MPE 0x170
+#define CLK_SOURCE_HOST1X 0x180
+#define CLK_SOURCE_CVE 0x140
+#define CLK_SOURCE_TVO 0x188
+#define CLK_SOURCE_DTV 0x1dc
+#define CLK_SOURCE_HDMI 0x18c
+#define CLK_SOURCE_TVDAC 0x194
+#define CLK_SOURCE_DISP1 0x138
+#define CLK_SOURCE_DISP2 0x13c
+#define CLK_SOURCE_DSIB 0xd0
+#define CLK_SOURCE_TSENSOR 0x3b8
+#define CLK_SOURCE_ACTMON 0x3e8
+#define CLK_SOURCE_EXTERN1 0x3ec
+#define CLK_SOURCE_EXTERN2 0x3f0
+#define CLK_SOURCE_EXTERN3 0x3f4
+#define CLK_SOURCE_I2CSLOW 0x3fc
+#define CLK_SOURCE_SE 0x42c
+#define CLK_SOURCE_MSELECT 0x3b4
+#define CLK_SOURCE_EMC 0x19c
+
+#define AUDIO_SYNC_DOUBLER 0x49c
+
+#define PMC_CTRL 0
+#define PMC_CTRL_BLINK_ENB 7
+
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
+#define PMC_BLINK_TIMER 0x40
+
+#define UTMIP_PLL_CFG2 0x488
+#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
+#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4)
+
+#define UTMIP_PLL_CFG1 0x484
+#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 6)
+#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
+#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16)
+
+/* Tegra CPU clock and reset control regs */
+#define TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX 0x4c
+#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET 0x340
+#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR 0x344
+#define TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR 0x34c
+#define TEGRA30_CLK_RST_CONTROLLER_CPU_CMPLX_STATUS 0x470
+
+#define CPU_CLOCK(cpu) (0x1 << (8 + cpu))
+#define CPU_RESET(cpu) (0x1111ul << (cpu))
+
+#define CLK_RESET_CCLK_BURST 0x20
+#define CLK_RESET_CCLK_DIVIDER 0x24
+#define CLK_RESET_PLLX_BASE 0xe0
+#define CLK_RESET_PLLX_MISC 0xe4
+
+#define CLK_RESET_SOURCE_CSITE 0x1d4
+
+#define CLK_RESET_CCLK_BURST_POLICY_SHIFT 28
+#define CLK_RESET_CCLK_RUN_POLICY_SHIFT 4
+#define CLK_RESET_CCLK_IDLE_POLICY_SHIFT 0
+#define CLK_RESET_CCLK_IDLE_POLICY 1
+#define CLK_RESET_CCLK_RUN_POLICY 2
+#define CLK_RESET_CCLK_BURST_POLICY_PLLX 8
+
+#ifdef CONFIG_PM_SLEEP
+static struct cpu_clk_suspend_context {
+ u32 pllx_misc;
+ u32 pllx_base;
+
+ u32 cpu_burst;
+ u32 clk_csite_src;
+ u32 cclk_divider;
+} tegra30_cpu_clk_sctx;
+#endif
+
+static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
+
+static void __iomem *clk_base;
+static void __iomem *pmc_base;
+static unsigned long input_freq;
+
+static DEFINE_SPINLOCK(clk_doubler_lock);
+static DEFINE_SPINLOCK(clk_out_lock);
+static DEFINE_SPINLOCK(pll_div_lock);
+static DEFINE_SPINLOCK(cml_lock);
+static DEFINE_SPINLOCK(pll_d_lock);
+static DEFINE_SPINLOCK(sysrate_lock);
+
+#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, 0, _regs, _clk_num, \
+ periph_clk_enb_refcnt, _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_DIV16(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP, \
+ _regs, _clk_num, periph_clk_enb_refcnt, \
+ _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_MUX8(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 29, 3, 0, 0, 8, 1, 0, _regs, _clk_num, \
+ periph_clk_enb_refcnt, _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+#define TEGRA_INIT_DATA_UART(_name, _con_id, _dev_id, _parents, _offset,\
+ _clk_num, _regs, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 16, 1, TEGRA_DIVIDER_UART, _regs, \
+ _clk_num, periph_clk_enb_refcnt, 0, _clk_id)
+
+#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_width, _clk_num, _regs, \
+ _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_width, 0, 0, 0, 0, 0, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
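+
+/*
+ * The helper variants above appear to differ only in mux/divider geometry:
+ * TEGRA_INIT_DATA_MUX uses a 2-bit parent select at bit 30 with an 8.1-bit
+ * divider, MUX8 a 3-bit select at bit 29, DIV16 a 16-bit integer divider
+ * rounded up, UART a 16.1-bit divider with TEGRA_DIVIDER_UART, INT the same
+ * 8.1-bit divider restricted to integer values, and NODIV no divider at all.
+ */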
+
+/*
+ * The IDs assigned here must stay in sync with the DT binding definitions
+ * for the Tegra30 clocks.
+ */
+enum tegra30_clk {
+ cpu, rtc = 4, timer, uarta, gpio = 8, sdmmc2, i2s1 = 11, i2c1, ndflash,
+ sdmmc1, sdmmc4, pwm = 17, i2s2, epp, gr2d = 21, usbd, isp, gr3d,
+ disp2 = 26, disp1, host1x, vcp, i2s0, cop_cache, mc, ahbdma, apbdma,
+ kbc = 36, statmon, pmc, kfuse = 40, sbc1, nor, sbc2 = 44, sbc3 = 46,
+ i2c5, dsia, mipi = 50, hdmi, csi, tvdac, i2c2, uartc, emc = 57, usb2,
+ usb3, mpe, vde, bsea, bsev, speedo, uartd, uarte, i2c3, sbc4, sdmmc3,
+ pcie, owr, afi, csite, pciex, avpucq, la, dtv = 79, ndspeed, i2cslow,
+ dsib, irama = 84, iramb, iramc, iramd, cram2, audio_2x = 90, csus = 92,
+ cdev1, cdev2, cpu_g = 96, cpu_lp, gr3d2, mselect, tsensor, i2s3, i2s4,
+ i2c4, sbc5, sbc6, d_audio, apbif, dam0, dam1, dam2, hda2codec_2x,
+ atomics, audio0_2x, audio1_2x, audio2_2x, audio3_2x, audio4_2x,
+ spdif_2x, actmon, extern1, extern2, extern3, sata_oob, sata, hda,
+ se = 127, hda2hdmi, sata_cold, uartb = 160, vfir, spdif_in, spdif_out,
+ vi, vi_sensor, fuse, fuse_burn, cve, tvo, clk_32k, clk_m, clk_m_div2,
+ clk_m_div4, pll_ref, pll_c, pll_c_out1, pll_m, pll_m_out1, pll_p,
+ pll_p_out1, pll_p_out2, pll_p_out3, pll_p_out4, pll_a, pll_a_out0,
+ pll_d, pll_d_out0, pll_d2, pll_d2_out0, pll_u, pll_x, pll_x_out0, pll_e,
+ spdif_in_sync, i2s0_sync, i2s1_sync, i2s2_sync, i2s3_sync, i2s4_sync,
+ vimclk_sync, audio0, audio1, audio2, audio3, audio4, spdif, clk_out_1,
+ clk_out_2, clk_out_3, sclk, blink, cclk_g, cclk_lp, twd, cml0, cml1,
+ hclk, pclk, clk_out_1_mux = 300, clk_max
+};
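+
+/*
+ * For example, uarta has ID 6 above, so a board device tree node would
+ * reference it as (provider label illustrative):
+ *
+ *	clocks = <&tegra_car 6>;
+ */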
+
+static struct clk *clks[clk_max];
+static struct clk_onecell_data clk_data;
+
+/*
+ * Structure defining the fields for the USB UTMI clock parameters.
+ */
+struct utmi_clk_param {
+ /* Oscillator frequency in Hz */
+ u32 osc_frequency;
+ /* UTMIP PLL Enable Delay Count */
+ u8 enable_delay_count;
+ /* UTMIP PLL Stable count */
+ u8 stable_count;
+ /* UTMIP PLL Active delay count */
+ u8 active_delay_count;
+ /* UTMIP PLL Xtal frequency count */
+ u8 xtal_freq_count;
+};
+
+static const struct utmi_clk_param utmi_parameters[] = {
+/* OSC_FREQUENCY, ENABLE_DLY, STABLE_CNT, ACTIVE_DLY, XTAL_FREQ_CNT */
+ {13000000, 0x02, 0x33, 0x05, 0x7F},
+ {19200000, 0x03, 0x4B, 0x06, 0xBB},
+ {12000000, 0x02, 0x2F, 0x04, 0x76},
+ {26000000, 0x04, 0x66, 0x09, 0xFE},
+ {16800000, 0x03, 0x41, 0x0A, 0xA4},
+};
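+
+/*
+ * Example: with a 12 MHz oscillator the table above selects a stable count
+ * of 0x2F, which tegra30_utmi_param_configure() below writes into
+ * UTMIP_PLL_CFG2 bits [21:6] via UTMIP_PLL_CFG2_STABLE_COUNT(0x2F).
+ */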
+
+static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
+ { 12000000, 1040000000, 520, 6, 1, 8},
+ { 13000000, 1040000000, 480, 6, 1, 8},
+ { 16800000, 1040000000, 495, 8, 1, 8}, /* actual: 1039.5 MHz */
+ { 19200000, 1040000000, 325, 6, 1, 6},
+ { 26000000, 1040000000, 520, 13, 1, 8},
+
+ { 12000000, 832000000, 416, 6, 1, 8},
+ { 13000000, 832000000, 832, 13, 1, 8},
+ { 16800000, 832000000, 396, 8, 1, 8}, /* actual: 831.6 MHz */
+ { 19200000, 832000000, 260, 6, 1, 8},
+ { 26000000, 832000000, 416, 13, 1, 8},
+
+ { 12000000, 624000000, 624, 12, 1, 8},
+ { 13000000, 624000000, 624, 13, 1, 8},
+ { 16800000, 600000000, 520, 14, 1, 8},
+ { 19200000, 624000000, 520, 16, 1, 8},
+ { 26000000, 624000000, 624, 26, 1, 8},
+
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 16800000, 600000000, 500, 14, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
+
+ { 12000000, 520000000, 520, 12, 1, 8},
+ { 13000000, 520000000, 520, 13, 1, 8},
+ { 16800000, 520000000, 495, 16, 1, 8}, /* actual: 519.75 MHz */
+ { 19200000, 520000000, 325, 12, 1, 6},
+ { 26000000, 520000000, 520, 26, 1, 8},
+
+ { 12000000, 416000000, 416, 12, 1, 8},
+ { 13000000, 416000000, 416, 13, 1, 8},
+ { 16800000, 416000000, 396, 16, 1, 8}, /* actual: 415.8 MHz */
+ { 19200000, 416000000, 260, 12, 1, 6},
+ { 26000000, 416000000, 416, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
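+
+/*
+ * Each row appears to encode { input_rate, output_rate, N, M, P, CPCON },
+ * with output = input * N / (M * P). Worked example for the first entry:
+ * 12 MHz * 520 / (6 * 1) = 1040 MHz.
+ */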
+
+static struct tegra_clk_pll_freq_table pll_m_freq_table[] = {
+ { 12000000, 666000000, 666, 12, 1, 8},
+ { 13000000, 666000000, 666, 13, 1, 8},
+ { 16800000, 666000000, 555, 14, 1, 8},
+ { 19200000, 666000000, 555, 16, 1, 8},
+ { 26000000, 666000000, 666, 26, 1, 8},
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 16800000, 600000000, 500, 14, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
+ { 12000000, 216000000, 432, 12, 2, 8},
+ { 13000000, 216000000, 432, 13, 2, 8},
+ { 16800000, 216000000, 360, 14, 2, 8},
+ { 19200000, 216000000, 360, 16, 2, 8},
+ { 26000000, 216000000, 432, 26, 2, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
+ { 9600000, 564480000, 294, 5, 1, 4},
+ { 9600000, 552960000, 288, 5, 1, 4},
+ { 9600000, 24000000, 5, 2, 1, 1},
+
+ { 28800000, 56448000, 49, 25, 1, 1},
+ { 28800000, 73728000, 64, 25, 1, 1},
+ { 28800000, 24000000, 5, 6, 1, 1},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
+ { 12000000, 216000000, 216, 12, 1, 4},
+ { 13000000, 216000000, 216, 13, 1, 4},
+ { 16800000, 216000000, 180, 14, 1, 4},
+ { 19200000, 216000000, 180, 16, 1, 4},
+ { 26000000, 216000000, 216, 26, 1, 4},
+
+ { 12000000, 594000000, 594, 12, 1, 8},
+ { 13000000, 594000000, 594, 13, 1, 8},
+ { 16800000, 594000000, 495, 14, 1, 8},
+ { 19200000, 594000000, 495, 16, 1, 8},
+ { 26000000, 594000000, 594, 26, 1, 8},
+
+ { 12000000, 1000000000, 1000, 12, 1, 12},
+ { 13000000, 1000000000, 1000, 13, 1, 12},
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 12},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
+ { 12000000, 480000000, 960, 12, 2, 12},
+ { 13000000, 480000000, 960, 13, 2, 12},
+ { 16800000, 480000000, 400, 7, 2, 5},
+ { 19200000, 480000000, 200, 4, 2, 3},
+ { 26000000, 480000000, 960, 26, 2, 12},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
+ /* 1.7 GHz */
+ { 12000000, 1700000000, 850, 6, 1, 8},
+ { 13000000, 1700000000, 915, 7, 1, 8}, /* actual: 1699.2 MHz */
+ { 16800000, 1700000000, 708, 7, 1, 8}, /* actual: 1699.2 MHz */
+ { 19200000, 1700000000, 885, 10, 1, 8}, /* actual: 1699.2 MHz */
+ { 26000000, 1700000000, 850, 13, 1, 8},
+
+ /* 1.6 GHz */
+ { 12000000, 1600000000, 800, 6, 1, 8},
+ { 13000000, 1600000000, 738, 6, 1, 8}, /* actual: 1599.0 MHz */
+ { 16800000, 1600000000, 857, 9, 1, 8}, /* actual: 1599.7 MHz */
+ { 19200000, 1600000000, 500, 6, 1, 8},
+ { 26000000, 1600000000, 800, 13, 1, 8},
+
+ /* 1.5 GHz */
+ { 12000000, 1500000000, 750, 6, 1, 8},
+ { 13000000, 1500000000, 923, 8, 1, 8}, /* actual: 1499.8 MHz */
+ { 16800000, 1500000000, 625, 7, 1, 8},
+ { 19200000, 1500000000, 625, 8, 1, 8},
+ { 26000000, 1500000000, 750, 13, 1, 8},
+
+ /* 1.4 GHz */
+ { 12000000, 1400000000, 700, 6, 1, 8},
+ { 13000000, 1400000000, 969, 9, 1, 8}, /* actual: 1399.7 MHz */
+ { 16800000, 1400000000, 1000, 12, 1, 8},
+ { 19200000, 1400000000, 875, 12, 1, 8},
+ { 26000000, 1400000000, 700, 13, 1, 8},
+
+ /* 1.3 GHz */
+ { 12000000, 1300000000, 975, 9, 1, 8},
+ { 13000000, 1300000000, 1000, 10, 1, 8},
+ { 16800000, 1300000000, 928, 12, 1, 8}, /* actual: 1299.2 MHz */
+ { 19200000, 1300000000, 812, 12, 1, 8}, /* actual: 1299.2 MHz */
+ { 26000000, 1300000000, 650, 13, 1, 8},
+
+ /* 1.2 GHz */
+ { 12000000, 1200000000, 1000, 10, 1, 8},
+ { 13000000, 1200000000, 923, 10, 1, 8}, /* actual: 1199.9 MHz */
+ { 16800000, 1200000000, 1000, 14, 1, 8},
+ { 19200000, 1200000000, 1000, 16, 1, 8},
+ { 26000000, 1200000000, 600, 13, 1, 8},
+
+ /* 1.1 GHz */
+ { 12000000, 1100000000, 825, 9, 1, 8},
+ { 13000000, 1100000000, 846, 10, 1, 8}, /* actual: 1099.8 MHz */
+ { 16800000, 1100000000, 982, 15, 1, 8}, /* actual: 1099.8 MHz */
+ { 19200000, 1100000000, 859, 15, 1, 8}, /* actual: 1099.5 MHz */
+ { 26000000, 1100000000, 550, 13, 1, 8},
+
+ /* 1 GHz */
+ { 12000000, 1000000000, 1000, 12, 1, 8},
+ { 13000000, 1000000000, 1000, 13, 1, 8},
+ { 16800000, 1000000000, 833, 14, 1, 8}, /* actual: 999.6 MHz */
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 8},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
+ /* PLLE special case: use cpcon field to store cml divider value */
+ { 12000000, 100000000, 150, 1, 18, 11},
+ { 216000000, 100000000, 200, 18, 24, 13},
+ { 0, 0, 0, 0, 0, 0 },
+};
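+
+/*
+ * Worked example: the 12 MHz row gives a VCO of 12 MHz * 150 / 1 = 1800 MHz,
+ * and the cml divider of 18 carried in the cpcon field brings that down to
+ * the fixed 100 MHz PLLE output.
+ */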
+
+/* PLL parameters */
+static struct tegra_clk_pll_params pll_c_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLC_BASE,
+ .misc_reg = PLLC_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_m_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLM_BASE,
+ .misc_reg = PLLM_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_p_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLP_BASE,
+ .misc_reg = PLLP_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_a_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLA_BASE,
+ .misc_reg = PLLA_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_d_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD_BASE,
+ .misc_reg = PLLD_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_d2_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD2_BASE,
+ .misc_reg = PLLD2_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_u_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 48000000,
+ .vco_max = 960000000,
+ .base_reg = PLLU_BASE,
+ .misc_reg = PLLU_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_x_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1700000000,
+ .base_reg = PLLX_BASE,
+ .misc_reg = PLLX_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_e_params = {
+ .input_min = 12000000,
+ .input_max = 216000000,
+ .cf_min = 12000000,
+ .cf_max = 12000000,
+ .vco_min = 1200000000,
+ .vco_max = 2400000000U,
+ .base_reg = PLLE_BASE,
+ .misc_reg = PLLE_MISC,
+ .lock_bit_idx = PLLE_MISC_LOCK,
+ .lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+/* Peripheral clock registers */
+static struct tegra_clk_periph_regs periph_l_regs = {
+ .enb_reg = CLK_OUT_ENB_L,
+ .enb_set_reg = CLK_OUT_ENB_SET_L,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_L,
+ .rst_reg = RST_DEVICES_L,
+ .rst_set_reg = RST_DEVICES_SET_L,
+ .rst_clr_reg = RST_DEVICES_CLR_L,
+};
+
+static struct tegra_clk_periph_regs periph_h_regs = {
+ .enb_reg = CLK_OUT_ENB_H,
+ .enb_set_reg = CLK_OUT_ENB_SET_H,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_H,
+ .rst_reg = RST_DEVICES_H,
+ .rst_set_reg = RST_DEVICES_SET_H,
+ .rst_clr_reg = RST_DEVICES_CLR_H,
+};
+
+static struct tegra_clk_periph_regs periph_u_regs = {
+ .enb_reg = CLK_OUT_ENB_U,
+ .enb_set_reg = CLK_OUT_ENB_SET_U,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_U,
+ .rst_reg = RST_DEVICES_U,
+ .rst_set_reg = RST_DEVICES_SET_U,
+ .rst_clr_reg = RST_DEVICES_CLR_U,
+};
+
+static struct tegra_clk_periph_regs periph_v_regs = {
+ .enb_reg = CLK_OUT_ENB_V,
+ .enb_set_reg = CLK_OUT_ENB_SET_V,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_V,
+ .rst_reg = RST_DEVICES_V,
+ .rst_set_reg = RST_DEVICES_SET_V,
+ .rst_clr_reg = RST_DEVICES_CLR_V,
+};
+
+static struct tegra_clk_periph_regs periph_w_regs = {
+ .enb_reg = CLK_OUT_ENB_W,
+ .enb_set_reg = CLK_OUT_ENB_SET_W,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_W,
+ .rst_reg = RST_DEVICES_W,
+ .rst_set_reg = RST_DEVICES_SET_W,
+ .rst_clr_reg = RST_DEVICES_CLR_W,
+};
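+
+/*
+ * Each of the five register banks above (L/H/U/V/W) controls 32 peripheral
+ * gate bits; the gate numbers used below (e.g. 34 for apbdma, 128 for
+ * hda2hdmi) index the shared periph_clk_enb_refcnt[] array, which is sized
+ * CLK_OUT_ENB_NUM * 32 entries to cover all of them.
+ */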
+
+static void tegra30_clk_measure_input_freq(void)
+{
+ u32 osc_ctrl = readl_relaxed(clk_base + OSC_CTRL);
+ u32 auto_clk_control = osc_ctrl & OSC_CTRL_OSC_FREQ_MASK;
+ u32 pll_ref_div = osc_ctrl & OSC_CTRL_PLL_REF_DIV_MASK;
+
+ switch (auto_clk_control) {
+ case OSC_CTRL_OSC_FREQ_12MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 12000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_13MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 13000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_19_2MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 19200000;
+ break;
+ case OSC_CTRL_OSC_FREQ_26MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 26000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_16_8MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 16800000;
+ break;
+ case OSC_CTRL_OSC_FREQ_38_4MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_2);
+ input_freq = 38400000;
+ break;
+ case OSC_CTRL_OSC_FREQ_48MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_4);
+ input_freq = 48000000;
+ break;
+ default:
+ pr_err("Unexpected auto clock control value %d",
+ auto_clk_control);
+ BUG();
+ return;
+ }
+}
+
+static unsigned int tegra30_get_pll_ref_div(void)
+{
+ u32 pll_ref_div = readl_relaxed(clk_base + OSC_CTRL) &
+ OSC_CTRL_PLL_REF_DIV_MASK;
+
+ switch (pll_ref_div) {
+ case OSC_CTRL_PLL_REF_DIV_1:
+ return 1;
+ case OSC_CTRL_PLL_REF_DIV_2:
+ return 2;
+ case OSC_CTRL_PLL_REF_DIV_4:
+ return 4;
+ default:
+ pr_err("Invalid pll ref divider %d", pll_ref_div);
+ BUG();
+ }
+ return 0;
+}
+
+static void tegra30_utmi_param_configure(void)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
+ if (input_freq == utmi_parameters[i].osc_frequency)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(utmi_parameters)) {
+ pr_err("%s: Unexpected input rate %lu\n", __func__, input_freq);
+ return;
+ }
+
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL stable and active counts */
+ reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
+ reg |= UTMIP_PLL_CFG2_STABLE_COUNT(
+ utmi_parameters[i].stable_count);
+
+ reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(
+ utmi_parameters[i].active_delay_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
+
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL delay and oscillator frequency counts */
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
+ reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(
+ utmi_parameters[i].enable_delay_count);
+
+ reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
+ reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(
+ utmi_parameters[i].xtal_freq_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
+
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
+}
+
+static const char *pll_e_parents[] = {"pll_ref", "pll_p"};
+
+static void __init tegra30_pll_init(void)
+{
+ struct clk *clk;
+
+ /* PLLC */
+ clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_c_params,
+ TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
+ pll_c_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_c", NULL);
+ clks[pll_c] = clk;
+
+ /* PLLC_OUT1 */
+ clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
+ clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
+ clk_base + PLLC_OUT, 1, 0, CLK_SET_RATE_PARENT,
+ 0, NULL);
+ clk_register_clkdev(clk, "pll_c_out1", NULL);
+ clks[pll_c_out1] = clk;
+
+ /* PLLP */
+ clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, pmc_base, 0,
+ 408000000, &pll_p_params,
+ TEGRA_PLL_FIXED | TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_USE_LOCK, pll_p_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_p", NULL);
+ clks[pll_p] = clk;
+
+ /* PLLP_OUT1 */
+ clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
+ clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 8, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
+ clk_base + PLLP_OUTA, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out1", NULL);
+ clks[pll_p_out1] = clk;
+
+ /* PLLP_OUT2 */
+ clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
+ clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
+ clk_base + PLLP_OUTA, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out2", NULL);
+ clks[pll_p_out2] = clk;
+
+ /* PLLP_OUT3 */
+ clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
+ clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 8, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
+ clk_base + PLLP_OUTB, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out3", NULL);
+ clks[pll_p_out3] = clk;
+
+ /* PLLP_OUT4 */
+ clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
+ clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
+ clk_base + PLLP_OUTB, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out4", NULL);
+ clks[pll_p_out4] = clk;
+
+ /* PLLM */
+ clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
+ &pll_m_params, TEGRA_PLLM | TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_DCCON | TEGRA_PLL_USE_LOCK,
+ pll_m_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_m", NULL);
+ clks[pll_m] = clk;
+
+ /* PLLM_OUT1 */
+ clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
+ clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+ clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_m_out1", NULL);
+ clks[pll_m_out1] = clk;
+
+ /* PLLX */
+ clk = tegra_clk_register_pll("pll_x", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_x_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_DCCON | TEGRA_PLL_USE_LOCK,
+ pll_x_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_x", NULL);
+ clks[pll_x] = clk;
+
+ /* PLLX_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_x_out0", "pll_x",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_x_out0", NULL);
+ clks[pll_x_out0] = clk;
+
+ /* PLLU */
+ clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
+ pll_u_freq_table,
+ NULL);
+ clk_register_clkdev(clk, "pll_u", NULL);
+ clks[pll_u] = clk;
+
+ tegra30_utmi_param_configure();
+
+ /* PLLD */
+ clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_d_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
+ pll_d_freq_table, &pll_d_lock);
+ clk_register_clkdev(clk, "pll_d", NULL);
+ clks[pll_d] = clk;
+
+ /* PLLD_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d_out0", NULL);
+ clks[pll_d_out0] = clk;
+
+ /* PLLD2 */
+ clk = tegra_clk_register_pll("pll_d2", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_d2_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
+ pll_d_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_d2", NULL);
+ clks[pll_d2] = clk;
+
+ /* PLLD2_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d2_out0", NULL);
+ clks[pll_d2_out0] = clk;
+
+ /* PLLA */
+ clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, pmc_base,
+ 0, 0, &pll_a_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_USE_LOCK, pll_a_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_a", NULL);
+ clks[pll_a] = clk;
+
+ /* PLLA_OUT0 */
+ clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
+ clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
+ clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_a_out0", NULL);
+ clks[pll_a_out0] = clk;
+
+ /* PLLE */
+ clk = clk_register_mux(NULL, "pll_e_mux", pll_e_parents,
+ ARRAY_SIZE(pll_e_parents), 0,
+ clk_base + PLLE_AUX, 2, 1, 0, NULL);
+ clk = tegra_clk_register_plle("pll_e", "pll_e_mux", clk_base, pmc_base,
+ CLK_GET_RATE_NOCACHE, 100000000, &pll_e_params,
+ TEGRA_PLLE_CONFIGURE, pll_e_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_e", NULL);
+ clks[pll_e] = clk;
+}
+
+static const char *mux_audio_sync_clk[] = { "spdif_in_sync", "i2s0_sync",
+ "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",};
+static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern1", };
+static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern2", };
+static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern3", };
+
+static void __init tegra30_audio_clk_init(void)
+{
+ struct clk *clk;
+
+ /* spdif_in_sync */
+ clk = tegra_clk_register_sync_source("spdif_in_sync", 24000000,
+ 24000000);
+ clk_register_clkdev(clk, "spdif_in_sync", NULL);
+ clks[spdif_in_sync] = clk;
+
+ /* i2s0_sync */
+ clk = tegra_clk_register_sync_source("i2s0_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s0_sync", NULL);
+ clks[i2s0_sync] = clk;
+
+ /* i2s1_sync */
+ clk = tegra_clk_register_sync_source("i2s1_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s1_sync", NULL);
+ clks[i2s1_sync] = clk;
+
+ /* i2s2_sync */
+ clk = tegra_clk_register_sync_source("i2s2_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s2_sync", NULL);
+ clks[i2s2_sync] = clk;
+
+ /* i2s3_sync */
+ clk = tegra_clk_register_sync_source("i2s3_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s3_sync", NULL);
+ clks[i2s3_sync] = clk;
+
+ /* i2s4_sync */
+ clk = tegra_clk_register_sync_source("i2s4_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s4_sync", NULL);
+ clks[i2s4_sync] = clk;
+
+ /* vimclk_sync */
+ clk = tegra_clk_register_sync_source("vimclk_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "vimclk_sync", NULL);
+ clks[vimclk_sync] = clk;
+
+ /* audio0 */
+ clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio0", "audio0_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S0, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio0", NULL);
+ clks[audio0] = clk;
+
+ /* audio1 */
+ clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio1", "audio1_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S1, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio1", NULL);
+ clks[audio1] = clk;
+
+ /* audio2 */
+ clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio2", "audio2_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S2, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio2", NULL);
+ clks[audio2] = clk;
+
+ /* audio3 */
+ clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio3", "audio3_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S3, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio3", NULL);
+ clks[audio3] = clk;
+
+ /* audio4 */
+ clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio4", "audio4_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S4, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio4", NULL);
+ clks[audio4] = clk;
+
+ /* spdif */
+ clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "spdif", "spdif_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_SPDIF, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "spdif", NULL);
+ clks[spdif] = clk;
+
+ /* audio0_2x */
+ clk = clk_register_fixed_factor(NULL, "audio0_doubler", "audio0",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio0_div", "audio0_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 24, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio0_2x", "audio0_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 113, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio0_2x", NULL);
+ clks[audio0_2x] = clk;
+
+ /* audio1_2x */
+ clk = clk_register_fixed_factor(NULL, "audio1_doubler", "audio1",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio1_div", "audio1_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 25, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio1_2x", "audio1_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 114, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio1_2x", NULL);
+ clks[audio1_2x] = clk;
+
+ /* audio2_2x */
+ clk = clk_register_fixed_factor(NULL, "audio2_doubler", "audio2",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio2_div", "audio2_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 26, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio2_2x", "audio2_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 115, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio2_2x", NULL);
+ clks[audio2_2x] = clk;
+
+ /* audio3_2x */
+ clk = clk_register_fixed_factor(NULL, "audio3_doubler", "audio3",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio3_div", "audio3_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 27, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio3_2x", "audio3_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 116, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio3_2x", NULL);
+ clks[audio3_2x] = clk;
+
+ /* audio4_2x */
+ clk = clk_register_fixed_factor(NULL, "audio4_doubler", "audio4",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio4_div", "audio4_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 28, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio4_2x", "audio4_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 117, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio4_2x", NULL);
+ clks[audio4_2x] = clk;
+
+ /* spdif_2x */
+ clk = clk_register_fixed_factor(NULL, "spdif_doubler", "spdif",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("spdif_div", "spdif_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 29, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("spdif_2x", "spdif_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 118, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "spdif_2x", NULL);
+ clks[spdif_2x] = clk;
+}
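+
+/*
+ * Each *_2x clock above is a three-stage chain: a fixed x2 factor, a one-bit
+ * divider in AUDIO_SYNC_DOUBLER (so the doubling can effectively be
+ * bypassed), and a peripheral gate, e.g.
+ * audio0 -> audio0_doubler -> audio0_div -> audio0_2x.
+ */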
+
+static void __init tegra30_pmc_clk_init(void)
+{
+ struct clk *clk;
+
+ /* clk_out_1 */
+ clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
+ ARRAY_SIZE(clk_out1_parents), 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
+ &clk_out_lock);
+ clks[clk_out_1_mux] = clk;
+ clk = clk_register_gate(NULL, "clk_out_1", "clk_out_1_mux", 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 2, 0,
+ &clk_out_lock);
+ clk_register_clkdev(clk, "extern1", "clk_out_1");
+ clks[clk_out_1] = clk;
+
+ /* clk_out_2 */
+ clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
+ ARRAY_SIZE(clk_out2_parents), 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
+ &clk_out_lock);
+ clk = clk_register_gate(NULL, "clk_out_2", "clk_out_2_mux", 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 10, 0,
+ &clk_out_lock);
+ clk_register_clkdev(clk, "extern2", "clk_out_2");
+ clks[clk_out_2] = clk;
+
+ /* clk_out_3 */
+ clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
+ ARRAY_SIZE(clk_out3_parents), 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
+ &clk_out_lock);
+ clk = clk_register_gate(NULL, "clk_out_3", "clk_out_3_mux", 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 18, 0,
+ &clk_out_lock);
+ clk_register_clkdev(clk, "extern3", "clk_out_3");
+ clks[clk_out_3] = clk;
+
+ /* blink */
+ writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
+ clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
+ pmc_base + PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
+ clk = clk_register_gate(NULL, "blink", "blink_override", 0,
+ pmc_base + PMC_CTRL,
+ PMC_CTRL_BLINK_ENB, 0, NULL);
+ clk_register_clkdev(clk, "blink", NULL);
+ clks[blink] = clk;
+}
+
+static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p_cclkg", "pll_p_out4_cclkg",
+ "pll_p_out3_cclkg", "unused", "pll_x" };
+static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p_cclklp", "pll_p_out4_cclklp",
+ "pll_p_out3_cclklp", "unused", "pll_x",
+ "pll_x_out0" };
+static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
+ "pll_p_out3", "pll_p_out2", "unused",
+ "clk_32k", "pll_m_out1" };
+
+static void __init tegra30_super_clk_init(void)
+{
+ struct clk *clk;
+
+ /*
+ * Clock input to cclk_g divided from pll_p using
+ * U71 divider of cclk_g.
+ */
+ clk = tegra_clk_register_divider("pll_p_cclkg", "pll_p",
+ clk_base + SUPER_CCLKG_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_cclkg", NULL);
+
+ /*
+ * Clock input to cclk_g divided from pll_p_out3 using
+ * U71 divider of cclk_g.
+ */
+ clk = tegra_clk_register_divider("pll_p_out3_cclkg", "pll_p_out3",
+ clk_base + SUPER_CCLKG_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out3_cclkg", NULL);
+
+ /*
+ * Clock input to cclk_g divided from pll_p_out4 using
+ * U71 divider of cclk_g.
+ */
+ clk = tegra_clk_register_divider("pll_p_out4_cclkg", "pll_p_out4",
+ clk_base + SUPER_CCLKG_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out4_cclkg", NULL);
+
+ /* CCLKG */
+ clk = tegra_clk_register_super_mux("cclk_g", cclk_g_parents,
+ ARRAY_SIZE(cclk_g_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + CCLKG_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "cclk_g", NULL);
+ clks[cclk_g] = clk;
+
+ /*
+ * Clock input to cclk_lp divided from pll_p using
+ * U71 divider of cclk_lp.
+ */
+ clk = tegra_clk_register_divider("pll_p_cclklp", "pll_p",
+ clk_base + SUPER_CCLKLP_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_cclklp", NULL);
+
+ /*
+ * Clock input to cclk_lp divided from pll_p_out3 using
+ * U71 divider of cclk_lp.
+ */
+ clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3",
+ clk_base + SUPER_CCLKLP_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL);
+
+ /*
+ * Clock input to cclk_lp divided from pll_p_out4 using
+ * U71 divider of cclk_lp.
+ */
+ clk = tegra_clk_register_divider("pll_p_out4_cclklp", "pll_p_out4",
+ clk_base + SUPER_CCLKLP_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out4_cclklp", NULL);
+
+ /* CCLKLP */
+ clk = tegra_clk_register_super_mux("cclk_lp", cclk_lp_parents,
+ ARRAY_SIZE(cclk_lp_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + CCLKLP_BURST_POLICY,
+ TEGRA_DIVIDER_2, 4, 8, 9,
+ NULL);
+ clk_register_clkdev(clk, "cclk_lp", NULL);
+ clks[cclk_lp] = clk;
+
+ /* SCLK */
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + SCLK_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "sclk", NULL);
+ clks[sclk] = clk;
+
+ /* HCLK */
+ clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
+ clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT,
+ clk_base + SYSTEM_CLK_RATE, 7,
+ CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "hclk", NULL);
+ clks[hclk] = clk;
+
+ /* PCLK */
+ clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
+ clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT,
+ clk_base + SYSTEM_CLK_RATE, 3,
+ CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "pclk", NULL);
+ clks[pclk] = clk;
+
+ /* twd */
+ clk = clk_register_fixed_factor(NULL, "twd", "cclk_g",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "twd", NULL);
+ clks[twd] = clk;
+}
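+
+/*
+ * The hclk and pclk dividers above use the common clock framework's default
+ * encoding (divisor = register field + 1), so SYSTEM_CLK_RATE fields of 0
+ * and 1 give, for example, hclk = sclk and pclk = hclk / 2.
+ */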
+
+static const char *mux_pllacp_clkm[] = { "pll_a_out0", "unused", "pll_p",
+ "clk_m" };
+static const char *mux_pllpcm_clkm[] = { "pll_p", "pll_c", "pll_m", "clk_m" };
+static const char *mux_pllmcp_clkm[] = { "pll_m", "pll_c", "pll_p", "clk_m" };
+static const char *i2s0_parents[] = { "pll_a_out0", "audio0_2x", "pll_p",
+ "clk_m" };
+static const char *i2s1_parents[] = { "pll_a_out0", "audio1_2x", "pll_p",
+ "clk_m" };
+static const char *i2s2_parents[] = { "pll_a_out0", "audio2_2x", "pll_p",
+ "clk_m" };
+static const char *i2s3_parents[] = { "pll_a_out0", "audio3_2x", "pll_p",
+ "clk_m" };
+static const char *i2s4_parents[] = { "pll_a_out0", "audio4_2x", "pll_p",
+ "clk_m" };
+static const char *spdif_out_parents[] = { "pll_a_out0", "spdif_2x", "pll_p",
+ "clk_m" };
+static const char *spdif_in_parents[] = { "pll_p", "pll_c", "pll_m" };
+static const char *mux_pllpc_clk32k_clkm[] = { "pll_p", "pll_c", "clk_32k",
+ "clk_m" };
+static const char *mux_pllpc_clkm_clk32k[] = { "pll_p", "pll_c", "clk_m",
+ "clk_32k" };
+static const char *mux_pllmcpa[] = { "pll_m", "pll_c", "pll_p", "pll_a_out0" };
+static const char *mux_pllpdc_clkm[] = { "pll_p", "pll_d_out0", "pll_c",
+ "clk_m" };
+static const char *mux_pllp_clkm[] = { "pll_p", "unused", "unused", "clk_m" };
+static const char *mux_pllpmdacd2_clkm[] = { "pll_p", "pll_m", "pll_d_out0",
+ "pll_a_out0", "pll_c",
+ "pll_d2_out0", "clk_m" };
+static const char *mux_plla_clk32k_pllp_clkm_plle[] = { "pll_a_out0",
+ "clk_32k", "pll_p",
+ "clk_m", "pll_e" };
+static const char *mux_plld_out0_plld2_out0[] = { "pll_d_out0",
+ "pll_d2_out0" };
+
+static struct tegra_periph_init_data tegra_periph_clk_list[] = {
+ TEGRA_INIT_DATA_MUX("i2s0", NULL, "tegra30-i2s.0", i2s0_parents, CLK_SOURCE_I2S0, 30, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s0),
+ TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra30-i2s.1", i2s1_parents, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
+ TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra30-i2s.2", i2s2_parents, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
+ TEGRA_INIT_DATA_MUX("i2s3", NULL, "tegra30-i2s.3", i2s3_parents, CLK_SOURCE_I2S3, 101, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s3),
+ TEGRA_INIT_DATA_MUX("i2s4", NULL, "tegra30-i2s.4", i2s4_parents, CLK_SOURCE_I2S4, 102, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s4),
+ TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra30-spdif", spdif_out_parents, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
+ TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra30-spdif", spdif_in_parents, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
+ TEGRA_INIT_DATA_MUX("d_audio", "d_audio", "tegra30-ahub", mux_pllacp_clkm, CLK_SOURCE_D_AUDIO, 106, &periph_v_regs, 0, d_audio),
+ TEGRA_INIT_DATA_MUX("dam0", NULL, "tegra30-dam.0", mux_pllacp_clkm, CLK_SOURCE_DAM0, 108, &periph_v_regs, 0, dam0),
+ TEGRA_INIT_DATA_MUX("dam1", NULL, "tegra30-dam.1", mux_pllacp_clkm, CLK_SOURCE_DAM1, 109, &periph_v_regs, 0, dam1),
+ TEGRA_INIT_DATA_MUX("dam2", NULL, "tegra30-dam.2", mux_pllacp_clkm, CLK_SOURCE_DAM2, 110, &periph_v_regs, 0, dam2),
+ TEGRA_INIT_DATA_MUX("hda", "hda", "tegra30-hda", mux_pllpcm_clkm, CLK_SOURCE_HDA, 125, &periph_v_regs, 0, hda),
+ TEGRA_INIT_DATA_MUX("hda2codec_2x", "hda2codec", "tegra30-hda", mux_pllpcm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, &periph_v_regs, 0, hda2codec_2x),
+ TEGRA_INIT_DATA_MUX("sbc1", NULL, "spi_tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
+ TEGRA_INIT_DATA_MUX("sbc2", NULL, "spi_tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
+ TEGRA_INIT_DATA_MUX("sbc3", NULL, "spi_tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
+ TEGRA_INIT_DATA_MUX("sbc4", NULL, "spi_tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
+ TEGRA_INIT_DATA_MUX("sbc5", NULL, "spi_tegra.4", mux_pllpcm_clkm, CLK_SOURCE_SBC5, 104, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc5),
+ TEGRA_INIT_DATA_MUX("sbc6", NULL, "spi_tegra.5", mux_pllpcm_clkm, CLK_SOURCE_SBC6, 105, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc6),
+ TEGRA_INIT_DATA_MUX("sata_oob", NULL, "tegra_sata_oob", mux_pllpcm_clkm, CLK_SOURCE_SATA_OOB, 123, &periph_v_regs, TEGRA_PERIPH_ON_APB, sata_oob),
+ TEGRA_INIT_DATA_MUX("sata", NULL, "tegra_sata", mux_pllpcm_clkm, CLK_SOURCE_SATA, 124, &periph_v_regs, TEGRA_PERIPH_ON_APB, sata),
+ TEGRA_INIT_DATA_MUX("ndflash", NULL, "tegra_nand", mux_pllpcm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_l_regs, TEGRA_PERIPH_ON_APB, ndflash),
+ TEGRA_INIT_DATA_MUX("ndspeed", NULL, "tegra_nand_speed", mux_pllpcm_clkm, CLK_SOURCE_NDSPEED, 80, &periph_u_regs, TEGRA_PERIPH_ON_APB, ndspeed),
+ TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllpcm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
+ TEGRA_INIT_DATA_MUX("csite", NULL, "csite", mux_pllpcm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, TEGRA_PERIPH_ON_APB, csite),
+ TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllpcm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, TEGRA_PERIPH_ON_APB, la),
+ TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllpcm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
+ TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllpcm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
+ TEGRA_INIT_DATA_MUX("tsensor", NULL, "tegra-tsensor", mux_pllpc_clkm_clk32k, CLK_SOURCE_TSENSOR, 100, &periph_v_regs, TEGRA_PERIPH_ON_APB, tsensor),
+ TEGRA_INIT_DATA_MUX("i2cslow", NULL, "i2cslow", mux_pllpc_clk32k_clkm, CLK_SOURCE_I2CSLOW, 81, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2cslow),
+ TEGRA_INIT_DATA_INT("vde", NULL, "vde", mux_pllpcm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
+ TEGRA_INIT_DATA_INT("vi", "vi", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
+ TEGRA_INIT_DATA_INT("epp", NULL, "epp", mux_pllmcpa, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
+ TEGRA_INIT_DATA_INT("mpe", NULL, "mpe", mux_pllmcpa, CLK_SOURCE_MPE, 60, &periph_h_regs, 0, mpe),
+ TEGRA_INIT_DATA_INT("host1x", NULL, "host1x", mux_pllmcpa, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
+ TEGRA_INIT_DATA_INT("3d", NULL, "3d", mux_pllmcpa, CLK_SOURCE_3D, 24, &periph_l_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d),
+ TEGRA_INIT_DATA_INT("3d2", NULL, "3d2", mux_pllmcpa, CLK_SOURCE_3D2, 98, &periph_v_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d2),
+ TEGRA_INIT_DATA_INT("2d", NULL, "2d", mux_pllmcpa, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr2d),
+ TEGRA_INIT_DATA_INT("se", NULL, "se", mux_pllpcm_clkm, CLK_SOURCE_SE, 127, &periph_v_regs, 0, se),
+ TEGRA_INIT_DATA_MUX("mselect", NULL, "mselect", mux_pllp_clkm, CLK_SOURCE_MSELECT, 99, &periph_v_regs, 0, mselect),
+ TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllpcm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
+ TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
+ TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
+ TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
+ TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
+ TEGRA_INIT_DATA_MUX("cve", NULL, "cve", mux_pllpdc_clkm, CLK_SOURCE_CVE, 49, &periph_h_regs, 0, cve),
+ TEGRA_INIT_DATA_MUX("tvo", NULL, "tvo", mux_pllpdc_clkm, CLK_SOURCE_TVO, 49, &periph_h_regs, 0, tvo),
+ TEGRA_INIT_DATA_MUX("tvdac", NULL, "tvdac", mux_pllpdc_clkm, CLK_SOURCE_TVDAC, 53, &periph_h_regs, 0, tvdac),
+ TEGRA_INIT_DATA_MUX("actmon", NULL, "actmon", mux_pllpc_clk32k_clkm, CLK_SOURCE_ACTMON, 119, &periph_v_regs, 0, actmon),
+ TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
+ TEGRA_INIT_DATA_DIV16("i2c1", "div-clk", "tegra-i2c.0", mux_pllp_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2c1),
+ TEGRA_INIT_DATA_DIV16("i2c2", "div-clk", "tegra-i2c.1", mux_pllp_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c2),
+ TEGRA_INIT_DATA_DIV16("i2c3", "div-clk", "tegra-i2c.2", mux_pllp_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2c3),
+ TEGRA_INIT_DATA_DIV16("i2c4", "div-clk", "tegra-i2c.3", mux_pllp_clkm, CLK_SOURCE_I2C4, 103, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2c4),
+ TEGRA_INIT_DATA_DIV16("i2c5", "div-clk", "tegra-i2c.4", mux_pllp_clkm, CLK_SOURCE_I2C5, 47, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c5),
+ TEGRA_INIT_DATA_UART("uarta", NULL, "tegra_uart.0", mux_pllpcm_clkm, CLK_SOURCE_UARTA, 6, &periph_l_regs, uarta),
+ TEGRA_INIT_DATA_UART("uartb", NULL, "tegra_uart.1", mux_pllpcm_clkm, CLK_SOURCE_UARTB, 7, &periph_l_regs, uartb),
+ TEGRA_INIT_DATA_UART("uartc", NULL, "tegra_uart.2", mux_pllpcm_clkm, CLK_SOURCE_UARTC, 55, &periph_h_regs, uartc),
+ TEGRA_INIT_DATA_UART("uartd", NULL, "tegra_uart.3", mux_pllpcm_clkm, CLK_SOURCE_UARTD, 65, &periph_u_regs, uartd),
+ TEGRA_INIT_DATA_UART("uarte", NULL, "tegra_uart.4", mux_pllpcm_clkm, CLK_SOURCE_UARTE, 66, &periph_u_regs, uarte),
+ TEGRA_INIT_DATA_MUX8("hdmi", NULL, "hdmi", mux_pllpmdacd2_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
+ TEGRA_INIT_DATA_MUX8("extern1", NULL, "extern1", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN1, 120, &periph_v_regs, 0, extern1),
+ TEGRA_INIT_DATA_MUX8("extern2", NULL, "extern2", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN2, 121, &periph_v_regs, 0, extern2),
+ TEGRA_INIT_DATA_MUX8("extern3", NULL, "extern3", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN3, 122, &periph_v_regs, 0, extern3),
+ TEGRA_INIT_DATA("pwm", NULL, "pwm", mux_pllpc_clk32k_clkm, CLK_SOURCE_PWM, 28, 2, 0, 0, 8, 1, 0, &periph_l_regs, 17, periph_clk_enb_refcnt, 0, pwm),
+};
+
+static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
+ TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllpmdacd2_clkm, CLK_SOURCE_DISP1, 29, 3, 27, &periph_l_regs, 0, disp1),
+ TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllpmdacd2_clkm, CLK_SOURCE_DISP2, 29, 3, 26, &periph_l_regs, 0, disp2),
+ TEGRA_INIT_DATA_NODIV("dsib", NULL, "tegradc.1", mux_plld_out0_plld2_out0, CLK_SOURCE_DSIB, 25, 1, 82, &periph_u_regs, 0, dsib),
+};
+
+static void __init tegra30_periph_clk_init(void)
+{
+ struct tegra_periph_init_data *data;
+ struct clk *clk;
+ int i;
+
+ /* apbdma */
+ clk = tegra_clk_register_periph_gate("apbdma", "clk_m", 0, clk_base, 0, 34,
+ &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-apbdma");
+ clks[apbdma] = clk;
+
+ /* rtc */
+ clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
+ TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 4, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "rtc-tegra");
+ clks[rtc] = clk;
+
+ /* timer */
+ clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base, 0,
+ 5, &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "timer");
+ clks[timer] = clk;
+
+ /* kbc */
+ clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
+ TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 36, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-kbc");
+ clks[kbc] = clk;
+
+ /* csus */
+ clk = tegra_clk_register_periph_gate("csus", "clk_m",
+ TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 92, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "csus", "tengra_camera");
+ clks[csus] = clk;
+
+ /* vcp */
+ clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0, clk_base, 0, 29,
+ &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "vcp", "tegra-avp");
+ clks[vcp] = clk;
+
+ /* bsea */
+ clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0, clk_base, 0,
+ 62, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "bsea", "tegra-avp");
+ clks[bsea] = clk;
+
+ /* bsev */
+ clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0, clk_base, 0,
+ 63, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "bsev", "tegra-aes");
+ clks[bsev] = clk;
+
+ /* usbd */
+ clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base, 0,
+ 22, &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "fsl-tegra-udc");
+ clks[usbd] = clk;
+
+ /* usb2 */
+ clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base, 0,
+ 58, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-ehci.1");
+ clks[usb2] = clk;
+
+ /* usb3 */
+ clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base, 0,
+ 59, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-ehci.2");
+ clks[usb3] = clk;
+
+ /* dsia */
+ clk = tegra_clk_register_periph_gate("dsia", "pll_d_out0", 0, clk_base,
+ 0, 48, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "dsia", "tegradc.0");
+ clks[dsia] = clk;
+
+ /* csi */
+ clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
+ 0, 52, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "csi", "tegra_camera");
+ clks[csi] = clk;
+
+ /* isp */
+ clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0, 23,
+ &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "isp", "tegra_camera");
+ clks[isp] = clk;
+
+ /* pcie */
+ clk = tegra_clk_register_periph_gate("pcie", "clk_m", 0, clk_base, 0,
+ 70, &periph_u_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "pcie", "tegra-pcie");
+ clks[pcie] = clk;
+
+ /* afi */
+ clk = tegra_clk_register_periph_gate("afi", "clk_m", 0, clk_base, 0, 72,
+ &periph_u_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "afi", "tegra-pcie");
+ clks[afi] = clk;
+
+ /* kfuse */
+ clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 40, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "kfuse-tegra");
+ clks[kfuse] = clk;
+
+ /* fuse */
+ clk = tegra_clk_register_periph_gate("fuse", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 39, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "fuse", "fuse-tegra");
+ clks[fuse] = clk;
+
+ /* fuse_burn */
+ clk = tegra_clk_register_periph_gate("fuse_burn", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 39, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "fuse_burn", "fuse-tegra");
+ clks[fuse_burn] = clk;
+
+ /* apbif */
+ clk = tegra_clk_register_periph_gate("apbif", "clk_m", 0,
+ clk_base, 0, 107, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "apbif", "tegra30-ahub");
+ clks[apbif] = clk;
+
+ /* hda2hdmi */
+ clk = tegra_clk_register_periph_gate("hda2hdmi", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 128, &periph_w_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "hda2hdmi", "tegra30-hda");
+ clks[hda2hdmi] = clk;
+
+ /* sata_cold */
+ clk = tegra_clk_register_periph_gate("sata_cold", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 129, &periph_w_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra_sata_cold");
+ clks[sata_cold] = clk;
+
+ /* dtv */
+ clk = tegra_clk_register_periph_gate("dtv", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 79, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "dtv");
+ clks[dtv] = clk;
+
+ /* emc */
+ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
+ ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ clk_base + CLK_SOURCE_EMC,
+ 30, 2, 0, NULL);
+ clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
+ 57, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "emc", NULL);
+ clks[emc] = clk;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
+ data = &tegra_periph_clk_list[i];
+ clk = tegra_clk_register_periph(data->name, data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset);
+ clk_register_clkdev(clk, data->con_id, data->dev_id);
+ clks[data->clk_id] = clk;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
+ data = &tegra_periph_nodiv_clk_list[i];
+ clk = tegra_clk_register_periph_nodiv(data->name,
+ data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset);
+ clk_register_clkdev(clk, data->con_id, data->dev_id);
+ clks[data->clk_id] = clk;
+ }
+}
+
+static void __init tegra30_fixed_clk_init(void)
+{
+ struct clk *clk;
+
+ /* clk_32k */
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
+ 32768);
+ clk_register_clkdev(clk, "clk_32k", NULL);
+ clks[clk_32k] = clk;
+
+ /* clk_m_div2 */
+ clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "clk_m_div2", NULL);
+ clks[clk_m_div2] = clk;
+
+ /* clk_m_div4 */
+ clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
+ CLK_SET_RATE_PARENT, 1, 4);
+ clk_register_clkdev(clk, "clk_m_div4", NULL);
+ clks[clk_m_div4] = clk;
+
+ /* cml0 */
+ clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
+ 0, 0, &cml_lock);
+ clk_register_clkdev(clk, "cml0", NULL);
+ clks[cml0] = clk;
+
+ /* cml1 */
+ clk = clk_register_gate(NULL, "cml1", "pll_e", 0, clk_base + PLLE_AUX,
+ 1, 0, &cml_lock);
+ clk_register_clkdev(clk, "cml1", NULL);
+ clks[cml1] = clk;
+
+ /* pciex */
+ clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000);
+ clk_register_clkdev(clk, "pciex", NULL);
+ clks[pciex] = clk;
+}
+
+static void __init tegra30_osc_clk_init(void)
+{
+ struct clk *clk;
+ unsigned int pll_ref_div;
+
+ tegra30_clk_measure_input_freq();
+
+ /* clk_m */
+ clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT,
+ input_freq);
+ clk_register_clkdev(clk, "clk_m", NULL);
+ clks[clk_m] = clk;
+
+ /* pll_ref */
+ pll_ref_div = tegra30_get_pll_ref_div();
+ clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
+ CLK_SET_RATE_PARENT, 1, pll_ref_div);
+ clk_register_clkdev(clk, "pll_ref", NULL);
+ clks[pll_ref] = clk;
+}
+
+/* Tegra30 CPU clock and reset control functions */
+static void tegra30_wait_cpu_in_reset(u32 cpu)
+{
+ unsigned int reg;
+
+ do {
+ reg = readl(clk_base +
+ TEGRA30_CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
+ cpu_relax();
+ } while (!(reg & (1 << cpu))); /* wait until the CPU is held in reset */
+}
+
+static void tegra30_put_cpu_in_reset(u32 cpu)
+{
+ writel(CPU_RESET(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+ dmb();
+}
+
+static void tegra30_cpu_out_of_reset(u32 cpu)
+{
+ writel(CPU_RESET(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
+ wmb();
+}
+
+static void tegra30_enable_cpu_clock(u32 cpu)
+{
+ unsigned int reg;
+
+ writel(CPU_CLOCK(cpu),
+ clk_base + TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
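+ /* Read back, presumably to flush the preceding write; the value is unused. */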
+ reg = readl(clk_base +
+ TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
+}
+
+static void tegra30_disable_cpu_clock(u32 cpu)
+{
+ unsigned int reg;
+
+ reg = readl(clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg | CPU_CLOCK(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static bool tegra30_cpu_rail_off_ready(void)
+{
+ unsigned int cpu_rst_status;
+ int cpu_pwr_status;
+
+ cpu_rst_status = readl(clk_base +
+ TEGRA30_CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
+ cpu_pwr_status = tegra_powergate_is_powered(TEGRA_POWERGATE_CPU1) ||
+ tegra_powergate_is_powered(TEGRA_POWERGATE_CPU2) ||
+ tegra_powergate_is_powered(TEGRA_POWERGATE_CPU3);
+
+ if (((cpu_rst_status & 0xE) != 0xE) || cpu_pwr_status)
+ return false;
+
+ return true;
+}
+
+static void tegra30_cpu_clock_suspend(void)
+{
+ /* switch coresite to clk_m, save off original source */
+ tegra30_cpu_clk_sctx.clk_csite_src =
+ readl(clk_base + CLK_RESET_SOURCE_CSITE);
+ writel(3<<30, clk_base + CLK_RESET_SOURCE_CSITE);
+
+ tegra30_cpu_clk_sctx.cpu_burst =
+ readl(clk_base + CLK_RESET_CCLK_BURST);
+ tegra30_cpu_clk_sctx.pllx_base =
+ readl(clk_base + CLK_RESET_PLLX_BASE);
+ tegra30_cpu_clk_sctx.pllx_misc =
+ readl(clk_base + CLK_RESET_PLLX_MISC);
+ tegra30_cpu_clk_sctx.cclk_divider =
+ readl(clk_base + CLK_RESET_CCLK_DIVIDER);
+}
+
+static void tegra30_cpu_clock_resume(void)
+{
+ unsigned int reg, policy;
+
+ /* Is CPU complex already running on PLLX? */
+ reg = readl(clk_base + CLK_RESET_CCLK_BURST);
+ policy = (reg >> CLK_RESET_CCLK_BURST_POLICY_SHIFT) & 0xF;
+
+ if (policy == CLK_RESET_CCLK_IDLE_POLICY)
+ reg = (reg >> CLK_RESET_CCLK_IDLE_POLICY_SHIFT) & 0xF;
+ else if (policy == CLK_RESET_CCLK_RUN_POLICY)
+ reg = (reg >> CLK_RESET_CCLK_RUN_POLICY_SHIFT) & 0xF;
+ else
+ BUG();
+
+ if (reg != CLK_RESET_CCLK_BURST_POLICY_PLLX) {
+ /* restore PLLX settings if CPU is on different PLL */
+ writel(tegra30_cpu_clk_sctx.pllx_misc,
+ clk_base + CLK_RESET_PLLX_MISC);
+ writel(tegra30_cpu_clk_sctx.pllx_base,
+ clk_base + CLK_RESET_PLLX_BASE);
+
+ /* wait for PLL stabilization if PLLX was enabled */
+ if (tegra30_cpu_clk_sctx.pllx_base & (1 << 30))
+ udelay(300);
+ }
+
+ /*
+ * Restore original burst policy setting for calls resulting from CPU
+ * LP2 in idle or system suspend.
+ */
+ writel(tegra30_cpu_clk_sctx.cclk_divider,
+ clk_base + CLK_RESET_CCLK_DIVIDER);
+ writel(tegra30_cpu_clk_sctx.cpu_burst,
+ clk_base + CLK_RESET_CCLK_BURST);
+
+ writel(tegra30_cpu_clk_sctx.clk_csite_src,
+ clk_base + CLK_RESET_SOURCE_CSITE);
+}
+#endif
+
+static struct tegra_cpu_car_ops tegra30_cpu_car_ops = {
+ .wait_for_reset = tegra30_wait_cpu_in_reset,
+ .put_in_reset = tegra30_put_cpu_in_reset,
+ .out_of_reset = tegra30_cpu_out_of_reset,
+ .enable_clock = tegra30_enable_cpu_clock,
+ .disable_clock = tegra30_disable_cpu_clock,
+#ifdef CONFIG_PM_SLEEP
+ .rail_off_ready = tegra30_cpu_rail_off_ready,
+ .suspend = tegra30_cpu_clock_suspend,
+ .resume = tegra30_cpu_clock_resume,
+#endif
+};
+
+static __initdata struct tegra_clk_init_table init_table[] = {
+ {uarta, pll_p, 408000000, 0},
+ {uartb, pll_p, 408000000, 0},
+ {uartc, pll_p, 408000000, 0},
+ {uartd, pll_p, 408000000, 0},
+ {uarte, pll_p, 408000000, 0},
+ {pll_a, clk_max, 564480000, 1},
+ {pll_a_out0, clk_max, 11289600, 1},
+ {extern1, pll_a_out0, 0, 1},
+ {clk_out_1_mux, extern1, 0, 0},
+ {clk_out_1, clk_max, 0, 1},
+ {blink, clk_max, 0, 1},
+ {i2s0, pll_a_out0, 11289600, 0},
+ {i2s1, pll_a_out0, 11289600, 0},
+ {i2s2, pll_a_out0, 11289600, 0},
+ {i2s3, pll_a_out0, 11289600, 0},
+ {i2s4, pll_a_out0, 11289600, 0},
+ {sdmmc1, pll_p, 48000000, 0},
+ {sdmmc2, pll_p, 48000000, 0},
+ {sdmmc3, pll_p, 48000000, 0},
+ {pll_m, clk_max, 0, 1},
+ {pclk, clk_max, 0, 1},
+ {csite, clk_max, 0, 1},
+ {emc, clk_max, 0, 1},
+ {mselect, clk_max, 0, 1},
+ {sbc1, pll_p, 100000000, 0},
+ {sbc2, pll_p, 100000000, 0},
+ {sbc3, pll_p, 100000000, 0},
+ {sbc4, pll_p, 100000000, 0},
+ {sbc5, pll_p, 100000000, 0},
+ {sbc6, pll_p, 100000000, 0},
+ {host1x, pll_c, 150000000, 0},
+ {disp1, pll_p, 600000000, 0},
+ {disp2, pll_p, 600000000, 0},
+ {twd, clk_max, 0, 1},
+ {clk_max, clk_max, 0, 0}, /* This MUST be the last entry. */
+};
+
+/*
+ * Some clocks may be used by different drivers depending on the board
+ * configuration. List those here to register them twice in the clock lookup
+ * table under two names.
+ */
+static struct tegra_clk_duplicate tegra_clk_duplicates[] = {
+ TEGRA_CLK_DUPLICATE(usbd, "utmip-pad", NULL),
+ TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL),
+ TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL),
+ TEGRA_CLK_DUPLICATE(bsev, "tegra-avp", "bsev"),
+ TEGRA_CLK_DUPLICATE(bsev, "nvavp", "bsev"),
+ TEGRA_CLK_DUPLICATE(vde, "tegra-aes", "vde"),
+ TEGRA_CLK_DUPLICATE(bsea, "tegra-aes", "bsea"),
+ TEGRA_CLK_DUPLICATE(bsea, "nvavp", "bsea"),
+ TEGRA_CLK_DUPLICATE(cml1, "tegra_sata_cml", NULL),
+ TEGRA_CLK_DUPLICATE(cml0, "tegra_pcie", "cml"),
+ TEGRA_CLK_DUPLICATE(pciex, "tegra_pcie", "pciex"),
+ TEGRA_CLK_DUPLICATE(twd, "smp_twd", NULL),
+ TEGRA_CLK_DUPLICATE(vcp, "nvavp", "vcp"),
+ TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* MUST be the last entry */
+};
+
+static const struct of_device_id pmc_match[] __initconst = {
+ { .compatible = "nvidia,tegra30-pmc" },
+ {},
+};
+
+void __init tegra30_clock_init(struct device_node *np)
+{
+ struct device_node *node;
+ int i;
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("ioremap tegra30 CAR failed\n");
+ return;
+ }
+
+ node = of_find_matching_node(NULL, pmc_match);
+ if (!node) {
+ pr_err("Failed to find pmc node\n");
+ BUG();
+ }
+
+ pmc_base = of_iomap(node, 0);
+ if (!pmc_base) {
+ pr_err("Can't map pmc registers\n");
+ BUG();
+ }
+
+ tegra30_osc_clk_init();
+ tegra30_fixed_clk_init();
+ tegra30_pll_init();
+ tegra30_super_clk_init();
+ tegra30_periph_clk_init();
+ tegra30_audio_clk_init();
+ tegra30_pmc_clk_init();
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ if (IS_ERR(clks[i])) {
+ pr_err("Tegra30 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ BUG();
+ }
+ if (!clks[i])
+ clks[i] = ERR_PTR(-EINVAL);
+ }
+
+ tegra_init_dup_clks(tegra_clk_duplicates, clks, clk_max);
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ tegra_init_from_table(init_table, clks, clk_max);
+
+ tegra_cpu_car_ops = &tegra30_cpu_car_ops;
+}
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
new file mode 100644
index 000000000000..a603b9af0ad3
--- /dev/null
+++ b/drivers/clk/tegra/clk.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+
+/* Global data of Tegra CPU CAR ops */
+struct tegra_cpu_car_ops *tegra_cpu_car_ops;
+
+void __init tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
+ struct clk *clks[], int clk_max)
+{
+ struct clk *clk;
+
+ for (; dup_list->clk_id < clk_max; dup_list++) {
+ clk = clks[dup_list->clk_id];
+ dup_list->lookup.clk = clk;
+ clkdev_add(&dup_list->lookup);
+ }
+}
+
+void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
+ struct clk *clks[], int clk_max)
+{
+ struct clk *clk;
+
+ for (; tbl->clk_id < clk_max; tbl++) {
+ clk = clks[tbl->clk_id];
+ if (IS_ERR_OR_NULL(clk))
+ return;
+
+ if (tbl->parent_id < clk_max) {
+ struct clk *parent = clks[tbl->parent_id];
+ if (clk_set_parent(clk, parent)) {
+ pr_err("%s: Failed to set parent %s of %s\n",
+ __func__, __clk_get_name(parent),
+ __clk_get_name(clk));
+ WARN_ON(1);
+ }
+ }
+
+ if (tbl->rate)
+ if (clk_set_rate(clk, tbl->rate)) {
+ pr_err("%s: Failed to set rate %lu of %s\n",
+ __func__, tbl->rate,
+ __clk_get_name(clk));
+ WARN_ON(1);
+ }
+
+ if (tbl->state)
+ if (clk_prepare_enable(clk)) {
+ pr_err("%s: Failed to enable %s\n", __func__,
+ __clk_get_name(clk));
+ WARN_ON(1);
+ }
+ }
+}
+
+static const struct of_device_id tegra_dt_clk_match[] = {
+ { .compatible = "nvidia,tegra20-car", .data = tegra20_clock_init },
+ { .compatible = "nvidia,tegra30-car", .data = tegra30_clock_init },
+ { }
+};
+
+void __init tegra_clocks_init(void)
+{
+ of_clk_init(tegra_dt_clk_match);
+}
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
new file mode 100644
index 000000000000..0744731c6229
--- /dev/null
+++ b/drivers/clk/tegra/clk.h
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TEGRA_CLK_H
+#define __TEGRA_CLK_H
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+
+/**
+ * struct tegra_clk_sync_source - external clock source from codec
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @rate: input frequency from source
+ * @max_rate: max rate allowed
+ */
+struct tegra_clk_sync_source {
+ struct clk_hw hw;
+ unsigned long rate;
+ unsigned long max_rate;
+};
+
+#define to_clk_sync_source(_hw) \
+ container_of(_hw, struct tegra_clk_sync_source, hw)
+
+extern const struct clk_ops tegra_clk_sync_source_ops;
+struct clk *tegra_clk_register_sync_source(const char *name,
+ unsigned long fixed_rate, unsigned long max_rate);
+
+/**
+ * struct tegra_clk_frac_div - fractional divider clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing divider
+ * @flags: hardware-specific flags
+ * @shift: shift to the divider bit field
+ * @width: width of the divider bit field
+ * @frac_width: width of the fractional bit field
+ * @lock: register lock
+ *
+ * Flags:
+ * TEGRA_DIVIDER_ROUND_UP - This flag indicates that the divider value should
+ * be rounded up.
+ * TEGRA_DIVIDER_FIXED - Fixed-rate PLL dividers have an additional override
+ * bit; this flag indicates that the divider belongs to a fixed-rate PLL.
+ * TEGRA_DIVIDER_INT - Some modules cannot cope with the duty cycle when the
+ * fraction bit is set. This flag indicates that a divider value with the
+ * fraction bit cleared must be calculated.
+ * TEGRA_DIVIDER_UART - The UART module divider has an additional enable bit,
+ * which is set when the divider value is not 0. This flag indicates that
+ * the divider belongs to a UART module.
+ */
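+
+/*
+ * Note (an assumption about the usual "n.1" fractional encoding, not stated
+ * in this file): a register value N is taken to encode a divisor of
+ * (N + 2) / 2, i.e. rate = parent_rate * 2 / (N + 2), so N = 0 means
+ * divide-by-1 and N = 1 means divide-by-1.5.
+ */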
+struct tegra_clk_frac_div {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 flags;
+ u8 shift;
+ u8 width;
+ u8 frac_width;
+ spinlock_t *lock;
+};
+
+#define to_clk_frac_div(_hw) container_of(_hw, struct tegra_clk_frac_div, hw)
+
+#define TEGRA_DIVIDER_ROUND_UP BIT(0)
+#define TEGRA_DIVIDER_FIXED BIT(1)
+#define TEGRA_DIVIDER_INT BIT(2)
+#define TEGRA_DIVIDER_UART BIT(3)
+
+extern const struct clk_ops tegra_clk_frac_div_ops;
+struct clk *tegra_clk_register_divider(const char *name,
+ const char *parent_name, void __iomem *reg,
+ unsigned long flags, u8 clk_divider_flags, u8 shift, u8 width,
+ u8 frac_width, spinlock_t *lock);
+
+/*
+ * Tegra PLL:
+ *
+ * In general, there are 3 requirements for each PLL
+ * that SW needs to comply with.
+ * (1) Input frequency range (REF).
+ * (2) Comparison frequency range (CF). CF = REF/DIVM.
+ * (3) VCO frequency range (VCO). VCO = CF * DIVN.
+ *
+ * The final PLL output frequency (FO) = VCO >> DIVP.
+ */
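+
+/*
+ * Worked example (illustrative values only, not taken from any specific PLL):
+ * with a 12 MHz reference and DIVM = 1, CF = 12 MHz; DIVN = 100 gives
+ * VCO = 1200 MHz; DIVP = 1 (a divide by 2^1) yields FO = 600 MHz.
+ */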
+
+/**
+ * struct tegra_clk_pll_freq_table - PLL frequency table
+ *
+ * @input_rate: input rate from source
+ * @output_rate: output rate from PLL for the input rate
+ * @n: feedback divider
+ * @m: input divider
+ * @p: post divider
+ * @cpcon: charge pump current
+ */
+struct tegra_clk_pll_freq_table {
+ unsigned long input_rate;
+ unsigned long output_rate;
+ u16 n;
+ u16 m;
+ u8 p;
+ u8 cpcon;
+};
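+
+/*
+ * A table entry reads: for @input_rate, program @m, @n, @p (and @cpcon where
+ * used) to obtain @output_rate. For instance, a purely hypothetical entry
+ * { 12000000, 600000000, 100, 1, 1, 8 } requests n = 100, m = 1, p = 1 for a
+ * 12 MHz input, matching the 600 MHz example above (cpcon chosen arbitrarily).
+ */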
+
+/**
+ * struct clk_pll_params - PLL parameters
+ *
+ * @input_min: Minimum input frequency
+ * @input_max: Maximum input frequency
+ * @cf_min: Minimum comparison frequency
+ * @cf_max: Maximum comparison frequency
+ * @vco_min: Minimum VCO frequency
+ * @vco_max: Maximum VCO frequency
+ * @base_reg: PLL base reg offset
+ * @misc_reg: PLL misc reg offset
+ * @lock_reg: PLL lock reg offset
+ * @lock_bit_idx: Bit index for PLL lock status
+ * @lock_enable_bit_idx: Bit index to enable PLL lock
+ * @lock_delay: Delay in us if PLL lock is not used
+ */
+struct tegra_clk_pll_params {
+ unsigned long input_min;
+ unsigned long input_max;
+ unsigned long cf_min;
+ unsigned long cf_max;
+ unsigned long vco_min;
+ unsigned long vco_max;
+
+ u32 base_reg;
+ u32 misc_reg;
+ u32 lock_reg;
+ u32 lock_bit_idx;
+ u32 lock_enable_bit_idx;
+ int lock_delay;
+};
+
+/**
+ * struct tegra_clk_pll - Tegra PLL clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @clk_base: address of CAR controller
+ * @pmc: address of PMC, required to read override bits
+ * @freq_table: array of frequencies supported by PLL
+ * @params: PLL parameters
+ * @flags: PLL flags
+ * @fixed_rate: PLL rate if it is fixed
+ * @lock: register lock
+ * @divn_shift: shift to the feedback divider bit field
+ * @divn_width: width of the feedback divider bit field
+ * @divm_shift: shift to the input divider bit field
+ * @divm_width: width of the input divider bit field
+ * @divp_shift: shift to the post divider bit field
+ * @divp_width: width of the post divider bit field
+ *
+ * Flags:
+ * TEGRA_PLL_USE_LOCK - This flag indicates that the lock bits should be used
+ * for PLL locking. If not set, the lock_delay value is used as a wait time.
+ * TEGRA_PLL_HAS_CPCON - This flag indicates that CPCON value needs
+ * to be programmed to change output frequency of the PLL.
+ * TEGRA_PLL_SET_LFCON - This flag indicates that LFCON value needs
+ * to be programmed to change output frequency of the PLL.
+ * TEGRA_PLL_SET_DCCON - This flag indicates that DCCON value needs
+ * to be programmed to change output frequency of the PLL.
+ * TEGRA_PLLU - PLLU has an inverted post divider. This flag indicates
+ * that the PLL is PLLU and that the post divider value must be inverted.
+ * TEGRA_PLLM - PLLM has additional override settings in the PMC. This
+ * flag indicates that the PLL is PLLM and that the override settings are used.
+ * TEGRA_PLL_FIXED - The output frequency of this PLL must not be
+ * changed.
+ * TEGRA_PLLE_CONFIGURE - Configure PLLE when enabling.
+ */
+struct tegra_clk_pll {
+ struct clk_hw hw;
+ void __iomem *clk_base;
+ void __iomem *pmc;
+ u8 flags;
+ unsigned long fixed_rate;
+ spinlock_t *lock;
+ u8 divn_shift;
+ u8 divn_width;
+ u8 divm_shift;
+ u8 divm_width;
+ u8 divp_shift;
+ u8 divp_width;
+ struct tegra_clk_pll_freq_table *freq_table;
+ struct tegra_clk_pll_params *params;
+};
+
+#define to_clk_pll(_hw) container_of(_hw, struct tegra_clk_pll, hw)
+
+#define TEGRA_PLL_USE_LOCK BIT(0)
+#define TEGRA_PLL_HAS_CPCON BIT(1)
+#define TEGRA_PLL_SET_LFCON BIT(2)
+#define TEGRA_PLL_SET_DCCON BIT(3)
+#define TEGRA_PLLU BIT(4)
+#define TEGRA_PLLM BIT(5)
+#define TEGRA_PLL_FIXED BIT(6)
+#define TEGRA_PLLE_CONFIGURE BIT(7)
+
+extern const struct clk_ops tegra_clk_pll_ops;
+extern const struct clk_ops tegra_clk_plle_ops;
+struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock);
+struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock);
+
+/**
+ * struct tegra_clk_pll_out - PLL divider down clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing the PLL divider
+ * @enb_bit_idx: bit to enable/disable PLL divider
+ * @rst_bit_idx: bit to reset PLL divider
+ * @lock: register lock
+ * @flags: hardware-specific flags
+ */
+struct tegra_clk_pll_out {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 enb_bit_idx;
+ u8 rst_bit_idx;
+ spinlock_t *lock;
+ u8 flags;
+};
+
+#define to_clk_pll_out(_hw) container_of(_hw, struct tegra_clk_pll_out, hw)
+
+extern const struct clk_ops tegra_clk_pll_out_ops;
+struct clk *tegra_clk_register_pll_out(const char *name,
+ const char *parent_name, void __iomem *reg, u8 enb_bit_idx,
+ u8 rst_bit_idx, unsigned long flags, u8 pll_div_flags,
+ spinlock_t *lock);
+
+/**
+ * struct tegra_clk_periph_regs - Registers controlling peripheral clock
+ *
+ * @enb_reg: read the enable status
+ * @enb_set_reg: write 1 to enable clock
+ * @enb_clr_reg: write 1 to disable clock
+ * @rst_reg: read the reset status
+ * @rst_set_reg: write 1 to assert the reset of peripheral
+ * @rst_clr_reg: write 1 to deassert the reset of peripheral
+ */
+struct tegra_clk_periph_regs {
+ u32 enb_reg;
+ u32 enb_set_reg;
+ u32 enb_clr_reg;
+ u32 rst_reg;
+ u32 rst_set_reg;
+ u32 rst_clr_reg;
+};
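+
+/*
+ * Illustrative note (an assumption about the register layout, not stated in
+ * this file): each bank covers 32 clocks, so a clock with number clk_num is
+ * enabled by writing BIT(clk_num % 32) to enb_set_reg of its bank and
+ * disabled by writing the same bit to enb_clr_reg.
+ */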
+
+/**
+ * struct tegra_clk_periph_gate - peripheral gate clock
+ *
+ * @magic: magic number to validate type
+ * @hw: handle between common and hardware-specific interfaces
+ * @clk_base: address of CAR controller
+ * @regs: Registers to control the peripheral
+ * @flags: hardware-specific flags
+ * @clk_num: Clock number
+ * @enable_refcnt: array to maintain reference count of the clock
+ *
+ * Flags:
+ * TEGRA_PERIPH_NO_RESET - This flag indicates that reset is not allowed
+ * for this module.
+ * TEGRA_PERIPH_MANUAL_RESET - This flag indicates that the module must not
+ * be reset after clock enable; the driver for the module is responsible
+ * for performing the reset.
+ * TEGRA_PERIPH_ON_APB - If the peripheral is on the APB bus, the bus must be
+ * read back to flush write operations. This flag indicates that the
+ * peripheral is on the APB bus.
+ */
+struct tegra_clk_periph_gate {
+ u32 magic;
+ struct clk_hw hw;
+ void __iomem *clk_base;
+ u8 flags;
+ int clk_num;
+ int *enable_refcnt;
+ struct tegra_clk_periph_regs *regs;
+};
+
+#define to_clk_periph_gate(_hw) \
+ container_of(_hw, struct tegra_clk_periph_gate, hw)
+
+#define TEGRA_CLK_PERIPH_GATE_MAGIC 0x17760309
+
+#define TEGRA_PERIPH_NO_RESET BIT(0)
+#define TEGRA_PERIPH_MANUAL_RESET BIT(1)
+#define TEGRA_PERIPH_ON_APB BIT(2)
+
+void tegra_periph_reset(struct tegra_clk_periph_gate *gate, bool assert);
+extern const struct clk_ops tegra_clk_periph_gate_ops;
+struct clk *tegra_clk_register_periph_gate(const char *name,
+ const char *parent_name, u8 gate_flags, void __iomem *clk_base,
+ unsigned long flags, int clk_num,
+ struct tegra_clk_periph_regs *pregs, int *enable_refcnt);
+
+/**
+ * struct tegra_clk_periph - peripheral clock
+ *
+ * @magic: magic number to validate type
+ * @hw: handle between common and hardware-specific interfaces
+ * @mux: mux clock
+ * @divider: divider clock
+ * @gate: gate clock
+ * @mux_ops: mux clock ops
+ * @div_ops: divider clock ops
+ * @gate_ops: gate clock ops
+ */
+struct tegra_clk_periph {
+ u32 magic;
+ struct clk_hw hw;
+ struct clk_mux mux;
+ struct tegra_clk_frac_div divider;
+ struct tegra_clk_periph_gate gate;
+
+ const struct clk_ops *mux_ops;
+ const struct clk_ops *div_ops;
+ const struct clk_ops *gate_ops;
+};
+
+#define to_clk_periph(_hw) container_of(_hw, struct tegra_clk_periph, hw)
+
+#define TEGRA_CLK_PERIPH_MAGIC 0x18221223
+
+extern const struct clk_ops tegra_clk_periph_ops;
+struct clk *tegra_clk_register_periph(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph, void __iomem *clk_base,
+ u32 offset);
+struct clk *tegra_clk_register_periph_nodiv(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph, void __iomem *clk_base,
+ u32 offset);
+
+#define TEGRA_CLK_PERIPH(_mux_shift, _mux_width, _mux_flags, \
+ _div_shift, _div_width, _div_frac_width, \
+ _div_flags, _clk_num, _enb_refcnt, _regs, \
+ _gate_flags) \
+ { \
+ .mux = { \
+ .flags = _mux_flags, \
+ .shift = _mux_shift, \
+ .width = _mux_width, \
+ }, \
+ .divider = { \
+ .flags = _div_flags, \
+ .shift = _div_shift, \
+ .width = _div_width, \
+ .frac_width = _div_frac_width, \
+ }, \
+ .gate = { \
+ .flags = _gate_flags, \
+ .clk_num = _clk_num, \
+ .enable_refcnt = _enb_refcnt, \
+ .regs = _regs, \
+ }, \
+ .mux_ops = &clk_mux_ops, \
+ .div_ops = &tegra_clk_frac_div_ops, \
+ .gate_ops = &tegra_clk_periph_gate_ops, \
+ }
+
+struct tegra_periph_init_data {
+ const char *name;
+ int clk_id;
+ const char **parent_names;
+ int num_parents;
+ struct tegra_clk_periph periph;
+ u32 offset;
+ const char *con_id;
+ const char *dev_id;
+};
+
+#define TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parent_names, _offset, \
+ _mux_shift, _mux_width, _mux_flags, _div_shift, \
+ _div_width, _div_frac_width, _div_flags, _regs, \
+ _clk_num, _enb_refcnt, _gate_flags, _clk_id) \
+ { \
+ .name = _name, \
+ .clk_id = _clk_id, \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names), \
+ .periph = TEGRA_CLK_PERIPH(_mux_shift, _mux_width, \
+ _mux_flags, _div_shift, \
+ _div_width, _div_frac_width, \
+ _div_flags, _clk_num, \
+ _enb_refcnt, _regs, \
+ _gate_flags), \
+ .offset = _offset, \
+ .con_id = _con_id, \
+ .dev_id = _dev_id, \
+ }
+
+/**
+ * struct tegra_clk_super_mux - super clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register controlling multiplexer
+ * @width: width of the multiplexer bit field
+ * @flags: hardware-specific flags
+ * @div2_index: bit controlling divide-by-2
+ * @pllx_index: PLLX index in the parent list
+ * @lock: register lock
+ *
+ * Flags:
+ * TEGRA_DIVIDER_2 - The LP cluster has an additional divider. This flag
+ * indicates that this is the LP cluster clock.
+ */
+struct tegra_clk_super_mux {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 width;
+ u8 flags;
+ u8 div2_index;
+ u8 pllx_index;
+ spinlock_t *lock;
+};
+
+#define to_clk_super_mux(_hw) container_of(_hw, struct tegra_clk_super_mux, hw)
+
+#define TEGRA_DIVIDER_2 BIT(0)
+
+extern const struct clk_ops tegra_clk_super_ops;
+struct clk *tegra_clk_register_super_mux(const char *name,
+ const char **parent_names, u8 num_parents,
+ unsigned long flags, void __iomem *reg, u8 clk_super_flags,
+ u8 width, u8 pllx_index, u8 div2_index, spinlock_t *lock);
+
+/**
+ * struct tegra_clk_init_table - clock initialization table
+ * @clk_id: clock id as mentioned in device tree bindings
+ * @parent_id: parent clock id as mentioned in device tree bindings
+ * @rate: rate to set
+ * @state: enable/disable
+ */
+struct tegra_clk_init_table {
+ unsigned int clk_id;
+ unsigned int parent_id;
+ unsigned long rate;
+ int state;
+};
+
+/**
+ * struct tegra_clk_duplicate - duplicate clocks
+ * @clk_id: clock id as mentioned in device tree bindings
+ * @lookup: duplicate lookup entry for the clock
+ */
+struct tegra_clk_duplicate {
+ int clk_id;
+ struct clk_lookup lookup;
+};
+
+#define TEGRA_CLK_DUPLICATE(_clk_id, _dev, _con) \
+ { \
+ .clk_id = _clk_id, \
+ .lookup = { \
+ .dev_id = _dev, \
+ .con_id = _con, \
+ }, \
+ }
+
+void tegra_init_from_table(struct tegra_clk_init_table *tbl,
+ struct clk *clks[], int clk_max);
+
+void tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
+ struct clk *clks[], int clk_max);
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+void tegra20_clock_init(struct device_node *np);
+#else
+static inline void tegra20_clock_init(struct device_node *np) {}
+#endif /* CONFIG_ARCH_TEGRA_2x_SOC */
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+void tegra30_clock_init(struct device_node *np);
+#else
+static inline void tegra30_clock_init(struct device_node *np) {}
+#endif /* CONFIG_ARCH_TEGRA_3x_SOC */
+
+#endif /* __TEGRA_CLK_H */
diff --git a/drivers/clk/versatile/clk-vexpress.c b/drivers/clk/versatile/clk-vexpress.c
index f889f2f07b37..82b45aad8ccf 100644
--- a/drivers/clk/versatile/clk-vexpress.c
+++ b/drivers/clk/versatile/clk-vexpress.c
@@ -11,6 +11,7 @@
* Copyright (C) 2012 ARM Limited
*/
+#include <linux/amba/sp810.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
@@ -18,8 +19,6 @@
#include <linux/of_address.h>
#include <linux/vexpress.h>
-#include <asm/hardware/sp810.h>
-
static struct clk *vexpress_sp810_timerclken[4];
static DEFINE_SPINLOCK(vexpress_sp810_lock);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 7fdcbd3f4da5..e920cbe519fa 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,3 +1,6 @@
+config CLKSRC_OF
+ bool
+
config CLKSRC_I8253
bool
@@ -25,6 +28,9 @@ config ARMADA_370_XP_TIMER
config SUNXI_TIMER
bool
+config VT8500_TIMER
+ bool
+
config CLKSRC_NOMADIK_MTU
bool
depends on (ARCH_NOMADIK || ARCH_U8500)
@@ -54,7 +60,5 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
help
Use the always on PRCMU Timer as sched_clock
-config CLKSRC_ARM_GENERIC
- def_bool y if ARM64
- help
- This option enables support for the ARM generic timer.
+config ARM_ARCH_TIMER
+ bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f93453d01673..7d671b85a98e 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_CLKSRC_OF) += clksrc-of.o
obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
@@ -16,5 +17,7 @@ obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
obj-$(CONFIG_SUNXI_TIMER) += sunxi_timer.o
+obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o
+obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
-obj-$(CONFIG_CLKSRC_ARM_GENERIC) += arm_generic.o
+obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
new file mode 100644
index 000000000000..d7ad425ab9b3
--- /dev/null
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -0,0 +1,391 @@
+/*
+ * linux/drivers/clocksource/arm_arch_timer.c
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+
+#include <asm/arch_timer.h>
+#include <asm/virt.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+static u32 arch_timer_rate;
+
+enum ppi_nr {
+ PHYS_SECURE_PPI,
+ PHYS_NONSECURE_PPI,
+ VIRT_PPI,
+ HYP_PPI,
+ MAX_TIMER_PPI
+};
+
+static int arch_timer_ppi[MAX_TIMER_PPI];
+
+static struct clock_event_device __percpu *arch_timer_evt;
+
+static bool arch_timer_use_virtual = true;
+
+/*
+ * Architected system timer support.
+ */
+
+static inline irqreturn_t timer_handler(const int access,
+ struct clock_event_device *evt)
+{
+ unsigned long ctrl;
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
+ ctrl |= ARCH_TIMER_CTRL_IT_MASK;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
+}
+
+static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
+}
+
+static inline void timer_set_mode(const int access, int mode)
+{
+ unsigned long ctrl;
+ switch (mode) {
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void arch_timer_set_mode_virt(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
+}
+
+static void arch_timer_set_mode_phys(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
+}
+
+static inline void set_next_event(const int access, unsigned long evt)
+{
+ unsigned long ctrl;
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+}
+
+static int arch_timer_set_next_event_virt(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
+ return 0;
+}
+
+static int arch_timer_set_next_event_phys(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
+ return 0;
+}
+
+static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
+{
+ clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
+ clk->name = "arch_sys_timer";
+ clk->rating = 450;
+ if (arch_timer_use_virtual) {
+ clk->irq = arch_timer_ppi[VIRT_PPI];
+ clk->set_mode = arch_timer_set_mode_virt;
+ clk->set_next_event = arch_timer_set_next_event_virt;
+ } else {
+ clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+ clk->set_mode = arch_timer_set_mode_phys;
+ clk->set_next_event = arch_timer_set_next_event_phys;
+ }
+
+ clk->cpumask = cpumask_of(smp_processor_id());
+
+ clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
+
+ clockevents_config_and_register(clk, arch_timer_rate,
+ 0xf, 0x7fffffff);
+
+ if (arch_timer_use_virtual)
+ enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
+ else {
+ enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
+ if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+ enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+ }
+
+ arch_counter_set_user_access();
+
+ return 0;
+}
+
+static int arch_timer_available(void)
+{
+ u32 freq;
+
+ if (arch_timer_rate == 0) {
+ freq = arch_timer_get_cntfrq();
+
+ /* Check the timer frequency. */
+ if (freq == 0) {
+ pr_warn("Architected timer frequency not available\n");
+ return -EINVAL;
+ }
+
+ arch_timer_rate = freq;
+ }
+
+ pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
+ (unsigned long)arch_timer_rate / 1000000,
+ (unsigned long)(arch_timer_rate / 10000) % 100,
+ arch_timer_use_virtual ? "virt" : "phys");
+ return 0;
+}
+
+u32 arch_timer_get_rate(void)
+{
+ return arch_timer_rate;
+}
+
+/*
+ * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
+ * call it before it has been initialised. Rather than incur a performance
+ * penalty checking for initialisation, provide a default implementation that
+ * won't lead to time appearing to jump backwards.
+ */
+static u64 arch_timer_read_zero(void)
+{
+ return 0;
+}
+
+u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
+
+static cycle_t arch_counter_read(struct clocksource *cs)
+{
+ return arch_timer_read_counter();
+}
+
+static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+{
+ return arch_timer_read_counter();
+}
+
+static struct clocksource clocksource_counter = {
+ .name = "arch_sys_counter",
+ .rating = 400,
+ .read = arch_counter_read,
+ .mask = CLOCKSOURCE_MASK(56),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static struct cyclecounter cyclecounter = {
+ .read = arch_counter_read_cc,
+ .mask = CLOCKSOURCE_MASK(56),
+};
+
+static struct timecounter timecounter;
+
+struct timecounter *arch_timer_get_timecounter(void)
+{
+ return &timecounter;
+}
+
+static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
+{
+ pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
+ clk->irq, smp_processor_id());
+
+ if (arch_timer_use_virtual)
+ disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
+ else {
+ disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
+ if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+ disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+ }
+
+ clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+}
+
+static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ arch_timer_setup(evt);
+ break;
+ case CPU_DYING:
+ arch_timer_stop(evt);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
+ .notifier_call = arch_timer_cpu_notify,
+};
+
+static int __init arch_timer_register(void)
+{
+ int err;
+ int ppi;
+
+ err = arch_timer_available();
+ if (err)
+ goto out;
+
+ arch_timer_evt = alloc_percpu(struct clock_event_device);
+ if (!arch_timer_evt) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+ cyclecounter.mult = clocksource_counter.mult;
+ cyclecounter.shift = clocksource_counter.shift;
+ timecounter_init(&timecounter, &cyclecounter,
+ arch_counter_get_cntpct());
+
+ if (arch_timer_use_virtual) {
+ ppi = arch_timer_ppi[VIRT_PPI];
+ err = request_percpu_irq(ppi, arch_timer_handler_virt,
+ "arch_timer", arch_timer_evt);
+ } else {
+ ppi = arch_timer_ppi[PHYS_SECURE_PPI];
+ err = request_percpu_irq(ppi, arch_timer_handler_phys,
+ "arch_timer", arch_timer_evt);
+ if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+ ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
+ err = request_percpu_irq(ppi, arch_timer_handler_phys,
+ "arch_timer", arch_timer_evt);
+ if (err)
+ free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+ arch_timer_evt);
+ }
+ }
+
+ if (err) {
+ pr_err("arch_timer: can't register interrupt %d (%d)\n",
+ ppi, err);
+ goto out_free;
+ }
+
+ err = register_cpu_notifier(&arch_timer_cpu_nb);
+ if (err)
+ goto out_free_irq;
+
+ /* Immediately configure the timer on the boot CPU */
+ arch_timer_setup(this_cpu_ptr(arch_timer_evt));
+
+ return 0;
+
+out_free_irq:
+ if (arch_timer_use_virtual)
+ free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
+ else {
+ free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+ arch_timer_evt);
+ if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+ free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
+ arch_timer_evt);
+ }
+
+out_free:
+ free_percpu(arch_timer_evt);
+out:
+ return err;
+}
+
+static const struct of_device_id arch_timer_of_match[] __initconst = {
+ { .compatible = "arm,armv7-timer", },
+ { .compatible = "arm,armv8-timer", },
+ {},
+};
+
+int __init arch_timer_init(void)
+{
+ struct device_node *np;
+ u32 freq;
+ int i;
+
+ np = of_find_matching_node(NULL, arch_timer_of_match);
+ if (!np) {
+ pr_err("arch_timer: can't find DT node\n");
+ return -ENODEV;
+ }
+
+ /* Try to determine the frequency from the device tree or CNTFRQ */
+ if (!of_property_read_u32(np, "clock-frequency", &freq))
+ arch_timer_rate = freq;
+
+ for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
+ arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+
+ of_node_put(np);
+
+ /*
+ * If HYP mode is available, we know that the physical timer
+ * has been configured to be accessible from PL1. Use it, so
+ * that a guest can use the virtual timer instead.
+ *
+ * If no interrupt is provided for the virtual timer, we'll have to
+ * stick to the physical timer. It'd better be accessible...
+ */
+ if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
+ arch_timer_use_virtual = false;
+
+ if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
+ !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+ pr_warn("arch_timer: No interrupt available, giving up\n");
+ return -EINVAL;
+ }
+ }
+
+ if (arch_timer_use_virtual)
+ arch_timer_read_counter = arch_counter_get_cntvct;
+ else
+ arch_timer_read_counter = arch_counter_get_cntpct;
+
+ return arch_timer_register();
+}
diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
deleted file mode 100644
index 8ae1a61523ff..000000000000
--- a/drivers/clocksource/arm_generic.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Generic timers support
- *
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-
-#include <clocksource/arm_generic.h>
-
-#include <asm/arm_generic.h>
-
-static u32 arch_timer_rate;
-static u64 sched_clock_mult __read_mostly;
-static DEFINE_PER_CPU(struct clock_event_device, arch_timer_evt);
-static int arch_timer_ppi;
-
-static irqreturn_t arch_timer_handle_irq(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
- unsigned long ctrl;
-
- ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
- if (ctrl & ARCH_TIMER_CTRL_ISTATUS) {
- ctrl |= ARCH_TIMER_CTRL_IMASK;
- arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
- evt->event_handler(evt);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
-}
-
-static void arch_timer_stop(void)
-{
- unsigned long ctrl;
-
- ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
- ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
-}
-
-static void arch_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *clk)
-{
- switch (mode) {
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- arch_timer_stop();
- break;
- default:
- break;
- }
-}
-
-static int arch_timer_set_next_event(unsigned long evt,
- struct clock_event_device *unused)
-{
- unsigned long ctrl;
-
- ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
- ctrl |= ARCH_TIMER_CTRL_ENABLE;
- ctrl &= ~ARCH_TIMER_CTRL_IMASK;
-
- arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
- arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
-
- return 0;
-}
-
-static void __cpuinit arch_timer_setup(struct clock_event_device *clk)
-{
- /* Let's make sure the timer is off before doing anything else */
- arch_timer_stop();
-
- clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
- clk->name = "arch_sys_timer";
- clk->rating = 400;
- clk->set_mode = arch_timer_set_mode;
- clk->set_next_event = arch_timer_set_next_event;
- clk->irq = arch_timer_ppi;
- clk->cpumask = cpumask_of(smp_processor_id());
-
- clockevents_config_and_register(clk, arch_timer_rate,
- 0xf, 0x7fffffff);
-
- enable_percpu_irq(clk->irq, 0);
-
- /* Ensure the virtual counter is visible to userspace for the vDSO. */
- arch_counter_enable_user_access();
-}
-
-static void __init arch_timer_calibrate(void)
-{
- if (arch_timer_rate == 0) {
- arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
- arch_timer_rate = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);
-
- /* Check the timer frequency. */
- if (arch_timer_rate == 0)
- panic("Architected timer frequency is set to zero.\n"
- "You must set this in your .dts file\n");
- }
-
- /* Cache the sched_clock multiplier to save a divide in the hot path. */
-
- sched_clock_mult = DIV_ROUND_CLOSEST(NSEC_PER_SEC, arch_timer_rate);
-
- pr_info("Architected local timer running at %u.%02uMHz.\n",
- arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
-}
-
-static cycle_t arch_counter_read(struct clocksource *cs)
-{
- return arch_counter_get_cntpct();
-}
-
-static struct clocksource clocksource_counter = {
- .name = "arch_sys_counter",
- .rating = 400,
- .read = arch_counter_read,
- .mask = CLOCKSOURCE_MASK(56),
- .flags = (CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_VALID_FOR_HRES),
-};
-
-int read_current_timer(unsigned long *timer_value)
-{
- *timer_value = arch_counter_get_cntpct();
- return 0;
-}
-
-unsigned long long notrace sched_clock(void)
-{
- return arch_counter_get_cntvct() * sched_clock_mult;
-}
-
-static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- int cpu = (long)hcpu;
- struct clock_event_device *clk = per_cpu_ptr(&arch_timer_evt, cpu);
-
- switch(action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- arch_timer_setup(clk);
- break;
-
- case CPU_DYING:
- case CPU_DYING_FROZEN:
- pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
- clk->irq, cpu);
- disable_percpu_irq(clk->irq);
- arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
- .notifier_call = arch_timer_cpu_notify,
-};
-
-static const struct of_device_id arch_timer_of_match[] __initconst = {
- { .compatible = "arm,armv8-timer" },
- {},
-};
-
-int __init arm_generic_timer_init(void)
-{
- struct device_node *np;
- int err;
- u32 freq;
-
- np = of_find_matching_node(NULL, arch_timer_of_match);
- if (!np) {
- pr_err("arch_timer: can't find DT node\n");
- return -ENODEV;
- }
-
- /* Try to determine the frequency from the device tree or CNTFRQ */
- if (!of_property_read_u32(np, "clock-frequency", &freq))
- arch_timer_rate = freq;
- arch_timer_calibrate();
-
- arch_timer_ppi = irq_of_parse_and_map(np, 0);
- pr_info("arch_timer: found %s irq %d\n", np->name, arch_timer_ppi);
-
- err = request_percpu_irq(arch_timer_ppi, arch_timer_handle_irq,
- np->name, &arch_timer_evt);
- if (err) {
- pr_err("arch_timer: can't register interrupt %d (%d)\n",
- arch_timer_ppi, err);
- return err;
- }
-
- clocksource_register_hz(&clocksource_counter, arch_timer_rate);
-
- /* Calibrate the delay loop directly */
- lpj_fine = DIV_ROUND_CLOSEST(arch_timer_rate, HZ);
-
- /* Immediately configure the timer on the boot CPU */
- arch_timer_setup(this_cpu_ptr(&arch_timer_evt));
-
- register_cpu_notifier(&arch_timer_cpu_nb);
-
- return 0;
-}
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index bc19f12c20ce..50c68fef944b 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -16,7 +16,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/bcm2835_timer.h>
#include <linux/bitops.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
@@ -101,7 +100,7 @@ static struct of_device_id bcm2835_time_match[] __initconst = {
{}
};
-static void __init bcm2835_time_init(void)
+static void __init bcm2835_timer_init(void)
{
struct device_node *node;
void __iomem *base;
@@ -155,7 +154,5 @@ static void __init bcm2835_time_init(void)
pr_info("bcm2835: system timer (irq = %d)\n", irq);
}
-
-struct sys_timer bcm2835_timer = {
- .init = bcm2835_time_init,
-};
+CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
+ bcm2835_timer_init);
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
new file mode 100644
index 000000000000..bdabdaa8d00f
--- /dev/null
+++ b/drivers/clocksource/clksrc-of.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+
+extern struct of_device_id __clksrc_of_table[];
+
+static const struct of_device_id __clksrc_of_table_sentinel
+ __used __section(__clksrc_of_table_end);
+
+void __init clocksource_of_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ void (*init_func)(void);
+
+ for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
+ init_func = match->data;
+ init_func();
+ }
+}
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index d9279385304d..ea210482dd20 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -100,7 +100,6 @@ static struct clock_event_device cs5535_clockevent = {
.set_mode = mfgpt_set_mode,
.set_next_event = mfgpt_next_event,
.rating = 250,
- .shift = 32
};
static irqreturn_t mfgpt_tick(int irq, void *dev_id)
@@ -169,17 +168,11 @@ static int __init cs5535_mfgpt_init(void)
cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, val);
/* Set up the clock event */
- cs5535_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
- cs5535_clockevent.shift);
- cs5535_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
- &cs5535_clockevent);
- cs5535_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
- &cs5535_clockevent);
-
printk(KERN_INFO DRV_NAME
": Registering MFGPT timer as a clock event, using IRQ %d\n",
timer_irq);
- clockevents_register_device(&cs5535_clockevent);
+ clockevents_config_and_register(&cs5535_clockevent, MFGPT_HZ,
+ 0xF, 0xFFFE);
return 0;
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index f7dba5b79b44..ab09ed3742ee 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -107,7 +107,7 @@ static const struct of_device_id osctimer_ids[] __initconst = {
{},
};
-static void __init timer_init(void)
+void __init dw_apb_timer_init(void)
{
struct device_node *event_timer, *source_timer;
@@ -125,7 +125,3 @@ static void __init timer_init(void)
init_sched_clock();
}
-
-struct sys_timer dw_apb_timer = {
- .init = timer_init,
-};
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 8914c3c1c88b..071f6eadfea2 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -15,6 +15,7 @@
#include <linux/clocksource.h>
#include <linux/clk.h>
#include <linux/jiffies.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/platform_data/clocksource-nomadik-mtu.h>
#include <asm/mach/time.h>
@@ -64,6 +65,7 @@ static void __iomem *mtu_base;
static bool clkevt_periodic;
static u32 clk_prescale;
static u32 nmdk_cycle; /* write-once */
+static struct delay_timer mtu_delay_timer;
#ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK
/*
@@ -80,6 +82,11 @@ static u32 notrace nomadik_read_sched_clock(void)
}
#endif
+static unsigned long nmdk_timer_read_current_timer(void)
+{
+ return ~readl_relaxed(mtu_base + MTU_VAL(0));
+}
+
/* Clockevent device: use one-shot mode */
static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
{
@@ -134,12 +141,32 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
}
}
+void nmdk_clksrc_reset(void)
+{
+ /* Disable */
+ writel(0, mtu_base + MTU_CR(0));
+
+ /* ClockSource: configure load and background-load, and fire it up */
+ writel(nmdk_cycle, mtu_base + MTU_LR(0));
+ writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
+
+ writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA,
+ mtu_base + MTU_CR(0));
+}
+
+static void nmdk_clkevt_resume(struct clock_event_device *cedev)
+{
+ nmdk_clkevt_reset();
+ nmdk_clksrc_reset();
+}
+
static struct clock_event_device nmdk_clkevt = {
.name = "mtu_1",
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
.rating = 200,
.set_mode = nmdk_clkevt_mode,
.set_next_event = nmdk_clkevt_next,
+ .resume = nmdk_clkevt_resume,
};
/*
@@ -161,19 +188,6 @@ static struct irqaction nmdk_timer_irq = {
.dev_id = &nmdk_clkevt,
};
-void nmdk_clksrc_reset(void)
-{
- /* Disable */
- writel(0, mtu_base + MTU_CR(0));
-
- /* ClockSource: configure load and background-load, and fire it up */
- writel(nmdk_cycle, mtu_base + MTU_LR(0));
- writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
-
- writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA,
- mtu_base + MTU_CR(0));
-}
-
void __init nmdk_timer_init(void __iomem *base, int irq)
{
unsigned long rate;
@@ -226,5 +240,10 @@ void __init nmdk_timer_init(void __iomem *base, int irq)
/* Timer 1 is used for events, register irq and clockevents */
setup_irq(irq, &nmdk_timer_irq);
nmdk_clkevt.cpumask = cpumask_of(0);
+ nmdk_clkevt.irq = irq;
clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);
+
+ mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
+ mtu_delay_timer.freq = rate;
+ register_current_timer_delay(&mtu_delay_timer);
}
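
Besides the clockevent rework, this hunk also exposes the MTU as an ARM delay timer, so udelay() can poll the free-running counter instead of relying on a calibrated software loop. A brief sketch of that registration, assuming a hypothetical up-counting read helper my_read_counter() and a rate already obtained from the clock framework:

	static struct delay_timer my_delay_timer;

	my_delay_timer.read_current_timer = my_read_counter;
	my_delay_timer.freq = rate;	/* counter frequency in Hz */
	register_current_timer_delay(&my_delay_timer);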
diff --git a/drivers/clocksource/sunxi_timer.c b/drivers/clocksource/sunxi_timer.c
index 93d09d0e009f..4086b9167159 100644
--- a/drivers/clocksource/sunxi_timer.c
+++ b/drivers/clocksource/sunxi_timer.c
@@ -74,7 +74,6 @@ static int sunxi_clkevt_next_event(unsigned long evt,
static struct clock_event_device sunxi_clockevent = {
.name = "sunxi_tick",
- .shift = 32,
.rating = 300,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = sunxi_clkevt_mode,
@@ -104,7 +103,7 @@ static struct of_device_id sunxi_timer_dt_ids[] = {
{ }
};
-static void __init sunxi_timer_init(void)
+void __init sunxi_timer_init(void)
{
struct device_node *node;
unsigned long rate = 0;
@@ -154,18 +153,8 @@ static void __init sunxi_timer_init(void)
val = readl(timer_base + TIMER_CTL_REG);
writel(val | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG);
- sunxi_clockevent.mult = div_sc(rate / TIMER_SCAL,
- NSEC_PER_SEC,
- sunxi_clockevent.shift);
- sunxi_clockevent.max_delta_ns = clockevent_delta2ns(0xff,
- &sunxi_clockevent);
- sunxi_clockevent.min_delta_ns = clockevent_delta2ns(0x1,
- &sunxi_clockevent);
sunxi_clockevent.cpumask = cpumask_of(0);
- clockevents_register_device(&sunxi_clockevent);
+ clockevents_config_and_register(&sunxi_clockevent, rate / TIMER_SCAL,
+ 0x1, 0xff);
}
-
-struct sys_timer sunxi_timer = {
- .init = sunxi_timer_init,
-};
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 32cb929b8eb6..8a6187225dd0 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -157,7 +157,6 @@ static struct tc_clkevt_device clkevt = {
.name = "tc_clkevt",
.features = CLOCK_EVT_FEAT_PERIODIC
| CLOCK_EVT_FEAT_ONESHOT,
- .shift = 32,
/* Should be lower than at91rm9200's system timer */
.rating = 125,
.set_next_event = tc_next_event,
@@ -196,13 +195,9 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
timer_clock = clk32k_divisor_idx;
- clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
- clkevt.clkevt.max_delta_ns
- = clockevent_delta2ns(0xffff, &clkevt.clkevt);
- clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
clkevt.clkevt.cpumask = cpumask_of(0);
- clockevents_register_device(&clkevt.clkevt);
+ clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
setup_irq(irq, &tc_irqaction);
}
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
new file mode 100644
index 000000000000..0bde03feb095
--- /dev/null
+++ b/drivers/clocksource/tegra20_timer.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/time.h>
+#include <asm/smp_twd.h>
+#include <asm/sched_clock.h>
+
+#define RTC_SECONDS 0x08
+#define RTC_SHADOW_SECONDS 0x0c
+#define RTC_MILLISECONDS 0x10
+
+#define TIMERUS_CNTR_1US 0x10
+#define TIMERUS_USEC_CFG 0x14
+#define TIMERUS_CNTR_FREEZE 0x4c
+
+#define TIMER1_BASE 0x0
+#define TIMER2_BASE 0x8
+#define TIMER3_BASE 0x50
+#define TIMER4_BASE 0x58
+
+#define TIMER_PTV 0x0
+#define TIMER_PCR 0x4
+
+static void __iomem *timer_reg_base;
+static void __iomem *rtc_base;
+
+static struct timespec persistent_ts;
+static u64 persistent_ms, last_persistent_ms;
+
+#define timer_writel(value, reg) \
+ __raw_writel(value, timer_reg_base + (reg))
+#define timer_readl(reg) \
+ __raw_readl(timer_reg_base + (reg))
+
+static int tegra_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ u32 reg;
+
+ reg = 0x80000000 | ((cycles > 1) ? (cycles-1) : 0);
+ timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+
+ return 0;
+}
+
+static void tegra_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ u32 reg;
+
+ timer_writel(0, TIMER3_BASE + TIMER_PTV);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ reg = 0xC0000000 | ((1000000/HZ)-1);
+ timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static struct clock_event_device tegra_clockevent = {
+ .name = "timer0",
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = tegra_timer_set_next_event,
+ .set_mode = tegra_timer_set_mode,
+};
+
+static u32 notrace tegra_read_sched_clock(void)
+{
+ return timer_readl(TIMERUS_CNTR_1US);
+}
+
+/*
+ * tegra_rtc_read - Reads the Tegra RTC registers
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing to avoid race conditions
+ * on the RTC shadow register
+ */
+static u64 tegra_rtc_read_ms(void)
+{
+ u32 ms = readl(rtc_base + RTC_MILLISECONDS);
+ u32 s = readl(rtc_base + RTC_SHADOW_SECONDS);
+ return (u64)s * MSEC_PER_SEC + ms;
+}
+
+/*
+ * tegra_read_persistent_clock - Return time from a persistent clock.
+ *
+ * Reads the time from a source which isn't disabled during PM, the
+ * 32k sync timer. Converts the cycles elapsed since the last read into
+ * nsecs and adds them to a monotonically increasing timespec.
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing to avoid race conditions
+ * on the RTC shadow register
+ */
+static void tegra_read_persistent_clock(struct timespec *ts)
+{
+ u64 delta;
+ struct timespec *tsp = &persistent_ts;
+
+ last_persistent_ms = persistent_ms;
+ persistent_ms = tegra_rtc_read_ms();
+ delta = persistent_ms - last_persistent_ms;
+
+ timespec_add_ns(tsp, delta * NSEC_PER_MSEC);
+ *ts = *tsp;
+}
+
+static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+ timer_writel(1<<30, TIMER3_BASE + TIMER_PCR);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction tegra_timer_irq = {
+ .name = "timer0",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_HIGH,
+ .handler = tegra_timer_interrupt,
+ .dev_id = &tegra_clockevent,
+};
+
+static const struct of_device_id timer_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-timer" },
+ {}
+};
+
+static const struct of_device_id rtc_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-rtc" },
+ {}
+};
+
+static void __init tegra20_init_timer(void)
+{
+ struct device_node *np;
+ struct clk *clk;
+ unsigned long rate;
+ int ret;
+
+ np = of_find_matching_node(NULL, timer_match);
+ if (!np) {
+ pr_err("Failed to find timer DT node\n");
+ BUG();
+ }
+
+ timer_reg_base = of_iomap(np, 0);
+ if (!timer_reg_base) {
+ pr_err("Can't map timer registers\n");
+ BUG();
+ }
+
+ tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
+ if (tegra_timer_irq.irq <= 0) {
+ pr_err("Failed to map timer IRQ\n");
+ BUG();
+ }
+
+ clk = clk_get_sys("timer", NULL);
+ if (IS_ERR(clk)) {
+ pr_warn("Unable to get timer clock. Assuming 12Mhz input clock.\n");
+ rate = 12000000;
+ } else {
+ clk_prepare_enable(clk);
+ rate = clk_get_rate(clk);
+ }
+
+ of_node_put(np);
+
+ np = of_find_matching_node(NULL, rtc_match);
+ if (!np) {
+ pr_err("Failed to find RTC DT node\n");
+ BUG();
+ }
+
+ rtc_base = of_iomap(np, 0);
+ if (!rtc_base) {
+ pr_err("Can't map RTC registers");
+ BUG();
+ }
+
+ /*
+ * rtc registers are used by read_persistent_clock, keep the rtc clock
+ * enabled
+ */
+ clk = clk_get_sys("rtc-tegra", NULL);
+ if (IS_ERR(clk))
+ pr_warn("Unable to get rtc-tegra clock\n");
+ else
+ clk_prepare_enable(clk);
+
+ of_node_put(np);
+
+ switch (rate) {
+ case 12000000:
+ timer_writel(0x000b, TIMERUS_USEC_CFG);
+ break;
+ case 13000000:
+ timer_writel(0x000c, TIMERUS_USEC_CFG);
+ break;
+ case 19200000:
+ timer_writel(0x045f, TIMERUS_USEC_CFG);
+ break;
+ case 26000000:
+ timer_writel(0x0019, TIMERUS_USEC_CFG);
+ break;
+ default:
+ WARN(1, "Unknown clock rate");
+ }
+
+ setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
+
+ if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
+ "timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
+ pr_err("Failed to register clocksource\n");
+ BUG();
+ }
+
+ ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
+ if (ret) {
+ pr_err("Failed to register timer IRQ: %d\n", ret);
+ BUG();
+ }
+
+ tegra_clockevent.cpumask = cpu_all_mask;
+ tegra_clockevent.irq = tegra_timer_irq.irq;
+ clockevents_config_and_register(&tegra_clockevent, 1000000,
+ 0x1, 0x1fffffff);
+#ifdef CONFIG_HAVE_ARM_TWD
+ twd_local_timer_of_register();
+#endif
+ register_persistent_clock(NULL, tegra_read_persistent_clock);
+}
+CLOCKSOURCE_OF_DECLARE(tegra20, "nvidia,tegra20-timer", tegra20_init_timer);
+
+#ifdef CONFIG_PM
+static u32 usec_config;
+
+void tegra_timer_suspend(void)
+{
+ usec_config = timer_readl(TIMERUS_USEC_CFG);
+}
+
+void tegra_timer_resume(void)
+{
+ timer_writel(usec_config, TIMERUS_USEC_CFG);
+}
+#endif
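
The persistent-clock path above converts the RTC shadow seconds plus milliseconds into a single 64-bit millisecond count and folds only the delta since the previous read into a timespec, so the value keeps increasing monotonically across suspend. A worked sketch of that accumulation step (the sample values are made up for illustration):

	/* previous read: 1,000,000 ms; current read: 1,000,250 ms */
	u64 delta_ms = 1000250 - 1000000;		/* slept 250 ms */
	timespec_add_ns(&persistent_ts, delta_ms * NSEC_PER_MSEC);
	/* persistent_ts has advanced by 0.250000000 s */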
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index a4605fd7e303..47a673070d70 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -27,8 +27,10 @@
#include <linux/of_address.h>
#include <linux/irq.h>
#include <linux/module.h>
-#include <asm/sched_clock.h>
+#include <asm/sched_clock.h>
+#include <asm/localtimer.h>
+#include <linux/percpu.h>
/*
* Timer block registers.
*/
@@ -49,6 +51,7 @@
#define TIMER1_RELOAD_OFF 0x0018
#define TIMER1_VAL_OFF 0x001c
+#define LCL_TIMER_EVENTS_STATUS 0x0028
/* Global timers are connected to the coherency fabric clock, and the
below divider reduces their incrementing frequency. */
#define TIMER_DIVIDER_SHIFT 5
@@ -57,14 +60,17 @@
/*
* SoC-specific data.
*/
-static void __iomem *timer_base;
-static int timer_irq;
+static void __iomem *timer_base, *local_base;
+static unsigned int timer_clk;
+static bool timer25Mhz = true;
/*
* Number of timer ticks per jiffy.
*/
static u32 ticks_per_jiffy;
+static struct clock_event_device __percpu **percpu_armada_370_xp_evt;
+
static u32 notrace armada_370_xp_read_sched_clock(void)
{
return ~readl(timer_base + TIMER0_VAL_OFF);
@@ -78,24 +84,23 @@ armada_370_xp_clkevt_next_event(unsigned long delta,
struct clock_event_device *dev)
{
u32 u;
-
/*
* Clear clockevent timer interrupt.
*/
- writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
+ writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
/*
* Setup new clockevent timer value.
*/
- writel(delta, timer_base + TIMER1_VAL_OFF);
+ writel(delta, local_base + TIMER0_VAL_OFF);
/*
* Enable the timer.
*/
- u = readl(timer_base + TIMER_CTRL_OFF);
- u = ((u & ~TIMER1_RELOAD_EN) | TIMER1_EN |
- TIMER1_DIV(TIMER_DIVIDER_SHIFT));
- writel(u, timer_base + TIMER_CTRL_OFF);
+ u = readl(local_base + TIMER_CTRL_OFF);
+ u = ((u & ~TIMER0_RELOAD_EN) | TIMER0_EN |
+ TIMER0_DIV(TIMER_DIVIDER_SHIFT));
+ writel(u, local_base + TIMER_CTRL_OFF);
return 0;
}
@@ -107,37 +112,38 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
u32 u;
if (mode == CLOCK_EVT_MODE_PERIODIC) {
+
/*
* Setup timer to fire at 1/HZ intervals.
*/
- writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD_OFF);
- writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL_OFF);
+ writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF);
+ writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF);
/*
* Enable timer.
*/
- u = readl(timer_base + TIMER_CTRL_OFF);
- writel((u | TIMER1_EN | TIMER1_RELOAD_EN |
- TIMER1_DIV(TIMER_DIVIDER_SHIFT)),
- timer_base + TIMER_CTRL_OFF);
+ u = readl(local_base + TIMER_CTRL_OFF);
+
+ writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
+ TIMER0_DIV(TIMER_DIVIDER_SHIFT)),
+ local_base + TIMER_CTRL_OFF);
} else {
/*
* Disable timer.
*/
- u = readl(timer_base + TIMER_CTRL_OFF);
- writel(u & ~TIMER1_EN, timer_base + TIMER_CTRL_OFF);
+ u = readl(local_base + TIMER_CTRL_OFF);
+ writel(u & ~TIMER0_EN, local_base + TIMER_CTRL_OFF);
/*
* ACK pending timer interrupt.
*/
- writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
-
+ writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
}
}
static struct clock_event_device armada_370_xp_clkevt = {
- .name = "armada_370_xp_tick",
+ .name = "armada_370_xp_per_cpu_tick",
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
.shift = 32,
.rating = 300,
@@ -150,32 +156,78 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
/*
* ACK timer interrupt and call event handler.
*/
+ struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
- writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
- armada_370_xp_clkevt.event_handler(&armada_370_xp_clkevt);
+ writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
+ evt->event_handler(evt);
return IRQ_HANDLED;
}
-static struct irqaction armada_370_xp_timer_irq = {
- .name = "armada_370_xp_tick",
- .flags = IRQF_DISABLED | IRQF_TIMER,
- .handler = armada_370_xp_timer_interrupt
+/*
+ * Setup the local clock events for a CPU.
+ */
+static int __cpuinit armada_370_xp_timer_setup(struct clock_event_device *evt)
+{
+ u32 u;
+ int cpu = smp_processor_id();
+
+ /* Use existing clock_event for cpu 0 */
+ if (!smp_processor_id())
+ return 0;
+
+ u = readl(local_base + TIMER_CTRL_OFF);
+ if (timer25Mhz)
+ writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+ else
+ writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+
+ evt->name = armada_370_xp_clkevt.name;
+ evt->irq = armada_370_xp_clkevt.irq;
+ evt->features = armada_370_xp_clkevt.features;
+ evt->shift = armada_370_xp_clkevt.shift;
+ evt->rating = armada_370_xp_clkevt.rating,
+ evt->set_next_event = armada_370_xp_clkevt_next_event,
+ evt->set_mode = armada_370_xp_clkevt_mode,
+ evt->cpumask = cpumask_of(cpu);
+
+ *__this_cpu_ptr(percpu_armada_370_xp_evt) = evt;
+
+ clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
+ enable_percpu_irq(evt->irq, 0);
+
+ return 0;
+}
+
+static void armada_370_xp_timer_stop(struct clock_event_device *evt)
+{
+ evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+ disable_percpu_irq(evt->irq);
+}
+
+static struct local_timer_ops armada_370_xp_local_timer_ops __cpuinitdata = {
+ .setup = armada_370_xp_timer_setup,
+ .stop = armada_370_xp_timer_stop,
};
void __init armada_370_xp_timer_init(void)
{
u32 u;
struct device_node *np;
- unsigned int timer_clk;
+ int res;
+
np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
timer_base = of_iomap(np, 0);
WARN_ON(!timer_base);
+ local_base = of_iomap(np, 1);
if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
/* The fixed 25MHz timer is available so let's use it */
+ u = readl(local_base + TIMER_CTRL_OFF);
+ writel(u | TIMER0_25MHZ,
+ local_base + TIMER_CTRL_OFF);
u = readl(timer_base + TIMER_CTRL_OFF);
- writel(u | TIMER0_25MHZ | TIMER1_25MHZ,
+ writel(u | TIMER0_25MHZ,
timer_base + TIMER_CTRL_OFF);
timer_clk = 25000000;
} else {
@@ -183,15 +235,23 @@ void __init armada_370_xp_timer_init(void)
struct clk *clk = of_clk_get(np, 0);
WARN_ON(IS_ERR(clk));
rate = clk_get_rate(clk);
+ u = readl(local_base + TIMER_CTRL_OFF);
+ writel(u & ~(TIMER0_25MHZ),
+ local_base + TIMER_CTRL_OFF);
+
u = readl(timer_base + TIMER_CTRL_OFF);
- writel(u & ~(TIMER0_25MHZ | TIMER1_25MHZ),
+ writel(u & ~(TIMER0_25MHZ),
timer_base + TIMER_CTRL_OFF);
+
timer_clk = rate / TIMER_DIVIDER;
+ timer25Mhz = false;
}
- /* We use timer 0 as clocksource, and timer 1 for
- clockevents */
- timer_irq = irq_of_parse_and_map(np, 1);
+ /*
+ * We use timer 0 as clocksource, and private(local) timer 0
+ * for clockevents
+ */
+ armada_370_xp_clkevt.irq = irq_of_parse_and_map(np, 4);
ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
@@ -216,12 +276,26 @@ void __init armada_370_xp_timer_init(void)
"armada_370_xp_clocksource",
timer_clk, 300, 32, clocksource_mmio_readl_down);
- /*
- * Setup clockevent timer (interrupt-driven).
- */
- setup_irq(timer_irq, &armada_370_xp_timer_irq);
+ /* Register the clockevent on the private timer of CPU 0 */
armada_370_xp_clkevt.cpumask = cpumask_of(0);
clockevents_config_and_register(&armada_370_xp_clkevt,
timer_clk, 1, 0xfffffffe);
-}
+ percpu_armada_370_xp_evt = alloc_percpu(struct clock_event_device *);
+
+
+ /*
+ * Setup clockevent timer (interrupt-driven).
+ */
+ *__this_cpu_ptr(percpu_armada_370_xp_evt) = &armada_370_xp_clkevt;
+ res = request_percpu_irq(armada_370_xp_clkevt.irq,
+ armada_370_xp_timer_interrupt,
+ armada_370_xp_clkevt.name,
+ percpu_armada_370_xp_evt);
+ if (!res) {
+ enable_percpu_irq(armada_370_xp_clkevt.irq, 0);
+#ifdef CONFIG_LOCAL_TIMERS
+ local_timer_register(&armada_370_xp_local_timer_ops);
+#endif
+ }
+}
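
The Armada 370/XP change moves clockevents onto each CPU's private timer: CPU 0 keeps the statically defined device, secondary CPUs get theirs through the local_timer_ops setup hook, and every CPU shares one per-CPU interrupt line. A minimal sketch of the per-CPU IRQ pattern used here, with a hypothetical handler my_timer_isr and per-CPU pointer table my_evt:

	my_evt = alloc_percpu(struct clock_event_device *);

	/* One handler, one IRQ number, delivered per CPU. */
	res = request_percpu_irq(irq, my_timer_isr, "my_tick", my_evt);
	if (!res)
		enable_percpu_irq(irq, 0);	/* enables it on this CPU only */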
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
new file mode 100644
index 000000000000..8efc86b5b5dd
--- /dev/null
+++ b/drivers/clocksource/vt8500_timer.c
@@ -0,0 +1,180 @@
+/*
+ * arch/arm/mach-vt8500/timer.c
+ *
+ * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * This file is copied and modified from the original timer.c provided by
+ * Alexey Charkov. Minor changes have been made for Device Tree Support.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <asm/mach/time.h>
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define VT8500_TIMER_OFFSET 0x0100
+#define VT8500_TIMER_HZ 3000000
+#define TIMER_MATCH_VAL 0x0000
+#define TIMER_COUNT_VAL 0x0010
+#define TIMER_STATUS_VAL 0x0014
+#define TIMER_IER_VAL 0x001c /* interrupt enable */
+#define TIMER_CTRL_VAL 0x0020
+#define TIMER_AS_VAL 0x0024 /* access status */
+#define TIMER_COUNT_R_ACTIVE (1 << 5) /* not ready for read */
+#define TIMER_COUNT_W_ACTIVE (1 << 4) /* not ready for write */
+#define TIMER_MATCH_W_ACTIVE (1 << 0) /* not ready for write */
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
+static void __iomem *regbase;
+
+static cycle_t vt8500_timer_read(struct clocksource *cs)
+{
+ int loops = msecs_to_loops(10);
+ writel(3, regbase + TIMER_CTRL_VAL);
+ while ((readl((regbase + TIMER_AS_VAL)) & TIMER_COUNT_R_ACTIVE)
+ && --loops)
+ cpu_relax();
+ return readl(regbase + TIMER_COUNT_VAL);
+}
+
+static struct clocksource clocksource = {
+ .name = "vt8500_timer",
+ .rating = 200,
+ .read = vt8500_timer_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int vt8500_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ int loops = msecs_to_loops(10);
+ cycle_t alarm = clocksource.read(&clocksource) + cycles;
+ while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE)
+ && --loops)
+ cpu_relax();
+ writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
+
+ if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
+ return -ETIME;
+
+ writel(1, regbase + TIMER_IER_VAL);
+
+ return 0;
+}
+
+static void vt8500_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_RESUME:
+ case CLOCK_EVT_MODE_PERIODIC:
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ writel(readl(regbase + TIMER_CTRL_VAL) | 1,
+ regbase + TIMER_CTRL_VAL);
+ writel(0, regbase + TIMER_IER_VAL);
+ break;
+ }
+}
+
+static struct clock_event_device clockevent = {
+ .name = "vt8500_timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_next_event = vt8500_timer_set_next_event,
+ .set_mode = vt8500_timer_set_mode,
+};
+
+static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ writel(0xf, regbase + TIMER_STATUS_VAL);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq = {
+ .name = "vt8500_timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = vt8500_timer_interrupt,
+ .dev_id = &clockevent,
+};
+
+static struct of_device_id vt8500_timer_ids[] = {
+ { .compatible = "via,vt8500-timer" },
+ { }
+};
+
+static void __init vt8500_timer_init(void)
+{
+ struct device_node *np;
+ int timer_irq;
+
+ np = of_find_matching_node(NULL, vt8500_timer_ids);
+ if (!np) {
+ pr_err("%s: Timer description missing from Device Tree\n",
+ __func__);
+ return;
+ }
+ regbase = of_iomap(np, 0);
+ if (!regbase) {
+ pr_err("%s: Missing iobase description in Device Tree\n",
+ __func__);
+ of_node_put(np);
+ return;
+ }
+ timer_irq = irq_of_parse_and_map(np, 0);
+ if (!timer_irq) {
+ pr_err("%s: Missing irq description in Device Tree\n",
+ __func__);
+ of_node_put(np);
+ return;
+ }
+
+ writel(1, regbase + TIMER_CTRL_VAL);
+ writel(0xf, regbase + TIMER_STATUS_VAL);
+ writel(~0, regbase + TIMER_MATCH_VAL);
+
+ if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
+ pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
+ __func__, clocksource.name);
+
+ clockevent.cpumask = cpumask_of(0);
+
+ if (setup_irq(timer_irq, &irq))
+ pr_err("%s: setup_irq failed for %s\n", __func__,
+ clockevent.name);
+ clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
+ 4, 0xf0000000);
+}
+
+CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init)
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 5399c45ac311..863fd1865d45 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -44,7 +44,7 @@ obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
##################################################################################
# ARM SoC drivers
-obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
+obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 79a84860ea56..72f0c3efa76e 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -1,13 +1,13 @@
/*
* Copyright (C) STMicroelectronics 2009
- * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) ST-Ericsson SA 2010-2012
*
* License Terms: GNU General Public License v2
* Author: Sundar Iyer <sundar.iyer@stericsson.com>
* Author: Martin Persson <martin.persson@stericsson.com>
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
*/
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpufreq.h>
@@ -15,27 +15,27 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <mach/id.h>
static struct cpufreq_frequency_table *freq_table;
static struct clk *armss_clk;
-static struct freq_attr *db8500_cpufreq_attr[] = {
+static struct freq_attr *dbx500_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
-static int db8500_cpufreq_verify_speed(struct cpufreq_policy *policy)
+static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, freq_table);
}
-static int db8500_cpufreq_target(struct cpufreq_policy *policy,
+static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cpufreq_freqs freqs;
unsigned int idx;
+ int ret;
/* scale the target frequency to one of the extremes supported */
if (target_freq < policy->cpuinfo.min_freq)
@@ -44,10 +44,9 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
target_freq = policy->cpuinfo.max_freq;
/* Lookup the next frequency */
- if (cpufreq_frequency_table_target
- (policy, freq_table, target_freq, relation, &idx)) {
+ if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &idx))
return -EINVAL;
- }
freqs.old = policy->cur;
freqs.new = freq_table[idx].frequency;
@@ -60,9 +59,12 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* update armss clk frequency */
- if (clk_set_rate(armss_clk, freq_table[idx].frequency * 1000)) {
- pr_err("db8500-cpufreq: Failed to update armss clk\n");
- return -EINVAL;
+ ret = clk_set_rate(armss_clk, freqs.new * 1000);
+
+ if (ret) {
+ pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n",
+ freqs.new * 1000, ret);
+ return ret;
}
/* post change notification */
@@ -72,7 +74,7 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
return 0;
}
-static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
+static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
{
int i = 0;
unsigned long freq = clk_get_rate(armss_clk) / 1000;
@@ -84,40 +86,26 @@ static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
}
/* We could not find a corresponding frequency. */
- pr_err("db8500-cpufreq: Failed to find cpufreq speed\n");
+ pr_err("dbx500-cpufreq: Failed to find cpufreq speed\n");
return 0;
}
-static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
+static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy)
{
- int i = 0;
int res;
- armss_clk = clk_get(NULL, "armss");
- if (IS_ERR(armss_clk)) {
- pr_err("db8500-cpufreq : Failed to get armss clk\n");
- return PTR_ERR(armss_clk);
- }
-
- pr_info("db8500-cpufreq : Available frequencies:\n");
- while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
- pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
- i++;
- }
-
/* get policy fields based on the table */
res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (!res)
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
else {
- pr_err("db8500-cpufreq : Failed to read policy table\n");
- clk_put(armss_clk);
+ pr_err("dbx500-cpufreq: Failed to read policy table\n");
return res;
}
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
- policy->cur = db8500_cpufreq_getspeed(policy->cpu);
+ policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/*
@@ -133,45 +121,54 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
return 0;
}
-static struct cpufreq_driver db8500_cpufreq_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = db8500_cpufreq_verify_speed,
- .target = db8500_cpufreq_target,
- .get = db8500_cpufreq_getspeed,
- .init = db8500_cpufreq_init,
- .name = "DB8500",
- .attr = db8500_cpufreq_attr,
+static struct cpufreq_driver dbx500_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
+ .verify = dbx500_cpufreq_verify_speed,
+ .target = dbx500_cpufreq_target,
+ .get = dbx500_cpufreq_getspeed,
+ .init = dbx500_cpufreq_init,
+ .name = "DBX500",
+ .attr = dbx500_cpufreq_attr,
};
-static int db8500_cpufreq_probe(struct platform_device *pdev)
+static int dbx500_cpufreq_probe(struct platform_device *pdev)
{
- freq_table = dev_get_platdata(&pdev->dev);
+ int i = 0;
+ freq_table = dev_get_platdata(&pdev->dev);
if (!freq_table) {
- pr_err("db8500-cpufreq: Failed to fetch cpufreq table\n");
+ pr_err("dbx500-cpufreq: Failed to fetch cpufreq table\n");
return -ENODEV;
}
- return cpufreq_register_driver(&db8500_cpufreq_driver);
+ armss_clk = clk_get(&pdev->dev, "armss");
+ if (IS_ERR(armss_clk)) {
+ pr_err("dbx500-cpufreq: Failed to get armss clk\n");
+ return PTR_ERR(armss_clk);
+ }
+
+ pr_info("dbx500-cpufreq: Available frequencies:\n");
+ while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
+ pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
+ i++;
+ }
+
+ return cpufreq_register_driver(&dbx500_cpufreq_driver);
}
-static struct platform_driver db8500_cpufreq_plat_driver = {
+static struct platform_driver dbx500_cpufreq_plat_driver = {
.driver = {
- .name = "cpufreq-u8500",
+ .name = "cpufreq-ux500",
.owner = THIS_MODULE,
},
- .probe = db8500_cpufreq_probe,
+ .probe = dbx500_cpufreq_probe,
};
-static int __init db8500_cpufreq_register(void)
+static int __init dbx500_cpufreq_register(void)
{
- if (!cpu_is_u8500_family())
- return -ENODEV;
-
- pr_info("cpufreq for DB8500 started\n");
- return platform_driver_register(&db8500_cpufreq_plat_driver);
+ return platform_driver_register(&dbx500_cpufreq_plat_driver);
}
-device_initcall(db8500_cpufreq_register);
+device_initcall(dbx500_cpufreq_register);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("cpufreq driver for DB8500");
+MODULE_DESCRIPTION("cpufreq driver for DBX500");
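
The dbx500 target() path keeps the usual cpufreq transition protocol of this kernel generation: look up a table index, announce PRECHANGE, change the clock, then announce POSTCHANGE (or return before POSTCHANGE when the clock change fails). Condensed below, hedged as the general pattern rather than the full driver:

	freqs.old = policy->cur;
	freqs.new = freq_table[idx].frequency;	/* kHz */

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	ret = clk_set_rate(armss_clk, freqs.new * 1000);	/* kHz -> Hz */
	if (ret)
		return ret;		/* skip POSTCHANGE on failure */

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);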
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 69b676dd3358..78057a357ddb 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -18,10 +18,10 @@
#include <linux/cpufreq.h>
#include <linux/suspend.h>
-#include <mach/cpufreq.h>
-
#include <plat/cpu.h>
+#include "exynos-cpufreq.h"
+
static struct exynos_dvfs_info *exynos_info;
static struct regulator *arm_regulator;
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
new file mode 100644
index 000000000000..92b852ee5ddc
--- /dev/null
+++ b/drivers/cpufreq/exynos-cpufreq.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - CPUFreq support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+enum cpufreq_level_index {
+ L0, L1, L2, L3, L4,
+ L5, L6, L7, L8, L9,
+ L10, L11, L12, L13, L14,
+ L15, L16, L17, L18, L19,
+ L20,
+};
+
+#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
+ { \
+ .freq = (f) * 1000, \
+ .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
+ (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
+ .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
+ .mps = ((m) << 16 | (p) << 8 | (s)), \
+ }
+
+struct apll_freq {
+ unsigned int freq;
+ u32 clk_div_cpu0;
+ u32 clk_div_cpu1;
+ u32 mps;
+};
+
+struct exynos_dvfs_info {
+ unsigned long mpll_freq_khz;
+ unsigned int pll_safe_idx;
+ struct clk *cpu_clk;
+ unsigned int *volt_table;
+ struct cpufreq_frequency_table *freq_table;
+ void (*set_freq)(unsigned int, unsigned int);
+ bool (*need_apll_change)(unsigned int, unsigned int);
+};
+
+extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
+extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
+extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index de91755e2556..add7fbec4fc9 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -18,7 +18,8 @@
#include <linux/cpufreq.h>
#include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
+
+#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
static struct clk *moutcore;
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 0661039e5d4a..08b7477b0aa2 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -18,7 +18,8 @@
#include <linux/cpufreq.h>
#include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
+
+#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
static struct clk *moutcore;
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index b9344869f822..9fae466d7746 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -19,7 +19,8 @@
#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
+
+#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
static struct clk *moutcore;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index d6b6ef350cb6..54e336de373b 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -245,7 +245,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
arm_reg = devm_regulator_get(cpu_dev, "arm");
pu_reg = devm_regulator_get(cpu_dev, "pu");
soc_reg = devm_regulator_get(cpu_dev, "soc");
- if (!arm_reg || !pu_reg || !soc_reg) {
+ if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
dev_err(cpu_dev, "failed to get regulators\n");
ret = -ENOENT;
goto put_node;
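
The imx6q fix works because devm_regulator_get() never returns NULL: it returns either a valid regulator or an ERR_PTR()-encoded error, so only IS_ERR() can detect failure. The general pattern, sketched with a generic device:

	struct regulator *reg = devm_regulator_get(dev, "arm");
	if (IS_ERR(reg))
		return PTR_ERR(reg);	/* e.g. -ENODEV or -EPROBE_DEFER */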
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index c4cc27e5c8a5..071e2c3eec4f 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -39,4 +39,10 @@ config CPU_IDLE_CALXEDA
help
Select this to enable cpuidle on Calxeda processors.
+config CPU_IDLE_KIRKWOOD
+ bool "CPU Idle Driver for Kirkwood processors"
+ depends on ARCH_KIRKWOOD
+ help
+ Select this to enable cpuidle on Kirkwood processors.
+
endif
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 03ee87482c71..24c6e7d945ed 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -6,3 +6,4 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
+obj-$(CONFIG_CPU_IDLE_KIRKWOOD) += cpuidle-kirkwood.o
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
new file mode 100644
index 000000000000..670aa1e55cd6
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -0,0 +1,106 @@
+/*
+ * arch/arm/mach-kirkwood/cpuidle.c
+ *
+ * CPU idle support for Marvell Kirkwood SoCs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The CPU idle driver uses wait-for-interrupt and DDR self refresh to
+ * implement two idle states -
+ * #1 wait-for-interrupt
+ * #2 wait-for-interrupt and DDR self refresh
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/cpuidle.h>
+#include <linux/io.h>
+#include <linux/export.h>
+#include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
+
+#define KIRKWOOD_MAX_STATES 2
+
+static void __iomem *ddr_operation_base;
+
+/* Actual code that puts the SoC in different idle states */
+static int kirkwood_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ writel(0x7, ddr_operation_base);
+ cpu_do_idle();
+
+ return index;
+}
+
+static struct cpuidle_driver kirkwood_idle_driver = {
+ .name = "kirkwood_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = kirkwood_enter_idle,
+ .exit_latency = 10,
+ .target_residency = 100000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "DDR SR",
+ .desc = "WFI and DDR Self Refresh",
+ },
+ .state_count = KIRKWOOD_MAX_STATES,
+};
+static struct cpuidle_device *device;
+
+static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
+
+/* Initialize CPU idle by registering the idle states */
+static int kirkwood_cpuidle_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -EINVAL;
+
+ ddr_operation_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!ddr_operation_base)
+ return -EADDRNOTAVAIL;
+
+ device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
+ device->state_count = KIRKWOOD_MAX_STATES;
+
+ cpuidle_register_driver(&kirkwood_idle_driver);
+ if (cpuidle_register_device(device)) {
+ pr_err("kirkwood_init_cpuidle: Failed registering\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+int kirkwood_cpuidle_remove(struct platform_device *pdev)
+{
+ cpuidle_unregister_device(device);
+ cpuidle_unregister_driver(&kirkwood_idle_driver);
+
+ return 0;
+}
+
+static struct platform_driver kirkwood_cpuidle_driver = {
+ .probe = kirkwood_cpuidle_probe,
+ .remove = kirkwood_cpuidle_remove,
+ .driver = {
+ .name = "kirkwood_cpuidle",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(kirkwood_cpuidle_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_DESCRIPTION("Kirkwood cpu idle driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kirkwood-cpuidle");
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index c9d9d5c16f94..6f22ba51f969 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -332,7 +332,7 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
return -EINVAL;
dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
- if (!dd->nb_in_sg)
+ if (!dd->nb_out_sg)
return -EINVAL;
dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index a22f1a9f895f..827913d7d33a 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -694,7 +694,7 @@ out_error_dma:
dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
free_dma(crc->dma_ch);
out_error_irq:
- free_irq(crc->irq, crc->dev);
+ free_irq(crc->irq, crc);
out_error_unmap:
iounmap((void *)crc->regs);
out_error_free_mem:
@@ -720,10 +720,10 @@ static int bfin_crypto_crc_remove(struct platform_device *pdev)
crypto_unregister_ahash(&algs);
tasklet_kill(&crc->done_task);
- iounmap((void *)crc->regs);
free_dma(crc->dma_ch);
if (crc->irq > 0)
- free_irq(crc->irq, crc->dev);
+ free_irq(crc->irq, crc);
+ iounmap((void *)crc->regs);
kfree(crc);
return 0;
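
The bfin_crc change matters because free_irq() identifies the handler to remove by its dev_id cookie: it must be the same pointer that was handed to request_irq(), which in this driver is the crc context itself, not crc->dev. Moving iounmap() after free_irq() also ensures the handler can no longer run against unmapped registers. A minimal sketch of the pairing, with a hypothetical handler my_isr:

	ret = request_irq(irq, my_isr, 0, "my-crc", crc);	/* dev_id == crc */

	/* teardown: release the IRQ with the same cookie, then unmap */
	free_irq(irq, crc);
	iounmap(regs);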
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index e66e8ee5a9af..6aa425fe0ed5 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -5,6 +5,7 @@
*
* Copyright (c) 2010 Nokia Corporation
* Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ * Copyright (c) 2011 Texas Instruments Incorporated
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as published
@@ -19,28 +20,39 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
-#include <linux/omap-dma.h>
+#define DST_MAXBURST 4
+#define DMA_MIN (DST_MAXBURST * sizeof(u32))
/* OMAP TRM gives bitfields as start:end, where start is the higher bit
number. For example 7:0 */
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
-#define AES_REG_KEY(x) (0x1C - ((x ^ 0x01) * 0x04))
-#define AES_REG_IV(x) (0x20 + ((x) * 0x04))
+#define AES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
+ ((x ^ 0x01) * 0x04))
+#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
-#define AES_REG_CTRL 0x30
-#define AES_REG_CTRL_CTR_WIDTH (1 << 7)
+#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
+#define AES_REG_CTRL_CTR_WIDTH_MASK (3 << 7)
+#define AES_REG_CTRL_CTR_WIDTH_32 (0 << 7)
+#define AES_REG_CTRL_CTR_WIDTH_64 (1 << 7)
+#define AES_REG_CTRL_CTR_WIDTH_96 (2 << 7)
+#define AES_REG_CTRL_CTR_WIDTH_128 (3 << 7)
#define AES_REG_CTRL_CTR (1 << 6)
#define AES_REG_CTRL_CBC (1 << 5)
#define AES_REG_CTRL_KEY_SIZE (3 << 3)
@@ -48,14 +60,11 @@
#define AES_REG_CTRL_INPUT_READY (1 << 1)
#define AES_REG_CTRL_OUTPUT_READY (1 << 0)
-#define AES_REG_DATA 0x34
-#define AES_REG_DATA_N(x) (0x34 + ((x) * 0x04))
+#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
-#define AES_REG_REV 0x44
-#define AES_REG_REV_MAJOR 0xF0
-#define AES_REG_REV_MINOR 0x0F
+#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs)
-#define AES_REG_MASK 0x48
+#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
#define AES_REG_MASK_SIDLE (1 << 6)
#define AES_REG_MASK_START (1 << 5)
#define AES_REG_MASK_DMA_OUT_EN (1 << 3)
@@ -63,8 +72,7 @@
#define AES_REG_MASK_SOFTRESET (1 << 1)
#define AES_REG_AUTOIDLE (1 << 0)
-#define AES_REG_SYSSTATUS 0x4C
-#define AES_REG_SYSSTATUS_RESETDONE (1 << 0)
+#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
#define DEFAULT_TIMEOUT (5*HZ)
@@ -72,6 +80,7 @@
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
#define FLAGS_GIV BIT(2)
+#define FLAGS_CTR BIT(3)
#define FLAGS_INIT BIT(4)
#define FLAGS_FAST BIT(5)
@@ -92,11 +101,39 @@ struct omap_aes_reqctx {
#define OMAP_AES_QUEUE_LENGTH 1
#define OMAP_AES_CACHE_SIZE 0
+struct omap_aes_algs_info {
+ struct crypto_alg *algs_list;
+ unsigned int size;
+ unsigned int registered;
+};
+
+struct omap_aes_pdata {
+ struct omap_aes_algs_info *algs_info;
+ unsigned int algs_info_size;
+
+ void (*trigger)(struct omap_aes_dev *dd, int length);
+
+ u32 key_ofs;
+ u32 iv_ofs;
+ u32 ctrl_ofs;
+ u32 data_ofs;
+ u32 rev_ofs;
+ u32 mask_ofs;
+
+ u32 dma_enable_in;
+ u32 dma_enable_out;
+ u32 dma_start;
+
+ u32 major_mask;
+ u32 major_shift;
+ u32 minor_mask;
+ u32 minor_shift;
+};
+
struct omap_aes_dev {
struct list_head list;
unsigned long phys_base;
void __iomem *io_base;
- struct clk *iclk;
struct omap_aes_ctx *ctx;
struct device *dev;
unsigned long flags;
@@ -111,20 +148,24 @@ struct omap_aes_dev {
struct ablkcipher_request *req;
size_t total;
struct scatterlist *in_sg;
+ struct scatterlist in_sgl;
size_t in_offset;
struct scatterlist *out_sg;
+ struct scatterlist out_sgl;
size_t out_offset;
size_t buflen;
void *buf_in;
size_t dma_size;
int dma_in;
- int dma_lch_in;
+ struct dma_chan *dma_lch_in;
dma_addr_t dma_addr_in;
void *buf_out;
int dma_out;
- int dma_lch_out;
+ struct dma_chan *dma_lch_out;
dma_addr_t dma_addr_out;
+
+ const struct omap_aes_pdata *pdata;
};
/* keep registered devices data here */
@@ -160,19 +201,6 @@ static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
omap_aes_write(dd, offset, *value);
}
-static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
-{
- unsigned long timeout = jiffies + DEFAULT_TIMEOUT;
-
- while (!(omap_aes_read(dd, offset) & bit)) {
- if (time_is_before_jiffies(timeout)) {
- dev_err(dd->dev, "omap-aes timeout\n");
- return -ETIMEDOUT;
- }
- }
- return 0;
-}
-
static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
/*
@@ -180,23 +208,9 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
* It may be long delays between requests.
* Device might go to off mode to save power.
*/
- clk_enable(dd->iclk);
+ pm_runtime_get_sync(dd->dev);
if (!(dd->flags & FLAGS_INIT)) {
- /* is it necessary to reset before every operation? */
- omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
- AES_REG_MASK_SOFTRESET);
- /*
- * prevent OCP bus error (SRESP) in case an access to the module
- * is performed while the module is coming out of soft reset
- */
- __asm__ __volatile__("nop");
- __asm__ __volatile__("nop");
-
- if (omap_aes_wait(dd, AES_REG_SYSSTATUS,
- AES_REG_SYSSTATUS_RESETDONE))
- return -ETIMEDOUT;
-
dd->flags |= FLAGS_INIT;
dd->err = 0;
}
@@ -208,59 +222,75 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
unsigned int key32;
int i, err;
- u32 val, mask;
+ u32 val, mask = 0;
err = omap_aes_hw_init(dd);
if (err)
return err;
- val = 0;
- if (dd->dma_lch_out >= 0)
- val |= AES_REG_MASK_DMA_OUT_EN;
- if (dd->dma_lch_in >= 0)
- val |= AES_REG_MASK_DMA_IN_EN;
-
- mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;
-
- omap_aes_write_mask(dd, AES_REG_MASK, val, mask);
-
key32 = dd->ctx->keylen / sizeof(u32);
/* it seems a key should always be set even if it has not changed */
for (i = 0; i < key32; i++) {
- omap_aes_write(dd, AES_REG_KEY(i),
+ omap_aes_write(dd, AES_REG_KEY(dd, i),
__le32_to_cpu(dd->ctx->key[i]));
}
- if ((dd->flags & FLAGS_CBC) && dd->req->info)
- omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);
+ if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
+ omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
if (dd->flags & FLAGS_CBC)
val |= AES_REG_CTRL_CBC;
+ if (dd->flags & FLAGS_CTR) {
+ val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32;
+ mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
+ }
if (dd->flags & FLAGS_ENCRYPT)
val |= AES_REG_CTRL_DIRECTION;
- mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
+ mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
AES_REG_CTRL_KEY_SIZE;
- omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);
+ omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);
- /* IN */
- omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
- dd->phys_base + AES_REG_DATA, 0, 4);
+ return 0;
+}
- omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
- omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
+{
+ u32 mask, val;
- /* OUT */
- omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
- dd->phys_base + AES_REG_DATA, 0, 4);
+ val = dd->pdata->dma_start;
- omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
- omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+ if (dd->dma_lch_out != NULL)
+ val |= dd->pdata->dma_enable_out;
+ if (dd->dma_lch_in != NULL)
+ val |= dd->pdata->dma_enable_in;
+
+ mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
+ dd->pdata->dma_start;
+
+ omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
- return 0;
+}
+
+static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
+{
+ omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
+ omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
+
+ omap_aes_dma_trigger_omap2(dd, length);
+}
+
+static void omap_aes_dma_stop(struct omap_aes_dev *dd)
+{
+ u32 mask;
+
+ mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
+ dd->pdata->dma_start;
+
+ omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}
static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
@@ -284,18 +314,10 @@ static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
return dd;
}
-static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
+static void omap_aes_dma_out_callback(void *data)
{
struct omap_aes_dev *dd = data;
- if (ch_status != OMAP_DMA_BLOCK_IRQ) {
- pr_err("omap-aes DMA error status: 0x%hx\n", ch_status);
- dd->err = -EIO;
- dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
- } else if (lch == dd->dma_lch_in) {
- return;
- }
-
/* dma_lch_out - completed */
tasklet_schedule(&dd->done_task);
}
@@ -303,9 +325,10 @@ static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
int err = -ENOMEM;
+ dma_cap_mask_t mask;
- dd->dma_lch_out = -1;
- dd->dma_lch_in = -1;
+ dd->dma_lch_out = NULL;
+ dd->dma_lch_in = NULL;
dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
@@ -334,23 +357,31 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
goto err_map_out;
}
- err = omap_request_dma(dd->dma_in, "omap-aes-rx",
- omap_aes_dma_callback, dd, &dd->dma_lch_in);
- if (err) {
- dev_err(dd->dev, "Unable to request DMA channel\n");
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ dd->dma_lch_in = dma_request_slave_channel_compat(mask,
+ omap_dma_filter_fn,
+ &dd->dma_in,
+ dd->dev, "rx");
+ if (!dd->dma_lch_in) {
+ dev_err(dd->dev, "Unable to request in DMA channel\n");
goto err_dma_in;
}
- err = omap_request_dma(dd->dma_out, "omap-aes-tx",
- omap_aes_dma_callback, dd, &dd->dma_lch_out);
- if (err) {
- dev_err(dd->dev, "Unable to request DMA channel\n");
+
+ dd->dma_lch_out = dma_request_slave_channel_compat(mask,
+ omap_dma_filter_fn,
+ &dd->dma_out,
+ dd->dev, "tx");
+ if (!dd->dma_lch_out) {
+ dev_err(dd->dev, "Unable to request out DMA channel\n");
goto err_dma_out;
}
return 0;
err_dma_out:
- omap_free_dma(dd->dma_lch_in);
+ dma_release_channel(dd->dma_lch_in);
err_dma_in:
dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
DMA_FROM_DEVICE);
@@ -367,8 +398,8 @@ err_alloc:
static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
- omap_free_dma(dd->dma_lch_out);
- omap_free_dma(dd->dma_lch_in);
+ dma_release_channel(dd->dma_lch_out);
+ dma_release_channel(dd->dma_lch_in);
dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
DMA_FROM_DEVICE);
dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
@@ -426,12 +457,15 @@ static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
return off;
}
-static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
- dma_addr_t dma_addr_out, int length)
+static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
+ struct scatterlist *in_sg, struct scatterlist *out_sg)
{
struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
struct omap_aes_dev *dd = ctx->dd;
- int len32;
+ struct dma_async_tx_descriptor *tx_in, *tx_out;
+ struct dma_slave_config cfg;
+ dma_addr_t dma_addr_in = sg_dma_address(in_sg);
+ int ret, length = sg_dma_len(in_sg);
pr_debug("len: %d\n", length);
@@ -441,30 +475,61 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
dma_sync_single_for_device(dd->dev, dma_addr_in, length,
DMA_TO_DEVICE);
- len32 = DIV_ROUND_UP(length, sizeof(u32));
+ memset(&cfg, 0, sizeof(cfg));
+
+ cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
+ cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = DST_MAXBURST;
+ cfg.dst_maxburst = DST_MAXBURST;
/* IN */
- omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32,
- len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in,
- OMAP_DMA_DST_SYNC);
+ ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
+
+ tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx_in) {
+ dev_err(dd->dev, "IN prep_slave_sg() failed\n");
+ return -EINVAL;
+ }
- omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr_in, 0, 0);
+ /* No callback necessary */
+ tx_in->callback_param = dd;
/* OUT */
- omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32,
- len32, 1, OMAP_DMA_SYNC_PACKET,
- dd->dma_out, OMAP_DMA_SRC_SYNC);
+ ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
+
+ tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx_out) {
+ dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
+ return -EINVAL;
+ }
- omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr_out, 0, 0);
+ tx_out->callback = omap_aes_dma_out_callback;
+ tx_out->callback_param = dd;
- omap_start_dma(dd->dma_lch_in);
- omap_start_dma(dd->dma_lch_out);
+ dmaengine_submit(tx_in);
+ dmaengine_submit(tx_out);
- /* start DMA or disable idle mode */
- omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
- AES_REG_MASK_START);
+ dma_async_issue_pending(dd->dma_lch_in);
+ dma_async_issue_pending(dd->dma_lch_out);
+
+ /* start DMA */
+ dd->pdata->trigger(dd, length);
return 0;
}
@@ -476,6 +541,8 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
int err, fast = 0, in, out;
size_t count;
dma_addr_t addr_in, addr_out;
+ struct scatterlist *in_sg, *out_sg;
+ int len32;
pr_debug("total: %d\n", dd->total);
@@ -514,6 +581,9 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
addr_in = sg_dma_address(dd->in_sg);
addr_out = sg_dma_address(dd->out_sg);
+ in_sg = dd->in_sg;
+ out_sg = dd->out_sg;
+
dd->flags |= FLAGS_FAST;
} else {
@@ -521,6 +591,27 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
dd->buflen, dd->total, 0);
+ len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;
+
+ /*
+ * The data going into the AES module has been copied
+ * to a local buffer and the data coming out will go
+ * into a local buffer so set up local SG entries for
+ * both.
+ */
+ sg_init_table(&dd->in_sgl, 1);
+ dd->in_sgl.offset = dd->in_offset;
+ sg_dma_len(&dd->in_sgl) = len32;
+ sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;
+
+ sg_init_table(&dd->out_sgl, 1);
+ dd->out_sgl.offset = dd->out_offset;
+ sg_dma_len(&dd->out_sgl) = len32;
+ sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;
+
+ in_sg = &dd->in_sgl;
+ out_sg = &dd->out_sgl;
+
addr_in = dd->dma_addr_in;
addr_out = dd->dma_addr_out;
@@ -530,7 +621,7 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
dd->total -= count;
- err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);
+ err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
if (err) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
@@ -545,7 +636,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
pr_debug("err: %d\n", err);
- clk_disable(dd->iclk);
+ pm_runtime_put_sync(dd->dev);
dd->flags &= ~FLAGS_BUSY;
req->base.complete(&req->base, err);
@@ -558,10 +649,10 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
pr_debug("total: %d\n", dd->total);
- omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
+ omap_aes_dma_stop(dd);
- omap_stop_dma(dd->dma_lch_in);
- omap_stop_dma(dd->dma_lch_out);
+ dmaengine_terminate_all(dd->dma_lch_in);
+ dmaengine_terminate_all(dd->dma_lch_out);
if (dd->flags & FLAGS_FAST) {
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
@@ -734,6 +825,16 @@ static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
return omap_aes_crypt(req, FLAGS_CBC);
}
+static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+ return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
+}
+
+static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+ return omap_aes_crypt(req, FLAGS_CTR);
+}
+
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
pr_debug("enter\n");
@@ -750,7 +851,7 @@ static void omap_aes_cra_exit(struct crypto_tfm *tfm)
/* ********************** ALGS ************************************ */
-static struct crypto_alg algs[] = {
+static struct crypto_alg algs_ecb_cbc[] = {
{
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-omap",
@@ -798,11 +899,213 @@ static struct crypto_alg algs[] = {
}
};
+static struct crypto_alg algs_ctr[] = {
+{
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-omap",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_aes_cra_init,
+ .cra_exit = omap_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .geniv = "eseqiv",
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ctr_encrypt,
+ .decrypt = omap_aes_ctr_decrypt,
+ }
+} ,
+};
+
+static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
+ {
+ .algs_list = algs_ecb_cbc,
+ .size = ARRAY_SIZE(algs_ecb_cbc),
+ },
+};
+
+static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
+ .algs_info = omap_aes_algs_info_ecb_cbc,
+ .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
+ .trigger = omap_aes_dma_trigger_omap2,
+ .key_ofs = 0x1c,
+ .iv_ofs = 0x20,
+ .ctrl_ofs = 0x30,
+ .data_ofs = 0x34,
+ .rev_ofs = 0x44,
+ .mask_ofs = 0x48,
+ .dma_enable_in = BIT(2),
+ .dma_enable_out = BIT(3),
+ .dma_start = BIT(5),
+ .major_mask = 0xf0,
+ .major_shift = 4,
+ .minor_mask = 0x0f,
+ .minor_shift = 0,
+};
+
+#ifdef CONFIG_OF
+static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
+ {
+ .algs_list = algs_ecb_cbc,
+ .size = ARRAY_SIZE(algs_ecb_cbc),
+ },
+ {
+ .algs_list = algs_ctr,
+ .size = ARRAY_SIZE(algs_ctr),
+ },
+};
+
+static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
+ .algs_info = omap_aes_algs_info_ecb_cbc_ctr,
+ .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
+ .trigger = omap_aes_dma_trigger_omap2,
+ .key_ofs = 0x1c,
+ .iv_ofs = 0x20,
+ .ctrl_ofs = 0x30,
+ .data_ofs = 0x34,
+ .rev_ofs = 0x44,
+ .mask_ofs = 0x48,
+ .dma_enable_in = BIT(2),
+ .dma_enable_out = BIT(3),
+ .dma_start = BIT(5),
+ .major_mask = 0xf0,
+ .major_shift = 4,
+ .minor_mask = 0x0f,
+ .minor_shift = 0,
+};
+
+static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
+ .algs_info = omap_aes_algs_info_ecb_cbc_ctr,
+ .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
+ .trigger = omap_aes_dma_trigger_omap4,
+ .key_ofs = 0x3c,
+ .iv_ofs = 0x40,
+ .ctrl_ofs = 0x50,
+ .data_ofs = 0x60,
+ .rev_ofs = 0x80,
+ .mask_ofs = 0x84,
+ .dma_enable_in = BIT(5),
+ .dma_enable_out = BIT(6),
+ .major_mask = 0x0700,
+ .major_shift = 8,
+ .minor_mask = 0x003f,
+ .minor_shift = 0,
+};
+
+static const struct of_device_id omap_aes_of_match[] = {
+ {
+ .compatible = "ti,omap2-aes",
+ .data = &omap_aes_pdata_omap2,
+ },
+ {
+ .compatible = "ti,omap3-aes",
+ .data = &omap_aes_pdata_omap3,
+ },
+ {
+ .compatible = "ti,omap4-aes",
+ .data = &omap_aes_pdata_omap4,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_aes_of_match);
+
+static int omap_aes_get_res_of(struct omap_aes_dev *dd,
+ struct device *dev, struct resource *res)
+{
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *match;
+ int err = 0;
+
+ match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
+ if (!match) {
+ dev_err(dev, "no compatible OF match\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = of_address_to_resource(node, 0, res);
+ if (err < 0) {
+ dev_err(dev, "can't translate OF node address\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ dd->dma_out = -1; /* Dummy value that's unused */
+ dd->dma_in = -1; /* Dummy value that's unused */
+
+ dd->pdata = match->data;
+
+err:
+ return err;
+}
+#else
+static const struct of_device_id omap_aes_of_match[] = {
+ {},
+};
+
+static int omap_aes_get_res_of(struct omap_aes_dev *dd,
+ struct device *dev, struct resource *res)
+{
+ return -EINVAL;
+}
+#endif
+
+static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
+ struct platform_device *pdev, struct resource *res)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ int err = 0;
+
+ /* Get the base address */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(dev, "no MEM resource info\n");
+ err = -ENODEV;
+ goto err;
+ }
+ memcpy(res, r, sizeof(*res));
+
+ /* Get the DMA out channel */
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r) {
+ dev_err(dev, "no DMA out resource info\n");
+ err = -ENODEV;
+ goto err;
+ }
+ dd->dma_out = r->start;
+
+ /* Get the DMA in channel */
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!r) {
+ dev_err(dev, "no DMA in resource info\n");
+ err = -ENODEV;
+ goto err;
+ }
+ dd->dma_in = r->start;
+
+ /* Only OMAP2/3 can be non-DT */
+ dd->pdata = &omap_aes_pdata_omap2;
+
+err:
+ return err;
+}
+
static int omap_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_aes_dev *dd;
- struct resource *res;
+ struct crypto_alg *algp;
+ struct resource res;
int err = -ENOMEM, i, j;
u32 reg;
@@ -817,49 +1120,31 @@ static int omap_aes_probe(struct platform_device *pdev)
spin_lock_init(&dd->lock);
crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);
- /* Get the base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "invalid resource type\n");
- err = -ENODEV;
- goto err_res;
- }
- dd->phys_base = res->start;
-
- /* Get the DMA */
- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!res)
- dev_info(dev, "no DMA info\n");
- else
- dd->dma_out = res->start;
-
- /* Get the DMA */
- res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!res)
- dev_info(dev, "no DMA info\n");
- else
- dd->dma_in = res->start;
-
- /* Initializing the clock */
- dd->iclk = clk_get(dev, "ick");
- if (IS_ERR(dd->iclk)) {
- dev_err(dev, "clock intialization failed.\n");
- err = PTR_ERR(dd->iclk);
+ err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
+ omap_aes_get_res_pdev(dd, pdev, &res);
+ if (err)
goto err_res;
- }
- dd->io_base = ioremap(dd->phys_base, SZ_4K);
+ dd->io_base = devm_request_and_ioremap(dev, &res);
if (!dd->io_base) {
dev_err(dev, "can't ioremap\n");
err = -ENOMEM;
- goto err_io;
+ goto err_res;
}
+ dd->phys_base = res.start;
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ omap_aes_dma_stop(dd);
+
+ reg = omap_aes_read(dd, AES_REG_REV(dd));
+
+ pm_runtime_put_sync(dev);
- clk_enable(dd->iclk);
- reg = omap_aes_read(dd, AES_REG_REV);
dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
- (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
- clk_disable(dd->iclk);
+ (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
+ (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
@@ -873,26 +1158,32 @@ static int omap_aes_probe(struct platform_device *pdev)
list_add_tail(&dd->list, &dev_list);
spin_unlock(&list_lock);
- for (i = 0; i < ARRAY_SIZE(algs); i++) {
- pr_debug("i: %d\n", i);
- err = crypto_register_alg(&algs[i]);
- if (err)
- goto err_algs;
- }
+ for (i = 0; i < dd->pdata->algs_info_size; i++) {
+ for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+ algp = &dd->pdata->algs_info[i].algs_list[j];
+
+ pr_debug("reg alg: %s\n", algp->cra_name);
+ INIT_LIST_HEAD(&algp->cra_list);
+
+ err = crypto_register_alg(algp);
+ if (err)
+ goto err_algs;
- pr_info("probe() done\n");
+ dd->pdata->algs_info[i].registered++;
+ }
+ }
return 0;
err_algs:
- for (j = 0; j < i; j++)
- crypto_unregister_alg(&algs[j]);
+ for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ crypto_unregister_alg(
+ &dd->pdata->algs_info[i].algs_list[j]);
omap_aes_dma_cleanup(dd);
err_dma:
tasklet_kill(&dd->done_task);
tasklet_kill(&dd->queue_task);
- iounmap(dd->io_base);
-err_io:
- clk_put(dd->iclk);
+ pm_runtime_disable(dev);
err_res:
kfree(dd);
dd = NULL;
@@ -904,7 +1195,7 @@ err_data:
static int omap_aes_remove(struct platform_device *pdev)
{
struct omap_aes_dev *dd = platform_get_drvdata(pdev);
- int i;
+ int i, j;
if (!dd)
return -ENODEV;
@@ -913,33 +1204,52 @@ static int omap_aes_remove(struct platform_device *pdev)
list_del(&dd->list);
spin_unlock(&list_lock);
- for (i = 0; i < ARRAY_SIZE(algs); i++)
- crypto_unregister_alg(&algs[i]);
+ for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ crypto_unregister_alg(
+ &dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
tasklet_kill(&dd->queue_task);
omap_aes_dma_cleanup(dd);
- iounmap(dd->io_base);
- clk_put(dd->iclk);
+ pm_runtime_disable(dd->dev);
kfree(dd);
dd = NULL;
return 0;
}
+#ifdef CONFIG_PM_SLEEP
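+/*
+ * The system sleep handlers simply balance the driver's runtime PM usage
+ * count: drop it on suspend, take it again on resume.
+ */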
+static int omap_aes_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
+static int omap_aes_resume(struct device *dev)
+{
+ pm_runtime_get_sync(dev);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap_aes_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
+};
+
static struct platform_driver omap_aes_driver = {
.probe = omap_aes_probe,
.remove = omap_aes_remove,
.driver = {
.name = "omap-aes",
.owner = THIS_MODULE,
+ .pm = &omap_aes_pm_ops,
+ .of_match_table = omap_aes_of_match,
},
};
static int __init omap_aes_mod_init(void)
{
- pr_info("loading %s driver\n", "omap-aes");
-
return platform_driver_register(&omap_aes_driver);
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 90d34adc2a66..3d1611f5aecf 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -5,6 +5,7 @@
*
* Copyright (c) 2010 Nokia Corporation
* Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ * Copyright (c) 2011 Texas Instruments Incorporated
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as published
@@ -22,12 +23,18 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
@@ -37,16 +44,17 @@
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <linux/omap-dma.h>
-#include <mach/irqs.h>
-
-#define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
-#define SHA_REG_DIN(x) (0x1C + ((x) * 0x04))
-
#define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE 16
-#define SHA_REG_DIGCNT 0x14
+#define DST_MAXBURST 16
+#define DMA_MIN (DST_MAXBURST * sizeof(u32))
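+/*
+ * DMA_MIN is one full destination burst in bytes; DMA transfer lengths
+ * are rounded up to this granularity.
+ */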
+
+#define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04))
+#define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04))
+#define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs)
+
+#define SHA_REG_ODIGEST(x) (0x00 + ((x) * 0x04))
#define SHA_REG_CTRL 0x18
#define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
@@ -56,19 +64,42 @@
#define SHA_REG_CTRL_INPUT_READY (1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
-#define SHA_REG_REV 0x5C
-#define SHA_REG_REV_MAJOR 0xF0
-#define SHA_REG_REV_MINOR 0x0F
+#define SHA_REG_REV(dd) ((dd)->pdata->rev_ofs)
-#define SHA_REG_MASK 0x60
+#define SHA_REG_MASK(dd) ((dd)->pdata->mask_ofs)
#define SHA_REG_MASK_DMA_EN (1 << 3)
#define SHA_REG_MASK_IT_EN (1 << 2)
#define SHA_REG_MASK_SOFTRESET (1 << 1)
#define SHA_REG_AUTOIDLE (1 << 0)
-#define SHA_REG_SYSSTATUS 0x64
+#define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
+#define SHA_REG_MODE 0x44
+#define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7)
+#define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5)
+#define SHA_REG_MODE_CLOSE_HASH (1 << 4)
+#define SHA_REG_MODE_ALGO_CONSTANT (1 << 3)
+#define SHA_REG_MODE_ALGO_MASK (3 << 1)
+#define SHA_REG_MODE_ALGO_MD5_128 (0 << 1)
+#define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1)
+#define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1)
+#define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1)
+
+#define SHA_REG_LENGTH 0x48
+
+#define SHA_REG_IRQSTATUS 0x118
+#define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3)
+#define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
+#define SHA_REG_IRQSTATUS_INPUT_RDY (1 << 1)
+#define SHA_REG_IRQSTATUS_OUTPUT_RDY (1 << 0)
+
+#define SHA_REG_IRQENA 0x11C
+#define SHA_REG_IRQENA_CTX_RDY (1 << 3)
+#define SHA_REG_IRQENA_PARTHASH_RDY (1 << 2)
+#define SHA_REG_IRQENA_INPUT_RDY (1 << 1)
+#define SHA_REG_IRQENA_OUTPUT_RDY (1 << 0)
+
#define DEFAULT_TIMEOUT_INTERVAL HZ
/* mostly device flags */
@@ -79,20 +110,33 @@
#define FLAGS_INIT 4
#define FLAGS_CPU 5
#define FLAGS_DMA_READY 6
+#define FLAGS_AUTO_XOR 7
+#define FLAGS_BE32_SHA1 8
/* context flags */
#define FLAGS_FINUP 16
#define FLAGS_SG 17
-#define FLAGS_SHA1 18
-#define FLAGS_HMAC 19
-#define FLAGS_ERROR 20
-#define OP_UPDATE 1
-#define OP_FINAL 2
+#define FLAGS_MODE_SHIFT 18
+#define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK \
+ << (FLAGS_MODE_SHIFT - 1))
+#define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 \
+ << (FLAGS_MODE_SHIFT - 1))
+#define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 \
+ << (FLAGS_MODE_SHIFT - 1))
+#define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 \
+ << (FLAGS_MODE_SHIFT - 1))
+#define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 \
+ << (FLAGS_MODE_SHIFT - 1))
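+/*
+ * The SHA_REG_MODE_ALGO_* values occupy bits 1-2 of the mode register, so
+ * shifting by (FLAGS_MODE_SHIFT - 1) places the unchanged algorithm
+ * encoding at flag bits 18-19.
+ */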
+#define FLAGS_HMAC 20
+#define FLAGS_ERROR 21
+
+#define OP_UPDATE 1
+#define OP_FINAL 2
#define OMAP_ALIGN_MASK (sizeof(u32)-1)
#define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))
-#define BUFLEN PAGE_SIZE
+#define BUFLEN PAGE_SIZE
struct omap_sham_dev;
@@ -101,7 +145,7 @@ struct omap_sham_reqctx {
unsigned long flags;
unsigned long op;
- u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
+ u8 digest[SHA256_DIGEST_SIZE] OMAP_ALIGNED;
size_t digcnt;
size_t bufcnt;
size_t buflen;
@@ -109,6 +153,7 @@ struct omap_sham_reqctx {
/* walk state */
struct scatterlist *sg;
+ struct scatterlist sgl;
unsigned int offset; /* offset in current sg */
unsigned int total; /* total request */
@@ -117,8 +162,8 @@ struct omap_sham_reqctx {
struct omap_sham_hmac_ctx {
struct crypto_shash *shash;
- u8 ipad[SHA1_MD5_BLOCK_SIZE];
- u8 opad[SHA1_MD5_BLOCK_SIZE];
+ u8 ipad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
+ u8 opad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
};
struct omap_sham_ctx {
@@ -134,22 +179,56 @@ struct omap_sham_ctx {
#define OMAP_SHAM_QUEUE_LENGTH 1
+struct omap_sham_algs_info {
+ struct ahash_alg *algs_list;
+ unsigned int size;
+ unsigned int registered;
+};
+
+struct omap_sham_pdata {
+ struct omap_sham_algs_info *algs_info;
+ unsigned int algs_info_size;
+ unsigned long flags;
+ int digest_size;
+
+ void (*copy_hash)(struct ahash_request *req, int out);
+ void (*write_ctrl)(struct omap_sham_dev *dd, size_t length,
+ int final, int dma);
+ void (*trigger)(struct omap_sham_dev *dd, size_t length);
+ int (*poll_irq)(struct omap_sham_dev *dd);
+ irqreturn_t (*intr_hdlr)(int irq, void *dev_id);
+
+ u32 odigest_ofs;
+ u32 idigest_ofs;
+ u32 din_ofs;
+ u32 digcnt_ofs;
+ u32 rev_ofs;
+ u32 mask_ofs;
+ u32 sysstatus_ofs;
+
+ u32 major_mask;
+ u32 major_shift;
+ u32 minor_mask;
+ u32 minor_shift;
+};
+
struct omap_sham_dev {
struct list_head list;
unsigned long phys_base;
struct device *dev;
void __iomem *io_base;
int irq;
- struct clk *iclk;
spinlock_t lock;
int err;
- int dma;
- int dma_lch;
+ unsigned int dma;
+ struct dma_chan *dma_lch;
struct tasklet_struct done_task;
unsigned long flags;
struct crypto_queue queue;
struct ahash_request *req;
+
+ const struct omap_sham_pdata *pdata;
};
struct omap_sham_drv {
@@ -197,21 +276,44 @@ static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
return 0;
}
-static void omap_sham_copy_hash(struct ahash_request *req, int out)
+static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = ctx->dd;
u32 *hash = (u32 *)ctx->digest;
int i;
- /* MD5 is almost unused. So copy sha1 size to reduce code */
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+ for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
if (out)
- hash[i] = omap_sham_read(ctx->dd,
- SHA_REG_DIGEST(i));
+ hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
else
- omap_sham_write(ctx->dd,
- SHA_REG_DIGEST(i), hash[i]);
+ omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
+ }
+}
+
+static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = ctx->dd;
+ int i;
+
+ if (ctx->flags & BIT(FLAGS_HMAC)) {
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
+ struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ u32 *opad = (u32 *)bctx->opad;
+
+ for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
+ if (out)
+ opad[i] = omap_sham_read(dd,
+ SHA_REG_ODIGEST(i));
+ else
+ omap_sham_write(dd, SHA_REG_ODIGEST(i),
+ opad[i]);
+ }
}
+
+ omap_sham_copy_hash_omap2(req, out);
}
static void omap_sham_copy_ready_hash(struct ahash_request *req)
@@ -219,34 +321,44 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
u32 *in = (u32 *)ctx->digest;
u32 *hash = (u32 *)req->result;
- int i;
+ int i, d, big_endian = 0;
if (!hash)
return;
- if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
- /* SHA1 results are in big endian */
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
+ switch (ctx->flags & FLAGS_MODE_MASK) {
+ case FLAGS_MODE_MD5:
+ d = MD5_DIGEST_SIZE / sizeof(u32);
+ break;
+ case FLAGS_MODE_SHA1:
+ /* OMAP2 SHA1 is big endian */
+ if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
+ big_endian = 1;
+ d = SHA1_DIGEST_SIZE / sizeof(u32);
+ break;
+ case FLAGS_MODE_SHA224:
+ d = SHA224_DIGEST_SIZE / sizeof(u32);
+ break;
+ case FLAGS_MODE_SHA256:
+ d = SHA256_DIGEST_SIZE / sizeof(u32);
+ break;
+ default:
+ d = 0;
+ }
+
+ if (big_endian)
+ for (i = 0; i < d; i++)
hash[i] = be32_to_cpu(in[i]);
- } else {
- /* MD5 results are in little endian */
- for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
+ else
+ for (i = 0; i < d; i++)
hash[i] = le32_to_cpu(in[i]);
- }
}
static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
- clk_enable(dd->iclk);
+ pm_runtime_get_sync(dd->dev);
if (!test_bit(FLAGS_INIT, &dd->flags)) {
- omap_sham_write_mask(dd, SHA_REG_MASK,
- SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
-
- if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
- SHA_REG_SYSSTATUS_RESETDONE))
- return -ETIMEDOUT;
-
set_bit(FLAGS_INIT, &dd->flags);
dd->err = 0;
}
@@ -254,23 +366,23 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
return 0;
}
-static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
int final, int dma)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
u32 val = length << 5, mask;
if (likely(ctx->digcnt))
- omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
+ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
- omap_sham_write_mask(dd, SHA_REG_MASK,
+ omap_sham_write_mask(dd, SHA_REG_MASK(dd),
SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
/*
* Setting ALGO_CONST only for the first iteration
* and CLOSE_HASH only for the last one.
*/
- if (ctx->flags & BIT(FLAGS_SHA1))
+ if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
val |= SHA_REG_CTRL_ALGO;
if (!ctx->digcnt)
val |= SHA_REG_CTRL_ALGO_CONST;
@@ -283,6 +395,81 @@ static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}
+static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
+{
+}
+
+static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
+{
+ return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
+}
+
+static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
+ u32 *value, int count)
+{
+ for (; count--; value++, offset += 4)
+ omap_sham_write(dd, offset, *value);
+}
+
+static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
+ int final, int dma)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ u32 val, mask;
+
+ /*
+ * Setting ALGO_CONST only for the first iteration and
+ * CLOSE_HASH only for the last one. Note that flags mode bits
+ * correspond to algorithm encoding in mode register.
+ */
+ val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1);
+ if (!ctx->digcnt) {
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
+ struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+
+ val |= SHA_REG_MODE_ALGO_CONSTANT;
+
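+		/*
+		 * For HMAC, the first pass loads the key (ipad) into the
+		 * outer digest registers and accounts for it as one hashed
+		 * block.
+		 */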
+ if (ctx->flags & BIT(FLAGS_HMAC)) {
+ val |= SHA_REG_MODE_HMAC_KEY_PROC;
+ omap_sham_write_n(dd, SHA_REG_ODIGEST(0),
+ (u32 *)bctx->ipad,
+ SHA1_BLOCK_SIZE / sizeof(u32));
+ ctx->digcnt += SHA1_BLOCK_SIZE;
+ }
+ }
+
+ if (final) {
+ val |= SHA_REG_MODE_CLOSE_HASH;
+
+ if (ctx->flags & BIT(FLAGS_HMAC))
+ val |= SHA_REG_MODE_HMAC_OUTER_HASH;
+ }
+
+ mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
+ SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
+ SHA_REG_MODE_HMAC_KEY_PROC;
+
+ dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
+ omap_sham_write_mask(dd, SHA_REG_MODE, val, mask);
+ omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
+ omap_sham_write_mask(dd, SHA_REG_MASK(dd),
+ SHA_REG_MASK_IT_EN |
+ (dma ? SHA_REG_MASK_DMA_EN : 0),
+ SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
+}
+
+static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
+{
+ omap_sham_write(dd, SHA_REG_LENGTH, length);
+}
+
+static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
+{
+ return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
+ SHA_REG_IRQSTATUS_INPUT_RDY);
+}
+
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
size_t length, int final)
{
@@ -293,12 +480,13 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
ctx->digcnt, length, final);
- omap_sham_write_ctrl(dd, length, final, 0);
+ dd->pdata->write_ctrl(dd, length, final, 0);
+ dd->pdata->trigger(dd, length);
/* should be non-zero before next lines to disable clocks later */
ctx->digcnt += length;
- if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
+ if (dd->pdata->poll_irq(dd))
return -ETIMEDOUT;
if (final)
@@ -309,30 +497,73 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
len32 = DIV_ROUND_UP(length, sizeof(u32));
for (count = 0; count < len32; count++)
- omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
+ omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]);
return -EINPROGRESS;
}
+static void omap_sham_dma_callback(void *param)
+{
+ struct omap_sham_dev *dd = param;
+
+ set_bit(FLAGS_DMA_READY, &dd->flags);
+ tasklet_schedule(&dd->done_task);
+}
+
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
- size_t length, int final)
+ size_t length, int final, int is_sg)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- int len32;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_slave_config cfg;
+ int len32, ret;
dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
ctx->digcnt, length, final);
- len32 = DIV_ROUND_UP(length, sizeof(u32));
+ memset(&cfg, 0, sizeof(cfg));
+
+ cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_maxburst = DST_MAXBURST;
+
+ ret = dmaengine_slave_config(dd->dma_lch, &cfg);
+ if (ret) {
+ pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
+ return ret;
+ }
- omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
- 1, OMAP_DMA_SYNC_PACKET, dd->dma,
- OMAP_DMA_DST_SYNC_PREFETCH);
+ len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN;
+
+ if (is_sg) {
+ /*
+ * The SG entry passed in may not have the 'length' member
+ * set correctly so use a local SG entry (sgl) with the
+ * proper value for 'length' instead. If this is not done,
+ * the dmaengine may try to DMA the incorrect amount of data.
+ */
+ sg_init_table(&ctx->sgl, 1);
+ ctx->sgl.page_link = ctx->sg->page_link;
+ ctx->sgl.offset = ctx->sg->offset;
+ sg_dma_len(&ctx->sgl) = len32;
+ sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
+
+ tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ } else {
+ tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
+
+ if (!tx) {
+ dev_err(dd->dev, "prep_slave_sg/single() failed\n");
+ return -EINVAL;
+ }
- omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr, 0, 0);
+ tx->callback = omap_sham_dma_callback;
+ tx->callback_param = dd;
- omap_sham_write_ctrl(dd, length, final, 1);
+ dd->pdata->write_ctrl(dd, length, final, 1);
ctx->digcnt += length;
@@ -341,7 +572,10 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
- omap_start_dma(dd->dma_lch);
+ dmaengine_submit(tx);
+ dma_async_issue_pending(dd->dma_lch);
+
+ dd->pdata->trigger(dd, length);
return -EINPROGRESS;
}
@@ -388,6 +622,8 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
struct omap_sham_reqctx *ctx,
size_t length, int final)
{
+ int ret;
+
ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
@@ -397,8 +633,12 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
ctx->flags &= ~BIT(FLAGS_SG);
- /* next call does not fail... so no unmap in the case of error */
- return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
+ ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
+ if (ret != -EINPROGRESS)
+ dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+ DMA_TO_DEVICE);
+
+ return ret;
}
static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
@@ -433,6 +673,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
unsigned int length, final, tail;
struct scatterlist *sg;
+ int ret;
if (!ctx->total)
return 0;
@@ -440,6 +681,15 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
if (ctx->bufcnt || ctx->offset)
return omap_sham_update_dma_slow(dd);
+ /*
+ * Don't use the sg interface when the transfer size is less
+ * than the number of elements in a DMA frame. Otherwise,
+ * the dmaengine infrastructure will calculate that it needs
+ * to transfer 0 frames which ultimately fails.
+ */
+ if (ctx->total < (DST_MAXBURST * sizeof(u32)))
+ return omap_sham_update_dma_slow(dd);
+
dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
ctx->digcnt, ctx->bufcnt, ctx->total);
@@ -477,8 +727,11 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
- /* next call does not fail... so no unmap in the case of error */
- return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
+ ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
+ if (ret != -EINPROGRESS)
+ dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+
+ return ret;
}
static int omap_sham_update_cpu(struct omap_sham_dev *dd)
@@ -497,7 +750,8 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- omap_stop_dma(dd->dma_lch);
+ dmaengine_terminate_all(dd->dma_lch);
+
if (ctx->flags & BIT(FLAGS_SG)) {
dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
if (ctx->sg->length == ctx->offset) {
@@ -539,18 +793,33 @@ static int omap_sham_init(struct ahash_request *req)
dev_dbg(dd->dev, "init: digest size: %d\n",
crypto_ahash_digestsize(tfm));
- if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
- ctx->flags |= BIT(FLAGS_SHA1);
+ switch (crypto_ahash_digestsize(tfm)) {
+ case MD5_DIGEST_SIZE:
+ ctx->flags |= FLAGS_MODE_MD5;
+ break;
+ case SHA1_DIGEST_SIZE:
+ ctx->flags |= FLAGS_MODE_SHA1;
+ break;
+ case SHA224_DIGEST_SIZE:
+ ctx->flags |= FLAGS_MODE_SHA224;
+ break;
+ case SHA256_DIGEST_SIZE:
+ ctx->flags |= FLAGS_MODE_SHA256;
+ break;
+ }
ctx->bufcnt = 0;
ctx->digcnt = 0;
ctx->buflen = BUFLEN;
if (tctx->flags & BIT(FLAGS_HMAC)) {
- struct omap_sham_hmac_ctx *bctx = tctx->base;
+ if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+
+ memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
+ ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
+ }
- memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
- ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
ctx->flags |= BIT(FLAGS_HMAC);
}
@@ -584,7 +853,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err = 0, use_dma = 1;
- if (ctx->bufcnt <= 64)
+ if (ctx->bufcnt <= DMA_MIN)
/* faster to handle last block with cpu */
use_dma = 0;
@@ -627,7 +896,8 @@ static int omap_sham_finish(struct ahash_request *req)
if (ctx->digcnt) {
omap_sham_copy_ready_hash(req);
- if (ctx->flags & BIT(FLAGS_HMAC))
+ if ((ctx->flags & BIT(FLAGS_HMAC)) &&
+ !test_bit(FLAGS_AUTO_XOR, &dd->flags))
err = omap_sham_finish_hmac(req);
}
@@ -642,7 +912,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
struct omap_sham_dev *dd = ctx->dd;
if (!err) {
- omap_sham_copy_hash(req, 1);
+ dd->pdata->copy_hash(req, 1);
if (test_bit(FLAGS_FINAL, &dd->flags))
err = omap_sham_finish(req);
} else {
@@ -652,7 +922,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
/* atomic operation is not needed here */
dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
- clk_disable(dd->iclk);
+
+ pm_runtime_put_sync(dd->dev);
if (req->base.complete)
req->base.complete(&req->base, err);
@@ -699,19 +970,9 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
if (err)
goto err1;
- omap_set_dma_dest_params(dd->dma_lch, 0,
- OMAP_DMA_AMODE_CONSTANT,
- dd->phys_base + SHA_REG_DIN(0), 0, 16);
-
- omap_set_dma_dest_burst_mode(dd->dma_lch,
- OMAP_DMA_DATA_BURST_16);
-
- omap_set_dma_src_burst_mode(dd->dma_lch,
- OMAP_DMA_DATA_BURST_4);
-
if (ctx->digcnt)
/* request has changed - restore hash */
- omap_sham_copy_hash(req, 0);
+ dd->pdata->copy_hash(req, 0);
if (ctx->op == OP_UPDATE) {
err = omap_sham_update_req(dd);
@@ -850,7 +1111,21 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
struct omap_sham_hmac_ctx *bctx = tctx->base;
int bs = crypto_shash_blocksize(bctx->shash);
int ds = crypto_shash_digestsize(bctx->shash);
+ struct omap_sham_dev *dd = NULL, *tmp;
int err, i;
+
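+	/*
+	 * Pick the device this transform will use so its FLAGS_AUTO_XOR
+	 * capability can be checked below.
+	 */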
+ spin_lock_bh(&sham.lock);
+ if (!tctx->dd) {
+ list_for_each_entry(tmp, &sham.dev_list, list) {
+ dd = tmp;
+ break;
+ }
+ tctx->dd = dd;
+ } else {
+ dd = tctx->dd;
+ }
+ spin_unlock_bh(&sham.lock);
+
err = crypto_shash_setkey(tctx->fallback, key, keylen);
if (err)
return err;
@@ -867,11 +1142,14 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
}
memset(bctx->ipad + keylen, 0, bs - keylen);
- memcpy(bctx->opad, bctx->ipad, bs);
- for (i = 0; i < bs; i++) {
- bctx->ipad[i] ^= 0x36;
- bctx->opad[i] ^= 0x5c;
+ if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
+ memcpy(bctx->opad, bctx->ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ bctx->ipad[i] ^= 0x36;
+ bctx->opad[i] ^= 0x5c;
+ }
}
return err;
@@ -921,6 +1199,16 @@ static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
return omap_sham_cra_init_alg(tfm, "sha1");
}
+static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, "sha224");
+}
+
+static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, "sha256");
+}
+
static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
return omap_sham_cra_init_alg(tfm, "md5");
@@ -939,7 +1227,7 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm)
}
}
-static struct ahash_alg algs[] = {
+static struct ahash_alg algs_sha1_md5[] = {
{
.init = omap_sham_init,
.update = omap_sham_update,
@@ -1038,6 +1326,102 @@ static struct ahash_alg algs[] = {
}
};
+/* OMAP4 has some algs in addition to what OMAP2 has */
+static struct ahash_alg algs_sha224_sha256[] = {
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "omap-sha224",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "omap-sha256",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .setkey = omap_sham_setkey,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "omap-hmac-sha224",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx) +
+ sizeof(struct omap_sham_hmac_ctx),
+ .cra_alignmask = OMAP_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_sha224_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .setkey = omap_sham_setkey,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "omap-hmac-sha256",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx) +
+ sizeof(struct omap_sham_hmac_ctx),
+ .cra_alignmask = OMAP_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_sha256_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+};
+
static void omap_sham_done_task(unsigned long data)
{
struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
@@ -1076,7 +1460,19 @@ finish:
omap_sham_finish_req(dd->req, err);
}
-static irqreturn_t omap_sham_irq(int irq, void *dev_id)
+static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
+{
+ if (!test_bit(FLAGS_BUSY, &dd->flags)) {
+ dev_warn(dd->dev, "Interrupt when no active requests.\n");
+ } else {
+ set_bit(FLAGS_OUTPUT_READY, &dd->flags);
+ tasklet_schedule(&dd->done_task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
{
struct omap_sham_dev *dd = dev_id;
@@ -1088,61 +1484,188 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
SHA_REG_CTRL_OUTPUT_READY);
omap_sham_read(dd, SHA_REG_CTRL);
- if (!test_bit(FLAGS_BUSY, &dd->flags)) {
- dev_warn(dd->dev, "Interrupt when no active requests.\n");
- return IRQ_HANDLED;
- }
+ return omap_sham_irq_common(dd);
+}
- set_bit(FLAGS_OUTPUT_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
+static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
+{
+ struct omap_sham_dev *dd = dev_id;
- return IRQ_HANDLED;
+ omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
+
+ return omap_sham_irq_common(dd);
}
-static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
+static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
+ {
+ .algs_list = algs_sha1_md5,
+ .size = ARRAY_SIZE(algs_sha1_md5),
+ },
+};
+
+static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
+ .algs_info = omap_sham_algs_info_omap2,
+ .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap2),
+ .flags = BIT(FLAGS_BE32_SHA1),
+ .digest_size = SHA1_DIGEST_SIZE,
+ .copy_hash = omap_sham_copy_hash_omap2,
+ .write_ctrl = omap_sham_write_ctrl_omap2,
+ .trigger = omap_sham_trigger_omap2,
+ .poll_irq = omap_sham_poll_irq_omap2,
+ .intr_hdlr = omap_sham_irq_omap2,
+ .idigest_ofs = 0x00,
+ .din_ofs = 0x1c,
+ .digcnt_ofs = 0x14,
+ .rev_ofs = 0x5c,
+ .mask_ofs = 0x60,
+ .sysstatus_ofs = 0x64,
+ .major_mask = 0xf0,
+ .major_shift = 4,
+ .minor_mask = 0x0f,
+ .minor_shift = 0,
+};
+
+#ifdef CONFIG_OF
+static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
+ {
+ .algs_list = algs_sha1_md5,
+ .size = ARRAY_SIZE(algs_sha1_md5),
+ },
+ {
+ .algs_list = algs_sha224_sha256,
+ .size = ARRAY_SIZE(algs_sha224_sha256),
+ },
+};
+
+static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
+ .algs_info = omap_sham_algs_info_omap4,
+ .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap4),
+ .flags = BIT(FLAGS_AUTO_XOR),
+ .digest_size = SHA256_DIGEST_SIZE,
+ .copy_hash = omap_sham_copy_hash_omap4,
+ .write_ctrl = omap_sham_write_ctrl_omap4,
+ .trigger = omap_sham_trigger_omap4,
+ .poll_irq = omap_sham_poll_irq_omap4,
+ .intr_hdlr = omap_sham_irq_omap4,
+ .idigest_ofs = 0x020,
+ .din_ofs = 0x080,
+ .digcnt_ofs = 0x040,
+ .rev_ofs = 0x100,
+ .mask_ofs = 0x110,
+ .sysstatus_ofs = 0x114,
+ .major_mask = 0x0700,
+ .major_shift = 8,
+ .minor_mask = 0x003f,
+ .minor_shift = 0,
+};
+
+static const struct of_device_id omap_sham_of_match[] = {
+ {
+ .compatible = "ti,omap2-sham",
+ .data = &omap_sham_pdata_omap2,
+ },
+ {
+ .compatible = "ti,omap4-sham",
+ .data = &omap_sham_pdata_omap4,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_sham_of_match);
+
+static int omap_sham_get_res_of(struct omap_sham_dev *dd,
+ struct device *dev, struct resource *res)
{
- struct omap_sham_dev *dd = data;
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *match;
+ int err = 0;
- if (ch_status != OMAP_DMA_BLOCK_IRQ) {
- pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
- dd->err = -EIO;
- clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */
+ match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
+ if (!match) {
+ dev_err(dev, "no compatible OF match\n");
+ err = -EINVAL;
+ goto err;
}
- set_bit(FLAGS_DMA_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
+ err = of_address_to_resource(node, 0, res);
+ if (err < 0) {
+ dev_err(dev, "can't translate OF node address\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ dd->irq = of_irq_to_resource(node, 0, NULL);
+ if (!dd->irq) {
+ dev_err(dev, "can't translate OF irq value\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ dd->dma = -1; /* Dummy value that's unused */
+ dd->pdata = match->data;
+
+err:
+ return err;
}
+#else
+static const struct of_device_id omap_sham_of_match[] = {
+ {},
+};
-static int omap_sham_dma_init(struct omap_sham_dev *dd)
+static int omap_sham_get_res_of(struct omap_sham_dev *dd,
+ struct device *dev, struct resource *res)
{
- int err;
+ return -EINVAL;
+}
+#endif
- dd->dma_lch = -1;
+static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
+ struct platform_device *pdev, struct resource *res)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ int err = 0;
- err = omap_request_dma(dd->dma, dev_name(dd->dev),
- omap_sham_dma_callback, dd, &dd->dma_lch);
- if (err) {
- dev_err(dd->dev, "Unable to request DMA channel\n");
- return err;
+ /* Get the base address */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(dev, "no MEM resource info\n");
+ err = -ENODEV;
+ goto err;
}
+ memcpy(res, r, sizeof(*res));
- return 0;
-}
+ /* Get the IRQ */
+ dd->irq = platform_get_irq(pdev, 0);
+ if (dd->irq < 0) {
+ dev_err(dev, "no IRQ resource info\n");
+ err = dd->irq;
+ goto err;
+ }
-static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
-{
- if (dd->dma_lch >= 0) {
- omap_free_dma(dd->dma_lch);
- dd->dma_lch = -1;
+ /* Get the DMA */
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r) {
+ dev_err(dev, "no DMA resource info\n");
+ err = -ENODEV;
+ goto err;
}
+ dd->dma = r->start;
+
+ /* Only OMAP2/3 can be non-DT */
+ dd->pdata = &omap_sham_pdata_omap2;
+
+err:
+ return err;
}
static int omap_sham_probe(struct platform_device *pdev)
{
struct omap_sham_dev *dd;
struct device *dev = &pdev->dev;
- struct resource *res;
+ struct resource res;
+ dma_cap_mask_t mask;
int err, i, j;
+ u32 rev;
dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
if (dd == NULL) {
@@ -1158,89 +1681,75 @@ static int omap_sham_probe(struct platform_device *pdev)
tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
- dd->irq = -1;
-
- /* Get the base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "no MEM resource info\n");
- err = -ENODEV;
- goto res_err;
- }
- dd->phys_base = res->start;
-
- /* Get the DMA */
- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!res) {
- dev_err(dev, "no DMA resource info\n");
- err = -ENODEV;
+ err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
+ omap_sham_get_res_pdev(dd, pdev, &res);
+ if (err)
goto res_err;
- }
- dd->dma = res->start;
- /* Get the IRQ */
- dd->irq = platform_get_irq(pdev, 0);
- if (dd->irq < 0) {
- dev_err(dev, "no IRQ resource info\n");
- err = dd->irq;
+ dd->io_base = devm_request_and_ioremap(dev, &res);
+ if (!dd->io_base) {
+ dev_err(dev, "can't ioremap\n");
+ err = -ENOMEM;
goto res_err;
}
+ dd->phys_base = res.start;
- err = request_irq(dd->irq, omap_sham_irq,
- IRQF_TRIGGER_LOW, dev_name(dev), dd);
+ err = request_irq(dd->irq, dd->pdata->intr_hdlr, IRQF_TRIGGER_LOW,
+ dev_name(dev), dd);
if (err) {
dev_err(dev, "unable to request irq.\n");
goto res_err;
}
- err = omap_sham_dma_init(dd);
- if (err)
- goto dma_err;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
- /* Initializing the clock */
- dd->iclk = clk_get(dev, "ick");
- if (IS_ERR(dd->iclk)) {
- dev_err(dev, "clock intialization failed.\n");
- err = PTR_ERR(dd->iclk);
- goto clk_err;
+ dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+ &dd->dma, dev, "rx");
+ if (!dd->dma_lch) {
+ dev_err(dev, "unable to obtain RX DMA engine channel %u\n",
+ dd->dma);
+ err = -ENXIO;
+ goto dma_err;
}
- dd->io_base = ioremap(dd->phys_base, SZ_4K);
- if (!dd->io_base) {
- dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
- goto io_err;
- }
+ dd->flags |= dd->pdata->flags;
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+ rev = omap_sham_read(dd, SHA_REG_REV(dd));
+ pm_runtime_put_sync(&pdev->dev);
- clk_enable(dd->iclk);
dev_info(dev, "hw accel on OMAP rev %u.%u\n",
- (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
- omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
- clk_disable(dd->iclk);
+ (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
+ (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
spin_lock(&sham.lock);
list_add_tail(&dd->list, &sham.dev_list);
spin_unlock(&sham.lock);
- for (i = 0; i < ARRAY_SIZE(algs); i++) {
- err = crypto_register_ahash(&algs[i]);
- if (err)
- goto err_algs;
+ for (i = 0; i < dd->pdata->algs_info_size; i++) {
+ for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+ err = crypto_register_ahash(
+ &dd->pdata->algs_info[i].algs_list[j]);
+ if (err)
+ goto err_algs;
+
+ dd->pdata->algs_info[i].registered++;
+ }
}
return 0;
err_algs:
- for (j = 0; j < i; j++)
- crypto_unregister_ahash(&algs[j]);
- iounmap(dd->io_base);
-io_err:
- clk_put(dd->iclk);
-clk_err:
- omap_sham_dma_cleanup(dd);
+ for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ crypto_unregister_ahash(
+ &dd->pdata->algs_info[i].algs_list[j]);
+ pm_runtime_disable(dev);
+ dma_release_channel(dd->dma_lch);
dma_err:
- if (dd->irq >= 0)
- free_irq(dd->irq, dd);
+ free_irq(dd->irq, dd);
res_err:
kfree(dd);
dd = NULL;
@@ -1253,7 +1762,7 @@ data_err:
static int omap_sham_remove(struct platform_device *pdev)
{
static struct omap_sham_dev *dd;
- int i;
+ int i, j;
dd = platform_get_drvdata(pdev);
if (!dd)
@@ -1261,33 +1770,51 @@ static int omap_sham_remove(struct platform_device *pdev)
spin_lock(&sham.lock);
list_del(&dd->list);
spin_unlock(&sham.lock);
- for (i = 0; i < ARRAY_SIZE(algs); i++)
- crypto_unregister_ahash(&algs[i]);
+ for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ crypto_unregister_ahash(
+ &dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
- iounmap(dd->io_base);
- clk_put(dd->iclk);
- omap_sham_dma_cleanup(dd);
- if (dd->irq >= 0)
- free_irq(dd->irq, dd);
+ pm_runtime_disable(&pdev->dev);
+ dma_release_channel(dd->dma_lch);
+ free_irq(dd->irq, dd);
kfree(dd);
dd = NULL;
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int omap_sham_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
+static int omap_sham_resume(struct device *dev)
+{
+ pm_runtime_get_sync(dev);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap_sham_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume)
+};
+
static struct platform_driver omap_sham_driver = {
.probe = omap_sham_probe,
.remove = omap_sham_remove,
.driver = {
.name = "omap-sham",
.owner = THIS_MODULE,
+ .pm = &omap_sham_pm_ops,
+ .of_match_table = omap_sham_of_match,
},
};
static int __init omap_sham_mod_init(void)
{
- pr_info("loading %s driver\n", "omap-sham");
-
return platform_driver_register(&omap_sham_driver);
}
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 49ad8cbade69..4b314326f48a 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -580,7 +580,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
resource_size(res), pdev->name))
return -EBUSY;
- pdata->clk = clk_get(dev, "secss");
+ pdata->clk = devm_clk_get(dev, "secss");
if (IS_ERR(pdata->clk)) {
dev_err(dev, "failed to find secss clock source\n");
return -ENOENT;
@@ -645,7 +645,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
err_irq:
clk_disable(pdata->clk);
- clk_put(pdata->clk);
s5p_dev = NULL;
platform_set_drvdata(pdev, NULL);
@@ -667,7 +666,6 @@ static int s5p_aes_remove(struct platform_device *pdev)
tasklet_kill(&pdata->tasklet);
clk_disable(pdata->clk);
- clk_put(pdata->clk);
s5p_dev = NULL;
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index bc6f5faa1e9e..819dfda88236 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -420,6 +420,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
raw_spin_lock_irqsave(&dca_lock, flags);
+ if (list_empty(&dca_domains)) {
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return;
+ }
+
list_del(&dca->node);
pci_rc = dca_pci_rc_from_dev(dev);
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index 591b6597c00a..126cf295b198 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -53,22 +53,19 @@ void dca_sysfs_remove_req(struct dca_provider *dca, int slot)
int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev)
{
struct device *cd;
- int err = 0;
+ int ret;
-idr_try_again:
- if (!idr_pre_get(&dca_idr, GFP_KERNEL))
- return -ENOMEM;
+ idr_preload(GFP_KERNEL);
spin_lock(&dca_idr_lock);
- err = idr_get_new(&dca_idr, dca, &dca->id);
+
+ ret = idr_alloc(&dca_idr, dca, 0, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ dca->id = ret;
+
spin_unlock(&dca_idr_lock);
- switch (err) {
- case 0:
- break;
- case -EAGAIN:
- goto idr_try_again;
- default:
- return err;
- }
+ idr_preload_end();
+ if (ret < 0)
+ return ret;
cd = device_create(dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id);
if (IS_ERR(cd)) {
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
index 46d94e9e95b5..3f37f3b3f268 100644
--- a/drivers/devfreq/exynos4_bus.c
+++ b/drivers/devfreq/exynos4_bus.c
@@ -658,7 +658,7 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
if (old_freq == freq)
return 0;
- dev_dbg(dev, "targetting %lukHz %luuV\n", freq, new_oppinfo.volt);
+ dev_dbg(dev, "targeting %lukHz %luuV\n", freq, new_oppinfo.volt);
mutex_lock(&data->lock);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d4c12180c654..80b69971cf28 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -51,7 +51,7 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH
config AMBA_PL08X
bool "ARM PrimeCell PL080 or PL081 support"
- depends on ARM_AMBA && EXPERIMENTAL
+ depends on ARM_AMBA
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -83,7 +83,6 @@ config INTEL_IOP_ADMA
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA support"
- depends on HAVE_CLK
select DMA_ENGINE
default y if CPU_AT32AP7000
help
@@ -125,6 +124,8 @@ config MPC512X_DMA
---help---
Enable support for the Freescale MPC512x built-in DMA engine.
+source "drivers/dma/bestcomm/Kconfig"
+
config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
@@ -213,8 +214,8 @@ config TIMB_DMA
Enable support for the Timberdale FPGA DMA engine.
config SIRF_DMA
- tristate "CSR SiRFprimaII DMA support"
- depends on ARCH_PRIMA2
+ tristate "CSR SiRFprimaII/SiRFmarco DMA support"
+ depends on ARCH_SIRF
select DMA_ENGINE
help
Enable support for the CSR SiRFprimaII DMA engine.
@@ -326,6 +327,10 @@ config DMA_ENGINE
config DMA_VIRTUAL_CHANNELS
tristate
+config DMA_OF
+ def_bool y
+ depends on OF
+
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7428feaa8705..488e3ff85b52 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -3,6 +3,8 @@ ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
+obj-$(CONFIG_DMA_OF) += of-dma.o
+
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
@@ -10,6 +12,7 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
+obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index d1cc5791476b..8bad254a498d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,7 +83,7 @@
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <asm/hardware/pl080.h>
+#include <linux/amba/pl080.h>
#include "dmaengine.h"
#include "virt-dma.h"
@@ -1096,15 +1096,9 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
struct pl08x_dma_chan *plchan)
{
LIST_HEAD(head);
- struct pl08x_txd *txd;
vchan_get_all_descriptors(&plchan->vc, &head);
-
- while (!list_empty(&head)) {
- txd = list_first_entry(&head, struct pl08x_txd, vd.node);
- list_del(&txd->vd.node);
- pl08x_desc_free(&txd->vd);
- }
+ vchan_dma_desc_free_list(&plchan->vc, &head);
}
/*
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 13a02f4425b0..6e13f262139a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -778,7 +778,7 @@ err:
*/
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len)
{
if (period_len > (ATC_BTSIZE_MAX << reg_width))
goto err_out;
@@ -786,8 +786,6 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
goto err_out;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto err_out;
- if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
- goto err_out;
return 0;
@@ -886,14 +884,16 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
return NULL;
}
+ if (unlikely(!is_slave_direction(direction)))
+ goto err_out;
+
if (sconfig->direction == DMA_MEM_TO_DEV)
reg_width = convert_buswidth(sconfig->dst_addr_width);
else
reg_width = convert_buswidth(sconfig->src_addr_width);
/* Check for too big/unaligned periods and unaligned DMA buffer */
- if (atc_dma_cyclic_check_values(reg_width, buf_addr,
- period_len, direction))
+ if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
goto err_out;
/* build cyclic linked list */
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 116e4adffb08..0eb3c1388667 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -369,10 +369,10 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
{
- dev_printk(KERN_CRIT, chan2dev(&atchan->chan_common),
- " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
- lli->saddr, lli->daddr,
- lli->ctrla, lli->ctrlb, lli->dscr);
+ dev_crit(chan2dev(&atchan->chan_common),
+ " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
+ lli->saddr, lli->daddr,
+ lli->ctrla, lli->ctrlb, lli->dscr);
}
diff --git a/drivers/dma/bestcomm/Kconfig b/drivers/dma/bestcomm/Kconfig
new file mode 100644
index 000000000000..29e427085efb
--- /dev/null
+++ b/drivers/dma/bestcomm/Kconfig
@@ -0,0 +1,36 @@
+#
+# Kconfig options for Bestcomm
+#
+
+config PPC_BESTCOMM
+ tristate "Bestcomm DMA engine support"
+ depends on PPC_MPC52xx
+ default n
+ select PPC_LIB_RHEAP
+ help
+ BestComm is the name of the communication coprocessor found
+ on the Freescale MPC5200 family of processor. Its usage is
+ optional for some drivers (like ATA), but required for
+ others (like FEC).
+
+ If you want to use drivers that require DMA operations,
+ answer Y or M. Otherwise say N.
+
+config PPC_BESTCOMM_ATA
+ tristate
+ depends on PPC_BESTCOMM
+ help
+ This option enables the support for the ATA task.
+
+config PPC_BESTCOMM_FEC
+ tristate
+ depends on PPC_BESTCOMM
+ help
+ This option enables the support for the FEC tasks.
+
+config PPC_BESTCOMM_GEN_BD
+ tristate
+ depends on PPC_BESTCOMM
+ help
+ This option enables the support for the GenBD tasks.
+
diff --git a/drivers/dma/bestcomm/Makefile b/drivers/dma/bestcomm/Makefile
new file mode 100644
index 000000000000..aed2df2a6580
--- /dev/null
+++ b/drivers/dma/bestcomm/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for BestComm & co
+#
+
+bestcomm-core-objs := bestcomm.o sram.o
+bestcomm-ata-objs := ata.o bcom_ata_task.o
+bestcomm-fec-objs := fec.o bcom_fec_rx_task.o bcom_fec_tx_task.o
+bestcomm-gen-bd-objs := gen_bd.o bcom_gen_bd_rx_task.o bcom_gen_bd_tx_task.o
+
+obj-$(CONFIG_PPC_BESTCOMM) += bestcomm-core.o
+obj-$(CONFIG_PPC_BESTCOMM_ATA) += bestcomm-ata.o
+obj-$(CONFIG_PPC_BESTCOMM_FEC) += bestcomm-fec.o
+obj-$(CONFIG_PPC_BESTCOMM_GEN_BD) += bestcomm-gen-bd.o
+
diff --git a/drivers/dma/bestcomm/ata.c b/drivers/dma/bestcomm/ata.c
new file mode 100644
index 000000000000..2fd87f83cf90
--- /dev/null
+++ b/drivers/dma/bestcomm/ata.c
@@ -0,0 +1,157 @@
+/*
+ * Bestcomm ATA task driver
+ *
+ *
+ * Patterned after bestcomm/fec.c by Dale Farnsworth <dfarnsworth@mvista.com>
+ * 2003-2004 (c) MontaVista, Software, Inc.
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2006 Freescale - John Rigby
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/ata.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* ata task image */
+extern u32 bcom_ata_task[];
+
+/* ata task vars that need to be set before enabling the task */
+struct bcom_ata_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* ata task incs that need to be set before enabling the task */
+struct bcom_ata_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+ u16 pad2;
+ s16 incr_src;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_ata_init(int queue_len, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_ata_var *var;
+ struct bcom_ata_inc *inc;
+
+ /* Prefetch breaks ATA DMA. Turn it off for ATA DMA */
+ bcom_disable_prefetch();
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0);
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ bcom_ata_reset_bd(tsk);
+
+ var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_ata_task)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = maxbufsize;
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_ATA_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_RX], BCOM_IPR_ATA_RX);
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_TX], BCOM_IPR_ATA_TX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_ata_init);
+
+void bcom_ata_rx_prepare(struct bcom_task *tsk)
+{
+ struct bcom_ata_inc *inc;
+
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = 0;
+ inc->incr_dst = sizeof(u32);
+
+ bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_RX);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_rx_prepare);
+
+void bcom_ata_tx_prepare(struct bcom_task *tsk)
+{
+ struct bcom_ata_inc *inc;
+
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = sizeof(u32);
+ inc->incr_dst = 0;
+
+ bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_TX);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_tx_prepare);
+
+void bcom_ata_reset_bd(struct bcom_task *tsk)
+{
+ struct bcom_ata_var *var;
+
+ /* Reset all BD */
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
+ var->bd_start = var->bd_base;
+}
+EXPORT_SYMBOL_GPL(bcom_ata_reset_bd);
+
+void bcom_ata_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the ATA tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_release);
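+
+/*
+ * Illustrative call sequence (a sketch only, not taken from an actual
+ * caller; queue_len and maxbufsize are placeholders):
+ *
+ *	tsk = bcom_ata_init(queue_len, maxbufsize);
+ *	if (!tsk)
+ *		return -ENOMEM;
+ *	bcom_ata_rx_prepare(tsk);	(or bcom_ata_tx_prepare() for writes)
+ *	... queue buffer descriptors, then bcom_enable(tsk) ...
+ *	bcom_ata_release(tsk);
+ */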
+
+
+MODULE_DESCRIPTION("BestComm ATA task driver");
+MODULE_AUTHOR("John Rigby");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/dma/bestcomm/bcom_ata_task.c b/drivers/dma/bestcomm/bcom_ata_task.c
new file mode 100644
index 000000000000..cc6049a4e469
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_ata_task.c
@@ -0,0 +1,67 @@
+/*
+ * Bestcomm ATA task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Created based on bestcom/code_dma/image_rtos1/dma_image.hex
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
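+
+/*
+ * Decoding sketch (assuming the usual big-endian byte packing of the
+ * header fields): the second word below, 0x0e060709, gives
+ * desc_size = 0x0e (the 14 task descriptors), var_size = 0x06
+ * (VAR[9]..VAR[14]), inc_size = 0x07 (INC[0]..INC[6]) and
+ * first_var = 0x09.
+ */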
+
+u32 bcom_ata_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0e060709,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x8198009b, /* LCD: idx0 = var3; idx0 <= var2; idx0 += inc3 */
+ 0x13e00c08, /* DRD1A: var3 = var1; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb8000264, /* LCD: idx1 = *idx0, idx2 = var0; idx1 < var9; idx1 += inc4, idx2 += inc4 */
+ 0x10000f00, /* DRD1A: var3 = idx0; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0c8cfc8a, /* DRD2B1: *idx2 = EU3(); EU3(*idx2,var10) */
+ 0xd8988240, /* LCDEXT: idx1 = idx1; idx1 > var9; idx1 += inc0 */
+ 0xf845e011, /* LCDEXT: idx2 = *(idx0 + var00000015); ; idx2 += inc2 */
+ 0xb845e00a, /* LCD: idx3 = *(idx0 + var00000019); ; idx3 += inc1 */
+ 0x0bfecf90, /* DRD1A: *idx3 = *idx2; FN=0 TFD init=31 WS=3 RS=3 */
+ 0x9898802d, /* LCD: idx1 = idx1; idx1 once var0; idx1 += inc5 */
+ 0x64000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 INT EXT init=0 WS=0 RS=0 */
+ 0x0c0cf849, /* DRD2B1: *idx0 = EU3(); EU3(idx1,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[14] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa000000c,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_fec_rx_task.c b/drivers/dma/bestcomm/bcom_fec_rx_task.c
new file mode 100644
index 000000000000..a1ad6a02fcef
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_fec_rx_task.c
@@ -0,0 +1,78 @@
+/*
+ * Bestcomm FEC RX task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 22 11:19:38 2005 GMT
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_fec_rx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x18060709,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x808220e3, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
+ 0x10601010, /* DRD1A: var4 = var2; FN=0 MORE init=3 WS=0 RS=0 */
+ 0xb8800264, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc4, idx3 += inc4 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0xb8c58029, /* LCD: idx3 = *(idx1 + var00000015); idx3 once var0; idx3 += inc5 */
+ 0x60000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=0 RS=0 */
+ 0x088cf8cc, /* DRD2B1: idx2 = EU3(); EU3(idx3,var12) */
+ 0x991982f2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var11; idx2 += inc6, idx3 += inc2 */
+ 0x006acf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=1 RS=1 */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x034cfc4e, /* DRD2B1: var13 = EU3(); EU3(*idx1,var14) */
+ 0x00008868, /* DRD1A: idx2 = var13; FN=0 init=0 WS=0 RS=0 */
+ 0x99198341, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var13; idx2 += inc0, idx3 += inc1 */
+ 0x007ecf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=3 RS=3 */
+ 0x99198272, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc6, idx3 += inc2 */
+ 0x046acf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=3 WS=1 RS=1 */
+ 0x9819002d, /* LCD: idx2 = idx0; idx2 once var0; idx2 += inc5 */
+ 0x0060c790, /* DRD1A: *idx1 = *idx2; FN=0 init=3 WS=0 RS=0 */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[14] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000003,
+ 0x40000008,
+ 0x43ffffff,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x00000000,
+ 0x4000ffff,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_fec_tx_task.c b/drivers/dma/bestcomm/bcom_fec_tx_task.c
new file mode 100644
index 000000000000..b1c495c3a65a
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_fec_tx_task.c
@@ -0,0 +1,91 @@
+/*
+ * Bestcomm FEC TX task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 22 11:19:29 2005 GMT
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_fec_tx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x2407070d,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x8018001b, /* LCD: idx0 = var0; idx0 <= var0; idx0 += inc3 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x01ccfc0d, /* DRD2B1: var7 = EU3(); EU3(*idx0,var13) */
+ 0x8082a123, /* LCD: idx0 = var1, idx1 = var5; idx1 <= var4; idx0 += inc4, idx1 += inc3 */
+ 0x10801418, /* DRD1A: var5 = var3; FN=0 MORE init=4 WS=0 RS=0 */
+ 0xf88103a4, /* LCDEXT: idx2 = *idx1, idx3 = var2; idx2 < var14; idx2 += inc4, idx3 += inc4 */
+ 0x801a6024, /* LCD: idx4 = var0; ; idx4 += inc4 */
+ 0x10001708, /* DRD1A: var5 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfccf, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var15) */
+ 0x991a002c, /* LCD: idx2 = idx2, idx3 = idx4; idx2 once var0; idx2 += inc5, idx3 += inc4 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x024cfc4d, /* DRD2B1: var9 = EU3(); EU3(*idx1,var13) */
+ 0x60000003, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=3 EXT init=0 WS=0 RS=0 */
+ 0x0cccf247, /* DRD2B1: *idx3 = EU3(); EU3(var9,var7) */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0xb8c80029, /* LCD: idx3 = *(idx1 + var0000001a); idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x088cf8d1, /* DRD2B1: idx2 = EU3(); EU3(idx3,var17) */
+ 0x00002f10, /* DRD1A: var11 = idx2; FN=0 init=0 WS=0 RS=0 */
+ 0x99198432, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var16; idx2 += inc6, idx3 += inc2 */
+ 0x008ac398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=1 RS=1 */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x048cfc53, /* DRD2B1: var18 = EU3(); EU3(*idx1,var19) */
+ 0x60000008, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=8 EXT init=0 WS=0 RS=0 */
+ 0x088cf48b, /* DRD2B1: idx2 = EU3(); EU3(var18,var11) */
+ 0x99198481, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var18; idx2 += inc0, idx3 += inc1 */
+ 0x009ec398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=3 RS=3 */
+ 0x991983b2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var14; idx2 += inc6, idx3 += inc2 */
+ 0x088ac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD init=4 WS=1 RS=1 */
+ 0x9919002d, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc5 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf88e, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var14) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[13]-VAR[19] */
+ 0x0c000000,
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000003,
+ 0x40000004,
+ 0x43ffffff,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x00000000,
+ 0x4000ffff,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c b/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c
new file mode 100644
index 000000000000..efee022b0256
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c
@@ -0,0 +1,63 @@
+/*
+ * Bestcomm GenBD RX task microcode
+ *
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 4 10:14:12 2006 GMT
+ *
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_gen_bd_rx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0d020409,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x808220da, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc3, idx1 += inc2 */
+ 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb880025b, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc3, idx3 += inc3 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0xd9190240, /* LCDEXT: idx2 = idx2; idx2 > var9; idx2 += inc0 */
+ 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */
+ 0x07fecf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=31 WS=3 RS=3 */
+ 0x99190024, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc4 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[10] */
+ 0x40000000,
+ 0x7fff7fff,
+
+ /* INC[0]-INC[3] */
+ 0x40000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c b/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c
new file mode 100644
index 000000000000..c605aa42ecbb
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c
@@ -0,0 +1,69 @@
+/*
+ * Bestcomm GenBD TX task microcode
+ *
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 4 10:14:12 2006 GMT
+ *
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_gen_bd_tx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0f040609,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x800220e3, /* LCD: idx0 = var0, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
+ 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb8808264, /* LCD: idx2 = *idx1, idx3 = var1; idx2 < var9; idx2 += inc4, idx3 += inc4 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0xd9190300, /* LCDEXT: idx2 = idx2; idx2 > var12; idx2 += inc0 */
+ 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */
+ 0x03fec398, /* DRD1A: *idx0 = *idx3; FN=0 init=31 WS=3 RS=3 */
+ 0x9919826a, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc5, idx3 += inc2 */
+ 0x0feac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD INT init=31 WS=1 RS=1 */
+ 0x99190036, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc6 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[12] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x40000004,
+
+ /* INC[0]-INC[5] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x4000ffff,
+};
+
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
new file mode 100644
index 000000000000..a8c2e2994d2e
--- /dev/null
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -0,0 +1,531 @@
+/*
+ * Driver for MPC52xx processor BestComm peripheral controller
+ *
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2005 Varma Electronics Oy,
+ * ( by Andrey Volkov <avolkov@varma-el.com> )
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mpc52xx.h>
+
+#include <linux/fsl/bestcomm/sram.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include "linux/fsl/bestcomm/bestcomm.h"
+
+#define DRIVER_NAME "bestcomm-core"
+
+/* MPC5200 device tree match tables */
+static struct of_device_id mpc52xx_sram_ids[] = {
+ { .compatible = "fsl,mpc5200-sram", },
+ { .compatible = "mpc5200-sram", },
+ {}
+};
+
+
+struct bcom_engine *bcom_eng = NULL;
+EXPORT_SYMBOL_GPL(bcom_eng); /* needed for inline functions */
+
+/* ======================================================================== */
+/* Public and private API */
+/* ======================================================================== */
+
+/* Private API */
+
+struct bcom_task *
+bcom_task_alloc(int bd_count, int bd_size, int priv_size)
+{
+ int i, tasknum = -1;
+ struct bcom_task *tsk;
+
+ /* Don't try to do anything if bestcomm init failed */
+ if (!bcom_eng)
+ return NULL;
+
+ /* Get and reserve a task num */
+ spin_lock(&bcom_eng->lock);
+
+ for (i=0; i<BCOM_MAX_TASKS; i++)
+ if (!bcom_eng->tdt[i].stop) { /* we use stop as a marker */
+ bcom_eng->tdt[i].stop = 0xfffffffful; /* dummy addr */
+ tasknum = i;
+ break;
+ }
+
+ spin_unlock(&bcom_eng->lock);
+
+ if (tasknum < 0)
+ return NULL;
+
+ /* Allocate our structure */
+ tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL);
+ if (!tsk)
+ goto error;
+
+ tsk->tasknum = tasknum;
+ if (priv_size)
+ tsk->priv = (void*)tsk + sizeof(struct bcom_task);
+
+ /* Get IRQ of that task */
+ tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum);
+ if (tsk->irq == NO_IRQ)
+ goto error;
+
+ /* Init the BDs, if needed */
+ if (bd_count) {
+ tsk->cookie = kmalloc(sizeof(void*) * bd_count, GFP_KERNEL);
+ if (!tsk->cookie)
+ goto error;
+
+ tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa);
+ if (!tsk->bd)
+ goto error;
+ memset(tsk->bd, 0x00, bd_count * bd_size);
+
+ tsk->num_bd = bd_count;
+ tsk->bd_size = bd_size;
+ }
+
+ return tsk;
+
+error:
+ if (tsk) {
+ if (tsk->irq != NO_IRQ)
+ irq_dispose_mapping(tsk->irq);
+ bcom_sram_free(tsk->bd);
+ kfree(tsk->cookie);
+ kfree(tsk);
+ }
+
+ bcom_eng->tdt[tasknum].stop = 0;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(bcom_task_alloc);
+
+void
+bcom_task_free(struct bcom_task *tsk)
+{
+ /* Stop the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Clear TDT */
+ bcom_eng->tdt[tsk->tasknum].start = 0;
+ bcom_eng->tdt[tsk->tasknum].stop = 0;
+
+ /* Free everything */
+ irq_dispose_mapping(tsk->irq);
+ bcom_sram_free(tsk->bd);
+ kfree(tsk->cookie);
+ kfree(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_task_free);
+
+int
+bcom_load_image(int task, u32 *task_image)
+{
+ struct bcom_task_header *hdr = (struct bcom_task_header *)task_image;
+ struct bcom_tdt *tdt;
+ u32 *desc, *var, *inc;
+ u32 *desc_src, *var_src, *inc_src;
+
+ /* Safety checks */
+ if (hdr->magic != BCOM_TASK_MAGIC) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to load invalid microcode\n");
+ return -EINVAL;
+ }
+
+ if ((task < 0) || (task >= BCOM_MAX_TASKS)) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to load invalid task %d\n", task);
+ return -EINVAL;
+ }
+
+ /* Initial load or reload */
+ tdt = &bcom_eng->tdt[task];
+
+ if (tdt->start) {
+ desc = bcom_task_desc(task);
+ if (hdr->desc_size != bcom_task_num_descs(task)) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to reload wrong task image "
+ "(%d size %d/%d)!\n",
+ task,
+ hdr->desc_size,
+ bcom_task_num_descs(task));
+ return -EINVAL;
+ }
+ } else {
+ phys_addr_t start_pa;
+
+ desc = bcom_sram_alloc(hdr->desc_size * sizeof(u32), 4, &start_pa);
+ if (!desc)
+ return -ENOMEM;
+
+ tdt->start = start_pa;
+ tdt->stop = start_pa + ((hdr->desc_size-1) * sizeof(u32));
+ }
+
+ var = bcom_task_var(task);
+ inc = bcom_task_inc(task);
+
+ /* Clear & copy */
+ memset(var, 0x00, BCOM_VAR_SIZE);
+ memset(inc, 0x00, BCOM_INC_SIZE);
+
+ desc_src = (u32 *)(hdr + 1);
+ var_src = desc_src + hdr->desc_size;
+ inc_src = var_src + hdr->var_size;
+
+ memcpy(desc, desc_src, hdr->desc_size * sizeof(u32));
+ memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32));
+ memcpy(inc, inc_src, hdr->inc_size * sizeof(u32));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_load_image);
+
+void
+bcom_set_initiator(int task, int initiator)
+{
+ int i;
+ int num_descs;
+ u32 *desc;
+ int next_drd_has_initiator;
+
+ bcom_set_tcr_initiator(task, initiator);
+
+ /* Just setting the TCR is apparently not enough due to some problem */
+ /* with it, so we also go through all the microcode and patch the */
+ /* initiator directly in the DRDs. */
+
+ desc = bcom_task_desc(task);
+ next_drd_has_initiator = 1;
+ num_descs = bcom_task_num_descs(task);
+
+ for (i=0; i<num_descs; i++, desc++) {
+ if (!bcom_desc_is_drd(*desc))
+ continue;
+ if (next_drd_has_initiator)
+ if (bcom_desc_initiator(*desc) != BCOM_INITIATOR_ALWAYS)
+ bcom_set_desc_initiator(desc, initiator);
+ next_drd_has_initiator = !bcom_drd_is_extended(*desc);
+ }
+}
+EXPORT_SYMBOL_GPL(bcom_set_initiator);
+
+
+/* Public API */
+
+void
+bcom_enable(struct bcom_task *tsk)
+{
+ bcom_enable_task(tsk->tasknum);
+}
+EXPORT_SYMBOL_GPL(bcom_enable);
+
+void
+bcom_disable(struct bcom_task *tsk)
+{
+ bcom_disable_task(tsk->tasknum);
+}
+EXPORT_SYMBOL_GPL(bcom_disable);
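+
+/*
+ * bcom_enable()/bcom_disable() are the public start/stop entry points;
+ * clients obtain the struct bcom_task from the per-task drivers
+ * (ata.c, fec.c, gen_bd.c) and pair these calls with the matching
+ * *_release() helper when they are done.
+ */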
+
+
+/* ======================================================================== */
+/* Engine init/cleanup */
+/* ======================================================================== */
+
+/* Function Descriptor table */
+/* this will need to be updated if Freescale changes their task code FDT */
+static u32 fdt_ops[] = {
+ 0xa0045670, /* FDT[48] - load_acc() */
+ 0x80045670, /* FDT[49] - unload_acc() */
+ 0x21800000, /* FDT[50] - and() */
+ 0x21e00000, /* FDT[51] - or() */
+ 0x21500000, /* FDT[52] - xor() */
+ 0x21400000, /* FDT[53] - andn() */
+ 0x21500000, /* FDT[54] - not() */
+ 0x20400000, /* FDT[55] - add() */
+ 0x20500000, /* FDT[56] - sub() */
+ 0x20800000, /* FDT[57] - lsh() */
+ 0x20a00000, /* FDT[58] - rsh() */
+ 0xc0170000, /* FDT[59] - crc8() */
+ 0xc0145670, /* FDT[60] - crc16() */
+ 0xc0345670, /* FDT[61] - crc32() */
+ 0xa0076540, /* FDT[62] - endian32() */
+ 0xa0000760, /* FDT[63] - endian16() */
+};
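+
+/*
+ * bcom_engine_init() copies this table into FDT entries 48..63, i.e. the
+ * EU#3 function descriptors referenced by the task microcode.
+ */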
+
+
+static int bcom_engine_init(void)
+{
+ int task;
+ phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
+ unsigned int tdt_size, ctx_size, var_size, fdt_size;
+
+ /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
+ tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
+ ctx_size = BCOM_MAX_TASKS * BCOM_CTX_SIZE;
+ var_size = BCOM_MAX_TASKS * (BCOM_VAR_SIZE + BCOM_INC_SIZE);
+ fdt_size = BCOM_FDT_SIZE;
+
+ bcom_eng->tdt = bcom_sram_alloc(tdt_size, sizeof(u32), &tdt_pa);
+ bcom_eng->ctx = bcom_sram_alloc(ctx_size, BCOM_CTX_ALIGN, &ctx_pa);
+ bcom_eng->var = bcom_sram_alloc(var_size, BCOM_VAR_ALIGN, &var_pa);
+ bcom_eng->fdt = bcom_sram_alloc(fdt_size, BCOM_FDT_ALIGN, &fdt_pa);
+
+ if (!bcom_eng->tdt || !bcom_eng->ctx || !bcom_eng->var || !bcom_eng->fdt) {
+ printk(KERN_ERR "DMA: SRAM alloc failed in engine init !\n");
+
+ bcom_sram_free(bcom_eng->tdt);
+ bcom_sram_free(bcom_eng->ctx);
+ bcom_sram_free(bcom_eng->var);
+ bcom_sram_free(bcom_eng->fdt);
+
+ return -ENOMEM;
+ }
+
+ memset(bcom_eng->tdt, 0x00, tdt_size);
+ memset(bcom_eng->ctx, 0x00, ctx_size);
+ memset(bcom_eng->var, 0x00, var_size);
+ memset(bcom_eng->fdt, 0x00, fdt_size);
+
+ /* Copy the FDT for the EU#3 */
+ memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
+
+ /* Initialize Task base structure */
+ for (task=0; task<BCOM_MAX_TASKS; task++)
+ {
+ out_be16(&bcom_eng->regs->tcr[task], 0);
+ out_8(&bcom_eng->regs->ipr[task], 0);
+
+ bcom_eng->tdt[task].context = ctx_pa;
+ bcom_eng->tdt[task].var = var_pa;
+ bcom_eng->tdt[task].fdt = fdt_pa;
+
+ var_pa += BCOM_VAR_SIZE + BCOM_INC_SIZE;
+ ctx_pa += BCOM_CTX_SIZE;
+ }
+
+ out_be32(&bcom_eng->regs->taskBar, tdt_pa);
+
+ /* Init 'always' initiator */
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);
+
+ /* Disable COMM Bus Prefetch on the original 5200; it's broken */
+ if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
+ bcom_disable_prefetch();
+
+ /* Init lock */
+ spin_lock_init(&bcom_eng->lock);
+
+ return 0;
+}
+
+static void
+bcom_engine_cleanup(void)
+{
+ int task;
+
+ /* Stop all tasks */
+ for (task=0; task<BCOM_MAX_TASKS; task++)
+ {
+ out_be16(&bcom_eng->regs->tcr[task], 0);
+ out_8(&bcom_eng->regs->ipr[task], 0);
+ }
+
+ out_be32(&bcom_eng->regs->taskBar, 0ul);
+
+ /* Release the SRAM zones */
+ bcom_sram_free(bcom_eng->tdt);
+ bcom_sram_free(bcom_eng->ctx);
+ bcom_sram_free(bcom_eng->var);
+ bcom_sram_free(bcom_eng->fdt);
+}
+
+
+/* ======================================================================== */
+/* OF platform driver */
+/* ======================================================================== */
+
+static int mpc52xx_bcom_probe(struct platform_device *op)
+{
+ struct device_node *ofn_sram;
+ struct resource res_bcom;
+
+ int rv;
+
+ /* Inform user we're ok so far */
+ printk(KERN_INFO "DMA: MPC52xx BestComm driver\n");
+
+ /* Get the bestcomm node */
+ of_node_get(op->dev.of_node);
+
+ /* Prepare SRAM */
+ ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids);
+ if (!ofn_sram) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "No SRAM found in device tree\n");
+ rv = -ENODEV;
+ goto error_ofput;
+ }
+ rv = bcom_sram_init(ofn_sram, DRIVER_NAME);
+ of_node_put(ofn_sram);
+
+ if (rv) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Error in SRAM init\n");
+ goto error_ofput;
+ }
+
+ /* Get a clean struct */
+ bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL);
+ if (!bcom_eng) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't allocate state structure\n");
+ rv = -ENOMEM;
+ goto error_sramclean;
+ }
+
+ /* Save the node */
+ bcom_eng->ofnode = op->dev.of_node;
+
+ /* Get, reserve & map io */
+ if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't get resource\n");
+ rv = -EINVAL;
+ goto error_sramclean;
+ }
+
+ if (!request_mem_region(res_bcom.start, resource_size(&res_bcom),
+ DRIVER_NAME)) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't request registers region\n");
+ rv = -EBUSY;
+ goto error_sramclean;
+ }
+
+ bcom_eng->regs_base = res_bcom.start;
+ bcom_eng->regs = ioremap(res_bcom.start, sizeof(struct mpc52xx_sdma));
+ if (!bcom_eng->regs) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't map registers\n");
+ rv = -ENOMEM;
+ goto error_release;
+ }
+
+ /* Now, do the real init */
+ rv = bcom_engine_init();
+ if (rv)
+ goto error_unmap;
+
+ /* Done ! */
+ printk(KERN_INFO "DMA: MPC52xx BestComm engine @%08lx ok !\n",
+ (long)bcom_eng->regs_base);
+
+ return 0;
+
+ /* Error path */
+error_unmap:
+ iounmap(bcom_eng->regs);
+error_release:
+ release_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma));
+error_sramclean:
+ kfree(bcom_eng);
+ bcom_sram_cleanup();
+error_ofput:
+ of_node_put(op->dev.of_node);
+
+ printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n");
+
+ return rv;
+}
+
+
+static int mpc52xx_bcom_remove(struct platform_device *op)
+{
+ /* Clean up the engine */
+ bcom_engine_cleanup();
+
+ /* Cleanup SRAM */
+ bcom_sram_cleanup();
+
+ /* Release regs */
+ iounmap(bcom_eng->regs);
+ release_mem_region(bcom_eng->regs_base, sizeof(struct mpc52xx_sdma));
+
+ /* Release the node */
+ of_node_put(bcom_eng->ofnode);
+
+ /* Release memory */
+ kfree(bcom_eng);
+ bcom_eng = NULL;
+
+ return 0;
+}
+
+static struct of_device_id mpc52xx_bcom_of_match[] = {
+ { .compatible = "fsl,mpc5200-bestcomm", },
+ { .compatible = "mpc5200-bestcomm", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
+
+
+static struct platform_driver mpc52xx_bcom_of_platform_driver = {
+ .probe = mpc52xx_bcom_probe,
+ .remove = mpc52xx_bcom_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_bcom_of_match,
+ },
+};
+
+
+/* ======================================================================== */
+/* Module */
+/* ======================================================================== */
+
+static int __init
+mpc52xx_bcom_init(void)
+{
+ return platform_driver_register(&mpc52xx_bcom_of_platform_driver);
+}
+
+static void __exit
+mpc52xx_bcom_exit(void)
+{
+ platform_driver_unregister(&mpc52xx_bcom_of_platform_driver);
+}
+
+/* If we're not a module, we must make sure everything is setup before */
+/* anyone tries to use us ... that's why we use subsys_initcall instead */
+/* of module_init. */
+subsys_initcall(mpc52xx_bcom_init);
+module_exit(mpc52xx_bcom_exit);
+
+MODULE_DESCRIPTION("Freescale MPC52xx BestComm DMA");
+MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
+MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
+MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/dma/bestcomm/fec.c b/drivers/dma/bestcomm/fec.c
new file mode 100644
index 000000000000..7f1fb1c999e4
--- /dev/null
+++ b/drivers/dma/bestcomm/fec.c
@@ -0,0 +1,270 @@
+/*
+ * Bestcomm FEC tasks driver
+ *
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/fec.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* fec tasks images */
+extern u32 bcom_fec_rx_task[];
+extern u32 bcom_fec_tx_task[];
+
+/* rx task vars that need to be set before enabling the task */
+struct bcom_fec_rx_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 fifo; /* (u32*) address of fec's fifo */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* rx task incs that need to be set before enabling the task */
+struct bcom_fec_rx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+ u16 pad2;
+ s16 incr_dst_ma;
+};
+
+/* tx task vars that need to be set before enabling the task */
+struct bcom_fec_tx_var {
+ u32 DRD; /* (u32*) address of self-modified DRD */
+ u32 fifo; /* (u32*) address of fec's fifo */
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* set by uCode for each packet */
+};
+
+/* tx task incs that need to be set before enabling the task */
+struct bcom_fec_tx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_src;
+ u16 pad2;
+ s16 incr_src_ma;
+};
+
+/* private structure in the task */
+struct bcom_fec_priv {
+ phys_addr_t fifo;
+ int maxbufsize;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_fec_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
+ sizeof(struct bcom_fec_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+ priv->maxbufsize = maxbufsize;
+
+ if (bcom_fec_rx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_init);
+
+int
+bcom_fec_rx_reset(struct bcom_task *tsk)
+{
+ struct bcom_fec_priv *priv = tsk->priv;
+ struct bcom_fec_rx_var *var;
+ struct bcom_fec_rx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_fec_rx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_fec_rx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_fec_rx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = priv->maxbufsize;
+
+ inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */
+ inc->incr_dst = sizeof(u32); /* task image, but we stick */
+ inc->incr_dst_ma = sizeof(u8); /* to the official ones */
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_RX], BCOM_IPR_FEC_RX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_reset);
+
+void
+bcom_fec_rx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the FEC tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_release);
+
+
+
+/*
+ * Return the 2nd to last DRD.
+ * This is an ugly hack, but at least it's only done once, at initialization.
+ */
+static u32 *self_modified_drd(int tasknum)
+{
+ u32 *desc;
+ int num_descs;
+ int drd_count;
+ int i;
+
+ num_descs = bcom_task_num_descs(tasknum);
+ desc = bcom_task_desc(tasknum) + num_descs - 1;
+ drd_count = 0;
+ for (i=0; i<num_descs; i++, desc--)
+ if (bcom_desc_is_drd(*desc) && ++drd_count == 3)
+ break;
+ return desc;
+}
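+
+/*
+ * The physical address of the descriptor located above is passed to the
+ * microcode through var->DRD in bcom_fec_tx_reset(); the task rewrites
+ * that descriptor at run time (hence "self-modified"), presumably with
+ * per-packet parameters such as the transfer length (see the buffer_size
+ * field, which is noted as "set by uCode for each packet").
+ */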
+
+struct bcom_task *
+bcom_fec_tx_init(int queue_len, phys_addr_t fifo)
+{
+ struct bcom_task *tsk;
+ struct bcom_fec_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
+ sizeof(struct bcom_fec_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_ENABLE_TASK;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+
+ if (bcom_fec_tx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_init);
+
+int
+bcom_fec_tx_reset(struct bcom_task *tsk)
+{
+ struct bcom_fec_priv *priv = tsk->priv;
+ struct bcom_fec_tx_var *var;
+ struct bcom_fec_tx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_fec_tx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_fec_tx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_fec_tx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->DRD = bcom_sram_va2pa(self_modified_drd(tsk->tasknum));
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+
+ inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */
+ inc->incr_src = sizeof(u32); /* task image, but we stick */
+ inc->incr_src_ma = sizeof(u8); /* to the official ones */
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_TX], BCOM_IPR_FEC_TX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_reset);
+
+void
+bcom_fec_tx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the FEC tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_release);
+
+
+MODULE_DESCRIPTION("BestComm FEC tasks driver");
+MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/dma/bestcomm/gen_bd.c b/drivers/dma/bestcomm/gen_bd.c
new file mode 100644
index 000000000000..1a5b22d88127
--- /dev/null
+++ b/drivers/dma/bestcomm/gen_bd.c
@@ -0,0 +1,354 @@
+/*
+ * Driver for MPC52xx processor BestComm General Buffer Descriptor
+ *
+ * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+
+#include <asm/mpc52xx.h>
+#include <asm/mpc52xx_psc.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/gen_bd.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* gen_bd tasks images */
+extern u32 bcom_gen_bd_rx_task[];
+extern u32 bcom_gen_bd_tx_task[];
+
+/* rx task vars that need to be set before enabling the task */
+struct bcom_gen_bd_rx_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 fifo; /* (u32*) address of gen_bd's fifo */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* rx task incs that need to be set before enabling the task */
+struct bcom_gen_bd_rx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+};
+
+/* tx task vars that need to be set before enabling the task */
+struct bcom_gen_bd_tx_var {
+ u32 fifo; /* (u32*) address of gen_bd's fifo */
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* set by uCode for each packet */
+};
+
+/* tx task incs that need to be set before enabling the task */
+struct bcom_gen_bd_tx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_src;
+ u16 pad2;
+ s16 incr_src_ma;
+};
+
+/* private structure */
+struct bcom_gen_bd_priv {
+ phys_addr_t fifo;
+ int initiator;
+ int ipr;
+ int maxbufsize;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo,
+ int initiator, int ipr, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_gen_bd_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
+ sizeof(struct bcom_gen_bd_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+ priv->initiator = initiator;
+ priv->ipr = ipr;
+ priv->maxbufsize = maxbufsize;
+
+ if (bcom_gen_bd_rx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_init);
+
+int
+bcom_gen_bd_rx_reset(struct bcom_task *tsk)
+{
+ struct bcom_gen_bd_priv *priv = tsk->priv;
+ struct bcom_gen_bd_rx_var *var;
+ struct bcom_gen_bd_rx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_gen_bd_rx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_gen_bd_rx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_gen_bd_rx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = priv->maxbufsize;
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_dst = sizeof(u32);
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr);
+ bcom_set_initiator(tsk->tasknum, priv->initiator);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_reset);
+
+void
+bcom_gen_bd_rx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the GenBD tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_release);
+
+
+struct bcom_task *
+bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo,
+ int initiator, int ipr)
+{
+ struct bcom_task *tsk;
+ struct bcom_gen_bd_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
+ sizeof(struct bcom_gen_bd_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+ priv->initiator = initiator;
+ priv->ipr = ipr;
+
+ if (bcom_gen_bd_tx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_init);
+
+int
+bcom_gen_bd_tx_reset(struct bcom_task *tsk)
+{
+ struct bcom_gen_bd_priv *priv = tsk->priv;
+ struct bcom_gen_bd_tx_var *var;
+ struct bcom_gen_bd_tx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_gen_bd_tx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_gen_bd_tx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_gen_bd_tx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = sizeof(u32);
+ inc->incr_src_ma = sizeof(u8);
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr);
+ bcom_set_initiator(tsk->tasknum, priv->initiator);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_reset);
+
+void
+bcom_gen_bd_tx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the GenBD tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_release);
+
+/* ---------------------------------------------------------------------
+ * PSC support code
+ */
+
+/**
+ * bcom_psc_params - Bestcomm initialization value table for PSC devices
+ *
+ * This structure is only used internally. It is a lookup table for PSC
+ * specific parameters to bestcomm tasks.
+ */
+static struct bcom_psc_params {
+ int rx_initiator;
+ int rx_ipr;
+ int tx_initiator;
+ int tx_ipr;
+} bcom_psc_params[] = {
+ [0] = {
+ .rx_initiator = BCOM_INITIATOR_PSC1_RX,
+ .rx_ipr = BCOM_IPR_PSC1_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC1_TX,
+ .tx_ipr = BCOM_IPR_PSC1_TX,
+ },
+ [1] = {
+ .rx_initiator = BCOM_INITIATOR_PSC2_RX,
+ .rx_ipr = BCOM_IPR_PSC2_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC2_TX,
+ .tx_ipr = BCOM_IPR_PSC2_TX,
+ },
+ [2] = {
+ .rx_initiator = BCOM_INITIATOR_PSC3_RX,
+ .rx_ipr = BCOM_IPR_PSC3_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC3_TX,
+ .tx_ipr = BCOM_IPR_PSC3_TX,
+ },
+ [3] = {
+ .rx_initiator = BCOM_INITIATOR_PSC4_RX,
+ .rx_ipr = BCOM_IPR_PSC4_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC4_TX,
+ .tx_ipr = BCOM_IPR_PSC4_TX,
+ },
+ [4] = {
+ .rx_initiator = BCOM_INITIATOR_PSC5_RX,
+ .rx_ipr = BCOM_IPR_PSC5_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC5_TX,
+ .tx_ipr = BCOM_IPR_PSC5_TX,
+ },
+ [5] = {
+ .rx_initiator = BCOM_INITIATOR_PSC6_RX,
+ .rx_ipr = BCOM_IPR_PSC6_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC6_TX,
+ .tx_ipr = BCOM_IPR_PSC6_TX,
+ },
+};
+
+/**
+ * bcom_psc_gen_bd_rx_init - Allocate a receive bcom_task for a PSC port
+ * @psc_num: Number of the PSC to allocate a task for
+ * @queue_len: number of buffer descriptors to allocate for the task
+ * @fifo: physical address of FIFO register
+ * @maxbufsize: Maximum receive data size in bytes.
+ *
+ * Allocate a bestcomm task structure for receiving data from a PSC.
+ */
+struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len,
+ phys_addr_t fifo, int maxbufsize)
+{
+ if (psc_num >= MPC52xx_PSC_MAXNUM)
+ return NULL;
+
+ return bcom_gen_bd_rx_init(queue_len, fifo,
+ bcom_psc_params[psc_num].rx_initiator,
+ bcom_psc_params[psc_num].rx_ipr,
+ maxbufsize);
+}
+EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_rx_init);
+
+/**
+ * bcom_psc_gen_bd_tx_init - Allocate a transmit bcom_task for a PSC port
+ * @psc_num: Number of the PSC to allocate a task for
+ * @queue_len: number of buffer descriptors to allocate for the task
+ * @fifo: physical address of FIFO register
+ *
+ * Allocate a bestcomm task structure for transmitting data to a PSC.
+ */
+struct bcom_task *
+bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, phys_addr_t fifo)
+{
+ return bcom_gen_bd_tx_init(queue_len, fifo,
+ bcom_psc_params[psc_num].tx_initiator,
+ bcom_psc_params[psc_num].tx_ipr);
+}
+EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_tx_init);
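+
+/*
+ * Illustrative use from a PSC client driver (sketch only; psc_num, the
+ * queue lengths, fifo_pa and the maxbufsize value are made-up
+ * placeholders):
+ *
+ *	rx = bcom_psc_gen_bd_rx_init(psc_num, 8, fifo_pa, 512);
+ *	tx = bcom_psc_gen_bd_tx_init(psc_num, 8, fifo_pa);
+ *	if (!rx || !tx)
+ *		goto err_free;
+ */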
+
+
+MODULE_DESCRIPTION("BestComm General Buffer Descriptor tasks driver");
+MODULE_AUTHOR("Jeff Gibbons <jeff.gibbons@appspec.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/dma/bestcomm/sram.c b/drivers/dma/bestcomm/sram.c
new file mode 100644
index 000000000000..5e2ed30ba2c4
--- /dev/null
+++ b/drivers/dma/bestcomm/sram.c
@@ -0,0 +1,178 @@
+/*
+ * Simple memory allocator for on-board SRAM
+ *
+ *
+ * Maintainer : Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Copyright (C) 2005 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+
+#include <asm/io.h>
+#include <asm/mmu.h>
+
+#include <linux/fsl/bestcomm/sram.h>
+
+
+/* Struct keeping our 'state' */
+struct bcom_sram *bcom_sram = NULL;
+EXPORT_SYMBOL_GPL(bcom_sram); /* needed for inline functions */
+
+
+/* ======================================================================== */
+/* Public API */
+/* ======================================================================== */
+/* Do NOT use from interrupt context; if this is ever needed from an IRQ
+ * handler, the _irqsave versions of the spinlock calls must be used. */
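+
+/*
+ * For reference, the _irqsave form would look roughly like this
+ * (sketch only, not what the code below does):
+ *
+ *	unsigned long flags;
+ *
+ *	spin_lock_irqsave(&bcom_sram->lock, flags);
+ *	offset = rh_alloc_align(bcom_sram->rh, size, align, NULL);
+ *	spin_unlock_irqrestore(&bcom_sram->lock, flags);
+ */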
+
+int bcom_sram_init(struct device_node *sram_node, char *owner)
+{
+ int rv;
+ const u32 *regaddr_p;
+ u64 regaddr64, size64;
+ unsigned int psize;
+
+ /* Create our state struct */
+ if (bcom_sram) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Already initialized !\n", owner);
+ return -EBUSY;
+ }
+
+ bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL);
+ if (!bcom_sram) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Couldn't allocate internal state !\n", owner);
+ return -ENOMEM;
+ }
+
+ /* Get address and size of the sram */
+ regaddr_p = of_get_address(sram_node, 0, &size64, NULL);
+ if (!regaddr_p) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Invalid device node !\n", owner);
+ rv = -EINVAL;
+ goto error_free;
+ }
+
+ regaddr64 = of_translate_address(sram_node, regaddr_p);
+
+ bcom_sram->base_phys = (phys_addr_t) regaddr64;
+ bcom_sram->size = (unsigned int) size64;
+
+ /* Request region */
+ if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Couldn't request region !\n", owner);
+ rv = -EBUSY;
+ goto error_free;
+ }
+
+ /* Map SRAM */
+ /* sram is not really __iomem */
+ bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size);
+
+ if (!bcom_sram->base_virt) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Map error SRAM zone 0x%08lx (0x%0x)!\n",
+ owner, (long)bcom_sram->base_phys, bcom_sram->size );
+ rv = -ENOMEM;
+ goto error_release;
+ }
+
+ /* Create an rheap (defaults to 32 bits word alignment) */
+ bcom_sram->rh = rh_create(4);
+
+ /* Attach the free zones */
+#if 0
+ /* Currently disabled ... for future use only */
+ regaddr_p = of_get_property(sram_node, "available", &psize);
+#else
+ regaddr_p = NULL;
+ psize = 0;
+#endif
+
+ if (!regaddr_p || !psize) {
+ /* Attach the whole zone */
+ rh_attach_region(bcom_sram->rh, 0, bcom_sram->size);
+ } else {
+ /* Attach each zone independently */
+ while (psize >= 2 * sizeof(u32)) {
+ phys_addr_t zbase = of_translate_address(sram_node, regaddr_p);
+ rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]);
+ regaddr_p += 2;
+ psize -= 2 * sizeof(u32);
+ }
+ }
+
+ /* Init our spinlock */
+ spin_lock_init(&bcom_sram->lock);
+
+ return 0;
+
+error_release:
+ release_mem_region(bcom_sram->base_phys, bcom_sram->size);
+error_free:
+ kfree(bcom_sram);
+ bcom_sram = NULL;
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(bcom_sram_init);
+
+void bcom_sram_cleanup(void)
+{
+ /* Free resources */
+ if (bcom_sram) {
+ rh_destroy(bcom_sram->rh);
+ iounmap((void __iomem *)bcom_sram->base_virt);
+ release_mem_region(bcom_sram->base_phys, bcom_sram->size);
+ kfree(bcom_sram);
+ bcom_sram = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(bcom_sram_cleanup);
+
+void* bcom_sram_alloc(int size, int align, phys_addr_t *phys)
+{
+ unsigned long offset;
+
+ spin_lock(&bcom_sram->lock);
+ offset = rh_alloc_align(bcom_sram->rh, size, align, NULL);
+ spin_unlock(&bcom_sram->lock);
+
+ if (IS_ERR_VALUE(offset))
+ return NULL;
+
+ *phys = bcom_sram->base_phys + offset;
+ return bcom_sram->base_virt + offset;
+}
+EXPORT_SYMBOL_GPL(bcom_sram_alloc);
+
+void bcom_sram_free(void *ptr)
+{
+ unsigned long offset;
+
+ if (!ptr)
+ return;
+
+ offset = ptr - bcom_sram->base_virt;
+
+ spin_lock(&bcom_sram->lock);
+ rh_free(bcom_sram->rh, offset);
+ spin_unlock(&bcom_sram->lock);
+}
+EXPORT_SYMBOL_GPL(bcom_sram_free);
+
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index aa384e53b7ac..797940e532ff 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -21,11 +21,1241 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
-#include <mach/coh901318.h>
+#include <linux/platform_data/dma-coh901318.h>
-#include "coh901318_lli.h"
+#include "coh901318.h"
#include "dmaengine.h"
+#define COH901318_MOD32_MASK (0x1F)
+#define COH901318_WORD_MASK (0xFFFFFFFF)
+/* INT_STATUS - Interrupt Status Registers 32bit (R/-) */
+#define COH901318_INT_STATUS1 (0x0000)
+#define COH901318_INT_STATUS2 (0x0004)
+/* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */
+#define COH901318_TC_INT_STATUS1 (0x0008)
+#define COH901318_TC_INT_STATUS2 (0x000C)
+/* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */
+#define COH901318_TC_INT_CLEAR1 (0x0010)
+#define COH901318_TC_INT_CLEAR2 (0x0014)
+/* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
+#define COH901318_RAW_TC_INT_STATUS1 (0x0018)
+#define COH901318_RAW_TC_INT_STATUS2 (0x001C)
+/* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */
+#define COH901318_BE_INT_STATUS1 (0x0020)
+#define COH901318_BE_INT_STATUS2 (0x0024)
+/* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */
+#define COH901318_BE_INT_CLEAR1 (0x0028)
+#define COH901318_BE_INT_CLEAR2 (0x002C)
+/* RAW_BE_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
+#define COH901318_RAW_BE_INT_STATUS1 (0x0030)
+#define COH901318_RAW_BE_INT_STATUS2 (0x0034)
+
+/*
+ * CX_CFG - Channel Configuration Registers 32bit (R/W)
+ */
+#define COH901318_CX_CFG (0x0100)
+#define COH901318_CX_CFG_SPACING (0x04)
+/* Channel enable activates the DMA job */
+#define COH901318_CX_CFG_CH_ENABLE (0x00000001)
+#define COH901318_CX_CFG_CH_DISABLE (0x00000000)
+/* Request Mode */
+#define COH901318_CX_CFG_RM_MASK (0x00000006)
+#define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1)
+#define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1)
+#define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1)
+#define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1)
+#define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1)
+/* Linked channel request field. RM must == 11 */
+#define COH901318_CX_CFG_LCRF_SHIFT 3
+#define COH901318_CX_CFG_LCRF_MASK (0x000001F8)
+#define COH901318_CX_CFG_LCR_DISABLE (0x00000000)
+/* Terminal Counter Interrupt Request Mask */
+#define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200)
+#define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000)
+/* Bus Error interrupt Mask */
+#define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400)
+#define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000)
+
+/*
+ * CX_STAT - Channel Status Registers 32bit (R/-)
+ */
+#define COH901318_CX_STAT (0x0200)
+#define COH901318_CX_STAT_SPACING (0x04)
+#define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008)
+#define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004)
+#define COH901318_CX_STAT_ACTIVE (0x00000002)
+#define COH901318_CX_STAT_ENABLED (0x00000001)
+
+/*
+ * CX_CTRL - Channel Control Registers 32bit (R/W)
+ */
+#define COH901318_CX_CTRL (0x0400)
+#define COH901318_CX_CTRL_SPACING (0x10)
+/* Transfer Count Enable */
+#define COH901318_CX_CTRL_TC_ENABLE (0x00001000)
+#define COH901318_CX_CTRL_TC_DISABLE (0x00000000)
+/* Transfer Count Value 0 - 4095 */
+#define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF)
+/* Burst count */
+#define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000)
+#define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13)
+/* Source bus size */
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16)
+/* Source address increment */
+#define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000)
+#define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000)
+/* Destination Bus Size */
+#define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19)
+/* Destination address increment */
+#define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000)
+#define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000)
+/* Master Mode (Master2 is only connected to MSL) */
+#define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000)
+#define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22)
+/* Terminal Count flag to PER enable */
+#define COH901318_CX_CTRL_TCP_ENABLE (0x01000000)
+#define COH901318_CX_CTRL_TCP_DISABLE (0x00000000)
+/* Terminal Count flags to CPU enable */
+#define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000)
+#define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000)
+/* Hand shake to peripheral */
+#define COH901318_CX_CTRL_HSP_ENABLE (0x04000000)
+#define COH901318_CX_CTRL_HSP_DISABLE (0x00000000)
+#define COH901318_CX_CTRL_HSS_ENABLE (0x08000000)
+#define COH901318_CX_CTRL_HSS_DISABLE (0x00000000)
+/* DMA mode */
+#define COH901318_CX_CTRL_DDMA_MASK (0x30000000)
+#define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28)
+#define COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28)
+#define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28)
+/* Primary Request Data Destination */
+#define COH901318_CX_CTRL_PRDD_MASK (0x40000000)
+#define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30)
+#define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30)
+
+/*
+ * CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W)
+ */
+#define COH901318_CX_SRC_ADDR (0x0404)
+#define COH901318_CX_SRC_ADDR_SPACING (0x10)
+
+/*
+ * CX_DST_ADDR - Channel Destination Address Registers 32bit R/W
+ */
+#define COH901318_CX_DST_ADDR (0x0408)
+#define COH901318_CX_DST_ADDR_SPACING (0x10)
+
+/*
+ * CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W)
+ */
+#define COH901318_CX_LNK_ADDR (0x040C)
+#define COH901318_CX_LNK_ADDR_SPACING (0x10)
+#define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001)
+
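The *_SPACING defines above imply that each channel's registers sit at a fixed stride from the block base. A minimal sketch, assuming that convention, of how a per-channel offset and a control word can be built from these constants; the cx_ctrl_offset() helper below is hypothetical and only illustrates the layout, it is not part of this patch:

	/* Hypothetical helper: offset of channel `id`'s CX_CTRL register */
	static inline u32 cx_ctrl_offset(unsigned int id)
	{
		return COH901318_CX_CTRL + id * COH901318_CX_CTRL_SPACING;
	}

	/* Example control word composed from the bit-field defines above */
	u32 ctrl = COH901318_CX_CTRL_TC_ENABLE |
		   COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
		   COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
		   COH901318_CX_CTRL_MASTER_MODE_M1RW;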
+/**
+ * struct coh901318_params - parameters for DMAC configuration
+ * @config: DMA config register
+ * @ctrl_lli_last: DMA control register for the last lli in the list
+ * @ctrl_lli: DMA control register for an lli
+ * @ctrl_lli_chained: DMA control register for a chained lli
+ */
+struct coh901318_params {
+ u32 config;
+ u32 ctrl_lli_last;
+ u32 ctrl_lli;
+ u32 ctrl_lli_chained;
+};
+
+/**
+ * struct coh_dma_channel - dma channel base
+ * @name: ASCII name of the DMA channel
+ * @number: channel id number
+ * @desc_nbr_max: number of preallocated descriptors
+ * @priority_high: channel priority: 0 means low priority, otherwise high
+ * @param: configuration parameters
+ */
+struct coh_dma_channel {
+ const char name[32];
+ const int number;
+ const int desc_nbr_max;
+ const int priority_high;
+ const struct coh901318_params param;
+};
+
+/**
+ * struct powersave - DMA power save structure
+ * @lock: lock protecting data in this struct
+ * @started_channels: bit mask indicating active dma channels
+ */
+struct powersave {
+ spinlock_t lock;
+ u64 started_channels;
+};
+
+/* Points out all DMA slave channels.
+ * Syntax is [A1, B1, A2, B2, ..., -1, -1]: each pair selects all channels
+ * from A to B; the end of the list is marked with -1, -1 (a minimal
+ * expansion sketch follows these arrays).
+ */
+static int dma_slave_channels[] = {
+ U300_DMA_MSL_TX_0, U300_DMA_SPI_RX,
+ U300_DMA_UART1_TX, U300_DMA_UART1_RX, -1, -1};
+
+/* Points out all DMA memcpy channels. */
+static int dma_memcpy_channels[] = {
+ U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_8, -1, -1};
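A minimal sketch of how the pair syntax could be expanded, assuming the init code walks the list two entries at a time until it hits -1; the pick_channels() name and its callback are hypothetical, for illustration only:

	/* Hypothetical expansion of a [A1, B1, A2, B2, ..., -1, -1] list */
	static void pick_channels(const int *pairs, void (*use)(int chan))
	{
		int i, ch;

		for (i = 0; pairs[i] != -1; i += 2)
			for (ch = pairs[i]; ch <= pairs[i + 1]; ch++)
				use(ch);
	}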
+
+#define flags_memcpy_config (COH901318_CX_CFG_CH_DISABLE | \
+ COH901318_CX_CFG_RM_MEMORY_TO_MEMORY | \
+ COH901318_CX_CFG_LCR_DISABLE | \
+ COH901318_CX_CFG_TC_IRQ_ENABLE | \
+ COH901318_CX_CFG_BE_IRQ_ENABLE)
+#define flags_memcpy_lli_chained (COH901318_CX_CTRL_TC_ENABLE | \
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_MASTER_MODE_M1RW | \
+ COH901318_CX_CTRL_TCP_DISABLE | \
+ COH901318_CX_CTRL_TC_IRQ_DISABLE | \
+ COH901318_CX_CTRL_HSP_DISABLE | \
+ COH901318_CX_CTRL_HSS_DISABLE | \
+ COH901318_CX_CTRL_DDMA_LEGACY | \
+ COH901318_CX_CTRL_PRDD_SOURCE)
+#define flags_memcpy_lli (COH901318_CX_CTRL_TC_ENABLE | \
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_MASTER_MODE_M1RW | \
+ COH901318_CX_CTRL_TCP_DISABLE | \
+ COH901318_CX_CTRL_TC_IRQ_DISABLE | \
+ COH901318_CX_CTRL_HSP_DISABLE | \
+ COH901318_CX_CTRL_HSS_DISABLE | \
+ COH901318_CX_CTRL_DDMA_LEGACY | \
+ COH901318_CX_CTRL_PRDD_SOURCE)
+#define flags_memcpy_lli_last (COH901318_CX_CTRL_TC_ENABLE | \
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_MASTER_MODE_M1RW | \
+ COH901318_CX_CTRL_TCP_DISABLE | \
+ COH901318_CX_CTRL_TC_IRQ_ENABLE | \
+ COH901318_CX_CTRL_HSP_DISABLE | \
+ COH901318_CX_CTRL_HSS_DISABLE | \
+ COH901318_CX_CTRL_DDMA_LEGACY | \
+ COH901318_CX_CTRL_PRDD_SOURCE)
+
+const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
+ {
+ .number = U300_DMA_MSL_TX_0,
+ .name = "MSL TX 0",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_TX_1,
+ .name = "MSL TX 1",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_MSL_TX_2,
+ .name = "MSL TX 2",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .desc_nbr_max = 10,
+ },
+ {
+ .number = U300_DMA_MSL_TX_3,
+ .name = "MSL TX 3",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_MSL_TX_4,
+ .name = "MSL TX 4",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_MSL_TX_5,
+ .name = "MSL TX 5",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_TX_6,
+ .name = "MSL TX 6",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_RX_0,
+ .name = "MSL RX 0",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_RX_1,
+ .name = "MSL RX 1",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_2,
+ .name = "MSL RX 2",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_3,
+ .name = "MSL RX 3",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_4,
+ .name = "MSL RX 4",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_5,
+ .name = "MSL RX 5",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_6,
+ .name = "MSL RX 6",
+ .priority_high = 0,
+ },
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
+ {
+ .number = U300_DMA_MMCSD_RX_TX,
+ .name = "MMCSD RX TX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+
+ },
+ {
+ .number = U300_DMA_MSPRO_TX,
+ .name = "MSPRO TX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSPRO_RX,
+ .name = "MSPRO RX",
+ .priority_high = 0,
+ },
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
+ {
+ .number = U300_DMA_UART0_TX,
+ .name = "UART0 TX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ },
+ {
+ .number = U300_DMA_UART0_RX,
+ .name = "UART0 RX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ },
+ {
+ .number = U300_DMA_APEX_TX,
+ .name = "APEX TX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_APEX_RX,
+ .name = "APEX RX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_PCM_I2S0_TX,
+ .name = "PCM I2S0 TX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_PCM_I2S0_RX,
+ .name = "PCM I2S0 RX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_PCM_I2S1_TX,
+ .name = "PCM I2S1 TX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_PCM_I2S1_RX,
+ .name = "PCM I2S1 RX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_XGAM_CDI,
+ .name = "XGAM CDI",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_XGAM_PDI,
+ .name = "XGAM PDI",
+ .priority_high = 0,
+ },
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
+ {
+ .number = U300_DMA_SPI_TX,
+ .name = "SPI TX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ },
+ {
+ .number = U300_DMA_SPI_RX,
+ .name = "SPI RX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_0,
+ .name = "GENERAL 00",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_1,
+ .name = "GENERAL 01",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_2,
+ .name = "GENERAL 02",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_3,
+ .name = "GENERAL 03",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_4,
+ .name = "GENERAL 04",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_5,
+ .name = "GENERAL 05",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_6,
+ .name = "GENERAL 06",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_7,
+ .name = "GENERAL 07",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_8,
+ .name = "GENERAL 08",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_UART1_TX,
+ .name = "UART1 TX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_UART1_RX,
+ .name = "UART1 RX",
+ .priority_high = 0,
+ }
+};
+
#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
#ifdef VERBOSE_DEBUG
@@ -54,7 +1284,6 @@ struct coh901318_base {
struct dma_device dma_slave;
struct dma_device dma_memcpy;
struct coh901318_chan *chans;
- struct coh901318_platform *platform;
};
struct coh901318_chan {
@@ -75,8 +1304,8 @@ struct coh901318_chan {
unsigned long nbr_active_done;
unsigned long busy;
- u32 runtime_addr;
- u32 runtime_ctrl;
+ u32 addr;
+ u32 ctrl;
struct coh901318_base *base;
};
@@ -122,7 +1351,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
- for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
+ for (i = 0; i < U300_DMA_CHANNELS; i++)
if (started_channels & (1 << i))
tmp += sprintf(tmp, "channel %d\n", i);
@@ -187,25 +1416,16 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
return container_of(chan, struct coh901318_chan, chan);
}
-static inline dma_addr_t
-cohc_dev_addr(struct coh901318_chan *cohc)
-{
- /* Runtime supplied address will take precedence */
- if (cohc->runtime_addr)
- return cohc->runtime_addr;
- return cohc->base->platform->chan_conf[cohc->id].dev_addr;
-}
-
static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc)
{
- return &cohc->base->platform->chan_conf[cohc->id].param;
+ return &chan_config[cohc->id].param;
}
static inline const struct coh_dma_channel *
cohc_chan_conf(struct coh901318_chan *cohc)
{
- return &cohc->base->platform->chan_conf[cohc->id];
+ return &chan_config[cohc->id];
}
static void enable_powersave(struct coh901318_chan *cohc)
@@ -217,12 +1437,6 @@ static void enable_powersave(struct coh901318_chan *cohc)
pm->started_channels &= ~(1ULL << cohc->id);
- if (!pm->started_channels) {
- /* DMA no longer intends to access memory */
- cohc->base->platform->access_memory_state(cohc->base->dev,
- false);
- }
-
spin_unlock_irqrestore(&pm->lock, flags);
}
static void disable_powersave(struct coh901318_chan *cohc)
@@ -232,12 +1446,6 @@ static void disable_powersave(struct coh901318_chan *cohc)
spin_lock_irqsave(&pm->lock, flags);
- if (!pm->started_channels) {
- /* DMA intends to access memory */
- cohc->base->platform->access_memory_state(cohc->base->dev,
- true);
- }
-
pm->started_channels |= (1ULL << cohc->id);
spin_unlock_irqrestore(&pm->lock, flags);
@@ -596,7 +1804,7 @@ static int coh901318_config(struct coh901318_chan *cohc,
if (param)
p = param;
else
- p = &cohc->base->platform->chan_conf[channel].param;
+ p = cohc_chan_param(cohc);
/* Clear any pending BE or TC interrupt */
if (channel < 32) {
@@ -1052,9 +2260,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
* sure the bits you set per peripheral channel are
* cleared in the default config from the platform.
*/
- ctrl_chained |= cohc->runtime_ctrl;
- ctrl_last |= cohc->runtime_ctrl;
- ctrl |= cohc->runtime_ctrl;
+ ctrl_chained |= cohc->ctrl;
+ ctrl_last |= cohc->ctrl;
+ ctrl |= cohc->ctrl;
if (direction == DMA_MEM_TO_DEV) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
@@ -1103,7 +2311,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* initiate allocated lli list */
ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
- cohc_dev_addr(cohc),
+ cohc->addr,
ctrl_chained,
ctrl,
ctrl_last,
@@ -1147,7 +2355,9 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- /* FIXME: should be conditional on ret != DMA_SUCCESS? */
+ if (ret == DMA_SUCCESS)
+ return ret;
+
dma_set_residue(txstate, coh901318_get_bytes_left(chan));
if (ret == DMA_IN_PROGRESS && cohc->stopped)
@@ -1244,7 +2454,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
dma_addr_t addr;
enum dma_slave_buswidth addr_width;
u32 maxburst;
- u32 runtime_ctrl = 0;
+ u32 ctrl = 0;
int i = 0;
/* We only support mem to per or per to mem transfers */
@@ -1265,7 +2475,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
addr_width);
switch (addr_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
- runtime_ctrl |=
+ ctrl |=
COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS;
@@ -1277,7 +2487,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
- runtime_ctrl |=
+ ctrl |=
COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS;
@@ -1290,7 +2500,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
break;
case DMA_SLAVE_BUSWIDTH_4_BYTES:
/* Direction doesn't matter here, it's 32/32 bits */
- runtime_ctrl |=
+ ctrl |=
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS;
@@ -1307,13 +2517,13 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
return;
}
- runtime_ctrl |= burst_sizes[i].reg;
+ ctrl |= burst_sizes[i].reg;
dev_dbg(COHC_2_DEV(cohc),
"selected burst size %d bytes for address width %d bytes, maxburst %d\n",
burst_sizes[i].burst_8bit, addr_width, maxburst);
- cohc->runtime_addr = addr;
- cohc->runtime_ctrl = runtime_ctrl;
+ cohc->addr = addr;
+ cohc->ctrl = ctrl;
}
static int
@@ -1431,7 +2641,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
static int __init coh901318_probe(struct platform_device *pdev)
{
int err = 0;
- struct coh901318_platform *pdata;
struct coh901318_base *base;
int irq;
struct resource *io;
@@ -1447,13 +2656,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
pdev->dev.driver->name) == NULL)
return -ENOMEM;
- pdata = pdev->dev.platform_data;
- if (!pdata)
- return -ENODEV;
-
base = devm_kzalloc(&pdev->dev,
ALIGN(sizeof(struct coh901318_base), 4) +
- pdata->max_channels *
+ U300_DMA_CHANNELS *
sizeof(struct coh901318_chan),
GFP_KERNEL);
if (!base)
@@ -1466,7 +2671,6 @@ static int __init coh901318_probe(struct platform_device *pdev)
return -ENOMEM;
base->dev = &pdev->dev;
- base->platform = pdata;
spin_lock_init(&base->pm.lock);
base->pm.started_channels = 0;
@@ -1488,7 +2692,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
return err;
/* init channels for device transfers */
- coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
+ coh901318_base_init(&base->dma_slave, dma_slave_channels,
base);
dma_cap_zero(base->dma_slave.cap_mask);
@@ -1508,7 +2712,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
goto err_register_slave;
/* init channels for memcpy */
- coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy,
+ coh901318_base_init(&base->dma_memcpy, dma_memcpy_channels,
base);
dma_cap_zero(base->dma_memcpy.cap_mask);
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318.h
index abff3714fdda..95ce1e2123ec 100644
--- a/drivers/dma/coh901318_lli.h
+++ b/drivers/dma/coh901318.h
@@ -1,16 +1,15 @@
/*
- * driver/dma/coh901318_lli.h
- *
- * Copyright (C) 2007-2009 ST-Ericsson
+ * Copyright (C) 2007-2013 ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Support functions for handling lli for coh901318
+ * DMA driver for COH 901 318
* Author: Per Friden <per.friden@stericsson.com>
*/
-#ifndef COH901318_LLI_H
-#define COH901318_LLI_H
+#ifndef COH901318_H
+#define COH901318_H
-#include <mach/coh901318.h>
+#define MAX_DMA_PACKET_SIZE_SHIFT 11
+#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT)
struct device;
@@ -24,7 +23,25 @@ struct coh901318_pool {
#endif
};
-struct device;
+/**
+ * struct coh901318_lli - linked list item for DMAC
+ * @control: control settings for DMAC
+ * @src_addr: transfer source address
+ * @dst_addr: transfer destination address
+ * @link_addr: physical address of the next lli
+ * @virt_link_addr: virtual address of next lli (only used by pool_free)
+ * @phy_this: physical address of current lli (only used by pool_free)
+ */
+struct coh901318_lli {
+ u32 control;
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ dma_addr_t link_addr;
+
+ void *virt_link_addr;
+ dma_addr_t phy_this;
+};
+
/**
* coh901318_pool_create() - Creates an dma pool for lli:s
* @pool: pool handle
@@ -121,4 +138,4 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
u32 ctrl, u32 ctrl_last,
enum dma_transfer_direction dir, u32 ctrl_irq_mask);
-#endif /* COH901318_LLI_H */
+#endif /* COH901318_H */
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 780e0429b38c..702112d547c8 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -11,9 +11,9 @@
#include <linux/memory.h>
#include <linux/gfp.h>
#include <linux/dmapool.h>
-#include <mach/coh901318.h>
+#include <linux/dmaengine.h>
-#include "coh901318_lli.h"
+#include "coh901318.h"
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
@@ -61,7 +61,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
dma_addr_t phy;
if (len == 0)
- goto err;
+ return NULL;
spin_lock(&pool->lock);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a815d44c70a4..b2728d6ba2fd 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -62,6 +62,7 @@
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/of_dma.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
@@ -266,7 +267,10 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
pr_err("%s: timeout!\n", __func__);
return DMA_ERROR;
}
- } while (status == DMA_IN_PROGRESS);
+ if (status != DMA_IN_PROGRESS)
+ break;
+ cpu_relax();
+ } while (1);
return status;
}
@@ -546,6 +550,21 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
+/**
+ * dma_request_slave_channel - try to allocate an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ */
+struct dma_chan *dma_request_slave_channel(struct device *dev, char *name)
+{
+ /* If device-tree is present get slave info from here */
+ if (dev->of_node)
+ return of_dma_request_slave_channel(dev->of_node, name);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_request_slave_channel);
+
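A hedged usage sketch for the new helper, assuming a client driver whose device-tree node names its channels; the "rx" channel name and the pdev variable are illustrative only:

	struct dma_chan *chan;

	chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!chan)
		/* no DT-described channel: fall back to PIO or another path */
		return -ENODEV;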
void dma_release_channel(struct dma_chan *chan)
{
mutex_lock(&dma_list_mutex);
@@ -667,18 +686,14 @@ static int get_dma_id(struct dma_device *device)
{
int rc;
- idr_retry:
- if (!idr_pre_get(&dma_idr, GFP_KERNEL))
- return -ENOMEM;
mutex_lock(&dma_list_mutex);
- rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
- mutex_unlock(&dma_list_mutex);
- if (rc == -EAGAIN)
- goto idr_retry;
- else if (rc != 0)
- return rc;
- return 0;
+ rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
+ if (rc >= 0)
+ device->dev_id = rc;
+
+ mutex_unlock(&dma_list_mutex);
+ return rc < 0 ? rc : 0;
}
/**
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 64b048d7fba7..a2c8904b63ea 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -242,6 +242,13 @@ static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}
+static unsigned int min_odd(unsigned int x, unsigned int y)
+{
+ unsigned int val = min(x, y);
+
+ return val % 2 ? val : val - 1;
+}
+
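min_odd() clamps the source count to the device's capability while keeping it odd (as the comments below note, an odd count is needed so that dst equals src in the self-test). A small worked example with illustrative numbers:

	/* min_odd(9, 8): min(9, 8) = 8 is even, so the result is 7 */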
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
@@ -262,6 +269,7 @@ static int dmatest_func(void *data)
struct dmatest_thread *thread = data;
struct dmatest_done done = { .wait = &done_wait };
struct dma_chan *chan;
+ struct dma_device *dev;
const char *thread_name;
unsigned int src_off, dst_off, len;
unsigned int error_count;
@@ -283,13 +291,16 @@ static int dmatest_func(void *data)
smp_rmb();
chan = thread->chan;
+ dev = chan->device;
if (thread->type == DMA_MEMCPY)
src_cnt = dst_cnt = 1;
else if (thread->type == DMA_XOR) {
- src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
+ /* force odd to ensure dst = src */
+ src_cnt = min_odd(xor_sources | 1, dev->max_xor);
dst_cnt = 1;
} else if (thread->type == DMA_PQ) {
- src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
+ /* force odd to ensure dst = src */
+ src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0));
dst_cnt = 2;
for (i = 0; i < src_cnt; i++)
pq_coefs[i] = 1;
@@ -327,7 +338,6 @@ static int dmatest_func(void *data)
while (!kthread_should_stop()
&& !(iterations && total_tests >= iterations)) {
- struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
@@ -526,7 +536,9 @@ err_srcs:
thread_name, total_tests, failed_tests, ret);
/* terminate all transfers on specified channels */
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ if (ret)
+ dmaengine_terminate_all(chan);
+
if (iterations > 0)
while (!kthread_should_stop()) {
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -551,7 +563,7 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
}
/* terminate all transfers on specified channels */
- dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(dtc->chan);
kfree(dtc);
}
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index b33d1f6e1333..c599558faeda 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1,6 +1,5 @@
/*
- * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
- * AVR32 systems.)
+ * Core driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2007-2008 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
@@ -9,16 +8,19 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -47,15 +49,32 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
return slave ? slave->src_master : 1;
}
+#define SRC_MASTER 0
+#define DST_MASTER 1
+
+static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
+{
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ struct dw_dma_slave *dws = chan->private;
+ unsigned int m;
+
+ if (master == SRC_MASTER)
+ m = dwc_get_sms(dws);
+ else
+ m = dwc_get_dms(dws);
+
+ return min_t(unsigned int, dw->nr_masters - 1, m);
+}
+
#define DWC_DEFAULT_CTLLO(_chan) ({ \
- struct dw_dma_slave *__slave = (_chan->private); \
struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
- int _dms = dwc_get_dms(__slave); \
- int _sms = dwc_get_sms(__slave); \
- u8 _smsize = __slave ? _sconfig->src_maxburst : \
+ bool _is_slave = is_slave_direction(_dwc->direction); \
+ int _dms = dwc_get_master(_chan, DST_MASTER); \
+ int _sms = dwc_get_master(_chan, SRC_MASTER); \
+ u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
DW_DMA_MSIZE_16; \
- u8 _dmsize = __slave ? _sconfig->dst_maxburst : \
+ u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
DW_DMA_MSIZE_16; \
\
(DWC_CTLL_DST_MSIZE(_dmsize) \
@@ -73,15 +92,14 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
*/
#define NR_DESCS_PER_CHANNEL 64
-/*----------------------------------------------------------------------*/
+static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
+{
+ struct dw_dma *dw = to_dw_dma(chan->device);
-/*
- * Because we're not relying on writeback from the controller (it may not
- * even be configured into the core!) we don't need to use dma_pool. These
- * descriptors -- and associated data -- are cacheable. We do need to make
- * sure their dcache entries are written back before handing them off to
- * the controller, though.
- */
+ return dw->data_width[dwc_get_master(chan, master)];
+}
+
+/*----------------------------------------------------------------------*/
static struct device *chan2dev(struct dma_chan *chan)
{
@@ -94,7 +112,7 @@ static struct device *chan2parent(struct dma_chan *chan)
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
- return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
+ return to_dw_desc(dwc->active_list.next);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
@@ -121,19 +139,6 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
return ret;
}
-static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
-{
- struct dw_desc *child;
-
- list_for_each_entry(child, &desc->tx_list, desc_node)
- dma_sync_single_for_cpu(chan2parent(&dwc->chan),
- child->txd.phys, sizeof(child->lli),
- DMA_TO_DEVICE);
- dma_sync_single_for_cpu(chan2parent(&dwc->chan),
- desc->txd.phys, sizeof(desc->lli),
- DMA_TO_DEVICE);
-}
-
/*
* Move a descriptor, including any children, to the free list.
* `desc' must not be on any lists.
@@ -145,8 +150,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
if (desc) {
struct dw_desc *child;
- dwc_sync_desc_for_cpu(dwc, desc);
-
spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dwc->chan),
@@ -169,7 +172,13 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
if (dwc->initialized == true)
return;
- if (dws) {
+ if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) {
+ /* autoconfigure based on request line from DT */
+ if (dwc->direction == DMA_MEM_TO_DEV)
+ cfghi = DWC_CFGH_DST_PER(dwc->request_line);
+ else if (dwc->direction == DMA_DEV_TO_MEM)
+ cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
+ } else if (dws) {
/*
* We need controller-specific data to set up slave
* transfers.
@@ -179,9 +188,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
cfghi = dws->cfg_hi;
cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
} else {
- if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+ if (dwc->direction == DMA_MEM_TO_DEV)
cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
- else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
+ else if (dwc->direction == DMA_DEV_TO_MEM)
cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
}
@@ -223,7 +232,6 @@ static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
channel_readl(dwc, CTL_LO));
}
-
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
channel_clear_bit(dw, CH_EN, dwc->mask);
@@ -249,6 +257,9 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
channel_writel(dwc, CTL_LO, ctllo);
channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
channel_set_bit(dw, CH_EN, dwc->mask);
+
+ /* Move pointer to next descriptor */
+ dwc->tx_node_active = dwc->tx_node_active->next;
}
/* Called with dwc->lock held and bh disabled */
@@ -279,9 +290,10 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
dwc_initialize(dwc);
- dwc->tx_list = &first->tx_list;
- dwc->tx_node_active = first->tx_list.next;
+ dwc->residue = first->total_len;
+ dwc->tx_node_active = &first->tx_list;
+ /* Submit first block */
dwc_do_single_block(dwc, first);
return;
@@ -317,8 +329,6 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
param = txd->callback_param;
}
- dwc_sync_desc_for_cpu(dwc, desc);
-
/* async_tx_ack */
list_for_each_entry(child, &desc->tx_list, desc_node)
async_tx_ack(&child->txd);
@@ -327,29 +337,29 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
list_splice_init(&desc->tx_list, &dwc->free_list);
list_move(&desc->desc_node, &dwc->free_list);
- if (!dwc->chan.private) {
+ if (!is_slave_direction(dwc->direction)) {
struct device *parent = chan2parent(&dwc->chan);
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.dar,
- desc->len, DMA_FROM_DEVICE);
+ desc->total_len, DMA_FROM_DEVICE);
else
dma_unmap_page(parent, desc->lli.dar,
- desc->len, DMA_FROM_DEVICE);
+ desc->total_len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.sar,
- desc->len, DMA_TO_DEVICE);
+ desc->total_len, DMA_TO_DEVICE);
else
dma_unmap_page(parent, desc->lli.sar,
- desc->len, DMA_TO_DEVICE);
+ desc->total_len, DMA_TO_DEVICE);
}
}
spin_unlock_irqrestore(&dwc->lock, flags);
- if (callback_required && callback)
+ if (callback)
callback(param);
}
@@ -384,6 +394,15 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, desc, true);
}
+/* Returns how many bytes were already received from source */
+static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
+{
+ u32 ctlhi = channel_readl(dwc, CTL_HI);
+ u32 ctllo = channel_readl(dwc, CTL_LO);
+
+ return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+}
+
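Editor's note, not part of the patch: dwc_get_sent() works because CTL_HI.BLOCK_TS reports how many source transfers the block has completed so far, while bits 6:4 of CTL_LO hold the source transfer width as a log2 byte count. A minimal illustrative helper (the example_* name is invented here) restating that arithmetic:

#include <linux/types.h>

static inline u32 example_sent_bytes(u32 block_ts, u32 src_width_log2)
{
	/* transfers completed so far, scaled to bytes per transfer */
	return block_ts << src_width_log2;
}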
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
dma_addr_t llp;
@@ -399,6 +418,39 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
if (status_xfer & dwc->mask) {
/* Everything we've submitted is done */
dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+ struct list_head *head, *active = dwc->tx_node_active;
+
+ /*
+ * We are inside first active descriptor.
+ * Otherwise something is really wrong.
+ */
+ desc = dwc_first_active(dwc);
+
+ head = &desc->tx_list;
+ if (active != head) {
+ /* Update desc to reflect last sent one */
+ if (active != head->next)
+ desc = to_dw_desc(active->prev);
+
+ dwc->residue -= desc->len;
+
+ child = to_dw_desc(active);
+
+ /* Submit next block */
+ dwc_do_single_block(dwc, child);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+
+ /* We are done here */
+ clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+ }
+
+ dwc->residue = 0;
+
spin_unlock_irqrestore(&dwc->lock, flags);
dwc_complete_all(dw, dwc);
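Editor's note on the soft-LLP branch just above: channels whose hardware cannot follow a linked list (nollp) emulate multi-block transfers by reprogramming one block per transfer-complete interrupt, walking desc->tx_list until tx_node_active wraps back to the list head. A hedged sketch of that walk, with an invented example_* helper standing in for the real interrupt path:

#include <linux/list.h>

/* Illustrative only: one step of the software-emulated LLP walk.  The
 * real code submits to_dw_desc(*active) to the hardware before moving
 * the pointer on; completion is detected when the node is the head. */
static bool example_soft_llp_step(struct list_head *head,
				  struct list_head **active)
{
	if (*active == head)
		return true;		/* every block has been submitted */

	*active = (*active)->next;	/* next block becomes active */
	return false;
}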
@@ -406,6 +458,13 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
}
if (list_empty(&dwc->active_list)) {
+ dwc->residue = 0;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+ dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
@@ -414,6 +473,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
(unsigned long long)llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+ /* initial residue value */
+ dwc->residue = desc->total_len;
+
/* check first descriptors addr */
if (desc->txd.phys == llp) {
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -423,16 +485,21 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* check first descriptors llp */
if (desc->lli.llp == llp) {
/* This one is currently in progress */
+ dwc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
- list_for_each_entry(child, &desc->tx_list, desc_node)
+ dwc->residue -= desc->len;
+ list_for_each_entry(child, &desc->tx_list, desc_node) {
if (child->lli.llp == llp) {
/* Currently in progress */
+ dwc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
+ dwc->residue -= child->len;
+ }
/*
* No descriptors so far seem to be in progress, i.e.
@@ -458,9 +525,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
- dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
- " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
- lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+ dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+ lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -488,16 +554,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_dostart(dwc, dwc_first_active(dwc));
/*
- * KERN_CRITICAL may seem harsh, but since this only happens
+ * WARN may seem harsh, but since this only happens
* when someone submits a bad physical address in a
* descriptor, we should consider ourselves lucky that the
* controller flagged an error instead of scribbling over
* random memory locations.
*/
- dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
- "Bad descriptor submitted for DMA!\n");
- dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
- " cookie: %d\n", bad_desc->txd.cookie);
+ dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
+ " cookie: %d\n", bad_desc->txd.cookie);
dwc_dump_lli(dwc, &bad_desc->lli);
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
dwc_dump_lli(dwc, &child->lli);
@@ -598,36 +662,8 @@ static void dw_dma_tasklet(unsigned long data)
dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
- else if (status_xfer & (1 << i)) {
- unsigned long flags;
-
- spin_lock_irqsave(&dwc->lock, flags);
- if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
- if (dwc->tx_node_active != dwc->tx_list) {
- struct dw_desc *desc =
- list_entry(dwc->tx_node_active,
- struct dw_desc,
- desc_node);
-
- dma_writel(dw, CLEAR.XFER, dwc->mask);
-
- /* move pointer to next descriptor */
- dwc->tx_node_active =
- dwc->tx_node_active->next;
-
- dwc_do_single_block(dwc, desc);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
- continue;
- } else {
- /* we are done here */
- clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
- }
- }
- spin_unlock_irqrestore(&dwc->lock, flags);
-
+ else if (status_xfer & (1 << i))
dwc_scan_descriptors(dw, dwc);
- }
}
/*
@@ -709,7 +745,6 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma_slave *dws = chan->private;
struct dw_desc *desc;
struct dw_desc *first;
struct dw_desc *prev;
@@ -730,8 +765,10 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
return NULL;
}
- data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
- dwc->dw->data_width[dwc_get_dms(dws)]);
+ dwc->direction = DMA_MEM_TO_MEM;
+
+ data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
+ dwc_get_data_width(chan, DST_MASTER));
src_width = dst_width = min_t(unsigned int, data_width,
dwc_fast_fls(src | dest | len));
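Editor's note: the width selection above amounts to "the widest power-of-two that divides src, dest and len, capped by the narrower of the two masters". A self-contained restatement with an invented example_* name (the driver's own dwc_fast_fls() caps the result differently, so treat this as a sketch):

#include <linux/bitops.h>
#include <linux/kernel.h>

/* Illustrative only: log2 of the widest transfer width (in bytes) that
 * keeps source, destination and length aligned, limited by max_width. */
static unsigned int example_pick_width(unsigned long src, unsigned long dst,
				       unsigned long len,
				       unsigned int max_width)
{
	unsigned long merged = src | dst | len;

	if (!merged)
		return max_width;

	return min_t(unsigned int, __ffs(merged), max_width);
}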
@@ -756,32 +793,25 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
desc->lli.dar = dest + offset;
desc->lli.ctllo = ctllo;
desc->lli.ctlhi = xfer_count;
+ desc->len = xfer_count << src_width;
if (!first) {
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys, sizeof(prev->lli),
- DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
&first->tx_list);
}
prev = desc;
}
-
if (flags & DMA_PREP_INTERRUPT)
/* Trigger interrupt after last block */
prev->lli.ctllo |= DWC_CTLL_INT_EN;
prev->lli.llp = 0;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys, sizeof(prev->lli),
- DMA_TO_DEVICE);
-
first->txd.flags = flags;
- first->len = len;
+ first->total_len = len;
return &first->txd;
@@ -796,7 +826,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned long flags, void *context)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma_slave *dws = chan->private;
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
struct dw_desc *prev;
struct dw_desc *first;
@@ -811,9 +840,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_vdbg(chan2dev(chan), "%s\n", __func__);
- if (unlikely(!dws || !sg_len))
+ if (unlikely(!is_slave_direction(direction) || !sg_len))
return NULL;
+ dwc->direction = direction;
+
prev = first = NULL;
switch (direction) {
@@ -828,7 +859,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
DWC_CTLL_FC(DW_DMA_FC_D_M2P);
- data_width = dwc->dw->data_width[dwc_get_sms(dws)];
+ data_width = dwc_get_data_width(chan, SRC_MASTER);
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
@@ -861,15 +892,12 @@ slave_sg_todev_fill_desc:
}
desc->lli.ctlhi = dlen >> mem_width;
+ desc->len = dlen;
if (!first) {
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys,
- sizeof(prev->lli),
- DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
&first->tx_list);
}
@@ -891,7 +919,7 @@ slave_sg_todev_fill_desc:
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
DWC_CTLL_FC(DW_DMA_FC_D_P2M);
- data_width = dwc->dw->data_width[dwc_get_dms(dws)];
+ data_width = dwc_get_data_width(chan, DST_MASTER);
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
@@ -923,15 +951,12 @@ slave_sg_fromdev_fill_desc:
len = 0;
}
desc->lli.ctlhi = dlen >> reg_width;
+ desc->len = dlen;
if (!first) {
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys,
- sizeof(prev->lli),
- DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
&first->tx_list);
}
@@ -951,11 +976,7 @@ slave_sg_fromdev_fill_desc:
prev->lli.ctllo |= DWC_CTLL_INT_EN;
prev->lli.llp = 0;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys, sizeof(prev->lli),
- DMA_TO_DEVICE);
-
- first->len = total_len;
+ first->total_len = total_len;
return &first->txd;
@@ -985,11 +1006,12 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- /* Check if it is chan is configured for slave transfers */
- if (!chan->private)
+ /* Check if chan will be configured for slave transfers */
+ if (!is_slave_direction(sconfig->direction))
return -EINVAL;
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+ dwc->direction = sconfig->direction;
convert_burst(&dwc->dma_sconfig.src_maxburst);
convert_burst(&dwc->dma_sconfig.dst_maxburst);
@@ -997,6 +1019,26 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
return 0;
}
+static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+ while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+ cpu_relax();
+
+ dwc->paused = true;
+}
+
+static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+
+ dwc->paused = false;
+}
+
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
@@ -1004,18 +1046,13 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
unsigned long flags;
- u32 cfglo;
LIST_HEAD(list);
if (cmd == DMA_PAUSE) {
spin_lock_irqsave(&dwc->lock, flags);
- cfglo = channel_readl(dwc, CFG_LO);
- channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
- while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
- cpu_relax();
+ dwc_chan_pause(dwc);
- dwc->paused = true;
spin_unlock_irqrestore(&dwc->lock, flags);
} else if (cmd == DMA_RESUME) {
if (!dwc->paused)
@@ -1023,9 +1060,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
spin_lock_irqsave(&dwc->lock, flags);
- cfglo = channel_readl(dwc, CFG_LO);
- channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
- dwc->paused = false;
+ dwc_chan_resume(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
} else if (cmd == DMA_TERMINATE_ALL) {
@@ -1035,7 +1070,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
dwc_chan_disable(dw, dwc);
- dwc->paused = false;
+ dwc_chan_resume(dwc);
/* active_list entries will end up before queued entries */
list_splice_init(&dwc->queue, &list);
@@ -1055,6 +1090,21 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return 0;
}
+static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
+{
+ unsigned long flags;
+ u32 residue;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ residue = dwc->residue;
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
+ residue -= dwc_get_sent(dwc);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return residue;
+}
+
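Editor's note: with dwc_get_residue() feeding dwc_tx_status() below, clients see a byte-accurate residue rather than the whole descriptor length. A hypothetical consumer-side snippet (chan and cookie are placeholders from the usual prep/submit sequence, not from this patch):

#include <linux/dmaengine.h>
#include <linux/printk.h>

static void example_report_progress(struct dma_chan *chan,
				    dma_cookie_t cookie)
{
	struct dma_tx_state state;

	if (dmaengine_tx_status(chan, cookie, &state) != DMA_SUCCESS)
		pr_info("transfer pending, %u bytes left\n", state.residue);
}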
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
@@ -1071,7 +1121,7 @@ dwc_tx_status(struct dma_chan *chan,
}
if (ret != DMA_SUCCESS)
- dma_set_residue(txstate, dwc_first_active(dwc)->len);
+ dma_set_residue(txstate, dwc_get_residue(dwc));
if (dwc->paused)
return DMA_PAUSED;
@@ -1114,22 +1164,22 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
i = dwc->descs_allocated;
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
+ dma_addr_t phys;
+
spin_unlock_irqrestore(&dwc->lock, flags);
- desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
- if (!desc) {
- dev_info(chan2dev(chan),
- "only allocated %d descriptors\n", i);
- spin_lock_irqsave(&dwc->lock, flags);
- break;
- }
+ desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
+ if (!desc)
+ goto err_desc_alloc;
+
+ memset(desc, 0, sizeof(struct dw_desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = dwc_tx_submit;
desc->txd.flags = DMA_CTRL_ACK;
- desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
- sizeof(desc->lli), DMA_TO_DEVICE);
+ desc->txd.phys = phys;
+
dwc_desc_put(dwc, desc);
spin_lock_irqsave(&dwc->lock, flags);
@@ -1141,6 +1191,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
return i;
+
+err_desc_alloc:
+ dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
+
+ return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
@@ -1172,14 +1227,71 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
- dma_unmap_single(chan2parent(chan), desc->txd.phys,
- sizeof(desc->lli), DMA_TO_DEVICE);
- kfree(desc);
+ dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
}
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
+struct dw_dma_filter_args {
+ struct dw_dma *dw;
+ unsigned int req;
+ unsigned int src;
+ unsigned int dst;
+};
+
+static bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ struct dw_dma_filter_args *fargs = param;
+ struct dw_dma_slave *dws = &dwc->slave;
+
+ /* ensure the device matches our channel */
+ if (chan->device != &fargs->dw->dma)
+ return false;
+
+ dws->dma_dev = dw->dma.dev;
+ dws->cfg_hi = ~0;
+ dws->cfg_lo = ~0;
+ dws->src_master = fargs->src;
+ dws->dst_master = fargs->dst;
+
+ dwc->request_line = fargs->req;
+
+ chan->private = dws;
+
+ return true;
+}
+
+static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dw_dma *dw = ofdma->of_dma_data;
+ struct dw_dma_filter_args fargs = {
+ .dw = dw,
+ };
+ dma_cap_mask_t cap;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ fargs.req = be32_to_cpup(dma_spec->args+0);
+ fargs.src = be32_to_cpup(dma_spec->args+1);
+ fargs.dst = be32_to_cpup(dma_spec->args+2);
+
+ if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
+ fargs.src >= dw->nr_masters ||
+ fargs.dst >= dw->nr_masters))
+ return NULL;
+
+ dma_cap_zero(cap);
+ dma_cap_set(DMA_SLAVE, cap);
+
+ /* TODO: there should be a simpler way to do this */
+ return dma_request_channel(cap, dw_dma_generic_filter, &fargs);
+}
+
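Editor's note: dw_dma_xlate() above consumes exactly three cells from a consumer's dmas property (request line, source master, destination master) and hands them to the filter, which stores them in the channel before it is handed out. A hypothetical consumer lookup, assuming a node such as dmas = <&dmahost 1 0 1>; dma-names = "rx"; and that the generic dma_request_slave_channel() helper is available in this tree:

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

/* Hypothetical consumer: the generic DT helpers resolve the phandle and
 * end up in dw_dma_xlate() for this controller. */
static struct dma_chan *example_request_rx_chan(struct platform_device *pdev)
{
	return dma_request_slave_channel(&pdev->dev, "rx");
}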
/* --------------------- Cyclic DMA API extensions -------------------- */
/**
@@ -1299,6 +1411,11 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
retval = ERR_PTR(-EINVAL);
+ if (unlikely(!is_slave_direction(direction)))
+ goto out_err;
+
+ dwc->direction = direction;
+
if (direction == DMA_MEM_TO_DEV)
reg_width = __ffs(sconfig->dst_addr_width);
else
@@ -1313,8 +1430,6 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto out_err;
- if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
- goto out_err;
retval = ERR_PTR(-ENOMEM);
@@ -1372,20 +1487,14 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
desc->lli.ctlhi = (period_len >> reg_width);
cdesc->desc[i] = desc;
- if (last) {
+ if (last)
last->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan2parent(chan),
- last->txd.phys, sizeof(last->lli),
- DMA_TO_DEVICE);
- }
last = desc;
}
/* lets make a cyclic list */
last->lli.llp = cdesc->desc[0]->txd.phys;
- dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
- sizeof(last->lli), DMA_TO_DEVICE);
dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
"period %zu periods %d\n", (unsigned long long)buf_addr,
@@ -1463,6 +1572,60 @@ static void dw_dma_off(struct dw_dma *dw)
dw->chan[i].initialized = false;
}
+#ifdef CONFIG_OF
+static struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct dw_dma_platform_data *pdata;
+ u32 tmp, arr[4];
+
+ if (!np) {
+ dev_err(&pdev->dev, "Missing DT data\n");
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
+ return NULL;
+
+ if (of_property_read_bool(np, "is_private"))
+ pdata->is_private = true;
+
+ if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
+ pdata->chan_allocation_order = (unsigned char)tmp;
+
+ if (!of_property_read_u32(np, "chan_priority", &tmp))
+ pdata->chan_priority = tmp;
+
+ if (!of_property_read_u32(np, "block_size", &tmp))
+ pdata->block_size = tmp;
+
+ if (!of_property_read_u32(np, "dma-masters", &tmp)) {
+ if (tmp > 4)
+ return NULL;
+
+ pdata->nr_masters = tmp;
+ }
+
+ if (!of_property_read_u32_array(np, "data_width", arr,
+ pdata->nr_masters))
+ for (tmp = 0; tmp < pdata->nr_masters; tmp++)
+ pdata->data_width[tmp] = arr[tmp];
+
+ return pdata;
+}
+#else
+static inline struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif
+
static int dw_probe(struct platform_device *pdev)
{
struct dw_dma_platform_data *pdata;
@@ -1478,10 +1641,6 @@ static int dw_probe(struct platform_device *pdev)
int err;
int i;
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
- return -EINVAL;
-
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io)
return -EINVAL;
@@ -1494,9 +1653,33 @@ static int dw_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
+ /* Apply default dma_mask if needed */
+ if (!pdev->dev.dma_mask) {
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ }
+
dw_params = dma_read_byaddr(regs, DW_PARAMS);
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
+ dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata)
+ pdata = dw_dma_parse_dt(pdev);
+
+ if (!pdata && autocfg) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /* Fill platform data with the default values */
+ pdata->is_private = true;
+ pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
+ pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
+ } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
+ return -EINVAL;
+
if (autocfg)
nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
else
@@ -1544,6 +1727,14 @@ static int dw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dw);
+ /* create a pool of consistent memory blocks for hardware descriptors */
+ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
+ sizeof(struct dw_desc), 4, 0);
+ if (!dw->desc_pool) {
+ dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
+ return -ENOMEM;
+ }
+
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
INIT_LIST_HEAD(&dw->dma.channels);
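Editor's note: the dmam_pool_create() call above is why every dma_map_single()/dma_sync_single_for_device() on desc->lli disappears elsewhere in this patch; pool memory is coherent and already comes with its DMA address. A hedged sketch of the new pairing (example_* names invented):

#include <linux/dmapool.h>
#include <linux/gfp.h>

/* Illustrative only: descriptors come from, and return to, the pool. */
static void *example_desc_alloc(struct dma_pool *pool, dma_addr_t *phys)
{
	return dma_pool_alloc(pool, GFP_ATOMIC, phys);
}

static void example_desc_free(struct dma_pool *pool, void *desc,
			      dma_addr_t phys)
{
	dma_pool_free(pool, desc, phys);
}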
@@ -1575,7 +1766,7 @@ static int dw_probe(struct platform_device *pdev)
channel_clear_bit(dw, CH_EN, dwc->mask);
- dwc->dw = dw;
+ dwc->direction = DMA_TRANS_NONE;
/* hardware configuration */
if (autocfg) {
@@ -1584,6 +1775,9 @@ static int dw_probe(struct platform_device *pdev)
dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
DWC_PARAMS);
+ dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
+ dwc_params);
+
/* Decode maximum block size for given channel. The
* stored 4 bit value represents blocks from 0x00 for 3
* up to 0x0a for 4095. */
@@ -1627,11 +1821,19 @@ static int dw_probe(struct platform_device *pdev)
dma_writel(dw, CFG, DW_CFG_DMA_EN);
- printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
- dev_name(&pdev->dev), nr_channels);
+ dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
+ nr_channels);
dma_async_device_register(&dw->dma);
+ if (pdev->dev.of_node) {
+ err = of_dma_controller_register(pdev->dev.of_node,
+ dw_dma_xlate, dw);
+ if (err && err != -ENODEV)
+ dev_err(&pdev->dev,
+ "could not register of_dma_controller\n");
+ }
+
return 0;
}
@@ -1640,6 +1842,8 @@ static int dw_remove(struct platform_device *pdev)
struct dw_dma *dw = platform_get_drvdata(pdev);
struct dw_dma_chan *dwc, *_dwc;
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
dw_dma_off(dw);
dma_async_device_unregister(&dw->dma);
@@ -1658,7 +1862,7 @@ static void dw_shutdown(struct platform_device *pdev)
{
struct dw_dma *dw = platform_get_drvdata(pdev);
- dw_dma_off(platform_get_drvdata(pdev));
+ dw_dma_off(dw);
clk_disable_unprepare(dw->clk);
}
@@ -1667,7 +1871,7 @@ static int dw_suspend_noirq(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma *dw = platform_get_drvdata(pdev);
- dw_dma_off(platform_get_drvdata(pdev));
+ dw_dma_off(dw);
clk_disable_unprepare(dw->clk);
return 0;
@@ -1680,6 +1884,7 @@ static int dw_resume_noirq(struct device *dev)
clk_prepare_enable(dw->clk);
dma_writel(dw, CFG, DW_CFG_DMA_EN);
+
return 0;
}
@@ -1700,7 +1905,13 @@ static const struct of_device_id dw_dma_id_table[] = {
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif
+static const struct platform_device_id dw_dma_ids[] = {
+ { "INTL9C60", 0 },
+ { }
+};
+
static struct platform_driver dw_driver = {
+ .probe = dw_probe,
.remove = dw_remove,
.shutdown = dw_shutdown,
.driver = {
@@ -1708,11 +1919,12 @@ static struct platform_driver dw_driver = {
.pm = &dw_dev_pm_ops,
.of_match_table = of_match_ptr(dw_dma_id_table),
},
+ .id_table = dw_dma_ids,
};
static int __init dw_init(void)
{
- return platform_driver_probe(&dw_driver, dw_probe);
+ return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 88965597b7d0..cf0ce5c77d60 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -9,9 +9,11 @@
* published by the Free Software Foundation.
*/
+#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>
#define DW_DMA_MAX_NR_CHANNELS 8
+#define DW_DMA_MAX_NR_REQUESTS 16
/* flow controller */
enum dw_dma_fc {
@@ -184,15 +186,15 @@ enum dw_dmac_flags {
};
struct dw_dma_chan {
- struct dma_chan chan;
- void __iomem *ch_regs;
- u8 mask;
- u8 priority;
- bool paused;
- bool initialized;
+ struct dma_chan chan;
+ void __iomem *ch_regs;
+ u8 mask;
+ u8 priority;
+ enum dma_transfer_direction direction;
+ bool paused;
+ bool initialized;
/* software emulation of the LLP transfers */
- struct list_head *tx_list;
struct list_head *tx_node_active;
spinlock_t lock;
@@ -202,6 +204,7 @@ struct dw_dma_chan {
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
+ u32 residue;
struct dw_cyclic_desc *cdesc;
unsigned int descs_allocated;
@@ -209,12 +212,11 @@ struct dw_dma_chan {
/* hardware configuration */
unsigned int block_size;
bool nollp;
+ unsigned int request_line;
+ struct dw_dma_slave slave;
/* configuration passed via DMA_SLAVE_CONFIG */
struct dma_slave_config dma_sconfig;
-
- /* backlink to dw_dma */
- struct dw_dma *dw;
};
static inline struct dw_dma_chan_regs __iomem *
@@ -236,6 +238,7 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
struct dw_dma {
struct dma_device dma;
void __iomem *regs;
+ struct dma_pool *desc_pool;
struct tasklet_struct tasklet;
struct clk *clk;
@@ -293,8 +296,11 @@ struct dw_desc {
struct list_head tx_list;
struct dma_async_tx_descriptor txd;
size_t len;
+ size_t total_len;
};
+#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node)
+
static inline struct dw_desc *
txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
{
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index f424298f1ac5..cd7e3280fadd 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -69,9 +69,7 @@ struct edma_chan {
int ch_num;
bool alloced;
int slot[EDMA_MAX_SLOTS];
- dma_addr_t addr;
- int addr_width;
- int maxburst;
+ struct dma_slave_config cfg;
};
struct edma_cc {
@@ -178,29 +176,14 @@ static int edma_terminate_all(struct edma_chan *echan)
return 0;
}
-
static int edma_slave_config(struct edma_chan *echan,
- struct dma_slave_config *config)
+ struct dma_slave_config *cfg)
{
- if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) ||
- (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
+ if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL;
- if (config->direction == DMA_MEM_TO_DEV) {
- if (config->dst_addr)
- echan->addr = config->dst_addr;
- if (config->dst_addr_width)
- echan->addr_width = config->dst_addr_width;
- if (config->dst_maxburst)
- echan->maxburst = config->dst_maxburst;
- } else if (config->direction == DMA_DEV_TO_MEM) {
- if (config->src_addr)
- echan->addr = config->src_addr;
- if (config->src_addr_width)
- echan->addr_width = config->src_addr_width;
- if (config->src_maxburst)
- echan->maxburst = config->src_maxburst;
- }
+ memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
return 0;
}
@@ -235,6 +218,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
struct edma_desc *edesc;
+ dma_addr_t dev_addr;
+ enum dma_slave_buswidth dev_width;
+ u32 burst;
struct scatterlist *sg;
int i;
int acnt, bcnt, ccnt, src, dst, cidx;
@@ -243,7 +229,20 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
if (unlikely(!echan || !sgl || !sg_len))
return NULL;
- if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+ if (direction == DMA_DEV_TO_MEM) {
+ dev_addr = echan->cfg.src_addr;
+ dev_width = echan->cfg.src_addr_width;
+ burst = echan->cfg.src_maxburst;
+ } else if (direction == DMA_MEM_TO_DEV) {
+ dev_addr = echan->cfg.dst_addr;
+ dev_width = echan->cfg.dst_addr_width;
+ burst = echan->cfg.dst_maxburst;
+ } else {
+ dev_err(dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
dev_err(dev, "Undefined slave buswidth\n");
return NULL;
}
@@ -275,14 +274,14 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
}
}
- acnt = echan->addr_width;
+ acnt = dev_width;
/*
* If the maxburst is equal to the fifo width, use
* A-synced transfers. This allows for large contiguous
* buffer transfers using only one PaRAM set.
*/
- if (echan->maxburst == 1) {
+ if (burst == 1) {
edesc->absync = false;
ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
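Editor's note: the A-synced case here and the AB-synced case in the next hunk split a segment into acnt x bcnt x ccnt element counts. A hedged numeric sketch (numbers and the example_* helper are invented): a 32-bit FIFO gives acnt = 4; with maxburst = 8 and a 4096-byte segment the AB-synced counts come out as bcnt = 8, ccnt = 4096 / (4 * 8) = 128.

#include <linux/kernel.h>

/* Illustrative only: AB-synced PaRAM counts for one SG segment. */
static void example_ab_sync_counts(unsigned int seg_len, unsigned int acnt,
				   unsigned int burst)
{
	unsigned int bcnt = burst;
	unsigned int ccnt = seg_len / (acnt * bcnt);

	pr_debug("acnt=%u bcnt=%u ccnt=%u\n", acnt, bcnt, ccnt);
}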
@@ -302,7 +301,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
*/
} else {
edesc->absync = true;
- bcnt = echan->maxburst;
+ bcnt = burst;
ccnt = sg_dma_len(sg) / (acnt * bcnt);
if (ccnt > (SZ_64K - 1)) {
dev_err(dev, "Exceeded max SG segment size\n");
@@ -313,13 +312,13 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
if (direction == DMA_MEM_TO_DEV) {
src = sg_dma_address(sg);
- dst = echan->addr;
+ dst = dev_addr;
src_bidx = acnt;
src_cidx = cidx;
dst_bidx = 0;
dst_cidx = 0;
} else {
- src = echan->addr;
+ src = dev_addr;
dst = sg_dma_address(sg);
src_bidx = 0;
src_cidx = 0;
@@ -621,13 +620,11 @@ static struct platform_device *pdev0, *pdev1;
static const struct platform_device_info edma_dev_info0 = {
.name = "edma-dma-engine",
.id = 0,
- .dma_mask = DMA_BIT_MASK(32),
};
static const struct platform_device_info edma_dev_info1 = {
.name = "edma-dma-engine",
.id = 1,
- .dma_mask = DMA_BIT_MASK(32),
};
static int edma_init(void)
@@ -641,6 +638,8 @@ static int edma_init(void)
ret = PTR_ERR(pdev0);
goto out;
}
+ pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
+ pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
if (EDMA_CTLRS == 2) {
@@ -650,6 +649,8 @@ static int edma_init(void)
platform_device_unregister(pdev0);
ret = PTR_ERR(pdev1);
}
+ pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
+ pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
out:
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index bcfde400904f..f2bf8c0c4675 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -903,8 +903,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
switch (data->port) {
case EP93XX_DMA_SSP:
case EP93XX_DMA_IDE:
- if (data->direction != DMA_MEM_TO_DEV &&
- data->direction != DMA_DEV_TO_MEM)
+ if (!is_slave_direction(data->direction))
return -EINVAL;
break;
default:
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a68a8ba87e6..1879a5942bfc 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -833,14 +833,14 @@ int ioat_dma_self_test(struct ioatdma_device *device)
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
- flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
+ flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
DMA_PREP_INTERRUPT;
tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
IOAT_TEST_SIZE, flags);
if (!tx) {
dev_err(dev, "Self-test prep failed, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
async_tx_ack(tx);
@@ -851,7 +851,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test setup failed, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
dma->device_issue_pending(dma_chan);
@@ -862,7 +862,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
!= DMA_SUCCESS) {
dev_err(dev, "Self-test copy timed out, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
if (memcmp(src, dest, IOAT_TEST_SIZE)) {
dev_err(dev, "Self-test copy failed compare, disabling\n");
@@ -870,6 +870,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
goto free_resources;
}
+unmap_dma:
+ dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
free_resources:
dma->device_free_chan_resources(dma_chan);
out:
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 087935f1565f..53a4cbb78f47 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -97,6 +97,7 @@ struct ioat_chan_common {
#define IOAT_KOBJ_INIT_FAIL 3
#define IOAT_RESHAPE_PENDING 4
#define IOAT_RUN 5
+ #define IOAT_CHAN_ACTIVE 6
struct timer_list timer;
#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 82d4e306c32e..b925e1b1d139 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -269,61 +269,22 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
__ioat2_restart_chan(ioat);
}
-void ioat2_timer_event(unsigned long data)
+static void check_active(struct ioat2_dma_chan *ioat)
{
- struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
- if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- dma_addr_t phys_complete;
- u64 status;
-
- status = ioat_chansts(chan);
-
- /* when halted due to errors check for channel
- * programming errors before advancing the completion state
- */
- if (is_ioat_halted(status)) {
- u32 chanerr;
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
- __func__, chanerr);
- if (test_bit(IOAT_RUN, &chan->state))
- BUG_ON(is_ioat_bug(chanerr));
- else /* we never got off the ground */
- return;
- }
-
- /* if we haven't made progress and we have already
- * acknowledged a pending completion once, then be more
- * forceful with a restart
- */
- spin_lock_bh(&chan->cleanup_lock);
- if (ioat_cleanup_preamble(chan, &phys_complete)) {
- __cleanup(ioat, phys_complete);
- } else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
- spin_lock_bh(&ioat->prep_lock);
- ioat2_restart_channel(ioat);
- spin_unlock_bh(&ioat->prep_lock);
- } else {
- set_bit(IOAT_COMPLETION_ACK, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- }
- spin_unlock_bh(&chan->cleanup_lock);
- } else {
- u16 active;
+ if (ioat2_ring_active(ioat)) {
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ return;
+ }
+ if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ else if (ioat->alloc_order > ioat_get_alloc_order()) {
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
- spin_lock_bh(&chan->cleanup_lock);
- spin_lock_bh(&ioat->prep_lock);
- active = ioat2_ring_active(ioat);
- if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
- reshape_ring(ioat, ioat->alloc_order-1);
- spin_unlock_bh(&ioat->prep_lock);
- spin_unlock_bh(&chan->cleanup_lock);
+ reshape_ring(ioat, ioat->alloc_order - 1);
/* keep shrinking until we get back to our minimum
* default size
@@ -331,6 +292,60 @@ void ioat2_timer_event(unsigned long data)
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
+
+}
+
+void ioat2_timer_event(unsigned long data)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
+ dma_addr_t phys_complete;
+ u64 status;
+
+ status = ioat_chansts(chan);
+
+ /* when halted due to errors check for channel
+ * programming errors before advancing the completion state
+ */
+ if (is_ioat_halted(status)) {
+ u32 chanerr;
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+ __func__, chanerr);
+ if (test_bit(IOAT_RUN, &chan->state))
+ BUG_ON(is_ioat_bug(chanerr));
+ else /* we never got off the ground */
+ return;
+ }
+
+ /* if we haven't made progress and we have already
+ * acknowledged a pending completion once, then be more
+ * forceful with a restart
+ */
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+ else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+ spin_lock_bh(&ioat->prep_lock);
+ ioat2_restart_channel(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ } else {
+ set_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ }
+
+
+ if (ioat2_ring_active(ioat))
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ else {
+ spin_lock_bh(&ioat->prep_lock);
+ check_active(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+ }
+ spin_unlock_bh(&chan->cleanup_lock);
}
static int ioat2_reset_hw(struct ioat_chan_common *chan)
@@ -404,7 +419,7 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
- if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
+ if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
/* make descriptor updates visible before advancing ioat->head,
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 3e9d66920eb3..e8336cce360b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -342,61 +342,22 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
__ioat2_restart_chan(ioat);
}
-static void ioat3_timer_event(unsigned long data)
+static void check_active(struct ioat2_dma_chan *ioat)
{
- struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
- if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- dma_addr_t phys_complete;
- u64 status;
-
- status = ioat_chansts(chan);
-
- /* when halted due to errors check for channel
- * programming errors before advancing the completion state
- */
- if (is_ioat_halted(status)) {
- u32 chanerr;
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
- __func__, chanerr);
- if (test_bit(IOAT_RUN, &chan->state))
- BUG_ON(is_ioat_bug(chanerr));
- else /* we never got off the ground */
- return;
- }
-
- /* if we haven't made progress and we have already
- * acknowledged a pending completion once, then be more
- * forceful with a restart
- */
- spin_lock_bh(&chan->cleanup_lock);
- if (ioat_cleanup_preamble(chan, &phys_complete))
- __cleanup(ioat, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
- spin_lock_bh(&ioat->prep_lock);
- ioat3_restart_channel(ioat);
- spin_unlock_bh(&ioat->prep_lock);
- } else {
- set_bit(IOAT_COMPLETION_ACK, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- }
- spin_unlock_bh(&chan->cleanup_lock);
- } else {
- u16 active;
+ if (ioat2_ring_active(ioat)) {
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ return;
+ }
+ if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ else if (ioat->alloc_order > ioat_get_alloc_order()) {
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
- spin_lock_bh(&chan->cleanup_lock);
- spin_lock_bh(&ioat->prep_lock);
- active = ioat2_ring_active(ioat);
- if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
- reshape_ring(ioat, ioat->alloc_order-1);
- spin_unlock_bh(&ioat->prep_lock);
- spin_unlock_bh(&chan->cleanup_lock);
+ reshape_ring(ioat, ioat->alloc_order - 1);
/* keep shrinking until we get back to our minimum
* default size
@@ -404,6 +365,60 @@ static void ioat3_timer_event(unsigned long data)
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
+
+}
+
+static void ioat3_timer_event(unsigned long data)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
+ dma_addr_t phys_complete;
+ u64 status;
+
+ status = ioat_chansts(chan);
+
+ /* when halted due to errors check for channel
+ * programming errors before advancing the completion state
+ */
+ if (is_ioat_halted(status)) {
+ u32 chanerr;
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+ __func__, chanerr);
+ if (test_bit(IOAT_RUN, &chan->state))
+ BUG_ON(is_ioat_bug(chanerr));
+ else /* we never got off the ground */
+ return;
+ }
+
+ /* if we haven't made progress and we have already
+ * acknowledged a pending completion once, then be more
+ * forceful with a restart
+ */
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+ else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+ spin_lock_bh(&ioat->prep_lock);
+ ioat3_restart_channel(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ } else {
+ set_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ }
+
+
+ if (ioat2_ring_active(ioat))
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ else {
+ spin_lock_bh(&ioat->prep_lock);
+ check_active(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+ }
+ spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
@@ -863,6 +878,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
unsigned long tmo;
struct device *dev = &device->pdev->dev;
struct dma_device *dma = &device->common;
+ u8 op = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -908,18 +924,22 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
}
/* test xor */
+ op = IOAT_OP_XOR;
+
dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
- DMA_PREP_INTERRUPT);
+ DMA_PREP_INTERRUPT |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
dev_err(dev, "Self-test xor prep failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
async_tx_ack(tx);
@@ -930,7 +950,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test xor setup failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
@@ -939,9 +959,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test xor timed out\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
u32 *ptr = page_address(dest);
@@ -957,6 +981,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
goto free_resources;
+ op = IOAT_OP_XOR_VAL;
+
/* validate the sources with the destination page */
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
xor_val_srcs[i] = xor_srcs[i];
@@ -969,11 +995,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
- &xor_val_result, DMA_PREP_INTERRUPT);
+ &xor_val_result, DMA_PREP_INTERRUPT |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
dev_err(dev, "Self-test zero prep failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
async_tx_ack(tx);
@@ -984,7 +1012,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test zero setup failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
@@ -993,9 +1021,12 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test validate timed out\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
if (xor_val_result != 0) {
dev_err(dev, "Self-test validate failed compare\n");
err = -ENODEV;
@@ -1007,14 +1038,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
goto free_resources;
/* test memset */
+ op = IOAT_OP_FILL;
+
dma_addr = dma_map_page(dev, dest, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
- DMA_PREP_INTERRUPT);
+ DMA_PREP_INTERRUPT |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
dev_err(dev, "Self-test memset prep failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
async_tx_ack(tx);
@@ -1025,7 +1060,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test memset setup failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
@@ -1034,9 +1069,11 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test memset timed out\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
u32 *ptr = page_address(dest);
if (ptr[i]) {
@@ -1047,17 +1084,21 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
}
/* test for non-zero parity sum */
+ op = IOAT_OP_XOR_VAL;
+
xor_val_result = 0;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
- &xor_val_result, DMA_PREP_INTERRUPT);
+ &xor_val_result, DMA_PREP_INTERRUPT |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
dev_err(dev, "Self-test 2nd zero prep failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
async_tx_ack(tx);
@@ -1068,7 +1109,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test 2nd zero setup failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
@@ -1077,15 +1118,31 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test 2nd validate timed out\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
if (xor_val_result != SUM_CHECK_P_RESULT) {
dev_err(dev, "Self-test validate failed compare\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+ goto free_resources;
+dma_unmap:
+ if (op == IOAT_OP_XOR) {
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+ } else if (op == IOAT_OP_XOR_VAL) {
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+ } else if (op == IOAT_OP_FILL)
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
free_resources:
dma->device_free_chan_resources(dma_chan);
out:
@@ -1126,12 +1183,7 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
- /* -= IOAT ver.3 workarounds =- */
- /* Write CHANERRMSK_INT with 3E07h to mask out the errors
- * that can cause stability issues for IOAT ver.3, and clear any
- * pending errors
- */
- pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+ /* clear any pending errors */
err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
if (err) {
dev_err(&pdev->dev, "channel error register unreachable\n");
@@ -1187,6 +1239,26 @@ static bool is_snb_ioat(struct pci_dev *pdev)
}
}
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+ return true;
+ default:
+ return false;
+ }
+
+}
+
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
@@ -1207,7 +1279,7 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+ if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev))
dma->copy_align = 6;
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index d2ff3fda0b18..7cb74c62c719 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -35,6 +35,17 @@
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
#define IOAT_VER_3_2 0x32 /* Version 3.2 */
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f
+
int system_has_dca_enabled(struct pci_dev *pdev);
struct ioat_dma_descriptor {
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 4f686c527ab6..71c7ecd80fac 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -40,17 +40,6 @@ MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f
-
static struct pci_device_id ioat_pci_tbl[] = {
/* I/OAT v1 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index eacb8be99812..7dafb9f3785f 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -936,7 +936,7 @@ static irqreturn_t iop_adma_err_handler(int irq, void *data)
struct iop_adma_chan *chan = data;
unsigned long status = iop_chan_get_status(chan);
- dev_printk(KERN_ERR, chan->device->common.dev,
+ dev_err(chan->device->common.dev,
"error ( %s%s%s%s%s%s%s)\n",
iop_is_err_int_parity(status, chan) ? "int_parity " : "",
iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
@@ -1017,7 +1017,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1027,7 +1027,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test copy failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1117,7 +1117,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1129,7 +1129,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
u32 *ptr = page_address(dest);
if (ptr[i] != cmp_word) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test xor failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1163,14 +1163,14 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test zero sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
if (zero_sum_result != 0) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test zero sum failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1187,7 +1187,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test memset timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1196,7 +1196,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
u32 *ptr = page_address(dest);
if (ptr[i]) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test memset failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1219,14 +1219,14 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test non-zero sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
if (zero_sum_result != 1) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test non-zero sum failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1579,15 +1579,14 @@ static int iop_adma_probe(struct platform_device *pdev)
goto err_free_iop_chan;
}
- dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
- "( %s%s%s%s%s%s%s)\n",
- dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
- dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
- dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
- dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
- dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
- dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
- dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
+ dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s%s)\n",
+ dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
+ dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
+ dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
goto out;
@@ -1651,8 +1650,8 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
/* run the descriptor */
iop_chan_enable(iop_chan);
} else
- dev_printk(KERN_ERR, iop_chan->device->common.dev,
- "failed to allocate null descriptor\n");
+ dev_err(iop_chan->device->common.dev,
+ "failed to allocate null descriptor\n");
spin_unlock_bh(&iop_chan->lock);
}
@@ -1704,7 +1703,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
/* run the descriptor */
iop_chan_enable(iop_chan);
} else
- dev_printk(KERN_ERR, iop_chan->device->common.dev,
+ dev_err(iop_chan->device->common.dev,
"failed to allocate null descriptor\n");
spin_unlock_bh(&iop_chan->lock);
}
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 65855373cee6..8c61d17a86bf 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1347,7 +1347,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
chan->chan_id != IDMAC_IC_7)
return NULL;
- if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
+ if (!is_slave_direction(direction)) {
dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
return NULL;
}
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index a5ee37d5320f..2e284a4438bc 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -44,7 +44,6 @@ static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg)
struct ipu_irq_bank {
unsigned int control;
unsigned int status;
- spinlock_t lock;
struct ipu *ipu;
};
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index dc7466563507..c26699f9c4df 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -618,10 +618,8 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
else if (maxburst == 32)
chan->dcmd |= DCMD_BURST32;
- if (cfg) {
- chan->dir = cfg->direction;
- chan->drcmr = cfg->slave_id;
- }
+ chan->dir = cfg->direction;
+ chan->drcmr = cfg->slave_id;
chan->dev_addr = addr;
break;
default:
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e17fad03cb80..d64ae14f2706 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -210,7 +210,7 @@ static void mv_set_mode(struct mv_xor_chan *chan,
break;
default:
dev_err(mv_chan_to_devp(chan),
- "error: unsupported operation %d.\n",
+ "error: unsupported operation %d\n",
type);
BUG();
return;
@@ -828,28 +828,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
u32 val;
val = __raw_readl(XOR_CONFIG(chan));
- dev_err(mv_chan_to_devp(chan),
- "config 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
val = __raw_readl(XOR_ACTIVATION(chan));
- dev_err(mv_chan_to_devp(chan),
- "activation 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
val = __raw_readl(XOR_INTR_CAUSE(chan));
- dev_err(mv_chan_to_devp(chan),
- "intr cause 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
val = __raw_readl(XOR_INTR_MASK(chan));
- dev_err(mv_chan_to_devp(chan),
- "intr mask 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
val = __raw_readl(XOR_ERROR_CAUSE(chan));
- dev_err(mv_chan_to_devp(chan),
- "error cause 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
val = __raw_readl(XOR_ERROR_ADDR(chan));
- dev_err(mv_chan_to_devp(chan),
- "error addr 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
@@ -862,7 +856,7 @@ static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
}
dev_err(mv_chan_to_devp(chan),
- "error on chan %d. intr cause 0x%08x.\n",
+ "error on chan %d. intr cause 0x%08x\n",
chan->idx, intr_cause);
mv_dump_xor_regs(chan);
@@ -1052,9 +1046,8 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
u32 *ptr = page_address(dest);
if (ptr[i] != cmp_word) {
dev_err(dma_chan->device->dev,
- "Self-test xor failed compare, disabling."
- " index %d, data %x, expected %x\n", i,
- ptr[i], cmp_word);
+ "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
+ i, ptr[i], cmp_word);
err = -ENODEV;
goto free_resources;
}
@@ -1194,12 +1187,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
goto err_free_irq;
}
- dev_info(&pdev->dev, "Marvell XOR: "
- "( %s%s%s%s)\n",
- dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
- dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
- dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
- dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
+ dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
return mv_chan;
@@ -1253,7 +1245,7 @@ static int mv_xor_probe(struct platform_device *pdev)
struct resource *res;
int i, ret;
- dev_notice(&pdev->dev, "Marvell XOR driver\n");
+ dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
if (!xordev)
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 9f02e794b12b..8f6d30d37c45 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -109,7 +109,7 @@ struct mxs_dma_chan {
struct dma_chan chan;
struct dma_async_tx_descriptor desc;
struct tasklet_struct tasklet;
- int chan_irq;
+ unsigned int chan_irq;
struct mxs_dma_ccw *ccw;
dma_addr_t ccw_phys;
int desc_count;
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
struct mxs_dma_ccw *ccw;
struct scatterlist *sg;
- int i, j;
+ u32 i, j;
u32 *pio;
bool append = flags & DMA_PREP_INTERRUPT;
int idx = append ? mxs_chan->desc_count : 0;
@@ -537,8 +537,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
- int num_periods = buf_len / period_len;
- int i = 0, buf = 0;
+ u32 num_periods = buf_len / period_len;
+ u32 i = 0, buf = 0;
if (mxs_chan->status == DMA_IN_PROGRESS)
return NULL;
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
new file mode 100644
index 000000000000..69d04d28b1ef
--- /dev/null
+++ b/drivers/dma/of-dma.c
@@ -0,0 +1,267 @@
+/*
+ * Device tree helpers for DMA request / controller
+ *
+ * Based on of_gpio.c
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+
+static LIST_HEAD(of_dma_list);
+static DEFINE_SPINLOCK(of_dma_lock);
+
+/**
+ * of_dma_get_controller - Get a DMA controller in DT DMA helpers list
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ *
+ * Finds a DMA controller with a matching device node and number of dma cells
+ * in the list of registered DMA controllers. If a match is found, the use_count
+ * variable is increased and a valid pointer to the stored DMA data is returned.
+ * A NULL pointer is returned if no match is found.
+ */
+static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec)
+{
+ struct of_dma *ofdma;
+
+ spin_lock(&of_dma_lock);
+
+ if (list_empty(&of_dma_list)) {
+ spin_unlock(&of_dma_lock);
+ return NULL;
+ }
+
+ list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
+ if ((ofdma->of_node == dma_spec->np) &&
+ (ofdma->of_dma_nbcells == dma_spec->args_count)) {
+ ofdma->use_count++;
+ spin_unlock(&of_dma_lock);
+ return ofdma;
+ }
+
+ spin_unlock(&of_dma_lock);
+
+ pr_debug("%s: can't find DMA controller %s\n", __func__,
+ dma_spec->np->full_name);
+
+ return NULL;
+}
+
+/**
+ * of_dma_put_controller - Decrement use count for a registered DMA controller
+ * @of_dma: pointer to DMA controller data
+ *
+ * Decrements the use_count variable in the DMA data structure. This function
+ * should be called only when a valid pointer is returned from
+ * of_dma_get_controller() and no further accesses to data referenced by that
+ * pointer are needed.
+ */
+static void of_dma_put_controller(struct of_dma *ofdma)
+{
+ spin_lock(&of_dma_lock);
+ ofdma->use_count--;
+ spin_unlock(&of_dma_lock);
+}
+
+/**
+ * of_dma_controller_register - Register a DMA controller to DT DMA helpers
+ * @np: device node of DMA controller
+ * @of_dma_xlate: translation function which converts a phandle
+ * arguments list into a dma_chan structure
+ * @data: pointer to controller-specific data to be used by
+ * translation function
+ *
+ * Returns 0 on success or appropriate errno value on error.
+ *
+ * Allocated memory should be freed with appropriate of_dma_controller_free()
+ * call.
+ */
+int of_dma_controller_register(struct device_node *np,
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *),
+ void *data)
+{
+ struct of_dma *ofdma;
+ int nbcells;
+
+ if (!np || !of_dma_xlate) {
+ pr_err("%s: not enough information provided\n", __func__);
+ return -EINVAL;
+ }
+
+ ofdma = kzalloc(sizeof(*ofdma), GFP_KERNEL);
+ if (!ofdma)
+ return -ENOMEM;
+
+ nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL));
+ if (!nbcells) {
+ pr_err("%s: #dma-cells property is missing or invalid\n",
+ __func__);
+ kfree(ofdma);
+ return -EINVAL;
+ }
+
+ ofdma->of_node = np;
+ ofdma->of_dma_nbcells = nbcells;
+ ofdma->of_dma_xlate = of_dma_xlate;
+ ofdma->of_dma_data = data;
+ ofdma->use_count = 0;
+
+ /* Now queue of_dma controller structure in list */
+ spin_lock(&of_dma_lock);
+ list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
+ spin_unlock(&of_dma_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_dma_controller_register);
+
+/**
+ * of_dma_controller_free - Remove a DMA controller from DT DMA helpers list
+ * @np: device node of DMA controller
+ *
+ * Memory allocated by of_dma_controller_register() is freed here.
+ */
+int of_dma_controller_free(struct device_node *np)
+{
+ struct of_dma *ofdma;
+
+ spin_lock(&of_dma_lock);
+
+ if (list_empty(&of_dma_list)) {
+ spin_unlock(&of_dma_lock);
+ return -ENODEV;
+ }
+
+ list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
+ if (ofdma->of_node == np) {
+ if (ofdma->use_count) {
+ spin_unlock(&of_dma_lock);
+ return -EBUSY;
+ }
+
+ list_del(&ofdma->of_dma_controllers);
+ spin_unlock(&of_dma_lock);
+ kfree(ofdma);
+ return 0;
+ }
+
+ spin_unlock(&of_dma_lock);
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(of_dma_controller_free);
+
+/**
+ * of_dma_match_channel - Check if a DMA specifier matches name
+ * @np: device node to look for DMA channels
+ * @name: channel name to be matched
+ * @index: index of DMA specifier in list of DMA specifiers
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ *
+ * Check if the DMA specifier pointed to by the index in a list of DMA
+ * specifiers matches the name provided. Returns 0 if the name matches and
+ * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV.
+ */
+static int of_dma_match_channel(struct device_node *np, char *name, int index,
+ struct of_phandle_args *dma_spec)
+{
+ const char *s;
+
+ if (of_property_read_string_index(np, "dma-names", index, &s))
+ return -ENODEV;
+
+ if (strcmp(name, s))
+ return -ENODEV;
+
+ if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index,
+ dma_spec))
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
+ * of_dma_request_slave_channel - Get the DMA slave channel
+ * @np: device node to get DMA request from
+ * @name: name of desired channel
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+ char *name)
+{
+ struct of_phandle_args dma_spec;
+ struct of_dma *ofdma;
+ struct dma_chan *chan;
+ int count, i;
+
+ if (!np || !name) {
+ pr_err("%s: not enough information provided\n", __func__);
+ return NULL;
+ }
+
+ count = of_property_count_strings(np, "dma-names");
+ if (count < 0) {
+ pr_err("%s: dma-names property missing or empty\n", __func__);
+ return NULL;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (of_dma_match_channel(np, name, i, &dma_spec))
+ continue;
+
+ ofdma = of_dma_get_controller(&dma_spec);
+
+ if (!ofdma)
+ continue;
+
+ chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
+
+ of_dma_put_controller(ofdma);
+
+ of_node_put(dma_spec.np);
+
+ if (chan)
+ return chan;
+ }
+
+ return NULL;
+}
+
+/**
+ * of_dma_simple_xlate - Simple DMA engine translation function
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ * @of_dma: pointer to DMA controller data
+ *
+ * A simple translation function for devices that use a 32-bit value for the
+ * filter_param when calling the DMA engine dma_request_channel() function.
+ * Note that this translation function requires that #dma-cells is equal to 1
+ * and the argument of the dma specifier is the 32-bit filter_param. Returns
+ * pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ int count = dma_spec->args_count;
+ struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+ if (!info || !info->filter_fn)
+ return NULL;
+
+ if (count != 1)
+ return NULL;
+
+ return dma_request_channel(info->dma_cap, info->filter_fn,
+ &dma_spec->args[0]);
+}
+EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
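The of-dma helpers added above let a DMA controller driver hook channel lookup into the device tree and let clients request channels by name. As a rough sketch (the foo_* names, the filter logic and the platform devices are hypothetical, not part of this patch), a controller relying on the generic of_dma_simple_xlate() would register a filter function, and a client would then look up a named channel:

#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

/* Hypothetical filter: match the 32-bit request line passed via #dma-cells = <1>. */
static bool foo_dma_filter(struct dma_chan *chan, void *param)
{
	return chan->chan_id == *(u32 *)param;	/* assumption: chan_id encodes the line */
}

static struct of_dma_filter_info foo_dma_info = {
	.filter_fn = foo_dma_filter,
};

static int foo_dma_probe(struct platform_device *pdev)
{
	/* ... set up and register the dma_device first ... */

	dma_cap_zero(foo_dma_info.dma_cap);
	dma_cap_set(DMA_SLAVE, foo_dma_info.dma_cap);

	return of_dma_controller_register(pdev->dev.of_node,
					  of_dma_simple_xlate, &foo_dma_info);
}

/* Hypothetical client probe: ask for the channel named "rx" in its dmas/dma-names. */
static int foo_client_probe(struct platform_device *pdev)
{
	struct dma_chan *chan = of_dma_request_slave_channel(pdev->dev.of_node, "rx");

	if (!chan)
		return -ENODEV;	/* or fall back to PIO */
	/* ... */
	return 0;
}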
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 5a31264f2bd1..c4b4fd2acc42 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -661,32 +661,14 @@ bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
-static struct platform_device *pdev;
-
-static const struct platform_device_info omap_dma_dev_info = {
- .name = "omap-dma-engine",
- .id = -1,
- .dma_mask = DMA_BIT_MASK(32),
-};
-
static int omap_dma_init(void)
{
- int rc = platform_driver_register(&omap_dma_driver);
-
- if (rc == 0) {
- pdev = platform_device_register_full(&omap_dma_dev_info);
- if (IS_ERR(pdev)) {
- platform_driver_unregister(&omap_dma_driver);
- rc = PTR_ERR(pdev);
- }
- }
- return rc;
+ return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);
static void __exit omap_dma_exit(void)
{
- platform_device_unregister(pdev);
platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 3f2617255ef2..d01faeb0f27c 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1029,18 +1029,7 @@ static struct pci_driver pch_dma_driver = {
#endif
};
-static int __init pch_dma_init(void)
-{
- return pci_register_driver(&pch_dma_driver);
-}
-
-static void __exit pch_dma_exit(void)
-{
- pci_unregister_driver(&pch_dma_driver);
-}
-
-module_init(pch_dma_init);
-module_exit(pch_dma_exit);
+module_pci_driver(pch_dma_driver);
MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
"DMA controller driver");
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 80680eee0171..718153122759 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -25,6 +25,7 @@
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
#include "dmaengine.h"
#define PL330_MAX_CHAN 8
@@ -606,6 +607,11 @@ struct dma_pl330_desc {
struct dma_pl330_chan *pchan;
};
+struct dma_pl330_filter_args {
+ struct dma_pl330_dmac *pdmac;
+ unsigned int chan_id;
+};
+
static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
if (r && r->xfer_cb)
@@ -2352,6 +2358,16 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
tasklet_schedule(&pch->task);
}
+static bool pl330_dt_filter(struct dma_chan *chan, void *param)
+{
+ struct dma_pl330_filter_args *fargs = param;
+
+ if (chan->device != &fargs->pdmac->ddma)
+ return false;
+
+ return (chan->chan_id == fargs->chan_id);
+}
+
bool pl330_filter(struct dma_chan *chan, void *param)
{
u8 *peri_id;
@@ -2359,25 +2375,35 @@ bool pl330_filter(struct dma_chan *chan, void *param)
if (chan->device->dev->driver != &pl330_driver.drv)
return false;
-#ifdef CONFIG_OF
- if (chan->device->dev->of_node) {
- const __be32 *prop_value;
- phandle phandle;
- struct device_node *node;
-
- prop_value = ((struct property *)param)->value;
- phandle = be32_to_cpup(prop_value++);
- node = of_find_node_by_phandle(phandle);
- return ((chan->private == node) &&
- (chan->chan_id == be32_to_cpup(prop_value)));
- }
-#endif
-
peri_id = chan->private;
return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);
+static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ int count = dma_spec->args_count;
+ struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
+ struct dma_pl330_filter_args fargs;
+ dma_cap_mask_t cap;
+
+ if (!pdmac)
+ return NULL;
+
+ if (count != 1)
+ return NULL;
+
+ fargs.pdmac = pdmac;
+ fargs.chan_id = dma_spec->args[0];
+
+ dma_cap_zero(cap);
+ dma_cap_set(DMA_SLAVE, cap);
+ dma_cap_set(DMA_CYCLIC, cap);
+
+ return dma_request_channel(cap, pl330_dt_filter, &fargs);
+}
+
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -2866,7 +2892,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pdat = adev->dev.platform_data;
/* Allocate a new DMAC and its Channels */
- pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
+ pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
if (!pdmac) {
dev_err(&adev->dev, "unable to allocate mem\n");
return -ENOMEM;
@@ -2878,13 +2904,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
- request_mem_region(res->start, resource_size(res), "dma-pl330");
-
- pi->base = ioremap(res->start, resource_size(res));
- if (!pi->base) {
- ret = -ENXIO;
- goto probe_err1;
- }
+ pi->base = devm_request_and_ioremap(&adev->dev, res);
+ if (!pi->base)
+ return -ENXIO;
amba_set_drvdata(adev, pdmac);
@@ -2892,11 +2914,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = request_irq(irq, pl330_irq_handler, 0,
dev_name(&adev->dev), pi);
if (ret)
- goto probe_err2;
+ return ret;
ret = pl330_add(pi);
if (ret)
- goto probe_err3;
+ goto probe_err1;
INIT_LIST_HEAD(&pdmac->desc_pool);
spin_lock_init(&pdmac->pool_lock);
@@ -2918,7 +2940,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (!pdmac->peripherals) {
ret = -ENOMEM;
dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
- goto probe_err4;
+ goto probe_err2;
}
for (i = 0; i < num_chan; i++) {
@@ -2962,7 +2984,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = dma_async_device_register(pd);
if (ret) {
dev_err(&adev->dev, "unable to register DMAC\n");
- goto probe_err4;
+ goto probe_err2;
}
dev_info(&adev->dev,
@@ -2973,17 +2995,20 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
pi->pcfg.num_peri, pi->pcfg.num_events);
+ ret = of_dma_controller_register(adev->dev.of_node,
+ of_dma_pl330_xlate, pdmac);
+ if (ret) {
+ dev_err(&adev->dev,
+ "unable to register DMA to the generic DT DMA helpers\n");
+ goto probe_err2;
+ }
+
return 0;
-probe_err4:
- pl330_del(pi);
-probe_err3:
- free_irq(irq, pi);
probe_err2:
- iounmap(pi->base);
+ pl330_del(pi);
probe_err1:
- release_mem_region(res->start, resource_size(res));
- kfree(pdmac);
+ free_irq(irq, pi);
return ret;
}
@@ -2993,12 +3018,13 @@ static int pl330_remove(struct amba_device *adev)
struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
struct pl330_info *pi;
- struct resource *res;
int irq;
if (!pdmac)
return 0;
+ of_dma_controller_free(adev->dev.of_node);
+
amba_set_drvdata(adev, NULL);
/* Idle the DMAC */
@@ -3020,13 +3046,6 @@ static int pl330_remove(struct amba_device *adev)
irq = adev->irq[0];
free_irq(irq, pi);
- iounmap(pi->base);
-
- res = &adev->res;
- release_mem_region(res->start, resource_size(res));
-
- kfree(pdmac);
-
return 0;
}
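With of_dma_controller_register() called from pl330_probe(), a consumer described in the device tree can be matched to a specific channel of this DMAC by index: of_dma_pl330_xlate() copies the single specifier cell into fargs.chan_id, and pl330_dt_filter() picks the channel with that chan_id. A minimal consumer-side sketch (the node label, channel index and client function are hypothetical):

#include <linux/of_dma.h>
#include <linux/platform_device.h>

/*
 * Assumed binding shape for the consumer node (#dma-cells = <1> on the DMAC):
 *
 *	dmas = <&pdma0 5>;	// the one cell is the pl330 channel index
 *	dma-names = "tx";
 */
static int foo_client_probe(struct platform_device *pdev)
{
	struct dma_chan *chan;

	chan = of_dma_request_slave_channel(pdev->dev.of_node, "tx");
	if (!chan)
		dev_warn(&pdev->dev, "no DMA channel, falling back to PIO\n");
	/* ... */
	return 0;
}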
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index f4cd946d259d..4acb85a10250 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -638,9 +638,6 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long flags;
int ret;
- if (!chan)
- return -EINVAL;
-
switch (cmd) {
case DMA_TERMINATE_ALL:
spin_lock_irqsave(&schan->chan_lock, flags);
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index 3315e4be9b85..b70709b030d8 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -326,7 +326,7 @@ static int sh_dmae_set_slave(struct shdma_chan *schan,
shdma_chan);
const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
if (!cfg)
- return -ENODEV;
+ return -ENXIO;
if (!try)
sh_chan->config = cfg;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 94674a96c646..1d627e2391f4 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -32,7 +32,9 @@
#define SIRFSOC_DMA_CH_VALID 0x140
#define SIRFSOC_DMA_CH_INT 0x144
#define SIRFSOC_DMA_INT_EN 0x148
+#define SIRFSOC_DMA_INT_EN_CLR 0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
+#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x15C
#define SIRFSOC_DMA_MODE_CTRL_BIT 4
#define SIRFSOC_DMA_DIR_CTRL_BIT 5
@@ -76,6 +78,7 @@ struct sirfsoc_dma {
struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
void __iomem *base;
int irq;
+ bool is_marco;
};
#define DRV_NAME "sirfsoc_dma"
@@ -288,17 +291,67 @@ static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
int cid = schan->chan.chan_id;
unsigned long flags;
- writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
- ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
- writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
+ spin_lock_irqsave(&schan->lock, flags);
- writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
- & ~((1 << cid) | 1 << (cid + 16)),
+ if (!sdma->is_marco) {
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
+ ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ & ~((1 << cid) | 1 << (cid + 16)),
sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ } else {
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
+ writel_relaxed((1 << cid) | 1 << (cid + 16),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
+ }
+
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
- spin_lock_irqsave(&schan->lock, flags);
list_splice_tail_init(&schan->active, &schan->free);
list_splice_tail_init(&schan->queued, &schan->free);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (!sdma->is_marco)
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ & ~((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ else
+ writel_relaxed((1 << cid) | 1 << (cid + 16),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (!sdma->is_marco)
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ | ((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ else
+ writel_relaxed((1 << cid) | 1 << (cid + 16),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+
spin_unlock_irqrestore(&schan->lock, flags);
return 0;
@@ -311,6 +364,10 @@ static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
switch (cmd) {
+ case DMA_PAUSE:
+ return sirfsoc_dma_pause_chan(schan);
+ case DMA_RESUME:
+ return sirfsoc_dma_resume_chan(schan);
case DMA_TERMINATE_ALL:
return sirfsoc_dma_terminate_all(schan);
case DMA_SLAVE_CONFIG:
@@ -568,6 +625,9 @@ static int sirfsoc_dma_probe(struct platform_device *op)
return -ENOMEM;
}
+ if (of_device_is_compatible(dn, "sirf,marco-dmac"))
+ sdma->is_marco = true;
+
if (of_property_read_u32(dn, "cell-index", &id)) {
dev_err(dev, "Fail to get DMAC index\n");
return -ENODEV;
@@ -668,6 +728,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
static struct of_device_id sirfsoc_dma_match[] = {
{ .compatible = "sirf,prima2-dmac", },
+ { .compatible = "sirf,marco-dmac", },
{},
};
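The DMA_PAUSE/DMA_RESUME support added here is reached through the standard dmaengine control path, so a client would normally go through the generic wrappers in <linux/dmaengine.h> rather than calling device_control() directly. A minimal sketch (the channel is assumed to carry a running cyclic transfer obtained elsewhere):

#include <linux/dmaengine.h>

static void foo_pause_and_resume(struct dma_chan *chan)
{
	/* Ends up in sirfsoc_dma_control(chan, DMA_PAUSE, 0) for a SiRF channel. */
	dmaengine_pause(chan);

	/* ... inspect or reprogram the peripheral while the loop is held ... */

	/* Ends up in sirfsoc_dma_control(chan, DMA_RESUME, 0). */
	dmaengine_resume(chan);
}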
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 23c5573e62dd..1734feec47b1 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -53,6 +53,8 @@
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0
+#define MAX(a, b) (((a) < (b)) ? (b) : (a))
+
/**
* enum 40_command - The different commands and/or statuses.
*
@@ -100,8 +102,19 @@ static u32 d40_backup_regs[] = {
#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
-/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
-static u32 d40_backup_regs_v3[] = {
+/*
+ * Since 9540 and 8540 have the same HW revision,
+ * use v4a for 9540 or earlier
+ * use v4b for 8540 or later
+ * HW revision:
+ * DB8500ed has revision 0
+ * DB8500v1 has revision 2
+ * DB8500v2 has revision 3
+ * AP9540v1 has revision 4
+ * DB8540v1 has revision 4
+ * TODO: Check if all these registers have to be saved/restored on dma40 v4a
+ */
+static u32 d40_backup_regs_v4a[] = {
D40_DREG_PSEG1,
D40_DREG_PSEG2,
D40_DREG_PSEG3,
@@ -120,7 +133,32 @@ static u32 d40_backup_regs_v3[] = {
D40_DREG_RCEG4,
};
-#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
+
+static u32 d40_backup_regs_v4b[] = {
+ D40_DREG_CPSEG1,
+ D40_DREG_CPSEG2,
+ D40_DREG_CPSEG3,
+ D40_DREG_CPSEG4,
+ D40_DREG_CPSEG5,
+ D40_DREG_CPCEG1,
+ D40_DREG_CPCEG2,
+ D40_DREG_CPCEG3,
+ D40_DREG_CPCEG4,
+ D40_DREG_CPCEG5,
+ D40_DREG_CRSEG1,
+ D40_DREG_CRSEG2,
+ D40_DREG_CRSEG3,
+ D40_DREG_CRSEG4,
+ D40_DREG_CRSEG5,
+ D40_DREG_CRCEG1,
+ D40_DREG_CRCEG2,
+ D40_DREG_CRCEG3,
+ D40_DREG_CRCEG4,
+ D40_DREG_CRCEG5,
+};
+
+#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
static u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SSCFG,
@@ -134,6 +172,102 @@ static u32 d40_backup_regs_chan[] = {
};
/**
+ * struct d40_interrupt_lookup - lookup table for interrupt handler
+ *
+ * @src: Interrupt mask register.
+ * @clr: Interrupt clear register.
+ * @is_error: true if this is an error interrupt.
+ * @offset: start delta in the lookup_log_chans in d40_base. If equal to
+ * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
+ */
+struct d40_interrupt_lookup {
+ u32 src;
+ u32 clr;
+ bool is_error;
+ int offset;
+};
+
+
+static struct d40_interrupt_lookup il_v4a[] = {
+ {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
+ {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
+ {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
+ {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
+ {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
+ {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
+ {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
+ {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
+ {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
+ {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
+};
+
+static struct d40_interrupt_lookup il_v4b[] = {
+ {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
+ {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
+ {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
+ {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
+ {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
+ {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
+ {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
+ {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
+ {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
+ {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
+ {D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
+ {D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
+};
+
+/**
+ * struct d40_reg_val - simple lookup struct
+ *
+ * @reg: The register.
+ * @val: The value that belongs to the register in reg.
+ */
+struct d40_reg_val {
+ unsigned int reg;
+ unsigned int val;
+};
+
+static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
+ /* Clock every part of the DMA block from start */
+ { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
+
+ /* Interrupts on all logical channels */
+ { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
+};
+static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
+ /* Clock every part of the DMA block from start */
+ { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
+
+ /* Interrupts on all logical channels */
+ { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
+};
+
+/**
* struct d40_lli_pool - Structure for keeping LLIs in memory
*
* @base: Pointer to memory area when the pre_alloc_lli's are not large
@@ -221,6 +355,7 @@ struct d40_lcla_pool {
* @allocated_dst: Same as for src but is dst.
* allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
* event line number.
+ * @use_soft_lli: To mark if the channel's linked lists are managed by SW.
*/
struct d40_phy_res {
spinlock_t lock;
@@ -228,6 +363,7 @@ struct d40_phy_res {
int num;
u32 allocated_src;
u32 allocated_dst;
+ bool use_soft_lli;
};
struct d40_base;
@@ -248,6 +384,7 @@ struct d40_base;
 * @client: Client owned descriptor list.
* @pending_queue: Submitted jobs, to be issued by issue_pending()
* @active: Active descriptor.
+ * @done: Completed jobs
* @queue: Queued jobs.
* @prepare_queue: Prepared jobs.
* @dma_cfg: The client configuration of this dma channel.
@@ -273,6 +410,7 @@ struct d40_chan {
struct list_head client;
struct list_head pending_queue;
struct list_head active;
+ struct list_head done;
struct list_head queue;
struct list_head prepare_queue;
struct stedma40_chan_cfg dma_cfg;
@@ -289,6 +427,38 @@ struct d40_chan {
};
/**
+ * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
+ * controller
+ *
+ * @backup: the pointer to the registers address array for backup
+ * @backup_size: the size of the registers address array for backup
+ * @realtime_en: the realtime enable register
+ * @realtime_clear: the realtime clear register
+ * @high_prio_en: the high priority enable register
+ * @high_prio_clear: the high priority clear register
+ * @interrupt_en: the interrupt enable register
+ * @interrupt_clear: the interrupt clear register
+ * @il: the pointer to struct d40_interrupt_lookup
+ * @il_size: the size of d40_interrupt_lookup array
+ * @init_reg: the pointer to the struct d40_reg_val
+ * @init_reg_size: the size of d40_reg_val array
+ */
+struct d40_gen_dmac {
+ u32 *backup;
+ u32 backup_size;
+ u32 realtime_en;
+ u32 realtime_clear;
+ u32 high_prio_en;
+ u32 high_prio_clear;
+ u32 interrupt_en;
+ u32 interrupt_clear;
+ struct d40_interrupt_lookup *il;
+ u32 il_size;
+ struct d40_reg_val *init_reg;
+ u32 init_reg_size;
+};
+
+/**
* struct d40_base - The big global struct, one for each probe'd instance.
*
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
@@ -326,11 +496,13 @@ struct d40_chan {
* @desc_slab: cache for descriptors.
* @reg_val_backup: Here the values of some hardware registers are stored
* before the DMA is powered off. They are restored when the power is back on.
- * @reg_val_backup_v3: Backup of registers that only exits on dma40 v3 and
- * later.
+ * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
+ * later.
* @reg_val_backup_chan: Backup data for standard channel parameter registers.
* @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
* @initialized: true if the dma has been initialized
+ * @gen_dmac: the struct for generic register values to represent u8500/8540
+ * DMA controller
*/
struct d40_base {
spinlock_t interrupt_lock;
@@ -344,6 +516,7 @@ struct d40_base {
int irq;
int num_phy_chans;
int num_log_chans;
+ struct device_dma_parameters dma_parms;
struct dma_device dma_both;
struct dma_device dma_slave;
struct dma_device dma_memcpy;
@@ -361,37 +534,11 @@ struct d40_base {
resource_size_t lcpa_size;
struct kmem_cache *desc_slab;
u32 reg_val_backup[BACKUP_REGS_SZ];
- u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+ u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)];
u32 *reg_val_backup_chan;
u16 gcc_pwr_off_mask;
bool initialized;
-};
-
-/**
- * struct d40_interrupt_lookup - lookup table for interrupt handler
- *
- * @src: Interrupt mask register.
- * @clr: Interrupt clear register.
- * @is_error: true if this is an error interrupt.
- * @offset: start delta in the lookup_log_chans in d40_base. If equals to
- * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
- */
-struct d40_interrupt_lookup {
- u32 src;
- u32 clr;
- bool is_error;
- int offset;
-};
-
-/**
- * struct d40_reg_val - simple lookup struct
- *
- * @reg: The register.
- * @val: The value that belongs to the register in reg.
- */
-struct d40_reg_val {
- unsigned int reg;
- unsigned int val;
+ struct d40_gen_dmac gen_dmac;
};
static struct device *chan2dev(struct d40_chan *d40c)
@@ -494,19 +641,18 @@ static int d40_lcla_alloc_one(struct d40_chan *d40c,
unsigned long flags;
int i;
int ret = -EINVAL;
- int p;
spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
- p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
-
/*
* Allocate both src and dst at the same time, therefore the half
* start on 1 since 0 can't be used since zero is used as end marker.
*/
for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
- if (!d40c->base->lcla_pool.alloc_map[p + i]) {
- d40c->base->lcla_pool.alloc_map[p + i] = d40d;
+ int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
+
+ if (!d40c->base->lcla_pool.alloc_map[idx]) {
+ d40c->base->lcla_pool.alloc_map[idx] = d40d;
d40d->lcla_alloc++;
ret = i;
break;
@@ -531,10 +677,10 @@ static int d40_lcla_free_all(struct d40_chan *d40c,
spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
- if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
- D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
- d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
- D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
+ int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
+
+ if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
+ d40c->base->lcla_pool.alloc_map[idx] = NULL;
d40d->lcla_alloc--;
if (d40d->lcla_alloc == 0) {
ret = 0;
@@ -611,6 +757,11 @@ static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}
+static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
+{
+ list_add_tail(&desc->node, &d40c->done);
+}
+
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
struct d40_lcla_pool *pool = &chan->base->lcla_pool;
@@ -634,7 +785,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
* can't link back to the one in LCPA space
*/
if (linkback || (lli_len - lli_current > 1)) {
- curr_lcla = d40_lcla_alloc_one(chan, desc);
+ /*
+ * If the channel is expected to use only soft_lli, don't
+ * allocate an lcla. This is to avoid a HW issue that exists
+ * in some controllers during a peripheral-to-memory transfer
+ * that uses linked lists.
+ */
+ if (!(chan->phy_chan->use_soft_lli &&
+ chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
+ curr_lcla = d40_lcla_alloc_one(chan, desc);
+
first_lcla = curr_lcla;
}
@@ -771,6 +931,14 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
return d;
}
+static struct d40_desc *d40_first_done(struct d40_chan *d40c)
+{
+ if (list_empty(&d40c->done))
+ return NULL;
+
+ return list_first_entry(&d40c->done, struct d40_desc, node);
+}
+
static int d40_psize_2_burst_size(bool is_log, int psize)
{
if (is_log) {
@@ -874,11 +1042,11 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
save);
/* Save/Restore registers only existing on dma40 v3 and later */
- if (base->rev >= 3)
- dma40_backup(base->virtbase, base->reg_val_backup_v3,
- d40_backup_regs_v3,
- ARRAY_SIZE(d40_backup_regs_v3),
- save);
+ if (base->gen_dmac.backup)
+ dma40_backup(base->virtbase, base->reg_val_backup_v4,
+ base->gen_dmac.backup,
+ base->gen_dmac.backup_size,
+ save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
@@ -961,6 +1129,12 @@ static void d40_term_all(struct d40_chan *d40c)
struct d40_desc *d40d;
struct d40_desc *_d;
+ /* Release completed descriptors */
+ while ((d40d = d40_first_done(d40c))) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
+
/* Release active descriptors */
while ((d40d = d40_first_active_get(d40c))) {
d40_desc_remove(d40d);
@@ -1396,6 +1570,9 @@ static void dma_tc_handle(struct d40_chan *d40c)
d40c->busy = false;
pm_runtime_mark_last_busy(d40c->base->dev);
pm_runtime_put_autosuspend(d40c->base->dev);
+
+ d40_desc_remove(d40d);
+ d40_desc_done(d40c, d40d);
}
d40c->pending_tx++;
@@ -1413,10 +1590,14 @@ static void dma_tasklet(unsigned long data)
spin_lock_irqsave(&d40c->lock, flags);
- /* Get first active entry from list */
- d40d = d40_first_active_get(d40c);
- if (d40d == NULL)
- goto err;
+ /* Get first entry from the done list */
+ d40d = d40_first_done(d40c);
+ if (d40d == NULL) {
+ /* Check if we have reached here for cyclic job */
+ d40d = d40_first_active_get(d40c);
+ if (d40d == NULL || !d40d->cyclic)
+ goto err;
+ }
if (!d40d->cyclic)
dma_cookie_complete(&d40d->txd);
@@ -1438,13 +1619,11 @@ static void dma_tasklet(unsigned long data)
if (async_tx_test_ack(&d40d->txd)) {
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
- } else {
- if (!d40d->is_in_client_list) {
- d40_desc_remove(d40d);
- d40_lcla_free_all(d40c, d40d);
- list_add_tail(&d40d->node, &d40c->client);
- d40d->is_in_client_list = true;
- }
+ } else if (!d40d->is_in_client_list) {
+ d40_desc_remove(d40d);
+ d40_lcla_free_all(d40c, d40d);
+ list_add_tail(&d40d->node, &d40c->client);
+ d40d->is_in_client_list = true;
}
}
@@ -1469,53 +1648,51 @@ err:
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
- static const struct d40_interrupt_lookup il[] = {
- {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
- {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
- {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
- {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
- {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
- {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
- {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
- {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
- {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
- {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
- };
-
int i;
- u32 regs[ARRAY_SIZE(il)];
u32 idx;
u32 row;
long chan = -1;
struct d40_chan *d40c;
unsigned long flags;
struct d40_base *base = data;
+ u32 regs[base->gen_dmac.il_size];
+ struct d40_interrupt_lookup *il = base->gen_dmac.il;
+ u32 il_size = base->gen_dmac.il_size;
spin_lock_irqsave(&base->interrupt_lock, flags);
/* Read interrupt status of both logical and physical channels */
- for (i = 0; i < ARRAY_SIZE(il); i++)
+ for (i = 0; i < il_size; i++)
regs[i] = readl(base->virtbase + il[i].src);
for (;;) {
chan = find_next_bit((unsigned long *)regs,
- BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
+ BITS_PER_LONG * il_size, chan + 1);
/* No more set bits found? */
- if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
+ if (chan == BITS_PER_LONG * il_size)
break;
row = chan / BITS_PER_LONG;
idx = chan & (BITS_PER_LONG - 1);
- /* ACK interrupt */
- writel(1 << idx, base->virtbase + il[row].clr);
-
if (il[row].offset == D40_PHY_CHAN)
d40c = base->lookup_phy_chans[idx];
else
d40c = base->lookup_log_chans[il[row].offset + idx];
+
+ if (!d40c) {
+ /*
+ * No error because this can happen if something else
+ * in the system is using the channel.
+ */
+ continue;
+ }
+
+ /* ACK interrupt */
+ writel(1 << idx, base->virtbase + il[row].clr);
+
spin_lock(&d40c->lock);
if (!il[row].is_error)
@@ -1710,10 +1887,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
int i;
int j;
int log_num;
+ int num_phy_chans;
bool is_src;
bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
phys = d40c->base->phy_res;
+ num_phy_chans = d40c->base->num_phy_chans;
if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
dev_type = d40c->dma_cfg.src_dev_type;
@@ -1734,12 +1913,19 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
if (!is_log) {
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
/* Find physical half channel */
- for (i = 0; i < d40c->base->num_phy_chans; i++) {
-
+ if (d40c->dma_cfg.use_fixed_channel) {
+ i = d40c->dma_cfg.phy_channel;
if (d40_alloc_mask_set(&phys[i], is_src,
0, is_log,
first_phy_user))
goto found_phy;
+ } else {
+ for (i = 0; i < num_phy_chans; i++) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ 0, is_log,
+ first_phy_user))
+ goto found_phy;
+ }
}
} else
for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
@@ -1954,7 +2140,6 @@ _exit:
}
-
static u32 stedma40_residue(struct dma_chan *chan)
{
struct d40_chan *d40c =
@@ -2030,7 +2215,6 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
return ret < 0 ? ret : 0;
}
-
static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
unsigned int sg_len, unsigned long dma_flags)
@@ -2056,7 +2240,6 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
goto err;
}
-
desc->lli_current = 0;
desc->txd.flags = dma_flags;
desc->txd.tx_submit = d40_tx_submit;
@@ -2105,7 +2288,6 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
return NULL;
}
-
spin_lock_irqsave(&chan->lock, flags);
desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
@@ -2179,11 +2361,26 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
bool realtime = d40c->dma_cfg.realtime;
bool highprio = d40c->dma_cfg.high_priority;
- u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
- u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
+ u32 rtreg;
u32 event = D40_TYPE_TO_EVENT(dev_type);
u32 group = D40_TYPE_TO_GROUP(dev_type);
u32 bit = 1 << event;
+ u32 prioreg;
+ struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
+
+ rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
+ /*
+ * Due to a hardware bug, in some cases a logical channel triggered by
+ * a high priority destination event line can generate extra packet
+ * transactions.
+ *
+ * The workaround is to not set the high priority level for the
+ * destination event lines that trigger logical channels.
+ */
+ if (!src && chan_is_logical(d40c))
+ highprio = false;
+
+ prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
/* Destination event lines are stored in the upper halfword */
if (!src)
@@ -2248,11 +2445,11 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
d40c->lcpa = d40c->base->lcpa_base +
- d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
+ d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
else
d40c->lcpa = d40c->base->lcpa_base +
- d40c->dma_cfg.dst_dev_type *
- D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
+ d40c->dma_cfg.dst_dev_type *
+ D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
}
dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
@@ -2287,7 +2484,6 @@ static void d40_free_chan_resources(struct dma_chan *chan)
return;
}
-
spin_lock_irqsave(&d40c->lock, flags);
err = d40_free_dma(d40c);
@@ -2330,14 +2526,12 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}
-static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
- struct scatterlist *sgl,
- unsigned int sg_len,
- enum dma_transfer_direction direction,
- unsigned long dma_flags,
- void *context)
+static struct dma_async_tx_descriptor *
+d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long dma_flags, void *context)
{
- if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
+ if (!is_slave_direction(direction))
return NULL;
return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2577,6 +2771,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
return -EINVAL;
}
+ if (src_maxburst > 16) {
+ src_maxburst = 16;
+ dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
+ } else if (dst_maxburst > 16) {
+ dst_maxburst = 16;
+ src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
+ }
+
ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
src_addr_width,
src_maxburst);
@@ -2659,6 +2861,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
d40c->log_num = D40_PHY_CHAN;
+ INIT_LIST_HEAD(&d40c->done);
INIT_LIST_HEAD(&d40c->active);
INIT_LIST_HEAD(&d40c->queue);
INIT_LIST_HEAD(&d40c->pending_queue);
@@ -2773,8 +2976,6 @@ static int dma40_pm_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct d40_base *base = platform_get_drvdata(pdev);
int ret = 0;
- if (!pm_runtime_suspended(dev))
- return -EBUSY;
if (base->lcpa_regulator)
ret = regulator_disable(base->lcpa_regulator);
@@ -2882,6 +3083,13 @@ static int __init d40_phy_res_init(struct d40_base *base)
num_phy_chans_avail--;
}
+ /* Mark soft_lli channels */
+ for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
+ int chan = base->plat_data->soft_lli_chans[i];
+
+ base->phy_res[chan].use_soft_lli = true;
+ }
+
dev_info(base->dev, "%d of %d physical DMA channels available\n",
num_phy_chans_avail, base->num_phy_chans);
@@ -2975,14 +3183,21 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
* ? has revision 1
* DB8500v1 has revision 2
* DB8500v2 has revision 3
+ * AP9540v1 has revision 4
+ * DB8540v1 has revision 4
*/
rev = AMBA_REV_BITS(pid);
+ plat_data = pdev->dev.platform_data;
+
/* The number of physical channels on this HW */
- num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
+ if (plat_data->num_of_phy_chans)
+ num_phy_chans = plat_data->num_of_phy_chans;
+ else
+ num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
- dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
- rev, res->start);
+ dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
+ rev, res->start, num_phy_chans);
if (rev < 2) {
d40_err(&pdev->dev, "hardware revision: %d is not supported",
@@ -2990,8 +3205,6 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure;
}
- plat_data = pdev->dev.platform_data;
-
/* Count the number of logical channels in use */
for (i = 0; i < plat_data->dev_len; i++)
if (plat_data->dev_rx[i] != 0)
@@ -3022,6 +3235,36 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
base->log_chans = &base->phy_chans[num_phy_chans];
+ if (base->plat_data->num_of_phy_chans == 14) {
+ base->gen_dmac.backup = d40_backup_regs_v4b;
+ base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
+ base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
+ base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
+ base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
+ base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
+ base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
+ base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
+ base->gen_dmac.il = il_v4b;
+ base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
+ base->gen_dmac.init_reg = dma_init_reg_v4b;
+ base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
+ } else {
+ if (base->rev >= 3) {
+ base->gen_dmac.backup = d40_backup_regs_v4a;
+ base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
+ }
+ base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
+ base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
+ base->gen_dmac.realtime_en = D40_DREG_RSEG1;
+ base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
+ base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
+ base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
+ base->gen_dmac.il = il_v4a;
+ base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
+ base->gen_dmac.init_reg = dma_init_reg_v4a;
+ base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
+ }
+
base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
GFP_KERNEL);
if (!base->phy_res)
@@ -3093,31 +3336,15 @@ failure:
static void __init d40_hw_init(struct d40_base *base)
{
- static struct d40_reg_val dma_init_reg[] = {
- /* Clock every part of the DMA block from start */
- { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
-
- /* Interrupts on all logical channels */
- { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
- };
int i;
u32 prmseo[2] = {0, 0};
u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
u32 pcmis = 0;
u32 pcicr = 0;
+ struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
+ u32 reg_size = base->gen_dmac.init_reg_size;
- for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
+ for (i = 0; i < reg_size; i++)
writel(dma_init_reg[i].val,
base->virtbase + dma_init_reg[i].reg);
@@ -3150,11 +3377,14 @@ static void __init d40_hw_init(struct d40_base *base)
writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
/* Write which interrupt to enable */
- writel(pcmis, base->virtbase + D40_DREG_PCMIS);
+ writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
/* Write which interrupt to clear */
- writel(pcicr, base->virtbase + D40_DREG_PCICR);
+ writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
+ /* These are __initdata and cannot be accessed after init */
+ base->gen_dmac.init_reg = NULL;
+ base->gen_dmac.init_reg_size = 0;
}
static int __init d40_lcla_allocate(struct d40_base *base)
@@ -3362,6 +3592,13 @@ static int __init d40_probe(struct platform_device *pdev)
if (err)
goto failure;
+ base->dev->dma_parms = &base->dma_parms;
+ err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
+ if (err) {
+ d40_err(&pdev->dev, "Failed to set dma max seg size\n");
+ goto failure;
+ }
+
d40_hw_init(base);
dev_info(base->dev, "initialized\n");
@@ -3397,7 +3634,7 @@ failure:
release_mem_region(base->phy_start,
base->phy_size);
if (base->clk) {
- clk_disable(base->clk);
+ clk_disable_unprepare(base->clk);
clk_put(base->clk);
}
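The soft LLI workaround in the ste_dma40 changes is driven purely by platform data: d40_phy_res_init() walks plat_data->soft_lli_chans and flags those physical channels, and d40_log_lli_to_lcxa() then skips LCLA allocation for their peripheral-to-memory jobs. A hedged sketch of how a board file might supply this (channel numbers are invented; the field names follow the plat_data accesses in the hunk above):

/* Hypothetical board data marking physical channels 8 and 9 as soft-LLI only. */
static int db8500_soft_lli_chans[] = { 8, 9 };

static struct stedma40_platform_data dma40_plat_data = {
	/* ... existing dev_rx/dev_tx and memcpy channel setup ... */
	.soft_lli_chans		= db8500_soft_lli_chans,
	.num_of_soft_lli_chans	= ARRAY_SIZE(db8500_soft_lli_chans),
};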
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 851ad56e8409..7180e0d41722 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -102,17 +102,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
+ /* Set the priority bit to high for the physical channel */
+ if (cfg->high_priority) {
+ src |= 1 << D40_SREG_CFG_PRI_POS;
+ dst |= 1 << D40_SREG_CFG_PRI_POS;
+ }
+
} else {
/* Logical channel */
dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
}
- if (cfg->high_priority) {
- src |= 1 << D40_SREG_CFG_PRI_POS;
- dst |= 1 << D40_SREG_CFG_PRI_POS;
- }
-
if (cfg->src_info.big_endian)
src |= 1 << D40_SREG_CFG_LBE_POS;
if (cfg->dst_info.big_endian)
@@ -250,7 +251,7 @@ d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
return lli;
- err:
+err:
return NULL;
}
@@ -331,10 +332,10 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
{
d40_log_lli_link(lli_dst, lli_src, next, flags);
- writel(lli_src->lcsp02, &lcpa[0].lcsp0);
- writel(lli_src->lcsp13, &lcpa[0].lcsp1);
- writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
- writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
+ writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0);
+ writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1);
+ writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2);
+ writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3);
}
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
@@ -344,10 +345,10 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
{
d40_log_lli_link(lli_dst, lli_src, next, flags);
- writel(lli_src->lcsp02, &lcla[0].lcsp02);
- writel(lli_src->lcsp13, &lcla[0].lcsp13);
- writel(lli_dst->lcsp02, &lcla[1].lcsp02);
- writel(lli_dst->lcsp13, &lcla[1].lcsp13);
+ writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02);
+ writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13);
+ writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02);
+ writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);
}
static void d40_log_fill_lli(struct d40_log_lli *lli,
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 6d47373f3f58..fdde8ef77542 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -125,7 +125,7 @@
#define D40_DREG_GCC 0x000
#define D40_DREG_GCC_ENA 0x1
/* This assumes that there are only 4 event groups */
-#define D40_DREG_GCC_ENABLE_ALL 0xff01
+#define D40_DREG_GCC_ENABLE_ALL 0x3ff01
#define D40_DREG_GCC_EVTGRP_POS 8
#define D40_DREG_GCC_SRC 0
#define D40_DREG_GCC_DST 1
@@ -148,14 +148,31 @@
#define D40_DREG_LCPA 0x020
#define D40_DREG_LCLA 0x024
+
+#define D40_DREG_SSEG1 0x030
+#define D40_DREG_SSEG2 0x034
+#define D40_DREG_SSEG3 0x038
+#define D40_DREG_SSEG4 0x03C
+
+#define D40_DREG_SCEG1 0x040
+#define D40_DREG_SCEG2 0x044
+#define D40_DREG_SCEG3 0x048
+#define D40_DREG_SCEG4 0x04C
+
#define D40_DREG_ACTIVE 0x050
#define D40_DREG_ACTIVO 0x054
-#define D40_DREG_FSEB1 0x058
-#define D40_DREG_FSEB2 0x05C
+#define D40_DREG_CIDMOD 0x058
+#define D40_DREG_TCIDV 0x05C
#define D40_DREG_PCMIS 0x060
#define D40_DREG_PCICR 0x064
#define D40_DREG_PCTIS 0x068
#define D40_DREG_PCEIS 0x06C
+
+#define D40_DREG_SPCMIS 0x070
+#define D40_DREG_SPCICR 0x074
+#define D40_DREG_SPCTIS 0x078
+#define D40_DREG_SPCEIS 0x07C
+
#define D40_DREG_LCMIS0 0x080
#define D40_DREG_LCMIS1 0x084
#define D40_DREG_LCMIS2 0x088
@@ -172,6 +189,33 @@
#define D40_DREG_LCEIS1 0x0B4
#define D40_DREG_LCEIS2 0x0B8
#define D40_DREG_LCEIS3 0x0BC
+
+#define D40_DREG_SLCMIS1 0x0C0
+#define D40_DREG_SLCMIS2 0x0C4
+#define D40_DREG_SLCMIS3 0x0C8
+#define D40_DREG_SLCMIS4 0x0CC
+
+#define D40_DREG_SLCICR1 0x0D0
+#define D40_DREG_SLCICR2 0x0D4
+#define D40_DREG_SLCICR3 0x0D8
+#define D40_DREG_SLCICR4 0x0DC
+
+#define D40_DREG_SLCTIS1 0x0E0
+#define D40_DREG_SLCTIS2 0x0E4
+#define D40_DREG_SLCTIS3 0x0E8
+#define D40_DREG_SLCTIS4 0x0EC
+
+#define D40_DREG_SLCEIS1 0x0F0
+#define D40_DREG_SLCEIS2 0x0F4
+#define D40_DREG_SLCEIS3 0x0F8
+#define D40_DREG_SLCEIS4 0x0FC
+
+#define D40_DREG_FSESS1 0x100
+#define D40_DREG_FSESS2 0x104
+
+#define D40_DREG_FSEBS1 0x108
+#define D40_DREG_FSEBS2 0x10C
+
#define D40_DREG_PSEG1 0x110
#define D40_DREG_PSEG2 0x114
#define D40_DREG_PSEG3 0x118
@@ -188,6 +232,86 @@
#define D40_DREG_RCEG2 0x144
#define D40_DREG_RCEG3 0x148
#define D40_DREG_RCEG4 0x14C
+
+#define D40_DREG_PREFOT 0x15C
+#define D40_DREG_EXTCFG 0x160
+
+#define D40_DREG_CPSEG1 0x200
+#define D40_DREG_CPSEG2 0x204
+#define D40_DREG_CPSEG3 0x208
+#define D40_DREG_CPSEG4 0x20C
+#define D40_DREG_CPSEG5 0x210
+
+#define D40_DREG_CPCEG1 0x220
+#define D40_DREG_CPCEG2 0x224
+#define D40_DREG_CPCEG3 0x228
+#define D40_DREG_CPCEG4 0x22C
+#define D40_DREG_CPCEG5 0x230
+
+#define D40_DREG_CRSEG1 0x240
+#define D40_DREG_CRSEG2 0x244
+#define D40_DREG_CRSEG3 0x248
+#define D40_DREG_CRSEG4 0x24C
+#define D40_DREG_CRSEG5 0x250
+
+#define D40_DREG_CRCEG1 0x260
+#define D40_DREG_CRCEG2 0x264
+#define D40_DREG_CRCEG3 0x268
+#define D40_DREG_CRCEG4 0x26C
+#define D40_DREG_CRCEG5 0x270
+
+#define D40_DREG_CFSESS1 0x280
+#define D40_DREG_CFSESS2 0x284
+#define D40_DREG_CFSESS3 0x288
+
+#define D40_DREG_CFSEBS1 0x290
+#define D40_DREG_CFSEBS2 0x294
+#define D40_DREG_CFSEBS3 0x298
+
+#define D40_DREG_CLCMIS1 0x300
+#define D40_DREG_CLCMIS2 0x304
+#define D40_DREG_CLCMIS3 0x308
+#define D40_DREG_CLCMIS4 0x30C
+#define D40_DREG_CLCMIS5 0x310
+
+#define D40_DREG_CLCICR1 0x320
+#define D40_DREG_CLCICR2 0x324
+#define D40_DREG_CLCICR3 0x328
+#define D40_DREG_CLCICR4 0x32C
+#define D40_DREG_CLCICR5 0x330
+
+#define D40_DREG_CLCTIS1 0x340
+#define D40_DREG_CLCTIS2 0x344
+#define D40_DREG_CLCTIS3 0x348
+#define D40_DREG_CLCTIS4 0x34C
+#define D40_DREG_CLCTIS5 0x350
+
+#define D40_DREG_CLCEIS1 0x360
+#define D40_DREG_CLCEIS2 0x364
+#define D40_DREG_CLCEIS3 0x368
+#define D40_DREG_CLCEIS4 0x36C
+#define D40_DREG_CLCEIS5 0x370
+
+#define D40_DREG_CPCMIS 0x380
+#define D40_DREG_CPCICR 0x384
+#define D40_DREG_CPCTIS 0x388
+#define D40_DREG_CPCEIS 0x38C
+
+#define D40_DREG_SCCIDA1 0xE80
+#define D40_DREG_SCCIDA2 0xE90
+#define D40_DREG_SCCIDA3 0xEA0
+#define D40_DREG_SCCIDA4 0xEB0
+#define D40_DREG_SCCIDA5 0xEC0
+
+#define D40_DREG_SCCIDB1 0xE84
+#define D40_DREG_SCCIDB2 0xE94
+#define D40_DREG_SCCIDB3 0xEA4
+#define D40_DREG_SCCIDB4 0xEB4
+#define D40_DREG_SCCIDB5 0xEC4
+
+#define D40_DREG_PRSCCIDA 0xF80
+#define D40_DREG_PRSCCIDB 0xF84
+
#define D40_DREG_STFU 0xFC8
#define D40_DREG_ICFG 0xFCC
#define D40_DREG_PERIPHID0 0xFE0
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 58c1896271e1..fcee27eae1f6 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -32,8 +32,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/clk/tegra.h>
-#include <mach/clk.h>
#include "dmaengine.h"
#define TEGRA_APBDMA_GENERAL 0x0
@@ -63,6 +63,9 @@
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2
#define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC
+#define TEGRA_APBDMA_CHAN_CSRE 0x00C
+#define TEGRA_APBDMA_CHAN_CSRE_PAUSE (1 << 31)
+
/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR 0x010
@@ -113,10 +116,12 @@ struct tegra_dma;
* tegra_dma_chip_data Tegra chip specific DMA data
* @nr_channels: Number of channels available in the controller.
* @max_dma_count: Maximum DMA transfer count supported by DMA controller.
+ * @support_channel_pause: Support channel-wise pause of DMA.
*/
struct tegra_dma_chip_data {
int nr_channels;
int max_dma_count;
+ bool support_channel_pause;
};
/* DMA channel registers */
@@ -355,6 +360,32 @@ static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
spin_unlock(&tdma->global_lock);
}
+static void tegra_dma_pause(struct tegra_dma_channel *tdc,
+ bool wait_for_burst_complete)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+
+ if (tdma->chip_data->support_channel_pause) {
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
+ TEGRA_APBDMA_CHAN_CSRE_PAUSE);
+ if (wait_for_burst_complete)
+ udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+ } else {
+ tegra_dma_global_pause(tdc, wait_for_burst_complete);
+ }
+}
+
+static void tegra_dma_resume(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+
+ if (tdma->chip_data->support_channel_pause) {
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
+ } else {
+ tegra_dma_global_resume(tdc);
+ }
+}
+
static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
u32 csr;
@@ -410,7 +441,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
* If there is already an IEC status then the interrupt handler needs to
* load the new configuration.
*/
- tegra_dma_global_pause(tdc, false);
+ tegra_dma_pause(tdc, false);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
/*
@@ -420,7 +451,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
dev_err(tdc2dev(tdc),
"Skipping new configuration as interrupt is pending\n");
- tegra_dma_global_resume(tdc);
+ tegra_dma_resume(tdc);
return;
}
@@ -431,7 +462,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
nsg_req->configured = true;
- tegra_dma_global_resume(tdc);
+ tegra_dma_resume(tdc);
}
static void tdc_start_head_req(struct tegra_dma_channel *tdc)
@@ -692,7 +723,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
goto skip_dma_stop;
/* Pause DMA before checking the queue status */
- tegra_dma_global_pause(tdc, true);
+ tegra_dma_pause(tdc, true);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
@@ -710,7 +741,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
sgreq->dma_desc->bytes_transferred +=
get_current_xferred_count(tdc, sgreq, status);
}
- tegra_dma_global_resume(tdc);
+ tegra_dma_resume(tdc);
skip_dma_stop:
tegra_dma_abort_all(tdc);
@@ -738,7 +769,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
ret = dma_cookie_status(dc, cookie, txstate);
if (ret == DMA_SUCCESS) {
- dma_set_residue(txstate, 0);
spin_unlock_irqrestore(&tdc->lock, flags);
return ret;
}
@@ -1180,6 +1210,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
.nr_channels = 16,
.max_dma_count = 1024UL * 64,
+ .support_channel_pause = false,
};
#if defined(CONFIG_OF)
@@ -1187,10 +1218,22 @@ static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
.nr_channels = 32,
.max_dma_count = 1024UL * 64,
+ .support_channel_pause = false,
};
+/* Tegra114 specific DMA controller information */
+static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
+ .nr_channels = 32,
+ .max_dma_count = 1024UL * 64,
+ .support_channel_pause = true,
+};
+
+
static const struct of_device_id tegra_dma_of_match[] = {
{
+ .compatible = "nvidia,tegra114-apbdma",
+ .data = &tegra114_dma_chip_data,
+ }, {
.compatible = "nvidia,tegra30-apbdma",
.data = &tegra30_dma_chip_data,
}, {
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index acb709bfac0f..e443f2c1dfd1 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -80,6 +80,29 @@ config EDAC_MM_EDAC
occurred so that a particular failing memory module can be
replaced. If unsure, select 'Y'.
+config EDAC_GHES
+ bool "Output ACPI APEI/GHES BIOS detected errors via EDAC"
+ depends on ACPI_APEI_GHES && (EDAC_MM_EDAC=y)
+ default y
+ help
+ Not all machines support hardware-driven error reporting. Some of
+ them provide a BIOS-driven error reporting mechanism via ACPI, using
+ the APEI/GHES driver. By enabling this option, the error reports
+ provided by GHES are sent to userspace via the EDAC API.
+
+ When this option is enabled and a GHES BIOS is detected, the
+ hardware-driven mechanisms are disabled and the system enters
+ "Firmware First" mode.
+
+ Note that keeping both GHES and a hardware-driven error mechanism
+ enabled won't work well, as the BIOS will race with the OS while
+ reading the error registers. So, if you do not want to use the
+ "Firmware First" GHES error mechanism, you should disable GHES either
+ at compilation time or by passing the "ghes.disable=1" kernel
+ parameter at boot time.
+
+ If unsure, say 'Y'.
+
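For orientation, a minimal Kconfig selection satisfying the dependencies stated above could look like the fragment below; everything other than EDAC_GHES and its two stated dependencies is just the usual ACPI/APEI plumbing, listed here as an assumption rather than as part of this patch:

    CONFIG_ACPI=y
    CONFIG_ACPI_APEI=y
    CONFIG_ACPI_APEI_GHES=y
    CONFIG_EDAC=y
    CONFIG_EDAC_MM_EDAC=y
    CONFIG_EDAC_GHES=y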
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64) K8, F10h"
depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 5608a9ba61b7..4154ed6a02c6 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -16,6 +16,7 @@ ifdef CONFIG_PCI
edac_core-y += edac_pci.o edac_pci_sysfs.o
endif
+obj-$(CONFIG_EDAC_GHES) += ghes_edac.o
obj-$(CONFIG_EDAC_MCE_INJ) += mce_amd_inj.o
edac_mce_amd-y := mce_amd.o
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 23bb99fa44f1..3c2625e7980d 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -453,6 +453,11 @@ extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
unsigned long page);
+
+void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ struct edac_raw_error_desc *e);
+
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
struct mem_ctl_info *mci,
const u16 error_count,
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d1e9eb191f2b..cdb81aa73ab7 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -42,6 +42,12 @@
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);
+/*
+ * Used to lock EDAC MC to just one module, preventing two drivers (e.g.
+ * apei/ghes and i7core_edac) from being used at the same time.
+ */
+static void const *edac_mc_owner;
+
unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
unsigned len)
{
@@ -441,13 +447,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
mci->op_state = OP_ALLOC;
- /* at this point, the root kobj is valid, and in order to
- * 'free' the object, then the function:
- * edac_mc_unregister_sysfs_main_kobj() must be called
- * which will perform kobj unregistration and the actual free
- * will occur during the kobject callback operation
- */
-
return mci;
error:
@@ -666,9 +665,9 @@ fail1:
return 1;
}
-static void del_mc_from_global_list(struct mem_ctl_info *mci)
+static int del_mc_from_global_list(struct mem_ctl_info *mci)
{
- atomic_dec(&edac_handlers);
+ int handlers = atomic_dec_return(&edac_handlers);
list_del_rcu(&mci->link);
/* these are for safe removal of devices from global list while
@@ -676,6 +675,8 @@ static void del_mc_from_global_list(struct mem_ctl_info *mci)
*/
synchronize_rcu();
INIT_LIST_HEAD(&mci->link);
+
+ return handlers;
}
/**
@@ -719,6 +720,7 @@ EXPORT_SYMBOL(edac_mc_find);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
+ int ret = -EINVAL;
edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG
@@ -749,6 +751,11 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
#endif
mutex_lock(&mem_ctls_mutex);
+ if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
+ ret = -EPERM;
+ goto fail0;
+ }
+
if (add_mc_to_global_list(mci))
goto fail0;
@@ -775,6 +782,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
" DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));
+ edac_mc_owner = mci->mod_name;
+
mutex_unlock(&mem_ctls_mutex);
return 0;
@@ -783,7 +792,7 @@ fail1:
fail0:
mutex_unlock(&mem_ctls_mutex);
- return 1;
+ return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);
@@ -809,7 +818,8 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
return NULL;
}
- del_mc_from_global_list(mci);
+ if (!del_mc_from_global_list(mci))
+ edac_mc_owner = NULL;
mutex_unlock(&mem_ctls_mutex);
/* flush workq processes */
@@ -907,6 +917,7 @@ const char *edac_layer_name[] = {
[EDAC_MC_LAYER_CHANNEL] = "channel",
[EDAC_MC_LAYER_SLOT] = "slot",
[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
+ [EDAC_MC_LAYER_ALL_MEM] = "memory",
};
EXPORT_SYMBOL_GPL(edac_layer_name);
@@ -1054,7 +1065,46 @@ static void edac_ue_error(struct mem_ctl_info *mci,
edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
}
-#define OTHER_LABEL " or "
+/**
+ * edac_raw_mc_handle_error - reports a memory event to userspace without doing
+ * anything to discover the error location
+ *
+ * @type: severity of the error (CE/UE/Fatal)
+ * @mci: a struct mem_ctl_info pointer
+ * @e: error description
+ *
+ * This raw function is used internally by edac_mc_handle_error(). It should
+ * only be called directly when the hardware error comes directly from the
+ * BIOS, as in the case of the APEI GHES driver.
+ */
+void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ struct edac_raw_error_desc *e)
+{
+ char detail[80];
+ int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
+
+ /* Memory type dependent details about the error */
+ if (type == HW_EVENT_ERR_CORRECTED) {
+ snprintf(detail, sizeof(detail),
+ "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
+ e->page_frame_number, e->offset_in_page,
+ e->grain, e->syndrome);
+ edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,
+ detail, e->other_detail, e->enable_per_layer_report,
+ e->page_frame_number, e->offset_in_page, e->grain);
+ } else {
+ snprintf(detail, sizeof(detail),
+ "page:0x%lx offset:0x%lx grain:%ld",
+ e->page_frame_number, e->offset_in_page, e->grain);
+
+ edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,
+ detail, e->other_detail, e->enable_per_layer_report);
+ }
+
+
+}
+EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
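To illustrate the new entry point, here is a hedged sketch (not part of this patch) of how an out-of-band reporter would use it, mirroring what the GHES glue below does: the caller fills the pre-allocated edac_raw_error_desc embedded in the mci and hands it over, skipping the DIMM-matching logic of edac_mc_handle_error():

    /* Illustrative caller only; field usage follows ghes_edac.c below. */
    static void example_raw_report(struct mem_ctl_info *mci,
                                   unsigned long pfn, unsigned long offset)
    {
            struct edac_raw_error_desc *e = &mci->error_desc;

            memset(e, 0, sizeof(*e));
            e->error_count = 1;
            e->top_layer = -1;
            e->mid_layer = -1;
            e->low_layer = -1;
            e->page_frame_number = pfn;
            e->offset_in_page = offset;
            e->grain = 128;
            e->msg = "example error";
            e->other_detail = "";
            strcpy(e->label, "unknown label");

            edac_raw_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, e);
    }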
/**
* edac_mc_handle_error - reports a memory event to userspace
@@ -1086,19 +1136,27 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
const char *msg,
const char *other_detail)
{
- /* FIXME: too much for stack: move it to some pre-alocated area */
- char detail[80], location[80];
- char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
char *p;
int row = -1, chan = -1;
int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
- int i;
- long grain;
- bool enable_per_layer_report = false;
+ int i, n_labels = 0;
u8 grain_bits;
+ struct edac_raw_error_desc *e = &mci->error_desc;
edac_dbg(3, "MC%d\n", mci->mc_idx);
+ /* Fills the error report buffer */
+ memset(e, 0, sizeof (*e));
+ e->error_count = error_count;
+ e->top_layer = top_layer;
+ e->mid_layer = mid_layer;
+ e->low_layer = low_layer;
+ e->page_frame_number = page_frame_number;
+ e->offset_in_page = offset_in_page;
+ e->syndrome = syndrome;
+ e->msg = msg;
+ e->other_detail = other_detail;
+
/*
* Check if the event report is consistent and if the memory
* location is known. If it is known, enable_per_layer_report will be
@@ -1121,7 +1179,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
pos[i] = -1;
}
if (pos[i] >= 0)
- enable_per_layer_report = true;
+ e->enable_per_layer_report = true;
}
/*
@@ -1135,8 +1193,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
* where each memory belongs to a separate channel within the same
* branch.
*/
- grain = 0;
- p = label;
+ p = e->label;
*p = '\0';
for (i = 0; i < mci->tot_dimms; i++) {
@@ -1150,8 +1207,8 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
continue;
/* get the max grain, over the error match range */
- if (dimm->grain > grain)
- grain = dimm->grain;
+ if (dimm->grain > e->grain)
+ e->grain = dimm->grain;
/*
* If the error is memory-controller wide, there's no need to
@@ -1159,8 +1216,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
* channel/memory controller/... may be affected.
* Also, don't show errors for empty DIMM slots.
*/
- if (enable_per_layer_report && dimm->nr_pages) {
- if (p != label) {
+ if (e->enable_per_layer_report && dimm->nr_pages) {
+ if (n_labels >= EDAC_MAX_LABELS) {
+ e->enable_per_layer_report = false;
+ break;
+ }
+ n_labels++;
+ if (p != e->label) {
strcpy(p, OTHER_LABEL);
p += strlen(OTHER_LABEL);
}
@@ -1187,12 +1249,12 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
}
}
- if (!enable_per_layer_report) {
- strcpy(label, "any memory");
+ if (!e->enable_per_layer_report) {
+ strcpy(e->label, "any memory");
} else {
edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
- if (p == label)
- strcpy(label, "unknown memory");
+ if (p == e->label)
+ strcpy(e->label, "unknown memory");
if (type == HW_EVENT_ERR_CORRECTED) {
if (row >= 0) {
mci->csrows[row]->ce_count += error_count;
@@ -1205,7 +1267,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
}
/* Fill the RAM location data */
- p = location;
+ p = e->location;
for (i = 0; i < mci->n_layers; i++) {
if (pos[i] < 0)
@@ -1215,32 +1277,16 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
edac_layer_name[mci->layers[i].type],
pos[i]);
}
- if (p > location)
+ if (p > e->location)
*(p - 1) = '\0';
/* Report the error via the trace interface */
- grain_bits = fls_long(grain) + 1;
- trace_mc_event(type, msg, label, error_count,
- mci->mc_idx, top_layer, mid_layer, low_layer,
- PAGES_TO_MiB(page_frame_number) | offset_in_page,
- grain_bits, syndrome, other_detail);
+ grain_bits = fls_long(e->grain) + 1;
+ trace_mc_event(type, e->msg, e->label, e->error_count,
+ mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
+ PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page,
+ grain_bits, e->syndrome, e->other_detail);
- /* Memory type dependent details about the error */
- if (type == HW_EVENT_ERR_CORRECTED) {
- snprintf(detail, sizeof(detail),
- "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
- page_frame_number, offset_in_page,
- grain, syndrome);
- edac_ce_error(mci, error_count, pos, msg, location, label,
- detail, other_detail, enable_per_layer_report,
- page_frame_number, offset_in_page, grain);
- } else {
- snprintf(detail, sizeof(detail),
- "page:0x%lx offset:0x%lx grain:%ld",
- page_frame_number, offset_in_page, grain);
-
- edac_ue_error(mci, error_count, pos, msg, location, label,
- detail, other_detail, enable_per_layer_report);
- }
+ edac_raw_mc_handle_error(type, mci, e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 0ca1ca71157f..4f4b6137d74e 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -7,7 +7,7 @@
*
* Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
*
- * (c) 2012 - Mauro Carvalho Chehab <mchehab@redhat.com>
+ * (c) 2012-2013 - Mauro Carvalho Chehab <mchehab@redhat.com>
* The entire API were re-written, and ported to use struct device
*
*/
@@ -429,8 +429,12 @@ static int edac_create_csrow_objects(struct mem_ctl_info *mci)
if (!nr_pages_per_csrow(csrow))
continue;
err = edac_create_csrow_object(mci, mci->csrows[i], i);
- if (err < 0)
+ if (err < 0) {
+ edac_dbg(1,
+ "failure: create csrow objects for csrow %d\n",
+ i);
goto error;
+ }
}
return 0;
@@ -677,9 +681,6 @@ static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
unsigned long bandwidth = 0;
int new_bw = 0;
- if (!mci->set_sdram_scrub_rate)
- return -ENODEV;
-
if (strict_strtoul(data, 10, &bandwidth) < 0)
return -EINVAL;
@@ -703,9 +704,6 @@ static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
struct mem_ctl_info *mci = to_mci(dev);
int bandwidth = 0;
- if (!mci->get_sdram_scrub_rate)
- return -ENODEV;
-
bandwidth = mci->get_sdram_scrub_rate(mci);
if (bandwidth < 0) {
edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
@@ -866,8 +864,7 @@ DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
/* memory scrubber attribute file */
-DEVICE_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
- mci_sdram_scrub_rate_store);
+DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL);
static struct attribute *mci_attrs[] = {
&dev_attr_reset_counters.attr,
@@ -878,7 +875,6 @@ static struct attribute *mci_attrs[] = {
&dev_attr_ce_noinfo_count.attr,
&dev_attr_ue_count.attr,
&dev_attr_ce_count.attr,
- &dev_attr_sdram_scrub_rate.attr,
&dev_attr_max_location.attr,
NULL
};
@@ -1007,11 +1003,28 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
err = device_add(&mci->dev);
if (err < 0) {
+ edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
bus_unregister(&mci->bus);
kfree(mci->bus.name);
return err;
}
+ if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
+ if (mci->get_sdram_scrub_rate) {
+ dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
+ dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
+ }
+ if (mci->set_sdram_scrub_rate) {
+ dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
+ dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
+ }
+ err = device_create_file(&mci->dev,
+ &dev_attr_sdram_scrub_rate);
+ if (err) {
+ edac_dbg(1, "failure: create sdram_scrub_rate\n");
+ goto fail2;
+ }
+ }
/*
* Create the dimm/rank devices
*/
@@ -1056,6 +1069,7 @@ fail:
continue;
device_unregister(&dimm->dev);
}
+fail2:
device_unregister(&mci->dev);
bus_unregister(&mci->bus);
kfree(mci->bus.name);
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index 12c951a2c33d..a66941fea5a4 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -146,7 +146,7 @@ static void __exit edac_exit(void)
/*
* Inform the kernel of our entry and exit points
*/
-module_init(edac_init);
+subsys_initcall(edac_init);
module_exit(edac_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 0056c4dae9d5..e8658e451762 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -429,8 +429,8 @@ static void edac_pci_main_kobj_teardown(void)
if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
edac_dbg(0, "called kobject_put on main kobj\n");
kobject_put(edac_pci_top_main_kobj);
+ edac_put_sysfs_subsys();
}
- edac_put_sysfs_subsys();
}
/*
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
new file mode 100644
index 000000000000..bb534670ec02
--- /dev/null
+++ b/drivers/edac/ghes_edac.c
@@ -0,0 +1,537 @@
+/*
+ * GHES/EDAC Linux driver
+ *
+ * This file may be distributed under the terms of the GNU General Public
+ * License version 2.
+ *
+ * Copyright (c) 2013 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Red Hat Inc. http://www.redhat.com
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <acpi/ghes.h>
+#include <linux/edac.h>
+#include <linux/dmi.h>
+#include "edac_core.h"
+#include <ras/ras_event.h>
+
+#define GHES_EDAC_REVISION " Ver: 1.0.0"
+
+struct ghes_edac_pvt {
+ struct list_head list;
+ struct ghes *ghes;
+ struct mem_ctl_info *mci;
+
+ /* Buffers for the error handling routine */
+ char detail_location[240];
+ char other_detail[160];
+ char msg[80];
+};
+
+static LIST_HEAD(ghes_reglist);
+static DEFINE_MUTEX(ghes_edac_lock);
+static int ghes_edac_mc_num;
+
+
+/* Memory Device - Type 17 of SMBIOS spec */
+struct memdev_dmi_entry {
+ u8 type;
+ u8 length;
+ u16 handle;
+ u16 phys_mem_array_handle;
+ u16 mem_err_info_handle;
+ u16 total_width;
+ u16 data_width;
+ u16 size;
+ u8 form_factor;
+ u8 device_set;
+ u8 device_locator;
+ u8 bank_locator;
+ u8 memory_type;
+ u16 type_detail;
+ u16 speed;
+ u8 manufacturer;
+ u8 serial_number;
+ u8 asset_tag;
+ u8 part_number;
+ u8 attributes;
+ u32 extended_size;
+ u16 conf_mem_clk_speed;
+} __attribute__((__packed__));
+
+struct ghes_edac_dimm_fill {
+ struct mem_ctl_info *mci;
+ unsigned count;
+};
+
+char *memory_type[] = {
+ [MEM_EMPTY] = "EMPTY",
+ [MEM_RESERVED] = "RESERVED",
+ [MEM_UNKNOWN] = "UNKNOWN",
+ [MEM_FPM] = "FPM",
+ [MEM_EDO] = "EDO",
+ [MEM_BEDO] = "BEDO",
+ [MEM_SDR] = "SDR",
+ [MEM_RDR] = "RDR",
+ [MEM_DDR] = "DDR",
+ [MEM_RDDR] = "RDDR",
+ [MEM_RMBS] = "RMBS",
+ [MEM_DDR2] = "DDR2",
+ [MEM_FB_DDR2] = "FB_DDR2",
+ [MEM_RDDR2] = "RDDR2",
+ [MEM_XDR] = "XDR",
+ [MEM_DDR3] = "DDR3",
+ [MEM_RDDR3] = "RDDR3",
+};
+
+static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
+{
+ int *num_dimm = arg;
+
+ if (dh->type == DMI_ENTRY_MEM_DEVICE)
+ (*num_dimm)++;
+}
+
+static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
+{
+ struct ghes_edac_dimm_fill *dimm_fill = arg;
+ struct mem_ctl_info *mci = dimm_fill->mci;
+
+ if (dh->type == DMI_ENTRY_MEM_DEVICE) {
+ struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
+ struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers,
+ dimm_fill->count, 0, 0);
+
+ if (entry->size == 0xffff) {
+ pr_info("Can't get DIMM%i size\n",
+ dimm_fill->count);
+ dimm->nr_pages = MiB_TO_PAGES(32);/* Unknown */
+ } else if (entry->size == 0x7fff) {
+ dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
+ } else {
+ if (entry->size & 1 << 15)
+ dimm->nr_pages = MiB_TO_PAGES((entry->size &
+ 0x7fff) << 10);
+ else
+ dimm->nr_pages = MiB_TO_PAGES(entry->size);
+ }
+
+ switch (entry->memory_type) {
+ case 0x12:
+ if (entry->type_detail & 1 << 13)
+ dimm->mtype = MEM_RDDR;
+ else
+ dimm->mtype = MEM_DDR;
+ break;
+ case 0x13:
+ if (entry->type_detail & 1 << 13)
+ dimm->mtype = MEM_RDDR2;
+ else
+ dimm->mtype = MEM_DDR2;
+ break;
+ case 0x14:
+ dimm->mtype = MEM_FB_DDR2;
+ break;
+ case 0x18:
+ if (entry->type_detail & 1 << 13)
+ dimm->mtype = MEM_RDDR3;
+ else
+ dimm->mtype = MEM_DDR3;
+ break;
+ default:
+ if (entry->type_detail & 1 << 6)
+ dimm->mtype = MEM_RMBS;
+ else if ((entry->type_detail & ((1 << 7) | (1 << 13)))
+ == ((1 << 7) | (1 << 13)))
+ dimm->mtype = MEM_RDR;
+ else if (entry->type_detail & 1 << 7)
+ dimm->mtype = MEM_SDR;
+ else if (entry->type_detail & 1 << 9)
+ dimm->mtype = MEM_EDO;
+ else
+ dimm->mtype = MEM_UNKNOWN;
+ }
+
+ /*
+ * Actually, we can only detect whether the memory has checksum
+ * bits or not.
+ */
+ if (entry->total_width == entry->data_width)
+ dimm->edac_mode = EDAC_NONE;
+ else
+ dimm->edac_mode = EDAC_SECDED;
+
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->grain = 128; /* Likely, worst case */
+
+ /*
+ * FIXME: It shouldn't be hard to also fill the DIMM labels
+ */
+
+ if (dimm->nr_pages) {
+ edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
+ dimm_fill->count, memory_type[dimm->mtype],
+ PAGES_TO_MiB(dimm->nr_pages),
+ (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
+ edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
+ entry->memory_type, entry->type_detail,
+ entry->total_width, entry->data_width);
+ }
+
+ dimm_fill->count++;
+ }
+}
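For reference, the SMBIOS Type 17 "Size" field that ghes_edac_dmidecode() parses is defined so that 0xffff means the size is unknown, 0x7fff redirects to the Extended Size field (in MiB), and bit 15 flags KiB rather than MiB units. A minimal, driver-independent decoder along those lines (a sketch based on the SMBIOS spec, not code taken from this patch) might read:

    #include <stdint.h>

    /* Rough SMBIOS Type 17 "Size" decoding, returning mebibytes. */
    static unsigned int dmi_mem_size_mib(uint16_t size, uint32_t extended_size)
    {
            if (size == 0xffff)
                    return 0;                       /* unknown */
            if (size == 0x7fff)
                    return extended_size;           /* MiB, in Extended Size */
            if (size & (1 << 15))
                    return (size & 0x7fff) / 1024;  /* value is in KiB */
            return size;                            /* value is in MiB */
    }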
+
+void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
+ struct cper_sec_mem_err *mem_err)
+{
+ enum hw_event_mc_err_type type;
+ struct edac_raw_error_desc *e;
+ struct mem_ctl_info *mci;
+ struct ghes_edac_pvt *pvt = NULL;
+ char *p;
+ u8 grain_bits;
+
+ list_for_each_entry(pvt, &ghes_reglist, list) {
+ if (ghes == pvt->ghes)
+ break;
+ }
+ if (!pvt) {
+ pr_err("Internal error: Can't find EDAC structure\n");
+ return;
+ }
+ mci = pvt->mci;
+ e = &mci->error_desc;
+
+ /* Cleans the error report buffer */
+ memset(e, 0, sizeof (*e));
+ e->error_count = 1;
+ strcpy(e->label, "unknown label");
+ e->msg = pvt->msg;
+ e->other_detail = pvt->other_detail;
+ e->top_layer = -1;
+ e->mid_layer = -1;
+ e->low_layer = -1;
+ *pvt->other_detail = '\0';
+ *pvt->msg = '\0';
+
+ switch (sev) {
+ case GHES_SEV_CORRECTED:
+ type = HW_EVENT_ERR_CORRECTED;
+ break;
+ case GHES_SEV_RECOVERABLE:
+ type = HW_EVENT_ERR_UNCORRECTED;
+ break;
+ case GHES_SEV_PANIC:
+ type = HW_EVENT_ERR_FATAL;
+ break;
+ default:
+ case GHES_SEV_NO:
+ type = HW_EVENT_ERR_INFO;
+ }
+
+ edac_dbg(1, "error validation_bits: 0x%08llx\n",
+ (long long)mem_err->validation_bits);
+
+ /* Error type, mapped on e->msg */
+ if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
+ p = pvt->msg;
+ switch (mem_err->error_type) {
+ case 0:
+ p += sprintf(p, "Unknown");
+ break;
+ case 1:
+ p += sprintf(p, "No error");
+ break;
+ case 2:
+ p += sprintf(p, "Single-bit ECC");
+ break;
+ case 3:
+ p += sprintf(p, "Multi-bit ECC");
+ break;
+ case 4:
+ p += sprintf(p, "Single-symbol ChipKill ECC");
+ break;
+ case 5:
+ p += sprintf(p, "Multi-symbol ChipKill ECC");
+ break;
+ case 6:
+ p += sprintf(p, "Master abort");
+ break;
+ case 7:
+ p += sprintf(p, "Target abort");
+ break;
+ case 8:
+ p += sprintf(p, "Parity Error");
+ break;
+ case 9:
+ p += sprintf(p, "Watchdog timeout");
+ break;
+ case 10:
+ p += sprintf(p, "Invalid address");
+ break;
+ case 11:
+ p += sprintf(p, "Mirror Broken");
+ break;
+ case 12:
+ p += sprintf(p, "Memory Sparing");
+ break;
+ case 13:
+ p += sprintf(p, "Scrub corrected error");
+ break;
+ case 14:
+ p += sprintf(p, "Scrub uncorrected error");
+ break;
+ case 15:
+ p += sprintf(p, "Physical Memory Map-out event");
+ break;
+ default:
+ p += sprintf(p, "reserved error (%d)",
+ mem_err->error_type);
+ }
+ } else {
+ strcpy(pvt->msg, "unknown error");
+ }
+
+ /* Error address */
+ if (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+ e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT;
+ e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK;
+ }
+
+ /* Error grain */
+ if (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK) {
+ e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
+ }
+
+ /* Memory error location, mapped on e->location */
+ p = e->location;
+ if (mem_err->validation_bits & CPER_MEM_VALID_NODE)
+ p += sprintf(p, "node:%d ", mem_err->node);
+ if (mem_err->validation_bits & CPER_MEM_VALID_CARD)
+ p += sprintf(p, "card:%d ", mem_err->card);
+ if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
+ p += sprintf(p, "module:%d ", mem_err->module);
+ if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
+ p += sprintf(p, "bank:%d ", mem_err->bank);
+ if (mem_err->validation_bits & CPER_MEM_VALID_ROW)
+ p += sprintf(p, "row:%d ", mem_err->row);
+ if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
+ p += sprintf(p, "col:%d ", mem_err->column);
+ if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
+ p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
+ if (p > e->location)
+ *(p - 1) = '\0';
+
+ /* All other fields are mapped on e->other_detail */
+ p = pvt->other_detail;
+ if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
+ u64 status = mem_err->error_status;
+
+ p += sprintf(p, "status(0x%016llx): ", (long long)status);
+ switch ((status >> 8) & 0xff) {
+ case 1:
+ p += sprintf(p, "Error detected internal to the component ");
+ break;
+ case 16:
+ p += sprintf(p, "Error detected in the bus ");
+ break;
+ case 4:
+ p += sprintf(p, "Storage error in DRAM memory ");
+ break;
+ case 5:
+ p += sprintf(p, "Storage error in TLB ");
+ break;
+ case 6:
+ p += sprintf(p, "Storage error in cache ");
+ break;
+ case 7:
+ p += sprintf(p, "Error in one or more functional units ");
+ break;
+ case 8:
+ p += sprintf(p, "component failed self test ");
+ break;
+ case 9:
+ p += sprintf(p, "Overflow or undervalue of internal queue ");
+ break;
+ case 17:
+ p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
+ break;
+ case 18:
+ p += sprintf(p, "Improper access error ");
+ break;
+ case 19:
+ p += sprintf(p, "Access to a memory address which is not mapped to any component ");
+ break;
+ case 20:
+ p += sprintf(p, "Loss of Lockstep ");
+ break;
+ case 21:
+ p += sprintf(p, "Response not associated with a request ");
+ break;
+ case 22:
+ p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
+ break;
+ case 23:
+ p += sprintf(p, "Detection of a PATH_ERROR ");
+ break;
+ case 25:
+ p += sprintf(p, "Bus operation timeout ");
+ break;
+ case 26:
+ p += sprintf(p, "A read was issued to data that has been poisoned ");
+ break;
+ default:
+ p += sprintf(p, "reserved ");
+ break;
+ }
+ }
+ if (mem_err->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
+ p += sprintf(p, "requestorID: 0x%016llx ",
+ (long long)mem_err->requestor_id);
+ if (mem_err->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
+ p += sprintf(p, "responderID: 0x%016llx ",
+ (long long)mem_err->responder_id);
+ if (mem_err->validation_bits & CPER_MEM_VALID_TARGET_ID)
+ p += sprintf(p, "targetID: 0x%016llx ",
+ (long long)mem_err->responder_id);
+ if (p > pvt->other_detail)
+ *(p - 1) = '\0';
+
+ /* Generate the trace event */
+ grain_bits = fls_long(e->grain);
+ sprintf(pvt->detail_location, "APEI location: %s %s",
+ e->location, e->other_detail);
+ trace_mc_event(type, e->msg, e->label, e->error_count,
+ mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
+ PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page,
+ grain_bits, e->syndrome, pvt->detail_location);
+
+ /* Report the error via EDAC API */
+ edac_raw_mc_handle_error(type, mci, e);
+}
+EXPORT_SYMBOL_GPL(ghes_edac_report_mem_error);
+
+int ghes_edac_register(struct ghes *ghes, struct device *dev)
+{
+ bool fake = false;
+ int rc, num_dimm = 0;
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[1];
+ struct ghes_edac_pvt *pvt;
+ struct ghes_edac_dimm_fill dimm_fill;
+
+ /* Get the number of DIMMs */
+ dmi_walk(ghes_edac_count_dimms, &num_dimm);
+
+ /* Check if we've got a bogus BIOS */
+ if (num_dimm == 0) {
+ fake = true;
+ num_dimm = 1;
+ }
+
+ layers[0].type = EDAC_MC_LAYER_ALL_MEM;
+ layers[0].size = num_dimm;
+ layers[0].is_virt_csrow = true;
+
+ /*
+ * We need to serialize edac_mc_alloc() and edac_mc_add_mc(),
+ * to avoid duplicated memory controller numbers
+ */
+ mutex_lock(&ghes_edac_lock);
+ mci = edac_mc_alloc(ghes_edac_mc_num, ARRAY_SIZE(layers), layers,
+ sizeof(*pvt));
+ if (!mci) {
+ pr_info("Can't allocate memory for EDAC data\n");
+ mutex_unlock(&ghes_edac_lock);
+ return -ENOMEM;
+ }
+
+ pvt = mci->pvt_info;
+ memset(pvt, 0, sizeof(*pvt));
+ list_add_tail(&pvt->list, &ghes_reglist);
+ pvt->ghes = ghes;
+ pvt->mci = mci;
+ mci->pdev = dev;
+
+ mci->mtype_cap = MEM_FLAG_EMPTY;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "ghes_edac.c";
+ mci->mod_ver = GHES_EDAC_REVISION;
+ mci->ctl_name = "ghes_edac";
+ mci->dev_name = "ghes";
+
+ if (!ghes_edac_mc_num) {
+ if (!fake) {
+ pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
+ pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
+ pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
+ pr_info("If you find incorrect reports, please contact your hardware vendor\n");
+ pr_info("to correct its BIOS.\n");
+ pr_info("This system has %d DIMM sockets.\n",
+ num_dimm);
+ } else {
+ pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
+ pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
+ pr_info("work on such system. Use this driver with caution\n");
+ }
+ }
+
+ if (!fake) {
+ /*
+ * Fill DIMM info from DMI for the memory controller #0
+ *
+ * Keep it blank for the other memory controllers, as
+ * there's no reliable way to properly credit each DIMM to
+ * the memory controller, since different BIOSes fill the
+ * DMI bank location fields in different ways.
+ */
+ if (!ghes_edac_mc_num) {
+ dimm_fill.count = 0;
+ dimm_fill.mci = mci;
+ dmi_walk(ghes_edac_dmidecode, &dimm_fill);
+ }
+ } else {
+ struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers, 0, 0, 0);
+
+ dimm->nr_pages = 1;
+ dimm->grain = 128;
+ dimm->mtype = MEM_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_SECDED;
+ }
+
+ rc = edac_mc_add_mc(mci);
+ if (rc < 0) {
+ pr_info("Can't register at EDAC core\n");
+ edac_mc_free(mci);
+ mutex_unlock(&ghes_edac_lock);
+ return -ENODEV;
+ }
+
+ ghes_edac_mc_num++;
+ mutex_unlock(&ghes_edac_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ghes_edac_register);
+
+void ghes_edac_unregister(struct ghes *ghes)
+{
+ struct mem_ctl_info *mci;
+ struct ghes_edac_pvt *pvt, *tmp;
+
+ list_for_each_entry_safe(pvt, tmp, &ghes_reglist, list) {
+ if (ghes == pvt->ghes) {
+ mci = pvt->mci;
+ edac_mc_del_mc(mci->pdev);
+ edac_mc_free(mci);
+ list_del(&pvt->list);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(ghes_edac_unregister);
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 4e8337602e78..aa44c1718f50 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -106,16 +106,26 @@ static int nr_channels;
static int how_many_channels(struct pci_dev *pdev)
{
+ int n_channels;
+
unsigned char capid0_8b; /* 8th byte of CAPID0 */
pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
+
if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
edac_dbg(0, "In single channel mode\n");
- return 1;
+ n_channels = 1;
} else {
edac_dbg(0, "In dual channel mode\n");
- return 2;
+ n_channels = 2;
}
+
+ if (capid0_8b & 0x10) /* check if both channels are filled */
+ edac_dbg(0, "2 DIMMS per channel disabled\n");
+ else
+ edac_dbg(0, "2 DIMMS per channel enabled\n");
+
+ return n_channels;
}
static unsigned long eccerrlog_syndrome(u64 log)
@@ -290,6 +300,8 @@ static void i3200_get_drbs(void __iomem *window,
for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) {
drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK;
drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK;
+
+ edac_dbg(0, "drb[0][%d] = %d, drb[1][%d] = %d\n", i, drbs[0][i], i, drbs[1][i]);
}
}
@@ -311,6 +323,9 @@ static unsigned long drb_to_nr_pages(
int n;
n = drbs[channel][rank];
+ if (!n)
+ return 0;
+
if (rank > 0)
n -= drbs[channel][rank - 1];
if (stacked && (channel == 1) &&
@@ -377,19 +392,19 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
- for (i = 0; i < mci->nr_csrows; i++) {
+ for (i = 0; i < I3200_DIMMS; i++) {
unsigned long nr_pages;
- struct csrow_info *csrow = mci->csrows[i];
- nr_pages = drb_to_nr_pages(drbs, stacked,
- i / I3200_RANKS_PER_CHANNEL,
- i % I3200_RANKS_PER_CHANNEL);
+ for (j = 0; j < nr_channels; j++) {
+ struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers, i, j, 0);
- if (nr_pages == 0)
- continue;
+ nr_pages = drb_to_nr_pages(drbs, stacked, j, i);
+ if (nr_pages == 0)
+ continue;
- for (j = 0; j < nr_channels; j++) {
- struct dimm_info *dimm = csrow->channels[j]->dimm;
+ edac_dbg(0, "csrow %d, channel %d%s, size = %ld Mb\n", i, j,
+ stacked ? " (stacked)" : "", PAGES_TO_MiB(nr_pages));
dimm->nr_pages = nr_pages;
dimm->grain = nr_pages << PAGE_SHIFT;
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index d6955b2cc99f..1b635178cc44 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -27,6 +27,7 @@
#include <linux/edac.h>
#include <linux/delay.h>
#include <linux/mmzone.h>
+#include <linux/debugfs.h>
#include "edac_core.h"
@@ -68,6 +69,14 @@
I5100_FERR_NF_MEM_M1ERR_MASK)
#define I5100_NERR_NF_MEM 0xa4 /* MC Next Non-Fatal Errors */
#define I5100_EMASK_MEM 0xa8 /* MC Error Mask Register */
+#define I5100_MEM0EINJMSK0 0x200 /* Injection Mask0 Register Channel 0 */
+#define I5100_MEM1EINJMSK0 0x208 /* Injection Mask0 Register Channel 1 */
+#define I5100_MEMXEINJMSK0_EINJEN (1 << 27)
+#define I5100_MEM0EINJMSK1 0x204 /* Injection Mask1 Register Channel 0 */
+#define I5100_MEM1EINJMSK1 0x206 /* Injection Mask1 Register Channel 1 */
+
+/* Device 19, Function 0 */
+#define I5100_DINJ0 0x9a
/* device 21 and 22, func 0 */
#define I5100_MTR_0 0x154 /* Memory Technology Registers 0-3 */
@@ -338,13 +347,26 @@ struct i5100_priv {
unsigned ranksperchan; /* number of ranks per channel */
struct pci_dev *mc; /* device 16 func 1 */
+ struct pci_dev *einj; /* device 19 func 0 */
struct pci_dev *ch0mm; /* device 21 func 0 */
struct pci_dev *ch1mm; /* device 22 func 0 */
struct delayed_work i5100_scrubbing;
int scrub_enable;
+
+ /* Error injection */
+ u8 inject_channel;
+ u8 inject_hlinesel;
+ u8 inject_deviceptr1;
+ u8 inject_deviceptr2;
+ u16 inject_eccmask1;
+ u16 inject_eccmask2;
+
+ struct dentry *debugfs;
};
+static struct dentry *i5100_debugfs;
+
/* map a rank/chan to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
int chan, int rank)
@@ -863,13 +885,126 @@ static void i5100_init_csrows(struct mem_ctl_info *mci)
}
}
+/****************************************************************************
+ * Error injection routines
+ ****************************************************************************/
+
+static void i5100_do_inject(struct mem_ctl_info *mci)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ u32 mask0;
+ u16 mask1;
+
+ /* MEM[1:0]EINJMSK0
+ * 31 - ADDRMATCHEN
+ * 29:28 - HLINESEL
+ * 00 Reserved
+ * 01 Lower half of cache line
+ * 10 Upper half of cache line
+ * 11 Both upper and lower parts of cache line
+ * 27 - EINJEN
+ * 25:19 - XORMASK1 for deviceptr1
+ * 9:5 - SEC2RAM for deviceptr2
+ * 4:0 - FIR2RAM for deviceptr1
+ */
+ mask0 = ((priv->inject_hlinesel & 0x3) << 28) |
+ I5100_MEMXEINJMSK0_EINJEN |
+ ((priv->inject_eccmask1 & 0xffff) << 10) |
+ ((priv->inject_deviceptr2 & 0x1f) << 5) |
+ (priv->inject_deviceptr1 & 0x1f);
+
+ /* MEM[1:0]EINJMSK1
+ * 15:0 - XORMASK2 for deviceptr2
+ */
+ mask1 = priv->inject_eccmask2;
+
+ if (priv->inject_channel == 0) {
+ pci_write_config_dword(priv->mc, I5100_MEM0EINJMSK0, mask0);
+ pci_write_config_word(priv->mc, I5100_MEM0EINJMSK1, mask1);
+ } else {
+ pci_write_config_dword(priv->mc, I5100_MEM1EINJMSK0, mask0);
+ pci_write_config_word(priv->mc, I5100_MEM1EINJMSK1, mask1);
+ }
+
+ /* Error Injection Response Function
+ * Intel 5100 Memory Controller Hub Chipset (318378) datasheet
+ * hints at this register but carries no data about it. All
+ * data regarding device 19 is based on experimentation and the
+ * Intel 7300 Chipset Memory Controller Hub (318082) datasheet
+ * which appears to be accurate for the i5100 in this area.
+ *
+ * The injection code doesn't work without setting this register.
+ * The register needs to be flipped off then on, or else the hardware
+ * will only perform the first injection.
+ *
+ * Stop condition bits 7:4
+ * 1010 - Stop after one injection
+ * 1011 - Never stop injecting faults
+ *
+ * Start condition bits 3:0
+ * 1010 - Never start
+ * 1011 - Start immediately
+ */
+ pci_write_config_byte(priv->einj, I5100_DINJ0, 0xaa);
+ pci_write_config_byte(priv->einj, I5100_DINJ0, 0xab);
+}
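To make the mask0 packing above concrete, here is a standalone (userspace) sketch that recomputes the register value for one arbitrary example configuration, using exactly the shifts from i5100_do_inject(); the chosen field values are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t  hlinesel   = 1;      /* lower half of the cache line */
            uint8_t  deviceptr1 = 0x04;   /* example DRAM device pointer */
            uint8_t  deviceptr2 = 0x00;
            uint16_t eccmask1   = 0x0001; /* flip a single bit */

            uint32_t mask0 = ((hlinesel & 0x3) << 28) |
                             (1u << 27) |                  /* EINJEN */
                             ((eccmask1 & 0xffff) << 10) |
                             ((deviceptr2 & 0x1f) << 5) |
                             (deviceptr1 & 0x1f);

            printf("MEMxEINJMSK0 = 0x%08x\n", mask0);      /* 0x18000404 */
            return 0;
    }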
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+static ssize_t inject_enable_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = file->private_data;
+ struct mem_ctl_info *mci = to_mci(dev);
+
+ i5100_do_inject(mci);
+
+ return count;
+}
+
+static const struct file_operations i5100_inject_enable_fops = {
+ .open = simple_open,
+ .write = inject_enable_write,
+ .llseek = generic_file_llseek,
+};
+
+static int i5100_setup_debugfs(struct mem_ctl_info *mci)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+
+ if (!i5100_debugfs)
+ return -ENODEV;
+
+ priv->debugfs = debugfs_create_dir(mci->bus.name, i5100_debugfs);
+
+ if (!priv->debugfs)
+ return -ENOMEM;
+
+ debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs,
+ &priv->inject_channel);
+ debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs,
+ &priv->inject_hlinesel);
+ debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs,
+ &priv->inject_deviceptr1);
+ debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs,
+ &priv->inject_deviceptr2);
+ debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs,
+ &priv->inject_eccmask1);
+ debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs,
+ &priv->inject_eccmask2);
+ debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs,
+ &mci->dev, &i5100_inject_enable_fops);
+
+ return 0;
+
+}
+
static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct i5100_priv *priv;
- struct pci_dev *ch0mm, *ch1mm;
+ struct pci_dev *ch0mm, *ch1mm, *einj;
int ret = 0;
u32 dw;
int ranksperch;
@@ -941,6 +1076,22 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto bail_disable_ch1;
}
+
+ /* device 19, func 0, Error injection */
+ einj = pci_get_device_func(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_5100_19, 0);
+ if (!einj) {
+ ret = -ENODEV;
+ goto bail_einj;
+ }
+
+ rc = pci_enable_device(einj);
+ if (rc < 0) {
+ ret = rc;
+ goto bail_disable_einj;
+ }
+
+
mci->pdev = &pdev->dev;
priv = mci->pvt_info;
@@ -948,6 +1099,7 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
priv->mc = pdev;
priv->ch0mm = ch0mm;
priv->ch1mm = ch1mm;
+ priv->einj = einj;
INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
@@ -975,6 +1127,13 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
+ priv->inject_channel = 0;
+ priv->inject_hlinesel = 0;
+ priv->inject_deviceptr1 = 0;
+ priv->inject_deviceptr2 = 0;
+ priv->inject_eccmask1 = 0;
+ priv->inject_eccmask2 = 0;
+
i5100_init_csrows(mci);
/* this strange construction seems to be in every driver, dunno why */
@@ -992,6 +1151,8 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto bail_scrub;
}
+ i5100_setup_debugfs(mci);
+
return ret;
bail_scrub:
@@ -999,6 +1160,12 @@ bail_scrub:
cancel_delayed_work_sync(&(priv->i5100_scrubbing));
edac_mc_free(mci);
+bail_disable_einj:
+ pci_disable_device(einj);
+
+bail_einj:
+ pci_dev_put(einj);
+
bail_disable_ch1:
pci_disable_device(ch1mm);
@@ -1030,14 +1197,18 @@ static void i5100_remove_one(struct pci_dev *pdev)
priv = mci->pvt_info;
+ debugfs_remove_recursive(priv->debugfs);
+
priv->scrub_enable = 0;
cancel_delayed_work_sync(&(priv->i5100_scrubbing));
pci_disable_device(pdev);
pci_disable_device(priv->ch0mm);
pci_disable_device(priv->ch1mm);
+ pci_disable_device(priv->einj);
pci_dev_put(priv->ch0mm);
pci_dev_put(priv->ch1mm);
+ pci_dev_put(priv->einj);
edac_mc_free(mci);
}
@@ -1060,13 +1231,16 @@ static int __init i5100_init(void)
{
int pci_rc;
- pci_rc = pci_register_driver(&i5100_driver);
+ i5100_debugfs = debugfs_create_dir("i5100_edac", NULL);
+ pci_rc = pci_register_driver(&i5100_driver);
return (pci_rc < 0) ? pci_rc : 0;
}
static void __exit i5100_exit(void)
{
+ debugfs_remove(i5100_debugfs);
+
pci_unregister_driver(&i5100_driver);
}
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index e213d030b0dd..0ec3e95a12cd 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -420,21 +420,21 @@ static inline int numdimms(u32 dimms)
static inline int numrank(u32 rank)
{
- static int ranks[4] = { 1, 2, 4, -EINVAL };
+ static const int ranks[] = { 1, 2, 4, -EINVAL };
return ranks[rank & 0x3];
}
static inline int numbank(u32 bank)
{
- static int banks[4] = { 4, 8, 16, -EINVAL };
+ static const int banks[] = { 4, 8, 16, -EINVAL };
return banks[bank & 0x3];
}
static inline int numrow(u32 row)
{
- static int rows[8] = {
+ static const int rows[] = {
1 << 12, 1 << 13, 1 << 14, 1 << 15,
1 << 16, -EINVAL, -EINVAL, -EINVAL,
};
@@ -444,7 +444,7 @@ static inline int numrow(u32 row)
static inline int numcol(u32 col)
{
- static int cols[8] = {
+ static const int cols[] = {
1 << 10, 1 << 11, 1 << 12, -EINVAL,
};
return cols[col & 0x3];
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index da7e2986e3d5..57244f995614 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -639,7 +639,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
tmp_mb = (1 + pvt->tohm) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)", mb, kb, (u64)pvt->tohm);
+ edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
/*
* Step 2) Get SAD range and SAD Interleave list
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 07122a9ef36e..5168a1324a65 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -29,7 +29,7 @@ config EXTCON_ADC_JACK
config EXTCON_MAX77693
tristate "MAX77693 EXTCON Support"
- depends on MFD_MAX77693
+ depends on MFD_MAX77693 && INPUT
select IRQ_DOMAIN
select REGMAP_I2C
help
@@ -47,7 +47,7 @@ config EXTCON_MAX8997
config EXTCON_ARIZONA
tristate "Wolfson Arizona EXTCON support"
- depends on MFD_ARIZONA && INPUT
+ depends on MFD_ARIZONA && INPUT && SND_SOC
help
Say Y here to enable support for external accessory detection
with Wolfson Arizona devices. These are audio CODECs with
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 414aed50b1bc..dc357a4051f6 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -27,12 +27,18 @@
#include <linux/regulator/consumer.h>
#include <linux/extcon.h>
+#include <sound/soc.h>
+
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/pdata.h>
#include <linux/mfd/arizona/registers.h>
#define ARIZONA_NUM_BUTTONS 6
+#define ARIZONA_ACCDET_MODE_MIC 0
+#define ARIZONA_ACCDET_MODE_HPL 1
+#define ARIZONA_ACCDET_MODE_HPR 2
+
struct arizona_extcon_info {
struct device *dev;
struct arizona *arizona;
@@ -45,17 +51,28 @@ struct arizona_extcon_info {
int micd_num_modes;
bool micd_reva;
+ bool micd_clamp;
+
+ struct delayed_work hpdet_work;
+
+ bool hpdet_active;
+ bool hpdet_done;
+
+ int num_hpdet_res;
+ unsigned int hpdet_res[3];
bool mic;
bool detecting;
int jack_flips;
+ int hpdet_ip;
+
struct extcon_dev edev;
};
static const struct arizona_micd_config micd_default_modes[] = {
- { ARIZONA_ACCDET_SRC, 1 << ARIZONA_MICD_BIAS_SRC_SHIFT, 0 },
{ 0, 2 << ARIZONA_MICD_BIAS_SRC_SHIFT, 1 },
+ { ARIZONA_ACCDET_SRC, 1 << ARIZONA_MICD_BIAS_SRC_SHIFT, 0 },
};
static struct {
@@ -73,11 +90,13 @@ static struct {
#define ARIZONA_CABLE_MECHANICAL 0
#define ARIZONA_CABLE_MICROPHONE 1
#define ARIZONA_CABLE_HEADPHONE 2
+#define ARIZONA_CABLE_LINEOUT 3
static const char *arizona_cable[] = {
"Mechanical",
"Microphone",
"Headphone",
+ "Line-out",
NULL,
};
@@ -85,8 +104,9 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
{
struct arizona *arizona = info->arizona;
- gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
- info->micd_modes[mode].gpio);
+ if (arizona->pdata.micd_pol_gpio > 0)
+ gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
+ info->micd_modes[mode].gpio);
regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_BIAS_SRC_MASK,
info->micd_modes[mode].bias);
@@ -98,19 +118,70 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
dev_dbg(arizona->dev, "Set jack polarity to %d\n", mode);
}
+static const char *arizona_extcon_get_micbias(struct arizona_extcon_info *info)
+{
+ switch (info->micd_modes[0].bias >> ARIZONA_MICD_BIAS_SRC_SHIFT) {
+ case 1:
+ return "MICBIAS1";
+ case 2:
+ return "MICBIAS2";
+ case 3:
+ return "MICBIAS3";
+ default:
+ return "MICVDD";
+ }
+}
+
+static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ const char *widget = arizona_extcon_get_micbias(info);
+ struct snd_soc_dapm_context *dapm = arizona->dapm;
+ int ret;
+
+ mutex_lock(&dapm->card->dapm_mutex);
+
+ ret = snd_soc_dapm_force_enable_pin(dapm, widget);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to enable %s: %d\n",
+ widget, ret);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ snd_soc_dapm_sync(dapm);
+
+ if (!arizona->pdata.micd_force_micbias) {
+ mutex_lock(&dapm->card->dapm_mutex);
+
+ ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to disable %s: %d\n",
+ widget, ret);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ snd_soc_dapm_sync(dapm);
+ }
+}
+
static void arizona_start_mic(struct arizona_extcon_info *info)
{
struct arizona *arizona = info->arizona;
bool change;
int ret;
- info->detecting = true;
- info->mic = false;
- info->jack_flips = 0;
-
/* Microphone detection can't use idle mode */
pm_runtime_get(info->dev);
+ if (info->detecting) {
+ ret = regulator_allow_bypass(info->micvdd, false);
+ if (ret != 0) {
+ dev_err(arizona->dev,
+ "Failed to regulate MICVDD: %d\n",
+ ret);
+ }
+ }
+
ret = regulator_enable(info->micvdd);
if (ret != 0) {
dev_err(arizona->dev, "Failed to enable MICVDD: %d\n",
@@ -123,6 +194,12 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
regmap_write(arizona->regmap, 0x80, 0x0);
}
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+ arizona_extcon_pulse_micbias(info);
+
regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_ENA, ARIZONA_MICD_ENA,
&change);
@@ -135,18 +212,39 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
static void arizona_stop_mic(struct arizona_extcon_info *info)
{
struct arizona *arizona = info->arizona;
+ const char *widget = arizona_extcon_get_micbias(info);
+ struct snd_soc_dapm_context *dapm = arizona->dapm;
bool change;
+ int ret;
regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_ENA, 0,
&change);
+ mutex_lock(&dapm->card->dapm_mutex);
+
+ ret = snd_soc_dapm_disable_pin(dapm, widget);
+ if (ret != 0)
+ dev_warn(arizona->dev,
+ "Failed to disable %s: %d\n",
+ widget, ret);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ snd_soc_dapm_sync(dapm);
+
if (info->micd_reva) {
regmap_write(arizona->regmap, 0x80, 0x3);
regmap_write(arizona->regmap, 0x294, 2);
regmap_write(arizona->regmap, 0x80, 0x0);
}
+ ret = regulator_allow_bypass(info->micvdd, true);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n",
+ ret);
+ }
+
if (change) {
regulator_disable(info->micvdd);
pm_runtime_mark_last_busy(info->dev);
@@ -154,6 +252,478 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
}
}
+static struct {
+ unsigned int factor_a;
+ unsigned int factor_b;
+} arizona_hpdet_b_ranges[] = {
+ { 5528, 362464 },
+ { 11084, 6186851 },
+ { 11065, 65460395 },
+};
+
+static struct {
+ int min;
+ int max;
+} arizona_hpdet_c_ranges[] = {
+ { 0, 30 },
+ { 8, 100 },
+ { 100, 1000 },
+ { 1000, 10000 },
+};
+
+static int arizona_hpdet_read(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ unsigned int val, range;
+ int ret;
+
+ ret = regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_2, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read HPDET status: %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (info->hpdet_ip) {
+ case 0:
+ if (!(val & ARIZONA_HP_DONE)) {
+ dev_err(arizona->dev, "HPDET did not complete: %x\n",
+ val);
+ return -EAGAIN;
+ }
+
+ val &= ARIZONA_HP_LVL_MASK;
+ break;
+
+ case 1:
+ if (!(val & ARIZONA_HP_DONE_B)) {
+ dev_err(arizona->dev, "HPDET did not complete: %x\n",
+ val);
+ return -EAGAIN;
+ }
+
+ ret = regmap_read(arizona->regmap, ARIZONA_HP_DACVAL, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read HP value: %d\n",
+ ret);
+ return -EAGAIN;
+ }
+
+ regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ &range);
+ range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
+ >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
+
+ if (range < ARRAY_SIZE(arizona_hpdet_b_ranges) - 1 &&
+ (val < 100 || val > 0x3fb)) {
+ range++;
+ dev_dbg(arizona->dev, "Moving to HPDET range %d\n",
+ range);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK,
+ range <<
+ ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
+ return -EAGAIN;
+ }
+
+ /* If we go out of range report top of range */
+ if (val < 100 || val > 0x3fb) {
+ dev_dbg(arizona->dev, "Measurement out of range\n");
+ return 10000;
+ }
+
+ dev_dbg(arizona->dev, "HPDET read %d in range %d\n",
+ val, range);
+
+ val = arizona_hpdet_b_ranges[range].factor_b
+ / ((val * 100) -
+ arizona_hpdet_b_ranges[range].factor_a);
+ break;
+
+ default:
+ dev_warn(arizona->dev, "Unknown HPDET IP revision %d\n",
+ info->hpdet_ip);
+ case 2:
+ if (!(val & ARIZONA_HP_DONE_B)) {
+ dev_err(arizona->dev, "HPDET did not complete: %x\n",
+ val);
+ return -EAGAIN;
+ }
+
+ val &= ARIZONA_HP_LVL_B_MASK;
+
+ regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ &range);
+ range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
+ >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
+
+ /* Skip up or down a range? */
+ if (range && (val < arizona_hpdet_c_ranges[range].min)) {
+ range--;
+ dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
+ arizona_hpdet_c_ranges[range].min,
+ arizona_hpdet_c_ranges[range].max);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK,
+ range <<
+ ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
+ return -EAGAIN;
+ }
+
+ if (range < ARRAY_SIZE(arizona_hpdet_c_ranges) - 1 &&
+ (val >= arizona_hpdet_c_ranges[range].max)) {
+ range++;
+ dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
+ arizona_hpdet_c_ranges[range].min,
+ arizona_hpdet_c_ranges[range].max);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK,
+ range <<
+ ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
+ return -EAGAIN;
+ }
+ }
+
+ dev_dbg(arizona->dev, "HP impedance %d ohms\n", val);
+ return val;
+}
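As a sanity check on the HPDET IP revision 1 conversion above (impedance = factor_b / (reading * 100 - factor_a)), a purely illustrative calculation with the range-0 coefficients and an arbitrary example reading of 150 gives roughly 38 ohms, i.e. a plausible headphone load:

    #include <stdio.h>

    int main(void)
    {
            /* Range-0 coefficients from arizona_hpdet_b_ranges above;
             * 150 is an arbitrary example DACVAL reading. */
            unsigned int reading = 150, factor_a = 5528, factor_b = 362464;
            unsigned int ohms = factor_b / ((reading * 100) - factor_a);

            printf("%u ohms\n", ohms);  /* 362464 / 9472 ~= 38 ohms */
            return 0;
    }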
+
+static int arizona_hpdet_do_id(struct arizona_extcon_info *info, int *reading)
+{
+ struct arizona *arizona = info->arizona;
+ int id_gpio = arizona->pdata.hpdet_id_gpio;
+
+ /*
+ * If we're using HPDET for accessory identification we need
+ * to take multiple measurements, stepping through them in sequence.
+ */
+ if (arizona->pdata.hpdet_acc_id) {
+ info->hpdet_res[info->num_hpdet_res++] = *reading;
+
+ /*
+ * If the impedance is too high, don't measure the
+ * second ground.
+ */
+ if (info->num_hpdet_res == 1 && *reading >= 45) {
+ dev_dbg(arizona->dev, "Skipping ground flip\n");
+ info->hpdet_res[info->num_hpdet_res++] = *reading;
+ }
+
+ if (info->num_hpdet_res == 1) {
+ dev_dbg(arizona->dev, "Flipping ground\n");
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_SRC,
+ ~info->micd_modes[0].src);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ return -EAGAIN;
+ }
+
+ /* Only check the mic directly if we didn't already ID it */
+ if (id_gpio && info->num_hpdet_res == 2 &&
+ !((info->hpdet_res[0] > info->hpdet_res[1] * 2))) {
+ dev_dbg(arizona->dev, "Measuring mic\n");
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK |
+ ARIZONA_ACCDET_SRC,
+ ARIZONA_ACCDET_MODE_HPR |
+ info->micd_modes[0].src);
+
+ gpio_set_value_cansleep(id_gpio, 1);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ return -EAGAIN;
+ }
+
+ /* OK, got both. Now, compare... */
+ dev_dbg(arizona->dev, "HPDET measured %d %d %d\n",
+ info->hpdet_res[0], info->hpdet_res[1],
+ info->hpdet_res[2]);
+
+
+ /* Take the headphone impedance for the main report */
+ *reading = info->hpdet_res[0];
+
+ /*
+ * Either the two grounds measure differently or we
+ * measure the mic as high impedance.
+ */
+ if ((info->hpdet_res[0] > info->hpdet_res[1] * 2) ||
+ (id_gpio && info->hpdet_res[2] > 10)) {
+ dev_dbg(arizona->dev, "Detected mic\n");
+ info->mic = true;
+ info->detecting = true;
+ } else {
+ dev_dbg(arizona->dev, "Detected headphone\n");
+ }
+
+ /* Make sure everything is reset back to the real polarity */
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_SRC,
+ info->micd_modes[0].src);
+ }
+
+ return 0;
+}
+
+static irqreturn_t arizona_hpdet_irq(int irq, void *data)
+{
+ struct arizona_extcon_info *info = data;
+ struct arizona *arizona = info->arizona;
+ int id_gpio = arizona->pdata.hpdet_id_gpio;
+ int report = ARIZONA_CABLE_HEADPHONE;
+ unsigned int val;
+ int ret, reading;
+
+ mutex_lock(&info->lock);
+
+ /* If we got a spurious IRQ for some reason then ignore it */
+ if (!info->hpdet_active) {
+ dev_warn(arizona->dev, "Spurious HPDET IRQ\n");
+ mutex_unlock(&info->lock);
+ return IRQ_NONE;
+ }
+
+ /* If the cable was removed while measuring ignore the result */
+ ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL);
+ if (ret < 0) {
+ dev_err(arizona->dev, "Failed to check cable state: %d\n",
+ ret);
+ goto out;
+ } else if (!ret) {
+ dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
+ goto done;
+ }
+
+ ret = arizona_hpdet_read(info);
+ if (ret == -EAGAIN) {
+ goto out;
+ } else if (ret < 0) {
+ goto done;
+ }
+ reading = ret;
+
+ /* Reset back to starting range */
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK | ARIZONA_HP_POLL,
+ 0);
+
+ ret = arizona_hpdet_do_id(info, &reading);
+ if (ret == -EAGAIN) {
+ goto out;
+ } else if (ret < 0) {
+ goto done;
+ }
+
+ /* Report high impedance cables as line outputs */
+ if (reading >= 5000)
+ report = ARIZONA_CABLE_LINEOUT;
+ else
+ report = ARIZONA_CABLE_HEADPHONE;
+
+ ret = extcon_set_cable_state_(&info->edev, report, true);
+ if (ret != 0)
+ dev_err(arizona->dev, "Failed to report HP/line: %d\n",
+ ret);
+
+ mutex_lock(&arizona->dapm->card->dapm_mutex);
+
+ ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read output enables: %d\n",
+ ret);
+ val = 0;
+ }
+
+ if (!(val & (ARIZONA_OUT1L_ENA | ARIZONA_OUT1R_ENA))) {
+ ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000, 0);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to undo magic: %d\n",
+ ret);
+
+ ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000, 0);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to undo magic: %d\n",
+ ret);
+ }
+
+ mutex_unlock(&arizona->dapm->card->dapm_mutex);
+
+done:
+ if (id_gpio)
+ gpio_set_value_cansleep(id_gpio, 0);
+
+ /* Revert back to MICDET mode */
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+ /* If we have a mic then reenable MICDET */
+ if (info->mic)
+ arizona_start_mic(info);
+
+ if (info->hpdet_active) {
+ pm_runtime_put_autosuspend(info->dev);
+ info->hpdet_active = false;
+ }
+
+ info->hpdet_done = true;
+
+out:
+ mutex_unlock(&info->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void arizona_identify_headphone(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ int ret;
+
+ if (info->hpdet_done)
+ return;
+
+ dev_dbg(arizona->dev, "Starting HPDET\n");
+
+ /* Make sure we keep the device enabled during the measurement */
+ pm_runtime_get(info->dev);
+
+ info->hpdet_active = true;
+
+ if (info->mic)
+ arizona_stop_mic(info);
+
+ ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000, 0x4000);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to do magic: %d\n", ret);
+
+ ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000, 0x4000);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to do magic: %d\n", ret);
+
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK,
+ ARIZONA_ACCDET_MODE_HPL);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Can't start HPDETL measurement: %d\n",
+ ret);
+ goto err;
+ }
+
+ return;
+
+err:
+ regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+ /* Just report headphone */
+ ret = extcon_update_state(&info->edev,
+ 1 << ARIZONA_CABLE_HEADPHONE,
+ 1 << ARIZONA_CABLE_HEADPHONE);
+ if (ret != 0)
+ dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
+
+ if (info->mic)
+ arizona_start_mic(info);
+
+ info->hpdet_active = false;
+}
+
+static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ unsigned int val;
+ int ret;
+
+ dev_dbg(arizona->dev, "Starting identification via HPDET\n");
+
+ /* Make sure we keep the device enabled during the measurement */
+ pm_runtime_get_sync(info->dev);
+
+ info->hpdet_active = true;
+
+ arizona_extcon_pulse_micbias(info);
+
+ mutex_lock(&arizona->dapm->card->dapm_mutex);
+
+ ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read output enables: %d\n",
+ ret);
+ val = 0;
+ }
+
+ if (!(val & (ARIZONA_OUT1L_ENA | ARIZONA_OUT1R_ENA))) {
+ ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000,
+ 0x4000);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to do magic: %d\n",
+ ret);
+
+ ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000,
+ 0x4000);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to do magic: %d\n",
+ ret);
+ }
+
+ mutex_unlock(&arizona->dapm->card->dapm_mutex);
+
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_SRC | ARIZONA_ACCDET_MODE_MASK,
+ info->micd_modes[0].src |
+ ARIZONA_ACCDET_MODE_HPL);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Can't start HPDETL measurement: %d\n",
+ ret);
+ goto err;
+ }
+
+ return;
+
+err:
+ regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+ /* Just report headphone */
+ ret = extcon_update_state(&info->edev,
+ 1 << ARIZONA_CABLE_HEADPHONE,
+ 1 << ARIZONA_CABLE_HEADPHONE);
+ if (ret != 0)
+ dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
+
+ info->hpdet_active = false;
+}
+
static irqreturn_t arizona_micdet(int irq, void *data)
{
struct arizona_extcon_info *info = data;
@@ -187,16 +757,23 @@ static irqreturn_t arizona_micdet(int irq, void *data)
 /* If we got a high impedance we should have a headset, report it. */
if (info->detecting && (val & 0x400)) {
+ arizona_identify_headphone(info);
+
ret = extcon_update_state(&info->edev,
- 1 << ARIZONA_CABLE_MICROPHONE |
- 1 << ARIZONA_CABLE_HEADPHONE,
- 1 << ARIZONA_CABLE_MICROPHONE |
- 1 << ARIZONA_CABLE_HEADPHONE);
+ 1 << ARIZONA_CABLE_MICROPHONE,
+ 1 << ARIZONA_CABLE_MICROPHONE);
if (ret != 0)
dev_err(arizona->dev, "Headset report failed: %d\n",
ret);
+ /* Don't need to regulate for button detection */
+ ret = regulator_allow_bypass(info->micvdd, true);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n",
+ ret);
+ }
+
info->mic = true;
info->detecting = false;
goto handled;
@@ -209,20 +786,13 @@ static irqreturn_t arizona_micdet(int irq, void *data)
 * impedance then give up and report headphones.
*/
if (info->detecting && (val & 0x3f8)) {
- info->jack_flips++;
-
if (info->jack_flips >= info->micd_num_modes) {
- dev_dbg(arizona->dev, "Detected headphone\n");
+ dev_dbg(arizona->dev, "Detected HP/line\n");
+ arizona_identify_headphone(info);
+
info->detecting = false;
- arizona_stop_mic(info);
- ret = extcon_set_cable_state_(&info->edev,
- ARIZONA_CABLE_HEADPHONE,
- true);
- if (ret != 0)
- dev_err(arizona->dev,
- "Headphone report failed: %d\n",
- ret);
+ arizona_stop_mic(info);
} else {
info->micd_mode++;
if (info->micd_mode == info->micd_num_modes)
@@ -258,13 +828,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
info->detecting = false;
arizona_stop_mic(info);
- ret = extcon_set_cable_state_(&info->edev,
- ARIZONA_CABLE_HEADPHONE,
- true);
- if (ret != 0)
- dev_err(arizona->dev,
- "Headphone report failed: %d\n",
- ret);
+ arizona_identify_headphone(info);
} else {
dev_warn(arizona->dev, "Button with no mic: %x\n",
val);
@@ -275,6 +839,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
input_report_key(info->input,
arizona_lvl_to_key[i].report, 0);
input_sync(info->input);
+ arizona_extcon_pulse_micbias(info);
}
handled:
@@ -284,17 +849,38 @@ handled:
return IRQ_HANDLED;
}
+static void arizona_hpdet_work(struct work_struct *work)
+{
+ struct arizona_extcon_info *info = container_of(work,
+ struct arizona_extcon_info,
+ hpdet_work.work);
+
+ mutex_lock(&info->lock);
+ arizona_start_hpdet_acc_id(info);
+ mutex_unlock(&info->lock);
+}
+
static irqreturn_t arizona_jackdet(int irq, void *data)
{
struct arizona_extcon_info *info = data;
struct arizona *arizona = info->arizona;
- unsigned int val;
+ unsigned int val, present, mask;
int ret, i;
pm_runtime_get_sync(info->dev);
+ cancel_delayed_work_sync(&info->hpdet_work);
+
mutex_lock(&info->lock);
+ if (arizona->pdata.jd_gpio5) {
+ mask = ARIZONA_MICD_CLAMP_STS;
+ present = 0;
+ } else {
+ mask = ARIZONA_JD1_STS;
+ present = ARIZONA_JD1_STS;
+ }
+
ret = regmap_read(arizona->regmap, ARIZONA_AOD_IRQ_RAW_STATUS, &val);
if (ret != 0) {
dev_err(arizona->dev, "Failed to read jackdet status: %d\n",
@@ -304,7 +890,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
return IRQ_NONE;
}
- if (val & ARIZONA_JD1_STS) {
+ if ((val & mask) == present) {
dev_dbg(arizona->dev, "Detected jack\n");
ret = extcon_set_cable_state_(&info->edev,
ARIZONA_CABLE_MECHANICAL, true);
@@ -313,12 +899,31 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
dev_err(arizona->dev, "Mechanical report failed: %d\n",
ret);
- arizona_start_mic(info);
+ if (!arizona->pdata.hpdet_acc_id) {
+ info->detecting = true;
+ info->mic = false;
+ info->jack_flips = 0;
+
+ arizona_start_mic(info);
+ } else {
+ schedule_delayed_work(&info->hpdet_work,
+ msecs_to_jiffies(250));
+ }
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_JACK_DETECT_DEBOUNCE,
+ ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB, 0);
} else {
dev_dbg(arizona->dev, "Detected jack removal\n");
arizona_stop_mic(info);
+ info->num_hpdet_res = 0;
+ for (i = 0; i < ARRAY_SIZE(info->hpdet_res); i++)
+ info->hpdet_res[i] = 0;
+ info->mic = false;
+ info->hpdet_done = false;
+
for (i = 0; i < ARIZONA_NUM_BUTTONS; i++)
input_report_key(info->input,
arizona_lvl_to_key[i].report, 0);
@@ -328,8 +933,20 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
if (ret != 0)
dev_err(arizona->dev, "Removal report failed: %d\n",
ret);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_JACK_DETECT_DEBOUNCE,
+ ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB,
+ ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB);
}
+ /* Clear trig_sts to make sure DCVDD is not forced up */
+ regmap_write(arizona->regmap, ARIZONA_AOD_WKUP_AND_TRIG,
+ ARIZONA_MICD_CLAMP_FALL_TRIG_STS |
+ ARIZONA_MICD_CLAMP_RISE_TRIG_STS |
+ ARIZONA_JD1_FALL_TRIG_STS |
+ ARIZONA_JD1_RISE_TRIG_STS);
+
mutex_unlock(&info->lock);
pm_runtime_mark_last_busy(info->dev);
@@ -343,8 +960,12 @@ static int arizona_extcon_probe(struct platform_device *pdev)
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
struct arizona_pdata *pdata;
struct arizona_extcon_info *info;
+ int jack_irq_fall, jack_irq_rise;
int ret, mode, i;
+ if (!arizona->dapm || !arizona->dapm->card)
+ return -EPROBE_DEFER;
+
pdata = dev_get_platdata(arizona->dev);
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -364,7 +985,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
mutex_init(&info->lock);
info->arizona = arizona;
info->dev = &pdev->dev;
- info->detecting = true;
+ INIT_DELAYED_WORK(&info->hpdet_work, arizona_hpdet_work);
platform_set_drvdata(pdev, info);
switch (arizona->type) {
@@ -374,6 +995,8 @@ static int arizona_extcon_probe(struct platform_device *pdev)
info->micd_reva = true;
break;
default:
+ info->micd_clamp = true;
+ info->hpdet_ip = 1;
break;
}
break;
@@ -416,9 +1039,64 @@ static int arizona_extcon_probe(struct platform_device *pdev)
}
}
+ if (arizona->pdata.hpdet_id_gpio > 0) {
+ ret = devm_gpio_request_one(&pdev->dev,
+ arizona->pdata.hpdet_id_gpio,
+ GPIOF_OUT_INIT_LOW,
+ "HPDET");
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
+ arizona->pdata.hpdet_id_gpio, ret);
+ goto err_register;
+ }
+ }
+
+ if (arizona->pdata.micd_bias_start_time)
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_BIAS_STARTTIME_MASK,
+ arizona->pdata.micd_bias_start_time
+ << ARIZONA_MICD_BIAS_STARTTIME_SHIFT);
+
+ if (arizona->pdata.micd_rate)
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_RATE_MASK,
+ arizona->pdata.micd_rate
+ << ARIZONA_MICD_RATE_SHIFT);
+
+ if (arizona->pdata.micd_dbtime)
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_DBTIME_MASK,
+ arizona->pdata.micd_dbtime
+ << ARIZONA_MICD_DBTIME_SHIFT);
+
+ /*
+ * If we have a clamp, use it, activating it in conjunction with
+ * GPIO5 if that is connected for jack detect operation.
+ */
+ if (info->micd_clamp) {
+ if (arizona->pdata.jd_gpio5) {
+ /* Put the GPIO into input mode */
+ regmap_write(arizona->regmap, ARIZONA_GPIO5_CTRL,
+ 0xc101);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MICD_CLAMP_CONTROL,
+ ARIZONA_MICD_CLAMP_MODE_MASK, 0x9);
+ } else {
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MICD_CLAMP_CONTROL,
+ ARIZONA_MICD_CLAMP_MODE_MASK, 0x4);
+ }
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_JACK_DETECT_DEBOUNCE,
+ ARIZONA_MICD_CLAMP_DB,
+ ARIZONA_MICD_CLAMP_DB);
+ }
+
arizona_extcon_set_mode(info, 0);
- info->input = input_allocate_device();
+ info->input = devm_input_allocate_device(&pdev->dev);
if (!info->input) {
dev_err(arizona->dev, "Can't allocate input dev\n");
ret = -ENOMEM;
@@ -436,7 +1114,15 @@ static int arizona_extcon_probe(struct platform_device *pdev)
pm_runtime_idle(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- ret = arizona_request_irq(arizona, ARIZONA_IRQ_JD_RISE,
+ if (arizona->pdata.jd_gpio5) {
+ jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
+ jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
+ } else {
+ jack_irq_rise = ARIZONA_IRQ_JD_RISE;
+ jack_irq_fall = ARIZONA_IRQ_JD_FALL;
+ }
+
+ ret = arizona_request_irq(arizona, jack_irq_rise,
"JACKDET rise", arizona_jackdet, info);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
@@ -444,21 +1130,21 @@ static int arizona_extcon_probe(struct platform_device *pdev)
goto err_input;
}
- ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 1);
+ ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to set JD rise IRQ wake: %d\n",
ret);
goto err_rise;
}
- ret = arizona_request_irq(arizona, ARIZONA_IRQ_JD_FALL,
+ ret = arizona_request_irq(arizona, jack_irq_fall,
"JACKDET fall", arizona_jackdet, info);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to get JD fall IRQ: %d\n", ret);
goto err_rise_wake;
}
- ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 1);
+ ret = arizona_set_irq_wake(arizona, jack_irq_fall, 1);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to set JD fall IRQ wake: %d\n",
ret);
@@ -472,11 +1158,12 @@ static int arizona_extcon_probe(struct platform_device *pdev)
goto err_fall_wake;
}
- regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
- ARIZONA_MICD_BIAS_STARTTIME_MASK |
- ARIZONA_MICD_RATE_MASK,
- 7 << ARIZONA_MICD_BIAS_STARTTIME_SHIFT |
- 8 << ARIZONA_MICD_RATE_SHIFT);
+ ret = arizona_request_irq(arizona, ARIZONA_IRQ_HPDET,
+ "HPDET", arizona_hpdet_irq, info);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to get HPDET IRQ: %d\n", ret);
+ goto err_micdet;
+ }
arizona_clk32k_enable(arizona);
regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_DEBOUNCE,
@@ -494,23 +1181,24 @@ static int arizona_extcon_probe(struct platform_device *pdev)
ret = input_register_device(info->input);
if (ret) {
dev_err(&pdev->dev, "Can't register input device: %d\n", ret);
- goto err_micdet;
+ goto err_hpdet;
}
return 0;
+err_hpdet:
+ arizona_free_irq(arizona, ARIZONA_IRQ_HPDET, info);
err_micdet:
arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info);
err_fall_wake:
- arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 0);
+ arizona_set_irq_wake(arizona, jack_irq_fall, 0);
err_fall:
- arizona_free_irq(arizona, ARIZONA_IRQ_JD_FALL, info);
+ arizona_free_irq(arizona, jack_irq_fall, info);
err_rise_wake:
- arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0);
+ arizona_set_irq_wake(arizona, jack_irq_rise, 0);
err_rise:
- arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info);
+ arizona_free_irq(arizona, jack_irq_rise, info);
err_input:
- input_free_device(info->input);
err_register:
pm_runtime_disable(&pdev->dev);
extcon_dev_unregister(&info->edev);
@@ -522,18 +1210,32 @@ static int arizona_extcon_remove(struct platform_device *pdev)
{
struct arizona_extcon_info *info = platform_get_drvdata(pdev);
struct arizona *arizona = info->arizona;
+ int jack_irq_rise, jack_irq_fall;
pm_runtime_disable(&pdev->dev);
- arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0);
- arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 0);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MICD_CLAMP_CONTROL,
+ ARIZONA_MICD_CLAMP_MODE_MASK, 0);
+
+ if (arizona->pdata.jd_gpio5) {
+ jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
+ jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
+ } else {
+ jack_irq_rise = ARIZONA_IRQ_JD_RISE;
+ jack_irq_fall = ARIZONA_IRQ_JD_FALL;
+ }
+
+ arizona_set_irq_wake(arizona, jack_irq_rise, 0);
+ arizona_set_irq_wake(arizona, jack_irq_fall, 0);
+ arizona_free_irq(arizona, ARIZONA_IRQ_HPDET, info);
arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info);
- arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info);
- arizona_free_irq(arizona, ARIZONA_IRQ_JD_FALL, info);
+ arizona_free_irq(arizona, jack_irq_rise, info);
+ arizona_free_irq(arizona, jack_irq_fall, info);
+ cancel_delayed_work_sync(&info->hpdet_work);
regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
ARIZONA_JD1_ENA, 0);
arizona_clk32k_disable(arizona);
- input_unregister_device(info->input);
extcon_dev_unregister(&info->edev);
return 0;
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 1b14bfcdc176..02bec32adde4 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -29,7 +29,7 @@
#include <linux/workqueue.h>
#include <linux/gpio.h>
#include <linux/extcon.h>
-#include <linux/extcon/extcon_gpio.h>
+#include <linux/extcon/extcon-gpio.h>
struct gpio_extcon_data {
struct extcon_dev edev;
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 8c17b65eb74d..b70e3815c459 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/platform_device.h>
@@ -29,92 +30,7 @@
#include <linux/irqdomain.h>
#define DEV_NAME "max77693-muic"
-
-/* MAX77693 MUIC - STATUS1~3 Register */
-#define STATUS1_ADC_SHIFT (0)
-#define STATUS1_ADCLOW_SHIFT (5)
-#define STATUS1_ADCERR_SHIFT (6)
-#define STATUS1_ADC1K_SHIFT (7)
-#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
-#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
-#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
-#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT)
-
-#define STATUS2_CHGTYP_SHIFT (0)
-#define STATUS2_CHGDETRUN_SHIFT (3)
-#define STATUS2_DCDTMR_SHIFT (4)
-#define STATUS2_DXOVP_SHIFT (5)
-#define STATUS2_VBVOLT_SHIFT (6)
-#define STATUS2_VIDRM_SHIFT (7)
-#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
-#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
-#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT)
-#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
-#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT)
-
-#define STATUS3_OVP_SHIFT (2)
-#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
-
-/* MAX77693 CDETCTRL1~2 register */
-#define CDETCTRL1_CHGDETEN_SHIFT (0)
-#define CDETCTRL1_CHGTYPMAN_SHIFT (1)
-#define CDETCTRL1_DCDEN_SHIFT (2)
-#define CDETCTRL1_DCD2SCT_SHIFT (3)
-#define CDETCTRL1_CDDELAY_SHIFT (4)
-#define CDETCTRL1_DCDCPL_SHIFT (5)
-#define CDETCTRL1_CDPDET_SHIFT (7)
-#define CDETCTRL1_CHGDETEN_MASK (0x1 << CDETCTRL1_CHGDETEN_SHIFT)
-#define CDETCTRL1_CHGTYPMAN_MASK (0x1 << CDETCTRL1_CHGTYPMAN_SHIFT)
-#define CDETCTRL1_DCDEN_MASK (0x1 << CDETCTRL1_DCDEN_SHIFT)
-#define CDETCTRL1_DCD2SCT_MASK (0x1 << CDETCTRL1_DCD2SCT_SHIFT)
-#define CDETCTRL1_CDDELAY_MASK (0x1 << CDETCTRL1_CDDELAY_SHIFT)
-#define CDETCTRL1_DCDCPL_MASK (0x1 << CDETCTRL1_DCDCPL_SHIFT)
-#define CDETCTRL1_CDPDET_MASK (0x1 << CDETCTRL1_CDPDET_SHIFT)
-
-#define CDETCTRL2_VIDRMEN_SHIFT (1)
-#define CDETCTRL2_DXOVPEN_SHIFT (3)
-#define CDETCTRL2_VIDRMEN_MASK (0x1 << CDETCTRL2_VIDRMEN_SHIFT)
-#define CDETCTRL2_DXOVPEN_MASK (0x1 << CDETCTRL2_DXOVPEN_SHIFT)
-
-/* MAX77693 MUIC - CONTROL1~3 register */
-#define COMN1SW_SHIFT (0)
-#define COMP2SW_SHIFT (3)
-#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
-#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
-#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
-#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
- | (1 << COMN1SW_SHIFT))
-#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
- | (2 << COMN1SW_SHIFT))
-#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
- | (3 << COMN1SW_SHIFT))
-#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
- | (0 << COMN1SW_SHIFT))
-
-#define CONTROL2_LOWPWR_SHIFT (0)
-#define CONTROL2_ADCEN_SHIFT (1)
-#define CONTROL2_CPEN_SHIFT (2)
-#define CONTROL2_SFOUTASRT_SHIFT (3)
-#define CONTROL2_SFOUTORD_SHIFT (4)
-#define CONTROL2_ACCDET_SHIFT (5)
-#define CONTROL2_USBCPINT_SHIFT (6)
-#define CONTROL2_RCPS_SHIFT (7)
-#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
-#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
-#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
-#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
-#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
-#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
-#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
-#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
-
-#define CONTROL3_JIGSET_SHIFT (0)
-#define CONTROL3_BTLDSET_SHIFT (2)
-#define CONTROL3_ADCDBSET_SHIFT (4)
-#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
-#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
-#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
+#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
enum max77693_muic_adc_debounce_time {
ADC_DEBOUNCE_TIME_5MS = 0,
@@ -127,14 +43,40 @@ struct max77693_muic_info {
struct device *dev;
struct max77693_dev *max77693;
struct extcon_dev *edev;
- int prev_adc;
- int prev_adc_gnd;
+ int prev_cable_type;
+ int prev_cable_type_gnd;
int prev_chg_type;
+ int prev_button_type;
u8 status[2];
int irq;
struct work_struct irq_work;
struct mutex mutex;
+
+ /*
+ * Use a delayed workqueue to detect the cable state and then
+ * notify the cable state to the notifiee/platform through a uevent.
+ * The extcon provider driver should only notify the cable state to
+ * the upper layer after the platform has finished booting.
+ */
+ struct delayed_work wq_detcable;
+
+ /* Button of dock device */
+ struct input_dev *dock;
+
+ /*
+ * Default USB/UART path: either UART/USB or AUX_UART/AUX_USB,
+ * i.e. the h/w path selected by COMP2/COMN1 in the CONTROL1 register.
+ */
+ int path_usb;
+ int path_uart;
+};
+
+enum max77693_muic_cable_group {
+ MAX77693_CABLE_GROUP_ADC = 0,
+ MAX77693_CABLE_GROUP_ADC_GND,
+ MAX77693_CABLE_GROUP_CHG,
+ MAX77693_CABLE_GROUP_VBVOLT,
};
enum max77693_muic_charger_type {
@@ -215,27 +157,59 @@ enum max77693_muic_acc_type {
/* The below accessories have same ADC value so ADCLow and
ADC1K bit is used to separate specific accessory */
- MAX77693_MUIC_GND_USB_OTG = 0x100, /* ADC:0x0, ADCLow:0, ADC1K:0 */
- MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* ADC:0x0, ADCLow:1, ADC1K:0 */
- MAX77693_MUIC_GND_MHL_CABLE = 0x103, /* ADC:0x0, ADCLow:1, ADC1K:1 */
+ MAX77693_MUIC_GND_USB_OTG = 0x100, /* ADC:0x0, VBVolt:0, ADCLow:0, ADC1K:0 */
+ MAX77693_MUIC_GND_USB_OTG_VB = 0x104, /* ADC:0x0, VBVolt:1, ADCLow:0, ADC1K:0 */
+ MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* ADC:0x0, VBVolt:0, ADCLow:1, ADC1K:0 */
+ MAX77693_MUIC_GND_MHL = 0x103, /* ADC:0x0, VBVolt:0, ADCLow:1, ADC1K:1 */
+ MAX77693_MUIC_GND_MHL_VB = 0x107, /* ADC:0x0, VBVolt:1, ADCLow:1, ADC1K:1 */
};
/* MAX77693 MUIC device support below list of accessories(external connector) */
-const char *max77693_extcon_cable[] = {
- [0] = "USB",
- [1] = "USB-Host",
- [2] = "TA",
- [3] = "Fast-charger",
- [4] = "Slow-charger",
- [5] = "Charge-downstream",
- [6] = "MHL",
- [7] = "Audio-video-load",
- [8] = "Audio-video-noload",
- [9] = "JIG",
+enum {
+ EXTCON_CABLE_USB = 0,
+ EXTCON_CABLE_USB_HOST,
+ EXTCON_CABLE_TA,
+ EXTCON_CABLE_FAST_CHARGER,
+ EXTCON_CABLE_SLOW_CHARGER,
+ EXTCON_CABLE_CHARGE_DOWNSTREAM,
+ EXTCON_CABLE_MHL,
+ EXTCON_CABLE_MHL_TA,
+ EXTCON_CABLE_JIG_USB_ON,
+ EXTCON_CABLE_JIG_USB_OFF,
+ EXTCON_CABLE_JIG_UART_OFF,
+ EXTCON_CABLE_JIG_UART_ON,
+ EXTCON_CABLE_DOCK_SMART,
+ EXTCON_CABLE_DOCK_DESK,
+ EXTCON_CABLE_DOCK_AUDIO,
+
+ _EXTCON_CABLE_NUM,
+};
+
+static const char *max77693_extcon_cable[] = {
+ [EXTCON_CABLE_USB] = "USB",
+ [EXTCON_CABLE_USB_HOST] = "USB-Host",
+ [EXTCON_CABLE_TA] = "TA",
+ [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
+ [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
+ [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
+ [EXTCON_CABLE_MHL] = "MHL",
+ [EXTCON_CABLE_MHL_TA] = "MHL_TA",
+ [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
+ [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
+ [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
+ [EXTCON_CABLE_JIG_UART_ON] = "Dock-Car",
+ [EXTCON_CABLE_DOCK_SMART] = "Dock-Smart",
+ [EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
+ [EXTCON_CABLE_DOCK_AUDIO] = "Dock-Audio",
NULL,
};
+/*
+ * max77693_muic_set_debounce_time - Set the debounce time of ADC
+ * @info: the instance including private data of max77693 MUIC
+ * @time: the debounce time of ADC
+ */
static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
enum max77693_muic_adc_debounce_time time)
{
@@ -250,18 +224,29 @@ static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
MAX77693_MUIC_REG_CTRL3,
time << CONTROL3_ADCDBSET_SHIFT,
CONTROL3_ADCDBSET_MASK);
- if (ret)
+ if (ret) {
dev_err(info->dev, "failed to set ADC debounce time\n");
+ return -EAGAIN;
+ }
break;
default:
dev_err(info->dev, "invalid ADC debounce time\n");
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- return ret;
+ return 0;
};
+/*
+ * max77693_muic_set_path - Set hardware line according to attached cable
+ * @info: the instance including private data of max77693 MUIC
+ * @val: the h/w path to select for the attached cable
+ * @attached: the state of the cable (true: attached, false: detached)
+ *
+ * The max77693 MUIC device shares its external H/W lines among a variety of
+ * cables, so this function sets the internal H/W path according to the type
+ * of the attached cable.
+ */
static int max77693_muic_set_path(struct max77693_muic_info *info,
u8 val, bool attached)
{
@@ -277,7 +262,7 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
MAX77693_MUIC_REG_CTRL1, ctrl1, COMP_SW_MASK);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
- goto out;
+ return -EAGAIN;
}
if (attached)
@@ -290,141 +275,457 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
- goto out;
+ return -EAGAIN;
}
dev_info(info->dev,
"CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
ctrl1, ctrl2, attached ? "attached" : "detached");
-out:
- return ret;
+
+ return 0;
}
-static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info,
- bool attached)
+/*
+ * max77693_muic_get_cable_type - Return cable type and check cable state
+ * @info: the instance including private data of max77693 MUIC
+ * @group: the cable group to check
+ * @attached: used to return whether the cable is attached
+ *
+ * This function checks whether the cable is attached or detached, and then
+ * determines the precise cable type according to the cable group:
+ * - MAX77693_CABLE_GROUP_ADC
+ * - MAX77693_CABLE_GROUP_ADC_GND
+ * - MAX77693_CABLE_GROUP_CHG
+ * - MAX77693_CABLE_GROUP_VBVOLT
+ */
+static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
+ enum max77693_muic_cable_group group, bool *attached)
{
- int ret = 0;
- int type;
- int adc, adc1k, adclow;
+ int cable_type = 0;
+ int adc;
+ int adc1k;
+ int adclow;
+ int vbvolt;
+ int chg_type;
+
+ switch (group) {
+ case MAX77693_CABLE_GROUP_ADC:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
+ adc = info->status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ /*
+ * Check current cable state/cable type and store cable type
+ * (info->prev_cable_type) for handling cable when cable is
+ * detached.
+ */
+ if (adc == MAX77693_MUIC_ADC_OPEN) {
+ *attached = false;
+
+ cable_type = info->prev_cable_type;
+ info->prev_cable_type = MAX77693_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
+
+ cable_type = info->prev_cable_type = adc;
+ }
+ break;
+ case MAX77693_CABLE_GROUP_ADC_GND:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
+ adc = info->status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ /*
+ * Check current cable state/cable type and store cable type
+ * (info->prev_cable_type/_gnd) for handling cable when cable
+ * is detached.
+ */
+ if (adc == MAX77693_MUIC_ADC_OPEN) {
+ *attached = false;
+
+ cable_type = info->prev_cable_type_gnd;
+ info->prev_cable_type_gnd = MAX77693_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
+
+ adclow = info->status[0] & STATUS1_ADCLOW_MASK;
+ adclow >>= STATUS1_ADCLOW_SHIFT;
+ adc1k = info->status[0] & STATUS1_ADC1K_MASK;
+ adc1k >>= STATUS1_ADC1K_SHIFT;
+
+ vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
+ vbvolt >>= STATUS2_VBVOLT_SHIFT;
+
+ /**
+ * [0x1][VBVolt][ADCLow][ADC1K]
+ * [0x1 0 0 0 ] : USB_OTG
+ * [0x1 1 0 0 ] : USB_OTG_VB
+ * [0x1 0 1 0 ] : Audio Video Cable with load
+ * [0x1 0 1 1 ] : MHL without charging connector
+ * [0x1 1 1 1 ] : MHL with charging connector
+ */
+ cable_type = ((0x1 << 8)
+ | (vbvolt << 2)
+ | (adclow << 1)
+ | adc1k);
+
+ info->prev_cable_type = adc;
+ info->prev_cable_type_gnd = cable_type;
+ }
- if (attached) {
+ break;
+ case MAX77693_CABLE_GROUP_CHG:
+ /*
+ * Read charger type to check cable type and decide cable state
+ * according to type of charger cable.
+ */
+ chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ if (chg_type == MAX77693_CHARGER_TYPE_NONE) {
+ *attached = false;
+
+ cable_type = info->prev_chg_type;
+ info->prev_chg_type = MAX77693_CHARGER_TYPE_NONE;
+ } else {
+ *attached = true;
+
+ /*
+ * Check current cable state/cable type and store cable
+ * type(info->prev_chg_type) for handling cable when
+ * charger cable is detached.
+ */
+ cable_type = info->prev_chg_type = chg_type;
+ }
+
+ break;
+ case MAX77693_CABLE_GROUP_VBVOLT:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
adc = info->status[0] & STATUS1_ADC_MASK;
- adclow = info->status[0] & STATUS1_ADCLOW_MASK;
- adclow >>= STATUS1_ADCLOW_SHIFT;
- adc1k = info->status[0] & STATUS1_ADC1K_MASK;
- adc1k >>= STATUS1_ADC1K_SHIFT;
-
- /**
- * [0x1][ADCLow][ADC1K]
- * [0x1 0 0 ] : USB_OTG
- * [0x1 1 0 ] : Audio Video Cable with load
- * [0x1 1 1 ] : MHL
+ adc >>= STATUS1_ADC_SHIFT;
+ chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ if (adc == MAX77693_MUIC_ADC_OPEN
+ && chg_type == MAX77693_CHARGER_TYPE_NONE)
+ *attached = false;
+ else
+ *attached = true;
+
+ /*
+ * Read the vbvolt field; if vbvolt is 1,
+ * this cable is used for charging.
+ */
+ vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
+ vbvolt >>= STATUS2_VBVOLT_SHIFT;
+
+ cable_type = vbvolt;
+ break;
+ default:
+ dev_err(info->dev, "Unknown cable group (%d)\n", group);
+ cable_type = -EINVAL;
+ break;
+ }
+
+ return cable_type;
+}
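
The ADC_GND branch above packs the raw status bits into a synthetic cable-type value of the form [0x1][VBVolt][ADCLow][ADC1K]. A small standalone sketch of that encoding, using the STATUS1/STATUS2 bit positions from the defines removed earlier in this patch (ADCLow bit 5, ADC1K bit 7, VBVolt bit 6); it reproduces the MAX77693_MUIC_GND_MHL (0x103) and MAX77693_MUIC_GND_MHL_VB (0x107) constants. Function name and sample register values are illustrative only:

#include <stdio.h>

/* Bit positions as used by this driver's STATUS1/STATUS2 registers. */
#define ADCLOW_SHIFT	5
#define ADC1K_SHIFT	7
#define VBVOLT_SHIFT	6

static int gnd_cable_type(unsigned char status1, unsigned char status2)
{
	int adclow = (status1 >> ADCLOW_SHIFT) & 0x1;
	int adc1k  = (status1 >> ADC1K_SHIFT) & 0x1;
	int vbvolt = (status2 >> VBVOLT_SHIFT) & 0x1;

	/* [0x1][VBVolt][ADCLow][ADC1K] */
	return (0x1 << 8) | (vbvolt << 2) | (adclow << 1) | adc1k;
}

int main(void)
{
	/* ADCLow=1, ADC1K=1, VBVolt=0  ->  0x103 (MHL without charger) */
	printf("0x%x\n", gnd_cable_type(0xa0, 0x00));
	/* ADCLow=1, ADC1K=1, VBVolt=1  ->  0x107 (MHL with charger) */
	printf("0x%x\n", gnd_cable_type(0xa0, 0x40));
	return 0;
}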
+
+static int max77693_muic_dock_handler(struct max77693_muic_info *info,
+ int cable_type, bool attached)
+{
+ int ret = 0;
+ int vbvolt;
+ bool cable_attached;
+ char dock_name[CABLE_NAME_MAX];
+
+ dev_info(info->dev,
+ "external connector is %s (adc:0x%02x)\n",
+ attached ? "attached" : "detached", cable_type);
+
+ switch (cable_type) {
+ case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
+ /*
+ * Check whether the power cable is attached or detached.
+ * The Dock-Smart device requires an external power supply;
+ * if a power cable (USB/TA) isn't connected to the dock,
+ * the user can't use Dock-Smart for desktop mode.
+ */
+ vbvolt = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_VBVOLT, &cable_attached);
+ if (attached && !vbvolt) {
+ dev_warn(info->dev,
+ "Cannot detect external power supply\n");
+ return 0;
+ }
+
+ /*
+ * Notify Dock-Smart/MHL state.
+ * - The Dock-Smart device provides three connectors: HDMI,
+ * USB for mouse/keyboard and a micro-usb port for a USB/TA
+ * cable. The Dock-Smart device always needs an external
+ * power supply (USB/TA cable through the micro-usb port) and
+ * supports screen output from the target to a separate
+ * monitor plus mouse/keyboard for desktop mode.
+ *
+ * Features of 'USB/TA cable with Dock-Smart device'
+ * - Support MHL
+ * - Support external output feature of audio
+ * - Support charging through micro-usb port without data
+ * connection if TA cable is connected to target.
+ * - Support charging and data connection through micro-usb port
+ * if USB cable is connected between target and host
+ * device.
+ * - Support OTG device (Mouse/Keyboard)
*/
- type = ((0x1 << 8) | (adclow << 1) | adc1k);
+ ret = max77693_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "Dock-Smart", attached);
+ extcon_set_cable_state(info->edev, "MHL", attached);
+ goto out;
+ case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* Dock-Car */
+ strcpy(dock_name, "Dock-Car");
+ break;
+ case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
+ strcpy(dock_name, "Dock-Desk");
+ break;
+ case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
+ strcpy(dock_name, "Dock-Audio");
+ if (!attached)
+ extcon_set_cable_state(info->edev, "USB", false);
+ break;
+ default:
+ dev_err(info->dev, "failed to detect %s dock device\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
+ }
+
+ /* Dock-Car/Desk/Audio, PATH:AUDIO */
+ ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+ if (ret < 0)
+ return ret;
+ extcon_set_cable_state(info->edev, dock_name, attached);
+
+out:
+ return 0;
+}
+
+static int max77693_muic_dock_button_handler(struct max77693_muic_info *info,
+ int button_type, bool attached)
+{
+ struct input_dev *dock = info->dock;
+ unsigned int code;
+
+ switch (button_type) {
+ case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON-1
+ ... MAX77693_MUIC_ADC_REMOTE_S3_BUTTON+1:
+ /* DOCK_KEY_PREV */
+ code = KEY_PREVIOUSSONG;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON-1
+ ... MAX77693_MUIC_ADC_REMOTE_S7_BUTTON+1:
+ /* DOCK_KEY_NEXT */
+ code = KEY_NEXTSONG;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:
+ /* DOCK_VOL_DOWN */
+ code = KEY_VOLUMEDOWN;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:
+ /* DOCK_VOL_UP */
+ code = KEY_VOLUMEUP;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON-1
+ ... MAX77693_MUIC_ADC_REMOTE_S12_BUTTON+1:
+ /* DOCK_KEY_PLAY_PAUSE */
+ code = KEY_PLAYPAUSE;
+ break;
+ default:
+ dev_err(info->dev,
+ "failed to detect %s key (adc:0x%x)\n",
+ attached ? "pressed" : "released", button_type);
+ return -EINVAL;
+ }
+
+ input_event(dock, EV_KEY, code, attached);
+ input_sync(dock);
+
+ return 0;
+}
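
The `case X-1 ... X+1:` labels above use the GCC case-range extension, presumably to accept one ADC step of jitter either side of a button's nominal code. Written portably, the same tolerance check is just an absolute-difference test, as in this throwaway sketch (helper name and values are invented for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Accept readings within one ADC step of the nominal button code. */
static bool adc_matches(int reading, int nominal)
{
	return abs(reading - nominal) <= 1;
}

int main(void)
{
	/* Prints "1 1 0": 6 and 7 match a nominal code of 7, 9 does not. */
	printf("%d %d %d\n", adc_matches(6, 7), adc_matches(7, 7),
	       adc_matches(9, 7));
	return 0;
}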
- /* Store previous ADC value to handle accessory
- when accessory will be detached */
- info->prev_adc = adc;
- info->prev_adc_gnd = type;
- } else
- type = info->prev_adc_gnd;
+static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
+{
+ int cable_type_gnd;
+ int ret = 0;
+ bool attached;
- switch (type) {
+ cable_type_gnd = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_ADC_GND, &attached);
+
+ switch (cable_type_gnd) {
case MAX77693_MUIC_GND_USB_OTG:
- /* USB_OTG */
+ case MAX77693_MUIC_GND_USB_OTG_VB:
+ /* USB_OTG, PATH: AP_USB */
ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
if (ret < 0)
- goto out;
+ return ret;
extcon_set_cable_state(info->edev, "USB-Host", attached);
break;
case MAX77693_MUIC_GND_AV_CABLE_LOAD:
- /* Audio Video Cable with load */
+ /* Audio Video Cable with load, PATH:AUDIO */
ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
if (ret < 0)
- goto out;
+ return ret;
extcon_set_cable_state(info->edev,
"Audio-video-load", attached);
break;
- case MAX77693_MUIC_GND_MHL_CABLE:
- /* MHL */
+ case MAX77693_MUIC_GND_MHL:
+ case MAX77693_MUIC_GND_MHL_VB:
+ /* MHL or MHL with USB/TA cable */
extcon_set_cable_state(info->edev, "MHL", attached);
break;
default:
- dev_err(info->dev, "failed to detect %s accessory\n",
+ dev_err(info->dev, "failed to detect %s cable of gnd type\n",
attached ? "attached" : "detached");
- dev_err(info->dev, "- adc:0x%x, adclow:0x%x, adc1k:0x%x\n",
- adc, adclow, adc1k);
- ret = -EINVAL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max77693_muic_jig_handler(struct max77693_muic_info *info,
+ int cable_type, bool attached)
+{
+ char cable_name[32];
+ int ret = 0;
+ u8 path = CONTROL1_SW_OPEN;
+
+ dev_info(info->dev,
+ "external connector is %s (adc:0x%02x)\n",
+ attached ? "attached" : "detached", cable_type);
+
+ switch (cable_type) {
+ case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
+ /* PATH:AP_USB */
+ strcpy(cable_name, "JIG-USB-OFF");
+ path = CONTROL1_SW_USB;
+ break;
+ case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
+ /* PATH:AP_USB */
+ strcpy(cable_name, "JIG-USB-ON");
+ path = CONTROL1_SW_USB;
break;
+ case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
+ /* PATH:AP_UART */
+ strcpy(cable_name, "JIG-UART-OFF");
+ path = CONTROL1_SW_UART;
+ break;
+ default:
+ dev_err(info->dev, "failed to detect %s jig cable\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
}
-out:
- return ret;
+ ret = max77693_muic_set_path(info, path, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, cable_name, attached);
+
+ return 0;
}
-static int max77693_muic_adc_handler(struct max77693_muic_info *info,
- int curr_adc, bool attached)
+static int max77693_muic_adc_handler(struct max77693_muic_info *info)
{
+ int cable_type;
+ int button_type;
+ bool attached;
int ret = 0;
- int adc;
- if (attached) {
- /* Store ADC value to handle accessory
- when accessory will be detached */
- info->prev_adc = curr_adc;
- adc = curr_adc;
- } else
- adc = info->prev_adc;
+ /* Check accessory state which is either detached or attached */
+ cable_type = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_ADC, &attached);
dev_info(info->dev,
"external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
- attached ? "attached" : "detached", curr_adc, info->prev_adc);
+ attached ? "attached" : "detached", cable_type,
+ info->prev_cable_type);
- switch (adc) {
+ switch (cable_type) {
case MAX77693_MUIC_ADC_GROUND:
/* USB_OTG/MHL/Audio */
- max77693_muic_adc_ground_handler(info, attached);
+ max77693_muic_adc_ground_handler(info);
break;
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON:
- /* USB */
- ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
- if (ret < 0)
- goto out;
- extcon_set_cable_state(info->edev, "USB", attached);
- break;
case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF:
- case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:
/* JIG */
- ret = max77693_muic_set_path(info, CONTROL1_SW_UART, attached);
+ ret = max77693_muic_jig_handler(info, cable_type, attached);
if (ret < 0)
- goto out;
- extcon_set_cable_state(info->edev, "JIG", attached);
+ return ret;
break;
- case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE:
- /* Audio Video cable with no-load */
- ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+ case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
+ case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* Dock-Car */
+ case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
+ case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
+ /*
+ * DOCK device
+ *
+ * The MAX77693 MUIC device can detect 34 cable types in total,
+ * excluding charger cables, and does not assign a specific role
+ * to cables with ADC values in the range 0x01 to 0x12. Cables
+ * with no fixed role can therefore be used/defined according to
+ * the schema of the hardware board.
+ */
+ ret = max77693_muic_dock_handler(info, cable_type, attached);
if (ret < 0)
- goto out;
- extcon_set_cable_state(info->edev,
- "Audio-video-noload", attached);
+ return ret;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON: /* DOCK_KEY_PREV */
+ case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON: /* DOCK_KEY_NEXT */
+ case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON: /* DOCK_VOL_DOWN */
+ case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON: /* DOCK_VOL_UP */
+ case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON: /* DOCK_KEY_PLAY_PAUSE */
+ /*
+ * Button of DOCK device
+ * - the Prev/Next/Volume Up/Volume Down/Play-Pause button
+ *
+ * The MAX77693 MUIC device can detect 34 cable types in total,
+ * excluding charger cables, and does not assign a specific role
+ * to cables with ADC values in the range 0x01 to 0x12. Cables
+ * with no fixed role can therefore be used/defined according to
+ * the schema of the hardware board.
+ */
+ if (attached)
+ button_type = info->prev_button_type = cable_type;
+ else
+ button_type = info->prev_button_type;
+
+ ret = max77693_muic_dock_button_handler(info, button_type,
+ attached);
+ if (ret < 0)
+ return ret;
break;
case MAX77693_MUIC_ADC_SEND_END_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S1_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S2_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S4_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S5_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S6_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S8_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S11_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON:
case MAX77693_MUIC_ADC_RESERVED_ACC_1:
case MAX77693_MUIC_ADC_RESERVED_ACC_2:
- case MAX77693_MUIC_ADC_RESERVED_ACC_3:
case MAX77693_MUIC_ADC_RESERVED_ACC_4:
case MAX77693_MUIC_ADC_RESERVED_ACC_5:
case MAX77693_MUIC_ADC_CEA936_AUDIO:
@@ -432,60 +733,164 @@ static int max77693_muic_adc_handler(struct max77693_muic_info *info,
case MAX77693_MUIC_ADC_TTY_CONVERTER:
case MAX77693_MUIC_ADC_UART_CABLE:
case MAX77693_MUIC_ADC_CEA936A_TYPE1_CHG:
- case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD:
case MAX77693_MUIC_ADC_CEA936A_TYPE2_CHG:
- /* This accessory isn't used in general case if it is specially
- needed to detect additional accessory, should implement
- proper operation when this accessory is attached/detached. */
+ /*
+ * This accessory isn't used in the general case. If detection of
+ * an additional accessory is specifically needed, proper handling
+ * should be implemented for when it is attached/detached.
+ */
dev_info(info->dev,
"accessory is %s but it isn't used (adc:0x%x)\n",
- attached ? "attached" : "detached", adc);
- goto out;
+ attached ? "attached" : "detached", cable_type);
+ return -EAGAIN;
default:
dev_err(info->dev,
"failed to detect %s accessory (adc:0x%x)\n",
- attached ? "attached" : "detached", adc);
- ret = -EINVAL;
- goto out;
+ attached ? "attached" : "detached", cable_type);
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
-static int max77693_muic_chg_handler(struct max77693_muic_info *info,
- int curr_chg_type, bool attached)
+static int max77693_muic_chg_handler(struct max77693_muic_info *info)
{
- int ret = 0;
int chg_type;
+ int cable_type_gnd;
+ int cable_type;
+ bool attached;
+ bool cable_attached;
+ int ret = 0;
- if (attached) {
- /* Store previous charger type to control
- when charger accessory will be detached */
- info->prev_chg_type = curr_chg_type;
- chg_type = curr_chg_type;
- } else
- chg_type = info->prev_chg_type;
+ chg_type = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_CHG, &attached);
dev_info(info->dev,
"external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
attached ? "attached" : "detached",
- curr_chg_type, info->prev_chg_type);
+ chg_type, info->prev_chg_type);
switch (chg_type) {
case MAX77693_CHARGER_TYPE_USB:
- ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
- if (ret < 0)
- goto out;
- extcon_set_cable_state(info->edev, "USB", attached);
+ case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
+ case MAX77693_CHARGER_TYPE_NONE:
+ /* Check MAX77693_CABLE_GROUP_ADC_GND type */
+ cable_type_gnd = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_ADC_GND,
+ &cable_attached);
+ switch (cable_type_gnd) {
+ case MAX77693_MUIC_GND_MHL:
+ case MAX77693_MUIC_GND_MHL_VB:
+ /*
+ * MHL cable with MHL_TA(USB/TA) cable
+ * - The MHL cable provides two ports (an HDMI line and a separate
+ * micro-usb port). When the target connects an MHL cable, the
+ * extcon driver checks whether an MHL_TA (USB/TA) cable is also
+ * connected; if it is, the extcon driver notifies the notifiee
+ * so the battery can be charged.
+ *
+ * Features of 'MHL_TA(USB/TA) with MHL cable'
+ * - Support MHL
+ * - Support charging through micro-usb port without data connection
+ */
+ extcon_set_cable_state(info->edev, "MHL_TA", attached);
+ if (!cable_attached)
+ extcon_set_cable_state(info->edev, "MHL", cable_attached);
+ break;
+ }
+
+ /* Check MAX77693_CABLE_GROUP_ADC type */
+ cable_type = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_ADC,
+ &cable_attached);
+ switch (cable_type) {
+ case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
+ /*
+ * Dock-Audio device with USB/TA cable
+ * - The dock device provides two ports (Dock-Audio and a micro-usb
+ * port). When the target connects a Dock-Audio device, the extcon
+ * driver checks whether a USB/TA cable is also connected; if it
+ * is, the extcon driver notifies the notifiee so the battery can
+ * be charged.
+ *
+ * Features of 'USB/TA cable with Dock-Audio device'
+ * - Support external output feature of audio.
+ * - Support charging through micro-usb port without data
+ * connection.
+ */
+ extcon_set_cable_state(info->edev, "USB", attached);
+
+ if (!cable_attached)
+ extcon_set_cable_state(info->edev, "Dock-Audio", cable_attached);
+ break;
+ case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
+ /*
+ * Dock-Smart device with USB/TA cable
+ * - The Dock-Smart device provides three connectors: HDMI,
+ * USB for mouse/keyboard and a micro-usb port for a USB/TA
+ * cable. The Dock-Smart device always needs an external
+ * power supply (USB/TA cable through the micro-usb port) and
+ * supports screen output from the target to a separate
+ * monitor plus mouse/keyboard for desktop mode.
+ *
+ * Features of 'USB/TA cable with Dock-Smart device'
+ * - Support MHL
+ * - Support external output feature of audio
+ * - Support charging through micro-usb port without data
+ * connection if TA cable is connected to target.
+ * - Support charging and data connection through micro-usb port
+ * if USB cable is connected between target and host
+ * device.
+ * - Support OTG device (Mouse/Keyboard)
+ */
+ ret = max77693_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "Dock-Smart", attached);
+ extcon_set_cable_state(info->edev, "MHL", attached);
+
+ break;
+ }
+
+ /* Check MAX77693_CABLE_GROUP_CHG type */
+ switch (chg_type) {
+ case MAX77693_CHARGER_TYPE_NONE:
+ /*
+ * When MHL (with a USB/TA cable) or Dock-Audio with a USB/TA cable
+ * is attached, the MUIC device raises the two interrupts below:
+ * - 'MAX77693_MUIC_IRQ_INT1_ADC' for detecting MHL/Dock-Audio.
+ * - 'MAX77693_MUIC_IRQ_INT2_CHGTYP' for detecting the USB/TA cable
+ * connected to MHL or Dock-Audio.
+ * MAX77693_MUIC_IRQ_INT1_ADC always arrives earlier than
+ * MAX77693_MUIC_IRQ_INT2_CHGTYP.
+ *
+ * If the user attaches MHL (with a USB/TA cable) and immediately
+ * detaches it before the MAX77693_MUIC_IRQ_INT2_CHGTYP interrupt
+ * arrives, the USB/TA cable would be left reported as connected
+ * even though it is not, which the user would see as unexpected
+ * behaviour. So the driver must handle this situation even though
+ * the previous charger type is N/A.
+ */
+ break;
+ case MAX77693_CHARGER_TYPE_USB:
+ /* Only USB cable, PATH:AP_USB */
+ ret = max77693_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "USB", attached);
+ break;
+ case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
+ /* Only TA cable */
+ extcon_set_cable_state(info->edev, "TA", attached);
+ break;
+ }
break;
case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT:
extcon_set_cable_state(info->edev,
"Charge-downstream", attached);
break;
- case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state(info->edev, "TA", attached);
- break;
case MAX77693_CHARGER_TYPE_APPLE_500MA:
extcon_set_cable_state(info->edev, "Slow-charger", attached);
break;
@@ -498,22 +903,18 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info,
dev_err(info->dev,
"failed to detect %s accessory (chg_type:0x%x)\n",
attached ? "attached" : "detached", chg_type);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
static void max77693_muic_irq_work(struct work_struct *work)
{
struct max77693_muic_info *info = container_of(work,
struct max77693_muic_info, irq_work);
- int curr_adc, curr_chg_type;
int irq_type = -1;
int i, ret = 0;
- bool attached = true;
if (!info->edev)
return;
@@ -539,14 +940,7 @@ static void max77693_muic_irq_work(struct work_struct *work)
case MAX77693_MUIC_IRQ_INT1_ADC1K:
/* Handle all of accessory except for
type of charger accessory */
- curr_adc = info->status[0] & STATUS1_ADC_MASK;
- curr_adc >>= STATUS1_ADC_SHIFT;
-
- /* Check accessory state which is either detached or attached */
- if (curr_adc == MAX77693_MUIC_ADC_OPEN)
- attached = false;
-
- ret = max77693_muic_adc_handler(info, curr_adc, attached);
+ ret = max77693_muic_adc_handler(info);
break;
case MAX77693_MUIC_IRQ_INT2_CHGTYP:
case MAX77693_MUIC_IRQ_INT2_CHGDETREUN:
@@ -555,15 +949,7 @@ static void max77693_muic_irq_work(struct work_struct *work)
case MAX77693_MUIC_IRQ_INT2_VBVOLT:
case MAX77693_MUIC_IRQ_INT2_VIDRM:
/* Handle charger accessory */
- curr_chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
- curr_chg_type >>= STATUS2_CHGTYP_SHIFT;
-
- /* Check charger accessory state which
- is either detached or attached */
- if (curr_chg_type == MAX77693_CHARGER_TYPE_NONE)
- attached = false;
-
- ret = max77693_muic_chg_handler(info, curr_chg_type, attached);
+ ret = max77693_muic_chg_handler(info);
break;
case MAX77693_MUIC_IRQ_INT3_EOC:
case MAX77693_MUIC_IRQ_INT3_CGMBC:
@@ -575,7 +961,8 @@ static void max77693_muic_irq_work(struct work_struct *work)
default:
dev_err(info->dev, "muic interrupt: irq %d occurred\n",
irq_type);
- break;
+ mutex_unlock(&info->mutex);
+ return;
}
if (ret < 0)
@@ -604,7 +991,9 @@ static struct regmap_config max77693_muic_regmap_config = {
static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
{
int ret = 0;
- int adc, chg_type;
+ int adc;
+ int chg_type;
+ bool attached;
mutex_lock(&info->mutex);
@@ -617,35 +1006,39 @@ static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
return -EINVAL;
}
- adc = info->status[0] & STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
-
- if (adc != MAX77693_MUIC_ADC_OPEN) {
- dev_info(info->dev,
- "external connector is attached (adc:0x%02x)\n", adc);
+ adc = max77693_muic_get_cable_type(info, MAX77693_CABLE_GROUP_ADC,
+ &attached);
+ if (attached && adc != MAX77693_MUIC_ADC_OPEN) {
+ ret = max77693_muic_adc_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect accessory\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
- ret = max77693_muic_adc_handler(info, adc, true);
- if (ret < 0)
- dev_err(info->dev, "failed to detect accessory\n");
- goto out;
+ chg_type = max77693_muic_get_cable_type(info, MAX77693_CABLE_GROUP_CHG,
+ &attached);
+ if (attached && chg_type != MAX77693_CHARGER_TYPE_NONE) {
+ ret = max77693_muic_chg_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect charger accessory\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
}
- chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
- chg_type >>= STATUS2_CHGTYP_SHIFT;
+ mutex_unlock(&info->mutex);
- if (chg_type != MAX77693_CHARGER_TYPE_NONE) {
- dev_info(info->dev,
- "external connector is attached (chg_type:0x%x)\n",
- chg_type);
+ return 0;
+}
- max77693_muic_chg_handler(info, chg_type, true);
- if (ret < 0)
- dev_err(info->dev, "failed to detect charger accessory\n");
- }
+static void max77693_muic_detect_cable_wq(struct work_struct *work)
+{
+ struct max77693_muic_info *info = container_of(to_delayed_work(work),
+ struct max77693_muic_info, wq_detcable);
-out:
- mutex_unlock(&info->mutex);
- return ret;
+ max77693_muic_detect_accessory(info);
}
static int max77693_muic_probe(struct platform_device *pdev)
@@ -654,7 +1047,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
struct max77693_muic_info *info;
- int ret, i;
+ int delay_jiffies;
+ int ret;
+ int i;
u8 id;
info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info),
@@ -678,6 +1073,32 @@ static int max77693_muic_probe(struct platform_device *pdev)
return ret;
}
}
+
+ /* Register input device for button of dock device */
+ info->dock = devm_input_allocate_device(&pdev->dev);
+ if (!info->dock) {
+ dev_err(&pdev->dev, "%s: failed to allocate input\n", __func__);
+ return -ENOMEM;
+ }
+ info->dock->name = "max77693-muic/dock";
+ info->dock->phys = "max77693-muic/extcon";
+ info->dock->dev.parent = &pdev->dev;
+
+ __set_bit(EV_REP, info->dock->evbit);
+
+ input_set_capability(info->dock, EV_KEY, KEY_VOLUMEUP);
+ input_set_capability(info->dock, EV_KEY, KEY_VOLUMEDOWN);
+ input_set_capability(info->dock, EV_KEY, KEY_PLAYPAUSE);
+ input_set_capability(info->dock, EV_KEY, KEY_PREVIOUSSONG);
+ input_set_capability(info->dock, EV_KEY, KEY_NEXTSONG);
+
+ ret = input_register_device(info->dock);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot register input device error(%d)\n",
+ ret);
+ return ret;
+ }
+
platform_set_drvdata(pdev, info);
mutex_init(&info->mutex);
@@ -697,13 +1118,13 @@ static int max77693_muic_probe(struct platform_device *pdev)
ret = request_threaded_irq(virq, NULL,
max77693_muic_irq_handler,
- IRQF_ONESHOT, muic_irq->name, info);
+ IRQF_NO_SUSPEND,
+ muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"failed: irq request (IRQ: %d,"
" error :%d)\n",
muic_irq->irq, ret);
-
goto err_irq;
}
}
@@ -749,23 +1170,54 @@ static int max77693_muic_probe(struct platform_device *pdev)
= muic_pdata->init_data[i].data;
}
+ /*
+ * Default USB/UART path: either UART/USB or AUX_UART/AUX_USB,
+ * i.e. the h/w path selected by COMP2/COMN1 in the CONTROL1 register.
+ */
+ if (muic_pdata->path_uart)
+ info->path_uart = muic_pdata->path_uart;
+ else
+ info->path_uart = CONTROL1_SW_UART;
+
+ if (muic_pdata->path_usb)
+ info->path_usb = muic_pdata->path_usb;
+ else
+ info->path_usb = CONTROL1_SW_USB;
+
+ /* Set initial path for UART */
+ max77693_muic_set_path(info, info->path_uart, true);
+
/* Check revision number of MUIC device*/
ret = max77693_read_reg(info->max77693->regmap_muic,
MAX77693_MUIC_REG_ID, &id);
if (ret < 0) {
dev_err(&pdev->dev, "failed to read revision number\n");
- goto err_irq;
+ goto err_extcon;
}
dev_info(info->dev, "device ID : 0x%x\n", id);
/* Set ADC debounce time */
max77693_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
- /* Detect accessory on boot */
- max77693_muic_detect_accessory(info);
+ /*
+ * Detect the accessory after the platform has completed initialization.
+ *
+ * - Use a delayed workqueue to detect the cable state and then
+ * notify the cable state to the notifiee/platform through a uevent.
+ * The extcon provider driver should only notify the cable state to
+ * the upper layer after the platform has finished booting.
+ */
+ INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq);
+ if (muic_pdata->detcable_delay_ms)
+ delay_jiffies = msecs_to_jiffies(muic_pdata->detcable_delay_ms);
+ else
+ delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+ schedule_delayed_work(&info->wq_detcable, delay_jiffies);
return ret;
+err_extcon:
+ extcon_dev_unregister(info->edev);
err_irq:
while (--i >= 0)
free_irq(muic_irqs[i].virq, info);
@@ -780,6 +1232,7 @@ static int max77693_muic_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
free_irq(muic_irqs[i].virq, info);
cancel_work_sync(&info->irq_work);
+ input_unregister_device(info->dock);
extcon_dev_unregister(info->edev);
return 0;
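Both MUIC drivers in this patch move accessory detection out of probe() and into a delayed work item, so cable uevents are only emitted once the platform has finished booting. Below is a minimal sketch of that pattern; it builds only against kernel headers, and my_info, my_detect_accessory() and MY_DELAY_MS_DEFAULT are placeholder names, not symbols from the driver.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_DELAY_MS_DEFAULT	20000	/* unit: millisecond */

struct my_info {
        struct delayed_work wq_detcable;
};

/* Placeholder for the real accessory detection (read STATUS, notify extcon). */
static void my_detect_accessory(struct my_info *info)
{
}

static void my_detect_cable_wq(struct work_struct *work)
{
        struct my_info *info = container_of(to_delayed_work(work),
                                            struct my_info, wq_detcable);

        my_detect_accessory(info);
}

/* Called from probe(): defer the first detection instead of running it inline. */
static void my_schedule_detection(struct my_info *info, unsigned int delay_ms)
{
        if (!delay_ms)
                delay_ms = MY_DELAY_MS_DEFAULT;

        INIT_DELAYED_WORK(&info->wq_detcable, my_detect_cable_wq);
        schedule_delayed_work(&info->wq_detcable, msecs_to_jiffies(delay_ms));
}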
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 93009fe6ef05..e636d950ad6c 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -29,51 +29,14 @@
#include <linux/irqdomain.h>
#define DEV_NAME "max8997-muic"
+#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
-/* MAX8997-MUIC STATUS1 register */
-#define STATUS1_ADC_SHIFT 0
-#define STATUS1_ADCLOW_SHIFT 5
-#define STATUS1_ADCERR_SHIFT 6
-#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
-#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
-#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
-
-/* MAX8997-MUIC STATUS2 register */
-#define STATUS2_CHGTYP_SHIFT 0
-#define STATUS2_CHGDETRUN_SHIFT 3
-#define STATUS2_DCDTMR_SHIFT 4
-#define STATUS2_DBCHG_SHIFT 5
-#define STATUS2_VBVOLT_SHIFT 6
-#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
-#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
-#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DBCHG_MASK (0x1 << STATUS2_DBCHG_SHIFT)
-#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
-
-/* MAX8997-MUIC STATUS3 register */
-#define STATUS3_OVP_SHIFT 2
-#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
-
-/* MAX8997-MUIC CONTROL1 register */
-#define COMN1SW_SHIFT 0
-#define COMP2SW_SHIFT 3
-#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
-#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
-#define SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
-
-#define MAX8997_SW_USB ((1 << COMP2SW_SHIFT) | (1 << COMN1SW_SHIFT))
-#define MAX8997_SW_AUDIO ((2 << COMP2SW_SHIFT) | (2 << COMN1SW_SHIFT))
-#define MAX8997_SW_UART ((3 << COMP2SW_SHIFT) | (3 << COMN1SW_SHIFT))
-#define MAX8997_SW_OPEN ((0 << COMP2SW_SHIFT) | (0 << COMN1SW_SHIFT))
-
-#define MAX8997_ADC_GROUND 0x00
-#define MAX8997_ADC_MHL 0x01
-#define MAX8997_ADC_JIG_USB_1 0x18
-#define MAX8997_ADC_JIG_USB_2 0x19
-#define MAX8997_ADC_DESKDOCK 0x1a
-#define MAX8997_ADC_JIG_UART 0x1c
-#define MAX8997_ADC_CARDOCK 0x1d
-#define MAX8997_ADC_OPEN 0x1f
+enum max8997_muic_adc_debounce_time {
+ ADC_DEBOUNCE_TIME_0_5MS = 0, /* 0.5ms */
+ ADC_DEBOUNCE_TIME_10MS, /* 10ms */
+ ADC_DEBOUNCE_TIME_25MS, /* 25ms */
+ ADC_DEBOUNCE_TIME_38_62MS, /* 38.62ms */
+};
struct max8997_muic_irq {
unsigned int irq;
@@ -82,61 +45,303 @@ struct max8997_muic_irq {
};
static struct max8997_muic_irq muic_irqs[] = {
- { MAX8997_MUICIRQ_ADCError, "muic-ADC_error" },
- { MAX8997_MUICIRQ_ADCLow, "muic-ADC_low" },
- { MAX8997_MUICIRQ_ADC, "muic-ADC" },
- { MAX8997_MUICIRQ_VBVolt, "muic-VB_voltage" },
- { MAX8997_MUICIRQ_DBChg, "muic-DB_charger" },
- { MAX8997_MUICIRQ_DCDTmr, "muic-DCD_timer" },
- { MAX8997_MUICIRQ_ChgDetRun, "muic-CDR_status" },
- { MAX8997_MUICIRQ_ChgTyp, "muic-charger_type" },
- { MAX8997_MUICIRQ_OVP, "muic-over_voltage" },
+ { MAX8997_MUICIRQ_ADCError, "muic-ADCERROR" },
+ { MAX8997_MUICIRQ_ADCLow, "muic-ADCLOW" },
+ { MAX8997_MUICIRQ_ADC, "muic-ADC" },
+ { MAX8997_MUICIRQ_VBVolt, "muic-VBVOLT" },
+ { MAX8997_MUICIRQ_DBChg, "muic-DBCHG" },
+ { MAX8997_MUICIRQ_DCDTmr, "muic-DCDTMR" },
+ { MAX8997_MUICIRQ_ChgDetRun, "muic-CHGDETRUN" },
+ { MAX8997_MUICIRQ_ChgTyp, "muic-CHGTYP" },
+ { MAX8997_MUICIRQ_OVP, "muic-OVP" },
+};
+
+/* Define supported cable type */
+enum max8997_muic_acc_type {
+ MAX8997_MUIC_ADC_GROUND = 0x0,
+ MAX8997_MUIC_ADC_MHL, /* MHL*/
+ MAX8997_MUIC_ADC_REMOTE_S1_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S2_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S3_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S4_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S5_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S6_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S7_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S8_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S9_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S10_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S11_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S12_BUTTON,
+ MAX8997_MUIC_ADC_RESERVED_ACC_1,
+ MAX8997_MUIC_ADC_RESERVED_ACC_2,
+ MAX8997_MUIC_ADC_RESERVED_ACC_3,
+ MAX8997_MUIC_ADC_RESERVED_ACC_4,
+ MAX8997_MUIC_ADC_RESERVED_ACC_5,
+ MAX8997_MUIC_ADC_CEA936_AUDIO,
+ MAX8997_MUIC_ADC_PHONE_POWERED_DEV,
+ MAX8997_MUIC_ADC_TTY_CONVERTER,
+ MAX8997_MUIC_ADC_UART_CABLE,
+ MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG,
+ MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF, /* JIG-USB-OFF */
+ MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON, /* JIG-USB-ON */
+ MAX8997_MUIC_ADC_AV_CABLE_NOLOAD, /* DESKDOCK */
+ MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG,
+ MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF, /* JIG-UART */
+ MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON, /* CARDOCK */
+ MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE,
+ MAX8997_MUIC_ADC_OPEN, /* OPEN */
+};
+
+enum max8997_muic_cable_group {
+ MAX8997_CABLE_GROUP_ADC = 0,
+ MAX8997_CABLE_GROUP_ADC_GND,
+ MAX8997_CABLE_GROUP_CHG,
+ MAX8997_CABLE_GROUP_VBVOLT,
+};
+
+enum max8997_muic_usb_type {
+ MAX8997_USB_HOST,
+ MAX8997_USB_DEVICE,
+};
+
+enum max8997_muic_charger_type {
+ MAX8997_CHARGER_TYPE_NONE = 0,
+ MAX8997_CHARGER_TYPE_USB,
+ MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT,
+ MAX8997_CHARGER_TYPE_DEDICATED_CHG,
+ MAX8997_CHARGER_TYPE_500MA,
+ MAX8997_CHARGER_TYPE_1A,
+ MAX8997_CHARGER_TYPE_DEAD_BATTERY = 7,
};
struct max8997_muic_info {
struct device *dev;
struct i2c_client *muic;
- struct max8997_muic_platform_data *muic_pdata;
+ struct extcon_dev *edev;
+ int prev_cable_type;
+ int prev_chg_type;
+ u8 status[2];
int irq;
struct work_struct irq_work;
+ struct mutex mutex;
+ struct max8997_muic_platform_data *muic_pdata;
enum max8997_muic_charger_type pre_charger_type;
- int pre_adc;
- struct mutex mutex;
+ /*
+	 * Use a delayed workqueue to detect the cable state and then
+	 * notify it to the platform/notifiee through a uevent. Once the
+	 * platform has finished booting, the extcon provider driver should
+	 * report the cable state to the upper layer.
+ */
+ struct delayed_work wq_detcable;
+
+ /*
+	 * Default USB/UART path: select either UART/USB or AUX_UART/AUX_USB
+	 * as the h/w path of COMP2/COMN1 on the CONTROL1 register.
+ */
+ int path_usb;
+ int path_uart;
+};
- struct extcon_dev *edev;
+enum {
+ EXTCON_CABLE_USB = 0,
+ EXTCON_CABLE_USB_HOST,
+ EXTCON_CABLE_TA,
+ EXTCON_CABLE_FAST_CHARGER,
+ EXTCON_CABLE_SLOW_CHARGER,
+ EXTCON_CABLE_CHARGE_DOWNSTREAM,
+ EXTCON_CABLE_MHL,
+ EXTCON_CABLE_DOCK_DESK,
+ EXTCON_CABLE_DOCK_CARD,
+ EXTCON_CABLE_JIG,
+
+ _EXTCON_CABLE_NUM,
};
-const char *max8997_extcon_cable[] = {
- [0] = "USB",
- [1] = "USB-Host",
- [2] = "TA",
- [3] = "Fast-charger",
- [4] = "Slow-charger",
- [5] = "Charge-downstream",
- [6] = "MHL",
- [7] = "Dock-desk",
- [8] = "Dock-card",
- [9] = "JIG",
+static const char *max8997_extcon_cable[] = {
+ [EXTCON_CABLE_USB] = "USB",
+ [EXTCON_CABLE_USB_HOST] = "USB-Host",
+ [EXTCON_CABLE_TA] = "TA",
+ [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
+ [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
+ [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
+ [EXTCON_CABLE_MHL] = "MHL",
+ [EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
+ [EXTCON_CABLE_DOCK_CARD] = "Dock-Card",
+ [EXTCON_CABLE_JIG] = "JIG",
NULL,
};
+/*
+ * max8997_muic_set_debounce_time - Set the debounce time of ADC
+ * @info: the instance including private data of max8997 MUIC
+ * @time: the debounce time of ADC
+ */
+static int max8997_muic_set_debounce_time(struct max8997_muic_info *info,
+ enum max8997_muic_adc_debounce_time time)
+{
+ int ret;
+
+ switch (time) {
+ case ADC_DEBOUNCE_TIME_0_5MS:
+ case ADC_DEBOUNCE_TIME_10MS:
+ case ADC_DEBOUNCE_TIME_25MS:
+ case ADC_DEBOUNCE_TIME_38_62MS:
+ ret = max8997_update_reg(info->muic,
+ MAX8997_MUIC_REG_CONTROL3,
+ time << CONTROL3_ADCDBSET_SHIFT,
+ CONTROL3_ADCDBSET_MASK);
+ if (ret) {
+ dev_err(info->dev, "failed to set ADC debounce time\n");
+ return -EAGAIN;
+ }
+ break;
+ default:
+ dev_err(info->dev, "invalid ADC debounce time\n");
+ return -EINVAL;
+ }
+
+ return 0;
+};
+
+/*
+ * max8997_muic_set_path - Set hardware line according to attached cable
+ * @info: the instance including private data of max8997 MUIC
+ * @value: the path according to attached cable
+ * @attached: the state of cable (true:attached, false:detached)
+ *
+ * The max8997 MUIC device shares its external H/W lines among a variety of
+ * cables, so this function sets the internal path of the H/W lines according
+ * to the type of the attached cable.
+ */
+static int max8997_muic_set_path(struct max8997_muic_info *info,
+ u8 val, bool attached)
+{
+ int ret = 0;
+ u8 ctrl1, ctrl2 = 0;
+
+ if (attached)
+ ctrl1 = val;
+ else
+ ctrl1 = CONTROL1_SW_OPEN;
+
+ ret = max8997_update_reg(info->muic,
+ MAX8997_MUIC_REG_CONTROL1, ctrl1, COMP_SW_MASK);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update MUIC register\n");
+ return -EAGAIN;
+ }
+
+ if (attached)
+ ctrl2 |= CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
+ else
+ ctrl2 |= CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
+
+ ret = max8997_update_reg(info->muic,
+ MAX8997_MUIC_REG_CONTROL2, ctrl2,
+ CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update MUIC register\n");
+ return -EAGAIN;
+ }
+
+ dev_info(info->dev,
+ "CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
+ ctrl1, ctrl2, attached ? "attached" : "detached");
+
+ return 0;
+}
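The switch values deleted at the top of this diff (MAX8997_SW_USB and friends) are simply the same 3-bit code written into both the COMN1 and COMP2 fields of CONTROL1, and max8997_muic_set_path() now applies such a value with a masked read-modify-write. The standalone sketch below reproduces that bit math using the shift/mask constants from the removed block; update_reg() is only a stand-in for what max8997_update_reg() does on the real register.

#include <stdio.h>
#include <stdint.h>

#define COMN1SW_SHIFT   0
#define COMP2SW_SHIFT   3
#define COMN1SW_MASK    (0x7 << COMN1SW_SHIFT)
#define COMP2SW_MASK    (0x7 << COMP2SW_SHIFT)
#define SW_MASK         (COMP2SW_MASK | COMN1SW_MASK)

/* Same 3-bit code in both fields: 1 = USB, 2 = AUDIO, 3 = UART, 0 = OPEN */
static uint8_t sw_path(unsigned code)
{
        return (code << COMP2SW_SHIFT) | (code << COMN1SW_SHIFT);
}

/* Masked read-modify-write, like max8997_update_reg(reg, val, mask) */
static uint8_t update_reg(uint8_t old, uint8_t val, uint8_t mask)
{
        return (old & ~mask) | (val & mask);
}

int main(void)
{
        uint8_t ctrl1 = 0xff;                           /* pretend current register value */

        ctrl1 = update_reg(ctrl1, sw_path(1), SW_MASK); /* select the USB path */
        printf("CONTROL1: 0x%02x\n", ctrl1);            /* 0xc9: USB path, other bits kept */
        return 0;
}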
+
+/*
+ * max8997_muic_get_cable_type - Return cable type and check cable state
+ * @info: the instance including private data of max8997 MUIC
+ * @group: the cable group to inspect
+ * @attached: set to the cable state (true: attached, false: detached)
+ *
+ * This function checks whether the cable is attached or detached, and then
+ * determines the precise cable type according to the cable group:
+ * - MAX8997_CABLE_GROUP_ADC
+ * - MAX8997_CABLE_GROUP_CHG
+ */
+static int max8997_muic_get_cable_type(struct max8997_muic_info *info,
+ enum max8997_muic_cable_group group, bool *attached)
+{
+ int cable_type = 0;
+ int adc;
+ int chg_type;
+
+ switch (group) {
+ case MAX8997_CABLE_GROUP_ADC:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
+ adc = info->status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ /*
+ * Check current cable state/cable type and store cable type
+ * (info->prev_cable_type) for handling cable when cable is
+ * detached.
+ */
+ if (adc == MAX8997_MUIC_ADC_OPEN) {
+ *attached = false;
+
+ cable_type = info->prev_cable_type;
+ info->prev_cable_type = MAX8997_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
+
+ cable_type = info->prev_cable_type = adc;
+ }
+ break;
+ case MAX8997_CABLE_GROUP_CHG:
+ /*
+ * Read charger type to check cable type and decide cable state
+ * according to type of charger cable.
+ */
+ chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ if (chg_type == MAX8997_CHARGER_TYPE_NONE) {
+ *attached = false;
+
+ cable_type = info->prev_chg_type;
+ info->prev_chg_type = MAX8997_CHARGER_TYPE_NONE;
+ } else {
+ *attached = true;
+
+ /*
+ * Check current cable state/cable type and store cable
+ * type(info->prev_chg_type) for handling cable when
+ * charger cable is detached.
+ */
+ cable_type = info->prev_chg_type = chg_type;
+ }
+
+ break;
+ default:
+ dev_err(info->dev, "Unknown cable group (%d)\n", group);
+ cable_type = -EINVAL;
+ break;
+ }
+
+ return cable_type;
+}
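The prev_cable_type/prev_chg_type bookkeeping above is what makes detach events work: when the ADC reads back "open", the driver reports the cable it last saw as detached. A small standalone sketch of just that logic (ADC_OPEN and the function names here are illustrative):

#include <stdio.h>
#include <stdbool.h>

#define ADC_OPEN        0x1f    /* "no cable" code, as in the driver */

struct muic_state {
        int prev_cable_type;
};

/* Returns the cable type to act on and whether it is attached or detached. */
static int get_cable_type(struct muic_state *s, int adc, bool *attached)
{
        int cable_type;

        if (adc == ADC_OPEN) {
                *attached = false;
                cable_type = s->prev_cable_type;        /* report the cable that just went away */
                s->prev_cable_type = ADC_OPEN;
        } else {
                *attached = true;
                cable_type = s->prev_cable_type = adc;  /* remember it for the detach event */
        }
        return cable_type;
}

int main(void)
{
        struct muic_state s = { .prev_cable_type = ADC_OPEN };
        bool attached;
        int type;

        type = get_cable_type(&s, 0x19, &attached);     /* plug in JIG-USB (adc 0x19) */
        printf("type 0x%x attached=%d\n", type, attached);

        type = get_cable_type(&s, ADC_OPEN, &attached); /* unplug */
        printf("type 0x%x attached=%d\n", type, attached);
        return 0;
}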
+
static int max8997_muic_handle_usb(struct max8997_muic_info *info,
enum max8997_muic_usb_type usb_type, bool attached)
{
int ret = 0;
if (usb_type == MAX8997_USB_HOST) {
- /* switch to USB */
- ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
- attached ? MAX8997_SW_USB : MAX8997_SW_OPEN,
- SW_MASK);
- if (ret) {
+ ret = max8997_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0) {
dev_err(info->dev, "failed to update muic register\n");
- goto out;
+ return ret;
}
}
@@ -148,41 +353,39 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
extcon_set_cable_state(info->edev, "USB", attached);
break;
default:
- ret = -EINVAL;
- break;
+ dev_err(info->dev, "failed to detect %s usb cable\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
static int max8997_muic_handle_dock(struct max8997_muic_info *info,
- int adc, bool attached)
+ int cable_type, bool attached)
{
int ret = 0;
- /* switch to AUDIO */
- ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
- attached ? MAX8997_SW_AUDIO : MAX8997_SW_OPEN,
- SW_MASK);
+ ret = max8997_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
if (ret) {
dev_err(info->dev, "failed to update muic register\n");
- goto out;
+ return ret;
}
- switch (adc) {
- case MAX8997_ADC_DESKDOCK:
+ switch (cable_type) {
+ case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
extcon_set_cable_state(info->edev, "Dock-desk", attached);
break;
- case MAX8997_ADC_CARDOCK:
+ case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
extcon_set_cable_state(info->edev, "Dock-card", attached);
break;
default:
- ret = -EINVAL;
- break;
+ dev_err(info->dev, "failed to detect %s dock device\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
}
-out:
- return ret;
+
+ return 0;
}
static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
@@ -191,199 +394,188 @@ static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
int ret = 0;
/* switch to UART */
- ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
- attached ? MAX8997_SW_UART : MAX8997_SW_OPEN,
- SW_MASK);
+ ret = max8997_muic_set_path(info, info->path_uart, attached);
if (ret) {
dev_err(info->dev, "failed to update muic register\n");
- goto out;
+ return -EINVAL;
}
extcon_set_cable_state(info->edev, "JIG", attached);
-out:
- return ret;
-}
-
-static int max8997_muic_handle_adc_detach(struct max8997_muic_info *info)
-{
- int ret = 0;
- switch (info->pre_adc) {
- case MAX8997_ADC_GROUND:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, false);
- break;
- case MAX8997_ADC_MHL:
- extcon_set_cable_state(info->edev, "MHL", false);
- break;
- case MAX8997_ADC_JIG_USB_1:
- case MAX8997_ADC_JIG_USB_2:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, false);
- break;
- case MAX8997_ADC_DESKDOCK:
- case MAX8997_ADC_CARDOCK:
- ret = max8997_muic_handle_dock(info, info->pre_adc, false);
- break;
- case MAX8997_ADC_JIG_UART:
- ret = max8997_muic_handle_jig_uart(info, false);
- break;
- default:
- break;
- }
-
- return ret;
+ return 0;
}
-static int max8997_muic_handle_adc(struct max8997_muic_info *info, int adc)
+static int max8997_muic_adc_handler(struct max8997_muic_info *info)
{
+ int cable_type;
+ bool attached;
int ret = 0;
- switch (adc) {
- case MAX8997_ADC_GROUND:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, true);
- break;
- case MAX8997_ADC_MHL:
- extcon_set_cable_state(info->edev, "MHL", true);
- break;
- case MAX8997_ADC_JIG_USB_1:
- case MAX8997_ADC_JIG_USB_2:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, true);
- break;
- case MAX8997_ADC_DESKDOCK:
- case MAX8997_ADC_CARDOCK:
- ret = max8997_muic_handle_dock(info, adc, true);
- break;
- case MAX8997_ADC_JIG_UART:
- ret = max8997_muic_handle_jig_uart(info, true);
- break;
- case MAX8997_ADC_OPEN:
- ret = max8997_muic_handle_adc_detach(info);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- info->pre_adc = adc;
-out:
- return ret;
-}
-
-static int max8997_muic_handle_charger_type_detach(
- struct max8997_muic_info *info)
-{
- switch (info->pre_charger_type) {
- case MAX8997_CHARGER_TYPE_USB:
- extcon_set_cable_state(info->edev, "USB", false);
- break;
- case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state(info->edev, "Charge-downstream", false);
- break;
- case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state(info->edev, "TA", false);
- break;
- case MAX8997_CHARGER_TYPE_500MA:
- extcon_set_cable_state(info->edev, "Slow-charger", false);
- break;
- case MAX8997_CHARGER_TYPE_1A:
- extcon_set_cable_state(info->edev, "Fast-charger", false);
- break;
+ /* Check cable state which is either detached or attached */
+ cable_type = max8997_muic_get_cable_type(info,
+ MAX8997_CABLE_GROUP_ADC, &attached);
+
+ switch (cable_type) {
+ case MAX8997_MUIC_ADC_GROUND:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX8997_MUIC_ADC_MHL:
+ extcon_set_cable_state(info->edev, "MHL", attached);
+ break;
+ case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF:
+ case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
+ case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
+ ret = max8997_muic_handle_dock(info, cable_type, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF:
+ ret = max8997_muic_handle_jig_uart(info, attached);
+ break;
+ case MAX8997_MUIC_ADC_REMOTE_S1_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S2_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S3_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S4_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S5_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S6_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S7_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S8_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S9_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S10_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S11_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S12_BUTTON:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_1:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_2:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_3:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_4:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_5:
+ case MAX8997_MUIC_ADC_CEA936_AUDIO:
+ case MAX8997_MUIC_ADC_PHONE_POWERED_DEV:
+ case MAX8997_MUIC_ADC_TTY_CONVERTER:
+ case MAX8997_MUIC_ADC_UART_CABLE:
+ case MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG:
+ case MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG:
+ case MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE:
+ /*
+		 * This cable isn't handled in the general case. If detection of an
+		 * additional cable is required, implement the proper operation for
+		 * when that cable is attached/detached.
+ */
+ dev_info(info->dev,
+ "cable is %s but it isn't used (type:0x%x)\n",
+ attached ? "attached" : "detached", cable_type);
+ return -EAGAIN;
default:
+ dev_err(info->dev,
+ "failed to detect %s unknown cable (type:0x%x)\n",
+ attached ? "attached" : "detached", cable_type);
return -EINVAL;
- break;
}
return 0;
}
-static int max8997_muic_handle_charger_type(struct max8997_muic_info *info,
- enum max8997_muic_charger_type charger_type)
+static int max8997_muic_chg_handler(struct max8997_muic_info *info)
{
- u8 adc;
- int ret;
+ int chg_type;
+ bool attached;
+ int adc;
- ret = max8997_read_reg(info->muic, MAX8997_MUIC_REG_STATUS1, &adc);
- if (ret) {
- dev_err(info->dev, "failed to read muic register\n");
- goto out;
- }
+ chg_type = max8997_muic_get_cable_type(info,
+ MAX8997_CABLE_GROUP_CHG, &attached);
- switch (charger_type) {
+ switch (chg_type) {
case MAX8997_CHARGER_TYPE_NONE:
- ret = max8997_muic_handle_charger_type_detach(info);
break;
case MAX8997_CHARGER_TYPE_USB:
- if ((adc & STATUS1_ADC_MASK) == MAX8997_ADC_OPEN) {
+ adc = info->status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ if ((adc & STATUS1_ADC_MASK) == MAX8997_MUIC_ADC_OPEN) {
max8997_muic_handle_usb(info,
- MAX8997_USB_DEVICE, true);
+ MAX8997_USB_DEVICE, attached);
}
break;
case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state(info->edev, "Charge-downstream", true);
+ extcon_set_cable_state(info->edev, "Charge-downstream", attached);
break;
case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state(info->edev, "TA", true);
+ extcon_set_cable_state(info->edev, "TA", attached);
break;
case MAX8997_CHARGER_TYPE_500MA:
- extcon_set_cable_state(info->edev, "Slow-charger", true);
+ extcon_set_cable_state(info->edev, "Slow-charger", attached);
break;
case MAX8997_CHARGER_TYPE_1A:
- extcon_set_cable_state(info->edev, "Fast-charger", true);
+ extcon_set_cable_state(info->edev, "Fast-charger", attached);
break;
default:
- ret = -EINVAL;
- goto out;
+ dev_err(info->dev,
+ "failed to detect %s unknown chg cable (type:0x%x)\n",
+ attached ? "attached" : "detached", chg_type);
+ return -EINVAL;
}
- info->pre_charger_type = charger_type;
-out:
- return ret;
+ return 0;
}
static void max8997_muic_irq_work(struct work_struct *work)
{
struct max8997_muic_info *info = container_of(work,
struct max8997_muic_info, irq_work);
- u8 status[2];
- u8 adc, chg_type;
int irq_type = 0;
int i, ret;
+ if (!info->edev)
+ return;
+
mutex_lock(&info->mutex);
+ for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
+ if (info->irq == muic_irqs[i].virq)
+ irq_type = muic_irqs[i].irq;
+
ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
- 2, status);
+ 2, info->status);
if (ret) {
dev_err(info->dev, "failed to read muic register\n");
mutex_unlock(&info->mutex);
return;
}
- dev_dbg(info->dev, "%s: STATUS1:0x%x, 2:0x%x\n", __func__,
- status[0], status[1]);
-
- for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
- if (info->irq == muic_irqs[i].virq)
- irq_type = muic_irqs[i].irq;
-
switch (irq_type) {
+ case MAX8997_MUICIRQ_ADCError:
+ case MAX8997_MUICIRQ_ADCLow:
case MAX8997_MUICIRQ_ADC:
- adc = status[0] & STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
-
- max8997_muic_handle_adc(info, adc);
+		/* Handle all cables except for the charger cable */
+ ret = max8997_muic_adc_handler(info);
break;
+ case MAX8997_MUICIRQ_VBVolt:
+ case MAX8997_MUICIRQ_DBChg:
+ case MAX8997_MUICIRQ_DCDTmr:
+ case MAX8997_MUICIRQ_ChgDetRun:
case MAX8997_MUICIRQ_ChgTyp:
- chg_type = status[1] & STATUS2_CHGTYP_MASK;
- chg_type >>= STATUS2_CHGTYP_SHIFT;
-
- max8997_muic_handle_charger_type(info, chg_type);
+ /* Handle charger cable */
+ ret = max8997_muic_chg_handler(info);
+ break;
+ case MAX8997_MUICIRQ_OVP:
break;
default:
dev_info(info->dev, "misc interrupt: irq %d occurred\n",
irq_type);
- break;
+ mutex_unlock(&info->mutex);
+ return;
}
+ if (ret < 0)
+ dev_err(info->dev, "failed to handle MUIC interrupt\n");
+
mutex_unlock(&info->mutex);
return;
@@ -401,29 +593,60 @@ static irqreturn_t max8997_muic_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void max8997_muic_detect_dev(struct max8997_muic_info *info)
+static int max8997_muic_detect_dev(struct max8997_muic_info *info)
{
- int ret;
- u8 status[2], adc, chg_type;
+ int ret = 0;
+ int adc;
+ int chg_type;
+ bool attached;
- ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
- 2, status);
+ mutex_lock(&info->mutex);
+
+ /* Read STATUSx register to detect accessory */
+ ret = max8997_bulk_read(info->muic,
+ MAX8997_MUIC_REG_STATUS1, 2, info->status);
if (ret) {
- dev_err(info->dev, "failed to read muic register\n");
- return;
+ dev_err(info->dev, "failed to read MUIC register\n");
+ mutex_unlock(&info->mutex);
+ return -EINVAL;
}
- dev_info(info->dev, "STATUS1:0x%x, STATUS2:0x%x\n",
- status[0], status[1]);
+ adc = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_ADC,
+ &attached);
+ if (attached && adc != MAX8997_MUIC_ADC_OPEN) {
+ ret = max8997_muic_adc_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect ADC cable\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
- adc = status[0] & STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
+ chg_type = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_CHG,
+ &attached);
+ if (attached && chg_type != MAX8997_CHARGER_TYPE_NONE) {
+ ret = max8997_muic_chg_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect charger cable\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&info->mutex);
+
+ return 0;
+}
- chg_type = status[1] & STATUS2_CHGTYP_MASK;
- chg_type >>= STATUS2_CHGTYP_SHIFT;
+static void max8997_muic_detect_cable_wq(struct work_struct *work)
+{
+ struct max8997_muic_info *info = container_of(to_delayed_work(work),
+ struct max8997_muic_info, wq_detcable);
+ int ret;
- max8997_muic_handle_adc(info, adc);
- max8997_muic_handle_charger_type(info, chg_type);
+ ret = max8997_muic_detect_dev(info);
+ if (ret < 0)
+ pr_err("failed to detect cable type\n");
}
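max8997_muic_detect_dev() only decodes the two cached STATUS bytes before dispatching to the ADC and charger handlers. Using the shift/mask values from the #define block removed at the top of this file, the decode step on its own looks like this (standalone, with made-up example register values):

#include <stdio.h>
#include <stdint.h>

#define STATUS1_ADC_SHIFT       0
#define STATUS1_ADC_MASK        (0x1f << STATUS1_ADC_SHIFT)
#define STATUS2_CHGTYP_SHIFT    0
#define STATUS2_CHGTYP_MASK     (0x7 << STATUS2_CHGTYP_SHIFT)

int main(void)
{
        uint8_t status[2] = { 0x58, 0x41 };     /* example STATUS1/STATUS2 values */
        int adc, chg_type;

        adc = (status[0] & STATUS1_ADC_MASK) >> STATUS1_ADC_SHIFT;
        chg_type = (status[1] & STATUS2_CHGTYP_MASK) >> STATUS2_CHGTYP_SHIFT;

        printf("adc=0x%02x chg_type=0x%x\n", adc, chg_type);   /* adc=0x18 chg_type=0x1 */
        return 0;
}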
static int max8997_muic_probe(struct platform_device *pdev)
@@ -431,6 +654,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent);
struct max8997_platform_data *pdata = dev_get_platdata(max8997->dev);
struct max8997_muic_info *info;
+ int delay_jiffies;
int ret, i;
info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
@@ -459,8 +683,10 @@ static int max8997_muic_probe(struct platform_device *pdev)
}
muic_irq->virq = virq;
- ret = request_threaded_irq(virq, NULL, max8997_muic_irq_handler,
- 0, muic_irq->name, info);
+ ret = request_threaded_irq(virq, NULL,
+ max8997_muic_irq_handler,
+ IRQF_NO_SUSPEND,
+ muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"failed: irq request (IRQ: %d,"
@@ -496,10 +722,42 @@ static int max8997_muic_probe(struct platform_device *pdev)
}
}
- /* Initial device detection */
- max8997_muic_detect_dev(info);
+ /*
+	 * Default USB/UART path: select either UART/USB or AUX_UART/AUX_USB
+	 * as the h/w path of COMP2/COMN1 on the CONTROL1 register.
+ */
+ if (pdata->muic_pdata->path_uart)
+ info->path_uart = pdata->muic_pdata->path_uart;
+ else
+ info->path_uart = CONTROL1_SW_UART;
+
+ if (pdata->muic_pdata->path_usb)
+ info->path_usb = pdata->muic_pdata->path_usb;
+ else
+ info->path_usb = CONTROL1_SW_USB;
+
+ /* Set initial path for UART */
+ max8997_muic_set_path(info, info->path_uart, true);
+
+ /* Set ADC debounce time */
+ max8997_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
+
+ /*
+	 * Detect the accessory after platform initialization has completed.
+	 *
+	 * - Use a delayed workqueue to detect the cable state and then
+	 * notify it to the platform/notifiee through a uevent. Once the
+	 * platform has finished booting, the extcon provider driver should
+	 * report the cable state to the upper layer.
+ */
+ INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
+ if (pdata->muic_pdata->detcable_delay_ms)
+ delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms);
+ else
+ delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+ schedule_delayed_work(&info->wq_detcable, delay_jiffies);
- return ret;
+ return 0;
err_irq:
while (--i >= 0)
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index f8d22872d753..27ac423ab25e 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -487,27 +487,28 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
static int add_client_resource(struct client *client,
struct client_resource *resource, gfp_t gfp_mask)
{
+ bool preload = gfp_mask & __GFP_WAIT;
unsigned long flags;
int ret;
- retry:
- if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
- return -ENOMEM;
-
+ if (preload)
+ idr_preload(gfp_mask);
spin_lock_irqsave(&client->lock, flags);
+
if (client->in_shutdown)
ret = -ECANCELED;
else
- ret = idr_get_new(&client->resource_idr, resource,
- &resource->handle);
+ ret = idr_alloc(&client->resource_idr, resource, 0, 0,
+ GFP_NOWAIT);
if (ret >= 0) {
+ resource->handle = ret;
client_get(client);
schedule_if_iso_resource(resource);
}
- spin_unlock_irqrestore(&client->lock, flags);
- if (ret == -EAGAIN)
- goto retry;
+ spin_unlock_irqrestore(&client->lock, flags);
+ if (preload)
+ idr_preload_end();
return ret < 0 ? ret : 0;
}
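This hunk is the stock conversion from idr_pre_get()/idr_get_new() to idr_preload() plus idr_alloc(): the preload, which may sleep, happens outside the spinlock, and the allocation inside the lock uses GFP_NOWAIT. A minimal sketch of the new pattern with placeholder my_idr/my_lock names (kernel-only code, shown for illustration; the driver above uses spin_lock_irqsave and only preloads for sleeping gfp masks):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int my_add(void *ptr, gfp_t gfp_mask)
{
        int id;

        idr_preload(gfp_mask);          /* typically GFP_KERNEL; done outside the lock */
        spin_lock(&my_lock);
        id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT); /* 0, 0 = any id >= 0 */
        spin_unlock(&my_lock);
        idr_preload_end();

        return id;                      /* >= 0 on success, -errno on failure */
}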
@@ -1779,7 +1780,6 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
idr_for_each(&client->resource_idr, shutdown_resource, client);
- idr_remove_all(&client->resource_idr);
idr_destroy(&client->resource_idr);
list_for_each_entry_safe(event, next_event, &client->event_list, link)
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 3873d535b28d..03ce7d980c6a 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -1017,12 +1017,11 @@ static void fw_device_init(struct work_struct *work)
fw_device_get(device);
down_write(&fw_device_rwsem);
- ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
- idr_get_new(&fw_device_idr, device, &minor) :
- -ENOMEM;
+ minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS,
+ GFP_KERNEL);
up_write(&fw_device_rwsem);
- if (ret < 0)
+ if (minor < 0)
goto error;
device->device.bus = &fw_bus_type;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 6ce6e07c38c1..45912e6e0ac2 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -329,7 +329,7 @@ module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
- ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
+ ", AR/selfID endianness = " __stringify(QUIRK_BE_HEADERS)
", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
", disable MSI = " __stringify(QUIRK_NO_MSI)
", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index fed08b661711..7320bf891706 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -79,6 +79,7 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pstore.h>
+#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/ramfs.h>
@@ -908,6 +909,48 @@ static struct inode *efivarfs_get_inode(struct super_block *sb,
return inode;
}
+/*
+ * Return true if 'str' is a valid efivarfs filename of the form,
+ *
+ * VariableName-12345678-1234-1234-1234-1234567891bc
+ */
+static bool efivarfs_valid_name(const char *str, int len)
+{
+ static const char dashes[GUID_LEN] = {
+ [8] = 1, [13] = 1, [18] = 1, [23] = 1
+ };
+ const char *s = str + len - GUID_LEN;
+ int i;
+
+ /*
+ * We need a GUID, plus at least one letter for the variable name,
+ * plus the '-' separator
+ */
+ if (len < GUID_LEN + 2)
+ return false;
+
+ /* GUID should be right after the first '-' */
+ if (s - 1 != strchr(str, '-'))
+ return false;
+
+ /*
+ * Validate that 's' is of the correct format, e.g.
+ *
+ * 12345678-1234-1234-1234-123456789abc
+ */
+ for (i = 0; i < GUID_LEN; i++) {
+ if (dashes[i]) {
+ if (*s++ != '-')
+ return false;
+ } else {
+ if (!isxdigit(*s++))
+ return false;
+ }
+ }
+
+ return true;
+}
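The same VariableName-GUID shape can be exercised in user space. The following is a standalone re-implementation of the check above, for illustration only (GUID_LEN is assumed to be 36, the length of "12345678-1234-1234-1234-123456789abc"):

#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <stdbool.h>

#define GUID_LEN 36

static bool valid_name(const char *str)
{
        static const char dashes[GUID_LEN] = { [8] = 1, [13] = 1, [18] = 1, [23] = 1 };
        size_t len = strlen(str);
        const char *s;
        int i;

        /* need a variable name, the '-' separator, then the GUID */
        if (len < GUID_LEN + 2)
                return false;

        s = str + len - GUID_LEN;

        /* the GUID must start right after the first '-' in the name */
        if (s - 1 != strchr(str, '-'))
                return false;

        for (i = 0; i < GUID_LEN; i++) {
                if (dashes[i] ? (*s++ != '-') : !isxdigit((unsigned char)*s++))
                        return false;
        }
        return true;
}

int main(void)
{
        printf("%d\n", valid_name("Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c")); /* 1 */
        printf("%d\n", valid_name("not-a-valid-guid"));                              /* 0 */
        return 0;
}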
+
static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
{
guid->b[0] = hex_to_bin(str[6]) << 4 | hex_to_bin(str[7]);
@@ -936,11 +979,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
struct efivar_entry *var;
int namelen, i = 0, err = 0;
- /*
- * We need a GUID, plus at least one letter for the variable name,
- * plus the '-' separator
- */
- if (dentry->d_name.len < GUID_LEN + 2)
+ if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
return -EINVAL;
inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
@@ -1012,6 +1051,84 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
return -EINVAL;
};
+/*
+ * Compare two efivarfs file names.
+ *
+ * An efivarfs filename is composed of two parts,
+ *
+ * 1. A case-sensitive variable name
+ * 2. A case-insensitive GUID
+ *
+ * So we need to perform a case-sensitive match on part 1 and a
+ * case-insensitive match on part 2.
+ */
+static int efivarfs_d_compare(const struct dentry *parent, const struct inode *pinode,
+ const struct dentry *dentry, const struct inode *inode,
+ unsigned int len, const char *str,
+ const struct qstr *name)
+{
+ int guid = len - GUID_LEN;
+
+ if (name->len != len)
+ return 1;
+
+ /* Case-sensitive compare for the variable name */
+ if (memcmp(str, name->name, guid))
+ return 1;
+
+ /* Case-insensitive compare for the GUID */
+ return strncasecmp(name->name + guid, str + guid, GUID_LEN);
+}
+
+static int efivarfs_d_hash(const struct dentry *dentry,
+ const struct inode *inode, struct qstr *qstr)
+{
+ unsigned long hash = init_name_hash();
+ const unsigned char *s = qstr->name;
+ unsigned int len = qstr->len;
+
+ if (!efivarfs_valid_name(s, len))
+ return -EINVAL;
+
+ while (len-- > GUID_LEN)
+ hash = partial_name_hash(*s++, hash);
+
+ /* GUID is case-insensitive. */
+ while (len--)
+ hash = partial_name_hash(tolower(*s++), hash);
+
+ qstr->hash = end_name_hash(hash);
+ return 0;
+}
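d_hash() and d_compare() must agree: any two names that d_compare() treats as equal have to hash to the same value, which is why only the GUID tail is folded to lower case before hashing. The standalone demo below shows the effect with a simple stand-in rolling hash (partial_name_hash() is a kernel helper, so it is not used here):

#include <stdio.h>
#include <string.h>
#include <ctype.h>

#define GUID_LEN 36

/* Placeholder rolling hash; stands in for the kernel's partial_name_hash(). */
static unsigned long name_hash(const char *s)
{
        size_t len = strlen(s);
        unsigned long hash = 0;

        /* Variable-name part: case-sensitive. */
        while (len-- > GUID_LEN)
                hash = hash * 31 + (unsigned char)*s++;
        /* GUID part: case-insensitive, so fold to lower case. */
        while (len--)
                hash = hash * 31 + (unsigned char)tolower((unsigned char)*s++);
        return hash;
}

int main(void)
{
        const char *a = "Boot0000-8BE4DF61-93CA-11D2-AA0D-00E098032B8C";
        const char *b = "Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c";
        const char *c = "boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c";

        printf("a==b: %d, a==c: %d\n",
               name_hash(a) == name_hash(b), name_hash(a) == name_hash(c)); /* 1, 0 */
        return 0;
}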
+
+/*
+ * Retaining negative dentries for an in-memory filesystem just wastes
+ * memory and lookup time: arrange for them to be deleted immediately.
+ */
+static int efivarfs_delete_dentry(const struct dentry *dentry)
+{
+ return 1;
+}
+
+static struct dentry_operations efivarfs_d_ops = {
+ .d_compare = efivarfs_d_compare,
+ .d_hash = efivarfs_d_hash,
+ .d_delete = efivarfs_delete_dentry,
+};
+
+static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
+{
+ struct qstr q;
+
+ q.name = name;
+ q.len = strlen(name);
+
+ if (efivarfs_d_hash(NULL, NULL, &q))
+ return NULL;
+
+ return d_alloc(parent, &q);
+}
+
static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode = NULL;
@@ -1027,6 +1144,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = EFIVARFS_MAGIC;
sb->s_op = &efivarfs_ops;
+ sb->s_d_op = &efivarfs_d_ops;
sb->s_time_gran = 1;
inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
@@ -1067,7 +1185,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
if (!inode)
goto fail_name;
- dentry = d_alloc_name(root, name);
+ dentry = efivarfs_alloc_dentry(root, name);
if (!dentry)
goto fail_inode;
@@ -1084,7 +1202,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
mutex_lock(&inode->i_mutex);
inode->i_private = entry;
- i_size_write(inode, size+4);
+ i_size_write(inode, size + sizeof(entry->var.Attributes));
mutex_unlock(&inode->i_mutex);
d_add(dentry, inode);
}
@@ -1117,8 +1235,20 @@ static struct file_system_type efivarfs_type = {
.kill_sb = efivarfs_kill_sb,
};
+/*
+ * Handle negative dentry.
+ */
+static struct dentry *efivarfs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ if (dentry->d_name.len > NAME_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
+ d_add(dentry, NULL);
+ return NULL;
+}
+
static const struct inode_operations efivarfs_dir_inode_operations = {
- .lookup = simple_lookup,
+ .lookup = efivarfs_lookup,
.unlink = efivarfs_unlink,
.create = efivarfs_create,
};
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 90723e65b081..0b5b5f619c75 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
+#include <linux/mm.h>
/*
* Data types ------------------------------------------------------------------
@@ -52,6 +53,9 @@ static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type);
+
/*
* Static data -----------------------------------------------------------------
*/
@@ -79,7 +83,52 @@ static const struct sysfs_ops memmap_attr_ops = {
.show = memmap_attr_show,
};
-static struct kobj_type memmap_ktype = {
+/* Firmware memory map entries. */
+static LIST_HEAD(map_entries);
+static DEFINE_SPINLOCK(map_entries_lock);
+
+/*
+ * For memory hotplug, there is no way to free memory map entries allocated
+ * by bootmem after the system is up. So when we hot-remove memory whose
+ * map entry is allocated by bootmem, we need to remember the storage and
+ * reuse it when the memory is hot-added again.
+ */
+static LIST_HEAD(map_entries_bootmem);
+static DEFINE_SPINLOCK(map_entries_bootmem_lock);
+
+
+static inline struct firmware_map_entry *
+to_memmap_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct firmware_map_entry, kobj);
+}
+
+static void __meminit release_firmware_map_entry(struct kobject *kobj)
+{
+ struct firmware_map_entry *entry = to_memmap_entry(kobj);
+
+ if (PageReserved(virt_to_page(entry))) {
+ /*
+ * Remember the storage allocated by bootmem, and reuse it when
+ * the memory is hot-added again. The entry will be added to
+ * map_entries_bootmem here, and deleted from &map_entries in
+ * firmware_map_remove_entry().
+ */
+ if (firmware_map_find_entry(entry->start, entry->end,
+ entry->type)) {
+ spin_lock(&map_entries_bootmem_lock);
+ list_add(&entry->list, &map_entries_bootmem);
+ spin_unlock(&map_entries_bootmem_lock);
+ }
+
+ return;
+ }
+
+ kfree(entry);
+}
+
+static struct kobj_type __refdata memmap_ktype = {
+ .release = release_firmware_map_entry,
.sysfs_ops = &memmap_attr_ops,
.default_attrs = def_attrs,
};
@@ -88,13 +137,6 @@ static struct kobj_type memmap_ktype = {
* Registration functions ------------------------------------------------------
*/
-/*
- * Firmware memory map entries. No locking is needed because the
- * firmware_map_add() and firmware_map_add_early() functions are called
- * in firmware initialisation code in one single thread of execution.
- */
-static LIST_HEAD(map_entries);
-
/**
* firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
* @start: Start of the memory range.
@@ -118,11 +160,25 @@ static int firmware_map_add_entry(u64 start, u64 end,
INIT_LIST_HEAD(&entry->list);
kobject_init(&entry->kobj, &memmap_ktype);
+ spin_lock(&map_entries_lock);
list_add_tail(&entry->list, &map_entries);
+ spin_unlock(&map_entries_lock);
return 0;
}
+/**
+ * firmware_map_remove_entry() - Does the real work to remove a firmware
+ * memmap entry.
+ * @entry: removed entry.
+ *
+ * The caller must hold map_entries_lock, and release it properly.
+ **/
+static inline void firmware_map_remove_entry(struct firmware_map_entry *entry)
+{
+ list_del(&entry->list);
+}
+
/*
* Add memmap entry on sysfs
*/
@@ -144,6 +200,78 @@ static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
return 0;
}
+/*
+ * Remove memmap entry on sysfs
+ */
+static inline void remove_sysfs_fw_map_entry(struct firmware_map_entry *entry)
+{
+ kobject_put(&entry->kobj);
+}
+
+/*
+ * firmware_map_find_entry_in_list() - Search memmap entry in a given list.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ * @list: The list in which to find the entry.
+ *
+ * This function finds the memmap entry of a given memory range in a
+ * given list. The caller must hold map_entries_lock, and must not release
+ * the lock until processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_in_list(u64 start, u64 end, const char *type,
+ struct list_head *list)
+{
+ struct firmware_map_entry *entry;
+
+ list_for_each_entry(entry, list, list)
+ if ((entry->start == start) && (entry->end == end) &&
+ (!strcmp(entry->type, type))) {
+ return entry;
+ }
+
+ return NULL;
+}
+
+/*
+ * firmware_map_find_entry() - Search memmap entry in map_entries.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ *
+ * This function finds the memmap entry of a given memory range.
+ * The caller must hold map_entries_lock, and must not release the lock
+ * until processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type)
+{
+ return firmware_map_find_entry_in_list(start, end, type, &map_entries);
+}
+
+/*
+ * firmware_map_find_entry_bootmem() - Search memmap entry in map_entries_bootmem.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ *
+ * This function is similar to firmware_map_find_entry() except that it finds
+ * the given entry in map_entries_bootmem.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_bootmem(u64 start, u64 end, const char *type)
+{
+ return firmware_map_find_entry_in_list(start, end, type,
+ &map_entries_bootmem);
+}
+
/**
* firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
* memory hotplug.
@@ -161,9 +289,19 @@ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
- entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
- if (!entry)
- return -ENOMEM;
+ entry = firmware_map_find_entry_bootmem(start, end, type);
+ if (!entry) {
+ entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+ } else {
+ /* Reuse storage allocated by bootmem. */
+ spin_lock(&map_entries_bootmem_lock);
+ list_del(&entry->list);
+ spin_unlock(&map_entries_bootmem_lock);
+
+ memset(entry, 0, sizeof(*entry));
+ }
firmware_map_add_entry(start, end, type, entry);
/* create the memmap entry */
@@ -196,6 +334,36 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type)
return firmware_map_add_entry(start, end, type, entry);
}
+/**
+ * firmware_map_remove() - remove a firmware mapping entry
+ * @start: Start of the memory range.
+ * @end: End of the memory range.
+ * @type: Type of the memory range.
+ *
+ * removes a firmware mapping entry.
+ *
+ * Returns 0 on success, or -EINVAL if no entry.
+ **/
+int __meminit firmware_map_remove(u64 start, u64 end, const char *type)
+{
+ struct firmware_map_entry *entry;
+
+ spin_lock(&map_entries_lock);
+ entry = firmware_map_find_entry(start, end - 1, type);
+ if (!entry) {
+ spin_unlock(&map_entries_lock);
+ return -EINVAL;
+ }
+
+ firmware_map_remove_entry(entry);
+ spin_unlock(&map_entries_lock);
+
+ /* remove the memmap entry */
+ remove_sysfs_fw_map_entry(entry);
+
+ return 0;
+}
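The hot-plug handling above boils down to a small object-reuse rule: entries whose storage came from bootmem can never be kfree()d, so on removal they are parked on a second list and handed back out on the next hot-add. A user-space toy illustrating the same rule, where the is_static flag and the pool names stand in for the PageReserved() check and the two kernel lists:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct entry {
        unsigned long long start, end;
        bool is_static;         /* stands in for PageReserved(virt_to_page(entry)) */
        struct entry *next;     /* parked-for-reuse list */
};

static struct entry bootmem_entry = { .is_static = true }; /* allocated "at boot" */
static struct entry *reuse_pool;

static void entry_release(struct entry *e)
{
        if (e->is_static) {             /* can't free: park it for later reuse */
                e->next = reuse_pool;
                reuse_pool = e;
        } else {
                free(e);
        }
}

static struct entry *entry_get(void)
{
        struct entry *e = reuse_pool;

        if (e) {                        /* reuse parked bootmem storage first */
                reuse_pool = e->next;
                return e;
        }
        return calloc(1, sizeof(*e));   /* otherwise allocate normally */
}

int main(void)
{
        entry_release(&bootmem_entry);                  /* hot-remove */
        printf("%d\n", entry_get() == &bootmem_entry);  /* hot-add reuses it: prints 1 */
        return 0;
}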
+
/*
* Sysfs functions -------------------------------------------------------------
*/
@@ -217,8 +385,10 @@ static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
}
-#define to_memmap_attr(_attr) container_of(_attr, struct memmap_attribute, attr)
-#define to_memmap_entry(obj) container_of(obj, struct firmware_map_entry, kobj)
+static inline struct memmap_attribute *to_memmap_attr(struct attribute *attr)
+{
+ return container_of(attr, struct memmap_attribute, attr);
+}
static ssize_t memmap_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1855a6fd2b0a..93aaadf99f28 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -30,6 +30,9 @@ config ARCH_REQUIRE_GPIOLIB
Selecting this from the architecture code will cause the gpiolib
code to always get built in.
+config GPIO_DEVRES
+ def_bool y
+ depends on HAS_IOMEM
menuconfig GPIOLIB
@@ -277,7 +280,7 @@ config GPIO_ICH
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
select MFD_VX855
help
@@ -298,6 +301,14 @@ config GPIO_GE_FPGA
and write pin state) for GPIO implemented in a number of GE single
board computers.
+config GPIO_LYNXPOINT
+ bool "Intel Lynxpoint GPIO support"
+ depends on ACPI
+ select IRQ_DOMAIN
+ help
+	  Driver for GPIO functionality on the Intel Lynxpoint PCH chipset.
+	  Requires ACPI device enumeration code to set up a platform device.
+
comment "I2C GPIO expanders:"
config GPIO_ARIZONA
@@ -599,7 +610,7 @@ config GPIO_TIMBERDALE
config GPIO_RDC321X
tristate "RDC R-321x GPIO support"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
select MFD_RDC321X
help
@@ -657,6 +668,13 @@ config GPIO_JANZ_TTL
This driver provides support for driving the pins in output
mode only. Input mode is not supported.
+config GPIO_PALMAS
+ bool "TI PALMAS series PMICs GPIO"
+ depends on MFD_PALMAS
+ help
+ Select this option to enable GPIO driver for the TI PALMAS
+ series chip family.
+
config GPIO_TPS6586X
bool "TPS6586X GPIO"
depends on MFD_TPS6586X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 45a388c21d04..22e07bc9fcb5 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -2,7 +2,8 @@
ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
-obj-$(CONFIG_GPIOLIB) += gpiolib.o devres.o
+obj-$(CONFIG_GPIO_DEVRES) += devres.o
+obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
+obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
@@ -68,6 +70,7 @@ obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o
+obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index bdc8302e711a..deca78f99316 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -299,8 +299,9 @@ static int em_gio_probe(struct platform_device *pdev)
irq_chip->irq_set_type = em_gio_irq_set_type;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE;
- p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
+ p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
pdata->number_of_pins,
+ pdata->irq_base,
&em_gio_irq_domain_ops, p);
if (!p->irq_domain) {
ret = -ENXIO;
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index e77b2b3e94af..634c3d37f7b5 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -71,10 +71,12 @@ struct lnw_gpio {
struct irq_domain *domain;
};
+#define to_lnw_priv(chip) container_of(chip, struct lnw_gpio, chip)
+
static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
enum GPIO_REG reg_type)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
unsigned nreg = chip->ngpio / 32;
u8 reg = offset / 32;
void __iomem *ptr;
@@ -86,7 +88,7 @@ static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
static void __iomem *gpio_reg_2bit(struct gpio_chip *chip, unsigned offset,
enum GPIO_REG reg_type)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
unsigned nreg = chip->ngpio / 32;
u8 reg = offset / 16;
void __iomem *ptr;
@@ -130,7 +132,7 @@ static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
u32 value;
unsigned long flags;
@@ -153,7 +155,7 @@ static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int lnw_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
unsigned long flags;
@@ -176,7 +178,7 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
return irq_create_mapping(lnw->domain, offset);
}
@@ -234,6 +236,8 @@ static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb), .driver_data = 96 },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7), .driver_data = 96 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
@@ -299,17 +303,6 @@ static const struct irq_domain_ops lnw_gpio_irq_ops = {
.xlate = irq_domain_xlate_twocell,
};
-#ifdef CONFIG_PM
-static int lnw_gpio_runtime_resume(struct device *dev)
-{
- return 0;
-}
-
-static int lnw_gpio_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
static int lnw_gpio_runtime_idle(struct device *dev)
{
int err = pm_schedule_suspend(dev, 500);
@@ -320,16 +313,8 @@ static int lnw_gpio_runtime_idle(struct device *dev)
return -EBUSY;
}
-#else
-#define lnw_gpio_runtime_suspend NULL
-#define lnw_gpio_runtime_resume NULL
-#define lnw_gpio_runtime_idle NULL
-#endif
-
static const struct dev_pm_ops lnw_gpio_pm_ops = {
- .runtime_suspend = lnw_gpio_runtime_suspend,
- .runtime_resume = lnw_gpio_runtime_resume,
- .runtime_idle = lnw_gpio_runtime_idle,
+ SET_RUNTIME_PM_OPS(NULL, NULL, lnw_gpio_runtime_idle)
};
static int lnw_gpio_probe(struct pci_dev *pdev,
@@ -349,7 +334,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
retval = pci_request_regions(pdev, "langwell_gpio");
if (retval) {
dev_err(&pdev->dev, "error requesting resources\n");
- goto err2;
+ goto err_pci_req_region;
}
/* get the gpio_base from bar1 */
start = pci_resource_start(pdev, 1);
@@ -358,7 +343,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
if (!base) {
dev_err(&pdev->dev, "error mapping bar1\n");
retval = -EFAULT;
- goto err3;
+ goto err_ioremap;
}
gpio_base = *((u32 *)base + 1);
/* release the IO mapping, since we already get the info from bar1 */
@@ -370,21 +355,21 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
if (!base) {
dev_err(&pdev->dev, "error mapping bar0\n");
retval = -EFAULT;
- goto err3;
+ goto err_ioremap;
}
- lnw = devm_kzalloc(&pdev->dev, sizeof(struct lnw_gpio), GFP_KERNEL);
+ lnw = devm_kzalloc(&pdev->dev, sizeof(*lnw), GFP_KERNEL);
if (!lnw) {
dev_err(&pdev->dev, "can't allocate langwell_gpio chip data\n");
retval = -ENOMEM;
- goto err3;
+ goto err_ioremap;
}
lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
&lnw_gpio_irq_ops, lnw);
if (!lnw->domain) {
retval = -ENOMEM;
- goto err3;
+ goto err_ioremap;
}
lnw->reg_base = base;
@@ -403,7 +388,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
retval = gpiochip_add(&lnw->chip);
if (retval) {
dev_err(&pdev->dev, "langwell gpiochip_add error %d\n", retval);
- goto err3;
+ goto err_ioremap;
}
lnw_irq_init_hw(lnw);
@@ -418,9 +403,9 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
return 0;
-err3:
+err_ioremap:
pci_release_regions(pdev);
-err2:
+err_pci_req_region:
pci_disable_device(pdev);
return retval;
}
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
new file mode 100644
index 000000000000..3472b05ac512
--- /dev/null
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -0,0 +1,469 @@
+/*
+ * GPIO controller driver for the Intel Lynxpoint PCH chipset
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * Author: Mathias Nyman <mathias.nyman@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+/* LynxPoint chipset has support for 94 gpio pins */
+
+#define LP_NUM_GPIO 94
+
+/* Bitmapped register offsets */
+#define LP_ACPI_OWNED 0x00 /* Bitmap, set by bios, 0: pin reserved for ACPI */
+#define LP_GC 0x7C /* set APIC IRQ to IRQ14 or IRQ15 for all pins */
+#define LP_INT_STAT 0x80
+#define LP_INT_ENABLE 0x90
+
+/* Each pin has two 32 bit config registers, starting at 0x100 */
+#define LP_CONFIG1 0x100
+#define LP_CONFIG2 0x104
+
+/* LP_CONFIG1 reg bits */
+#define OUT_LVL_BIT BIT(31)
+#define IN_LVL_BIT BIT(30)
+#define TRIG_SEL_BIT BIT(4) /* 0: Edge, 1: Level */
+#define INT_INV_BIT BIT(3) /* Invert interrupt triggering */
+#define DIR_BIT BIT(2) /* 0: Output, 1: Input */
+#define USE_SEL_BIT BIT(0) /* 0: Native, 1: GPIO */
+
+/* LP_CONFIG2 reg bits */
+#define GPINDIS_BIT BIT(2) /* disable input sensing */
+#define GPIWP_BIT (BIT(0) | BIT(1)) /* weak pull options */
+
+struct lp_gpio {
+ struct gpio_chip chip;
+ struct irq_domain *domain;
+ struct platform_device *pdev;
+ spinlock_t lock;
+ unsigned long reg_base;
+};
+
+/*
+ * Lynxpoint gpios are controlled through both bitmapped registers and
+ * per-gpio specific registers. The bitmapped registers are in chunks of
+ * 3 x 32bit registers to cover all 94 gpios.
+ *
+ * The per-gpio specific registers consist of two 32bit registers per gpio
+ * (LP_CONFIG1 and LP_CONFIG2); with 94 gpios there is a total of
+ * 188 config registers.
+ *
+ * A simplified view of the register layout looks like this:
+ *
+ * LP_ACPI_OWNED[31:0] gpio ownerships for gpios 0-31 (bitmapped registers)
+ * LP_ACPI_OWNED[63:32] gpio ownerships for gpios 32-63
+ * LP_ACPI_OWNED[93:64] gpio ownerships for gpios 64-93
+ * ...
+ * LP_INT_ENABLE[31:0] ...
+ * LP_INT_ENABLE[63:32] ...
+ * LP0_CONFIG1 (gpio 0) config1 reg for gpio 0 (per gpio registers)
+ * LP0_CONFIG2 (gpio 0) config2 reg for gpio 0
+ * LP1_CONFIG1 (gpio 1) config1 reg for gpio 1
+ * LP1_CONFIG2 (gpio 1) config2 reg for gpio 1
+ * LP2_CONFIG1 (gpio 2) ...
+ * LP2_CONFIG2 (gpio 2) ...
+ * ...
+ * LP94_CONFIG1 (gpio 94) ...
+ * LP94_CONFIG2 (gpio 94) ...
+ */
+
+static unsigned long lp_gpio_reg(struct gpio_chip *chip, unsigned offset,
+ int reg)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ int reg_offset;
+
+ if (reg == LP_CONFIG1 || reg == LP_CONFIG2)
+ /* per gpio specific config registers */
+ reg_offset = offset * 8;
+ else
+ /* bitmapped registers */
+ reg_offset = (offset / 32) * 4;
+
+ return lg->reg_base + reg + reg_offset;
+}
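lp_gpio_reg() encodes the layout documented in the comment block above: per-gpio config registers advance by 8 bytes per pin, bitmapped registers by 4 bytes per group of 32 pins. A standalone sketch of that address math, with the register offsets copied from the driver and an arbitrary base address for illustration:

#include <stdio.h>

#define LP_INT_STAT     0x80
#define LP_CONFIG1      0x100
#define LP_CONFIG2      0x104

static unsigned long lp_reg(unsigned long base, unsigned offset, int reg)
{
        int reg_offset;

        if (reg == LP_CONFIG1 || reg == LP_CONFIG2)
                reg_offset = offset * 8;        /* two 32-bit config regs per pin */
        else
                reg_offset = (offset / 32) * 4; /* one 32-bit bitmap reg per 32 pins */

        return base + reg + reg_offset;
}

int main(void)
{
        /* gpio 40: CONFIG1 at base+0x100+40*8, INT_STAT bitmap word 1 at base+0x84 */
        printf("0x%lx 0x%lx\n",
               lp_reg(0x1000, 40, LP_CONFIG1), lp_reg(0x1000, 40, LP_INT_STAT));
        return 0;
}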
+
+static int lp_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ unsigned long conf2 = lp_gpio_reg(chip, offset, LP_CONFIG2);
+ unsigned long acpi_use = lp_gpio_reg(chip, offset, LP_ACPI_OWNED);
+
+ pm_runtime_get(&lg->pdev->dev); /* should we put if failed */
+
+ /* Fail if BIOS reserved pin for ACPI use */
+ if (!(inl(acpi_use) & BIT(offset % 32))) {
+ dev_err(&lg->pdev->dev, "gpio %d reserved for ACPI\n", offset);
+ return -EBUSY;
+ }
+ /* Fail if pin is in alternate function mode (not GPIO mode) */
+ if (!(inl(reg) & USE_SEL_BIT))
+ return -ENODEV;
+
+ /* enable input sensing */
+ outl(inl(conf2) & ~GPINDIS_BIT, conf2);
+
+
+ return 0;
+}
+
+static void lp_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ unsigned long conf2 = lp_gpio_reg(chip, offset, LP_CONFIG2);
+
+ /* disable input sensing */
+ outl(inl(conf2) | GPINDIS_BIT, conf2);
+
+ pm_runtime_put(&lg->pdev->dev);
+}
+
+static int lp_irq_type(struct irq_data *d, unsigned type)
+{
+ struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
+ u32 hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 value;
+ unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1);
+
+ if (hwirq >= lg->chip.ngpio)
+ return -EINVAL;
+
+ spin_lock_irqsave(&lg->lock, flags);
+ value = inl(reg);
+
+ /* set both TRIG_SEL and INV bits to 0 for rising edge */
+ if (type & IRQ_TYPE_EDGE_RISING)
+ value &= ~(TRIG_SEL_BIT | INT_INV_BIT);
+
+ /* TRIG_SEL bit 0, INV bit 1 for falling edge */
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ value = (value | INT_INV_BIT) & ~TRIG_SEL_BIT;
+
+ /* TRIG_SEL bit 1, INV bit 0 for level low */
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ value = (value | TRIG_SEL_BIT) & ~INT_INV_BIT;
+
+ /* TRIG_SEL bit 1, INV bit 1 for level high */
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ value |= TRIG_SEL_BIT | INT_INV_BIT;
+
+ outl(value, reg);
+ spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static int lp_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ return inl(reg) & IN_LVL_BIT;
+}
+
+static void lp_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lg->lock, flags);
+
+ if (value)
+ outl(inl(reg) | OUT_LVL_BIT, reg);
+ else
+ outl(inl(reg) & ~OUT_LVL_BIT, reg);
+
+ spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lg->lock, flags);
+ outl(inl(reg) | DIR_BIT, reg);
+ spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static int lp_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ unsigned long flags;
+
+ lp_gpio_set(chip, offset, value);
+
+ spin_lock_irqsave(&lg->lock, flags);
+ outl(inl(reg) & ~DIR_BIT, reg);
+ spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static int lp_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
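+ /* create (or reuse) the virq mapping for this pin in the linear domain */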
+ return irq_create_mapping(lg->domain, offset);
+}
+
+static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ u32 base, pin, mask;
+ unsigned long reg, pending;
+ unsigned virq;
+
+ /* check from GPIO controller which pin triggered the interrupt */
+ for (base = 0; base < lg->chip.ngpio; base += 32) {
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+
+ while ((pending = inl(reg))) {
+ pin = __ffs(pending);
+ mask = BIT(pin);
+ /* Clear before handling so we don't lose an edge */
+ outl(mask, reg);
+ virq = irq_find_mapping(lg->domain, base + pin);
+ generic_handle_irq(virq);
+ }
+ }
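+ /* signal EOI to the parent interrupt controller */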
+ chip->irq_eoi(data);
+}
+
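+/*
+ * This driver gates pin interrupts only through LP_INT_ENABLE (see
+ * lp_irq_enable()/lp_irq_disable() below), so mask/unmask are left as no-ops.
+ */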
+static void lp_irq_unmask(struct irq_data *d)
+{
+}
+
+static void lp_irq_mask(struct irq_data *d)
+{
+}
+
+static void lp_irq_enable(struct irq_data *d)
+{
+ struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
+ u32 hwirq = irqd_to_hwirq(d);
+ unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lg->lock, flags);
+ outl(inl(reg) | BIT(hwirq % 32), reg);
+ spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static void lp_irq_disable(struct irq_data *d)
+{
+ struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
+ u32 hwirq = irqd_to_hwirq(d);
+ unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lg->lock, flags);
+ outl(inl(reg) & ~BIT(hwirq % 32), reg);
+ spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static struct irq_chip lp_irqchip = {
+ .name = "LP-GPIO",
+ .irq_mask = lp_irq_mask,
+ .irq_unmask = lp_irq_unmask,
+ .irq_enable = lp_irq_enable,
+ .irq_disable = lp_irq_disable,
+ .irq_set_type = lp_irq_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
+{
+ unsigned long reg;
+ unsigned base;
+
+ for (base = 0; base < lg->chip.ngpio; base += 32) {
+ /* disable gpio pin interrupts */
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
+ outl(0, reg);
+ /* Clear interrupt status register */
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+ outl(0xffffffff, reg);
+ }
+}
+
+static int lp_gpio_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct lp_gpio *lg = d->host_data;
+
+ irq_set_chip_and_handler_name(virq, &lp_irqchip, handle_simple_irq,
+ "demux");
+ irq_set_chip_data(virq, lg);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static const struct irq_domain_ops lp_gpio_irq_ops = {
+ .map = lp_gpio_irq_map,
+};
+
+static int lp_gpio_probe(struct platform_device *pdev)
+{
+ struct lp_gpio *lg;
+ struct gpio_chip *gc;
+ struct resource *io_rc, *irq_rc;
+ struct device *dev = &pdev->dev;
+ unsigned long reg_len;
+ unsigned hwirq;
+ int ret = -ENODEV;
+
+ lg = devm_kzalloc(dev, sizeof(struct lp_gpio), GFP_KERNEL);
+ if (!lg) {
+ dev_err(dev, "can't allocate lp_gpio chip data\n");
+ return -ENOMEM;
+ }
+
+ lg->pdev = pdev;
+ platform_set_drvdata(pdev, lg);
+
+ io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (!io_rc) {
+ dev_err(dev, "missing IO resources\n");
+ return -EINVAL;
+ }
+
+ lg->reg_base = io_rc->start;
+ reg_len = resource_size(io_rc);
+
+ if (!devm_request_region(dev, lg->reg_base, reg_len, "lp-gpio")) {
+ dev_err(dev, "failed requesting IO region 0x%x\n",
+ (unsigned int)lg->reg_base);
+ return -EBUSY;
+ }
+
+ spin_lock_init(&lg->lock);
+
+ gc = &lg->chip;
+ gc->label = dev_name(dev);
+ gc->owner = THIS_MODULE;
+ gc->request = lp_gpio_request;
+ gc->free = lp_gpio_free;
+ gc->direction_input = lp_gpio_direction_input;
+ gc->direction_output = lp_gpio_direction_output;
+ gc->get = lp_gpio_get;
+ gc->set = lp_gpio_set;
+ gc->base = -1;
+ gc->ngpio = LP_NUM_GPIO;
+ gc->can_sleep = 0;
+ gc->dev = dev;
+
+ /* set up interrupts */
+ if (irq_rc && irq_rc->start) {
+ hwirq = irq_rc->start;
+ gc->to_irq = lp_gpio_to_irq;
+
+ lg->domain = irq_domain_add_linear(NULL, LP_NUM_GPIO,
+ &lp_gpio_irq_ops, lg);
+ if (!lg->domain)
+ return -ENXIO;
+
+ lp_gpio_irq_init_hw(lg);
+
+ irq_set_handler_data(hwirq, lg);
+ irq_set_chained_handler(hwirq, lp_gpio_irq_handler);
+ }
+
+ ret = gpiochip_add(gc);
+ if (ret) {
+ dev_err(dev, "failed adding lp-gpio chip\n");
+ return ret;
+ }
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int lp_gpio_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int lp_gpio_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops lp_gpio_pm_ops = {
+ .runtime_suspend = lp_gpio_runtime_suspend,
+ .runtime_resume = lp_gpio_runtime_resume,
+};
+
+static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
+ { "INT33C7", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
+
+static int lp_gpio_remove(struct platform_device *pdev)
+{
+ struct lp_gpio *lg = platform_get_drvdata(pdev);
+ int err;
+ err = gpiochip_remove(&lg->chip);
+ if (err)
+ dev_warn(&pdev->dev, "failed to remove gpio_chip.\n");
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver lp_gpio_driver = {
+ .probe = lp_gpio_probe,
+ .remove = lp_gpio_remove,
+ .driver = {
+ .name = "lp_gpio",
+ .owner = THIS_MODULE,
+ .pm = &lp_gpio_pm_ops,
+ .acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match),
+ },
+};
+
+static int __init lp_gpio_init(void)
+{
+ return platform_driver_register(&lp_gpio_driver);
+}
+
+subsys_initcall(lp_gpio_init);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 9ae29cc0d17f..a0b33a216d4a 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -292,7 +292,6 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, h->host_data);
irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
- irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 45d97c46831a..25000b0f8453 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -66,6 +66,7 @@ struct mxs_gpio_port {
struct irq_domain *domain;
struct bgpio_chip bgc;
enum mxs_gpio_id devid;
+ u32 both_edges;
};
static inline int is_imx23_gpio(struct mxs_gpio_port *port)
@@ -82,13 +83,23 @@ static inline int is_imx28_gpio(struct mxs_gpio_port *port)
static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
{
+ u32 val;
u32 pin_mask = 1 << d->hwirq;
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxs_gpio_port *port = gc->private;
void __iomem *pin_addr;
int edge;
+ port->both_edges &= ~pin_mask;
switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val = gpio_get_value(port->bgc.gc.base + d->hwirq);
+ if (val)
+ edge = GPIO_INT_FALL_EDGE;
+ else
+ edge = GPIO_INT_RISE_EDGE;
+ port->both_edges |= pin_mask;
+ break;
case IRQ_TYPE_EDGE_RISING:
edge = GPIO_INT_RISE_EDGE;
break;
@@ -125,6 +136,23 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
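+/* Toggle the IRQ polarity of @gpio; used to emulate IRQ_TYPE_EDGE_BOTH */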
+static void mxs_flip_edge(struct mxs_gpio_port *port, u32 gpio)
+{
+ u32 bit, val, edge;
+ void __iomem *pin_addr;
+
+ bit = 1 << gpio;
+
+ pin_addr = port->base + PINCTRL_IRQPOL(port);
+ val = readl(pin_addr);
+ edge = val & bit;
+
+ if (edge)
+ writel(bit, pin_addr + MXS_CLR);
+ else
+ writel(bit, pin_addr + MXS_SET);
+}
+
/* MXS has one interrupt *per* gpio port */
static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
@@ -138,6 +166,9 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
while (irq_stat != 0) {
int irqoffset = fls(irq_stat) - 1;
+ if (port->both_edges & (1 << irqoffset))
+ mxs_flip_edge(port, irqoffset);
+
generic_handle_irq(irq_find_mapping(port->domain, irqoffset));
irq_stat &= ~(1 << irqoffset);
}
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index f1fbedb2a6f9..159f5c57eb45 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1476,19 +1476,19 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
.fallingdetect = OMAP4_GPIO_FALLINGDETECT,
};
-const static struct omap_gpio_platform_data omap2_pdata = {
+static const struct omap_gpio_platform_data omap2_pdata = {
.regs = &omap2_gpio_regs,
.bank_width = 32,
.dbck_flag = false,
};
-const static struct omap_gpio_platform_data omap3_pdata = {
+static const struct omap_gpio_platform_data omap3_pdata = {
.regs = &omap2_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
};
-const static struct omap_gpio_platform_data omap4_pdata = {
+static const struct omap_gpio_platform_data omap4_pdata = {
.regs = &omap4_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
new file mode 100644
index 000000000000..e3a4e56f5a42
--- /dev/null
+++ b/drivers/gpio/gpio-palmas.c
@@ -0,0 +1,184 @@
+/*
+ * TI Palmas series PMIC GPIO driver.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/palmas.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct palmas_gpio {
+ struct gpio_chip gpio_chip;
+ struct palmas *palmas;
+};
+
+static inline struct palmas_gpio *to_palmas_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct palmas_gpio, gpio_chip);
+}
+
+static int palmas_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct palmas_gpio *pg = to_palmas_gpio(gc);
+ struct palmas *palmas = pg->palmas;
+ unsigned int val;
+ int ret;
+
+ ret = palmas_read(palmas, PALMAS_GPIO_BASE, PALMAS_GPIO_DATA_IN, &val);
+ if (ret < 0) {
+ dev_err(gc->dev, "GPIO_DATA_IN read failed, err = %d\n", ret);
+ return ret;
+ }
+ return !!(val & BIT(offset));
+}
+
+static void palmas_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct palmas_gpio *pg = to_palmas_gpio(gc);
+ struct palmas *palmas = pg->palmas;
+ int ret;
+
+ if (value)
+ ret = palmas_write(palmas, PALMAS_GPIO_BASE,
+ PALMAS_GPIO_SET_DATA_OUT, BIT(offset));
+ else
+ ret = palmas_write(palmas, PALMAS_GPIO_BASE,
+ PALMAS_GPIO_CLEAR_DATA_OUT, BIT(offset));
+ if (ret < 0)
+ dev_err(gc->dev, "%s write failed, err = %d\n",
+ (value) ? "GPIO_SET_DATA_OUT" : "GPIO_CLEAR_DATA_OUT",
+ ret);
+}
+
+static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct palmas_gpio *pg = to_palmas_gpio(gc);
+ struct palmas *palmas = pg->palmas;
+ int ret;
+
+ /* Set the initial value */
+ palmas_gpio_set(gc, offset, value);
+
+ ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE,
+ PALMAS_GPIO_DATA_DIR, BIT(offset), BIT(offset));
+ if (ret < 0)
+ dev_err(gc->dev, "GPIO_DATA_DIR write failed, err = %d\n", ret);
+ return ret;
+}
+
+static int palmas_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct palmas_gpio *pg = to_palmas_gpio(gc);
+ struct palmas *palmas = pg->palmas;
+ int ret;
+
+ ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE,
+ PALMAS_GPIO_DATA_DIR, BIT(offset), 0);
+ if (ret < 0)
+ dev_err(gc->dev, "GPIO_DATA_DIR write failed, err = %d\n", ret);
+ return ret;
+}
+
+static int palmas_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct palmas_gpio *pg = to_palmas_gpio(gc);
+ struct palmas *palmas = pg->palmas;
+
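+ /* GPIO interrupts are laid out consecutively from PALMAS_GPIO_0_IRQ */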
+ return palmas_irq_get_virq(palmas, PALMAS_GPIO_0_IRQ + offset);
+}
+
+static int palmas_gpio_probe(struct platform_device *pdev)
+{
+ struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
+ struct palmas_platform_data *palmas_pdata;
+ struct palmas_gpio *palmas_gpio;
+ int ret;
+
+ palmas_gpio = devm_kzalloc(&pdev->dev,
+ sizeof(*palmas_gpio), GFP_KERNEL);
+ if (!palmas_gpio) {
+ dev_err(&pdev->dev, "Could not allocate palmas_gpio\n");
+ return -ENOMEM;
+ }
+
+ palmas_gpio->palmas = palmas;
+ palmas_gpio->gpio_chip.owner = THIS_MODULE;
+ palmas_gpio->gpio_chip.label = dev_name(&pdev->dev);
+ palmas_gpio->gpio_chip.ngpio = 8;
+ palmas_gpio->gpio_chip.can_sleep = 1;
+ palmas_gpio->gpio_chip.direction_input = palmas_gpio_input;
+ palmas_gpio->gpio_chip.direction_output = palmas_gpio_output;
+ palmas_gpio->gpio_chip.to_irq = palmas_gpio_to_irq;
+ palmas_gpio->gpio_chip.set = palmas_gpio_set;
+ palmas_gpio->gpio_chip.get = palmas_gpio_get;
+ palmas_gpio->gpio_chip.dev = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+ palmas_gpio->gpio_chip.of_node = palmas->dev->of_node;
+#endif
+ palmas_pdata = dev_get_platdata(palmas->dev);
+ if (palmas_pdata && palmas_pdata->gpio_base)
+ palmas_gpio->gpio_chip.base = palmas_pdata->gpio_base;
+ else
+ palmas_gpio->gpio_chip.base = -1;
+
+ ret = gpiochip_add(&palmas_gpio->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, palmas_gpio);
+ return ret;
+}
+
+static int palmas_gpio_remove(struct platform_device *pdev)
+{
+ struct palmas_gpio *palmas_gpio = platform_get_drvdata(pdev);
+
+ return gpiochip_remove(&palmas_gpio->gpio_chip);
+}
+
+static struct platform_driver palmas_gpio_driver = {
+ .driver.name = "palmas-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = palmas_gpio_probe,
+ .remove = palmas_gpio_remove,
+};
+
+static int __init palmas_gpio_init(void)
+{
+ return platform_driver_register(&palmas_gpio_driver);
+}
+subsys_initcall(palmas_gpio_init);
+
+static void __exit palmas_gpio_exit(void)
+{
+ platform_driver_unregister(&palmas_gpio_driver);
+}
+module_exit(palmas_gpio_exit);
+
+MODULE_ALIAS("platform:palmas-gpio");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("GPIO driver for TI Palmas series PMICs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index cc102d25ee24..24059462c87f 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -46,6 +46,7 @@
#define PCA957X_TYPE 0x2000
static const struct i2c_device_id pca953x_id[] = {
+ { "pca9505", 40 | PCA953X_TYPE | PCA_INT, },
{ "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
{ "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
{ "pca9536", 4 | PCA953X_TYPE, },
@@ -71,19 +72,23 @@ static const struct i2c_device_id pca953x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca953x_id);
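+/*
+ * Each register bank covers 8 lines; the largest supported chip (pca9505)
+ * has 40 lines, i.e. 5 banks.
+ */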
+#define MAX_BANK 5
+#define BANK_SZ 8
+
+#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
+
struct pca953x_chip {
unsigned gpio_start;
- u32 reg_output;
- u32 reg_direction;
+ u8 reg_output[MAX_BANK];
+ u8 reg_direction[MAX_BANK];
struct mutex i2c_lock;
#ifdef CONFIG_GPIO_PCA953X_IRQ
struct mutex irq_lock;
- u32 irq_mask;
- u32 irq_stat;
- u32 irq_trig_raise;
- u32 irq_trig_fall;
- int irq_base;
+ u8 irq_mask[MAX_BANK];
+ u8 irq_stat[MAX_BANK];
+ u8 irq_trig_raise[MAX_BANK];
+ u8 irq_trig_fall[MAX_BANK];
struct irq_domain *domain;
#endif
@@ -93,33 +98,69 @@ struct pca953x_chip {
int chip_type;
};
-static int pca953x_write_reg(struct pca953x_chip *chip, int reg, u32 val)
+static int pca953x_read_single(struct pca953x_chip *chip, int reg, u32 *val,
+ int off)
+{
+ int ret;
+ int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+ int offset = off / BANK_SZ;
+
+ ret = i2c_smbus_read_byte_data(chip->client,
+ (reg << bank_shift) + offset);
+ *val = ret;
+
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "failed reading register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pca953x_write_single(struct pca953x_chip *chip, int reg, u32 val,
+ int off)
+{
+ int ret = 0;
+ int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+ int offset = off / BANK_SZ;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ (reg << bank_shift) + offset, val);
+
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "failed writing register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
{
int ret = 0;
if (chip->gpio_chip.ngpio <= 8)
- ret = i2c_smbus_write_byte_data(chip->client, reg, val);
- else if (chip->gpio_chip.ngpio == 24) {
- cpu_to_le32s(&val);
+ ret = i2c_smbus_write_byte_data(chip->client, reg, *val);
+ else if (chip->gpio_chip.ngpio >= 24) {
+ int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
ret = i2c_smbus_write_i2c_block_data(chip->client,
- (reg << 2) | REG_ADDR_AI,
- 3,
- (u8 *) &val);
+ (reg << bank_shift) | REG_ADDR_AI,
+ NBANK(chip), val);
}
else {
switch (chip->chip_type) {
case PCA953X_TYPE:
ret = i2c_smbus_write_word_data(chip->client,
- reg << 1, val);
+ reg << 1, (u16) *val);
break;
case PCA957X_TYPE:
ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
- val & 0xff);
+ val[0]);
if (ret < 0)
break;
ret = i2c_smbus_write_byte_data(chip->client,
(reg << 1) + 1,
- (val & 0xff00) >> 8);
+ val[1]);
break;
}
}
@@ -132,26 +173,24 @@ static int pca953x_write_reg(struct pca953x_chip *chip, int reg, u32 val)
return 0;
}
-static int pca953x_read_reg(struct pca953x_chip *chip, int reg, u32 *val)
+static int pca953x_read_regs(struct pca953x_chip *chip, int reg, u8 *val)
{
int ret;
if (chip->gpio_chip.ngpio <= 8) {
ret = i2c_smbus_read_byte_data(chip->client, reg);
*val = ret;
- }
- else if (chip->gpio_chip.ngpio == 24) {
- *val = 0;
+ } else if (chip->gpio_chip.ngpio >= 24) {
+ int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+
ret = i2c_smbus_read_i2c_block_data(chip->client,
- (reg << 2) | REG_ADDR_AI,
- 3,
- (u8 *) val);
- le32_to_cpus(val);
+ (reg << bank_shift) | REG_ADDR_AI,
+ NBANK(chip), val);
} else {
ret = i2c_smbus_read_word_data(chip->client, reg << 1);
- *val = ret;
+ val[0] = (u16)ret & 0xFF;
+ val[1] = (u16)ret >> 8;
}
-
if (ret < 0) {
dev_err(&chip->client->dev, "failed reading register\n");
return ret;
@@ -163,13 +202,13 @@ static int pca953x_read_reg(struct pca953x_chip *chip, int reg, u32 *val)
static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
- uint reg_val;
+ u8 reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
mutex_lock(&chip->i2c_lock);
- reg_val = chip->reg_direction | (1u << off);
+ reg_val = chip->reg_direction[off / BANK_SZ] | (1u << (off % BANK_SZ));
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -179,11 +218,11 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
offset = PCA957X_CFG;
break;
}
- ret = pca953x_write_reg(chip, offset, reg_val);
+ ret = pca953x_write_single(chip, offset, reg_val, off);
if (ret)
goto exit;
- chip->reg_direction = reg_val;
+ chip->reg_direction[off / BANK_SZ] = reg_val;
ret = 0;
exit:
mutex_unlock(&chip->i2c_lock);
@@ -194,7 +233,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
unsigned off, int val)
{
struct pca953x_chip *chip;
- uint reg_val;
+ u8 reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -202,9 +241,11 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
mutex_lock(&chip->i2c_lock);
/* set output level */
if (val)
- reg_val = chip->reg_output | (1u << off);
+ reg_val = chip->reg_output[off / BANK_SZ]
+ | (1u << (off % BANK_SZ));
else
- reg_val = chip->reg_output & ~(1u << off);
+ reg_val = chip->reg_output[off / BANK_SZ]
+ & ~(1u << (off % BANK_SZ));
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -214,14 +255,14 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
offset = PCA957X_OUT;
break;
}
- ret = pca953x_write_reg(chip, offset, reg_val);
+ ret = pca953x_write_single(chip, offset, reg_val, off);
if (ret)
goto exit;
- chip->reg_output = reg_val;
+ chip->reg_output[off / BANK_SZ] = reg_val;
/* then direction */
- reg_val = chip->reg_direction & ~(1u << off);
+ reg_val = chip->reg_direction[off / BANK_SZ] & ~(1u << (off % BANK_SZ));
switch (chip->chip_type) {
case PCA953X_TYPE:
offset = PCA953X_DIRECTION;
@@ -230,11 +271,11 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
offset = PCA957X_CFG;
break;
}
- ret = pca953x_write_reg(chip, offset, reg_val);
+ ret = pca953x_write_single(chip, offset, reg_val, off);
if (ret)
goto exit;
- chip->reg_direction = reg_val;
+ chip->reg_direction[off / BANK_SZ] = reg_val;
ret = 0;
exit:
mutex_unlock(&chip->i2c_lock);
@@ -258,7 +299,7 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
offset = PCA957X_IN;
break;
}
- ret = pca953x_read_reg(chip, offset, &reg_val);
+ ret = pca953x_read_single(chip, offset, &reg_val, off);
mutex_unlock(&chip->i2c_lock);
if (ret < 0) {
/* NOTE: diagnostic already emitted; that's all we should
@@ -274,16 +315,18 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip;
- u32 reg_val;
+ u8 reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
mutex_lock(&chip->i2c_lock);
if (val)
- reg_val = chip->reg_output | (1u << off);
+ reg_val = chip->reg_output[off / BANK_SZ]
+ | (1u << (off % BANK_SZ));
else
- reg_val = chip->reg_output & ~(1u << off);
+ reg_val = chip->reg_output[off / BANK_SZ]
+ & ~(1u << (off % BANK_SZ));
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -293,11 +336,11 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
offset = PCA957X_OUT;
break;
}
- ret = pca953x_write_reg(chip, offset, reg_val);
+ ret = pca953x_write_single(chip, offset, reg_val, off);
if (ret)
goto exit;
- chip->reg_output = reg_val;
+ chip->reg_output[off / BANK_SZ] = reg_val;
exit:
mutex_unlock(&chip->i2c_lock);
}
@@ -328,21 +371,21 @@ static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
struct pca953x_chip *chip;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
- return chip->irq_base + off;
+ return irq_create_mapping(chip->domain, off);
}
static void pca953x_irq_mask(struct irq_data *d)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- chip->irq_mask &= ~(1 << d->hwirq);
+ chip->irq_mask[d->hwirq / BANK_SZ] &= ~(1 << (d->hwirq % BANK_SZ));
}
static void pca953x_irq_unmask(struct irq_data *d)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- chip->irq_mask |= 1 << d->hwirq;
+ chip->irq_mask[d->hwirq / BANK_SZ] |= 1 << (d->hwirq % BANK_SZ);
}
static void pca953x_irq_bus_lock(struct irq_data *d)
@@ -355,17 +398,20 @@ static void pca953x_irq_bus_lock(struct irq_data *d)
static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- u32 new_irqs;
- u32 level;
+ u8 new_irqs;
+ int level, i;
/* Look for any newly setup interrupt */
- new_irqs = chip->irq_trig_fall | chip->irq_trig_raise;
- new_irqs &= ~chip->reg_direction;
-
- while (new_irqs) {
- level = __ffs(new_irqs);
- pca953x_gpio_direction_input(&chip->gpio_chip, level);
- new_irqs &= ~(1 << level);
+ for (i = 0; i < NBANK(chip); i++) {
+ new_irqs = chip->irq_trig_fall[i] | chip->irq_trig_raise[i];
+ new_irqs &= ~chip->reg_direction[i];
+
+ while (new_irqs) {
+ level = __ffs(new_irqs);
+ pca953x_gpio_direction_input(&chip->gpio_chip,
+ level + (BANK_SZ * i));
+ new_irqs &= ~(1 << level);
+ }
}
mutex_unlock(&chip->irq_lock);
@@ -374,7 +420,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- u32 mask = 1 << d->hwirq;
+ int bank_nb = d->hwirq / BANK_SZ;
+ u8 mask = 1 << (d->hwirq % BANK_SZ);
if (!(type & IRQ_TYPE_EDGE_BOTH)) {
dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
@@ -383,14 +430,14 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
}
if (type & IRQ_TYPE_EDGE_FALLING)
- chip->irq_trig_fall |= mask;
+ chip->irq_trig_fall[bank_nb] |= mask;
else
- chip->irq_trig_fall &= ~mask;
+ chip->irq_trig_fall[bank_nb] &= ~mask;
if (type & IRQ_TYPE_EDGE_RISING)
- chip->irq_trig_raise |= mask;
+ chip->irq_trig_raise[bank_nb] |= mask;
else
- chip->irq_trig_raise &= ~mask;
+ chip->irq_trig_raise[bank_nb] &= ~mask;
return 0;
}
@@ -404,13 +451,13 @@ static struct irq_chip pca953x_irq_chip = {
.irq_set_type = pca953x_irq_set_type,
};
-static u32 pca953x_irq_pending(struct pca953x_chip *chip)
+static u8 pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
{
- u32 cur_stat;
- u32 old_stat;
- u32 pending;
- u32 trigger;
- int ret, offset = 0;
+ u8 cur_stat[MAX_BANK];
+ u8 old_stat[MAX_BANK];
+ u8 pendings = 0;
+ u8 trigger[MAX_BANK], triggers = 0;
+ int ret, i, offset = 0;
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -420,60 +467,88 @@ static u32 pca953x_irq_pending(struct pca953x_chip *chip)
offset = PCA957X_IN;
break;
}
- ret = pca953x_read_reg(chip, offset, &cur_stat);
+ ret = pca953x_read_regs(chip, offset, cur_stat);
if (ret)
return 0;
/* Remove output pins from the equation */
- cur_stat &= chip->reg_direction;
+ for (i = 0; i < NBANK(chip); i++)
+ cur_stat[i] &= chip->reg_direction[i];
+
+ memcpy(old_stat, chip->irq_stat, NBANK(chip));
- old_stat = chip->irq_stat;
- trigger = (cur_stat ^ old_stat) & chip->irq_mask;
+ for (i = 0; i < NBANK(chip); i++) {
+ trigger[i] = (cur_stat[i] ^ old_stat[i]) & chip->irq_mask[i];
+ triggers += trigger[i];
+ }
- if (!trigger)
+ if (!triggers)
return 0;
- chip->irq_stat = cur_stat;
+ memcpy(chip->irq_stat, cur_stat, NBANK(chip));
- pending = (old_stat & chip->irq_trig_fall) |
- (cur_stat & chip->irq_trig_raise);
- pending &= trigger;
+ for (i = 0; i < NBANK(chip); i++) {
+ pending[i] = (old_stat[i] & chip->irq_trig_fall[i]) |
+ (cur_stat[i] & chip->irq_trig_raise[i]);
+ pending[i] &= trigger[i];
+ pendings += pending[i];
+ }
- return pending;
+ return pendings;
}
static irqreturn_t pca953x_irq_handler(int irq, void *devid)
{
struct pca953x_chip *chip = devid;
- u32 pending;
- u32 level;
-
- pending = pca953x_irq_pending(chip);
+ u8 pending[MAX_BANK];
+ u8 level;
+ int i;
- if (!pending)
+ if (!pca953x_irq_pending(chip, pending))
return IRQ_HANDLED;
- do {
- level = __ffs(pending);
- handle_nested_irq(irq_find_mapping(chip->domain, level));
-
- pending &= ~(1 << level);
- } while (pending);
+ for (i = 0; i < NBANK(chip); i++) {
+ while (pending[i]) {
+ level = __ffs(pending[i]);
+ handle_nested_irq(irq_find_mapping(chip->domain,
+ level + (BANK_SZ * i)));
+ pending[i] &= ~(1 << level);
+ }
+ }
return IRQ_HANDLED;
}
+static int pca953x_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_clear_status_flags(irq, IRQ_NOREQUEST);
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip(irq, &pca953x_irq_chip);
+ irq_set_nested_thread(irq, true);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+
+ return 0;
+}
+
+static const struct irq_domain_ops pca953x_irq_simple_ops = {
+ .map = pca953x_gpio_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
static int pca953x_irq_setup(struct pca953x_chip *chip,
const struct i2c_device_id *id,
int irq_base)
{
struct i2c_client *client = chip->client;
- int ret, offset = 0;
- u32 temporary;
+ int ret, i, offset = 0;
if (irq_base != -1
&& (id->driver_data & PCA_INT)) {
- int lvl;
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -483,49 +558,29 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
offset = PCA957X_IN;
break;
}
- ret = pca953x_read_reg(chip, offset, &temporary);
- chip->irq_stat = temporary;
+ ret = pca953x_read_regs(chip, offset, chip->irq_stat);
if (ret)
- goto out_failed;
+ return ret;
/*
* There is no way to know which GPIO line generated the
* interrupt. We have to rely on the previous read for
* this purpose.
*/
- chip->irq_stat &= chip->reg_direction;
+ for (i = 0; i < NBANK(chip); i++)
+ chip->irq_stat[i] &= chip->reg_direction[i];
mutex_init(&chip->irq_lock);
- chip->irq_base = irq_alloc_descs(-1, irq_base, chip->gpio_chip.ngpio, -1);
- if (chip->irq_base < 0)
- goto out_failed;
-
- chip->domain = irq_domain_add_legacy(client->dev.of_node,
+ chip->domain = irq_domain_add_simple(client->dev.of_node,
chip->gpio_chip.ngpio,
- chip->irq_base,
- 0,
- &irq_domain_simple_ops,
+ irq_base,
+ &pca953x_irq_simple_ops,
NULL);
- if (!chip->domain) {
- ret = -ENODEV;
- goto out_irqdesc_free;
- }
+ if (!chip->domain)
+ return -ENODEV;
- for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
- int irq = lvl + chip->irq_base;
-
- irq_clear_status_flags(irq, IRQ_NOREQUEST);
- irq_set_chip_data(irq, chip);
- irq_set_chip(irq, &pca953x_irq_chip);
- irq_set_nested_thread(irq, true);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
- }
-
- ret = request_threaded_irq(client->irq,
+ ret = devm_request_threaded_irq(&client->dev,
+ client->irq,
NULL,
pca953x_irq_handler,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
@@ -533,28 +588,15 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
if (ret) {
dev_err(&client->dev, "failed to request irq %d\n",
client->irq);
- goto out_irqdesc_free;
+ return ret;
}
chip->gpio_chip.to_irq = pca953x_gpio_to_irq;
}
return 0;
-
-out_irqdesc_free:
- irq_free_descs(chip->irq_base, chip->gpio_chip.ngpio);
-out_failed:
- chip->irq_base = -1;
- return ret;
}
-static void pca953x_irq_teardown(struct pca953x_chip *chip)
-{
- if (chip->irq_base != -1) {
- irq_free_descs(chip->irq_base, chip->gpio_chip.ngpio);
- free_irq(chip->client->irq, chip);
- }
-}
#else /* CONFIG_GPIO_PCA953X_IRQ */
static int pca953x_irq_setup(struct pca953x_chip *chip,
const struct i2c_device_id *id,
@@ -567,10 +609,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
return 0;
}
-
-static void pca953x_irq_teardown(struct pca953x_chip *chip)
-{
-}
#endif
/*
@@ -619,18 +657,24 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, u32 *invert)
static int device_pca953x_init(struct pca953x_chip *chip, u32 invert)
{
int ret;
+ u8 val[MAX_BANK];
- ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
+ ret = pca953x_read_regs(chip, PCA953X_OUTPUT, chip->reg_output);
if (ret)
goto out;
- ret = pca953x_read_reg(chip, PCA953X_DIRECTION,
- &chip->reg_direction);
+ ret = pca953x_read_regs(chip, PCA953X_DIRECTION,
+ chip->reg_direction);
if (ret)
goto out;
/* set platform specific polarity inversion */
- ret = pca953x_write_reg(chip, PCA953X_INVERT, invert);
+ if (invert)
+ memset(val, 0xFF, NBANK(chip));
+ else
+ memset(val, 0, NBANK(chip));
+
+ ret = pca953x_write_regs(chip, PCA953X_INVERT, val);
out:
return ret;
}
@@ -638,28 +682,36 @@ out:
static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
{
int ret;
- u32 val = 0;
+ u8 val[MAX_BANK];
/* Let every port in proper state, that could save power */
- pca953x_write_reg(chip, PCA957X_PUPD, 0x0);
- pca953x_write_reg(chip, PCA957X_CFG, 0xffff);
- pca953x_write_reg(chip, PCA957X_OUT, 0x0);
-
- ret = pca953x_read_reg(chip, PCA957X_IN, &val);
+ memset(val, 0, NBANK(chip));
+ pca953x_write_regs(chip, PCA957X_PUPD, val);
+ memset(val, 0xFF, NBANK(chip));
+ pca953x_write_regs(chip, PCA957X_CFG, val);
+ memset(val, 0, NBANK(chip));
+ pca953x_write_regs(chip, PCA957X_OUT, val);
+
+ ret = pca953x_read_regs(chip, PCA957X_IN, val);
if (ret)
goto out;
- ret = pca953x_read_reg(chip, PCA957X_OUT, &chip->reg_output);
+ ret = pca953x_read_regs(chip, PCA957X_OUT, chip->reg_output);
if (ret)
goto out;
- ret = pca953x_read_reg(chip, PCA957X_CFG, &chip->reg_direction);
+ ret = pca953x_read_regs(chip, PCA957X_CFG, chip->reg_direction);
if (ret)
goto out;
/* set platform specific polarity inversion */
- pca953x_write_reg(chip, PCA957X_INVRT, invert);
+ if (invert)
+ memset(val, 0xFF, NBANK(chip));
+ else
+ memset(val, 0, NBANK(chip));
+ pca953x_write_regs(chip, PCA957X_INVRT, val);
/* Enable registers 6 and 7 to control pull up and pull down */
- pca953x_write_reg(chip, PCA957X_BKEN, 0x202);
+ memset(val, 0x02, NBANK(chip));
+ pca953x_write_regs(chip, PCA957X_BKEN, val);
return 0;
out:
@@ -675,7 +727,8 @@ static int pca953x_probe(struct i2c_client *client,
int ret;
u32 invert = 0;
- chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
+ chip = devm_kzalloc(&client->dev,
+ sizeof(struct pca953x_chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
@@ -710,15 +763,15 @@ static int pca953x_probe(struct i2c_client *client,
else
ret = device_pca957x_init(chip, invert);
if (ret)
- goto out_failed;
+ return ret;
ret = pca953x_irq_setup(chip, id, irq_base);
if (ret)
- goto out_failed;
+ return ret;
ret = gpiochip_add(&chip->gpio_chip);
if (ret)
- goto out_failed_irq;
+ return ret;
if (pdata && pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
@@ -729,12 +782,6 @@ static int pca953x_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
return 0;
-
-out_failed_irq:
- pca953x_irq_teardown(chip);
-out_failed:
- kfree(chip);
- return ret;
}
static int pca953x_remove(struct i2c_client *client)
@@ -760,12 +807,11 @@ static int pca953x_remove(struct i2c_client *client)
return ret;
}
- pca953x_irq_teardown(chip);
- kfree(chip);
return 0;
}
static const struct of_device_id pca953x_dt_ids[] = {
+ { .compatible = "nxp,pca9505", },
{ .compatible = "nxp,pca9534", },
{ .compatible = "nxp,pca9535", },
{ .compatible = "nxp,pca9536", },
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index c1720de18a4f..b820869ca93c 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -365,7 +365,7 @@ static int __init pl061_gpio_init(void)
{
return amba_driver_register(&pl061_gpio_driver);
}
-subsys_initcall(pl061_gpio_init);
+module_init(pl061_gpio_init);
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("PL061 GPIO driver");
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 8325f580c0f1..9cc108d2b770 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -642,12 +642,7 @@ static struct platform_driver pxa_gpio_driver = {
.of_match_table = of_match_ptr(pxa_gpio_dt_ids),
},
};
-
-static int __init pxa_gpio_init(void)
-{
- return platform_driver_register(&pxa_gpio_driver);
-}
-postcore_initcall(pxa_gpio_init);
+module_platform_driver(pxa_gpio_driver);
#ifdef CONFIG_PM
static int pxa_gpio_suspend(void)
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 76be7eed79de..b3643ff007e4 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -38,7 +38,6 @@
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
#include <plat/gpio-cfg-helpers.h>
-#include <plat/gpio-fns.h>
#include <plat/pm.h>
int samsung_gpio_setpull_updown(struct samsung_gpio_chip *chip,
@@ -3023,9 +3022,9 @@ static __init int samsung_gpiolib_init(void)
*/
struct device_node *pctrl_np;
static const struct of_device_id exynos_pinctrl_ids[] = {
- { .compatible = "samsung,pinctrl-exynos4210", },
- { .compatible = "samsung,pinctrl-exynos4x12", },
- { .compatible = "samsung,pinctrl-exynos5440", },
+ { .compatible = "samsung,exynos4210-pinctrl", },
+ { .compatible = "samsung,exynos4x12-pinctrl", },
+ { .compatible = "samsung,exynos5440-pinctrl", },
};
for_each_matching_node(pctrl_np, exynos_pinctrl_ids)
if (pctrl_np && of_device_is_available(pctrl_np))
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 9572aa137e6f..4d330e36da1d 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -37,7 +37,6 @@
#include <linux/i2c/twl.h>
-
/*
* The GPIO "subchip" supports 18 GPIOs which can be configured as
* inputs or outputs, with pullups or pulldowns on each pin. Each
@@ -49,11 +48,6 @@
* There are also two LED pins used sometimes as output-only GPIOs.
*/
-
-static struct gpio_chip twl_gpiochip;
-static int twl4030_gpio_base;
-static int twl4030_gpio_irq_base;
-
/* genirq interfaces are not available to modules */
#ifdef MODULE
#define is_module() true
@@ -69,14 +63,24 @@ static int twl4030_gpio_irq_base;
/* Mask for GPIO registers when aggregated into a 32-bit integer */
#define GPIO_32_MASK 0x0003ffff
-/* Data structures */
-static DEFINE_MUTEX(gpio_lock);
+struct gpio_twl4030_priv {
+ struct gpio_chip gpio_chip;
+ struct mutex mutex;
+ int irq_base;
-/* store usage of each GPIO. - each bit represents one GPIO */
-static unsigned int gpio_usage_count;
+ /* Bitfields for state caching */
+ unsigned int usage_count;
+ unsigned int direction;
+ unsigned int out_state;
+};
/*----------------------------------------------------------------------*/
+static inline struct gpio_twl4030_priv *to_gpio_twl4030(struct gpio_chip *chip)
+{
+ return container_of(chip, struct gpio_twl4030_priv, gpio_chip);
+}
+
/*
* To configure TWL4030 GPIO module registers
*/
@@ -126,7 +130,7 @@ static inline int gpio_twl4030_read(u8 address)
/*----------------------------------------------------------------------*/
-static u8 cached_leden; /* protected by gpio_lock */
+static u8 cached_leden;
/* The LED lines are open drain outputs ... a FET pulls to GND, so an
* external pullup is needed. We could also expose the integrated PWM
@@ -140,14 +144,12 @@ static void twl4030_led_set_value(int led, int value)
if (led)
mask <<= 1;
- mutex_lock(&gpio_lock);
if (value)
cached_leden &= ~mask;
else
cached_leden |= mask;
status = twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
TWL4030_LED_LEDEN_REG);
- mutex_unlock(&gpio_lock);
}
static int twl4030_set_gpio_direction(int gpio, int is_input)
@@ -158,7 +160,6 @@ static int twl4030_set_gpio_direction(int gpio, int is_input)
u8 base = REG_GPIODATADIR1 + d_bnk;
int ret = 0;
- mutex_lock(&gpio_lock);
ret = gpio_twl4030_read(base);
if (ret >= 0) {
if (is_input)
@@ -168,7 +169,6 @@ static int twl4030_set_gpio_direction(int gpio, int is_input)
ret = gpio_twl4030_write(base, reg);
}
- mutex_unlock(&gpio_lock);
return ret;
}
@@ -193,10 +193,6 @@ static int twl4030_get_gpio_datain(int gpio)
u8 base = 0;
int ret = 0;
- if (unlikely((gpio >= TWL4030_GPIO_MAX)
- || !(gpio_usage_count & BIT(gpio))))
- return -EPERM;
-
base = REG_GPIODATAIN1 + d_bnk;
ret = gpio_twl4030_read(base);
if (ret > 0)
@@ -209,9 +205,10 @@ static int twl4030_get_gpio_datain(int gpio)
static int twl_request(struct gpio_chip *chip, unsigned offset)
{
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
int status = 0;
- mutex_lock(&gpio_lock);
+ mutex_lock(&priv->mutex);
/* Support the two LED outputs as output-only GPIOs. */
if (offset >= TWL4030_GPIO_MAX) {
@@ -252,7 +249,7 @@ static int twl_request(struct gpio_chip *chip, unsigned offset)
}
/* on first use, turn GPIO module "on" */
- if (!gpio_usage_count) {
+ if (!priv->usage_count) {
struct twl4030_gpio_platform_data *pdata;
u8 value = MASK_GPIO_CTRL_GPIO_ON;
@@ -266,79 +263,120 @@ static int twl_request(struct gpio_chip *chip, unsigned offset)
status = gpio_twl4030_write(REG_GPIO_CTRL, value);
}
+done:
if (!status)
- gpio_usage_count |= (0x1 << offset);
+ priv->usage_count |= BIT(offset);
-done:
- mutex_unlock(&gpio_lock);
+ mutex_unlock(&priv->mutex);
return status;
}
static void twl_free(struct gpio_chip *chip, unsigned offset)
{
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+
+ mutex_lock(&priv->mutex);
if (offset >= TWL4030_GPIO_MAX) {
twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1);
- return;
+ goto out;
}
- mutex_lock(&gpio_lock);
-
- gpio_usage_count &= ~BIT(offset);
+ priv->usage_count &= ~BIT(offset);
/* on last use, switch off GPIO module */
- if (!gpio_usage_count)
+ if (!priv->usage_count)
gpio_twl4030_write(REG_GPIO_CTRL, 0x0);
- mutex_unlock(&gpio_lock);
+out:
+ mutex_unlock(&priv->mutex);
}
static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
{
- return (offset < TWL4030_GPIO_MAX)
- ? twl4030_set_gpio_direction(offset, 1)
- : -EINVAL;
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+ int ret;
+
+ mutex_lock(&priv->mutex);
+ if (offset < TWL4030_GPIO_MAX)
+ ret = twl4030_set_gpio_direction(offset, 1);
+ else
+ ret = -EINVAL;
+
+ if (!ret)
+ priv->direction &= ~BIT(offset);
+
+ mutex_unlock(&priv->mutex);
+
+ return ret;
}
static int twl_get(struct gpio_chip *chip, unsigned offset)
{
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+ int ret;
int status = 0;
- if (offset < TWL4030_GPIO_MAX)
- status = twl4030_get_gpio_datain(offset);
- else if (offset == TWL4030_GPIO_MAX)
- status = cached_leden & LEDEN_LEDAON;
+ mutex_lock(&priv->mutex);
+ if (!(priv->usage_count & BIT(offset))) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ if (priv->direction & BIT(offset))
+ status = priv->out_state & BIT(offset);
else
- status = cached_leden & LEDEN_LEDBON;
- return (status < 0) ? 0 : status;
+ status = twl4030_get_gpio_datain(offset);
+
+ ret = (status <= 0) ? 0 : 1;
+out:
+ mutex_unlock(&priv->mutex);
+ return ret;
}
-static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
+static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
{
- if (offset < TWL4030_GPIO_MAX) {
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+
+ mutex_lock(&priv->mutex);
+ if (offset < TWL4030_GPIO_MAX)
twl4030_set_gpio_dataout(offset, value);
- return twl4030_set_gpio_direction(offset, 0);
- } else {
+ else
twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
- return 0;
- }
+
+ if (value)
+ priv->out_state |= BIT(offset);
+ else
+ priv->out_state &= ~BIT(offset);
+
+ mutex_unlock(&priv->mutex);
}
-static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
+static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
{
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+
+ mutex_lock(&priv->mutex);
if (offset < TWL4030_GPIO_MAX)
twl4030_set_gpio_dataout(offset, value);
- else
- twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
+
+ priv->direction |= BIT(offset);
+ mutex_unlock(&priv->mutex);
+
+ twl_set(chip, offset, value);
+
+ return 0;
}
static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
{
- return (twl4030_gpio_irq_base && (offset < TWL4030_GPIO_MAX))
- ? (twl4030_gpio_irq_base + offset)
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+
+ return (priv->irq_base && (offset < TWL4030_GPIO_MAX))
+ ? (priv->irq_base + offset)
: -EINVAL;
}
-static struct gpio_chip twl_gpiochip = {
+static struct gpio_chip template_chip = {
.label = "twl4030",
.owner = THIS_MODULE,
.request = twl_request,
@@ -424,8 +462,14 @@ static int gpio_twl4030_probe(struct platform_device *pdev)
{
struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
struct device_node *node = pdev->dev.of_node;
+ struct gpio_twl4030_priv *priv;
int ret, irq_base;
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct gpio_twl4030_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
/* maybe setup IRQs */
if (is_module()) {
dev_err(&pdev->dev, "can't dispatch IRQs from modules\n");
@@ -445,12 +489,15 @@ static int gpio_twl4030_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- twl4030_gpio_irq_base = irq_base;
+ priv->irq_base = irq_base;
no_irqs:
- twl_gpiochip.base = -1;
- twl_gpiochip.ngpio = TWL4030_GPIO_MAX;
- twl_gpiochip.dev = &pdev->dev;
+ priv->gpio_chip = template_chip;
+ priv->gpio_chip.base = -1;
+ priv->gpio_chip.ngpio = TWL4030_GPIO_MAX;
+ priv->gpio_chip.dev = &pdev->dev;
+
+ mutex_init(&priv->mutex);
if (node)
pdata = of_gpio_twl4030(&pdev->dev);
@@ -481,23 +528,23 @@ no_irqs:
* is (still) clear if use_leds is set.
*/
if (pdata->use_leds)
- twl_gpiochip.ngpio += 2;
+ priv->gpio_chip.ngpio += 2;
- ret = gpiochip_add(&twl_gpiochip);
+ ret = gpiochip_add(&priv->gpio_chip);
if (ret < 0) {
dev_err(&pdev->dev, "could not register gpiochip, %d\n", ret);
- twl_gpiochip.ngpio = 0;
+ priv->gpio_chip.ngpio = 0;
gpio_twl4030_remove(pdev);
goto out;
}
- twl4030_gpio_base = twl_gpiochip.base;
+ platform_set_drvdata(pdev, priv);
if (pdata && pdata->setup) {
int status;
- status = pdata->setup(&pdev->dev,
- twl4030_gpio_base, TWL4030_GPIO_MAX);
+ status = pdata->setup(&pdev->dev, priv->gpio_chip.base,
+ TWL4030_GPIO_MAX);
if (status)
dev_dbg(&pdev->dev, "setup --> %d\n", status);
}
@@ -510,18 +557,19 @@ out:
static int gpio_twl4030_remove(struct platform_device *pdev)
{
struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_twl4030_priv *priv = platform_get_drvdata(pdev);
int status;
if (pdata && pdata->teardown) {
- status = pdata->teardown(&pdev->dev,
- twl4030_gpio_base, TWL4030_GPIO_MAX);
+ status = pdata->teardown(&pdev->dev, priv->gpio_chip.base,
+ TWL4030_GPIO_MAX);
if (status) {
dev_dbg(&pdev->dev, "teardown --> %d\n", status);
return status;
}
}
- status = gpiochip_remove(&twl_gpiochip);
+ status = gpiochip_remove(&priv->gpio_chip);
if (status < 0)
return status;
diff --git a/drivers/gpio/gpio-vt8500.c b/drivers/gpio/gpio-vt8500.c
index b53320a16fc8..81683ca35ac1 100644
--- a/drivers/gpio/gpio-vt8500.c
+++ b/drivers/gpio/gpio-vt8500.c
@@ -73,19 +73,20 @@ struct vt8500_gpio_data {
static struct vt8500_gpio_data vt8500_data = {
.num_banks = 7,
.banks = {
+ VT8500_BANK(NO_REG, 0x3C, 0x5C, 0x7C, 9),
VT8500_BANK(0x00, 0x20, 0x40, 0x60, 26),
VT8500_BANK(0x04, 0x24, 0x44, 0x64, 28),
VT8500_BANK(0x08, 0x28, 0x48, 0x68, 31),
VT8500_BANK(0x0C, 0x2C, 0x4C, 0x6C, 19),
VT8500_BANK(0x10, 0x30, 0x50, 0x70, 19),
VT8500_BANK(0x14, 0x34, 0x54, 0x74, 23),
- VT8500_BANK(NO_REG, 0x3C, 0x5C, 0x7C, 9),
},
};
static struct vt8500_gpio_data wm8505_data = {
.num_banks = 10,
.banks = {
+ VT8500_BANK(0x64, 0x8C, 0xB4, 0xDC, 22),
VT8500_BANK(0x40, 0x68, 0x90, 0xB8, 8),
VT8500_BANK(0x44, 0x6C, 0x94, 0xBC, 32),
VT8500_BANK(0x48, 0x70, 0x98, 0xC0, 6),
@@ -95,7 +96,6 @@ static struct vt8500_gpio_data wm8505_data = {
VT8500_BANK(0x58, 0x80, 0xA8, 0xD0, 5),
VT8500_BANK(0x5C, 0x84, 0xAC, 0xD4, 12),
VT8500_BANK(0x60, 0x88, 0xB0, 0xD8, 16),
- VT8500_BANK(0x64, 0x8C, 0xB4, 0xDC, 22),
VT8500_BANK(0x500, 0x504, 0x508, 0x50C, 6),
},
};
@@ -127,6 +127,12 @@ struct vt8500_gpio_chip {
void __iomem *base;
};
+struct vt8500_data {
+ struct vt8500_gpio_chip *chip;
+ void __iomem *iobase;
+ int num_banks;
+};
+
#define to_vt8500(__chip) container_of(__chip, struct vt8500_gpio_chip, chip)
@@ -224,19 +230,32 @@ static int vt8500_of_xlate(struct gpio_chip *gc,
static int vt8500_add_chips(struct platform_device *pdev, void __iomem *base,
const struct vt8500_gpio_data *data)
{
+ struct vt8500_data *priv;
struct vt8500_gpio_chip *vtchip;
struct gpio_chip *chip;
int i;
int pin_cnt = 0;
- vtchip = devm_kzalloc(&pdev->dev,
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct vt8500_data), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ priv->chip = devm_kzalloc(&pdev->dev,
sizeof(struct vt8500_gpio_chip) * data->num_banks,
GFP_KERNEL);
- if (!vtchip) {
- pr_err("%s: failed to allocate chip memory\n", __func__);
+ if (!priv->chip) {
+ dev_err(&pdev->dev, "failed to allocate chip memory\n");
return -ENOMEM;
}
+ priv->iobase = base;
+ priv->num_banks = data->num_banks;
+ platform_set_drvdata(pdev, priv);
+
+ vtchip = priv->chip;
+
for (i = 0; i < data->num_banks; i++) {
vtchip[i].base = base;
vtchip[i].regs = &data->banks[i];
@@ -273,36 +292,54 @@ static struct of_device_id vt8500_gpio_dt_ids[] = {
static int vt8500_gpio_probe(struct platform_device *pdev)
{
+ int ret;
void __iomem *gpio_base;
- struct device_node *np;
+ struct resource *res;
const struct of_device_id *of_id =
of_match_device(vt8500_gpio_dt_ids, &pdev->dev);
if (!of_id) {
- dev_err(&pdev->dev, "Failed to find gpio controller\n");
+ dev_err(&pdev->dev, "No matching driver data\n");
return -ENODEV;
}
- np = pdev->dev.of_node;
- if (!np) {
- dev_err(&pdev->dev, "Missing GPIO description in devicetree\n");
- return -EFAULT;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get IO resource\n");
+ return -ENODEV;
}
- gpio_base = of_iomap(np, 0);
+ gpio_base = devm_request_and_ioremap(&pdev->dev, res);
if (!gpio_base) {
dev_err(&pdev->dev, "Unable to map GPIO registers\n");
- of_node_put(np);
return -ENOMEM;
}
- vt8500_add_chips(pdev, gpio_base, of_id->data);
+ ret = vt8500_add_chips(pdev, gpio_base, of_id->data);
+
+ return ret;
+}
+
+static int vt8500_gpio_remove(struct platform_device *pdev)
+{
+ int i;
+ int ret;
+ struct vt8500_data *priv = platform_get_drvdata(pdev);
+ struct vt8500_gpio_chip *vtchip = priv->chip;
+
+ for (i = 0; i < priv->num_banks; i++) {
+ ret = gpiochip_remove(&vtchip[i].chip);
+ if (ret)
+ dev_warn(&pdev->dev, "gpiochip_remove returned %d\n",
+ ret);
+ }
return 0;
}
static struct platform_driver vt8500_gpio_driver = {
.probe = vt8500_gpio_probe,
+ .remove = vt8500_gpio_remove,
.driver = {
.name = "vt8500-gpio",
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index cbad6e908d30..a063eb04b6ce 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -15,6 +15,7 @@
#include <linux/export.h>
#include <linux/acpi_gpio.h>
#include <linux/acpi.h>
+#include <linux/interrupt.h>
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{
@@ -52,3 +53,89 @@ int acpi_get_gpio(char *path, int pin)
return chip->base + pin;
}
EXPORT_SYMBOL_GPL(acpi_get_gpio);
+
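+/*
+ * Threaded handler for a GPIO-signaled ACPI event: @data is the handle of
+ * the ACPI event method (_Exx/_Lxx) installed for the pin; evaluating it
+ * runs the firmware-provided handler.
+ */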
+static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
+{
+ acpi_handle handle = data;
+
+ acpi_evaluate_object(handle, NULL, NULL, NULL);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * acpi_gpiochip_request_interrupts() - Register isr for gpio chip ACPI events
+ * @chip: gpio chip
+ *
+ * ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are
+ * handled by ACPI event methods which need to be called from the GPIO
+ * chip's interrupt handler. acpi_gpiochip_request_interrupts finds out which
+ * gpio pins have acpi event methods and assigns interrupt handlers that calls
+ * the acpi event methods for those pins.
+ *
+ * Interrupts are automatically freed on driver detach
+ */
+void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+{
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_resource *res;
+ acpi_handle handle, ev_handle;
+ acpi_status status;
+ unsigned int pin;
+ int irq, ret;
+ char ev_name[5];
+
+ if (!chip->dev || !chip->to_irq)
+ return;
+
+ handle = ACPI_HANDLE(chip->dev);
+ if (!handle)
+ return;
+
+ status = acpi_get_event_resources(handle, &buf);
+ if (ACPI_FAILURE(status))
+ return;
+
+ /* If a gpio interrupt has an acpi event handler method, then
+ * set up an interrupt handler that calls the acpi event handler
+ */
+
+ for (res = buf.pointer;
+ res && (res->type != ACPI_RESOURCE_TYPE_END_TAG);
+ res = ACPI_NEXT_RESOURCE(res)) {
+
+ if (res->type != ACPI_RESOURCE_TYPE_GPIO ||
+ res->data.gpio.connection_type !=
+ ACPI_RESOURCE_GPIO_TYPE_INT)
+ continue;
+
+ pin = res->data.gpio.pin_table[0];
+ if (pin > chip->ngpio)
+ continue;
+
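+ /* event methods are named _EXX (edge) or _LXX (level), XX = pin in hex */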
+ sprintf(ev_name, "_%c%02X",
+ res->data.gpio.triggering ? 'E' : 'L', pin);
+
+ status = acpi_get_handle(handle, ev_name, &ev_handle);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ irq = chip->to_irq(chip, pin);
+ if (irq < 0)
+ continue;
+
+ /* Assume BIOS sets the triggering, so no flags */
+ ret = devm_request_threaded_irq(chip->dev, irq, NULL,
+ acpi_gpio_irq_handler,
+ 0,
+ "GPIO-signaled-ACPI-event",
+ ev_handle);
+ if (ret)
+ dev_err(chip->dev,
+ "Failed to request IRQ %d ACPI event handler\n",
+ irq);
+ }
+}
+EXPORT_SYMBOL(acpi_gpiochip_request_interrupts);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5359ca78130f..fff9786cdc64 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -3,6 +3,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
+#include <linux/list.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/debugfs.h>
@@ -52,14 +53,13 @@ struct gpio_desc {
/* flag symbols are bit numbers */
#define FLAG_REQUESTED 0
#define FLAG_IS_OUT 1
-#define FLAG_RESERVED 2
-#define FLAG_EXPORT 3 /* protected by sysfs_lock */
-#define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */
-#define FLAG_TRIG_FALL 5 /* trigger on falling edge */
-#define FLAG_TRIG_RISE 6 /* trigger on rising edge */
-#define FLAG_ACTIVE_LOW 7 /* sysfs value has active low */
-#define FLAG_OPEN_DRAIN 8 /* Gpio is open drain type */
-#define FLAG_OPEN_SOURCE 9 /* Gpio is open source type */
+#define FLAG_EXPORT 2 /* protected by sysfs_lock */
+#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
+#define FLAG_TRIG_FALL 4 /* trigger on falling edge */
+#define FLAG_TRIG_RISE 5 /* trigger on rising edge */
+#define FLAG_ACTIVE_LOW 6 /* sysfs value has active low */
+#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
+#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define ID_SHIFT 16 /* add new flags before this one */
@@ -72,10 +72,36 @@ struct gpio_desc {
};
static struct gpio_desc gpio_desc[ARCH_NR_GPIOS];
+#define GPIO_OFFSET_VALID(chip, offset) (offset >= 0 && offset < chip->ngpio)
+
+static LIST_HEAD(gpio_chips);
+
#ifdef CONFIG_GPIO_SYSFS
static DEFINE_IDR(dirent_idr);
#endif
+/*
+ * Internal gpiod_* API using descriptors instead of the integer namespace.
+ * Most of this should eventually go public.
+ */
+static int gpiod_request(struct gpio_desc *desc, const char *label);
+static void gpiod_free(struct gpio_desc *desc);
+static int gpiod_direction_input(struct gpio_desc *desc);
+static int gpiod_direction_output(struct gpio_desc *desc, int value);
+static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
+static int gpiod_get_value_cansleep(struct gpio_desc *desc);
+static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
+static int gpiod_get_value(struct gpio_desc *desc);
+static void gpiod_set_value(struct gpio_desc *desc, int value);
+static int gpiod_cansleep(struct gpio_desc *desc);
+static int gpiod_to_irq(struct gpio_desc *desc);
+static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
+static int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc);
+static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
+static void gpiod_unexport(struct gpio_desc *desc);
+
+
static inline void desc_set_label(struct gpio_desc *d, const char *label)
{
#ifdef CONFIG_DEBUG_FS
@@ -83,6 +109,36 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
#endif
}
+/*
+ * Return the GPIO number of the passed descriptor relative to its chip
+ */
+static int gpio_chip_hwgpio(const struct gpio_desc *desc)
+{
+ return desc - &desc->chip->desc[0];
+}
+
+/**
+ * Convert a GPIO number to its descriptor
+ */
+static struct gpio_desc *gpio_to_desc(unsigned gpio)
+{
+ if (WARN(!gpio_is_valid(gpio), "invalid GPIO %d\n", gpio))
+ return NULL;
+ else
+ return &gpio_desc[gpio];
+}
+
+/**
+ * Convert a GPIO descriptor to the integer namespace.
+ * This should disappear in the future but is needed since we still
+ * use GPIO numbers for error messages and sysfs nodes
+ */
+static int desc_to_gpio(const struct gpio_desc *desc)
+{
+ return desc->chip->base + gpio_chip_hwgpio(desc);
+}
+
+
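A short sketch of the number/descriptor round trip these two helpers provide, assuming a chip registered at base 100 with 8 lines (arbitrary values):

	struct gpio_desc *d = gpio_to_desc(103);	/* &gpio_desc[103]               */
	int hwgpio = gpio_chip_hwgpio(d);		/* 103 - chip->base (100) == 3   */
	int number = desc_to_gpio(d);			/* chip->base (100) + 3   == 103 */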
/* Warn when drivers omit gpio_request() calls -- legal but ill-advised
* when setting direction, and otherwise illegal. Until board setup code
* and drivers use explicit requests everywhere (which won't happen when
@@ -94,10 +150,10 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
* only "legal" in the sense that (old) code using it won't break yet,
* but instead only triggers a WARN() stack dump.
*/
-static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset)
+static int gpio_ensure_requested(struct gpio_desc *desc)
{
const struct gpio_chip *chip = desc->chip;
- const int gpio = chip->base + offset;
+ const int gpio = desc_to_gpio(desc);
if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0,
"autorequest GPIO-%d\n", gpio)) {
@@ -116,95 +172,54 @@ static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset)
}
/* caller holds gpio_lock *OR* gpio is marked as requested */
+static struct gpio_chip *gpiod_to_chip(struct gpio_desc *desc)
+{
+ return desc->chip;
+}
+
struct gpio_chip *gpio_to_chip(unsigned gpio)
{
- return gpio_desc[gpio].chip;
+ return gpiod_to_chip(gpio_to_desc(gpio));
}
/* dynamic allocation of GPIOs, e.g. on a hotplugged device */
static int gpiochip_find_base(int ngpio)
{
- int i;
- int spare = 0;
- int base = -ENOSPC;
-
- for (i = ARCH_NR_GPIOS - 1; i >= 0 ; i--) {
- struct gpio_desc *desc = &gpio_desc[i];
- struct gpio_chip *chip = desc->chip;
+ struct gpio_chip *chip;
+ int base = ARCH_NR_GPIOS - ngpio;
- if (!chip && !test_bit(FLAG_RESERVED, &desc->flags)) {
- spare++;
- if (spare == ngpio) {
- base = i;
- break;
- }
- } else {
- spare = 0;
- if (chip)
- i -= chip->ngpio - 1;
- }
+ list_for_each_entry_reverse(chip, &gpio_chips, list) {
+ /* found a free space? */
+ if (chip->base + chip->ngpio <= base)
+ break;
+ else
+ /* nope, check the space right before the chip */
+ base = chip->base - ngpio;
}
- if (gpio_is_valid(base))
+ if (gpio_is_valid(base)) {
pr_debug("%s: found new base at %d\n", __func__, base);
- return base;
-}
-
-/**
- * gpiochip_reserve() - reserve range of gpios to use with platform code only
- * @start: starting gpio number
- * @ngpio: number of gpios to reserve
- * Context: platform init, potentially before irqs or kmalloc will work
- *
- * Returns a negative errno if any gpio within the range is already reserved
- * or registered, else returns zero as a success code. Use this function
- * to mark a range of gpios as unavailable for dynamic gpio number allocation,
- * for example because its driver support is not yet loaded.
- */
-int __init gpiochip_reserve(int start, int ngpio)
-{
- int ret = 0;
- unsigned long flags;
- int i;
-
- if (!gpio_is_valid(start) || !gpio_is_valid(start + ngpio - 1))
- return -EINVAL;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- for (i = start; i < start + ngpio; i++) {
- struct gpio_desc *desc = &gpio_desc[i];
-
- if (desc->chip || test_bit(FLAG_RESERVED, &desc->flags)) {
- ret = -EBUSY;
- goto err;
- }
-
- set_bit(FLAG_RESERVED, &desc->flags);
+ return base;
+ } else {
+ pr_err("%s: cannot find free range\n", __func__);
+ return -ENOSPC;
}
-
- pr_debug("%s: reserved gpios from %d to %d\n",
- __func__, start, start + ngpio - 1);
-err:
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- return ret;
}
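A worked example of the new top-down search, with arbitrary numbers:

	/*
	 * ARCH_NR_GPIOS = 256, registered chips occupy [240,256) and [200,216),
	 * request for ngpio = 16:
	 *
	 *   base = 256 - 16 = 240   overlaps [240,256)  ->  base = 240 - 16 = 224
	 *   [200,216) ends at 216 <= 224                ->  free gap, return 224
	 */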
/* caller ensures gpio is valid and requested, chip->get_direction may sleep */
-static int gpio_get_direction(unsigned gpio)
+static int gpiod_get_direction(struct gpio_desc *desc)
{
struct gpio_chip *chip;
- struct gpio_desc *desc = &gpio_desc[gpio];
+ unsigned offset;
int status = -EINVAL;
- chip = gpio_to_chip(gpio);
- gpio -= chip->base;
+ chip = gpiod_to_chip(desc);
+ offset = gpio_chip_hwgpio(desc);
if (!chip->get_direction)
return status;
- status = chip->get_direction(chip, gpio);
+ status = chip->get_direction(chip, offset);
if (status > 0) {
/* GPIOF_DIR_IN, or other positive */
status = 1;
@@ -248,19 +263,19 @@ static DEFINE_MUTEX(sysfs_lock);
static ssize_t gpio_direction_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
- unsigned gpio = desc - gpio_desc;
+ struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
- if (!test_bit(FLAG_EXPORT, &desc->flags))
+ if (!test_bit(FLAG_EXPORT, &desc->flags)) {
status = -EIO;
- else
- gpio_get_direction(gpio);
+ } else {
+ gpiod_get_direction(desc);
status = sprintf(buf, "%s\n",
test_bit(FLAG_IS_OUT, &desc->flags)
? "out" : "in");
+ }
mutex_unlock(&sysfs_lock);
return status;
@@ -269,8 +284,7 @@ static ssize_t gpio_direction_show(struct device *dev,
static ssize_t gpio_direction_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
- unsigned gpio = desc - gpio_desc;
+ struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
@@ -278,11 +292,11 @@ static ssize_t gpio_direction_store(struct device *dev,
if (!test_bit(FLAG_EXPORT, &desc->flags))
status = -EIO;
else if (sysfs_streq(buf, "high"))
- status = gpio_direction_output(gpio, 1);
+ status = gpiod_direction_output(desc, 1);
else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low"))
- status = gpio_direction_output(gpio, 0);
+ status = gpiod_direction_output(desc, 0);
else if (sysfs_streq(buf, "in"))
- status = gpio_direction_input(gpio);
+ status = gpiod_direction_input(desc);
else
status = -EINVAL;
@@ -296,8 +310,7 @@ static /* const */ DEVICE_ATTR(direction, 0644,
static ssize_t gpio_value_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
- unsigned gpio = desc - gpio_desc;
+ struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
@@ -307,7 +320,7 @@ static ssize_t gpio_value_show(struct device *dev,
} else {
int value;
- value = !!gpio_get_value_cansleep(gpio);
+ value = !!gpiod_get_value_cansleep(desc);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
@@ -321,8 +334,7 @@ static ssize_t gpio_value_show(struct device *dev,
static ssize_t gpio_value_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
- unsigned gpio = desc - gpio_desc;
+ struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
@@ -338,7 +350,7 @@ static ssize_t gpio_value_store(struct device *dev,
if (status == 0) {
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
- gpio_set_value_cansleep(gpio, value != 0);
+ gpiod_set_value_cansleep(desc, value != 0);
status = size;
}
}
@@ -368,7 +380,7 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags)
return 0;
- irq = gpio_to_irq(desc - gpio_desc);
+ irq = gpiod_to_irq(desc);
if (irq < 0)
return -EIO;
@@ -399,15 +411,10 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
goto err_out;
}
- do {
- ret = -ENOMEM;
- if (idr_pre_get(&dirent_idr, GFP_KERNEL))
- ret = idr_get_new_above(&dirent_idr, value_sd,
- 1, &id);
- } while (ret == -EAGAIN);
-
- if (ret)
+ ret = idr_alloc(&dirent_idr, value_sd, 1, 0, GFP_KERNEL);
+ if (ret < 0)
goto free_sd;
+ id = ret;
desc->flags &= GPIO_FLAGS_MASK;
desc->flags |= (unsigned long)id << ID_SHIFT;
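The idr conversion above (and the matching DRM hunks further down) all follow the same shape: the idr_pre_get()/idr_get_new_above() retry loop collapses into a single idr_alloc() call. A generic sketch, not tied to any one call site:

	id = idr_alloc(&some_idr, ptr, 1, 0, GFP_KERNEL);	/* ids from 1 up, 0 means no upper bound */
	if (id < 0)
		return id;					/* -ENOMEM or -ENOSPC, no -EAGAIN loop */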
@@ -638,29 +645,32 @@ static ssize_t export_store(struct class *class,
struct class_attribute *attr,
const char *buf, size_t len)
{
- long gpio;
- int status;
+ long gpio;
+ struct gpio_desc *desc;
+ int status;
status = strict_strtol(buf, 0, &gpio);
if (status < 0)
goto done;
+ desc = gpio_to_desc(gpio);
+
/* No extra locking here; FLAG_SYSFS just signifies that the
 * request and export were done on behalf of userspace, so
* they may be undone on its behalf too.
*/
- status = gpio_request(gpio, "sysfs");
+ status = gpiod_request(desc, "sysfs");
if (status < 0) {
if (status == -EPROBE_DEFER)
status = -ENODEV;
goto done;
}
- status = gpio_export(gpio, true);
+ status = gpiod_export(desc, true);
if (status < 0)
- gpio_free(gpio);
+ gpiod_free(desc);
else
- set_bit(FLAG_SYSFS, &gpio_desc[gpio].flags);
+ set_bit(FLAG_SYSFS, &desc->flags);
done:
if (status)
@@ -672,8 +682,9 @@ static ssize_t unexport_store(struct class *class,
struct class_attribute *attr,
const char *buf, size_t len)
{
- long gpio;
- int status;
+ long gpio;
+ struct gpio_desc *desc;
+ int status;
status = strict_strtol(buf, 0, &gpio);
if (status < 0)
@@ -681,17 +692,18 @@ static ssize_t unexport_store(struct class *class,
status = -EINVAL;
+ desc = gpio_to_desc(gpio);
/* reject bogus commands (gpio_unexport ignores them) */
- if (!gpio_is_valid(gpio))
+ if (!desc)
goto done;
/* No extra locking here; FLAG_SYSFS just signifies that the
 * request and export were done on behalf of userspace, so
* they may be undone on its behalf too.
*/
- if (test_and_clear_bit(FLAG_SYSFS, &gpio_desc[gpio].flags)) {
+ if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) {
status = 0;
- gpio_free(gpio);
+ gpiod_free(desc);
}
done:
if (status)
@@ -728,13 +740,13 @@ static struct class gpio_class = {
*
* Returns zero on success, else an error.
*/
-int gpio_export(unsigned gpio, bool direction_may_change)
+static int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
unsigned long flags;
- struct gpio_desc *desc;
int status;
const char *ioname = NULL;
struct device *dev;
+ int offset;
/* can't export until sysfs is available ... */
if (!gpio_class.p) {
@@ -742,20 +754,19 @@ int gpio_export(unsigned gpio, bool direction_may_change)
return -ENOENT;
}
- if (!gpio_is_valid(gpio)) {
- pr_debug("%s: gpio %d is not valid\n", __func__, gpio);
+ if (!desc) {
+ pr_debug("%s: invalid gpio descriptor\n", __func__);
return -EINVAL;
}
mutex_lock(&sysfs_lock);
spin_lock_irqsave(&gpio_lock, flags);
- desc = &gpio_desc[gpio];
if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
test_bit(FLAG_EXPORT, &desc->flags)) {
spin_unlock_irqrestore(&gpio_lock, flags);
pr_debug("%s: gpio %d unavailable (requested=%d, exported=%d)\n",
- __func__, gpio,
+ __func__, desc_to_gpio(desc),
test_bit(FLAG_REQUESTED, &desc->flags),
test_bit(FLAG_EXPORT, &desc->flags));
status = -EPERM;
@@ -766,11 +777,13 @@ int gpio_export(unsigned gpio, bool direction_may_change)
direction_may_change = false;
spin_unlock_irqrestore(&gpio_lock, flags);
- if (desc->chip->names && desc->chip->names[gpio - desc->chip->base])
- ioname = desc->chip->names[gpio - desc->chip->base];
+ offset = gpio_chip_hwgpio(desc);
+ if (desc->chip->names && desc->chip->names[offset])
+ ioname = desc->chip->names[offset];
dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%u", gpio);
+ desc, ioname ? ioname : "gpio%u",
+ desc_to_gpio(desc));
if (IS_ERR(dev)) {
status = PTR_ERR(dev);
goto fail_unlock;
@@ -786,7 +799,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
goto fail_unregister_device;
}
- if (gpio_to_irq(gpio) >= 0 && (direction_may_change ||
+ if (gpiod_to_irq(desc) >= 0 && (direction_may_change ||
!test_bit(FLAG_IS_OUT, &desc->flags))) {
status = device_create_file(dev, &dev_attr_edge);
if (status)
@@ -801,9 +814,15 @@ fail_unregister_device:
device_unregister(dev);
fail_unlock:
mutex_unlock(&sysfs_lock);
- pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+ pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
+ status);
return status;
}
+
+int gpio_export(unsigned gpio, bool direction_may_change)
+{
+ return gpiod_export(gpio_to_desc(gpio), direction_may_change);
+}
EXPORT_SYMBOL_GPL(gpio_export);
static int match_export(struct device *dev, const void *data)
@@ -822,18 +841,16 @@ static int match_export(struct device *dev, const void *data)
*
* Returns zero on success, else an error.
*/
-int gpio_export_link(struct device *dev, const char *name, unsigned gpio)
+static int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc)
{
- struct gpio_desc *desc;
int status = -EINVAL;
- if (!gpio_is_valid(gpio))
+ if (!desc)
goto done;
mutex_lock(&sysfs_lock);
- desc = &gpio_desc[gpio];
-
if (test_bit(FLAG_EXPORT, &desc->flags)) {
struct device *tdev;
@@ -850,12 +867,17 @@ int gpio_export_link(struct device *dev, const char *name, unsigned gpio)
done:
if (status)
- pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+ pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
+ status);
return status;
}
-EXPORT_SYMBOL_GPL(gpio_export_link);
+int gpio_export_link(struct device *dev, const char *name, unsigned gpio)
+{
+ return gpiod_export_link(dev, name, gpio_to_desc(gpio));
+}
+EXPORT_SYMBOL_GPL(gpio_export_link);
/**
* gpio_sysfs_set_active_low - set the polarity of gpio sysfs value
@@ -869,19 +891,16 @@ EXPORT_SYMBOL_GPL(gpio_export_link);
*
* Returns zero on success, else an error.
*/
-int gpio_sysfs_set_active_low(unsigned gpio, int value)
+static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
{
- struct gpio_desc *desc;
struct device *dev = NULL;
int status = -EINVAL;
- if (!gpio_is_valid(gpio))
+ if (!desc)
goto done;
mutex_lock(&sysfs_lock);
- desc = &gpio_desc[gpio];
-
if (test_bit(FLAG_EXPORT, &desc->flags)) {
dev = class_find_device(&gpio_class, NULL, desc, match_export);
if (dev == NULL) {
@@ -897,10 +916,16 @@ unlock:
done:
if (status)
- pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+ pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
+ status);
return status;
}
+
+int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ return gpiod_sysfs_set_active_low(gpio_to_desc(gpio), value);
+}
EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
/**
@@ -909,21 +934,18 @@ EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
*
* This is implicit on gpio_free().
*/
-void gpio_unexport(unsigned gpio)
+static void gpiod_unexport(struct gpio_desc *desc)
{
- struct gpio_desc *desc;
int status = 0;
struct device *dev = NULL;
- if (!gpio_is_valid(gpio)) {
+ if (!desc) {
status = -EINVAL;
goto done;
}
mutex_lock(&sysfs_lock);
- desc = &gpio_desc[gpio];
-
if (test_bit(FLAG_EXPORT, &desc->flags)) {
dev = class_find_device(&gpio_class, NULL, desc, match_export);
@@ -935,13 +957,20 @@ void gpio_unexport(unsigned gpio)
}
mutex_unlock(&sysfs_lock);
+
if (dev) {
device_unregister(dev);
put_device(dev);
}
done:
if (status)
- pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+ pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
+ status);
+}
+
+void gpio_unexport(unsigned gpio)
+{
+ gpiod_unexport(gpio_to_desc(gpio));
}
EXPORT_SYMBOL_GPL(gpio_unexport);
@@ -975,9 +1004,9 @@ static int gpiochip_export(struct gpio_chip *chip)
unsigned gpio;
spin_lock_irqsave(&gpio_lock, flags);
- gpio = chip->base;
- while (gpio_desc[gpio].chip == chip)
- gpio_desc[gpio++].chip = NULL;
+ gpio = 0;
+ while (gpio < chip->ngpio)
+ chip->desc[gpio++].chip = NULL;
spin_unlock_irqrestore(&gpio_lock, flags);
pr_debug("%s: chip %s status %d\n", __func__,
@@ -1012,7 +1041,7 @@ static int __init gpiolib_sysfs_init(void)
{
int status;
unsigned long flags;
- unsigned gpio;
+ struct gpio_chip *chip;
status = class_register(&gpio_class);
if (status < 0)
@@ -1025,10 +1054,7 @@ static int __init gpiolib_sysfs_init(void)
* registered, and so arch_initcall() can always gpio_export().
*/
spin_lock_irqsave(&gpio_lock, flags);
- for (gpio = 0; gpio < ARCH_NR_GPIOS; gpio++) {
- struct gpio_chip *chip;
-
- chip = gpio_desc[gpio].chip;
+ list_for_each_entry(chip, &gpio_chips, list) {
if (!chip || chip->exported)
continue;
@@ -1053,8 +1079,66 @@ static inline void gpiochip_unexport(struct gpio_chip *chip)
{
}
+static inline int gpiod_export(struct gpio_desc *desc,
+ bool direction_may_change)
+{
+ return -ENOSYS;
+}
+
+static inline int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc)
+{
+ return -ENOSYS;
+}
+
+static inline int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
+{
+ return -ENOSYS;
+}
+
+static inline void gpiod_unexport(struct gpio_desc *desc)
+{
+}
+
#endif /* CONFIG_GPIO_SYSFS */
+/*
+ * Add a new chip to the global chips list, keeping the list of chips sorted
+ * by base order.
+ *
+ * Return -EBUSY if the new chip overlaps with some other chip's integer
+ * space.
+ */
+static int gpiochip_add_to_list(struct gpio_chip *chip)
+{
+ struct list_head *pos = &gpio_chips;
+ struct gpio_chip *_chip;
+ int err = 0;
+
+ /* find where to insert our chip */
+ list_for_each(pos, &gpio_chips) {
+ _chip = list_entry(pos, struct gpio_chip, list);
+ /* shall we insert before _chip? */
+ if (_chip->base >= chip->base + chip->ngpio)
+ break;
+ }
+
+ /* are we stepping on the chip right before? */
+ if (pos != &gpio_chips && pos->prev != &gpio_chips) {
+ _chip = list_entry(pos->prev, struct gpio_chip, list);
+ if (_chip->base + _chip->ngpio > chip->base) {
+ dev_err(chip->dev,
+ "GPIO integer space overlap, cannot add chip\n");
+ err = -EBUSY;
+ }
+ }
+
+ if (!err)
+ list_add_tail(&chip->list, pos);
+
+ return err;
+}
+
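A short worked example of the insertion and overlap check above (arbitrary bases):

	/*
	 * With chips already covering [0,32) and [64,96), a new chip at base 32
	 * with ngpio 16 is inserted between them; a new chip at base 24 with
	 * ngpio 16 is rejected with -EBUSY because [24,40) overlaps [0,32).
	 */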
/**
* gpiochip_add() - register a gpio_chip
* @chip: the chip to register, with chip->base initialized
@@ -1096,16 +1180,14 @@ int gpiochip_add(struct gpio_chip *chip)
chip->base = base;
}
- /* these GPIO numbers must not be managed by another gpio_chip */
- for (id = base; id < base + chip->ngpio; id++) {
- if (gpio_desc[id].chip != NULL) {
- status = -EBUSY;
- break;
- }
- }
+ status = gpiochip_add_to_list(chip);
+
if (status == 0) {
- for (id = base; id < base + chip->ngpio; id++) {
- gpio_desc[id].chip = chip;
+ chip->desc = &gpio_desc[chip->base];
+
+ for (id = 0; id < chip->ngpio; id++) {
+ struct gpio_desc *desc = &chip->desc[id];
+ desc->chip = chip;
/* REVISIT: most hardware initializes GPIOs as
* inputs (often with pullups enabled) so power
@@ -1114,7 +1196,7 @@ int gpiochip_add(struct gpio_chip *chip)
* and in case chip->get_direction is not set,
* we may expose the wrong direction in sysfs.
*/
- gpio_desc[id].flags = !chip->direction_input
+ desc->flags = !chip->direction_input
? (1 << FLAG_IS_OUT)
: 0;
}
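For context, a minimal driver-side registration sketch that exercises this path; the callbacks are assumed to belong to a hypothetical driver, and base = -1 requests dynamic placement via gpiochip_find_base():

	static struct gpio_chip example_chip = {
		.label            = "example",
		.owner            = THIS_MODULE,
		.base             = -1,		/* let gpiolib pick a free range */
		.ngpio            = 8,
		.direction_input  = example_direction_input,
		.direction_output = example_direction_output,
		.get              = example_get,
		.set              = example_set,
	};

	ret = gpiochip_add(&example_chip);	/* sets chip->desc and links chip->list */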
@@ -1167,15 +1249,17 @@ int gpiochip_remove(struct gpio_chip *chip)
gpiochip_remove_pin_ranges(chip);
of_gpiochip_remove(chip);
- for (id = chip->base; id < chip->base + chip->ngpio; id++) {
- if (test_bit(FLAG_REQUESTED, &gpio_desc[id].flags)) {
+ for (id = 0; id < chip->ngpio; id++) {
+ if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) {
status = -EBUSY;
break;
}
}
if (status == 0) {
- for (id = chip->base; id < chip->base + chip->ngpio; id++)
- gpio_desc[id].chip = NULL;
+ for (id = 0; id < chip->ngpio; id++)
+ chip->desc[id].chip = NULL;
+
+ list_del(&chip->list);
}
spin_unlock_irqrestore(&gpio_lock, flags);
@@ -1202,20 +1286,17 @@ struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *chip,
void *data))
{
- struct gpio_chip *chip = NULL;
+ struct gpio_chip *chip;
unsigned long flags;
- int i;
spin_lock_irqsave(&gpio_lock, flags);
- for (i = 0; i < ARCH_NR_GPIOS; i++) {
- if (!gpio_desc[i].chip)
- continue;
-
- if (match(gpio_desc[i].chip, data)) {
- chip = gpio_desc[i].chip;
+ list_for_each_entry(chip, &gpio_chips, list)
+ if (match(chip, data))
break;
- }
- }
+
+ /* No match? */
+ if (&chip->list == &gpio_chips)
+ chip = NULL;
spin_unlock_irqrestore(&gpio_lock, flags);
return chip;
@@ -1297,20 +1378,18 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
* on each other, and help provide better diagnostics in debugfs.
* They're called even less than the "set direction" calls.
*/
-int gpio_request(unsigned gpio, const char *label)
+static int gpiod_request(struct gpio_desc *desc, const char *label)
{
- struct gpio_desc *desc;
struct gpio_chip *chip;
int status = -EPROBE_DEFER;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
- if (!gpio_is_valid(gpio)) {
+ if (!desc) {
status = -EINVAL;
goto done;
}
- desc = &gpio_desc[gpio];
chip = desc->chip;
if (chip == NULL)
goto done;
@@ -1334,7 +1413,7 @@ int gpio_request(unsigned gpio, const char *label)
if (chip->request) {
/* chip->request may sleep */
spin_unlock_irqrestore(&gpio_lock, flags);
- status = chip->request(chip, gpio - chip->base);
+ status = chip->request(chip, gpio_chip_hwgpio(desc));
spin_lock_irqsave(&gpio_lock, flags);
if (status < 0) {
@@ -1347,42 +1426,46 @@ int gpio_request(unsigned gpio, const char *label)
if (chip->get_direction) {
/* chip->get_direction may sleep */
spin_unlock_irqrestore(&gpio_lock, flags);
- gpio_get_direction(gpio);
+ gpiod_get_direction(desc);
spin_lock_irqsave(&gpio_lock, flags);
}
done:
if (status)
- pr_debug("gpio_request: gpio-%d (%s) status %d\n",
- gpio, label ? : "?", status);
+ pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
+ desc ? desc_to_gpio(desc) : -1,
+ label ? : "?", status);
spin_unlock_irqrestore(&gpio_lock, flags);
return status;
}
+
+int gpio_request(unsigned gpio, const char *label)
+{
+ return gpiod_request(gpio_to_desc(gpio), label);
+}
EXPORT_SYMBOL_GPL(gpio_request);
-void gpio_free(unsigned gpio)
+static void gpiod_free(struct gpio_desc *desc)
{
unsigned long flags;
- struct gpio_desc *desc;
struct gpio_chip *chip;
might_sleep();
- if (!gpio_is_valid(gpio)) {
+ if (!desc) {
WARN_ON(extra_checks);
return;
}
- gpio_unexport(gpio);
+ gpiod_unexport(desc);
spin_lock_irqsave(&gpio_lock, flags);
- desc = &gpio_desc[gpio];
chip = desc->chip;
if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) {
if (chip->free) {
spin_unlock_irqrestore(&gpio_lock, flags);
might_sleep_if(chip->can_sleep);
- chip->free(chip, gpio - chip->base);
+ chip->free(chip, gpio_chip_hwgpio(desc));
spin_lock_irqsave(&gpio_lock, flags);
}
desc_set_label(desc, NULL);
@@ -1396,6 +1479,11 @@ void gpio_free(unsigned gpio)
spin_unlock_irqrestore(&gpio_lock, flags);
}
+
+void gpio_free(unsigned gpio)
+{
+ gpiod_free(gpio_to_desc(gpio));
+}
EXPORT_SYMBOL_GPL(gpio_free);
/**
@@ -1406,29 +1494,32 @@ EXPORT_SYMBOL_GPL(gpio_free);
*/
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
{
+ struct gpio_desc *desc;
int err;
- err = gpio_request(gpio, label);
+ desc = gpio_to_desc(gpio);
+
+ err = gpiod_request(desc, label);
if (err)
return err;
if (flags & GPIOF_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &gpio_desc[gpio].flags);
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
if (flags & GPIOF_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &gpio_desc[gpio].flags);
+ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
if (flags & GPIOF_DIR_IN)
- err = gpio_direction_input(gpio);
+ err = gpiod_direction_input(desc);
else
- err = gpio_direction_output(gpio,
+ err = gpiod_direction_output(desc,
(flags & GPIOF_INIT_HIGH) ? 1 : 0);
if (err)
goto free_gpio;
if (flags & GPIOF_EXPORT) {
- err = gpio_export(gpio, flags & GPIOF_EXPORT_CHANGEABLE);
+ err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE);
if (err)
goto free_gpio;
}
@@ -1436,7 +1527,7 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
return 0;
free_gpio:
- gpio_free(gpio);
+ gpiod_free(desc);
return err;
}
EXPORT_SYMBOL_GPL(gpio_request_one);
@@ -1491,14 +1582,17 @@ EXPORT_SYMBOL_GPL(gpio_free_array);
*/
const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
{
- unsigned gpio = chip->base + offset;
+ struct gpio_desc *desc;
- if (!gpio_is_valid(gpio) || gpio_desc[gpio].chip != chip)
+ if (!GPIO_OFFSET_VALID(chip, offset))
return NULL;
- if (test_bit(FLAG_REQUESTED, &gpio_desc[gpio].flags) == 0)
+
+ desc = &chip->desc[offset];
+
+ if (test_bit(FLAG_REQUESTED, &desc->flags) == 0)
return NULL;
#ifdef CONFIG_DEBUG_FS
- return gpio_desc[gpio].label;
+ return desc->label;
#else
return "?";
#endif
@@ -1515,24 +1609,21 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
* rely on gpio_request() having been called beforehand.
*/
-int gpio_direction_input(unsigned gpio)
+static int gpiod_direction_input(struct gpio_desc *desc)
{
unsigned long flags;
struct gpio_chip *chip;
- struct gpio_desc *desc = &gpio_desc[gpio];
int status = -EINVAL;
+ int offset;
spin_lock_irqsave(&gpio_lock, flags);
- if (!gpio_is_valid(gpio))
+ if (!desc)
goto fail;
chip = desc->chip;
if (!chip || !chip->get || !chip->direction_input)
goto fail;
- gpio -= chip->base;
- if (gpio >= chip->ngpio)
- goto fail;
- status = gpio_ensure_requested(desc, gpio);
+ status = gpio_ensure_requested(desc);
if (status < 0)
goto fail;
@@ -1542,11 +1633,12 @@ int gpio_direction_input(unsigned gpio)
might_sleep_if(chip->can_sleep);
+ offset = gpio_chip_hwgpio(desc);
if (status) {
- status = chip->request(chip, gpio);
+ status = chip->request(chip, offset);
if (status < 0) {
pr_debug("GPIO-%d: chip request fail, %d\n",
- chip->base + gpio, status);
+ desc_to_gpio(desc), status);
/* and it's not available to anyone else ...
* gpio_request() is the fully clean solution.
*/
@@ -1554,48 +1646,54 @@ int gpio_direction_input(unsigned gpio)
}
}
- status = chip->direction_input(chip, gpio);
+ status = chip->direction_input(chip, offset);
if (status == 0)
clear_bit(FLAG_IS_OUT, &desc->flags);
- trace_gpio_direction(chip->base + gpio, 1, status);
+ trace_gpio_direction(desc_to_gpio(desc), 1, status);
lose:
return status;
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status)
+ if (status) {
+ int gpio = -1;
+ if (desc)
+ gpio = desc_to_gpio(desc);
pr_debug("%s: gpio-%d status %d\n",
__func__, gpio, status);
+ }
return status;
}
+
+int gpio_direction_input(unsigned gpio)
+{
+ return gpiod_direction_input(gpio_to_desc(gpio));
+}
EXPORT_SYMBOL_GPL(gpio_direction_input);
-int gpio_direction_output(unsigned gpio, int value)
+static int gpiod_direction_output(struct gpio_desc *desc, int value)
{
unsigned long flags;
struct gpio_chip *chip;
- struct gpio_desc *desc = &gpio_desc[gpio];
int status = -EINVAL;
+ int offset;
/* Open drain pin should not be driven to 1 */
if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- return gpio_direction_input(gpio);
+ return gpiod_direction_input(desc);
/* Open source pin should not be driven to 0 */
if (!value && test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- return gpio_direction_input(gpio);
+ return gpiod_direction_input(desc);
spin_lock_irqsave(&gpio_lock, flags);
- if (!gpio_is_valid(gpio))
+ if (!desc)
goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->direction_output)
goto fail;
- gpio -= chip->base;
- if (gpio >= chip->ngpio)
- goto fail;
- status = gpio_ensure_requested(desc, gpio);
+ status = gpio_ensure_requested(desc);
if (status < 0)
goto fail;
@@ -1605,11 +1703,12 @@ int gpio_direction_output(unsigned gpio, int value)
might_sleep_if(chip->can_sleep);
+ offset = gpio_chip_hwgpio(desc);
if (status) {
- status = chip->request(chip, gpio);
+ status = chip->request(chip, offset);
if (status < 0) {
pr_debug("GPIO-%d: chip request fail, %d\n",
- chip->base + gpio, status);
+ desc_to_gpio(desc), status);
/* and it's not available to anyone else ...
* gpio_request() is the fully clean solution.
*/
@@ -1617,20 +1716,29 @@ int gpio_direction_output(unsigned gpio, int value)
}
}
- status = chip->direction_output(chip, gpio, value);
+ status = chip->direction_output(chip, offset, value);
if (status == 0)
set_bit(FLAG_IS_OUT, &desc->flags);
- trace_gpio_value(chip->base + gpio, 0, value);
- trace_gpio_direction(chip->base + gpio, 0, status);
+ trace_gpio_value(desc_to_gpio(desc), 0, value);
+ trace_gpio_direction(desc_to_gpio(desc), 0, status);
lose:
return status;
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status)
+ if (status) {
+ int gpio = -1;
+ if (desc)
+ gpio = desc_to_gpio(desc);
pr_debug("%s: gpio-%d status %d\n",
__func__, gpio, status);
+ }
return status;
}
+
+int gpio_direction_output(unsigned gpio, int value)
+{
+ return gpiod_direction_output(gpio_to_desc(gpio), value);
+}
EXPORT_SYMBOL_GPL(gpio_direction_output);
/**
@@ -1638,24 +1746,22 @@ EXPORT_SYMBOL_GPL(gpio_direction_output);
* @gpio: the gpio to set debounce time
* @debounce: debounce time is microseconds
*/
-int gpio_set_debounce(unsigned gpio, unsigned debounce)
+static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
unsigned long flags;
struct gpio_chip *chip;
- struct gpio_desc *desc = &gpio_desc[gpio];
int status = -EINVAL;
+ int offset;
spin_lock_irqsave(&gpio_lock, flags);
- if (!gpio_is_valid(gpio))
+ if (!desc)
goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->set_debounce)
goto fail;
- gpio -= chip->base;
- if (gpio >= chip->ngpio)
- goto fail;
- status = gpio_ensure_requested(desc, gpio);
+
+ status = gpio_ensure_requested(desc);
if (status < 0)
goto fail;
@@ -1665,16 +1771,26 @@ int gpio_set_debounce(unsigned gpio, unsigned debounce)
might_sleep_if(chip->can_sleep);
- return chip->set_debounce(chip, gpio, debounce);
+ offset = gpio_chip_hwgpio(desc);
+ return chip->set_debounce(chip, offset, debounce);
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status)
+ if (status) {
+ int gpio = -1;
+ if (desc)
+ gpio = desc_to_gpio(desc);
pr_debug("%s: gpio-%d status %d\n",
__func__, gpio, status);
+ }
return status;
}
+
+int gpio_set_debounce(unsigned gpio, unsigned debounce)
+{
+ return gpiod_set_debounce(gpio_to_desc(gpio), debounce);
+}
EXPORT_SYMBOL_GPL(gpio_set_debounce);
/* I/O calls are only valid after configuration completed; the relevant
@@ -1708,18 +1824,25 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
* It returns the zero or nonzero value provided by the associated
* gpio_chip.get() method; or zero if no such method is provided.
*/
-int __gpio_get_value(unsigned gpio)
+static int gpiod_get_value(struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
+ int offset;
- chip = gpio_to_chip(gpio);
+ chip = desc->chip;
+ offset = gpio_chip_hwgpio(desc);
/* Should be using gpio_get_value_cansleep() */
WARN_ON(chip->can_sleep);
- value = chip->get ? chip->get(chip, gpio - chip->base) : 0;
- trace_gpio_value(gpio, 1, value);
+ value = chip->get ? chip->get(chip, offset) : 0;
+ trace_gpio_value(desc_to_gpio(desc), 1, value);
return value;
}
+
+int __gpio_get_value(unsigned gpio)
+{
+ return gpiod_get_value(gpio_to_desc(gpio));
+}
EXPORT_SYMBOL_GPL(__gpio_get_value);
/*
@@ -1728,23 +1851,25 @@ EXPORT_SYMBOL_GPL(__gpio_get_value);
* @chip: Gpio chip.
 * @value: Non-zero for setting it HIGH, otherwise it will be set to LOW.
*/
-static void _gpio_set_open_drain_value(unsigned gpio,
- struct gpio_chip *chip, int value)
+static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value)
{
int err = 0;
+ struct gpio_chip *chip = desc->chip;
+ int offset = gpio_chip_hwgpio(desc);
+
if (value) {
- err = chip->direction_input(chip, gpio - chip->base);
+ err = chip->direction_input(chip, offset);
if (!err)
- clear_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags);
+ clear_bit(FLAG_IS_OUT, &desc->flags);
} else {
- err = chip->direction_output(chip, gpio - chip->base, 0);
+ err = chip->direction_output(chip, offset, 0);
if (!err)
- set_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags);
+ set_bit(FLAG_IS_OUT, &desc->flags);
}
- trace_gpio_direction(gpio, value, err);
+ trace_gpio_direction(desc_to_gpio(desc), value, err);
if (err < 0)
pr_err("%s: Error in set_value for open drain gpio%d err %d\n",
- __func__, gpio, err);
+ __func__, desc_to_gpio(desc), err);
}
/*
@@ -1753,26 +1878,27 @@ static void _gpio_set_open_drain_value(unsigned gpio,
* @chip: Gpio chip.
 * @value: Non-zero for setting it HIGH, otherwise it will be set to LOW.
*/
-static void _gpio_set_open_source_value(unsigned gpio,
- struct gpio_chip *chip, int value)
+static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
{
int err = 0;
+ struct gpio_chip *chip = desc->chip;
+ int offset = gpio_chip_hwgpio(desc);
+
if (value) {
- err = chip->direction_output(chip, gpio - chip->base, 1);
+ err = chip->direction_output(chip, offset, 1);
if (!err)
- set_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags);
+ set_bit(FLAG_IS_OUT, &desc->flags);
} else {
- err = chip->direction_input(chip, gpio - chip->base);
+ err = chip->direction_input(chip, offset);
if (!err)
- clear_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags);
+ clear_bit(FLAG_IS_OUT, &desc->flags);
}
- trace_gpio_direction(gpio, !value, err);
+ trace_gpio_direction(desc_to_gpio(desc), !value, err);
if (err < 0)
pr_err("%s: Error in set_value for open source gpio%d err %d\n",
- __func__, gpio, err);
+ __func__, desc_to_gpio(desc), err);
}
-
/**
* __gpio_set_value() - assign a gpio's value
* @gpio: gpio whose value will be assigned
@@ -1782,20 +1908,25 @@ static void _gpio_set_open_source_value(unsigned gpio,
* This is used directly or indirectly to implement gpio_set_value().
* It invokes the associated gpio_chip.set() method.
*/
-void __gpio_set_value(unsigned gpio, int value)
+static void gpiod_set_value(struct gpio_desc *desc, int value)
{
struct gpio_chip *chip;
- chip = gpio_to_chip(gpio);
+ chip = desc->chip;
/* Should be using gpio_set_value_cansleep() */
WARN_ON(chip->can_sleep);
- trace_gpio_value(gpio, 0, value);
- if (test_bit(FLAG_OPEN_DRAIN, &gpio_desc[gpio].flags))
- _gpio_set_open_drain_value(gpio, chip, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &gpio_desc[gpio].flags))
- _gpio_set_open_source_value(gpio, chip, value);
+ trace_gpio_value(desc_to_gpio(desc), 0, value);
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ _gpio_set_open_drain_value(desc, value);
+ else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ _gpio_set_open_source_value(desc, value);
else
- chip->set(chip, gpio - chip->base, value);
+ chip->set(chip, gpio_chip_hwgpio(desc), value);
+}
+
+void __gpio_set_value(unsigned gpio, int value)
+{
+ return gpiod_set_value(gpio_to_desc(gpio), value);
}
EXPORT_SYMBOL_GPL(__gpio_set_value);
@@ -1807,14 +1938,15 @@ EXPORT_SYMBOL_GPL(__gpio_set_value);
* This is used directly or indirectly to implement gpio_cansleep(). It
* returns nonzero if access reading or writing the GPIO value can sleep.
*/
-int __gpio_cansleep(unsigned gpio)
+static int gpiod_cansleep(struct gpio_desc *desc)
{
- struct gpio_chip *chip;
-
/* only call this on GPIOs that are valid! */
- chip = gpio_to_chip(gpio);
+ return desc->chip->can_sleep;
+}
- return chip->can_sleep;
+int __gpio_cansleep(unsigned gpio)
+{
+ return gpiod_cansleep(gpio_to_desc(gpio));
}
EXPORT_SYMBOL_GPL(__gpio_cansleep);
@@ -1827,50 +1959,67 @@ EXPORT_SYMBOL_GPL(__gpio_cansleep);
* It returns the number of the IRQ signaled by this (input) GPIO,
* or a negative errno.
*/
-int __gpio_to_irq(unsigned gpio)
+static int gpiod_to_irq(struct gpio_desc *desc)
{
struct gpio_chip *chip;
+ int offset;
- chip = gpio_to_chip(gpio);
- return chip->to_irq ? chip->to_irq(chip, gpio - chip->base) : -ENXIO;
+ chip = desc->chip;
+ offset = gpio_chip_hwgpio(desc);
+ return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
}
-EXPORT_SYMBOL_GPL(__gpio_to_irq);
+int __gpio_to_irq(unsigned gpio)
+{
+ return gpiod_to_irq(gpio_to_desc(gpio));
+}
+EXPORT_SYMBOL_GPL(__gpio_to_irq);
/* There's no value in making it easy to inline GPIO calls that may sleep.
* Common examples include ones connected to I2C or SPI chips.
*/
-int gpio_get_value_cansleep(unsigned gpio)
+static int gpiod_get_value_cansleep(struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
+ int offset;
might_sleep_if(extra_checks);
- chip = gpio_to_chip(gpio);
- value = chip->get ? chip->get(chip, gpio - chip->base) : 0;
- trace_gpio_value(gpio, 1, value);
+ chip = desc->chip;
+ offset = gpio_chip_hwgpio(desc);
+ value = chip->get ? chip->get(chip, offset) : 0;
+ trace_gpio_value(desc_to_gpio(desc), 1, value);
return value;
}
+
+int gpio_get_value_cansleep(unsigned gpio)
+{
+ return gpiod_get_value_cansleep(gpio_to_desc(gpio));
+}
EXPORT_SYMBOL_GPL(gpio_get_value_cansleep);
-void gpio_set_value_cansleep(unsigned gpio, int value)
+static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
struct gpio_chip *chip;
might_sleep_if(extra_checks);
- chip = gpio_to_chip(gpio);
- trace_gpio_value(gpio, 0, value);
- if (test_bit(FLAG_OPEN_DRAIN, &gpio_desc[gpio].flags))
- _gpio_set_open_drain_value(gpio, chip, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &gpio_desc[gpio].flags))
- _gpio_set_open_source_value(gpio, chip, value);
+ chip = desc->chip;
+ trace_gpio_value(desc_to_gpio(desc), 0, value);
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ _gpio_set_open_drain_value(desc, value);
+ else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ _gpio_set_open_source_value(desc, value);
else
- chip->set(chip, gpio - chip->base, value);
+ chip->set(chip, gpio_chip_hwgpio(desc), value);
}
-EXPORT_SYMBOL_GPL(gpio_set_value_cansleep);
+void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+ return gpiod_set_value_cansleep(gpio_to_desc(gpio), value);
+}
+EXPORT_SYMBOL_GPL(gpio_set_value_cansleep);
#ifdef CONFIG_DEBUG_FS
@@ -1878,14 +2027,14 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
unsigned i;
unsigned gpio = chip->base;
- struct gpio_desc *gdesc = &gpio_desc[gpio];
+ struct gpio_desc *gdesc = &chip->desc[0];
int is_out;
for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) {
if (!test_bit(FLAG_REQUESTED, &gdesc->flags))
continue;
- gpio_get_direction(gpio);
+ gpiod_get_direction(gdesc);
is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
seq_printf(s, " gpio-%-3d (%-20.20s) %s %s",
gpio, gdesc->label,
@@ -1899,46 +2048,35 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
{
+ unsigned long flags;
struct gpio_chip *chip = NULL;
- unsigned int gpio;
- void *ret = NULL;
- loff_t index = 0;
-
- /* REVISIT this isn't locked against gpio_chip removal ... */
+ loff_t index = *pos;
- for (gpio = 0; gpio_is_valid(gpio); gpio++) {
- if (gpio_desc[gpio].chip == chip)
- continue;
-
- chip = gpio_desc[gpio].chip;
- if (!chip)
- continue;
+ s->private = "";
- if (index++ >= *pos) {
- ret = chip;
- break;
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_for_each_entry(chip, &gpio_chips, list)
+ if (index-- == 0) {
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ return chip;
}
- }
-
- s->private = "";
+ spin_unlock_irqrestore(&gpio_lock, flags);
- return ret;
+ return NULL;
}
static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
+ unsigned long flags;
struct gpio_chip *chip = v;
- unsigned int gpio;
void *ret = NULL;
- /* skip GPIOs provided by the current chip */
- for (gpio = chip->base + chip->ngpio; gpio_is_valid(gpio); gpio++) {
- chip = gpio_desc[gpio].chip;
- if (chip) {
- ret = chip;
- break;
- }
- }
+ spin_lock_irqsave(&gpio_lock, flags);
+ if (list_is_last(&chip->list, &gpio_chips))
+ ret = NULL;
+ else
+ ret = list_entry(chip->list.next, struct gpio_chip, list);
+ spin_unlock_irqrestore(&gpio_lock, flags);
s->private = "\n";
++*pos;
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index cc9277885dd0..30879df3daea 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/ vga/ stub/
+obj-y += drm/ vga/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 983201b450f1..1e82882da9de 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -7,6 +7,7 @@
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
+ select HDMI
select I2C
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
@@ -69,6 +70,8 @@ config DRM_KMS_CMA_HELPER
help
Choose this if you need the KMS CMA helper functions
+source "drivers/gpu/drm/i2c/Kconfig"
+
config DRM_TDFX
tristate "3dfx Banshee/Voodoo3+"
depends on DRM && PCI
@@ -96,6 +99,7 @@ config DRM_RADEON
select DRM_TTM
select POWER_SUPPLY
select HWMON
+ select BACKLIGHT_CLASS_DEVICE
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -212,3 +216,7 @@ source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
+
+source "drivers/gpu/drm/omapdrm/Kconfig"
+
+source "drivers/gpu/drm/tilcdc/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6f58c81cfcbc..0d59b24f8d23 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -50,4 +50,6 @@ obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_TEGRA) += tegra/
+obj-$(CONFIG_DRM_OMAP) += omapdrm/
+obj-$(CONFIG_DRM_TILCDC) += tilcdc/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 2d2c2f8d6dc6..df0d0a08097a 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -94,9 +94,9 @@ static int ast_drm_thaw(struct drm_device *dev)
ast_post_gpu(dev);
drm_mode_config_reset(dev);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
drm_helper_resume_force_mode(dev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
console_lock();
ast_fbdev_set_suspend(dev, 0);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5ccf984f063a..528429252f0f 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -98,6 +98,8 @@ struct ast_private {
struct drm_gem_object *cursor_cache;
uint64_t cursor_cache_gpu_addr;
+ /* Access to this cache is protected by the crtc->mutex of the only crtc
+ * we have. */
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
};
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index d9ec77959dff..34931fe7d2c5 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -40,6 +40,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
#include "ast_drv.h"
static void ast_dirty_update(struct ast_fbdev *afbdev,
@@ -145,9 +146,10 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
return ret;
}
-static int astfb_create(struct ast_fbdev *afbdev,
+static int astfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
struct drm_device *dev = afbdev->helper.dev;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_framebuffer *fb;
@@ -248,26 +250,10 @@ static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = ast_crtc->lut_b[regno] << 8;
}
-static int ast_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = astfb_create(afbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
.gamma_set = ast_fb_gamma_set,
.gamma_get = ast_fb_gamma_get,
- .fb_probe = ast_find_or_create_single,
+ .fb_probe = astfb_create,
};
static void ast_fbdev_destroy(struct drm_device *dev,
@@ -290,6 +276,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&afbdev->helper);
vfree(afbdev->sysram);
+ drm_framebuffer_unregister_private(&afb->base);
drm_framebuffer_cleanup(&afb->base);
}
@@ -313,6 +300,10 @@ int ast_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&afbdev->helper, 32);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f668e6cc0f7a..f60fd7bd1183 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -246,16 +246,8 @@ static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
kfree(fb);
}
-static int ast_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file,
- unsigned int *handle)
-{
- return -EINVAL;
-}
-
static const struct drm_framebuffer_funcs ast_fb_funcs = {
.destroy = ast_user_framebuffer_destroy,
- .create_handle = ast_user_framebuffer_create_handle,
};
@@ -266,13 +258,13 @@ int ast_framebuffer_init(struct drm_device *dev,
{
int ret;
+ drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
+ ast_fb->obj = obj;
ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
- ast_fb->obj = obj;
return 0;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 6c6b4c87d309..e25afccaf85b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
@@ -120,9 +121,10 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
return ret;
}
-static int cirrusfb_create(struct cirrus_fbdev *gfbdev,
+static int cirrusfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
struct drm_device *dev = gfbdev->helper.dev;
struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
struct fb_info *info;
@@ -219,23 +221,6 @@ out_iounmap:
return ret;
}
-static int cirrus_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size
- *sizes)
-{
- struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = cirrusfb_create(gfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
@@ -258,6 +243,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
vfree(gfbdev->sysram);
drm_fb_helper_fini(&gfbdev->helper);
+ drm_framebuffer_unregister_private(&gfb->base);
drm_framebuffer_cleanup(&gfb->base);
return 0;
@@ -266,7 +252,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
.gamma_set = cirrus_crtc_fb_gamma_set,
.gamma_get = cirrus_crtc_fb_gamma_get,
- .fb_probe = cirrus_fb_find_or_create_single,
+ .fb_probe = cirrusfb_create,
};
int cirrus_fbdev_init(struct cirrus_device *cdev)
@@ -290,6 +276,9 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
return ret;
}
drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(cdev->dev);
drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 6a9b12e88d46..35cbae827771 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -23,16 +23,8 @@ static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
kfree(fb);
}
-static int cirrus_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- return 0;
-}
-
static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
.destroy = cirrus_user_framebuffer_destroy,
- .create_handle = cirrus_user_framebuffer_create_handle,
};
int cirrus_framebuffer_init(struct drm_device *dev,
@@ -42,13 +34,13 @@ int cirrus_framebuffer_init(struct drm_device *dev,
{
int ret;
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
if (ret) {
DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
- gfb->obj = obj;
return 0;
}
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 45adf97e678f..725968d38976 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -74,24 +74,13 @@ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
*/
static int drm_ctxbitmap_next(struct drm_device * dev)
{
- int new_id;
int ret;
-again:
- if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&dev->ctx_idr, NULL,
- DRM_RESERVED_CONTEXTS, &new_id);
+ ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
+ GFP_KERNEL);
mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
- return ret;
-
- return new_id;
+ return ret;
}
/**
@@ -118,7 +107,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
void drm_ctxbitmap_cleanup(struct drm_device * dev)
{
mutex_lock(&dev->struct_mutex);
- idr_remove_all(&dev->ctx_idr);
+ idr_destroy(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f2d667b8bee2..792c3e3795ca 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -37,6 +37,54 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+/**
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
+ *
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented.
+ */
+void drm_modeset_lock_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_modeset_lock_all);
+
+/**
+ * drm_modeset_unlock_all - drop all modeset locks
+ * @dev: device
+ */
+void drm_modeset_unlock_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ mutex_unlock(&crtc->mutex);
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_modeset_unlock_all);
+
+/**
+ * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
+ * @dev: device
+ */
+void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ WARN_ON(!mutex_is_locked(&crtc->mutex));
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+}
+EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
+
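Typical driver-side use of the new locking helpers, as in the ast_drm_thaw() hunk earlier in this patch (sketch only):

	drm_modeset_lock_all(dev);
	drm_helper_resume_force_mode(dev);	/* any operation touching all CRTCs */
	drm_modeset_unlock_all(dev);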
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
char *fnname(int val) \
@@ -203,12 +251,10 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
}
/**
- * drm_mode_object_get - allocate a new identifier
+ * drm_mode_object_get - allocate a new modeset identifier
* @dev: DRM device
- * @ptr: object pointer, used to generate unique ID
- * @type: object type
- *
- * LOCKING:
+ * @obj: object pointer, used to generate unique ID
+ * @obj_type: object type
*
* Create a unique identifier based on @ptr in @dev's identifier space. Used
* for tracking modes, CRTCs and connectors.
@@ -220,35 +266,27 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
static int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
{
- int new_id = 0;
int ret;
-again:
- if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Ran out memory getting a mode number\n");
- return -ENOMEM;
- }
-
mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
+ ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ /*
+ * Set up the object linking under the protection of the idr
+ * lock so that other users can't see inconsistent state.
+ */
+ obj->id = ret;
+ obj->type = obj_type;
+ }
mutex_unlock(&dev->mode_config.idr_mutex);
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
- return ret;
- obj->id = new_id;
- obj->type = obj_type;
- return 0;
+ return ret < 0 ? ret : 0;
}
/**
- * drm_mode_object_put - free an identifer
+ * drm_mode_object_put - free a modeset identifier
* @dev: DRM device
- * @id: ID to free
- *
- * LOCKING:
- * Caller must hold DRM mode_config lock.
+ * @object: object to free
*
* Free @id from @dev's unique identifier pool.
*/
@@ -260,11 +298,24 @@ static void drm_mode_object_put(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
}
+/**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+ * @id: id of the mode object
+ * @type: type of the mode object
+ *
+ * Note that framebuffers cannot be looked up with this function - since those
+ * are reference counted, they need special treatment.
+ */
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
+ /* Framebuffers are reference counted and need their own lookup
+ * function.*/
+ WARN_ON(type == DRM_MODE_OBJECT_FB);
+
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
if (!obj || (obj->type != type) || (obj->id != id))
@@ -278,13 +329,18 @@ EXPORT_SYMBOL(drm_mode_object_find);
/**
* drm_framebuffer_init - initialize a framebuffer
* @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
+ * @fb: framebuffer to be initialized
+ * @funcs: ... with these functions
*
* Allocates an ID for the framebuffer's parent mode object, sets its mode
* functions & device file and adds it to the master fd list.
*
+ * IMPORTANT:
+ * This function publishes the fb and makes it available for concurrent access
+ * by other users, which means that by this point the fb _must_ be fully set up -
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
* RETURNS:
* Zero on success, error code on failure.
*/
@@ -293,16 +349,23 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
{
int ret;
+ mutex_lock(&dev->mode_config.fb_lock);
kref_init(&fb->refcount);
+ INIT_LIST_HEAD(&fb->filp_head);
+ fb->dev = dev;
+ fb->funcs = funcs;
ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
if (ret)
- return ret;
+ goto out;
+
+ /* Grab the idr reference. */
+ drm_framebuffer_reference(fb);
- fb->dev = dev;
- fb->funcs = funcs;
dev->mode_config.num_fb++;
list_add(&fb->head, &dev->mode_config.fb_list);
+out:
+ mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
@@ -315,23 +378,63 @@ static void drm_framebuffer_free(struct kref *kref)
fb->funcs->destroy(fb);
}
+static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *obj = NULL;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ obj = idr_find(&dev->mode_config.crtc_idr, id);
+ if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id))
+ fb = NULL;
+ else
+ fb = obj_to_fb(obj);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return fb;
+}
+
+/**
+ * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
+ * @dev: drm device
+ * @id: id of the fb object
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+ * again.
+ */
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ fb = __drm_framebuffer_lookup(dev, id);
+ if (fb)
+ kref_get(&fb->refcount);
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return fb;
+}
+EXPORT_SYMBOL(drm_framebuffer_lookup);
+
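A minimal sketch of the reference-counted lookup pattern this introduces (fb_id is assumed to come from userspace):

	struct drm_framebuffer *fb;

	fb = drm_framebuffer_lookup(dev, fb_id);	/* takes a reference on success */
	if (!fb)
		return -ENOENT;
	/* ... use fb ... */
	drm_framebuffer_unreference(fb);		/* drop the lookup reference */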
/**
* drm_framebuffer_unreference - unref a framebuffer
+ * @fb: framebuffer to unref
*
- * LOCKING:
- * Caller must hold mode config lock.
+ * This function decrements the fb's refcount and frees it if it drops to zero.
*/
void drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
- struct drm_device *dev = fb->dev;
DRM_DEBUG("FB ID: %d\n", fb->base.id);
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
kref_put(&fb->refcount, drm_framebuffer_free);
}
EXPORT_SYMBOL(drm_framebuffer_unreference);
/**
* drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer
*/
void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
@@ -340,29 +443,74 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
}
EXPORT_SYMBOL(drm_framebuffer_reference);
+static void drm_framebuffer_free_bug(struct kref *kref)
+{
+ BUG();
+}
+
+static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ kref_put(&fb->refcount, drm_framebuffer_free_bug);
+}
+
+/* dev->mode_config.fb_lock must be held! */
+static void __drm_framebuffer_unregister(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ fb->base.id = 0;
+
+ __drm_framebuffer_unreference(fb);
+}
+
+/**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+ *
+ * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
+ * those used for fbdev. Note that the caller must hold a reference of its own,
+ * i.e. the object may not be destroyed through this call (since it would lead
+ * to a locking inversion).
+ */
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ /* Mark fb as reaped and drop idr ref. */
+ __drm_framebuffer_unregister(dev, fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+}
+EXPORT_SYMBOL(drm_framebuffer_unregister_private);
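
A minimal teardown sketch for a driver-private fb such as an fbdev framebuffer (struct foo_fbdev and its embedded fb are assumptions): unregister from the lookup idr first, then clean up the core bookkeeping, then free the driver-held storage.

static void foo_fbdev_destroy(struct foo_fbdev *fbdev)
{
	/* Drop the idr reference; our own reference keeps the fb alive. */
	drm_framebuffer_unregister_private(&fbdev->fb);
	drm_framebuffer_cleanup(&fbdev->fb);
	kfree(fbdev);
}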
+
/**
* drm_framebuffer_cleanup - remove a framebuffer object
* @fb: framebuffer to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
+ * Cleanup references to a user-created framebuffer. This function is intended
+ * to be used from the driver's ->destroy callback.
*
- * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes
- * it, setting it to NULL.
+ * Note that this function does not remove the fb from active usage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+ * the id and get back -EINVAL. Obviously this is of no concern at driver unload
+ * time.
+ *
+ * Also, the framebuffer will not be removed from the lookup idr - for
+ * user-created framebuffers this will happen in the rmfb ioctl. For
+ * driver-private objects (e.g. for fbdev) drivers need to explicitly call
+ * drm_framebuffer_unregister_private.
*/
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
- /*
- * This could be moved to drm_framebuffer_remove(), but for
- * debugging is nice to keep around the list of fb's that are
- * no longer associated w/ a drm_file but are not unreferenced
- * yet. (i915 and omapdrm have debugfs files which will show
- * this.)
- */
- drm_mode_object_put(dev, &fb->base);
+
+ mutex_lock(&dev->mode_config.fb_lock);
list_del(&fb->head);
dev->mode_config.num_fb--;
+ mutex_unlock(&dev->mode_config.fb_lock);
}
EXPORT_SYMBOL(drm_framebuffer_cleanup);
@@ -370,11 +518,13 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
* drm_framebuffer_remove - remove and unreference a framebuffer object
* @fb: framebuffer to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Scans all the CRTCs and planes in @dev's mode_config. If they're
- * using @fb, removes it, setting it to NULL.
+ * using @fb, removes it, setting it to NULL. Then drops the reference to the
+ * passed-in framebuffer. Might take the modeset locks.
+ *
+ * Note that this function optimizes the cleanup away if the caller holds the
+ * last reference to the framebuffer. It is also guaranteed to not take the
+ * modeset locks in this case.
*/
void drm_framebuffer_remove(struct drm_framebuffer *fb)
{
@@ -384,33 +534,53 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
struct drm_mode_set set;
int ret;
- /* remove from any CRTC */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb == fb) {
- /* should turn off the crtc */
- memset(&set, 0, sizeof(struct drm_mode_set));
- set.crtc = crtc;
- set.fb = NULL;
- ret = crtc->funcs->set_config(&set);
- if (ret)
- DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ WARN_ON(!list_empty(&fb->filp_head));
+
+ /*
+ * drm ABI mandates that we remove any deleted framebuffers from active
+ * usage. But since most sane clients only remove framebuffers they no
+ * longer need, try to optimize this away.
+ *
+ * Since we're holding a reference ourselves, observing a refcount of 1
+ * means that we're the last holder and can skip it. Also, the refcount
+ * can never increase from 1 again, so we don't need any barriers or
+ * locks.
+ *
+ * Note that userspace could try to race with us and instate a new
+ * usage _after_ we've cleared all current ones. The end result will be an
+ * in-use fb with fb-id == 0. Userspace is allowed to shoot itself in the
+ * foot in this manner.
+ */
+ if (atomic_read(&fb->refcount.refcount) > 1) {
+ drm_modeset_lock_all(dev);
+ /* remove from any CRTC */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb == fb) {
+ /* should turn off the crtc */
+ memset(&set, 0, sizeof(struct drm_mode_set));
+ set.crtc = crtc;
+ set.fb = NULL;
+ ret = drm_mode_set_config_internal(&set);
+ if (ret)
+ DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ }
}
- }
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- if (plane->fb == fb) {
- /* should turn off the crtc */
- ret = plane->funcs->disable_plane(plane);
- if (ret)
- DRM_ERROR("failed to disable plane with busy fb\n");
- /* disconnect the plane from the fb and crtc: */
- plane->fb = NULL;
- plane->crtc = NULL;
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ if (plane->fb == fb) {
+ /* should turn off the crtc */
+ ret = plane->funcs->disable_plane(plane);
+ if (ret)
+ DRM_ERROR("failed to disable plane with busy fb\n");
+ /* disconnect the plane from the fb and crtc: */
+ __drm_framebuffer_unreference(plane->fb);
+ plane->fb = NULL;
+ plane->crtc = NULL;
+ }
}
+ drm_modeset_unlock_all(dev);
}
- list_del(&fb->filp_head);
-
drm_framebuffer_unreference(fb);
}
EXPORT_SYMBOL(drm_framebuffer_remove);
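
To contrast the two teardown entry points, a small sketch (foo_drop_fb is hypothetical): drm_framebuffer_remove() also unbinds the fb from CRTCs and planes and may take the modeset locks, while drm_framebuffer_unreference() only drops the caller's reference.

static void foo_drop_fb(struct drm_framebuffer *fb, bool unbind)
{
	if (unbind)
		drm_framebuffer_remove(fb);		/* unbind + unref */
	else
		drm_framebuffer_unreference(fb);	/* unref only */
}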
@@ -421,9 +591,6 @@ EXPORT_SYMBOL(drm_framebuffer_remove);
* @crtc: CRTC object to init
* @funcs: callbacks for the new CRTC
*
- * LOCKING:
- * Takes mode_config lock.
- *
* Inits a new object created as base part of a driver crtc object.
*
* RETURNS:
@@ -438,7 +605,9 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
crtc->funcs = funcs;
crtc->invert_dimensions = false;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
+ mutex_init(&crtc->mutex);
+ mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
@@ -450,7 +619,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
dev->mode_config.num_crtc++;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -460,9 +629,6 @@ EXPORT_SYMBOL(drm_crtc_init);
* drm_crtc_cleanup - Cleans up the core crtc usage.
* @crtc: CRTC to cleanup
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Cleanup @crtc. Removes it from the drm modesetting space; this does NOT free
* the object, the caller does that.
*/
@@ -484,9 +650,6 @@ EXPORT_SYMBOL(drm_crtc_cleanup);
* @connector: connector the new mode is added to
* @mode: mode data
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Add @mode to @connector's mode list for later use.
*/
void drm_mode_probed_add(struct drm_connector *connector,
@@ -501,9 +664,6 @@ EXPORT_SYMBOL(drm_mode_probed_add);
* @connector: connector list to modify
* @mode: mode to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Remove @mode from @connector's mode list, then free it.
*/
void drm_mode_remove(struct drm_connector *connector,
@@ -519,10 +679,7 @@ EXPORT_SYMBOL(drm_mode_remove);
* @dev: DRM device
* @connector: the connector to init
* @funcs: callbacks for this connector
- * @name: user visible name of the connector
- *
- * LOCKING:
- * Takes mode config lock.
+ * @connector_type: user visible type of the connector
*
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
@@ -537,7 +694,7 @@ int drm_connector_init(struct drm_device *dev,
{
int ret;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
if (ret)
@@ -567,7 +724,7 @@ int drm_connector_init(struct drm_device *dev,
dev->mode_config.dpms_property, 0);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -577,9 +734,6 @@ EXPORT_SYMBOL(drm_connector_init);
* drm_connector_cleanup - cleans up an initialised connector
* @connector: connector to cleanup
*
- * LOCKING:
- * Takes mode config lock.
- *
* Cleans up the connector but doesn't free the object.
*/
void drm_connector_cleanup(struct drm_connector *connector)
@@ -596,11 +750,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
- mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
dev->mode_config.num_connector--;
- mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_connector_cleanup);
@@ -622,7 +774,7 @@ int drm_encoder_init(struct drm_device *dev,
{
int ret;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
@@ -636,7 +788,7 @@ int drm_encoder_init(struct drm_device *dev,
dev->mode_config.num_encoder++;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -645,11 +797,11 @@ EXPORT_SYMBOL(drm_encoder_init);
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
list_del(&encoder->head);
dev->mode_config.num_encoder--;
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_encoder_cleanup);
@@ -661,7 +813,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
{
int ret;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
@@ -695,7 +847,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -705,7 +857,7 @@ void drm_plane_cleanup(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
kfree(plane->format_types);
drm_mode_object_put(dev, &plane->base);
/* if not added to a list, it must be a private plane */
@@ -713,7 +865,7 @@ void drm_plane_cleanup(struct drm_plane *plane)
list_del(&plane->head);
dev->mode_config.num_plane--;
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_plane_cleanup);
@@ -721,9 +873,6 @@ EXPORT_SYMBOL(drm_plane_cleanup);
* drm_mode_create - create a new display mode
* @dev: DRM device
*
- * LOCKING:
- * Caller must hold DRM mode_config lock.
- *
* Create a new drm_display_mode, give it an ID, and return it.
*
* RETURNS:
@@ -751,9 +900,6 @@ EXPORT_SYMBOL(drm_mode_create);
* @dev: DRM device
* @mode: mode to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Free @mode's unique identifier, then free it.
*/
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
@@ -978,16 +1124,19 @@ EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
- * LOCKING:
- * None, should happen single threaded at init time.
- *
* Initialize @dev's mode_config structure, used for tracking the graphics
* configuration of @dev.
+ *
+ * Since this initializes the modeset locks, no locking is possible here. That
+ * is no problem, since this should happen single-threaded at init time. It is
+ * the driver's job to ensure this guarantee.
+ *
*/
void drm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
mutex_init(&dev->mode_config.idr_mutex);
+ mutex_init(&dev->mode_config.fb_lock);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
@@ -997,9 +1146,9 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
drm_mode_create_standard_connector_properties(dev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
/* Just to be sure */
dev->mode_config.num_fb = 0;
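
A sketch of the single-threaded init-time usage this relies on (foo_driver_load and the size limits are assumptions): the mode_config is set up before any CRTCs, connectors or userspace can see it, so no locking is needed.

static int foo_driver_load(struct drm_device *dev)
{
	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	/* CRTCs, encoders, connectors and planes are registered after this. */
	return 0;
}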
@@ -1057,12 +1206,13 @@ EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
* drm_mode_config_cleanup - free up DRM mode_config info
* @dev: DRM device
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Free up all the connectors and CRTCs associated with this DRM device, then
* free up the framebuffers and associated buffer objects.
*
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
* FIXME: cleanup any dangling user buffer objects too
*/
void drm_mode_config_cleanup(struct drm_device *dev)
@@ -1089,6 +1239,15 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_property_destroy(dev, property);
}
+ /*
+ * We are in a single-threaded teardown context, so there is no need to grab
+ * the fb_lock to protect against concurrent fb_list access. On the contrary,
+ * doing so would actually deadlock with the drm_framebuffer_cleanup function.
+ *
+ * Also, if there are any framebuffers left, that's a driver leak now,
+ * so politely WARN about this.
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
drm_framebuffer_remove(fb);
}
@@ -1102,7 +1261,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
crtc->funcs->destroy(crtc);
}
- idr_remove_all(&dev->mode_config.crtc_idr);
idr_destroy(&dev->mode_config.crtc_idr);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
@@ -1112,9 +1270,6 @@ EXPORT_SYMBOL(drm_mode_config_cleanup);
* @out: drm_mode_modeinfo struct to return to the user
* @in: drm_display_mode to use
*
- * LOCKING:
- * None.
- *
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
* the user.
*/
@@ -1151,9 +1306,6 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
- * LOCKING:
- * None.
- *
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*
@@ -1188,13 +1340,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
/**
* drm_mode_getresources - get graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Construct a set of configuration description structures and return
* them to the user, including CRTC, connector and framebuffer configuration.
@@ -1228,8 +1376,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&file_priv->fbs_lock);
/*
* For the non-control nodes we need to limit the list of resources
* by IDs in the group list for this node
@@ -1237,6 +1385,23 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
list_for_each(lh, &file_priv->fbs)
fb_count++;
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+ if (put_user(fb->base.id, fb_id + copied)) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -EFAULT;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+ mutex_unlock(&file_priv->fbs_lock);
+
+ drm_modeset_lock_all(dev);
mode_group = &file_priv->master->minor->mode_group;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
@@ -1260,21 +1425,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
card_res->max_width = dev->mode_config.max_width;
card_res->min_width = dev->mode_config.min_width;
- /* handle this in 4 parts */
- /* FBs */
- if (card_res->count_fbs >= fb_count) {
- copied = 0;
- fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, filp_head) {
- if (put_user(fb->base.id, fb_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_fbs = fb_count;
-
/* CRTCs */
if (card_res->count_crtcs >= crtc_count) {
copied = 0;
@@ -1370,19 +1520,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
card_res->count_connectors, card_res->count_encoders);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
/**
* drm_mode_getcrtc - get CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Construct a CRTC configuration structure to return to the user.
*
@@ -1402,7 +1548,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -1430,19 +1576,15 @@ int drm_mode_getcrtc(struct drm_device *dev,
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
/**
* drm_mode_getconnector - get connector configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Construct a connector configuration structure to return to the user.
*
@@ -1575,6 +1717,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out:
mutex_unlock(&dev->mode_config.mutex);
+
return ret;
}
@@ -1589,7 +1732,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
if (!obj) {
@@ -1608,7 +1751,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
enc_resp->possible_clones = encoder->possible_clones;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1618,9 +1761,6 @@ out:
* @data: ioctl data
* @file_priv: DRM file info
*
- * LOCKING:
- * Takes mode config lock.
- *
* Return a plane count and set of IDs.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
@@ -1635,7 +1775,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
config = &dev->mode_config;
/*
@@ -1657,7 +1797,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
plane_resp->count_planes = config->num_plane;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1667,9 +1807,6 @@ out:
* @data: ioctl data
* @file_priv: DRM file info
*
- * LOCKING:
- * Takes mode config lock.
- *
* Return plane info, including formats supported, gamma size, any
* current fb, etc.
*/
@@ -1685,7 +1822,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, plane_resp->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
@@ -1725,7 +1862,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
plane_resp->count_format_types = plane->format_count;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1733,10 +1870,7 @@ out:
* drm_mode_setplane - set up or tear down a plane
* @dev: DRM device
* @data: ioctl data
- * @file_prive: DRM file info
- *
- * LOCKING:
- * Takes mode config lock.
+ * @file_priv: DRM file info
*
* Set plane info, including placement, fb, scaling, and other factors.
* Or pass a NULL fb to disable.
@@ -1748,7 +1882,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
+ struct drm_framebuffer *fb = NULL, *old_fb = NULL;
int ret = 0;
unsigned int fb_width, fb_height;
int i;
@@ -1756,8 +1890,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
-
/*
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
@@ -1767,16 +1899,18 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
if (!obj) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
- ret = -ENOENT;
- goto out;
+ return -ENOENT;
}
plane = obj_to_plane(obj);
/* No fb means shut it down */
if (!plane_req->fb_id) {
+ drm_modeset_lock_all(dev);
+ old_fb = plane->fb;
plane->funcs->disable_plane(plane);
plane->crtc = NULL;
plane->fb = NULL;
+ drm_modeset_unlock_all(dev);
goto out;
}
@@ -1790,15 +1924,13 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
}
crtc = obj_to_crtc(obj);
- obj = drm_mode_object_find(dev, plane_req->fb_id,
- DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+ if (!fb) {
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
plane_req->fb_id);
ret = -ENOENT;
goto out;
}
- fb = obj_to_fb(obj);
/* Check whether this plane supports the fb pixel format. */
for (i = 0; i < plane->format_count; i++)
@@ -1844,31 +1976,62 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
goto out;
}
+ drm_modeset_lock_all(dev);
ret = plane->funcs->update_plane(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
if (!ret) {
+ old_fb = plane->fb;
plane->crtc = crtc;
plane->fb = fb;
+ fb = NULL;
}
+ drm_modeset_unlock_all(dev);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
return ret;
}
/**
- * drm_mode_setcrtc - set CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * drm_mode_set_config_internal - helper to call ->set_config
+ * @set: modeset config to set
*
- * LOCKING:
- * Takes mode config lock.
+ * This is a little helper to wrap internal calls to the ->set_config driver
+ * interface. The only thing it adds is the correct refcounting dance.
+ */
+int drm_mode_set_config_internal(struct drm_mode_set *set)
+{
+ struct drm_crtc *crtc = set->crtc;
+ struct drm_framebuffer *fb, *old_fb;
+ int ret;
+
+ old_fb = crtc->fb;
+ fb = set->fb;
+
+ ret = crtc->funcs->set_config(set);
+ if (ret == 0) {
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+ if (fb)
+ drm_framebuffer_reference(fb);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_set_config_internal);
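
A minimal sketch of an internal caller (foo_disable_crtc is hypothetical): going through the helper keeps the crtc->fb refcounts balanced on success.

static int foo_disable_crtc(struct drm_crtc *crtc)
{
	struct drm_mode_set set;

	memset(&set, 0, sizeof(set));
	set.crtc = crtc;
	set.fb = NULL;	/* a NULL fb shuts the CRTC down */

	/* Unrefs the old crtc->fb (and would ref a new one) on success. */
	return drm_mode_set_config_internal(&set);
}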
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Build a new CRTC configuration based on user request.
*
@@ -1899,7 +2062,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
return -ERANGE;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -1921,16 +2084,16 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
fb = crtc->fb;
+ /* Make refcounting symmetric with the lookup path. */
+ drm_framebuffer_reference(fb);
} else {
- obj = drm_mode_object_find(dev, crtc_req->fb_id,
- DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
+ if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
ret = -EINVAL;
goto out;
}
- fb = obj_to_fb(obj);
}
mode = drm_mode_create(dev);
@@ -2027,12 +2190,15 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors;
set.fb = fb;
- ret = crtc->funcs->set_config(&set);
+ ret = drm_mode_set_config_internal(&set);
out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+
kfree(connector_set);
drm_mode_destroy(dev, mode);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -2050,15 +2216,14 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
crtc = obj_to_crtc(obj);
+ mutex_lock(&crtc->mutex);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set) {
ret = -ENXIO;
@@ -2078,7 +2243,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
}
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&crtc->mutex);
+
return ret;
}
@@ -2089,7 +2255,7 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
switch (bpp) {
case 8:
- fmt = DRM_FORMAT_RGB332;
+ fmt = DRM_FORMAT_C8;
break;
case 16:
if (depth == 15)
@@ -2120,13 +2286,9 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
/**
* drm_mode_addfb - add an FB to the graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request.
*
@@ -2161,24 +2323,19 @@ int drm_mode_addfb(struct drm_device *dev,
if ((config->min_height > r.height) || (r.height > config->max_height))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
-
- /* TODO check buffer is sufficiently large */
- /* TODO setup destructor callback */
-
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
- ret = PTR_ERR(fb);
- goto out;
+ drm_modeset_unlock_all(dev);
+ return PTR_ERR(fb);
}
+ mutex_lock(&file_priv->fbs_lock);
or->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file_priv->fbs_lock);
-out:
- mutex_unlock(&dev->mode_config.mutex);
return ret;
}
@@ -2304,13 +2461,9 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request with format.
*
@@ -2350,33 +2503,28 @@ int drm_mode_addfb2(struct drm_device *dev,
if (ret)
return ret;
- mutex_lock(&dev->mode_config.mutex);
-
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
- ret = PTR_ERR(fb);
- goto out;
+ drm_modeset_unlock_all(dev);
+ return PTR_ERR(fb);
}
+ mutex_lock(&file_priv->fbs_lock);
r->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file_priv->fbs_lock);
+
-out:
- mutex_unlock(&dev->mode_config.mutex);
return ret;
}
/**
* drm_mode_rmfb - remove an FB from the configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Remove the FB specified by the user.
*
@@ -2388,50 +2536,49 @@ out:
int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_object *obj;
struct drm_framebuffer *fb = NULL;
struct drm_framebuffer *fbl = NULL;
uint32_t *id = data;
- int ret = 0;
int found = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
- /* TODO check that we really get a framebuffer back. */
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
+ mutex_lock(&file_priv->fbs_lock);
+ mutex_lock(&dev->mode_config.fb_lock);
+ fb = __drm_framebuffer_lookup(dev, *id);
+ if (!fb)
+ goto fail_lookup;
list_for_each_entry(fbl, &file_priv->fbs, filp_head)
if (fb == fbl)
found = 1;
+ if (!found)
+ goto fail_lookup;
- if (!found) {
- ret = -EINVAL;
- goto out;
- }
+ /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+ __drm_framebuffer_unregister(dev, fb);
+
+ list_del_init(&fb->filp_head);
+ mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&file_priv->fbs_lock);
drm_framebuffer_remove(fb);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
+ return 0;
+
+fail_lookup:
+ mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&file_priv->fbs_lock);
+
+ return -EINVAL;
}
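
A reduced sketch of the lock ordering used above (foo_reap_fb is hypothetical and omits the idr unregistering done by the real path): fpriv->fbs_lock nests outside dev->mode_config.fb_lock, and both are dropped before drm_framebuffer_remove() since that may take the modeset locks.

static void foo_reap_fb(struct drm_device *dev, struct drm_file *file_priv,
			struct drm_framebuffer *fb)
{
	mutex_lock(&file_priv->fbs_lock);		/* outer lock */
	mutex_lock(&dev->mode_config.fb_lock);		/* inner lock */
	list_del_init(&fb->filp_head);
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&file_priv->fbs_lock);

	/* Must run without fb_lock held - may grab the modeset locks. */
	drm_framebuffer_remove(fb);
}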
/**
* drm_mode_getfb - get FB info
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Lookup the FB given its ID and return info about it.
*
@@ -2444,30 +2591,28 @@ int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd *r = data;
- struct drm_mode_object *obj;
struct drm_framebuffer *fb;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -EINVAL;
r->height = fb->height;
r->width = fb->width;
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
- fb->funcs->create_handle(fb, file_priv, &r->handle);
+ if (fb->funcs->create_handle)
+ ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
+ else
+ ret = -ENODEV;
+
+ drm_framebuffer_unreference(fb);
-out:
- mutex_unlock(&dev->mode_config.mutex);
return ret;
}
@@ -2477,7 +2622,6 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
struct drm_clip_rect __user *clips_ptr;
struct drm_clip_rect *clips = NULL;
struct drm_mode_fb_dirty_cmd *r = data;
- struct drm_mode_object *obj;
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
@@ -2486,13 +2630,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- ret = -EINVAL;
- goto out_err1;
- }
- fb = obj_to_fb(obj);
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -EINVAL;
num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
@@ -2530,27 +2670,26 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
if (fb->funcs->dirty) {
+ drm_modeset_lock_all(dev);
ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
clips, num_clips);
+ drm_modeset_unlock_all(dev);
} else {
ret = -ENOSYS;
- goto out_err2;
}
out_err2:
kfree(clips);
out_err1:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_framebuffer_unreference(fb);
+
return ret;
}
/**
* drm_fb_release - remove and free the FBs on this file
- * @filp: file * from the ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @priv: drm file for the ioctl
*
* Destroy all the FBs associated with @priv.
*
@@ -2564,11 +2703,20 @@ void drm_fb_release(struct drm_file *priv)
struct drm_device *dev = priv->minor->dev;
struct drm_framebuffer *fb, *tfb;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&priv->fbs_lock);
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+ __drm_framebuffer_unregister(dev, fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ list_del_init(&fb->filp_head);
+
+ /* This will also drop the fpriv->fbs reference. */
drm_framebuffer_remove(fb);
}
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&priv->fbs_lock);
}
/**
@@ -2660,10 +2808,9 @@ EXPORT_SYMBOL(drm_mode_detachmode_crtc);
/**
* drm_fb_attachmode - Attach a user mode to a connector
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* This attaches a user specified mode to a connector.
* Called by the user via ioctl.
@@ -2684,7 +2831,7 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
@@ -2708,17 +2855,16 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
drm_mode_attachmode(dev, connector, mode);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
/**
* drm_fb_detachmode - Detach a user specified mode from a connector
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Called by the user via ioctl.
*
@@ -2738,7 +2884,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
@@ -2755,7 +2901,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
ret = drm_mode_detachmode(dev, connector, &mode);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3001,7 +3147,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
ret = -EINVAL;
@@ -3079,7 +3225,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->count_enum_blobs = blob_count;
}
done:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3130,7 +3276,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
ret = -EINVAL;
@@ -3148,7 +3294,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
out_resp->length = blob->length;
done:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3290,7 +3436,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
@@ -3327,7 +3473,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
}
arg->count_props = props_count;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3344,7 +3490,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!arg_obj)
@@ -3382,7 +3528,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3444,7 +3590,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
@@ -3485,7 +3631,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3503,7 +3649,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
@@ -3536,7 +3682,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
goto out;
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3546,7 +3692,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
struct drm_mode_crtc_page_flip *page_flip = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
+ struct drm_framebuffer *fb = NULL, *old_fb = NULL;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
int hdisplay, vdisplay;
@@ -3556,12 +3702,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
page_flip->reserved != 0)
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj)
- goto out;
+ return -EINVAL;
crtc = obj_to_crtc(obj);
+ mutex_lock(&crtc->mutex);
if (crtc->fb == NULL) {
/* The framebuffer is currently unbound, presumably
* due to a hotplug event, that userspace has not
@@ -3574,10 +3720,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (crtc->funcs->page_flip == NULL)
goto out;
- obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj)
+ fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+ if (!fb)
goto out;
- fb = obj_to_fb(obj);
hdisplay = crtc->mode.hdisplay;
vdisplay = crtc->mode.vdisplay;
@@ -3623,6 +3768,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
(void (*) (struct drm_pending_event *)) kfree;
}
+ old_fb = crtc->fb;
ret = crtc->funcs->page_flip(crtc, fb, e);
if (ret) {
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -3631,10 +3777,27 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(e);
}
+ /* Keep the old fb, don't unref it. */
+ old_fb = NULL;
+ } else {
+ /*
+ * Warn if the driver hasn't properly updated the crtc->fb
+ * field to reflect that the new framebuffer is now used.
+ * Failing to do so will screw with the reference counting
+ * on framebuffers.
+ */
+ WARN_ON(crtc->fb != fb);
+ /* Unref only the old framebuffer. */
+ fb = NULL;
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+ mutex_unlock(&crtc->mutex);
+
return ret;
}
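
A sketch of the driver-side contract the WARN_ON above enforces (foo_crtc_page_flip and foo_queue_flip are hypothetical): on success the driver's ->page_flip must leave crtc->fb pointing at the new framebuffer, otherwise the core's reference counting goes out of balance.

static int foo_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			      struct drm_pending_vblank_event *event)
{
	int ret;

	ret = foo_queue_flip(crtc, fb, event);
	if (ret)
		return ret;

	/* Without this assignment the core warns and leaks a reference. */
	crtc->fb = fb;

	return 0;
}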
@@ -3702,6 +3865,7 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp)
{
switch (format) {
+ case DRM_FORMAT_C8:
case DRM_FORMAT_RGB332:
case DRM_FORMAT_BGR233:
*depth = 8;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index be174cab105a..25f91cd23e60 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -297,7 +297,6 @@ static void __exit drm_core_exit(void)
unregister_chrdev(DRM_MAJOR, "drm");
- idr_remove_all(&drm_minors_idr);
idr_destroy(&drm_minors_idr);
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5a3770fbd770..c194f4e680ad 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -29,11 +29,11 @@
*/
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
-#include "drm_edid_modes.h"
#define version_greater(edid, maj, min) \
(((edid)->version > (maj)) || \
@@ -87,9 +87,6 @@ static struct edid_quirk {
int product_id;
u32 quirks;
} edid_quirk_list[] = {
- /* ASUS VW222S */
- { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
-
/* Acer AL1706 */
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
@@ -130,6 +127,746 @@ static struct edid_quirk {
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
};
+/*
+ * Autogenerated from the DMT spec.
+ * This table is copied from xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode drm_dmt_modes[] = {
+ /* 640x350@85Hz */
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 350, 382, 385, 445, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x400@85Hz */
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 400, 401, 404, 445, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x400@85Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
+ 828, 936, 0, 400, 401, 404, 446, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 492, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@85Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
+ 752, 832, 0, 480, 481, 484, 509, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@56Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@72Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@85Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
+ 896, 1048, 0, 600, 601, 604, 631, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@120Hz RB */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
+ 880, 960, 0, 600, 603, 607, 636, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 848x480@60Hz */
+ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
+ 976, 1088, 0, 480, 486, 494, 517, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@43Hz, interlace */
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 772, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@85Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@120Hz RB */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
+ 1104, 1184, 0, 768, 771, 775, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 790, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@75Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
+ 1488, 1696, 0, 768, 771, 778, 805, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@85Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
+ 1496, 1712, 0, 768, 771, 778, 809, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@120Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 823, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@75Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
+ 1488, 1696, 0, 800, 803, 809, 838, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@85Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
+ 1496, 1712, 0, 800, 803, 809, 843, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@120Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 847, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@85Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
+ 1504, 1728, 0, 960, 961, 964, 1011, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@120Hz RB */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
+ 1360, 1440, 0, 960, 963, 967, 1017, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@75Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@85Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
+ 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@120Hz RB */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
+ 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@120Hz RB */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
+ 1440, 1520, 0, 768, 771, 776, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@75Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
+ 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@85Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
+ 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@120Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 926, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@75Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
+ 1688, 1936, 0, 900, 903, 909, 942, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@85Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
+ 1696, 1952, 0, 900, 903, 909, 948, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@120Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 953, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@65Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@70Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@75Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@85Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@120Hz RB */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
+ 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@75Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
+ 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@85Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
+ 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@120Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@75Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
+ 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@120Hz RB */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
+ 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@75Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
+ 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@120Hz RB */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
+ 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@75Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
+ 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@85Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
+ 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@120Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@75Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
+ 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@120Hz RB */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
+ 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@75HZ */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@85HZ */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@120Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+};
+
+static const struct drm_display_mode edid_est_modes[] = {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+ 768, 864, 0, 480, 483, 486, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+ 846, 900, 0, 400, 421, 423, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+ 846, 900, 0, 400, 412, 414, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+ 928, 1152, 0, 624, 625, 628, 667, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+struct minimode {
+ short w;
+ short h;
+ short r;
+ short rb;
+};
+
+static const struct minimode est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
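The /* byte 6 */ .. /* byte 11 */ comments above mirror the EDID Established Timings III descriptor layout: each bit of those bytes selects one est3_modes entry. A minimal sketch of that mapping follows; it is illustrative only (the helper name and the MSB-first scan order are assumptions, not the decoder this file actually uses) and assumes est3_modes and ARRAY_SIZE() are in scope.

/*
 * Illustrative sketch only: map one bit of EST III bytes 6..11 to an
 * est3_modes entry.  The MSB-first scan order is an assumption here.
 */
static const struct minimode *est3_bit_to_minimode(const u8 *est, int byte, int bit)
{
	int idx;

	if (byte < 6 || byte > 11 || bit < 0 || bit > 7)
		return NULL;

	idx = (byte - 6) * 8 + (7 - bit);
	if (idx >= ARRAY_SIZE(est3_modes))
		return NULL;

	return (est[byte - 6] & (1 << bit)) ? &est3_modes[idx] : NULL;
}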
+
+static const struct minimode extra_modes[] = {
+ { 1024, 576, 60, 0 },
+ { 1366, 768, 60, 0 },
+ { 1600, 900, 60, 0 },
+ { 1680, 945, 60, 0 },
+ { 1920, 1080, 60, 0 },
+ { 2048, 1152, 60, 0 },
+ { 2048, 1536, 60, 0 },
+};
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+ /* 1 - 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2 - 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 3 - 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 4 - 1280x720@60Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 5 - 1920x1080i@60Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 6 - 1440x480i@60Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 7 - 1440x480i@60Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 8 - 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 9 - 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 10 - 2880x480i@60Hz */
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 11 - 2880x480i@60Hz */
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 12 - 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 13 - 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 14 - 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 15 - 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 16 - 1920x1080@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 17 - 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 18 - 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 19 - 1280x720@50Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 20 - 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 21 - 1440x576i@50Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 22 - 1440x576i@50Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 23 - 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 24 - 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 25 - 2880x576i@50Hz */
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 26 - 2880x576i@50Hz */
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 27 - 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 28 - 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 29 - 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 30 - 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 31 - 1920x1080@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 32 - 1920x1080@24Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 33 - 1920x1080@25Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 34 - 1920x1080@30Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 35 - 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 36 - 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 37 - 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 38 - 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 39 - 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+ 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 40 - 1920x1080i@100Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 41 - 1280x720@100Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 42 - 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 43 - 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 44 - 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 45 - 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 46 - 1920x1080i@120Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 47 - 1280x720@120Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 48 - 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 49 - 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 50 - 1440x480i@120Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 51 - 1440x480i@120Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 52 - 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 53 - 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 54 - 1440x576i@200Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 55 - 1440x576i@200Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 56 - 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 57 - 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 58 - 1440x480i@240Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 59 - 1440x480i@240Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 60 - 1280x720@24Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 61 - 1280x720@25Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 62 - 1280x720@30Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 63 - 1920x1080@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 64 - 1920x1080@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
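drm_match_cea_mode() and do_cea_modes() further down both treat this table as indexed by VIC minus one, with the valid range bounded by ARRAY_SIZE() now that the separate mode-count variables are gone. A small sketch of that convention (the helper name is hypothetical, not part of this patch):

/*
 * Illustrative only: translate a 1-based CEA Video ID code into a table
 * entry, mirroring the bounds check used by do_cea_modes().
 */
static const struct drm_display_mode *vic_to_cea_mode(u8 vic)
{
	if (vic < 1 || vic > ARRAY_SIZE(edid_cea_modes))
		return NULL;

	return &edid_cea_modes[vic - 1];
}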
+
/*** DDC fetch and block validation ***/
static const u8 edid_header[] = {
@@ -357,10 +1094,14 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
break;
}
}
- if (i == 4)
+
+ if (i == 4 && print_bad_edid) {
dev_warn(connector->dev->dev,
"%s: Ignoring invalid EDID block %d.\n",
drm_get_connector_name(connector), j);
+
+ connector->bad_edid_counter++;
+ }
}
if (valid_extensions != block[0x7e]) {
@@ -541,7 +1282,7 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
{
int i;
- for (i = 0; i < drm_num_dmt_modes; i++) {
+ for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize != ptr->hdisplay)
continue;
@@ -1082,7 +1823,7 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
- for (i = 0; i < drm_num_dmt_modes; i++) {
+ for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
@@ -1117,7 +1858,7 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
- for (i = 0; i < num_extra_modes; i++) {
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
if (!newmode)
@@ -1146,7 +1887,7 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_device *dev = connector->dev;
bool rb = drm_monitor_supports_rb(edid);
- for (i = 0; i < num_extra_modes; i++) {
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
if (!newmode)
@@ -1483,9 +2224,11 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
+#define VIDEO_CAPABILITY_BLOCK 0x07
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
+#define EDID_CEA_VCDB_QS (1 << 6)
/**
* Search EDID for CEA extension block.
@@ -1513,16 +2256,19 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
-/*
- * Looks for a CEA mode matching given drm_display_mode.
- * Returns its CEA Video ID code, or 0 if not found.
+/**
+ * drm_match_cea_mode - look for a CEA mode matching given mode
+ * @to_match: display mode
+ *
+ * Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
+ * mode.
*/
-u8 drm_match_cea_mode(struct drm_display_mode *to_match)
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
{
struct drm_display_mode *cea_mode;
u8 mode;
- for (mode = 0; mode < drm_num_cea_modes; mode++) {
+ for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
if (drm_mode_equal(to_match, cea_mode))
@@ -1542,7 +2288,7 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
for (mode = db; mode < db + len; mode++) {
cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
- if (cea_mode < drm_num_cea_modes) {
+ if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(dev,
&edid_cea_modes[cea_mode]);
@@ -1902,6 +2648,37 @@ end:
EXPORT_SYMBOL(drm_detect_monitor_audio);
/**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
+ */
+bool drm_rgb_quant_range_selectable(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, start, end;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start, &end))
+ return false;
+
+ for_each_cea_db(edid_ext, i, start, end) {
+ if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
+ cea_db_payload_len(&edid_ext[i]) == 2) {
+ DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
+ return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
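A hedged sketch of how an HDMI encoder might consume this helper when filling the AVI infoframe quantization field; the quantization_range member and the HDMI_QUANTIZATION_RANGE_* constants are assumed to come from <linux/hdmi.h>, and the full/limited policy shown is only an example, not a rule set by this patch.

/*
 * Example policy, not part of this patch: advertise full-range RGB only
 * when the sink reports the quantization range as selectable.
 */
static void example_set_avi_quant_range(struct hdmi_avi_infoframe *frame,
					struct edid *edid)
{
	if (drm_rgb_quant_range_selectable(edid))
		frame->quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
	else
		frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
}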
+
+/**
* drm_add_display_info - pull display info out if present
* @edid: EDID data
* @info: display info (attached to connector)
@@ -2020,7 +2797,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
num_modes += add_cvt_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
- num_modes += add_inferred_modes(connector, edid);
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ num_modes += add_inferred_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
@@ -2081,20 +2859,33 @@ int drm_add_modes_noedid(struct drm_connector *connector,
EXPORT_SYMBOL(drm_add_modes_noedid);
/**
- * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
- * @mode: mode
+ * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
+ * data from a DRM display mode
+ * @frame: HDMI AVI infoframe
+ * @mode: DRM display mode
*
- * RETURNS:
- * The VIC number, 0 in case it's not a CEA-861 mode.
+ * Returns 0 on success or a negative error code on failure.
*/
-uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+int
+drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
+ const struct drm_display_mode *mode)
{
- uint8_t i;
+ int err;
+
+ if (!frame || !mode)
+ return -EINVAL;
+
+ err = hdmi_avi_infoframe_init(frame);
+ if (err < 0)
+ return err;
+
+ frame->video_code = drm_match_cea_mode(mode);
+ if (!frame->video_code)
+ return 0;
- for (i = 0; i < drm_num_cea_modes; i++)
- if (drm_mode_equal(mode, &edid_cea_modes[i]))
- return i + 1;
+ frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+ frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
return 0;
}
-EXPORT_SYMBOL(drm_mode_cea_vic);
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
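A possible caller in an HDMI encoder's mode_set path, sketched under the assumption that the <linux/hdmi.h> packing helpers (hdmi_avi_infoframe_pack() and the HDMI_*_SIZE constants) are available in this tree; the final register write is left as a driver-specific stub.

static void example_write_avi_infoframe(struct drm_encoder *encoder,
					const struct drm_display_mode *mode)
{
	struct hdmi_avi_infoframe frame;
	u8 buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	ssize_t len;

	/* fill VIC and aspect fields from the mode being set */
	if (drm_hdmi_avi_infoframe_from_display_mode(&frame, mode) < 0)
		return;

	/* serialize header + payload + checksum into buf */
	len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
	if (len < 0)
		return;

	/* driver-specific: write 'len' bytes from 'buf' to the AVI infoframe hardware */
}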
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
deleted file mode 100644
index 5dbf7d2557b4..000000000000
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ /dev/null
@@ -1,774 +0,0 @@
-/*
- * Copyright (c) 2007-2008 Intel Corporation
- * Jesse Barnes <jesse.barnes@intel.com>
- * Copyright 2010 Red Hat, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <drm/drmP.h>
-#include <drm/drm_edid.h>
-
-/*
- * Autogenerated from the DMT spec.
- * This table is copied from xfree86/modes/xf86EdidModes.c.
- */
-static const struct drm_display_mode drm_dmt_modes[] = {
- /* 640x350@85Hz */
- { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 350, 382, 385, 445, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x400@85Hz */
- { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 400, 401, 404, 445, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x400@85Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
- 828, 936, 0, 400, 401, 404, 446, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 492, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@85Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
- 752, 832, 0, 480, 481, 484, 509, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 800x600@56Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@72Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@85Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
- 896, 1048, 0, 600, 601, 604, 631, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@120Hz RB */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
- 880, 960, 0, 600, 603, 607, 636, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 848x480@60Hz */
- { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
- 976, 1088, 0, 480, 486, 494, 517, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@43Hz, interlace */
- { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 772, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@85Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1168, 1376, 0, 768, 769, 772, 808, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@120Hz RB */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
- 1104, 1184, 0, 768, 771, 775, 813, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1152x864@75Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@60Hz RB */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
- 1360, 1440, 0, 768, 771, 778, 790, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x768@60Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
- 1472, 1664, 0, 768, 771, 778, 798, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@75Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
- 1488, 1696, 0, 768, 771, 778, 805, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x768@85Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
- 1496, 1712, 0, 768, 771, 778, 809, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@120Hz RB */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
- 1360, 1440, 0, 768, 771, 778, 813, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@60Hz RB */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
- 1360, 1440, 0, 800, 803, 809, 823, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@60Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
- 1480, 1680, 0, 800, 803, 809, 831, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@75Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
- 1488, 1696, 0, 800, 803, 809, 838, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@85Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
- 1496, 1712, 0, 800, 803, 809, 843, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@120Hz RB */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
- 1360, 1440, 0, 800, 803, 809, 847, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x960@60Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
- 1488, 1800, 0, 960, 961, 964, 1000, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@85Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
- 1504, 1728, 0, 960, 961, 964, 1011, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@120Hz RB */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
- 1360, 1440, 0, 960, 963, 967, 1017, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x1024@60Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@75Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@85Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
- 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@120Hz RB */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
- 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1360x768@60Hz */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
- 1536, 1792, 0, 768, 771, 777, 795, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1360x768@120Hz RB */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
- 1440, 1520, 0, 768, 771, 776, 813, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1400x1050@60Hz RB */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
- 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1400x1050@60Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1400x1050@75Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
- 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1400x1050@85Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
- 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1400x1050@120Hz RB */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
- 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x900@60Hz RB */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
- 1520, 1600, 0, 900, 903, 909, 926, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x900@60Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
- 1672, 1904, 0, 900, 903, 909, 934, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@75Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
- 1688, 1936, 0, 900, 903, 909, 942, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@85Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
- 1696, 1952, 0, 900, 903, 909, 948, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@120Hz RB */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
- 1520, 1600, 0, 900, 903, 909, 953, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1600x1200@60Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@65Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@70Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@75Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@85Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@120Hz RB */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
- 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1680x1050@60Hz RB */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
- 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1680x1050@60Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@75Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
- 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@85Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
- 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@120Hz RB */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
- 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1792x1344@60Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@75Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
- 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@120Hz RB */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
- 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1856x1392@60Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1856x1392@75Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
- 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1856x1392@120Hz RB */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
- 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1200@60Hz RB */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
- 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1200@60Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@75Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
- 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@85Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
- 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@120Hz RB */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
- 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1440@60Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@75Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
- 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@120Hz RB */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
- 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2560x1600@60Hz RB */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
- 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2560x1600@60Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@75HZ */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@85HZ */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@120Hz RB */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
- 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
-
-};
-static const int drm_num_dmt_modes =
- sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-
-static const struct drm_display_mode edid_est_modes[] = {
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
- 768, 864, 0, 480, 483, 486, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
- 846, 900, 0, 400, 421, 423, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
- 846, 900, 0, 400, 412, 414, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
- { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 776, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
- { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
- 928, 1152, 0, 624, 625, 628, 667, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
-};
-
-struct minimode {
- short w;
- short h;
- short r;
- short rb;
-};
-
-static const struct minimode est3_modes[] = {
- /* byte 6 */
- { 640, 350, 85, 0 },
- { 640, 400, 85, 0 },
- { 720, 400, 85, 0 },
- { 640, 480, 85, 0 },
- { 848, 480, 60, 0 },
- { 800, 600, 85, 0 },
- { 1024, 768, 85, 0 },
- { 1152, 864, 75, 0 },
- /* byte 7 */
- { 1280, 768, 60, 1 },
- { 1280, 768, 60, 0 },
- { 1280, 768, 75, 0 },
- { 1280, 768, 85, 0 },
- { 1280, 960, 60, 0 },
- { 1280, 960, 85, 0 },
- { 1280, 1024, 60, 0 },
- { 1280, 1024, 85, 0 },
- /* byte 8 */
- { 1360, 768, 60, 0 },
- { 1440, 900, 60, 1 },
- { 1440, 900, 60, 0 },
- { 1440, 900, 75, 0 },
- { 1440, 900, 85, 0 },
- { 1400, 1050, 60, 1 },
- { 1400, 1050, 60, 0 },
- { 1400, 1050, 75, 0 },
- /* byte 9 */
- { 1400, 1050, 85, 0 },
- { 1680, 1050, 60, 1 },
- { 1680, 1050, 60, 0 },
- { 1680, 1050, 75, 0 },
- { 1680, 1050, 85, 0 },
- { 1600, 1200, 60, 0 },
- { 1600, 1200, 65, 0 },
- { 1600, 1200, 70, 0 },
- /* byte 10 */
- { 1600, 1200, 75, 0 },
- { 1600, 1200, 85, 0 },
- { 1792, 1344, 60, 0 },
- { 1792, 1344, 85, 0 },
- { 1856, 1392, 60, 0 },
- { 1856, 1392, 75, 0 },
- { 1920, 1200, 60, 1 },
- { 1920, 1200, 60, 0 },
- /* byte 11 */
- { 1920, 1200, 75, 0 },
- { 1920, 1200, 85, 0 },
- { 1920, 1440, 60, 0 },
- { 1920, 1440, 75, 0 },
-};
-static const int num_est3_modes = ARRAY_SIZE(est3_modes);
-
-static const struct minimode extra_modes[] = {
- { 1024, 576, 60, 0 },
- { 1366, 768, 60, 0 },
- { 1600, 900, 60, 0 },
- { 1680, 945, 60, 0 },
- { 1920, 1080, 60, 0 },
- { 2048, 1152, 60, 0 },
- { 2048, 1536, 60, 0 },
-};
-static const int num_extra_modes = ARRAY_SIZE(extra_modes);
-
-/*
- * Probably taken from CEA-861 spec.
- * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
- */
-static const struct drm_display_mode edid_cea_modes[] = {
- /* 1 - 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2 - 720x480@60Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 3 - 720x480@60Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 4 - 1280x720@60Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
- 1430, 1650, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 5 - 1920x1080i@60Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 6 - 1440x480i@60Hz */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 7 - 1440x480i@60Hz */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 8 - 1440x240@60Hz */
- { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
- 1602, 1716, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 9 - 1440x240@60Hz */
- { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
- 1602, 1716, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 10 - 2880x480i@60Hz */
- { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
- 3204, 3432, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 11 - 2880x480i@60Hz */
- { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
- 3204, 3432, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 12 - 2880x240@60Hz */
- { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
- 3204, 3432, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 13 - 2880x240@60Hz */
- { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
- 3204, 3432, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 14 - 1440x480@60Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
- 1596, 1716, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 15 - 1440x480@60Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
- 1596, 1716, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 16 - 1920x1080@60Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 17 - 720x576@50Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 18 - 720x576@50Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 19 - 1280x720@50Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
- 1760, 1980, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 20 - 1920x1080i@50Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 21 - 1440x576i@50Hz */
- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 22 - 1440x576i@50Hz */
- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 23 - 1440x288@50Hz */
- { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
- 1590, 1728, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 24 - 1440x288@50Hz */
- { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
- 1590, 1728, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 25 - 2880x576i@50Hz */
- { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
- 3180, 3456, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 26 - 2880x576i@50Hz */
- { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
- 3180, 3456, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 27 - 2880x288@50Hz */
- { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
- 3180, 3456, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 28 - 2880x288@50Hz */
- { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
- 3180, 3456, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 29 - 1440x576@50Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
- 1592, 1728, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 30 - 1440x576@50Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
- 1592, 1728, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 31 - 1920x1080@50Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 32 - 1920x1080@24Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
- 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 33 - 1920x1080@25Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 34 - 1920x1080@30Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 35 - 2880x480@60Hz */
- { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
- 3192, 3432, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 36 - 2880x480@60Hz */
- { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
- 3192, 3432, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 37 - 2880x576@50Hz */
- { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
- 3184, 3456, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 38 - 2880x576@50Hz */
- { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
- 3184, 3456, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 39 - 1920x1080i@50Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
- 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 40 - 1920x1080i@100Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 41 - 1280x720@100Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
- 1760, 1980, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 42 - 720x576@100Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 43 - 720x576@100Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 44 - 1440x576i@100Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 45 - 1440x576i@100Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 46 - 1920x1080i@120Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 47 - 1280x720@120Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
- 1430, 1650, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 48 - 720x480@120Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 49 - 720x480@120Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 50 - 1440x480i@120Hz */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 51 - 1440x480i@120Hz */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 52 - 720x576@200Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 53 - 720x576@200Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 54 - 1440x576i@200Hz */
- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 55 - 1440x576i@200Hz */
- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 56 - 720x480@240Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 57 - 720x480@240Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 58 - 1440x480i@240 */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 59 - 1440x480i@240 */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 60 - 1280x720@24Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
- 3080, 3300, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 61 - 1280x720@25Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
- 3740, 3960, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 62 - 1280x720@30Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
- 3080, 3300, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 63 - 1920x1080@120Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 64 - 1920x1080@100Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-};
-static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 63e733408b6d..48c52f7df4e6 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -123,3 +123,66 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
module_put(module);
}
EXPORT_SYMBOL(drm_i2c_encoder_destroy);
+
+/*
+ * Wrapper functions which can be plugged into drm_encoder_helper_funcs:
+ */
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+ return to_encoder_slave(enc)->slave_funcs;
+}
+
+void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ get_slave_funcs(encoder)->dpms(encoder, mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_dpms);
+
+bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
+
+void drm_i2c_encoder_prepare(struct drm_encoder *encoder)
+{
+ drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_prepare);
+
+void drm_i2c_encoder_commit(struct drm_encoder *encoder)
+{
+ drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_commit);
+
+void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_mode_set);
+
+enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return get_slave_funcs(encoder)->detect(encoder, connector);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_detect);
+
+void drm_i2c_encoder_save(struct drm_encoder *encoder)
+{
+ get_slave_funcs(encoder)->save(encoder);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_save);
+
+void drm_i2c_encoder_restore(struct drm_encoder *encoder)
+{
+ get_slave_funcs(encoder)->restore(encoder);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_restore);
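The wrappers added above simply forward each drm_encoder_helper_funcs callback to the bound slave encoder's drm_encoder_slave_funcs, so a driver can point its helper vtable straight at them. A minimal sketch of such a table for a hypothetical i2c slave encoder driver (the my_ prefix marks names that are not part of this patch):

#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>

/* Plug the generic i2c slave wrappers into the encoder helper vtable. */
static const struct drm_encoder_helper_funcs my_encoder_helper_funcs = {
	.dpms		= drm_i2c_encoder_dpms,
	.mode_fixup	= drm_i2c_encoder_mode_fixup,
	.prepare	= drm_i2c_encoder_prepare,
	.commit		= drm_i2c_encoder_commit,
	.mode_set	= drm_i2c_encoder_mode_set,
	.detect		= drm_i2c_encoder_detect,
};

Such a table would be installed with drm_encoder_helper_add() after drm_i2c_encoder_init() has bound the slave encoder.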
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index fd9d0af4d536..0b5af7d0edb1 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -85,6 +85,11 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
if (!fb_cma)
return ERR_PTR(-ENOMEM);
+ drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb_cma->obj[i] = obj[i];
+
ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
if (ret) {
dev_err(dev->dev, "Failed to initalize framebuffer: %d\n", ret);
@@ -92,11 +97,6 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
return ERR_PTR(ret);
}
- drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
-
- for (i = 0; i < num_planes; i++)
- fb_cma->obj[i] = obj[i];
-
return fb_cma;
}
@@ -180,6 +180,59 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+#ifdef CONFIG_DEBUG_FS
+/**
+ * drm_fb_cma_describe() - Helper to dump information about a single
+ * CMA framebuffer object
+ */
+void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
+ (char *)&fb->pixel_format);
+
+ for (i = 0; i < n; i++) {
+ seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
+ i, fb->offsets[i], fb->pitches[i]);
+ drm_gem_cma_describe(fb_cma->obj[i], m);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_describe);
+
+/**
+ * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
+ * in debugfs.
+ */
+int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret) {
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+ }
+
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head)
+ drm_fb_cma_describe(fb, m);
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
+#endif
+
static struct fb_ops drm_fbdev_cma_ops = {
.owner = THIS_MODULE,
.fb_fillrect = sys_fillrect,
@@ -266,6 +319,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
return 0;
err_drm_fb_cma_destroy:
+ drm_framebuffer_unregister_private(fb);
drm_fb_cma_destroy(fb);
err_framebuffer_release:
framebuffer_release(fbi);
@@ -274,23 +328,8 @@ err_drm_gem_cma_free_object:
return ret;
}
-static int drm_fbdev_cma_probe(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- int ret = 0;
-
- if (!helper->fb) {
- ret = drm_fbdev_cma_create(helper, sizes);
- if (ret < 0)
- return ret;
- ret = 1;
- }
-
- return ret;
-}
-
static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
- .fb_probe = drm_fbdev_cma_probe,
+ .fb_probe = drm_fbdev_cma_create,
};
/**
@@ -332,6 +371,9 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
}
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
ret = drm_fb_helper_initial_config(helper, preferred_bpp);
if (ret < 0) {
dev_err(dev->dev, "Failed to set inital hw configuration.\n");
@@ -370,8 +412,10 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
framebuffer_release(info);
}
- if (fbdev_cma->fb)
+ if (fbdev_cma->fb) {
+ drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+ }
drm_fb_helper_fini(&fbdev_cma->fb_helper);
kfree(fbdev_cma);
@@ -386,8 +430,13 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
*/
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
{
- if (fbdev_cma)
+ if (fbdev_cma) {
+ struct drm_device *dev = fbdev_cma->fb_helper.dev;
+
+ drm_modeset_lock_all(dev);
drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
+ drm_modeset_unlock_all(dev);
+ }
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
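drm_fb_cma_describe() and drm_fb_cma_debugfs_show() above are only built with CONFIG_DEBUG_FS and are meant to be listed in a driver's debugfs file table. A sketch of the hookup for a hypothetical CMA-based KMS driver (my_debugfs_list and my_debugfs_init are illustrative names, wired into the driver's .debugfs_init callback):

#include <drm/drmP.h>
#include <drm/drm_fb_cma_helper.h>

/* One debugfs file, "fb", listing every CMA framebuffer of the device. */
static struct drm_info_list my_debugfs_list[] = {
	{ "fb", drm_fb_cma_debugfs_show, 0 },
};

static int my_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(my_debugfs_list,
					ARRAY_SIZE(my_debugfs_list),
					minor->debugfs_root, minor);
}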
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 954d175bd7fa..59d6b9bf204b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -52,9 +52,36 @@ static LIST_HEAD(kernel_fb_helper_list);
 * mode setting driver. They can be used mostly independently from the crtc
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
+ *
+ * Initialization is done as a three-step process with drm_fb_helper_init(),
+ * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
+ * Drivers with fancier requirements than the default behaviour can override the
+ * second step with their own code. Teardown is done with drm_fb_helper_fini().
+ *
+ * At runtime drivers should restore the fbdev console by calling
+ * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
+ * should also notify the fb helper code of updates to the output
+ * configuration by calling drm_fb_helper_hotplug_event(). For easier
+ * integration with the output polling code in drm_crtc_helper.c the modeset
+ * code provides a ->output_poll_changed callback.
+ *
+ * All other functions exported by the fb helper library can be used to
+ * implement the fbdev driver interface by the driver.
*/
-/* simple single crtc case helper function */
+/**
+ * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
+ * emulation helper
+ * @fb_helper: fbdev initialized with drm_fb_helper_init
+ *
+ * This function adds all the available connectors for use with the given
+ * fb_helper. This is a separate step to allow drivers to freely assign
+ * connectors to the fbdev, e.g. if some are reserved for special purposes or
+ * not adequate to be used for the fbcon.
+ *
+ * Since this is part of the initial setup before the fbdev is published, no
+ * locking is required.
+ */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
@@ -163,6 +190,10 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
}
+/**
+ * drm_fb_helper_debug_enter - implementation for ->fb_debug_enter
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_debug_enter(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@@ -208,6 +239,10 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
return NULL;
}
+/**
+ * drm_fb_helper_debug_leave - implementation for ->fb_debug_leave
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_debug_leave(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@@ -239,13 +274,24 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
+/**
+ * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
+ * @fb_helper: fbcon to restore
+ *
+ * This should be called from driver's drm ->lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ */
bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
bool error = false;
int i, ret;
+
+ drm_warn_on_modeset_not_all_locked(fb_helper->dev);
+
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
- ret = mode_set->crtc->funcs->set_config(mode_set);
+ ret = drm_mode_set_config_internal(mode_set);
if (ret)
error = true;
}
@@ -253,6 +299,10 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
+/*
+ * restore fbcon display for all kms drivers using this helper, used for sysrq
+ * and panic handling.
+ */
static bool drm_fb_helper_force_kernel_mode(void)
{
bool ret, error = false;
@@ -272,7 +322,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
return error;
}
-int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
+static int drm_fb_helper_panic(struct notifier_block *n, unsigned long unused,
void *panic_str)
{
/*
@@ -285,30 +335,36 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
pr_err("panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
-EXPORT_SYMBOL(drm_fb_helper_panic);
static struct notifier_block paniced = {
.notifier_call = drm_fb_helper_panic,
};
-/**
- * drm_fb_helper_restore - restore the framebuffer console (kernel) config
- *
- * Restore's the kernel's fbcon mode, used for lastclose & panic paths.
- */
-void drm_fb_helper_restore(void)
+static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
{
- bool ret;
- ret = drm_fb_helper_force_kernel_mode();
- if (ret == true)
- DRM_ERROR("Failed to restore crtc configuration\n");
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc *crtc;
+ int bound = 0, crtcs_bound = 0;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb)
+ crtcs_bound++;
+ if (crtc->fb == fb_helper->fb)
+ bound++;
+ }
+
+ if (bound < crtcs_bound)
+ return false;
+ return true;
}
-EXPORT_SYMBOL(drm_fb_helper_restore);
#ifdef CONFIG_MAGIC_SYSRQ
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
- drm_fb_helper_restore();
+ bool ret;
+ ret = drm_fb_helper_force_kernel_mode();
+ if (ret == true)
+ DRM_ERROR("Failed to restore crtc configuration\n");
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
@@ -335,9 +391,22 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
int i, j;
/*
+ * fbdev->blank can be called from irq context in case of a panic.
+ * Since we already have our own special panic handler which will
+ * restore the fbdev console mode completely, just bail out early.
+ */
+ if (oops_in_progress)
+ return;
+
+ /*
* For each CRTC in this fb, turn the connectors on/off.
*/
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return;
+ }
+
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
@@ -352,9 +421,14 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
dev->mode_config.dpms_property, dpms_mode);
}
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
+/**
+ * drm_fb_helper_blank - implementation for ->fb_blank
+ * @blank: desired blanking state
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
switch (blank) {
@@ -398,6 +472,24 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
kfree(helper->crtc_info);
}
+/**
+ * drm_fb_helper_init - initialize a drm_fb_helper structure
+ * @dev: drm device
+ * @fb_helper: driver-allocated fbdev helper structure to initialize
+ * @crtc_count: maximum number of crtcs to support in this fbdev emulation
+ * @max_conn_count: max connector count
+ *
+ * This allocates the structures for the fbdev helper with the given limits.
+ * Note that this won't yet touch the hardware (through the driver interfaces)
+ * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
+ * to allow driver writers more control over the exact init sequence.
+ *
+ * Drivers must set fb_helper->funcs before calling
+ * drm_fb_helper_initial_config().
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
int crtc_count, int max_conn_count)
@@ -526,6 +618,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
return 0;
}
+/**
+ * drm_fb_helper_setcmap - implementation for ->fb_setcmap
+ * @cmap: cmap to set
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
@@ -565,6 +662,11 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
+/**
+ * drm_fb_helper_check_var - implementation for ->fb_check_var
+ * @var: screeninfo to check
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -657,13 +759,19 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
EXPORT_SYMBOL(drm_fb_helper_check_var);
-/* this will let fbcon do the mode init */
+/**
+ * drm_fb_helper_set_par - implementation for ->fb_set_par
+ * @info: fbdev registered by the helper
+ *
+ * This will let fbcon do the mode init and is called at initialization time by
+ * the fbdev core when registering the driver, and later on through the hotplug
+ * callback.
+ */
int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct fb_var_screeninfo *var = &info->var;
- struct drm_crtc *crtc;
int ret;
int i;
@@ -672,16 +780,15 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
- ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set);
if (ret) {
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
if (fb_helper->delayed_hotplug) {
fb_helper->delayed_hotplug = false;
@@ -691,6 +798,11 @@ int drm_fb_helper_set_par(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
+/**
+ * drm_fb_helper_pan_display - implementation for ->fb_pan_display
+ * @var: updated screen information
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -701,7 +813,12 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return -EBUSY;
+ }
+
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
@@ -711,22 +828,27 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
modeset->y = var->yoffset;
if (modeset->num_connectors) {
- ret = crtc->funcs->set_config(modeset);
+ ret = drm_mode_set_config_internal(modeset);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
- int preferred_bpp)
+/*
+ * Allocates the backing storage and sets up the fbdev info structure through
+ * the ->fb_probe callback and then registers the fbdev and sets up the panic
+ * notifier.
+ */
+static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
{
- int new_fb = 0;
+ int ret = 0;
int crtc_count = 0;
int i;
struct fb_info *info;
@@ -804,27 +926,30 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
/* push down into drivers */
- new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
- if (new_fb < 0)
- return new_fb;
+ ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (ret < 0)
+ return ret;
info = fb_helper->fbdev;
- /* set the fb pointer */
+ /*
+ * Set the fb pointer - usually drm_setup_crtcs does this for hotplug
+ * events, but at init time drm_setup_crtcs needs to be called before
+ * the fb is allocated (since we need to figure out the desired size of
+ * the fb before we can allocate it ...). Hence we need to fix things up
+ * here again.
+ */
for (i = 0; i < fb_helper->crtc_count; i++)
- fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+ if (fb_helper->crtc_info[i].mode_set.num_connectors)
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
- if (new_fb) {
- info->var.pixclock = 0;
- if (register_framebuffer(info) < 0)
- return -EINVAL;
- dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ info->var.pixclock = 0;
+ if (register_framebuffer(info) < 0)
+ return -EINVAL;
- } else {
- drm_fb_helper_set_par(info);
- }
+ dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
@@ -834,13 +959,25 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
- if (new_fb)
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
return 0;
}
-EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
+/**
+ * drm_fb_helper_fill_fix - initializes fixed fbdev information
+ * @info: fbdev registered by the helper
+ * @pitch: desired pitch
+ * @depth: desired depth
+ *
+ * Helper to fill in the fixed fbdev information useful for a non-accelerated
+ * fbdev emulation. Drivers which support acceleration methods which impose
+ * additional constraints need to set up their own limits.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * ->fb_probe callback.
+ */
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
@@ -861,6 +998,20 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+/**
+ * drm_fb_helper_fill_var - initializes variable fbdev information
+ * @info: fbdev instance to set up
+ * @fb_helper: fb helper instance to use as template
+ * @fb_width: desired fb width
+ * @fb_height: desired fb height
+ *
+ * Sets up the variable fbdev metainformation from the given fb helper instance
+ * and the drm framebuffer allocated in fb_helper->fb.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * ->fb_probe callback after having allocated the fbdev backing
+ * storage framebuffer.
+ */
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
@@ -1284,6 +1435,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
modeset->num_connectors = 0;
+ modeset->fb = NULL;
}
for (i = 0; i < fb_helper->connector_count; i++) {
@@ -1300,9 +1452,21 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ modeset->fb = fb_helper->fb;
}
}
+ /* Clear out any old modes if there are no more connected outputs. */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ if (modeset->num_connectors == 0) {
+ BUG_ON(modeset->fb);
+ BUG_ON(modeset->num_connectors);
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = NULL;
+ }
+ }
out:
kfree(crtcs);
kfree(modes);
@@ -1310,18 +1474,23 @@ out:
}
/**
- * drm_helper_initial_config - setup a sane initial connector configuration
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
* @fb_helper: fb_helper device struct
* @bpp_sel: bpp value to use for the framebuffer configuration
*
- * LOCKING:
- * Called at init time by the driver to set up the @fb_helper initial
- * configuration, must take the mode config lock.
- *
* Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
+ * Note that this also registers the fbdev and so allows userspace to call into
+ * the driver through the fbdev interfaces.
+ *
+ * This function will call down into the ->fb_probe callback to let
+ * the driver allocate and initialize the fbdev info structure and the drm
+ * framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
+ * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
+ * values for the fbdev info structure.
+ *
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
@@ -1330,9 +1499,6 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
struct drm_device *dev = fb_helper->dev;
int count = 0;
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(fb_helper->dev);
-
drm_fb_helper_parse_command_line(fb_helper);
count = drm_fb_helper_probe_connector_modes(fb_helper,
@@ -1355,12 +1521,17 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
* probing all the outputs attached to the fb
* @fb_helper: the drm_fb_helper
*
- * LOCKING:
- * Called at runtime, must take mode config lock.
- *
* Scan the connectors attached to the fb_helper and try to put together a
 * setup after notification of a change in output configuration.
*
+ * Called at runtime, takes the mode config locks to be able to check/change the
+ * modeset configuration. Must be run from process context (which usually means
+ * either the output polling work or a work item launched from the driver's
+ * hotplug interrupt).
+ *
+ * Note that the driver must ensure that this is only called _after_ the fb has
+ * been fully set up, i.e. after the call to drm_fb_helper_initial_config.
+ *
* RETURNS:
* 0 on success and a non-zero error code otherwise.
*/
@@ -1369,23 +1540,14 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
int count = 0;
u32 max_width, max_height, bpp_sel;
- int bound = 0, crtcs_bound = 0;
- struct drm_crtc *crtc;
if (!fb_helper->fb)
return 0;
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb)
- crtcs_bound++;
- if (crtc->fb == fb_helper->fb)
- bound++;
- }
-
- if (bound < crtcs_bound) {
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return 0;
}
DRM_DEBUG_KMS("\n");
@@ -1397,9 +1559,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
max_height);
drm_setup_crtcs(fb_helper);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
- return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+ drm_fb_helper_set_par(fb_helper->fbdev);
+
+ return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
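drm_fb_helper_restore_fbdev_mode() now warns unless all modeset locks are held, so callers have to take them explicitly, as the CMA and exynos lastclose paths elsewhere in this series do. A minimal sketch of a driver ->lastclose callback following that pattern (struct my_drm_private and the function name are placeholders, not from this patch):

#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>

struct my_drm_private {
	struct drm_fb_helper fbdev_helper;
};

/* Restore the fbcon configuration when the last file is closed, so the
 * user is not left staring at a black screen when e.g. X dies. */
static void my_driver_lastclose(struct drm_device *dev)
{
	struct my_drm_private *priv = dev->dev_private;

	drm_modeset_lock_all(dev);
	drm_fb_helper_restore_fbdev_mode(&priv->fbdev_helper);
	drm_modeset_unlock_all(dev);
}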
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 133b4132983e..13fdcd10a605 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -276,6 +276,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
+ mutex_init(&priv->fbs_lock);
INIT_LIST_HEAD(&priv->event_list);
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 24efae464e2c..af779ae19ebf 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -270,21 +270,19 @@ drm_gem_handle_create(struct drm_file *file_priv,
int ret;
/*
- * Get the user-visible handle using idr.
+ * Get the user-visible handle using idr. Preload and perform
+ * allocation under our spinlock.
*/
-again:
- /* ensure there is space available to allocate a handle */
- if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
- return -ENOMEM;
-
- /* do the allocation under our spinlock */
+ idr_preload(GFP_KERNEL);
spin_lock(&file_priv->table_lock);
- ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
+
+ ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
+
spin_unlock(&file_priv->table_lock);
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
+ idr_preload_end();
+ if (ret < 0)
return ret;
+ *handlep = ret;
drm_gem_object_handle_reference(obj);
@@ -451,29 +449,25 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;
-again:
- if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
- ret = -ENOMEM;
- goto err;
- }
-
+ idr_preload(GFP_KERNEL);
spin_lock(&dev->object_name_lock);
if (!obj->name) {
- ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
- &obj->name);
+ ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
+ obj->name = ret;
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
+ idr_preload_end();
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
+ if (ret < 0)
goto err;
+ ret = 0;
/* Allocate a reference for the name table. */
drm_gem_object_reference(obj);
} else {
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
+ idr_preload_end();
ret = 0;
}
@@ -561,8 +555,6 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, file_private);
-
- idr_remove_all(&file_private->object_idr);
idr_destroy(&file_private->object_idr);
}
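The handle and name allocation above is converted to the generic idr_preload()/idr_alloc() pattern: memory is preallocated outside the spinlock with GFP_KERNEL, and the id is then allocated under the lock with GFP_NOWAIT. A self-contained sketch of the pattern (function and parameter names are illustrative):

#include <linux/idr.h>
#include <linux/spinlock.h>

/* Allocate an id >= 1 for obj; returns the id or a negative errno. */
static int my_alloc_handle(struct idr *idr, spinlock_t *lock, void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);		/* preallocate outside the lock */
	spin_lock(lock);
	id = idr_alloc(idr, obj, 1, 0, GFP_NOWAIT);	/* end == 0: no upper bound */
	spin_unlock(lock);
	idr_preload_end();

	return id;
}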
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1aa8fee1e865..0a7e011509bd 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -249,3 +249,24 @@ int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
return drm_gem_handle_delete(file_priv, handle);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
+
+#ifdef CONFIG_DEBUG_FS
+void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
+{
+ struct drm_gem_object *obj = &cma_obj->base;
+ struct drm_device *dev = obj->dev;
+ uint64_t off = 0;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (obj->map_list.map)
+ off = (uint64_t)obj->map_list.hash.key;
+
+ seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
+ obj->name, obj->refcount.refcount.counter,
+ off, cma_obj->paddr, cma_obj->vaddr, obj->size);
+
+ seq_printf(m, "\n");
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
+#endif
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 80254547a3f8..7e4bae760e27 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -60,14 +60,13 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
- struct hlist_node *list;
unsigned int hashed_key;
int count = 0;
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, list, h_list, head)
+ hlist_for_each_entry(entry, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}
@@ -76,14 +75,13 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
- struct hlist_node *list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, list, h_list, head) {
+ hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
- return list;
+ return &entry->head;
if (entry->key > key)
break;
}
@@ -95,14 +93,13 @@ static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
- struct hlist_node *list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
- hlist_for_each_entry_rcu(entry, list, h_list, head) {
+ hlist_for_each_entry_rcu(entry, h_list, head) {
if (entry->key == key)
- return list;
+ return &entry->head;
if (entry->key > key)
break;
}
@@ -113,19 +110,19 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
- struct hlist_node *list, *parent;
+ struct hlist_node *parent;
unsigned int hashed_key;
unsigned long key = item->key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
- hlist_for_each_entry(entry, list, h_list, head) {
+ hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
break;
- parent = list;
+ parent = &entry->head;
}
if (parent) {
hlist_add_after_rcu(parent, &item->head);
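The hashtab changes track the new hlist_for_each_entry() interface: the separate struct hlist_node cursor argument is gone and the loop variable is the containing object itself, with the node reached via &entry->head where still needed. A small standalone sketch of a new-style bucket lookup (struct item is hypothetical):

#include <linux/list.h>

struct item {
	struct hlist_node head;
	unsigned long key;
};

/* Walk one hash bucket looking for key, without the old hlist_node cursor. */
static struct item *find_item(struct hlist_head *bucket, unsigned long key)
{
	struct item *entry;

	hlist_for_each_entry(entry, bucket, head) {
		if (entry->key == key)
			return entry;
	}
	return NULL;
}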
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 19c01ca3cc76..a6a8643a6a77 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -505,6 +505,7 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
/* Valid dotclock? */
if (dotclock > 0) {
+ int frame_size;
/* Convert scanline length in pixels and video dot clock to
* line duration, frame duration and pixel duration in
* nanoseconds:
@@ -512,7 +513,10 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
1000000000), dotclock);
- framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
+ frame_size = crtc->hwmode.crtc_htotal *
+ crtc->hwmode.crtc_vtotal;
+ framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000,
+ dotclock);
} else
DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
@@ -863,6 +867,7 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
now = get_drm_timestamp();
}
+ e->pipe = crtc;
send_vblank_event(dev, e, seq, &now);
}
EXPORT_SYMBOL(drm_send_vblank_event);
@@ -1218,8 +1223,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
int ret;
unsigned int flags, seq, crtc, high_crtc;
- if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
- return -EINVAL;
+ if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
+ return -EINVAL;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
return -EINVAL;
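The framedur_ns change in drm_calc_timestamping_constants() divides the full frame size by the dot clock instead of multiplying the already-truncated line duration by vtotal, which avoids accumulating the per-line rounding error. As an illustrative example, a mode with htotal = 2200, vtotal = 1125 and a 148 500 000 Hz dot clock truncates linedur_ns to 2200 * 10^9 / 148500000 = 14814 ns, so the old calculation gives 1125 * 14814 = 16665750 ns, while the new one gives 2200 * 1125 * 10^9 / 148500000 = 16666666 ns, within a nanosecond of the true 60 Hz frame period instead of roughly 0.9 us short.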
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2aa331499f81..db1e2d6f90d7 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -102,20 +102,6 @@ int drm_mm_pre_get(struct drm_mm *mm)
}
EXPORT_SYMBOL(drm_mm_pre_get);
-static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
-{
- return hole_node->start + hole_node->size;
-}
-
-static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
-{
- struct drm_mm_node *next_node =
- list_entry(hole_node->node_list.next, struct drm_mm_node,
- node_list);
-
- return next_node->start;
-}
-
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
@@ -127,7 +113,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;
- BUG_ON(!hole_node->hole_follows || node->allocated);
+ BUG_ON(node->allocated);
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
@@ -155,12 +141,57 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
BUG_ON(node->start + node->size > adj_end);
node->hole_follows = 0;
- if (node->start + node->size < hole_end) {
+ if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
}
+struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+ unsigned long start,
+ unsigned long size,
+ bool atomic)
+{
+ struct drm_mm_node *hole, *node;
+ unsigned long end = start + size;
+ unsigned long hole_start;
+ unsigned long hole_end;
+
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+ if (hole_start > start || hole_end < end)
+ continue;
+
+ node = drm_mm_kmalloc(mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ node->start = start;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole->node_list);
+
+ if (start == hole_start) {
+ hole->hole_follows = 0;
+ list_del_init(&hole->hole_stack);
+ }
+
+ node->hole_follows = 0;
+ if (end != hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ }
+
+ return node;
+ }
+
+ WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_mm_create_block);
+
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
@@ -253,7 +284,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
BUG_ON(node->start + node->size > end);
node->hole_follows = 0;
- if (node->start + node->size < hole_end) {
+ if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
@@ -327,12 +358,13 @@ void drm_mm_remove_node(struct drm_mm_node *node)
list_entry(node->node_list.prev, struct drm_mm_node, node_list);
if (node->hole_follows) {
- BUG_ON(drm_mm_hole_node_start(node)
- == drm_mm_hole_node_end(node));
+ BUG_ON(__drm_mm_hole_node_start(node) ==
+ __drm_mm_hole_node_end(node));
list_del(&node->hole_stack);
} else
- BUG_ON(drm_mm_hole_node_start(node)
- != drm_mm_hole_node_end(node));
+ BUG_ON(__drm_mm_hole_node_start(node) !=
+ __drm_mm_hole_node_end(node));
+
if (!prev_node->hole_follows) {
prev_node->hole_follows = 1;
@@ -390,6 +422,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
+ unsigned long adj_start;
+ unsigned long adj_end;
unsigned long best_size;
BUG_ON(mm->scanned_blocks);
@@ -397,17 +431,13 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
- unsigned long adj_start = drm_mm_hole_node_start(entry);
- unsigned long adj_end = drm_mm_hole_node_end(entry);
-
+ drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
continue;
}
- BUG_ON(!entry->hole_follows);
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
@@ -434,6 +464,8 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
+ unsigned long adj_start;
+ unsigned long adj_end;
unsigned long best_size;
BUG_ON(mm->scanned_blocks);
@@ -441,13 +473,11 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
- unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
- start : drm_mm_hole_node_start(entry);
- unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
- end : drm_mm_hole_node_end(entry);
-
- BUG_ON(!entry->hole_follows);
+ drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+ if (adj_start < start)
+ adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index d8da30e90db5..04fa6f1808d1 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -35,6 +35,8 @@
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
/**
* drm_mode_debug_printmodeline - debug print a mode
@@ -504,6 +506,74 @@ drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
}
EXPORT_SYMBOL(drm_gtf_mode);
+#if IS_ENABLED(CONFIG_VIDEOMODE)
+int drm_display_mode_from_videomode(const struct videomode *vm,
+ struct drm_display_mode *dmode)
+{
+ dmode->hdisplay = vm->hactive;
+ dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
+ dmode->hsync_end = dmode->hsync_start + vm->hsync_len;
+ dmode->htotal = dmode->hsync_end + vm->hback_porch;
+
+ dmode->vdisplay = vm->vactive;
+ dmode->vsync_start = dmode->vdisplay + vm->vfront_porch;
+ dmode->vsync_end = dmode->vsync_start + vm->vsync_len;
+ dmode->vtotal = dmode->vsync_end + vm->vback_porch;
+
+ dmode->clock = vm->pixelclock / 1000;
+
+ dmode->flags = 0;
+ if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
+ dmode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else if (vm->dmt_flags & VESA_DMT_HSYNC_LOW)
+ dmode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
+ dmode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else if (vm->dmt_flags & VESA_DMT_VSYNC_LOW)
+ dmode->flags |= DRM_MODE_FLAG_NVSYNC;
+ if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
+ dmode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN)
+ dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
+ drm_mode_set_name(dmode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
+#endif
+
+#if IS_ENABLED(CONFIG_OF_VIDEOMODE)
+/**
+ * of_get_drm_display_mode - get a drm_display_mode from devicetree
+ * @np: device_node with the timing specification
+ * @dmode: will be set to the return value
+ * @index: index into the list of display timings in devicetree
+ *
+ * This function is expensive and should only be used if only one mode is to be
+ * read from DT. To get multiple modes start with of_get_display_timings and
+ * work with that instead.
+ */
+int of_get_drm_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, int index)
+{
+ struct videomode vm;
+ int ret;
+
+ ret = of_get_videomode(np, &vm, index);
+ if (ret)
+ return ret;
+
+ drm_display_mode_from_videomode(&vm, dmode);
+
+ pr_debug("%s: got %dx%d display mode from %s\n",
+ of_node_full_name(np), vm.hactive, vm.vactive, np->name);
+ drm_mode_debug_printmodeline(dmode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
+#endif
+
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
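of_get_drm_display_mode() above is the single-mode convenience path on top of drm_display_mode_from_videomode(). A sketch of a probe-time caller that reads the first display timing of a device tree node and marks it as the fixed, preferred mode (the function name is illustrative):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static int my_panel_get_fixed_mode(struct device_node *np,
				   struct drm_display_mode *mode)
{
	int ret;

	/* index 0: use the first timing listed in the DT node */
	ret = of_get_drm_display_mode(np, mode, 0);
	if (ret)
		return ret;

	mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	return 0;
}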
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 754bc96e10c7..bd719e936e13 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -439,78 +439,67 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
return 0;
}
-#else
-
-int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
-{
- return -1;
-}
-
-#endif
-
-EXPORT_SYMBOL(drm_pci_init);
-
-/*@}*/
-void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(pdriver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
- DRM_INFO("Module unloaded\n");
-}
-EXPORT_SYMBOL(drm_pci_exit);
-
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
- int pos;
- u32 lnkcap = 0, lnkcap2 = 0;
+ u32 lnkcap, lnkcap2;
*mask = 0;
if (!dev->pdev)
return -EINVAL;
- if (!pci_is_pcie(dev->pdev))
- return -EINVAL;
-
root = dev->pdev->bus->self;
- pos = pci_pcie_cap(root);
- if (!pos)
- return -EINVAL;
-
 	/* we've been informed VIA and ServerWorks don't make the cut */
if (root->vendor == PCI_VENDOR_ID_VIA ||
root->vendor == PCI_VENDOR_ID_SERVERWORKS)
return -EINVAL;
- pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
- pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
- lnkcap &= PCI_EXP_LNKCAP_SLS;
- lnkcap2 &= 0xfe;
-
- if (lnkcap2) { /* PCIE GEN 3.0 */
+ if (lnkcap2) { /* PCIe r3.0-compliant */
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
*mask |= DRM_PCIE_SPEED_50;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
*mask |= DRM_PCIE_SPEED_80;
- } else {
- if (lnkcap & 1)
+ } else { /* pre-r3.0 */
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
- if (lnkcap & 2)
- *mask |= DRM_PCIE_SPEED_50;
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
+ *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
}
DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
+
+#else
+
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ return -1;
+}
+
+#endif
+
+EXPORT_SYMBOL(drm_pci_init);
+
+/*@}*/
+void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ if (driver->driver_features & DRIVER_MODESET) {
+ pci_unregister_driver(pdriver);
+ } else {
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_put_dev(dev);
+ }
+ DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_pci_exit);
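drm_pcie_get_speed_cap_mask() now reads the (PCIe r3.0 aware) link capability registers through pcie_capability_read_dword() and reports the supported speeds as DRM_PCIE_SPEED_* bits. A sketch of a driver-side query, e.g. before deciding whether to program a faster link speed (the helper name is illustrative):

#include <drm/drmP.h>

static bool my_pcie_gen2_capable(struct drm_device *dev)
{
	u32 mask;

	if (drm_pcie_get_speed_cap_mask(dev, &mask))
		return false;		/* unknown root port, stay conservative */

	return !!(mask & DRM_PCIE_SPEED_50);	/* 5.0 GT/s supported */
}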
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7f125738f44e..366910ddcfcb 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -53,7 +53,8 @@
* Self-importing: if userspace is using PRIME as a replacement for flink
* then it will get a fd->handle request for a GEM object that it created.
* Drivers should detect this situation and return back the gem object
- * from the dma-buf private.
+ * from the dma-buf private. Prime will do this automatically for drivers that
+ * use the drm_gem_prime_{import,export} helpers.
*/
struct drm_prime_member {
@@ -62,6 +63,137 @@ struct drm_prime_member {
uint32_t handle;
};
+static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct sg_table *sgt;
+
+ mutex_lock(&obj->dev->struct_mutex);
+
+ sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+
+ if (!IS_ERR_OR_NULL(sgt))
+ dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+ mutex_unlock(&obj->dev->struct_mutex);
+ return sgt;
+}
+
+static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ if (obj->export_dma_buf == dma_buf) {
+ /* drop the reference on the export fd holds */
+ obj->export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(obj);
+ }
+}
+
+static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ return dev->driver->gem_prime_vmap(obj);
+}
+
+static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ dev->driver->gem_prime_vunmap(obj, vaddr);
+}
+
+static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+
+static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .kmap = drm_gem_dmabuf_kmap,
+ .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
+ .kunmap = drm_gem_dmabuf_kunmap,
+ .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+/**
+ * DOC: PRIME Helpers
+ *
+ * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
+ * simpler APIs by using the helper functions @drm_gem_prime_export and
+ * @drm_gem_prime_import. These functions implement dma-buf support in terms of
+ * five lower-level driver callbacks:
+ *
+ * Export callbacks:
+ *
+ * - @gem_prime_pin (optional): prepare a GEM object for exporting
+ *
+ * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
+ *
+ * - @gem_prime_vmap: vmap a buffer exported by your driver
+ *
+ * - @gem_prime_vunmap: vunmap a buffer exported by your driver
+ *
+ * Import callback:
+ *
+ * - @gem_prime_import_sg_table (import): produce a GEM object from another
+ * driver's scatter/gather table
+ */
+
+struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags)
+{
+ if (dev->driver->gem_prime_pin) {
+ int ret = dev->driver->gem_prime_pin(obj);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
+ 0600);
+}
+EXPORT_SYMBOL(drm_gem_prime_export);
+
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd)
@@ -117,6 +249,58 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!dev->driver->gem_prime_import_sg_table)
+ return ERR_PTR(-EINVAL);
+
+ if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
+ obj = dma_buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ drm_gem_object_reference(obj);
+ dma_buf_put(dma_buf);
+ return obj;
+ }
+ }
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_PTR(PTR_ERR(attach));
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail_unmap;
+ }
+
+ obj->import_attach = attach;
+
+ return obj;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_gem_prime_import);
+
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
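With the generic export and import paths above, a driver only has to provide the lower-level callbacks named in the PRIME Helpers DOC block. A sketch of the corresponding drm_driver wiring, where the my_gem_* callbacks stand in for driver-specific implementations (prototypes only in this sketch):

#include <drm/drmP.h>

extern struct sg_table *my_gem_get_sg_table(struct drm_gem_object *obj);
extern struct drm_gem_object *my_gem_import_sg_table(struct drm_device *dev,
						      size_t size,
						      struct sg_table *sgt);
extern void *my_gem_vmap(struct drm_gem_object *obj);
extern void my_gem_vunmap(struct drm_gem_object *obj, void *vaddr);

static struct drm_driver my_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	/* generic PRIME entry points provided by the helpers */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= drm_gem_prime_export,
	.gem_prime_import	= drm_gem_prime_import,
	/* driver-specific callbacks the helpers call into */
	.gem_prime_get_sg_table	= my_gem_get_sg_table,
	.gem_prime_import_sg_table = my_gem_import_sg_table,
	.gem_prime_vmap		= my_gem_vmap,
	.gem_prime_vunmap	= my_gem_vunmap,
	/* .gem_prime_pin is optional and only needed when backing pages
	 * must be pinned before export */
};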
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 200e104f1fa0..7d30802a018f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -109,7 +109,6 @@ EXPORT_SYMBOL(drm_ut_debug_printk);
static int drm_minor_get_id(struct drm_device *dev, int type)
{
- int new_id;
int ret;
int base = 0, limit = 63;
@@ -121,25 +120,11 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
limit = base + 255;
}
-again:
- if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&drm_minors_idr, NULL,
- base, &new_id);
+ ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
- return ret;
- if (new_id >= limit) {
- idr_remove(&drm_minors_idr, new_id);
- return -EINVAL;
- }
- return new_id;
+ return ret == -ENOSPC ? -EINVAL : ret;
}
struct drm_master *drm_master_create(struct drm_minor *minor)
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 3cec30611417..34a156f0c336 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -18,7 +18,7 @@ int drm_get_usb_dev(struct usb_interface *interface,
usbdev = interface_to_usbdev(interface);
dev->usbdev = usbdev;
- dev->dev = &usbdev->dev;
+ dev->dev = &interface->dev;
mutex_lock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 294c0513f587..0e04f4ea441f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -99,6 +99,10 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
DRM_DEBUG_KMS("%s\n", __FILE__);
+ /* This fb should have only one gem object. */
+ if (WARN_ON(exynos_fb->buf_cnt != 1))
+ return -EINVAL;
+
return drm_gem_handle_create(file_priv,
&exynos_fb->exynos_gem_obj[0]->base, handle);
}
@@ -217,23 +221,25 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_fb *exynos_fb;
int i, ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object\n");
- return ERR_PTR(-ENOENT);
- }
-
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ ret = -ENOENT;
+ goto err_free;
+ }
+
drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
@@ -241,43 +247,44 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < exynos_fb->buf_cnt; i++) {
- struct exynos_drm_gem_obj *exynos_gem_obj;
- int ret;
-
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
- kfree(exynos_fb);
- return ERR_PTR(-ENOENT);
+ ret = -ENOENT;
+ exynos_fb->buf_cnt = i;
+ goto err_unreference;
}
exynos_gem_obj = to_exynos_gem_obj(obj);
+ exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
if (ret < 0) {
DRM_ERROR("cannot use this gem memory type for fb.\n");
- kfree(exynos_fb);
- return ERR_PTR(ret);
+ goto err_unreference;
}
-
- exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
- for (i = 0; i < exynos_fb->buf_cnt; i++) {
- struct exynos_drm_gem_obj *gem_obj;
-
- gem_obj = exynos_fb->exynos_gem_obj[i];
- drm_gem_object_unreference_unlocked(&gem_obj->base);
- }
-
- kfree(exynos_fb);
- return ERR_PTR(ret);
+ DRM_ERROR("failed to init framebuffer.\n");
+ goto err_unreference;
}
return &exynos_fb->fb;
+
+err_unreference:
+ for (i = 0; i < exynos_fb->buf_cnt; i++) {
+ struct drm_gem_object *obj;
+
+ obj = &exynos_fb->exynos_gem_obj[i]->base;
+ if (obj)
+ drm_gem_object_unreference_unlocked(obj);
+ }
+err_free:
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
}
struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 71f867340a88..68f0045f86b8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -226,36 +226,8 @@ out:
return ret;
}
-static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- int ret = 0;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /*
- * with !helper->fb, it means that this funcion is called first time
- * and after that, the helper->fb would be used as clone mode.
- */
- if (!helper->fb) {
- ret = exynos_drm_fbdev_create(helper, sizes);
- if (ret < 0) {
- DRM_ERROR("failed to create fbdev.\n");
- return ret;
- }
-
- /*
- * fb_helper expects a value more than 1 if succeed
- * because register_framebuffer() should be called.
- */
- ret = 1;
- }
-
- return ret;
-}
-
static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
- .fb_probe = exynos_drm_fbdev_probe,
+ .fb_probe = exynos_drm_fbdev_create,
};
int exynos_drm_fbdev_init(struct drm_device *dev)
@@ -295,6 +267,9 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
}
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_ERROR("failed to set up hw configuration.\n");
@@ -326,8 +301,10 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
- if (fb)
+ if (fb) {
+ drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
+ }
}
/* release linux framebuffer */
@@ -374,5 +351,7 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
if (!private || !private->fb_helper)
return;
+ drm_modeset_lock_all(dev);
drm_fb_helper_restore_fbdev_mode(private->fb_helper);
+ drm_modeset_unlock_all(dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index fb2f81b8063d..3b0da0378acf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
+#include <linux/of.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@@ -429,7 +430,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->pages = pages;
- sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
DRM_ERROR("failed to allocate sg table.\n");
ret = -ENOMEM;
@@ -1239,6 +1240,14 @@ static int g2d_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+#ifdef CONFIG_OF
+static const struct of_device_id exynos_g2d_match[] = {
+ { .compatible = "samsung,exynos5250-g2d" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_g2d_match);
+#endif
+
struct platform_driver g2d_driver = {
.probe = g2d_probe,
.remove = g2d_remove,
@@ -1246,5 +1255,6 @@ struct platform_driver g2d_driver = {
.name = "s5p-g2d",
.owner = THIS_MODULE,
.pm = &g2d_pm_ops,
+ .of_match_table = of_match_ptr(exynos_g2d_match),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 473180776528..67e17ce112b6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -329,17 +329,11 @@ static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
{
struct drm_file *file_priv;
- mutex_lock(&drm_dev->struct_mutex);
-
/* find current process's drm_file from filelist. */
- list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
- if (file_priv->filp == filp) {
- mutex_unlock(&drm_dev->struct_mutex);
+ list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
+ if (file_priv->filp == filp)
return file_priv;
- }
- }
- mutex_unlock(&drm_dev->struct_mutex);
WARN_ON(1);
return ERR_PTR(-EFAULT);
@@ -400,9 +394,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
*/
drm_gem_object_reference(obj);
- mutex_lock(&drm_dev->struct_mutex);
drm_vm_open_locked(drm_dev, vma);
- mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
@@ -432,6 +424,16 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
}
/*
+ * We have to use the gem object and its fops for the specific mmapper,
+ * but vm_mmap() can deliver only filp. So we have to change
+ * filp->f_op and filp->private_data temporarily, then restore
+ * them again. So it is important to hold the lock until the settings
+ * are restored, to prevent others from misusing filp->f_op or
+ * filp->private_data.
+ */
+ mutex_lock(&dev->struct_mutex);
+
+ /*
* Set specific mmper's fops. And it will be restored by
* exynos_drm_gem_mmap_buffer to dev->driver->fops.
* This is used to call specific mapper temporarily.
@@ -448,13 +450,20 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
addr = vm_mmap(file_priv->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference(obj);
if (IS_ERR((void *)addr)) {
- file_priv->filp->private_data = file_priv;
+ /* check filp->f_op, filp->private_data are restored */
+ if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
+ file_priv->filp->f_op = fops_get(dev->driver->fops);
+ file_priv->filp->private_data = file_priv;
+ }
+ mutex_unlock(&dev->struct_mutex);
return PTR_ERR((void *)addr);
}
+ mutex_unlock(&dev->struct_mutex);
+
args->mapped = addr;
DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 28644539b305..7c27df03c9ff 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -124,9 +124,21 @@ static struct edid *drm_hdmi_get_edid(struct device *dev,
static int drm_hdmi_check_timing(struct device *dev, void *timing)
{
struct drm_hdmi_context *ctx = to_context(dev);
+ int ret = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ /*
+ * Both the mixer and hdmi should be able to handle the requested mode.
+ * If either of the two fails, reject the mode as bad.
+ */
+
+ if (mixer_ops && mixer_ops->check_timing)
+ ret = mixer_ops->check_timing(ctx->mixer_ctx->ctx, timing);
+
+ if (ret)
+ return ret;
+
if (hdmi_ops && hdmi_ops->check_timing)
return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing);
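The new check simply requires that every component in the pipeline accepts the mode: the mixer is asked first, and only if it agrees is the HDMI core consulted. A small self-contained sketch of that "all validators must pass" shape, with invented validators:

#include <errno.h>
#include <stdio.h>

/* Hypothetical validators standing in for the mixer/hdmi check_timing ops. */
static int mixer_ok(int yres) { return (yres <= 1080) ? 0 : -EINVAL; }
static int hdmi_ok(int xres)  { return (xres <= 1920) ? 0 : -EINVAL; }

/* A mode is usable only if every stage in the pipeline accepts it. */
static int check_timing(int xres, int yres)
{
	int ret = mixer_ok(yres);

	if (ret)
		return ret;
	return hdmi_ok(xres);
}

int main(void)
{
	printf("1920x1080: %d\n", check_timing(1920, 1080));	/* 0 */
	printf("2560x1440: %d\n", check_timing(2560, 1440));	/* -EINVAL */
	return 0;
}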
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index d80516fc9ed7..b7faa3662307 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -32,7 +32,7 @@ struct exynos_hdmi_ops {
bool (*is_connected)(void *ctx);
struct edid *(*get_edid)(void *ctx,
struct drm_connector *connector);
- int (*check_timing)(void *ctx, void *timing);
+ int (*check_timing)(void *ctx, struct fb_videomode *timing);
int (*power_on)(void *ctx, int mode);
/* manager */
@@ -58,6 +58,9 @@ struct exynos_mixer_ops {
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
void (*win_commit)(void *ctx, int zpos);
void (*win_disable)(void *ctx, int zpos);
+
+ /* display */
+ int (*check_timing)(void *ctx, struct fb_videomode *timing);
};
void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 53b7deea8ab7..598e60f57d4b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -14,7 +14,7 @@
#define EXYNOS_DEV_ADDR_START 0x20000000
#define EXYNOS_DEV_ADDR_SIZE 0x40000000
-#define EXYNOS_DEV_ADDR_ORDER 0x4
+#define EXYNOS_DEV_ADDR_ORDER 0x0
#ifdef CONFIG_DRM_EXYNOS_IOMMU
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 1a556354e92f..1adce07ecb5b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -137,21 +137,15 @@ static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
DRM_DEBUG_KMS("%s\n", __func__);
-again:
- /* ensure there is space available to allocate a handle */
- if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("failed to get idr.\n");
- return -ENOMEM;
- }
-
/* do the allocation under our mutexlock */
mutex_lock(lock);
- ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+ ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
mutex_unlock(lock);
- if (ret == -EAGAIN)
- goto again;
+ if (ret < 0)
+ return ret;
- return ret;
+ *idp = ret;
+ return 0;
}
static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
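This hunk is part of the tree-wide idr API conversion: idr_pre_get() plus idr_get_new_above() and the -EAGAIN retry loop are replaced by a single idr_alloc() call that returns the new id (or a negative errno). A condensed sketch of the new-style allocation, assuming a kernel build context with an already-initialized idr and mutex:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Allocate an id >= 1 for @obj and return it via @idp; 0 on success. */
static int example_create_id(struct idr *id_idr, struct mutex *lock,
			     void *obj, u32 *idp)
{
	int ret;

	mutex_lock(lock);
	/* end == 0 means "no upper limit"; GFP_KERNEL may sleep here. */
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);
	if (ret < 0)
		return ret;

	*idp = ret;
	return 0;
}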
@@ -1786,8 +1780,6 @@ err_iommu:
drm_iommu_detach_device(drm_dev, ippdrv->dev);
err_idr:
- idr_remove_all(&ctx->ipp_idr);
- idr_remove_all(&ctx->prop_idr);
idr_destroy(&ctx->ipp_idr);
idr_destroy(&ctx->prop_idr);
return ret;
@@ -1965,8 +1957,6 @@ static int ipp_remove(struct platform_device *pdev)
exynos_drm_subdrv_unregister(&ctx->subdrv);
/* remove,destroy ipp idr */
- idr_remove_all(&ctx->ipp_idr);
- idr_remove_all(&ctx->prop_idr);
idr_destroy(&ctx->ipp_idr);
idr_destroy(&ctx->prop_idr);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 233247505ff8..2c5f266154ad 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -87,6 +87,73 @@ struct hdmi_resources {
int regul_count;
};
+struct hdmi_tg_regs {
+ u8 cmd[1];
+ u8 h_fsz[2];
+ u8 hact_st[2];
+ u8 hact_sz[2];
+ u8 v_fsz[2];
+ u8 vsync[2];
+ u8 vsync2[2];
+ u8 vact_st[2];
+ u8 vact_sz[2];
+ u8 field_chg[2];
+ u8 vact_st2[2];
+ u8 vact_st3[2];
+ u8 vact_st4[2];
+ u8 vsync_top_hdmi[2];
+ u8 vsync_bot_hdmi[2];
+ u8 field_top_hdmi[2];
+ u8 field_bot_hdmi[2];
+ u8 tg_3d[1];
+};
+
+struct hdmi_core_regs {
+ u8 h_blank[2];
+ u8 v2_blank[2];
+ u8 v1_blank[2];
+ u8 v_line[2];
+ u8 h_line[2];
+ u8 hsync_pol[1];
+ u8 vsync_pol[1];
+ u8 int_pro_mode[1];
+ u8 v_blank_f0[2];
+ u8 v_blank_f1[2];
+ u8 h_sync_start[2];
+ u8 h_sync_end[2];
+ u8 v_sync_line_bef_2[2];
+ u8 v_sync_line_bef_1[2];
+ u8 v_sync_line_aft_2[2];
+ u8 v_sync_line_aft_1[2];
+ u8 v_sync_line_aft_pxl_2[2];
+ u8 v_sync_line_aft_pxl_1[2];
+ u8 v_blank_f2[2]; /* for 3D mode */
+ u8 v_blank_f3[2]; /* for 3D mode */
+ u8 v_blank_f4[2]; /* for 3D mode */
+ u8 v_blank_f5[2]; /* for 3D mode */
+ u8 v_sync_line_aft_3[2];
+ u8 v_sync_line_aft_4[2];
+ u8 v_sync_line_aft_5[2];
+ u8 v_sync_line_aft_6[2];
+ u8 v_sync_line_aft_pxl_3[2];
+ u8 v_sync_line_aft_pxl_4[2];
+ u8 v_sync_line_aft_pxl_5[2];
+ u8 v_sync_line_aft_pxl_6[2];
+ u8 vact_space_1[2];
+ u8 vact_space_2[2];
+ u8 vact_space_3[2];
+ u8 vact_space_4[2];
+ u8 vact_space_5[2];
+ u8 vact_space_6[2];
+};
+
+struct hdmi_v14_conf {
+ int pixel_clock;
+ struct hdmi_core_regs core;
+ struct hdmi_tg_regs tg;
+ int cea_video_id;
+};
+
struct hdmi_context {
struct device *dev;
struct drm_device *drm_dev;
@@ -104,6 +171,7 @@ struct hdmi_context {
/* current hdmiphy conf index */
int cur_conf;
+ struct hdmi_v14_conf mode_conf;
struct hdmi_resources res;
@@ -392,586 +460,132 @@ static const struct hdmi_v13_conf hdmi_v13_confs[] = {
};
/* HDMI Version 1.4 */
-static const u8 hdmiphy_conf27_027[32] = {
- 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
- 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
- 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
-};
-
-static const u8 hdmiphy_conf74_176[32] = {
- 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x5b, 0xef, 0x08,
- 0x81, 0xa0, 0xb9, 0xd8, 0x45, 0xa0, 0xac, 0x80,
- 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
-};
-
-static const u8 hdmiphy_conf74_25[32] = {
- 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
- 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
- 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
-};
-
-static const u8 hdmiphy_conf148_5[32] = {
- 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
- 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
- 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
-};
-
-struct hdmi_tg_regs {
- u8 cmd;
- u8 h_fsz_l;
- u8 h_fsz_h;
- u8 hact_st_l;
- u8 hact_st_h;
- u8 hact_sz_l;
- u8 hact_sz_h;
- u8 v_fsz_l;
- u8 v_fsz_h;
- u8 vsync_l;
- u8 vsync_h;
- u8 vsync2_l;
- u8 vsync2_h;
- u8 vact_st_l;
- u8 vact_st_h;
- u8 vact_sz_l;
- u8 vact_sz_h;
- u8 field_chg_l;
- u8 field_chg_h;
- u8 vact_st2_l;
- u8 vact_st2_h;
- u8 vact_st3_l;
- u8 vact_st3_h;
- u8 vact_st4_l;
- u8 vact_st4_h;
- u8 vsync_top_hdmi_l;
- u8 vsync_top_hdmi_h;
- u8 vsync_bot_hdmi_l;
- u8 vsync_bot_hdmi_h;
- u8 field_top_hdmi_l;
- u8 field_top_hdmi_h;
- u8 field_bot_hdmi_l;
- u8 field_bot_hdmi_h;
- u8 tg_3d;
-};
-
-struct hdmi_core_regs {
- u8 h_blank[2];
- u8 v2_blank[2];
- u8 v1_blank[2];
- u8 v_line[2];
- u8 h_line[2];
- u8 hsync_pol[1];
- u8 vsync_pol[1];
- u8 int_pro_mode[1];
- u8 v_blank_f0[2];
- u8 v_blank_f1[2];
- u8 h_sync_start[2];
- u8 h_sync_end[2];
- u8 v_sync_line_bef_2[2];
- u8 v_sync_line_bef_1[2];
- u8 v_sync_line_aft_2[2];
- u8 v_sync_line_aft_1[2];
- u8 v_sync_line_aft_pxl_2[2];
- u8 v_sync_line_aft_pxl_1[2];
- u8 v_blank_f2[2]; /* for 3D mode */
- u8 v_blank_f3[2]; /* for 3D mode */
- u8 v_blank_f4[2]; /* for 3D mode */
- u8 v_blank_f5[2]; /* for 3D mode */
- u8 v_sync_line_aft_3[2];
- u8 v_sync_line_aft_4[2];
- u8 v_sync_line_aft_5[2];
- u8 v_sync_line_aft_6[2];
- u8 v_sync_line_aft_pxl_3[2];
- u8 v_sync_line_aft_pxl_4[2];
- u8 v_sync_line_aft_pxl_5[2];
- u8 v_sync_line_aft_pxl_6[2];
- u8 vact_space_1[2];
- u8 vact_space_2[2];
- u8 vact_space_3[2];
- u8 vact_space_4[2];
- u8 vact_space_5[2];
- u8 vact_space_6[2];
-};
-
-struct hdmi_preset_conf {
- struct hdmi_core_regs core;
- struct hdmi_tg_regs tg;
-};
-
-struct hdmi_conf {
- int width;
- int height;
- int vrefresh;
- bool interlace;
- int cea_video_id;
- const u8 *hdmiphy_data;
- const struct hdmi_preset_conf *conf;
-};
-
-static const struct hdmi_preset_conf hdmi_conf_480p60 = {
- .core = {
- .h_blank = {0x8a, 0x00},
- .v2_blank = {0x0d, 0x02},
- .v1_blank = {0x2d, 0x00},
- .v_line = {0x0d, 0x02},
- .h_line = {0x5a, 0x03},
- .hsync_pol = {0x01},
- .vsync_pol = {0x01},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x0e, 0x00},
- .h_sync_end = {0x4c, 0x00},
- .v_sync_line_bef_2 = {0x0f, 0x00},
- .v_sync_line_bef_1 = {0x09, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x5a, 0x03, /* h_fsz */
- 0x8a, 0x00, 0xd0, 0x02, /* hact */
- 0x0d, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0xe0, 0x01, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
- },
+struct hdmiphy_config {
+ int pixel_clock;
+ u8 conf[32];
};
-static const struct hdmi_preset_conf hdmi_conf_720p50 = {
- .core = {
- .h_blank = {0xbc, 0x02},
- .v2_blank = {0xee, 0x02},
- .v1_blank = {0x1e, 0x00},
- .v_line = {0xee, 0x02},
- .h_line = {0xbc, 0x07},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0xb6, 0x01},
- .h_sync_end = {0xde, 0x01},
- .v_sync_line_bef_2 = {0x0a, 0x00},
- .v_sync_line_bef_1 = {0x05, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0xbc, 0x07, /* h_fsz */
- 0xbc, 0x02, 0x00, 0x05, /* hact */
- 0xee, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x1e, 0x00, 0xd0, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+/* list of all required phy config settings */
+static const struct hdmiphy_config hdmiphy_v14_configs[] = {
+ {
+ .pixel_clock = 25200000,
+ .conf = {
+ 0x01, 0x51, 0x2A, 0x75, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0xfc, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xf4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_720p60 = {
- .core = {
- .h_blank = {0x72, 0x01},
- .v2_blank = {0xee, 0x02},
- .v1_blank = {0x1e, 0x00},
- .v_line = {0xee, 0x02},
- .h_line = {0x72, 0x06},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x6c, 0x00},
- .h_sync_end = {0x94, 0x00},
- .v_sync_line_bef_2 = {0x0a, 0x00},
- .v_sync_line_bef_1 = {0x05, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 27000000,
+ .conf = {
+ 0x01, 0xd1, 0x22, 0x51, 0x40, 0x08, 0xfc, 0x20,
+ 0x98, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x72, 0x06, /* h_fsz */
- 0x72, 0x01, 0x00, 0x05, /* hact */
- 0xee, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x1e, 0x00, 0xd0, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 27027000,
+ .conf = {
+ 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
+ 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
- .core = {
- .h_blank = {0xd0, 0x02},
- .v2_blank = {0x32, 0x02},
- .v1_blank = {0x16, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x50, 0x0a},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x01},
- .v_blank_f0 = {0x49, 0x02},
- .v_blank_f1 = {0x65, 0x04},
- .h_sync_start = {0x0e, 0x02},
- .h_sync_end = {0x3a, 0x02},
- .v_sync_line_bef_2 = {0x07, 0x00},
- .v_sync_line_bef_1 = {0x02, 0x00},
- .v_sync_line_aft_2 = {0x39, 0x02},
- .v_sync_line_aft_1 = {0x34, 0x02},
- .v_sync_line_aft_pxl_2 = {0x38, 0x07},
- .v_sync_line_aft_pxl_1 = {0x38, 0x07},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 36000000,
+ .conf = {
+ 0x01, 0x51, 0x2d, 0x55, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xab, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x50, 0x0a, /* h_fsz */
- 0xd0, 0x02, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x16, 0x00, 0x1c, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 40000000,
+ .conf = {
+ 0x01, 0x51, 0x32, 0x55, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x2c, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x9a, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v2_blank = {0x32, 0x02},
- .v1_blank = {0x16, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x98, 0x08},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x01},
- .v_blank_f0 = {0x49, 0x02},
- .v_blank_f1 = {0x65, 0x04},
- .h_sync_start = {0x56, 0x00},
- .h_sync_end = {0x82, 0x00},
- .v_sync_line_bef_2 = {0x07, 0x00},
- .v_sync_line_bef_1 = {0x02, 0x00},
- .v_sync_line_aft_2 = {0x39, 0x02},
- .v_sync_line_aft_1 = {0x34, 0x02},
- .v_sync_line_aft_pxl_2 = {0xa4, 0x04},
- .v_sync_line_aft_pxl_1 = {0xa4, 0x04},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 65000000,
+ .conf = {
+ 0x01, 0xd1, 0x36, 0x34, 0x40, 0x1e, 0x0a, 0x08,
+ 0x82, 0xa0, 0x45, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xbd, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x16, 0x00, 0x1c, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 74176000,
+ .conf = {
+ 0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 0x08,
+ 0x82, 0xa0, 0x73, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x56, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080p30 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v2_blank = {0x65, 0x04},
- .v1_blank = {0x2d, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x98, 0x08},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x56, 0x00},
- .h_sync_end = {0x82, 0x00},
- .v_sync_line_bef_2 = {0x09, 0x00},
- .v_sync_line_bef_1 = {0x04, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 74250000,
+ .conf = {
+ 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
+ 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 83500000,
+ .conf = {
+ 0x01, 0xd1, 0x23, 0x11, 0x40, 0x0c, 0xfb, 0x08,
+ 0x85, 0xa0, 0xd1, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x93, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
- .core = {
- .h_blank = {0xd0, 0x02},
- .v2_blank = {0x65, 0x04},
- .v1_blank = {0x2d, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x50, 0x0a},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x0e, 0x02},
- .h_sync_end = {0x3a, 0x02},
- .v_sync_line_bef_2 = {0x09, 0x00},
- .v_sync_line_bef_1 = {0x04, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 106500000,
+ .conf = {
+ 0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
+ 0x84, 0xa0, 0x0a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x50, 0x0a, /* h_fsz */
- 0xd0, 0x02, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 108000000,
+ .conf = {
+ 0x01, 0x51, 0x2d, 0x15, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v2_blank = {0x65, 0x04},
- .v1_blank = {0x2d, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x98, 0x08},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x56, 0x00},
- .h_sync_end = {0x82, 0x00},
- .v_sync_line_bef_2 = {0x09, 0x00},
- .v_sync_line_bef_1 = {0x04, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 146250000,
+ .conf = {
+ 0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08,
+ 0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x50, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 148500000,
+ .conf = {
+ 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
+ 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ },
},
};
-static const struct hdmi_conf hdmi_confs[] = {
- { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
- { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
- { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
- { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
- { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
- { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
- { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
- { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
-};
-
struct hdmi_infoframe {
enum HDMI_PACKET_TYPE type;
u8 ver;
@@ -1275,31 +889,6 @@ static int hdmi_v13_conf_index(struct drm_display_mode *mode)
return -EINVAL;
}
-static int hdmi_v14_conf_index(struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
- if (hdmi_confs[i].width == mode->hdisplay &&
- hdmi_confs[i].height == mode->vdisplay &&
- hdmi_confs[i].vrefresh == mode->vrefresh &&
- hdmi_confs[i].interlace ==
- ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
- true : false))
- return i;
-
- return -EINVAL;
-}
-
-static int hdmi_conf_index(struct hdmi_context *hdata,
- struct drm_display_mode *mode)
-{
- if (hdata->type == HDMI_TYPE13)
- return hdmi_v13_conf_index(mode);
-
- return hdmi_v14_conf_index(mode);
-}
-
static u8 hdmi_chksum(struct hdmi_context *hdata,
u32 start, u8 len, u32 hdr_sum)
{
@@ -1357,7 +946,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
if (hdata->type == HDMI_TYPE13)
vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
else
- vic = hdmi_confs[hdata->cur_conf].cea_video_id;
+ vic = hdata->mode_conf.cea_video_id;
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
@@ -1434,44 +1023,51 @@ static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
return -EINVAL;
}
+static int hdmi_v14_find_phy_conf(int pixel_clock)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++) {
+ if (hdmiphy_v14_configs[i].pixel_clock == pixel_clock)
+ return i;
+ }
+
+ DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
+ return -EINVAL;
+}
+
static int hdmi_v14_check_timing(struct fb_videomode *check_timing)
{
int i;
- DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ DRM_DEBUG_KMS("mode: xres=%d, yres=%d, refresh=%d, clock=%d, intl=%d\n",
check_timing->xres, check_timing->yres,
- check_timing->refresh, (check_timing->vmode &
- FB_VMODE_INTERLACED) ? true : false);
+ check_timing->refresh, check_timing->pixclock,
+ (check_timing->vmode & FB_VMODE_INTERLACED) ?
+ true : false);
- for (i = 0; i < ARRAY_SIZE(hdmi_confs); i++)
- if (hdmi_confs[i].width == check_timing->xres &&
- hdmi_confs[i].height == check_timing->yres &&
- hdmi_confs[i].vrefresh == check_timing->refresh &&
- hdmi_confs[i].interlace ==
- ((check_timing->vmode & FB_VMODE_INTERLACED) ?
- true : false))
- return 0;
-
- /* TODO */
+ for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++)
+ if (hdmiphy_v14_configs[i].pixel_clock ==
+ check_timing->pixclock)
+ return 0;
return -EINVAL;
}
-static int hdmi_check_timing(void *ctx, void *timing)
+static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
{
struct hdmi_context *hdata = ctx;
- struct fb_videomode *check_timing = timing;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres,
- check_timing->yres, check_timing->refresh,
- check_timing->vmode);
+ DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", timing->xres,
+ timing->yres, timing->refresh,
+ timing->vmode);
if (hdata->type == HDMI_TYPE13)
- return hdmi_v13_check_timing(check_timing);
+ return hdmi_v13_check_timing(timing);
else
- return hdmi_v14_check_timing(check_timing);
+ return hdmi_v14_check_timing(timing);
}
static void hdmi_set_acr(u32 freq, u8 *acr)
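With the fixed per-resolution tables removed, the v1.4 path only needs a PHY configuration whose pixel clock matches the mode; note that drm_display_mode.clock is in kHz, which is why later hunks pass `clock * 1000` to hdmi_v14_find_phy_conf(). A standalone sketch of that lookup against a trimmed-down table (the two clock values are taken from the table added above):

#include <stddef.h>
#include <stdio.h>

struct phy_config {
	int pixel_clock;	/* Hz */
};

/* A trimmed copy of the table: only two of the supported clocks. */
static const struct phy_config configs[] = {
	{ .pixel_clock =  25200000 },
	{ .pixel_clock = 148500000 },
};

static int find_phy_conf(int pixel_clock)
{
	size_t i;

	for (i = 0; i < sizeof(configs) / sizeof(configs[0]); i++)
		if (configs[i].pixel_clock == pixel_clock)
			return (int)i;
	return -1;
}

int main(void)
{
	int mode_clock_khz = 148500;	/* e.g. 1080p60 */

	/* drm_display_mode.clock is in kHz, the table is in Hz. */
	printf("index = %d\n", find_phy_conf(mode_clock_khz * 1000));
	return 0;
}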
@@ -1795,9 +1391,8 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
{
- const struct hdmi_preset_conf *conf = hdmi_confs[hdata->cur_conf].conf;
- const struct hdmi_core_regs *core = &conf->core;
- const struct hdmi_tg_regs *tg = &conf->tg;
+ struct hdmi_core_regs *core = &hdata->mode_conf.core;
+ struct hdmi_tg_regs *tg = &hdata->mode_conf.tg;
int tries;
/* setting core registers */
@@ -1900,39 +1495,39 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
/* Timing generator registers */
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]);
/* waiting for HDMIPHY's PLL to get to steady state */
for (tries = 100; tries; --tries) {
@@ -2029,10 +1624,17 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
}
/* pixel clock */
- if (hdata->type == HDMI_TYPE13)
+ if (hdata->type == HDMI_TYPE13) {
hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data;
- else
- hdmiphy_data = hdmi_confs[hdata->cur_conf].hdmiphy_data;
+ } else {
+ i = hdmi_v14_find_phy_conf(hdata->mode_conf.pixel_clock);
+ if (i < 0) {
+ DRM_ERROR("failed to find hdmiphy conf\n");
+ return;
+ }
+
+ hdmiphy_data = hdmiphy_v14_configs[i].conf;
+ }
memcpy(buffer, hdmiphy_data, 32);
ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
@@ -2100,7 +1702,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
if (hdata->type == HDMI_TYPE13)
index = hdmi_v13_conf_index(adjusted_mode);
else
- index = hdmi_v14_conf_index(adjusted_mode);
+ index = hdmi_v14_find_phy_conf(adjusted_mode->clock * 1000);
/* just return if user desired mode exists. */
if (index >= 0)
@@ -2114,7 +1716,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
if (hdata->type == HDMI_TYPE13)
index = hdmi_v13_conf_index(m);
else
- index = hdmi_v14_conf_index(m);
+ index = hdmi_v14_find_phy_conf(m->clock * 1000);
if (index >= 0) {
struct drm_mode_object base;
@@ -2123,6 +1725,9 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
+ DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+
/* preserve display mode header while copying. */
head = adjusted_mode->head;
base = adjusted_mode->base;
@@ -2134,6 +1739,122 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
}
}
+static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
+{
+ int i;
+ BUG_ON(num_bytes > 4);
+ for (i = 0; i < num_bytes; i++)
+ reg_pair[i] = (value >> (8 * i)) & 0xff;
+}
+
+static void hdmi_v14_mode_set(struct hdmi_context *hdata,
+ struct drm_display_mode *m)
+{
+ struct hdmi_core_regs *core = &hdata->mode_conf.core;
+ struct hdmi_tg_regs *tg = &hdata->mode_conf.tg;
+
+ hdata->mode_conf.cea_video_id = drm_match_cea_mode(m);
+
+ hdata->mode_conf.pixel_clock = m->clock * 1000;
+ hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
+ hdmi_set_reg(core->v_line, 2, m->vtotal);
+ hdmi_set_reg(core->h_line, 2, m->htotal);
+ hdmi_set_reg(core->hsync_pol, 1,
+ (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
+ hdmi_set_reg(core->vsync_pol, 1,
+ (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
+ hdmi_set_reg(core->int_pro_mode, 1,
+ (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+
+ /*
+ * Quirk of the Exynos 5 HDMI IP: hsync_start and hsync_end must be
+ * programmed 2 pixels less than the values calculated from the mode.
+ */
+
+ /* Following values & calculations differ for different type of modes */
+ if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* Interlaced Mode */
+ hdmi_set_reg(core->v_sync_line_bef_2, 2,
+ (m->vsync_end - m->vdisplay) / 2);
+ hdmi_set_reg(core->v_sync_line_bef_1, 2,
+ (m->vsync_start - m->vdisplay) / 2);
+ hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
+ hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
+ hdmi_set_reg(core->v_blank_f0, 2, (m->vtotal +
+ ((m->vsync_end - m->vsync_start) * 4) + 5) / 2);
+ hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
+ hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
+ hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2,
+ (m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2,
+ (m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
+ hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
+ hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/
+ hdmi_set_reg(tg->vact_st3, 2, 0x0);
+ hdmi_set_reg(tg->vact_st4, 2, 0x0);
+ } else {
+ /* Progressive Mode */
+ hdmi_set_reg(core->v_sync_line_bef_2, 2,
+ m->vsync_end - m->vdisplay);
+ hdmi_set_reg(core->v_sync_line_bef_1, 2,
+ m->vsync_start - m->vdisplay);
+ hdmi_set_reg(core->v2_blank, 2, m->vtotal);
+ hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay);
+ hdmi_set_reg(core->v_blank_f0, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f1, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff);
+ hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
+ hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
+ hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
+ hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
+ hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
+ }
+
+ /* Following values & calculations are same irrespective of mode type */
+ hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2);
+ hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2);
+ hdmi_set_reg(core->vact_space_1, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_2, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_3, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_4, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_5, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_6, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f2, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f3, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f4, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f5, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff);
+
+ /* Timing generator registers */
+ hdmi_set_reg(tg->cmd, 1, 0x0);
+ hdmi_set_reg(tg->h_fsz, 2, m->htotal);
+ hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
+ hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
+ hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
+ hdmi_set_reg(tg->vsync, 2, 0x1);
+ hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
+ hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
+ hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->tg_3d, 1, 0x0);
+
+}
+
static void hdmi_mode_set(void *ctx, void *mode)
{
struct hdmi_context *hdata = ctx;
@@ -2141,11 +1862,15 @@ static void hdmi_mode_set(void *ctx, void *mode)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- conf_idx = hdmi_conf_index(hdata, mode);
- if (conf_idx >= 0)
- hdata->cur_conf = conf_idx;
- else
- DRM_DEBUG_KMS("not supported mode\n");
+ if (hdata->type == HDMI_TYPE13) {
+ conf_idx = hdmi_v13_conf_index(mode);
+ if (conf_idx >= 0)
+ hdata->cur_conf = conf_idx;
+ else
+ DRM_DEBUG_KMS("not supported mode\n");
+ } else {
+ hdmi_v14_mode_set(hdata, mode);
+ }
}
static void hdmi_get_max_resol(void *ctx, unsigned int *width,
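hdmi_v14_mode_set() now derives the register values that used to live in the static preset tables, with hdmi_set_reg() splitting each value into little-endian byte pairs. A quick standalone check that the computation reproduces an old table entry: for 1080p60, htotal=2200 and hdisplay=1920, so h_blank = 280 = 0x118, i.e. {0x18, 0x01}, exactly the removed hdmi_conf_1080p60 value:

#include <stdint.h>
#include <stdio.h>

/* Same splitting as hdmi_set_reg(): lowest byte first. */
static void set_reg(uint8_t *reg_pair, int num_bytes, uint32_t value)
{
	int i;

	for (i = 0; i < num_bytes; i++)
		reg_pair[i] = (value >> (8 * i)) & 0xff;
}

int main(void)
{
	uint8_t h_blank[2];
	int htotal = 2200, hdisplay = 1920;	/* 1080p60 */

	set_reg(h_blank, 2, htotal - hdisplay);
	/* prints 0x18 0x01, matching the removed static table entry */
	printf("0x%02x 0x%02x\n", h_blank[0], h_blank[1]);
	return 0;
}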
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index c414584bfbae..e919aba29b3d 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -284,13 +284,13 @@ static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
MXR_CFG_SCAN_PROGRASSIVE);
/* choosing between porper HD and SD mode */
- if (height == 480)
+ if (height <= 480)
val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
- else if (height == 576)
+ else if (height <= 576)
val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
- else if (height == 720)
+ else if (height <= 720)
val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
- else if (height == 1080)
+ else if (height <= 1080)
val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
else
val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
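Relaxing the comparisons from == to <= means any height up to a class limit selects that scan class instead of falling through to the default; for example a 1024-line mode, which previously hit the 720p fallback, is now driven as 1080-class HD. A tiny standalone sketch of the classification:

#include <stdio.h>

static const char *scan_class(unsigned int height)
{
	if (height <= 480)
		return "NTSC SD";
	else if (height <= 576)
		return "PAL SD";
	else if (height <= 720)
		return "HD 720";
	else if (height <= 1080)
		return "HD 1080";
	return "HD 720 (fallback)";
}

int main(void)
{
	/* 1024 lines used to hit the fallback; now it maps to HD 1080. */
	printf("1024 -> %s\n", scan_class(1024));
	printf("1080 -> %s\n", scan_class(1080));
	return 0;
}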
@@ -818,6 +818,29 @@ static void mixer_win_disable(void *ctx, int win)
mixer_ctx->win_data[win].enabled = false;
}
+int mixer_check_timing(void *ctx, struct fb_videomode *timing)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ u32 w, h;
+
+ w = timing->xres;
+ h = timing->yres;
+
+ DRM_DEBUG_KMS("%s : xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ __func__, timing->xres, timing->yres,
+ timing->refresh, (timing->vmode &
+ FB_VMODE_INTERLACED) ? true : false);
+
+ if (mixer_ctx->mxr_ver == MXR_VER_0_0_0_16)
+ return 0;
+
+ if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
+ (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
+ (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
+ return 0;
+
+ return -EINVAL;
+}
static void mixer_wait_for_vblank(void *ctx)
{
struct mixer_context *mixer_ctx = ctx;
@@ -955,6 +978,9 @@ static struct exynos_mixer_ops mixer_ops = {
.win_mode_set = mixer_win_mode_set,
.win_commit = mixer_win_commit,
.win_disable = mixer_win_disable,
+
+ /* display */
+ .check_timing = mixer_check_timing,
};
static irqreturn_t mixer_irq_handler(int irq, void *arg)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 51044cc55cf2..88d9ef6b5b4a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -27,6 +27,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index afded54dbb10..2590cac84257 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -260,13 +260,13 @@ static int psb_framebuffer_init(struct drm_device *dev,
default:
return -EINVAL;
}
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ fb->gtt = gt;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
- fb->gtt = gt;
return 0;
}
@@ -545,9 +545,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- int new_fb = 0;
int bytespp;
- int ret;
bytespp = sizes->surface_bpp / 8;
if (bytespp == 3) /* no 24bit packed */
@@ -562,13 +560,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
sizes->surface_depth = 16;
}
- if (!helper->fb) {
- ret = psbfb_create(psb_fbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
+ return psbfb_create(psb_fbdev, sizes);
}
static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
@@ -590,6 +582,7 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
framebuffer_release(info);
}
drm_fb_helper_fini(&fbdev->psb_fb_helper);
+ drm_framebuffer_unregister_private(&psbfb->base);
drm_framebuffer_cleanup(&psbfb->base);
if (psbfb->gtt)
@@ -615,6 +608,10 @@ int psb_fbdev_init(struct drm_device *dev)
INTELFB_CONN_LIMIT);
drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
return 0;
}
@@ -668,30 +665,6 @@ static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct psb_framebuffer *psbfb = to_psb_fb(fb);
struct gtt_range *r = psbfb->gtt;
- struct drm_device *dev = fb->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_fbdev *fbdev = dev_priv->fbdev;
- struct drm_crtc *crtc;
- int reset = 0;
-
- /* Should never get stolen memory for a user fb */
- WARN_ON(r->stolen);
-
- /* Check if we are erroneously live */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- if (crtc->fb == fb)
- reset = 1;
-
- if (reset)
- /*
- * Now force a sane response before we permit the DRM CRTC
- * layer to do stupid things like blank the display. Instead
- * we reset this framebuffer as if the user had forced a reset.
- * We must do this before the cleanup so that the DRM layer
- * doesn't get a chance to stick its oar in where it isn't
- * wanted.
- */
- drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
/* Let DRM do its clean up */
drm_framebuffer_cleanup(fb);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 04a371aceb34..054e26e769ec 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -202,7 +202,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
WARN_ON(gt->pages);
/* This is the shared memory object that backs the GEM resource */
- inode = gt->gem.filp->f_path.dentry->d_inode;
+ inode = file_inode(gt->gem.filp);
mapping = inode->i_mapping;
gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index b58c4701c4e8..f6f534b4197e 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -194,7 +194,7 @@ static int psb_save_display_registers(struct drm_device *dev)
regs->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
/* Save crtc and output state */
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (drm_helper_crtc_in_use(crtc))
crtc->funcs->save(crtc);
@@ -204,7 +204,7 @@ static int psb_save_display_registers(struct drm_device *dev)
if (connector->funcs->save)
connector->funcs->save(connector);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return 0;
}
@@ -234,7 +234,7 @@ static int psb_restore_display_registers(struct drm_device *dev)
/*make sure VGA plane is off. it initializes to on after reset!*/
PSB_WVDC32(0x80000000, VGACNTRL);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (drm_helper_crtc_in_use(crtc))
crtc->funcs->restore(crtc);
@@ -243,7 +243,7 @@ static int psb_restore_display_registers(struct drm_device *dev)
if (connector->funcs->restore)
connector->funcs->restore(connector);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return 0;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index dd1fbfa7e467..111e3df9c5de 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -149,6 +149,16 @@ static struct drm_ioctl_desc psb_ioctls[] = {
static void psb_lastclose(struct drm_device *dev)
{
+ int ret;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_fbdev *fbdev = dev_priv->fbdev;
+
+ drm_modeset_lock_all(dev);
+ ret = drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
+ if (ret)
+ DRM_DEBUG("failed to restore crtc mode\n");
+ drm_modeset_unlock_all(dev);
+
return;
}
@@ -476,7 +486,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
case PSB_MODE_OPERATION_MODE_VALID:
umode = &arg->mode;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, obj_id,
DRM_MODE_OBJECT_CONNECTOR);
@@ -525,7 +535,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
if (mode)
drm_mode_destroy(dev, mode);
mode_op_out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
default:
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 8033526bb53b..9edb1902a096 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -85,14 +85,14 @@ struct psb_intel_limit_t {
#define I9XX_DOT_MAX 400000
#define I9XX_VCO_MIN 1400000
#define I9XX_VCO_MAX 2800000
-#define I9XX_N_MIN 3
-#define I9XX_N_MAX 8
+#define I9XX_N_MIN 1
+#define I9XX_N_MAX 6
#define I9XX_M_MIN 70
#define I9XX_M_MAX 120
-#define I9XX_M1_MIN 10
-#define I9XX_M1_MAX 20
-#define I9XX_M2_MIN 5
-#define I9XX_M2_MAX 9
+#define I9XX_M1_MIN 8
+#define I9XX_M1_MAX 18
+#define I9XX_M2_MIN 3
+#define I9XX_M2_MAX 7
#define I9XX_P_SDVO_DAC_MIN 5
#define I9XX_P_SDVO_DAC_MAX 80
#define I9XX_P_LVDS_MIN 7
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
new file mode 100644
index 000000000000..4d341db462a2
--- /dev/null
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -0,0 +1,28 @@
+menu "I2C encoder or helper chips"
+ depends on DRM && DRM_KMS_HELPER && I2C
+
+config DRM_I2C_CH7006
+ tristate "Chrontel ch7006 TV encoder"
+ default m if DRM_NOUVEAU
+ help
+ Support for Chrontel ch7006 and similar TV encoders, found
+ on some nVidia video cards.
+
+ This driver is currently only useful if you're also using
+ the nouveau driver.
+
+config DRM_I2C_SIL164
+ tristate "Silicon Image sil164 TMDS transmitter"
+ default m if DRM_NOUVEAU
+ help
+ Support for sil164 and similar single-link (or dual-link
+ when used in pairs) TMDS transmitters, used in some nVidia
+ video cards.
+
+config DRM_I2C_NXP_TDA998X
+ tristate "NXP Semiconductors TDA998X HDMI encoder"
+ default m if DRM_TILCDC
+ help
+ Support for NXP Semiconductors TDA998X HDMI encoders.
+
+endmenu
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 92862563e7ee..43aa33baebed 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -5,3 +5,6 @@ obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
sil164-y := sil164_drv.o
obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
+
+tda998x-y := tda998x_drv.o
+obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index b865d0728e28..51fa32392029 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -364,7 +364,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
.crtc = crtc,
};
- crtc->funcs->set_config(&modeset);
+ drm_mode_set_config_internal(&modeset);
}
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
new file mode 100644
index 000000000000..e68b58a1aaf9
--- /dev/null
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -0,0 +1,906 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_edid.h>
+
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+struct tda998x_priv {
+ struct i2c_client *cec;
+ uint16_t rev;
+ uint8_t current_page;
+ int dpms;
+};
+
+#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
+
+/* The TDA9988 series of devices use a paged register scheme. To simplify
+ * things we encode the page # in the upper bits of the register #. To read/
+ * write a given register, we need to make sure the CURPAGE register is set
+ * appropriately, which implies reads/writes are not atomic. Fun!
+ */
+
+#define REG(page, addr) (((page) << 8) | (addr))
+#define REG2ADDR(reg) ((reg) & 0xff)
+#define REG2PAGE(reg) (((reg) >> 8) & 0xff)
+
+#define REG_CURPAGE 0xff /* write */
+
+
+/* Page 00h: General Control */
+#define REG_VERSION_LSB REG(0x00, 0x00) /* read */
+#define REG_MAIN_CNTRL0 REG(0x00, 0x01) /* read/write */
+# define MAIN_CNTRL0_SR (1 << 0)
+# define MAIN_CNTRL0_DECS (1 << 1)
+# define MAIN_CNTRL0_DEHS (1 << 2)
+# define MAIN_CNTRL0_CECS (1 << 3)
+# define MAIN_CNTRL0_CEHS (1 << 4)
+# define MAIN_CNTRL0_SCALER (1 << 7)
+#define REG_VERSION_MSB REG(0x00, 0x02) /* read */
+#define REG_SOFTRESET REG(0x00, 0x0a) /* write */
+# define SOFTRESET_AUDIO (1 << 0)
+# define SOFTRESET_I2C_MASTER (1 << 1)
+#define REG_DDC_DISABLE REG(0x00, 0x0b) /* read/write */
+#define REG_CCLK_ON REG(0x00, 0x0c) /* read/write */
+#define REG_I2C_MASTER REG(0x00, 0x0d) /* read/write */
+# define I2C_MASTER_DIS_MM (1 << 0)
+# define I2C_MASTER_DIS_FILT (1 << 1)
+# define I2C_MASTER_APP_STRT_LAT (1 << 2)
+#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
+#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
+#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */
+# define INT_FLAGS_2_EDID_BLK_RD (1 << 1)
+#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */
+#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */
+#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */
+#define REG_ENA_AP REG(0x00, 0x1e) /* read/write */
+#define REG_VIP_CNTRL_0 REG(0x00, 0x20) /* write */
+# define VIP_CNTRL_0_MIRR_A (1 << 7)
+# define VIP_CNTRL_0_SWAP_A(x) (((x) & 7) << 4)
+# define VIP_CNTRL_0_MIRR_B (1 << 3)
+# define VIP_CNTRL_0_SWAP_B(x) (((x) & 7) << 0)
+#define REG_VIP_CNTRL_1 REG(0x00, 0x21) /* write */
+# define VIP_CNTRL_1_MIRR_C (1 << 7)
+# define VIP_CNTRL_1_SWAP_C(x) (((x) & 7) << 4)
+# define VIP_CNTRL_1_MIRR_D (1 << 3)
+# define VIP_CNTRL_1_SWAP_D(x) (((x) & 7) << 0)
+#define REG_VIP_CNTRL_2 REG(0x00, 0x22) /* write */
+# define VIP_CNTRL_2_MIRR_E (1 << 7)
+# define VIP_CNTRL_2_SWAP_E(x) (((x) & 7) << 4)
+# define VIP_CNTRL_2_MIRR_F (1 << 3)
+# define VIP_CNTRL_2_SWAP_F(x) (((x) & 7) << 0)
+#define REG_VIP_CNTRL_3 REG(0x00, 0x23) /* write */
+# define VIP_CNTRL_3_X_TGL (1 << 0)
+# define VIP_CNTRL_3_H_TGL (1 << 1)
+# define VIP_CNTRL_3_V_TGL (1 << 2)
+# define VIP_CNTRL_3_EMB (1 << 3)
+# define VIP_CNTRL_3_SYNC_DE (1 << 4)
+# define VIP_CNTRL_3_SYNC_HS (1 << 5)
+# define VIP_CNTRL_3_DE_INT (1 << 6)
+# define VIP_CNTRL_3_EDGE (1 << 7)
+#define REG_VIP_CNTRL_4 REG(0x00, 0x24) /* write */
+# define VIP_CNTRL_4_BLC(x) (((x) & 3) << 0)
+# define VIP_CNTRL_4_BLANKIT(x) (((x) & 3) << 2)
+# define VIP_CNTRL_4_CCIR656 (1 << 4)
+# define VIP_CNTRL_4_656_ALT (1 << 5)
+# define VIP_CNTRL_4_TST_656 (1 << 6)
+# define VIP_CNTRL_4_TST_PAT (1 << 7)
+#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */
+# define VIP_CNTRL_5_CKCASE (1 << 0)
+# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
+#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
+# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
+# define MAT_CONTRL_MAT_BP (1 << 2)
+#define REG_VIDFORMAT REG(0x00, 0xa0) /* write */
+#define REG_REFPIX_MSB REG(0x00, 0xa1) /* write */
+#define REG_REFPIX_LSB REG(0x00, 0xa2) /* write */
+#define REG_REFLINE_MSB REG(0x00, 0xa3) /* write */
+#define REG_REFLINE_LSB REG(0x00, 0xa4) /* write */
+#define REG_NPIX_MSB REG(0x00, 0xa5) /* write */
+#define REG_NPIX_LSB REG(0x00, 0xa6) /* write */
+#define REG_NLINE_MSB REG(0x00, 0xa7) /* write */
+#define REG_NLINE_LSB REG(0x00, 0xa8) /* write */
+#define REG_VS_LINE_STRT_1_MSB REG(0x00, 0xa9) /* write */
+#define REG_VS_LINE_STRT_1_LSB REG(0x00, 0xaa) /* write */
+#define REG_VS_PIX_STRT_1_MSB REG(0x00, 0xab) /* write */
+#define REG_VS_PIX_STRT_1_LSB REG(0x00, 0xac) /* write */
+#define REG_VS_LINE_END_1_MSB REG(0x00, 0xad) /* write */
+#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */
+#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */
+#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */
+#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */
+#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */
+#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */
+#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */
+#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */
+#define REG_HS_PIX_START_LSB REG(0x00, 0xba) /* write */
+#define REG_HS_PIX_STOP_MSB REG(0x00, 0xbb) /* write */
+#define REG_HS_PIX_STOP_LSB REG(0x00, 0xbc) /* write */
+#define REG_VWIN_START_1_MSB REG(0x00, 0xbd) /* write */
+#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */
+#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */
+#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */
+#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */
+#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */
+#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */
+#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */
+#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */
+# define TBG_CNTRL_0_FRAME_DIS (1 << 5)
+# define TBG_CNTRL_0_SYNC_MTHD (1 << 6)
+# define TBG_CNTRL_0_SYNC_ONCE (1 << 7)
+#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */
+# define TBG_CNTRL_1_VH_TGL_0 (1 << 0)
+# define TBG_CNTRL_1_VH_TGL_1 (1 << 1)
+# define TBG_CNTRL_1_VH_TGL_2 (1 << 2)
+# define TBG_CNTRL_1_VHX_EXT_DE (1 << 3)
+# define TBG_CNTRL_1_VHX_EXT_HS (1 << 4)
+# define TBG_CNTRL_1_VHX_EXT_VS (1 << 5)
+# define TBG_CNTRL_1_DWIN_DIS (1 << 6)
+#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */
+#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */
+# define HVF_CNTRL_0_SM (1 << 7)
+# define HVF_CNTRL_0_RWB (1 << 6)
+# define HVF_CNTRL_0_PREFIL(x) (((x) & 3) << 2)
+# define HVF_CNTRL_0_INTPOL(x) (((x) & 3) << 0)
+#define REG_HVF_CNTRL_1 REG(0x00, 0xe5) /* write */
+# define HVF_CNTRL_1_FOR (1 << 0)
+# define HVF_CNTRL_1_YUVBLK (1 << 1)
+# define HVF_CNTRL_1_VQR(x) (((x) & 3) << 2)
+# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4)
+# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6)
+#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */
+
+
+/* Page 02h: PLL settings */
+#define REG_PLL_SERIAL_1 REG(0x02, 0x00) /* read/write */
+# define PLL_SERIAL_1_SRL_FDN (1 << 0)
+# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
+# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
+#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
+# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
+#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
+# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
+# define PLL_SERIAL_3_SRL_DE (1 << 2)
+# define PLL_SERIAL_3_SRL_PXIN_SEL (1 << 4)
+#define REG_SERIALIZER REG(0x02, 0x03) /* read/write */
+#define REG_BUFFER_OUT REG(0x02, 0x04) /* read/write */
+#define REG_PLL_SCG1 REG(0x02, 0x05) /* read/write */
+#define REG_PLL_SCG2 REG(0x02, 0x06) /* read/write */
+#define REG_PLL_SCGN1 REG(0x02, 0x07) /* read/write */
+#define REG_PLL_SCGN2 REG(0x02, 0x08) /* read/write */
+#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */
+#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */
+#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */
+#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */
+# define SEL_CLK_SEL_CLK1 (1 << 0)
+# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1)
+# define SEL_CLK_ENA_SC_CLK (1 << 3)
+#define REG_ANA_GENERAL REG(0x02, 0x12) /* read/write */
+
+
+/* Page 09h: EDID Control */
+#define REG_EDID_DATA_0 REG(0x09, 0x00) /* read */
+/* next 127 successive registers are the EDID block */
+#define REG_EDID_CTRL REG(0x09, 0xfa) /* read/write */
+#define REG_DDC_ADDR REG(0x09, 0xfb) /* read/write */
+#define REG_DDC_OFFS REG(0x09, 0xfc) /* read/write */
+#define REG_DDC_SEGM_ADDR REG(0x09, 0xfd) /* read/write */
+#define REG_DDC_SEGM REG(0x09, 0xfe) /* read/write */
+
+
+/* Page 10h: information frames and packets */
+
+
+/* Page 11h: audio settings and content info packets */
+#define REG_AIP_CNTRL_0 REG(0x11, 0x00) /* read/write */
+# define AIP_CNTRL_0_RST_FIFO (1 << 0)
+# define AIP_CNTRL_0_SWAP (1 << 1)
+# define AIP_CNTRL_0_LAYOUT (1 << 2)
+# define AIP_CNTRL_0_ACR_MAN (1 << 5)
+# define AIP_CNTRL_0_RST_CTS (1 << 6)
+#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */
+# define ENC_CNTRL_RST_ENC (1 << 0)
+# define ENC_CNTRL_RST_SEL (1 << 1)
+# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2)
+
+
+/* Page 12h: HDCP and OTP */
+#define REG_TX3 REG(0x12, 0x9a) /* read/write */
+#define REG_TX33 REG(0x12, 0xb8) /* read/write */
+# define TX33_HDMI (1 << 1)
+
+
+/* Page 13h: Gamut related metadata packets */
+
+
+
+/* CEC registers: (not paged)
+ */
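+/* These are accessed through the separate CEC i2c client (the dummy device
+ * created in tda998x_encoder_init), so the CURPAGE scheme above does not
+ * apply here.
+ */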
+#define REG_CEC_FRO_IM_CLK_CTRL 0xfb /* read/write */
+# define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7)
+# define CEC_FRO_IM_CLK_CTRL_ENA_OTP (1 << 6)
+# define CEC_FRO_IM_CLK_CTRL_IMCLK_SEL (1 << 1)
+# define CEC_FRO_IM_CLK_CTRL_FRO_DIV (1 << 0)
+#define REG_CEC_RXSHPDLEV 0xfe /* read */
+# define CEC_RXSHPDLEV_RXSENS (1 << 0)
+# define CEC_RXSHPDLEV_HPD (1 << 1)
+
+#define REG_CEC_ENAMODS 0xff /* read/write */
+# define CEC_ENAMODS_DIS_FRO (1 << 6)
+# define CEC_ENAMODS_DIS_CCLK (1 << 5)
+# define CEC_ENAMODS_EN_RXSENS (1 << 2)
+# define CEC_ENAMODS_EN_HDMI (1 << 1)
+# define CEC_ENAMODS_EN_CEC (1 << 0)
+
+
+/* Device versions: */
+#define TDA9989N2 0x0101
+#define TDA19989 0x0201
+#define TDA19989N2 0x0202
+#define TDA19988 0x0301
+
+static void
+cec_write(struct drm_encoder *encoder, uint16_t addr, uint8_t val)
+{
+ struct i2c_client *client = to_tda998x_priv(encoder)->cec;
+ uint8_t buf[] = {addr, val};
+ int ret;
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
+}
+
+static uint8_t
+cec_read(struct drm_encoder *encoder, uint8_t addr)
+{
+ struct i2c_client *client = to_tda998x_priv(encoder)->cec;
+ uint8_t val;
+ int ret;
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, &val, sizeof(val));
+ if (ret < 0)
+ goto fail;
+
+ return val;
+
+fail:
+ dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);
+ return 0;
+}
+
+static void
+set_page(struct drm_encoder *encoder, uint16_t reg)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ if (REG2PAGE(reg) != priv->current_page) {
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t buf[] = {
+ REG_CURPAGE, REG2PAGE(reg)
+ };
+ int ret = i2c_master_send(client, buf, sizeof(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to REG_CURPAGE\n", ret);
+
+ priv->current_page = REG2PAGE(reg);
+ }
+}
+
+static int
+reg_read_range(struct drm_encoder *encoder, uint16_t reg, char *buf, int cnt)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t addr = REG2ADDR(reg);
+ int ret;
+
+ set_page(encoder, reg);
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, buf, cnt);
+ if (ret < 0)
+ goto fail;
+
+ return ret;
+
+fail:
+ dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+ return ret;
+}
+
+static uint8_t
+reg_read(struct drm_encoder *encoder, uint16_t reg)
+{
+ uint8_t val = 0;
+ reg_read_range(encoder, reg, &val, sizeof(val));
+ return val;
+}
+
+static void
+reg_write(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t buf[] = {REG2ADDR(reg), val};
+ int ret;
+
+ set_page(encoder, reg);
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
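+/* 16-bit write helper: the *_MSB/*_LSB register pairs above sit at
+ * consecutive addresses, so a single {addr, hi, lo} transfer programs both
+ * halves in one go (presumably relying on the device auto-incrementing its
+ * register pointer, as reg_read_range does for the EDID block).
+ */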
+static void
+reg_write16(struct drm_encoder *encoder, uint16_t reg, uint16_t val)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
+ int ret;
+
+ set_page(encoder, reg);
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
+static void
+reg_set(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+{
+ reg_write(encoder, reg, reg_read(encoder, reg) | val);
+}
+
+static void
+reg_clear(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+{
+ reg_write(encoder, reg, reg_read(encoder, reg) & ~val);
+}
+
+static void
+tda998x_reset(struct drm_encoder *encoder)
+{
+ /* reset audio and i2c master: */
+ reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+ msleep(50);
+ reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+ msleep(50);
+
+ /* reset transmitter: */
+ reg_set(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+ reg_clear(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+
+ /* PLL registers common configuration */
+ reg_write(encoder, REG_PLL_SERIAL_1, 0x00);
+ reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1));
+ reg_write(encoder, REG_PLL_SERIAL_3, 0x00);
+ reg_write(encoder, REG_SERIALIZER, 0x00);
+ reg_write(encoder, REG_BUFFER_OUT, 0x00);
+ reg_write(encoder, REG_PLL_SCG1, 0x00);
+ reg_write(encoder, REG_AUDIO_DIV, 0x03);
+ reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
+ reg_write(encoder, REG_PLL_SCGN1, 0xfa);
+ reg_write(encoder, REG_PLL_SCGN2, 0x00);
+ reg_write(encoder, REG_PLL_SCGR1, 0x5b);
+ reg_write(encoder, REG_PLL_SCGR2, 0x00);
+ reg_write(encoder, REG_PLL_SCG2, 0x10);
+}
+
+/* DRM encoder functions */
+
+static void
+tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
+{
+}
+
+static void
+tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ /* we only care about on or off: */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == priv->dpms)
+ return;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ /* enable audio and video ports */
+ reg_write(encoder, REG_ENA_AP, 0xff);
+ reg_write(encoder, REG_ENA_VP_0, 0xff);
+ reg_write(encoder, REG_ENA_VP_1, 0xff);
+ reg_write(encoder, REG_ENA_VP_2, 0xff);
+ /* set muxing after enabling ports: */
+ reg_write(encoder, REG_VIP_CNTRL_0,
+ VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3));
+ reg_write(encoder, REG_VIP_CNTRL_1,
+ VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
+ reg_write(encoder, REG_VIP_CNTRL_2,
+ VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* disable audio and video ports */
+ reg_write(encoder, REG_ENA_AP, 0x00);
+ reg_write(encoder, REG_ENA_VP_0, 0x00);
+ reg_write(encoder, REG_ENA_VP_1, 0x00);
+ reg_write(encoder, REG_ENA_VP_2, 0x00);
+ break;
+ }
+
+ priv->dpms = mode;
+}
+
+static void
+tda998x_encoder_save(struct drm_encoder *encoder)
+{
+ DBG("");
+}
+
+static void
+tda998x_encoder_restore(struct drm_encoder *encoder)
+{
+ DBG("");
+}
+
+static bool
+tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+tda998x_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void
+tda998x_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ uint16_t hs_start, hs_end, line_start, line_end;
+ uint16_t vwin_start, vwin_end, de_start, de_end;
+ uint16_t ref_pix, ref_line, pix_start2;
+ uint8_t reg, div, rep;
+
+ hs_start = mode->hsync_start - mode->hdisplay;
+ hs_end = mode->hsync_end - mode->hdisplay;
+ line_start = 1;
+ line_end = 1 + mode->vsync_end - mode->vsync_start;
+ vwin_start = mode->vtotal - mode->vsync_start;
+ vwin_end = vwin_start + mode->vdisplay;
+ de_start = mode->htotal - mode->hdisplay;
+ de_end = mode->htotal;
+
+ pix_start2 = 0;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ pix_start2 = (mode->htotal / 2) + hs_start;
+
+ /* TODO: how is this value calculated? It is 2 for all common
+ * formats in the tables of the out-of-tree NXP driver (assuming
+ * I've properly deciphered their byzantine table system).
+ */
+ ref_line = 2;
+
+ /* this might change for other color formats from the CRTC: */
+ ref_pix = 3 + hs_start;
+
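+ /* serializer clock divider: mode->clock is in kHz, and 148500 kHz
+ * (148.5 MHz) is presumably the highest pixel clock the PLL is set up
+ * for, so slower modes get a correspondingly larger divider. */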
+ div = 148500 / mode->clock;
+
+ DBG("clock=%d, div=%u", mode->clock, div);
+ DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
+ hs_start, hs_end, line_start, line_end);
+ DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
+ vwin_start, vwin_end, de_start, de_end);
+ DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
+ ref_line, ref_pix, pix_start2);
+
+ /* mute the audio FIFO: */
+ reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+
+ /* set HDMI HDCP mode off: */
+ reg_set(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
+ reg_clear(encoder, REG_TX33, TX33_HDMI);
+
+ reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0));
+ /* no pre-filter or interpolator: */
+ reg_write(encoder, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
+ HVF_CNTRL_0_INTPOL(0));
+ reg_write(encoder, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
+ reg_write(encoder, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
+ VIP_CNTRL_4_BLC(0));
+ reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR);
+
+ reg_clear(encoder, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ);
+ reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_DE);
+ reg_write(encoder, REG_SERIALIZER, 0);
+ reg_write(encoder, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0));
+
+ /* TODO enable pixel repeat for pixel rates less than 25Msamp/s */
+ rep = 0;
+ reg_write(encoder, REG_RPT_CNTRL, 0);
+ reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) |
+ SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
+
+ reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
+ PLL_SERIAL_2_SRL_PR(rep));
+
+ reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
+ reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
+
+ /* set color matrix bypass flag: */
+ reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
+
+ /* set BIAS tmds value: */
+ reg_write(encoder, REG_ANA_GENERAL, 0x09);
+
+ reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
+
+ reg_write(encoder, REG_VIP_CNTRL_3, 0);
+ reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
+
+ reg_write(encoder, REG_VIDFORMAT, 0x00);
+ reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1);
+ reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1);
+ reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start);
+ reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end);
+ reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start);
+ reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start);
+ reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start);
+ reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end);
+ reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start);
+ reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end);
+ reg_write16(encoder, REG_DE_START_MSB, de_start);
+ reg_write16(encoder, REG_DE_STOP_MSB, de_end);
+
+ if (priv->rev == TDA19988) {
+ /* let incoming pixels fill the active space (if any) */
+ reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+ }
+
+ reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
+ reg_write16(encoder, REG_REFLINE_MSB, ref_line);
+
+ reg = TBG_CNTRL_1_VHX_EXT_DE |
+ TBG_CNTRL_1_VHX_EXT_HS |
+ TBG_CNTRL_1_VHX_EXT_VS |
+ TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
+ TBG_CNTRL_1_VH_TGL_2;
+ if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
+ reg |= TBG_CNTRL_1_VH_TGL_0;
+ reg_set(encoder, REG_TBG_CNTRL_1, reg);
+
+ /* must be last register set: */
+ reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
+}
+
+static enum drm_connector_status
+tda998x_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ uint8_t val = cec_read(encoder, REG_CEC_RXSHPDLEV);
+ return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static int
+read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
+{
+ uint8_t offset, segptr;
+ int ret, i;
+
+ /* enable EDID read irq: */
+ reg_set(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+ offset = (blk & 1) ? 128 : 0;
+ segptr = blk / 2;
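+ /* EDID blocks are 128 bytes and a DDC segment holds two of them, so
+ * even/odd blocks sit at offset 0/128 within segment blk/2. */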
+
+ reg_write(encoder, REG_DDC_ADDR, 0xa0);
+ reg_write(encoder, REG_DDC_OFFS, offset);
+ reg_write(encoder, REG_DDC_SEGM_ADDR, 0x60);
+ reg_write(encoder, REG_DDC_SEGM, segptr);
+
+ /* enable reading EDID: */
+ reg_write(encoder, REG_EDID_CTRL, 0x1);
+
+ /* flag must be cleared by sw: */
+ reg_write(encoder, REG_EDID_CTRL, 0x0);
+
+ /* wait for block read to complete: */
+ for (i = 100; i > 0; i--) {
+ uint8_t val = reg_read(encoder, REG_INT_FLAGS_2);
+ if (val & INT_FLAGS_2_EDID_BLK_RD)
+ break;
+ msleep(1);
+ }
+
+ if (i == 0)
+ return -ETIMEDOUT;
+
+ ret = reg_read_range(encoder, REG_EDID_DATA_0, buf, EDID_LENGTH);
+ if (ret != EDID_LENGTH) {
+ dev_err(encoder->dev->dev, "failed to read edid block %d: %d\n",
+ blk, ret);
+ return ret;
+ }
+
+ reg_clear(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+ return 0;
+}
+
+static uint8_t *
+do_get_edid(struct drm_encoder *encoder)
+{
+ int j = 0, valid_extensions = 0;
+ uint8_t *block, *new;
+ bool print_bad_edid = drm_debug & DRM_UT_KMS;
+
+ if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* base block fetch */
+ if (read_edid_block(encoder, block, 0))
+ goto fail;
+
+ if (!drm_edid_block_valid(block, 0, print_bad_edid))
+ goto fail;
+
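+ /* byte 0x7e of the base block holds the number of extension blocks */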
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+ return block;
+
+ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto fail;
+ block = new;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
+ if (read_edid_block(encoder, ext_block, j))
+ goto fail;
+
+ if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
+ goto fail;
+
+ valid_extensions++;
+ }
+
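+ /* if fewer extensions were usable than advertised, patch up the
+ * extension count and the base-block checksum (byte 127) so the
+ * 128 bytes still sum to zero. */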
+ if (valid_extensions != block[0x7e]) {
+ block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+ block[0x7e] = valid_extensions;
+ new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto fail;
+ block = new;
+ }
+
+ return block;
+
+fail:
+ dev_warn(encoder->dev->dev, "failed to read EDID\n");
+ kfree(block);
+ return NULL;
+}
+
+static int
+tda998x_encoder_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct edid *edid = (struct edid *)do_get_edid(encoder);
+ int n = 0;
+
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return n;
+}
+
+static int
+tda998x_encoder_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ DBG("");
+ return 0;
+}
+
+static int
+tda998x_encoder_set_property(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ DBG("");
+ return 0;
+}
+
+static void
+tda998x_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ drm_i2c_encoder_destroy(encoder);
+ kfree(priv);
+}
+
+static struct drm_encoder_slave_funcs tda998x_encoder_funcs = {
+ .set_config = tda998x_encoder_set_config,
+ .destroy = tda998x_encoder_destroy,
+ .dpms = tda998x_encoder_dpms,
+ .save = tda998x_encoder_save,
+ .restore = tda998x_encoder_restore,
+ .mode_fixup = tda998x_encoder_mode_fixup,
+ .mode_valid = tda998x_encoder_mode_valid,
+ .mode_set = tda998x_encoder_mode_set,
+ .detect = tda998x_encoder_detect,
+ .get_modes = tda998x_encoder_get_modes,
+ .create_resources = tda998x_encoder_create_resources,
+ .set_property = tda998x_encoder_set_property,
+};
+
+/* I2C driver functions */
+
+static int
+tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ return 0;
+}
+
+static int
+tda998x_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static int
+tda998x_encoder_init(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder_slave)
+{
+ struct drm_encoder *encoder = &encoder_slave->base;
+ struct tda998x_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->current_page = 0;
+ priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ priv->dpms = DRM_MODE_DPMS_OFF;
+
+ encoder_slave->slave_priv = priv;
+ encoder_slave->slave_funcs = &tda998x_encoder_funcs;
+
+ /* wake up the device: */
+ cec_write(encoder, REG_CEC_ENAMODS,
+ CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
+
+ tda998x_reset(encoder);
+
+ /* read version: */
+ priv->rev = reg_read(encoder, REG_VERSION_LSB) |
+ reg_read(encoder, REG_VERSION_MSB) << 8;
+
+ /* mask off feature bits: */
+ priv->rev &= ~0x30; /* not-hdcp and not-scaler bits */
+
+ switch (priv->rev) {
+ case TDA9989N2: dev_info(dev->dev, "found TDA9989 n2"); break;
+ case TDA19989: dev_info(dev->dev, "found TDA19989"); break;
+ case TDA19989N2: dev_info(dev->dev, "found TDA19989 n2"); break;
+ case TDA19988: dev_info(dev->dev, "found TDA19988"); break;
+ default:
+ DBG("found unsupported device: %04x", priv->rev);
+ goto fail;
+ }
+
+ /* after reset, enable DDC: */
+ reg_write(encoder, REG_DDC_DISABLE, 0x00);
+
+ /* set clock on DDC channel: */
+ reg_write(encoder, REG_TX3, 39);
+
+ /* if necessary, disable multi-master: */
+ if (priv->rev == TDA19989)
+ reg_set(encoder, REG_I2C_MASTER, I2C_MASTER_DIS_MM);
+
+ cec_write(encoder, REG_CEC_FRO_IM_CLK_CTRL,
+ CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL);
+
+ return 0;
+
+fail:
+ /* if encoder_init fails, the encoder slave is never registered,
+ * so cleanup here:
+ */
+ if (priv->cec)
+ i2c_unregister_device(priv->cec);
+ kfree(priv);
+ encoder_slave->slave_priv = NULL;
+ encoder_slave->slave_funcs = NULL;
+ return -ENXIO;
+}
+
+static struct i2c_device_id tda998x_ids[] = {
+ { "tda998x", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tda998x_ids);
+
+static struct drm_i2c_encoder_driver tda998x_driver = {
+ .i2c_driver = {
+ .probe = tda998x_probe,
+ .remove = tda998x_remove,
+ .driver = {
+ .name = "tda998x",
+ },
+ .id_table = tda998x_ids,
+ },
+ .encoder_init = tda998x_encoder_init,
+};
+
+/* Module initialization */
+
+static int __init
+tda998x_init(void)
+{
+ DBG("");
+ return drm_i2c_encoder_register(THIS_MODULE, &tda998x_driver);
+}
+
+static void __exit
+tda998x_exit(void)
+{
+ DBG("");
+ drm_i2c_encoder_unregister(&tda998x_driver);
+}
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
+MODULE_DESCRIPTION("NXP Semiconductors TDA998X HDMI Encoder");
+MODULE_LICENSE("GPL");
+
+module_init(tda998x_init);
+module_exit(tda998x_exit);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0f2c5493242b..91f3ac6cef35 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -16,6 +16,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_gem_tiling.o \
i915_sysfs.o \
i915_trace_points.o \
+ i915_ums.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9d4a2c2adf0e..aae31489c893 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -103,7 +103,7 @@ static const char *cache_level_str(int type)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
+ seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
@@ -125,6 +125,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->gtt_space != NULL)
seq_printf(m, " (gtt offset: %08x, size: %08x)",
obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ if (obj->stolen)
+ seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) {
char s[3], *t = s;
if (obj->pin_mappable)
@@ -257,8 +259,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_printf(m, "%u fault mappable objects, %zu bytes\n",
count, size);
- seq_printf(m, "%zu [%zu] gtt total\n",
- dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
+ seq_printf(m, "%zu [%lu] gtt total\n",
+ dev_priv->gtt.total,
+ dev_priv->gtt.mappable_end - dev_priv->gtt.start);
mutex_unlock(&dev->struct_mutex);
@@ -388,7 +391,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
struct intel_ring_buffer *ring)
{
if (ring->get_seqno) {
- seq_printf(m, "Current sequence (%s): %d\n",
+ seq_printf(m, "Current sequence (%s): %u\n",
ring->name, ring->get_seqno(ring, false));
}
}
@@ -545,11 +548,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- const volatile u32 __iomem *hws;
+ const u32 *hws;
int i;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- hws = (volatile u32 __iomem *)ring->status_page.page_addr;
+ hws = ring->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -609,7 +612,7 @@ static void print_error_buffers(struct seq_file *m,
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
+ seq_printf(m, " %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
@@ -691,7 +694,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
- seq_printf(m, "Kernel: " UTS_RELEASE);
+ seq_printf(m, "Kernel: " UTS_RELEASE "\n");
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "IER: 0x%08x\n", error->ier);
@@ -816,11 +819,11 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
error_priv->dev = dev;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- error_priv->error = dev_priv->first_error;
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error_priv->error = dev_priv->gpu_error.first_error;
if (error_priv->error)
kref_get(&error_priv->error->ref);
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
return single_open(file, i915_error_state, error_priv);
}
@@ -846,6 +849,77 @@ static const struct file_operations i915_error_state_fops = {
.release = i915_error_state_release,
};
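+/* i915_next_seqno debugfs entry (registered below in i915_debugfs_init):
+ * reading reports the seqno the GPU will hand out next, writing forces a new
+ * value via i915_gem_set_seqno(), presumably mainly for exercising seqno
+ * wrap-around from test suites.
+ */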
+static ssize_t
+i915_next_seqno_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ int len;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ len = snprintf(buf, sizeof(buf),
+ "next_seqno : 0x%x\n",
+ dev_priv->next_seqno);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_next_seqno_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ char buf[20];
+ u32 val = 1;
+ int ret;
+
+ if (cnt > 0) {
+ if (cnt > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_set_seqno(dev, val);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret ?: cnt;
+}
+
+static const struct file_operations i915_next_seqno_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i915_next_seqno_read,
+ .write = i915_next_seqno_write,
+ .llseek = default_llseek,
+};
+
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -888,7 +962,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 rpstat;
+ u32 rpstat, cagf;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
@@ -907,6 +981,11 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+ if (IS_HASWELL(dev))
+ cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+ cagf *= GT_FREQUENCY_MULTIPLIER;
gen6_gt_force_wake_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -919,8 +998,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
- seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
- GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
+ seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -1372,28 +1450,31 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
ifbdev = dev_priv->fbdev;
fb = to_intel_framebuffer(ifbdev->helper.fb);
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
- fb->base.bits_per_pixel);
+ fb->base.bits_per_pixel,
+ atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_printf(m, "\n");
+ mutex_unlock(&dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.fb_lock);
list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
if (&fb->base == ifbdev->helper.fb)
continue;
- seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
- fb->base.bits_per_pixel);
+ fb->base.bits_per_pixel,
+ atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_printf(m, "\n");
}
-
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
@@ -1403,7 +1484,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ struct intel_ring_buffer *ring;
+ int ret, i;
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
if (ret)
@@ -1421,6 +1503,14 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_printf(m, "\n");
}
+ for_each_ring(ring, dev_priv, i) {
+ if (ring->default_context) {
+ seq_printf(m, "HW default context %s ring ", ring->name);
+ describe_obj(m, ring->default_context->obj);
+ seq_printf(m, "\n");
+ }
+ }
+
mutex_unlock(&dev->mode_config.mutex);
return 0;
@@ -1460,7 +1550,7 @@ static const char *swizzle_string(unsigned swizzle)
case I915_BIT_6_SWIZZLE_9_10_17:
return "bit9/bit10/bit17";
case I915_BIT_6_SWIZZLE_UNKNOWN:
- return "unkown";
+ return "unknown";
}
return "bug";
@@ -1556,7 +1646,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
return 0;
}
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
if (ret)
return ret;
@@ -1585,7 +1675,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev_priv->dpio_lock);
return 0;
}
@@ -1603,7 +1693,7 @@ i915_wedged_read(struct file *filp,
len = snprintf(buf, sizeof(buf),
"wedged : %d\n",
- atomic_read(&dev_priv->mm.wedged));
+ atomic_read(&dev_priv->gpu_error.reset_counter));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1658,7 +1748,7 @@ i915_ring_stop_read(struct file *filp,
int len;
len = snprintf(buf, sizeof(buf),
- "0x%08x\n", dev_priv->stop_rings);
+ "0x%08x\n", dev_priv->gpu_error.stop_rings);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1694,7 +1784,7 @@ i915_ring_stop_write(struct file *filp,
if (ret)
return ret;
- dev_priv->stop_rings = val;
+ dev_priv->gpu_error.stop_rings = val;
mutex_unlock(&dev->struct_mutex);
return cnt;
@@ -1708,6 +1798,102 @@ static const struct file_operations i915_ring_stop_fops = {
.llseek = default_llseek,
};
+#define DROP_UNBOUND 0x1
+#define DROP_BOUND 0x2
+#define DROP_RETIRE 0x4
+#define DROP_ACTIVE 0x8
+#define DROP_ALL (DROP_UNBOUND | \
+ DROP_BOUND | \
+ DROP_RETIRE | \
+ DROP_ACTIVE)
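+/* debugfs knob, registered below as "i915_gem_drop_caches": userspace writes
+ * a mask of the DROP_* flags above; DROP_ALL idles the GPU, retires requests
+ * and unbinds/releases whatever is not pinned. Reads just report the
+ * supported mask.
+ */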
+static ssize_t
+i915_drop_caches_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ char buf[20];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL);
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_drop_caches_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj, *next;
+ char buf[20];
+ int val = 0, ret;
+
+ if (cnt > 0) {
+ if (cnt > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val);
+
+ /* No need to check and wait for gpu resets, only libdrm auto-restarts
+ * on ioctls on -EAGAIN. */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (val & DROP_ACTIVE) {
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ goto unlock;
+ }
+
+ if (val & (DROP_RETIRE | DROP_ACTIVE))
+ i915_gem_retire_requests(dev);
+
+ if (val & DROP_BOUND) {
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+ if (obj->pin_count == 0) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ goto unlock;
+ }
+ }
+
+ if (val & DROP_UNBOUND) {
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+ if (obj->pages_pin_count == 0) {
+ ret = i915_gem_object_put_pages(obj);
+ if (ret)
+ goto unlock;
+ }
+ }
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret ?: cnt;
+}
+
+static const struct file_operations i915_drop_caches_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i915_drop_caches_read,
+ .write = i915_drop_caches_write,
+ .llseek = default_llseek,
+};
+
static ssize_t
i915_max_freq_read(struct file *filp,
char __user *ubuf,
@@ -2105,11 +2291,23 @@ int i915_debugfs_init(struct drm_minor *minor)
return ret;
ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_gem_drop_caches",
+ &i915_drop_caches_fops);
+ if (ret)
+ return ret;
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_error_state",
&i915_error_state_fops);
if (ret)
return ret;
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_next_seqno",
+ &i915_next_seqno_fops);
+ if (ret)
+ return ret;
+
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
@@ -2129,10 +2327,14 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
+ 1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 99daa896105d..4fa6beb14c77 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -992,6 +992,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PINNED_BATCHES:
value = 1;
break;
+ case I915_PARAM_HAS_EXEC_NO_RELOC:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1070,7 +1076,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->dri1.gfx_hws_cpu_addr =
- ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
+ ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
@@ -1297,19 +1303,21 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_vga_switcheroo;
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto cleanup_gem_stolen;
+
+ /* Important: The output setup functions called by modeset_init need
+ * working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
ret = i915_gem_init(dev);
if (ret)
- goto cleanup_gem_stolen;
-
- intel_modeset_gem_init(dev);
+ goto cleanup_irq;
INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
- ret = drm_irq_install(dev);
- if (ret)
- goto cleanup_gem;
+ intel_modeset_gem_init(dev);
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
@@ -1317,7 +1325,25 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = intel_fbdev_init(dev);
if (ret)
- goto cleanup_irq;
+ goto cleanup_gem;
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ intel_hpd_init(dev);
+
+ /*
+ * Some ports require correctly set-up hpd registers for detection to
+ * work properly (leading to ghost connected connector status), e.g. VGA
+ * on gm45. Hence we can only set up the initial fbdev config after hpd
+ * irqs are fully enabled. Now we should scan for the initial config
+ * only once hotplug handling is enabled, but due to screwed-up locking
+ * around kms/fbdev init we can't protect the fbdev initial config
+ * scanning against hotplug events. Hence do this first and ignore the
+ * tiny window where we will lose hotplug notifications.
+ */
+ intel_fbdev_initial_config(dev);
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ dev_priv->enable_hotplug_processing = true;
drm_kms_helper_poll_init(dev);
@@ -1326,13 +1352,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
-cleanup_irq:
- drm_irq_uninstall(dev);
cleanup_gem:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
+cleanup_irq:
+ drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
@@ -1400,9 +1426,9 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return;
- ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
- ap->ranges[0].size =
- dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ ap->ranges[0].base = dev_priv->gtt.mappable_base;
+ ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;
+
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -1516,18 +1542,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_gmch;
}
- aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
- dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
+ aperture_size = dev_priv->gtt.mappable_end;
- dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
+ dev_priv->gtt.mappable =
+ io_mapping_create_wc(dev_priv->gtt.mappable_base,
aperture_size);
- if (dev_priv->mm.gtt_mapping == NULL) {
+ if (dev_priv->gtt.mappable == NULL) {
ret = -EIO;
goto out_rmmap;
}
- i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
+ i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base,
aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
@@ -1580,11 +1605,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
pci_enable_msi(dev->pdev);
spin_lock_init(&dev_priv->irq_lock);
- spin_lock_init(&dev_priv->error_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->rps.lock);
- spin_lock_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
@@ -1614,9 +1640,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_opregion_init(dev);
acpi_video_register();
- setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
- (unsigned long) dev);
-
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
@@ -1635,15 +1658,15 @@ out_gem_unload:
out_mtrrfree:
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
- dev_priv->mm.gtt_base_addr,
+ dev_priv->gtt.mappable_base,
aperture_size);
dev_priv->mm.gtt_mtrr = -1;
}
- io_mapping_free(dev_priv->mm.gtt_mapping);
+ io_mapping_free(dev_priv->gtt.mappable);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
- i915_gem_gtt_fini(dev);
+ dev_priv->gtt.gtt_remove(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -1673,11 +1696,11 @@ int i915_driver_unload(struct drm_device *dev)
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- io_mapping_free(dev_priv->mm.gtt_mapping);
+ io_mapping_free(dev_priv->gtt.mappable);
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
- dev_priv->mm.gtt_base_addr,
- dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
+ dev_priv->gtt.mappable_base,
+ dev_priv->gtt.mappable_end);
dev_priv->mm.gtt_mtrr = -1;
}
@@ -1703,8 +1726,8 @@ int i915_driver_unload(struct drm_device *dev)
}
/* Free error state after interrupts are fully disabled. */
- del_timer_sync(&dev_priv->hangcheck_timer);
- cancel_work_sync(&dev_priv->error_work);
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev);
if (dev->pdev->msi_enabled)
@@ -1723,9 +1746,6 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
i915_gem_cleanup_stolen(dev);
- drm_mm_takedown(&dev_priv->mm.stolen);
-
- intel_cleanup_overlay(dev);
if (!I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
@@ -1738,6 +1758,10 @@ int i915_driver_unload(struct drm_device *dev)
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->wq);
+ pm_qos_remove_request(&dev_priv->pm_qos);
+
+ if (dev_priv->slab)
+ kmem_cache_destroy(dev_priv->slab);
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 117265840b1f..c5b8c81b9440 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -276,6 +276,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
};
static const struct intel_device_info intel_valleyview_d_info = {
@@ -285,6 +286,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
};
static const struct intel_device_info intel_haswell_d_info = {
@@ -468,6 +470,13 @@ static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ /* ignore lid events during suspend */
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ dev_priv->modeset_restore = MODESET_SUSPENDED;
+ mutex_unlock(&dev_priv->modeset_restore_lock);
+
+ intel_set_power_well(dev, true);
+
drm_kms_helper_poll_disable(dev);
pci_save_state(dev->pdev);
@@ -492,9 +501,6 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_opregion_fini(dev);
- /* Modeset on resume, not lid events */
- dev_priv->modeset_on_lid = 0;
-
console_lock();
intel_fbdev_set_suspend(dev, 1);
console_unlock();
@@ -565,12 +571,11 @@ static int __i915_drm_thaw(struct drm_device *dev)
intel_modeset_init_hw(dev);
intel_modeset_setup_hw_state(dev, false);
drm_irq_install(dev);
+ intel_hpd_init(dev);
}
intel_opregion_init(dev);
- dev_priv->modeset_on_lid = 0;
-
/*
* The console lock can be pretty contended on resume due
* to all the printk activity. Try to keep it out of the hot
@@ -583,6 +588,9 @@ static int __i915_drm_thaw(struct drm_device *dev)
schedule_work(&dev_priv->console_resume_work);
}
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ dev_priv->modeset_restore = MODESET_DONE;
+ mutex_unlock(&dev_priv->modeset_restore_lock);
return error;
}
@@ -778,9 +786,9 @@ int intel_gpu_reset(struct drm_device *dev)
}
/* Also reset the gpu hangman. */
- if (dev_priv->stop_rings) {
+ if (dev_priv->gpu_error.stop_rings) {
DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
- dev_priv->stop_rings = 0;
+ dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
DRM_ERROR("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
@@ -819,12 +827,12 @@ int i915_reset(struct drm_device *dev)
i915_gem_reset(dev);
ret = -ENODEV;
- if (get_seconds() - dev_priv->last_gpu_reset < 5)
+ if (get_seconds() - dev_priv->gpu_error.last_reset < 5)
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
else
ret = intel_gpu_reset(dev);
- dev_priv->last_gpu_reset = get_seconds();
+ dev_priv->gpu_error.last_reset = get_seconds();
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
@@ -870,6 +878,7 @@ int i915_reset(struct drm_device *dev)
drm_irq_uninstall(dev);
drm_irq_install(dev);
+ intel_hpd_init(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
@@ -1113,102 +1122,6 @@ MODULE_LICENSE("GPL and additional rights");
((HAS_FORCE_WAKE((dev_priv)->dev)) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
-
-static bool IS_DISPLAYREG(u32 reg)
-{
- /*
- * This should make it easier to transition modules over to the
- * new register block scheme, since we can do it incrementally.
- */
- if (reg >= VLV_DISPLAY_BASE)
- return false;
-
- if (reg >= RENDER_RING_BASE &&
- reg < RENDER_RING_BASE + 0xff)
- return false;
- if (reg >= GEN6_BSD_RING_BASE &&
- reg < GEN6_BSD_RING_BASE + 0xff)
- return false;
- if (reg >= BLT_RING_BASE &&
- reg < BLT_RING_BASE + 0xff)
- return false;
-
- if (reg == PGTBL_ER)
- return false;
-
- if (reg >= IPEIR_I965 &&
- reg < HWSTAM)
- return false;
-
- if (reg == MI_MODE)
- return false;
-
- if (reg == GFX_MODE_GEN7)
- return false;
-
- if (reg == RENDER_HWS_PGA_GEN7 ||
- reg == BSD_HWS_PGA_GEN7 ||
- reg == BLT_HWS_PGA_GEN7)
- return false;
-
- if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
- reg == GEN6_BSD_RNCID)
- return false;
-
- if (reg == GEN6_BLITTER_ECOSKPD)
- return false;
-
- if (reg >= 0x4000c &&
- reg <= 0x4002c)
- return false;
-
- if (reg >= 0x4f000 &&
- reg <= 0x4f08f)
- return false;
-
- if (reg >= 0x4f100 &&
- reg <= 0x4f11f)
- return false;
-
- if (reg >= VLV_MASTER_IER &&
- reg <= GEN6_PMIER)
- return false;
-
- if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
- reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
- return false;
-
- if (reg >= VLV_IIR_RW &&
- reg <= VLV_ISR)
- return false;
-
- if (reg == FORCEWAKE_VLV ||
- reg == FORCEWAKE_ACK_VLV)
- return false;
-
- if (reg == GEN6_GDRST)
- return false;
-
- switch (reg) {
- case _3D_CHICKEN3:
- case IVB_CHICKEN3:
- case GEN7_COMMON_SLICE_CHICKEN1:
- case GEN7_L3CNTLREG1:
- case GEN7_L3_CHICKEN_MODE_REGISTER:
- case GEN7_ROW_CHICKEN2:
- case GEN7_L3SQCREG4:
- case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
- case GEN7_HALF_SLICE_CHICKEN1:
- case GEN6_MBCTL:
- case GEN6_UCGCTL2:
- return false;
- default:
- break;
- }
-
- return true;
-}
-
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -1232,8 +1145,6 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
- } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
- val = read##y(dev_priv->regs + reg + 0x180000); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
@@ -1260,11 +1171,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
} \
- if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
- write##y(val, dev_priv->regs + reg + 0x180000); \
- } else { \
- write##y(val, dev_priv->regs + reg); \
- } \
+ write##y(val, dev_priv->regs + reg); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 12ab3bdea54d..e95337c97459 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -30,6 +30,8 @@
#ifndef _I915_DRV_H_
#define _I915_DRV_H_
+#include <uapi/drm/i915_drm.h>
+
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
@@ -40,6 +42,7 @@
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
+#include <linux/pm_qos.h>
/* General customization:
*/
@@ -83,7 +86,12 @@ enum port {
};
#define port_name(p) ((p) + 'A')
-#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+#define I915_GEM_GPU_DOMAINS \
+ (I915_GEM_DOMAIN_RENDER | \
+ I915_GEM_DOMAIN_SAMPLER | \
+ I915_GEM_DOMAIN_COMMAND | \
+ I915_GEM_DOMAIN_INSTRUCTION | \
+ I915_GEM_DOMAIN_VERTEX)
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
@@ -101,6 +109,19 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n);
+
struct intel_ddi_plls {
int spll_refcount;
int wrpll1_refcount;
@@ -279,6 +300,7 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
+ void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -318,6 +340,7 @@ struct drm_i915_gt_funcs {
DEV_INFO_FLAG(has_llc)
struct intel_device_info {
+ u32 display_mmio_offset;
u8 gen;
u8 is_mobile:1;
u8 is_i85x:1;
@@ -345,6 +368,50 @@ struct intel_device_info {
u8 has_llc:1;
};
+enum i915_cache_level {
+ I915_CACHE_NONE = 0,
+ I915_CACHE_LLC,
+ I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+ unsigned long start; /* Start offset of used GTT */
+ size_t total; /* Total size GTT can map */
+ size_t stolen_size; /* Total size of stolen memory */
+
+ unsigned long mappable_end; /* End offset that we can CPU map */
+ struct io_mapping *mappable; /* Mapping to our CPU mappable region */
+ phys_addr_t mappable_base; /* PA of our GMADR */
+
+ /** "Graphics Stolen Memory" holds the global PTEs */
+ void __iomem *gsm;
+
+ bool do_idle_maps;
+ dma_addr_t scratch_page_dma;
+ struct page *scratch_page;
+
+ /* global gtt ops */
+ int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+ size_t *stolen, phys_addr_t *mappable_base,
+ unsigned long *mappable_end);
+ void (*gtt_remove)(struct drm_device *dev);
+ void (*gtt_clear_range)(struct drm_device *dev,
+ unsigned int first_entry,
+ unsigned int num_entries);
+ void (*gtt_insert_entries)(struct drm_device *dev,
+ struct sg_table *st,
+ unsigned int pg_start,
+ enum i915_cache_level cache_level);
+};
+#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
+
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
@@ -354,6 +421,16 @@ struct i915_hw_ppgtt {
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
dma_addr_t scratch_page_dma_addr;
+
+ /* pte functions, mirroring the interface of the global gtt. */
+ void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
+ unsigned int first_entry,
+ unsigned int num_entries);
+ void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
+ struct sg_table *st,
+ unsigned int pg_start,
+ enum i915_cache_level cache_level);
+ void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};
@@ -580,6 +657,9 @@ struct intel_gen6_power_mgmt {
struct mutex hw_lock;
};
+/* defined in intel_pm.c */
+extern spinlock_t mchdev_lock;
+
struct intel_ilk_power_mgmt {
u8 cur_delay;
u8 min_delay;
@@ -620,8 +700,162 @@ struct intel_l3_parity {
struct work_struct error_work;
};
+struct i915_gem_mm {
+ /** Memory allocator for GTT stolen memory */
+ struct drm_mm stolen;
+ /** Memory allocator for GTT */
+ struct drm_mm gtt_space;
+ /** List of all objects in gtt_space. Used to restore gtt
+ * mappings on resume */
+ struct list_head bound_list;
+ /**
+ * List of objects which are not bound to the GTT (thus
+ * are idle and not used by the GPU) but still have
+ * (presumably uncached) pages still attached.
+ */
+ struct list_head unbound_list;
+
+ /** Usable portion of the GTT for GEM */
+ unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+ int gtt_mtrr;
+
+ /** PPGTT used for aliasing the PPGTT with the GTT */
+ struct i915_hw_ppgtt *aliasing_ppgtt;
+
+ struct shrinker inactive_shrinker;
+ bool shrinker_no_lock_stealing;
+
+ /**
+ * List of objects currently involved in rendering.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /** LRU list of objects with fence regs on them. */
+ struct list_head fence_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+
+ /**
+ * Are we in a non-interruptible section of code like
+ * modesetting?
+ */
+ bool interruptible;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+
+ /** Bit 6 swizzling required for X tiling */
+ uint32_t bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ uint32_t bit_6_swizzle_y;
+
+ /* storage for physical objects */
+ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+ /* accounting, useful for userland debugging */
+ size_t object_memory;
+ u32 object_count;
+};
+
+struct i915_gpu_error {
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+ struct timer_list hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd[I915_NUM_RINGS];
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+ /* For reset and error_state handling. */
+ spinlock_t lock;
+ /* Protected by the above dev->gpu_error.lock. */
+ struct drm_i915_error_state *first_error;
+ struct work_struct work;
+
+ unsigned long last_reset;
+
+ /**
+ * State variable and reset counter controlling the reset flow
+ *
+ * Upper bits are for the reset counter. This counter is used by the
+ * wait_seqno code to notice, free of races, that a reset event happened and
+ * that it needs to restart the entire ioctl (since most likely the
+ * seqno it waited for won't ever signal anytime soon).
+ *
+ * This is important for lock-free wait paths, where no contended lock
+ * naturally enforces the correct ordering between the bail-out of the
+ * waiter and the gpu reset work code.
+ *
+ * Lowest bit controls the reset state machine: Set means a reset is in
+ * progress. This state will (presuming we don't have any bugs) decay
+ * into either unset (successful reset) or the special WEDGED value (hw
+ * terminally sour). All waiters on the reset_queue will be woken when
+ * that happens.
+ */
+ atomic_t reset_counter;
+
+ /**
+ * Special values/flags for reset_counter
+ *
+ * Note that the code relies on
+ * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
+ * being true.
+ */
+#define I915_RESET_IN_PROGRESS_FLAG 1
+#define I915_WEDGED 0xffffffff
+
+ /**
+ * Waitqueue to signal when the reset has completed. Used by clients
+ * that wait for dev_priv->mm.wedged to settle.
+ */
+ wait_queue_head_t reset_queue;
+
+ /* For gpu hang simulation. */
+ unsigned int stop_rings;
+};
+
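/*
 * Sketch, not part of this patch: unpacking the reset_counter encoding
 * documented above.  The helper is hypothetical; the real users only
 * snapshot the raw value and compare it again after waiting.
 */
static inline unsigned i915_reset_count_sketch(struct i915_gpu_error *error)
{
	unsigned v = atomic_read(&error->reset_counter);

	if (v == I915_WEDGED)	/* terminally wedged, no further resets */
		return v;
	return v >> 1;		/* upper bits: completed resets; bit 0: in progress */
}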
+enum modeset_restore {
+ MODESET_ON_LID_OPEN,
+ MODESET_DONE,
+ MODESET_SUSPENDED,
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
+ struct kmem_cache *slab;
const struct intel_device_info *info;
@@ -636,10 +870,11 @@ typedef struct drm_i915_private {
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
- struct spinlock gt_lock;
+ spinlock_t gt_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
@@ -649,9 +884,11 @@ typedef struct drm_i915_private {
*/
uint32_t gpio_mmio_base;
+ wait_queue_head_t gmbus_wait_queue;
+
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
- uint32_t next_seqno;
+ uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
@@ -661,31 +898,24 @@ typedef struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+ /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+ struct pm_qos_request pm_qos;
+
/* DPIO indirect register protection */
- spinlock_t dpio_lock;
+ struct mutex dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
- u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
+ bool enable_hotplug_processing;
int num_pipe;
int num_pch_pll;
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
- struct timer_list hangcheck_timer;
- int hangcheck_count;
- uint32_t last_acthd[I915_NUM_RINGS];
- uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
- unsigned int stop_rings;
-
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
@@ -696,7 +926,7 @@ typedef struct drm_i915_private {
/* overlay */
struct intel_overlay *overlay;
- bool sprite_scaling_enabled;
+ unsigned int sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
@@ -713,7 +943,6 @@ typedef struct drm_i915_private {
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
- unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
@@ -734,11 +963,6 @@ typedef struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
- spinlock_t error_lock;
- /* Protected by dev->error_lock. */
- struct drm_i915_error_state *first_error;
- struct work_struct error_work;
- struct completion error_completion;
struct workqueue_struct *wq;
/* Display functions */
@@ -750,115 +974,12 @@ typedef struct drm_i915_private {
unsigned long quirks;
- /* Register state */
- bool modeset_on_lid;
+ enum modeset_restore modeset_restore;
+ struct mutex modeset_restore_lock;
- struct {
- /** Bridge to intel-gtt-ko */
- struct intel_gtt *gtt;
- /** Memory allocator for GTT stolen memory */
- struct drm_mm stolen;
- /** Memory allocator for GTT */
- struct drm_mm gtt_space;
- /** List of all objects in gtt_space. Used to restore gtt
- * mappings on resume */
- struct list_head bound_list;
- /**
- * List of objects which are not bound to the GTT (thus
- * are idle and not used by the GPU) but still have
- * (presumably uncached) pages still attached.
- */
- struct list_head unbound_list;
-
- /** Usable portion of the GTT for GEM */
- unsigned long gtt_start;
- unsigned long gtt_mappable_end;
- unsigned long gtt_end;
-
- struct io_mapping *gtt_mapping;
- phys_addr_t gtt_base_addr;
- int gtt_mtrr;
-
- /** PPGTT used for aliasing the PPGTT with the GTT */
- struct i915_hw_ppgtt *aliasing_ppgtt;
-
- struct shrinker inactive_shrinker;
- bool shrinker_no_lock_stealing;
-
- /**
- * List of objects currently involved in rendering.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head active_list;
-
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
-
- /** LRU list of objects with fence regs on them. */
- struct list_head fence_list;
-
- /**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests.
- */
- struct delayed_work retire_work;
-
- /**
- * Are we in a non-interruptible section of code like
- * modesetting?
- */
- bool interruptible;
-
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int suspended;
-
- /**
- * Flag if the hardware appears to be wedged.
- *
- * This is set when attempts to idle the device timeout.
- * It prevents command submission from occurring and makes
- * every pending request fail
- */
- atomic_t wedged;
-
- /** Bit 6 swizzling required for X tiling */
- uint32_t bit_6_swizzle_x;
- /** Bit 6 swizzling required for Y tiling */
- uint32_t bit_6_swizzle_y;
-
- /* storage for physical objects */
- struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
- /* accounting, useful for userland debugging */
- size_t gtt_total;
- size_t mappable_gtt_total;
- size_t object_memory;
- u32 object_count;
- } mm;
+ struct i915_gtt gtt;
+
+ struct i915_gem_mm mm;
/* Kernel Modesetting */
@@ -900,7 +1021,7 @@ typedef struct drm_i915_private {
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
- unsigned long last_gpu_reset;
+ struct i915_gpu_error gpu_error;
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
@@ -919,7 +1040,7 @@ typedef struct drm_i915_private {
bool hw_contexts_disabled;
uint32_t hw_context_size;
- bool fdi_rx_polarity_reversed;
+ u32 fdi_rx_config;
struct i915_suspend_saved_registers regfile;
@@ -940,11 +1061,7 @@ enum hdmi_force_audio {
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
-enum i915_cache_level {
- I915_CACHE_NONE = 0,
- I915_CACHE_LLC,
- I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
-};
+#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
struct drm_i915_gem_object_ops {
/* Interface between the GEM object and its backing storage.
@@ -971,6 +1088,8 @@ struct drm_i915_gem_object {
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
+ /** Stolen memory for this object, instead of being backed by shmem. */
+ struct drm_mm_node *stolen;
struct list_head gtt_list;
/** This object's place on the active/inactive lists */
@@ -1096,13 +1215,6 @@ struct drm_i915_gem_object {
/** for phy allocated objects */
struct drm_i915_gem_phys_object *phys_obj;
-
- /**
- * Number of crtcs where this object is currently the fb, but
- * will be page flipped away on the next vblank. When it
- * reaches 0, dev_priv->pending_flip_queue will be woken up.
- */
- atomic_t pending_flip;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
@@ -1141,7 +1253,7 @@ struct drm_i915_gem_request {
struct drm_i915_file_private {
struct {
- struct spinlock lock;
+ spinlock_t lock;
struct list_head request_list;
} mm;
struct idr context_idr;
@@ -1227,6 +1339,8 @@ struct drm_i915_file_private {
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_DDI(dev) (IS_HASWELL(dev))
+
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -1323,6 +1437,7 @@ void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
+extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);
@@ -1391,18 +1506,22 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
+void *i915_gem_object_alloc(struct drm_device *dev);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
+
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
@@ -1454,8 +1573,8 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
-
+int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1481,8 +1600,18 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
-int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
+static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+{
+ return unlikely(atomic_read(&error->reset_counter)
+ & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+{
+ return atomic_read(&error->reset_counter) == I915_WEDGED;
+}
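/*
 * Sketch, not part of this patch: the intended lock-free waiter pattern
 * built on the two helpers above - snapshot the counter, drop the lock,
 * sleep, then restart if any reset (completed or in flight) intervened.
 */
static inline int i915_wait_sketch(struct drm_i915_private *dev_priv,
				   unsigned reset_counter)
{
	/* ... sleep for the seqno here ... */
	if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return -EAGAIN;	/* caller must restart the ioctl */
	return 0;
}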
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@ -1523,9 +1652,10 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
- uint32_t size,
- int tiling_mode);
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+ int tiling_mode, bool fenced);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
@@ -1548,7 +1678,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* i915_gem_gtt.c */
-int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
@@ -1562,12 +1691,10 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+ unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
-void i915_gem_gtt_fini(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
@@ -1585,9 +1712,22 @@ int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
+static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+
+ return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ obj->tiling_mode != I915_TILING_NONE;
+}
+
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
@@ -1613,9 +1753,9 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
-/* i915_suspend.c */
-extern int i915_save_state(struct drm_device *dev);
-extern int i915_restore_state(struct drm_device *dev);
+/* i915_ums.c */
+void i915_save_display_reg(struct drm_device *dev);
+void i915_restore_display_reg(struct drm_device *dev);
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
@@ -1672,6 +1812,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
+extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1744,5 +1885,19 @@ __i915_write(64, q)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
+/* "Broadcast RGB" property */
+#define INTEL_BROADCAST_RGB_AUTO 0
+#define INTEL_BROADCAST_RGB_FULL 1
+#define INTEL_BROADCAST_RGB_LIMITED 2
+
+static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
+{
+ if (HAS_PCH_SPLIT(dev))
+ return CPU_VGACNTRL;
+ else if (IS_VALLEYVIEW(dev))
+ return VLV_VGACNTRL;
+ else
+ return VGACNTRL;
+}
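/*
 * Sketch, not part of this patch: a caller would typically pair the helper
 * above with the existing mmio accessors, e.g. to turn the VGA plane off on
 * whichever register the platform uses.  VGA_DISP_DISABLE is assumed to be
 * the existing plane-disable bit from i915_reg.h.
 */
static inline void i915_disable_vga_sketch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(i915_vgacntrl_reg(dev), VGA_DISP_DISABLE);
	POSTING_READ(i915_vgacntrl_reg(dev));
}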
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8febea6daa08..0e207e6e0df8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -87,47 +87,43 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
}
static int
-i915_gem_wait_for_error(struct drm_device *dev)
+i915_gem_wait_for_error(struct i915_gpu_error *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct completion *x = &dev_priv->error_completion;
- unsigned long flags;
int ret;
- if (!atomic_read(&dev_priv->mm.wedged))
+#define EXIT_COND (!i915_reset_in_progress(error))
+ if (EXIT_COND)
return 0;
+ /* GPU is already declared terminally dead, give up. */
+ if (i915_terminally_wedged(error))
+ return -EIO;
+
/*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long something really bad is going on and
* we should simply try to bail out and fail as gracefully as possible.
*/
- ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
+ ret = wait_event_interruptible_timeout(error->reset_queue,
+ EXIT_COND,
+ 10*HZ);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
} else if (ret < 0) {
return ret;
}
+#undef EXIT_COND
- if (atomic_read(&dev_priv->mm.wedged)) {
- /* GPU is hung, bump the completion count to account for
- * the token we just consumed so that we never hit zero and
- * end up waiting upon a subsequent completion event that
- * will never happen.
- */
- spin_lock_irqsave(&x->wait.lock, flags);
- x->done++;
- spin_unlock_irqrestore(&x->wait.lock, flags);
- }
return 0;
}
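/*
 * Sketch of the producer side, not part of this hunk: when the reset handler
 * finishes it flips the in-progress bit (or marks the GPU wedged) and wakes
 * everyone sleeping on reset_queue above.  The exact counter bookkeeping
 * shown is an assumption.
 */
static void i915_reset_done_sketch(struct i915_gpu_error *error, bool success)
{
	if (success)
		atomic_inc(&error->reset_counter);	/* clears the low bit */
	else
		atomic_set(&error->reset_counter, I915_WEDGED);

	wake_up_all(&error->reset_queue);
}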
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_gem_wait_for_error(dev);
+ ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
@@ -149,6 +145,7 @@ int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_init *args = data;
if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +160,9 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
mutex_lock(&dev->struct_mutex);
- i915_gem_init_global_gtt(dev, args->gtt_start,
- args->gtt_end, args->gtt_end);
+ i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+ args->gtt_end);
+ dev_priv->gtt.mappable_end = args->gtt_end;
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -186,12 +184,24 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_size = dev_priv->gtt.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
}
+void *i915_gem_object_alloc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ kmem_cache_free(dev_priv->slab, obj);
+}
+
static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
@@ -215,7 +225,7 @@ i915_gem_create(struct drm_file *file,
if (ret) {
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
- kfree(obj);
+ i915_gem_object_free(obj);
return ret;
}
@@ -259,14 +269,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size, &args->handle);
}
-static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
-{
- drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
-
- return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
- obj->tiling_mode != I915_TILING_NONE;
-}
-
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
@@ -407,7 +409,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
loff_t offset;
int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
- int hit_slowpath = 0;
int prefaulted = 0;
int needs_clflush = 0;
struct scatterlist *sg;
@@ -469,7 +470,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
if (ret == 0)
goto next_page;
- hit_slowpath = 1;
mutex_unlock(&dev->struct_mutex);
if (!prefaulted) {
@@ -502,12 +502,6 @@ next_page:
out:
i915_gem_object_unpin_pages(obj);
- if (hit_slowpath) {
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
- }
-
return ret;
}
@@ -641,7 +635,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
- if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
+ if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_unpin;
@@ -838,12 +832,13 @@ out:
i915_gem_object_unpin_pages(obj);
if (hit_slowpath) {
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
- /* and flush dirty cachelines in case the object isn't in the cpu write
- * domain anymore. */
- if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ /*
+ * Fixup: Flush cpu caches in case we didn't flush the dirty
+ * cachelines in-line while writing and the object moved
+ * out of the cpu write domain while we've dropped the lock.
+ */
+ if (!needs_clflush_after &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
i915_gem_chipset_flush(dev);
}
@@ -940,26 +935,17 @@ unlock:
}
int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible)
{
- if (atomic_read(&dev_priv->mm.wedged)) {
- struct completion *x = &dev_priv->error_completion;
- bool recovery_complete;
- unsigned long flags;
-
- /* Give the error handler a chance to run. */
- spin_lock_irqsave(&x->wait.lock, flags);
- recovery_complete = x->done > 0;
- spin_unlock_irqrestore(&x->wait.lock, flags);
-
+ if (i915_reset_in_progress(error)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
return -EIO;
- /* Recovery complete, but still wedged means reset failure. */
- if (recovery_complete)
+ /* Recovery complete, but the reset failed ... */
+ if (i915_terminally_wedged(error))
return -EIO;
return -EAGAIN;
@@ -990,13 +976,22 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
* __wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
* @seqno: duh!
+ * @reset_counter: reset sequence associated with the given seqno
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an SMP-safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before the seqno, and an appropriate smp_rmb
+ * must be inserted.
+ *
* Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+ unsigned reset_counter,
bool interruptible, struct timespec *timeout)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
@@ -1026,7 +1021,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
- atomic_read(&dev_priv->mm.wedged))
+ i915_reset_in_progress(&dev_priv->gpu_error) || \
+ reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
do {
if (interruptible)
end = wait_event_interruptible_timeout(ring->irq_queue,
@@ -1036,7 +1032,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies);
- ret = i915_gem_check_wedge(dev_priv, interruptible);
+ /* We need to check whether any gpu reset happened in between
+ * the caller grabbing the seqno and now ... */
+ if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ end = -EAGAIN;
+
+ /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
+ * gone. */
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
end = ret;
} while (end == 0 && wait_forever);
@@ -1082,7 +1085,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(seqno == 0);
- ret = i915_gem_check_wedge(dev_priv, interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
@@ -1090,7 +1093,9 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
if (ret)
return ret;
- return __wait_seqno(ring, seqno, interruptible, NULL);
+ return __wait_seqno(ring, seqno,
+ atomic_read(&dev_priv->gpu_error.reset_counter),
+ interruptible, NULL);
}
/**
@@ -1137,6 +1142,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = obj->ring;
+ unsigned reset_counter;
u32 seqno;
int ret;
@@ -1147,7 +1153,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (seqno == 0)
return 0;
- ret = i915_gem_check_wedge(dev_priv, true);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
@@ -1155,8 +1161,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, true, NULL);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
mutex_lock(&dev->struct_mutex);
i915_gem_retire_requests_ring(ring);
@@ -1344,6 +1351,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
+ /* Access to snoopable pages through the GTT is incoherent. */
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
/* Now bind it into the GTT if needed */
ret = i915_gem_object_pin(obj, 0, true, false);
if (ret)
@@ -1359,7 +1372,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
- pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
+ pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
@@ -1374,7 +1387,7 @@ out:
/* If this -EIO is due to a gpu hang, give the reset code a
* chance to clean up the mess. Otherwise return the proper
* SIGBUS. */
- if (!atomic_read(&dev_priv->mm.wedged))
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
return VM_FAULT_SIGBUS;
case -EAGAIN:
/* Give the error handler a chance to run and move the
@@ -1432,7 +1445,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false;
}
-static uint32_t
+uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
uint32_t gtt_size;
@@ -1460,16 +1473,15 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
* Return the required GTT alignment for an object, taking into account
* potential fence register mapping.
*/
-static uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev,
- uint32_t size,
- int tiling_mode)
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+ int tiling_mode, bool fenced)
{
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (INTEL_INFO(dev)->gen >= 4 ||
+ if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
tiling_mode == I915_TILING_NONE)
return 4096;
@@ -1480,35 +1492,6 @@ i915_gem_get_gtt_alignment(struct drm_device *dev,
return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
-/**
- * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
- * unfenced object
- * @dev: the device
- * @size: size of the object
- * @tiling_mode: tiling mode of the object
- *
- * Return the required GTT alignment for an object, only taking into account
- * unfenced tiled surface requirements.
- */
-uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
- uint32_t size,
- int tiling_mode)
-{
- /*
- * Minimum alignment is 4k (GTT page size) for sane hw.
- */
- if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
- tiling_mode == I915_TILING_NONE)
- return 4096;
-
- /* Previous hardware however needs to be aligned to a power-of-two
- * tile height. The simplest method for determining this is to reuse
- * the power-of-tile object size.
- */
- return i915_gem_get_gtt_size(dev, size, tiling_mode);
-}
-
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -1571,7 +1554,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
- if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+ if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
}
@@ -1635,7 +1618,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
- inode = obj->base.filp->f_path.dentry->d_inode;
+ inode = file_inode(obj->base.filp);
shmem_truncate_range(inode, 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
@@ -1689,7 +1672,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
kfree(obj->pages);
}
-static int
+int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
const struct drm_i915_gem_object_ops *ops = obj->ops;
@@ -1800,7 +1783,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
*
* Fail silently without starting the shrinker
*/
- mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ mapping = file_inode(obj->base.filp)->i_mapping;
gfp = mapping_gfp_mask(mapping);
gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
gfp &= ~(__GFP_IO | __GFP_WAIT);
@@ -1862,6 +1845,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
if (obj->pages)
return 0;
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to obtain a purgeable object\n");
+ return -EINVAL;
+ }
+
BUG_ON(obj->pages_pin_count);
ret = ops->get_pages(obj);
@@ -1918,9 +1906,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- if (obj->pin_count) /* are we a framebuffer? */
- intel_mark_fb_idle(obj);
-
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
list_del_init(&obj->ring_list);
@@ -1940,30 +1925,24 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
}
static int
-i915_gem_handle_seqno_wrap(struct drm_device *dev)
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int ret, i, j;
- /* The hardware uses various monotonic 32-bit counters, if we
- * detect that they will wraparound we need to idle the GPU
- * and reset those counters.
- */
- ret = 0;
+ /* Carefully retire all requests without writing to the rings */
for_each_ring(ring, dev_priv, i) {
- for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
- ret |= ring->sync_seqno[j] != 0;
+ ret = intel_ring_idle(ring);
+ if (ret)
+ return ret;
}
- if (ret == 0)
- return ret;
-
- ret = i915_gpu_idle(dev);
- if (ret)
- return ret;
-
i915_gem_retire_requests(dev);
+
+ /* Finally reset hw state */
for_each_ring(ring, dev_priv, i) {
+ intel_ring_init_seqno(ring, seqno);
+
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
ring->sync_seqno[j] = 0;
}
@@ -1971,6 +1950,32 @@ i915_gem_handle_seqno_wrap(struct drm_device *dev)
return 0;
}
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (seqno == 0)
+ return -EINVAL;
+
+ /* The HWS page needs to be set to a seqno lower than the one we
+ * will inject into the ring
+ */
+ ret = i915_gem_init_seqno(dev, seqno - 1);
+ if (ret)
+ return ret;
+
+ /* Carefully set the last_seqno value so that wrap
+ * detection still works
+ */
+ dev_priv->next_seqno = seqno;
+ dev_priv->last_seqno = seqno - 1;
+ if (dev_priv->last_seqno == 0)
+ dev_priv->last_seqno--;
+
+ return 0;
+}
+
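/*
 * Sketch, not part of this patch: a debug-only caller could use the new
 * i915_gem_set_seqno() to start just below the 32-bit wrap point and so
 * exercise the wrap handling within a few requests.  The hook shown is
 * hypothetical.
 */
static int i915_force_seqno_wrap_sketch(struct drm_device *dev)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_set_seqno(dev, 0xfffffff0);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}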
int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
@@ -1978,14 +1983,14 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
/* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) {
- int ret = i915_gem_handle_seqno_wrap(dev);
+ int ret = i915_gem_init_seqno(dev, 0);
if (ret)
return ret;
dev_priv->next_seqno = 1;
}
- *seqno = dev_priv->next_seqno++;
+ *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
return 0;
}
@@ -2052,7 +2057,7 @@ i915_add_request(struct intel_ring_buffer *ring,
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
- mod_timer(&dev_priv->hangcheck_timer,
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
if (was_empty) {
@@ -2317,10 +2322,12 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring = NULL;
struct timespec timeout_stack, *timeout = NULL;
+ unsigned reset_counter;
u32 seqno = 0;
int ret = 0;
@@ -2361,9 +2368,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
drm_gem_object_unreference(&obj->base);
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, true, timeout);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
if (timeout) {
WARN_ON(!timespec_valid(timeout));
args->timeout_ns = timespec_to_ns(timeout);
@@ -2427,15 +2435,15 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
u32 old_write_domain, old_read_domains;
- /* Act a barrier for all accesses through the GTT */
- mb();
-
/* Force a pagefault for domain tracking on next user access */
i915_gem_release_mmap(obj);
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
return;
+ /* Wait for any direct GTT access to complete */
+ mb();
+
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
@@ -2454,7 +2462,7 @@ int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
- int ret = 0;
+ int ret;
if (obj->gtt_space == NULL)
return 0;
@@ -2521,52 +2529,38 @@ int i915_gpu_idle(struct drm_device *dev)
return 0;
}
-static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint64_t val;
-
- if (obj) {
- u32 size = obj->gtt_space->size;
-
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
- 0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
- val |= (uint64_t)((obj->stride / 128) - 1) <<
- SANDYBRIDGE_FENCE_PITCH_SHIFT;
-
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
- } else
- val = 0;
-
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
- POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
-}
-
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int fence_reg;
+ int fence_pitch_shift;
uint64_t val;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ fence_reg = FENCE_REG_SANDYBRIDGE_0;
+ fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+ } else {
+ fence_reg = FENCE_REG_965_0;
+ fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+ }
+
if (obj) {
u32 size = obj->gtt_space->size;
val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
val |= obj->gtt_offset & 0xfffff000;
- val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
} else
val = 0;
- I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
- POSTING_READ(FENCE_REG_965_0 + reg * 8);
+ fence_reg += reg * 8;
+ I915_WRITE64(fence_reg, val);
+ POSTING_READ(fence_reg);
}
static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2645,18 +2639,37 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
POSTING_READ(FENCE_REG_830_0 + reg * 4);
}
+static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+ return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Ensure that all CPU reads are completed before installing a fence
+ * and all writes before removing the fence.
+ */
+ if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+ mb();
+
switch (INTEL_INFO(dev)->gen) {
case 7:
- case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
+ case 6:
case 5:
case 4: i965_write_fence_reg(dev, reg, obj); break;
case 3: i915_write_fence_reg(dev, reg, obj); break;
case 2: i830_write_fence_reg(dev, reg, obj); break;
- default: break;
+ default: BUG();
}
+
+ /* And similarly be paranoid that no direct access to this region
+ * is reordered to before the fence is installed.
+ */
+ if (i915_gem_object_needs_mb(obj))
+ mb();
}
static inline int fence_number(struct drm_i915_private *dev_priv,
@@ -2686,7 +2699,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
if (obj->last_fenced_seqno) {
int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
@@ -2696,12 +2709,6 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
obj->last_fenced_seqno = 0;
}
- /* Ensure that all CPU reads are completed before installing a fence
- * and all writes before removing the fence.
- */
- if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
- mb();
-
obj->fenced_gpu_access = false;
return 0;
}
@@ -2712,7 +2719,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- ret = i915_gem_object_flush_fence(obj);
+ ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
@@ -2786,7 +2793,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
* will need to serialise the write to the associated fence register?
*/
if (obj->fence_dirty) {
- ret = i915_gem_object_flush_fence(obj);
+ ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
}
@@ -2807,7 +2814,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
if (reg->obj) {
struct drm_i915_gem_object *old = reg->obj;
- ret = i915_gem_object_flush_fence(old);
+ ret = i915_gem_object_wait_fence(old);
if (ret)
return ret;
@@ -2830,7 +2837,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
/* On non-LLC machines we have to be careful when putting differing
* types of snoopable memory together to avoid the prefetcher
- * crossing memory domains and dieing.
+ * crossing memory domains and dying.
*/
if (HAS_LLC(dev))
return true;
@@ -2908,21 +2915,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
bool mappable, fenceable;
int ret;
- if (obj->madv != I915_MADV_WILLNEED) {
- DRM_ERROR("Attempting to bind a purgeable object\n");
- return -EINVAL;
- }
-
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(dev,
obj->base.size,
- obj->tiling_mode);
+ obj->tiling_mode, true);
unfenced_alignment =
- i915_gem_get_unfenced_gtt_alignment(dev,
+ i915_gem_get_gtt_alignment(dev,
obj->base.size,
- obj->tiling_mode);
+ obj->tiling_mode, false);
if (alignment == 0)
alignment = map_and_fenceable ? fence_alignment :
@@ -2938,7 +2940,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
* before evicting everything in a vain attempt to find space.
*/
if (obj->base.size >
- (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
+ (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
@@ -2959,7 +2961,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (map_and_fenceable)
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end);
+ 0, dev_priv->gtt.mappable_end);
else
ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level);
@@ -2999,7 +3001,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
(node->start & (fence_alignment - 1)) == 0;
mappable =
- obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+ obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
obj->map_and_fenceable = mappable && fenceable;
@@ -3019,6 +3021,13 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
if (obj->pages == NULL)
return;
+ /*
+ * Stolen memory is always coherent with the GPU as it is explicitly
+ * marked as wc by the system, or the system is cache-coherent.
+ */
+ if (obj->stolen)
+ return;
+
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
* the caches are only snooped when the render cache is
@@ -3107,6 +3116,13 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
i915_gem_object_flush_cpu_write_domain(obj);
+ /* Serialise direct access to this object with the barriers for
+ * coherent writes from the GPU, by effectively invalidating the
+ * GTT domain upon first access.
+ */
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ mb();
+
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3413,11 +3429,17 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
struct intel_ring_buffer *ring = NULL;
+ unsigned reset_counter;
u32 seqno = 0;
int ret;
- if (atomic_read(&dev_priv->mm.wedged))
- return -EIO;
+ ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
+ if (ret)
+ return ret;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
@@ -3427,12 +3449,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
ring = request->ring;
seqno = request->seqno;
}
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
spin_unlock(&file_priv->mm.lock);
if (seqno == 0)
return 0;
- ret = __wait_seqno(ring, seqno, true, NULL);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -3706,14 +3729,14 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
{
struct drm_i915_gem_object *obj;
struct address_space *mapping;
- u32 mask;
+ gfp_t mask;
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return NULL;
if (drm_gem_object_init(dev, &obj->base, size) != 0) {
- kfree(obj);
+ i915_gem_object_free(obj);
return NULL;
}
@@ -3724,7 +3747,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
mask |= __GFP_DMA32;
}
- mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ mapping = file_inode(obj->base.filp)->i_mapping;
mapping_set_gfp_mask(mapping, mask);
i915_gem_object_init(obj, &i915_gem_object_ops);
@@ -3785,6 +3808,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
obj->pages_pin_count = 0;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
+ i915_gem_object_release_stolen(obj);
BUG_ON(obj->pages);
@@ -3795,7 +3819,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_info_remove_obj(dev_priv, obj->base.size);
kfree(obj->bit_17);
- kfree(obj);
+ i915_gem_object_free(obj);
}
int
@@ -3829,7 +3853,7 @@ i915_gem_idle(struct drm_device *dev)
* And not confound mm.suspended!
*/
dev_priv->mm.suspended = 1;
- del_timer_sync(&dev_priv->hangcheck_timer);
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
@@ -3848,7 +3872,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
u32 misccpctl;
int i;
- if (!IS_IVYBRIDGE(dev))
+ if (!HAS_L3_GPU_CACHE(dev))
return;
if (!dev_priv->l3_parity.remap_info)
@@ -3891,8 +3915,10 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN6(dev))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else
+ else if (IS_GEN7(dev))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ else
+ BUG();
}
static bool
@@ -3911,22 +3937,11 @@ intel_enable_blt(struct drm_device *dev)
return true;
}
-int
-i915_gem_init_hw(struct drm_device *dev)
+static int i915_gem_init_rings(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
- return -EIO;
-
- if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
- I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
-
- i915_gem_l3_remap(dev);
-
- i915_gem_init_swizzling(dev);
-
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
@@ -3943,76 +3958,59 @@ i915_gem_init_hw(struct drm_device *dev)
goto cleanup_bsd_ring;
}
- dev_priv->next_seqno = 1;
-
- /*
- * XXX: There was some w/a described somewhere suggesting loading
- * contexts before PPGTT.
- */
- i915_gem_context_init(dev);
- i915_gem_init_ppgtt(dev);
+ ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
+ if (ret)
+ goto cleanup_blt_ring;
return 0;
+cleanup_blt_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+
return ret;
}
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
+int
+i915_gem_init_hw(struct drm_device *dev)
{
- if (i915_enable_ppgtt >= 0)
- return i915_enable_ppgtt;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
- return false;
-#endif
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+ return -EIO;
- return true;
+ if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
+ I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+
+ i915_gem_l3_remap(dev);
+
+ i915_gem_init_swizzling(dev);
+
+ ret = i915_gem_init_rings(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * XXX: There was some w/a described somewhere suggesting loading
+ * contexts before PPGTT.
+ */
+ i915_gem_context_init(dev);
+ i915_gem_init_ppgtt(dev);
+
+ return 0;
}
int i915_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long gtt_size, mappable_size;
int ret;
- gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
- mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
mutex_lock(&dev->struct_mutex);
- if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
- /* PPGTT pdes are stolen from global gtt ptes, so shrink the
- * aperture accordingly when using aliasing ppgtt. */
- gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-
- i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
-
- ret = i915_gem_init_aliasing_ppgtt(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- } else {
- /* Let GEM Manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch
- * page. There are a number of places where the hardware
- * apparently prefetches past the end of the object, and we've
- * seen multiple hangs with the GPU head pointer stuck in a
- * batchbuffer bound at the last page of the aperture. One page
- * should be enough to keep any prefetching inside of the
- * aperture.
- */
- i915_gem_init_global_gtt(dev, 0, mappable_size,
- gtt_size);
- }
-
+ i915_gem_init_global_gtt(dev);
ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (ret) {
@@ -4047,9 +4045,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (atomic_read(&dev_priv->mm.wedged)) {
+ if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
- atomic_set(&dev_priv->mm.wedged, 0);
+ atomic_set(&dev_priv->gpu_error.reset_counter, 0);
}
mutex_lock(&dev->struct_mutex);
@@ -4113,8 +4111,14 @@ init_ring_lists(struct intel_ring_buffer *ring)
void
i915_gem_load(struct drm_device *dev)
{
- int i;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ dev_priv->slab =
+ kmem_cache_create("i915_gem_object",
+ sizeof(struct drm_i915_gem_object), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
@@ -4127,7 +4131,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
- init_completion(&dev_priv->error_completion);
+ init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
@@ -4228,7 +4232,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
char *vaddr;
int i;
int page_count;
@@ -4264,7 +4268,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
int id,
int align)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret = 0;
int page_count;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a3f06bcad551..94d873a6cffb 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -126,13 +126,8 @@ static int get_context_size(struct drm_device *dev)
static void do_destroy(struct i915_hw_context *ctx)
{
- struct drm_device *dev = ctx->obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (ctx->file_priv)
idr_remove(&ctx->file_priv->context_idr, ctx->id);
- else
- BUG_ON(ctx != dev_priv->ring[RCS].default_context);
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
@@ -144,7 +139,7 @@ create_hw_context(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
- int ret, id;
+ int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
@@ -169,22 +164,11 @@ create_hw_context(struct drm_device *dev,
ctx->file_priv = file_priv;
-again:
- if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
- ret = -ENOMEM;
- DRM_DEBUG_DRIVER("idr allocation failed\n");
- goto err_out;
- }
-
- ret = idr_get_new_above(&file_priv->context_idr, ctx,
- DEFAULT_CONTEXT_ID + 1, &id);
- if (ret == 0)
- ctx->id = id;
-
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
+ ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
+ GFP_KERNEL);
+ if (ret < 0)
goto err_out;
+ ctx->id = ret;
return ctx;
@@ -242,7 +226,6 @@ err_destroy:
void i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t ctx_size;
if (!HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_contexts_disabled = true;
@@ -254,11 +237,9 @@ void i915_gem_context_init(struct drm_device *dev)
dev_priv->ring[RCS].default_context)
return;
- ctx_size = get_context_size(dev);
- dev_priv->hw_context_size = get_context_size(dev);
- dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096);
+ dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
- if (ctx_size <= 0 || ctx_size > (1<<20)) {
+ if (dev_priv->hw_context_size > (1<<20)) {
dev_priv->hw_contexts_disabled = true;
return;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index abeaafef6d7e..6a5af6828624 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -281,8 +281,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_CAST(attach);
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ obj = i915_gem_object_alloc(dev);
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;
@@ -290,7 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
if (ret) {
- kfree(obj);
+ i915_gem_object_free(obj);
goto fail_detach;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 776a3225184c..c86d5d9356fd 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -80,7 +80,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
min_size, alignment, cache_level,
- 0, dev_priv->mm.gtt_mappable_end);
+ 0, dev_priv->gtt.mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space,
min_size, alignment, cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 26d08bb58218..2f2daebd0eef 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,61 +34,133 @@
#include <linux/dma_remapping.h>
struct eb_objects {
+ struct list_head objects;
int and;
- struct hlist_head buckets[0];
+ union {
+ struct drm_i915_gem_object *lut[0];
+ struct hlist_head buckets[0];
+ };
};
static struct eb_objects *
-eb_create(int size)
+eb_create(struct drm_i915_gem_execbuffer2 *args)
{
- struct eb_objects *eb;
- int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
- BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
- while (count > size)
- count >>= 1;
- eb = kzalloc(count*sizeof(struct hlist_head) +
- sizeof(struct eb_objects),
- GFP_KERNEL);
- if (eb == NULL)
- return eb;
-
- eb->and = count - 1;
+ struct eb_objects *eb = NULL;
+
+ if (args->flags & I915_EXEC_HANDLE_LUT) {
+ int size = args->buffer_count;
+ size *= sizeof(struct drm_i915_gem_object *);
+ size += sizeof(struct eb_objects);
+ eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ }
+
+ if (eb == NULL) {
+ int size = args->buffer_count;
+ int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
+ while (count > 2*size)
+ count >>= 1;
+ eb = kzalloc(count*sizeof(struct hlist_head) +
+ sizeof(struct eb_objects),
+ GFP_TEMPORARY);
+ if (eb == NULL)
+ return eb;
+
+ eb->and = count - 1;
+ } else
+ eb->and = -args->buffer_count;
+
+ INIT_LIST_HEAD(&eb->objects);
return eb;
}
static void
eb_reset(struct eb_objects *eb)
{
- memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+ if (eb->and >= 0)
+ memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
-static void
-eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
+static int
+eb_lookup_objects(struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *exec,
+ const struct drm_i915_gem_execbuffer2 *args,
+ struct drm_file *file)
{
- hlist_add_head(&obj->exec_node,
- &eb->buckets[obj->exec_handle & eb->and]);
+ int i;
+
+ spin_lock(&file->table_lock);
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
+ if (obj == NULL) {
+ spin_unlock(&file->table_lock);
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ return -ENOENT;
+ }
+
+ if (!list_empty(&obj->exec_list)) {
+ spin_unlock(&file->table_lock);
+ DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
+ obj, exec[i].handle, i);
+ return -EINVAL;
+ }
+
+ drm_gem_object_reference(&obj->base);
+ list_add_tail(&obj->exec_list, &eb->objects);
+
+ obj->exec_entry = &exec[i];
+ if (eb->and < 0) {
+ eb->lut[i] = obj;
+ } else {
+ uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
+ obj->exec_handle = handle;
+ hlist_add_head(&obj->exec_node,
+ &eb->buckets[handle & eb->and]);
+ }
+ }
+ spin_unlock(&file->table_lock);
+
+ return 0;
}
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
- struct hlist_head *head;
- struct hlist_node *node;
- struct drm_i915_gem_object *obj;
+ if (eb->and < 0) {
+ if (handle >= -eb->and)
+ return NULL;
+ return eb->lut[handle];
+ } else {
+ struct hlist_head *head;
+ struct hlist_node *node;
- head = &eb->buckets[handle & eb->and];
- hlist_for_each(node, head) {
- obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
- if (obj->exec_handle == handle)
- return obj;
- }
+ head = &eb->buckets[handle & eb->and];
+ hlist_for_each(node, head) {
+ struct drm_i915_gem_object *obj;
- return NULL;
+ obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
+ if (obj->exec_handle == handle)
+ return obj;
+ }
+ return NULL;
+ }
}
static void
eb_destroy(struct eb_objects *eb)
{
+ while (!list_empty(&eb->objects)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&eb->objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
kfree(eb);
}
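The eb_objects rewrite above discriminates on the sign of eb->and: a negative value (the I915_EXEC_HANDLE_LUT case) means execbuffer handles are plain indices into a flat lookup table sized to the buffer count, while a non-negative value is a power-of-two-minus-one mask over hash buckets. Reduced to a sketch (find_in_bucket is a stand-in for the hlist walk in eb_get_object(), not a real helper):

	if (eb->and < 0)	/* flat LUT: handle is an index */
		return handle < (unsigned long)-eb->and ? eb->lut[handle] : NULL;
	else			/* hashed: handle masked into a bucket */
		return find_in_bucket(&eb->buckets[handle & eb->and], handle);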
@@ -150,17 +222,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
reloc->write_domain);
return ret;
}
- if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
- reloc->write_domain != target_obj->pending_write_domain)) {
- DRM_DEBUG("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->write_domain,
- target_obj->pending_write_domain);
- return ret;
- }
target_obj->pending_read_domains |= reloc->read_domains;
target_obj->pending_write_domain |= reloc->write_domain;
@@ -220,7 +281,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc->offset & ~PAGE_MASK));
@@ -299,8 +360,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
- struct eb_objects *eb,
- struct list_head *objects)
+ struct eb_objects *eb)
{
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -313,7 +373,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
* lockdep complains vehemently.
*/
pagefault_disable();
- list_for_each_entry(obj, objects, exec_list) {
+ list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
break;
@@ -335,7 +395,8 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring,
+ bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
@@ -376,7 +437,20 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
- entry->offset = obj->gtt_offset;
+ if (entry->offset != obj->gtt_offset) {
+ entry->offset = obj->gtt_offset;
+ *need_reloc = true;
+ }
+
+ if (entry->flags & EXEC_OBJECT_WRITE) {
+ obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
+ obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
+ }
+
+ if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
+ !obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
return 0;
}
@@ -402,7 +476,8 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
- struct list_head *objects)
+ struct list_head *objects,
+ bool *need_relocs)
{
struct drm_i915_gem_object *obj;
struct list_head ordered_objects;
@@ -430,7 +505,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
else
list_move_tail(&obj->exec_list, &ordered_objects);
- obj->base.pending_read_domains = 0;
+ obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
@@ -470,7 +545,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
- ret = i915_gem_execbuffer_reserve_object(obj, ring);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
if (ret)
goto err;
}
@@ -480,7 +555,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (obj->gtt_space)
continue;
- ret = i915_gem_execbuffer_reserve_object(obj, ring);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
if (ret)
goto err;
}
@@ -500,21 +575,22 @@ err: /* Decrement pin count for bound objects */
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
- struct list_head *objects,
struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec,
- int count)
+ struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
+ bool need_relocs;
int *reloc_offset;
int i, total, ret;
+ int count = args->buffer_count;
/* We may process another execbuffer during the unlock... */
- while (!list_empty(objects)) {
- obj = list_first_entry(objects,
+ while (!list_empty(&eb->objects)) {
+ obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
@@ -582,27 +658,16 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
- for (i = 0; i < count; i++) {
- obj = to_intel_bo(drm_gem_object_lookup(dev, file,
- exec[i].handle));
- if (&obj->base == NULL) {
- DRM_DEBUG("Invalid object handle %d at index %d\n",
- exec[i].handle, i);
- ret = -ENOENT;
- goto err;
- }
-
- list_add_tail(&obj->exec_list, objects);
- obj->exec_handle = exec[i].handle;
- obj->exec_entry = &exec[i];
- eb_add_object(eb, obj);
- }
+ ret = eb_lookup_objects(eb, exec, args, file);
+ if (ret)
+ goto err;
- ret = i915_gem_execbuffer_reserve(ring, file, objects);
+ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+ ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
if (ret)
goto err;
- list_for_each_entry(obj, objects, exec_list) {
+ list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
@@ -623,44 +688,11 @@ err:
}
static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
-{
- u32 plane, flip_mask;
- int ret;
-
- /* Check for any pending flips. As we only maintain a flip queue depth
- * of 1, we can simply insert a WAIT for the next display flip prior
- * to executing the batch and avoid stalling the CPU.
- */
-
- for (plane = 0; flips >> plane; plane++) {
- if (((flips >> plane) & 1) == 0)
- continue;
-
- if (plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- }
-
- return 0;
-}
-
-static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
uint32_t flush_domains = 0;
- uint32_t flips = 0;
int ret;
list_for_each_entry(obj, objects, exec_list) {
@@ -671,18 +703,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
- if (obj->base.pending_write_domain)
- flips |= atomic_read(&obj->pending_flip);
-
flush_domains |= obj->base.write_domain;
}
- if (flips) {
- ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
- if (ret)
- return ret;
- }
-
if (flush_domains & I915_GEM_DOMAIN_CPU)
i915_gem_chipset_flush(ring->dev);
@@ -698,6 +721,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
+ if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
+ return false;
+
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
@@ -711,6 +737,9 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
int length; /* limited by fault_in_pages_readable() */
+ if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
+ return -EINVAL;
+
/* First check for malicious input causing overflow */
if (exec[i].relocation_count >
INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
@@ -718,9 +747,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(VERIFY_READ, ptr, length))
- return -EFAULT;
-
/* we may also need to update the presumed offsets */
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
@@ -742,8 +768,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
- obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
+ if (obj->base.write_domain == 0)
+ obj->base.pending_read_domains |= obj->base.read_domains;
+ obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
i915_gem_object_move_to_active(obj, ring);
@@ -802,21 +830,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct list_head objects;
struct eb_objects *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
- u32 mask;
- u32 flags;
+ u32 mask, flags;
int ret, mode, i;
+ bool need_relocs;
- if (!i915_gem_check_execbuffer(args)) {
- DRM_DEBUG("execbuf with invalid offset/length\n");
+ if (!i915_gem_check_execbuffer(args))
return -EINVAL;
- }
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
@@ -937,7 +962,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
- eb = eb_create(args->buffer_count);
+ eb = eb_create(args);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
@@ -945,51 +970,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Look up object handles */
- INIT_LIST_HEAD(&objects);
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_i915_gem_object *obj;
-
- obj = to_intel_bo(drm_gem_object_lookup(dev, file,
- exec[i].handle));
- if (&obj->base == NULL) {
- DRM_DEBUG("Invalid object handle %d at index %d\n",
- exec[i].handle, i);
- /* prevent error path from reading uninitialized data */
- ret = -ENOENT;
- goto err;
- }
-
- if (!list_empty(&obj->exec_list)) {
- DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
- obj, exec[i].handle, i);
- ret = -EINVAL;
- goto err;
- }
-
- list_add_tail(&obj->exec_list, &objects);
- obj->exec_handle = exec[i].handle;
- obj->exec_entry = &exec[i];
- eb_add_object(eb, obj);
- }
+ ret = eb_lookup_objects(eb, exec, args, file);
+ if (ret)
+ goto err;
/* take note of the batch buffer before we might reorder the lists */
- batch_obj = list_entry(objects.prev,
+ batch_obj = list_entry(eb->objects.prev,
struct drm_i915_gem_object,
exec_list);
/* Move the objects en-masse into the GTT, evicting if necessary. */
- ret = i915_gem_execbuffer_reserve(ring, file, &objects);
+ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+ ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
- ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
+ if (need_relocs)
+ ret = i915_gem_execbuffer_relocate(dev, eb);
if (ret) {
if (ret == -EFAULT) {
- ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
- &objects, eb,
- exec,
- args->buffer_count);
+ ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+ eb, exec);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@@ -1011,7 +1013,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
- ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
if (ret)
goto err;
@@ -1065,20 +1067,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
- i915_gem_execbuffer_move_to_active(&objects, ring);
+ i915_gem_execbuffer_move_to_active(&eb->objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
eb_destroy(eb);
- while (!list_empty(&objects)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&objects,
- struct drm_i915_gem_object,
- exec_list);
- list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
- }
mutex_unlock(&dev->struct_mutex);
@@ -1187,7 +1180,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
}
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
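Taken together, the reserve/relocate changes in this file implement the I915_EXEC_NO_RELOC fast path: reservation only raises the need_relocs flag when an object's presumed offset turned out to be stale, so a working set whose placement is unchanged skips the relocation walk entirely. In outline, using the functions as reworked above:

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
	if (ret == 0 && need_relocs)
		ret = i915_gem_execbuffer_relocate(dev, eb);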
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2c150dee78a7..926a1e2dd234 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -44,9 +44,9 @@ typedef uint32_t gtt_pte_t;
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-static inline gtt_pte_t pte_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
{
gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -77,7 +77,7 @@ static inline gtt_pte_t pte_encode(struct drm_device *dev,
}
/* PPGTT support for Sandybridge/Gen6 and later */
-static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
@@ -87,8 +87,9 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
- I915_CACHE_LLC);
+ scratch_pte = gen6_pte_encode(ppgtt->dev,
+ ppgtt->scratch_page_dma_addr,
+ I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -108,10 +109,72 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
}
}
-int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
+ struct sg_table *pages,
+ unsigned first_entry,
+ enum i915_cache_level cache_level)
{
+ gtt_pte_t *pt_vaddr;
+ unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned i, j, m, segment_len;
+ dma_addr_t page_addr;
+ struct scatterlist *sg;
+
+ /* init sg walking */
+ sg = pages->sgl;
+ i = 0;
+ segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+ m = 0;
+
+ while (i < pages->nents) {
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+ for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
+ page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
+ cache_level);
+
+ /* grab the next page */
+ if (++m == segment_len) {
+ if (++i == pages->nents)
+ break;
+
+ sg = sg_next(sg);
+ segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+ m = 0;
+ }
+ }
+
+ kunmap_atomic(pt_vaddr);
+
+ first_pte = 0;
+ act_pd++;
+ }
+}
+
+static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ if (ppgtt->pt_dma_addr) {
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ pci_unmap_page(ppgtt->dev->pdev,
+ ppgtt->pt_dma_addr[i],
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ }
+
+ kfree(ppgtt->pt_dma_addr);
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ __free_page(ppgtt->pt_pages[i]);
+ kfree(ppgtt->pt_pages);
+ kfree(ppgtt);
+}
+
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt;
unsigned first_pd_entry_in_global_pt;
int i;
int ret = -ENOMEM;
@@ -119,18 +182,17 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
- first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ret;
+ first_pd_entry_in_global_pt =
+ gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
- ppgtt->dev = dev;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+ ppgtt->clear_range = gen6_ppgtt_clear_range;
+ ppgtt->insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->cleanup = gen6_ppgtt_cleanup;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
- goto err_ppgtt;
+ return -ENOMEM;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
@@ -138,39 +200,32 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
goto err_pt_alloc;
}
- if (dev_priv->mm.gtt->needs_dmar) {
- ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
- *ppgtt->num_pd_entries,
- GFP_KERNEL);
- if (!ppgtt->pt_dma_addr)
- goto err_pt_alloc;
+ ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
+ GFP_KERNEL);
+ if (!ppgtt->pt_dma_addr)
+ goto err_pt_alloc;
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
- pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
- 0, 4096,
- PCI_DMA_BIDIRECTIONAL);
+ pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
+ PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev,
- pt_addr)) {
- ret = -EIO;
- goto err_pd_pin;
+ if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
+ ret = -EIO;
+ goto err_pd_pin;
- }
- ppgtt->pt_dma_addr[i] = pt_addr;
}
+ ppgtt->pt_dma_addr[i] = pt_addr;
}
- ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
+ ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
- i915_ppgtt_clear_range(ppgtt, 0,
- ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+ ppgtt->clear_range(ppgtt, 0,
+ ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
- dev_priv->mm.aliasing_ppgtt = ppgtt;
-
return 0;
err_pd_pin:
@@ -186,94 +241,57 @@ err_pt_alloc:
__free_page(ppgtt->pt_pages[i]);
}
kfree(ppgtt->pt_pages);
-err_ppgtt:
- kfree(ppgtt);
return ret;
}
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int i;
+ struct i915_hw_ppgtt *ppgtt;
+ int ret;
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
- return;
+ return -ENOMEM;
- if (ppgtt->pt_dma_addr) {
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
- 4096, PCI_DMA_BIDIRECTIONAL);
- }
+ ppgtt->dev = dev;
- kfree(ppgtt->pt_dma_addr);
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- __free_page(ppgtt->pt_pages[i]);
- kfree(ppgtt->pt_pages);
- kfree(ppgtt);
+ ret = gen6_ppgtt_init(ppgtt);
+ if (ret)
+ kfree(ppgtt);
+ else
+ dev_priv->mm.aliasing_ppgtt = ppgtt;
+
+ return ret;
}
-static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
- const struct sg_table *pages,
- unsigned first_entry,
- enum i915_cache_level cache_level)
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
- gtt_pte_t *pt_vaddr;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- unsigned i, j, m, segment_len;
- dma_addr_t page_addr;
- struct scatterlist *sg;
-
- /* init sg walking */
- sg = pages->sgl;
- i = 0;
- segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
- m = 0;
-
- while (i < pages->nents) {
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
- for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
- page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
- cache_level);
-
- /* grab the next page */
- if (++m == segment_len) {
- if (++i == pages->nents)
- break;
-
- sg = sg_next(sg);
- segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
- m = 0;
- }
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- kunmap_atomic(pt_vaddr);
+ if (!ppgtt)
+ return;
- first_pte = 0;
- act_pd++;
- }
+ ppgtt->cleanup(ppgtt);
}
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- cache_level);
+ ppgtt->insert_entries(ppgtt, obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
- i915_ppgtt_clear_range(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ ppgtt->clear_range(ppgtt,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
}
void i915_gem_init_ppgtt(struct drm_device *dev)
@@ -282,7 +300,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- uint32_t __iomem *pd_addr;
+ gtt_pte_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
@@ -290,15 +308,11 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
return;
- pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+ pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
- if (dev_priv->mm.gtt->needs_dmar)
- pt_addr = ppgtt->pt_dma_addr[i];
- else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
+ pt_addr = ppgtt->pt_dma_addr[i];
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
@@ -338,11 +352,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
}
}
+extern int intel_iommu_gfx_mapped;
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline bool needs_idle_maps(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ /* Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
+ return true;
+#endif
+ return false;
+}
+
static bool do_idling(struct drm_i915_private *dev_priv)
{
bool ret = dev_priv->mm.interruptible;
- if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
+ if (unlikely(dev_priv->gtt.do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
@@ -356,45 +386,18 @@ static bool do_idling(struct drm_i915_private *dev_priv)
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
- if (unlikely(dev_priv->mm.gtt->do_idle_maps))
+ if (unlikely(dev_priv->gtt.do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}
-
-static void i915_ggtt_clear_range(struct drm_device *dev,
- unsigned first_entry,
- unsigned num_entries)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- gtt_pte_t scratch_pte;
- gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
- const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
- int i;
-
- if (INTEL_INFO(dev)->gen < 6) {
- intel_gtt_clear_range(first_entry, num_entries);
- return;
- }
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
- for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
- readl(gtt_base);
-}
-
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
- (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+ dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
+ dev_priv->gtt.total / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
i915_gem_clflush_object(obj);
@@ -423,16 +426,15 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
-static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level level)
+static void gen6_ggtt_insert_entries(struct drm_device *dev,
+ struct sg_table *st,
+ unsigned int first_entry,
+ enum i915_cache_level level)
{
- struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct sg_table *st = obj->pages;
struct scatterlist *sg = st->sgl;
- const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
- const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
- gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+ gtt_pte_t __iomem *gtt_entries =
+ (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int unused, i = 0;
unsigned int len, m = 0;
dma_addr_t addr;
@@ -441,14 +443,12 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (m = 0; m < len; m++) {
addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
+ iowrite32(gen6_pte_encode(dev, addr, level),
+ &gtt_entries[i]);
i++;
}
}
- BUG_ON(i > max_entries);
- BUG_ON(i != obj->base.size / PAGE_SIZE);
-
/* XXX: This serves as a posting read to make sure that the PTE has
* actually been updated. There is some concern that even though
* registers and PTEs are within the same BAR that they are potentially
@@ -456,7 +456,8 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
- WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+ WARN_ON(readl(&gtt_entries[i-1])
+ != gen6_pte_encode(dev, addr, level));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -466,28 +467,70 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
+static void gen6_ggtt_clear_range(struct drm_device *dev,
+ unsigned int first_entry,
+ unsigned int num_entries)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ gtt_pte_t scratch_pte;
+ gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
+ I915_CACHE_LLC);
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+ readl(gtt_base);
+}
+
+
+static void i915_ggtt_insert_entries(struct drm_device *dev,
+ struct sg_table *st,
+ unsigned int pg_start,
+ enum i915_cache_level cache_level)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_sg_entries(st, pg_start, flags);
+
+}
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+ unsigned int first_entry,
+ unsigned int num_entries)
+{
+ intel_gtt_clear_range(first_entry, num_entries);
+}
+
+
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- if (INTEL_INFO(dev)->gen < 6) {
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_sg_entries(obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- flags);
- } else {
- gen6_ggtt_bind_object(obj, cache_level);
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ cache_level);
obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
- i915_ggtt_clear_range(obj->base.dev,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->gtt.gtt_clear_range(obj->base.dev,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
}
@@ -524,27 +567,101 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
*end -= 4096;
}
}
-
-void i915_gem_init_global_gtt(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end)
+void i915_gem_setup_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
{
+ /* Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch page.
+ * There are a number of places where the hardware apparently prefetches
+ * past the end of the object, and we've seen multiple hangs with the
+ * GPU head pointer stuck in a batchbuffer bound at the last page of the
+ * aperture. One page should be enough to keep any prefetching inside
+ * of the aperture.
+ */
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_mm_node *entry;
+ struct drm_i915_gem_object *obj;
+ unsigned long hole_start, hole_end;
- /* Substract the guard page ... */
+ BUG_ON(mappable_end > end);
+
+ /* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
- dev_priv->mm.gtt_start = start;
- dev_priv->mm.gtt_mappable_end = mappable_end;
- dev_priv->mm.gtt_end = end;
- dev_priv->mm.gtt_total = end - start;
- dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+ /* Mark any preallocated objects as occupied */
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+ DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
+ obj->gtt_offset, obj->base.size);
+
+ BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
+ obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+ obj->gtt_offset,
+ obj->base.size,
+ false);
+ obj->has_global_gtt_mapping = 1;
+ }
+
+ dev_priv->gtt.start = start;
+ dev_priv->gtt.total = end - start;
+
+ /* Clear any non-preallocated blocks */
+ drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
+ hole_start, hole_end) {
+ DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
+ dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
+ (hole_end-hole_start) / PAGE_SIZE);
+ }
- /* ... but ensure that we clear the entire range. */
- i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+ /* And finally clear the reserved guard page */
+ dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+}
+
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+ if (i915_enable_ppgtt >= 0)
+ return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return true;
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long gtt_size, mappable_size;
+
+ gtt_size = dev_priv->gtt.total;
+ mappable_size = dev_priv->gtt.mappable_end;
+
+ if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+ int ret;
+ /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+ * aperture accordingly when using aliasing ppgtt. */
+ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+ i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+
+ ret = i915_gem_init_aliasing_ppgtt(dev);
+ if (!ret)
+ return;
+
+ DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
+ drm_mm_takedown(&dev_priv->mm.gtt_space);
+ gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ }
+ i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
static int setup_scratch_page(struct drm_device *dev)
@@ -567,8 +684,8 @@ static int setup_scratch_page(struct drm_device *dev)
#else
dma_addr = page_to_phys(page);
#endif
- dev_priv->mm.gtt->scratch_page = page;
- dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+ dev_priv->gtt.scratch_page = page;
+ dev_priv->gtt.scratch_page_dma = dma_addr;
return 0;
}
@@ -576,11 +693,11 @@ static int setup_scratch_page(struct drm_device *dev)
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
- pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+ set_pages_wb(dev_priv->gtt.scratch_page, 1);
+ pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(dev_priv->mm.gtt->scratch_page);
- __free_page(dev_priv->mm.gtt->scratch_page);
+ put_page(dev_priv->gtt.scratch_page);
+ __free_page(dev_priv->gtt.scratch_page);
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -590,14 +707,14 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
return snb_gmch_ctl << 20;
}
-static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
return snb_gmch_ctl << 25; /* 32 MB units */
}
-static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
+static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
{
static const int stolen_decoder[] = {
0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
@@ -606,103 +723,127 @@ static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
return stolen_decoder[snb_gmch_ctl] << 20;
}
-int i915_gem_gtt_init(struct drm_device *dev)
+static int gen6_gmch_probe(struct drm_device *dev,
+ size_t *gtt_total,
+ size_t *stolen,
+ phys_addr_t *mappable_base,
+ unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
+ unsigned int gtt_size;
u16 snb_gmch_ctl;
int ret;
- /* On modern platforms we need not worry ourself with the legacy
- * hostbridge query stuff. Skip it entirely
- */
- if (INTEL_INFO(dev)->gen < 6) {
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
- if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
- return -EIO;
- }
+ *mappable_base = pci_resource_start(dev->pdev, 2);
+ *mappable_end = pci_resource_len(dev->pdev, 2);
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- intel_gmch_remove();
- return -ENODEV;
- }
- return 0;
+ /* 64/512MB is the current min/max we actually know of, but this is just
+ * a coarse sanity check.
+ */
+ if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
+ DRM_ERROR("Unknown GMADR size (%lx)\n",
+ dev_priv->gtt.mappable_end);
+ return -ENXIO;
}
- dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
- if (!dev_priv->mm.gtt)
- return -ENOMEM;
-
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
-#ifdef CONFIG_INTEL_IOMMU
- dev_priv->mm.gtt->needs_dmar = 1;
-#endif
+ if (IS_GEN7(dev))
+ *stolen = gen7_get_stolen_size(snb_gmch_ctl);
+ else
+ *stolen = gen6_get_stolen_size(snb_gmch_ctl);
+
+ *gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT;
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
- dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
-
- /* i9xx_setup */
- pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- dev_priv->mm.gtt->gtt_total_entries =
- gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
- if (INTEL_INFO(dev)->gen < 7)
- dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
- else
- dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
-
- dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
- /* 64/512MB is the current min/max we actually know of, but this is just a
- * coarse sanity check.
- */
- if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
- dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
- DRM_ERROR("Unknown GMADR entries (%d)\n",
- dev_priv->mm.gtt->gtt_mappable_entries);
- ret = -ENXIO;
- goto err_out;
+ dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
+ if (!dev_priv->gtt.gsm) {
+ DRM_ERROR("Failed to map the gtt page table\n");
+ return -ENOMEM;
}
ret = setup_scratch_page(dev);
- if (ret) {
+ if (ret)
DRM_ERROR("Scratch setup failed\n");
- goto err_out;
- }
- dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
- dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
- if (!dev_priv->mm.gtt->gtt) {
- DRM_ERROR("Failed to map the gtt page table\n");
- teardown_scratch_page(dev);
- ret = -ENOMEM;
- goto err_out;
+ dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
+ dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+
+ return ret;
+}
+
+static void gen6_gmch_remove(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ iounmap(dev_priv->gtt.gsm);
+ teardown_scratch_page(dev_priv->dev);
+}
+
+static int i915_gmch_probe(struct drm_device *dev,
+ size_t *gtt_total,
+ size_t *stolen,
+ phys_addr_t *mappable_base,
+ unsigned long *mappable_end)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ return -EIO;
}
- /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
- DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
- DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
- DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+ intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
+
+ dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
+ dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
+ dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
return 0;
+}
-err_out:
- kfree(dev_priv->mm.gtt);
- if (INTEL_INFO(dev)->gen < 6)
- intel_gmch_remove();
- return ret;
+static void i915_gmch_remove(struct drm_device *dev)
+{
+ intel_gmch_remove();
}
-void i915_gem_gtt_fini(struct drm_device *dev)
+int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- iounmap(dev_priv->mm.gtt->gtt);
- teardown_scratch_page(dev);
- if (INTEL_INFO(dev)->gen < 6)
- intel_gmch_remove();
- kfree(dev_priv->mm.gtt);
+ struct i915_gtt *gtt = &dev_priv->gtt;
+ unsigned long gtt_size;
+ int ret;
+
+ if (INTEL_INFO(dev)->gen <= 5) {
+ dev_priv->gtt.gtt_probe = i915_gmch_probe;
+ dev_priv->gtt.gtt_remove = i915_gmch_remove;
+ } else {
+ dev_priv->gtt.gtt_probe = gen6_gmch_probe;
+ dev_priv->gtt.gtt_remove = gen6_gmch_remove;
+ }
+
+ ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
+ &dev_priv->gtt.stolen_size,
+ &gtt->mappable_base,
+ &gtt->mappable_end);
+ if (ret)
+ return ret;
+
+ gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t);
+
+ /* GMADR is the PCI mmio aperture into the global GTT. */
+ DRM_INFO("Memory usable by graphics device = %zdM\n",
+ dev_priv->gtt.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
+ dev_priv->gtt.mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
+ dev_priv->gtt.stolen_size >> 20);
+
+ return 0;
}
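The net effect of the i915_gem_gtt.c rework above is that GTT setup becomes a small per-generation vtable hung off dev_priv->gtt: each probe reports the total and stolen sizes plus the mappable aperture, and installs matching insert/clear callbacks. Condensed from i915_gem_gtt_init() above (not a standalone excerpt):

	if (INTEL_INFO(dev)->gen <= 5) {
		gtt->gtt_probe = i915_gmch_probe;	/* legacy intel-gtt/GMCH path */
		gtt->gtt_remove = i915_gmch_remove;
	} else {
		gtt->gtt_probe = gen6_gmch_probe;	/* ioremap the GSM, write PTEs directly */
		gtt->gtt_remove = gen6_gmch_remove;
	}
	ret = gtt->gtt_probe(dev, &gtt->total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);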
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 8e91083b126f..69d97cbac13c 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -42,85 +42,73 @@
* for is a boon.
*/
-#define PTE_ADDRESS_MASK 0xfffff000
-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
-#define PTE_MAPPING_TYPE_MASK (3 << 1)
-#define PTE_VALID (1 << 0)
-
-/**
- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
- * a physical one
- * @dev: drm device
- * @offset: address to translate
- *
- * Some chip functions require allocations from stolen space and need the
- * physical address of the memory in question.
- */
-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev_priv->bridge_dev;
u32 base;
-#if 0
/* On the machines I have tested the Graphics Base of Stolen Memory
- * is unreliable, so compute the base by subtracting the stolen memory
- * from the Top of Low Usable DRAM which is where the BIOS places
- * the graphics stolen memory.
+ * is unreliable, so on those compute the base by subtracting the
+ * stolen memory from the Top of Low Usable DRAM which is where the
+ * BIOS places the graphics stolen memory.
+ *
+ * On gen2, the layout is slightly different with the Graphics Segment
+ * immediately following Top of Memory (or Top of Usable DRAM). Note
+ * it appears that TOUD is only reported by 865g, so we just use the
+ * top of memory as determined by the e820 probe.
+ *
+ * XXX gen2 requires an unavailable symbol and 945gm fails with
+ * its value of TOLUD.
*/
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- /* top 32bits are reserved = 0 */
+ base = 0;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ /* Read Base Data of Stolen Memory Register (BDSM) directly.
+ * Note that there is also a MCHBAR mirror at 0x1080c0 or
+ * we could use device 2:0x5c instead.
+ */
+ pci_read_config_dword(pdev, 0xB0, &base);
+ base &= ~4095; /* lower bits used for locking register */
+ } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(pdev, 0xA4, &base);
- } else {
- /* XXX presume 8xx is the same as i915 */
- pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
- }
-#else
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- u16 val;
- pci_read_config_word(pdev, 0xb0, &val);
- base = val >> 4 << 20;
- } else {
+#if 0
+ } else if (IS_GEN3(dev)) {
u8 val;
+ /* Stolen is immediately below Top of Low Usable DRAM */
pci_read_config_byte(pdev, 0x9c, &val);
base = val >> 3 << 27;
- }
- base -= dev_priv->mm.gtt->stolen_size;
+ base -= dev_priv->mm.gtt->stolen_size;
+ } else {
+ /* Stolen is immediately above Top of Memory */
+ base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
+ }
- return base + offset;
+ return base;
}
-static void i915_warn_stolen(struct drm_device *dev)
-{
- DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
- DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
-}
-
-static void i915_setup_compression(struct drm_device *dev, int size)
+static int i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
- unsigned long cfb_base;
- unsigned long ll_base = 0;
-
- /* Just in case the BIOS is doing something questionable. */
- intel_disable_fbc(dev);
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ /* Try to over-allocate to reduce reallocations and fragmentation */
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
+ size <<= 1, 4096, 0);
+ if (!compressed_fb)
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
+ size >>= 1, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb)
goto err;
- cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
- if (!cfb_base)
- goto err_fb;
-
- if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ else if (IS_GM45(dev)) {
+ I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ } else {
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
4096, 4096, 0);
if (compressed_llb)
@@ -129,73 +117,206 @@ static void i915_setup_compression(struct drm_device *dev, int size)
if (!compressed_llb)
goto err_fb;
- ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
- if (!ll_base)
- goto err_llb;
+ dev_priv->compressed_llb = compressed_llb;
+
+ I915_WRITE(FBC_CFB_BASE,
+ dev_priv->mm.stolen_base + compressed_fb->start);
+ I915_WRITE(FBC_LL_BASE,
+ dev_priv->mm.stolen_base + compressed_llb->start);
}
+ dev_priv->compressed_fb = compressed_fb;
dev_priv->cfb_size = size;
- dev_priv->compressed_fb = compressed_fb;
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
- else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
- } else {
- I915_WRITE(FBC_CFB_BASE, cfb_base);
- I915_WRITE(FBC_LL_BASE, ll_base);
- dev_priv->compressed_llb = compressed_llb;
- }
+ DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+ size);
- DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
- cfb_base, ll_base, size >> 20);
- return;
+ return 0;
-err_llb:
- drm_mm_put_block(compressed_llb);
err_fb:
drm_mm_put_block(compressed_fb);
err:
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- i915_warn_stolen(dev);
+ return -ENOSPC;
+}
+
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->mm.stolen_base == 0)
+ return -ENODEV;
+
+ if (size < dev_priv->cfb_size)
+ return 0;
+
+ /* Release any current block */
+ i915_gem_stolen_cleanup_compression(dev);
+
+ return i915_setup_compression(dev, size);
}
-static void i915_cleanup_compression(struct drm_device *dev)
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- drm_mm_put_block(dev_priv->compressed_fb);
+ if (dev_priv->cfb_size == 0)
+ return;
+
+ if (dev_priv->compressed_fb)
+ drm_mm_put_block(dev_priv->compressed_fb);
+
if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb);
+
+ dev_priv->cfb_size = 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
- if (I915_HAS_FBC(dev) && i915_powersave)
- i915_cleanup_compression(dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ i915_gem_stolen_cleanup_compression(dev);
+ drm_mm_takedown(&dev_priv->mm.stolen);
}
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
+
+ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+ if (dev_priv->mm.stolen_base == 0)
+ return 0;
+
+ DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
+ dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
/* Basic memrange allocator for stolen space */
- drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+ drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
+
+ return 0;
+}
- /* Try to set up FBC with a reasonable compressed buffer size */
- if (I915_HAS_FBC(dev) && i915_powersave) {
- int cfb_size;
+static struct sg_table *
+i915_pages_create_for_stolen(struct drm_device *dev,
+ u32 offset, u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct sg_table *st;
+ struct scatterlist *sg;
+
+ DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
+ BUG_ON(offset > dev_priv->gtt.stolen_size - size);
- /* Leave 1M for line length buffer & misc. */
+ /* We hide that we have no struct page backing our stolen object
+ * by wrapping the contiguous physical allocation with a fake
+ * dma mapping in a single scatterlist.
+ */
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return NULL;
- /* Try to get a 32M buffer... */
- if (prealloc_size > (36*1024*1024))
- cfb_size = 32*1024*1024;
- else /* fall back to 7/8 of the stolen space */
- cfb_size = prealloc_size * 7 / 8;
- i915_setup_compression(dev, cfb_size);
+ if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+ kfree(st);
+ return NULL;
}
- return 0;
+ sg = st->sgl;
+ sg->offset = offset;
+ sg->length = size;
+
+ sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
+ sg_dma_len(sg) = size;
+
+ return st;
+}
+
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+{
+ BUG();
+ return -EINVAL;
+}
+
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+{
+ /* Should only be called during free */
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
+ .get_pages = i915_gem_object_get_pages_stolen,
+ .put_pages = i915_gem_object_put_pages_stolen,
+};
+
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct drm_device *dev,
+ struct drm_mm_node *stolen)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc(dev);
+ if (obj == NULL)
+ return NULL;
+
+ if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
+ goto cleanup;
+
+ i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+
+ obj->pages = i915_pages_create_for_stolen(dev,
+ stolen->start, stolen->size);
+ if (obj->pages == NULL)
+ goto cleanup;
+
+ obj->has_dma_mapping = true;
+ obj->pages_pin_count = 1;
+ obj->stolen = stolen;
+
+ obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->cache_level = I915_CACHE_NONE;
+
+ return obj;
+
+cleanup:
+ i915_gem_object_free(obj);
+ return NULL;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node *stolen;
+
+ if (dev_priv->mm.stolen_base == 0)
+ return NULL;
+
+ DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
+ if (size == 0)
+ return NULL;
+
+ stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ if (stolen)
+ stolen = drm_mm_get_block(stolen, size, 4096);
+ if (stolen == NULL)
+ return NULL;
+
+ obj = _i915_gem_object_create_stolen(dev, stolen);
+ if (obj)
+ return obj;
+
+ drm_mm_put_block(stolen);
+ return NULL;
+}
+
+void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+ if (obj->stolen) {
+ drm_mm_put_block(obj->stolen);
+ obj->stolen = NULL;
+ }
}
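The stolen-memory objects added above have no struct page backing, so the driver fakes a DMA mapping with a single-entry scatterlist spanning the contiguous stolen range; has_dma_mapping and a pinned page count keep the rest of GEM from ever touching per-page state. The essential steps, condensed from i915_pages_create_for_stolen() with error handling omitted:

	sg_alloc_table(st, 1, GFP_KERNEL);	/* one segment stands in for real pages */
	st->sgl->offset = offset;
	st->sgl->length = size;
	sg_dma_address(st->sgl) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(st->sgl) = size;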
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index cedbfd7b3dfa..abcba2f5a788 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -272,18 +272,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return false;
}
- /*
- * Previous chips need to be aligned to the size of the smallest
- * fence register that can contain the object.
- */
- if (INTEL_INFO(obj->base.dev)->gen == 3)
- size = 1024*1024;
- else
- size = 512*1024;
-
- while (size < obj->base.size)
- size <<= 1;
-
+ size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
if (obj->gtt_space->size != size)
return false;
@@ -368,15 +357,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj->map_and_fenceable =
obj->gtt_space == NULL ||
- (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+ (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
- i915_gem_get_unfenced_gtt_alignment(dev,
- obj->base.size,
- args->tiling_mode);
+ i915_gem_get_gtt_alignment(dev, obj->base.size,
+ args->tiling_mode,
+ false);
if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}
@@ -396,6 +385,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* we have to maintain this existing ABI... */
args->stride = obj->stride;
args->tiling_mode = obj->tiling_mode;
+
+ /* Try to preallocate memory required to save swizzling on put-pages */
+ if (i915_gem_object_needs_bit17_swizzle(obj)) {
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+ sizeof(long), GFP_KERNEL);
+ }
+ } else {
+ kfree(obj->bit_17);
+ obj->bit_17 = NULL;
+ }
+
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
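
For reference, the open-coded loop removed from i915_gem_object_fence_ok() rounded the object size up to the next power-of-two fence size with a per-generation minimum; something along these lines is presumably what the shared i915_gem_get_gtt_size() helper computes for the pre-965 tiled case. A standalone sketch of that rounding (fence_size() and the example value are illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint32_t fence_size(int gen, uint32_t obj_size)
{
	/* gen3 fences start at 1 MiB, earlier gens at 512 KiB */
	uint32_t size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

	while (size < obj_size)
		size <<= 1;		/* grow by powers of two */

	return size;
}

int main(void)
{
	/* a 3 MiB gen3 object needs a 4 MiB fence */
	printf("%u\n", fence_size(3, 3 * 1024 * 1024));	/* -> 4194304 */
	return 0;
}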
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fe843389c7b4..2cd97d1cc920 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -287,6 +287,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ /* HPD irq before everything is fully set up. */
+ if (!dev_priv->enable_hotplug_processing)
+ return;
+
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
@@ -300,9 +304,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev);
}
-/* defined intel_pm.c */
-extern spinlock_t mchdev_lock;
-
static void ironlake_handle_rps_change(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -355,8 +356,8 @@ static void notify_ring(struct drm_device *dev,
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer,
+ dev_priv->gpu_error.hangcheck_count = 0;
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
}
@@ -524,6 +525,20 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
+static void gmbus_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
+static void dp_aux_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -533,7 +548,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
unsigned long irqflags;
int pipe;
u32 pipe_stats[I915_MAX_PIPES];
- bool blc_event;
atomic_inc(&dev_priv->irq_received);
@@ -590,8 +604,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_READ(PORT_HOTPLUG_STAT);
}
- if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
- blc_event = true;
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev);
if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
@@ -618,8 +632,11 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
(pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
+ if (pch_iir & SDE_AUX_MASK)
+ dp_aux_irq_handler(dev);
+
if (pch_iir & SDE_GMBUS)
- DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+ gmbus_irq_handler(dev);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -662,10 +679,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
SDE_AUDIO_POWER_SHIFT_CPT);
if (pch_iir & SDE_AUX_MASK_CPT)
- DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+ dp_aux_irq_handler(dev);
if (pch_iir & SDE_GMBUS_CPT)
- DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+ gmbus_irq_handler(dev);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -703,6 +720,9 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
de_iir = I915_READ(DEIIR);
if (de_iir) {
+ if (de_iir & DE_AUX_CHANNEL_A_IVB)
+ dp_aux_irq_handler(dev);
+
if (de_iir & DE_GSE_IVB)
intel_opregion_gse_intr(dev);
@@ -758,7 +778,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+ u32 de_iir, gt_iir, de_ier, pm_iir;
atomic_inc(&dev_priv->irq_received);
@@ -769,11 +789,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
- pch_iir = I915_READ(SDEIIR);
pm_iir = I915_READ(GEN6_PMIIR);
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
- (!IS_GEN6(dev) || pm_iir == 0))
+ if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
goto done;
ret = IRQ_HANDLED;
@@ -783,6 +801,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
else
snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ if (de_iir & DE_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
@@ -804,10 +825,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
if (HAS_PCH_CPT(dev))
cpt_irq_handler(dev, pch_iir);
else
ibx_irq_handler(dev, pch_iir);
+
+ /* should clear PCH hotplug event before clearing CPU irq */
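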
+ I915_WRITE(SDEIIR, pch_iir);
}
if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
@@ -816,8 +842,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(DEIIR, de_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -838,23 +862,60 @@ done:
*/
static void i915_error_work_func(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- error_work);
+ struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
+ work);
+ drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
+ gpu_error);
struct drm_device *dev = dev_priv->dev;
+ struct intel_ring_buffer *ring;
char *error_event[] = { "ERROR=1", NULL };
char *reset_event[] = { "RESET=1", NULL };
char *reset_done_event[] = { "ERROR=0", NULL };
+ int i, ret;
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
- if (atomic_read(&dev_priv->mm.wedged)) {
+ /*
+ * Note that there's only one work item which does gpu resets, so we
+ * need not worry about concurrent gpu resets potentially incrementing
+ * error->reset_counter twice. We only need to take care of another
+ * racing irq/hangcheck declaring the gpu dead for a second time. A
+ * quick check for that is good enough: schedule_work ensures the
+ * correct ordering between hang detection and this work item, and since
+ * the reset in-progress bit is only ever set by code outside of this
+ * work we don't need to worry about any other races.
+ */
+ if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
- if (!i915_reset(dev)) {
- atomic_set(&dev_priv->mm.wedged, 0);
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+ reset_event);
+
+ ret = i915_reset(dev);
+
+ if (ret == 0) {
+ /*
+ * After all the gem state is reset, increment the reset
+ * counter and wake up everyone waiting for the reset to
+ * complete.
+ *
+ * Since unlock operations are a one-sided barrier only,
+ * we need to insert a barrier here to order any seqno
+ * updates before
+ * the counter increment.
+ */
+ smp_mb__before_atomic_inc();
+ atomic_inc(&dev_priv->gpu_error.reset_counter);
+
+ kobject_uevent_env(&dev->primary->kdev.kobj,
+ KOBJ_CHANGE, reset_done_event);
+ } else {
+ atomic_set(&error->reset_counter, I915_WEDGED);
}
- complete_all(&dev_priv->error_completion);
+
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
+
+ wake_up_all(&dev_priv->gpu_error.reset_queue);
}
}
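
A sketch of the reset_counter protocol the comment above describes, with the bit layout assumed purely for illustration (a low in-progress bit plus a terminal-wedge flag, as the i915_reset_in_progress()/i915_terminally_wedged() helper names suggest): hang detection sets the flag, and a single successful reset increments the counter, which clears the flag and hands waiters a new value to compare against.

#include <stdint.h>
#include <stdio.h>

#define RESET_IN_PROGRESS	0x1u		/* assumed: low bit of the counter */
#define WEDGED			0x80000000u	/* assumed: terminal-failure flag  */

static int reset_in_progress(uint32_t c) { return (c & RESET_IN_PROGRESS) != 0; }
static int terminally_wedged(uint32_t c) { return (c & WEDGED) != 0; }

int main(void)
{
	uint32_t counter = 0;

	counter |= RESET_IN_PROGRESS;		/* hang detected */

	if (reset_in_progress(counter) && !terminally_wedged(counter))
		counter += 1;			/* reset succeeded: clears the
						 * flag and bumps the counter */

	printf("in progress after reset: %d (counter=%u)\n",
	       reset_in_progress(counter), counter);
	return 0;
}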
@@ -915,7 +976,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
- if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
+ if (reloc_offset < dev_priv->gtt.mappable_end &&
src->has_global_gtt_mapping) {
void __iomem *s;
@@ -924,10 +985,18 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
* captures what the GPU read.
*/
- s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
+ } else if (src->stolen) {
+ unsigned long offset;
+
+ offset = dev_priv->mm.stolen_base;
+ offset += src->stolen->start;
+ offset += i << PAGE_SHIFT;
+
+ memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
} else {
struct page *page;
void *s;
@@ -1074,6 +1143,8 @@ static void i915_gem_record_fences(struct drm_device *dev,
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
+ default:
+ BUG();
}
}
@@ -1222,9 +1293,9 @@ static void i915_capture_error_state(struct drm_device *dev)
unsigned long flags;
int i, pipe;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- error = dev_priv->first_error;
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
return;
@@ -1235,7 +1306,8 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
}
- DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
+ DRM_INFO("capturing error event; look for more information in"
+ "/sys/kernel/debug/dri/%d/i915_error_state\n",
dev->primary->index);
kref_init(&error->ref);
@@ -1318,12 +1390,12 @@ static void i915_capture_error_state(struct drm_device *dev)
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- if (dev_priv->first_error == NULL) {
- dev_priv->first_error = error;
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ if (dev_priv->gpu_error.first_error == NULL) {
+ dev_priv->gpu_error.first_error = error;
error = NULL;
}
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
i915_error_state_free(&error->ref);
@@ -1335,10 +1407,10 @@ void i915_destroy_error_state(struct drm_device *dev)
struct drm_i915_error_state *error;
unsigned long flags;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- error = dev_priv->first_error;
- dev_priv->first_error = NULL;
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ dev_priv->gpu_error.first_error = NULL;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
kref_put(&error->ref, i915_error_state_free);
@@ -1459,17 +1531,18 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
i915_report_and_clear_eir(dev);
if (wedged) {
- INIT_COMPLETION(dev_priv->error_completion);
- atomic_set(&dev_priv->mm.wedged, 1);
+ atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+ &dev_priv->gpu_error.reset_counter);
/*
- * Wakeup waiting processes so they don't hang
+ * Wakeup waiting processes so that the reset work item
+ * doesn't deadlock trying to grab various locks.
*/
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
}
- queue_work(dev_priv->wq, &dev_priv->error_work);
+ queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -1700,7 +1773,7 @@ static bool i915_hangcheck_hung(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->hangcheck_count++ > 1) {
+ if (dev_priv->gpu_error.hangcheck_count++ > 1) {
bool hung = true;
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
@@ -1759,25 +1832,29 @@ void i915_hangcheck_elapsed(unsigned long data)
goto repeat;
}
- dev_priv->hangcheck_count = 0;
+ dev_priv->gpu_error.hangcheck_count = 0;
return;
}
i915_get_extra_instdone(dev, instdone);
- if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
- memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
+ if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
+ sizeof(acthd)) == 0 &&
+ memcmp(dev_priv->gpu_error.prev_instdone, instdone,
+ sizeof(instdone)) == 0) {
if (i915_hangcheck_hung(dev))
return;
} else {
- dev_priv->hangcheck_count = 0;
+ dev_priv->gpu_error.hangcheck_count = 0;
- memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
- memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
+ memcpy(dev_priv->gpu_error.last_acthd, acthd,
+ sizeof(acthd));
+ memcpy(dev_priv->gpu_error.prev_instdone, instdone,
+ sizeof(instdone));
}
repeat:
/* Reset timer in case chip hangs without another request being added */
- mod_timer(&dev_priv->hangcheck_timer,
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
@@ -1847,7 +1924,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
* This register is the same on all known PCH chips.
*/
-static void ironlake_enable_pch_hotplug(struct drm_device *dev)
+static void ibx_enable_hotplug(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug;
@@ -1860,14 +1937,36 @@ static void ironlake_enable_pch_hotplug(struct drm_device *dev)
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
+static void ibx_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 mask;
+
+ if (HAS_PCH_IBX(dev))
+ mask = SDE_HOTPLUG_MASK |
+ SDE_GMBUS |
+ SDE_AUX_MASK;
+ else
+ mask = SDE_HOTPLUG_MASK_CPT |
+ SDE_GMBUS_CPT |
+ SDE_AUX_MASK_CPT;
+
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ I915_WRITE(SDEIMR, ~mask);
+ I915_WRITE(SDEIER, mask);
+ POSTING_READ(SDEIER);
+
+ ibx_enable_hotplug(dev);
+}
+
static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
- DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+ DE_AUX_CHANNEL_A;
u32 render_irqs;
- u32 hotplug_mask;
dev_priv->irq_mask = ~display_mask;
@@ -1895,27 +1994,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
- if (HAS_PCH_CPT(dev)) {
- hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
- SDE_PORTB_HOTPLUG_CPT |
- SDE_PORTC_HOTPLUG_CPT |
- SDE_PORTD_HOTPLUG_CPT);
- } else {
- hotplug_mask = (SDE_CRT_HOTPLUG |
- SDE_PORTB_HOTPLUG |
- SDE_PORTC_HOTPLUG |
- SDE_PORTD_HOTPLUG |
- SDE_AUX_MASK);
- }
-
- dev_priv->pch_irq_mask = ~hotplug_mask;
-
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
- I915_WRITE(SDEIER, hotplug_mask);
- POSTING_READ(SDEIER);
-
- ironlake_enable_pch_hotplug(dev);
+ ibx_irq_postinstall(dev);
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
@@ -1935,9 +2014,9 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
- DE_PLANEA_FLIP_DONE_IVB;
+ DE_PLANEA_FLIP_DONE_IVB |
+ DE_AUX_CHANNEL_A_IVB;
u32 render_irqs;
- u32 hotplug_mask;
dev_priv->irq_mask = ~display_mask;
@@ -1961,18 +2040,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
- hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
- SDE_PORTB_HOTPLUG_CPT |
- SDE_PORTC_HOTPLUG_CPT |
- SDE_PORTD_HOTPLUG_CPT);
- dev_priv->pch_irq_mask = ~hotplug_mask;
-
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
- I915_WRITE(SDEIER, hotplug_mask);
- POSTING_READ(SDEIER);
-
- ironlake_enable_pch_hotplug(dev);
+ ibx_irq_postinstall(dev);
return 0;
}
@@ -1981,7 +2049,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
- u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u32 render_irqs;
u16 msid;
@@ -2010,6 +2077,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
msid |= (1<<14);
pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2018,6 +2088,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
POSTING_READ(VLV_IER);
i915_enable_pipestat(dev_priv, 0, pipestat_enable);
+ i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, 1, pipestat_enable);
I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2038,13 +2109,22 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
#endif
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+
+ return 0;
+}
+
+static void valleyview_hpd_irq_setup(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
/* Note HDMI and DP share bits */
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
@@ -2055,8 +2135,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
}
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-
- return 0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -2286,6 +2364,9 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
@@ -2296,15 +2377,25 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
+ intel_opregion_enable_asle(dev);
+
+ return 0;
+}
+
+static void i915_hpd_irq_setup(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en;
+
if (I915_HAS_HOTPLUG(dev)) {
- u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+ if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
@@ -2318,10 +2409,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
-
- intel_opregion_enable_asle(dev);
-
- return 0;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
@@ -2481,7 +2568,6 @@ static void i965_irq_preinstall(struct drm_device * dev)
static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 hotplug_en;
u32 enable_mask;
u32 error_mask;
@@ -2502,6 +2588,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
+ i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
/*
* Enable some error detection, note the instruction error mask
@@ -2522,14 +2609,27 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+ intel_opregion_enable_asle(dev);
+
+ return 0;
+}
+
+static void i965_hpd_irq_setup(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en;
+
/* Note HDMI and DP share hotplug bits */
hotplug_en = 0;
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (IS_G4X(dev)) {
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
@@ -2556,10 +2656,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
/* Ignore TV since it's buggy */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-
- intel_opregion_enable_asle(dev);
-
- return 0;
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -2655,6 +2751,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(dev);
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev);
+
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
* set while we were handling the existing iir bits, then
@@ -2706,10 +2805,16 @@ void intel_irq_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ setup_timer(&dev_priv->gpu_error.hangcheck_timer,
+ i915_hangcheck_elapsed,
+ (unsigned long) dev);
+
+ pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
@@ -2730,7 +2835,8 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = valleyview_irq_uninstall;
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
- } else if (IS_IVYBRIDGE(dev)) {
+ dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2738,14 +2844,6 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
- } else if (IS_HASWELL(dev)) {
- /* Share interrupts handling with IVB */
- dev->driver->irq_handler = ivybridge_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_preinstall;
- dev->driver->irq_postinstall = ivybridge_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
- dev->driver->enable_vblank = ivybridge_enable_vblank;
- dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2764,13 +2862,23 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
+ dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
}
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
}
+
+void intel_hpd_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+}
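
The hotplug rework above moves HPD enabling out of the *_irq_postinstall() functions and behind a per-platform dev_priv->display.hpd_irq_setup hook that intel_hpd_init() invokes once the driver is ready to process hotplug events. A minimal user-space sketch of that hook pattern; all names below are stand-ins, not the driver's real types.

#include <stdio.h>

struct display_funcs {
	void (*hpd_irq_setup)(void *dev);	/* per-platform callback */
};

static void i965_hpd_setup(void *dev) { (void)dev; printf("i965 hotplug enable\n"); }
static void vlv_hpd_setup(void *dev)  { (void)dev; printf("vlv hotplug enable\n"); }

static void hpd_init(struct display_funcs *display, void *dev)
{
	if (display->hpd_irq_setup)		/* only if the platform has HPD */
		display->hpd_irq_setup(dev);
}

int main(void)
{
	struct display_funcs display = { .hpd_irq_setup = i965_hpd_setup };

	hpd_init(&display, NULL);		/* called after full init */

	display.hpd_irq_setup = vlv_hpd_setup;
	hpd_init(&display, NULL);
	return 0;
}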
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 59afb7eb6db6..527b664d3434 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -141,8 +141,15 @@
#define VGA_MSR_MEM_EN (1<<1)
#define VGA_MSR_CGA_MODE (1<<0)
-#define VGA_SR_INDEX 0x3c4
-#define VGA_SR_DATA 0x3c5
+/*
+ * SR01 is the only VGA register touched on non-UMS setups.
+ * VLV doesn't do UMS, so the sequencer index/data registers
+ * are the only VGA registers which need to include
+ * display_mmio_offset.
+ */
+#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4)
+#define SR01 1
+#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5)
#define VGA_AR_INDEX 0x3c0
#define VGA_AR_VID_EN (1<<5)
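
Most of the register changes that follow apply the same convention: display register macros are expressed relative to dev_priv->info->display_mmio_offset, so platforms with a relocated display block reuse the unmodified macros (VLV's block sits at VLV_DISPLAY_BASE, 0x180000, judging by the VLV_IIR change below: 0x1820a4 = 0x180000 + 0x20a4). A standalone sketch of the idea, with the macro parameterised here only so the example compiles on its own; in the header, dev_priv is picked up from the enclosing scope.

#include <stdint.h>
#include <stdio.h>

struct intel_device_info { uint32_t display_mmio_offset; };
struct drm_i915_private  { const struct intel_device_info *info; };

#define VGA_SR_INDEX(dev_priv) ((dev_priv)->info->display_mmio_offset + 0x3c4)

int main(void)
{
	const struct intel_device_info gen4 = { .display_mmio_offset = 0 };
	const struct intel_device_info vlv  = { .display_mmio_offset = 0x180000 };
	struct drm_i915_private a = { &gen4 }, b = { &vlv };

	/* same macro, two different MMIO locations */
	printf("%#x %#x\n", VGA_SR_INDEX(&a), VGA_SR_INDEX(&b));
	return 0;
}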
@@ -301,6 +308,7 @@
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_QW_WRITE (1<<14)
@@ -335,17 +343,19 @@
* 0x801c/3c: core clock bits
* 0x8048/68: low pass filter coefficients
* 0x8100: fast clock controls
+ *
+ * DPIO is VLV only.
*/
-#define DPIO_PKT 0x2100
+#define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100)
#define DPIO_RID (0<<24)
#define DPIO_OP_WRITE (1<<16)
#define DPIO_OP_READ (0<<16)
#define DPIO_PORTID (0x12<<8)
#define DPIO_BYTE (0xf<<4)
#define DPIO_BUSY (1<<0) /* status only */
-#define DPIO_DATA 0x2104
-#define DPIO_REG 0x2108
-#define DPIO_CTL 0x2110
+#define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104)
+#define DPIO_REG (VLV_DISPLAY_BASE + 0x2108)
+#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
#define DPIO_SFR_BYPASS (1<<1)
@@ -556,13 +566,13 @@
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
-#define VLV_GUNIT_CLOCK_GATE 0x182060
+#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
#define GCFG_DIS (1<<8)
-#define VLV_IIR_RW 0x182084
-#define VLV_IER 0x1820a0
-#define VLV_IIR 0x1820a4
-#define VLV_IMR 0x1820a8
-#define VLV_ISR 0x1820ac
+#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
+#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
+#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
+#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
+#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@@ -735,6 +745,7 @@
#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_VS_REF_CNT_FFME (1 << 15)
#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
@@ -921,8 +932,8 @@
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
-#define _DPLL_A 0x06014
-#define _DPLL_B 0x06018
+#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
@@ -943,23 +954,6 @@
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
-#define SRX_INDEX 0x3c4
-#define SRX_DATA 0x3c5
-#define SR01 1
-#define SR01_SCREEN_OFF (1<<5)
-
-#define PPCR 0x61204
-#define PPCR_ON (1<<0)
-
-#define DVOB 0x61140
-#define DVOB_ON (1<<31)
-#define DVOC 0x61160
-#define DVOC_ON (1<<31)
-#define LVDS 0x61180
-#define LVDS_ON (1<<31)
-
-/* Scratch pad debug 0 reg:
- */
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -998,7 +992,7 @@
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define _DPLL_A_MD 0x0601c /* 965+ only */
+#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
@@ -1035,7 +1029,7 @@
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-#define _DPLL_B_MD 0x06020 /* 965+ only */
+#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
#define _FPA0 0x06040
@@ -1178,15 +1172,15 @@
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
-#define FW_BLC_SELF_VLV 0x6500
+#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500)
#define FW_CSPWRDWNEN (1<<15)
/*
* Palette regs
*/
-#define _PALETTE_A 0x0a000
-#define _PALETTE_B 0x0a800
+#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000)
+#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800)
#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
/* MCH MMIO space */
@@ -1242,6 +1236,10 @@
#define MAD_DIMM_A_SIZE_SHIFT 0
#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
+/** snb MCH registers for priority tuning */
+#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10)
+#define MCH_SSKPD_WM0_MASK 0x3f
+#define MCH_SSKPD_WM0_VAL 0xc
/* Clocking configuration register */
#define CLKCFG 0x10c00
@@ -1551,26 +1549,26 @@
*/
/* Pipe A timing regs */
-#define _HTOTAL_A 0x60000
-#define _HBLANK_A 0x60004
-#define _HSYNC_A 0x60008
-#define _VTOTAL_A 0x6000c
-#define _VBLANK_A 0x60010
-#define _VSYNC_A 0x60014
-#define _PIPEASRC 0x6001c
-#define _BCLRPAT_A 0x60020
-#define _VSYNCSHIFT_A 0x60028
+#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
+#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
+#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008)
+#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c)
+#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010)
+#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014)
+#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c)
+#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020)
+#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028)
/* Pipe B timing regs */
-#define _HTOTAL_B 0x61000
-#define _HBLANK_B 0x61004
-#define _HSYNC_B 0x61008
-#define _VTOTAL_B 0x6100c
-#define _VBLANK_B 0x61010
-#define _VSYNC_B 0x61014
-#define _PIPEBSRC 0x6101c
-#define _BCLRPAT_B 0x61020
-#define _VSYNCSHIFT_B 0x61028
+#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000)
+#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004)
+#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008)
+#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c)
+#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010)
+#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014)
+#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c)
+#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
+#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
@@ -1631,13 +1629,10 @@
/* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN 0x61110
-#define HDMIB_HOTPLUG_INT_EN (1 << 29)
-#define DPB_HOTPLUG_INT_EN (1 << 29)
-#define HDMIC_HOTPLUG_INT_EN (1 << 28)
-#define DPC_HOTPLUG_INT_EN (1 << 28)
-#define HDMID_HOTPLUG_INT_EN (1 << 27)
-#define DPD_HOTPLUG_INT_EN (1 << 27)
+#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110)
+#define PORTB_HOTPLUG_INT_EN (1 << 29)
+#define PORTC_HOTPLUG_INT_EN (1 << 28)
+#define PORTD_HOTPLUG_INT_EN (1 << 27)
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
@@ -1658,21 +1653,14 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
-#define PORT_HOTPLUG_STAT 0x61114
+#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
/* HDMI/DP bits are gen4+ */
-#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
-#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define DPD_HOTPLUG_LIVE_STATUS (1 << 27)
-#define DPD_HOTPLUG_INT_STATUS (3 << 21)
-#define DPC_HOTPLUG_INT_STATUS (3 << 19)
-#define DPB_HOTPLUG_INT_STATUS (3 << 17)
-/* HDMI bits are shared with the DP bits */
-#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29)
-#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27)
-#define HDMID_HOTPLUG_INT_STATUS (3 << 21)
-#define HDMIC_HOTPLUG_INT_STATUS (3 << 19)
-#define HDMIB_HOTPLUG_INT_STATUS (3 << 17)
+#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
+#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
@@ -1877,7 +1865,7 @@
#define PP_DIVISOR 0x61210
/* Panel fitting */
-#define PFIT_CONTROL 0x61230
+#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230)
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
@@ -1895,9 +1883,7 @@
#define PFIT_SCALING_PROGRAMMED (1 << 26)
#define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26)
-#define PFIT_PGM_RATIOS 0x61234
-#define PFIT_VERT_SCALE_MASK 0xfff00000
-#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234)
/* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000
@@ -1909,7 +1895,7 @@
#define PFIT_HORIZ_SCALE_SHIFT_965 0
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
-#define PFIT_AUTO_RATIOS 0x61238
+#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
/* Backlight control */
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
@@ -2639,10 +2625,10 @@
/* Display & cursor control */
/* Pipe A */
-#define _PIPEADSL 0x70000
+#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000)
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
-#define _PIPEACONF 0x70008
+#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008)
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
@@ -2671,18 +2657,19 @@
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
-#define PIPECONF_BPP_MASK (0x000000e0)
-#define PIPECONF_BPP_8 (0<<5)
-#define PIPECONF_BPP_10 (1<<5)
-#define PIPECONF_BPP_6 (2<<5)
-#define PIPECONF_BPP_12 (3<<5)
+#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
+#define PIPECONF_BPC_MASK (0x7 << 5)
+#define PIPECONF_8BPC (0<<5)
+#define PIPECONF_10BPC (1<<5)
+#define PIPECONF_6BPC (2<<5)
+#define PIPECONF_12BPC (3<<5)
#define PIPECONF_DITHER_EN (1<<4)
#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
#define PIPECONF_DITHER_TYPE_SP (0<<2)
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
-#define _PIPEASTAT 0x70024
+#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024)
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
@@ -2693,7 +2680,7 @@
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
-#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
@@ -2703,7 +2690,7 @@
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
-#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
+#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
@@ -2719,11 +2706,6 @@
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
-#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
-#define PIPE_8BPC (0 << 5)
-#define PIPE_10BPC (1 << 5)
-#define PIPE_6BPC (2 << 5)
-#define PIPE_12BPC (3 << 5)
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
@@ -2732,7 +2714,7 @@
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
-#define VLV_DPFLIPSTAT 0x70028
+#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
@@ -2746,7 +2728,7 @@
#define SPRITEA_FLIPDONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)
-#define DPINVGTT 0x7002c /* VLV only */
+#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
#define CURSORB_INVALID_GTT_INT_EN (1<<23)
#define CURSORA_INVALID_GTT_INT_EN (1<<22)
#define SPRITED_INVALID_GTT_INT_EN (1<<21)
@@ -2774,7 +2756,7 @@
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
-#define DSPFW1 0x70034
+#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
@@ -2782,11 +2764,11 @@
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW_PLANEB_MASK (0x7f<<8)
#define DSPFW_PLANEA_MASK (0x7f)
-#define DSPFW2 0x70038
+#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038)
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_PLANEC_MASK (0x7f)
-#define DSPFW3 0x7003c
+#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c)
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
@@ -2798,13 +2780,13 @@
/* drain latency register values*/
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_16 16
-#define VLV_DDL1 0x70050
+#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050)
#define DDL_CURSORA_PRECISION_32 (1<<31)
#define DDL_CURSORA_PRECISION_16 (0<<31)
#define DDL_CURSORA_SHIFT 24
#define DDL_PLANEA_PRECISION_32 (1<<7)
#define DDL_PLANEA_PRECISION_16 (0<<7)
-#define VLV_DDL2 0x70054
+#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
#define DDL_CURSORB_PRECISION_32 (1<<31)
#define DDL_CURSORB_PRECISION_16 (0<<31)
#define DDL_CURSORB_SHIFT 24
@@ -2948,10 +2930,10 @@
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
-#define _PIPEAFRAMEHIGH 0x70040
+#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040)
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
-#define _PIPEAFRAMEPIXEL 0x70044
+#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044)
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
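
The comment retained above documents the split frame counter: a 16-bit high half in _PIPEAFRAMEHIGH and an 8-bit low half in bits 31:24 of _PIPEAFRAMEPIXEL, read high/low/high and retried until the high half is stable. A self-contained sketch with a stubbed MMIO read; read_reg() and the demo value are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define PIPEAFRAMEHIGH   0x70040
#define PIPEAFRAMEPIXEL  0x70044

static uint32_t fake_frame = 0x01234;	/* demo frame number for the stub */

static uint32_t read_reg(uint32_t reg)	/* stand-in MMIO accessor */
{
	if (reg == PIPEAFRAMEHIGH)
		return (fake_frame >> 8) & 0xffff;	/* PIPE_FRAME_HIGH */
	return ((fake_frame & 0xff) << 24) | 0xabcd;	/* frame low | pixel */
}

static uint32_t read_frame_count(void)
{
	uint32_t high1, high2, low;

	do {
		high1 = read_reg(PIPEAFRAMEHIGH) & 0xffff;
		low   = (read_reg(PIPEAFRAMEPIXEL) >> 24) & 0xff;
		high2 = read_reg(PIPEAFRAMEHIGH) & 0xffff;
	} while (high1 != high2);	/* retry if the high half rolled over */

	return (high1 << 8) | low;
}

int main(void)
{
	printf("frame %#x\n", read_frame_count());	/* -> 0x1234 */
	return 0;
}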
@@ -2962,11 +2944,12 @@
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
/* Cursor A & B regs */
-#define _CURACNTR 0x70080
+#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080)
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
#define CURSOR_STRIDE_MASK 0x30000000
+#define CURSOR_PIPE_CSC_ENABLE (1<<24)
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
@@ -2983,16 +2966,16 @@
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define _CURABASE 0x70084
-#define _CURAPOS 0x70088
+#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
+#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
-#define _CURBCNTR 0x700c0
-#define _CURBBASE 0x700c4
-#define _CURBPOS 0x700c8
+#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0)
+#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4)
+#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8)
#define _CURBCNTR_IVB 0x71080
#define _CURBBASE_IVB 0x71084
@@ -3007,7 +2990,7 @@
#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
/* Display A control */
-#define _DSPACNTR 0x70180
+#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180)
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
@@ -3028,6 +3011,7 @@
#define DISPPLANE_RGBA888 (0xf<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
+#define DISPPLANE_PIPE_CSC_ENABLE (1<<24)
#define DISPPLANE_SEL_PIPE_SHIFT 24
#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE_A 0
@@ -3040,14 +3024,14 @@
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
-#define _DSPAADDR 0x70184
-#define _DSPASTRIDE 0x70188
-#define _DSPAPOS 0x7018C /* reserved */
-#define _DSPASIZE 0x70190
-#define _DSPASURF 0x7019C /* 965+ only */
-#define _DSPATILEOFF 0x701A4 /* 965+ only */
-#define _DSPAOFFSET 0x701A4 /* HSW */
-#define _DSPASURFLIVE 0x701AC
+#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184)
+#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188)
+#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
+#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190)
+#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
+#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
+#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
+#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC)
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3068,44 +3052,44 @@
(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
/* VBIOS flags */
-#define SWF00 0x71410
-#define SWF01 0x71414
-#define SWF02 0x71418
-#define SWF03 0x7141c
-#define SWF04 0x71420
-#define SWF05 0x71424
-#define SWF06 0x71428
-#define SWF10 0x70410
-#define SWF11 0x70414
-#define SWF14 0x71420
-#define SWF30 0x72414
-#define SWF31 0x72418
-#define SWF32 0x7241c
+#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
+#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414)
+#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418)
+#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c)
+#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420)
+#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424)
+#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428)
+#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410)
+#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414)
+#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420)
+#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414)
+#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418)
+#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c)
/* Pipe B */
-#define _PIPEBDSL 0x71000
-#define _PIPEBCONF 0x71008
-#define _PIPEBSTAT 0x71024
-#define _PIPEBFRAMEHIGH 0x71040
-#define _PIPEBFRAMEPIXEL 0x71044
+#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
+#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
+#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
+#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040)
+#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044)
#define _PIPEB_FRMCOUNT_GM45 0x71040
#define _PIPEB_FLIPCOUNT_GM45 0x71044
/* Display B control */
-#define _DSPBCNTR 0x71180
+#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180)
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
-#define _DSPBADDR 0x71184
-#define _DSPBSTRIDE 0x71188
-#define _DSPBPOS 0x7118C
-#define _DSPBSIZE 0x71190
-#define _DSPBSURF 0x7119C
-#define _DSPBTILEOFF 0x711A4
-#define _DSPBOFFSET 0x711A4
-#define _DSPBSURFLIVE 0x711AC
+#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184)
+#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188)
+#define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C)
+#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190)
+#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C)
+#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4)
+#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4)
+#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC)
/* Sprite A control */
#define _DVSACNTR 0x72180
@@ -3116,6 +3100,7 @@
#define DVS_FORMAT_RGBX101010 (1<<25)
#define DVS_FORMAT_RGBX888 (2<<25)
#define DVS_FORMAT_RGBX161616 (3<<25)
+#define DVS_PIPE_CSC_ENABLE (1<<24)
#define DVS_SOURCE_KEY (1<<22)
#define DVS_RGB_ORDER_XBGR (1<<20)
#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
@@ -3183,7 +3168,7 @@
#define SPRITE_FORMAT_RGBX161616 (3<<25)
#define SPRITE_FORMAT_YUV444 (4<<25)
#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
-#define SPRITE_CSC_ENABLE (1<<24)
+#define SPRITE_PIPE_CSC_ENABLE (1<<24)
#define SPRITE_SOURCE_KEY (1<<22)
#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
@@ -3254,6 +3239,8 @@
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)
+#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400)
+
/* Ironlake */
#define CPU_VGACNTRL 0x41000
@@ -3294,41 +3281,41 @@
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
-#define _PIPEA_DATA_M1 0x60030
+#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030)
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
#define PIPE_DATA_M1_OFFSET 0
-#define _PIPEA_DATA_N1 0x60034
+#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034)
#define PIPE_DATA_N1_OFFSET 0
-#define _PIPEA_DATA_M2 0x60038
+#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038)
#define PIPE_DATA_M2_OFFSET 0
-#define _PIPEA_DATA_N2 0x6003c
+#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c)
#define PIPE_DATA_N2_OFFSET 0
-#define _PIPEA_LINK_M1 0x60040
+#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040)
#define PIPE_LINK_M1_OFFSET 0
-#define _PIPEA_LINK_N1 0x60044
+#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044)
#define PIPE_LINK_N1_OFFSET 0
-#define _PIPEA_LINK_M2 0x60048
+#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048)
#define PIPE_LINK_M2_OFFSET 0
-#define _PIPEA_LINK_N2 0x6004c
+#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c)
#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs are the same, but start at 0x61000 */
-#define _PIPEB_DATA_M1 0x61030
-#define _PIPEB_DATA_N1 0x61034
+#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030)
+#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034)
-#define _PIPEB_DATA_M2 0x61038
-#define _PIPEB_DATA_N2 0x6103c
+#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038)
+#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c)
-#define _PIPEB_LINK_M1 0x61040
-#define _PIPEB_LINK_N1 0x61044
+#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040)
+#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044)
-#define _PIPEB_LINK_M2 0x61048
-#define _PIPEB_LINK_N2 0x6104c
+#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048)
+#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c)
#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
@@ -3581,27 +3568,30 @@
#define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18)
#define PORTD_PULSE_DURATION_MASK (3 << 18)
-#define PORTD_HOTPLUG_NO_DETECT (0)
-#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
-#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
+#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16)
+#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12)
#define PORTC_PULSE_DURATION_2ms (0)
#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
#define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10)
#define PORTC_PULSE_DURATION_MASK (3 << 10)
-#define PORTC_HOTPLUG_NO_DETECT (0)
-#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
-#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
+#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8)
+#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4)
#define PORTB_PULSE_DURATION_2ms (0)
#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
#define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2)
#define PORTB_PULSE_DURATION_MASK (3 << 2)
-#define PORTB_HOTPLUG_NO_DETECT (0)
-#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
-#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
+#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0)
+#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
#define PCH_GPIOA 0xc5010
#define PCH_GPIOB 0xc5014
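
The defines above turn each port's hotplug detect status into a proper 2-bit field (0 = none, 1 = short pulse, 2 = long pulse) with an explicit mask, instead of the old overlapping single-bit values. A small sketch of decoding one port with the new masks; the register value in main() is just an example.

#include <stdint.h>
#include <stdio.h>

#define PORTD_HOTPLUG_STATUS_MASK	(0x3u << 16)
#define PORTD_HOTPLUG_SHORT_DETECT	(1u << 16)
#define PORTD_HOTPLUG_LONG_DETECT	(2u << 16)

int main(void)
{
	uint32_t hotplug_trigger = PORTD_HOTPLUG_LONG_DETECT;	/* example */

	switch (hotplug_trigger & PORTD_HOTPLUG_STATUS_MASK) {
	case PORTD_HOTPLUG_SHORT_DETECT:
		printf("port D: short pulse (e.g. a DP sink IRQ)\n");
		break;
	case PORTD_HOTPLUG_LONG_DETECT:
		printf("port D: long pulse (plug/unplug)\n");
		break;
	default:
		printf("port D: no hotplug\n");
	}
	return 0;
}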
@@ -3722,13 +3712,13 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-#define VLV_VIDEO_DIP_CTL_A 0x60200
-#define VLV_VIDEO_DIP_DATA_A 0x60208
-#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
+#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
-#define VLV_VIDEO_DIP_CTL_B 0x61170
-#define VLV_VIDEO_DIP_DATA_B 0x61174
-#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
+#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
#define VLV_TVIDEO_DIP_CTL(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
@@ -3820,8 +3810,6 @@
#define TRANS_FSYNC_DELAY_HB2 (1<<27)
#define TRANS_FSYNC_DELAY_HB3 (2<<27)
#define TRANS_FSYNC_DELAY_HB4 (3<<27)
-#define TRANS_DP_AUDIO_ONLY (1<<26)
-#define TRANS_DP_VIDEO_AUDIO (0<<26)
#define TRANS_INTERLACE_MASK (7<<21)
#define TRANS_PROGRESSIVE (0<<21)
#define TRANS_INTERLACED (3<<21)
@@ -3927,7 +3915,7 @@
#define FDI_10BPC (1<<16)
#define FDI_6BPC (2<<16)
#define FDI_12BPC (3<<16)
-#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
+#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15)
#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
#define FDI_RX_PLL_ENABLE (1<<13)
#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
@@ -4020,17 +4008,17 @@
#define LVDS_DETECTED (1 << 1)
/* vlv has 2 sets of panel control regs. */
-#define PIPEA_PP_STATUS 0x61200
-#define PIPEA_PP_CONTROL 0x61204
-#define PIPEA_PP_ON_DELAYS 0x61208
-#define PIPEA_PP_OFF_DELAYS 0x6120c
-#define PIPEA_PP_DIVISOR 0x61210
-
-#define PIPEB_PP_STATUS 0x61300
-#define PIPEB_PP_CONTROL 0x61304
-#define PIPEB_PP_ON_DELAYS 0x61308
-#define PIPEB_PP_OFF_DELAYS 0x6130c
-#define PIPEB_PP_DIVISOR 0x61310
+#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
+#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
+#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
+#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
+#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
+
+#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300)
+#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304)
+#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308)
+#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
+#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
@@ -4211,7 +4199,9 @@
#define GEN6_RP_INTERRUPT_LIMITS 0xA014
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
+#define HSW_CAGF_SHIFT 7
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
+#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
@@ -4280,8 +4270,8 @@
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
-#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
-#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
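
Worked example for the corrected RC6 VID macros above: encode and decode are now exact inverses of the linear mapping mv = 245 + 5 * vid.

#include <stdio.h>

#define GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids)	(((vids) * 5) + 245)

int main(void)
{
	int mv = 450;
	int vid = GEN6_ENCODE_RC6_VID(mv);		/* (450-245)/5 = 41 */

	printf("%d mV -> VID %d -> %d mV\n",
	       mv, vid, GEN6_DECODE_RC6_VID(vid));	/* round-trips to 450 */
	return 0;
}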
@@ -4322,7 +4312,7 @@
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
-#define G4X_AUD_VID_DID 0x62020
+#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
@@ -4438,10 +4428,10 @@
#define AUDIO_CP_READY_C (1<<9)
/* HSW Power Wells */
-#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
-#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
-#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
-#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
+#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
+#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
+#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
+#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
#define HSW_PWR_WELL_ENABLE (1<<31)
#define HSW_PWR_WELL_STATE (1<<30)
#define HSW_PWR_WELL_CTL5 0x45410
@@ -4524,6 +4514,7 @@
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
@@ -4657,4 +4648,51 @@
#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
#define WM_DBG_DISALLOW_SPRITE (1<<2)
+/* pipe CSC */
+#define _PIPE_A_CSC_COEFF_RY_GY 0x49010
+#define _PIPE_A_CSC_COEFF_BY 0x49014
+#define _PIPE_A_CSC_COEFF_RU_GU 0x49018
+#define _PIPE_A_CSC_COEFF_BU 0x4901c
+#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
+#define _PIPE_A_CSC_COEFF_BV 0x49024
+#define _PIPE_A_CSC_MODE 0x49028
+#define _PIPE_A_CSC_PREOFF_HI 0x49030
+#define _PIPE_A_CSC_PREOFF_ME 0x49034
+#define _PIPE_A_CSC_PREOFF_LO 0x49038
+#define _PIPE_A_CSC_POSTOFF_HI 0x49040
+#define _PIPE_A_CSC_POSTOFF_ME 0x49044
+#define _PIPE_A_CSC_POSTOFF_LO 0x49048
+
+#define _PIPE_B_CSC_COEFF_RY_GY 0x49110
+#define _PIPE_B_CSC_COEFF_BY 0x49114
+#define _PIPE_B_CSC_COEFF_RU_GU 0x49118
+#define _PIPE_B_CSC_COEFF_BU 0x4911c
+#define _PIPE_B_CSC_COEFF_RV_GV 0x49120
+#define _PIPE_B_CSC_COEFF_BV 0x49124
+#define _PIPE_B_CSC_MODE 0x49128
+#define _PIPE_B_CSC_PREOFF_HI 0x49130
+#define _PIPE_B_CSC_PREOFF_ME 0x49134
+#define _PIPE_B_CSC_PREOFF_LO 0x49138
+#define _PIPE_B_CSC_POSTOFF_HI 0x49140
+#define _PIPE_B_CSC_POSTOFF_ME 0x49144
+#define _PIPE_B_CSC_POSTOFF_LO 0x49148
+
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
+#define CSC_MODE_YUV_TO_RGB (1 << 0)
+
+#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
+#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
+#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
+#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
+#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
+#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
+#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
+#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
+#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
+#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
+#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
+#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
+#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+
#endif /* _I915_REG_H_ */
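
A few of the i915_reg.h additions above are easier to read with a small worked sketch. This is illustrative only and not part of the patch; PIPE_A/PIPE_B, _PIPE() and I915_READ() are assumed from elsewhere in the driver.

/* RC6 VID after the fix: a plain affine mapping, 245 mV base, 5 mV per step,
 * so encode and decode are exact inverses on that grid, e.g.:
 *   GEN6_ENCODE_RC6_VID(450) == (450 - 245) / 5 == 41
 *   GEN6_DECODE_RC6_VID(41)  == (41 * 5) + 245  == 450
 *
 * The new pipe CSC registers follow the usual _PIPE() pattern, so a single
 * macro resolves the per-pipe instance:
 *   PIPE_CSC_MODE(PIPE_A) -> 0x49028, PIPE_CSC_MODE(PIPE_B) -> 0x49128
 *
 * On Haswell the current-frequency field of GEN6_RPSTAT1 sits one bit lower
 * than on earlier gens, hence the separate shift/mask; a read would look like:
 *   cagf = (I915_READ(GEN6_RPSTAT1) & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
 */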
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 63d4d30c39de..2135f21ea458 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,67 +29,6 @@
#include "intel_drv.h"
#include "i915_reg.h"
-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpll_reg;
-
- /* On IVB, 3rd pipe shares PLL with another one */
- if (pipe > 1)
- return false;
-
- if (HAS_PCH_SPLIT(dev))
- dpll_reg = _PCH_DPLL(pipe);
- else
- dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
-
- return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
-}
-
-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
- else
- array = dev_priv->regfile.save_palette_b;
-
- for (i = 0; i < 256; i++)
- array[i] = I915_READ(reg + (i << 2));
-}
-
-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
- else
- array = dev_priv->regfile.save_palette_b;
-
- for (i = 0; i < 256; i++)
- I915_WRITE(reg + (i << 2), array[i]);
-}
-
static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -130,6 +69,12 @@ static void i915_save_vga(struct drm_device *dev)
int i;
u16 cr_index, cr_data, st01;
+ /* VGA state */
+ dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+ dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+ dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
+
/* VGA color palette registers */
dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
@@ -188,6 +133,15 @@ static void i915_restore_vga(struct drm_device *dev)
int i;
u16 cr_index, cr_data, st01;
+ /* VGA state */
+ I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
+
+ I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+ I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+ I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
+ POSTING_READ(VGA_PD);
+ udelay(150);
+
/* MSR bits */
I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
@@ -235,396 +189,18 @@ static void i915_restore_vga(struct drm_device *dev)
I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
}
-static void i915_save_modeset_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- /* Cursor state */
- dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
- dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
- dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
- dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
- dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
- dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
- if (IS_GEN2(dev))
- dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
- dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
- } else {
- dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
- dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
- dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
- dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
- dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
- dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
- dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
- dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
- dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
- dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
- dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
- dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
- dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
- dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
- dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
- dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
- dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
- dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
- dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
- dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
- dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
- }
-
- dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
- dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
- dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
- dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
- dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
- dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
- }
- i915_save_palette(dev, PIPE_A);
- dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
-
- /* Pipe & plane B info */
- dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
- dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
- } else {
- dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
- dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
- dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
- dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
- dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
- dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
- dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
- dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
- dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
- dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
- dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
- dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
- dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
- dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
- dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
- dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
- dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
- dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
- dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
- dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
- dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
- }
-
- dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
- dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
- dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
- dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
- dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
- dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
- }
- i915_save_palette(dev, PIPE_B);
- dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
- }
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
- else
- dev_priv->regfile.saveADPA = I915_READ(ADPA);
-
- return;
-}
-
-static void i915_restore_modeset_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_a_reg, fpa0_reg, fpa1_reg;
- int dpll_b_reg, fpb0_reg, fpb1_reg;
- int i;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
- break;
- case 3:
- case 2:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
- break;
- }
-
-
- if (HAS_PCH_SPLIT(dev)) {
- dpll_a_reg = _PCH_DPLL_A;
- dpll_b_reg = _PCH_DPLL_B;
- fpa0_reg = _PCH_FPA0;
- fpb0_reg = _PCH_FPB0;
- fpa1_reg = _PCH_FPA1;
- fpb1_reg = _PCH_FPB1;
- } else {
- dpll_a_reg = _DPLL_A;
- dpll_b_reg = _DPLL_B;
- fpa0_reg = _FPA0;
- fpb0_reg = _FPB0;
- fpa1_reg = _FPA1;
- fpb1_reg = _FPB1;
- }
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- /* Prime the clock */
- if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- }
- I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
- /* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
- POSTING_READ(_DPLL_A_MD);
- }
- udelay(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
- I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
- I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
- I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
- I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
- I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
- I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
- I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
- I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
-
- I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
- I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
-
- I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
- I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
- I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
-
- I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
- I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
- I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
- I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
- I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
- I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
- I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
- I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
- I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
- I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
- I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
- I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
- }
-
- I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
-
- i915_restore_palette(dev, PIPE_A);
- /* Enable the plane */
- I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
- I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
-
- /* Pipe & plane B info */
- if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- }
- I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
- /* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
- POSTING_READ(_DPLL_B_MD);
- }
- udelay(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
- I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
- I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
- I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
- I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
- I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
- I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
- I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
- I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
-
- I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
- I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
-
- I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
- I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
- I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
-
- I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
- I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
- I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
- I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
- I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
- I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
- I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
- I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
- I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
- I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
- I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
- I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
- }
-
- I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
-
- i915_restore_palette(dev, PIPE_B);
- /* Enable the plane */
- I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
- I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
-
- /* Cursor state */
- I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
- I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
- I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
- I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
- I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
- I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
-
- return;
-}
-
static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration control */
- dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+ if (INTEL_INFO(dev)->gen <= 4)
+ dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* This is only meaningful in non-KMS mode */
/* Don't save them in KMS mode */
- i915_save_modeset_reg(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_save_display_reg(dev);
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
@@ -658,24 +234,6 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- dev_priv->regfile.saveDP_B = I915_READ(DP_B);
- dev_priv->regfile.saveDP_C = I915_READ(DP_C);
- dev_priv->regfile.saveDP_D = I915_READ(DP_D);
- dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
- dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
- dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
- dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
- dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
- dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
- dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
- dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
- }
- /* FIXME: regfile.save TV & SDVO state */
- }
-
/* Only save FBC state on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
@@ -690,16 +248,8 @@ static void i915_save_display(struct drm_device *dev)
}
}
- /* VGA state */
- dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
- dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
- dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
- if (HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
- else
- dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
-
- i915_save_vga(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_save_vga(dev);
}
static void i915_restore_display(struct drm_device *dev)
@@ -707,25 +257,11 @@ static void i915_restore_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration */
- I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+ if (INTEL_INFO(dev)->gen <= 4)
+ I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Display port ratios (must be done before clock is set) */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
- I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
- I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
- I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
- I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
- I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
- I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
- I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
- }
- }
-
- /* This is only meaningful in non-KMS mode */
- /* Don't restore them in KMS mode */
- i915_restore_modeset_reg(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_restore_display_reg(dev);
/* LVDS state */
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
@@ -763,16 +299,6 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
- I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
- I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
- }
- /* FIXME: restore TV & SDVO state */
- }
-
/* only restore FBC info on the platform that supports FBC*/
intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
@@ -787,19 +313,11 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
}
}
- /* VGA state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
- else
- I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
- I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
- I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
- POSTING_READ(VGA_PD);
- udelay(150);
-
- i915_restore_vga(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_restore_vga(dev);
+ else
+ i915_redisable_vga(dev);
}
int i915_save_state(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
new file mode 100644
index 000000000000..985a09716237
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -0,0 +1,503 @@
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ * Copyright 2013 (c) Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "intel_drv.h"
+#include "i915_reg.h"
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll_reg;
+
+ /* On IVB, 3rd pipe shares PLL with another one */
+ if (pipe > 1)
+ return false;
+
+ if (HAS_PCH_SPLIT(dev))
+ dpll_reg = _PCH_DPLL(pipe);
+ else
+ dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
+
+ return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->regfile.save_palette_a;
+ else
+ array = dev_priv->regfile.save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->regfile.save_palette_a;
+ else
+ array = dev_priv->regfile.save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ I915_WRITE(reg + (i << 2), array[i]);
+}
+
+void i915_save_display_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Cursor state */
+ dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
+ if (IS_GEN2(dev))
+ dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
+ } else {
+ dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+ }
+ i915_save_palette(dev, PIPE_A);
+ dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
+
+ /* Pipe & plane B info */
+ dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
+ } else {
+ dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+ }
+ i915_save_palette(dev, PIPE_B);
+ dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+ }
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+ else
+ dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+ dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+ dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+ }
+ /* FIXME: save TV & SDVO state */
+
+ return;
+}
+
+void i915_restore_display_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_a_reg, fpa0_reg, fpa1_reg;
+ int dpll_b_reg, fpb0_reg, fpb1_reg;
+ int i;
+
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
+ }
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
+ break;
+ }
+
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dpll_a_reg = _PCH_DPLL_A;
+ dpll_b_reg = _PCH_DPLL_B;
+ fpa0_reg = _PCH_FPA0;
+ fpb0_reg = _PCH_FPB0;
+ fpa1_reg = _PCH_FPA1;
+ fpb1_reg = _PCH_FPB1;
+ } else {
+ dpll_a_reg = _DPLL_A;
+ dpll_b_reg = _DPLL_B;
+ fpa0_reg = _FPA0;
+ fpb0_reg = _FPB0;
+ fpa1_reg = _FPA1;
+ fpb1_reg = _FPB1;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ /* Prime the clock */
+ if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+ }
+ I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
+ /* Actually enable it */
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
+ POSTING_READ(_DPLL_A_MD);
+ }
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
+
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
+
+ I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
+
+ I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
+ }
+
+ I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
+
+ i915_restore_palette(dev, PIPE_A);
+ /* Enable the plane */
+ I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
+ I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
+
+ /* Pipe & plane B info */
+ if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+ }
+ I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
+ /* Actually enable it */
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
+ POSTING_READ(_DPLL_B_MD);
+ }
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
+
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
+
+ I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
+
+ I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
+ }
+
+ I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
+
+ i915_restore_palette(dev, PIPE_B);
+ /* Enable the plane */
+ I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
+ I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
+
+ /* Cursor state */
+ I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+ I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+ I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+ }
+ /* FIXME: restore TV & SDVO state */
+
+ return;
+}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9293878ec7eb..969d08c72d10 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -267,27 +267,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
crt->force_hotplug_required = 0;
- save_adpa = adpa = I915_READ(PCH_ADPA);
+ save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
if (turn_off_dac)
adpa &= ~ADPA_DAC_ENABLE;
- I915_WRITE(PCH_ADPA, adpa);
+ I915_WRITE(crt->adpa_reg, adpa);
- if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
- I915_WRITE(PCH_ADPA, save_adpa);
- POSTING_READ(PCH_ADPA);
+ I915_WRITE(crt->adpa_reg, save_adpa);
+ POSTING_READ(crt->adpa_reg);
}
}
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(PCH_ADPA);
+ adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
@@ -300,26 +300,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
bool ret;
u32 save_adpa;
- save_adpa = adpa = I915_READ(ADPA);
+ save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
- I915_WRITE(ADPA, adpa);
+ I915_WRITE(crt->adpa_reg, adpa);
- if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
- I915_WRITE(ADPA, save_adpa);
+ I915_WRITE(crt->adpa_reg, save_adpa);
}
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(ADPA);
+ adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
@@ -665,11 +666,11 @@ static void intel_crt_reset(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev)) {
u32 adpa;
- adpa = I915_READ(PCH_ADPA);
+ adpa = I915_READ(crt->adpa_reg);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
- I915_WRITE(PCH_ADPA, adpa);
- POSTING_READ(PCH_ADPA);
+ I915_WRITE(crt->adpa_reg, adpa);
+ POSTING_READ(crt->adpa_reg);
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
@@ -684,7 +685,6 @@ static void intel_crt_reset(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
.mode_set = intel_crt_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
@@ -776,7 +776,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
crt->base.get_hw_state = intel_ddi_get_hw_state;
else
crt->base.get_hw_state = intel_crt_get_hw_state;
@@ -800,10 +800,14 @@ void intel_crt_init(struct drm_device *dev)
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
/*
- * TODO: find a proper way to discover whether we need to set the
- * polarity reversal bit or not, instead of relying on the BIOS.
+ * TODO: find a proper way to discover whether we need to set the
+ * polarity and link reversal bits or not, instead of relying on the
+ * BIOS.
*/
- if (HAS_PCH_LPT(dev))
- dev_priv->fdi_rx_polarity_reversed =
- !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
+ if (HAS_PCH_LPT(dev)) {
+ u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
+ FDI_RX_LINK_REVERSAL_OVERRIDE;
+
+ dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
+ }
}
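
The LPT hunk above pairs with the hsw_fdi_link_train() change in the next file: intel_crt_init() now captures the two BIOS-programmed FDI RX reversal bits, and link training later ORs that saved value back into FDI_RX_CTL. A minimal sketch of the flow, using only names from this diff:

/* At CRT init (LPT only): remember what the BIOS programmed. */
/*   dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) &
 *       (FDI_RX_POLARITY_REVERSED_LPT | FDI_RX_LINK_REVERSAL_OVERRIDE);  */
/* At FDI link training: carry those bits forward unchanged. */
/*   rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
 *       FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);         */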
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 4bad0f724019..d64af5aa4a1c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -84,7 +84,8 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
-void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+ bool use_fdi_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
@@ -114,16 +115,17 @@ void intel_prepare_ddi(struct drm_device *dev)
{
int port;
- if (IS_HASWELL(dev)) {
- for (port = PORT_A; port < PORT_E; port++)
- intel_prepare_ddi_buffers(dev, port, false);
+ if (!HAS_DDI(dev))
+ return;
- /* DDI E is the suggested one to work in FDI mode, so program is as such by
- * default. It will have to be re-programmed in case a digital DP output
- * will be detected on it
- */
- intel_prepare_ddi_buffers(dev, PORT_E, true);
- }
+ for (port = PORT_A; port < PORT_E; port++)
+ intel_prepare_ddi_buffers(dev, port, false);
+
+ /* DDI E is the suggested one to work in FDI mode, so program it as such
+ * by default. It will have to be re-programmed in case a digital DP
+ * output is detected on it.
+ */
+ intel_prepare_ddi_buffers(dev, PORT_E, true);
}
static const long hsw_ddi_buf_ctl_values[] = {
@@ -178,10 +180,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
/* Enable the PCH Receiver FDI PLL */
- rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
- ((intel_crtc->fdi_lanes - 1) << 19);
- if (dev_priv->fdi_rx_polarity_reversed)
- rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
+ rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
udelay(220);
@@ -203,7 +203,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DP_TP_CTL_LINK_TRAIN_PAT1 |
DP_TP_CTL_ENABLE);
- /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
+ * DDI E does not support port reversal; the functionality is
+ * achieved on the PCH side in FDI_RX_CTL, so no need to set the
+ * port reversal bit */
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
((intel_crtc->fdi_lanes - 1) << 1) |
@@ -675,10 +678,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
+ intel_crtc->eld_vld = false;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
- intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ intel_dp->DP = intel_dig_port->port_reversal |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
switch (intel_dp->lane_count) {
case 1:
intel_dp->DP |= DDI_PORT_WIDTH_X1;
@@ -985,7 +992,13 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
if (cpu_transcoder == TRANSCODER_EDP) {
switch (pipe) {
case PIPE_A:
- temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ /* Can only use the always-on power well for eDP when
+ * not using the panel fitter, and when not using motion
+ * blur mitigation (which we don't support). */
+ if (dev_priv->pch_pf_size)
+ temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ else
+ temp |= TRANS_DDI_EDP_INPUT_A_ON;
break;
case PIPE_B:
temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
@@ -1069,7 +1082,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
if (port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
- cpu_transcoder = pipe;
+ cpu_transcoder = (enum transcoder) pipe;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -1285,34 +1298,58 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
+ uint32_t tmp;
if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
*/
- I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
+ I915_WRITE(DDI_BUF_CTL(port),
+ intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_backlight_on(intel_dp);
}
+
+ if (intel_crtc->eld_vld) {
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ }
}
static void intel_disable_ddi(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_backlight_off(intel_dp);
}
+
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
@@ -1452,11 +1489,11 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
.mode_fixup = intel_ddi_mode_fixup,
.mode_set = intel_ddi_mode_set,
- .disable = intel_encoder_noop,
};
void intel_ddi_init(struct drm_device *dev, enum port port)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -1497,6 +1534,8 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_dig_port->port = port;
+ intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_PORT_REVERSAL;
if (hdmi_connector)
intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
else
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index da1ad9c80bb5..a05ac2c91ba2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -154,8 +154,8 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
.m = { .min = 70, .max = 120 },
- .m1 = { .min = 10, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
.p = { .min = 5, .max = 80 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
@@ -168,8 +168,8 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
.m = { .min = 70, .max = 120 },
- .m1 = { .min = 10, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
.p = { .min = 7, .max = 98 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
@@ -416,13 +416,11 @@ static const intel_limit_t intel_limits_vlv_dp = {
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
- unsigned long flags;
- u32 val = 0;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
- goto out_unlock;
+ return 0;
}
I915_WRITE(DPIO_REG, reg);
@@ -430,24 +428,20 @@ u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO read wait timed out\n");
- goto out_unlock;
+ return 0;
}
- val = I915_READ(DPIO_DATA);
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
- return val;
+ return I915_READ(DPIO_DATA);
}
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
u32 val)
{
- unsigned long flags;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
- goto out_unlock;
+ return;
}
I915_WRITE(DPIO_DATA, val);
@@ -456,9 +450,6 @@ static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
DRM_ERROR("DPIO write wait timed out\n");
-
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
static void vlv_init_dpio(struct drm_device *dev)
@@ -472,61 +463,14 @@ static void vlv_init_dpio(struct drm_device *dev)
POSTING_READ(DPIO_CTL);
}
-static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
-{
- DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
- return 1;
-}
-
-static const struct dmi_system_id intel_dual_link_lvds[] = {
- {
- .callback = intel_dual_link_lvds_callback,
- .ident = "Apple MacBook Pro (Core i5/i7 Series)",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
- },
- },
- { } /* terminating entry */
-};
-
-static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
- unsigned int reg)
-{
- unsigned int val;
-
- /* use the module option value if specified */
- if (i915_lvds_channel_mode > 0)
- return i915_lvds_channel_mode == 2;
-
- if (dmi_check_system(intel_dual_link_lvds))
- return true;
-
- if (dev_priv->lvds_val)
- val = dev_priv->lvds_val;
- else {
- /* BIOS should set the proper LVDS register value at boot, but
- * in reality, it doesn't set the value when the lid is closed;
- * we need to check "the value to be set" in VBT when LVDS
- * register is uninitialized.
- */
- val = I915_READ(reg);
- if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
- val = dev_priv->bios_lvds_val;
- dev_priv->lvds_val = val;
- }
- return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
-}
-
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
+ if (intel_is_dual_link_lvds(dev)) {
/* LVDS dual channel */
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -550,11 +494,10 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if (is_dual_link_lvds(dev_priv, LVDS))
+ if (intel_is_dual_link_lvds(dev))
/* LVDS with dual channel */
limit = &intel_limits_g4x_dual_channel_lvds;
else
@@ -686,19 +629,16 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int err = target;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
- (I915_READ(LVDS)) != 0) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
- * For LVDS, if the panel is on, just rely on its current
- * settings for dual-channel. We haven't figured out how to
- * reliably set up different single/dual channel state, if we
- * even can.
+ * For LVDS just rely on its current settings for dual-channel.
+ * We haven't figured out how to reliably set up different
+ * single/dual channel state, if we even can.
*/
- if (is_dual_link_lvds(dev_priv, LVDS))
+ if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
@@ -751,7 +691,6 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int max_n;
bool found;
@@ -766,8 +705,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
- if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
@@ -1047,6 +985,51 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
}
}
+/*
+ * ibx_digital_port_connected - is the specified port connected?
+ * @dev_priv: i915 private structure
+ * @port: the port to test
+ *
+ * Returns true if @port is connected, false otherwise.
+ */
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ u32 bit;
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ switch(port->port) {
+ case PORT_B:
+ bit = SDE_PORTB_HOTPLUG;
+ break;
+ case PORT_C:
+ bit = SDE_PORTC_HOTPLUG;
+ break;
+ case PORT_D:
+ bit = SDE_PORTD_HOTPLUG;
+ break;
+ default:
+ return true;
+ }
+ } else {
+ switch(port->port) {
+ case PORT_B:
+ bit = SDE_PORTB_HOTPLUG_CPT;
+ break;
+ case PORT_C:
+ bit = SDE_PORTC_HOTPLUG_CPT;
+ break;
+ case PORT_D:
+ bit = SDE_PORTD_HOTPLUG_CPT;
+ break;
+ default:
+ return true;
+ }
+ }
+
+ return I915_READ(SDEISR) & bit;
+}
+
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
@@ -1125,8 +1108,8 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
- if (IS_HASWELL(dev_priv->dev)) {
- /* On Haswell, DDI is used instead of FDI_TX_CTL */
+ if (HAS_DDI(dev_priv->dev)) {
+ /* DDI does not have a specific FDI_TX register */
reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
@@ -1170,7 +1153,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
- if (IS_HASWELL(dev_priv->dev))
+ if (HAS_DDI(dev_priv->dev))
return;
reg = FDI_TX_CTL(pipe);
@@ -1231,9 +1214,15 @@ void assert_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
- reg = PIPECONF(cpu_transcoder);
- val = I915_READ(reg);
- cur_state = !!(val & PIPECONF_ENABLE);
+ if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP &&
+ !(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) {
+ cur_state = false;
+ } else {
+ reg = PIPECONF(cpu_transcoder);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPECONF_ENABLE);
+ }
+
WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
@@ -1509,13 +1498,14 @@ static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination)
{
- unsigned long flags;
u32 tmp;
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
- goto out_unlock;
+ return;
}
I915_WRITE(SBI_ADDR, (reg << 16));
@@ -1530,24 +1520,21 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
- goto out_unlock;
+ return;
}
-
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination)
{
- unsigned long flags;
u32 value = 0;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
- goto out_unlock;
+ return 0;
}
I915_WRITE(SBI_ADDR, (reg << 16));
@@ -1561,14 +1548,10 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
- goto out_unlock;
+ return 0;
}
- value = I915_READ(SBI_DATA);
-
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
- return value;
+ return I915_READ(SBI_DATA);
}
/**
@@ -1700,8 +1683,8 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
* make the BPC in transcoder be consistent with
* that in pipeconf reg.
*/
- val &= ~PIPE_BPC_MASK;
- val |= pipeconf_val & PIPE_BPC_MASK;
+ val &= ~PIPECONF_BPC_MASK;
+ val |= pipeconf_val & PIPECONF_BPC_MASK;
}
val &= ~TRANS_INTERLACE_MASK;
@@ -1728,7 +1711,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
BUG_ON(dev_priv->info->gen < 5);
/* FDI must be feeding us bits for PCH ports */
- assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
+ assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
/* Workaround: set timing override bit. */
@@ -1816,11 +1799,11 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
{
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
- enum transcoder pch_transcoder;
+ enum pipe pch_transcoder;
int reg;
u32 val;
- if (IS_HASWELL(dev_priv->dev))
+ if (HAS_PCH_LPT(dev_priv->dev))
pch_transcoder = TRANSCODER_A;
else
pch_transcoder = pipe;
@@ -1836,7 +1819,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
- assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
+ assert_fdi_tx_pll_enabled(dev_priv,
+ (enum pipe) cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
@@ -2017,18 +2001,29 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
- unsigned int bpp,
- unsigned int pitch)
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int cpp,
+ unsigned int pitch)
{
- int tile_rows, tiles;
+ if (tiling_mode != I915_TILING_NONE) {
+ unsigned int tile_rows, tiles;
+
+ tile_rows = *y / 8;
+ *y %= 8;
+
+ tiles = *x / (512/cpp);
+ *x %= 512/cpp;
- tile_rows = *y / 8;
- *y %= 8;
- tiles = *x / (512/bpp);
- *x %= 512/bpp;
+ return tile_rows * pitch * 8 + tiles * 4096;
+ } else {
+ unsigned int offset;
- return tile_rows * pitch * 8 + tiles * 4096;
+ offset = *y * pitch + *x * cpp;
+ *y = 0;
+ *x = (offset & 4095) / cpp;
+ return offset & -4096;
+ }
}
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -2105,9 +2100,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
intel_crtc->dspaddr_offset = linear_offset;
@@ -2198,9 +2193,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
intel_crtc->dspaddr_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
@@ -2242,10 +2237,6 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
- wait_event(dev_priv->pending_flip_queue,
- atomic_read(&dev_priv->mm.wedged) ||
- atomic_read(&obj->pending_flip) == 0);
-
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
@@ -2350,43 +2341,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
-
- DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl &= ~DP_PLL_FREQ_MASK;
-
- if (clock < 200000) {
- u32 temp;
- dpa_ctl |= DP_PLL_FREQ_160MHZ;
- /* workaround for 160Mhz:
- 1) program 0x4600c bits 15:0 = 0x8124
- 2) program 0x46010 bit 0 = 1
- 3) program 0x46034 bit 24 = 1
- 4) program 0x64000 bit 14 = 1
- */
- temp = I915_READ(0x4600c);
- temp &= 0xffff0000;
- I915_WRITE(0x4600c, temp | 0x8124);
-
- temp = I915_READ(0x46010);
- I915_WRITE(0x46010, temp | 1);
-
- temp = I915_READ(0x46034);
- I915_WRITE(0x46034, temp | (1 << 24));
- } else {
- dpa_ctl |= DP_PLL_FREQ_270MHZ;
- }
- I915_WRITE(DP_A, dpa_ctl);
-
- POSTING_READ(DP_A);
- udelay(500);
-}
-
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2815,7 +2769,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
temp = I915_READ(reg);
temp &= ~((0x7 << 19) | (0x7 << 16));
temp |= (intel_crtc->fdi_lanes - 1) << 19;
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
POSTING_READ(reg);
@@ -2828,18 +2782,14 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
POSTING_READ(reg);
udelay(200);
- /* On Haswell, the PLL configuration for ports and pipes is handled
- * separately, as part of DDI setup */
- if (!IS_HASWELL(dev)) {
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
- udelay(100);
- }
+ POSTING_READ(reg);
+ udelay(100);
}
}
@@ -2889,7 +2839,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(0x7 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
POSTING_READ(reg);
@@ -2918,7 +2868,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
}
/* BPC in FDI rx is consistent with that in PIPECONF */
temp &= ~(0x07 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp);
POSTING_READ(reg);
@@ -2929,10 +2879,12 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool pending;
- if (atomic_read(&dev_priv->mm.wedged))
+ if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+ intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -2950,6 +2902,8 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
if (crtc->fb == NULL)
return;
+ WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
+
wait_event(dev_priv->pending_flip_queue,
!intel_crtc_has_pending_flip(crtc));
@@ -2992,6 +2946,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
+ mutex_lock(&dev_priv->dpio_lock);
+
/* It is necessary to ungate the pixclk gate prior to programming
* the divisors, and gate it back when it is done.
*/
@@ -3066,6 +3022,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
udelay(24);
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
/*
@@ -3146,7 +3104,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
if (HAS_PCH_CPT(dev) &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
- u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
+ u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@ -3623,7 +3581,7 @@ static void haswell_crtc_off(struct drm_crtc *crtc)
/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
* start using it. */
- intel_crtc->cpu_transcoder = intel_crtc->pipe;
+ intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe;
intel_ddi_put_crtc_pll(crtc);
}
@@ -3664,6 +3622,11 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(dev);
intel_enable_pll(dev_priv, pipe);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
@@ -3686,6 +3649,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ u32 pctl;
if (!intel_crtc->active)
@@ -3705,6 +3669,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
+
+ /* Disable panel fitter if it is on this pipe. */
+ pctl = I915_READ(PFIT_CONTROL);
+ if ((pctl & PFIT_ENABLE) &&
+ ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
+ I915_WRITE(PFIT_CONTROL, 0);
+
intel_disable_pll(dev_priv, pipe);
intel_crtc->active = false;
@@ -3767,19 +3738,17 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
intel_crtc_update_sarea(crtc, enable);
}
-static void intel_crtc_noop(struct drm_crtc *crtc)
-{
-}
-
static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
+ intel_crtc->eld_vld = false;
dev_priv->display.crtc_disable(crtc);
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
@@ -3817,10 +3786,6 @@ void intel_modeset_disable(struct drm_device *dev)
}
}
-void intel_encoder_noop(struct drm_encoder *encoder)
-{
-}
-
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
@@ -4012,16 +3977,8 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
return 133000;
}
-struct fdi_m_n {
- u32 tu;
- u32 gmch_m;
- u32 gmch_n;
- u32 link_m;
- u32 link_n;
-};
-
static void
-fdi_reduce_ratio(u32 *num, u32 *den)
+intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
while (*num > 0xffffff || *den > 0xffffff) {
*num >>= 1;
@@ -4029,20 +3986,18 @@ fdi_reduce_ratio(u32 *num, u32 *den)
}
}
-static void
-ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
- int link_clock, struct fdi_m_n *m_n)
+void
+intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n)
{
- m_n->tu = 64; /* default size */
-
- /* BUG_ON(pixel_clock > INT_MAX / 36); */
+ m_n->tu = 64;
m_n->gmch_m = bits_per_pixel * pixel_clock;
m_n->gmch_n = link_clock * nlanes * 8;
- fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
-
+ intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
m_n->link_m = pixel_clock;
m_n->link_n = link_clock;
- fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
+ intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@ -4289,51 +4244,6 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
}
}
-static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- u32 temp;
-
- temp = I915_READ(LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (pipe == 1) {
- temp |= LVDS_PIPEB_SELECT;
- } else {
- temp &= ~LVDS_PIPEB_SELECT;
- }
- /* set the corresponsding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
- * set the DPLLs for dual-channel mode or not.
- */
- if (clock->p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
- * appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
- */
- /* set the dithering flag on LVDS as needed */
- if (INTEL_INFO(dev)->gen >= 4) {
- if (dev_priv->lvds_dither)
- temp |= LVDS_ENABLE_DITHER;
- else
- temp &= ~LVDS_ENABLE_DITHER;
- }
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- temp |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(LVDS, temp);
-}
-
static void vlv_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4349,6 +4259,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
bool is_sdvo;
u32 temp;
+ mutex_lock(&dev_priv->dpio_lock);
+
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
@@ -4432,6 +4344,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
temp |= (1 << 21);
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
}
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4443,6 +4357,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
u32 dpll;
bool is_sdvo;
@@ -4511,12 +4426,9 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- intel_update_lvds(crtc, clock, adjusted_mode);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -4555,6 +4467,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
u32 dpll;
@@ -4588,12 +4501,9 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- intel_update_lvds(crtc, clock, adjusted_mode);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
I915_WRITE(DPLL(pipe), dpll);
@@ -4783,10 +4693,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
/* default to 8bpc */
- pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
if (is_dp) {
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
- pipeconf |= PIPECONF_BPP_6 |
+ pipeconf |= PIPECONF_6BPC |
PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
}
@@ -4794,7 +4704,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
- pipeconf |= PIPECONF_BPP_6 |
+ pipeconf |= PIPECONF_6BPC |
PIPECONF_ENABLE |
I965_PIPECONF_ACTIVE;
}
@@ -4981,6 +4891,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
if (!has_vga)
return;
+ mutex_lock(&dev_priv->dpio_lock);
+
/* XXX: Rip out SDV support once Haswell ships for real. */
if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
is_sdv = true;
@@ -5123,6 +5035,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
tmp |= SBI_DBUFF0_ENABLE;
intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
/*
@@ -5177,19 +5091,19 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
val = I915_READ(PIPECONF(pipe));
- val &= ~PIPE_BPC_MASK;
+ val &= ~PIPECONF_BPC_MASK;
switch (intel_crtc->bpp) {
case 18:
- val |= PIPE_6BPC;
+ val |= PIPECONF_6BPC;
break;
case 24:
- val |= PIPE_8BPC;
+ val |= PIPECONF_8BPC;
break;
case 30:
- val |= PIPE_10BPC;
+ val |= PIPECONF_10BPC;
break;
case 36:
- val |= PIPE_12BPC;
+ val |= PIPECONF_12BPC;
break;
default:
/* Case prevented by intel_choose_pipe_bpp_dither. */
@@ -5206,10 +5120,80 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
else
val |= PIPECONF_PROGRESSIVE;
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ val |= PIPECONF_COLOR_RANGE_SELECT;
+ else
+ val &= ~PIPECONF_COLOR_RANGE_SELECT;
+
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
+/*
+ * Set up the pipe CSC unit.
+ *
+ * Currently only full range RGB to limited range RGB conversion
+ * is supported, but eventually this should handle various
+ * RGB<->YCbCr scenarios as well.
+ */
+static void intel_set_pipe_csc(struct drm_crtc *crtc,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint16_t coeff = 0x7800; /* 1.0 */
+
+ /*
+ * TODO: Check what kind of values actually come out of the pipe
+ * with these coeff/postoff values and adjust to get the best
+ * accuracy. Perhaps we even need to take the bpc value into
+ * consideration.
+ */
+
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
+
+ /*
+ * GY/GU and RY/RU should be the other way around according
+ * to BSpec, but reality doesn't agree. Just set them up in
+ * a way that results in the correct picture.
+ */
+ I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
+ I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
+
+ I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
+ I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
+
+ I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
+ I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
+
+ I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+
+ if (INTEL_INFO(dev)->gen > 6) {
+ uint16_t postoff = 0;
+
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ postoff = (16 * (1 << 13) / 255) & 0x1fff;
+
+ I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+ } else {
+ uint32_t mode = CSC_MODE_YUV_TO_RGB;
+
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ mode |= CSC_BLACK_SCREEN_OFFSET;
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+ }
+}
+
static void haswell_set_pipeconf(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
bool dither)
@@ -5400,7 +5384,7 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
struct intel_encoder *intel_encoder, *edp_encoder = NULL;
- struct fdi_m_n m_n = {0};
+ struct intel_link_m_n m_n = {0};
int target_clock, pixel_multiplier, lane, link_bw;
bool is_dp = false, is_cpu_edp = false;
@@ -5452,8 +5436,7 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
if (pixel_multiplier > 1)
link_bw *= pixel_multiplier;
- ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
- &m_n);
+ intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n);
I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
@@ -5506,7 +5489,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (is_lvds) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->lvds_ssc_freq == 100) ||
- (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+ intel_is_dual_link_lvds(dev))
factor = 25;
} else if (is_sdvo && is_tv)
factor = 20;
@@ -5581,7 +5564,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
bool ok, has_reduced_clock = false;
bool is_lvds = false, is_dp = false, is_cpu_edp = false;
struct intel_encoder *encoder;
- u32 temp;
int ret;
bool dither, fdi_config_ok;
@@ -5645,54 +5627,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
} else
intel_put_pch_pll(intel_crtc);
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (is_lvds) {
- temp = I915_READ(PCH_LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev)) {
- temp &= ~PORT_TRANS_SEL_MASK;
- temp |= PORT_TRANS_SEL_CPT(pipe);
- } else {
- if (pipe == 1)
- temp |= LVDS_PIPEB_SELECT;
- else
- temp &= ~LVDS_PIPEB_SELECT;
- }
-
- /* set the corresponsding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
- * set the DPLLs for dual-channel mode or not.
- */
- if (clock.p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
- * appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
- */
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- temp |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(PCH_LVDS, temp);
- }
-
- if (is_dp && !is_cpu_edp) {
+ if (is_dp && !is_cpu_edp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
- } else {
- /* For non-DP output, clear any trans DP clock recovery setting.*/
- I915_WRITE(TRANSDATA_M1(pipe), 0);
- I915_WRITE(TRANSDATA_N1(pipe), 0);
- I915_WRITE(TRANSDPLINK_M1(pipe), 0);
- I915_WRITE(TRANSDPLINK_N1(pipe), 0);
- }
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
if (intel_crtc->pch_pll) {
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
@@ -5727,9 +5667,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
- if (is_cpu_edp)
- ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-
ironlake_set_pipeconf(crtc, adjusted_mode, dither);
intel_wait_for_vblank(dev, pipe);
@@ -5747,6 +5684,35 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
return fdi_config_ok ? ret : -EINVAL;
}
+static void haswell_modeset_global_resources(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool enable = false;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ if (crtc->pipe != PIPE_A && crtc->base.enabled)
+ enable = true;
+ /* XXX: Should check for the eDP transcoder here, but the init
+ * sequence doesn't make that available yet. Just in case desktop eDP
+ * on PORT D is possible on haswell, too. */
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->type != INTEL_OUTPUT_EDP &&
+ encoder->connectors_active)
+ enable = true;
+ }
+
+ /* Even the eDP panel fitter is outside the always-on well. */
+ if (dev_priv->pch_pf_size)
+ enable = true;
+
+ intel_set_power_well(dev, enable);
+}
+
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -5759,20 +5725,13 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
- intel_clock_t clock, reduced_clock;
- u32 dpll = 0, fp = 0, fp2 = 0;
- bool ok, has_reduced_clock = false;
- bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ bool is_dp = false, is_cpu_edp = false;
struct intel_encoder *encoder;
- u32 temp;
int ret;
bool dither;
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
@@ -5786,11 +5745,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- if (is_cpu_edp)
- intel_crtc->cpu_transcoder = TRANSCODER_EDP;
- else
- intel_crtc->cpu_transcoder = pipe;
-
/* We are not sure yet this won't happen. */
WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
INTEL_PCH_TYPE(dev));
@@ -5806,147 +5760,32 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
return -EINVAL;
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
- &has_reduced_clock,
- &reduced_clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
- }
-
/* Ensure that the cursor is valid for the new mode before changing... */
intel_crtc_update_cursor(crtc, true);
/* determine panel color depth */
dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
adjusted_mode);
- if (is_lvds && dev_priv->lvds_dither)
- dither = true;
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
- reduced_clock.m2;
-
- dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
- fp);
-
- /* CPU eDP is the only output that doesn't need a PCH PLL of its
- * own on pre-Haswell/LPT generation */
- if (!is_cpu_edp) {
- struct intel_pch_pll *pll;
-
- pll = intel_get_pch_pll(intel_crtc, dpll, fp);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
- pipe);
- return -EINVAL;
- }
- } else
- intel_put_pch_pll(intel_crtc);
-
- /* The LVDS pin pair needs to be on before the DPLLs are
- * enabled. This is an exception to the general rule that
- * mode_set doesn't turn things on.
- */
- if (is_lvds) {
- temp = I915_READ(PCH_LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev)) {
- temp &= ~PORT_TRANS_SEL_MASK;
- temp |= PORT_TRANS_SEL_CPT(pipe);
- } else {
- if (pipe == 1)
- temp |= LVDS_PIPEB_SELECT;
- else
- temp &= ~LVDS_PIPEB_SELECT;
- }
-
- /* set the corresponsding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether
- * we're going to set the DPLLs for dual-channel mode or
- * not.
- */
- if (clock.p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP |
- LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode
- * (LVDS_A3_POWER_UP) appropriately here, but we need to
- * look more thoroughly into how panels behave in the
- * two modes.
- */
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- temp |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(PCH_LVDS, temp);
- }
- }
-
- if (is_dp && !is_cpu_edp) {
+ if (is_dp && !is_cpu_edp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
- } else {
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- /* For non-DP output, clear any trans DP clock recovery
- * setting.*/
- I915_WRITE(TRANSDATA_M1(pipe), 0);
- I915_WRITE(TRANSDATA_N1(pipe), 0);
- I915_WRITE(TRANSDPLINK_M1(pipe), 0);
- I915_WRITE(TRANSDPLINK_N1(pipe), 0);
- }
- }
intel_crtc->lowfreq_avail = false;
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- if (intel_crtc->pch_pll) {
- I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(intel_crtc->pch_pll->pll_reg);
- udelay(150);
-
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
- }
-
- if (intel_crtc->pch_pll) {
- if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
- intel_crtc->lowfreq_avail = true;
- } else {
- I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
- }
- }
- }
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
if (!is_dp || is_cpu_edp)
ironlake_set_m_n(crtc, mode, adjusted_mode);
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- if (is_cpu_edp)
- ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-
haswell_set_pipeconf(crtc, adjusted_mode, dither);
+ intel_set_pipe_csc(crtc, adjusted_mode);
+
/* Set up the display plane register */
- I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
POSTING_READ(DSPCNTR(plane));
ret = intel_pipe_set_base(crtc, x, y, fb);
@@ -5972,6 +5811,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
int ret;
+ if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+ intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+ else
+ intel_crtc->cpu_transcoder = pipe;
+
drm_vblank_pre_modeset(dev, pipe);
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
@@ -6068,6 +5912,7 @@ static void haswell_write_eld(struct drm_connector *connector,
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t eldv;
uint32_t i;
int len;
@@ -6109,6 +5954,7 @@ static void haswell_write_eld(struct drm_connector *connector,
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+ intel_crtc->eld_vld = true;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
@@ -6344,6 +6190,8 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
+ if (IS_HASWELL(dev))
+ cntl |= CURSOR_PIPE_CSC_ENABLE;
I915_WRITE(CURCNTR_IVB(pipe), cntl);
intel_crtc->cursor_visible = visible;
@@ -6700,6 +6548,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
if (encoder->crtc) {
crtc = encoder->crtc;
+ mutex_lock(&crtc->mutex);
+
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
@@ -6729,6 +6579,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
return false;
}
+ mutex_lock(&crtc->mutex);
intel_encoder->new_crtc = to_intel_crtc(crtc);
to_intel_connector(connector)->new_encoder = intel_encoder;
@@ -6756,13 +6607,15 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
+ mutex_unlock(&crtc->mutex);
return false;
}
- if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
+ if (intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
+ mutex_unlock(&crtc->mutex);
return false;
}
@@ -6777,27 +6630,31 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
encoder->base.id, drm_get_encoder_name(encoder));
if (old->load_detect_temp) {
- struct drm_crtc *crtc = encoder->crtc;
-
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_set_mode(crtc, NULL, 0, 0, NULL);
- if (old->release_fb)
- old->release_fb->funcs->destroy(old->release_fb);
+ if (old->release_fb) {
+ drm_framebuffer_unregister_private(old->release_fb);
+ drm_framebuffer_unreference(old->release_fb);
+ }
+ mutex_unlock(&crtc->mutex);
return;
}
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
+
+ mutex_unlock(&crtc->mutex);
}
/* Returns the clock of the currently programmed mode of the given pipe. */
@@ -6993,11 +6850,6 @@ void intel_mark_busy(struct drm_device *dev)
void intel_mark_idle(struct drm_device *dev)
{
-}
-
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
if (!i915_powersave)
@@ -7007,12 +6859,11 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
if (!crtc->fb)
continue;
- if (to_intel_framebuffer(crtc->fb)->obj == obj)
- intel_increase_pllclock(crtc);
+ intel_decrease_pllclock(crtc);
}
}
-void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
@@ -7025,7 +6876,7 @@ void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
continue;
if (to_intel_framebuffer(crtc->fb)->obj == obj)
- intel_decrease_pllclock(crtc);
+ intel_increase_pllclock(crtc);
}
}
@@ -7109,9 +6960,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
obj = work->old_fb_obj;
- atomic_clear_mask(1 << intel_crtc->plane,
- &obj->pending_flip.counter);
- wake_up(&dev_priv->pending_flip_queue);
+ wake_up_all(&dev_priv->pending_flip_queue);
queue_work(dev_priv->wq, &work->work);
@@ -7474,11 +7323,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->enable_stall_check = true;
- /* Block clients from rendering to the new back buffer until
- * the flip occurs and the object is no longer visible.
- */
- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
atomic_inc(&intel_crtc->unpin_work_count);
+ intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
@@ -7494,7 +7340,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
@@ -7514,7 +7359,6 @@ free_work:
static struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
- .disable = intel_crtc_noop,
};
bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
@@ -7904,16 +7748,21 @@ intel_modeset_check_state(struct drm_device *dev)
}
}
-bool intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
- bool ret = true;
+ int ret = 0;
+
+ saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
+ if (!saved_mode)
+ return -ENOMEM;
+ saved_hwmode = saved_mode + 1;
intel_modeset_affected_pipes(crtc, &modeset_pipes,
&prepare_pipes, &disable_pipes);
@@ -7924,8 +7773,8 @@ bool intel_set_mode(struct drm_crtc *crtc,
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
- saved_hwmode = crtc->hwmode;
- saved_mode = crtc->mode;
+ *saved_hwmode = crtc->hwmode;
+ *saved_mode = crtc->mode;
/* Hack: Because we don't (yet) support global modeset on multiple
* crtcs, we don't keep track of the new mode for more than one crtc.
@@ -7936,7 +7785,8 @@ bool intel_set_mode(struct drm_crtc *crtc,
if (modeset_pipes) {
adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
if (IS_ERR(adjusted_mode)) {
- return false;
+ ret = PTR_ERR(adjusted_mode);
+ goto out;
}
}
@@ -7962,11 +7812,11 @@ bool intel_set_mode(struct drm_crtc *crtc,
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- ret = !intel_crtc_mode_set(&intel_crtc->base,
- mode, adjusted_mode,
- x, y, fb);
- if (!ret)
- goto done;
+ ret = intel_crtc_mode_set(&intel_crtc->base,
+ mode, adjusted_mode,
+ x, y, fb);
+ if (ret)
+ goto done;
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -7987,16 +7837,23 @@ bool intel_set_mode(struct drm_crtc *crtc,
/* FIXME: add subpixel order */
done:
drm_mode_destroy(dev, adjusted_mode);
- if (!ret && crtc->enabled) {
- crtc->hwmode = saved_hwmode;
- crtc->mode = saved_mode;
+ if (ret && crtc->enabled) {
+ crtc->hwmode = *saved_hwmode;
+ crtc->mode = *saved_mode;
} else {
intel_modeset_check_state(dev);
}
+out:
+ kfree(saved_mode);
return ret;
}
+void intel_crtc_restore_mode(struct drm_crtc *crtc)
+{
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
+}
+
#undef for_each_intel_crtc_masked
static void intel_set_config_free(struct intel_set_config *config)
@@ -8109,7 +7966,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
struct intel_encoder *encoder;
int count, ro;
- /* The upper layers ensure that we either disabl a crtc or have a list
+ /* The upper layers ensure that we either disable a crtc or have a list
* of connectors. For paranoia, double-check this. */
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
@@ -8211,14 +8068,9 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
BUG_ON(!set->crtc);
BUG_ON(!set->crtc->helper_private);
- if (!set->mode)
- set->fb = NULL;
-
- /* The fb helper likes to play gross jokes with ->mode_set_config.
- * Unfortunately the crtc helper doesn't do much at all for this case,
- * so we have to cope with this madness until the fb helper is fixed up. */
- if (set->fb && set->num_connectors == 0)
- return 0;
+ /* Enforce sane interface api - has been abused by the fb helper. */
+ BUG_ON(!set->mode && set->fb);
+ BUG_ON(set->fb && set->num_connectors == 0);
if (set->fb) {
DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
@@ -8262,11 +8114,11 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
drm_mode_debug_printmodeline(set->mode);
}
- if (!intel_set_mode(set->crtc, set->mode,
- set->x, set->y, set->fb)) {
- DRM_ERROR("failed to set mode on [CRTC:%d]\n",
- set->crtc->base.id);
- ret = -EINVAL;
+ ret = intel_set_mode(set->crtc, set->mode,
+ set->x, set->y, set->fb);
+ if (ret) {
+ DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
+ set->crtc->base.id, ret);
goto fail;
}
} else if (config->fb_changed) {
@@ -8283,8 +8135,8 @@ fail:
/* Try to restore the config */
if (config->mode_changed &&
- !intel_set_mode(save_set.crtc, save_set.mode,
- save_set.x, save_set.y, save_set.fb))
+ intel_set_mode(save_set.crtc, save_set.mode,
+ save_set.x, save_set.y, save_set.fb))
DRM_ERROR("failed to restore config after modeset failure\n");
out_config:
@@ -8303,7 +8155,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
static void intel_cpu_pll_init(struct drm_device *dev)
{
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_ddi_pll_init(dev);
}
@@ -8439,11 +8291,10 @@ static void intel_setup_outputs(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, 0);
}
- if (!(IS_HASWELL(dev) &&
- (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+ if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
intel_crt_init(dev);
- if (IS_HASWELL(dev)) {
+ if (HAS_DDI(dev)) {
int found;
/* Haswell uses DDI functions to detect digital outputs */
@@ -8490,23 +8341,18 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
- int found;
-
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
- if (I915_READ(DP_C) & DP_DETECTED)
- intel_dp_init(dev, DP_C, PORT_C);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
- if (I915_READ(SDVOB) & PORT_DETECTED) {
- /* SDVOB multiplex with HDMIB */
- found = intel_sdvo_init(dev, SDVOB, true);
- if (!found)
- intel_hdmi_init(dev, SDVOB, PORT_B);
- if (!found && (I915_READ(DP_B) & DP_DETECTED))
- intel_dp_init(dev, DP_B, PORT_B);
+ if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
}
- if (I915_READ(SDVOC) & PORT_DETECTED)
- intel_hdmi_init(dev, SDVOC, PORT_C);
+ if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED)
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -8666,14 +8512,15 @@ int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
+ drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+ intel_fb->obj = obj;
+
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
- intel_fb->obj = obj;
return 0;
}
@@ -8703,7 +8550,7 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (IS_HASWELL(dev)) {
+ if (HAS_DDI(dev)) {
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
@@ -8765,8 +8612,9 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
- } else
- dev_priv->display.update_wm = NULL;
+ dev_priv->display.modeset_global_resources =
+ haswell_modeset_global_resources;
+ }
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
}
@@ -8888,6 +8736,18 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 5734Z must invert backlight brightness */
{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -8916,12 +8776,7 @@ static void i915_disable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u8 sr1;
- u32 vga_reg;
-
- if (HAS_PCH_SPLIT(dev))
- vga_reg = CPU_VGACNTRL;
- else
- vga_reg = VGACNTRL;
+ u32 vga_reg = i915_vgacntrl_reg(dev);
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(SR01, VGA_SR_INDEX);
@@ -8936,10 +8791,7 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
- /* We attempt to init the necessary power wells early in the initialization
- * time, so the subsystems that expect power to be enabled can work.
- */
- intel_init_power_wells(dev);
+ intel_init_power_well(dev);
intel_prepare_ddi(dev);
@@ -8981,7 +8833,7 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
- dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
+ dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
@@ -8999,6 +8851,9 @@ void intel_modeset_init(struct drm_device *dev)
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
+
+ /* Just in case the BIOS is doing something questionable. */
+ intel_disable_fbc(dev);
}
static void
@@ -9180,20 +9035,14 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
-static void i915_redisable_vga(struct drm_device *dev)
+void i915_redisable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 vga_reg;
-
- if (HAS_PCH_SPLIT(dev))
- vga_reg = CPU_VGACNTRL;
- else
- vga_reg = VGACNTRL;
+ u32 vga_reg = i915_vgacntrl_reg(dev);
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
- POSTING_READ(vga_reg);
+ i915_disable_vga(dev);
}
}
@@ -9209,7 +9058,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
struct intel_connector *connector;
- if (IS_HASWELL(dev)) {
+ if (HAS_DDI(dev)) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
if (tmp & TRANS_DDI_FUNC_ENABLE) {
@@ -9250,7 +9099,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
crtc->active ? "enabled" : "disabled");
}
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_ddi_setup_hw_pll_state(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9301,9 +9150,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
if (force_restore) {
for_each_pipe(pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- intel_set_mode(&crtc->base, &crtc->base.mode,
- crtc->base.x, crtc->base.y, crtc->base.fb);
+ intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
}
i915_redisable_vga(dev);
@@ -9367,6 +9214,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
flush_scheduled_work();
drm_mode_config_cleanup(dev);
+
+ intel_cleanup_overlay(dev);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fb3715b4b09d..f61cb7998c72 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -148,15 +148,6 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
return max_link_bw;
}
-static int
-intel_dp_link_clock(uint8_t link_bw)
-{
- if (link_bw == DP_LINK_BW_2_7)
- return 270000;
- else
- return 162000;
-}
-
/*
* The units on the numbers in the next two are... bizarre. Examples will
* make it clearer; this one parallels an example in the eDP spec.
@@ -191,7 +182,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
struct drm_display_mode *mode,
bool adjust_mode)
{
- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+ int max_link_clock =
+ drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
int max_rate, mode_rate;
@@ -330,6 +322,48 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
}
}
+static uint32_t
+intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = intel_dp->output_reg + 0x10;
+ uint32_t status;
+ bool done;
+
+ if (IS_HASWELL(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ ch_ctl = DPA_AUX_CH_CTL;
+ break;
+ case PORT_B:
+ ch_ctl = PCH_DPB_AUX_CH_CTL;
+ break;
+ case PORT_C:
+ ch_ctl = PCH_DPC_AUX_CH_CTL;
+ break;
+ case PORT_D:
+ ch_ctl = PCH_DPD_AUX_CH_CTL;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ if (has_aux_irq)
+ done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+ else
+ done = wait_for_atomic(C, 10) == 0;
+ if (!done)
+ DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
+ has_aux_irq);
+#undef C
+
+ return status;
+}
+
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
@@ -341,11 +375,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
- int i;
- int recv_bytes;
+ int i, ret, recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
int try, precharge;
+ bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+
+ /* dp aux is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency and so prevent the cpu from going into
+ * deep sleep states.
+ */
+ pm_qos_update_request(&dev_priv->pm_qos, 0);
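+ (Aside: the call above relies on a dev_priv->pm_qos request object registered elsewhere in the driver. A minimal, self-contained sketch of the same pm_qos idiom follows; the example_* names are hypothetical and not part of this patch.)
+
+ #include <linux/pm_qos.h>
+
+ /* Hold CPU wakeup latency at 0 around a latency-critical transfer,
+  * then restore the default so deep C-states are allowed again. */
+ static struct pm_qos_request example_qos;	/* hypothetical request object */
+
+ static void example_critical_transfer(void)
+ {
+ 	pm_qos_update_request(&example_qos, 0);
+ 	/* ... perform the latency-sensitive hardware access ... */
+ 	pm_qos_update_request(&example_qos, PM_QOS_DEFAULT_VALUE);
+ }
+
+ /* At driver init/teardown (hypothetical hooks):
+  *   pm_qos_add_request(&example_qos, PM_QOS_CPU_DMA_LATENCY,
+  *                      PM_QOS_DEFAULT_VALUE);
+  *   pm_qos_remove_request(&example_qos);
+  */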
if (IS_HASWELL(dev)) {
switch (intel_dig_port->port) {
@@ -379,7 +419,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
else if (IS_VALLEYVIEW(dev))
aux_clock_divider = 100;
@@ -399,7 +439,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
- status = I915_READ(ch_ctl);
+ status = I915_READ_NOTRACE(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
msleep(1);
@@ -408,7 +448,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
if (try == 3) {
WARN(1, "dp_aux_ch not started status 0x%08x\n",
I915_READ(ch_ctl));
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
/* Must try at least 3 times according to DP spec */
@@ -421,6 +462,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -428,12 +470,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
- for (;;) {
- status = I915_READ(ch_ctl);
- if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- break;
- udelay(100);
- }
+
+ status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
/* Clear done status and any errors */
I915_WRITE(ch_ctl,
@@ -451,7 +489,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
/* Check for timeout or receive error.
@@ -459,14 +498,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
/* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto out;
}
/* Unload any bytes sent back from the other side */
@@ -479,7 +520,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
unpack_aux(I915_READ(ch_data + i),
recv + i, recv_bytes - i);
- return recv_bytes;
+ ret = recv_bytes;
+out:
+ pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ return ret;
}
/* Write data to the aux channel in native mode */
@@ -718,16 +763,35 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+
+ if (intel_dp->color_range_auto) {
+ /*
+ * See:
+ * CEA-861-E - 5.1 Default Encoding Parameters
+ * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+ */
+ if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
+ intel_dp->color_range = DP_COLOR_RANGE_16_235;
+ else
+ intel_dp->color_range = 0;
+ }
+
+ if (intel_dp->color_range)
+ adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
- int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+ int link_bw_clock =
+ drm_dp_bw_code_to_link_rate(bws[clock]);
+ int link_avail = intel_dp_max_data_rate(link_bw_clock,
+ lane_count);
if (mode_rate <= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ adjusted_mode->clock = link_bw_clock;
DRM_DEBUG_KMS("DP link bw %02x lane "
"count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
@@ -742,39 +806,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
return false;
}
-struct intel_dp_m_n {
- uint32_t tu;
- uint32_t gmch_m;
- uint32_t gmch_n;
- uint32_t link_m;
- uint32_t link_n;
-};
-
-static void
-intel_reduce_ratio(uint32_t *num, uint32_t *den)
-{
- while (*num > 0xffffff || *den > 0xffffff) {
- *num >>= 1;
- *den >>= 1;
- }
-}
-
-static void
-intel_dp_compute_m_n(int bpp,
- int nlanes,
- int pixel_clock,
- int link_clock,
- struct intel_dp_m_n *m_n)
-{
- m_n->tu = 64;
- m_n->gmch_m = (pixel_clock * bpp) >> 3;
- m_n->gmch_n = link_clock * nlanes;
- intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
- m_n->link_m = pixel_clock;
- m_n->link_n = link_clock;
- intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
-}
-
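(Aside: the removed helper computed the DP data and link M/N ratios as gmch_m:gmch_n = (pixel_clock * bpp / 8) : (link_clock * nlanes) and link_m:link_n = pixel_clock : link_clock, halving both terms until they fit in 24 bits; the call site below now obtains the same values from the shared intel_link_compute_m_n(). As a worked example of those formulas: a 148500 kHz pixel clock at 24 bpp over 4 lanes at 270000 kHz gives gmch_m = 148500 * 24 / 8 = 445500 and gmch_n = 270000 * 4 = 1080000, both already below 0xffffff so no reduction is needed, with link_m/link_n = 148500/270000.)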
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -785,7 +816,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
- struct intel_dp_m_n m_n;
+ struct intel_link_m_n m_n;
int pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
@@ -808,8 +839,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
- intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
- mode->clock, adjusted_mode->clock, &m_n);
+ intel_link_compute_m_n(intel_crtc->bpp, lane_count,
+ mode->clock, adjusted_mode->clock, &m_n);
if (IS_HASWELL(dev)) {
I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
@@ -851,6 +882,32 @@ void intel_dp_init_link_config(struct intel_dp *intel_dp)
}
}
+static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl &= ~DP_PLL_FREQ_MASK;
+
+ if (clock < 200000) {
+ /* For a long time we've carried around an ILK-DevA w/a for the
+ * 160MHz clock. If we're really unlucky, it's still required.
+ */
+ DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
+ dpa_ctl |= DP_PLL_FREQ_160MHZ;
+ } else {
+ dpa_ctl |= DP_PLL_FREQ_270MHZ;
+ }
+
+ I915_WRITE(DP_A, dpa_ctl);
+
+ POSTING_READ(DP_A);
+ udelay(500);
+}
+
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -926,7 +983,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
- intel_dp->DP |= intel_dp->color_range;
+ if (!HAS_PCH_SPLIT(dev))
+ intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -950,6 +1008,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
+
+ if (is_cpu_edp(intel_dp))
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
@@ -1057,6 +1118,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_FORCE_VDD;
@@ -1543,7 +1606,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
}
static uint32_t
-intel_dp_signal_levels(uint8_t train_set)
+intel_gen4_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
@@ -1641,7 +1704,7 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
-intel_dp_signal_levels_hsw(uint8_t train_set)
+intel_hsw_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -1673,6 +1736,34 @@ intel_dp_signal_levels_hsw(uint8_t train_set)
}
}
+/* Properly updates "DP" with the correct signal levels. */
+static void
+intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ uint32_t signal_levels, mask;
+ uint8_t train_set = intel_dp->train_set[0];
+
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_hsw_signal_levels(train_set);
+ mask = DDI_BUF_EMP_MASK;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+ signal_levels = intel_gen7_edp_signal_levels(train_set);
+ mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set);
+ mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+ } else {
+ signal_levels = intel_gen4_signal_levels(train_set);
+ mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
+ }
+
+ DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+
+ *DP = (*DP & ~mask) | signal_levels;
+}
+
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
@@ -1696,14 +1787,18 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
- temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
- I915_WRITE(DP_TP_CTL(port), temp);
- if (wait_for((I915_READ(DP_TP_STATUS(port)) &
- DP_TP_STATUS_IDLE_DONE), 1))
- DRM_ERROR("Timed out waiting for DP idle patterns\n");
+ if (port != PORT_A) {
+ temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) &
+ DP_TP_STATUS_IDLE_DONE), 1))
+ DRM_ERROR("Timed out waiting for DP idle patterns\n");
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ }
- temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
break;
@@ -1791,7 +1886,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_ddi_prepare_link_retrain(encoder);
/* Write the link configuration data */
@@ -1809,24 +1904,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint8_t link_status[DP_LINK_STATUS_SIZE];
- uint32_t signal_levels;
-
- if (IS_HASWELL(dev)) {
- signal_levels = intel_dp_signal_levels_hsw(
- intel_dp->train_set[0]);
- DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
- } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
- signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
- } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
- signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
- } else {
- signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
- }
- DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
- signal_levels);
+
+ intel_dp_set_signal_levels(intel_dp, &DP);
/* Set training pattern 1 */
if (!intel_dp_set_link_train(intel_dp, DP,
@@ -1882,7 +1961,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
@@ -1892,8 +1970,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
cr_tries = 0;
channel_eq = false;
for (;;) {
- /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels;
uint8_t link_status[DP_LINK_STATUS_SIZE];
if (cr_tries > 5) {
@@ -1902,19 +1978,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_HASWELL(dev)) {
- signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
- DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
- } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
- signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
- } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
- signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
- } else {
- signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
- }
+ intel_dp_set_signal_levels(intel_dp, &DP);
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, DP,
@@ -1964,6 +2028,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t DP = intel_dp->DP;
/*
@@ -1981,7 +2047,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
* intel_ddi_prepare_link_retrain will take care of redoing the link
* train.
*/
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
return;
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -1998,7 +2064,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
}
POSTING_READ(intel_dp->output_reg);
- msleep(17);
+ /* We don't really know why we're doing this */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -2018,19 +2085,14 @@ intel_dp_link_down(struct intel_dp *intel_dp)
/* Changes to enable or select take place the vblank
* after being written.
*/
- if (crtc == NULL) {
- /* We can arrive here never having been attached
- * to a CRTC, for instance, due to inheriting
- * random state from the BIOS.
- *
- * If the pipe is not running, play safe and
- * wait for the clocks to stabilise before
- * continuing.
- */
+ if (WARN_ON(crtc == NULL)) {
+ /* We should never try to disable a port without a crtc
+ * attached. For paranoia keep the code around for a
+ * bit. */
POSTING_READ(intel_dp->output_reg);
msleep(50);
} else
- intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
}
DP &= ~DP_AUDIO_OUTPUT_ENABLE;
@@ -2042,10 +2104,16 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
+ char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
+
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) == 0)
return false; /* aux transfer failed */
+ hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
@@ -2206,6 +2274,8 @@ static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum drm_connector_status status;
/* Can't disconnect eDP, but you can close the lid... */
@@ -2216,6 +2286,9 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
return status;
}
+ if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+ return connector_status_disconnected;
+
return intel_dp_detect_dpcd(intel_dp);
}
@@ -2224,17 +2297,18 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
uint32_t bit;
- switch (intel_dp->output_reg) {
- case DP_B:
- bit = DPB_HOTPLUG_LIVE_STATUS;
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS;
break;
- case DP_C:
- bit = DPC_HOTPLUG_LIVE_STATUS;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS;
break;
- case DP_D:
- bit = DPD_HOTPLUG_LIVE_STATUS;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS;
break;
default:
return connector_status_unknown;
@@ -2290,13 +2364,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
return intel_ddc_get_modes(connector, adapter);
}
-
-/**
- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
- *
- * \return true if DP port is connected.
- * \return false if DP port is disconnected.
- */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
@@ -2306,7 +2373,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
struct edid *edid = NULL;
- char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
intel_dp->has_audio = false;
@@ -2315,10 +2381,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
else
status = g4x_dp_detect(intel_dp);
- hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
- 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
- DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
-
if (status != connector_status_connected)
return status;
@@ -2419,10 +2481,21 @@ intel_dp_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
- if (val == !!intel_dp->color_range)
- return 0;
-
- intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_dp->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_dp->color_range_auto = false;
+ intel_dp->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_dp->color_range_auto = false;
+ intel_dp->color_range = DP_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
goto done;
}
@@ -2445,11 +2518,8 @@ intel_dp_set_property(struct drm_connector *connector,
return -EINVAL;
done:
- if (intel_encoder->base.crtc) {
- struct drm_crtc *crtc = intel_encoder->base.crtc;
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
- }
+ if (intel_encoder->base.crtc)
+ intel_crtc_restore_mode(intel_encoder->base.crtc);
return 0;
}
@@ -2491,7 +2561,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.mode_fixup = intel_dp_mode_fixup,
.mode_set = intel_dp_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -2566,6 +2635,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
+ intel_dp->color_range_auto = true;
if (is_edp(intel_dp)) {
drm_mode_create_scaling_mode_property(connector->dev);
@@ -2755,7 +2825,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -2767,15 +2837,15 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
name = "DPDDC-A";
break;
case PORT_B:
- dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case PORT_C:
- dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case PORT_D:
- dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
default:
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8a1bd4a3ad0d..07ebac6fe8ca 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -109,6 +109,11 @@
* timings in the mode to prevent the crtc fixup from overwriting them.
* Currently only lvds needs that. */
#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
+/*
+ * Set when limited 16-235 (as opposed to full 0-255) RGB color range is
+ * to be used.
+ */
+#define INTEL_MODE_LIMITED_COLOR_RANGE (0x40)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
@@ -153,6 +158,7 @@ struct intel_encoder {
bool cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
+ void (*pre_pll_enable)(struct intel_encoder *);
void (*pre_enable)(struct intel_encoder *);
void (*enable)(struct intel_encoder *);
void (*disable)(struct intel_encoder *);
@@ -205,6 +211,7 @@ struct intel_crtc {
* some outputs connected to this crtc.
*/
bool active;
+ bool eld_vld;
bool primary_disabled; /* is the crtc obscured by a plane? */
bool lowfreq_avail;
struct intel_overlay *overlay;
@@ -228,6 +235,9 @@ struct intel_crtc {
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
uint32_t ddi_pll_sel;
+
+ /* reset counter value when the last flip was submitted */
+ unsigned int reset_counter;
};
struct intel_plane {
@@ -283,6 +293,9 @@ struct cxsr_latency {
#define DIP_LEN_AVI 13
#define DIP_AVI_PR_1 0
#define DIP_AVI_PR_2 1
+#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
+#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
+#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
@@ -337,9 +350,11 @@ struct intel_hdmi {
u32 sdvox_reg;
int ddc_bus;
uint32_t color_range;
+ bool color_range_auto;
bool has_hdmi_sink;
bool has_audio;
enum hdmi_force_audio force_audio;
+ bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
void (*set_infoframes)(struct drm_encoder *encoder,
@@ -356,6 +371,7 @@ struct intel_dp {
bool has_audio;
enum hdmi_force_audio force_audio;
uint32_t color_range;
+ bool color_range_auto;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
@@ -377,6 +393,7 @@ struct intel_dp {
struct intel_digital_port {
struct intel_encoder base;
enum port port;
+ u32 port_reversal;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
@@ -439,10 +456,10 @@ extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev);
-extern void intel_mark_idle(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
-extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
+extern void intel_mark_idle(struct drm_device *dev);
extern bool intel_lvds_init(struct drm_device *dev);
+extern bool intel_is_dual_link_lvds(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -502,12 +519,12 @@ struct intel_set_config {
bool mode_changed;
};
-extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *old_fb);
+extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *old_fb);
extern void intel_modeset_disable(struct drm_device *dev);
+extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
-extern void intel_encoder_noop(struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
@@ -546,6 +563,9 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port);
+
extern void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -589,6 +609,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
@@ -627,9 +648,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
-extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
- unsigned int bpp,
- unsigned int pitch);
+extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int bpp,
+ unsigned int pitch);
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -648,7 +670,8 @@ extern void intel_update_fbc(struct drm_device *dev);
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
-extern void intel_init_power_wells(struct drm_device *dev);
+extern void intel_init_power_well(struct drm_device *dev);
+extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 15da99533e5b..00e70dbe82da 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -345,7 +345,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.mode_fixup = intel_dvo_mode_fixup,
.mode_set = intel_dvo_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7b30b5c2c4ee..981bdce3634e 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -57,9 +57,10 @@ static struct fb_ops intelfb_ops = {
.fb_debug_leave = drm_fb_helper_debug_leave,
};
-static int intelfb_create(struct intel_fbdev *ifbdev,
+static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
struct drm_device *dev = ifbdev->helper.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
@@ -83,7 +84,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- obj = i915_gem_alloc_object(dev, size);
+ obj = i915_gem_object_create_stolen(dev, size);
+ if (obj == NULL)
+ obj = i915_gem_alloc_object(dev, size);
if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
@@ -133,14 +136,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
- info->apertures->ranges[0].size =
- dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
size);
if (!info->screen_base) {
ret = -ENOSPC;
@@ -153,6 +155,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+ /* If the object is shmemfs backed, it will have given us zeroed pages.
+ * If the object is stolen however, it will be full of whatever
+ * garbage was left in there.
+ */
+ if (ifbdev->ifb.obj->stolen)
+ memset_io(info->screen_base, 0, info->screen_size);
+
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
@@ -173,26 +182,10 @@ out:
return ret;
}
-static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = intelfb_create(ifbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
- .fb_probe = intel_fb_find_or_create_single,
+ .fb_probe = intelfb_create,
};
static void intel_fbdev_destroy(struct drm_device *dev,
@@ -212,6 +205,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&ifbdev->helper);
+ drm_framebuffer_unregister_private(&ifb->base);
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
drm_gem_object_unreference_unlocked(&ifb->obj->base);
@@ -241,10 +235,18 @@ int intel_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
- drm_fb_helper_initial_config(&ifbdev->helper, 32);
+
return 0;
}
+void intel_fbdev_initial_config(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Due to peculiar init order wrt hpd handling this is separate. */
+ drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
+}
+
void intel_fbdev_fini(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -280,7 +282,7 @@ void intel_fb_restore_mode(struct drm_device *dev)
struct drm_mode_config *config = &dev->mode_config;
struct drm_plane *plane;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
if (ret)
@@ -288,7 +290,8 @@ void intel_fb_restore_mode(struct drm_device *dev)
/* Be sure to shut off any planes that may be active */
list_for_each_entry(plane, &config->plane_list, head)
- plane->funcs->disable_plane(plane);
+ if (plane->enabled)
+ plane->funcs->disable_plane(plane);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2ee9821b9d93..fa8ec4a26041 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -48,7 +48,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t enabled_bits;
- enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+ enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
@@ -331,6 +331,7 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
@@ -340,7 +341,14 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
- avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
+ if (intel_hdmi->rgb_quant_range_selectable) {
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+ else
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+ }
+
+ avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
intel_set_infoframe(encoder, &avi_if);
}
@@ -364,7 +372,8 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
u32 port;
@@ -391,11 +400,11 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
return;
}
- switch (intel_hdmi->sdvox_reg) {
- case SDVOB:
+ switch (intel_dig_port->port) {
+ case PORT_B:
port = VIDEO_DIP_PORT_B;
break;
- case SDVOC:
+ case PORT_C:
port = VIDEO_DIP_PORT_C;
break;
default:
@@ -428,7 +437,8 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port;
@@ -447,14 +457,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
return;
}
- switch (intel_hdmi->sdvox_reg) {
- case HDMIB:
+ switch (intel_dig_port->port) {
+ case PORT_B:
port = VIDEO_DIP_PORT_B;
break;
- case HDMIC:
+ case PORT_C:
port = VIDEO_DIP_PORT_C;
break;
- case HDMID:
+ case PORT_D:
port = VIDEO_DIP_PORT_D;
break;
default:
@@ -766,46 +776,38 @@ bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- return true;
-}
-
-static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
-{
- struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- switch (intel_hdmi->sdvox_reg) {
- case SDVOB:
- bit = HDMIB_HOTPLUG_LIVE_STATUS;
- break;
- case SDVOC:
- bit = HDMIC_HOTPLUG_LIVE_STATUS;
- break;
- default:
- bit = 0;
- break;
+ if (intel_hdmi->color_range_auto) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ if (intel_hdmi->has_hdmi_sink &&
+ drm_match_cea_mode(adjusted_mode) > 1)
+ intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
+ else
+ intel_hdmi->color_range = 0;
}
- return I915_READ(PORT_HOTPLUG_STAT) & bit;
+ if (intel_hdmi->color_range)
+ adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+
+ return true;
}
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
+ struct drm_device *dev = connector->dev;
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
- if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
- return status;
-
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
+ intel_hdmi->rgb_quant_range_selectable = false;
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
@@ -817,6 +819,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi->has_hdmi_sink =
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+ intel_hdmi->rgb_quant_range_selectable =
+ drm_rgb_quant_range_selectable(edid);
}
kfree(edid);
}
@@ -902,21 +906,29 @@ intel_hdmi_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
- if (val == !!intel_hdmi->color_range)
- return 0;
-
- intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_hdmi->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_hdmi->color_range_auto = false;
+ intel_hdmi->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_hdmi->color_range_auto = false;
+ intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
goto done;
}
return -EINVAL;
done:
- if (intel_dig_port->base.base.crtc) {
- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
- }
+ if (intel_dig_port->base.base.crtc)
+ intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
return 0;
}
@@ -931,7 +943,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_hdmi_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -957,6 +968,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
{
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
+ intel_hdmi->color_range_auto = true;
}
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -980,15 +992,15 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
break;
case PORT_C:
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
break;
case PORT_D:
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
break;
case PORT_A:
/* Internal port only for eDP. */
@@ -1013,7 +1025,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3ef5af15b812..acf8aec9ada7 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -63,6 +63,7 @@ intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -202,6 +203,68 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
algo->data = bus;
}
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4)
+static int
+gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
+ u32 gmbus2_status,
+ u32 gmbus4_irq_en)
+{
+ int i;
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u32 gmbus2 = 0;
+ DEFINE_WAIT(wait);
+
+ /* Important: The hw handles only the first bit, so set only one! Since
+ * we also need to check for NAKs besides the hw ready/idle signal, we
+ * need to wake up periodically and check that ourselves. */
+ I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
+
+ for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
+ prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
+ if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
+ break;
+
+ schedule_timeout(1);
+ }
+ finish_wait(&dev_priv->gmbus_wait_queue, &wait);
+
+ I915_WRITE(GMBUS4 + reg_offset, 0);
+
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+ if (gmbus2 & gmbus2_status)
+ return 0;
+ return -ETIMEDOUT;
+}
+
+static int
+gmbus_wait_idle(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ int reg_offset = dev_priv->gpio_mmio_base;
+
+#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
+
+ if (!HAS_GMBUS_IRQ(dev_priv->dev))
+ return wait_for(C, 10);
+
+ /* Important: The hw handles only the first bit, so set only one! */
+ I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
+
+ ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+
+ I915_WRITE(GMBUS4 + reg_offset, 0);
+
+ if (ret)
+ return 0;
+ else
+ return -ETIMEDOUT;
+#undef C
+}
+
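+ (Aside: gmbus_wait_hw_status() above combines an interrupt-driven sleep with periodic polling, because the NAK condition is not covered by the single enabled interrupt bit. A minimal standalone sketch of that prepare_to_wait()/schedule_timeout() idiom follows; the example_* names and the poll_status callback are hypothetical, not i915 code.)
+
+ #include <linux/wait.h>
+ #include <linux/sched.h>
+ #include <linux/jiffies.h>
+ #include <linux/errno.h>
+
+ static DECLARE_WAIT_QUEUE_HEAD(example_wq);	/* woken from the IRQ handler */
+
+ static int example_wait_ready(bool (*poll_status)(void))
+ {
+ 	DEFINE_WAIT(wait);
+ 	int i;
+
+ 	/* Sleep on the queue, but wake at least once per jiffy to poll
+ 	 * for conditions the interrupt does not signal. */
+ 	for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
+ 		prepare_to_wait(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
+ 		if (poll_status())
+ 			break;
+ 		schedule_timeout(1);
+ 	}
+ 	finish_wait(&example_wq, &wait);
+
+ 	return poll_status() ? 0 : -ETIMEDOUT;
+ }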
static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
u32 gmbus1_index)
@@ -219,15 +282,11 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
while (len) {
int ret;
u32 val, loop = 0;
- u32 gmbus2;
- ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
- (GMBUS_SATOER | GMBUS_HW_RDY),
- 50);
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+ GMBUS_HW_RDY_EN);
if (ret)
- return -ETIMEDOUT;
- if (gmbus2 & GMBUS_SATOER)
- return -ENXIO;
+ return ret;
val = I915_READ(GMBUS3 + reg_offset);
do {
@@ -261,7 +320,6 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
- u32 gmbus2;
val = loop = 0;
do {
@@ -270,13 +328,10 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
I915_WRITE(GMBUS3 + reg_offset, val);
- ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
- (GMBUS_SATOER | GMBUS_HW_RDY),
- 50);
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+ GMBUS_HW_RDY_EN);
if (ret)
- return -ETIMEDOUT;
- if (gmbus2 & GMBUS_SATOER)
- return -ENXIO;
+ return ret;
}
return 0;
}
@@ -345,8 +400,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
for (i = 0; i < num; i++) {
- u32 gmbus2;
-
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
i += 1; /* set i to the index of the read xfer */
@@ -361,13 +414,12 @@ gmbus_xfer(struct i2c_adapter *adapter,
if (ret == -ENXIO)
goto clear_err;
- ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
- (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
- 50);
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
+ GMBUS_HW_WAIT_EN);
+ if (ret == -ENXIO)
+ goto clear_err;
if (ret)
goto timeout;
- if (gmbus2 & GMBUS_SATOER)
- goto clear_err;
}
/* Generate a STOP condition on the bus. Note that gmbus can't generate
@@ -380,8 +432,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
- if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
- 10)) {
+ if (gmbus_wait_idle(dev_priv)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
adapter->name);
ret = -ETIMEDOUT;
@@ -405,8 +456,7 @@ clear_err:
* it's slow responding and only answers on the 2nd retry.
*/
ret = -ENXIO;
- if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
- 10)) {
+ if (gmbus_wait_idle(dev_priv)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
adapter->name);
ret = -ETIMEDOUT;
@@ -465,10 +515,13 @@ int intel_setup_gmbus(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+ else if (IS_VALLEYVIEW(dev))
+ dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else
dev_priv->gpio_mmio_base = 0;
mutex_init(&dev_priv->gmbus_mutex);
+ init_waitqueue_head(&dev_priv->gmbus_wait_queue);
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 17aee74258ad..3d1d97488cc9 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,7 +51,8 @@ struct intel_lvds_encoder {
u32 pfit_control;
u32 pfit_pgm_ratios;
- bool pfit_dirty;
+ bool is_dual_link;
+ u32 reg;
struct intel_lvds_connector *attached_connector;
};
@@ -71,15 +72,10 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 lvds_reg, tmp;
-
- if (HAS_PCH_SPLIT(dev)) {
- lvds_reg = PCH_LVDS;
- } else {
- lvds_reg = LVDS;
- }
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ u32 tmp;
- tmp = I915_READ(lvds_reg);
+ tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN))
return false;
@@ -92,6 +88,91 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
return true;
}
+/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+{
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *fixed_mode =
+ lvds_encoder->attached_connector->base.panel.fixed_mode;
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ temp = I915_READ(lvds_encoder->reg);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+ } else {
+ if (pipe == 1) {
+ temp |= LVDS_PIPEB_SELECT;
+ } else {
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+ }
+
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (lvds_encoder->is_dual_link)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+
+ /* Set the dithering flag on LVDS as needed, note that there is no
+ * special lvds dither control bit on pch-split platforms, dithering is
+ * only controlled through the PIPECONF reg. */
+ if (INTEL_INFO(dev)->gen == 4) {
+ if (dev_priv->lvds_dither)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+
+ I915_WRITE(lvds_encoder->reg, temp);
+}
+
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_PCH_SPLIT(dev) || !enc->pfit_control)
+ return;
+
+ /*
+ * Enable automatic panel scaling so that non-native modes
+ * fill the screen. The panel fitter should only be
+ * adjusted whilst the pipe is disabled, according to
+ * register description and PRM.
+ */
+ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+ enc->pfit_control,
+ enc->pfit_pgm_ratios);
+
+ I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, enc->pfit_control);
+}
+
/**
* Sets the power state for the panel.
*/
@@ -101,38 +182,20 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, lvds_reg, stat_reg;
+ u32 ctl_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
- lvds_reg = PCH_LVDS;
stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
- lvds_reg = LVDS;
stat_reg = PP_STATUS;
}
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
-
- if (lvds_encoder->pfit_dirty) {
- /*
- * Enable automatic panel scaling so that non-native modes
- * fill the screen. The panel fitter should only be
- * adjusted whilst the pipe is disabled, according to
- * register description and PRM.
- */
- DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
- lvds_encoder->pfit_control,
- lvds_encoder->pfit_pgm_ratios);
-
- I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
- lvds_encoder->pfit_dirty = false;
- }
+ I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
- POSTING_READ(lvds_reg);
+ POSTING_READ(lvds_encoder->reg);
if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
@@ -144,15 +207,13 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, lvds_reg, stat_reg;
+ u32 ctl_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
- lvds_reg = PCH_LVDS;
stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
- lvds_reg = LVDS;
stat_reg = PP_STATUS;
}
@@ -162,13 +223,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
- if (lvds_encoder->pfit_control) {
- I915_WRITE(PFIT_CONTROL, 0);
- lvds_encoder->pfit_dirty = true;
- }
-
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
- POSTING_READ(lvds_reg);
+ I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
+ POSTING_READ(lvds_encoder->reg);
}
static int intel_lvds_mode_valid(struct drm_connector *connector,
@@ -406,7 +462,6 @@ out:
pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
lvds_encoder->pfit_control = pfit_control;
lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
- lvds_encoder->pfit_dirty = true;
}
dev_priv->lvds_border_bits = border;
@@ -492,13 +547,14 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
};
/*
- * Lid events. Note the use of 'modeset_on_lid':
- * - we set it on lid close, and reset it on open
+ * Lid events. Note the use of 'modeset':
+ * - we set it to MODESET_ON_LID_OPEN on lid close,
+ * and set it to MODESET_DONE on open
* - we use it as a "only once" bit (ie we ignore
- * duplicate events where it was already properly
- * set/reset)
- * - the suspend/resume paths will also set it to
- * zero, since they restore the mode ("lid open").
+ * duplicate events where it was already properly set)
+ * - the suspend/resume paths will set it to
+ * MODESET_SUSPENDED and ignore the lid open event,
+ * because they restore the mode ("lid open").
*/
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
@@ -512,6 +568,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ if (dev_priv->modeset_restore == MODESET_SUSPENDED)
+ goto exit;
/*
* check and update the status of LVDS connector after receiving
* the LID notification event.
@@ -520,21 +579,24 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
- return NOTIFY_OK;
+ goto exit;
if (!acpi_lid_open()) {
- dev_priv->modeset_on_lid = 1;
- return NOTIFY_OK;
+ /* do modeset on next lid open event */
+ dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
+ goto exit;
}
- if (!dev_priv->modeset_on_lid)
- return NOTIFY_OK;
-
- dev_priv->modeset_on_lid = 0;
+ if (dev_priv->modeset_restore == MODESET_DONE)
+ goto exit;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
+
+ dev_priv->modeset_restore = MODESET_DONE;
+exit:
+ mutex_unlock(&dev_priv->modeset_restore_lock);
return NOTIFY_OK;
}
@@ -591,8 +653,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ intel_crtc_restore_mode(crtc);
}
}
@@ -602,7 +663,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
.mode_fixup = intel_lvds_mode_fixup,
.mode_set = intel_lvds_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -895,6 +955,66 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
return false;
}
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+ },
+ },
+ { } /* terminating entry */
+};
+
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_lvds_encoder *lvds_encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->type == INTEL_OUTPUT_LVDS) {
+ lvds_encoder = to_lvds_encoder(&encoder->base);
+
+ return lvds_encoder->is_dual_link;
+ }
+ }
+
+ return false;
+}
+
+static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
+{
+ struct drm_device *dev = lvds_encoder->base.base.dev;
+ unsigned int val;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* use the module option value if specified */
+ if (i915_lvds_channel_mode > 0)
+ return i915_lvds_channel_mode == 2;
+
+ if (dmi_check_system(intel_dual_link_lvds))
+ return true;
+
+ /* BIOS should set the proper LVDS register value at boot, but
+ * in reality, it doesn't set the value when the lid is closed;
+ * we need to check "the value to be set" in VBT when LVDS
+ * register is uninitialized.
+ */
+ val = I915_READ(lvds_encoder->reg);
+ if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+ val = dev_priv->bios_lvds_val;
+
+ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
static bool intel_lvds_supported(struct drm_device *dev)
{
/* With the introduction of the PCH we gained a dedicated
@@ -980,6 +1100,8 @@ bool intel_lvds_init(struct drm_device *dev)
DRM_MODE_ENCODER_LVDS);
intel_encoder->enable = intel_enable_lvds;
+ intel_encoder->pre_enable = intel_pre_enable_lvds;
+ intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1001,6 +1123,12 @@ bool intel_lvds_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
+ if (HAS_PCH_SPLIT(dev)) {
+ lvds_encoder->reg = PCH_LVDS;
+ } else {
+ lvds_encoder->reg = LVDS;
+ }
+
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
drm_object_attach_property(&connector->base,
@@ -1101,6 +1229,10 @@ bool intel_lvds_init(struct drm_device *dev)
goto failed;
out:
+ lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
+ DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
+ lvds_encoder->is_dual_link ? "dual" : "single");
+
/*
* Unlock registers and just
* leave them unlocked
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index b00f1c83adce..0e860f39933d 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -28,7 +28,6 @@
#include <linux/fb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
-#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drv.h"
@@ -101,8 +100,9 @@ intel_attach_force_audio_property(struct drm_connector *connector)
}
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
- { 0, "Full" },
- { 1, "Limited 16:235" },
+ { INTEL_BROADCAST_RGB_AUTO, "Automatic" },
+ { INTEL_BROADCAST_RGB_FULL, "Full" },
+ { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
};
void
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 7741c22c934c..4d338740f2cb 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -347,7 +347,7 @@ static void intel_didl_outputs(struct drm_device *dev)
int i = 0;
handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+ if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
if (acpi_is_video_device(acpi_dev))
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d7bc817f51a0..67a2501d519d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -195,7 +195,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
- regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
+ regs = io_mapping_map_wc(dev_priv->gtt.mappable,
overlay->reg_bo->gtt_offset);
return regs;
@@ -1045,13 +1045,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
}
if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
ret = intel_overlay_switch_off(overlay);
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1075,7 +1075,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_free;
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
if (new_bo->tiling_mode) {
@@ -1157,7 +1157,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_unlock;
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
kfree(params);
@@ -1165,7 +1165,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
drm_gem_object_unreference_unlocked(&new_bo->base);
out_free:
kfree(params);
@@ -1241,7 +1241,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
return -ENODEV;
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
ret = -EINVAL;
@@ -1307,7 +1307,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
ret = 0;
out_unlock:
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1333,8 +1333,10 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->dev = dev;
- reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
- if (!reg_bo)
+ reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+ if (reg_bo == NULL)
+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+ if (reg_bo == NULL)
goto out_free;
overlay->reg_bo = reg_bo;
@@ -1432,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_obj->handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
overlay->reg_bo->gtt_offset);
return regs;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index bee8cb6108a7..a3730e0289e5 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -321,6 +321,9 @@ void intel_panel_enable_backlight(struct drm_device *dev,
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+ dev_priv->backlight_enabled = true;
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
@@ -356,12 +359,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
}
set_level:
- /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
- * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
- * registers are set.
+ /* Check the current backlight level and try to set again if it's zero.
+ * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically
+ * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written.
*/
- dev_priv->backlight_enabled = true;
- intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+ if (!intel_panel_get_backlight(dev))
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
static void intel_panel_init_backlight(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3280cffe50f4..61fee7fcdc2c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -447,12 +447,6 @@ void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
- if (intel_fb->obj->base.size > dev_priv->cfb_size) {
- DRM_DEBUG_KMS("framebuffer too large, disabling "
- "compression\n");
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- goto out_disable;
- }
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
@@ -486,6 +480,14 @@ void intel_update_fbc(struct drm_device *dev)
if (in_dbg_master())
goto out_disable;
+ if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+ DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
+ DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+ DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ goto out_disable;
+ }
+
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
@@ -533,6 +535,7 @@ out_disable:
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_disable_fbc(dev);
}
+ i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -2286,7 +2289,6 @@ err_unpin:
i915_gem_object_unpin(ctx);
err_unref:
drm_gem_object_unreference(&ctx->base);
- mutex_unlock(&dev->struct_mutex);
return NULL;
}
@@ -3581,6 +3583,19 @@ static void cpt_init_clock_gating(struct drm_device *dev)
}
}
+static void gen6_check_mch_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(MCH_SSKPD);
+ if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
+ DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
+ DRM_INFO("This can cause pipe underruns and display issues.\n");
+ DRM_INFO("Please upgrade your BIOS to fix this.\n");
+ }
+}
+
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3673,6 +3688,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
cpt_init_clock_gating(dev);
+
+ gen6_check_mch_setup(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3684,6 +3701,10 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
reg |= GEN7_FF_VS_SCHED_HW;
reg |= GEN7_FF_DS_SCHED_HW;
+ /* WaVSRefCountFullforceMissDisable */
+ if (IS_HASWELL(dev_priv->dev))
+ reg &= ~GEN7_FF_VS_REF_CNT_FFME;
+
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
@@ -3854,6 +3875,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
cpt_init_clock_gating(dev);
+
+ gen6_check_mch_setup(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
@@ -4047,35 +4070,57 @@ void intel_init_clock_gating(struct drm_device *dev)
dev_priv->display.init_clock_gating(dev);
}
-/* Starting with Haswell, we have different power wells for
- * different parts of the GPU. This attempts to enable them all.
- */
-void intel_init_power_wells(struct drm_device *dev)
+void intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long power_wells[] = {
- HSW_PWR_WELL_CTL1,
- HSW_PWR_WELL_CTL2,
- HSW_PWR_WELL_CTL4
- };
- int i;
+ bool is_enabled, enable_requested;
+ uint32_t tmp;
if (!IS_HASWELL(dev))
return;
- mutex_lock(&dev->struct_mutex);
+ tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+ is_enabled = tmp & HSW_PWR_WELL_STATE;
+ enable_requested = tmp & HSW_PWR_WELL_ENABLE;
- for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
- int well = I915_READ(power_wells[i]);
+ if (enable) {
+ if (!enable_requested)
+ I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
- if ((well & HSW_PWR_WELL_STATE) == 0) {
- I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
- if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
- DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
+ if (!is_enabled) {
+ DRM_DEBUG_KMS("Enabling power well\n");
+ if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+ HSW_PWR_WELL_STATE), 20))
+ DRM_ERROR("Timeout enabling power well\n");
+ }
+ } else {
+ if (enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+ DRM_DEBUG_KMS("Requesting to disable the power well\n");
}
}
+}
- mutex_unlock(&dev->struct_mutex);
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+void intel_init_power_well(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_HASWELL(dev))
+ return;
+
+ /* For now, we need the power well to be always enabled. */
+ intel_set_power_well(dev, true);
+
+ /* We're taking over the BIOS, so clear any requests made by it since
+ * the driver is in charge now. */
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
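
[Illustrative sketch, not part of the patch.] intel_set_power_well() follows a request/acknowledge pattern: the driver sets an enable-request bit and then polls a separate state bit until the hardware reports the well as powered, while a disable is only a request that takes effect once no register asks for the well anymore. The sketch below models that handshake against a fake register; the bit names, the fake hardware behaviour and the retry budget (standing in for the driver's wait_for() timeout) are assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PWR_WELL_ENABLE_REQ (1u << 31)   /* hypothetical request bit */
#define PWR_WELL_STATE      (1u << 30)   /* hypothetical status bit */

static uint32_t fake_reg;                /* stands in for the driver request register */

/* Fake hardware: the state bit simply follows the request bit. */
static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v ? (v | PWR_WELL_STATE) : 0; }

static int set_power_well(bool enable)
{
        bool requested = reg_read() & PWR_WELL_ENABLE_REQ;
        int retries = 20;

        if (enable) {
                if (!requested)
                        reg_write(PWR_WELL_ENABLE_REQ);
                while (!(reg_read() & PWR_WELL_STATE) && retries--)
                        ;               /* the driver sleeps between polls */
                return (reg_read() & PWR_WELL_STATE) ? 0 : -1;
        }
        if (requested)
                reg_write(0);           /* only a request; hw powers down when idle */
        return 0;
}

int main(void)
{
        printf("enable: %d, reg 0x%08x\n", set_power_well(true), fake_reg);
        printf("disable: %d, reg 0x%08x\n", set_power_well(false), fake_reg);
        return 0;
}
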
/* Set up chip specific power management-related functions */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 42ff97d667d2..1d5d613eb6be 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -318,6 +318,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
@@ -331,7 +332,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, scratch_addr);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
@@ -467,6 +468,9 @@ init_pipe_control(struct intel_ring_buffer *ring)
if (pc->cpu_page == NULL)
goto err_unpin;
+ DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
+ ring->name, pc->gtt_offset);
+
pc->obj = obj;
ring->private = pc;
return 0;
@@ -613,6 +617,13 @@ gen6_add_request(struct intel_ring_buffer *ring)
return 0;
}
+static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+ u32 seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->last_seqno < seqno;
+}
+
/**
* intel_ring_sync - sync the waiter to the signaller on seqno
*
@@ -643,11 +654,20 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
if (ret)
return ret;
- intel_ring_emit(waiter,
- dw1 | signaller->semaphore_register[waiter->id]);
- intel_ring_emit(waiter, seqno);
- intel_ring_emit(waiter, 0);
- intel_ring_emit(waiter, MI_NOOP);
+ /* If seqno wrap happened, omit the wait with no-ops */
+ if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+ intel_ring_emit(waiter,
+ dw1 |
+ signaller->semaphore_register[waiter->id]);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter, 0);
+ intel_ring_emit(waiter, MI_NOOP);
+ } else {
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ }
intel_ring_advance(waiter);
return 0;
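
[Illustrative sketch, not part of the patch.] The new i915_gem_has_seqno_wrapped() check exists because the hardware semaphore wait compares 32-bit sequence numbers unsignedly and would risk stalling forever on a target issued before a wrap, so the driver emits NOPs instead of a wait in that case. On the CPU side the usual wrap-tolerant comparison is the signed-difference idiom shown below (the i915_seqno_passed pattern); the sketch is self-contained and only demonstrates the arithmetic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if seqno a is at or after seqno b, tolerating 32-bit wrap-around
 * as long as the two values are less than 2^31 apart. */
static bool seqno_passed(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) >= 0;
}

int main(void)
{
        printf("%d\n", seqno_passed(10, 5));            /* 1 */
        printf("%d\n", seqno_passed(5, 0xfffffff0u));   /* 1: 5 is "after" a wrap */
        printf("%d\n", seqno_passed(0xfffffff0u, 5));   /* 0 */
        return 0;
}
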
@@ -728,6 +748,12 @@ ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
+static void
+ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
@@ -735,6 +761,13 @@ pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
return pc->cpu_page[0];
}
+static void
+pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ struct pipe_control *pc = ring->private;
+ pc->cpu_page[0] = seqno;
+}
+
static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
@@ -1164,7 +1197,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
return ret;
}
- obj = i915_gem_alloc_object(dev, ring->size);
+ obj = NULL;
+ if (!HAS_LLC(dev))
+ obj = i915_gem_object_create_stolen(dev, ring->size);
+ if (obj == NULL)
+ obj = i915_gem_alloc_object(dev, ring->size);
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
ret = -ENOMEM;
@@ -1182,7 +1219,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin;
ring->virtual_start =
- ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1348,7 +1385,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
msleep(1);
- ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
if (ret)
return ret;
} while (!time_after(jiffies, end));
@@ -1410,14 +1448,35 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
}
+static int __intel_ring_begin(struct intel_ring_buffer *ring,
+ int bytes)
+{
+ int ret;
+
+ if (unlikely(ring->tail + bytes > ring->effective_size)) {
+ ret = intel_wrap_ring_buffer(ring);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (unlikely(ring->space < bytes)) {
+ ret = ring_wait_for_space(ring, bytes);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ring->space -= bytes;
+ return 0;
+}
+
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
- int n = 4*num_dwords;
int ret;
- ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
if (ret)
return ret;
@@ -1426,20 +1485,21 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
if (ret)
return ret;
- if (unlikely(ring->tail + n > ring->effective_size)) {
- ret = intel_wrap_ring_buffer(ring);
- if (unlikely(ret))
- return ret;
- }
+ return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+}
- if (unlikely(ring->space < n)) {
- ret = ring_wait_for_space(ring, n);
- if (unlikely(ret))
- return ret;
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ BUG_ON(ring->outstanding_lazy_request);
+
+ if (INTEL_INFO(ring->dev)->gen >= 6) {
+ I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
}
- ring->space -= n;
- return 0;
+ ring->set_seqno(ring, seqno);
}
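
[Illustrative sketch, not part of the patch.] __intel_ring_begin() centralizes the two checks every emit path needs: wrap the tail when the request would run past the end of the buffer, and wait until enough space has been freed by the GPU. The stand-alone circular-buffer model below shows just the accounting; struct ring, its sizes and ring_begin() are invented for the sketch, the real driver pads with MI_NOOPs when wrapping and blocks on the GPU instead of failing.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 64u

struct ring {
        uint8_t  buf[RING_SIZE];
        uint32_t tail;            /* next byte the CPU will write */
        uint32_t space;           /* free bytes not yet consumed by the "GPU" */
        uint32_t effective_size;  /* usable size; real hw reserves a little */
};

/* Reserve 'bytes' in the ring, wrapping the tail when the request would
 * run past the end of the buffer.  Returns 0, or -1 if it cannot fit. */
static int ring_begin(struct ring *r, uint32_t bytes)
{
        if (bytes > r->effective_size)
                return -1;
        if (r->tail + bytes > r->effective_size) {
                uint32_t rem = r->effective_size - r->tail;
                memset(&r->buf[r->tail], 0, rem);   /* driver emits NOPs here */
                r->tail = 0;
                r->space -= rem;
        }
        if (r->space < bytes)   /* the real code waits for the GPU to catch up */
                return -1;
        r->space -= bytes;
        r->tail += bytes;       /* the driver advances this in emit/advance */
        return 0;
}

int main(void)
{
        struct ring r = { .effective_size = RING_SIZE, .space = RING_SIZE };
        printf("%d tail=%u space=%u\n", ring_begin(&r, 40), r.tail, r.space);
        printf("%d tail=%u space=%u\n", ring_begin(&r, 40), r.tail, r.space);
        return 0;
}
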
void intel_ring_advance(struct intel_ring_buffer *ring)
@@ -1447,7 +1507,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv = ring->dev->dev_private;
ring->tail &= ring->size - 1;
- if (dev_priv->stop_rings & intel_ring_flag(ring))
+ if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
return;
ring->write_tail(ring, ring->tail);
}
@@ -1604,6 +1664,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
@@ -1614,6 +1675,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
ring->get_seqno = pc_render_get_seqno;
+ ring->set_seqno = pc_render_set_seqno;
ring->irq_get = gen5_ring_get_irq;
ring->irq_put = gen5_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
@@ -1624,6 +1686,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
@@ -1695,6 +1758,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
@@ -1755,6 +1819,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->flush = gen6_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
@@ -1770,6 +1835,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->flush = bsd_ring_flush;
ring->add_request = i9xx_add_request;
ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
if (IS_GEN5(dev)) {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen5_ring_get_irq;
@@ -1799,6 +1865,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->flush = blt_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6af87cd05725..d66208c2c48b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -90,6 +90,8 @@ struct intel_ring_buffer {
*/
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
+ void (*set_seqno)(struct intel_ring_buffer *ring,
+ u32 seqno);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length,
unsigned flags);
@@ -178,6 +180,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
return ring->status_page.page_addr[reg];
}
+static inline void
+intel_write_status_page(struct intel_ring_buffer *ring,
+ int reg, u32 value)
+{
+ ring->status_page.page_addr[reg] = value;
+}
+
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -208,7 +217,7 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
}
void intel_ring_advance(struct intel_ring_buffer *ring);
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c275bf0fa36d..d07a8cdf998e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -103,6 +103,7 @@ struct intel_sdvo {
* It is only valid when using TMDS encoding and 8 bit per color mode.
*/
uint32_t color_range;
+ bool color_range_auto;
/**
* This is set if we're going to treat the device as TV-out.
@@ -125,6 +126,7 @@ struct intel_sdvo {
bool is_hdmi;
bool has_hdmi_monitor;
bool has_hdmi_audio;
+ bool rgb_quant_range_selectable;
/**
* This is set if we detect output of sdvo device as LVDS and
@@ -946,7 +948,8 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
&tx_rate, 1);
}
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
@@ -955,6 +958,13 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
};
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+ if (intel_sdvo->rgb_quant_range_selectable) {
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+ else
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+ }
+
intel_dip_infoframe_csum(&avi_if);
/* sdvo spec says that the ecc is handled by the hw, and it looks like
@@ -1064,6 +1074,18 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+ if (intel_sdvo->color_range_auto) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ if (intel_sdvo->has_hdmi_monitor &&
+ drm_match_cea_mode(adjusted_mode) > 1)
+ intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
+ else
+ intel_sdvo->color_range = 0;
+ }
+
+ if (intel_sdvo->color_range)
+ adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+
return true;
}
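
[Illustrative sketch, not part of the patch.] With the new automatic setting the driver picks the RGB quantization range from the mode: CEA-861-E makes limited range (16..235) the default for CEA modes other than VIC 1 (640x480), and full range otherwise. The sketch below captures only that rule; the cea_vic parameter stands in for the result of drm_match_cea_mode().

#include <stdbool.h>
#include <stdio.h>

/* Returns true when the limited 16..235 range should be used.
 * cea_vic: 0 = not a CEA mode, 1 = 640x480 (full range by default),
 * anything above 1 defaults to limited range per CEA-861-E section 5.1. */
static bool use_limited_range(bool hdmi_sink, int cea_vic)
{
        return hdmi_sink && cea_vic > 1;
}

int main(void)
{
        printf("%d\n", use_limited_range(true, 16));  /* 1: 1080p on an HDMI sink */
        printf("%d\n", use_limited_range(true, 1));   /* 0: 640x480 */
        printf("%d\n", use_limited_range(false, 16)); /* 0: DVI sink */
        return 0;
}
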
@@ -1121,7 +1143,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
- intel_sdvo_set_avi_infoframe(intel_sdvo);
+ intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
@@ -1153,7 +1175,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (intel_sdvo->is_hdmi)
+ if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
sdvox |= intel_sdvo->color_range;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
@@ -1513,6 +1535,8 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
if (intel_sdvo->is_hdmi) {
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ intel_sdvo->rgb_quant_range_selectable =
+ drm_rgb_quant_range_selectable(edid);
}
} else
status = connector_status_disconnected;
@@ -1564,6 +1588,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
intel_sdvo->has_hdmi_monitor = false;
intel_sdvo->has_hdmi_audio = false;
+ intel_sdvo->rgb_quant_range_selectable = false;
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
@@ -1897,10 +1922,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
- if (val == !!intel_sdvo->color_range)
- return 0;
-
- intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_sdvo->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_sdvo->color_range_auto = false;
+ intel_sdvo->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_sdvo->color_range_auto = false;
+ intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
goto done;
}
@@ -1997,11 +2033,8 @@ set_value:
done:
- if (intel_sdvo->base.base.crtc) {
- struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
- }
+ if (intel_sdvo->base.base.crtc)
+ intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
return 0;
#undef CHECK_PROPERTY
@@ -2010,7 +2043,6 @@ done:
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
.mode_fixup = intel_sdvo_mode_fixup,
.mode_set = intel_sdvo_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
@@ -2200,13 +2232,16 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
}
static void
-intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
+intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *connector)
{
struct drm_device *dev = connector->base.base.dev;
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
intel_attach_broadcast_rgb_property(&connector->base.base);
+ intel_sdvo->color_range_auto = true;
+ }
}
static bool
@@ -2254,7 +2289,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
- intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+ intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d7b060e0a231..1b6eb76beb7c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -50,6 +50,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
sprctl = I915_READ(SPRCTL(pipe));
@@ -89,6 +90,9 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
sprctl |= SPRITE_ENABLE;
+ if (IS_HASWELL(dev))
+ sprctl |= SPRITE_PIPE_CSC_ENABLE;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -103,27 +107,23 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
* when scaling is disabled.
*/
if (crtc_w != src_w || crtc_h != src_h) {
- if (!dev_priv->sprite_scaling_enabled) {
- dev_priv->sprite_scaling_enabled = true;
+ dev_priv->sprite_scaling_enabled |= 1 << pipe;
+
+ if (!scaling_was_enabled) {
intel_update_watermarks(dev);
intel_wait_for_vblank(dev, pipe);
}
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- } else {
- if (dev_priv->sprite_scaling_enabled) {
- dev_priv->sprite_scaling_enabled = false;
- /* potentially re-enable LP watermarks */
- intel_update_watermarks(dev);
- }
- }
+ } else
+ dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- pixel_size, fb->pitches[0]);
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -141,6 +141,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
+
+ /* potentially re-enable LP watermarks */
+ if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
+ intel_update_watermarks(dev);
}
static void
@@ -150,6 +154,7 @@ ivb_disable_plane(struct drm_plane *plane)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
+ bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
@@ -159,8 +164,11 @@ ivb_disable_plane(struct drm_plane *plane)
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
- dev_priv->sprite_scaling_enabled = false;
- intel_update_watermarks(dev);
+ dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
+
+ /* potentially re-enable LP watermarks */
+ if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
+ intel_update_watermarks(dev);
}
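
[Illustrative sketch, not part of the patch.] sprite_scaling_enabled changes from a single bool into a per-pipe bitmask, so disabling the scaler on one pipe no longer re-enables the low-power watermarks while another pipe is still scaling. The sketch below shows the bookkeeping in isolation; the function names are placeholders and the ordering around vblank waits in the real driver is omitted.

#include <stdbool.h>
#include <stdio.h>

static unsigned int scaling_enabled;   /* one bit per pipe */

static void update_watermarks(void)
{
        printf("watermarks recomputed, scaling mask 0x%x\n", scaling_enabled);
}

static void set_pipe_scaling(int pipe, bool on)
{
        bool was_enabled = scaling_enabled != 0;

        if (on)
                scaling_enabled |= 1u << pipe;
        else
                scaling_enabled &= ~(1u << pipe);

        /* Only touch the LP watermarks on a global off->on or on->off edge. */
        if (was_enabled != (scaling_enabled != 0))
                update_watermarks();
}

int main(void)
{
        set_pipe_scaling(0, true);   /* recomputes: first scaler enabled */
        set_pipe_scaling(1, true);   /* no recompute needed */
        set_pipe_scaling(0, false);  /* no recompute: pipe 1 still scales */
        set_pipe_scaling(1, false);  /* recomputes: last scaler disabled */
        return 0;
}
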
static int
@@ -287,8 +295,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- pixel_size, fb->pitches[0]);
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
if (obj->tiling_mode != I915_TILING_NONE)
@@ -593,7 +601,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
@@ -606,7 +614,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
ret = intel_plane->update_colorkey(plane, set);
out_unlock:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -622,7 +630,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
@@ -635,7 +643,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
intel_plane->get_colorkey(plane, get);
out_unlock:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ea93520c1278..d808421c1c80 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1479,8 +1479,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
}
if (changed && crtc)
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ intel_crtc_restore_mode(crtc);
out:
return ret;
}
@@ -1488,7 +1487,6 @@ out:
static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
.mode_fixup = intel_tv_mode_fixup,
.mode_set = intel_tv_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_tv_connector_funcs = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 2f486481d79a..d2253f639481 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
@@ -120,9 +121,10 @@ static int mgag200fb_create_object(struct mga_fbdev *afbdev,
return ret;
}
-static int mgag200fb_create(struct mga_fbdev *mfbdev,
+static int mgag200fb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
struct drm_device *dev = mfbdev->helper.dev;
struct drm_mode_fb_cmd2 mode_cmd;
struct mga_device *mdev = dev->dev_private;
@@ -209,23 +211,6 @@ out:
return ret;
}
-static int mga_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size
- *sizes)
-{
- struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = mgag200fb_create(mfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static int mga_fbdev_destroy(struct drm_device *dev,
struct mga_fbdev *mfbdev)
{
@@ -247,6 +232,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
}
drm_fb_helper_fini(&mfbdev->helper);
vfree(mfbdev->sysram);
+ drm_framebuffer_unregister_private(&mfb->base);
drm_framebuffer_cleanup(&mfb->base);
return 0;
@@ -255,7 +241,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
.gamma_set = mga_crtc_fb_gamma_set,
.gamma_get = mga_crtc_fb_gamma_get,
- .fb_probe = mga_fb_find_or_create_single,
+ .fb_probe = mgag200fb_create,
};
int mgag200_fbdev_init(struct mga_device *mdev)
@@ -277,6 +263,10 @@ int mgag200_fbdev_init(struct mga_device *mdev)
return ret;
}
drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(mdev->dev);
+
drm_fb_helper_initial_config(&mfbdev->helper, 32);
return 0;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 70dd3c5529d4..64297c72464f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -23,16 +23,8 @@ static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
kfree(fb);
}
-static int mga_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- return 0;
-}
-
static const struct drm_framebuffer_funcs mga_fb_funcs = {
.destroy = mga_user_framebuffer_destroy,
- .create_handle = mga_user_framebuffer_create_handle,
};
int mgag200_framebuffer_init(struct drm_device *dev,
@@ -40,13 +32,15 @@ int mgag200_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
- int ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
if (ret) {
DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
- gfb->obj = obj;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 8a55beeb8bdc..a7ff6d5a34b9 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -11,8 +11,9 @@ config DRM_NOUVEAU
select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
- select ACPI_WMI if ACPI
- select MXM_WMI if ACPI
+ select X86_PLATFORM_DEVICES if ACPI && X86
+ select ACPI_WMI if ACPI && X86
+ select MXM_WMI if ACPI && X86
select POWER_SUPPLY
help
Choose this option for open-source nVidia support.
@@ -52,26 +53,3 @@ config DRM_NOUVEAU_BACKLIGHT
help
Say Y here if you want to control the backlight of your display
(e.g. a laptop panel).
-
-menu "I2C encoder or helper chips"
- depends on DRM && DRM_KMS_HELPER && I2C
-
-config DRM_I2C_CH7006
- tristate "Chrontel ch7006 TV encoder"
- default m if DRM_NOUVEAU
- help
- Support for Chrontel ch7006 and similar TV encoders, found
- on some nVidia video cards.
-
- This driver is currently only useful if you're also using
- the nouveau driver.
-
-config DRM_I2C_SIL164
- tristate "Silicon Image sil164 TMDS transmitter"
- default m if DRM_NOUVEAU
- help
- Support for sil164 and similar single-link (or dual-link
- when used in pairs) TMDS transmitters, used in some nVidia
- video cards.
-
-endmenu
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index ab25752a0b1e..90f9140eeefd 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
nouveau-y += core/core/engctx.o
nouveau-y += core/core/engine.o
nouveau-y += core/core/enum.o
+nouveau-y += core/core/event.o
nouveau-y += core/core/falcon.o
nouveau-y += core/core/gpuobj.o
nouveau-y += core/core/handle.o
@@ -40,6 +41,11 @@ nouveau-y += core/subdev/bios/mxm.o
nouveau-y += core/subdev/bios/perf.o
nouveau-y += core/subdev/bios/pll.o
nouveau-y += core/subdev/bios/therm.o
+nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bus/nv04.o
+nouveau-y += core/subdev/bus/nv31.o
+nouveau-y += core/subdev/bus/nv50.o
+nouveau-y += core/subdev/bus/nvc0.o
nouveau-y += core/subdev/clock/nv04.o
nouveau-y += core/subdev/clock/nv40.o
nouveau-y += core/subdev/clock/nv50.o
@@ -85,9 +91,16 @@ nouveau-y += core/subdev/gpio/base.o
nouveau-y += core/subdev/gpio/nv10.o
nouveau-y += core/subdev/gpio/nv50.o
nouveau-y += core/subdev/gpio/nvd0.o
+nouveau-y += core/subdev/gpio/nve0.o
nouveau-y += core/subdev/i2c/base.o
+nouveau-y += core/subdev/i2c/anx9805.o
nouveau-y += core/subdev/i2c/aux.o
nouveau-y += core/subdev/i2c/bit.o
+nouveau-y += core/subdev/i2c/nv04.o
+nouveau-y += core/subdev/i2c/nv4e.o
+nouveau-y += core/subdev/i2c/nv50.o
+nouveau-y += core/subdev/i2c/nv94.o
+nouveau-y += core/subdev/i2c/nvd0.o
nouveau-y += core/subdev/ibus/nvc0.o
nouveau-y += core/subdev/ibus/nve0.o
nouveau-y += core/subdev/instmem/base.o
@@ -106,10 +119,15 @@ nouveau-y += core/subdev/mxm/mxms.o
nouveau-y += core/subdev/mxm/nv50.o
nouveau-y += core/subdev/therm/base.o
nouveau-y += core/subdev/therm/fan.o
+nouveau-y += core/subdev/therm/fannil.o
+nouveau-y += core/subdev/therm/fanpwm.o
+nouveau-y += core/subdev/therm/fantog.o
nouveau-y += core/subdev/therm/ic.o
+nouveau-y += core/subdev/therm/temp.o
nouveau-y += core/subdev/therm/nv40.o
nouveau-y += core/subdev/therm/nv50.o
-nouveau-y += core/subdev/therm/temp.o
+nouveau-y += core/subdev/therm/nva3.o
+nouveau-y += core/subdev/therm/nvd0.o
nouveau-y += core/subdev/timer/base.o
nouveau-y += core/subdev/timer/nv04.o
nouveau-y += core/subdev/vm/base.o
@@ -132,6 +150,7 @@ nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/copy/nve0.o
nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/disp/base.o
nouveau-y += core/engine/disp/nv04.o
nouveau-y += core/engine/disp/nv50.o
nouveau-y += core/engine/disp/nv84.o
@@ -141,11 +160,13 @@ nouveau-y += core/engine/disp/nva3.o
nouveau-y += core/engine/disp/nvd0.o
nouveau-y += core/engine/disp/nve0.o
nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/dport.o
nouveau-y += core/engine/disp/hdanva3.o
nouveau-y += core/engine/disp/hdanvd0.o
nouveau-y += core/engine/disp/hdminv84.o
nouveau-y += core/engine/disp/hdminva3.o
nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/piornv50.o
nouveau-y += core/engine/disp/sornv50.o
nouveau-y += core/engine/disp/sornv94.o
nouveau-y += core/engine/disp/sornvd0.o
@@ -194,7 +215,8 @@ nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
nouveau-y += nouveau_irq.o nouveau_vga.o nouveau_agp.o
nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
nouveau-y += nouveau_prime.o nouveau_abi16.o
-nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
+nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
+nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o
# drm/kms
nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
@@ -216,7 +238,10 @@ nouveau-y += nouveau_mem.o
# other random bits
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+ifdef CONFIG_X86
nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+endif
nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
+nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index 8bbb58f94a19..295c22165eac 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -99,3 +99,13 @@ nouveau_client_fini(struct nouveau_client *client, bool suspend)
nv_debug(client, "%s completed with %d\n", name[suspend], ret);
return ret;
}
+
+const char *
+nouveau_client_name(void *obj)
+{
+ const char *client_name = "unknown";
+ struct nouveau_client *client = nouveau_client(obj);
+ if (client)
+ client_name = client->name;
+ return client_name;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/enum.c b/drivers/gpu/drm/nouveau/core/core/enum.c
index 7cc7133d82de..dd434790ccc4 100644
--- a/drivers/gpu/drm/nouveau/core/core/enum.c
+++ b/drivers/gpu/drm/nouveau/core/core/enum.c
@@ -40,14 +40,15 @@ nouveau_enum_find(const struct nouveau_enum *en, u32 value)
return NULL;
}
-void
+const struct nouveau_enum *
nouveau_enum_print(const struct nouveau_enum *en, u32 value)
{
en = nouveau_enum_find(en, value);
if (en)
- printk("%s", en->name);
+ pr_cont("%s", en->name);
else
- printk("(unknown enum 0x%08x)", value);
+ pr_cont("(unknown enum 0x%08x)", value);
+ return en;
}
void
@@ -55,7 +56,7 @@ nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
{
while (bf->name) {
if (value & bf->mask) {
- printk(" %s", bf->name);
+ pr_cont(" %s", bf->name);
value &= ~bf->mask;
}
@@ -63,5 +64,5 @@ nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
}
if (value)
- printk(" (unknown bits 0x%08x)", value);
+ pr_cont(" (unknown bits 0x%08x)", value);
}
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
new file mode 100644
index 000000000000..6d01e0f0fc8a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/os.h>
+#include <core/event.h>
+
+static void
+nouveau_event_put_locked(struct nouveau_event *event, int index,
+ struct nouveau_eventh *handler)
+{
+ if (!--event->index[index].refs)
+ event->disable(event, index);
+ list_del(&handler->head);
+}
+
+void
+nouveau_event_put(struct nouveau_event *event, int index,
+ struct nouveau_eventh *handler)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&event->lock, flags);
+ if (index < event->index_nr)
+ nouveau_event_put_locked(event, index, handler);
+ spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_get(struct nouveau_event *event, int index,
+ struct nouveau_eventh *handler)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&event->lock, flags);
+ if (index < event->index_nr) {
+ list_add(&handler->head, &event->index[index].list);
+ if (!event->index[index].refs++)
+ event->enable(event, index);
+ }
+ spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_trigger(struct nouveau_event *event, int index)
+{
+ struct nouveau_eventh *handler, *temp;
+ unsigned long flags;
+
+ if (index >= event->index_nr)
+ return;
+
+ spin_lock_irqsave(&event->lock, flags);
+ list_for_each_entry_safe(handler, temp, &event->index[index].list, head) {
+ if (handler->func(handler, index) == NVKM_EVENT_DROP) {
+ nouveau_event_put_locked(event, index, handler);
+ }
+ }
+ spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_destroy(struct nouveau_event **pevent)
+{
+ struct nouveau_event *event = *pevent;
+ if (event) {
+ kfree(event);
+ *pevent = NULL;
+ }
+}
+
+int
+nouveau_event_create(int index_nr, struct nouveau_event **pevent)
+{
+ struct nouveau_event *event;
+ int i;
+
+ event = *pevent = kzalloc(sizeof(*event) + index_nr *
+ sizeof(event->index[0]), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ spin_lock_init(&event->lock);
+ for (i = 0; i < index_nr; i++)
+ INIT_LIST_HEAD(&event->index[i].list);
+ event->index_nr = index_nr;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 283248c7b050..d6dc2a65ccd1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/falcon.h>
#include <core/class.h>
#include <core/enum.h>
@@ -100,8 +101,9 @@ nva3_copy_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000040) {
nv_error(falcon, "DISPATCH_ERROR [");
nouveau_enum_print(nva3_copy_isr_error_name, ssta);
- printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, subc, mthd, data);
+ pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx), subc,
+ mthd, data);
nv_wo32(falcon, 0x004, 0x00000040);
stat &= ~0x00000040;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index b97490512723..5bc021f471f9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/enum.h>
#include <core/class.h>
@@ -126,10 +127,11 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
chid = pfifo->chid(pfifo, engctx);
if (stat) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
- printk(" ch %d [0x%010llx] mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, mthd, data);
+ pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, nouveau_client_name(engctx),
+ mthd, data);
}
nv_wr32(priv, 0x102130, stat);
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 21986f3bf0c8..8bf8955051d4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/enum.h>
#include <core/class.h>
@@ -102,8 +103,9 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000040) {
nv_error(priv, "DISPATCH_ERROR [");
nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
- printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, subc, mthd, data);
+ pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, nouveau_client_name(engctx),
+ subc, mthd, data);
nv_wr32(priv, 0x087004, 0x00000040);
stat &= ~0x00000040;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
new file mode 100644
index 000000000000..7a5cae42834f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/disp.h>
+
+void
+_nouveau_disp_dtor(struct nouveau_object *object)
+{
+ struct nouveau_disp *disp = (void *)object;
+ nouveau_event_destroy(&disp->vblank);
+ nouveau_engine_destroy(&disp->base);
+}
+
+int
+nouveau_disp_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int heads,
+ const char *intname, const char *extname,
+ int length, void **pobject)
+{
+ struct nouveau_disp *disp;
+ int ret;
+
+ ret = nouveau_engine_create_(parent, engine, oclass, true,
+ intname, extname, length, pobject);
+ disp = *pobject;
+ if (ret)
+ return ret;
+
+ return nouveau_event_create(heads, &disp->vblank);
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
new file mode 100644
index 000000000000..fa27b02ff829
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+#include <subdev/i2c.h>
+
+#include <engine/disp.h>
+
+#include "dport.h"
+
+#define DBG(fmt, args...) nv_debug(dp->disp, "DP:%04x:%04x: " fmt, \
+ dp->outp->hasht, dp->outp->hashm, ##args)
+#define ERR(fmt, args...) nv_error(dp->disp, "DP:%04x:%04x: " fmt, \
+ dp->outp->hasht, dp->outp->hashm, ##args)
+
+/******************************************************************************
+ * link training
+ *****************************************************************************/
+struct dp_state {
+ const struct nouveau_dp_func *func;
+ struct nouveau_disp *disp;
+ struct dcb_output *outp;
+ struct nvbios_dpout info;
+ u8 version;
+ struct nouveau_i2c_port *aux;
+ int head;
+ u8 dpcd[4];
+ int link_nr;
+ u32 link_bw;
+ u8 stat[6];
+ u8 conf[4];
+};
+
+static int
+dp_set_link_config(struct dp_state *dp)
+{
+ struct nouveau_disp *disp = dp->disp;
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nvbios_init init = {
+ .subdev = nv_subdev(dp->disp),
+ .bios = bios,
+ .offset = 0x0000,
+ .outp = dp->outp,
+ .crtc = dp->head,
+ .execute = 1,
+ };
+ u32 lnkcmp;
+ u8 sink[2];
+
+ DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+
+ /* set desired link configuration on the sink */
+ sink[0] = dp->link_bw / 27000;
+ sink[1] = dp->link_nr;
+ if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+ sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
+
+ nv_wraux(dp->aux, DPCD_LC00, sink, 2);
+
+ /* set desired link configuration on the source */
+ if ((lnkcmp = dp->info.lnkcmp)) {
+ if (dp->version < 0x30) {
+ while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp))
+ lnkcmp += 4;
+ init.offset = nv_ro16(bios, lnkcmp + 2);
+ } else {
+ while ((dp->link_bw / 27000) < nv_ro08(bios, lnkcmp))
+ lnkcmp += 3;
+ init.offset = nv_ro16(bios, lnkcmp + 1);
+ }
+
+ nvbios_exec(&init);
+ }
+
+ return dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
+ dp->link_nr, dp->link_bw / 27000,
+ dp->dpcd[DPCD_RC02] &
+ DPCD_RC02_ENHANCED_FRAME_CAP);
+}
+
+static void
+dp_set_training_pattern(struct dp_state *dp, u8 pattern)
+{
+ u8 sink_tp;
+
+ DBG("training pattern %d\n", pattern);
+ dp->func->pattern(dp->disp, dp->outp, dp->head, pattern);
+
+ nv_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1);
+ sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
+ sink_tp |= pattern;
+ nv_wraux(dp->aux, DPCD_LC02, &sink_tp, 1);
+}
+
+static int
+dp_link_train_commit(struct dp_state *dp)
+{
+ int i;
+
+ for (i = 0; i < dp->link_nr; i++) {
+ u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
+ u8 lpre = (lane & 0x0c) >> 2;
+ u8 lvsw = (lane & 0x03) >> 0;
+
+ dp->conf[i] = (lpre << 3) | lvsw;
+ if (lvsw == 3)
+ dp->conf[i] |= DPCD_LC03_MAX_SWING_REACHED;
+ if (lpre == 3)
+ dp->conf[i] |= DPCD_LC03_MAX_PRE_EMPHASIS_REACHED;
+
+ DBG("config lane %d %02x\n", i, dp->conf[i]);
+ dp->func->drv_ctl(dp->disp, dp->outp, dp->head, i, lvsw, lpre);
+ }
+
+ return nv_wraux(dp->aux, DPCD_LC03(0), dp->conf, 4);
+}
+
+static int
+dp_link_train_update(struct dp_state *dp, u32 delay)
+{
+ int ret;
+
+ udelay(delay);
+
+ ret = nv_rdaux(dp->aux, DPCD_LS02, dp->stat, 6);
+ if (ret)
+ return ret;
+
+ DBG("status %*ph\n", 6, dp->stat);
+ return 0;
+}
+
+static int
+dp_link_train_cr(struct dp_state *dp)
+{
+ bool cr_done = false, abort = false;
+ int voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
+ int tries = 0, i;
+
+ dp_set_training_pattern(dp, 1);
+
+ do {
+ if (dp_link_train_commit(dp) ||
+ dp_link_train_update(dp, 100))
+ break;
+
+ cr_done = true;
+ for (i = 0; i < dp->link_nr; i++) {
+ u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+ if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
+ cr_done = false;
+ if (dp->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
+ abort = true;
+ break;
+ }
+ }
+
+ if ((dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
+ voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
+ tries = 0;
+ }
+ } while (!cr_done && !abort && ++tries < 5);
+
+ return cr_done ? 0 : -1;
+}
+
+static int
+dp_link_train_eq(struct dp_state *dp)
+{
+ bool eq_done, cr_done = true;
+ int tries = 0, i;
+
+ dp_set_training_pattern(dp, 2);
+
+ do {
+ if (dp_link_train_update(dp, 400))
+ break;
+
+ eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
+ for (i = 0; i < dp->link_nr && eq_done; i++) {
+ u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+ if (!(lane & DPCD_LS02_LANE0_CR_DONE))
+ cr_done = false;
+ if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
+ !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
+ eq_done = false;
+ }
+
+ if (dp_link_train_commit(dp))
+ break;
+ } while (!eq_done && cr_done && ++tries <= 5);
+
+ return eq_done ? 0 : -1;
+}
+
+static void
+dp_link_train_init(struct dp_state *dp, bool spread)
+{
+ struct nvbios_init init = {
+ .subdev = nv_subdev(dp->disp),
+ .bios = nouveau_bios(dp->disp),
+ .outp = dp->outp,
+ .crtc = dp->head,
+ .execute = 1,
+ };
+
+ /* set desired spread */
+ if (spread)
+ init.offset = dp->info.script[2];
+ else
+ init.offset = dp->info.script[3];
+ nvbios_exec(&init);
+
+ /* pre-train script */
+ init.offset = dp->info.script[0];
+ nvbios_exec(&init);
+}
+
+static void
+dp_link_train_fini(struct dp_state *dp)
+{
+ struct nvbios_init init = {
+ .subdev = nv_subdev(dp->disp),
+ .bios = nouveau_bios(dp->disp),
+ .outp = dp->outp,
+ .crtc = dp->head,
+ .execute = 1,
+ };
+
+ /* post-train script */
+ init.offset = dp->info.script[1];
+ nvbios_exec(&init);
+}
+
+int
+nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
+ struct dcb_output *outp, int head, u32 datarate)
+{
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nouveau_i2c *i2c = nouveau_i2c(disp);
+ struct dp_state _dp = {
+ .disp = disp,
+ .func = func,
+ .outp = outp,
+ .head = head,
+ }, *dp = &_dp;
+ const u32 bw_list[] = { 270000, 162000, 0 };
+ const u32 *link_bw = bw_list;
+ u8 hdr, cnt, len;
+ u32 data;
+ int ret;
+
+ /* find the bios displayport data relevant to this output */
+ data = nvbios_dpout_match(bios, outp->hasht, outp->hashm, &dp->version,
+ &hdr, &cnt, &len, &dp->info);
+ if (!data) {
+ ERR("bios data not found\n");
+ return -EINVAL;
+ }
+
+ /* acquire the aux channel and fetch some info about the display */
+ if (outp->location)
+ dp->aux = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
+ else
+ dp->aux = i2c->find(i2c, NV_I2C_TYPE_DCBI2C(outp->i2c_index));
+ if (!dp->aux) {
+ ERR("no aux channel?!\n");
+ return -ENODEV;
+ }
+
+ ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd));
+ if (ret) {
+ ERR("failed to read DPCD\n");
+ return ret;
+ }
+
+ /* adjust required bandwidth for 8B/10B coding overhead */
+ datarate = (datarate / 8) * 10;
+
+ /* enable down-spreading and execute pre-train script from vbios */
+ dp_link_train_init(dp, dp->dpcd[3] & 0x01);
+
+ /* start off at highest link rate supported by encoder and display */
+ while (*link_bw > (dp->dpcd[1] * 27000))
+ link_bw++;
+
+ while (link_bw[0]) {
+ /* find minimum required lane count at this link rate */
+ dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT;
+ while ((dp->link_nr >> 1) * link_bw[0] > datarate)
+ dp->link_nr >>= 1;
+
+ /* drop link rate to minimum with this lane count */
+ while ((link_bw[1] * dp->link_nr) > datarate)
+ link_bw++;
+ dp->link_bw = link_bw[0];
+
+ /* program selected link configuration */
+ ret = dp_set_link_config(dp);
+ if (ret == 0) {
+ /* attempt to train the link at this configuration */
+ memset(dp->stat, 0x00, sizeof(dp->stat));
+ if (!dp_link_train_cr(dp) &&
+ !dp_link_train_eq(dp))
+ break;
+ } else
+ if (ret >= 1) {
+ /* dp_set_link_config() handled training */
+ break;
+ }
+
+ /* retry at lower rate */
+ link_bw++;
+ }
+
+ /* finish link training */
+ dp_set_training_pattern(dp, 0);
+
+ /* execute post-train script from vbios */
+ dp_link_train_fini(dp);
+ return 0;
+}
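
The clock-recovery and channel-equalization loops above read six DPCD status bytes starting at 0x202 and unpack two lanes per byte: the even lane sits in the low nibble, the odd lane in the high nibble, which is what the (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf expression extracts. Below is a minimal userspace sketch of that unpacking, not part of the patch; the LANE_* macros are local stand-ins for the DPCD_LS02_* defines introduced in dport.h.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the per-lane nibble unpacking used by
 * dp_link_train_cr()/dp_link_train_eq() above.  Each lane status byte
 * carries two lanes: even lane in the low nibble, odd lane in the high. */
#define LANE_CR_DONE          0x01  /* same layout as DPCD_LS02_LANE0_* */
#define LANE_CHANNEL_EQ_DONE  0x02
#define LANE_SYMBOL_LOCKED    0x04

int main(void)
{
	/* Example DPCD 0x202/0x203 readback: lanes 0-2 fully trained,
	 * lane 3 has achieved clock recovery only. */
	uint8_t stat[2] = { 0x77, 0x17 };
	int i;

	for (i = 0; i < 4; i++) {
		uint8_t lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
		printf("lane %d: cr %d eq %d sym %d\n", i,
		       !!(lane & LANE_CR_DONE),
		       !!(lane & LANE_CHANNEL_EQ_DONE),
		       !!(lane & LANE_SYMBOL_LOCKED));
	}
	return 0;
}
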
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.h b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
new file mode 100644
index 000000000000..0e1bbd18ff6c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
@@ -0,0 +1,78 @@
+#ifndef __NVKM_DISP_DPORT_H__
+#define __NVKM_DISP_DPORT_H__
+
+/* DPCD Receiver Capabilities */
+#define DPCD_RC00 0x00000
+#define DPCD_RC00_DPCD_REV 0xff
+#define DPCD_RC01 0x00001
+#define DPCD_RC01_MAX_LINK_RATE 0xff
+#define DPCD_RC02 0x00002
+#define DPCD_RC02_ENHANCED_FRAME_CAP 0x80
+#define DPCD_RC02_MAX_LANE_COUNT 0x1f
+#define DPCD_RC03 0x00003
+#define DPCD_RC03_MAX_DOWNSPREAD 0x01
+
+/* DPCD Link Configuration */
+#define DPCD_LC00 0x00100
+#define DPCD_LC00_LINK_BW_SET 0xff
+#define DPCD_LC01 0x00101
+#define DPCD_LC01_ENHANCED_FRAME_EN 0x80
+#define DPCD_LC01_LANE_COUNT_SET 0x1f
+#define DPCD_LC02 0x00102
+#define DPCD_LC02_TRAINING_PATTERN_SET 0x03
+#define DPCD_LC03(l) ((l) + 0x00103)
+#define DPCD_LC03_MAX_PRE_EMPHASIS_REACHED 0x20
+#define DPCD_LC03_PRE_EMPHASIS_SET 0x18
+#define DPCD_LC03_MAX_SWING_REACHED 0x04
+#define DPCD_LC03_VOLTAGE_SWING_SET 0x03
+
+/* DPCD Link/Sink Status */
+#define DPCD_LS02 0x00202
+#define DPCD_LS02_LANE1_SYMBOL_LOCKED 0x40
+#define DPCD_LS02_LANE1_CHANNEL_EQ_DONE 0x20
+#define DPCD_LS02_LANE1_CR_DONE 0x10
+#define DPCD_LS02_LANE0_SYMBOL_LOCKED 0x04
+#define DPCD_LS02_LANE0_CHANNEL_EQ_DONE 0x02
+#define DPCD_LS02_LANE0_CR_DONE 0x01
+#define DPCD_LS03 0x00203
+#define DPCD_LS03_LANE3_SYMBOL_LOCKED 0x40
+#define DPCD_LS03_LANE3_CHANNEL_EQ_DONE 0x20
+#define DPCD_LS03_LANE3_CR_DONE 0x10
+#define DPCD_LS03_LANE2_SYMBOL_LOCKED 0x04
+#define DPCD_LS03_LANE2_CHANNEL_EQ_DONE 0x02
+#define DPCD_LS03_LANE2_CR_DONE 0x01
+#define DPCD_LS04 0x00204
+#define DPCD_LS04_LINK_STATUS_UPDATED 0x80
+#define DPCD_LS04_DOWNSTREAM_PORT_STATUS_CHANGED 0x40
+#define DPCD_LS04_INTERLANE_ALIGN_DONE 0x01
+#define DPCD_LS06 0x00206
+#define DPCD_LS06_LANE1_PRE_EMPHASIS 0xc0
+#define DPCD_LS06_LANE1_VOLTAGE_SWING 0x30
+#define DPCD_LS06_LANE0_PRE_EMPHASIS 0x0c
+#define DPCD_LS06_LANE0_VOLTAGE_SWING 0x03
+#define DPCD_LS07 0x00207
+#define DPCD_LS07_LANE3_PRE_EMPHASIS 0xc0
+#define DPCD_LS07_LANE3_VOLTAGE_SWING 0x30
+#define DPCD_LS07_LANE2_PRE_EMPHASIS 0x0c
+#define DPCD_LS07_LANE2_VOLTAGE_SWING 0x03
+
+struct nouveau_disp;
+struct dcb_output;
+
+struct nouveau_dp_func {
+ int (*pattern)(struct nouveau_disp *, struct dcb_output *,
+ int head, int pattern);
+ int (*lnk_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
+ int link_nr, int link_bw, bool enh_frame);
+ int (*drv_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
+ int lane, int swing, int preem);
+};
+
+extern const struct nouveau_dp_func nv94_sor_dp_func;
+extern const struct nouveau_dp_func nvd0_sor_dp_func;
+extern const struct nouveau_dp_func nv50_pior_dp_func;
+
+int nouveau_dp_train(struct nouveau_disp *, const struct nouveau_dp_func *,
+ struct dcb_output *, int, u32);
+
+#endif
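
The link-rate handling in this header and in dport.c mixes two unit systems: the sink reports MAX_LINK_RATE (DPCD 0x01) in multiples of 270 Mbps, while the training code keeps link_bw in 10 kHz units, hence the recurring factor of 27000 and the bw_list entries 270000 and 162000. A small standalone sketch of the conversion follows; it is illustrative only and not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the unit conversion behind "dp->dpcd[1] * 27000" and
 * "dp->link_bw / 27000" in nouveau_dp_train().  DPCD MAX_LINK_RATE counts
 * in 270 Mbps steps; the driver keeps link_bw in 10 kHz units. */
int main(void)
{
	uint8_t max_link_rate = 0x0a;             /* 0x06 = 1.62G, 0x0a = 2.7G */
	uint32_t link_bw = max_link_rate * 27000; /* -> 270000 (10 kHz units)  */

	printf("sink max rate: %u x 10kHz (%.2f Gbps per lane)\n",
	       link_bw, link_bw * 10e3 * 1e-9);
	printf("DPCD LINK_BW_SET value to write back: 0x%02x\n",
	       (unsigned)(link_bw / 27000));
	return 0;
}
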
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 1c919f2af89f..05e903f08a36 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -24,21 +24,33 @@
#include <engine/disp.h>
+#include <core/event.h>
+#include <core/class.h>
+
struct nv04_disp_priv {
struct nouveau_disp base;
};
static struct nouveau_oclass
nv04_disp_sclass[] = {
+ { NV04_DISP_CLASS, &nouveau_object_ofuncs },
{},
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv04_disp_vblank_enable(struct nouveau_event *event, int head)
+{
+ nv_wr32(event->priv, 0x600140 + (head * 0x2000), 0x00000001);
+}
+
static void
-nv04_disp_intr_vblank(struct nv04_disp_priv *priv, int crtc)
+nv04_disp_vblank_disable(struct nouveau_event *event, int head)
{
- struct nouveau_disp *disp = &priv->base;
- if (disp->vblank.notify)
- disp->vblank.notify(disp->vblank.data, crtc);
+ nv_wr32(event->priv, 0x600140 + (head * 0x2000), 0x00000000);
}
static void
@@ -49,25 +61,25 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
u32 crtc1 = nv_rd32(priv, 0x602100);
if (crtc0 & 0x00000001) {
- nv04_disp_intr_vblank(priv, 0);
+ nouveau_event_trigger(priv->base.vblank, 0);
nv_wr32(priv, 0x600100, 0x00000001);
}
if (crtc1 & 0x00000001) {
- nv04_disp_intr_vblank(priv, 1);
+ nouveau_event_trigger(priv->base.vblank, 1);
nv_wr32(priv, 0x602100, 0x00000001);
}
}
static int
nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
struct nv04_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "DISPLAY",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "DISPLAY",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -75,6 +87,9 @@ nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv04_disp_sclass;
nv_subdev(priv)->intr = nv04_disp_intr;
+ priv->base.vblank->priv = priv;
+ priv->base.vblank->enable = nv04_disp_vblank_enable;
+ priv->base.vblank->disable = nv04_disp_vblank_disable;
return 0;
}
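
The new vblank hooks above only program the hardware interrupt enable; deciding when to call them is left to the generic event code that nouveau_event_trigger() feeds from the interrupt handler. As a rough illustration of that split, here is a hypothetical refcounting sketch; the fake_event type and helpers are made up for this note and are not nouveau's actual struct nouveau_event API.

#include <stdio.h>

/* Hypothetical sketch of the enable/disable/trigger split: hardware
 * interrupt enables are touched only for the first and last listener,
 * and the IRQ handler simply reports the event. */
struct fake_event {
	int refs;
	void (*enable)(struct fake_event *, int head);
	void (*disable)(struct fake_event *, int head);
};

static void fake_event_get(struct fake_event *ev, int head)
{
	if (ev->refs++ == 0)
		ev->enable(ev, head);	/* e.g. nv04: write 0x600140 + head * 0x2000 */
}

static void fake_event_put(struct fake_event *ev, int head)
{
	if (--ev->refs == 0)
		ev->disable(ev, head);
}

static void vblank_on(struct fake_event *ev, int head)
{
	(void)ev;
	printf("enable vblank irq, head %d\n", head);
}

static void vblank_off(struct fake_event *ev, int head)
{
	(void)ev;
	printf("disable vblank irq, head %d\n", head);
}

int main(void)
{
	struct fake_event ev = { .enable = vblank_on, .disable = vblank_off };

	fake_event_get(&ev, 0);	/* first listener: hardware enable fires */
	fake_event_get(&ev, 0);	/* second listener: no register access */
	fake_event_put(&ev, 0);
	fake_event_put(&ev, 0);	/* last listener: hardware disable fires */
	return 0;
}
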
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index ca1a7d76a95b..5fa13267bd9f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -27,7 +27,6 @@
#include <core/handle.h>
#include <core/class.h>
-#include <engine/software.h>
#include <engine/disp.h>
#include <subdev/bios.h>
@@ -37,7 +36,6 @@
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/bar.h>
#include <subdev/clock.h>
#include "nv50.h"
@@ -335,7 +333,7 @@ nv50_disp_sync_ctor(struct nouveau_object *parent,
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*data) || args->head > 1)
+ if (size < sizeof(*args) || args->head > 1)
return -EINVAL;
ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -374,7 +372,7 @@ nv50_disp_ovly_ctor(struct nouveau_object *parent,
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*data) || args->head > 1)
+ if (size < sizeof(*args) || args->head > 1)
return -EINVAL;
ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -543,6 +541,18 @@ nv50_disp_curs_ofuncs = {
* Base display object
******************************************************************************/
+static void
+nv50_disp_base_vblank_enable(struct nouveau_event *event, int head)
+{
+ nv_mask(event->priv, 0x61002c, (1 << head), (1 << head));
+}
+
+static void
+nv50_disp_base_vblank_disable(struct nouveau_event *event, int head)
+{
+ nv_mask(event->priv, 0x61002c, (1 << head), (0 << head));
+}
+
static int
nv50_disp_base_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -559,6 +569,9 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ priv->base.vblank->priv = priv;
+ priv->base.vblank->enable = nv50_disp_base_vblank_enable;
+ priv->base.vblank->disable = nv50_disp_base_vblank_disable;
return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
}
@@ -613,7 +626,7 @@ nv50_disp_base_init(struct nouveau_object *object)
nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
}
- /* ... EXT caps */
+ /* ... PIOR caps */
for (i = 0; i < 3; i++) {
tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
@@ -665,6 +678,9 @@ nv50_disp_base_omthds[] = {
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
{},
};
@@ -756,50 +772,6 @@ nv50_disp_intr_error(struct nv50_disp_priv *priv)
}
}
-static void
-nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
-{
- struct nouveau_bar *bar = nouveau_bar(priv);
- struct nouveau_disp *disp = &priv->base;
- struct nouveau_software_chan *chan, *temp;
- unsigned long flags;
-
- spin_lock_irqsave(&disp->vblank.lock, flags);
- list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
- if (chan->vblank.crtc != crtc)
- continue;
-
- if (nv_device(priv)->chipset >= 0xc0) {
- nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
- bar->flush(bar);
- nv_wr32(priv, 0x06000c,
- upper_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060010,
- lower_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060014, chan->vblank.value);
- } else {
- nv_wr32(priv, 0x001704, chan->vblank.channel);
- nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
- bar->flush(bar);
- if (nv_device(priv)->chipset == 0x50) {
- nv_wr32(priv, 0x001570, chan->vblank.offset);
- nv_wr32(priv, 0x001574, chan->vblank.value);
- } else {
- nv_wr32(priv, 0x060010, chan->vblank.offset);
- nv_wr32(priv, 0x060014, chan->vblank.value);
- }
- }
-
- list_del(&chan->vblank.head);
- if (disp->vblank.put)
- disp->vblank.put(disp->vblank.data, crtc);
- }
- spin_unlock_irqrestore(&disp->vblank.lock, flags);
-
- if (disp->vblank.notify)
- disp->vblank.notify(disp->vblank.data, crtc);
-}
-
static u16
exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
@@ -811,8 +783,8 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
if (outp < 4) {
type = DCB_OUTPUT_ANALOG;
mask = 0;
- } else {
- outp -= 4;
+ } else
+ if (outp < 8) {
switch (ctrl & 0x00000f00) {
case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
@@ -824,6 +796,17 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
return 0x0000;
}
+ outp -= 4;
+ } else {
+ outp = outp - 8;
+ type = 0x0010;
+ mask = 0;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type |= priv->pior.type[outp]; break;
+ default:
+ nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl);
+ return 0x0000;
+ }
}
mask = 0x00c0 & (mask << 6);
@@ -834,6 +817,10 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
if (!data)
return 0x0000;
+ /* off-chip encoders require matching the exact encoder type */
+ if (dcb->location != 0)
+ type |= dcb->extdev << 8;
+
return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
}
@@ -848,9 +835,11 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
u32 ctrl = 0x00000000;
int i;
+ /* DAC */
for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+ /* SOR */
if (!(ctrl & (1 << head))) {
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
@@ -865,6 +854,13 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
}
}
+ /* PIOR */
+ if (!(ctrl & (1 << head))) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
+ i += 8;
+ }
+
if (!(ctrl & (1 << head)))
return false;
i--;
@@ -894,13 +890,15 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
struct nvbios_outp info1;
struct nvbios_ocfg info2;
u8 ver, hdr, cnt, len;
- u16 data, conf;
u32 ctrl = 0x00000000;
+ u32 data, conf = ~0;
int i;
+ /* DAC */
for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+ /* SOR */
if (!(ctrl & (1 << head))) {
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
@@ -915,34 +913,46 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
}
}
+ /* PIOR */
+ if (!(ctrl & (1 << head))) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
+ i += 8;
+ }
+
if (!(ctrl & (1 << head)))
- return 0x0000;
+ return conf;
i--;
data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
if (!data)
- return 0x0000;
-
- switch (outp->type) {
- case DCB_OUTPUT_TMDS:
- conf = (ctrl & 0x00000f00) >> 8;
- if (pclk >= 165000)
- conf |= 0x0100;
- break;
- case DCB_OUTPUT_LVDS:
- conf = priv->sor.lvdsconf;
- break;
- case DCB_OUTPUT_DP:
+ return conf;
+
+ if (outp->location == 0) {
+ switch (outp->type) {
+ case DCB_OUTPUT_TMDS:
+ conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ conf = 0x00ff;
+ break;
+ }
+ } else {
conf = (ctrl & 0x00000f00) >> 8;
- break;
- case DCB_OUTPUT_ANALOG:
- default:
- conf = 0x00ff;
- break;
+ pclk = pclk / 2;
}
data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
- if (data) {
+ if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
struct nvbios_init init = {
@@ -954,32 +964,37 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
.execute = 1,
};
- if (nvbios_exec(&init))
- return 0x0000;
- return conf;
+ nvbios_exec(&init);
}
}
- return 0x0000;
+ return conf;
}
static void
-nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
+nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head)
{
- int head = ffs((super & 0x00000060) >> 5) - 1;
- if (head >= 0) {
- head = ffs((super & 0x00000180) >> 7) - 1;
- if (head >= 0)
- exec_script(priv, head, 1);
- }
+ exec_script(priv, head, 1);
+}
- nv_wr32(priv, 0x610024, 0x00000010);
- nv_wr32(priv, 0x610030, 0x80000000);
+static void
+nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
+{
+ exec_script(priv, head, 2);
+}
+
+static void
+nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head)
+{
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (pclk)
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
}
static void
-nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
- struct dcb_output *outp, u32 pclk)
+nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
+ struct dcb_output *outp, u32 pclk)
{
const int link = !(outp->sorconf.link & 1);
const int or = ffs(outp->or) - 1;
@@ -1085,53 +1100,54 @@ nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
}
static void
-nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
+nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
{
struct dcb_output outp;
- u32 addr, mask, data;
- int head;
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ u32 hval, hreg = 0x614200 + (head * 0x800);
+ u32 oval, oreg;
+ u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
+ if (conf != ~0) {
+ if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
+ u32 soff = (ffs(outp.or) - 1) * 0x08;
+ u32 ctrl = nv_rd32(priv, 0x610798 + soff);
+ u32 datarate;
+
+ switch ((ctrl & 0x000f0000) >> 16) {
+ case 6: datarate = pclk * 30 / 8; break;
+ case 5: datarate = pclk * 24 / 8; break;
+ case 2:
+ default:
+ datarate = pclk * 18 / 8;
+ break;
+ }
- /* finish detaching encoder? */
- head = ffs((super & 0x00000180) >> 7) - 1;
- if (head >= 0)
- exec_script(priv, head, 2);
-
- /* check whether a vpll change is required */
- head = ffs((super & 0x00000600) >> 9) - 1;
- if (head >= 0) {
- u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
- if (pclk) {
- struct nouveau_clock *clk = nouveau_clock(priv);
- clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ nouveau_dp_train(&priv->base, priv->sor.dp,
+ &outp, head, datarate);
}
- nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
- }
-
- /* (re)attach the relevant OR to the head */
- head = ffs((super & 0x00000180) >> 7) - 1;
- if (head >= 0) {
- u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
- u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
- if (conf) {
- if (outp.type == DCB_OUTPUT_ANALOG) {
- addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
- mask = 0xffffffff;
- data = 0x00000000;
- } else {
- if (outp.type == DCB_OUTPUT_DP)
- nv50_disp_intr_unk20_dp(priv, &outp, pclk);
- addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
- mask = 0x00000707;
- data = (conf & 0x0100) ? 0x0101 : 0x0000;
- }
-
- nv_mask(priv, addr, mask, data);
+ exec_clkcmp(priv, head, 0, pclk, &outp);
+
+ if (!outp.location && outp.type == DCB_OUTPUT_ANALOG) {
+ oreg = 0x614280 + (ffs(outp.or) - 1) * 0x800;
+ oval = 0x00000000;
+ hval = 0x00000000;
+ } else
+ if (!outp.location) {
+ if (outp.type == DCB_OUTPUT_DP)
+ nv50_disp_intr_unk20_2_dp(priv, &outp, pclk);
+ oreg = 0x614300 + (ffs(outp.or) - 1) * 0x800;
+ oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+ hval = 0x00000000;
+ } else {
+ oreg = 0x614380 + (ffs(outp.or) - 1) * 0x800;
+ oval = 0x00000001;
+ hval = 0x00000001;
}
- }
- nv_wr32(priv, 0x610024, 0x00000020);
- nv_wr32(priv, 0x610030, 0x80000000);
+ nv_mask(priv, hreg, 0x0000000f, hval);
+ nv_mask(priv, oreg, 0x00000707, oval);
+ }
}
/* If programming a TMDS output on a SOR that can also be configured for
@@ -1143,7 +1159,7 @@ nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
* programmed for DisplayPort.
*/
static void
-nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
{
struct nouveau_bios *bios = nouveau_bios(priv);
const int link = !(outp->sorconf.link & 1);
@@ -1157,35 +1173,79 @@ nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
}
static void
-nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
+nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head)
{
- int head = ffs((super & 0x00000180) >> 7) - 1;
- if (head >= 0) {
- struct dcb_output outp;
- u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
- if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
- if (outp.type == DCB_OUTPUT_TMDS)
- nv50_disp_intr_unk40_tmds(priv, &outp);
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (exec_clkcmp(priv, head, 1, pclk, &outp) != ~0) {
+ if (outp.location == 0 && outp.type == DCB_OUTPUT_TMDS)
+ nv50_disp_intr_unk40_0_tmds(priv, &outp);
+ else
+ if (outp.location == 1 && outp.type == DCB_OUTPUT_DP) {
+ u32 soff = (ffs(outp.or) - 1) * 0x08;
+ u32 ctrl = nv_rd32(priv, 0x610b84 + soff);
+ u32 datarate;
+
+ switch ((ctrl & 0x000f0000) >> 16) {
+ case 6: datarate = pclk * 30 / 8; break;
+ case 5: datarate = pclk * 24 / 8; break;
+ case 2:
+ default:
+ datarate = pclk * 18 / 8;
+ break;
+ }
+
+ nouveau_dp_train(&priv->base, priv->pior.dp,
+ &outp, head, datarate);
}
}
-
- nv_wr32(priv, 0x610024, 0x00000040);
- nv_wr32(priv, 0x610030, 0x80000000);
}
-static void
-nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
+void
+nv50_disp_intr_supervisor(struct work_struct *work)
{
+ struct nv50_disp_priv *priv =
+ container_of(work, struct nv50_disp_priv, supervisor);
u32 super = nv_rd32(priv, 0x610030);
+ int head;
- nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
+ nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super);
- if (intr1 & 0x00000010)
- nv50_disp_intr_unk10(priv, super);
- if (intr1 & 0x00000020)
- nv50_disp_intr_unk20(priv, super);
- if (intr1 & 0x00000040)
- nv50_disp_intr_unk40(priv, super);
+ if (priv->super & 0x00000010) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000020 << head)))
+ continue;
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk10_0(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000020) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk20_0(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000200 << head)))
+ continue;
+ nv50_disp_intr_unk20_1(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk20_2(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000040) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk40_0(priv, head);
+ }
+ }
+
+ nv_wr32(priv, 0x610030, 0x80000000);
}
void
@@ -1201,19 +1261,21 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
}
if (intr1 & 0x00000004) {
- nv50_disp_intr_vblank(priv, 0);
+ nouveau_event_trigger(priv->base.vblank, 0);
nv_wr32(priv, 0x610024, 0x00000004);
intr1 &= ~0x00000004;
}
if (intr1 & 0x00000008) {
- nv50_disp_intr_vblank(priv, 1);
+ nouveau_event_trigger(priv->base.vblank, 1);
nv_wr32(priv, 0x610024, 0x00000008);
intr1 &= ~0x00000008;
}
if (intr1 & 0x00000070) {
- nv50_disp_intr_super(priv, intr1);
+ priv->super = (intr1 & 0x00000070);
+ schedule_work(&priv->supervisor);
+ nv_wr32(priv, 0x610024, priv->super);
intr1 &= ~0x00000070;
}
}
@@ -1226,7 +1288,7 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -1235,16 +1297,17 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv50_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nv50_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 2;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
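
Before handing off to nouveau_dp_train(), nv50_disp_intr_unk20_2() (and nv50_disp_intr_unk40_0() for external encoders) turns the head's pixel clock into a payload figure: pclk times bits per pixel, divided by 8 to get bytes, with the 2/5/6 selector from the control word mapping to 18/24/30 bpp, which matches 6/8/10-bit RGB. A standalone sketch of that arithmetic follows; it is illustrative only, and dp_payload() is a made-up helper name.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the datarate computed before nouveau_dp_train() above
 * is pixel clock times bits per pixel, expressed in bytes (hence the /8). */
static uint32_t dp_payload(uint32_t pclk_10khz, unsigned int depth_sel)
{
	unsigned int bpp;

	switch (depth_sel) {
	case 6:  bpp = 30; break;
	case 5:  bpp = 24; break;
	case 2:
	default: bpp = 18; break;
	}
	return pclk_10khz * bpp / 8;	/* bytes, still in 10 kHz units */
}

int main(void)
{
	/* e.g. 1920x1080@60: ~148.5 MHz pixel clock = 14850 x 10 kHz */
	printf("18bpp: %u\n", dp_payload(14850, 2));
	printf("24bpp: %u\n", dp_payload(14850, 5));
	printf("30bpp: %u\n", dp_payload(14850, 6));
	return 0;
}
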
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index a6bb931450f1..1ae6ceb56704 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -3,16 +3,22 @@
#include <core/parent.h>
#include <core/namedb.h>
+#include <core/engctx.h>
#include <core/ramht.h>
+#include <core/event.h>
#include <engine/dmaobj.h>
#include <engine/disp.h>
-struct dcb_output;
+#include "dport.h"
struct nv50_disp_priv {
struct nouveau_disp base;
struct nouveau_oclass *sclass;
+
+ struct work_struct supervisor;
+ u32 super;
+
struct {
int nr;
} head;
@@ -26,23 +32,15 @@ struct nv50_disp_priv {
int (*power)(struct nv50_disp_priv *, int sor, u32 data);
int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
- int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
- int head, u16 type, u16 mask, u32 data,
- struct dcb_output *);
- int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
- int head, u16 type, u16 mask, u32 data,
- struct dcb_output *);
- int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
- u16 type, u16 mask, u32 data,
- struct dcb_output *);
- int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
- int head, u16 type, u16 mask, u32 data,
- struct dcb_output *);
- int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
- int lane, u16 type, u16 mask, u32 data,
- struct dcb_output *);
u32 lvdsconf;
+ const struct nouveau_dp_func *dp;
} sor;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int ext, u32 data);
+ u8 type[3];
+ const struct nouveau_dp_func *dp;
+ } pior;
};
#define DAC_MTHD(n) (n), (n) + 0x03
@@ -81,6 +79,11 @@ int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
struct dcb_output *);
+#define PIOR_MTHD(n) (n), (n) + 0x03
+
+int nv50_pior_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_pior_power(struct nv50_disp_priv *, int, u32);
+
struct nv50_disp_base {
struct nouveau_parent base;
struct nouveau_ramht *ramht;
@@ -124,6 +127,7 @@ extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_intr_supervisor(struct work_struct *);
void nv50_disp_intr(struct nouveau_subdev *);
extern struct nouveau_omthds nv84_disp_base_omthds[];
@@ -137,6 +141,7 @@ extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
extern struct nouveau_oclass nvd0_disp_cclass;
+void nvd0_disp_intr_supervisor(struct work_struct *);
void nvd0_disp_intr(struct nouveau_subdev *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index fc84eacdfbec..d8c74c0883a1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -46,6 +46,9 @@ nv84_disp_base_omthds[] = {
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
{},
};
@@ -63,7 +66,7 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -72,17 +75,18 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv84_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nv84_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 2;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hdmi = nv84_hdmi_ctrl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index ba9dfd4669a2..a66f949c1f84 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -44,14 +44,11 @@ nv94_disp_base_omthds[] = {
{ SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
{},
};
@@ -69,7 +66,7 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -78,22 +75,19 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv94_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nv94_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 4;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hdmi = nv84_hdmi_ctrl;
- priv->sor.dp_train = nv94_sor_dp_train;
- priv->sor.dp_train_init = nv94_sor_dp_train_init;
- priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
- priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
- priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->sor.dp = &nv94_sor_dp_func;
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index 5d63902cdeda..6cf8eefac368 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -53,7 +53,7 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -62,17 +62,18 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nva0_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nva0_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 2;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hdmi = nv84_hdmi_ctrl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index e9192ca389fa..b75413169eae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -45,14 +45,11 @@ nva3_disp_base_omthds[] = {
{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
{},
};
@@ -70,7 +67,7 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -79,23 +76,20 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nva3_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nva3_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 4;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nva3_hda_eld;
priv->sor.hdmi = nva3_hdmi_ctrl;
- priv->sor.dp_train = nv94_sor_dp_train;
- priv->sor.dp_train_init = nv94_sor_dp_train_init;
- priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
- priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
- priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->sor.dp = &nv94_sor_dp_func;
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 9e38ebff5fb3..788dd34ccb54 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -27,12 +27,10 @@
#include <core/handle.h>
#include <core/class.h>
-#include <engine/software.h>
#include <engine/disp.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/bar.h>
#include <subdev/clock.h>
#include <subdev/bios.h>
@@ -230,7 +228,7 @@ nvd0_disp_sync_ctor(struct nouveau_object *parent,
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*data) || args->head >= priv->head.nr)
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
return -EINVAL;
ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -270,7 +268,7 @@ nvd0_disp_ovly_ctor(struct nouveau_object *parent,
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*data) || args->head >= priv->head.nr)
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
return -EINVAL;
ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -443,6 +441,18 @@ nvd0_disp_curs_ofuncs = {
* Base display object
******************************************************************************/
+static void
+nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head)
+{
+ nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
+}
+
+static void
+nvd0_disp_base_vblank_disable(struct nouveau_event *event, int head)
+{
+ nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
+}
+
static int
nvd0_disp_base_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -459,6 +469,10 @@ nvd0_disp_base_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ priv->base.vblank->priv = priv;
+ priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
+ priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
+
return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
}
@@ -609,13 +623,24 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
}
static bool
-exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
+exec_script(struct nv50_disp_priv *priv, int head, int id)
{
struct nouveau_bios *bios = nouveau_bios(priv);
struct nvbios_outp info;
struct dcb_output dcb;
u8 ver, hdr, cnt, len;
+ u32 ctrl = 0x00000000;
u16 data;
+ int outp;
+
+ for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
+ ctrl = nv_rd32(priv, 0x640180 + (outp * 0x20));
+ if (ctrl & (1 << head))
+ break;
+ }
+
+ if (outp == 8)
+ return false;
data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
if (data) {
@@ -635,21 +660,31 @@ exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
}
static u32
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
- u32 ctrl, int id, u32 pclk)
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
+ u32 pclk, struct dcb_output *dcb)
{
struct nouveau_bios *bios = nouveau_bios(priv);
struct nvbios_outp info1;
struct nvbios_ocfg info2;
- struct dcb_output dcb;
u8 ver, hdr, cnt, len;
- u16 data, conf;
+ u32 ctrl = 0x00000000;
+ u32 data, conf = ~0;
+ int outp;
- data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1);
- if (data == 0x0000)
+ for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
+ ctrl = nv_rd32(priv, 0x660180 + (outp * 0x20));
+ if (ctrl & (1 << head))
+ break;
+ }
+
+ if (outp == 8)
return false;
- switch (dcb.type) {
+ data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
+ if (data == 0x0000)
+ return conf;
+
+ switch (dcb->type) {
case DCB_OUTPUT_TMDS:
conf = (ctrl & 0x00000f00) >> 8;
if (pclk >= 165000)
@@ -668,46 +703,52 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
}
data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
- if (data) {
+ if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
struct nvbios_init init = {
.subdev = nv_subdev(priv),
.bios = bios,
.offset = data,
- .outp = &dcb,
+ .outp = dcb,
.crtc = head,
.execute = 1,
};
- if (nvbios_exec(&init))
- return 0x0000;
- return conf;
+ nvbios_exec(&init);
}
}
- return 0x0000;
+ return conf;
}
static void
-nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
{
- int i;
+ exec_script(priv, head, 1);
+}
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
- if (mcc & (1 << head))
- exec_script(priv, head, i, mcc, 1);
- }
+static void
+nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
+{
+ exec_script(priv, head, 2);
+}
- nv_wr32(priv, 0x6101d4, 0x00000000);
- nv_wr32(priv, 0x6109d4, 0x00000000);
- nv_wr32(priv, 0x6101d0, 0x80000000);
+static void
+nvd0_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
+{
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ if (pclk)
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
}
static void
-nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
+nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
+ struct dcb_output *outp)
{
+ const int or = ffs(outp->or) - 1;
const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
@@ -750,105 +791,102 @@ nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
}
static void
-nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
{
- u32 pclk;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
- if (mcc & (1 << head))
- exec_script(priv, head, i, mcc, 2);
- }
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
+ if (conf != ~0) {
+ u32 addr, data;
+
+ if (outp.type == DCB_OUTPUT_DP) {
+ u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
+ switch ((sync & 0x000003c0) >> 6) {
+ case 6: pclk = pclk * 30 / 8; break;
+ case 5: pclk = pclk * 24 / 8; break;
+ case 2:
+ default:
+ pclk = pclk * 18 / 8;
+ break;
+ }
- pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
- nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask);
- if (pclk && (mask & 0x00010000)) {
- struct nouveau_clock *clk = nouveau_clock(priv);
- clk->pll_set(clk, PLL_VPLL0 + head, pclk);
- }
+ nouveau_dp_train(&priv->base, priv->sor.dp,
+ &outp, head, pclk);
+ }
- nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
+ exec_clkcmp(priv, head, 0, pclk, &outp);
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg;
- if (mcp & (1 << head)) {
- if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) {
- u32 addr, mask, data = 0x00000000;
- if (i < 4) {
- addr = 0x612280 + ((i - 0) * 0x800);
- mask = 0xffffffff;
- } else {
- switch (mcp & 0x00000f00) {
- case 0x00000800:
- case 0x00000900:
- nvd0_display_unk2_calc_tu(priv, head, i - 4);
- break;
- default:
- break;
- }
-
- addr = 0x612300 + ((i - 4) * 0x800);
- mask = 0x00000707;
- if (cfg & 0x00000100)
- data = 0x00000101;
- }
- nv_mask(priv, addr, mask, data);
- }
- break;
+ if (outp.type == DCB_OUTPUT_ANALOG) {
+ addr = 0x612280 + (ffs(outp.or) - 1) * 0x800;
+ data = 0x00000000;
+ } else {
+ if (outp.type == DCB_OUTPUT_DP)
+ nvd0_disp_intr_unk2_2_tu(priv, head, &outp);
+ addr = 0x612300 + (ffs(outp.or) - 1) * 0x800;
+ data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
}
- }
- nv_wr32(priv, 0x6101d4, 0x00000000);
- nv_wr32(priv, 0x6109d4, 0x00000000);
- nv_wr32(priv, 0x6101d0, 0x80000000);
+ nv_mask(priv, addr, 0x00000707, data);
+ }
}
static void
-nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
{
- int pclk, i;
-
- pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ exec_clkcmp(priv, head, 1, pclk, &outp);
+}
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20));
- if (mcp & (1 << head))
- exec_clkcmp(priv, head, i, mcp, 1, pclk);
+void
+nvd0_disp_intr_supervisor(struct work_struct *work)
+{
+ struct nv50_disp_priv *priv =
+ container_of(work, struct nv50_disp_priv, supervisor);
+ u32 mask[4];
+ int head;
+
+ nv_debug(priv, "supervisor %08x\n", priv->super);
+ for (head = 0; head < priv->head.nr; head++) {
+ mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
+ nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
}
- nv_wr32(priv, 0x6101d4, 0x00000000);
- nv_wr32(priv, 0x6109d4, 0x00000000);
- nv_wr32(priv, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
-{
- struct nouveau_bar *bar = nouveau_bar(priv);
- struct nouveau_disp *disp = &priv->base;
- struct nouveau_software_chan *chan, *temp;
- unsigned long flags;
-
- spin_lock_irqsave(&disp->vblank.lock, flags);
- list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
- if (chan->vblank.crtc != crtc)
- continue;
-
- nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
- bar->flush(bar);
- nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060014, chan->vblank.value);
-
- list_del(&chan->vblank.head);
- if (disp->vblank.put)
- disp->vblank.put(disp->vblank.data, crtc);
+ if (priv->super & 0x00000001) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvd0_disp_intr_unk1_0(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000002) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvd0_disp_intr_unk2_0(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00010000))
+ continue;
+ nvd0_disp_intr_unk2_1(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvd0_disp_intr_unk2_2(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000004) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvd0_disp_intr_unk4_0(priv, head);
+ }
}
- spin_unlock_irqrestore(&disp->vblank.lock, flags);
- if (disp->vblank.notify)
- disp->vblank.notify(disp->vblank.data, crtc);
+ for (head = 0; head < priv->head.nr; head++)
+ nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
}
void
@@ -884,27 +922,11 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
if (intr & 0x00100000) {
u32 stat = nv_rd32(priv, 0x6100ac);
- u32 mask = 0, crtc = ~0;
-
- while (!mask && ++crtc < priv->head.nr)
- mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800));
-
- if (stat & 0x00000001) {
- nv_wr32(priv, 0x6100ac, 0x00000001);
- nvd0_display_unk1_handler(priv, crtc, mask);
- stat &= ~0x00000001;
- }
-
- if (stat & 0x00000002) {
- nv_wr32(priv, 0x6100ac, 0x00000002);
- nvd0_display_unk2_handler(priv, crtc, mask);
- stat &= ~0x00000002;
- }
-
- if (stat & 0x00000004) {
- nv_wr32(priv, 0x6100ac, 0x00000004);
- nvd0_display_unk4_handler(priv, crtc, mask);
- stat &= ~0x00000004;
+ if (stat & 0x00000007) {
+ priv->super = (stat & 0x00000007);
+ schedule_work(&priv->supervisor);
+ nv_wr32(priv, 0x6100ac, priv->super);
+ stat &= ~0x00000007;
}
if (stat) {
@@ -920,7 +942,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
if (mask & intr) {
u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
if (stat & 0x00000001)
- nvd0_disp_intr_vblank(priv, i);
+ nouveau_event_trigger(priv->base.vblank, i);
nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
nv_rd32(priv, 0x6100c0 + (i * 0x800));
}
@@ -933,10 +955,11 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
- "display", &priv);
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -944,8 +967,9 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nvd0_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
priv->sclass = nvd0_disp_sclass;
- priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->head.nr = heads;
priv->dac.nr = 3;
priv->sor.nr = 4;
priv->dac.power = nv50_dac_power;
@@ -953,14 +977,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nvd0_hda_eld;
priv->sor.hdmi = nvd0_hdmi_ctrl;
- priv->sor.dp_train = nvd0_sor_dp_train;
- priv->sor.dp_train_init = nv94_sor_dp_train_init;
- priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
- priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
- priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->sor.dp = &nvd0_sor_dp_func;
return 0;
}
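
Both the nv50 and nvd0 interrupt paths now latch the supervisor stage into priv->super and defer the actual handling to a workqueue, since link training and VBIOS script execution can sleep and therefore cannot run from the hard IRQ handler. Below is a stripped-down sketch of that handoff; the fake_* names are invented for this note, and only the work_struct plumbing is shown, with no hardware access.

#include <linux/workqueue.h>

/* Hypothetical sketch of the IRQ -> workqueue handoff introduced above:
 * the hard IRQ handler only records which supervisor stage fired and
 * schedules the work item; the heavy lifting runs in process context. */
struct fake_disp_priv {
	struct work_struct supervisor;
	u32 super;
};

static void fake_disp_supervisor(struct work_struct *work)
{
	struct fake_disp_priv *priv =
		container_of(work, struct fake_disp_priv, supervisor);

	/* safe to sleep here: DP link training, VBIOS scripts, etc. */
	(void)priv->super;
}

static void fake_disp_irq(struct fake_disp_priv *priv, u32 intr)
{
	if (intr & 0x00000070) {		/* supervisor stages 1/2/3 */
		priv->super = intr & 0x00000070;
		schedule_work(&priv->supervisor);
		/* the real handler then acks the stage via nv_wr32() */
	}
}

static void fake_disp_setup(struct fake_disp_priv *priv)
{
	INIT_WORK(&priv->supervisor, fake_disp_supervisor);
}
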
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index 259537c4587e..20725b363d58 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -51,10 +51,11 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
- "display", &priv);
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -62,8 +63,9 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nve0_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
priv->sclass = nve0_disp_sclass;
- priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->head.nr = heads;
priv->dac.nr = 3;
priv->sor.nr = 4;
priv->dac.power = nv50_dac_power;
@@ -71,14 +73,7 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nvd0_hda_eld;
priv->sor.hdmi = nvd0_hdmi_ctrl;
- priv->sor.dp_train = nvd0_sor_dp_train;
- priv->sor.dp_train_init = nv94_sor_dp_train_init;
- priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
- priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
- priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->sor.dp = &nvd0_sor_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
new file mode 100644
index 000000000000..2c8ce351b52d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+#include <subdev/i2c.h>
+
+#include "nv50.h"
+
+/******************************************************************************
+ * DisplayPort
+ *****************************************************************************/
+static struct nouveau_i2c_port *
+nv50_pior_dp_find(struct nouveau_disp *disp, struct dcb_output *outp)
+{
+ struct nouveau_i2c *i2c = nouveau_i2c(disp);
+ return i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
+}
+
+static int
+nv50_pior_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int pattern)
+{
+ struct nouveau_i2c_port *port;
+ int ret = -EINVAL;
+
+ port = nv50_pior_dp_find(disp, outp);
+ if (port) {
+ if (port->func->pattern)
+ ret = port->func->pattern(port, pattern);
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int
+nv50_pior_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int lane_nr, int link_bw, bool enh)
+{
+ struct nouveau_i2c_port *port;
+ int ret = -EINVAL;
+
+ port = nv50_pior_dp_find(disp, outp);
+ if (port && port->func->lnk_ctl)
+ ret = port->func->lnk_ctl(port, lane_nr, link_bw, enh);
+
+ return ret;
+}
+
+static int
+nv50_pior_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int lane, int vsw, int pre)
+{
+ struct nouveau_i2c_port *port;
+ int ret = -EINVAL;
+
+ port = nv50_pior_dp_find(disp, outp);
+ if (port) {
+ if (port->func->drv_ctl)
+ ret = port->func->drv_ctl(port, lane, vsw, pre);
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+const struct nouveau_dp_func
+nv50_pior_dp_func = {
+ .pattern = nv50_pior_dp_pattern,
+ .lnk_ctl = nv50_pior_dp_lnk_ctl,
+ .drv_ctl = nv50_pior_dp_drv_ctl,
+};
+
+/******************************************************************************
+ * General PIOR handling
+ *****************************************************************************/
+int
+nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+ const u32 stat = data & NV50_DISP_PIOR_PWR_STATE;
+ const u32 soff = (or * 0x800);
+ nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | stat);
+ nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
+ return 0;
+}
+
+int
+nv50_pior_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ const u8 type = (mthd & NV50_DISP_PIOR_MTHD_TYPE) >> 12;
+ const u8 or = (mthd & NV50_DISP_PIOR_MTHD_OR);
+ u32 *data = args;
+ int ret;
+
+ if (size < sizeof(u32))
+ return -EINVAL;
+
+ mthd &= ~NV50_DISP_PIOR_MTHD_TYPE;
+ mthd &= ~NV50_DISP_PIOR_MTHD_OR;
+ switch (mthd) {
+ case NV50_DISP_PIOR_PWR:
+ ret = priv->pior.power(priv, or, data[0]);
+ priv->pior.type[or] = type;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index 39b6b67732d0..ab1e918469a8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -79,31 +79,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
ret = 0;
break;
- case NV94_DISP_SOR_DP_TRAIN:
- switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
- case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
- ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
- break;
- case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
- ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
- break;
- case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
- ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
- break;
- default:
- break;
- }
- break;
- case NV94_DISP_SOR_DP_LNKCTL:
- ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
- break;
- case NV94_DISP_SOR_DP_DRVCTL(0):
- case NV94_DISP_SOR_DP_DRVCTL(1):
- case NV94_DISP_SOR_DP_DRVCTL(2):
- case NV94_DISP_SOR_DP_DRVCTL(3):
- ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
- type, mask, data, &outp);
- break;
default:
BUG_ON(1);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index f6edd009762e..7ec4ee83fb64 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -33,124 +33,53 @@
#include "nv50.h"
static inline u32
-nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+nv94_sor_soff(struct dcb_output *outp)
{
- static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
- static const u8 nv94[] = { 16, 8, 0, 24 };
- if (nv_device(priv)->chipset == 0xaf)
- return nvaf[lane];
- return nv94[lane];
+ return (ffs(outp->or) - 1) * 0x800;
}
-int
-nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static inline u32
+nv94_sor_loff(struct dcb_output *outp)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- struct nvbios_dpout info;
- u8 ver, hdr, cnt, len;
- u16 outp;
-
- outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
- if (outp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .outp = dcbo,
- .crtc = head,
- .execute = 1,
- };
-
- if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
- init.offset = info.script[2];
- else
- init.offset = info.script[3];
- nvbios_exec(&init);
-
- init.offset = info.script[0];
- nvbios_exec(&init);
- }
-
- return 0;
+ return nv94_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
}
-int
-nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static inline u32
+nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- struct nvbios_dpout info;
- u8 ver, hdr, cnt, len;
- u16 outp;
-
- outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
- if (outp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .offset = info.script[1],
- .outp = dcbo,
- .crtc = head,
- .execute = 1,
- };
-
- nvbios_exec(&init);
- }
-
- return 0;
+ static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+ static const u8 nv94[] = { 16, 8, 0, 24 };
+ if (nv_device(priv)->chipset == 0xaf)
+ return nvaf[lane];
+ return nv94[lane];
}
-int
-nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
- u16 type, u16 mask, u32 data, struct dcb_output *info)
+static int
+nv94_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int pattern)
{
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
- nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 loff = nv94_sor_loff(outp);
+ nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24);
return 0;
}
-int
-nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static int
+nv94_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int link_nr, int link_bw, bool enh_frame)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
- u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 soff = nv94_sor_soff(outp);
+ const u32 loff = nv94_sor_loff(outp);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
- u32 outp, lane = 0;
- u8 ver, hdr, cnt, len;
- struct nvbios_dpout info;
+ u32 lane = 0;
int i;
- /* -> 10Khz units */
- link_bw *= 2700;
-
- outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
- if (outp && info.lnkcmp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .offset = 0x0000,
- .outp = dcbo,
- .crtc = head,
- .execute = 1,
- };
-
- while (link_bw < nv_ro16(bios, info.lnkcmp))
- info.lnkcmp += 4;
- init.offset = nv_ro16(bios, info.lnkcmp + 2);
-
- nvbios_exec(&init);
- }
-
dpctrl |= ((1 << link_nr) - 1) << 16;
- if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ if (enh_frame)
dpctrl |= 0x00004000;
- if (link_bw > 16200)
+ if (link_bw > 0x06)
clksor |= 0x00040000;
for (i = 0; i < link_nr; i++)
@@ -162,24 +91,25 @@ nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
return 0;
}
-int
-nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static int
+nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int lane, int swing, int preem)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
- const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 loff = nv94_sor_loff(outp);
u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
u8 ver, hdr, cnt, len;
- struct nvbios_dpout outp;
+ struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
- addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
+ &ver, &hdr, &cnt, &len, &info);
if (!addr)
return -ENODEV;
- addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
+ &ver, &hdr, &cnt, &len, &ocfg);
if (!addr)
return -EINVAL;
@@ -188,3 +118,10 @@ nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
return 0;
}
+
+const struct nouveau_dp_func
+nv94_sor_dp_func = {
+ .pattern = nv94_sor_dp_pattern,
+ .lnk_ctl = nv94_sor_dp_lnk_ctl,
+ .drv_ctl = nv94_sor_dp_drv_ctl,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index c37ce7e29f5d..9e1d435d7282 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -33,59 +33,49 @@
#include "nv50.h"
static inline u32
+nvd0_sor_soff(struct dcb_output *outp)
+{
+ return (ffs(outp->or) - 1) * 0x800;
+}
+
+static inline u32
+nvd0_sor_loff(struct dcb_output *outp)
+{
+ return nvd0_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
+}
+
+static inline u32
nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
{
static const u8 nvd0[] = { 16, 8, 0, 24 };
return nvd0[lane];
}
-int
-nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
- u16 type, u16 mask, u32 data, struct dcb_output *info)
+static int
+nvd0_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int pattern)
{
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
- nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 loff = nvd0_sor_loff(outp);
+ nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}
-int
-nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static int
+nvd0_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int link_nr, int link_bw, bool enh_frame)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- const u8 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
- const u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 soff = nvd0_sor_soff(outp);
+ const u32 loff = nvd0_sor_loff(outp);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
- u32 outp, lane = 0;
- u8 ver, hdr, cnt, len;
- struct nvbios_dpout info;
+ u32 lane = 0;
int i;
- outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
- if (outp && info.lnkcmp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .offset = 0x0000,
- .outp = dcbo,
- .crtc = head,
- .execute = 1,
- };
-
- while (nv_ro08(bios, info.lnkcmp) < link_bw)
- info.lnkcmp += 3;
- init.offset = nv_ro16(bios, info.lnkcmp + 1);
-
- nvbios_exec(&init);
- }
-
clksor |= link_bw << 18;
dpctrl |= ((1 << link_nr) - 1) << 16;
- if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ if (enh_frame)
dpctrl |= 0x00004000;
for (i = 0; i < link_nr; i++)
@@ -97,24 +87,25 @@ nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
return 0;
}
-int
-nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static int
+nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int lane, int swing, int preem)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
- const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 loff = nvd0_sor_loff(outp);
u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
u8 ver, hdr, cnt, len;
- struct nvbios_dpout outp;
+ struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
- addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
+ &ver, &hdr, &cnt, &len, &info);
if (!addr)
return -ENODEV;
- addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
+ &ver, &hdr, &cnt, &len, &ocfg);
if (!addr)
return -EINVAL;
@@ -124,3 +115,10 @@ nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
return 0;
}
+
+const struct nouveau_dp_func
+nvd0_sor_dp_func = {
+ .pattern = nvd0_sor_dp_pattern,
+ .lnk_ctl = nvd0_sor_dp_lnk_ctl,
+ .drv_ctl = nvd0_sor_dp_drv_ctl,
+};
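The three vtables above (nv50_pior_dp_func, nv94_sor_dp_func, nvd0_sor_dp_func) give the common DisplayPort code a single calling convention for PIOR- and SOR-driven outputs. The fragment below is only an illustrative sketch of such a caller; the dp_link_up() helper and its argument choices are assumptions made for this example, not part of the patch.

/* Illustrative sketch: driving an output through struct nouveau_dp_func.
 * dp_link_up() is hypothetical; only the ->pattern/->lnk_ctl/->drv_ctl
 * hooks and their signatures come from this patch.
 */
static int
dp_link_up(struct nouveau_disp *disp, struct dcb_output *outp,
	   const struct nouveau_dp_func *func, int head,
	   int link_nr, int link_bw, bool enh_frame)
{
	int lane, ret;

	/* bring the link up at the requested lane count and rate */
	ret = func->lnk_ctl(disp, outp, head, link_nr, link_bw, enh_frame);
	if (ret)
		return ret;

	/* training pattern 1, minimum voltage swing / pre-emphasis */
	func->pattern(disp, outp, head, 1);
	for (lane = 0; lane < link_nr; lane++) {
		ret = func->drv_ctl(disp, outp, head, lane, 0, 0);
		if (ret)
			return ret;
	}

	/* DPCD status polling and pattern 2 / pattern-disable would follow */
	return 0;
}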
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index c2b9db335816..7341ebe131fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -22,8 +22,10 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/object.h>
#include <core/handle.h>
+#include <core/event.h>
#include <core/class.h>
#include <engine/dmaobj.h>
@@ -146,10 +148,25 @@ nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
return -1;
}
+const char *
+nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid)
+{
+ struct nouveau_fifo_chan *chan = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ if (chid >= fifo->min && chid <= fifo->max)
+ chan = (void *)fifo->channel[chid];
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ return nouveau_client_name(chan);
+}
+
void
nouveau_fifo_destroy(struct nouveau_fifo *priv)
{
kfree(priv->channel);
+ nouveau_event_destroy(&priv->uevent);
nouveau_engine_destroy(&priv->base);
}
@@ -174,6 +191,10 @@ nouveau_fifo_create_(struct nouveau_object *parent,
if (!priv->channel)
return -ENOMEM;
+ ret = nouveau_event_create(1, &priv->uevent);
+ if (ret)
+ return ret;
+
priv->chid = nouveau_fifo_chid;
spin_lock_init(&priv->lock);
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index a47a8548f9e0..f877bd524a92 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -28,6 +28,7 @@
#include <core/namedb.h>
#include <core/handle.h>
#include <core/ramht.h>
+#include <core/event.h>
#include <subdev/instmem.h>
#include <subdev/instmem/nv04.h>
@@ -398,6 +399,98 @@ out:
return handled;
}
+static void
+nv04_fifo_cache_error(struct nouveau_device *device,
+ struct nv04_fifo_priv *priv, u32 chid, u32 get)
+{
+ u32 mthd, data;
+ int ptr;
+
+ /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
+ * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
+ * show that it wraps around to the start at GET=0x800.. No clue as to
+ * why..
+ */
+ ptr = (get & 0x7ff) >> 2;
+
+ if (device->card_type < NV_40) {
+ mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
+ const char *client_name =
+ nouveau_client_name_for_fifo_chid(&priv->base, chid);
+ nv_error(priv,
+ "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
+ data);
+ }
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+}
+
+static void
+nv04_fifo_dma_pusher(struct nouveau_device *device, struct nv04_fifo_priv *priv,
+ u32 chid)
+{
+ const char *client_name;
+ u32 dma_get = nv_rd32(priv, 0x003244);
+ u32 dma_put = nv_rd32(priv, 0x003240);
+ u32 push = nv_rd32(priv, 0x003220);
+ u32 state = nv_rd32(priv, 0x003228);
+
+ client_name = nouveau_client_name_for_fifo_chid(&priv->base, chid);
+
+ if (device->card_type == NV_50) {
+ u32 ho_get = nv_rd32(priv, 0x003328);
+ u32 ho_put = nv_rd32(priv, 0x003320);
+ u32 ib_get = nv_rd32(priv, 0x003334);
+ u32 ib_put = nv_rd32(priv, 0x003330);
+
+ nv_error(priv,
+ "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
+ chid, client_name, ho_get, dma_get, ho_put, dma_put,
+ ib_get, ib_put, state, nv_dma_state_err(state), push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nv_wr32(priv, 0x003364, 0x00000000);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nv_wr32(priv, 0x003244, dma_put);
+ nv_wr32(priv, 0x003328, ho_put);
+ } else
+ if (ib_get != ib_put)
+ nv_wr32(priv, 0x003334, ib_put);
+ } else {
+ nv_error(priv,
+ "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
+ chid, client_name, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
+
+ if (dma_get != dma_put)
+ nv_wr32(priv, 0x003244, dma_put);
+ }
+
+ nv_wr32(priv, 0x003228, 0x00000000);
+ nv_wr32(priv, 0x003220, 0x00000001);
+ nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+}
+
void
nv04_fifo_intr(struct nouveau_subdev *subdev)
{
@@ -416,96 +509,12 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
- uint32_t mthd, data;
- int ptr;
-
- /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
- * wrapping on my G80 chips, but CACHE1 isn't big
- * enough for this much data.. Tests show that it
- * wraps around to the start at GET=0x800.. No clue
- * as to why..
- */
- ptr = (get & 0x7ff) >> 2;
-
- if (device->card_type < NV_40) {
- mthd = nv_rd32(priv,
- NV04_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(priv,
- NV04_PFIFO_CACHE1_DATA(ptr));
- } else {
- mthd = nv_rd32(priv,
- NV40_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(priv,
- NV40_PFIFO_CACHE1_DATA(ptr));
- }
-
- if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
- nv_error(priv, "CACHE_ERROR - Ch %d/%d "
- "Mthd 0x%04x Data 0x%08x\n",
- chid, (mthd >> 13) & 7, mthd & 0x1ffc,
- data);
- }
-
- nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(priv, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
-
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
- nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
- nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
-
- nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
- nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
- nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-
+ nv04_fifo_cache_error(device, priv, chid, get);
status &= ~NV_PFIFO_INTR_CACHE_ERROR;
}
if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- u32 dma_get = nv_rd32(priv, 0x003244);
- u32 dma_put = nv_rd32(priv, 0x003240);
- u32 push = nv_rd32(priv, 0x003220);
- u32 state = nv_rd32(priv, 0x003228);
-
- if (device->card_type == NV_50) {
- u32 ho_get = nv_rd32(priv, 0x003328);
- u32 ho_put = nv_rd32(priv, 0x003320);
- u32 ib_get = nv_rd32(priv, 0x003334);
- u32 ib_put = nv_rd32(priv, 0x003330);
-
- nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
- "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x (err: %s) Push 0x%08x\n",
- chid, ho_get, dma_get, ho_put,
- dma_put, ib_get, ib_put, state,
- nv_dma_state_err(state),
- push);
-
- /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
- nv_wr32(priv, 0x003364, 0x00000000);
- if (dma_get != dma_put || ho_get != ho_put) {
- nv_wr32(priv, 0x003244, dma_put);
- nv_wr32(priv, 0x003328, ho_put);
- } else
- if (ib_get != ib_put) {
- nv_wr32(priv, 0x003334, ib_put);
- }
- } else {
- nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
- "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
- chid, dma_get, dma_put, state,
- nv_dma_state_err(state), push);
-
- if (dma_get != dma_put)
- nv_wr32(priv, 0x003244, dma_put);
- }
-
- nv_wr32(priv, 0x003228, 0x00000000);
- nv_wr32(priv, 0x003220, 0x00000001);
- nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ nv04_fifo_dma_pusher(device, priv, chid);
status &= ~NV_PFIFO_INTR_DMA_PUSHER;
}
@@ -528,6 +537,12 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
status &= ~0x00000010;
nv_wr32(priv, 0x002100, 0x00000010);
}
+
+ if (status & 0x40000000) {
+ nouveau_event_trigger(priv->base.uevent, 0);
+ nv_wr32(priv, 0x002100, 0x40000000);
+ status &= ~0x40000000;
+ }
}
if (status) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index bd096364f680..840af6172788 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -129,7 +129,8 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
/* do the kickoff... */
nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
- nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
+ nv_error(priv, "channel %d [%s] unload timeout\n",
+ chan->base.chid, nouveau_client_name(chan));
if (suspend)
ret = -EBUSY;
}
@@ -480,7 +481,7 @@ nv50_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x002044, 0x01003fff);
nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0xffffffff);
+ nv_wr32(priv, 0x002140, 0xbfffffff);
for (i = 0; i < 128; i++)
nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 1eb1c512f503..094000e87871 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -26,6 +26,7 @@
#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
+#include <core/event.h>
#include <core/class.h>
#include <core/math.h>
@@ -100,7 +101,8 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
nv_wr32(priv, 0x002520, save);
if (!done) {
- nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
+ nv_error(priv, "channel %d [%s] unload timeout\n",
+ chan->base.chid, nouveau_client_name(chan));
if (suspend)
return -EBUSY;
}
@@ -378,6 +380,20 @@ nv84_fifo_cclass = {
* PFIFO engine
******************************************************************************/
+static void
+nv84_fifo_uevent_enable(struct nouveau_event *event, int index)
+{
+ struct nv84_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x40000000, 0x40000000);
+}
+
+static void
+nv84_fifo_uevent_disable(struct nouveau_event *event, int index)
+{
+ struct nv84_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x40000000, 0x00000000);
+}
+
static int
nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -401,6 +417,10 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ priv->base.uevent->enable = nv84_fifo_uevent_enable;
+ priv->base.uevent->disable = nv84_fifo_uevent_disable;
+ priv->base.uevent->priv = priv;
+
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv84_fifo_cclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index b4365dde1859..4f226afb5591 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -27,6 +27,7 @@
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engctx.h>
+#include <core/event.h>
#include <core/class.h>
#include <core/math.h>
#include <core/enum.h>
@@ -149,7 +150,8 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
- nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+ nv_error(priv, "channel %d [%s] kick timeout\n",
+ chan->base.chid, nouveau_client_name(chan));
if (suspend)
return -EBUSY;
}
@@ -333,17 +335,17 @@ nvc0_fifo_cclass = {
******************************************************************************/
static const struct nouveau_enum nvc0_fifo_fault_unit[] = {
- { 0x00, "PGRAPH" },
+ { 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
{ 0x03, "PEEPHOLE" },
{ 0x04, "BAR1" },
{ 0x05, "BAR3" },
- { 0x07, "PFIFO" },
- { 0x10, "PBSP" },
- { 0x11, "PPPP" },
+ { 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
+ { 0x10, "PBSP", NULL, NVDEV_ENGINE_BSP },
+ { 0x11, "PPPP", NULL, NVDEV_ENGINE_PPP },
{ 0x13, "PCOUNTER" },
- { 0x14, "PVP" },
- { 0x15, "PCOPY0" },
- { 0x16, "PCOPY1" },
+ { 0x14, "PVP", NULL, NVDEV_ENGINE_VP },
+ { 0x15, "PCOPY0", NULL, NVDEV_ENGINE_COPY0 },
+ { 0x16, "PCOPY1", NULL, NVDEV_ENGINE_COPY1 },
{ 0x17, "PDAEMON" },
{}
};
@@ -402,6 +404,9 @@ nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
u32 client = (stat & 0x00001f00) >> 8;
+ const struct nouveau_enum *en;
+ struct nouveau_engine *engine;
+ struct nouveau_object *engctx = NULL;
switch (unit) {
case 3: /* PEEPHOLE */
@@ -420,16 +425,26 @@ nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
"write" : "read", (u64)vahi << 32 | valo);
nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
- printk("] from ");
- nouveau_enum_print(nvc0_fifo_fault_unit, unit);
+ pr_cont("] from ");
+ en = nouveau_enum_print(nvc0_fifo_fault_unit, unit);
if (stat & 0x00000040) {
- printk("/");
+ pr_cont("/");
nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
} else {
- printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
}
- printk(" on channel 0x%010llx\n", (u64)inst << 12);
+
+ if (en && en->data2) {
+ engine = nouveau_engine(priv, en->data2);
+ if (engine)
+ engctx = nouveau_engctx_get(engine, inst);
+
+ }
+ pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
+ nouveau_client_name(engctx));
+
+ nouveau_engctx_put(engctx);
}
static int
@@ -484,10 +499,12 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
if (show) {
nv_error(priv, "SUBFIFO%d:", unit);
nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
- printk("\n");
- nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
- "data 0x%08x\n",
- unit, chid, subc, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid,
+ nouveau_client_name_for_fifo_chid(&priv->base, chid),
+ subc, mthd, data);
}
nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
@@ -501,12 +518,34 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
u32 mask = nv_rd32(priv, 0x002140);
u32 stat = nv_rd32(priv, 0x002100) & mask;
+ if (stat & 0x00000001) {
+ u32 intr = nv_rd32(priv, 0x00252c);
+ nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr);
+ nv_wr32(priv, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
if (stat & 0x00000100) {
- nv_warn(priv, "unknown status 0x00000100\n");
+ u32 intr = nv_rd32(priv, 0x00254c);
+ nv_warn(priv, "INTR 0x00000100: 0x%08x\n", intr);
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
+ if (stat & 0x00010000) {
+ u32 intr = nv_rd32(priv, 0x00256c);
+ nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr);
+ nv_wr32(priv, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x01000000) {
+ u32 intr = nv_rd32(priv, 0x00258c);
+ nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr);
+ nv_wr32(priv, 0x002100, 0x01000000);
+ stat &= ~0x01000000;
+ }
+
if (stat & 0x10000000) {
u32 units = nv_rd32(priv, 0x00259c);
u32 u = units;
@@ -536,11 +575,20 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x40000000) {
- nv_warn(priv, "unknown status 0x40000000\n");
- nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+ u32 intr0 = nv_rd32(priv, 0x0025a4);
+ u32 intr1 = nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);

+ nv_debug(priv, "INTR 0x40000000: 0x%08x 0x%08x\n",
+ intr0, intr1);
stat &= ~0x40000000;
}
+ if (stat & 0x80000000) {
+ u32 intr = nv_mask(priv, 0x0025a8, 0x00000000, 0x00000000);
+ nouveau_event_trigger(priv->base.uevent, 0);
+ nv_debug(priv, "INTR 0x80000000: 0x%08x\n", intr);
+ stat &= ~0x80000000;
+ }
+
if (stat) {
nv_fatal(priv, "unhandled status 0x%08x\n", stat);
nv_wr32(priv, 0x002100, stat);
@@ -548,6 +596,20 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
}
+static void
+nvc0_fifo_uevent_enable(struct nouveau_event *event, int index)
+{
+ struct nvc0_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
+}
+
+static void
+nvc0_fifo_uevent_disable(struct nouveau_event *event, int index)
+{
+ struct nvc0_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
+}
+
static int
nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -581,6 +643,10 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ priv->base.uevent->enable = nvc0_fifo_uevent_enable;
+ priv->base.uevent->disable = nvc0_fifo_uevent_disable;
+ priv->base.uevent->priv = priv;
+
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nvc0_fifo_intr;
nv_engine(priv)->cclass = &nvc0_fifo_cclass;
@@ -639,7 +705,8 @@ nvc0_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0xbfffffff);
+ nv_wr32(priv, 0x002140, 0x3fffffff);
+ nv_wr32(priv, 0x002628, 0x00000001); /* makes mthd 0x20 work */
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index c930da99c2c1..4419e40d88e9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -27,6 +27,7 @@
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engctx.h>
+#include <core/event.h>
#include <core/class.h>
#include <core/math.h>
#include <core/enum.h>
@@ -184,7 +185,8 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
- nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+ nv_error(priv, "channel %d [%s] kick timeout\n",
+ chan->base.chid, nouveau_client_name(chan));
if (suspend)
return -EBUSY;
}
@@ -412,20 +414,34 @@ nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
u32 client = (stat & 0x00001f00) >> 8;
+ const struct nouveau_enum *en;
+ struct nouveau_engine *engine;
+ struct nouveau_object *engctx = NULL;
nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
"write" : "read", (u64)vahi << 32 | valo);
nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
- printk("] from ");
- nouveau_enum_print(nve0_fifo_fault_unit, unit);
+ pr_cont("] from ");
+ en = nouveau_enum_print(nve0_fifo_fault_unit, unit);
if (stat & 0x00000040) {
- printk("/");
+ pr_cont("/");
nouveau_enum_print(nve0_fifo_fault_hubclient, client);
} else {
- printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
}
- printk(" on channel 0x%010llx\n", (u64)inst << 12);
+
+ if (en && en->data2) {
+ engine = nouveau_engine(priv, en->data2);
+ if (engine)
+ engctx = nouveau_engctx_get(engine, inst);
+
+ }
+
+ pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
+ nouveau_client_name(engctx));
+
+ nouveau_engctx_put(engctx);
}
static int
@@ -480,10 +496,12 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
if (show) {
nv_error(priv, "SUBFIFO%d:", unit);
nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
- printk("\n");
- nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
- "data 0x%08x\n",
- unit, chid, subc, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid,
+ nouveau_client_name_for_fifo_chid(&priv->base, chid),
+ subc, mthd, data);
}
nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
@@ -537,6 +555,12 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
stat &= ~0x40000000;
}
+ if (stat & 0x80000000) {
+ nouveau_event_trigger(priv->base.uevent, 0);
+ nv_wr32(priv, 0x002100, 0x80000000);
+ stat &= ~0x80000000;
+ }
+
if (stat) {
nv_fatal(priv, "unhandled status 0x%08x\n", stat);
nv_wr32(priv, 0x002100, stat);
@@ -544,6 +568,20 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
}
+static void
+nve0_fifo_uevent_enable(struct nouveau_event *event, int index)
+{
+ struct nve0_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
+}
+
+static void
+nve0_fifo_uevent_disable(struct nouveau_event *event, int index)
+{
+ struct nve0_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
+}
+
static int
nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -567,6 +605,10 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ priv->base.uevent->enable = nve0_fifo_uevent_enable;
+ priv->base.uevent->disable = nve0_fifo_uevent_disable;
+ priv->base.uevent->priv = priv;
+
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nve0_fifo_intr;
nv_engine(priv)->cclass = &nve0_fifo_cclass;
@@ -617,7 +659,7 @@ nve0_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x002a00, 0xffffffff);
nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0xbfffffff);
+ nv_wr32(priv, 0x002140, 0x3fffffff);
return 0;
}
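Each PFIFO implementation now exposes its non-stall interrupt as index 0 of base.uevent: the enable/disable hooks gate bit 31 (bit 30 on nv84) of 0x002140, and the interrupt handlers call nouveau_event_trigger() when that bit fires. The sketch below shows how a consumer would be expected to arm it; wait_handler() is hypothetical, and the idea that the first nouveau_event_get() reference invokes the enable hook is inferred from the refs counter in the new event core rather than spelled out in these hunks.

/* Illustrative sketch: listening for the FIFO non-stall event added above.
 * wait_handler() is hypothetical; nouveau_event_get()/put() and the
 * NVKM_EVENT_* return codes come from the new core/event.h.
 */
static int
wait_handler(struct nouveau_eventh *event, int index)
{
	/* woken by nouveau_event_trigger(priv->base.uevent, 0) in the ISR */
	return NVKM_EVENT_KEEP;	/* stay registered for further triggers */
}

static struct nouveau_eventh fifo_wait = { .func = wait_handler };

static void
fifo_wait_arm(struct nouveau_fifo *fifo, bool on)
{
	if (on)	/* first reference presumably calls uevent->enable() */
		nouveau_event_get(fifo->uevent, 0, &fifo_wait);
	else	/* last reference presumably calls uevent->disable() */
		nouveau_event_put(fifo->uevent, 0, &fifo_wait);
}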
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index e30a9c5ff1fc..ad13dcdd15f9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -22,6 +22,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/handle.h>
@@ -1297,16 +1298,17 @@ nv04_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv04_graph_intr_name, show);
- printk(" nsource:");
+ pr_cont(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
+ pr_cont(" nstatus:");
nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
- printk("\n");
- nv_error(priv, "ch %d/%d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, nouveau_client_name(chan), subc, class, mthd,
+ data);
}
nouveau_namedb_put(handle);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 5c0f843ea249..23c143aaa556 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -22,6 +22,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/handle.h>
@@ -1193,16 +1194,17 @@ nv10_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
- printk(" nsource:");
+ pr_cont(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
+ pr_cont(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- nv_error(priv, "ch %d/%d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, nouveau_client_name(chan), subc, class, mthd,
+ data);
}
nouveau_namedb_put(handle);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 5b20401bf911..0607b9801748 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -1,3 +1,4 @@
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
@@ -224,15 +225,17 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
- printk(" nsource:");
+ pr_cont(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
+ pr_cont(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, nouveau_client_name(engctx), subc, class, mthd,
+ data);
}
nouveau_engctx_put(engctx);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 0b36dd3deebd..17049d5c723d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/handle.h>
@@ -321,16 +322,17 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
- printk(" nsource:");
+ pr_cont(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
+ pr_cont(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- nv_error(priv, "ch %d [0x%08x] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst << 4, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [0x%08x %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst << 4, nouveau_client_name(engctx), subc,
+ class, mthd, data);
}
nouveau_engctx_put(engctx);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index b1c3d835b4c2..f2b1a7a124f2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -24,6 +24,7 @@
#include <core/os.h>
#include <core/class.h>
+#include <core/client.h>
#include <core/handle.h>
#include <core/engctx.h>
#include <core/enum.h>
@@ -418,7 +419,7 @@ nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
nv_error(priv, "TRAP_MP_EXEC - "
"TP %d MP %d: ", tpid, i);
nouveau_enum_print(nv50_mp_exec_error_names, status);
- printk(" at %06x warp %d, opcode %08x %08x\n",
+ pr_cont(" at %06x warp %d, opcode %08x %08x\n",
pc&0xffffff, pc >> 24,
oplow, ophigh);
}
@@ -532,7 +533,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
static int
nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
- int chid, u64 inst)
+ int chid, u64 inst, struct nouveau_object *engctx)
{
u32 status = nv_rd32(priv, 0x400108);
u32 ustatus;
@@ -565,12 +566,11 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
nv_error(priv, "TRAP DISPATCH_FAULT\n");
if (display && (addr & 0x80000000)) {
- nv_error(priv, "ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x%08x "
- "400808 0x%08x 400848 0x%08x\n",
- chid, inst, subc, class, mthd, datah,
- datal, addr, r848);
+ nv_error(priv,
+ "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n",
+ chid, inst,
+ nouveau_client_name(engctx), subc,
+ class, mthd, datah, datal, addr, r848);
} else
if (display) {
nv_error(priv, "no stuck command?\n");
@@ -591,11 +591,11 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
nv_error(priv, "TRAP DISPATCH_QUERY\n");
if (display && (addr & 0x80000000)) {
- nv_error(priv, "ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x 40084c 0x%08x\n",
- chid, inst, subc, class, mthd,
- data, addr);
+ nv_error(priv,
+ "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n",
+ chid, inst,
+ nouveau_client_name(engctx), subc,
+ class, mthd, data, addr);
} else
if (display) {
nv_error(priv, "no stuck command?\n");
@@ -623,7 +623,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
if (display) {
nv_error(priv, "TRAP_M2MF");
nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
- printk("\n");
+ pr_cont("\n");
nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
@@ -644,7 +644,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
if (display) {
nv_error(priv, "TRAP_VFETCH");
nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
- printk("\n");
+ pr_cont("\n");
nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
@@ -661,7 +661,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
if (display) {
nv_error(priv, "TRAP_STRMOUT");
nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
- printk("\n");
+ pr_cont("\n");
nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
@@ -682,7 +682,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
if (display) {
nv_error(priv, "TRAP_CCACHE");
nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
- printk("\n");
+ pr_cont("\n");
nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
" %08x %08x %08x\n",
nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
@@ -774,11 +774,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
u32 ecode = nv_rd32(priv, 0x400110);
nv_error(priv, "DATA_ERROR ");
nouveau_enum_print(nv50_data_error_names, ecode);
- printk("\n");
+ pr_cont("\n");
}
if (stat & 0x00200000) {
- if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12))
+ if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12,
+ engctx))
show &= ~0x00200000;
}
@@ -786,12 +787,13 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x400500, 0x00010001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv50_graph_intr_name, show);
- printk("\n");
- nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, nouveau_client_name(engctx),
+ subc, class, mthd, data);
}
if (nv_rd32(priv, 0x400824) & (1 << 31))
@@ -907,9 +909,8 @@ nv50_graph_init(struct nouveau_object *object)
nv_wr32(priv, 0x400828, 0x00000000);
nv_wr32(priv, 0x40082c, 0x00000000);
nv_wr32(priv, 0x400830, 0x00000000);
- nv_wr32(priv, 0x400724, 0x00000000);
nv_wr32(priv, 0x40032c, 0x00000000);
- nv_wr32(priv, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
+ nv_wr32(priv, 0x400330, 0x00000000);
/* some unknown zcull magic */
switch (nv_device(priv)->chipset & 0xf0) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 45aff5f5085a..0de0dd724aff 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -433,10 +433,10 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000010) {
handle = nouveau_handle_get_class(engctx, class);
if (!handle || nv_call(handle->object, mthd, data)) {
- nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x\n",
- chid, inst << 12, subc, class, mthd, data);
+ nv_error(priv,
+ "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx),
+ subc, class, mthd, data);
}
nouveau_handle_put(handle);
nv_wr32(priv, 0x400100, 0x00000010);
@@ -444,9 +444,10 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x00000020) {
- nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, subc, class, mthd, data);
+ nv_error(priv,
+ "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx), subc,
+ class, mthd, data);
nv_wr32(priv, 0x400100, 0x00000020);
stat &= ~0x00000020;
}
@@ -454,15 +455,16 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00100000) {
nv_error(priv, "DATA_ERROR [");
nouveau_enum_print(nv50_data_error_names, code);
- printk("] ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, subc, class, mthd, data);
+ pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx), subc,
+ class, mthd, data);
nv_wr32(priv, 0x400100, 0x00100000);
stat &= ~0x00100000;
}
if (stat & 0x00200000) {
- nv_error(priv, "TRAP ch %d [0x%010llx]\n", chid, inst << 12);
+ nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12,
+ nouveau_client_name(engctx));
nvc0_graph_trap_intr(priv);
nv_wr32(priv, 0x400100, 0x00200000);
stat &= ~0x00200000;
@@ -611,10 +613,8 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
static void
nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc)
{
- if (fuc->data) {
- kfree(fuc->data);
- fuc->data = NULL;
- }
+ kfree(fuc->data);
+ fuc->data = NULL;
}
void
@@ -622,8 +622,7 @@ nvc0_graph_dtor(struct nouveau_object *object)
{
struct nvc0_graph_priv *priv = (void *)object;
- if (priv->data)
- kfree(priv->data);
+ kfree(priv->data);
nvc0_graph_dtor_fw(&priv->fuc409c);
nvc0_graph_dtor_fw(&priv->fuc409d);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 9f82e9702b46..61cec0f6ff1c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -78,15 +78,16 @@ nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
}
static void
-nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
+nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
+ struct nouveau_object *engctx)
{
u32 trap = nv_rd32(priv, 0x400108);
int rop;
if (trap & 0x00000001) {
u32 stat = nv_rd32(priv, 0x404000);
- nv_error(priv, "DISPATCH ch %d [0x%010llx] 0x%08x\n",
- chid, inst, stat);
+ nv_error(priv, "DISPATCH ch %d [0x%010llx %s] 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), stat);
nv_wr32(priv, 0x404000, 0xc0000000);
nv_wr32(priv, 0x400108, 0x00000001);
trap &= ~0x00000001;
@@ -94,8 +95,8 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
if (trap & 0x00000010) {
u32 stat = nv_rd32(priv, 0x405840);
- nv_error(priv, "SHADER ch %d [0x%010llx] 0x%08x\n",
- chid, inst, stat);
+ nv_error(priv, "SHADER ch %d [0x%010llx %s] 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), stat);
nv_wr32(priv, 0x405840, 0xc0000000);
nv_wr32(priv, 0x400108, 0x00000010);
trap &= ~0x00000010;
@@ -105,8 +106,10 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
for (rop = 0; rop < priv->rop_nr; rop++) {
u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
- nv_error(priv, "ROP%d ch %d [0x%010llx] 0x%08x 0x%08x\n",
- rop, chid, inst, statz, statc);
+ nv_error(priv,
+ "ROP%d ch %d [0x%010llx %s] 0x%08x 0x%08x\n",
+ rop, chid, inst, nouveau_client_name(engctx),
+ statz, statc);
nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
}
@@ -115,8 +118,8 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
}
if (trap) {
- nv_error(priv, "TRAP ch %d [0x%010llx] 0x%08x\n",
- chid, inst, trap);
+ nv_error(priv, "TRAP ch %d [0x%010llx %s] 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), trap);
nv_wr32(priv, 0x400108, trap);
}
}
@@ -145,10 +148,10 @@ nve0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000010) {
handle = nouveau_handle_get_class(engctx, class);
if (!handle || nv_call(handle->object, mthd, data)) {
- nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ nv_error(priv,
+ "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), subc,
+ class, mthd, data);
}
nouveau_handle_put(handle);
nv_wr32(priv, 0x400100, 0x00000010);
@@ -156,9 +159,10 @@ nve0_graph_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x00000020) {
- nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ nv_error(priv,
+ "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), subc, class,
+ mthd, data);
nv_wr32(priv, 0x400100, 0x00000020);
stat &= ~0x00000020;
}
@@ -166,15 +170,15 @@ nve0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00100000) {
nv_error(priv, "DATA_ERROR [");
nouveau_enum_print(nv50_data_error_names, code);
- printk("] ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), subc, class,
+ mthd, data);
nv_wr32(priv, 0x400100, 0x00100000);
stat &= ~0x00100000;
}
if (stat & 0x00200000) {
- nve0_graph_trap_isr(priv, chid, inst);
+ nve0_graph_trap_isr(priv, chid, inst, engctx);
nv_wr32(priv, 0x400100, 0x00200000);
stat &= ~0x00200000;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 9fd86375f4c4..49ecbb859b25 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
@@ -231,8 +232,10 @@ nv31_mpeg_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x00b230, 0x00000001);
if (show) {
- nv_error(priv, "ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- chid, inst << 4, stat, type, mthd, data);
+ nv_error(priv,
+ "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ chid, inst << 4, nouveau_client_name(engctx), stat,
+ type, mthd, data);
}
nouveau_engctx_put(engctx);
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index b0e7e1c01ce6..c48e74953771 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -28,6 +28,9 @@
#include <core/namedb.h>
#include <core/handle.h>
#include <core/gpuobj.h>
+#include <core/event.h>
+
+#include <subdev/bar.h>
#include <engine/software.h>
#include <engine/disp.h>
@@ -90,18 +93,11 @@ nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
{
struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
struct nouveau_disp *disp = nouveau_disp(object);
- unsigned long flags;
u32 crtc = *(u32 *)args;
-
if (crtc > 1)
return -EINVAL;
- disp->vblank.get(disp->vblank.data, crtc);
-
- spin_lock_irqsave(&disp->vblank.lock, flags);
- list_add(&chan->base.vblank.head, &disp->vblank.list);
- chan->base.vblank.crtc = crtc;
- spin_unlock_irqrestore(&disp->vblank.lock, flags);
+ nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
return 0;
}
@@ -136,6 +132,29 @@ nv50_software_sclass[] = {
******************************************************************************/
static int
+nv50_software_vblsem_release(struct nouveau_eventh *event, int head)
+{
+ struct nouveau_software_chan *chan =
+ container_of(event, struct nouveau_software_chan, vblank.event);
+ struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
+ struct nouveau_bar *bar = nouveau_bar(priv);
+
+ nv_wr32(priv, 0x001704, chan->vblank.channel);
+ nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+ bar->flush(bar);
+
+ if (nv_device(priv)->chipset == 0x50) {
+ nv_wr32(priv, 0x001570, chan->vblank.offset);
+ nv_wr32(priv, 0x001574, chan->vblank.value);
+ } else {
+ nv_wr32(priv, 0x060010, chan->vblank.offset);
+ nv_wr32(priv, 0x060014, chan->vblank.value);
+ }
+
+ return NVKM_EVENT_DROP;
+}
+
+static int
nv50_software_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -150,6 +169,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
return ret;
chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+ chan->base.vblank.event.func = nv50_software_vblsem_release;
return 0;
}
@@ -170,8 +190,8 @@ nv50_software_cclass = {
static int
nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
struct nv50_software_priv *priv;
int ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index 282a1cd1bc2f..a523eaad47e3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -25,6 +25,9 @@
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
+#include <core/event.h>
+
+#include <subdev/bar.h>
#include <engine/software.h>
#include <engine/disp.h>
@@ -72,18 +75,12 @@ nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
{
struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
struct nouveau_disp *disp = nouveau_disp(object);
- unsigned long flags;
u32 crtc = *(u32 *)args;
if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
return -EINVAL;
- disp->vblank.get(disp->vblank.data, crtc);
-
- spin_lock_irqsave(&disp->vblank.lock, flags);
- list_add(&chan->base.vblank.head, &disp->vblank.list);
- chan->base.vblank.crtc = crtc;
- spin_unlock_irqrestore(&disp->vblank.lock, flags);
+ nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
return 0;
}
@@ -118,6 +115,23 @@ nvc0_software_sclass[] = {
******************************************************************************/
static int
+nvc0_software_vblsem_release(struct nouveau_eventh *event, int head)
+{
+ struct nouveau_software_chan *chan =
+ container_of(event, struct nouveau_software_chan, vblank.event);
+ struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+ struct nouveau_bar *bar = nouveau_bar(priv);
+
+ nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
+ bar->flush(bar);
+ nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060014, chan->vblank.value);
+
+ return NVKM_EVENT_DROP;
+}
+
+static int
nvc0_software_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -132,6 +146,7 @@ nvc0_software_context_ctor(struct nouveau_object *parent,
return ret;
chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+ chan->base.vblank.event.func = nvc0_software_vblsem_release;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 47c4b3a5bd3a..92d3ab11d962 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -154,6 +154,14 @@ struct nve0_channel_ind_class {
u32 engine;
};
+/* 0046: NV04_DISP
+ */
+
+#define NV04_DISP_CLASS 0x00000046
+
+struct nv04_display_class {
+};
+
/* 5070: NV50_DISP
* 8270: NV84_DISP
* 8370: NVA0_DISP
@@ -190,25 +198,6 @@ struct nve0_channel_ind_class {
#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
-#define NV94_DISP_SOR_DP_TRAIN 0x00016000
-#define NV94_DISP_SOR_DP_TRAIN_OP 0xf0000000
-#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN 0x00000000
-#define NV94_DISP_SOR_DP_TRAIN_OP_INIT 0x10000000
-#define NV94_DISP_SOR_DP_TRAIN_OP_FINI 0x20000000
-#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD 0x00000001
-#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF 0x00000000
-#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON 0x00000001
-#define NV94_DISP_SOR_DP_TRAIN_PATTERN 0x00000003
-#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED 0x00000000
-#define NV94_DISP_SOR_DP_LNKCTL 0x00016040
-#define NV94_DISP_SOR_DP_LNKCTL_FRAME 0x80000000
-#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD 0x00000000
-#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH 0x80000000
-#define NV94_DISP_SOR_DP_LNKCTL_WIDTH 0x00001f00
-#define NV94_DISP_SOR_DP_LNKCTL_COUNT 0x00000007
-#define NV94_DISP_SOR_DP_DRVCTL(l) ((l) * 0x40 + 0x00016100)
-#define NV94_DISP_SOR_DP_DRVCTL_VS 0x00000300
-#define NV94_DISP_SOR_DP_DRVCTL_PE 0x00000003
#define NV50_DISP_DAC_MTHD 0x00020000
#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
@@ -230,6 +219,23 @@ struct nve0_channel_ind_class {
#define NV50_DISP_DAC_LOAD 0x0002000c
#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
+#define NV50_DISP_PIOR_MTHD 0x00030000
+#define NV50_DISP_PIOR_MTHD_TYPE 0x0000f000
+#define NV50_DISP_PIOR_MTHD_OR 0x00000003
+
+#define NV50_DISP_PIOR_PWR 0x00030000
+#define NV50_DISP_PIOR_PWR_STATE 0x00000001
+#define NV50_DISP_PIOR_PWR_STATE_ON 0x00000001
+#define NV50_DISP_PIOR_PWR_STATE_OFF 0x00000000
+#define NV50_DISP_PIOR_TMDS_PWR 0x00032000
+#define NV50_DISP_PIOR_TMDS_PWR_STATE 0x00000001
+#define NV50_DISP_PIOR_TMDS_PWR_STATE_ON 0x00000001
+#define NV50_DISP_PIOR_TMDS_PWR_STATE_OFF 0x00000000
+#define NV50_DISP_PIOR_DP_PWR 0x00036000
+#define NV50_DISP_PIOR_DP_PWR_STATE 0x00000001
+#define NV50_DISP_PIOR_DP_PWR_STATE_ON 0x00000001
+#define NV50_DISP_PIOR_DP_PWR_STATE_OFF 0x00000000
+
struct nv50_display_class {
};
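The NV50_DISP_PIOR_MTHD namespace packs the connector type into bits 15:12 and the OR index into the low bits, which is exactly what nv50_pior_mthd() strips back out before dispatching. A short sketch of the encode side (nv50_disp_pior_dp_pwr_mthd() is a hypothetical wrapper, shown only to make the bit layout concrete):

/* Illustrative sketch of the token layout decoded by nv50_pior_mthd().
 * nv50_disp_pior_dp_pwr_mthd() is hypothetical; the masks are from above.
 */
static u32
nv50_disp_pior_dp_pwr_mthd(int or)
{
	/* NV50_DISP_PIOR_DP_PWR already carries type 0x6 in bits 15:12;
	 * the OR index lands in NV50_DISP_PIOR_MTHD_OR (the low bits).
	 * nv50_pior_mthd() recovers both fields with the same masks,
	 * routes the request to priv->pior.power(), and passes the
	 * caller's first data word (NV50_DISP_PIOR_PWR_STATE_ON/OFF)
	 * straight through.
	 */
	return NV50_DISP_PIOR_DP_PWR | (or & NV50_DISP_PIOR_MTHD_OR);
}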
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 63acc0346ff2..c66eac513803 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -7,7 +7,7 @@ struct nouveau_client {
struct nouveau_namedb base;
struct nouveau_handle *root;
struct nouveau_object *device;
- char name[16];
+ char name[32];
u32 debug;
struct nouveau_vm *vm;
};
@@ -41,5 +41,6 @@ int nouveau_client_create_(const char *name, u64 device, const char *cfg,
int nouveau_client_init(struct nouveau_client *);
int nouveau_client_fini(struct nouveau_client *, bool suspend);
+const char *nouveau_client_name(void *obj);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index e58b6f0984c1..d351a4e5819c 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -26,6 +26,7 @@ enum nv_subdev_type {
*/
NVDEV_SUBDEV_MXM,
NVDEV_SUBDEV_MC,
+ NVDEV_SUBDEV_BUS,
NVDEV_SUBDEV_TIMER,
NVDEV_SUBDEV_FB,
NVDEV_SUBDEV_LTCG,
diff --git a/drivers/gpu/drm/nouveau/core/include/core/enum.h b/drivers/gpu/drm/nouveau/core/include/core/enum.h
index e7b1e181943b..4fc62bb8c1f0 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/enum.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/enum.h
@@ -5,12 +5,13 @@ struct nouveau_enum {
u32 value;
const char *name;
const void *data;
+ u32 data2;
};
const struct nouveau_enum *
nouveau_enum_find(const struct nouveau_enum *, u32 value);
-void
+const struct nouveau_enum *
nouveau_enum_print(const struct nouveau_enum *en, u32 value);
struct nouveau_bitfield {
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
new file mode 100644
index 000000000000..9e094408f14e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -0,0 +1,36 @@
+#ifndef __NVKM_EVENT_H__
+#define __NVKM_EVENT_H__
+
+/* return codes from event handlers */
+#define NVKM_EVENT_DROP 0
+#define NVKM_EVENT_KEEP 1
+
+struct nouveau_eventh {
+ struct list_head head;
+ int (*func)(struct nouveau_eventh *, int index);
+};
+
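+/* refs counts the handlers registered on each index, gating enable()/disable() */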
+struct nouveau_event {
+ spinlock_t lock;
+
+ void *priv;
+ void (*enable)(struct nouveau_event *, int index);
+ void (*disable)(struct nouveau_event *, int index);
+
+ int index_nr;
+ struct {
+ struct list_head list;
+ int refs;
+ } index[];
+};
+
+int nouveau_event_create(int index_nr, struct nouveau_event **);
+void nouveau_event_destroy(struct nouveau_event **);
+void nouveau_event_trigger(struct nouveau_event *, int index);
+
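+/* register (get) / unregister (put) a handler on the given event index */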
+void nouveau_event_get(struct nouveau_event *, int index,
+ struct nouveau_eventh *);
+void nouveau_event_put(struct nouveau_event *, int index,
+ struct nouveau_eventh *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 106bb19fdd9a..62e68baef087 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -136,7 +136,7 @@ static inline u8
nv_ro08(void *obj, u64 addr)
{
u8 data = nv_ofuncs(obj)->rd08(obj, addr);
- nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
+ nv_spam(obj, "nv_ro08 0x%08llx 0x%02x\n", addr, data);
return data;
}
@@ -144,7 +144,7 @@ static inline u16
nv_ro16(void *obj, u64 addr)
{
u16 data = nv_ofuncs(obj)->rd16(obj, addr);
- nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
+ nv_spam(obj, "nv_ro16 0x%08llx 0x%04x\n", addr, data);
return data;
}
@@ -152,28 +152,28 @@ static inline u32
nv_ro32(void *obj, u64 addr)
{
u32 data = nv_ofuncs(obj)->rd32(obj, addr);
- nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
+ nv_spam(obj, "nv_ro32 0x%08llx 0x%08x\n", addr, data);
return data;
}
static inline void
nv_wo08(void *obj, u64 addr, u8 data)
{
- nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
+ nv_spam(obj, "nv_wo08 0x%08llx 0x%02x\n", addr, data);
nv_ofuncs(obj)->wr08(obj, addr, data);
}
static inline void
nv_wo16(void *obj, u64 addr, u16 data)
{
- nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
+ nv_spam(obj, "nv_wo16 0x%08llx 0x%04x\n", addr, data);
nv_ofuncs(obj)->wr16(obj, addr, data);
}
static inline void
nv_wo32(void *obj, u64 addr, u32 data)
{
- nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
+ nv_spam(obj, "nv_wo32 0x%08llx 0x%08x\n", addr, data);
nv_ofuncs(obj)->wr32(obj, addr, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index 1d629664f32d..febed2ea5c80 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -15,7 +15,8 @@ struct nouveau_object;
#define NV_PRINTK_TRACE KERN_DEBUG
#define NV_PRINTK_SPAM KERN_DEBUG
-void nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
+void __printf(4, 5)
+nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
#define nv_printk(o,l,f,a...) do { \
if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 46948285f3e7..28da6772c095 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -4,18 +4,11 @@
#include <core/object.h>
#include <core/engine.h>
#include <core/device.h>
+#include <core/event.h>
struct nouveau_disp {
struct nouveau_engine base;
-
- struct {
- struct list_head list;
- spinlock_t lock;
- void (*notify)(void *, int);
- void (*get)(void *, int);
- void (*put)(void *, int);
- void *data;
- } vblank;
+ struct nouveau_event *vblank;
};
static inline struct nouveau_disp *
@@ -24,16 +17,22 @@ nouveau_disp(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
}
-#define nouveau_disp_create(p,e,c,i,x,d) \
- nouveau_engine_create((p), (e), (c), true, (i), (x), (d))
-#define nouveau_disp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
+#define nouveau_disp_create(p,e,c,h,i,x,d) \
+ nouveau_disp_create_((p), (e), (c), (h), (i), (x), \
+ sizeof(**d), (void **)d)
+#define nouveau_disp_destroy(d) ({ \
+ struct nouveau_disp *disp = (d); \
+ _nouveau_disp_dtor(nv_object(disp)); \
+})
#define nouveau_disp_init(d) \
nouveau_engine_init(&(d)->base)
#define nouveau_disp_fini(d,s) \
nouveau_engine_fini(&(d)->base, (s))
-#define _nouveau_disp_dtor _nouveau_engine_dtor
+int nouveau_disp_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int heads,
+ const char *, const char *, int, void **);
+void _nouveau_disp_dtor(struct nouveau_object *);
#define _nouveau_disp_init _nouveau_engine_init
#define _nouveau_disp_fini _nouveau_engine_fini
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index f18846c8c6fe..b46c197709f3 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -65,6 +65,8 @@ struct nouveau_fifo_base {
struct nouveau_fifo {
struct nouveau_engine base;
+ struct nouveau_event *uevent;
+
struct nouveau_object **channel;
spinlock_t lock;
u16 min;
@@ -92,6 +94,8 @@ int nouveau_fifo_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, int min, int max,
int size, void **);
void nouveau_fifo_destroy(struct nouveau_fifo *);
+const char *
+nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid);
#define _nouveau_fifo_init _nouveau_engine_init
#define _nouveau_fifo_fini _nouveau_engine_fini
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
index c945691c8564..45799487e573 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/software.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -3,17 +3,17 @@
#include <core/engine.h>
#include <core/engctx.h>
+#include <core/event.h>
struct nouveau_software_chan {
struct nouveau_engctx base;
struct {
- struct list_head head;
+ struct nouveau_eventh event;
u32 channel;
u32 ctxdma;
u64 offset;
u32 value;
- u32 crtc;
} vblank;
int (*flip)(void *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
index b79025da581e..123270e9813a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -16,6 +16,8 @@ enum dcb_output_type {
struct dcb_output {
int index; /* may not be raw dcb index if merging has happened */
+ u16 hasht;
+ u16 hashm;
enum dcb_output_type type;
uint8_t i2c_index;
uint8_t heads;
@@ -25,6 +27,7 @@ struct dcb_output {
uint8_t or;
uint8_t link;
bool duallink_possible;
+ uint8_t extdev;
union {
struct sor_conf {
int link;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index e6563b5cb08e..96d3364f6db3 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -1,17 +1,22 @@
#ifndef __NVBIOS_GPIO_H__
#define __NVBIOS_GPIO_H__
-struct nouveau_bios;
-
enum dcb_gpio_func_name {
DCB_GPIO_PANEL_POWER = 0x01,
DCB_GPIO_TVDAC0 = 0x0c,
DCB_GPIO_TVDAC1 = 0x2d,
- DCB_GPIO_PWM_FAN = 0x09,
+ DCB_GPIO_FAN = 0x09,
DCB_GPIO_FAN_SENSE = 0x3d,
DCB_GPIO_UNUSED = 0xff
};
+#define DCB_GPIO_LOG_DIR 0x02
+#define DCB_GPIO_LOG_DIR_OUT 0x00
+#define DCB_GPIO_LOG_DIR_IN 0x02
+#define DCB_GPIO_LOG_VAL 0x01
+#define DCB_GPIO_LOG_VAL_LO 0x00
+#define DCB_GPIO_LOG_VAL_HI 0x01
+
struct dcb_gpio_func {
u8 func;
u8 line;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
index 5079bedfd985..10b57a19a7de 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
@@ -15,7 +15,7 @@ struct dcb_i2c_entry {
enum dcb_i2c_type type;
u8 drive;
u8 sense;
- u32 data;
+ u8 share;
};
u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
index a2c4296fc5f6..083541dbe9c8 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
@@ -23,11 +23,27 @@ struct nvbios_therm_sensor {
struct nvbios_therm_threshold thrs_shutdown;
};
+/* no vbios has more than 6 */
+#define NOUVEAU_TEMP_FAN_TRIP_MAX 10
+struct nouveau_therm_trip_point {
+ int fan_duty;
+ int temp;
+ int hysteresis;
+};
+
struct nvbios_therm_fan {
u16 pwm_freq;
u8 min_duty;
u8 max_duty;
+
+ u16 bump_period;
+ u16 slow_down_period;
+
+ struct nouveau_therm_trip_point trip[NOUVEAU_TEMP_FAN_TRIP_MAX];
+ u8 nr_fan_trip;
+ u8 linear_min_temp;
+ u8 linear_max_temp;
};
enum nvbios_therm_domain {
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h
new file mode 100644
index 000000000000..360baab52e4c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h
@@ -0,0 +1,19 @@
+#ifndef __NVBIOS_XPIO_H__
+#define __NVBIOS_XPIO_H__
+
+#define NVBIOS_XPIO_FLAG_AUX 0x10
+#define NVBIOS_XPIO_FLAG_AUX0 0x00
+#define NVBIOS_XPIO_FLAG_AUX1 0x10
+
+struct nvbios_xpio {
+ u8 type;
+ u8 addr;
+ u8 flags;
+};
+
+u16 dcb_xpio_table(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_xpio_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_xpio *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
new file mode 100644
index 000000000000..7d88ec4a6d06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
@@ -0,0 +1,41 @@
+#ifndef __NOUVEAU_BUS_H__
+#define __NOUVEAU_BUS_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_bus_intr {
+ u32 stat;
+ u32 unit;
+};
+
+struct nouveau_bus {
+ struct nouveau_subdev base;
+};
+
+static inline struct nouveau_bus *
+nouveau_bus(void *obj)
+{
+ return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BUS];
+}
+
+#define nouveau_bus_create(p, e, o, d) \
+ nouveau_subdev_create_((p), (e), (o), 0, "PBUS", "master", \
+ sizeof(**d), (void **)d)
+#define nouveau_bus_destroy(p) \
+ nouveau_subdev_destroy(&(p)->base)
+#define nouveau_bus_init(p) \
+ nouveau_subdev_init(&(p)->base)
+#define nouveau_bus_fini(p, s) \
+ nouveau_subdev_fini(&(p)->base, (s))
+
+#define _nouveau_bus_dtor _nouveau_subdev_dtor
+#define _nouveau_bus_init _nouveau_subdev_init
+#define _nouveau_bus_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv04_bus_oclass;
+extern struct nouveau_oclass nv31_bus_oclass;
+extern struct nouveau_oclass nv50_bus_oclass;
+extern struct nouveau_oclass nvc0_bus_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index b75e8f18e52c..c85b9f1579ad 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -3,6 +3,7 @@
#include <core/subdev.h>
#include <core/device.h>
+#include <core/event.h>
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
@@ -10,28 +11,18 @@
struct nouveau_gpio {
struct nouveau_subdev base;
+ struct nouveau_event *events;
+
/* hardware interfaces */
void (*reset)(struct nouveau_gpio *, u8 func);
int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
int (*sense)(struct nouveau_gpio *, int line);
- void (*irq_enable)(struct nouveau_gpio *, int line, bool);
/* software interfaces */
int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
struct dcb_gpio_func *);
int (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state);
int (*get)(struct nouveau_gpio *, int idx, u8 tag, u8 line);
- int (*irq)(struct nouveau_gpio *, int idx, u8 tag, u8 line, bool on);
-
- /* interrupt handling */
- struct list_head isr;
- spinlock_t lock;
-
- void (*isr_run)(struct nouveau_gpio *, int idx, u32 mask);
- int (*isr_add)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
- void (*)(void *, int state), void *data);
- void (*isr_del)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
- void (*)(void *, int state), void *data);
};
static inline struct nouveau_gpio *
@@ -40,25 +31,23 @@ nouveau_gpio(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO];
}
-#define nouveau_gpio_create(p,e,o,d) \
- nouveau_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_gpio_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
+#define nouveau_gpio_create(p,e,o,l,d) \
+ nouveau_gpio_create_((p), (e), (o), (l), sizeof(**d), (void **)d)
+#define nouveau_gpio_destroy(p) ({ \
+ struct nouveau_gpio *gpio = (p); \
+ _nouveau_gpio_dtor(nv_object(gpio)); \
+})
#define nouveau_gpio_fini(p,s) \
nouveau_subdev_fini(&(p)->base, (s))
-int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-int nouveau_gpio_init(struct nouveau_gpio *);
+int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, int, void **);
+void _nouveau_gpio_dtor(struct nouveau_object *);
+int nouveau_gpio_init(struct nouveau_gpio *);
extern struct nouveau_oclass nv10_gpio_oclass;
extern struct nouveau_oclass nv50_gpio_oclass;
extern struct nouveau_oclass nvd0_gpio_oclass;
-
-void nv50_gpio_dtor(struct nouveau_object *);
-int nv50_gpio_init(struct nouveau_object *);
-int nv50_gpio_fini(struct nouveau_object *, bool);
-void nv50_gpio_intr(struct nouveau_subdev *);
-void nv50_gpio_irq_enable(struct nouveau_gpio *, int line, bool);
+extern struct nouveau_oclass nve0_gpio_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index b93ab01e3785..888384c0bed8 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -10,23 +10,59 @@
#define NV_I2C_PORT(n) (0x00 + (n))
#define NV_I2C_DEFAULT(n) (0x80 + (n))
+#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
+#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
+#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
+
struct nouveau_i2c_port {
+ struct nouveau_object base;
struct i2c_adapter adapter;
- struct nouveau_i2c *i2c;
- struct i2c_algo_bit_data bit;
+
struct list_head head;
u8 index;
- u8 type;
- u32 dcb;
- u32 drive;
- u32 sense;
- u32 state;
+
+ const struct nouveau_i2c_func *func;
+};
+
+struct nouveau_i2c_func {
+ void (*acquire)(struct nouveau_i2c_port *);
+ void (*release)(struct nouveau_i2c_port *);
+
+ void (*drive_scl)(struct nouveau_i2c_port *, int);
+ void (*drive_sda)(struct nouveau_i2c_port *, int);
+ int (*sense_scl)(struct nouveau_i2c_port *);
+ int (*sense_sda)(struct nouveau_i2c_port *);
+
+ int (*aux)(struct nouveau_i2c_port *, u8, u32, u8 *, u8);
+ int (*pattern)(struct nouveau_i2c_port *, int pattern);
+ int (*lnk_ctl)(struct nouveau_i2c_port *, int nr, int bw, bool enh);
+ int (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe);
};
+#define nouveau_i2c_port_create(p,e,o,i,a,d) \
+ nouveau_i2c_port_create_((p), (e), (o), (i), (a), \
+ sizeof(**d), (void **)d)
+#define nouveau_i2c_port_destroy(p) ({ \
+ struct nouveau_i2c_port *port = (p); \
+ _nouveau_i2c_port_dtor(nv_object(port)); \
+})
+#define nouveau_i2c_port_init(p) \
+ nouveau_object_init(&(p)->base)
+#define nouveau_i2c_port_fini(p,s) \
+ nouveau_object_fini(&(p)->base, (s))
+
+int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u8,
+ const struct i2c_algorithm *, int, void **);
+void _nouveau_i2c_port_dtor(struct nouveau_object *);
+#define _nouveau_i2c_port_init nouveau_object_init
+#define _nouveau_i2c_port_fini nouveau_object_fini
+
struct nouveau_i2c {
struct nouveau_subdev base;
struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
+ struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
int (*identify)(struct nouveau_i2c *, int index,
const char *what, struct i2c_board_info *,
bool (*match)(struct nouveau_i2c_port *,
@@ -40,21 +76,76 @@ nouveau_i2c(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C];
}
-extern struct nouveau_oclass nouveau_i2c_oclass;
+#define nouveau_i2c_create(p,e,o,s,d) \
+ nouveau_i2c_create_((p), (e), (o), (s), sizeof(**d), (void **)d)
+#define nouveau_i2c_destroy(p) ({ \
+ struct nouveau_i2c *i2c = (p); \
+ _nouveau_i2c_dtor(nv_object(i2c)); \
+})
+#define nouveau_i2c_init(p) ({ \
+ struct nouveau_i2c *i2c = (p); \
+ _nouveau_i2c_init(nv_object(i2c)); \
+})
+#define nouveau_i2c_fini(p,s) ({ \
+ struct nouveau_i2c *i2c = (p); \
+ _nouveau_i2c_fini(nv_object(i2c), (s)); \
+})
-void nouveau_i2c_drive_scl(void *, int);
-void nouveau_i2c_drive_sda(void *, int);
-int nouveau_i2c_sense_scl(void *);
-int nouveau_i2c_sense_sda(void *);
+int nouveau_i2c_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, struct nouveau_oclass *,
+ int, void **);
+void _nouveau_i2c_dtor(struct nouveau_object *);
+int _nouveau_i2c_init(struct nouveau_object *);
+int _nouveau_i2c_fini(struct nouveau_object *, bool);
-int nv_rdi2cr(struct nouveau_i2c_port *, u8 addr, u8 reg);
-int nv_wri2cr(struct nouveau_i2c_port *, u8 addr, u8 reg, u8 val);
-bool nv_probe_i2c(struct nouveau_i2c_port *, u8 addr);
-
-int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
-int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+extern struct nouveau_oclass nv04_i2c_oclass;
+extern struct nouveau_oclass nv4e_i2c_oclass;
+extern struct nouveau_oclass nv50_i2c_oclass;
+extern struct nouveau_oclass nv94_i2c_oclass;
+extern struct nouveau_oclass nvd0_i2c_oclass;
+extern struct nouveau_oclass nouveau_anx9805_sclass[];
extern const struct i2c_algorithm nouveau_i2c_bit_algo;
extern const struct i2c_algorithm nouveau_i2c_aux_algo;
+static inline int
+nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
+{
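+ /* write the register index, then read a single byte back */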
+ u8 val;
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
+ };
+
+ int ret = i2c_transfer(&port->adapter, msgs, 2);
+ if (ret != 2)
+ return -EIO;
+
+ return val;
+}
+
+static inline int
+nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val)
+{
+ u8 buf[2] = { reg, val };
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 2, .buf = buf },
+ };
+
+ int ret = i2c_transfer(&port->adapter, msgs, 1);
+ if (ret != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static inline bool
+nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr)
+{
+ return nv_rdi2cr(port, addr, 0) >= 0;
+}
+
+int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index faee569fd458..6b17b614629f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -4,10 +4,10 @@
#include <core/device.h>
#include <core/subdev.h>
-enum nouveau_therm_fan_mode {
- FAN_CONTROL_NONE = 0,
- FAN_CONTROL_MANUAL = 1,
- FAN_CONTROL_NR,
+enum nouveau_therm_mode {
+ NOUVEAU_THERM_CTRL_NONE = 0,
+ NOUVEAU_THERM_CTRL_MANUAL = 1,
+ NOUVEAU_THERM_CTRL_AUTO = 2,
};
enum nouveau_therm_attr_type {
@@ -28,6 +28,11 @@ enum nouveau_therm_attr_type {
struct nouveau_therm {
struct nouveau_subdev base;
+ int (*pwm_ctrl)(struct nouveau_therm *, int line, bool);
+ int (*pwm_get)(struct nouveau_therm *, int line, u32 *, u32 *);
+ int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
+ int (*pwm_clock)(struct nouveau_therm *);
+
int (*fan_get)(struct nouveau_therm *);
int (*fan_set)(struct nouveau_therm *, int);
int (*fan_sense)(struct nouveau_therm *);
@@ -46,13 +51,29 @@ nouveau_therm(void *obj)
}
#define nouveau_therm_create(p,e,o,d) \
- nouveau_subdev_create((p), (e), (o), 0, "THERM", "therm", d)
-#define nouveau_therm_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
+ nouveau_therm_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_therm_destroy(p) ({ \
+ struct nouveau_therm *therm = (p); \
+ _nouveau_therm_dtor(nv_object(therm)); \
+})
+#define nouveau_therm_init(p) ({ \
+ struct nouveau_therm *therm = (p); \
+ _nouveau_therm_init(nv_object(therm)); \
+})
+#define nouveau_therm_fini(p,s) ({ \
+ struct nouveau_therm *therm = (p); \
+ _nouveau_therm_fini(nv_object(therm), (s)); \
+})
-#define _nouveau_therm_dtor _nouveau_subdev_dtor
+int nouveau_therm_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void _nouveau_therm_dtor(struct nouveau_object *);
+int _nouveau_therm_init(struct nouveau_object *);
+int _nouveau_therm_fini(struct nouveau_object *, bool);
extern struct nouveau_oclass nv40_therm_oclass;
extern struct nouveau_oclass nv50_therm_oclass;
+extern struct nouveau_oclass nva3_therm_oclass;
+extern struct nouveau_oclass nvd0_therm_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
index c24ec8ab3db4..e465d158d352 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -10,6 +10,14 @@ struct nouveau_alarm {
void (*func)(struct nouveau_alarm *);
};
+static inline void
+nouveau_alarm_init(struct nouveau_alarm *alarm,
+ void (*func)(struct nouveau_alarm *))
+{
+ INIT_LIST_HEAD(&alarm->head);
+ alarm->func = func;
+}
+
bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index cfe3b9cad156..eb496033b55c 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -16,6 +16,7 @@
#include <linux/vmalloc.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/reboot.h>
#include <asm/unaligned.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index f621f69fa1a2..e816f06637a7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -172,7 +172,7 @@ out:
nv_wr32(bios, pcireg, access);
}
-#if defined(CONFIG_ACPI)
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
#else
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 0fd87df99dd6..2d9b9d7a7992 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -107,6 +107,18 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
return 0x0000;
}
+static inline u16
+dcb_outp_hasht(struct dcb_output *outp)
+{
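+ /* hash key: extdev in bits 15:8, location in 7:4, output type in 3:0 */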
+ return (outp->extdev << 8) | (outp->location << 4) | outp->type;
+}
+
+static inline u16
+dcb_outp_hashm(struct dcb_output *outp)
+{
+ return (outp->heads << 8) | (outp->link << 6) | outp->or;
+}
+
u16
dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
struct dcb_output *outp)
@@ -135,34 +147,28 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
case DCB_OUTPUT_DP:
outp->link = (conf & 0x00000030) >> 4;
outp->sorconf.link = outp->link; /*XXX*/
+ outp->extdev = 0x00;
+ if (outp->location != 0)
+ outp->extdev = (conf & 0x0000ff00) >> 8;
break;
default:
break;
}
}
+
+ outp->hasht = dcb_outp_hasht(outp);
+ outp->hashm = dcb_outp_hashm(outp);
}
return dcb;
}
-static inline u16
-dcb_outp_hasht(struct dcb_output *outp)
-{
- return outp->type;
-}
-
-static inline u16
-dcb_outp_hashm(struct dcb_output *outp)
-{
- return (outp->heads << 8) | (outp->link << 6) | outp->or;
-}
-
u16
dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
u8 *ver, u8 *len, struct dcb_output *outp)
{
u16 dcb, idx = 0;
while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
- if (dcb_outp_hasht(outp) == type) {
+ if ((dcb_outp_hasht(outp) & 0x00ff) == (type & 0x00ff)) {
if ((dcb_outp_hashm(outp) & mask) == mask)
break;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
index 5afb568b2d69..b2a676e53580 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
@@ -48,7 +48,7 @@ extdev_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
return extdev + *hdr;
}
-u16
+static u16
nvbios_extdev_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index c84e93fa6d95..172a4f999990 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/gpio.h>
+#include <subdev/bios/xpio.h>
u16
dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
@@ -60,8 +61,14 @@ dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
u16
dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
{
- u8 hdr, cnt;
- u16 gpio = !idx ? dcb_gpio_table(bios, ver, &hdr, &cnt, len) : 0x0000;
+ u8 hdr, cnt, xver; /* use gpio version for xpio entry parsing */
+ u16 gpio;
+
+ if (!idx--)
+ gpio = dcb_gpio_table(bios, ver, &hdr, &cnt, len);
+ else
+ gpio = dcb_xpio_table(bios, idx, &xver, &hdr, &cnt, len);
+
if (gpio && ent < cnt)
return gpio + hdr + (ent * *len);
return 0x0000;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
index ad577db83766..cfb9288c6d28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
@@ -70,12 +70,12 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
u8 ver, len;
u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
if (ent) {
- info->data = nv_ro32(bios, ent + 0);
- info->type = nv_ro08(bios, ent + 3);
+ info->type = nv_ro08(bios, ent + 3);
+ info->share = DCB_I2C_UNUSED;
if (ver < 0x30) {
info->type &= 0x07;
if (info->type == 0x07)
- info->type = 0xff;
+ info->type = DCB_I2C_UNUSED;
}
switch (info->type) {
@@ -88,7 +88,11 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
return 0;
case DCB_I2C_NVIO_BIT:
case DCB_I2C_NVIO_AUX:
- info->drive = nv_ro08(bios, ent + 0);
+ info->drive = nv_ro08(bios, ent + 0) & 0x0f;
+ if (nv_ro08(bios, ent + 1) & 0x01) {
+ info->share = nv_ro08(bios, ent + 1) >> 1;
+ info->share &= 0x0f;
+ }
return 0;
case DCB_I2C_UNUSED:
return 0;
@@ -121,7 +125,8 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
if (!info->sense) info->sense = 0x36;
}
- info->type = DCB_I2C_NV04_BIT;
+ info->type = DCB_I2C_NV04_BIT;
+ info->share = DCB_I2C_UNUSED;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 690ed438b2ad..2cc1e6a5eb6a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -231,6 +231,11 @@ init_i2c(struct nvbios_init *init, int index)
return NULL;
}
+ if (index == -2 && init->outp->location) {
+ index = NV_I2C_TYPE_EXTAUX(init->outp->extdev);
+ return i2c->find_type(i2c, index);
+ }
+
index = init->outp->i2c_index;
}
@@ -258,7 +263,7 @@ init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
static int
init_rdauxr(struct nvbios_init *init, u32 addr)
{
- struct nouveau_i2c_port *port = init_i2c(init, -1);
+ struct nouveau_i2c_port *port = init_i2c(init, -2);
u8 data;
if (port && init_exec(init)) {
@@ -274,7 +279,7 @@ init_rdauxr(struct nvbios_init *init, u32 addr)
static int
init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
{
- struct nouveau_i2c_port *port = init_i2c(init, -1);
+ struct nouveau_i2c_port *port = init_i2c(init, -2);
if (port && init_exec(init))
return nv_wraux(port, addr, &data, 1);
return -ENODEV;
@@ -1816,7 +1821,7 @@ init_ram_restrict_zm_reg_group(struct nvbios_init *init)
u8 i, j;
trace("RAM_RESTRICT_ZM_REG_GROUP\t"
- "R[%08x] 0x%02x 0x%02x\n", addr, incr, num);
+ "R[0x%08x] 0x%02x 0x%02x\n", addr, incr, num);
init->offset += 7;
for (i = 0; i < num; i++) {
@@ -1849,7 +1854,7 @@ init_copy_zm_reg(struct nvbios_init *init)
u32 sreg = nv_ro32(bios, init->offset + 1);
u32 dreg = nv_ro32(bios, init->offset + 5);
- trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", sreg, dreg);
+ trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg);
init->offset += 9;
init_wr32(init, dreg, init_rd32(init, sreg));
@@ -1866,7 +1871,7 @@ init_zm_reg_group(struct nvbios_init *init)
u32 addr = nv_ro32(bios, init->offset + 1);
u8 count = nv_ro08(bios, init->offset + 5);
- trace("ZM_REG_GROUP\tR[0x%06x] =\n");
+ trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr);
init->offset += 6;
while (count--) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
index 862a08a2ae27..22a20573ed1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -55,7 +55,7 @@ therm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
return therm + nv_ro08(bios, therm + 1);
}
-u16
+static u16
nvbios_therm_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
@@ -155,10 +155,15 @@ int
nvbios_therm_fan_parse(struct nouveau_bios *bios,
struct nvbios_therm_fan *fan)
{
+ struct nouveau_therm_trip_point *cur_trip = NULL;
u8 ver, len, i;
u16 entry;
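+ /* 0x24 trip entries store the target duty as a 4-bit index into this table */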
+ uint8_t duty_lut[] = { 0, 0, 25, 0, 40, 0, 50, 0,
+ 75, 0, 85, 0, 100, 0, 100, 0 };
+
i = 0;
+ fan->nr_fan_trip = 0;
while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
s16 value = nv_ro16(bios, entry + 1);
@@ -167,9 +172,30 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
fan->min_duty = value & 0xff;
fan->max_duty = (value & 0xff00) >> 8;
break;
+ case 0x24:
+ fan->nr_fan_trip++;
+ cur_trip = &fan->trip[fan->nr_fan_trip - 1];
+ cur_trip->hysteresis = value & 0xf;
+ cur_trip->temp = (value & 0xff0) >> 4;
+ cur_trip->fan_duty = duty_lut[(value & 0xf000) >> 12];
+ break;
+ case 0x25:
+ cur_trip = &fan->trip[fan->nr_fan_trip - 1];
+ cur_trip->fan_duty = value;
+ break;
case 0x26:
fan->pwm_freq = value;
break;
+ case 0x3b:
+ fan->bump_period = value;
+ break;
+ case 0x3c:
+ fan->slow_down_period = value;
+ break;
+ case 0x46:
+ fan->linear_min_temp = nv_ro08(bios, entry + 1);
+ fan->linear_max_temp = nv_ro08(bios, entry + 2);
+ break;
}
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c
new file mode 100644
index 000000000000..e9b8e5d30a7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+#include <subdev/bios/xpio.h>
+
+static u16
+dcb_xpiod_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
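+ /* v4.0+ GPIO table headers carry an XPIO device table pointer at offset 0x04 */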
+ u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len);
+ if (data && *ver >= 0x40 && *hdr >= 0x06) {
+ u16 xpio = nv_ro16(bios, data + 0x04);
+ if (xpio) {
+ *ver = nv_ro08(bios, data + 0x00);
+ *hdr = nv_ro08(bios, data + 0x01);
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x03);
+ return xpio;
+ }
+ }
+ return 0x0000;
+}
+
+u16
+dcb_xpio_table(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len);
+ if (data && idx < *cnt) {
+ u16 xpio = nv_ro16(bios, data + *hdr + (idx * *len));
+ if (xpio) {
+ *ver = nv_ro08(bios, data + 0x00);
+ *hdr = nv_ro08(bios, data + 0x01);
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x03);
+ return xpio;
+ }
+ }
+ return 0x0000;
+}
+
+u16
+dcb_xpio_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_xpio *info)
+{
+ u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len);
+ if (data && *len >= 6) {
+ info->type = nv_ro08(bios, data + 0x04);
+ info->addr = nv_ro08(bios, data + 0x05);
+ info->flags = nv_ro08(bios, data + 0x06);
+ return data;
+ }
+ return 0x0000;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
new file mode 100644
index 000000000000..8c7f8057a185
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv04_bus_priv {
+ struct nouveau_bus base;
+};
+
+static void
+nv04_bus_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
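+ /* service only interrupts that are both pending (0x001100) and enabled (0x001140) */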
+ u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+ if (stat & 0x00000001) {
+ nv_error(pbus, "BUS ERROR\n");
+ stat &= ~0x00000001;
+ nv_wr32(pbus, 0x001100, 0x00000001);
+ }
+
+ if (stat & 0x00000110) {
+ subdev = nouveau_subdev(subdev, NVDEV_SUBDEV_GPIO);
+ if (subdev && subdev->intr)
+ subdev->intr(subdev);
+ stat &= ~0x00000110;
+ nv_wr32(pbus, 0x001100, 0x00000110);
+ }
+
+ if (stat) {
+ nv_error(pbus, "unknown intr 0x%08x\n", stat);
+ nv_mask(pbus, 0x001140, stat, 0x00000000);
+ }
+}
+
+static int
+nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_bus_priv *priv;
+ int ret;
+
+ ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = nv04_bus_intr;
+ return 0;
+}
+
+static int
+nv04_bus_init(struct nouveau_object *object)
+{
+ struct nv04_bus_priv *priv = (void *)object;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x00000111);
+
+ return nouveau_bus_init(&priv->base);
+}
+
+struct nouveau_oclass
+nv04_bus_oclass = {
+ .handle = NV_SUBDEV(BUS, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nv04_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
new file mode 100644
index 000000000000..34132aef34e1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv31_bus_priv {
+ struct nouveau_bus base;
+};
+
+static void
+nv31_bus_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
+ u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+ u32 gpio = nv_rd32(pbus, 0x001104) & nv_rd32(pbus, 0x001144);
+
+ if (gpio) {
+ subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_GPIO);
+ if (subdev && subdev->intr)
+ subdev->intr(subdev);
+ }
+
+ if (stat & 0x00000008) { /* NV41- */
+ u32 addr = nv_rd32(pbus, 0x009084);
+ u32 data = nv_rd32(pbus, 0x009088);
+
+ nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc));
+
+ stat &= ~0x00000008;
+ nv_wr32(pbus, 0x001100, 0x00000008);
+ }
+
+ if (stat & 0x00070000) {
+ subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM);
+ if (subdev && subdev->intr)
+ subdev->intr(subdev);
+ stat &= ~0x00070000;
+ nv_wr32(pbus, 0x001100, 0x00070000);
+ }
+
+ if (stat) {
+ nv_error(pbus, "unknown intr 0x%08x\n", stat);
+ nv_mask(pbus, 0x001140, stat, 0x00000000);
+ }
+}
+
+static int
+nv31_bus_init(struct nouveau_object *object)
+{
+ struct nv31_bus_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bus_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x00070008);
+ return 0;
+}
+
+static int
+nv31_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv31_bus_priv *priv;
+ int ret;
+
+ ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = nv31_bus_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nv31_bus_oclass = {
+ .handle = NV_SUBDEV(BUS, 0x31),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv31_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nv31_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
new file mode 100644
index 000000000000..f5b2117fa8c6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv50_bus_priv {
+ struct nouveau_bus base;
+};
+
+static void
+nv50_bus_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
+ u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+ if (stat & 0x00000008) {
+ u32 addr = nv_rd32(pbus, 0x009084);
+ u32 data = nv_rd32(pbus, 0x009088);
+
+ nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc));
+
+ stat &= ~0x00000008;
+ nv_wr32(pbus, 0x001100, 0x00000008);
+ }
+
+ if (stat & 0x00010000) {
+ subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM);
+ if (subdev && subdev->intr)
+ subdev->intr(subdev);
+ stat &= ~0x00010000;
+ nv_wr32(pbus, 0x001100, 0x00010000);
+ }
+
+ if (stat) {
+ nv_error(pbus, "unknown intr 0x%08x\n", stat);
+ nv_mask(pbus, 0x001140, stat, 0);
+ }
+}
+
+static int
+nv50_bus_init(struct nouveau_object *object)
+{
+ struct nv50_bus_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bus_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x00010008);
+ return 0;
+}
+
+static int
+nv50_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_bus_priv *priv;
+ int ret;
+
+ ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = nv50_bus_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_bus_oclass = {
+ .handle = NV_SUBDEV(BUS, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nv50_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
new file mode 100644
index 000000000000..b192d6246363
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nvc0_bus_priv {
+ struct nouveau_bus base;
+};
+
+static void
+nvc0_bus_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
+ u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
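+ /* bits 1-3: MMIO request faults (!ENGINE/IBUS/TIMEOUT), details in 0x009084/0x009088 */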
+ if (stat & 0x0000000e) {
+ u32 addr = nv_rd32(pbus, 0x009084);
+ u32 data = nv_rd32(pbus, 0x009088);
+
+ nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x [ %s%s%s]\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc),
+ (stat & 0x00000002) ? "!ENGINE " : "",
+ (stat & 0x00000004) ? "IBUS " : "",
+ (stat & 0x00000008) ? "TIMEOUT " : "");
+
+ nv_wr32(pbus, 0x009084, 0x00000000);
+ nv_wr32(pbus, 0x001100, (stat & 0x0000000e));
+ stat &= ~0x0000000e;
+ }
+
+ if (stat) {
+ nv_error(pbus, "unknown intr 0x%08x\n", stat);
+ nv_mask(pbus, 0x001140, stat, 0x00000000);
+ }
+}
+
+static int
+nvc0_bus_init(struct nouveau_object *object)
+{
+ struct nvc0_bus_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bus_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x0000000e);
+ return 0;
+}
+
+static int
+nvc0_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_bus_priv *priv;
+ int ret;
+
+ ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = nvc0_bus_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_bus_oclass = {
+ .handle = NV_SUBDEV(BUS, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nvc0_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index f8a7ed4166cf..3937ced5c753 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -66,6 +66,7 @@ static const u64 disable_map[] = {
[NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_BUS] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE,
@@ -103,8 +104,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
struct nouveau_device *device;
struct nouveau_devobj *devobj;
struct nv_device_class *args = data;
- u64 disable, boot0, strap;
- u64 mmio_base, mmio_size;
+ u32 boot0, strap;
+ u64 disable, mmio_base, mmio_size;
void __iomem *map;
int ret, i, c;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
index 8626d0d6cbbc..473c5c03d3c9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
#include <subdev/devinit.h>
@@ -46,10 +47,11 @@ nv04_identify(struct nouveau_device *device)
case 0x04:
device->cname = "NV04";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -63,10 +65,11 @@ nv04_identify(struct nouveau_device *device)
case 0x05:
device->cname = "NV05";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index 9c40b0fb23f6..d0774f5bebe1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -48,10 +49,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV10";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -64,10 +66,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV15";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -82,10 +85,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV16";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -100,10 +104,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "nForce";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -118,10 +123,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV11";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -136,10 +142,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV17";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -154,10 +161,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "nForce2";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -172,10 +180,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV18";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 74f88f48e1c2..ab920e0dc45b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -49,10 +50,11 @@ nv20_identify(struct nouveau_device *device)
device->cname = "NV20";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -67,10 +69,11 @@ nv20_identify(struct nouveau_device *device)
device->cname = "NV25";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -85,10 +88,11 @@ nv20_identify(struct nouveau_device *device)
device->cname = "NV28";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -103,10 +107,11 @@ nv20_identify(struct nouveau_device *device)
device->cname = "NV2A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 0ac1b2c4f61d..5f2110261b04 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -49,10 +50,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV30";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -67,10 +69,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV35";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -85,10 +88,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV31";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -104,10 +108,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV36";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -123,10 +128,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV34";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 41d59689a021..f3d55efe9ac9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -24,6 +24,8 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/vm.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -50,11 +52,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV40";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -70,11 +73,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV41";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -90,11 +94,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV42";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -110,11 +115,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV43";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -130,11 +136,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV45";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -150,11 +157,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "G70";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -170,11 +178,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "G71";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -190,11 +199,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "G73";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -210,11 +220,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV44";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -230,11 +241,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "G72";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -250,11 +262,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV44A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -270,11 +283,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C61";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -290,11 +304,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C51";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv4e_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -310,11 +325,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C73";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -330,11 +346,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C67";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -350,11 +367,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C68";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index 6ccfd8585ba2..5ed2fa51ddc2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -57,12 +58,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G80";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -79,12 +81,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G84";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -104,12 +107,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G86";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -129,12 +133,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G92";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -154,12 +159,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G94";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -179,12 +185,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G96";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -204,12 +211,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G98";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -229,12 +237,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G200";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -254,12 +263,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "MCP77/MCP78";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -279,12 +289,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "MCP79/MCP7A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -304,12 +315,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "GT215";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -330,12 +342,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "GT216";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -355,12 +368,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "GT218";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -380,12 +394,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "MCP89";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index f0461685a422..4393eb4d6564 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -57,12 +58,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF100";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -85,12 +87,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF104";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -113,12 +116,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF106";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -141,12 +145,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF114";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -169,12 +174,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF116";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -197,12 +203,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF108";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -225,12 +232,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF110";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -253,12 +261,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF119";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -282,4 +291,4 @@ nvc0_identify(struct nouveau_device *device)
}
return 0;
-}
+ }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 03a652876e73..5c12391619fd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -56,13 +57,14 @@ nve0_identify(struct nouveau_device *device)
case 0xe4:
device->cname = "GK104";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -84,13 +86,14 @@ nve0_identify(struct nouveau_device *device)
case 0xe7:
device->cname = "GK107";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -112,13 +115,14 @@ nve0_identify(struct nouveau_device *device)
case 0xe6:
device->cname = "GK106";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index ae7249b09797..4a8577838417 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -78,12 +78,13 @@ nv50_devinit_init(struct nouveau_object *object)
if (ret)
return ret;
- /* if we ran the init tables, execute first script pointer for each
- * display table output entry that has a matching dcb entry.
+ /* if we ran the init tables, we have to execute the first script
+ * pointer of each dcb entry's display encoder table in order
+ * to properly initialise each encoder.
*/
- while (priv->base.post && ver) {
- u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
- if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
+ while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
+ if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
+ &ver, &hdr, &cnt, &len, &info)) {
struct nvbios_init init = {
.subdev = nv_subdev(priv),
.bios = bios,
@@ -95,7 +96,8 @@ nv50_devinit_init(struct nouveau_object *object)
nvbios_exec(&init);
}
- };
+ i++;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index eac236ed19b2..0772ec978165 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -22,8 +22,10 @@
* Authors: Ben Skeggs
*/
-#include <core/object.h>
+#include <core/client.h>
#include <core/enum.h>
+#include <core/engctx.h>
+#include <core/object.h>
#include <subdev/fb.h>
#include <subdev/bios.h>
@@ -303,17 +305,18 @@ static const struct nouveau_enum vm_client[] = {
};
static const struct nouveau_enum vm_engine[] = {
- { 0x00000000, "PGRAPH", NULL },
- { 0x00000001, "PVP", NULL },
+ { 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR },
+ { 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP },
{ 0x00000004, "PEEPHOLE", NULL },
- { 0x00000005, "PFIFO", vm_pfifo_subclients },
+ { 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO },
{ 0x00000006, "BAR", vm_bar_subclients },
- { 0x00000008, "PPPP", NULL },
- { 0x00000009, "PBSP", NULL },
- { 0x0000000a, "PCRYPT", NULL },
+ { 0x00000008, "PPPP", NULL, NVDEV_ENGINE_PPP },
+ { 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG },
+ { 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP },
+ { 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CRYPT },
{ 0x0000000b, "PCOUNTER", NULL },
{ 0x0000000c, "SEMAPHORE_BG", NULL },
- { 0x0000000d, "PCOPY", NULL },
+ { 0x0000000d, "PCOPY", NULL, NVDEV_ENGINE_COPY0 },
{ 0x0000000e, "PDAEMON", NULL },
{}
};
@@ -335,8 +338,10 @@ static void
nv50_fb_intr(struct nouveau_subdev *subdev)
{
struct nouveau_device *device = nv_device(subdev);
+ struct nouveau_engine *engine;
struct nv50_fb_priv *priv = (void *)subdev;
const struct nouveau_enum *en, *cl;
+ struct nouveau_object *engctx = NULL;
u32 trap[6], idx, chan;
u8 st0, st1, st2, st3;
int i;
@@ -367,36 +372,55 @@ nv50_fb_intr(struct nouveau_subdev *subdev)
}
chan = (trap[2] << 16) | trap[1];
- nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x ",
+ en = nouveau_enum_find(vm_engine, st0);
+
+ if (en && en->data2) {
+ const struct nouveau_enum *orig_en = en;
+ while (en->name && en->value == st0 && en->data2) {
+ engine = nouveau_engine(subdev, en->data2);
+ if (engine) {
+ engctx = nouveau_engctx_get(engine, chan);
+ if (engctx)
+ break;
+ }
+ en++;
+ }
+ if (!engctx)
+ en = orig_en;
+ }
+
+ nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
(trap[5] & 0x00000100) ? "read" : "write",
- trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan);
+ trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
+ nouveau_client_name(engctx));
+
+ nouveau_engctx_put(engctx);
- en = nouveau_enum_find(vm_engine, st0);
if (en)
- printk("%s/", en->name);
+ pr_cont("%s/", en->name);
else
- printk("%02x/", st0);
+ pr_cont("%02x/", st0);
cl = nouveau_enum_find(vm_client, st2);
if (cl)
- printk("%s/", cl->name);
+ pr_cont("%s/", cl->name);
else
- printk("%02x/", st2);
+ pr_cont("%02x/", st2);
if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
else cl = NULL;
if (cl)
- printk("%s", cl->name);
+ pr_cont("%s", cl->name);
else
- printk("%02x", st3);
+ pr_cont("%02x", st3);
- printk(" reason: ");
+ pr_cont(" reason: ");
en = nouveau_enum_find(vm_fault, st1);
if (en)
- printk("%s\n", en->name);
+ pr_cont("%s\n", en->name);
else
- printk("0x%08x\n", st1);
+ pr_cont("0x%08x\n", st1);
}
static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index 9fb0f9b92d49..d422acc9af15 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -102,135 +102,19 @@ nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
return ret;
}
-static int
-nouveau_gpio_irq(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, bool on)
-{
- struct dcb_gpio_func func;
- int ret;
-
- ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
- if (ret == 0) {
- if (idx == 0 && gpio->irq_enable)
- gpio->irq_enable(gpio, func.line, on);
- else
- ret = -ENODEV;
- }
-
- return ret;
-}
-
-struct gpio_isr {
- struct nouveau_gpio *gpio;
- struct list_head head;
- struct work_struct work;
- int idx;
- struct dcb_gpio_func func;
- void (*handler)(void *, int);
- void *data;
- bool inhibit;
-};
-
-static void
-nouveau_gpio_isr_bh(struct work_struct *work)
-{
- struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
- struct nouveau_gpio *gpio = isr->gpio;
- unsigned long flags;
- int state;
-
- state = nouveau_gpio_get(gpio, isr->idx, isr->func.func,
- isr->func.line);
- if (state >= 0)
- isr->handler(isr->data, state);
-
- spin_lock_irqsave(&gpio->lock, flags);
- isr->inhibit = false;
- spin_unlock_irqrestore(&gpio->lock, flags);
-}
-
-static void
-nouveau_gpio_isr_run(struct nouveau_gpio *gpio, int idx, u32 line_mask)
-{
- struct gpio_isr *isr;
-
- if (idx != 0)
- return;
-
- spin_lock(&gpio->lock);
- list_for_each_entry(isr, &gpio->isr, head) {
- if (line_mask & (1 << isr->func.line)) {
- if (isr->inhibit)
- continue;
- isr->inhibit = true;
- schedule_work(&isr->work);
- }
- }
- spin_unlock(&gpio->lock);
-}
-
-static int
-nouveau_gpio_isr_add(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
- void (*handler)(void *, int), void *data)
-{
- struct gpio_isr *isr;
- unsigned long flags;
- int ret;
-
- isr = kzalloc(sizeof(*isr), GFP_KERNEL);
- if (!isr)
- return -ENOMEM;
-
- ret = nouveau_gpio_find(gpio, idx, tag, line, &isr->func);
- if (ret) {
- kfree(isr);
- return ret;
- }
-
- INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
- isr->gpio = gpio;
- isr->handler = handler;
- isr->data = data;
- isr->idx = idx;
-
- spin_lock_irqsave(&gpio->lock, flags);
- list_add(&isr->head, &gpio->isr);
- spin_unlock_irqrestore(&gpio->lock, flags);
- return 0;
-}
-
-static void
-nouveau_gpio_isr_del(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
- void (*handler)(void *, int), void *data)
+void
+_nouveau_gpio_dtor(struct nouveau_object *object)
{
- struct gpio_isr *isr, *tmp;
- struct dcb_gpio_func func;
- unsigned long flags;
- LIST_HEAD(tofree);
- int ret;
-
- ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
- if (ret == 0) {
- spin_lock_irqsave(&gpio->lock, flags);
- list_for_each_entry_safe(isr, tmp, &gpio->isr, head) {
- if (memcmp(&isr->func, &func, sizeof(func)) ||
- isr->idx != idx ||
- isr->handler != handler || isr->data != data)
- continue;
- list_move_tail(&isr->head, &tofree);
- }
- spin_unlock_irqrestore(&gpio->lock, flags);
-
- list_for_each_entry_safe(isr, tmp, &tofree, head) {
- flush_work(&isr->work);
- kfree(isr);
- }
- }
+ struct nouveau_gpio *gpio = (void *)object;
+ nouveau_event_destroy(&gpio->events);
+ nouveau_subdev_destroy(&gpio->base);
}
int
nouveau_gpio_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+ struct nouveau_oclass *oclass, int lines,
+ int length, void **pobject)
{
struct nouveau_gpio *gpio;
int ret;
@@ -241,15 +125,13 @@ nouveau_gpio_create_(struct nouveau_object *parent,
if (ret)
return ret;
+ ret = nouveau_event_create(lines, &gpio->events);
+ if (ret)
+ return ret;
+
gpio->find = nouveau_gpio_find;
gpio->set = nouveau_gpio_set;
gpio->get = nouveau_gpio_get;
- gpio->irq = nouveau_gpio_irq;
- gpio->isr_run = nouveau_gpio_isr_run;
- gpio->isr_add = nouveau_gpio_isr_add;
- gpio->isr_del = nouveau_gpio_isr_del;
- INIT_LIST_HEAD(&gpio->isr);
- spin_lock_init(&gpio->lock);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
index 168d16a9a8e9..76d5d5465ddd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
@@ -24,7 +24,7 @@
*
*/
-#include <subdev/gpio.h>
+#include "priv.h"
struct nv10_gpio_priv {
struct nouveau_gpio base;
@@ -83,27 +83,36 @@ nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
}
static void
-nv10_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
-{
- u32 mask = 0x00010001 << line;
-
- nv_wr32(gpio, 0x001104, mask);
- nv_mask(gpio, 0x001144, mask, on ? mask : 0);
-}
-
-static void
nv10_gpio_intr(struct nouveau_subdev *subdev)
{
struct nv10_gpio_priv *priv = (void *)subdev;
u32 intr = nv_rd32(priv, 0x001104);
u32 hi = (intr & 0x0000ffff) >> 0;
u32 lo = (intr & 0xffff0000) >> 16;
+ int i;
- priv->base.isr_run(&priv->base, 0, hi | lo);
+ for (i = 0; (hi | lo) && i < 32; i++) {
+ if ((hi | lo) & (1 << i))
+ nouveau_event_trigger(priv->base.events, i);
+ }
nv_wr32(priv, 0x001104, intr);
}
+static void
+nv10_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+ nv_wr32(event->priv, 0x001104, 0x00010001 << line);
+ nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00010001 << line);
+}
+
+static void
+nv10_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+ nv_wr32(event->priv, 0x001104, 0x00010001 << line);
+ nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00000000);
+}
+
static int
nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -112,14 +121,16 @@ nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv10_gpio_priv *priv;
int ret;
- ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+ ret = nouveau_gpio_create(parent, engine, oclass, 16, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
priv->base.drive = nv10_gpio_drive;
priv->base.sense = nv10_gpio_sense;
- priv->base.irq_enable = nv10_gpio_irq_enable;
+ priv->base.events->priv = priv;
+ priv->base.events->enable = nv10_gpio_intr_enable;
+ priv->base.events->disable = nv10_gpio_intr_disable;
nv_subdev(priv)->intr = nv10_gpio_intr;
return 0;
}
@@ -141,8 +152,6 @@ nv10_gpio_init(struct nouveau_object *object)
if (ret)
return ret;
- nv_wr32(priv, 0x001140, 0x00000000);
- nv_wr32(priv, 0x001100, 0xffffffff);
nv_wr32(priv, 0x001144, 0x00000000);
nv_wr32(priv, 0x001104, 0xffffffff);
return 0;
@@ -152,7 +161,6 @@ static int
nv10_gpio_fini(struct nouveau_object *object, bool suspend)
{
struct nv10_gpio_priv *priv = (void *)object;
- nv_wr32(priv, 0x001140, 0x00000000);
nv_wr32(priv, 0x001144, 0x00000000);
return nouveau_gpio_fini(&priv->base, suspend);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index bf13a1200f26..bf489dcf46e2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include <subdev/gpio.h>
+#include "priv.h"
struct nv50_gpio_priv {
struct nouveau_gpio base;
@@ -95,21 +95,12 @@ nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
}
void
-nv50_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
-{
- u32 reg = line < 16 ? 0xe050 : 0xe070;
- u32 mask = 0x00010001 << (line & 0xf);
-
- nv_wr32(gpio, reg + 4, mask);
- nv_mask(gpio, reg + 0, mask, on ? mask : 0);
-}
-
-void
nv50_gpio_intr(struct nouveau_subdev *subdev)
{
struct nv50_gpio_priv *priv = (void *)subdev;
u32 intr0, intr1 = 0;
u32 hi, lo;
+ int i;
intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
if (nv_device(priv)->chipset >= 0x90)
@@ -117,13 +108,35 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
hi = (intr0 & 0x0000ffff) | (intr1 << 16);
lo = (intr0 >> 16) | (intr1 & 0xffff0000);
- priv->base.isr_run(&priv->base, 0, hi | lo);
+
+ for (i = 0; (hi | lo) && i < 32; i++) {
+ if ((hi | lo) & (1 << i))
+ nouveau_event_trigger(priv->base.events, i);
+ }
nv_wr32(priv, 0xe054, intr0);
if (nv_device(priv)->chipset >= 0x90)
nv_wr32(priv, 0xe074, intr1);
}
+void
+nv50_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+ const u32 addr = line < 16 ? 0xe050 : 0xe070;
+ const u32 mask = 0x00010001 << (line & 0xf);
+ nv_wr32(event->priv, addr + 0x04, mask);
+ nv_mask(event->priv, addr + 0x00, mask, mask);
+}
+
+void
+nv50_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+ const u32 addr = line < 16 ? 0xe050 : 0xe070;
+ const u32 mask = 0x00010001 << (line & 0xf);
+ nv_wr32(event->priv, addr + 0x04, mask);
+ nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
+}
+
static int
nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -132,7 +145,9 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_gpio_priv *priv;
int ret;
- ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+ ret = nouveau_gpio_create(parent, engine, oclass,
+ nv_device(parent)->chipset >= 0x90 ? 32 : 16,
+ &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -140,7 +155,9 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.reset = nv50_gpio_reset;
priv->base.drive = nv50_gpio_drive;
priv->base.sense = nv50_gpio_sense;
- priv->base.irq_enable = nv50_gpio_irq_enable;
+ priv->base.events->priv = priv;
+ priv->base.events->enable = nv50_gpio_intr_enable;
+ priv->base.events->disable = nv50_gpio_intr_disable;
nv_subdev(priv)->intr = nv50_gpio_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index 83e8b8f16e6a..010431e3acec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -22,13 +22,13 @@
* Authors: Ben Skeggs
*/
-#include <subdev/gpio.h>
+#include "priv.h"
struct nvd0_gpio_priv {
struct nouveau_gpio base;
};
-static void
+void
nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
@@ -57,7 +57,7 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
}
}
-static int
+int
nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
{
u32 data = ((dir ^ 1) << 13) | (out << 12);
@@ -66,7 +66,7 @@ nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
return 0;
}
-static int
+int
nvd0_gpio_sense(struct nouveau_gpio *gpio, int line)
{
return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
@@ -80,7 +80,7 @@ nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvd0_gpio_priv *priv;
int ret;
- ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+ ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -88,7 +88,9 @@ nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.reset = nvd0_gpio_reset;
priv->base.drive = nvd0_gpio_drive;
priv->base.sense = nvd0_gpio_sense;
- priv->base.irq_enable = nv50_gpio_irq_enable;
+ priv->base.events->priv = priv;
+ priv->base.events->enable = nv50_gpio_intr_enable;
+ priv->base.events->disable = nv50_gpio_intr_disable;
nv_subdev(priv)->intr = nv50_gpio_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
new file mode 100644
index 000000000000..16b8c5bf5efa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+struct nve0_gpio_priv {
+ struct nouveau_gpio base;
+};
+
+void
+nve0_gpio_intr(struct nouveau_subdev *subdev)
+{
+ struct nve0_gpio_priv *priv = (void *)subdev;
+ u32 intr0 = nv_rd32(priv, 0xdc00) & nv_rd32(priv, 0xdc08);
+ u32 intr1 = nv_rd32(priv, 0xdc80) & nv_rd32(priv, 0xdc88);
+ u32 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+ u32 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+ int i;
+
+ for (i = 0; (hi | lo) && i < 32; i++) {
+ if ((hi | lo) & (1 << i))
+ nouveau_event_trigger(priv->base.events, i);
+ }
+
+ nv_wr32(priv, 0xdc00, intr0);
+ nv_wr32(priv, 0xdc88, intr1);
+}
+
+void
+nve0_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+ const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
+ const u32 mask = 0x00010001 << (line & 0xf);
+ nv_wr32(event->priv, addr + 0x08, mask);
+ nv_mask(event->priv, addr + 0x00, mask, mask);
+}
+
+void
+nve0_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+ const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
+ const u32 mask = 0x00010001 << (line & 0xf);
+ nv_wr32(event->priv, addr + 0x08, mask);
+ nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
+}
+
+int
+nve0_gpio_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nve0_gpio_priv *priv = (void *)object;
+ nv_wr32(priv, 0xdc08, 0x00000000);
+ nv_wr32(priv, 0xdc88, 0x00000000);
+ return nouveau_gpio_fini(&priv->base, suspend);
+}
+
+int
+nve0_gpio_init(struct nouveau_object *object)
+{
+ struct nve0_gpio_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_gpio_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0xdc00, 0xffffffff);
+ nv_wr32(priv, 0xdc80, 0xffffffff);
+ return 0;
+}
+
+void
+nve0_gpio_dtor(struct nouveau_object *object)
+{
+ struct nve0_gpio_priv *priv = (void *)object;
+ nouveau_gpio_destroy(&priv->base);
+}
+
+static int
+nve0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_gpio_priv *priv;
+ int ret;
+
+ ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.reset = nvd0_gpio_reset;
+ priv->base.drive = nvd0_gpio_drive;
+ priv->base.sense = nvd0_gpio_sense;
+ priv->base.events->priv = priv;
+ priv->base.events->enable = nve0_gpio_intr_enable;
+ priv->base.events->disable = nve0_gpio_intr_disable;
+ nv_subdev(priv)->intr = nve0_gpio_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_gpio_oclass = {
+ .handle = NV_SUBDEV(GPIO, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_gpio_ctor,
+ .dtor = nv50_gpio_dtor,
+ .init = nve0_gpio_init,
+ .fini = nve0_gpio_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
new file mode 100644
index 000000000000..2ee1c895c782
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_GPIO_H__
+#define __NVKM_GPIO_H__
+
+#include <subdev/gpio.h>
+
+void nv50_gpio_dtor(struct nouveau_object *);
+int nv50_gpio_init(struct nouveau_object *);
+int nv50_gpio_fini(struct nouveau_object *, bool);
+void nv50_gpio_intr(struct nouveau_subdev *);
+void nv50_gpio_intr_enable(struct nouveau_event *, int line);
+void nv50_gpio_intr_disable(struct nouveau_event *, int line);
+
+void nvd0_gpio_reset(struct nouveau_gpio *, u8);
+int nvd0_gpio_drive(struct nouveau_gpio *, int, int, int);
+int nvd0_gpio_sense(struct nouveau_gpio *, int);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
new file mode 100644
index 000000000000..dec94e9d776a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/i2c.h>
+
+struct anx9805_i2c_port {
+ struct nouveau_i2c_port base;
+ u32 addr;
+ u32 ctrl;
+};
+
+static int
+anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh)
+{
+ struct anx9805_i2c_port *chan = (void *)port;
+ struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
+ u8 tmp, i;
+
+ nv_wri2cr(mast, chan->addr, 0xa0, link_bw);
+ nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
+ nv_wri2cr(mast, chan->addr, 0xa2, 0x01);
+ nv_wri2cr(mast, chan->addr, 0xa8, 0x01);
+
+ i = 0;
+ while ((tmp = nv_rdi2cr(mast, chan->addr, 0xa8)) & 0x01) {
+ mdelay(5);
+ if (i++ == 100) {
+ nv_error(port, "link training timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ if (tmp & 0x70) {
+ nv_error(port, "link training failed: 0x%02x\n", tmp);
+ return -EIO;
+ }
+
+ return 1;
+}
+
+static int
+anx9805_aux(struct nouveau_i2c_port *port, u8 type, u32 addr, u8 *data, u8 size)
+{
+ struct anx9805_i2c_port *chan = (void *)port;
+ struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
+ int i, ret = -ETIMEDOUT;
+ u8 tmp;
+
+ tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04;
+ nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04);
+ nv_wri2cr(mast, chan->ctrl, 0x07, tmp);
+ nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
+
+ nv_wri2cr(mast, chan->addr, 0xe4, 0x80);
+ for (i = 0; !(type & 1) && i < size; i++)
+ nv_wri2cr(mast, chan->addr, 0xf0 + i, data[i]);
+ nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type);
+ nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >> 0);
+ nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >> 8);
+ nv_wri2cr(mast, chan->addr, 0xe8, (addr & 0xf0000) >> 16);
+ nv_wri2cr(mast, chan->addr, 0xe9, 0x01);
+
+ i = 0;
+ while ((tmp = nv_rdi2cr(mast, chan->addr, 0xe9)) & 0x01) {
+ mdelay(5);
+ if (i++ == 32)
+ goto done;
+ }
+
+ if ((tmp = nv_rdi2cr(mast, chan->ctrl, 0xf7)) & 0x01) {
+ ret = -EIO;
+ goto done;
+ }
+
+ for (i = 0; (type & 1) && i < size; i++)
+ data[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
+ ret = 0;
+done:
+ nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
+ return ret;
+}
+
+static const struct nouveau_i2c_func
+anx9805_aux_func = {
+ .aux = anx9805_aux,
+ .lnk_ctl = anx9805_train,
+};
+
+static int
+anx9805_aux_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_i2c_port *mast = (void *)parent;
+ struct anx9805_i2c_port *chan;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_aux_algo, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ switch ((oclass->handle & 0xff00) >> 8) {
+ case 0x0d:
+ chan->addr = 0x38;
+ chan->ctrl = 0x39;
+ break;
+ case 0x0e:
+ chan->addr = 0x3c;
+ chan->ctrl = 0x3b;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ if (mast->adapter.algo == &i2c_bit_algo) {
+ struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
+ algo->udelay = max(algo->udelay, 40);
+ }
+
+ chan->base.func = &anx9805_aux_func;
+ return 0;
+}
+
+static struct nouveau_ofuncs
+anx9805_aux_ofuncs = {
+ .ctor = anx9805_aux_chan_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+};
+
+static int
+anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct anx9805_i2c_port *port = adap->algo_data;
+ struct nouveau_i2c_port *mast = (void *)nv_object(port)->parent;
+ struct i2c_msg *msg = msgs;
+ int ret = -ETIMEDOUT;
+ int i, j, cnt = num;
+ u8 seg = 0x00, off = 0x00, tmp;
+
+ tmp = nv_rdi2cr(mast, port->ctrl, 0x07) & ~0x10;
+ nv_wri2cr(mast, port->ctrl, 0x07, tmp | 0x10);
+ nv_wri2cr(mast, port->ctrl, 0x07, tmp);
+ nv_wri2cr(mast, port->addr, 0x43, 0x05);
+ mdelay(5);
+
+ while (cnt--) {
+ if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
+ nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1);
+ nv_wri2cr(mast, port->addr, 0x41, seg);
+ nv_wri2cr(mast, port->addr, 0x42, off);
+ nv_wri2cr(mast, port->addr, 0x44, msg->len);
+ nv_wri2cr(mast, port->addr, 0x45, 0x00);
+ nv_wri2cr(mast, port->addr, 0x43, 0x01);
+ for (i = 0; i < msg->len; i++) {
+ j = 0;
+ while (nv_rdi2cr(mast, port->addr, 0x46) & 0x10) {
+ mdelay(5);
+ if (j++ == 32)
+ goto done;
+ }
+ msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47);
+ }
+ } else
+ if (!(msg->flags & I2C_M_RD)) {
+ if (msg->addr == 0x50 && msg->len == 0x01) {
+ off = msg->buf[0];
+ } else
+ if (msg->addr == 0x30 && msg->len == 0x01) {
+ seg = msg->buf[0];
+ } else
+ goto done;
+ } else {
+ goto done;
+ }
+ msg++;
+ }
+
+ ret = num;
+done:
+ nv_wri2cr(mast, port->addr, 0x43, 0x00);
+ return ret;
+}
+
+static u32
+anx9805_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm
+anx9805_i2c_algo = {
+ .master_xfer = anx9805_xfer,
+ .functionality = anx9805_func
+};
+
+static const struct nouveau_i2c_func
+anx9805_i2c_func = {
+};
+
+static int
+anx9805_ddc_port_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_i2c_port *mast = (void *)parent;
+ struct anx9805_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &anx9805_i2c_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ switch ((oclass->handle & 0xff00) >> 8) {
+ case 0x0d:
+ port->addr = 0x3d;
+ port->ctrl = 0x39;
+ break;
+ case 0x0e:
+ port->addr = 0x3f;
+ port->ctrl = 0x3b;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ if (mast->adapter.algo == &i2c_bit_algo) {
+ struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
+ algo->udelay = max(algo->udelay, 40);
+ }
+
+ port->base.func = &anx9805_i2c_func;
+ return 0;
+}
+
+static struct nouveau_ofuncs
+anx9805_ddc_ofuncs = {
+ .ctor = anx9805_ddc_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+};
+
+struct nouveau_oclass
+nouveau_anx9805_sclass[] = {
+ { .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs },
+ { .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs },
+ { .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs },
+ { .handle = NV_I2C_TYPE_EXTAUX(0x0e), .ofuncs = &anx9805_aux_ofuncs },
+ {}
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index dc27e794a851..5de074ad170b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -24,151 +24,40 @@
#include <subdev/i2c.h>
-/******************************************************************************
- * aux channel util functions
- *****************************************************************************/
-#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
-#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct nouveau_i2c *aux, int ch)
-{
- nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
-}
-
-static int
-auxch_init(struct nouveau_i2c *aux, int ch)
-{
- const u32 unksel = 1; /* nfi which to use, or if it matters.. */
- const u32 ureq = unksel ? 0x00100000 : 0x00200000;
- const u32 urep = unksel ? 0x01000000 : 0x02000000;
- u32 ctrl, timeout;
-
- /* wait up to 1ms for any previous transaction to be done... */
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
- return -EBUSY;
- }
- } while (ctrl & 0x03010000);
-
- /* set some magic, and wait up to 1ms for it to appear */
- nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("magic wait 0x%08x\n", ctrl);
- auxch_fini(aux, ch);
- return -EBUSY;
- }
- } while ((ctrl & 0x03000000) != urep);
-
- return 0;
-}
-
-static int
-auxch_tx(struct nouveau_i2c *aux, int ch, u8 type, u32 addr, u8 *data, u8 size)
-{
- u32 ctrl, stat, timeout, retries;
- u32 xbuf[4] = {};
- int ret, i;
-
- AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
-
- ret = auxch_init(aux, ch);
- if (ret)
- goto out;
-
- stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
- if (!(stat & 0x10000000)) {
- AUX_DBG("sink not detected\n");
- ret = -ENXIO;
- goto out;
- }
-
- if (!(type & 1)) {
- memcpy(xbuf, data, size);
- for (i = 0; i < 16; i += 4) {
- AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
- nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
- }
- }
-
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- ctrl &= ~0x0001f0ff;
- ctrl |= type << 12;
- ctrl |= size - 1;
- nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
-
- /* retry transaction a number of times on failure... */
- ret = -EREMOTEIO;
- for (retries = 0; retries < 32; retries++) {
- /* reset, and delay a while if this is a retry */
- nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
- nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
- if (retries)
- udelay(400);
-
- /* transaction request, wait up to 1ms for it to complete */
- nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
-
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("tx req timeout 0x%08x\n", ctrl);
- goto out;
- }
- } while (ctrl & 0x00010000);
-
- /* read status, and check if transaction completed ok */
- stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
- if (!(stat & 0x000f0f00)) {
- ret = 0;
- break;
- }
-
- AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
- }
-
- if (type & 1) {
- for (i = 0; i < 16; i += 4) {
- xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
- AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
- }
- memcpy(data, xbuf, size);
- }
-
-out:
- auxch_fini(aux, ch);
- return ret;
-}
-
int
-nv_rdaux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size)
+nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
{
- return auxch_tx(auxch->i2c, auxch->drive, 9, addr, data, size);
+ if (port->func->aux) {
+ if (port->func->acquire)
+ port->func->acquire(port);
+ return port->func->aux(port, 9, addr, data, size);
+ }
+ return -ENODEV;
}
int
-nv_wraux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size)
+nv_wraux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
{
- return auxch_tx(auxch->i2c, auxch->drive, 8, addr, data, size);
+ if (port->func->aux) {
+ if (port->func->acquire)
+ port->func->acquire(port);
+ return port->func->aux(port, 8, addr, data, size);
+ }
+ return -ENODEV;
}
static int
aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct nouveau_i2c_port *auxch = (struct nouveau_i2c_port *)adap;
+ struct nouveau_i2c_port *port = adap->algo_data;
struct i2c_msg *msg = msgs;
int ret, mcnt = num;
+ if (!port->func->aux)
+ return -ENODEV;
+ if ( port->func->acquire)
+ port->func->acquire(port);
+
while (mcnt--) {
u8 remaining = msg->len;
u8 *ptr = msg->buf;
@@ -185,8 +74,7 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (mcnt || remaining > 16)
cmd |= 4; /* MOT */
- ret = auxch_tx(auxch->i2c, auxch->drive, cmd,
- msg->addr, ptr, cnt);
+ ret = port->func->aux(port, cmd, msg->addr, ptr, cnt);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index dbfc2abf0cfe..a114a0ed7e98 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012 Red Hat Inc.
+ * Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,64 +22,136 @@
* Authors: Ben Skeggs
*/
-#include "core/option.h"
+#include <core/option.h>
-#include "subdev/i2c.h"
-#include "subdev/vga.h"
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/i2c.h>
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
-int
-nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
+/******************************************************************************
+ * interface to linux i2c bit-banging algorithm
+ *****************************************************************************/
+
+#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
+#define CSTMSEL true
+#else
+#define CSTMSEL false
+#endif
+
+static int
+nouveau_i2c_pre_xfer(struct i2c_adapter *adap)
{
- u8 val;
- struct i2c_msg msgs[] = {
- { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
- { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
- };
+ struct i2c_algo_bit_data *bit = adap->algo_data;
+ struct nouveau_i2c_port *port = bit->data;
+ if (port->func->acquire)
+ port->func->acquire(port);
+ return 0;
+}
- int ret = i2c_transfer(&port->adapter, msgs, 2);
- if (ret != 2)
- return -EIO;
+static void
+nouveau_i2c_setscl(void *data, int state)
+{
+ struct nouveau_i2c_port *port = data;
+ port->func->drive_scl(port, state);
+}
- return val;
+static void
+nouveau_i2c_setsda(void *data, int state)
+{
+ struct nouveau_i2c_port *port = data;
+ port->func->drive_sda(port, state);
}
-int
-nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val)
+static int
+nouveau_i2c_getscl(void *data)
{
- struct i2c_msg msgs[] = {
- { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
- { .addr = addr, .flags = 0, .len = 1, .buf = &val },
- };
+ struct nouveau_i2c_port *port = data;
+ return port->func->sense_scl(port);
+}
- int ret = i2c_transfer(&port->adapter, msgs, 2);
- if (ret != 2)
- return -EIO;
+static int
+nouveau_i2c_getsda(void *data)
+{
+ struct nouveau_i2c_port *port = data;
+ return port->func->sense_sda(port);
+}
- return 0;
+/******************************************************************************
+ * base i2c "port" class implementation
+ *****************************************************************************/
+
+void
+_nouveau_i2c_port_dtor(struct nouveau_object *object)
+{
+ struct nouveau_i2c_port *port = (void *)object;
+ i2c_del_adapter(&port->adapter);
+ nouveau_object_destroy(&port->base);
}
-bool
-nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr)
+int
+nouveau_i2c_port_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u8 index,
+ const struct i2c_algorithm *algo,
+ int size, void **pobject)
{
- u8 buf[] = { 0 };
- struct i2c_msg msgs[] = {
- {
- .addr = addr,
- .flags = 0,
- .len = 1,
- .buf = buf,
- },
- {
- .addr = addr,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
+ struct nouveau_device *device = nv_device(parent);
+ struct nouveau_i2c *i2c = (void *)engine;
+ struct nouveau_i2c_port *port;
+ int ret;
- return i2c_transfer(&port->adapter, msgs, 2) == 2;
+ ret = nouveau_object_create_(parent, engine, oclass, 0, size, pobject);
+ port = *pobject;
+ if (ret)
+ return ret;
+
+ snprintf(port->adapter.name, sizeof(port->adapter.name),
+ "nouveau-%s-%d", device->name, index);
+ port->adapter.owner = THIS_MODULE;
+ port->adapter.dev.parent = &device->pdev->dev;
+ port->index = index;
+ i2c_set_adapdata(&port->adapter, i2c);
+
+ if ( algo == &nouveau_i2c_bit_algo &&
+ !nouveau_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
+ struct i2c_algo_bit_data *bit;
+
+ bit = kzalloc(sizeof(*bit), GFP_KERNEL);
+ if (!bit)
+ return -ENOMEM;
+
+ bit->udelay = 10;
+ bit->timeout = usecs_to_jiffies(2200);
+ bit->data = port;
+ bit->pre_xfer = nouveau_i2c_pre_xfer;
+ bit->setsda = nouveau_i2c_setsda;
+ bit->setscl = nouveau_i2c_setscl;
+ bit->getsda = nouveau_i2c_getsda;
+ bit->getscl = nouveau_i2c_getscl;
+
+ port->adapter.algo_data = bit;
+ ret = i2c_bit_add_bus(&port->adapter);
+ } else {
+ port->adapter.algo_data = port;
+ port->adapter.algo = algo;
+ ret = i2c_add_adapter(&port->adapter);
+ }
+
+ /* drop port's i2c subdev refcount, i2c handles this itself */
+ if (ret == 0) {
+ list_add_tail(&port->head, &i2c->ports);
+ atomic_dec(&engine->refcount);
+ }
+
+ return ret;
}
+/******************************************************************************
+ * base i2c subdev class implementation
+ *****************************************************************************/
+
static struct nouveau_i2c_port *
nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
{
@@ -103,29 +175,23 @@ nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
list_for_each_entry(port, &i2c->ports, head) {
if (port->index == index)
- break;
+ return port;
}
- if (&port->head == &i2c->ports)
- return NULL;
+ return NULL;
+}
- if (nv_device(i2c)->card_type >= NV_50 && (port->dcb & 0x00000100)) {
- u32 reg = 0x00e500, val;
- if (port->type == 6) {
- reg += port->drive * 0x50;
- val = 0x2002;
- } else {
- reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
- val = 0xe001;
- }
+static struct nouveau_i2c_port *
+nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type)
+{
+ struct nouveau_i2c_port *port;
- /* nfi, but neither auxch or i2c work if it's 1 */
- nv_mask(i2c, reg + 0x0c, 0x00000001, 0x00000000);
- /* nfi, but switches auxch vs normal i2c */
- nv_mask(i2c, reg + 0x00, 0x0000f003, val);
+ list_for_each_entry(port, &i2c->ports, head) {
+ if (nv_hclass(port) == type)
+ return port;
}
- return port;
+ return NULL;
}
static int
@@ -155,109 +221,86 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
return -ENODEV;
}
-void
-nouveau_i2c_drive_scl(void *data, int state)
+int
+_nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
{
- struct nouveau_i2c_port *port = data;
+ struct nouveau_i2c *i2c = (void *)object;
+ struct nouveau_i2c_port *port;
+ int ret;
- if (port->type == DCB_I2C_NV04_BIT) {
- u8 val = nv_rdvgac(port->i2c, 0, port->drive);
- if (state) val |= 0x20;
- else val &= 0xdf;
- nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
- } else
- if (port->type == DCB_I2C_NV4E_BIT) {
- nv_mask(port->i2c, port->drive, 0x2f, state ? 0x21 : 0x01);
- } else
- if (port->type == DCB_I2C_NVIO_BIT) {
- if (state) port->state |= 0x01;
- else port->state &= 0xfe;
- nv_wr32(port->i2c, port->drive, 4 | port->state);
+ list_for_each_entry(port, &i2c->ports, head) {
+ ret = nv_ofuncs(port)->fini(nv_object(port), suspend);
+ if (ret && suspend)
+ goto fail;
}
-}
-
-void
-nouveau_i2c_drive_sda(void *data, int state)
-{
- struct nouveau_i2c_port *port = data;
- if (port->type == DCB_I2C_NV04_BIT) {
- u8 val = nv_rdvgac(port->i2c, 0, port->drive);
- if (state) val |= 0x10;
- else val &= 0xef;
- nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
- } else
- if (port->type == DCB_I2C_NV4E_BIT) {
- nv_mask(port->i2c, port->drive, 0x1f, state ? 0x11 : 0x01);
- } else
- if (port->type == DCB_I2C_NVIO_BIT) {
- if (state) port->state |= 0x02;
- else port->state &= 0xfd;
- nv_wr32(port->i2c, port->drive, 4 | port->state);
+ return nouveau_subdev_fini(&i2c->base, suspend);
+fail:
+ list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
+ nv_ofuncs(port)->init(nv_object(port));
}
+
+ return ret;
}
int
-nouveau_i2c_sense_scl(void *data)
+_nouveau_i2c_init(struct nouveau_object *object)
{
- struct nouveau_i2c_port *port = data;
- struct nouveau_device *device = nv_device(port->i2c);
-
- if (port->type == DCB_I2C_NV04_BIT) {
- return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x04);
- } else
- if (port->type == DCB_I2C_NV4E_BIT) {
- return !!(nv_rd32(port->i2c, port->sense) & 0x00040000);
- } else
- if (port->type == DCB_I2C_NVIO_BIT) {
- if (device->card_type < NV_D0)
- return !!(nv_rd32(port->i2c, port->sense) & 0x01);
- else
- return !!(nv_rd32(port->i2c, port->sense) & 0x10);
+ struct nouveau_i2c *i2c = (void *)object;
+ struct nouveau_i2c_port *port;
+ int ret;
+
+ ret = nouveau_subdev_init(&i2c->base);
+ if (ret == 0) {
+ list_for_each_entry(port, &i2c->ports, head) {
+ ret = nv_ofuncs(port)->init(nv_object(port));
+ if (ret)
+ goto fail;
+ }
}
- return 0;
+ return ret;
+fail:
+ list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
+ nv_ofuncs(port)->fini(nv_object(port), false);
+ }
+
+ return ret;
}
-int
-nouveau_i2c_sense_sda(void *data)
+void
+_nouveau_i2c_dtor(struct nouveau_object *object)
{
- struct nouveau_i2c_port *port = data;
- struct nouveau_device *device = nv_device(port->i2c);
-
- if (port->type == DCB_I2C_NV04_BIT) {
- return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x08);
- } else
- if (port->type == DCB_I2C_NV4E_BIT) {
- return !!(nv_rd32(port->i2c, port->sense) & 0x00080000);
- } else
- if (port->type == DCB_I2C_NVIO_BIT) {
- if (device->card_type < NV_D0)
- return !!(nv_rd32(port->i2c, port->sense) & 0x02);
- else
- return !!(nv_rd32(port->i2c, port->sense) & 0x20);
+ struct nouveau_i2c *i2c = (void *)object;
+ struct nouveau_i2c_port *port, *temp;
+
+ list_for_each_entry_safe(port, temp, &i2c->ports, head) {
+ nouveau_object_ref(NULL, (struct nouveau_object **)&port);
}
- return 0;
+ nouveau_subdev_destroy(&i2c->base);
}
-static const u32 nv50_i2c_port[] = {
- 0x00e138, 0x00e150, 0x00e168, 0x00e180,
- 0x00e254, 0x00e274, 0x00e764, 0x00e780,
- 0x00e79c, 0x00e7b8
+static struct nouveau_oclass *
+nouveau_i2c_extdev_sclass[] = {
+ nouveau_anx9805_sclass,
};
-static int
-nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+int
+nouveau_i2c_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ struct nouveau_oclass *sclass,
+ int length, void **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nouveau_bios *bios = nouveau_bios(parent);
- struct nouveau_i2c_port *port;
struct nouveau_i2c *i2c;
+ struct nouveau_object *object;
struct dcb_i2c_entry info;
- int ret, i = -1;
+ int ret, i, j, index = -1;
+ struct dcb_output outp;
+ u8 ver, hdr;
+ u32 data;
ret = nouveau_subdev_create(parent, engine, oclass, 0,
"I2C", "i2c", &i2c);
@@ -266,142 +309,60 @@ nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
i2c->find = nouveau_i2c_find;
+ i2c->find_type = nouveau_i2c_find_type;
i2c->identify = nouveau_i2c_identify;
INIT_LIST_HEAD(&i2c->ports);
- while (!dcb_i2c_parse(bios, ++i, &info)) {
+ while (!dcb_i2c_parse(bios, ++index, &info)) {
if (info.type == DCB_I2C_UNUSED)
continue;
- port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (!port) {
- nv_error(i2c, "failed port memory alloc at %d\n", i);
- break;
- }
-
- port->type = info.type;
- switch (port->type) {
- case DCB_I2C_NV04_BIT:
- port->drive = info.drive;
- port->sense = info.sense;
- break;
- case DCB_I2C_NV4E_BIT:
- port->drive = 0x600800 + info.drive;
- port->sense = port->drive;
- break;
- case DCB_I2C_NVIO_BIT:
- port->drive = info.drive & 0x0f;
- if (device->card_type < NV_D0) {
- if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
- break;
- port->drive = nv50_i2c_port[port->drive];
- port->sense = port->drive;
- } else {
- port->drive = 0x00d014 + (port->drive * 0x20);
- port->sense = port->drive;
+ oclass = sclass;
+ do {
+ ret = -EINVAL;
+ if (oclass->handle == info.type) {
+ ret = nouveau_object_ctor(*pobject, *pobject,
+ oclass, &info,
+ index, &object);
}
+ } while (ret && (++oclass)->handle);
+ }
+
+ /* in addition to the busses specified in the i2c table, there
+ * may be ddc/aux channels hiding behind external tmds/dp/etc
+ * transmitters.
+ */
+ index = ((index + 0x0f) / 0x10) * 0x10;
+ i = -1;
+ while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
+ if (!outp.location || !outp.extdev)
+ continue;
+
+ switch (outp.type) {
+ case DCB_OUTPUT_TMDS:
+ info.type = NV_I2C_TYPE_EXTDDC(outp.extdev);
break;
- case DCB_I2C_NVIO_AUX:
- port->drive = info.drive & 0x0f;
- port->sense = port->drive;
- port->adapter.algo = &nouveau_i2c_aux_algo;
+ case DCB_OUTPUT_DP:
+ info.type = NV_I2C_TYPE_EXTAUX(outp.extdev);
break;
default:
- break;
- }
-
- if (!port->adapter.algo && !port->drive) {
- nv_error(i2c, "I2C%d: type %d index %x/%x unknown\n",
- i, port->type, port->drive, port->sense);
- kfree(port);
continue;
}
- snprintf(port->adapter.name, sizeof(port->adapter.name),
- "nouveau-%s-%d", device->name, i);
- port->adapter.owner = THIS_MODULE;
- port->adapter.dev.parent = &device->pdev->dev;
- port->i2c = i2c;
- port->index = i;
- port->dcb = info.data;
- i2c_set_adapdata(&port->adapter, i2c);
-
- if (port->adapter.algo != &nouveau_i2c_aux_algo) {
- nouveau_i2c_drive_scl(port, 0);
- nouveau_i2c_drive_sda(port, 1);
- nouveau_i2c_drive_scl(port, 1);
-
-#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
- if (nouveau_boolopt(device->cfgopt, "NvI2C", true)) {
-#else
- if (nouveau_boolopt(device->cfgopt, "NvI2C", false)) {
-#endif
- port->adapter.algo = &nouveau_i2c_bit_algo;
- ret = i2c_add_adapter(&port->adapter);
- } else {
- port->adapter.algo_data = &port->bit;
- port->bit.udelay = 10;
- port->bit.timeout = usecs_to_jiffies(2200);
- port->bit.data = port;
- port->bit.setsda = nouveau_i2c_drive_sda;
- port->bit.setscl = nouveau_i2c_drive_scl;
- port->bit.getsda = nouveau_i2c_sense_sda;
- port->bit.getscl = nouveau_i2c_sense_scl;
- ret = i2c_bit_add_bus(&port->adapter);
- }
- } else {
- port->adapter.algo = &nouveau_i2c_aux_algo;
- ret = i2c_add_adapter(&port->adapter);
- }
-
- if (ret) {
- nv_error(i2c, "I2C%d: failed register: %d\n", i, ret);
- kfree(port);
- continue;
+ ret = -ENODEV;
+ j = -1;
+ while (ret && ++j < ARRAY_SIZE(nouveau_i2c_extdev_sclass)) {
+ parent = nv_object(i2c->find(i2c, outp.i2c_index));
+ oclass = nouveau_i2c_extdev_sclass[j];
+ do {
+ if (oclass->handle != info.type)
+ continue;
+ ret = nouveau_object_ctor(parent, *pobject,
+ oclass, NULL,
+ index++, &object);
+ } while (ret && (++oclass)->handle);
}
-
- list_add_tail(&port->head, &i2c->ports);
}
return 0;
}
-
-static void
-nouveau_i2c_dtor(struct nouveau_object *object)
-{
- struct nouveau_i2c *i2c = (void *)object;
- struct nouveau_i2c_port *port, *temp;
-
- list_for_each_entry_safe(port, temp, &i2c->ports, head) {
- i2c_del_adapter(&port->adapter);
- list_del(&port->head);
- kfree(port);
- }
-
- nouveau_subdev_destroy(&i2c->base);
-}
-
-static int
-nouveau_i2c_init(struct nouveau_object *object)
-{
- struct nouveau_i2c *i2c = (void *)object;
- return nouveau_subdev_init(&i2c->base);
-}
-
-static int
-nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
-{
- struct nouveau_i2c *i2c = (void *)object;
- return nouveau_subdev_fini(&i2c->base, suspend);
-}
-
-struct nouveau_oclass
-nouveau_i2c_oclass = {
- .handle = NV_SUBDEV(I2C, 0x00),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nouveau_i2c_ctor,
- .dtor = nouveau_i2c_dtor,
- .init = nouveau_i2c_init,
- .fini = nouveau_i2c_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
index 1c4c9a5c8e2e..a6e72d3b06b5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
@@ -32,25 +32,25 @@
static inline void
i2c_drive_scl(struct nouveau_i2c_port *port, int state)
{
- nouveau_i2c_drive_scl(port, state);
+ port->func->drive_scl(port, state);
}
static inline void
i2c_drive_sda(struct nouveau_i2c_port *port, int state)
{
- nouveau_i2c_drive_sda(port, state);
+ port->func->drive_sda(port, state);
}
static inline int
i2c_sense_scl(struct nouveau_i2c_port *port)
{
- return nouveau_i2c_sense_scl(port);
+ return port->func->sense_scl(port);
}
static inline int
i2c_sense_sda(struct nouveau_i2c_port *port)
{
- return nouveau_i2c_sense_sda(port);
+ return port->func->sense_sda(port);
}
static void
@@ -77,9 +77,8 @@ i2c_start(struct nouveau_i2c_port *port)
{
int ret = 0;
- port->state = i2c_sense_scl(port);
- port->state |= i2c_sense_sda(port) << 1;
- if (port->state != 3) {
+ if (!i2c_sense_scl(port) ||
+ !i2c_sense_sda(port)) {
i2c_drive_scl(port, 0);
i2c_drive_sda(port, 1);
if (!i2c_raise_scl(port))
@@ -184,10 +183,13 @@ i2c_addr(struct nouveau_i2c_port *port, struct i2c_msg *msg)
static int
i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct nouveau_i2c_port *port = (struct nouveau_i2c_port *)adap;
+ struct nouveau_i2c_port *port = adap->algo_data;
struct i2c_msg *msg = msgs;
int ret = 0, mcnt = num;
+ if (port->func->acquire)
+ port->func->acquire(port);
+
while (!ret && mcnt--) {
u8 remaining = msg->len;
u8 *ptr = msg->buf;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
new file mode 100644
index 000000000000..2ad18840fe63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
+
+struct nv04_i2c_priv {
+ struct nouveau_i2c base;
+};
+
+struct nv04_i2c_port {
+ struct nouveau_i2c_port base;
+ u8 drive;
+ u8 sense;
+};
+
+static void
+nv04_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+{
+ struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_port *port = (void *)base;
+ u8 val = nv_rdvgac(priv, 0, port->drive);
+ if (state) val |= 0x20;
+ else val &= 0xdf;
+ nv_wrvgac(priv, 0, port->drive, val | 0x01);
+}
+
+static void
+nv04_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+{
+ struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_port *port = (void *)base;
+ u8 val = nv_rdvgac(priv, 0, port->drive);
+ if (state) val |= 0x10;
+ else val &= 0xef;
+ nv_wrvgac(priv, 0, port->drive, val | 0x01);
+}
+
+static int
+nv04_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+ struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_port *port = (void *)base;
+ return !!(nv_rdvgac(priv, 0, port->sense) & 0x04);
+}
+
+static int
+nv04_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+ struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_port *port = (void *)base;
+ return !!(nv_rdvgac(priv, 0, port->sense) & 0x08);
+}
+
+static const struct nouveau_i2c_func
+nv04_i2c_func = {
+ .drive_scl = nv04_i2c_drive_scl,
+ .drive_sda = nv04_i2c_drive_sda,
+ .sense_scl = nv04_i2c_sense_scl,
+ .sense_sda = nv04_i2c_sense_sda,
+};
+
+static int
+nv04_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv04_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.func = &nv04_i2c_func;
+ port->drive = info->drive;
+ port->sense = info->sense;
+ return 0;
+}
+
+static struct nouveau_oclass
+nv04_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV04_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nv04_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nv04_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv04_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
new file mode 100644
index 000000000000..f501ae25dbb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
+
+struct nv4e_i2c_priv {
+ struct nouveau_i2c base;
+};
+
+struct nv4e_i2c_port {
+ struct nouveau_i2c_port base;
+ u32 addr;
+};
+
+static void
+nv4e_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+{
+ struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_port *port = (void *)base;
+ nv_mask(priv, port->addr, 0x2f, state ? 0x21 : 0x01);
+}
+
+static void
+nv4e_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+{
+ struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_port *port = (void *)base;
+ nv_mask(priv, port->addr, 0x1f, state ? 0x11 : 0x01);
+}
+
+static int
+nv4e_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+ struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00040000);
+}
+
+static int
+nv4e_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+ struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00080000);
+}
+
+static const struct nouveau_i2c_func
+nv4e_i2c_func = {
+ .drive_scl = nv4e_i2c_drive_scl,
+ .drive_sda = nv4e_i2c_drive_sda,
+ .sense_scl = nv4e_i2c_sense_scl,
+ .sense_sda = nv4e_i2c_sense_sda,
+};
+
+static int
+nv4e_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv4e_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.func = &nv4e_i2c_func;
+ port->addr = 0x600800 + info->drive;
+ return 0;
+}
+
+static struct nouveau_oclass
+nv4e_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV4E_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv4e_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nv4e_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv4e_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nv4e_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv4e_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0x4e),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv4e_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
new file mode 100644
index 000000000000..378dfa324e5f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+void
+nv50_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ if (state) port->state |= 0x01;
+ else port->state &= 0xfe;
+ nv_wr32(priv, port->addr, port->state);
+}
+
+void
+nv50_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ if (state) port->state |= 0x02;
+ else port->state &= 0xfd;
+ nv_wr32(priv, port->addr, port->state);
+}
+
+int
+nv50_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00000001);
+}
+
+int
+nv50_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00000002);
+}
+
+static const struct nouveau_i2c_func
+nv50_i2c_func = {
+ .drive_scl = nv50_i2c_drive_scl,
+ .drive_sda = nv50_i2c_drive_sda,
+ .sense_scl = nv50_i2c_sense_scl,
+ .sense_sda = nv50_i2c_sense_sda,
+};
+
+const u32 nv50_i2c_addr[] = {
+ 0x00e138, 0x00e150, 0x00e168, 0x00e180,
+ 0x00e254, 0x00e274, 0x00e764, 0x00e780,
+ 0x00e79c, 0x00e7b8
+};
+const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr);
+
+static int
+nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ if (info->drive >= nv50_i2c_addr_nr)
+ return -EINVAL;
+
+ port->base.func = &nv50_i2c_func;
+ port->state = 0x00000007;
+ port->addr = nv50_i2c_addr[info->drive];
+ return 0;
+}
+
+int
+nv50_i2c_port_init(struct nouveau_object *object)
+{
+ struct nv50_i2c_priv *priv = (void *)object->engine;
+ struct nv50_i2c_port *port = (void *)object;
+ nv_wr32(priv, port->addr, port->state);
+ return nouveau_i2c_port_init(&port->base);
+}
+
+static struct nouveau_oclass
+nv50_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = nv50_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nv50_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nv50_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
new file mode 100644
index 000000000000..4e5ba48ebf5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
@@ -0,0 +1,32 @@
+#ifndef __NV50_I2C_H__
+#define __NV50_I2C_H__
+
+#include <subdev/i2c.h>
+
+struct nv50_i2c_priv {
+ struct nouveau_i2c base;
+};
+
+struct nv50_i2c_port {
+ struct nouveau_i2c_port base;
+ u32 addr;
+ u32 ctrl;
+ u32 data;
+ u32 state;
+};
+
+extern const u32 nv50_i2c_addr[];
+extern const int nv50_i2c_addr_nr;
+int nv50_i2c_port_init(struct nouveau_object *);
+int nv50_i2c_sense_scl(struct nouveau_i2c_port *);
+int nv50_i2c_sense_sda(struct nouveau_i2c_port *);
+void nv50_i2c_drive_scl(struct nouveau_i2c_port *, int state);
+void nv50_i2c_drive_sda(struct nouveau_i2c_port *, int state);
+
+int nv94_aux_port_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nv94_i2c_acquire(struct nouveau_i2c_port *);
+void nv94_i2c_release(struct nouveau_i2c_port *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
new file mode 100644
index 000000000000..61b771670bfe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
+#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
+
+static void
+auxch_fini(struct nouveau_i2c *aux, int ch)
+{
+ nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+auxch_init(struct nouveau_i2c *aux, int ch)
+{
+ const u32 unksel = 1; /* nfi which to use, or if it matters.. */
+ const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+ const u32 urep = unksel ? 0x01000000 : 0x02000000;
+ u32 ctrl, timeout;
+
+ /* wait up to 1ms for any previous transaction to be done... */
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
+ return -EBUSY;
+ }
+ } while (ctrl & 0x03010000);
+
+ /* set some magic, and wait up to 1ms for it to appear */
+ nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("magic wait 0x%08x\n", ctrl);
+ auxch_fini(aux, ch);
+ return -EBUSY;
+ }
+ } while ((ctrl & 0x03000000) != urep);
+
+ return 0;
+}
+
+int
+nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size)
+{
+ struct nouveau_i2c *aux = nouveau_i2c(base);
+ struct nv50_i2c_port *port = (void *)base;
+ u32 ctrl, stat, timeout, retries;
+ u32 xbuf[4] = {};
+ int ch = port->addr;
+ int ret, i;
+
+ AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
+
+ ret = auxch_init(aux, ch);
+ if (ret)
+ goto out;
+
+ stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
+ if (!(stat & 0x10000000)) {
+ AUX_DBG("sink not detected\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (!(type & 1)) {
+ memcpy(xbuf, data, size);
+ for (i = 0; i < 16; i += 4) {
+ AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
+ nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
+ }
+ }
+
+ ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+ ctrl &= ~0x0001f0ff;
+ ctrl |= type << 12;
+ ctrl |= size - 1;
+ nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
+
+ /* retry transaction a number of times on failure... */
+ ret = -EREMOTEIO;
+ for (retries = 0; retries < 32; retries++) {
+ /* reset, and delay a while if this is a retry */
+ nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
+ nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
+ if (retries)
+ udelay(400);
+
+ /* transaction request, wait up to 1ms for it to complete */
+ nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
+
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("tx req timeout 0x%08x\n", ctrl);
+ goto out;
+ }
+ } while (ctrl & 0x00010000);
+
+ /* read status, and check if transaction completed ok */
+ stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
+ if (!(stat & 0x000f0f00)) {
+ ret = 0;
+ break;
+ }
+
+ AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
+ }
+
+ if (type & 1) {
+ for (i = 0; i < 16; i += 4) {
+ xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
+ AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
+ }
+ memcpy(data, xbuf, size);
+ }
+
+out:
+ auxch_fini(aux, ch);
+ return ret;
+}
+
+void
+nv94_i2c_acquire(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ if (port->ctrl) {
+ nv_mask(priv, port->ctrl + 0x0c, 0x00000001, 0x00000000);
+ nv_mask(priv, port->ctrl + 0x00, 0x0000f003, port->data);
+ }
+}
+
+void
+nv94_i2c_release(struct nouveau_i2c_port *base)
+{
+}
+
+static const struct nouveau_i2c_func
+nv94_i2c_func = {
+ .acquire = nv94_i2c_acquire,
+ .release = nv94_i2c_release,
+ .drive_scl = nv50_i2c_drive_scl,
+ .drive_sda = nv50_i2c_drive_sda,
+ .sense_scl = nv50_i2c_sense_scl,
+ .sense_sda = nv50_i2c_sense_sda,
+};
+
+static int
+nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ if (info->drive >= nv50_i2c_addr_nr)
+ return -EINVAL;
+
+ port->base.func = &nv94_i2c_func;
+ port->state = 7;
+ port->addr = nv50_i2c_addr[info->drive];
+ if (info->share != DCB_I2C_UNUSED) {
+ port->ctrl = 0x00e500 + (info->share * 0x50);
+ port->data = 0x0000e001;
+ }
+ return 0;
+}
+
+static const struct nouveau_i2c_func
+nv94_aux_func = {
+ .acquire = nv94_i2c_acquire,
+ .release = nv94_i2c_release,
+ .aux = nv94_aux,
+};
+
+int
+nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_aux_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.func = &nv94_aux_func;
+ port->addr = info->drive;
+ if (info->share != DCB_I2C_UNUSED) {
+ port->ctrl = 0x00e500 + (info->drive * 0x50);
+ port->data = 0x00002002;
+ }
+
+ return 0;
+}
+
+static struct nouveau_oclass
+nv94_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = nv50_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_aux_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nv94_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nv94_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv94_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0x94),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
new file mode 100644
index 000000000000..f761b8a610f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static int
+nvd0_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00000010);
+}
+
+static int
+nvd0_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00000020);
+}
+
+static const struct nouveau_i2c_func
+nvd0_i2c_func = {
+ .acquire = nv94_i2c_acquire,
+ .release = nv94_i2c_release,
+ .drive_scl = nv50_i2c_drive_scl,
+ .drive_sda = nv50_i2c_drive_sda,
+ .sense_scl = nvd0_i2c_sense_scl,
+ .sense_sda = nvd0_i2c_sense_sda,
+};
+
+static int
+nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.func = &nvd0_i2c_func;
+ port->state = 0x00000007;
+ port->addr = 0x00d014 + (info->drive * 0x20);
+ if (info->share != DCB_I2C_UNUSED) {
+ port->ctrl = 0x00e500 + (info->share * 0x50);
+ port->data = 0x0000e001;
+ }
+ return 0;
+}
+
+static struct nouveau_oclass
+nvd0_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = nv50_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_aux_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nvd0_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nvd0_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nvd0_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 23ebe477a6f0..89da8fa7ea0f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -37,7 +37,7 @@ nv04_mc_intr[] = {
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */
{ 0x02000000, NVDEV_ENGINE_DISP }, /* NV11- PCRTC1 */
- { 0x10000000, NVDEV_SUBDEV_GPIO }, /* PBUS */
+ { 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 8d759f830323..5965add6daee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -38,6 +38,7 @@ nv50_mc_intr[] = {
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{ 0x0000d101, NVDEV_SUBDEV_FB },
{},
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index ceb5c83f9459..3a80b29dce0f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -35,10 +35,12 @@ nv98_mc_intr[] = {
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */
{ 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
{ 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{ 0x0040d101, NVDEV_SUBDEV_FB },
{},
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 92796682722d..42bbf72023a8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -36,11 +36,13 @@ nvc0_mc_intr[] = {
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00040000, NVDEV_SUBDEV_THERM },
{ 0x00020000, NVDEV_ENGINE_VP },
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x02000000, NVDEV_SUBDEV_LTCG },
{ 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x40000000, NVDEV_SUBDEV_IBUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{},
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
index 839ca1edc132..4bde7f7f7b81 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
@@ -156,15 +156,15 @@ mxms_foreach(struct nouveau_mxm *mxm, u8 types,
nv_debug(mxm, "%4s: ", mxms_desc_name[type]);
for (j = headerlen - 1; j >= 0; j--)
- printk("%02x", dump[j]);
- printk("\n");
+ pr_cont("%02x", dump[j]);
+ pr_cont("\n");
dump += headerlen;
for (i = 0; i < entries; i++, dump += recordlen) {
nv_debug(mxm, " ");
for (j = recordlen - 1; j >= 0; j--)
- printk("%02x", dump[j]);
- printk("\n");
+ pr_cont("%02x", dump[j]);
+ pr_cont("\n");
}
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index 1674c74a76c8..f794dc89a3b2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -29,6 +29,134 @@
#include "priv.h"
+static int
+nouveau_therm_update_trip(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_therm_trip_point *trip = priv->fan->bios.trip,
+ *cur_trip = NULL,
+ *last_trip = priv->last_trip;
+ u8 temp = therm->temp_get(therm);
+ u16 duty, i;
+
+ /* look for the trip point corresponding to the current temperature */
+ cur_trip = NULL;
+ for (i = 0; i < priv->fan->bios.nr_fan_trip; i++) {
+ if (temp >= trip[i].temp)
+ cur_trip = &trip[i];
+ }
+
+ /* account for the hysteresis cycle */
+ if (last_trip && temp <= (last_trip->temp) &&
+ temp > (last_trip->temp - last_trip->hysteresis))
+ cur_trip = last_trip;
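+ /* e.g. a trip at 60C with 5C hysteresis stays selected until the
+ * temperature drops back below 55C, which avoids rapid fan toggling
+ */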
+
+ if (cur_trip) {
+ duty = cur_trip->fan_duty;
+ priv->last_trip = cur_trip;
+ } else {
+ duty = 0;
+ priv->last_trip = NULL;
+ }
+
+ return duty;
+}
+
+static int
+nouveau_therm_update_linear(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ u8 linear_min_temp = priv->fan->bios.linear_min_temp;
+ u8 linear_max_temp = priv->fan->bios.linear_max_temp;
+ u8 temp = therm->temp_get(therm);
+ u16 duty;
+
+ /* handle the non-linear part first */
+ if (temp < linear_min_temp)
+ return priv->fan->bios.min_duty;
+ else if (temp > linear_max_temp)
+ return priv->fan->bios.max_duty;
+
+ /* we are in the linear zone */
+ duty = (temp - linear_min_temp);
+ duty *= (priv->fan->bios.max_duty - priv->fan->bios.min_duty);
+ duty /= (linear_max_temp - linear_min_temp);
+ duty += priv->fan->bios.min_duty;
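+ /* e.g. min_duty=30, max_duty=100 and a 40C..85C linear range give
+ * 30 + (60 - 40) * (100 - 30) / (85 - 40) = 61% at 60C
+ */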
+
+ return duty;
+}
+
+static void
+nouveau_therm_update(struct nouveau_therm *therm, int mode)
+{
+ struct nouveau_timer *ptimer = nouveau_timer(therm);
+ struct nouveau_therm_priv *priv = (void *)therm;
+ unsigned long flags;
+ int duty;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (mode < 0)
+ mode = priv->mode;
+ priv->mode = mode;
+
+ switch (mode) {
+ case NOUVEAU_THERM_CTRL_MANUAL:
+ duty = nouveau_therm_fan_get(therm);
+ if (duty < 0)
+ duty = 100;
+ break;
+ case NOUVEAU_THERM_CTRL_AUTO:
+ if (priv->fan->bios.nr_fan_trip)
+ duty = nouveau_therm_update_trip(therm);
+ else
+ duty = nouveau_therm_update_linear(therm);
+ break;
+ case NOUVEAU_THERM_CTRL_NONE:
+ default:
+ goto done;
+ }
+
+ nv_debug(therm, "FAN target request: %d%%\n", duty);
+ nouveau_therm_fan_set(therm, (mode != NOUVEAU_THERM_CTRL_AUTO), duty);
+
+done:
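+ /* in automatic mode, re-evaluate the fan speed roughly once a second */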
+ if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
+ ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nouveau_therm_alarm(struct nouveau_alarm *alarm)
+{
+ struct nouveau_therm_priv *priv =
+ container_of(alarm, struct nouveau_therm_priv, alarm);
+ nouveau_therm_update(&priv->base, -1);
+}
+
+int
+nouveau_therm_mode(struct nouveau_therm *therm, int mode)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_device *device = nv_device(therm);
+ static const char *name[] = {
+ "disabled",
+ "manual",
+ "automatic"
+ };
+
+ /* The default PDAEMON ucode interferes with fan management */
+ if ((mode >= ARRAY_SIZE(name)) ||
+ (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0))
+ return -EINVAL;
+
+ if (priv->mode == mode)
+ return 0;
+
+ nv_info(therm, "Thermal management: %s\n", name[mode]);
+ nouveau_therm_update(therm, mode);
+ return 0;
+}
+
int
nouveau_therm_attr_get(struct nouveau_therm *therm,
enum nouveau_therm_attr_type type)
@@ -37,11 +165,11 @@ nouveau_therm_attr_get(struct nouveau_therm *therm,
switch (type) {
case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
- return priv->bios_fan.min_duty;
+ return priv->fan->bios.min_duty;
case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
- return priv->bios_fan.max_duty;
+ return priv->fan->bios.max_duty;
case NOUVEAU_THERM_ATTR_FAN_MODE:
- return priv->fan.mode;
+ return priv->mode;
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
return priv->bios_sensor.thrs_fan_boost.temp;
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
@@ -73,42 +201,50 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
if (value < 0)
value = 0;
- if (value > priv->bios_fan.max_duty)
- value = priv->bios_fan.max_duty;
- priv->bios_fan.min_duty = value;
+ if (value > priv->fan->bios.max_duty)
+ value = priv->fan->bios.max_duty;
+ priv->fan->bios.min_duty = value;
return 0;
case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
if (value < 0)
value = 0;
- if (value < priv->bios_fan.min_duty)
- value = priv->bios_fan.min_duty;
- priv->bios_fan.max_duty = value;
+ if (value < priv->fan->bios.min_duty)
+ value = priv->fan->bios.min_duty;
+ priv->fan->bios.max_duty = value;
return 0;
case NOUVEAU_THERM_ATTR_FAN_MODE:
- return nouveau_therm_fan_set_mode(therm, value);
+ return nouveau_therm_mode(therm, value);
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
priv->bios_sensor.thrs_fan_boost.temp = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
priv->bios_sensor.thrs_fan_boost.hysteresis = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
priv->bios_sensor.thrs_down_clock.temp = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
priv->bios_sensor.thrs_down_clock.hysteresis = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
priv->bios_sensor.thrs_critical.temp = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
priv->bios_sensor.thrs_critical.hysteresis = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
priv->bios_sensor.thrs_shutdown.temp = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
priv->bios_sensor.thrs_shutdown.hysteresis = value;
+ priv->sensor.program_alarms(therm);
return 0;
}
@@ -116,7 +252,7 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
}
int
-nouveau_therm_init(struct nouveau_object *object)
+_nouveau_therm_init(struct nouveau_object *object)
{
struct nouveau_therm *therm = (void *)object;
struct nouveau_therm_priv *priv = (void *)therm;
@@ -126,19 +262,69 @@ nouveau_therm_init(struct nouveau_object *object)
if (ret)
return ret;
- if (priv->fan.percent >= 0)
- therm->fan_set(therm, priv->fan.percent);
-
+ if (priv->suspend >= 0)
+ nouveau_therm_mode(therm, priv->mode);
+ priv->sensor.program_alarms(therm);
return 0;
}
int
-nouveau_therm_fini(struct nouveau_object *object, bool suspend)
+_nouveau_therm_fini(struct nouveau_object *object, bool suspend)
{
struct nouveau_therm *therm = (void *)object;
struct nouveau_therm_priv *priv = (void *)therm;
- priv->fan.percent = therm->fan_get(therm);
+ if (suspend) {
+ priv->suspend = priv->mode;
+ priv->mode = NOUVEAU_THERM_CTRL_NONE;
+ }
return nouveau_subdev_fini(&therm->base, suspend);
}
+
+int
+nouveau_therm_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ int length, void **pobject)
+{
+ struct nouveau_therm_priv *priv;
+ int ret;
+
+ ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PTHERM",
+ "therm", length, pobject);
+ priv = *pobject;
+ if (ret)
+ return ret;
+
+ nouveau_alarm_init(&priv->alarm, nouveau_therm_alarm);
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->sensor.alarm_program_lock);
+
+ priv->base.fan_get = nouveau_therm_fan_user_get;
+ priv->base.fan_set = nouveau_therm_fan_user_set;
+ priv->base.fan_sense = nouveau_therm_fan_sense;
+ priv->base.attr_get = nouveau_therm_attr_get;
+ priv->base.attr_set = nouveau_therm_attr_set;
+ priv->mode = priv->suspend = -1; /* undefined */
+ return 0;
+}
+
+int
+nouveau_therm_preinit(struct nouveau_therm *therm)
+{
+ nouveau_therm_ic_ctor(therm);
+ nouveau_therm_sensor_ctor(therm);
+ nouveau_therm_fan_ctor(therm);
+
+ nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE);
+ return 0;
+}
+
+void
+_nouveau_therm_dtor(struct nouveau_object *object)
+{
+ struct nouveau_therm_priv *priv = (void *)object;
+ kfree(priv->fan);
+ nouveau_subdev_destroy(&priv->base.base);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 523178685180..c728380d3d62 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -27,90 +27,107 @@
#include <core/object.h>
#include <core/device.h>
+
#include <subdev/gpio.h>
#include <subdev/timer.h>
-int
-nouveau_therm_fan_get(struct nouveau_therm *therm)
+static int
+nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
{
+ struct nouveau_therm *therm = fan->parent;
struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_gpio *gpio = nouveau_gpio(therm);
- struct dcb_gpio_func func;
- int card_type = nv_device(therm)->card_type;
- u32 divs, duty;
- int ret;
-
- if (!priv->fan.pwm_get)
- return -ENODEV;
+ struct nouveau_timer *ptimer = nouveau_timer(priv);
+ unsigned long flags;
+ int ret = 0;
+ int duty;
+
+ /* update target fan speed, restricting to allowed range */
+ spin_lock_irqsave(&fan->lock, flags);
+ if (target < 0)
+ target = fan->percent;
+ target = max_t(u8, target, fan->bios.min_duty);
+ target = min_t(u8, target, fan->bios.max_duty);
+ if (fan->percent != target) {
+ nv_debug(therm, "FAN target: %d\n", target);
+ fan->percent = target;
+ }
- ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
- if (ret == 0) {
- ret = priv->fan.pwm_get(therm, func.line, &divs, &duty);
- if (ret == 0 && divs) {
- divs = max(divs, duty);
- if (card_type <= NV_40 || (func.log[0] & 1))
- duty = divs - duty;
- return (duty * 100) / divs;
- }
+ /* check that we're not already at the target duty cycle */
+ duty = fan->get(therm);
+ if (duty == target)
+ goto done;
+
+ /* smooth out the fanspeed increase/decrease */
+ if (!immediate && duty >= 0) {
+ /* the constant "3" is a rough approximation of NVIDIA's behaviour:
+ * it steps the fan speed towards the target incrementally rather
+ * than jumping straight to it
+ */
+ if (duty < target)
+ duty = min(duty + 3, target);
+ else if (duty > target)
+ duty = max(duty - 3, target);
+ } else {
+ duty = target;
+ }
- return gpio->get(gpio, 0, func.func, func.line) * 100;
+ nv_debug(therm, "FAN update: %d\n", duty);
+ ret = fan->set(therm, duty);
+ if (ret)
+ goto done;
+
+ /* schedule next fan update, if not at target speed already */
+ if (list_empty(&fan->alarm.head) && target != duty) {
+ u16 bump_period = fan->bios.bump_period;
+ u16 slow_down_period = fan->bios.slow_down_period;
+ u64 delay;
+
+ if (duty > target)
+ delay = slow_down_period;
+ else if (duty == target)
+ delay = min(bump_period, slow_down_period);
+ else
+ delay = bump_period;
+
+ ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
}
- return -ENODEV;
+done:
+ spin_unlock_irqrestore(&fan->lock, flags);
+ return ret;
+}
+
+static void
+nouveau_fan_alarm(struct nouveau_alarm *alarm)
+{
+ struct nouveau_fan *fan = container_of(alarm, struct nouveau_fan, alarm);
+ nouveau_fan_update(fan, false, -1);
}
int
-nouveau_therm_fan_set(struct nouveau_therm *therm, int percent)
+nouveau_therm_fan_get(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_gpio *gpio = nouveau_gpio(therm);
- struct dcb_gpio_func func;
- int card_type = nv_device(therm)->card_type;
- u32 divs, duty;
- int ret;
-
- if (priv->fan.mode == FAN_CONTROL_NONE)
- return -EINVAL;
-
- if (!priv->fan.pwm_set)
- return -ENODEV;
-
- if (percent < priv->bios_fan.min_duty)
- percent = priv->bios_fan.min_duty;
- if (percent > priv->bios_fan.max_duty)
- percent = priv->bios_fan.max_duty;
-
- ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
- if (ret == 0) {
- divs = priv->bios_perf_fan.pwm_divisor;
- if (priv->bios_fan.pwm_freq) {
- divs = 1;
- if (priv->fan.pwm_clock)
- divs = priv->fan.pwm_clock(therm);
- divs /= priv->bios_fan.pwm_freq;
- }
-
- duty = ((divs * percent) + 99) / 100;
- if (card_type <= NV_40 || (func.log[0] & 1))
- duty = divs - duty;
-
- ret = priv->fan.pwm_set(therm, func.line, divs, duty);
- return ret;
- }
+ return priv->fan->get(therm);
+}
- return -ENODEV;
+int
+nouveau_therm_fan_set(struct nouveau_therm *therm, bool immediate, int percent)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ return nouveau_fan_update(priv->fan, immediate, percent);
}
int
nouveau_therm_fan_sense(struct nouveau_therm *therm)
{
+ struct nouveau_therm_priv *priv = (void *)therm;
struct nouveau_timer *ptimer = nouveau_timer(therm);
struct nouveau_gpio *gpio = nouveau_gpio(therm);
- struct dcb_gpio_func func;
u32 cycles, cur, prev;
u64 start, end, tach;
- if (gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &func))
+ if (priv->fan->tach.func == DCB_GPIO_UNUSED)
return -ENODEV;
/* Time a complete rotation and extrapolate to RPM:
@@ -118,12 +135,12 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
* We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
*/
start = ptimer->read(ptimer);
- prev = gpio->get(gpio, 0, func.func, func.line);
+ prev = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
cycles = 0;
do {
usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
- cur = gpio->get(gpio, 0, func.func, func.line);
+ cur = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
if (prev != cur) {
if (!start)
start = ptimer->read(ptimer);
@@ -142,34 +159,6 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
}
int
-nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
- enum nouveau_therm_fan_mode mode)
-{
- struct nouveau_therm_priv *priv = (void *)therm;
-
- if (priv->fan.mode == mode)
- return 0;
-
- if (mode < FAN_CONTROL_NONE || mode >= FAN_CONTROL_NR)
- return -EINVAL;
-
- switch (mode)
- {
- case FAN_CONTROL_NONE:
- nv_info(therm, "switch fan to no-control mode\n");
- break;
- case FAN_CONTROL_MANUAL:
- nv_info(therm, "switch fan to manual mode\n");
- break;
- case FAN_CONTROL_NR:
- break;
- }
-
- priv->fan.mode = mode;
- return 0;
-}
-
-int
nouveau_therm_fan_user_get(struct nouveau_therm *therm)
{
return nouveau_therm_fan_get(therm);
@@ -180,55 +169,86 @@ nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent)
{
struct nouveau_therm_priv *priv = (void *)therm;
- if (priv->fan.mode != FAN_CONTROL_MANUAL)
+ if (priv->mode != NOUVEAU_THERM_CTRL_MANUAL)
return -EINVAL;
- return nouveau_therm_fan_set(therm, percent);
+ return nouveau_therm_fan_set(therm, true, percent);
}
-void
+static void
nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
- priv->bios_fan.pwm_freq = 0;
- priv->bios_fan.min_duty = 0;
- priv->bios_fan.max_duty = 100;
+ priv->fan->bios.pwm_freq = 0;
+ priv->fan->bios.min_duty = 0;
+ priv->fan->bios.max_duty = 100;
+ priv->fan->bios.bump_period = 500;
+ priv->fan->bios.slow_down_period = 2000;
+ priv->fan->bios.linear_min_temp = 40;
+ priv->fan->bios.linear_max_temp = 85;
}
-
static void
nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
- if (priv->bios_fan.min_duty > 100)
- priv->bios_fan.min_duty = 100;
- if (priv->bios_fan.max_duty > 100)
- priv->bios_fan.max_duty = 100;
+ if (priv->fan->bios.min_duty > 100)
+ priv->fan->bios.min_duty = 100;
+ if (priv->fan->bios.max_duty > 100)
+ priv->fan->bios.max_duty = 100;
- if (priv->bios_fan.min_duty > priv->bios_fan.max_duty)
- priv->bios_fan.min_duty = priv->bios_fan.max_duty;
-}
-
-int nouveau_fan_pwm_clock_dummy(struct nouveau_therm *therm)
-{
- return 1;
+ if (priv->fan->bios.min_duty > priv->fan->bios.max_duty)
+ priv->fan->bios.min_duty = priv->fan->bios.max_duty;
}
int
nouveau_therm_fan_ctor(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_gpio *gpio = nouveau_gpio(therm);
struct nouveau_bios *bios = nouveau_bios(therm);
+ struct dcb_gpio_func func;
+ int ret;
+ /* attempt to locate a drivable fan, and determine control method */
+ ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
+ if (ret == 0) {
+ if (func.log[0] & DCB_GPIO_LOG_DIR_IN) {
+ nv_debug(therm, "GPIO_FAN is in input mode\n");
+ ret = -EINVAL;
+ } else {
+ ret = nouveau_fanpwm_create(therm, &func);
+ if (ret != 0)
+ ret = nouveau_fantog_create(therm, &func);
+ }
+ }
+
+ /* no controllable fan found, create a dummy fan module */
+ if (ret != 0) {
+ ret = nouveau_fannil_create(therm);
+ if (ret)
+ return ret;
+ }
+
+ nv_info(therm, "FAN control: %s\n", priv->fan->type);
+
+ /* attempt to detect a tachometer connection */
+ ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
+ if (ret)
+ priv->fan->tach.func = DCB_GPIO_UNUSED;
+
+ /* initialise fan bump/slow update handling */
+ priv->fan->parent = therm;
+ nouveau_alarm_init(&priv->fan->alarm, nouveau_fan_alarm);
+ spin_lock_init(&priv->fan->lock);
+
+ /* other random init... */
nouveau_therm_fan_set_defaults(therm);
- nvbios_perf_fan_parse(bios, &priv->bios_perf_fan);
- if (nvbios_therm_fan_parse(bios, &priv->bios_fan))
+ nvbios_perf_fan_parse(bios, &priv->fan->perf);
+ if (nvbios_therm_fan_parse(bios, &priv->fan->bios))
nv_error(therm, "parsing the thermal table failed\n");
nouveau_therm_fan_safety_checks(therm);
-
- nouveau_therm_fan_set_mode(therm, FAN_CONTROL_NONE);
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c
new file mode 100644
index 000000000000..b78c182e1d51
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+static int
+nouveau_fannil_get(struct nouveau_therm *therm)
+{
+ return -ENODEV;
+}
+
+static int
+nouveau_fannil_set(struct nouveau_therm *therm, int percent)
+{
+ return -ENODEV;
+}
+
+int
+nouveau_fannil_create(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fan *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ tpriv->fan = priv;
+ if (!priv)
+ return -ENOMEM;
+
+ priv->type = "none / external";
+ priv->get = nouveau_fannil_get;
+ priv->set = nouveau_fannil_set;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
new file mode 100644
index 000000000000..5f71db8e8992
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ * Martin Peres
+ */
+
+#include <core/option.h>
+#include <subdev/gpio.h>
+
+#include "priv.h"
+
+struct nouveau_fanpwm_priv {
+ struct nouveau_fan base;
+ struct dcb_gpio_func func;
+};
+
+static int
+nouveau_fanpwm_get(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan;
+ struct nouveau_gpio *gpio = nouveau_gpio(therm);
+ int card_type = nv_device(therm)->card_type;
+ u32 divs, duty;
+ int ret;
+
+ ret = therm->pwm_get(therm, priv->func.line, &divs, &duty);
+ if (ret == 0 && divs) {
+ divs = max(divs, duty);
+ if (card_type <= NV_40 || (priv->func.log[0] & 1))
+ duty = divs - duty;
+ return (duty * 100) / divs;
+ }
+
+ return gpio->get(gpio, 0, priv->func.func, priv->func.line) * 100;
+}
+
+static int
+nouveau_fanpwm_set(struct nouveau_therm *therm, int percent)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan;
+ int card_type = nv_device(therm)->card_type;
+ u32 divs, duty;
+ int ret;
+
+ divs = priv->base.perf.pwm_divisor;
+ if (priv->base.bios.pwm_freq) {
+ divs = 1;
+ if (therm->pwm_clock)
+ divs = therm->pwm_clock(therm);
+ divs /= priv->base.bios.pwm_freq;
+ }
+
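+ /* round up so any non-zero percentage maps to a non-zero duty */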
+ duty = ((divs * percent) + 99) / 100;
+ if (card_type <= NV_40 || (priv->func.log[0] & 1))
+ duty = divs - duty;
+
+ ret = therm->pwm_set(therm, priv->func.line, divs, duty);
+ if (ret == 0)
+ ret = therm->pwm_ctrl(therm, priv->func.line, true);
+ return ret;
+}
+
+int
+nouveau_fanpwm_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
+{
+ struct nouveau_device *device = nv_device(therm);
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fanpwm_priv *priv;
+ u32 divs, duty;
+
+ if (!nouveau_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
+ !therm->pwm_ctrl ||
+ therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
+ return -ENODEV;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ tpriv->fan = &priv->base;
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.type = "PWM";
+ priv->base.get = nouveau_fanpwm_get;
+ priv->base.set = nouveau_fanpwm_set;
+ priv->func = *func;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
new file mode 100644
index 000000000000..e601773ee475
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2012 The Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include "priv.h"
+
+#include <core/object.h>
+#include <core/device.h>
+
+#include <subdev/gpio.h>
+#include <subdev/timer.h>
+
+struct nouveau_fantog_priv {
+ struct nouveau_fan base;
+ struct nouveau_alarm alarm;
+ spinlock_t lock;
+ u32 period_us;
+ u32 percent;
+ struct dcb_gpio_func func;
+};
+
+static void
+nouveau_fantog_update(struct nouveau_fantog_priv *priv, int percent)
+{
+ struct nouveau_therm_priv *tpriv = (void *)priv->base.parent;
+ struct nouveau_timer *ptimer = nouveau_timer(tpriv);
+ struct nouveau_gpio *gpio = nouveau_gpio(tpriv);
+ unsigned long flags;
+ int duty;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (percent < 0)
+ percent = priv->percent;
+ priv->percent = percent;
+
+ duty = !gpio->get(gpio, 0, DCB_GPIO_FAN, 0xff);
+ gpio->set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
+
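+ /* software pwm: schedule the next toggle so the gpio stays high for
+ * 'percent' of each period and low for the remainder
+ */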
+ if (list_empty(&priv->alarm.head) && percent != (duty * 100)) {
+ u64 next_change = (percent * priv->period_us) / 100;
+ if (!duty)
+ next_change = priv->period_us - next_change;
+ ptimer->alarm(ptimer, next_change * 1000, &priv->alarm);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nouveau_fantog_alarm(struct nouveau_alarm *alarm)
+{
+ struct nouveau_fantog_priv *priv =
+ container_of(alarm, struct nouveau_fantog_priv, alarm);
+ nouveau_fantog_update(priv, -1);
+}
+
+static int
+nouveau_fantog_get(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fantog_priv *priv = (void *)tpriv->fan;
+ return priv->percent;
+}
+
+static int
+nouveau_fantog_set(struct nouveau_therm *therm, int percent)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fantog_priv *priv = (void *)tpriv->fan;
+ if (therm->pwm_ctrl)
+ therm->pwm_ctrl(therm, priv->func.line, false);
+ nouveau_fantog_update(priv, percent);
+ return 0;
+}
+
+int
+nouveau_fantog_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fantog_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ tpriv->fan = &priv->base;
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.type = "toggle";
+ priv->base.get = nouveau_fantog_get;
+ priv->base.set = nouveau_fantog_set;
+ nouveau_alarm_init(&priv->alarm, nouveau_fantog_alarm);
+ priv->period_us = 100000; /* 10Hz */
+ priv->percent = 100;
+ priv->func = *func;
+ spin_lock_init(&priv->lock);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e512ff0aae60..e24090bac195 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -31,7 +31,7 @@ static bool
probe_monitoring_device(struct nouveau_i2c_port *i2c,
struct i2c_board_info *info)
{
- struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c->i2c);
+ struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
struct i2c_client *client;
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
@@ -53,6 +53,31 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
return true;
}
+static struct i2c_board_info
+nv_board_infos[] = {
+ { I2C_BOARD_INFO("w83l785ts", 0x2d) },
+ { I2C_BOARD_INFO("w83781d", 0x2d) },
+ { I2C_BOARD_INFO("adt7473", 0x2e) },
+ { I2C_BOARD_INFO("adt7473", 0x2d) },
+ { I2C_BOARD_INFO("adt7473", 0x2c) },
+ { I2C_BOARD_INFO("f75375", 0x2e) },
+ { I2C_BOARD_INFO("lm99", 0x4c) },
+ { I2C_BOARD_INFO("lm90", 0x4c) },
+ { I2C_BOARD_INFO("lm90", 0x4d) },
+ { I2C_BOARD_INFO("adm1021", 0x18) },
+ { I2C_BOARD_INFO("adm1021", 0x19) },
+ { I2C_BOARD_INFO("adm1021", 0x1a) },
+ { I2C_BOARD_INFO("adm1021", 0x29) },
+ { I2C_BOARD_INFO("adm1021", 0x2a) },
+ { I2C_BOARD_INFO("adm1021", 0x2b) },
+ { I2C_BOARD_INFO("adm1021", 0x4c) },
+ { I2C_BOARD_INFO("adm1021", 0x4d) },
+ { I2C_BOARD_INFO("adm1021", 0x4e) },
+ { I2C_BOARD_INFO("lm63", 0x18) },
+ { I2C_BOARD_INFO("lm63", 0x4e) },
+ { }
+};
+
void
nouveau_therm_ic_ctor(struct nouveau_therm *therm)
{
@@ -60,29 +85,6 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
struct nouveau_bios *bios = nouveau_bios(therm);
struct nouveau_i2c *i2c = nouveau_i2c(therm);
struct nvbios_extdev_func extdev_entry;
- struct i2c_board_info info[] = {
- { I2C_BOARD_INFO("w83l785ts", 0x2d) },
- { I2C_BOARD_INFO("w83781d", 0x2d) },
- { I2C_BOARD_INFO("adt7473", 0x2e) },
- { I2C_BOARD_INFO("adt7473", 0x2d) },
- { I2C_BOARD_INFO("adt7473", 0x2c) },
- { I2C_BOARD_INFO("f75375", 0x2e) },
- { I2C_BOARD_INFO("lm99", 0x4c) },
- { I2C_BOARD_INFO("lm90", 0x4c) },
- { I2C_BOARD_INFO("lm90", 0x4d) },
- { I2C_BOARD_INFO("adm1021", 0x18) },
- { I2C_BOARD_INFO("adm1021", 0x19) },
- { I2C_BOARD_INFO("adm1021", 0x1a) },
- { I2C_BOARD_INFO("adm1021", 0x29) },
- { I2C_BOARD_INFO("adm1021", 0x2a) },
- { I2C_BOARD_INFO("adm1021", 0x2b) },
- { I2C_BOARD_INFO("adm1021", 0x4c) },
- { I2C_BOARD_INFO("adm1021", 0x4d) },
- { I2C_BOARD_INFO("adm1021", 0x4e) },
- { I2C_BOARD_INFO("lm63", 0x18) },
- { I2C_BOARD_INFO("lm63", 0x4e) },
- { }
- };
if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
struct i2c_board_info board[] = {
@@ -111,6 +113,6 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
/* The vbios doesn't provide the address of an existing monitoring
device. Let's try our static list.
*/
- i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", info,
- probe_monitoring_device);
+ i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
+ nv_board_infos, probe_monitoring_device);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
index fcf2cfe731d6..0f5363edb964 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -25,6 +25,10 @@
#include "priv.h"
+struct nv40_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
static int
nv40_sensor_setup(struct nouveau_therm *therm)
{
@@ -34,6 +38,7 @@ nv40_sensor_setup(struct nouveau_therm *therm)
if (device->chipset >= 0x46) {
nv_mask(therm, 0x15b8, 0x80000000, 0);
nv_wr32(therm, 0x15b0, 0x80003fff);
+ mdelay(10); /* wait for the temperature to stabilize */
return nv_rd32(therm, 0x15b4) & 0x3fff;
} else {
nv_wr32(therm, 0x15b0, 0xff);
@@ -75,7 +80,20 @@ nv40_temp_get(struct nouveau_therm *therm)
return core_temp;
}
-int
+static int
+nv40_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+ u32 mask = enable ? 0x80000000 : 0x00000000;
+ if (line == 2) nv_mask(therm, 0x0010f0, 0x80000000, mask);
+ else if (line == 9) nv_mask(therm, 0x0015f4, 0x80000000, mask);
+ else {
+ nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int
nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
{
if (line == 2) {
@@ -101,15 +119,15 @@ nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
return -EINVAL;
}
-int
+static int
nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
{
if (line == 2) {
- nv_wr32(therm, 0x0010f0, 0x80000000 | (duty << 16) | divs);
+ nv_mask(therm, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
} else
if (line == 9) {
nv_wr32(therm, 0x0015f8, divs);
- nv_wr32(therm, 0x0015f4, duty | 0x80000000);
+ nv_mask(therm, 0x0015f4, 0x7fffffff, duty);
} else {
nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
return -ENODEV;
@@ -118,37 +136,51 @@ nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
return 0;
}
+static void
+nv40_therm_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_therm *therm = nouveau_therm(subdev);
+ uint32_t stat = nv_rd32(therm, 0x1100);
+
+ /* processing: nothing is handled yet, the IRQ is just acked and reported */
+
+ /* ack all IRQs */
+ nv_wr32(therm, 0x1100, 0x70000);
+
+ nv_error(therm, "THERM received an IRQ: stat = %x\n", stat);
+}
+
static int
nv40_therm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nouveau_therm_priv *priv;
- struct nouveau_therm *therm;
+ struct nv40_therm_priv *priv;
int ret;
ret = nouveau_therm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
- therm = (void *) priv;
if (ret)
return ret;
- nouveau_therm_ic_ctor(therm);
- nouveau_therm_sensor_ctor(therm);
- nouveau_therm_fan_ctor(therm);
+ priv->base.base.pwm_ctrl = nv40_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nv40_fan_pwm_get;
+ priv->base.base.pwm_set = nv40_fan_pwm_set;
+ priv->base.base.temp_get = nv40_temp_get;
+ priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+ nv_subdev(priv)->intr = nv40_therm_intr;
+ return nouveau_therm_preinit(&priv->base.base);
+}
- priv->fan.pwm_get = nv40_fan_pwm_get;
- priv->fan.pwm_set = nv40_fan_pwm_set;
+static int
+nv40_therm_init(struct nouveau_object *object)
+{
+ struct nouveau_therm *therm = (void *)object;
- therm->temp_get = nv40_temp_get;
- therm->fan_get = nouveau_therm_fan_user_get;
- therm->fan_set = nouveau_therm_fan_user_set;
- therm->fan_sense = nouveau_therm_fan_sense;
- therm->attr_get = nouveau_therm_attr_get;
- therm->attr_set = nouveau_therm_attr_set;
+ nv40_sensor_setup(therm);
- return 0;
+ return _nouveau_therm_init(object);
}
struct nouveau_oclass
@@ -157,7 +189,7 @@ nv40_therm_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv40_therm_ctor,
.dtor = _nouveau_therm_dtor,
- .init = nouveau_therm_init,
- .fini = nouveau_therm_fini,
+ .init = nv40_therm_init,
+ .fini = _nouveau_therm_fini,
},
-};
\ No newline at end of file
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
index 9360ddd469e7..86632cbd65ce 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -25,6 +25,10 @@
#include "priv.h"
+struct nv50_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
static int
pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
{
@@ -51,6 +55,16 @@ pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
}
int
+nv50_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+ u32 data = enable ? 0x00000001 : 0x00000000;
+ int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
+ if (ret == 0)
+ nv_mask(therm, ctrl, 0x00010001 << line, data << line);
+ return ret;
+}
+
+int
nv50_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
{
int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
@@ -73,7 +87,6 @@ nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
if (ret)
return ret;
- nv_mask(therm, ctrl, 0x00010001 << line, 0x00000001 << line);
nv_wr32(therm, 0x00e114 + (id * 8), divs);
nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000);
return 0;
@@ -111,38 +124,178 @@ nv50_temp_get(struct nouveau_therm *therm)
return nv_rd32(therm, 0x20400);
}
+static void
+nv50_therm_program_alarms(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+ /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
+ nv_wr32(therm, 0x20000, 0x000003ff);
+
+ /* shutdown: The computer should be shutdown when reached */
+ nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
+ nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
+
+ /* THRS_1 : fan boost */
+ nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
+
+ /* THRS_2 : critical */
+ nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
+
+ /* THRS_4 : down clock */
+ nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
+ nv_info(therm,
+ "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+ sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+ sensor->thrs_down_clock.temp,
+ sensor->thrs_down_clock.hysteresis,
+ sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+ sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+
+}
+
+/* must be called with alarm_program_lock taken ! */
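+/* emulate hysteresis by reprogramming the threshold register to the other
+ * edge (temp, or temp - hysteresis) each time the current one is crossed
+ */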
+static void
+nv50_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
+ uint32_t thrs_reg, u8 status_bit,
+ const struct nvbios_therm_threshold *thrs,
+ enum nouveau_therm_thrs thrs_name)
+{
+ enum nouveau_therm_thrs_direction direction;
+ enum nouveau_therm_thrs_state prev_state, new_state;
+ int temp, cur;
+
+ prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+ temp = nv_rd32(therm, thrs_reg);
+
+ /* program the next threshold */
+ if (temp == thrs->temp) {
+ nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
+ new_state = NOUVEAU_THERM_THRS_HIGHER;
+ } else {
+ nv_wr32(therm, thrs_reg, thrs->temp);
+ new_state = NOUVEAU_THERM_THRS_LOWER;
+ }
+
+ /* fix the state (in case someone reprogrammed the alarms) */
+ cur = therm->temp_get(therm);
+ if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
+ new_state = NOUVEAU_THERM_THRS_HIGHER;
+ else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
+ cur < thrs->temp - thrs->hysteresis)
+ new_state = NOUVEAU_THERM_THRS_LOWER;
+ nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+
+ /* find the direction */
+ if (prev_state < new_state)
+ direction = NOUVEAU_THERM_THRS_RISING;
+ else if (prev_state > new_state)
+ direction = NOUVEAU_THERM_THRS_FALLING;
+ else
+ return;
+
+ /* advertise a change in direction */
+ nouveau_therm_sensor_event(therm, thrs_name, direction);
+}
+
+static void
+nv50_therm_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_therm *therm = nouveau_therm(subdev);
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ unsigned long flags;
+ uint32_t intr;
+
+ spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+ intr = nv_rd32(therm, 0x20100);
+
+ /* THRS_4: downclock */
+ if (intr & 0x002) {
+ nv50_therm_threshold_hyst_emulation(therm, 0x20414, 24,
+ &sensor->thrs_down_clock,
+ NOUVEAU_THERM_THRS_DOWNCLOCK);
+ intr &= ~0x002;
+ }
+
+ /* shutdown */
+ if (intr & 0x004) {
+ nv50_therm_threshold_hyst_emulation(therm, 0x20480, 20,
+ &sensor->thrs_shutdown,
+ NOUVEAU_THERM_THRS_SHUTDOWN);
+ intr &= ~0x004;
+ }
+
+ /* THRS_1 : fan boost */
+ if (intr & 0x008) {
+ nv50_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
+ &sensor->thrs_fan_boost,
+ NOUVEAU_THERM_THRS_FANBOOST);
+ intr &= ~0x008;
+ }
+
+ /* THRS_2 : critical */
+ if (intr & 0x010) {
+ nv50_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
+ &sensor->thrs_critical,
+ NOUVEAU_THERM_THRS_CRITICAL);
+ intr &= ~0x010;
+ }
+
+ if (intr)
+ nv_error(therm, "unhandled intr 0x%08x\n", intr);
+
+ /* ACK everything */
+ nv_wr32(therm, 0x20100, 0xffffffff);
+ nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+}
+
static int
nv50_therm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nouveau_therm_priv *priv;
- struct nouveau_therm *therm;
+ struct nv50_therm_priv *priv;
int ret;
ret = nouveau_therm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
- therm = (void *) priv;
if (ret)
return ret;
- nouveau_therm_ic_ctor(therm);
- nouveau_therm_sensor_ctor(therm);
- nouveau_therm_fan_ctor(therm);
+ priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nv50_fan_pwm_get;
+ priv->base.base.pwm_set = nv50_fan_pwm_set;
+ priv->base.base.pwm_clock = nv50_fan_pwm_clock;
+ priv->base.base.temp_get = nv50_temp_get;
+ priv->base.sensor.program_alarms = nv50_therm_program_alarms;
+ nv_subdev(priv)->intr = nv50_therm_intr;
- priv->fan.pwm_get = nv50_fan_pwm_get;
- priv->fan.pwm_set = nv50_fan_pwm_set;
- priv->fan.pwm_clock = nv50_fan_pwm_clock;
+ /* init the thresholds */
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_SHUTDOWN,
+ NOUVEAU_THERM_THRS_LOWER);
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_FANBOOST,
+ NOUVEAU_THERM_THRS_LOWER);
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_CRITICAL,
+ NOUVEAU_THERM_THRS_LOWER);
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_DOWNCLOCK,
+ NOUVEAU_THERM_THRS_LOWER);
- therm->temp_get = nv50_temp_get;
- therm->fan_get = nouveau_therm_fan_user_get;
- therm->fan_set = nouveau_therm_fan_user_set;
- therm->fan_sense = nouveau_therm_fan_sense;
- therm->attr_get = nouveau_therm_attr_get;
- therm->attr_set = nouveau_therm_attr_set;
-
- return 0;
+ return nouveau_therm_preinit(&priv->base.base);
}
struct nouveau_oclass
@@ -151,7 +304,7 @@ nv50_therm_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_therm_ctor,
.dtor = _nouveau_therm_dtor,
- .init = nouveau_therm_init,
- .fini = nouveau_therm_fini,
+ .init = _nouveau_therm_init,
+ .fini = _nouveau_therm_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
new file mode 100644
index 000000000000..2dcc5437116a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+
+#include "priv.h"
+
+struct nva3_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
+int
+nva3_therm_fan_sense(struct nouveau_therm *therm)
+{
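+ /* the counter is set up to count revolutions per second (see
+ * nva3_therm_init), so scale the reading to rpm
+ */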
+ u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff;
+ u32 ctrl = nv_rd32(therm, 0x00e720);
+ if (ctrl & 0x00000001)
+ return tach * 60;
+ return -ENODEV;
+}
+
+static int
+nva3_therm_init(struct nouveau_object *object)
+{
+ struct nva3_therm_priv *priv = (void *)object;
+ struct dcb_gpio_func *tach = &priv->base.fan->tach;
+ int ret;
+
+ ret = nouveau_therm_init(&priv->base.base);
+ if (ret)
+ return ret;
+
+ /* enable fan tach, count revolutions per-second */
+ nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
+ if (tach->func != DCB_GPIO_UNUSED) {
+ nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
+ nv_mask(priv, 0x00e720, 0x001f0000, tach->line << 16);
+ nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
+ }
+ nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
+
+ return 0;
+}
+
+static int
+nva3_therm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nva3_therm_priv *priv;
+ int ret;
+
+ ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nv50_fan_pwm_get;
+ priv->base.base.pwm_set = nv50_fan_pwm_set;
+ priv->base.base.pwm_clock = nv50_fan_pwm_clock;
+ priv->base.base.temp_get = nv50_temp_get;
+ priv->base.base.fan_sense = nva3_therm_fan_sense;
+ priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+ return nouveau_therm_preinit(&priv->base.base);
+}
+
+struct nouveau_oclass
+nva3_therm_oclass = {
+ .handle = NV_SUBDEV(THERM, 0xa3),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_therm_ctor,
+ .dtor = _nouveau_therm_dtor,
+ .init = nva3_therm_init,
+ .fini = _nouveau_therm_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
new file mode 100644
index 000000000000..d7d30ee8332e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+struct nvd0_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
+static int
+pwm_info(struct nouveau_therm *therm, int line)
+{
+ u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
+ switch (gpio & 0x000000c0) {
+ case 0x00000000: /* normal mode, possibly pwm forced off by us */
+ case 0x00000040: /* nvio special */
+ switch (gpio & 0x0000001f) {
+ case 0x19: return 1;
+ case 0x1c: return 0;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ nv_error(therm, "GPIO %d unknown PWM: 0x%08x\n", line, gpio);
+ return -ENODEV;
+}
+
+static int
+nvd0_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+ u32 data = enable ? 0x00000040 : 0x00000000;
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return indx;
+
+ nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
+ return 0;
+}
+
+static int
+nvd0_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
+{
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return indx;
+
+ if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
+ *divs = nv_rd32(therm, 0x00e114 + (indx * 8));
+ *duty = nv_rd32(therm, 0x00e118 + (indx * 8));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+nvd0_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
+{
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return indx;
+
+ nv_wr32(therm, 0x00e114 + (indx * 8), divs);
+ nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
+ return 0;
+}
+
+static int
+nvd0_fan_pwm_clock(struct nouveau_therm *therm)
+{
+ return (nv_device(therm)->crystal * 1000) / 20;
+}
+
+static int
+nvd0_therm_init(struct nouveau_object *object)
+{
+ struct nvd0_therm_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_therm_init(&priv->base.base);
+ if (ret)
+ return ret;
+
+ /* enable fan tach, count revolutions per second */
+ nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
+ if (priv->base.fan->tach.func != DCB_GPIO_UNUSED) {
+ nv_mask(priv, 0x00d79c, 0x000000ff, priv->base.fan->tach.line);
+ nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
+ nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
+ }
+ nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
+
+ return 0;
+}
+
+static int
+nvd0_therm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvd0_therm_priv *priv;
+ int ret;
+
+ ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.pwm_ctrl = nvd0_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nvd0_fan_pwm_get;
+ priv->base.base.pwm_set = nvd0_fan_pwm_set;
+ priv->base.base.pwm_clock = nvd0_fan_pwm_clock;
+ priv->base.base.temp_get = nv50_temp_get;
+ priv->base.base.fan_sense = nva3_therm_fan_sense;
+ priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+ return nouveau_therm_preinit(&priv->base.base);
+}
+
+struct nouveau_oclass
+nvd0_therm_oclass = {
+ .handle = NV_SUBDEV(THERM, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_therm_ctor,
+ .dtor = _nouveau_therm_dtor,
+ .init = nvd0_therm_init,
+ .fini = _nouveau_therm_fini,
+ },
+};
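
nvd0_fan_pwm_clock() above reports crystal * 1000 / 20 as the PWM base clock. Assuming the device's ->crystal field is in kHz, as the * 1000 implies, a 27 MHz crystal yields 1.35 MHz; the divider/duty split below is an assumption about how a caller might feed nvd0_fan_pwm_set(), not code taken from the patch:

#include <stdio.h>

/* Hedged worked example of the nvd0 PWM clock arithmetic. */
int main(void)
{
        unsigned crystal_khz = 27000;                   /* 27 MHz crystal */
        unsigned pwm_clock = crystal_khz * 1000 / 20;   /* 1,350,000 Hz */
        unsigned target_hz = 25000;                     /* assumed PWM frequency */
        unsigned divs = pwm_clock / target_hz;          /* 54 ticks per period */
        unsigned duty = divs * 40 / 100;                /* 40% duty -> 21 ticks */

        printf("pwm_clock=%u Hz, divs=%u, duty=%u\n", pwm_clock, divs, duty);
        return 0;
}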
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 1c3cd6abc36e..06b98706b3fc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -1,3 +1,6 @@
+#ifndef __NVTHERM_PRIV_H__
+#define __NVTHERM_PRIV_H__
+
/*
* Copyright 2012 The Nouveau community
*
@@ -25,33 +28,81 @@
#include <subdev/therm.h>
#include <subdev/bios/extdev.h>
+#include <subdev/bios/gpio.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/therm.h>
+#include <subdev/timer.h>
+
+struct nouveau_fan {
+ struct nouveau_therm *parent;
+ const char *type;
+
+ struct nvbios_therm_fan bios;
+ struct nvbios_perf_fan perf;
+
+ struct nouveau_alarm alarm;
+ spinlock_t lock;
+ int percent;
+
+ int (*get)(struct nouveau_therm *therm);
+ int (*set)(struct nouveau_therm *therm, int percent);
+
+ struct dcb_gpio_func tach;
+};
+
+enum nouveau_therm_thrs_direction {
+ NOUVEAU_THERM_THRS_FALLING = 0,
+ NOUVEAU_THERM_THRS_RISING = 1
+};
+
+enum nouveau_therm_thrs_state {
+ NOUVEAU_THERM_THRS_LOWER = 0,
+ NOUVEAU_THERM_THRS_HIGHER = 1
+};
+
+enum nouveau_therm_thrs {
+ NOUVEAU_THERM_THRS_FANBOOST = 0,
+ NOUVEAU_THERM_THRS_DOWNCLOCK = 1,
+ NOUVEAU_THERM_THRS_CRITICAL = 2,
+ NOUVEAU_THERM_THRS_SHUTDOWN = 3,
+ NOUVEAU_THERM_THRS_NR
+};
struct nouveau_therm_priv {
struct nouveau_therm base;
+ /* automatic thermal management */
+ struct nouveau_alarm alarm;
+ spinlock_t lock;
+ struct nouveau_therm_trip_point *last_trip;
+ int mode;
+ int suspend;
+
/* bios */
struct nvbios_therm_sensor bios_sensor;
- struct nvbios_therm_fan bios_fan;
- struct nvbios_perf_fan bios_perf_fan;
/* fan priv */
+ struct nouveau_fan *fan;
+
+ /* alarms priv */
struct {
- enum nouveau_therm_fan_mode mode;
- int percent;
+ spinlock_t alarm_program_lock;
+ struct nouveau_alarm therm_poll_alarm;
+ enum nouveau_therm_thrs_state alarm_state[NOUVEAU_THERM_THRS_NR];
+ void (*program_alarms)(struct nouveau_therm *);
+ } sensor;
- int (*pwm_get)(struct nouveau_therm *, int line, u32*, u32*);
- int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
- int (*pwm_clock)(struct nouveau_therm *);
- } fan;
+ /* what should be done if the card overheats */
+ struct {
+ void (*downclock)(struct nouveau_therm *, bool active);
+ void (*pause)(struct nouveau_therm *, bool active);
+ } emergency;
/* ic */
struct i2c_client *ic;
};
-int nouveau_therm_init(struct nouveau_object *object);
-int nouveau_therm_fini(struct nouveau_object *object, bool suspend);
+int nouveau_therm_mode(struct nouveau_therm *therm, int mode);
int nouveau_therm_attr_get(struct nouveau_therm *therm,
enum nouveau_therm_attr_type type);
int nouveau_therm_attr_set(struct nouveau_therm *therm,
@@ -63,11 +114,35 @@ int nouveau_therm_sensor_ctor(struct nouveau_therm *therm);
int nouveau_therm_fan_ctor(struct nouveau_therm *therm);
int nouveau_therm_fan_get(struct nouveau_therm *therm);
-int nouveau_therm_fan_set(struct nouveau_therm *therm, int percent);
+int nouveau_therm_fan_set(struct nouveau_therm *therm, bool now, int percent);
int nouveau_therm_fan_user_get(struct nouveau_therm *therm);
int nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent);
-int nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
- enum nouveau_therm_fan_mode mode);
-
int nouveau_therm_fan_sense(struct nouveau_therm *therm);
+
+int nouveau_therm_preinit(struct nouveau_therm *);
+
+void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs,
+ enum nouveau_therm_thrs_state st);
+enum nouveau_therm_thrs_state
+nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs);
+void nouveau_therm_sensor_event(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs,
+ enum nouveau_therm_thrs_direction dir);
+void nouveau_therm_program_alarms_polling(struct nouveau_therm *therm);
+
+int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool);
+int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
+int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
+int nv50_fan_pwm_clock(struct nouveau_therm *);
+int nv50_temp_get(struct nouveau_therm *therm);
+
+int nva3_therm_fan_sense(struct nouveau_therm *);
+
+int nouveau_fanpwm_create(struct nouveau_therm *, struct dcb_gpio_func *);
+int nouveau_fantog_create(struct nouveau_therm *, struct dcb_gpio_func *);
+int nouveau_fannil_create(struct nouveau_therm *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index 204282301fb1..b37624af8297 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -58,11 +58,171 @@ static void
nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *s = &priv->bios_sensor;
if (!priv->bios_sensor.slope_div)
priv->bios_sensor.slope_div = 1;
if (!priv->bios_sensor.offset_den)
priv->bios_sensor.offset_den = 1;
+
+ /* enforce a minimum hysteresis on thresholds */
+ s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
+ s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2);
+ s->thrs_critical.hysteresis = max_t(u8, s->thrs_critical.hysteresis, 2);
+ s->thrs_shutdown.hysteresis = max_t(u8, s->thrs_shutdown.hysteresis, 2);
+}
+
+/* must be called with alarm_program_lock taken! */
+void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs,
+ enum nouveau_therm_thrs_state st)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ priv->sensor.alarm_state[thrs] = st;
+}
+
+/* must be called with alarm_program_lock taken! */
+enum nouveau_therm_thrs_state
+nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ return priv->sensor.alarm_state[thrs];
+}
+
+static void
+nv_poweroff_work(struct work_struct *work)
+{
+ orderly_poweroff(true);
+ kfree(work);
+}
+
+void nouveau_therm_sensor_event(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs,
+ enum nouveau_therm_thrs_direction dir)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ bool active;
+ const char *thresholds[] = {
+ "fanboost", "downclock", "critical", "shutdown"
+ };
+ uint8_t temperature = therm->temp_get(therm);
+
+ if (thrs < 0 || thrs > 3)
+ return;
+
+ if (dir == NOUVEAU_THERM_THRS_FALLING)
+ nv_info(therm, "temperature (%u C) went below the '%s' threshold\n",
+ temperature, thresholds[thrs]);
+ else
+ nv_info(therm, "temperature (%u C) hit the '%s' threshold\n",
+ temperature, thresholds[thrs]);
+
+ active = (dir == NOUVEAU_THERM_THRS_RISING);
+ switch (thrs) {
+ case NOUVEAU_THERM_THRS_FANBOOST:
+ if (active) {
+ nouveau_therm_fan_set(therm, true, 100);
+ nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
+ }
+ break;
+ case NOUVEAU_THERM_THRS_DOWNCLOCK:
+ if (priv->emergency.downclock)
+ priv->emergency.downclock(therm, active);
+ break;
+ case NOUVEAU_THERM_THRS_CRITICAL:
+ if (priv->emergency.pause)
+ priv->emergency.pause(therm, active);
+ break;
+ case NOUVEAU_THERM_THRS_SHUTDOWN:
+ if (active) {
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, nv_poweroff_work);
+ schedule_work(work);
+ }
+ }
+ break;
+ case NOUVEAU_THERM_THRS_NR:
+ break;
+ }
+
+}
+
+/* must be called with alarm_program_lock taken! */
+static void
+nouveau_therm_threshold_hyst_polling(struct nouveau_therm *therm,
+ const struct nvbios_therm_threshold *thrs,
+ enum nouveau_therm_thrs thrs_name)
+{
+ enum nouveau_therm_thrs_direction direction;
+ enum nouveau_therm_thrs_state prev_state, new_state;
+ int temp = therm->temp_get(therm);
+
+ prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+
+ if (temp >= thrs->temp && prev_state == NOUVEAU_THERM_THRS_LOWER) {
+ direction = NOUVEAU_THERM_THRS_RISING;
+ new_state = NOUVEAU_THERM_THRS_HIGHER;
+ } else if (temp <= thrs->temp - thrs->hysteresis &&
+ prev_state == NOUVEAU_THERM_THRS_HIGHER) {
+ direction = NOUVEAU_THERM_THRS_FALLING;
+ new_state = NOUVEAU_THERM_THRS_LOWER;
+ } else
+ return; /* nothing to do */
+
+ nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+ nouveau_therm_sensor_event(therm, thrs_name, direction);
+}
+
+static void
+alarm_timer_callback(struct nouveau_alarm *alarm)
+{
+ struct nouveau_therm_priv *priv =
+ container_of(alarm, struct nouveau_therm_priv, sensor.therm_poll_alarm);
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ struct nouveau_timer *ptimer = nouveau_timer(priv);
+ struct nouveau_therm *therm = &priv->base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+ nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
+ NOUVEAU_THERM_THRS_FANBOOST);
+
+ nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock,
+ NOUVEAU_THERM_THRS_DOWNCLOCK);
+
+ nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
+ NOUVEAU_THERM_THRS_CRITICAL);
+
+ nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
+ NOUVEAU_THERM_THRS_SHUTDOWN);
+
+ /* schedule the next poll in one second */
+ if (list_empty(&alarm->head))
+ ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
+
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+}
+
+void
+nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+
+ nv_info(therm,
+ "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+ sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+ sensor->thrs_down_clock.temp,
+ sensor->thrs_down_clock.hysteresis,
+ sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+ sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+
+ alarm_timer_callback(&priv->sensor.therm_poll_alarm);
}
int
@@ -71,6 +231,8 @@ nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
struct nouveau_therm_priv *priv = (void *)therm;
struct nouveau_bios *bios = nouveau_bios(therm);
+ nouveau_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback);
+
nouveau_therm_temp_set_defaults(therm);
if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
&priv->bios_sensor))
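
The threshold code added to temp.c is a per-threshold two-state hysteresis machine: nouveau_therm_threshold_hyst_polling() reports a rising event when the temperature reaches thrs->temp while the recorded state is LOWER, a falling event when it drops to thrs->temp - hysteresis while the state is HIGHER, and stays silent inside the band. A standalone sketch of that logic, using simplified types in place of the driver's structures:

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for the driver's threshold and state types. */
enum thrs_state { THRS_LOWER, THRS_HIGHER };

struct threshold { int temp; int hysteresis; };

/* Returns true and updates *state when a rising or falling event occurs. */
static bool hyst_poll(const struct threshold *t, int temp,
                      enum thrs_state *state, bool *rising)
{
        if (temp >= t->temp && *state == THRS_LOWER) {
                *state = THRS_HIGHER;
                *rising = true;
                return true;
        }
        if (temp <= t->temp - t->hysteresis && *state == THRS_HIGHER) {
                *state = THRS_LOWER;
                *rising = false;
                return true;
        }
        return false;           /* inside the hysteresis band: no event */
}

int main(void)
{
        struct threshold fanboost = { .temp = 90, .hysteresis = 2 };
        enum thrs_state st = THRS_LOWER;
        bool rising;
        int samples[] = { 85, 91, 90, 89, 87, 92 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                if (hyst_poll(&fanboost, samples[i], &st, &rising))
                        printf("%d C: %s\n", samples[i],
                               rising ? "rising" : "falling");
        return 0;
}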
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index c26ca9bef671..8e1bae4f12e8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -79,7 +79,7 @@ nv04_timer_alarm_trigger(struct nouveau_timer *ptimer)
/* execute any pending alarm handlers */
list_for_each_entry_safe(alarm, atemp, &exec, head) {
- list_del(&alarm->head);
+ list_del_init(&alarm->head);
alarm->func(alarm);
}
}
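
The switch from list_del() to list_del_init() in the timer's alarm handler ties in with the polling code above: alarm_timer_callback() only re-arms the alarm when list_empty(&alarm->head) is true, and an entry removed with plain list_del() never looks empty again, so polling would stop after the first tick. A userspace sketch with a simplified re-implementation of the list primitives (not the kernel's list.h) to show the difference:

#include <stdio.h>
#include <stdbool.h>

/* Minimal re-implementation of the kernel list idiom, for illustration only. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *head)
{
        n->next = head->next; n->prev = head;
        head->next->prev = n; head->next = n;
}

static void list_del(struct list_head *n)       /* unlink, leave stale links */
{
        n->prev->next = n->next; n->next->prev = n->prev;
}

static void list_del_init(struct list_head *n)  /* unlink and re-initialise */
{
        list_del(n); INIT_LIST_HEAD(n);
}

static bool list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
        struct list_head pending, alarm;

        INIT_LIST_HEAD(&pending); INIT_LIST_HEAD(&alarm);
        list_add(&alarm, &pending);
        list_del(&alarm);
        printf("after list_del:      empty=%d\n", list_empty(&alarm));  /* 0 */

        INIT_LIST_HEAD(&alarm);
        list_add(&alarm, &pending);
        list_del_init(&alarm);
        printf("after list_del_init: empty=%d\n", list_empty(&alarm));  /* 1 */
        return 0;
}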
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index d0da230d7706..74acf0f87785 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -3,7 +3,7 @@
#define ROM_BIOS_PAGE 4096
-#if defined(CONFIG_ACPI)
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
bool nouveau_is_optimus(void);
bool nouveau_is_v1_dsm(void);
void nouveau_register_dsm_handler(void);
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index f65b20a375f6..5d940302d2aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -84,6 +84,8 @@ nv40_backlight_init(struct drm_connector *connector)
props.max_brightness = 31;
bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
&nv40_bl_ops, &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
drm->backlight = bd;
bd->props.brightness = nv40_get_intensity(bd);
backlight_update_status(bd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 865eddfa30a7..50a6dd02f7c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -678,23 +678,6 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
return 0;
}
-static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
-{
- /*
- * offset + 0 (8 bits): Micro version
- * offset + 1 (8 bits): Minor version
- * offset + 2 (8 bits): Chip version
- * offset + 3 (8 bits): Major version
- */
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- bios->major_version = bios->data[offset + 3];
- bios->chip_version = bios->data[offset + 2];
- NV_INFO(drm, "Bios version %02x.%02x.%02x.%02x\n",
- bios->data[offset + 3], bios->data[offset + 2],
- bios->data[offset + 1], bios->data[offset]);
-}
-
static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
{
/*
@@ -710,12 +693,6 @@ static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
*/
bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
- bios->macro_index_tbl_ptr = ROM16(bios->data[offset + 2]);
- bios->macro_tbl_ptr = ROM16(bios->data[offset + 4]);
- bios->condition_tbl_ptr = ROM16(bios->data[offset + 6]);
- bios->io_condition_tbl_ptr = ROM16(bios->data[offset + 8]);
- bios->io_flag_condition_tbl_ptr = ROM16(bios->data[offset + 10]);
- bios->init_function_tbl_ptr = ROM16(bios->data[offset + 12]);
}
static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
@@ -765,25 +742,6 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
return 0;
}
-static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
-{
- /*
- * offset + 8 (16 bits): PLL limits table pointer
- *
- * There's more in here, but that's unknown.
- */
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- if (bitentry->length < 10) {
- NV_ERROR(drm, "Do not understand BIT C table\n");
- return -EINVAL;
- }
-
- bios->pll_limit_tbl_ptr = ROM16(bios->data[bitentry->offset + 8]);
-
- return 0;
-}
-
static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
/*
@@ -821,12 +779,6 @@ static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios,
}
parse_script_table_pointers(bios, bitentry->offset);
-
- if (bitentry->length >= 16)
- bios->some_script_ptr = ROM16(bios->data[bitentry->offset + 14]);
- if (bitentry->length >= 18)
- bios->init96_tbl_ptr = ROM16(bios->data[bitentry->offset + 16]);
-
return 0;
}
@@ -852,8 +804,6 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
return -EINVAL;
}
- parse_bios_version(dev, bios, bitentry->offset);
-
/*
* bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
* Quadro identity crisis), other bits possibly as for BMP feature byte
@@ -1078,9 +1028,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
return ret;
if (bios->major_version >= 0x60) /* g80+ */
parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
- ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('C', C));
- if (ret)
- return ret;
parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
if (ret)
@@ -1228,8 +1175,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
*/
bios->feature_byte = bmp[9];
- parse_bios_version(dev, bios, offset + 10);
-
if (bmp_version_major < 5 || bmp_version_minor < 0x10)
bios->old_style_init = true;
legacy_scripts_offset = 18;
@@ -1276,8 +1221,10 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
}
+#if 0
if (bmplength > 143)
bios->pll_limit_tbl_ptr = ROM16(bmp[142]);
+#endif
if (bmplength > 157)
bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;
@@ -1522,6 +1469,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
}
case DCB_OUTPUT_DP:
entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
+ entry->extdev = (conf & 0x0000ff00) >> 8;
switch ((conf & 0x00e00000) >> 21) {
case 0:
entry->dpconf.link_bw = 162000;
@@ -1543,8 +1491,10 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
}
break;
case DCB_OUTPUT_TMDS:
- if (dcb->version >= 0x40)
+ if (dcb->version >= 0x40) {
entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
+ entry->extdev = (conf & 0x0000ff00) >> 8;
+ }
else if (dcb->version >= 0x30)
entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8;
else if (dcb->version >= 0x22)
@@ -1937,9 +1887,9 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
if (conn[0] != 0xff) {
NV_INFO(drm, "DCB conn %02d: ", idx);
if (olddcb_conntab(dev)[3] < 4)
- printk("%04x\n", ROM16(conn[0]));
+ pr_cont("%04x\n", ROM16(conn[0]));
else
- printk("%08x\n", ROM32(conn[0]));
+ pr_cont("%08x\n", ROM32(conn[0]));
}
}
dcb_fake_connectors(bios);
@@ -2052,45 +2002,29 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
static bool NVInitVBIOS(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
-
- memset(bios, 0, sizeof(struct nvbios));
- spin_lock_init(&bios->lock);
- bios->dev = dev;
-
- bios->data = nouveau_bios(drm->device)->data;
- bios->length = nouveau_bios(drm->device)->size;
- return true;
-}
+ struct nouveau_bios *bios = nouveau_bios(drm->device);
+ struct nvbios *legacy = &drm->vbios;
+
+ memset(legacy, 0, sizeof(struct nvbios));
+ spin_lock_init(&legacy->lock);
+ legacy->dev = dev;
+
+ legacy->data = bios->data;
+ legacy->length = bios->size;
+ legacy->major_version = bios->version.major;
+ legacy->chip_version = bios->version.chip;
+ if (bios->bit_offset) {
+ legacy->type = NVBIOS_BIT;
+ legacy->offset = bios->bit_offset;
+ return !parse_bit_structure(legacy, legacy->offset + 6);
+ } else
+ if (bios->bmp_offset) {
+ legacy->type = NVBIOS_BMP;
+ legacy->offset = bios->bmp_offset;
+ return !parse_bmp_structure(dev, legacy, legacy->offset);
+ }
-static int nouveau_parse_vbios_struct(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
- const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
- const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
- int offset;
-
- offset = findstr(bios->data, bios->length,
- bit_signature, sizeof(bit_signature));
- if (offset) {
- NV_INFO(drm, "BIT BIOS found\n");
- bios->type = NVBIOS_BIT;
- bios->offset = offset;
- return parse_bit_structure(bios, offset + 6);
- }
-
- offset = findstr(bios->data, bios->length,
- bmp_signature, sizeof(bmp_signature));
- if (offset) {
- NV_INFO(drm, "BMP BIOS found\n");
- bios->type = NVBIOS_BMP;
- bios->offset = offset;
- return parse_bmp_structure(dev, bios, offset);
- }
-
- NV_ERROR(drm, "No known BIOS signature found\n");
- return -ENODEV;
+ return false;
}
int
@@ -2146,10 +2080,6 @@ nouveau_bios_init(struct drm_device *dev)
if (!NVInitVBIOS(dev))
return -ENODEV;
- ret = nouveau_parse_vbios_struct(dev);
- if (ret)
- return ret;
-
ret = parse_dcb_table(dev, bios);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index f68c54ca422f..7ccd28f11adf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -107,20 +107,10 @@ struct nvbios {
bool old_style_init;
uint16_t init_script_tbls_ptr;
uint16_t extra_init_script_tbl_ptr;
- uint16_t macro_index_tbl_ptr;
- uint16_t macro_tbl_ptr;
- uint16_t condition_tbl_ptr;
- uint16_t io_condition_tbl_ptr;
- uint16_t io_flag_condition_tbl_ptr;
- uint16_t init_function_tbl_ptr;
-
- uint16_t pll_limit_tbl_ptr;
+
uint16_t ram_restrict_tbl_ptr;
uint8_t ram_restrict_group_count;
- uint16_t some_script_ptr; /* BIT I + 14 */
- uint16_t init96_tbl_ptr; /* BIT I + 16 */
-
struct dcb_table dcb;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 1699a9083a2f..11ca82148edc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -301,17 +301,18 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (ret)
+ goto out;
+
if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
1 << bo->mem.mem_type, memtype);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (nvbo->pin_refcnt++)
- return 0;
-
- ret = ttm_bo_reserve(bo, false, false, false, 0);
- if (ret)
goto out;
nouveau_bo_placement_set(nvbo, memtype, 0);
@@ -329,10 +330,8 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
break;
}
}
- ttm_bo_unreserve(bo);
out:
- if (unlikely(ret))
- nvbo->pin_refcnt--;
+ ttm_bo_unreserve(bo);
return ret;
}
@@ -343,13 +342,13 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
- if (--nvbo->pin_refcnt)
- return 0;
-
ret = ttm_bo_reserve(bo, false, false, false, 0);
if (ret)
return ret;
+ if (--nvbo->pin_refcnt)
+ goto out;
+
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
ret = nouveau_bo_validate(nvbo, false, false);
@@ -366,6 +365,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
}
}
+out:
ttm_bo_unreserve(bo);
return ret;
}
@@ -562,7 +562,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(chan, &fence);
+ ret = nouveau_fence_new(chan, false, &fence);
if (ret)
return ret;
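
The reshuffling in nouveau_bo_pin()/nouveau_bo_unpin() above takes the ttm reservation before any pin_refcnt check and releases it on a single exit path, which is what lets the header change in nouveau_bo.h declare the counter protected by the reservation lock. A compressed sketch of the resulting pattern, using hypothetical stand-ins rather than the real TTM API:

#include <errno.h>

/* Illustrative stand-ins only: sketch_bo, reserve_bo(), unreserve_bo(),
 * compatible() and validate() model ttm_bo_reserve()/ttm_bo_unreserve()
 * and nouveau_bo_validate(); none of these names come from the patch. */
struct sketch_bo { int pin_refcnt; unsigned placement; };

static int reserve_bo(struct sketch_bo *bo) { (void)bo; return 0; }
static void unreserve_bo(struct sketch_bo *bo) { (void)bo; }
static int compatible(struct sketch_bo *bo, unsigned memtype)
{ return bo->placement & memtype; }
static int validate(struct sketch_bo *bo, unsigned memtype)
{ bo->placement = memtype; return 0; }

static int pin_sketch(struct sketch_bo *bo, unsigned memtype)
{
        int ret = reserve_bo(bo);       /* take the reservation first */
        if (ret)
                return ret;

        if (bo->pin_refcnt && !compatible(bo, memtype)) {
                ret = -EINVAL;          /* pinned with an incompatible placement */
                goto out;
        }
        if (bo->pin_refcnt++)           /* refcount only touched under the lock */
                goto out;

        ret = validate(bo, memtype);    /* first pin does the real placement */
out:
        unreserve_bo(bo);               /* single unlock path, as in the patch */
        return ret;
}

int main(void)
{
        struct sketch_bo bo = { 0, 0 };
        return pin_sketch(&bo, 1) ? 1 : 0;
}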
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 25ca37989d2c..653dbbbd4fa1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -28,10 +28,11 @@ struct nouveau_bo {
struct nouveau_drm_tile *tile;
struct drm_gem_object *gem;
+
+ /* protected by the ttm reservation lock */
int pin_refcnt;
struct ttm_bo_kmap_obj dma_buf_vmap;
- int vmapping_count;
};
static inline struct nouveau_bo *
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 174300b6a02e..eaa80a2b81ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -51,14 +51,15 @@ nouveau_channel_idle(struct nouveau_channel *chan)
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(chan, &fence);
+ ret = nouveau_fence_new(chan, false, &fence);
if (!ret) {
ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
if (ret)
- NV_ERROR(cli, "failed to idle channel 0x%08x\n", chan->handle);
+ NV_ERROR(cli, "failed to idle channel 0x%08x [%s]\n",
+ chan->handle, cli->base.name);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index e620ba8271b4..4dd7ae2ac6c6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -55,8 +55,6 @@ MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)");
static int nouveau_duallink = 1;
module_param_named(duallink, nouveau_duallink, int, 0400);
-static void nouveau_connector_hotplug(void *, int);
-
struct nouveau_encoder *
find_encoder(struct drm_connector *connector, int type)
{
@@ -100,22 +98,6 @@ static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
- struct nouveau_gpio *gpio;
- struct nouveau_drm *drm;
- struct drm_device *dev;
-
- if (!nv_connector)
- return;
-
- dev = nv_connector->base.dev;
- drm = nouveau_drm(dev);
- gpio = nouveau_gpio(drm->device);
-
- if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
- gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
- nouveau_connector_hotplug, connector);
- }
-
kfree(nv_connector->edid);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -130,7 +112,6 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_i2c_port *port = NULL;
int i, panel = -ENODEV;
@@ -160,8 +141,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
continue;
nv_encoder = nouveau_encoder(obj_to_encoder(obj));
- if (nv_encoder->dcb->i2c_index < 0xf)
- port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+ port = nv_encoder->i2c;
if (port && nv_probe_i2c(port, 0x50)) {
*pnv_encoder = nv_encoder;
break;
@@ -399,9 +379,10 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
struct edid *edid =
(struct edid *)nouveau_bios_embedded_edid(dev);
if (edid) {
- nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- *(nv_connector->edid) = *edid;
- status = connector_status_connected;
+ nv_connector->edid =
+ kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ if (nv_connector->edid)
+ status = connector_status_connected;
}
}
@@ -911,6 +892,37 @@ nouveau_connector_funcs_lvds = {
.force = nouveau_connector_force
};
+static void
+nouveau_connector_hotplug_work(struct work_struct *work)
+{
+ struct nouveau_connector *nv_connector =
+ container_of(work, struct nouveau_connector, hpd_work);
+ struct drm_connector *connector = &nv_connector->base;
+ struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+ bool plugged = gpio->get(gpio, 0, nv_connector->hpd.func, 0xff);
+
+ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
+ drm_get_connector_name(connector));
+
+ if (plugged)
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ else
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+
+ drm_helper_hpd_irq_event(dev);
+}
+
+static int
+nouveau_connector_hotplug(struct nouveau_eventh *event, int index)
+{
+ struct nouveau_connector *nv_connector =
+ container_of(event, struct nouveau_connector, hpd_func);
+ schedule_work(&nv_connector->hpd_work);
+ return NVKM_EVENT_KEEP;
+}
+
static int
drm_conntype_from_dcb(enum dcb_connector_type dcb)
{
@@ -961,6 +973,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
return ERR_PTR(-ENOMEM);
connector = &nv_connector->base;
+ INIT_WORK(&nv_connector->hpd_work, nouveau_connector_hotplug_work);
nv_connector->index = index;
/* attempt to parse vbios connector type and hotplug gpio */
@@ -975,8 +988,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
if (olddcb_conntab(dev)[3] >= 4)
entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
- nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
- nv_connector->hpd = hpd[nv_connector->hpd];
+ ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
+ DCB_GPIO_UNUSED, &nv_connector->hpd);
+ nv_connector->hpd_func.func = nouveau_connector_hotplug;
+ if (ret)
+ nv_connector->hpd.func = DCB_GPIO_UNUSED;
nv_connector->type = nv_connector->dcb[0];
if (drm_conntype_from_dcb(nv_connector->type) ==
@@ -999,7 +1015,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
}
} else {
nv_connector->type = DCB_CONNECTOR_NONE;
- nv_connector->hpd = DCB_GPIO_UNUSED;
+ nv_connector->hpd.func = DCB_GPIO_UNUSED;
}
/* no vbios data, or an unknown dcb connector type - attempt to
@@ -1126,31 +1142,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
}
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
- ret = gpio->isr_add(gpio, 0, nv_connector->hpd, 0xff,
- nouveau_connector_hotplug, connector);
- if (ret == 0)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- }
+ if (nv_connector->hpd.func != DCB_GPIO_UNUSED)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_sysfs_connector_add(connector);
return connector;
}
-
-static void
-nouveau_connector_hotplug(void *data, int plugged)
-{
- struct drm_connector *connector = data;
- struct drm_device *dev = connector->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
- drm_get_connector_name(connector));
-
- if (plugged)
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- else
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-
- drm_helper_hpd_irq_event(dev);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 20eb84cce9e6..6e399aad491a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -30,6 +30,11 @@
#include <drm/drm_edid.h>
#include "nouveau_crtc.h"
+#include <core/event.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+
struct nouveau_i2c_port;
enum nouveau_underscan_type {
@@ -61,7 +66,10 @@ struct nouveau_connector {
enum dcb_connector_type type;
u8 index;
u8 *dcb;
- u8 hpd;
+
+ struct dcb_gpio_func hpd;
+ struct work_struct hpd_work;
+ struct nouveau_eventh hpd_func;
int dithering_mode;
int dithering_depth;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
new file mode 100644
index 000000000000..5392e07edfc6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nouveau_debugfs.h"
+#include "nouveau_drm.h"
+
+static int
+nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
+ int i;
+
+ for (i = 0; i < drm->vbios.length; i++)
+ seq_printf(m, "%c", drm->vbios.data[i]);
+ return 0;
+}
+
+static struct drm_info_list nouveau_debugfs_list[] = {
+ { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+};
+#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
+
+int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ return 0;
+}
+
+void
+nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+ minor);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
new file mode 100644
index 000000000000..a62af6fb5f99
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -0,0 +1,22 @@
+#ifndef __NOUVEAU_DEBUGFS_H__
+#define __NOUVEAU_DEBUGFS_H__
+
+#include <drm/drmP.h>
+
+#if defined(CONFIG_DEBUG_FS)
+extern int nouveau_debugfs_init(struct drm_minor *);
+extern void nouveau_debugfs_takedown(struct drm_minor *);
+#else
+static inline int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+ return 0;
+}
+
+static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 508b00a2ce0d..4610c3a29bbe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -41,6 +41,8 @@
#include <subdev/gpio.h>
#include <engine/disp.h>
+#include <core/class.h>
+
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
@@ -78,11 +80,6 @@ nouveau_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb = &nv_fb->base;
int ret;
- ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
- if (ret) {
- return ret;
- }
-
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
nv_fb->nvbo = nvbo;
@@ -125,6 +122,11 @@ nouveau_framebuffer_init(struct drm_device *dev,
}
}
+ ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
+ if (ret) {
+ return ret;
+ }
+
return 0;
}
@@ -231,8 +233,10 @@ nouveau_display_init(struct drm_device *dev)
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (gpio)
- gpio->irq(gpio, 0, conn->hpd, 0xff, true);
+ if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
+ nouveau_event_get(gpio->events, conn->hpd.line,
+ &conn->hpd_func);
+ }
}
return ret;
@@ -249,37 +253,20 @@ nouveau_display_fini(struct drm_device *dev)
/* disable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (gpio)
- gpio->irq(gpio, 0, conn->hpd, 0xff, false);
+ if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
+ nouveau_event_put(gpio->events, conn->hpd.line,
+ &conn->hpd_func);
+ }
}
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
}
-static void
-nouveau_display_vblank_notify(void *data, int crtc)
-{
- drm_handle_vblank(data, crtc);
-}
-
-static void
-nouveau_display_vblank_get(void *data, int crtc)
-{
- drm_vblank_get(data, crtc);
-}
-
-static void
-nouveau_display_vblank_put(void *data, int crtc)
-{
- drm_vblank_put(data, crtc);
-}
-
int
nouveau_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_disp *pdisp = nouveau_disp(drm->device);
struct nouveau_display *disp;
u32 pclass = dev->pdev->class >> 8;
int ret, gen;
@@ -288,11 +275,6 @@ nouveau_display_create(struct drm_device *dev)
if (!disp)
return -ENOMEM;
- pdisp->vblank.data = dev;
- pdisp->vblank.notify = nouveau_display_vblank_notify;
- pdisp->vblank.get = nouveau_display_vblank_get;
- pdisp->vblank.put = nouveau_display_vblank_put;
-
drm_mode_config_init(dev);
drm_mode_create_scaling_mode_property(dev);
drm_mode_create_dvi_i_properties(dev);
@@ -316,17 +298,13 @@ nouveau_display_create(struct drm_device *dev)
drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
if (gen >= 1) {
+ /* -90..+90 */
disp->vibrant_hue_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "vibrant hue", 2);
- disp->vibrant_hue_property->values[0] = 0;
- disp->vibrant_hue_property->values[1] = 180; /* -90..+90 */
+ drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
+ /* -100..+100 */
disp->color_vibrance_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "color vibrance", 2);
- disp->color_vibrance_property->values[0] = 0;
- disp->color_vibrance_property->values[1] = 200; /* -100..+100 */
+ drm_property_create_range(dev, 0, "color vibrance", 0, 200);
}
dev->mode_config.funcs = &nouveau_mode_config_funcs;
@@ -478,39 +456,6 @@ nouveau_display_resume(struct drm_device *dev)
}
}
-int
-nouveau_vblank_enable(struct drm_device *dev, int crtc)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (device->card_type >= NV_D0)
- nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 1);
- else
- if (device->card_type >= NV_50)
- nv_mask(device, NV50_PDISPLAY_INTR_EN_1, 0,
- NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
- else
- NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
- NV_PCRTC_INTR_0_VBLANK);
-
- return 0;
-}
-
-void
-nouveau_vblank_disable(struct drm_device *dev, int crtc)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (device->card_type >= NV_D0)
- nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 0);
- else
- if (device->card_type >= NV_50)
- nv_mask(device, NV50_PDISPLAY_INTR_EN_1,
- NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
- else
- NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
-}
-
static int
nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
struct nouveau_bo *new_bo)
@@ -595,7 +540,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
}
FIRE_RING (chan);
- ret = nouveau_fence_new(chan, pfence);
+ ret = nouveau_fence_new(chan, false, pfence);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 722548bb3bd3..1ea3e4734b62 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -59,9 +59,6 @@ void nouveau_display_fini(struct drm_device *dev);
int nouveau_display_suspend(struct drm_device *dev);
void nouveau_display_resume(struct drm_device *dev);
-int nouveau_vblank_enable(struct drm_device *dev, int crtc);
-void nouveau_vblank_disable(struct drm_device *dev, int crtc);
-
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
int nouveau_finish_page_flip(struct nouveau_channel *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 5c2e22932d1c..690d5930ce32 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -191,7 +191,7 @@ WIND_RING(struct nouveau_channel *chan)
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
-#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
+#define NV84_SUBCHAN_UEVENT 0x00000020
#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
#define NV10_SUBCHAN_REF_CNT 0x00000050
#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 59838651ee8f..36fd22500569 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -35,300 +35,6 @@
#include <subdev/gpio.h>
#include <subdev/i2c.h>
-/******************************************************************************
- * link training
- *****************************************************************************/
-struct dp_state {
- struct nouveau_i2c_port *auxch;
- struct nouveau_object *core;
- struct dcb_output *dcb;
- int crtc;
- u8 *dpcd;
- int link_nr;
- u32 link_bw;
- u8 stat[6];
- u8 conf[4];
-};
-
-static void
-dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- u8 sink[2];
- u32 data;
-
- NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
-
- /* set desired link configuration on the source */
- data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
- if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
- data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
-
- nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
-
- /* inform the sink of the new configuration */
- sink[0] = dp->link_bw / 27000;
- sink[1] = dp->link_nr;
- if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
- sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
- nv_wraux(dp->auxch, DP_LINK_BW_SET, sink, 2);
-}
-
-static void
-dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- u8 sink_tp;
-
- NV_DEBUG(drm, "training pattern %d\n", pattern);
-
- nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
-
- nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
- sink_tp &= ~DP_TRAINING_PATTERN_MASK;
- sink_tp |= pattern;
- nv_wraux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
-}
-
-static int
-dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- int i;
-
- for (i = 0; i < dp->link_nr; i++) {
- u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
- u8 lpre = (lane & 0x0c) >> 2;
- u8 lvsw = (lane & 0x03) >> 0;
-
- dp->conf[i] = (lpre << 3) | lvsw;
- if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200)
- dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED;
- if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5)
- dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
-
- NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
-
- nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
- }
-
- return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
-}
-
-static int
-dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- int ret;
-
- udelay(delay);
-
- ret = nv_rdaux(dp->auxch, DP_LANE0_1_STATUS, dp->stat, 6);
- if (ret)
- return ret;
-
- NV_DEBUG(drm, "status %*ph\n", 6, dp->stat);
- return 0;
-}
-
-static int
-dp_link_train_cr(struct drm_device *dev, struct dp_state *dp)
-{
- bool cr_done = false, abort = false;
- int voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- int tries = 0, i;
-
- dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_1);
-
- do {
- if (dp_link_train_commit(dev, dp) ||
- dp_link_train_update(dev, dp, 100))
- break;
-
- cr_done = true;
- for (i = 0; i < dp->link_nr; i++) {
- u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
- if (!(lane & DP_LANE_CR_DONE)) {
- cr_done = false;
- if (dp->conf[i] & DP_TRAIN_MAX_SWING_REACHED)
- abort = true;
- break;
- }
- }
-
- if ((dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
- voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- tries = 0;
- }
- } while (!cr_done && !abort && ++tries < 5);
-
- return cr_done ? 0 : -1;
-}
-
-static int
-dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
-{
- bool eq_done, cr_done = true;
- int tries = 0, i;
-
- dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_2);
-
- do {
- if (dp_link_train_update(dev, dp, 400))
- break;
-
- eq_done = !!(dp->stat[2] & DP_INTERLANE_ALIGN_DONE);
- for (i = 0; i < dp->link_nr && eq_done; i++) {
- u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
- if (!(lane & DP_LANE_CR_DONE))
- cr_done = false;
- if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
- !(lane & DP_LANE_SYMBOL_LOCKED))
- eq_done = false;
- }
-
- if (dp_link_train_commit(dev, dp))
- break;
- } while (!eq_done && cr_done && ++tries <= 5);
-
- return eq_done ? 0 : -1;
-}
-
-static void
-dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
-{
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
-
- nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
- NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
- NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
- NV94_DISP_SOR_DP_TRAIN_OP_INIT);
-}
-
-static void
-dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
-{
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
-
- nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
- NV94_DISP_SOR_DP_TRAIN_OP_FINI);
-}
-
-static bool
-nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
- struct nouveau_object *core)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector =
- nouveau_encoder_connector_get(nv_encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
- const u32 bw_list[] = { 270000, 162000, 0 };
- const u32 *link_bw = bw_list;
- struct dp_state dp;
-
- dp.auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
- if (!dp.auxch)
- return false;
-
- dp.core = core;
- dp.dcb = nv_encoder->dcb;
- dp.crtc = nv_crtc->index;
- dp.dpcd = nv_encoder->dp.dpcd;
-
- /* adjust required bandwidth for 8B/10B coding overhead */
- datarate = (datarate / 8) * 10;
-
- /* some sinks toggle hotplug in response to some of the actions
- * we take during link training (DP_SET_POWER is one), we need
- * to ignore them for the moment to avoid races.
- */
- gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
-
- /* enable down-spreading and execute pre-train script from vbios */
- dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
-
- /* start off at highest link rate supported by encoder and display */
- while (*link_bw > nv_encoder->dp.link_bw)
- link_bw++;
-
- while (link_bw[0]) {
- /* find minimum required lane count at this link rate */
- dp.link_nr = nv_encoder->dp.link_nr;
- while ((dp.link_nr >> 1) * link_bw[0] > datarate)
- dp.link_nr >>= 1;
-
- /* drop link rate to minimum with this lane count */
- while ((link_bw[1] * dp.link_nr) > datarate)
- link_bw++;
- dp.link_bw = link_bw[0];
-
- /* program selected link configuration */
- dp_set_link_config(dev, &dp);
-
- /* attempt to train the link at this configuration */
- memset(dp.stat, 0x00, sizeof(dp.stat));
- if (!dp_link_train_cr(dev, &dp) &&
- !dp_link_train_eq(dev, &dp))
- break;
-
- /* retry at lower rate */
- link_bw++;
- }
-
- /* finish link training */
- dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE);
-
- /* execute post-train script from vbios */
- dp_link_train_fini(dev, &dp);
-
- /* re-enable hotplug detect */
- gpio->irq(gpio, 0, nv_connector->hpd, 0xff, true);
- return true;
-}
-
-void
-nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
- struct nouveau_object *core)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
- struct nouveau_i2c_port *auxch;
- u8 status;
-
- auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
- if (!auxch)
- return;
-
- if (mode == DRM_MODE_DPMS_ON)
- status = DP_SET_POWER_D0;
- else
- status = DP_SET_POWER_D3;
-
- nv_wraux(auxch, DP_SET_POWER, &status, 1);
-
- if (mode == DRM_MODE_DPMS_ON)
- nouveau_dp_link_train(encoder, datarate, core);
-}
-
static void
nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
u8 *dpcd)
@@ -355,12 +61,11 @@ nouveau_dp_detect(struct drm_encoder *encoder)
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_i2c_port *auxch;
u8 *dpcd = nv_encoder->dp.dpcd;
int ret;
- auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+ auxch = nv_encoder->i2c;
if (!auxch)
return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5e7aef23825a..d1099365bfc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -34,6 +34,8 @@
#include <subdev/device.h>
#include <subdev/vm.h>
+#include <engine/disp.h>
+
#include "nouveau_drm.h"
#include "nouveau_irq.h"
#include "nouveau_dma.h"
@@ -48,6 +50,7 @@
#include "nouveau_abi16.h"
#include "nouveau_fbcon.h"
#include "nouveau_fence.h"
+#include "nouveau_debugfs.h"
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
@@ -68,6 +71,32 @@ module_param_named(modeset, nouveau_modeset, int, 0400);
static struct drm_driver driver;
+static int
+nouveau_drm_vblank_enable(struct drm_device *dev, int head)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+ nouveau_event_get(pdisp->vblank, head, &drm->vblank);
+ return 0;
+}
+
+static void
+nouveau_drm_vblank_disable(struct drm_device *dev, int head)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+ nouveau_event_put(pdisp->vblank, head, &drm->vblank);
+}
+
+static int
+nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
+{
+ struct nouveau_drm *drm =
+ container_of(event, struct nouveau_drm, vblank);
+ drm_handle_vblank(drm->dev, head);
+ return NVKM_EVENT_KEEP;
+}
+
static u64
nouveau_name(struct pci_dev *pdev)
{
@@ -132,7 +161,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
/* initialise synchronisation routines */
if (device->card_type < NV_10) ret = nv04_fence_create(drm);
- else if (device->card_type < NV_50) ret = nv10_fence_create(drm);
+ else if (device->chipset < 0x17) ret = nv10_fence_create(drm);
+ else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
else ret = nvc0_fence_create(drm);
@@ -262,6 +292,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = drm;
drm->dev = dev;
+ drm->vblank.func = nouveau_drm_vblank_handler;
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
@@ -401,7 +432,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
nouveau_object_debug();
}
-int
+static int
nouveau_do_suspend(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -472,7 +503,7 @@ int nouveau_pmops_suspend(struct device *dev)
return 0;
}
-int
+static int
nouveau_do_resume(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -546,10 +577,11 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
struct pci_dev *pdev = dev->pdev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
- char name[16];
+ char name[32], tmpname[TASK_COMM_LEN];
int ret;
- snprintf(name, sizeof(name), "%d", pid_nr(fpriv->pid));
+ get_task_comm(tmpname, current);
+ snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
if (ret)
@@ -639,22 +671,32 @@ driver = {
.postclose = nouveau_drm_postclose,
.lastclose = nouveau_vga_lastclose,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = nouveau_debugfs_init,
+ .debugfs_cleanup = nouveau_debugfs_takedown,
+#endif
+
.irq_preinstall = nouveau_irq_preinstall,
.irq_postinstall = nouveau_irq_postinstall,
.irq_uninstall = nouveau_irq_uninstall,
.irq_handler = nouveau_irq_handler,
.get_vblank_counter = drm_vblank_count,
- .enable_vblank = nouveau_vblank_enable,
- .disable_vblank = nouveau_vblank_disable,
+ .enable_vblank = nouveau_drm_vblank_enable,
+ .disable_vblank = nouveau_drm_vblank_disable,
.ioctls = nouveau_ioctls,
.fops = &nouveau_driver_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = nouveau_gem_prime_export,
- .gem_prime_import = nouveau_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = nouveau_gem_prime_pin,
+ .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
+ .gem_prime_vmap = nouveau_gem_prime_vmap,
+ .gem_prime_vunmap = nouveau_gem_prime_vunmap,
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index aa89eb938b47..b25df374c901 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -13,6 +13,7 @@
#define DRIVER_PATCHLEVEL 0
#include <core/client.h>
+#include <core/event.h>
#include <subdev/vm.h>
@@ -112,6 +113,7 @@ struct nouveau_drm {
struct nvbios vbios;
struct nouveau_display *display;
struct backlight_device *backlight;
+ struct nouveau_eventh vblank;
/* power management */
struct nouveau_pm *pm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index d0d95bd511ab..e24341229d5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -36,19 +36,12 @@
struct nouveau_i2c_port;
-struct dp_train_func {
- void (*link_set)(struct drm_device *, struct dcb_output *, int crtc,
- int nr, u32 bw, bool enhframe);
- void (*train_set)(struct drm_device *, struct dcb_output *, u8 pattern);
- void (*train_adj)(struct drm_device *, struct dcb_output *,
- u8 lane, u8 swing, u8 preem);
-};
-
struct nouveau_encoder {
struct drm_encoder_slave base;
struct dcb_output *dcb;
int or;
+ struct nouveau_i2c_port *i2c;
/* different to drm_encoder.crtc, this reflects what's
* actually programmed on the hw, not the proposed crtc */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 67a1a069de28..b03531781580 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -251,9 +251,10 @@ nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
}
static int
-nouveau_fbcon_create(struct nouveau_fbdev *fbcon,
+nouveau_fbcon_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
struct drm_device *dev = fbcon->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nv_device(drm->device);
@@ -388,23 +389,6 @@ out:
return ret;
}
-static int
-nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = nouveau_fbcon_create(fbcon, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
@@ -433,6 +417,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
nouveau_fb->nvbo = NULL;
}
drm_fb_helper_fini(&fbcon->helper);
+ drm_framebuffer_unregister_private(&nouveau_fb->base);
drm_framebuffer_cleanup(&nouveau_fb->base);
return 0;
}
@@ -449,7 +434,7 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info)
static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.gamma_set = nouveau_fbcon_gamma_set,
.gamma_get = nouveau_fbcon_gamma_get,
- .fb_probe = nouveau_fbcon_find_or_create_single,
+ .fb_probe = nouveau_fbcon_create,
};
@@ -490,6 +475,9 @@ nouveau_fbcon_init(struct drm_device *dev)
else
preferred_bpp = 32;
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 1d049be79f74..6c946837a0aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -33,14 +33,14 @@
#include "nouveau_dma.h"
#include "nouveau_fence.h"
+#include <engine/fifo.h>
+
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
struct nouveau_fence *fence, *fnext;
spin_lock(&fctx->lock);
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
- if (fence->work)
- fence->work(fence->priv, false);
fence->channel = NULL;
list_del(&fence->head);
nouveau_fence_unref(&fence);
@@ -59,17 +59,14 @@ nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
static void
nouveau_fence_update(struct nouveau_channel *chan)
{
- struct nouveau_fence_priv *priv = chan->drm->fence;
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_fence *fence, *fnext;
spin_lock(&fctx->lock);
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
- if (priv->read(chan) < fence->sequence)
+ if (fctx->read(chan) < fence->sequence)
break;
- if (fence->work)
- fence->work(fence->priv, true);
fence->channel = NULL;
list_del(&fence->head);
nouveau_fence_unref(&fence);
@@ -80,7 +77,6 @@ nouveau_fence_update(struct nouveau_channel *chan)
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_fence_priv *priv = chan->drm->fence;
struct nouveau_fence_chan *fctx = chan->fence;
int ret;
@@ -88,7 +84,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
fence->timeout = jiffies + (3 * DRM_HZ);
fence->sequence = ++fctx->sequence;
- ret = priv->emit(fence);
+ ret = fctx->emit(fence);
if (!ret) {
kref_get(&fence->kref);
spin_lock(&fctx->lock);
@@ -107,13 +103,87 @@ nouveau_fence_done(struct nouveau_fence *fence)
return !fence->channel;
}
+struct nouveau_fence_uevent {
+ struct nouveau_eventh handler;
+ struct nouveau_fence_priv *priv;
+};
+
+static int
+nouveau_fence_wait_uevent_handler(struct nouveau_eventh *event, int index)
+{
+ struct nouveau_fence_uevent *uevent =
+ container_of(event, struct nouveau_fence_uevent, handler);
+ wake_up_all(&uevent->priv->waiting);
+ return NVKM_EVENT_KEEP;
+}
+
+static int
+nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
+
+{
+ struct nouveau_channel *chan = fence->channel;
+ struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
+ struct nouveau_fence_priv *priv = chan->drm->fence;
+ struct nouveau_fence_uevent uevent = {
+ .handler.func = nouveau_fence_wait_uevent_handler,
+ .priv = priv,
+ };
+ int ret = 0;
+
+ nouveau_event_get(pfifo->uevent, 0, &uevent.handler);
+
+ if (fence->timeout) {
+ unsigned long timeout = fence->timeout - jiffies;
+
+ if (time_before(jiffies, fence->timeout)) {
+ if (intr) {
+ ret = wait_event_interruptible_timeout(
+ priv->waiting,
+ nouveau_fence_done(fence),
+ timeout);
+ } else {
+ ret = wait_event_timeout(priv->waiting,
+ nouveau_fence_done(fence),
+ timeout);
+ }
+ }
+
+ if (ret >= 0) {
+ fence->timeout = jiffies + ret;
+ if (time_after_eq(jiffies, fence->timeout))
+ ret = -EBUSY;
+ }
+ } else {
+ if (intr) {
+ ret = wait_event_interruptible(priv->waiting,
+ nouveau_fence_done(fence));
+ } else {
+ wait_event(priv->waiting, nouveau_fence_done(fence));
+ }
+ }
+
+ nouveau_event_put(pfifo->uevent, 0, &uevent.handler);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return 0;
+}
+
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
+ struct nouveau_channel *chan = fence->channel;
+ struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
ktime_t t;
int ret = 0;
+ while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
+ ret = nouveau_fence_wait_uevent(fence, intr);
+ if (ret < 0)
+ return ret;
+ }
+
while (!nouveau_fence_done(fence)) {
if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
ret = -EBUSY;
@@ -143,14 +213,14 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
int
nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_fence_priv *priv = chan->drm->fence;
+ struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_channel *prev;
int ret = 0;
prev = fence ? fence->channel : NULL;
if (prev) {
if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
- ret = priv->sync(fence, prev, chan);
+ ret = fctx->sync(fence, prev, chan);
if (unlikely(ret))
ret = nouveau_fence_wait(fence, true, false);
}
@@ -182,7 +252,8 @@ nouveau_fence_ref(struct nouveau_fence *fence)
}
int
-nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
+nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
+ struct nouveau_fence **pfence)
{
struct nouveau_fence *fence;
int ret = 0;
@@ -193,13 +264,13 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
+
+ fence->sysmem = sysmem;
kref_init(&fence->kref);
- if (chan) {
- ret = nouveau_fence_emit(fence, chan);
- if (ret)
- nouveau_fence_unref(&fence);
- }
+ ret = nouveau_fence_emit(fence, chan);
+ if (ret)
+ nouveau_fence_unref(&fence);
*pfence = fence;
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index cdb83acdffe2..c89943407b52 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -7,15 +7,15 @@ struct nouveau_fence {
struct list_head head;
struct kref kref;
+ bool sysmem;
+
struct nouveau_channel *channel;
unsigned long timeout;
u32 sequence;
-
- void (*work)(void *priv, bool signalled);
- void *priv;
};
-int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **);
+int nouveau_fence_new(struct nouveau_channel *, bool sysmem,
+ struct nouveau_fence **);
struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *);
void nouveau_fence_unref(struct nouveau_fence **);
@@ -29,6 +29,13 @@ struct nouveau_fence_chan {
struct list_head pending;
struct list_head flip;
+ int (*emit)(struct nouveau_fence *);
+ int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
+ struct nouveau_channel *);
+ u32 (*read)(struct nouveau_channel *);
+ int (*emit32)(struct nouveau_channel *, u64, u32);
+ int (*sync32)(struct nouveau_channel *, u64, u32);
+
spinlock_t lock;
u32 sequence;
};
@@ -39,10 +46,9 @@ struct nouveau_fence_priv {
void (*resume)(struct nouveau_drm *);
int (*context_new)(struct nouveau_channel *);
void (*context_del)(struct nouveau_channel *);
- int (*emit)(struct nouveau_fence *);
- int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
- struct nouveau_channel *);
- u32 (*read)(struct nouveau_channel *);
+
+ wait_queue_head_t waiting;
+ bool uevent;
};
#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
@@ -60,13 +66,31 @@ u32 nv10_fence_read(struct nouveau_channel *);
void nv10_fence_context_del(struct nouveau_channel *);
void nv10_fence_destroy(struct nouveau_drm *);
int nv10_fence_create(struct nouveau_drm *);
+
+int nv17_fence_create(struct nouveau_drm *);
void nv17_fence_resume(struct nouveau_drm *drm);
int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
int nvc0_fence_create(struct nouveau_drm *);
-u64 nvc0_fence_crtc(struct nouveau_channel *, int crtc);
int nouveau_flip_complete(void *chan);
+struct nv84_fence_chan {
+ struct nouveau_fence_chan base;
+ struct nouveau_vma vma;
+ struct nouveau_vma vma_gart;
+ struct nouveau_vma dispc_vma[4];
+};
+
+struct nv84_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_bo *bo;
+ struct nouveau_bo *bo_gart;
+ u32 *suspend;
+};
+
+u64 nv84_fence_crtc(struct nouveau_channel *, int);
+int nv84_fence_context_new(struct nouveau_channel *);
+
#endif
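
The nouveau_fence.c/nouveau_fence.h hunks above move the emit/sync/read hooks from the device-wide nouveau_fence_priv into the per-channel nouveau_fence_chan, so each channel's fence context carries its own backend callbacks. A minimal standalone sketch of that per-instance dispatch pattern (not nouveau code; all names here are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	/* Per-context ops live in the context itself instead of a global priv. */
	struct fence_chan {
		uint32_t sequence;                      /* last emitted sequence  */
		uint32_t (*read)(struct fence_chan *);  /* backend-specific read  */
		int (*emit)(struct fence_chan *);       /* backend-specific emit  */
	};

	static uint32_t sw_read(struct fence_chan *fc) { return fc->sequence; }
	static int sw_emit(struct fence_chan *fc) { fc->sequence++; return 0; }

	int main(void)
	{
		struct fence_chan chan = { .sequence = 0, .read = sw_read, .emit = sw_emit };

		chan.emit(&chan);                /* dispatches through the channel's hook */
		printf("seq=%u\n", (unsigned)chan.read(&chan));
		return 0;
	}
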
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 8bf695c52f95..b4b4d0c1f4af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -24,8 +24,6 @@
*
*/
-#include <linux/dma-buf.h>
-
#include <subdev/fb.h>
#include "nouveau_drm.h"
@@ -205,6 +203,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_fb *pfb = nouveau_fb(drm->device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
@@ -213,7 +212,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
- NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
+ NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}
@@ -315,16 +314,18 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
int nr_buffers, struct validate_op *op)
{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_device *dev = chan->drm->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
uint32_t sequence;
int trycnt = 0;
int ret, i;
+ struct nouveau_bo *res_bo = NULL;
sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
retry:
if (++trycnt > 100000) {
- NV_ERROR(drm, "%s failed and gave up.\n", __func__);
+ NV_ERROR(cli, "%s failed and gave up.\n", __func__);
return -EINVAL;
}
@@ -335,14 +336,19 @@ retry:
gem = drm_gem_object_lookup(dev, file_priv, b->handle);
if (!gem) {
- NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
+ NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
validate_fini(op, NULL);
return -ENOENT;
}
nvbo = gem->driver_private;
+ if (nvbo == res_bo) {
+ res_bo = NULL;
+ drm_gem_object_unreference_unlocked(gem);
+ continue;
+ }
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
- NV_ERROR(drm, "multiple instances of buffer %d on "
+ NV_ERROR(cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
drm_gem_object_unreference_unlocked(gem);
validate_fini(op, NULL);
@@ -352,15 +358,19 @@ retry:
ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
if (ret) {
validate_fini(op, NULL);
- if (unlikely(ret == -EAGAIN))
- ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
- drm_gem_object_unreference_unlocked(gem);
+ if (unlikely(ret == -EAGAIN)) {
+ sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
+ ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
+ sequence);
+ if (!ret)
+ res_bo = nvbo;
+ }
if (unlikely(ret)) {
+ drm_gem_object_unreference_unlocked(gem);
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "fail reserve\n");
+ NV_ERROR(cli, "fail reserve\n");
return ret;
}
- goto retry;
}
b->user_priv = (uint64_t)(unsigned long)nvbo;
@@ -376,12 +386,14 @@ retry:
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
list_add_tail(&nvbo->entry, &op->gart_list);
else {
- NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
+ NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
b->valid_domains);
list_add_tail(&nvbo->entry, &op->both_list);
validate_fini(op, NULL);
return -EINVAL;
}
+ if (nvbo == res_bo)
+ goto retry;
}
return 0;
@@ -407,8 +419,9 @@ validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
}
static int
-validate_list(struct nouveau_channel *chan, struct list_head *list,
- struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
+validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
+ struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
+ uint64_t user_pbbo_ptr)
{
struct nouveau_drm *drm = chan->drm;
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
@@ -421,7 +434,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
ret = validate_sync(chan, nvbo);
if (unlikely(ret)) {
- NV_ERROR(drm, "fail pre-validate sync\n");
+ NV_ERROR(cli, "fail pre-validate sync\n");
return ret;
}
@@ -429,20 +442,20 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
- NV_ERROR(drm, "fail set_domain\n");
+ NV_ERROR(cli, "fail set_domain\n");
return ret;
}
ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "fail ttm_validate\n");
+ NV_ERROR(cli, "fail ttm_validate\n");
return ret;
}
ret = validate_sync(chan, nvbo);
if (unlikely(ret)) {
- NV_ERROR(drm, "fail post-validate sync\n");
+ NV_ERROR(cli, "fail post-validate sync\n");
return ret;
}
@@ -478,7 +491,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
uint64_t user_buffers, int nr_buffers,
struct validate_op *op, int *apply_relocs)
{
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
int ret, relocs = 0;
INIT_LIST_HEAD(&op->vram_list);
@@ -491,32 +504,32 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate_init\n");
+ NV_ERROR(cli, "validate_init\n");
return ret;
}
- ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate vram_list\n");
+ NV_ERROR(cli, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}
relocs += ret;
- ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate gart_list\n");
+ NV_ERROR(cli, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}
relocs += ret;
- ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate both_list\n");
+ NV_ERROR(cli, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -545,11 +558,10 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
}
static int
-nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct drm_nouveau_gem_pushbuf *req,
struct drm_nouveau_gem_pushbuf_bo *bo)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
int ret = 0;
unsigned i;
@@ -565,7 +577,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
uint32_t data;
if (unlikely(r->bo_index > req->nr_buffers)) {
- NV_ERROR(drm, "reloc bo index invalid\n");
+ NV_ERROR(cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -575,7 +587,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
continue;
if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
- NV_ERROR(drm, "reloc container bo index invalid\n");
+ NV_ERROR(cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -583,7 +595,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
if (unlikely(r->reloc_bo_offset + 4 >
nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
- NV_ERROR(drm, "reloc outside of bo\n");
+ NV_ERROR(cli, "reloc outside of bo\n");
ret = -EINVAL;
break;
}
@@ -592,7 +604,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
&nvbo->kmap);
if (ret) {
- NV_ERROR(drm, "failed kmap for reloc\n");
+ NV_ERROR(cli, "failed kmap for reloc\n");
break;
}
nvbo->validate_mapped = true;
@@ -617,7 +629,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
spin_unlock(&nvbo->bo.bdev->fence_lock);
if (ret) {
- NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
+ NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
break;
}
@@ -633,6 +645,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_abi16_chan *temp;
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_nouveau_gem_pushbuf *req = data;
@@ -662,19 +675,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
goto out_next;
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
- NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
+ NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
- NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
+ NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
- NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
+ NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return nouveau_abi16_put(abi16, -EINVAL);
}
@@ -692,7 +705,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
/* Ensure all push buffers are on validate list */
for (i = 0; i < req->nr_push; i++) {
if (push[i].bo_index >= req->nr_buffers) {
- NV_ERROR(drm, "push %d buffer not in list\n", i);
+ NV_ERROR(cli, "push %d buffer not in list\n", i);
ret = -EINVAL;
goto out_prevalid;
}
@@ -703,15 +716,15 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate: %d\n", ret);
+ NV_ERROR(cli, "validate: %d\n", ret);
goto out_prevalid;
}
/* Apply any relocations that are required */
if (do_reloc) {
- ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
+ ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
if (ret) {
- NV_ERROR(drm, "reloc apply: %d\n", ret);
+ NV_ERROR(cli, "reloc apply: %d\n", ret);
goto out;
}
}
@@ -719,7 +732,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (chan->dma.ib_max) {
ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
- NV_ERROR(drm, "nv50cal_space: %d\n", ret);
+ NV_ERROR(cli, "nv50cal_space: %d\n", ret);
goto out;
}
@@ -734,7 +747,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (nv_device(drm->device)->chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
- NV_ERROR(drm, "cal_space: %d\n", ret);
+ NV_ERROR(cli, "cal_space: %d\n", ret);
goto out;
}
@@ -748,7 +761,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
} else {
ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
- NV_ERROR(drm, "jmp_space: %d\n", ret);
+ NV_ERROR(cli, "jmp_space: %d\n", ret);
goto out;
}
@@ -784,9 +797,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
}
- ret = nouveau_fence_new(chan, &fence);
+ ret = nouveau_fence_new(chan, false, &fence);
if (ret) {
- NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
+ NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 5c1049236d22..8d7a3f0aeb86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -35,9 +35,11 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
-extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
-extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
+extern int nouveau_gem_prime_pin(struct drm_gem_object *);
+extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
+extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
+ struct drm_device *, size_t size, struct sg_table *);
+extern void *nouveau_gem_prime_vmap(struct drm_gem_object *);
+extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index a701ff5ffa5b..bb54098c6d97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -409,6 +409,81 @@ static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
NULL, 0);
static ssize_t
+nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", 100);
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO,
+ nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
+ struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_temp1_auto_point1_temp,
+ nouveau_hwmon_set_temp1_auto_point1_temp, 0);
+
+static ssize_t
+nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
+ struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_temp1_auto_point1_temp_hyst,
+ nouveau_hwmon_set_temp1_auto_point1_temp_hyst, 0);
+
+static ssize_t
nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
@@ -439,6 +514,38 @@ static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
0);
static ssize_t
+nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_max_temp_hyst,
+ nouveau_hwmon_set_max_temp_hyst, 0);
+
+static ssize_t
nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
char *buf)
{
@@ -471,6 +578,107 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
nouveau_hwmon_set_critical_temp,
0);
+static ssize_t
+nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp_hyst(struct device *d,
+ struct device_attribute *a,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_critical_temp_hyst,
+ nouveau_hwmon_set_critical_temp_hyst, 0);
+static ssize_t
+nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN, value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_emergency_temp,
+ nouveau_hwmon_set_emergency_temp,
+ 0);
+
+static ssize_t
+nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
+ struct device_attribute *a,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_emergency_temp_hyst,
+ nouveau_hwmon_set_emergency_temp_hyst,
+ 0);
+
static ssize_t nouveau_hwmon_show_name(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -490,7 +698,7 @@ static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
NULL, 0);
static ssize_t
-nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
+nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
@@ -499,7 +707,7 @@ nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
}
-static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
NULL, 0);
static ssize_t
@@ -665,14 +873,21 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
&sensor_dev_attr_update_rate.dev_attr.attr,
NULL
};
static struct attribute *hwmon_fan_rpm_attributes[] = {
- &sensor_dev_attr_fan0_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
NULL
};
static struct attribute *hwmon_pwm_fan_attributes[] = {
@@ -717,7 +932,7 @@ nouveau_hwmon_init(struct drm_device *dev)
dev_set_drvdata(hwmon_dev, dev);
/* default sysfs entries */
- ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
+ ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup);
if (ret) {
if (ret)
goto error;
@@ -728,7 +943,7 @@ nouveau_hwmon_init(struct drm_device *dev)
* the gpio entries for pwm fan control even when there's no
* actual fan connected to it... therm table? */
if (therm->fan_get && therm->fan_get(therm) >= 0) {
- ret = sysfs_create_group(&dev->pdev->dev.kobj,
+ ret = sysfs_create_group(&hwmon_dev->kobj,
&hwmon_pwm_fan_attrgroup);
if (ret)
goto error;
@@ -736,7 +951,7 @@ nouveau_hwmon_init(struct drm_device *dev)
/* if the card can read the fan rpm */
if (therm->fan_sense(therm) >= 0) {
- ret = sysfs_create_group(&dev->pdev->dev.kobj,
+ ret = sysfs_create_group(&hwmon_dev->kobj,
&hwmon_fan_rpm_attrgroup);
if (ret)
goto error;
@@ -764,10 +979,10 @@ nouveau_hwmon_fini(struct drm_device *dev)
struct nouveau_pm *pm = nouveau_pm(dev);
if (pm->hwmon) {
- sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
- sysfs_remove_group(&dev->pdev->dev.kobj,
+ sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
+ sysfs_remove_group(&pm->hwmon->kobj,
&hwmon_pwm_fan_attrgroup);
- sysfs_remove_group(&dev->pdev->dev.kobj,
+ sysfs_remove_group(&pm->hwmon->kobj,
&hwmon_fan_rpm_attrgroup);
hwmon_device_unregister(pm->hwmon);
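
The new hwmon callbacks above follow the sysfs convention that temperatures are exposed in millidegrees Celsius, while the therm attributes they wrap store whole degrees, hence the multiply-by-1000 on show and divide-by-1000 on store. A small standalone illustration of that conversion (plain userspace C; the names are made up):

	#include <stdio.h>

	static long deg_to_mdeg(long deg)  { return deg * 1000; }  /* show path  */
	static long mdeg_to_deg(long mdeg) { return mdeg / 1000; } /* store path */

	int main(void)
	{
		long fan_boost_thrs = 90;    /* stored as whole degrees */

		printf("%ld\n", deg_to_mdeg(fan_boost_thrs)); /* what userspace reads: 90000 */
		printf("%ld\n", mdeg_to_deg(95000));          /* what gets written back: 95  */
		return 0;
	}
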
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index b8e05ae38212..f53e10874cae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -22,126 +22,42 @@
* Authors: Dave Airlie
*/
-#include <linux/dma-buf.h>
-
#include <drm/drmP.h>
#include "nouveau_drm.h"
#include "nouveau_gem.h"
-static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
- struct nouveau_bo *nvbo = attachment->dmabuf->priv;
- struct drm_device *dev = nvbo->gem->dev;
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int npages = nvbo->bo.num_pages;
- struct sg_table *sg;
- int nents;
-
- mutex_lock(&dev->struct_mutex);
- sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
- nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
- mutex_unlock(&dev->struct_mutex);
- return sg;
-}
-
-static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg, enum dma_data_direction dir)
-{
- dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
- sg_free_table(sg);
- kfree(sg);
-}
-
-static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct nouveau_bo *nvbo = dma_buf->priv;
-
- if (nvbo->gem->export_dma_buf == dma_buf) {
- nvbo->gem->export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(nvbo->gem);
- }
-}
-
-static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-}
-static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
+ return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
}
-static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
{
-
-}
-
-static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
-{
- return -EINVAL;
-}
-
-static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
-{
- struct nouveau_bo *nvbo = dma_buf->priv;
- struct drm_device *dev = nvbo->gem->dev;
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int ret;
- mutex_lock(&dev->struct_mutex);
- if (nvbo->vmapping_count) {
- nvbo->vmapping_count++;
- goto out_unlock;
- }
-
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
&nvbo->dma_buf_vmap);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
+ if (ret)
return ERR_PTR(ret);
- }
- nvbo->vmapping_count = 1;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
+
return nvbo->dma_buf_vmap.virtual;
}
-static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
- struct nouveau_bo *nvbo = dma_buf->priv;
- struct drm_device *dev = nvbo->gem->dev;
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
- mutex_lock(&dev->struct_mutex);
- nvbo->vmapping_count--;
- if (nvbo->vmapping_count == 0) {
- ttm_bo_kunmap(&nvbo->dma_buf_vmap);
- }
- mutex_unlock(&dev->struct_mutex);
+ ttm_bo_kunmap(&nvbo->dma_buf_vmap);
}
-static const struct dma_buf_ops nouveau_dmabuf_ops = {
- .map_dma_buf = nouveau_gem_map_dma_buf,
- .unmap_dma_buf = nouveau_gem_unmap_dma_buf,
- .release = nouveau_gem_dmabuf_release,
- .kmap = nouveau_gem_kmap,
- .kmap_atomic = nouveau_gem_kmap_atomic,
- .kunmap = nouveau_gem_kunmap,
- .kunmap_atomic = nouveau_gem_kunmap_atomic,
- .mmap = nouveau_gem_prime_mmap,
- .vmap = nouveau_gem_prime_vmap,
- .vunmap = nouveau_gem_prime_vunmap,
-};
-
-static int
-nouveau_prime_new(struct drm_device *dev,
- size_t size,
- struct sg_table *sg,
- struct nouveau_bo **pnvbo)
+struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg)
{
struct nouveau_bo *nvbo;
u32 flags = 0;
@@ -150,24 +66,22 @@ nouveau_prime_new(struct drm_device *dev,
flags = TTM_PL_FLAG_TT;
ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
- sg, pnvbo);
+ sg, &nvbo);
if (ret)
- return ret;
- nvbo = *pnvbo;
+ return ERR_PTR(ret);
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
- nouveau_bo_ref(NULL, pnvbo);
- return -ENOMEM;
+ nouveau_bo_ref(NULL, &nvbo);
+ return ERR_PTR(-ENOMEM);
}
nvbo->gem->driver_private = nvbo;
- return 0;
+ return nvbo->gem;
}
-struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags)
+int nouveau_gem_prime_pin(struct drm_gem_object *obj)
{
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int ret = 0;
@@ -175,52 +89,7 @@ struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
/* pin buffer into GTT */
ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
if (ret)
- return ERR_PTR(-EINVAL);
-
- return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
-}
-
-struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sg;
- struct nouveau_bo *nvbo;
- int ret;
-
- if (dma_buf->ops == &nouveau_dmabuf_ops) {
- nvbo = dma_buf->priv;
- if (nvbo->gem) {
- if (nvbo->gem->dev == dev) {
- drm_gem_object_reference(nvbo->gem);
- dma_buf_put(dma_buf);
- return nvbo->gem;
- }
- }
- }
- /* need to attach */
- attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach))
- return ERR_PTR(PTR_ERR(attach));
-
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
- if (ret)
- goto fail_unmap;
-
- nvbo->gem->import_attach = attach;
-
- return nvbo->gem;
+ return -EINVAL;
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
- dma_buf_detach(dma_buf, attach);
- return ERR_PTR(ret);
+ return 0;
}
-
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 39ffc07f906b..7e24cdf1cb39 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -490,8 +490,8 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
*/
- if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
- dev->pci_device == 0x0329) {
+ if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 ||
+ dev->pci_device == 0x0189 || dev->pci_device == 0x0329) {
if (mode == DRM_MODE_DPMS_ON) {
nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 2cd6fb8c548e..ad48444c385c 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -22,6 +22,9 @@
* Author: Ben Skeggs
*/
+#include <core/object.h>
+#include <core/class.h>
+
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -31,6 +34,8 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
+#include <subdev/i2c.h>
+
int
nv04_display_early_init(struct drm_device *dev)
{
@@ -53,6 +58,7 @@ int
nv04_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
@@ -71,6 +77,11 @@ nv04_display_create(struct drm_device *dev)
nouveau_hw_save_vga_fonts(dev, 1);
+ ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, 0xd1500000,
+ NV04_DISP_CLASS, NULL, 0, &disp->core);
+ if (ret)
+ return ret;
+
nv04_crtc_create(dev, 0);
if (nv_two_heads(dev))
nv04_crtc_create(dev, 1);
@@ -114,6 +125,11 @@ nv04_display_create(struct drm_device *dev)
}
}
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ nv_encoder->i2c = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+ }
+
/* Save previous state */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->save(crtc);
@@ -140,7 +156,7 @@ nv04_display_destroy(struct drm_device *dev)
.crtc = crtc,
};
- crtc->funcs->set_config(&modeset);
+ drm_mode_set_config_internal(&modeset);
}
/* Restore state */
diff --git a/drivers/gpu/drm/nouveau/nv04_display.h b/drivers/gpu/drm/nouveau/nv04_display.h
index 45322802e37d..a0a031dad13f 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.h
+++ b/drivers/gpu/drm/nouveau/nv04_display.h
@@ -80,6 +80,7 @@ struct nv04_display {
struct nv04_mode_state saved_reg;
uint32_t saved_vga_font[4][16384];
uint32_t dac_users[4];
+ struct nouveau_object *core;
};
static inline struct nv04_display *
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index a220b94ba9f2..94eadd1dd10a 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -78,6 +78,9 @@ nv04_fence_context_new(struct nouveau_channel *chan)
struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (fctx) {
nouveau_fence_context_new(&fctx->base);
+ fctx->base.emit = nv04_fence_emit;
+ fctx->base.sync = nv04_fence_sync;
+ fctx->base.read = nv04_fence_read;
chan->fence = fctx;
return 0;
}
@@ -104,8 +107,5 @@ nv04_fence_create(struct nouveau_drm *drm)
priv->base.dtor = nv04_fence_destroy;
priv->base.context_new = nv04_fence_context_new;
priv->base.context_del = nv04_fence_context_del;
- priv->base.emit = nv04_fence_emit;
- priv->base.sync = nv04_fence_sync;
- priv->base.read = nv04_fence_read;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 62e826a139b3..4a69ccdef9b4 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -184,14 +184,23 @@ static const struct drm_encoder_funcs nv04_tv_funcs = {
.destroy = nv04_tv_destroy,
};
+static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = {
+ .dpms = nv04_tv_dpms,
+ .save = drm_i2c_encoder_save,
+ .restore = drm_i2c_encoder_restore,
+ .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .prepare = nv04_tv_prepare,
+ .commit = nv04_tv_commit,
+ .mode_set = nv04_tv_mode_set,
+ .detect = drm_i2c_encoder_detect,
+};
+
int
nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
struct drm_device *dev = connector->dev;
- struct drm_encoder_helper_funcs *hfuncs;
- struct drm_encoder_slave_funcs *sfuncs;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index);
@@ -207,17 +216,11 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
if (!nv_encoder)
return -ENOMEM;
- hfuncs = kzalloc(sizeof(*hfuncs), GFP_KERNEL);
- if (!hfuncs) {
- ret = -ENOMEM;
- goto fail_free;
- }
-
/* Initialize the common members */
encoder = to_drm_encoder(nv_encoder);
drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC);
- drm_encoder_helper_add(encoder, hfuncs);
+ drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
@@ -230,30 +233,14 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
if (ret < 0)
goto fail_cleanup;
- /* Fill the function pointers */
- sfuncs = get_slave_funcs(encoder);
-
- *hfuncs = (struct drm_encoder_helper_funcs) {
- .dpms = nv04_tv_dpms,
- .save = sfuncs->save,
- .restore = sfuncs->restore,
- .mode_fixup = sfuncs->mode_fixup,
- .prepare = nv04_tv_prepare,
- .commit = nv04_tv_commit,
- .mode_set = nv04_tv_mode_set,
- .detect = sfuncs->detect,
- };
-
/* Attach it to the specified connector. */
- sfuncs->create_resources(encoder, connector);
+ get_slave_funcs(encoder)->create_resources(encoder, connector);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
fail_cleanup:
drm_encoder_cleanup(encoder);
- kfree(hfuncs);
-fail_free:
kfree(nv_encoder);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 03017f24d593..06f434f03fba 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -27,18 +27,7 @@
#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include "nouveau_fence.h"
-
-struct nv10_fence_chan {
- struct nouveau_fence_chan base;
-};
-
-struct nv10_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_bo *bo;
- spinlock_t lock;
- u32 sequence;
-};
+#include "nv10_fence.h"
int
nv10_fence_emit(struct nouveau_fence *fence)
@@ -61,45 +50,6 @@ nv10_fence_sync(struct nouveau_fence *fence,
return -ENODEV;
}
-int
-nv17_fence_sync(struct nouveau_fence *fence,
- struct nouveau_channel *prev, struct nouveau_channel *chan)
-{
- struct nv10_fence_priv *priv = chan->drm->fence;
- u32 value;
- int ret;
-
- if (!mutex_trylock(&prev->cli->mutex))
- return -EBUSY;
-
- spin_lock(&priv->lock);
- value = priv->sequence;
- priv->sequence += 2;
- spin_unlock(&priv->lock);
-
- ret = RING_SPACE(prev, 5);
- if (!ret) {
- BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
- OUT_RING (prev, NvSema);
- OUT_RING (prev, 0);
- OUT_RING (prev, value + 0);
- OUT_RING (prev, value + 1);
- FIRE_RING (prev);
- }
-
- if (!ret && !(ret = RING_SPACE(chan, 5))) {
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
- OUT_RING (chan, NvSema);
- OUT_RING (chan, 0);
- OUT_RING (chan, value + 1);
- OUT_RING (chan, value + 2);
- FIRE_RING (chan);
- }
-
- mutex_unlock(&prev->cli->mutex);
- return 0;
-}
-
u32
nv10_fence_read(struct nouveau_channel *chan)
{
@@ -115,39 +65,20 @@ nv10_fence_context_del(struct nouveau_channel *chan)
kfree(fctx);
}
-static int
+int
nv10_fence_context_new(struct nouveau_channel *chan)
{
- struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
- int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
nouveau_fence_context_new(&fctx->base);
-
- if (priv->bo) {
- struct ttm_mem_reg *mem = &priv->bo->bo.mem;
- struct nouveau_object *object;
- u32 start = mem->start * PAGE_SIZE;
- u32 limit = mem->start + mem->size - 1;
-
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = start,
- .limit = limit,
- }, sizeof(struct nv_dma_class),
- &object);
- }
-
- if (ret)
- nv10_fence_context_del(chan);
- return ret;
+ fctx->base.emit = nv10_fence_emit;
+ fctx->base.read = nv10_fence_read;
+ fctx->base.sync = nv10_fence_sync;
+ return 0;
}
void
@@ -162,18 +93,10 @@ nv10_fence_destroy(struct nouveau_drm *drm)
kfree(priv);
}
-void nv17_fence_resume(struct nouveau_drm *drm)
-{
- struct nv10_fence_priv *priv = drm->fence;
-
- nouveau_bo_wr32(priv->bo, 0, priv->sequence);
-}
-
int
nv10_fence_create(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv;
- int ret = 0;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -182,33 +105,6 @@ nv10_fence_create(struct nouveau_drm *drm)
priv->base.dtor = nv10_fence_destroy;
priv->base.context_new = nv10_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
- priv->base.emit = nv10_fence_emit;
- priv->base.read = nv10_fence_read;
- priv->base.sync = nv10_fence_sync;
spin_lock_init(&priv->lock);
-
- if (nv_device(drm->device)->chipset >= 0x17) {
- ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &priv->bo);
- if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (!ret) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
- }
-
- if (ret == 0) {
- nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
- priv->base.sync = nv17_fence_sync;
- priv->base.resume = nv17_fence_resume;
- }
- }
-
- if (ret)
- nv10_fence_destroy(drm);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
new file mode 100644
index 000000000000..e5d9204826c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -0,0 +1,19 @@
+#ifndef __NV10_FENCE_H_
+#define __NV10_FENCE_H_
+
+#include <core/os.h>
+#include "nouveau_fence.h"
+#include "nouveau_bo.h"
+
+struct nv10_fence_chan {
+ struct nouveau_fence_chan base;
+};
+
+struct nv10_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_bo *bo;
+ spinlock_t lock;
+ u32 sequence;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
new file mode 100644
index 000000000000..8e47a9bae8c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nv10_fence.h"
+
+int
+nv17_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ struct nv10_fence_priv *priv = chan->drm->fence;
+ u32 value;
+ int ret;
+
+ if (!mutex_trylock(&prev->cli->mutex))
+ return -EBUSY;
+
+ spin_lock(&priv->lock);
+ value = priv->sequence;
+ priv->sequence += 2;
+ spin_unlock(&priv->lock);
+
+ ret = RING_SPACE(prev, 5);
+ if (!ret) {
+ BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+ OUT_RING (prev, NvSema);
+ OUT_RING (prev, 0);
+ OUT_RING (prev, value + 0);
+ OUT_RING (prev, value + 1);
+ FIRE_RING (prev);
+ }
+
+ if (!ret && !(ret = RING_SPACE(chan, 5))) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+ OUT_RING (chan, NvSema);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, value + 1);
+ OUT_RING (chan, value + 2);
+ FIRE_RING (chan);
+ }
+
+ mutex_unlock(&prev->cli->mutex);
+ return 0;
+}
+
+static int
+nv17_fence_context_new(struct nouveau_channel *chan)
+{
+ struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_chan *fctx;
+ struct ttm_mem_reg *mem = &priv->bo->bo.mem;
+ struct nouveau_object *object;
+ u32 start = mem->start * PAGE_SIZE;
+ u32 limit = mem->start + mem->size - 1;
+ int ret = 0;
+
+ fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ nouveau_fence_context_new(&fctx->base);
+ fctx->base.emit = nv10_fence_emit;
+ fctx->base.read = nv10_fence_read;
+ fctx->base.sync = nv17_fence_sync;
+
+ ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+ NvSema, 0x0002,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = start,
+ .limit = limit,
+ }, sizeof(struct nv_dma_class),
+ &object);
+ if (ret)
+ nv10_fence_context_del(chan);
+ return ret;
+}
+
+void
+nv17_fence_resume(struct nouveau_drm *drm)
+{
+ struct nv10_fence_priv *priv = drm->fence;
+
+ nouveau_bo_wr32(priv->bo, 0, priv->sequence);
+}
+
+int
+nv17_fence_create(struct nouveau_drm *drm)
+{
+ struct nv10_fence_priv *priv;
+ int ret = 0;
+
+ priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.dtor = nv10_fence_destroy;
+ priv->base.resume = nv17_fence_resume;
+ priv->base.context_new = nv17_fence_context_new;
+ priv->base.context_del = nv10_fence_context_del;
+ spin_lock_init(&priv->lock);
+
+ ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &priv->bo);
+ if (!ret) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
+ if (ret) {
+ nv10_fence_destroy(drm);
+ return ret;
+ }
+
+ nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
+ return ret;
+}
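
nv17_fence_create() above acquires its semaphore buffer in steps (allocate, pin, map) and unwinds in reverse order when a later step fails. A standalone sketch of that acquire/unwind shape, with stand-in steps rather than the real buffer-object calls:

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-ins for the allocate/pin/map steps; each can fail independently. */
	static int step_alloc(void)  { return 0; }
	static int step_pin(void)    { return 0; }
	static int step_map(void)    { return 0; }
	static void undo_pin(void)   { puts("unpin"); }
	static void undo_alloc(void) { puts("free"); }

	static int create(void)
	{
		int ret = step_alloc();
		if (ret)
			return ret;

		ret = step_pin();
		if (!ret) {
			ret = step_map();
			if (ret)
				undo_pin();     /* map failed: drop the pin */
		}
		if (ret)
			undo_alloc();           /* any later failure: drop the buffer */
		return ret;
	}

	int main(void)
	{
		return create() ? EXIT_FAILURE : EXIT_SUCCESS;
	}
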
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 2ca276ada507..977e42be2050 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -768,7 +768,7 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
.crtc = crtc,
};
- crtc->funcs->set_config(&modeset);
+ drm_mode_set_config_internal(&modeset);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 35874085a61e..a6237c9cbbc3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -43,6 +43,7 @@
#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
+#include <subdev/i2c.h>
#define EVO_DMA_NR 9
@@ -128,6 +129,11 @@ struct nv50_dmac {
struct nv50_chan base;
dma_addr_t handle;
u32 *ptr;
+
+ /* Protects against concurrent pushbuf access to this channel, lock is
+ * grabbed by evo_wait (if the pushbuf reservation is successful) and
+ * dropped again by evo_kick. */
+ struct mutex lock;
};
static void
@@ -271,6 +277,8 @@ nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
u32 pushbuf = *(u32 *)data;
int ret;
+ mutex_init(&dmac->lock);
+
dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
&dmac->handle);
if (!dmac->ptr)
@@ -395,11 +403,13 @@ evo_wait(void *evoc, int nr)
struct nv50_dmac *dmac = evoc;
u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
+ mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
nv_wo32(dmac->base.user, 0x0000, 0x00000000);
if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+ mutex_unlock(&dmac->lock);
NV_ERROR(dmac->base.user, "channel stalled\n");
return NULL;
}
@@ -415,6 +425,7 @@ evo_kick(u32 *push, void *evoc)
{
struct nv50_dmac *dmac = evoc;
nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+ mutex_unlock(&dmac->lock);
}
#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
@@ -423,7 +434,10 @@ evo_kick(u32 *push, void *evoc)
static bool
evo_sync_wait(void *data)
{
- return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
+ if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000)
+ return true;
+ usleep_range(1, 2);
+ return false;
}
static int
@@ -502,7 +516,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (ret)
return ret;
- if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, sync->sem.offset);
@@ -512,24 +526,36 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (chan, sync->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
- OUT_RING (chan, NvSema);
- else
- OUT_RING (chan, chan->vram);
+ OUT_RING (chan, NvSema);
+ } else
+ if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
+ offset += sync->sem.offset;
+
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 0xf00d0000 | sync->sem.value);
+ OUT_RING (chan, 0x00000002);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+ OUT_RING (chan, 0x74b1e000);
+ OUT_RING (chan, 0x00000001);
} else {
- u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
+ u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
offset += sync->sem.offset;
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | sync->sem.value);
- OUT_RING (chan, 0x1002);
+ OUT_RING (chan, 0x00001002);
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
- OUT_RING (chan, 0x1001);
+ OUT_RING (chan, 0x00001001);
}
FIRE_RING (chan);
@@ -1493,9 +1519,6 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
evo_mthd(push, 0x0180 + (or * 0x020), 1);
evo_data(push, 0x00000000);
}
-
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
evo_kick(push, mast);
}
}
@@ -1542,20 +1565,23 @@ static const struct drm_encoder_funcs nv50_dac_func = {
static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
- struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
+ int type = DRM_MODE_ENCODER_DAC;
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC);
+ drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type);
drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
drm_mode_connector_attach_encoder(connector, encoder);
@@ -1664,9 +1690,6 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
}
static bool
@@ -1709,9 +1732,6 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
evo_mthd(push, 0x0200 + (or * 0x20), 1);
evo_data(push, 0x00000000);
}
-
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
evo_kick(push, mast);
}
@@ -1723,14 +1743,6 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
}
static void
-nv50_sor_prepare(struct drm_encoder *encoder)
-{
- nv50_sor_disconnect(encoder);
- if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
- evo_sync(encoder->dev);
-}
-
-static void
nv50_sor_commit(struct drm_encoder *encoder)
{
}
@@ -1825,8 +1837,13 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
push = evo_wait(nv50_mast(dev), 8);
if (push) {
if (nv50_vers(mast) < NVD0_DISP_CLASS) {
+ u32 ctrl = (depth << 16) | (proto << 8) | owner;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl |= 0x00001000;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl |= 0x00002000;
evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
- evo_data(push, (depth << 16) | (proto << 8) | owner);
+ evo_data(push, ctrl);
} else {
u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
u32 syncs = 0x00000001;
@@ -1862,7 +1879,7 @@ nv50_sor_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
.dpms = nv50_sor_dpms,
.mode_fixup = nv50_sor_mode_fixup,
- .prepare = nv50_sor_prepare,
+ .prepare = nv50_sor_disconnect,
.commit = nv50_sor_commit,
.mode_set = nv50_sor_mode_set,
.disable = nv50_sor_disconnect,
@@ -1876,21 +1893,33 @@ static const struct drm_encoder_funcs nv50_sor_func = {
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
- struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
+ int type;
+
+ switch (dcbe->type) {
+ case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_DP:
+ default:
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ }
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type);
drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
drm_mode_connector_attach_encoder(connector, encoder);
@@ -1898,6 +1927,181 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
}
/******************************************************************************
+ * PIOR
+ *****************************************************************************/
+
+static void
+nv50_pior_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ u32 mthd = (nv_encoder->dcb->type << 12) | nv_encoder->or;
+ u32 ctrl = (mode == DRM_MODE_DPMS_ON);
+ nv_call(disp->core, NV50_DISP_PIOR_PWR + mthd, ctrl);
+}
+
+static bool
+nv50_pior_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+ }
+
+ adjusted_mode->clock *= 2;
+ return true;
+}
+
+static void
+nv50_pior_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ u8 owner = 1 << nv_crtc->index;
+ u8 proto, depth;
+ u32 *push;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ switch (nv_connector->base.display_info.bpc) {
+ case 10: depth = 0x6; break;
+ case 8: depth = 0x5; break;
+ case 6: depth = 0x2; break;
+ default: depth = 0x0; break;
+ }
+
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_DP:
+ proto = 0x0;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ nv50_pior_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(mast, 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ u32 ctrl = (depth << 16) | (proto << 8) | owner;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl |= 0x00001000;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl |= 0x00002000;
+ evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
+ evo_data(push, ctrl);
+ }
+
+ evo_kick(push, mast);
+ }
+
+ nv_encoder->crtc = encoder->crtc;
+}
+
+static void
+nv50_pior_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nv50_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0700 + (or * 0x040), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
+}
+
+static void
+nv50_pior_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_pior_hfunc = {
+ .dpms = nv50_pior_dpms,
+ .mode_fixup = nv50_pior_mode_fixup,
+ .prepare = nv50_pior_disconnect,
+ .commit = nv50_pior_commit,
+ .mode_set = nv50_pior_mode_set,
+ .disable = nv50_pior_disconnect,
+ .get_crtc = nv50_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nv50_pior_func = {
+ .destroy = nv50_pior_destroy,
+};
+
+static int
+nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c_port *ddc = NULL;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+ int type;
+
+ switch (dcbe->type) {
+ case DCB_OUTPUT_TMDS:
+ ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(dcbe->extdev));
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ case DCB_OUTPUT_DP:
+ ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(dcbe->extdev));
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->i2c = ddc;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type);
+ drm_encoder_helper_add(encoder, &nv50_pior_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
* Init
*****************************************************************************/
void
@@ -1913,7 +2117,7 @@ nv50_display_init(struct drm_device *dev)
evo_mthd(push, 0x0088, 1);
evo_data(push, NvEvoSync);
evo_kick(push, nv50_mast(dev));
- return evo_sync(dev);
+ return 0;
}
return -EBUSY;
@@ -2019,25 +2223,28 @@ nv50_display_create(struct drm_device *dev)
if (IS_ERR(connector))
continue;
- if (dcbe->location != DCB_LOC_ON_CHIP) {
- NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
+ if (dcbe->location == DCB_LOC_ON_CHIP) {
+ switch (dcbe->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ case DCB_OUTPUT_DP:
+ ret = nv50_sor_create(connector, dcbe);
+ break;
+ case DCB_OUTPUT_ANALOG:
+ ret = nv50_dac_create(connector, dcbe);
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+ } else {
+ ret = nv50_pior_create(connector, dcbe);
}
- switch (dcbe->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- nv50_sor_create(connector, dcbe);
- break;
- case DCB_OUTPUT_ANALOG:
- nv50_dac_create(connector, dcbe);
- break;
- default:
- NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
+ if (ret) {
+ NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
+ dcbe->location, dcbe->type,
+ ffs(dcbe->or) - 1, ret);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index d889f3ac0d41..f9701e567db8 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -27,27 +27,16 @@
#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include "nouveau_fence.h"
+#include "nv10_fence.h"
#include "nv50_display.h"
-struct nv50_fence_chan {
- struct nouveau_fence_chan base;
-};
-
-struct nv50_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_bo *bo;
- spinlock_t lock;
- u32 sequence;
-};
-
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->drm->dev;
- struct nv50_fence_priv *priv = chan->drm->fence;
- struct nv50_fence_chan *fctx;
+ struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
int ret, i;
@@ -57,6 +46,9 @@ nv50_fence_context_new(struct nouveau_channel *chan)
return -ENOMEM;
nouveau_fence_context_new(&fctx->base);
+ fctx->base.emit = nv10_fence_emit;
+ fctx->base.read = nv10_fence_read;
+ fctx->base.sync = nv17_fence_sync;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
NvSema, 0x0002,
@@ -91,7 +83,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
int
nv50_fence_create(struct nouveau_drm *drm)
{
- struct nv50_fence_priv *priv;
+ struct nv10_fence_priv *priv;
int ret = 0;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -99,11 +91,9 @@ nv50_fence_create(struct nouveau_drm *drm)
return -ENOMEM;
priv->base.dtor = nv10_fence_destroy;
+ priv->base.resume = nv17_fence_resume;
priv->base.context_new = nv50_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
- priv->base.emit = nv10_fence_emit;
- priv->base.read = nv10_fence_read;
- priv->base.sync = nv17_fence_sync;
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -119,13 +109,11 @@ nv50_fence_create(struct nouveau_drm *drm)
nouveau_bo_ref(NULL, &priv->bo);
}
- if (ret == 0) {
- nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
- priv->base.sync = nv17_fence_sync;
- priv->base.resume = nv17_fence_resume;
+ if (ret) {
+ nv10_fence_destroy(drm);
+ return ret;
}
- if (ret)
- nv10_fence_destroy(drm);
+ nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index c686650584b6..9fd475c89820 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -23,6 +23,7 @@
*/
#include <core/object.h>
+#include <core/client.h>
#include <core/class.h>
#include <engine/fifo.h>
@@ -33,79 +34,115 @@
#include "nv50_display.h"
-struct nv84_fence_chan {
- struct nouveau_fence_chan base;
-};
-
-struct nv84_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_gpuobj *mem;
-};
+u64
+nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
+{
+ struct nv84_fence_chan *fctx = chan->fence;
+ return fctx->dispc_vma[crtc].offset;
+}
static int
-nv84_fence_emit(struct nouveau_fence *fence)
+nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nouveau_channel *chan = fence->channel;
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- int ret = RING_SPACE(chan, 7);
+ int ret = RING_SPACE(chan, 8);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, NvSema);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(fifo->chid * 16));
- OUT_RING (chan, lower_32_bits(fifo->chid * 16));
- OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, chan->vram);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
+ OUT_RING (chan, upper_32_bits(virtual));
+ OUT_RING (chan, lower_32_bits(virtual));
+ OUT_RING (chan, sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
}
return ret;
}
-
static int
-nv84_fence_sync(struct nouveau_fence *fence,
- struct nouveau_channel *prev, struct nouveau_channel *chan)
+nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nouveau_fifo_chan *fifo = (void *)prev->object;
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, NvSema);
+ OUT_RING (chan, chan->vram);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(fifo->chid * 16));
- OUT_RING (chan, lower_32_bits(fifo->chid * 16));
- OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, upper_32_bits(virtual));
+ OUT_RING (chan, lower_32_bits(virtual));
+ OUT_RING (chan, sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
FIRE_RING (chan);
}
return ret;
}
+static int
+nv84_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ struct nv84_fence_chan *fctx = chan->fence;
+ struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ u64 addr = fifo->chid * 16;
+
+ if (fence->sysmem)
+ addr += fctx->vma_gart.offset;
+ else
+ addr += fctx->vma.offset;
+
+ return fctx->base.emit32(chan, addr, fence->sequence);
+}
+
+static int
+nv84_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ struct nv84_fence_chan *fctx = chan->fence;
+ struct nouveau_fifo_chan *fifo = (void *)prev->object;
+ u64 addr = fifo->chid * 16;
+
+ if (fence->sysmem)
+ addr += fctx->vma_gart.offset;
+ else
+ addr += fctx->vma.offset;
+
+ return fctx->base.sync32(chan, addr, fence->sequence);
+}
+
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
struct nouveau_fifo_chan *fifo = (void *)chan->object;
struct nv84_fence_priv *priv = chan->drm->fence;
- return nv_ro32(priv->mem, fifo->chid * 16);
+ return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
}
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->drm->dev;
+ struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
+ int i;
+
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
+ }
+
+ nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
+ nouveau_bo_vma_del(priv->bo, &fctx->vma);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
kfree(fctx);
}
-static int
+int
nv84_fence_context_new(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->drm->dev;
struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ struct nouveau_client *client = nouveau_client(fifo);
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
- struct nouveau_object *object;
int ret, i;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
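The emit/sync split above means the chipset-specific emit32()/sync32() hooks only receive a 64-bit GPU virtual address and a sequence number; the common wrappers work out where channel N's 16-byte slot lives. The arithmetic, using the fields introduced in this patch:

	u64 slot = (u64)fifo->chid * 16;			/* one 16-byte slot per channel */
	u64 addr = fence->sysmem ? fctx->vma_gart.offset + slot	/* GART-backed copy */
				 : fctx->vma.offset      + slot;	/* VRAM-backed copy */
	/* addr is then handed to fctx->base.emit32()/sync32() with fence->sequence */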
@@ -113,44 +150,74 @@ nv84_fence_context_new(struct nouveau_channel *chan)
return -ENOMEM;
nouveau_fence_context_new(&fctx->base);
+ fctx->base.emit = nv84_fence_emit;
+ fctx->base.sync = nv84_fence_sync;
+ fctx->base.read = nv84_fence_read;
+ fctx->base.emit32 = nv84_fence_emit32;
+ fctx->base.sync32 = nv84_fence_sync32;
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = priv->mem->addr,
- .limit = priv->mem->addr +
- priv->mem->size - 1,
- }, sizeof(struct nv_dma_class),
- &object);
-
- /* dma objects for display sync channel semaphore blocks */
- for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
+ if (ret == 0) {
+ ret = nouveau_bo_vma_add(priv->bo_gart, client->vm,
+ &fctx->vma_gart);
+ }
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvEvoSema0 + i, 0x003d,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = bo->bo.offset,
- .limit = bo->bo.offset + 0xfff,
- }, sizeof(struct nv_dma_class),
- &object);
+ /* map display semaphore buffers into channel's vm */
+ for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
+ ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
}
+ nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
+
if (ret)
nv84_fence_context_del(chan);
- nv_wo32(priv->mem, fifo->chid * 16, 0x00000000);
return ret;
}
+static bool
+nv84_fence_suspend(struct nouveau_drm *drm)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nv84_fence_priv *priv = drm->fence;
+ int i;
+
+ priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
+ if (priv->suspend) {
+ for (i = 0; i <= pfifo->max; i++)
+ priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
+ }
+
+ return priv->suspend != NULL;
+}
+
+static void
+nv84_fence_resume(struct nouveau_drm *drm)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nv84_fence_priv *priv = drm->fence;
+ int i;
+
+ if (priv->suspend) {
+ for (i = 0; i <= pfifo->max; i++)
+ nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
+ vfree(priv->suspend);
+ priv->suspend = NULL;
+ }
+}
+
static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
- nouveau_gpuobj_ref(NULL, &priv->mem);
+ nouveau_bo_unmap(priv->bo_gart);
+ if (priv->bo_gart)
+ nouveau_bo_unpin(priv->bo_gart);
+ nouveau_bo_ref(NULL, &priv->bo_gart);
+ nouveau_bo_unmap(priv->bo);
+ if (priv->bo)
+ nouveau_bo_unpin(priv->bo);
+ nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
}
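nv84_fence_suspend()/resume() above preserve each channel's last sequence number across a suspend cycle: one u32 per channel is read out of the fence buffer object and written back on resume. The sizing and indexing written out (nouveau_bo_rd32()/wr32() take a 32-bit word index, so channel i's slot starts at word i * 4, i.e. byte i * 16):

	u32 nchan = pfifo->max + 1;			/* channels 0..max inclusive */
	u32 *save = vmalloc(nchan * sizeof(u32));
	if (save)
		for (i = 0; i < nchan; i++)
			save[i] = nouveau_bo_rd32(priv->bo, i * 4);	/* word i*4 == byte i*16 */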
@@ -160,7 +227,6 @@ nv84_fence_create(struct nouveau_drm *drm)
{
struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
struct nv84_fence_priv *priv;
- u32 chan = pfifo->max + 1;
int ret;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -168,14 +234,42 @@ nv84_fence_create(struct nouveau_drm *drm)
return -ENOMEM;
priv->base.dtor = nv84_fence_destroy;
+ priv->base.suspend = nv84_fence_suspend;
+ priv->base.resume = nv84_fence_resume;
priv->base.context_new = nv84_fence_context_new;
priv->base.context_del = nv84_fence_context_del;
- priv->base.emit = nv84_fence_emit;
- priv->base.sync = nv84_fence_sync;
- priv->base.read = nv84_fence_read;
- ret = nouveau_gpuobj_new(drm->device, NULL, chan * 16, 0x1000, 0,
- &priv->mem);
+ init_waitqueue_head(&priv->base.waiting);
+ priv->base.uevent = true;
+
+ ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+ TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (ret == 0) {
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
+ if (ret == 0)
+ ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+ TTM_PL_FLAG_TT, 0, 0, NULL,
+ &priv->bo_gart);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
+ if (ret == 0) {
+ ret = nouveau_bo_map(priv->bo_gart);
+ if (ret)
+ nouveau_bo_unpin(priv->bo_gart);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo_gart);
+ }
+
if (ret)
nv84_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 2a56b1b551cb..9566267fbc42 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -34,203 +34,57 @@
#include "nv50_display.h"
-struct nvc0_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_bo *bo;
- u32 *suspend;
-};
-
-struct nvc0_fence_chan {
- struct nouveau_fence_chan base;
- struct nouveau_vma vma;
- struct nouveau_vma dispc_vma[4];
-};
-
-u64
-nvc0_fence_crtc(struct nouveau_channel *chan, int crtc)
-{
- struct nvc0_fence_chan *fctx = chan->fence;
- return fctx->dispc_vma[crtc].offset;
-}
-
static int
-nvc0_fence_emit(struct nouveau_fence *fence)
+nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nouveau_channel *chan = fence->channel;
- struct nvc0_fence_chan *fctx = chan->fence;
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- u64 addr = fctx->vma.offset + fifo->chid * 16;
- int ret;
-
- ret = RING_SPACE(chan, 5);
+ int ret = RING_SPACE(chan, 6);
if (ret == 0) {
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, fence->sequence);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
+ OUT_RING (chan, upper_32_bits(virtual));
+ OUT_RING (chan, lower_32_bits(virtual));
+ OUT_RING (chan, sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
}
-
return ret;
}
static int
-nvc0_fence_sync(struct nouveau_fence *fence,
- struct nouveau_channel *prev, struct nouveau_channel *chan)
+nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvc0_fence_chan *fctx = chan->fence;
- struct nouveau_fifo_chan *fifo = (void *)prev->object;
- u64 addr = fctx->vma.offset + fifo->chid * 16;
- int ret;
-
- ret = RING_SPACE(chan, 5);
+ int ret = RING_SPACE(chan, 5);
if (ret == 0) {
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, upper_32_bits(virtual));
+ OUT_RING (chan, lower_32_bits(virtual));
+ OUT_RING (chan, sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
FIRE_RING (chan);
}
-
return ret;
}
-static u32
-nvc0_fence_read(struct nouveau_channel *chan)
-{
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- struct nvc0_fence_priv *priv = chan->drm->fence;
- return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
-}
-
-static void
-nvc0_fence_context_del(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->drm->dev;
- struct nvc0_fence_priv *priv = chan->drm->fence;
- struct nvc0_fence_chan *fctx = chan->fence;
- int i;
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
-
- nouveau_bo_vma_del(priv->bo, &fctx->vma);
- nouveau_fence_context_del(&fctx->base);
- chan->fence = NULL;
- kfree(fctx);
-}
-
static int
nvc0_fence_context_new(struct nouveau_channel *chan)
{
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- struct nouveau_client *client = nouveau_client(fifo);
- struct nvc0_fence_priv *priv = chan->drm->fence;
- struct nvc0_fence_chan *fctx;
- int ret, i;
-
- fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- nouveau_fence_context_new(&fctx->base);
-
- ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
- if (ret)
- nvc0_fence_context_del(chan);
-
- /* map display semaphore buffers into channel's vm */
- for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
- ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
+ int ret = nv84_fence_context_new(chan);
+ if (ret == 0) {
+ struct nv84_fence_chan *fctx = chan->fence;
+ fctx->base.emit32 = nvc0_fence_emit32;
+ fctx->base.sync32 = nvc0_fence_sync32;
}
-
- nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
return ret;
}
-static bool
-nvc0_fence_suspend(struct nouveau_drm *drm)
-{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
- struct nvc0_fence_priv *priv = drm->fence;
- int i;
-
- priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
- if (priv->suspend) {
- for (i = 0; i <= pfifo->max; i++)
- priv->suspend[i] = nouveau_bo_rd32(priv->bo, i);
- }
-
- return priv->suspend != NULL;
-}
-
-static void
-nvc0_fence_resume(struct nouveau_drm *drm)
-{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
- struct nvc0_fence_priv *priv = drm->fence;
- int i;
-
- if (priv->suspend) {
- for (i = 0; i <= pfifo->max; i++)
- nouveau_bo_wr32(priv->bo, i, priv->suspend[i]);
- vfree(priv->suspend);
- priv->suspend = NULL;
- }
-}
-
-static void
-nvc0_fence_destroy(struct nouveau_drm *drm)
-{
- struct nvc0_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo);
- if (priv->bo)
- nouveau_bo_unpin(priv->bo);
- nouveau_bo_ref(NULL, &priv->bo);
- drm->fence = NULL;
- kfree(priv);
-}
-
int
nvc0_fence_create(struct nouveau_drm *drm)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
- struct nvc0_fence_priv *priv;
- int ret;
-
- priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.dtor = nvc0_fence_destroy;
- priv->base.suspend = nvc0_fence_suspend;
- priv->base.resume = nvc0_fence_resume;
- priv->base.context_new = nvc0_fence_context_new;
- priv->base.context_del = nvc0_fence_context_del;
- priv->base.emit = nvc0_fence_emit;
- priv->base.sync = nvc0_fence_sync;
- priv->base.read = nvc0_fence_read;
-
- ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
- TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
+ int ret = nv84_fence_create(drm);
if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (ret == 0) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
+ struct nv84_fence_priv *priv = drm->fence;
+ priv->base.context_new = nvc0_fence_context_new;
}
-
- if (ret)
- nvc0_fence_destroy(drm);
return ret;
}
diff --git a/drivers/staging/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 09f65dc3d2c8..09f65dc3d2c8 100644
--- a/drivers/staging/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
diff --git a/drivers/staging/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index d85e058f2845..d85e058f2845 100644
--- a/drivers/staging/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
diff --git a/drivers/gpu/drm/omapdrm/TODO b/drivers/gpu/drm/omapdrm/TODO
new file mode 100644
index 000000000000..4d8c18aa5dd7
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/TODO
@@ -0,0 +1,23 @@
+TODO
+. Where should we do eviction (detach_pages())?  We aren't necessarily
+  accessing the pages via a GART, so maybe we need some other threshold
+  to put a cap on the number of pages that can be pinned.
+  . Use mm_shrinker to trigger unpinning pages (a rough sketch follows below).
+  . This is mainly theoretical since most of these devices don't actually
+    have swap or a hard drive.
+. GEM/shmem backed pages can have existing mappings (kernel linear map,
+ etc..), which isn't really ideal.
+. Revisit GEM sync object infrastructure.  TTM has some framework for this
+  already.  Possibly this could be refactored out and made more common?
+  There should be some way to do this with less wheel-reinvention.
+ . This can be handled by the dma-buf fence/reservation stuff when it
+ lands
+
+Userspace:
+. git://anongit.freedesktop.org/xorg/driver/xf86-video-omap
+
+Currently tested on
+. OMAP3530 beagleboard
+. OMAP4430 pandaboard
+. OMAP4460 pandaboard
+. OMAP5432 uEVM
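The mm_shrinker item in the TODO above could look roughly like the sketch below: register a shrinker whose callback detaches pages from idle, unpinned buffers when the VM asks for memory. This is only an illustration of the idea; the callback signature follows the shrinker API of this kernel generation, and omap_gem_count_evictable()/omap_gem_evict_pages() are hypothetical helpers, not existing omapdrm functions.

	#include <linux/shrinker.h>

	/* hypothetical helpers -- not part of omapdrm */
	static int omap_gem_count_evictable(void);
	static void omap_gem_evict_pages(unsigned long nr);

	static int omap_gem_shrink(struct shrinker *shrinker, struct shrink_control *sc)
	{
		if (sc->nr_to_scan)
			omap_gem_evict_pages(sc->nr_to_scan);	/* detach pages from idle BOs */
		return omap_gem_count_evictable();		/* how much could still be freed */
	}

	static struct shrinker omap_gem_shrinker = {
		.shrink = omap_gem_shrink,
		.seeks  = DEFAULT_SEEKS,
	};
	/* register_shrinker(&omap_gem_shrinker) at driver load,
	 * unregister_shrinker(&omap_gem_shrinker) at unload */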
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 8979c80adb5f..c451c41a7a7d 100644
--- a/drivers/staging/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_connector.c
+ * drivers/gpu/drm/omapdrm/omap_connector.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 32109c09357c..bec66a490b8f 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_crtc.c
+ * drivers/gpu/drm/omapdrm/omap_crtc.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -274,17 +274,16 @@ static void page_flip_worker(struct work_struct *work)
struct omap_crtc *omap_crtc =
container_of(work, struct omap_crtc, page_flip_work);
struct drm_crtc *crtc = &omap_crtc->base;
- struct drm_device *dev = crtc->dev;
struct drm_display_mode *mode = &crtc->mode;
struct drm_gem_object *bo;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&crtc->mutex);
omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
crtc->x << 16, crtc->y << 16,
mode->hdisplay << 16, mode->vdisplay << 16,
vblank_cb, crtc);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&crtc->mutex);
bo = omap_framebuffer_bo(crtc->fb, 0);
drm_gem_object_unreference_unlocked(bo);
@@ -417,7 +416,7 @@ static void apply_worker(struct work_struct *work)
* the callbacks and list modification all serialized
* with respect to modesetting ioctls from userspace.
*/
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&crtc->mutex);
dispc_runtime_get();
/*
@@ -462,16 +461,15 @@ static void apply_worker(struct work_struct *work)
out:
dispc_runtime_put();
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&crtc->mutex);
}
int omap_crtc_apply(struct drm_crtc *crtc,
struct omap_drm_apply *apply)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ WARN_ON(!mutex_is_locked(&crtc->mutex));
/* no need to queue it again if it is already queued: */
if (apply->queued)
diff --git a/drivers/staging/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 2f122e00b51d..c27f59da7f29 100644
--- a/drivers/staging/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_debugfs.c
+ * drivers/gpu/drm/omapdrm/omap_debugfs.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
@@ -57,21 +57,11 @@ static int fb_show(struct seq_file *m, void *arg)
struct drm_device *dev = node->minor->dev;
struct omap_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (ret)
- return ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret) {
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
- }
seq_printf(m, "fbcon ");
omap_framebuffer_describe(priv->fbdev->fb, m);
+ mutex_lock(&dev->mode_config.fb_lock);
list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
if (fb == priv->fbdev->fb)
continue;
@@ -79,9 +69,7 @@ static int fb_show(struct seq_file *m, void *arg)
seq_printf(m, "user ");
omap_framebuffer_describe(fb, m);
}
-
- mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index 58bcd6ae0255..58bcd6ae0255 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 9b794c933c81..9b794c933c81 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 4fdd61e54bd2..4fdd61e54bd2 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 480dc343446c..079c54c6f94c 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_drv.c
+ * drivers/gpu/drm/omapdrm/omap_drv.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -452,9 +452,9 @@ static void dev_lastclose(struct drm_device *dev)
}
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
if (ret)
DBG("failed to restore crtc mode");
}
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index f921027e7500..d4f997bb4ac0 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_drv.h
+ * drivers/gpu/drm/omapdrm/omap_drv.h
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -25,8 +25,8 @@
#include <linux/types.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/omap_drm.h>
#include <linux/platform_data/omap_drm.h>
-#include "omap_drm.h"
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 25fc0c7b4f6d..21d126d0317e 100644
--- a/drivers/staging/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_encoder.c
+ * drivers/gpu/drm/omapdrm/omap_encoder.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index bb4969942148..8031402e7951 100644
--- a/drivers/staging/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_fb.c
+ * drivers/gpu/drm/omapdrm/omap_fb.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -423,14 +423,6 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
}
fb = &omap_fb->base;
- ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
- if (ret) {
- dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
- goto fail;
- }
-
- DBG("create: FB ID: %d (%p)", fb->base.id, fb);
-
omap_fb->format = format;
for (i = 0; i < n; i++) {
@@ -461,6 +453,14 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
return fb;
fail:
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 70f2d6ed2ed3..b11ce609fcc2 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_fbdev.c
+ * drivers/gpu/drm/omapdrm/omap_fbdev.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -131,9 +131,6 @@ static struct fb_ops omap_fb_ops = {
.fb_pan_display = omap_fbdev_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
-
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int omap_fbdev_create(struct drm_fb_helper *helper,
@@ -275,8 +272,10 @@ fail:
if (ret) {
if (fbi)
framebuffer_release(fbi);
- if (fb)
+ if (fb) {
+ drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
+ }
}
return ret;
@@ -294,25 +293,10 @@ static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
DBG("fbdev: get gamma");
}
-static int omap_fbdev_probe(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = omap_fbdev_create(helper, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
.gamma_set = omap_crtc_fb_gamma_set,
.gamma_get = omap_crtc_fb_gamma_get,
- .fb_probe = omap_fbdev_probe,
+ .fb_probe = omap_fbdev_create,
};
static struct drm_fb_helper *get_fb(struct fb_info *fbi)
@@ -365,6 +349,10 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(helper, 32);
priv->fbdev = helper;
@@ -398,8 +386,10 @@ void omap_fbdev_free(struct drm_device *dev)
fbdev = to_omap_fbdev(priv->fbdev);
/* this will free the backing object */
- if (fbdev->fb)
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(fbdev->fb);
drm_framebuffer_remove(fbdev->fb);
+ }
kfree(fbdev);
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 518d03d4d4f3..ebbdf4132e9c 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_gem.c
+ * drivers/gpu/drm/omapdrm/omap_gem.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index a3236abfca3d..ac74d1bc67bf 100644
--- a/drivers/staging/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_gem_dmabuf.c
+ * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
index ffb8cceaeb46..f9eb679eb79b 100644
--- a/drivers/staging/omapdrm/omap_gem_helpers.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_gem_helpers.c
+ * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
@@ -40,7 +40,7 @@ struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
int i, npages;
/* This is the shared memory object that backs the GEM resource */
- inode = obj->filp->f_path.dentry->d_inode;
+ inode = file_inode(obj->filp);
mapping = inode->i_mapping;
npages = obj->size >> PAGE_SHIFT;
diff --git a/drivers/staging/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 2629ba7be6c8..e01303ee00c3 100644
--- a/drivers/staging/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_irq.c
+ * drivers/gpu/drm/omapdrm/omap_irq.c
*
* Copyright (C) 2012 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index c063476db3bb..2882cda6ea19 100644
--- a/drivers/staging/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_plane.c
+ * drivers/gpu/drm/omapdrm/omap_plane.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index efb609510540..efb609510540 100644
--- a/drivers/staging/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
diff --git a/drivers/staging/omapdrm/tcm-sita.h b/drivers/gpu/drm/omapdrm/tcm-sita.h
index 0444f868671c..0444f868671c 100644
--- a/drivers/staging/omapdrm/tcm-sita.h
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.h
diff --git a/drivers/staging/omapdrm/tcm.h b/drivers/gpu/drm/omapdrm/tcm.h
index a8d5ce47686f..a8d5ce47686f 100644
--- a/drivers/staging/omapdrm/tcm.h
+++ b/drivers/gpu/drm/omapdrm/tcm.h
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index ea92bbe3ed37..970f8e92dbb7 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,31 +1,8 @@
-config DRM_RADEON_KMS
- bool "Enable modesetting on radeon by default - NEW DRIVER"
+config DRM_RADEON_UMS
+ bool "Enable userspace modesetting on radeon (DEPRECATED)"
depends on DRM_RADEON
- select BACKLIGHT_CLASS_DEVICE
help
- Choose this option if you want kernel modesetting enabled by default.
+ Choose this option if you still need userspace modesetting.
- This is a completely new driver. It's only part of the existing drm
- for compatibility reasons. It requires an entirely different graphics
- stack above it and works very differently from the old drm stack.
- i.e. don't enable this unless you know what you are doing it may
- cause issues or bugs compared to the previous userspace driver stack.
-
- When kernel modesetting is enabled the IOCTL of radeon/drm
- driver are considered as invalid and an error message is printed
- in the log and they return failure.
-
- KMS enabled userspace will use new API to talk with the radeon/drm
- driver. The new API provide functions to create/destroy/share/mmap
- buffer object which are then managed by the kernel memory manager
- (here TTM). In order to submit command to the GPU the userspace
- provide a buffer holding the command stream, along this buffer
- userspace have to provide a list of buffer object used by the
- command stream. The kernel radeon driver will then place buffer
- in GPU accessible memory and will update command stream to reflect
- the position of the different buffers.
-
- The kernel will also perform security check on command stream
- provided by the user, we want to catch and forbid any illegal use
- of the GPU such as DMA into random system memory or into memory
- not owned by the process supplying the command stream.
+ Userspace modesetting has been deprecated for quite some time now, so
+ enable this only if you have ancient versions of the DDX drivers.
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index a6598fd66423..bf172522ea68 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -56,8 +56,12 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
-radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
- radeon_irq.o r300_cmdbuf.o r600_cp.o
+radeon-y := radeon_drv.o
+
+# add UMS driver
+radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \
+ radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o
+
# add KMS driver
radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
@@ -67,7 +71,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
- r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
+ r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 5ce9bf51a8de..46a9c3772850 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1238,6 +1238,8 @@ static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
static void atom_index_iio(struct atom_context *ctx, int base)
{
ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
+ if (!ctx->iio)
+ return;
while (CU8(base) == ATOM_IIO_START) {
ctx->iio[CU8(base + 1)] = base + 2;
base += 2;
@@ -1287,6 +1289,10 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+ if (!ctx->iio) {
+ atom_destroy(ctx);
+ return NULL;
+ }
str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
while (*str && ((*str == '\n') || (*str == '\r')))
@@ -1335,8 +1341,7 @@ int atom_asic_init(struct atom_context *ctx)
void atom_destroy(struct atom_context *ctx)
{
- if (ctx->iio)
- kfree(ctx->iio);
+ kfree(ctx->iio);
kfree(ctx);
}
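With the change above, a failed iio-table allocation now propagates out of atom_parse() as a NULL context instead of leaving ctx->iio NULL and crashing on first use. Callers therefore need the usual NULL check; a hedged usage sketch (card_info and bios stand in for whatever the caller already has):

	struct atom_context *ctx = atom_parse(card_info, bios);
	if (!ctx)
		return -ENOMEM;	/* also covers the new iio allocation-failure path */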
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9175615bbd8a..21a892c6ab9c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -252,8 +252,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
- if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
- atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@@ -271,8 +269,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
- if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
- atombios_powergate_crtc(crtc, ATOM_ENABLE);
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
@@ -1844,6 +1840,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (ASIC_IS_DCE6(rdev))
+ atombios_powergate_crtc(crtc, ATOM_ENABLE);
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a2d478e8692a..3c38ea46531c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -403,6 +403,19 @@ void evergreen_pm_misc(struct radeon_device *rdev)
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
}
+
+ /* starting with BTC, there is one state that is used for both
+ * MH and SH. Difference is that we always use the high clock index for
+ * mclk and vddci.
+ */
+ if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+ (rdev->family >= CHIP_BARTS) &&
+ rdev->pm.active_crtc_count &&
+ ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+ (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+ voltage = &rdev->pm.power_state[req_ps_idx].
+ clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
+
/* 0xff01 is a flag rather then an actual voltage */
if (voltage->vddci == 0xff01)
return;
@@ -2308,32 +2321,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
{
- u32 srbm_status;
- u32 grbm_status;
- u32 grbm_status_se0, grbm_status_se1;
-
- srbm_status = RREG32(SRBM_STATUS);
- grbm_status = RREG32(GRBM_STATUS);
- grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
- grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
- if (!(grbm_status & GUI_ACTIVE)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
-static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
-{
- u32 grbm_reset = 0;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
-
dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
@@ -2342,6 +2331,8 @@ static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " SRBM_STATUS2 = 0x%08X\n",
+ RREG32(SRBM_STATUS2));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
@@ -2350,112 +2341,283 @@ static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+ if (rdev->family >= CHIP_CAYMAN) {
+ dev_info(rdev->dev, " R_00D834_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG + 0x800));
+ }
+}
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+bool evergreen_is_display_hung(struct radeon_device *rdev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[6];
+ u32 i, j, tmp;
- /* reset all the gfx blocks */
- grbm_reset = (SOFT_RESET_CP |
- SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_SPI |
- SOFT_RESET_SH |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VC |
- SOFT_RESET_VGT);
-
- dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
- WREG32(GRBM_SOFT_RESET, grbm_reset);
- (void)RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- (void)RREG32(GRBM_SOFT_RESET);
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
+ crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
+ }
- dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
- RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+
+ return true;
}
-static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
+static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
{
+ u32 reset_mask = 0;
u32 tmp;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- return;
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ SH_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ SPI_BUSY | VGT_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
- /* Disable DMA */
- tmp = RREG32(DMA_RB_CNTL);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, tmp);
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
+ /* DMA_STATUS_REG */
+ tmp = RREG32(DMA_STATUS_REG);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
+
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ return reset_mask;
}
-static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
if (reset_mask == 0)
- return 0;
+ return;
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ evergreen_print_gpu_status_regs(rdev);
+
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ }
+
+ udelay(50);
+
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- evergreen_gpu_soft_reset_gfx(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ grbm_soft_reset |= SOFT_RESET_DB |
+ SOFT_RESET_CB |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX |
+ SOFT_RESET_SH |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VC |
+ SOFT_RESET_VGT;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP |
+ SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
if (reset_mask & RADEON_RESET_DMA)
- evergreen_gpu_soft_reset_dma(rdev);
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+ }
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
/* Wait a little for things to settle down */
udelay(50);
evergreen_mc_resume(rdev, &save);
- return 0;
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
}
int evergreen_asic_reset(struct radeon_device *rdev)
{
- return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
+ u32 reset_mask;
+
+ reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ evergreen_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * evergreen_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & RADEON_RESET_DMA)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/* Interrupts */
@@ -3280,14 +3442,14 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* write the fence */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
radeon_ring_write(ring, fence->seq);
/* generate an interrupt */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
/* flush HDP */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
radeon_ring_write(ring, 1);
}
@@ -3310,7 +3472,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
while ((next_rptr & 7) != 5)
next_rptr++;
next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
radeon_ring_write(ring, next_rptr);
@@ -3320,8 +3482,8 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
* Pad as necessary with NOPs.
*/
while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
@@ -3380,7 +3542,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
if (cur_size_in_dw > 0xFFFFF)
cur_size_in_dw = 0xFFFFF;
size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
radeon_ring_write(ring, dst_offset & 0xfffffffc);
radeon_ring_write(ring, src_offset & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
@@ -3488,7 +3650,7 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
if (r)
return r;
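
The three-argument DMA_PACKET() calls above and the GET_DMA_SUB_CMD() accessor used in the command-stream checkers below rely on updated helpers in evergreend.h, which is not part of this excerpt. A minimal sketch of what those definitions presumably look like, inferred from the GET_DMA_T/GET_DMA_NEW/GET_DMA_MISC fields removed further down; the exact masks are an assumption, not taken from this diff:

/* Presumed evergreend.h helpers (assumption; not shown in this patch). */
#define DMA_PACKET(cmd, sub_cmd, n)	((((cmd) & 0xF) << 28) |	\
					 (((sub_cmd) & 0xFF) << 20) |	\
					 ((n) & 0xFFFFF))
#define GET_DMA_CMD(h)		(((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h)	((h) & 0x000fffff)
/* sub_cmd folds the old NEW (bit 26), T (bit 23) and MISC (bits 22:20)
 * fields into one 8-bit opcode, e.g. 0x48 == NEW | TILED | misc 0. */
#define GET_DMA_SUB_CMD(h)	(((h) & 0x0ff00000) >> 20)

Under that encoding, DMA_PACKET(DMA_PACKET_FENCE, 0, 0) produces the same dword as the old four-argument call with t = 0 and s = 0, which is why the fence, NOP and ring-init call sites above simply drop one zero argument.
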
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index ee4cff534f10..99fb13286fd0 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -36,9 +36,6 @@
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
-static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-
struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
@@ -1009,223 +1006,35 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
}
/**
- * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-static int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
-{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- uint32_t header;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = CP_PACKET_GET_TYPE(header);
- pkt->count = CP_PACKET_GET_COUNT(header);
- pkt->one_reg_wr = 0;
- switch (pkt->type) {
- case PACKET_TYPE0:
- pkt->reg = CP_PACKET0_GET_REG(header);
- break;
- case PACKET_TYPE3:
- pkt->opcode = CP_PACKET3_GET_OPCODE(header);
- break;
- case PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
*
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- return -EINVAL;
- }
- /* FIXME: we assume reloc size is 4 dwords */
- *cs_reloc = p->relocs_ptr[(idx / 4)];
- return 0;
-}
-
-/**
- * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP
- * @p: structure holding the parser context.
- *
- * Check if the next packet is a relocation packet3.
- **/
-static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
-{
- struct radeon_cs_packet p3reloc;
- int r;
-
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return false;
- }
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- return false;
- }
- return true;
-}
-
-/**
- * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
- * @parser: parser structure holding parsing context.
- *
- * Userspace sends a special sequence for VLINE waits.
- * PACKET0 - VLINE_START_END + value
- * PACKET3 - WAIT_REG_MEM poll vline status reg
- * RELOC (P3) - crtc_id in reloc.
- *
- * This function parses this and relocates the VLINE START END
- * and WAIT_REG_MEM packets to the correct crtc.
- * It also detects a switched off crtc and nulls out the
- * wait in that case.
+ * This is an Evergreen(+)-specific function for parsing VLINE packets.
+ * The real work is done by the common r600_cs_common_vline_parse()
+ * helper; here we just set up the ASIC-specific register tables
+ * and call it.
*/
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- struct radeon_crtc *radeon_crtc;
- struct radeon_cs_packet p3reloc, wait_reg_mem;
- int crtc_id;
- int r;
- uint32_t header, h_idx, reg, wait_reg_mem_info;
- volatile uint32_t *ib;
-
- ib = p->ib.ptr;
-
- /* parse the WAIT_REG_MEM */
- r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
- if (r)
- return r;
-
- /* check its a WAIT_REG_MEM */
- if (wait_reg_mem.type != PACKET_TYPE3 ||
- wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
- DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
- return -EINVAL;
- }
-
- wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
- /* bit 4 is reg (0) or mem (1) */
- if (wait_reg_mem_info & 0x10) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
- return -EINVAL;
- }
- /* waiting for value to be equal */
- if ((wait_reg_mem_info & 0x7) != 0x3) {
- DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
- return -EINVAL;
- }
- if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
- DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
- return -EINVAL;
- }
-
- if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
- DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
- return -EINVAL;
- }
-
- /* jump over the NOP */
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
- if (r)
- return r;
-
- h_idx = p->idx - 2;
- p->idx += wait_reg_mem.count + 2;
- p->idx += p3reloc.count + 2;
- header = radeon_get_ib_value(p, h_idx);
- crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
- reg = CP_PACKET0_GET_REG(header);
- obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- DRM_ERROR("cannot find crtc %d\n", crtc_id);
- return -EINVAL;
- }
- crtc = obj_to_crtc(obj);
- radeon_crtc = to_radeon_crtc(crtc);
- crtc_id = radeon_crtc->crtc_id;
-
- if (!crtc->enabled) {
- /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
- ib[h_idx + 2] = PACKET2(0);
- ib[h_idx + 3] = PACKET2(0);
- ib[h_idx + 4] = PACKET2(0);
- ib[h_idx + 5] = PACKET2(0);
- ib[h_idx + 6] = PACKET2(0);
- ib[h_idx + 7] = PACKET2(0);
- ib[h_idx + 8] = PACKET2(0);
- } else {
- switch (reg) {
- case EVERGREEN_VLINE_START_END:
- header &= ~R600_CP_PACKET0_REG_MASK;
- header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
- ib[h_idx] = header;
- ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
- break;
- default:
- DRM_ERROR("unknown crtc reloc\n");
- return -EINVAL;
- }
- }
- return 0;
+ static uint32_t vline_start_end[6] = {
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
+ };
+ static uint32_t vline_status[6] = {
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
+ };
+
+ return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}
static int evergreen_packet0_check(struct radeon_cs_parser *p,
@@ -1347,7 +1156,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_LSTMP_RING_BASE:
case SQ_PSTMP_RING_BASE:
case SQ_VSTMP_RING_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1376,7 +1185,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_Z_INFO:
track->db_z_info = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1418,7 +1227,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_Z_READ_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1430,7 +1239,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_Z_WRITE_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1442,7 +1251,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_STENCIL_READ_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1454,7 +1263,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_STENCIL_WRITE_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1477,7 +1286,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_1:
case VGT_STRMOUT_BUFFER_BASE_2:
case VGT_STRMOUT_BUFFER_BASE_3:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1499,7 +1308,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->streamout_dirty = true;
break;
case CP_COHER_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
"0x%04X\n", reg);
@@ -1563,7 +1372,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1581,7 +1390,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1642,7 +1451,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_ATTRIB:
case CB_COLOR6_ATTRIB:
case CB_COLOR7_ATTRIB:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1670,7 +1479,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1703,7 +1512,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR6_FMASK:
case CB_COLOR7_FMASK:
tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1720,7 +1529,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR6_CMASK:
case CB_COLOR7_CMASK:
tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1758,7 +1567,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_BASE:
case CB_COLOR6_BASE:
case CB_COLOR7_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1774,7 +1583,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_BASE:
case CB_COLOR10_BASE:
case CB_COLOR11_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1787,7 +1596,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_dirty = true;
break;
case DB_HTILE_DATA_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1905,7 +1714,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_LS_13:
case SQ_ALU_CONST_CACHE_LS_14:
case SQ_ALU_CONST_CACHE_LS_15:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1919,7 +1728,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONFIG_REG "
"0x%04X\n", reg);
@@ -1933,7 +1742,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -2018,7 +1827,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -2064,7 +1873,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
@@ -2091,7 +1900,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -2119,7 +1928,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
@@ -2210,7 +2019,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad DISPATCH_INDIRECT\n");
return -EINVAL;
@@ -2231,7 +2040,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x10) {
uint64_t offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
@@ -2243,6 +2052,9 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else if (idx_value & 0x100) {
+ DRM_ERROR("cannot use PFP on REG wait\n");
+ return -EINVAL;
}
break;
case PACKET3_CP_DMA:
@@ -2282,7 +2094,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
/* src address space is memory */
if (((info & 0x60000000) >> 29) == 0) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad CP DMA SRC\n");
return -EINVAL;
@@ -2320,7 +2132,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
if (((info & 0x00300000) >> 20) == 0) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad CP DMA DST\n");
return -EINVAL;
@@ -2354,7 +2166,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* 0xffffffff/0x0 is flush all cache flag */
if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
radeon_get_ib_value(p, idx + 2) != 0) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
@@ -2370,7 +2182,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (pkt->count) {
uint64_t offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2391,7 +2203,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
@@ -2413,7 +2225,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
@@ -2480,7 +2292,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
@@ -2511,13 +2323,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
!mip_address &&
- !evergreen_cs_packet_next_is_pkt3_nop(p)) {
+ !radeon_cs_packet_next_is_pkt3_nop(p)) {
/* MIP_ADDRESS should point to FMASK for an MSAA texture.
* It should be 0 if FMASK is disabled. */
moffset = 0;
mipmap = NULL;
} else {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
@@ -2536,7 +2348,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset64;
/* vtx base */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET_RESOURCE (vtx)\n");
return -EINVAL;
@@ -2618,7 +2430,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* Updating memory at DST_ADDRESS. */
if (idx_value & 0x1) {
u64 offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
@@ -2637,7 +2449,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* Reading data from SRC_ADDRESS. */
if (((idx_value >> 1) & 0x3) == 2) {
u64 offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
@@ -2662,7 +2474,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
@@ -2691,7 +2503,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x1) {
u64 offset;
/* SRC is memory. */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad COPY_DW (missing src reloc)\n");
return -EINVAL;
@@ -2715,7 +2527,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x2) {
u64 offset;
/* DST is memory. */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
@@ -2819,7 +2631,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
p->track = track;
}
do {
- r = evergreen_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
kfree(p->track);
p->track = NULL;
@@ -2827,12 +2639,12 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
r = evergreen_cs_parse_packet0(p, &pkt);
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
break;
- case PACKET_TYPE3:
+ case RADEON_PACKET_TYPE3:
r = evergreen_packet3_check(p, &pkt);
break;
default:
@@ -2858,16 +2670,6 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
-/*
- * DMA
- */
-
-#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
-#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
-#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
-#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
-#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)
-
/**
* evergreen_dma_cs_parse() - parse the DMA IB
* @p: parser structure holding parsing context.
@@ -2881,9 +2683,9 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
- u32 header, cmd, count, tiled, new_cmd, misc;
+ u32 header, cmd, count, sub_cmd;
volatile u32 *ib = p->ib.ptr;
- u32 idx, idx_value;
+ u32 idx;
u64 src_offset, dst_offset, dst2_offset;
int r;
@@ -2897,9 +2699,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, idx);
cmd = GET_DMA_CMD(header);
count = GET_DMA_COUNT(header);
- tiled = GET_DMA_T(header);
- new_cmd = GET_DMA_NEW(header);
- misc = GET_DMA_MISC(header);
+ sub_cmd = GET_DMA_SUB_CMD(header);
switch (cmd) {
case DMA_PACKET_WRITE:
@@ -2908,19 +2708,27 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
- if (tiled) {
+ switch (sub_cmd) {
+ /* tiled */
+ case 8:
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
p->idx += count + 7;
- } else {
+ break;
+ /* linear */
+ case 0:
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
p->idx += count + 3;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
+ return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
@@ -2939,338 +2747,330 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_COPY\n");
return -EINVAL;
}
- if (tiled) {
- idx_value = radeon_get_ib_value(p, idx + 2);
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2T, frame to fields */
- if (idx_value & (1 << 31)) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset <<= 8;
- src_offset = radeon_get_ib_value(p, idx+8);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 10;
- break;
- case 1:
- /* L2T, T2L partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
- return -EINVAL;
- }
- /* detile bit */
- if (idx_value & (1 << 31)) {
- /* tiled src, linear dst */
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- }
- p->idx += 12;
- break;
- case 3:
- /* L2T, broadcast */
- if (idx_value & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset <<= 8;
- src_offset = radeon_get_ib_value(p, idx+8);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 10;
- break;
- case 4:
- /* L2T, T2L */
- /* detile bit */
- if (idx_value & (1 << 31)) {
- /* tiled src, linear dst */
- src_offset = radeon_get_ib_value(p, idx+1);
- src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
- dst_offset = radeon_get_ib_value(p, idx+7);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- src_offset = radeon_get_ib_value(p, idx+7);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- }
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- p->idx += 9;
- break;
- case 5:
- /* T2T partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
- return -EINVAL;
- }
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
- ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- p->idx += 13;
- break;
- case 7:
- /* L2T, broadcast */
- if (idx_value & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset <<= 8;
- src_offset = radeon_get_ib_value(p, idx+8);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 10;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
+ switch (sub_cmd) {
+ /* Copy L2L, DW aligned */
+ case 0x00:
+ /* L2L, dw */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ /* Copy L2T/T2L */
+ case 0x08:
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx + 7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
- switch (misc) {
- case 0:
- /* detile bit */
- if (idx_value & (1 << 31)) {
- /* tiled src, linear dst */
- src_offset = radeon_get_ib_value(p, idx+1);
- src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
- dst_offset = radeon_get_ib_value(p, idx+7);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- src_offset = radeon_get_ib_value(p, idx+7);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- }
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- p->idx += 9;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
- } else {
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2L, byte */
- src_offset = radeon_get_ib_value(p, idx+2);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
- if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
- src_offset + count, radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
- dst_offset + count, radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 5;
- break;
- case 1:
- /* L2L, partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2L Partial is cayman only !\n");
- return -EINVAL;
- }
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-
- p->idx += 9;
- break;
- case 4:
- /* L2L, dw, broadcast */
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
- src_offset = radeon_get_ib_value(p, idx+3);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 7;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ /* Copy L2L, byte aligned */
+ case 0x40:
+ /* L2L, byte */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ /* Copy L2L, partial */
+ case 0x41:
+ /* L2L, partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+ p->idx += 9;
+ break;
+ /* Copy L2L, DW aligned, broadcast */
+ case 0x44:
+ /* L2L, dw, broadcast */
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+3);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 7;
+ break;
+ /* Copy L2T Frame to Field */
+ case 0x48:
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ /* Copy L2T/T2L, partial */
+ case 0x49:
+ /* L2T, T2L partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 12;
+ break;
+ /* Copy L2T broadcast */
+ case 0x4b:
+ /* L2T, broadcast */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ /* Copy L2T/T2L (tile units) */
+ case 0x4c:
+ /* L2T, T2L */
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx+7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
- /* L2L, dw */
- src_offset = radeon_get_ib_value(p, idx+2);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 5;
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ /* Copy T2T, partial (tile units) */
+ case 0x4d:
+ /* T2T partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += 13;
+ break;
+ /* Copy L2T broadcast (tile units) */
+ case 0x4f:
+ /* L2T, broadcast */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
+ return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
@@ -3583,19 +3383,19 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
do {
pkt.idx = idx;
- pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
- pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt.one_reg_wr = 0;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
ret = -EINVAL;
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
idx += 1;
break;
- case PACKET_TYPE3:
- pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ case RADEON_PACKET_TYPE3:
+ pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
idx += pkt.count + 2;
break;
@@ -3623,88 +3423,79 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
u32 idx = 0;
- u32 header, cmd, count, tiled, new_cmd, misc;
+ u32 header, cmd, count, sub_cmd;
do {
header = ib->ptr[idx];
cmd = GET_DMA_CMD(header);
count = GET_DMA_COUNT(header);
- tiled = GET_DMA_T(header);
- new_cmd = GET_DMA_NEW(header);
- misc = GET_DMA_MISC(header);
+ sub_cmd = GET_DMA_SUB_CMD(header);
switch (cmd) {
case DMA_PACKET_WRITE:
- if (tiled)
+ switch (sub_cmd) {
+ /* tiled */
+ case 8:
idx += count + 7;
- else
+ break;
+ /* linear */
+ case 0:
idx += count + 3;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
+ return -EINVAL;
+ }
break;
case DMA_PACKET_COPY:
- if (tiled) {
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2T, frame to fields */
- idx += 10;
- break;
- case 1:
- /* L2T, T2L partial */
- idx += 12;
- break;
- case 3:
- /* L2T, broadcast */
- idx += 10;
- break;
- case 4:
- /* L2T, T2L */
- idx += 9;
- break;
- case 5:
- /* T2T partial */
- idx += 13;
- break;
- case 7:
- /* L2T, broadcast */
- idx += 10;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
- } else {
- switch (misc) {
- case 0:
- idx += 9;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
- }
- } else {
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2L, byte */
- idx += 5;
- break;
- case 1:
- /* L2L, partial */
- idx += 9;
- break;
- case 4:
- /* L2L, dw, broadcast */
- idx += 7;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
- } else {
- /* L2L, dw */
- idx += 5;
- }
+ switch (sub_cmd) {
+ /* Copy L2L, DW aligned */
+ case 0x00:
+ idx += 5;
+ break;
+ /* Copy L2T/T2L */
+ case 0x08:
+ idx += 9;
+ break;
+ /* Copy L2L, byte aligned */
+ case 0x40:
+ idx += 5;
+ break;
+ /* Copy L2L, partial */
+ case 0x41:
+ idx += 9;
+ break;
+ /* Copy L2L, DW aligned, broadcast */
+ case 0x44:
+ idx += 7;
+ break;
+ /* Copy L2T Frame to Field */
+ case 0x48:
+ idx += 10;
+ break;
+ /* Copy L2T/T2L, partial */
+ case 0x49:
+ idx += 12;
+ break;
+ /* Copy L2T broadcast */
+ case 0x4b:
+ idx += 10;
+ break;
+ /* Copy L2T/T2L (tile units) */
+ case 0x4c:
+ idx += 9;
+ break;
+ /* Copy T2T, partial (tile units) */
+ case 0x4d:
+ idx += 13;
+ break;
+ /* Copy L2T broadcast (tile units) */
+ case 0x4f:
+ idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
+ return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 327c08b54180..4fdecc2b4040 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -24,6 +24,7 @@
* Authors: Christian König
* Rafał Miłecki
*/
+#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -54,79 +55,18 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
}
/*
- * calculate the crc for a given info frame
- */
-static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
- uint8_t versionNumber,
- uint8_t length,
- uint8_t *frame)
-{
- int i;
- frame[0] = packetType + versionNumber + length;
- for (i = 1; i <= length; i++)
- frame[0] += frame[i];
- frame[0] = 0x100 - frame[0];
-}
-
-/*
* build a HDMI Video Info Frame
*/
-static void evergreen_hdmi_videoinfoframe(
- struct drm_encoder *encoder,
- uint8_t color_format,
- int active_information_present,
- uint8_t active_format_aspect_ratio,
- uint8_t scan_information,
- uint8_t colorimetry,
- uint8_t ex_colorimetry,
- uint8_t quantization,
- int ITC,
- uint8_t picture_aspect_ratio,
- uint8_t video_format_identification,
- uint8_t pixel_repetition,
- uint8_t non_uniform_picture_scaling,
- uint8_t bar_info_data_valid,
- uint16_t top_bar,
- uint16_t bottom_bar,
- uint16_t left_bar,
- uint16_t right_bar
-)
+static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
+ uint8_t *frame = buffer + 3;
- uint8_t frame[14];
-
- frame[0x0] = 0;
- frame[0x1] =
- (scan_information & 0x3) |
- ((bar_info_data_valid & 0x3) << 2) |
- ((active_information_present & 0x1) << 4) |
- ((color_format & 0x3) << 5);
- frame[0x2] =
- (active_format_aspect_ratio & 0xF) |
- ((picture_aspect_ratio & 0x3) << 4) |
- ((colorimetry & 0x3) << 6);
- frame[0x3] =
- (non_uniform_picture_scaling & 0x3) |
- ((quantization & 0x3) << 2) |
- ((ex_colorimetry & 0x7) << 4) |
- ((ITC & 0x1) << 7);
- frame[0x4] = (video_format_identification & 0x7F);
- frame[0x5] = (pixel_repetition & 0xF);
- frame[0x6] = (top_bar & 0xFF);
- frame[0x7] = (top_bar >> 8);
- frame[0x8] = (bottom_bar & 0xFF);
- frame[0x9] = (bottom_bar >> 8);
- frame[0xA] = (left_bar & 0xFF);
- frame[0xB] = (left_bar >> 8);
- frame[0xC] = (right_bar & 0xFF);
- frame[0xD] = (right_bar >> 8);
-
- evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
/* Our header values (type, version, length) should be alright, Intel
* is using the same. Checksum function also seems to be OK, it works
* fine for audio infoframe. However calculated value is always lower
@@ -154,7 +94,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
uint32_t offset;
+ ssize_t err;
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
@@ -200,9 +143,19 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
- evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+ evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
evergreen_hdmi_update_ACR(encoder, mode->clock);
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
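For reference, the checksum that the removed evergreen_hdmi_infoframe_checksum() helper computed is the usual HDMI infoframe rule: type, version, length, payload and checksum bytes sum to zero modulo 256. hdmi_avi_infoframe_pack() from <linux/hdmi.h> now takes care of that (plus serializing the header) for the driver. Below is a minimal sketch of the same arithmetic, written as ordinary userspace C purely so it can be compiled on its own; it is an illustration, not driver code.

/* Standalone sketch of the checksum the deleted helper computed:
 * type + version + length + payload bytes + checksum == 0 (mod 256). */
#include <stdint.h>
#include <stdio.h>

static uint8_t infoframe_checksum(uint8_t type, uint8_t version,
				  uint8_t length, const uint8_t *payload)
{
	unsigned int sum = type + version + length;
	int i;

	for (i = 0; i < length; i++)
		sum += payload[i];
	return (uint8_t)(0x100 - (sum & 0xff));
}

int main(void)
{
	uint8_t avi_payload[13] = { 0 };	/* zeroed AVI payload, as the old call passed */

	/* 0x82 / 0x02 / 0x0D are the AVI infoframe type, version and length
	 * the removed code hard-coded. */
	printf("checksum: 0x%02x\n",
	       infoframe_checksum(0x82, 0x02, 0x0D, avi_payload));
	return 0;
}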
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 034f4c22e5db..f585be16e2d5 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -223,6 +223,7 @@
#define EVERGREEN_CRTC_STATUS 0x6e8c
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 0bfd0e9e469b..982d25ad9af3 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -729,6 +729,18 @@
#define WAIT_UNTIL 0x8040
#define SRBM_STATUS 0x0E50
+#define RLC_RQ_PENDING (1 << 3)
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define RLC_BUSY (1 << 15)
+#define IH_BUSY (1 << 17)
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
#define SRBM_SOFT_RESET 0x0E60
#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
#define SOFT_RESET_BIF (1 << 1)
@@ -924,20 +936,23 @@
#define CAYMAN_DMA1_CNTL 0xd82c
/* async DMA packets */
-#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
- (((t) & 0x1) << 23) | \
- (((s) & 0x1) << 22) | \
- (((n) & 0xFFFFF) << 0))
+#define DMA_PACKET(cmd, sub_cmd, n) ((((cmd) & 0xF) << 28) | \
+ (((sub_cmd) & 0xFF) << 20) |\
+ (((n) & 0xFFFFF) << 0))
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_SUB_CMD(h) (((h) & 0x0ff00000) >> 20)
+
/* async DMA Packet types */
-#define DMA_PACKET_WRITE 0x2
-#define DMA_PACKET_COPY 0x3
-#define DMA_PACKET_INDIRECT_BUFFER 0x4
-#define DMA_PACKET_SEMAPHORE 0x5
-#define DMA_PACKET_FENCE 0x6
-#define DMA_PACKET_TRAP 0x7
-#define DMA_PACKET_SRBM_WRITE 0x9
-#define DMA_PACKET_CONSTANT_FILL 0xd
-#define DMA_PACKET_NOP 0xf
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
@@ -980,16 +995,7 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -998,7 +1004,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
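The reworked async-DMA header keeps the command in bits 31:28 and the count in bits 19:0, and folds the old tiled/new/misc bits into a single 8-bit sub-command in bits 27:20, which is what GET_DMA_SUB_CMD() extracts in the parser changes above. A small self-contained round-trip of those macros, copied verbatim from the evergreend.h hunk so it builds outside the driver; 0x41 is the "L2L, partial" copy sub-command the parser accepts.

/* Standalone round-trip of the new DMA header layout:
 * cmd[31:28], sub_cmd[27:20], count[19:0]. */
#include <stdint.h>
#include <stdio.h>

#define DMA_PACKET(cmd, sub_cmd, n)	((((cmd) & 0xF) << 28) |	\
					 (((sub_cmd) & 0xFF) << 20) |	\
					 (((n) & 0xFFFFF) << 0))
#define GET_DMA_CMD(h)			(((h) & 0xf0000000) >> 28)
#define GET_DMA_SUB_CMD(h)		(((h) & 0x0ff00000) >> 20)
#define GET_DMA_COUNT(h)		((h) & 0x000fffff)

#define DMA_PACKET_COPY			0x3

int main(void)
{
	uint32_t header = DMA_PACKET(DMA_PACKET_COPY, 0x41, 9);

	printf("cmd=0x%x sub_cmd=0x%02x count=%u\n",
	       (unsigned)GET_DMA_CMD(header),
	       (unsigned)GET_DMA_SUB_CMD(header),
	       (unsigned)GET_DMA_COUNT(header));	/* cmd=0x3 sub_cmd=0x41 count=9 */
	return 0;
}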
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 835992d8d067..7cead763be9e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -34,6 +34,8 @@
#include "ni_reg.h"
#include "cayman_blit_shaders.h"
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
@@ -1310,120 +1312,90 @@ void cayman_dma_fini(struct radeon_device *rdev)
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}
-static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
+static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
- u32 grbm_reset = 0;
+ u32 reset_mask = 0;
+ u32 tmp;
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ SH_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ GDS_BUSY | SPI_BUSY |
+ IA_BUSY | IA_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
- dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
- RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
- /* reset all the gfx blocks */
- grbm_reset = (SOFT_RESET_CP |
- SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_GDS |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_SPI |
- SOFT_RESET_SH |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VGT |
- SOFT_RESET_IA);
-
- dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
- WREG32(GRBM_SOFT_RESET, grbm_reset);
- (void)RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- (void)RREG32(GRBM_SOFT_RESET);
-
- dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
- RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ /* DMA_STATUS_REG 0 */
+ tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
-}
+ /* DMA_STATUS_REG 1 */
+ tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA1;
-static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
-{
- u32 tmp;
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- return;
+ if (tmp & DMA1_BUSY)
+ reset_mask |= RADEON_RESET_DMA1;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
- /* dma0 */
- tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
- /* dma1 */
- tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ return reset_mask;
}
-static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
if (reset_mask == 0)
- return 0;
+ return;
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ evergreen_print_gpu_status_regs(rdev);
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(0x14F8));
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
@@ -1433,29 +1405,158 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(0x14DC));
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ }
+
+ if (reset_mask & RADEON_RESET_DMA1) {
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ }
+
+ udelay(50);
+
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- cayman_gpu_soft_reset_gfx(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ grbm_soft_reset = SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
if (reset_mask & RADEON_RESET_DMA)
- cayman_gpu_soft_reset_dma(rdev);
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DMA1)
+ srbm_soft_reset |= SOFT_RESET_DMA1;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+ }
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
/* Wait a little for things to settle down */
udelay(50);
evergreen_mc_resume(rdev, &save);
- return 0;
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
}
int cayman_asic_reset(struct radeon_device *rdev)
{
- return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
+ u32 reset_mask;
+
+ reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ cayman_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * cayman_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/**
@@ -1464,18 +1565,20 @@ int cayman_asic_reset(struct radeon_device *rdev)
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
- * Check if the async DMA engine is locked up (cayman-SI).
+ * Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 dma_status_reg;
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+ u32 mask;
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ mask = RADEON_RESET_DMA;
else
- dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
- if (dma_status_reg & DMA_IDLE) {
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1843,19 +1946,21 @@ uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
* cayman_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using the CP (cayman-si).
+ * Update the page tables using the CP (cayman/TN).
*/
-void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void cayman_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
@@ -1866,9 +1971,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
if (ndw > 0x3FFF)
ndw = 0x3FFF;
- radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 1; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -1880,8 +1985,8 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
@@ -1891,9 +1996,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
ndw = 0xFFFFE;
/* for non-physically contiguous pages (system) */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -1905,10 +2010,12 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
}
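cayman_vm_set_page() now emits page-table updates into an indirect buffer instead of writing the ring directly, and on the DMA path it pads the tail of the IB with NOP packets until length_dw is a multiple of eight, presumably to keep the DMA engine's fetch aligned (the patch only shows the padding loop, not the requirement itself). A rough standalone sketch of that padding step; the buffer and NOP encoding (command 0xf in bits 31:28, everything else zero) are reduced to plain integers for illustration.

/* Pad an IB length to an 8-dword boundary with DMA NOP headers. */
#include <stdint.h>
#include <stdio.h>

#define DMA_PACKET_NOP	0xf

int main(void)
{
	uint32_t ib[32] = { 0 };
	uint32_t length_dw = 13;	/* pretend 13 dwords of real commands were emitted */

	while (length_dw & 0x7)
		ib[length_dw++] = (uint32_t)DMA_PACKET_NOP << 28;

	printf("padded length_dw = %u, last header = 0x%08x\n",
	       (unsigned)length_dw, ib[length_dw - 1]);	/* 16, 0xf0000000 */
	return 0;
}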
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 48e5022ee921..079dee202a9e 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -49,6 +49,16 @@
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
+#define RLC_RQ_PENDING (1 << 3)
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define RLC_BUSY (1 << 15)
+#define IH_BUSY (1 << 17)
#define SRBM_SOFT_RESET 0x0E60
#define SOFT_RESET_BIF (1 << 1)
@@ -68,6 +78,10 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
#define RESPONSE_TYPE_MASK 0x000000F0
@@ -474,16 +488,7 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -492,7 +497,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8ff7cac222dc..9db58530be37 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1215,11 +1215,11 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
struct radeon_cs_reloc *reloc;
u32 value;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1233,7 +1233,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
DRM_ERROR("Cannot src blit from microtiled surface\n");
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
tile_flags |= RADEON_DST_TILE_MICRO;
@@ -1263,16 +1263,16 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
if (c > 16) {
DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
track->num_arrays = c;
for (i = 0; i < (c - 1); i+=2, idx+=3) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
@@ -1281,11 +1281,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize &= 0x7F;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
@@ -1294,11 +1294,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
@@ -1355,67 +1355,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
return 0;
}
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt)
-{
- volatile uint32_t *ib;
- unsigned i;
- unsigned idx;
-
- ib = p->ib.ptr;
- idx = pkt->idx;
- for (i = 0; i <= (pkt->count + 1); i++, idx++) {
- DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
- }
-}
-
-/**
- * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-int r100_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
-{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- uint32_t header;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = CP_PACKET_GET_TYPE(header);
- pkt->count = CP_PACKET_GET_COUNT(header);
- switch (pkt->type) {
- case PACKET_TYPE0:
- pkt->reg = CP_PACKET0_GET_REG(header);
- pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
- break;
- case PACKET_TYPE3:
- pkt->opcode = CP_PACKET3_GET_OPCODE(header);
- break;
- case PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
- }
- return 0;
-}
-
/**
* r100_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
@@ -1444,7 +1383,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib = p->ib.ptr;
/* parse the wait until */
- r = r100_cs_packet_parse(p, &waitreloc, p->idx);
+ r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
if (r)
return r;
@@ -1461,7 +1400,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
}
/* jump over the NOP */
- r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
if (r)
return r;
@@ -1471,7 +1410,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
- reg = CP_PACKET0_GET_REG(header);
+ reg = R100_CP_PACKET0_GET_REG(header);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -1506,54 +1445,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
return 0;
}
-/**
- * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
- * @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = r100_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- r100_cs_dump_packet(p, &p3reloc);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- r100_cs_dump_packet(p, &p3reloc);
- return -EINVAL;
- }
- /* FIXME: we assume reloc size is 4 dwords */
- *cs_reloc = p->relocs_ptr[(idx / 4)];
- return 0;
-}
-
static int r100_get_vtx_size(uint32_t vtx_fmt)
{
int vtx_size;
@@ -1631,7 +1522,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -1644,11 +1535,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r;
break;
case RADEON_RB3D_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
@@ -1657,11 +1548,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[0].robj = reloc->robj;
@@ -1673,11 +1564,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_TXOFFSET_1:
case RADEON_PP_TXOFFSET_2:
i = (reg - RADEON_PP_TXOFFSET_0) / 24;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1700,11 +1591,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T0_3:
case RADEON_PP_CUBIC_OFFSET_T0_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[0].cube_info[i].offset = idx_value;
@@ -1718,11 +1609,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T1_3:
case RADEON_PP_CUBIC_OFFSET_T1_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[1].cube_info[i].offset = idx_value;
@@ -1736,11 +1627,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T2_3:
case RADEON_PP_CUBIC_OFFSET_T2_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[2].cube_info[i].offset = idx_value;
@@ -1754,11 +1645,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1825,11 +1716,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1986,10 +1877,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
return r;
break;
case PACKET3_INDX_BUFFER:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
@@ -2000,10 +1891,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
break;
case 0x23:
/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
@@ -2100,37 +1991,36 @@ int r100_cs_parse(struct radeon_cs_parser *p)
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
- r = r100_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
return r;
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
- if (p->rdev->family >= CHIP_R200)
- r = r100_cs_parse_packet0(p, &pkt,
- p->rdev->config.r100.reg_safe_bm,
- p->rdev->config.r100.reg_safe_bm_size,
- &r200_packet0_check);
- else
- r = r100_cs_parse_packet0(p, &pkt,
- p->rdev->config.r100.reg_safe_bm,
- p->rdev->config.r100.reg_safe_bm_size,
- &r100_packet0_check);
- break;
- case PACKET_TYPE2:
- break;
- case PACKET_TYPE3:
- r = r100_packet3_check(p, &pkt);
- break;
- default:
- DRM_ERROR("Unknown packet type %d !\n",
- pkt.type);
- return -EINVAL;
+ case RADEON_PACKET_TYPE0:
+ if (p->rdev->family >= CHIP_R200)
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r100.reg_safe_bm,
+ p->rdev->config.r100.reg_safe_bm_size,
+ &r200_packet0_check);
+ else
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r100.reg_safe_bm,
+ p->rdev->config.r100.reg_safe_bm_size,
+ &r100_packet0_check);
+ break;
+ case RADEON_PACKET_TYPE2:
+ break;
+ case RADEON_PACKET_TYPE3:
+ r = r100_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n",
+ pkt.type);
+ return -EINVAL;
}
- if (r) {
+ if (r)
return r;
- }
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 6a603b378adb..eb40888bdfcc 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -81,10 +81,6 @@ struct r100_cs_track {
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track);
-int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index eab91760fae0..f0f8ee69f480 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -64,17 +64,6 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
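The per-ASIC copies of the PM4 header accessors deleted here (and in evergreend.h, nid.h and r300d.h) all describe the same layout: packet type in bits 31:30, dword count in bits 29:16, opcode in bits 15:8 for type-3 packets, and a register field shifted left by 2 for type-0 packets (masked with 0x1FFF on r100/r300, 0xFFFF on evergreen/cayman, hence the new R100_CP_PACKET0_GET_REG in r100.c). The RADEON_PACKET_TYPE*/RADEON_CP_PACKET_GET_* names used by the new code are assumed to come from a shared radeon header added elsewhere in this series; the helpers below simply restate the bit positions from the deleted macros.

/* Standalone decode of a PM4 header: type[31:30], count[29:16], opcode[15:8]. */
#include <stdint.h>
#include <stdio.h>

#define RADEON_PACKET_TYPE0	0
#define RADEON_PACKET_TYPE2	2
#define RADEON_PACKET_TYPE3	3

static unsigned int cp_packet_get_type(uint32_t h)    { return (h >> 30) & 3; }
static unsigned int cp_packet_get_count(uint32_t h)   { return (h >> 16) & 0x3FFF; }
static unsigned int cp_packet3_get_opcode(uint32_t h) { return (h >> 8) & 0xFF; }

int main(void)
{
	/* PACKET3(op, n) builds (TYPE3 << 30) | (op << 8) | (n << 16);
	 * 0x10 stands in for an arbitrary opcode, 3 for the dword count. */
	uint32_t header = ((uint32_t)RADEON_PACKET_TYPE3 << 30) | (0x10 << 8) | (3 << 16);

	printf("type=%u count=%u opcode=0x%02x\n",
	       cp_packet_get_type(header),
	       cp_packet_get_count(header),
	       cp_packet3_get_opcode(header));	/* type=3 count=3 opcode=0x10 */
	return 0;
}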
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 98143a5c5b73..b3807edb1936 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -162,7 +162,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -175,11 +175,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r;
break;
case RADEON_RB3D_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
@@ -188,11 +188,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[0].robj = reloc->robj;
@@ -207,11 +207,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXOFFSET_4:
case R200_PP_TXOFFSET_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -260,11 +260,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_CUBIC_OFFSET_F5_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[i].cube_info[face - 1].offset = idx_value;
@@ -278,11 +278,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -355,11 +355,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index d0ba6023a1f8..c60350e6872d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -615,7 +615,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -630,11 +630,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_RB3D_COLOROFFSET2:
case R300_RB3D_COLOROFFSET3:
i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[i].robj = reloc->robj;
@@ -643,11 +643,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_ZB_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
@@ -672,11 +672,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_OFFSET_0+56:
case R300_TX_OFFSET_0+60:
i = (reg - R300_TX_OFFSET_0) >> 2;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -745,11 +745,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -830,11 +830,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x4F24:
/* ZB_DEPTHPITCH */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1045,11 +1045,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->tex_dirty = true;
break;
case R300_ZB_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1087,11 +1087,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->cb_dirty = true;
break;
case R300_RB3D_AARESOLVE_OFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->aa.robj = reloc->robj;
@@ -1156,10 +1156,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
return r;
break;
case PACKET3_INDX_BUFFER:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
@@ -1257,21 +1257,21 @@ int r300_cs_parse(struct radeon_cs_parser *p)
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
- r = r100_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
return r;
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
r = r100_cs_parse_packet0(p, &pkt,
p->rdev->config.r300.reg_safe_bm,
p->rdev->config.r300.reg_safe_bm_size,
&r300_packet0_check);
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
break;
- case PACKET_TYPE3:
+ case RADEON_PACKET_TYPE3:
r = r300_packet3_check(p, &pkt);
break;
default:
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 002ab038d2ab..865e2c9980db 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -29,6 +29,8 @@
*
* Authors:
* Nicolai Haehnle <prefect_@gmx.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 1f519a5ffb8c..ff229a00d273 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -65,17 +65,6 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
/* Registers */
#define R_000148_MC_FB_LOCATION 0x000148
#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index ec576aaafb73..c0dc8d3ba0bb 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,7 @@
# define AVIVO_D1CRTC_V_BLANK (1 << 0)
#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
+#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
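The new AVIVO_D1CRTC_STATUS_HV_COUNT define is what r600_is_display_hung() (and the evergreen/cayman counterparts referenced above) samples: for every enabled CRTC it latches the H/V counter, then re-reads it up to ten times with short delays, and a counter that never moves is treated as a hung display. A standalone sketch of that polling idea follows; read_hv_count() and the test harness are stand-ins rather than driver APIs, and the ~100 µs delay between samples is elided.

/* Sample a free-running per-CRTC counter and report "hung" only if none
 * of the sampled counters ever changes across ten polls. */
#include <stdbool.h>
#include <stdint.h>

#define MAX_CRTC 2

static bool display_is_hung(uint32_t (*read_hv_count)(int crtc),
			    const bool *crtc_enabled, int num_crtc)
{
	uint32_t status[MAX_CRTC];
	unsigned int hung = 0;
	int i, j;

	for (i = 0; i < num_crtc; i++) {
		if (crtc_enabled[i]) {
			status[i] = read_hv_count(i);
			hung |= 1u << i;
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < num_crtc; i++) {
			if ((hung & (1u << i)) && read_hv_count(i) != status[i])
				hung &= ~(1u << i);	/* counter moved: not hung */
		}
		if (hung == 0)
			return false;
		/* the driver waits ~100us between samples here */
	}
	return true;
}

/* Trivial harness: a counter that keeps moving is never reported hung. */
static uint32_t fake_counter;
static uint32_t fake_read(int crtc) { (void)crtc; return fake_counter++; }

int main(void)
{
	bool enabled[MAX_CRTC] = { true, false };

	return display_is_hung(fake_read, enabled, MAX_CRTC) ? 1 : 0;
}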
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index becb03e8b32f..6d4b5611daf4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -94,6 +94,12 @@ MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
/* r600,rv610,rv630,rv620,rv635,rv670 */
@@ -103,6 +109,19 @@ void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
+/**
+ * r600_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r6xx, IGPs, APUs).
+ */
+u32 r600_get_xclk(struct radeon_device *rdev)
+{
+ return rdev->clock.spll.reference_freq;
+}
+
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
@@ -1254,169 +1273,301 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->vram_scratch.robj);
}
-/* We doesn't check that the GPU really needs a reset we simply do the
- * reset, it's up to the caller to determine if the GPU needs one. We
- * might add an helper function to check that.
- */
-static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
+void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
- u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
- S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
- S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
- S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
- S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
- S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
- S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
- S_008010_GUI_ACTIVE(1);
- u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
- S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
- S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
- S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
- S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
- S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
- S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
- S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
- u32 tmp;
+ u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+ WREG32(R600_BIOS_3_SCRATCH, tmp);
+}
+static void r600_print_gpu_status_regs(struct radeon_device *rdev)
+{
dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
- RREG32(R_008010_GRBM_STATUS));
+ RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
- RREG32(R_008014_GRBM_STATUS2));
+ RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
- RREG32(R_000E50_SRBM_STATUS));
+ RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
+ RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
+ RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
+ RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
-
- /* Disable CP parsing/prefetching */
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+ RREG32(CP_STAT));
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
- /* Check if any of the rendering block is busy and reset it */
- if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
- (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
- tmp = S_008020_SOFT_RESET_CR(1) |
- S_008020_SOFT_RESET_DB(1) |
- S_008020_SOFT_RESET_CB(1) |
- S_008020_SOFT_RESET_PA(1) |
- S_008020_SOFT_RESET_SC(1) |
- S_008020_SOFT_RESET_SMX(1) |
- S_008020_SOFT_RESET_SPI(1) |
- S_008020_SOFT_RESET_SX(1) |
- S_008020_SOFT_RESET_SH(1) |
- S_008020_SOFT_RESET_TC(1) |
- S_008020_SOFT_RESET_TA(1) |
- S_008020_SOFT_RESET_VC(1) |
- S_008020_SOFT_RESET_VGT(1);
- dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- RREG32(R_008020_GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(R_008020_GRBM_SOFT_RESET, 0);
+static bool r600_is_display_hung(struct radeon_device *rdev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[2];
+ u32 i, j, tmp;
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
+ crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
}
- /* Reset CP (we always reset CP) */
- tmp = S_008020_SOFT_RESET_CP(1);
- dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- RREG32(R_008020_GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(R_008020_GRBM_SOFT_RESET, 0);
- dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
- RREG32(R_008010_GRBM_STATUS));
- dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
- RREG32(R_008014_GRBM_STATUS2));
- dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
- RREG32(R_000E50_SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+ return true;
}
-static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
+static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
+ u32 reset_mask = 0;
u32 tmp;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- return;
+ /* GRBM_STATUS */
+ tmp = RREG32(R_008010_GRBM_STATUS);
+ if (rdev->family >= CHIP_RV770) {
+ if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+ G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+ G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+ G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+ G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+ reset_mask |= RADEON_RESET_GFX;
+ } else {
+ if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+ G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+ G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+ G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+ G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+ reset_mask |= RADEON_RESET_GFX;
+ }
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
+ G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
+ reset_mask |= RADEON_RESET_CP;
- /* Disable DMA */
- tmp = RREG32(DMA_RB_CNTL);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, tmp);
+ if (G_008010_GRBM_EE_BUSY(tmp))
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
- /* Reset dma */
- if (rdev->family >= CHIP_RV770)
- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
- else
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
+ /* DMA_STATUS_REG */
+ tmp = RREG32(DMA_STATUS_REG);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ /* SRBM_STATUS */
+ tmp = RREG32(R_000E50_SRBM_STATUS);
+ if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (G_000E50_IH_BUSY(tmp))
+ reset_mask |= RADEON_RESET_IH;
+
+ if (G_000E50_SEM_BUSY(tmp))
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (G_000E50_GRBM_RQ_PENDING(tmp))
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (G_000E50_VMC_BUSY(tmp))
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
+ G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
+ G_000E50_MCDW_BUSY(tmp))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (r600_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ return reset_mask;
}
-static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct rv515_mc_save save;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
if (reset_mask == 0)
- return 0;
+ return;
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ r600_print_gpu_status_regs(rdev);
+
+ /* Disable CP parsing/prefetching */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+ else
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+ /* disable the RLC */
+ WREG32(RLC_CNTL, 0);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ }
+
+ mdelay(50);
+
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- r600_gpu_soft_reset_gfx(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ if (rdev->family >= CHIP_RV770)
+ grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ else
+ grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
+ S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SMX(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
+ S_008020_SOFT_RESET_VGT(1);
+
+ srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+ }
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ if (rdev->family >= CHIP_RV770)
+ srbm_soft_reset |= RV770_SOFT_RESET_DMA;
+ else
+ srbm_soft_reset |= SOFT_RESET_DMA;
+ }
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
+ }
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
- if (reset_mask & RADEON_RESET_DMA)
- r600_gpu_soft_reset_dma(rdev);
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
/* Wait a little for things to settle down */
mdelay(1);
rv515_mc_resume(rdev, &save);
+ udelay(50);
+
+ r600_print_gpu_status_regs(rdev);
+}
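
Both the GRBM and SRBM branches above use the same pulse idiom: OR the soft-reset bits in, read the register back to flush the posted write, wait about 50 us, then clear the bits again. A minimal sketch of that idiom against a fake register file; reg_read()/reg_write()/delay_us() are hypothetical stand-ins for RREG32/WREG32/udelay:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical MMIO model: a flat register file plus a no-op delay. */
static uint32_t regs[256];
static uint32_t reg_read(uint32_t offset) { return regs[offset]; }
static void reg_write(uint32_t offset, uint32_t value) { regs[offset] = value; }
static void delay_us(unsigned int us) { (void)us; /* real code would udelay() */ }

/* Set the requested soft-reset bits, post the write, wait, then release them. */
static void pulse_soft_reset(uint32_t reg, uint32_t bits)
{
    uint32_t tmp = reg_read(reg);

    tmp |= bits;
    reg_write(reg, tmp);
    (void)reg_read(reg);  /* read back to flush the posted write */

    delay_us(50);         /* give the blocks time to latch the reset */

    tmp &= ~bits;
    reg_write(reg, tmp);
    (void)reg_read(reg);
}

int main(void)
{
    pulse_soft_reset(0x20, 1u << 3);
    printf("reg 0x20 after pulse: 0x%08x\n", regs[0x20]); /* bits released */
    return 0;
}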
+
+int r600_asic_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask;
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ r600_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
return 0;
}
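
The reworked entry point brackets the soft reset between two status checks, so the "engine hung" BIOS scratch flag is raised only while a reset is actually pending and cleared only once the recheck comes back clean. A self-contained sketch of that bracket with stubbed, hypothetical helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stubbed stand-ins for the driver helpers (hypothetical, for illustration). */
static uint32_t fake_status = 0x4;                      /* pretend one engine is stuck */
static uint32_t check_soft_reset(void) { return fake_status; }
static void soft_reset(uint32_t mask)  { if (mask) fake_status = 0; }
static void set_engine_hung(bool hung) { printf("scratch hung flag = %d\n", hung); }

static int asic_reset(void)
{
    uint32_t reset_mask = check_soft_reset();

    if (reset_mask)
        set_engine_hung(true);          /* something needs resetting: flag it */

    soft_reset(reset_mask);

    reset_mask = check_soft_reset();    /* re-read status after the reset */
    if (!reset_mask)
        set_engine_hung(false);         /* reset worked: clear the flag */

    return 0;
}

int main(void) { return asic_reset(); }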
-bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+/**
+ * r600_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 srbm_status;
- u32 grbm_status;
- u32 grbm_status2;
-
- srbm_status = RREG32(R_000E50_SRBM_STATUS);
- grbm_status = RREG32(R_008010_GRBM_STATUS);
- grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
- if (!G_008010_GUI_ACTIVE(grbm_status)) {
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1431,15 +1582,14 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
- * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 dma_status_reg;
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
- dma_status_reg = RREG32(DMA_STATUS_REG);
- if (dma_status_reg & DMA_IDLE) {
+ if (!(reset_mask & RADEON_RESET_DMA)) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1448,13 +1598,6 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
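
Both lockup checks now share one shape: derive the reset mask, and only when the bits relevant to this ring are set fall through to the generic ring lockup test; otherwise record that the ring is making progress. A minimal sketch with hypothetical stand-ins for the driver helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HYP_RESET_DMA (1u << 2)

/* Hypothetical stand-ins for the driver state and helpers. */
static uint32_t hyp_status;                     /* pretend hardware status */
static uint32_t hyp_check_soft_reset(void) { return hyp_status; }
static void hyp_ring_lockup_update(void) { /* record "ring made progress" */ }
static bool hyp_ring_test_lockup(void) { return true; /* pretend no progress */ }

/* Same shape as the reworked lockup checks: consult the reset mask first. */
static bool hyp_dma_is_lockup(void)
{
    uint32_t reset_mask = hyp_check_soft_reset();

    if (!(reset_mask & HYP_RESET_DMA)) {
        hyp_ring_lockup_update();       /* engine idle enough: not locked up */
        return false;
    }
    return hyp_ring_test_lockup();      /* engine busy: run the real test */
}

int main(void)
{
    hyp_status = 0;
    printf("idle: lockup=%d\n", hyp_dma_is_lockup());   /* 0 */
    hyp_status = HYP_RESET_DMA;
    printf("busy: lockup=%d\n", hyp_dma_is_lockup());   /* 1 */
    return 0;
}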
-int r600_asic_reset(struct radeon_device *rdev)
-{
- return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
-}
-
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
@@ -4318,14 +4461,14 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
}
/**
- * r600_get_gpu_clock - return GPU clock counter snapshot
+ * r600_get_gpu_clock_counter - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (R6xx-cayman).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
uint64_t clock;
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 77da1f9c0b8e..f651881eb0ae 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -22,6 +22,8 @@
*
* Authors:
* Alex Deucher <alexander.deucher@amd.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
@@ -488,37 +490,6 @@ set_default_state(drm_radeon_private_t *dev_priv)
ADVANCE_RING();
}
-/* 23 bits of float fractional data */
-#define I2F_FRAC_BITS 23
-#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
-
-/*
- * Converts unsigned integer into 32-bit IEEE floating point representation.
- * Will be exact from 0 to 2^24. Above that, we round towards zero
- * as the fractional bits will not fit in a float. (It would be better to
- * round towards even as the fpu does, but that is slower.)
- */
-__pure uint32_t int2float(uint32_t x)
-{
- uint32_t msb, exponent, fraction;
-
- /* Zero is special */
- if (!x) return 0;
-
- /* Get location of the most significant bit */
- msb = __fls(x);
-
- /*
- * Use a rotate instead of a shift because that works both leftwards
- * and rightwards due to the mod(32) behaviour. This means we don't
- * need to check to see if we are above 2^24 or not.
- */
- fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
- exponent = (127 + msb) << I2F_FRAC_BITS;
-
- return fraction + exponent;
-}
-
static int r600_nomm_get_vb(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index e082dca6feee..9fb5780a552f 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -31,6 +31,37 @@
#include "r600_blit_shaders.h"
#include "radeon_blit_common.h"
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS 23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts unsigned integer into 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24. Above that, we round towards zero
+ * as the fractional bits will not fit in a float. (It would be better to
+ * round towards even as the fpu does, but that is slower.)
+ */
+__pure uint32_t int2float(uint32_t x)
+{
+ uint32_t msb, exponent, fraction;
+
+ /* Zero is special */
+ if (!x) return 0;
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ /*
+ * Use a rotate instead of a shift because that works both leftwards
+ * and rightwards due to the mod(32) behaviour. This means we don't
+ * need to check to see if we are above 2^24 or not.
+ */
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return fraction + exponent;
+}
+
/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
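
As a quick check of the rotate trick described in the int2float() comment, here is a hedged standalone sketch that reimplements the same logic with portable stand-ins for the kernel's __fls() and ror32() helpers and verifies three representative values, including the round-toward-zero behaviour above 2^24:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ror32_p(uint32_t x, unsigned int r)
{
    return (x >> r) | (x << ((32 - r) & 31));
}

static unsigned int fls_p(uint32_t x)   /* index of the most significant set bit */
{
    unsigned int msb = 0;

    while (x >>= 1)
        msb++;
    return msb;
}

static uint32_t int2float_p(uint32_t x)
{
    uint32_t msb, exponent, fraction;

    if (!x)
        return 0;
    msb = fls_p(x);
    fraction = ror32_p(x, (msb - 23) & 0x1f) & ((1u << 23) - 1);
    exponent = (127 + msb) << 23;
    return fraction + exponent;
}

int main(void)
{
    assert(int2float_p(1) == 0x3f800000);               /* 1.0f */
    assert(int2float_p(1u << 24) == 0x4b800000);        /* 16777216.0f, still exact */
    assert(int2float_p((1u << 24) + 1) == 0x4b800000);  /* rounds toward zero */
    printf("int2float sketch OK\n");
    return 0;
}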
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index be85f75aedda..1c51c08b1fde 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -24,6 +24,8 @@
* Authors:
* Dave Airlie <airlied@redhat.com>
* Alex Deucher <alexander.deucher@amd.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 9b2512bf1a46..01a3ec83f284 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -31,12 +31,7 @@
#include "r600d.h"
#include "r600_reg_safe.h"
-static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
-static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+static int r600_nomm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
@@ -784,170 +779,29 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
/**
- * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-static int r600_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
-{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- uint32_t header;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = CP_PACKET_GET_TYPE(header);
- pkt->count = CP_PACKET_GET_COUNT(header);
- pkt->one_reg_wr = 0;
- switch (pkt->type) {
- case PACKET_TYPE0:
- pkt->reg = CP_PACKET0_GET_REG(header);
- break;
- case PACKET_TYPE3:
- pkt->opcode = CP_PACKET3_GET_OPCODE(header);
- break;
- case PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
- * @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = r600_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- return -EINVAL;
- }
- /* FIXME: we assume reloc size is 4 dwords */
- *cs_reloc = p->relocs_ptr[(idx / 4)];
- return 0;
-}
-
-/**
- * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
+ * r600_cs_packet_parse_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
*
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = r600_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- return -EINVAL;
- }
- *cs_reloc = p->relocs;
- (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
- (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
- return 0;
-}
-
-/**
- * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
- * @parser: parser structure holding parsing context.
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+ * This is an R600-specific function for parsing VLINE packets.
+ * The real work is done by the r600_cs_common_vline_parse() function.
+ * Here we just set up the ASIC-specific register tables and call
+ * that common implementation.

+ */
+static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
- struct radeon_cs_packet p3reloc;
- int r;
+ static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
+ AVIVO_D2MODE_VLINE_START_END};
+ static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
+ AVIVO_D2MODE_VLINE_STATUS};
- r = r600_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return 0;
- }
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- return 0;
- }
- return 1;
+ return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}
/**
- * r600_cs_packet_next_vline() - parse userspace VLINE packet
+ * r600_cs_common_vline_parse() - common vline parser
* @parser: parser structure holding parsing context.
+ * @vline_start_end: table of vline_start_end registers
+ * @vline_status: table of vline_status registers
*
* Userspace sends a special sequence for VLINE waits.
* PACKET0 - VLINE_START_END + value
@@ -957,9 +811,16 @@ static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
* This function parses this and relocates the VLINE START END
* and WAIT_REG_MEM packets to the correct crtc.
* It also detects a switched off crtc and nulls out the
- * wait in that case.
+ * wait in that case. This function is common to all ASICs that are
+ * R600 and newer; the parsing algorithm is the same, only the
+ * registers used differ.
+ *
+ * The caller is the ASIC-specific function, which passes in the
+ * parser context and the ASIC-specific register tables.
*/
-static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+ uint32_t *vline_start_end,
+ uint32_t *vline_status)
{
struct drm_mode_object *obj;
struct drm_crtc *crtc;
@@ -973,12 +834,12 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
- r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
+ r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
if (r)
return r;
/* check its a WAIT_REG_MEM */
- if (wait_reg_mem.type != PACKET_TYPE3 ||
+ if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
return -EINVAL;
@@ -987,7 +848,12 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+ DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
+ return -EINVAL;
+ }
+ /* bit 8 is me (0) or pfp (1) */
+ if (wait_reg_mem_info & 0x100) {
+ DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
return -EINVAL;
}
/* waiting for value to be equal */
@@ -995,18 +861,18 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
return -EINVAL;
}
- if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
+ if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
return -EINVAL;
}
- if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
+ if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
return -EINVAL;
}
/* jump over the NOP */
- r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
if (r)
return r;
@@ -1016,7 +882,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
- reg = CP_PACKET0_GET_REG(header);
+ reg = R600_CP_PACKET0_GET_REG(header);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -1028,7 +894,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
crtc_id = radeon_crtc->crtc_id;
if (!crtc->enabled) {
- /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+ /* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
ib[h_idx + 2] = PACKET2(0);
ib[h_idx + 3] = PACKET2(0);
ib[h_idx + 4] = PACKET2(0);
@@ -1036,20 +902,15 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib[h_idx + 6] = PACKET2(0);
ib[h_idx + 7] = PACKET2(0);
ib[h_idx + 8] = PACKET2(0);
- } else if (crtc_id == 1) {
- switch (reg) {
- case AVIVO_D1MODE_VLINE_START_END:
- header &= ~R600_CP_PACKET0_REG_MASK;
- header |= AVIVO_D2MODE_VLINE_START_END >> 2;
- break;
- default:
- DRM_ERROR("unknown crtc reloc\n");
- return -EINVAL;
- }
+ } else if (reg == vline_start_end[0]) {
+ header &= ~R600_CP_PACKET0_REG_MASK;
+ header |= vline_start_end[crtc_id] >> 2;
ib[h_idx] = header;
- ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
+ ib[h_idx + 4] = vline_status[crtc_id] >> 2;
+ } else {
+ DRM_ERROR("unknown crtc reloc\n");
+ return -EINVAL;
}
-
return 0;
}
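
The common parser patches the type-0 VLINE packet by table lookup: it clears the register field of the header and substitutes the start/end and status registers for the CRTC in question. A minimal sketch of that table-driven patching; the register offsets below are placeholders, not the real AVIVO addresses, and the 16-bit register field follows the PACKET0 layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-CRTC register tables (placeholder offsets). */
static const uint32_t hyp_vline_start_end[2] = { 0x6538, 0x6d38 };
static const uint32_t hyp_vline_status[2]    = { 0x653c, 0x6d3c };

/* Patch a PACKET0 header so it targets the register for the requested CRTC.
 * The low 16 bits of a type-0 header hold (register offset >> 2). */
static uint32_t patch_vline_packet(uint32_t header, unsigned int crtc_id)
{
    header &= ~0xffffu;                             /* drop the old register field */
    header |= hyp_vline_start_end[crtc_id] >> 2;    /* point at this CRTC's register */
    return header;
}

int main(void)
{
    uint32_t header = patch_vline_packet(0, 1);

    printf("patched header: 0x%08x (status reg 0x%04x)\n",
           header, hyp_vline_status[1]);
    return 0;
}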
@@ -1155,8 +1016,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case R_028010_DB_DEPTH_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
- r600_cs_packet_next_is_pkt3_nop(p)) {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ radeon_cs_packet_next_is_pkt3_nop(p)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1198,7 +1059,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_1:
case VGT_STRMOUT_BUFFER_BASE_2:
case VGT_STRMOUT_BUFFER_BASE_3:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1221,7 +1082,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->streamout_dirty = true;
break;
case CP_COHER_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
"0x%04X\n", reg);
@@ -1256,8 +1117,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
- r600_cs_packet_next_is_pkt3_nop(p)) {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ radeon_cs_packet_next_is_pkt3_nop(p)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1320,7 +1181,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280F8_CB_COLOR6_FRAG:
case R_0280FC_CB_COLOR7_FRAG:
tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
- if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
if (!track->cb_color_base_last[tmp]) {
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
@@ -1329,7 +1190,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1351,7 +1212,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280D8_CB_COLOR6_TILE:
case R_0280DC_CB_COLOR7_TILE:
tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
- if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
if (!track->cb_color_base_last[tmp]) {
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
@@ -1360,7 +1221,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1395,7 +1256,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_BASE:
case CB_COLOR6_BASE:
case CB_COLOR7_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1410,7 +1271,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_dirty = true;
break;
case DB_DEPTH_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1423,7 +1284,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_HTILE_DATA_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1493,7 +1354,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_VS_13:
case SQ_ALU_CONST_CACHE_VS_14:
case SQ_ALU_CONST_CACHE_VS_15:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1502,7 +1363,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SX_MEMORY_EXPORT_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONFIG_REG "
"0x%04X\n", reg);
@@ -1788,7 +1649,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -1829,7 +1690,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -1881,7 +1742,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x10) {
uint64_t offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
@@ -1893,6 +1754,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else if (idx_value & 0x100) {
+ DRM_ERROR("cannot use PFP on REG wait\n");
+ return -EINVAL;
}
break;
case PACKET3_CP_DMA:
@@ -1915,7 +1779,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
/* src address space is memory */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad CP DMA SRC\n");
return -EINVAL;
@@ -1945,7 +1809,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("CP DMA DAIC only supported for registers\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad CP DMA DST\n");
return -EINVAL;
@@ -1975,7 +1839,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* 0xffffffff/0x0 is flush all cache flag */
if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
radeon_get_ib_value(p, idx + 2) != 0) {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
@@ -1991,7 +1855,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (pkt->count) {
uint64_t offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2012,7 +1876,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2078,7 +1942,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2092,7 +1956,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
texture = reloc->robj;
/* tex mip base */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2113,7 +1977,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset64;
/* vtx base */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2214,7 +2078,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
return -EINVAL;
@@ -2258,7 +2122,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* Updating memory at DST_ADDRESS. */
if (idx_value & 0x1) {
u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
@@ -2277,7 +2141,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* Reading data from SRC_ADDRESS. */
if (((idx_value >> 1) & 0x3) == 2) {
u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
@@ -2302,7 +2166,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
@@ -2331,7 +2195,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x1) {
u64 offset;
/* SRC is memory. */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad COPY_DW (missing src reloc)\n");
return -EINVAL;
@@ -2355,7 +2219,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x2) {
u64 offset;
/* DST is memory. */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
@@ -2410,7 +2274,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
p->track = track;
}
do {
- r = r600_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
kfree(p->track);
p->track = NULL;
@@ -2418,12 +2282,12 @@ int r600_cs_parse(struct radeon_cs_parser *p)
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
r = r600_cs_parse_packet0(p, &pkt);
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
break;
- case PACKET_TYPE3:
+ case RADEON_PACKET_TYPE3:
r = r600_packet3_check(p, &pkt);
break;
default:
@@ -2449,17 +2313,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
return 0;
}
-static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
-{
- if (p->chunk_relocs_idx == -1) {
- return 0;
- }
- p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
- if (p->relocs == NULL) {
- return -ENOMEM;
- }
- return 0;
-}
+#ifdef CONFIG_DRM_RADEON_UMS
/**
* cs_parser_fini() - clean parser states
@@ -2485,6 +2339,18 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->chunks_array);
}
+static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
+{
+ if (p->chunk_relocs_idx == -1) {
+ return 0;
+ }
+ p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ if (p->relocs == NULL) {
+ return -ENOMEM;
+ }
+ return 0;
+}
+
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
unsigned family, u32 *ib, int *l)
{
@@ -2543,9 +2409,11 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
void r600_cs_legacy_init(void)
{
- r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
+ r600_nomm = 1;
}
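
The legacy (UMS) path no longer swaps a function pointer at init time; instead a single r600_nomm flag is passed down to the shared relocation helper. A hypothetical sketch of that "flag instead of function pointer" pattern, assuming the shared helper simply dispatches on the flag:

#include <stdio.h>

/* Hypothetical mm/nomm relocation paths behind one shared entry point. */
static int next_reloc_mm(void)   { puts("mm reloc path");   return 0; }
static int next_reloc_nomm(void) { puts("nomm reloc path"); return 0; }

static int packet_next_reloc(int nomm)
{
    return nomm ? next_reloc_nomm() : next_reloc_mm();
}

static int legacy_mode;     /* set once by the UMS init path */

int main(void)
{
    packet_next_reloc(legacy_mode);     /* KMS: mm path */
    legacy_mode = 1;                    /* r600_cs_legacy_init() equivalent */
    packet_next_reloc(legacy_mode);     /* UMS: nomm path */
    return 0;
}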
+#endif
+
/*
* DMA
*/
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index ff80efe9cb7d..21ecc0e12dc4 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -23,6 +23,7 @@
*
* Authors: Christian König
*/
+#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -121,79 +122,18 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
}
/*
- * calculate the crc for a given info frame
- */
-static void r600_hdmi_infoframe_checksum(uint8_t packetType,
- uint8_t versionNumber,
- uint8_t length,
- uint8_t *frame)
-{
- int i;
- frame[0] = packetType + versionNumber + length;
- for (i = 1; i <= length; i++)
- frame[0] += frame[i];
- frame[0] = 0x100 - frame[0];
-}
-
-/*
* build a HDMI Video Info Frame
*/
-static void r600_hdmi_videoinfoframe(
- struct drm_encoder *encoder,
- enum r600_hdmi_color_format color_format,
- int active_information_present,
- uint8_t active_format_aspect_ratio,
- uint8_t scan_information,
- uint8_t colorimetry,
- uint8_t ex_colorimetry,
- uint8_t quantization,
- int ITC,
- uint8_t picture_aspect_ratio,
- uint8_t video_format_identification,
- uint8_t pixel_repetition,
- uint8_t non_uniform_picture_scaling,
- uint8_t bar_info_data_valid,
- uint16_t top_bar,
- uint16_t bottom_bar,
- uint16_t left_bar,
- uint16_t right_bar
-)
+static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
+ uint8_t *frame = buffer + 3;
- uint8_t frame[14];
-
- frame[0x0] = 0;
- frame[0x1] =
- (scan_information & 0x3) |
- ((bar_info_data_valid & 0x3) << 2) |
- ((active_information_present & 0x1) << 4) |
- ((color_format & 0x3) << 5);
- frame[0x2] =
- (active_format_aspect_ratio & 0xF) |
- ((picture_aspect_ratio & 0x3) << 4) |
- ((colorimetry & 0x3) << 6);
- frame[0x3] =
- (non_uniform_picture_scaling & 0x3) |
- ((quantization & 0x3) << 2) |
- ((ex_colorimetry & 0x7) << 4) |
- ((ITC & 0x1) << 7);
- frame[0x4] = (video_format_identification & 0x7F);
- frame[0x5] = (pixel_repetition & 0xF);
- frame[0x6] = (top_bar & 0xFF);
- frame[0x7] = (top_bar >> 8);
- frame[0x8] = (bottom_bar & 0xFF);
- frame[0x9] = (bottom_bar >> 8);
- frame[0xA] = (left_bar & 0xFF);
- frame[0xB] = (left_bar >> 8);
- frame[0xC] = (right_bar & 0xFF);
- frame[0xD] = (right_bar >> 8);
-
- r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
/* Our header values (type, version, length) should be alright, Intel
* is using the same. Checksum function also seems to be OK, it works
* fine for audio infoframe. However calculated value is always lower
@@ -215,39 +155,15 @@ static void r600_hdmi_videoinfoframe(
/*
* build a Audio Info Frame
*/
-static void r600_hdmi_audioinfoframe(
- struct drm_encoder *encoder,
- uint8_t channel_count,
- uint8_t coding_type,
- uint8_t sample_size,
- uint8_t sample_frequency,
- uint8_t format,
- uint8_t channel_allocation,
- uint8_t level_shift,
- int downmix_inhibit
-)
+static void r600_hdmi_update_audio_infoframe(struct drm_encoder *encoder,
+ const void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
-
- uint8_t frame[11];
-
- frame[0x0] = 0;
- frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
- frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
- frame[0x3] = format;
- frame[0x4] = channel_allocation;
- frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
- frame[0x6] = 0;
- frame[0x7] = 0;
- frame[0x8] = 0;
- frame[0x9] = 0;
- frame[0xA] = 0;
-
- r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
+ const u8 *frame = buffer + 3;
WREG32(HDMI0_AUDIO_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -320,7 +236,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
uint32_t offset;
+ ssize_t err;
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
@@ -371,9 +290,19 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
- r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
r600_hdmi_update_ACR(encoder, mode->clock);
 /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
@@ -395,8 +324,11 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct r600_audio audio = r600_audio_status(rdev);
+ uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
+ struct hdmi_audio_infoframe frame;
uint32_t offset;
uint32_t iec;
+ ssize_t err;
if (!dig->afmt || !dig->afmt->enabled)
return;
@@ -462,9 +394,21 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
iec |= 0x5 << 16;
WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
- r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
- 0);
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ DRM_ERROR("failed to setup audio infoframe\n");
+ return;
+ }
+
+ frame.channels = audio.channels;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack audio infoframe\n");
+ return;
+ }
+ r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
r600_hdmi_audio_workaround(encoder);
}
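
The hand-rolled checksum that used to live in r600_hdmi_infoframe_checksum() follows the standard HDMI rule: the checksum byte makes type + version + length + payload + checksum sum to zero modulo 256, which is also what the hdmi_*_infoframe_pack() helpers compute into byte 3 of the packed buffer; that is why the register-update helpers above start at buffer + 3, skipping type, version and length. A small self-contained sketch of the rule, using a hypothetical all-zero AVI payload:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same checksum rule as the removed helper: the checksum byte makes the sum of
 * type + version + length + payload + checksum equal 0 modulo 256. */
static uint8_t infoframe_checksum(uint8_t type, uint8_t version, uint8_t length,
                                  const uint8_t *payload)
{
    unsigned int sum = type + version + length;
    size_t i;

    for (i = 0; i < length; i++)
        sum += payload[i];

    return (uint8_t)(0x100 - (sum & 0xff));
}

int main(void)
{
    /* Hypothetical 13-byte AVI payload of zeros: checksum covers the header only. */
    uint8_t payload[13] = { 0 };
    uint8_t csum = infoframe_checksum(0x82, 0x02, 0x0d, payload);

    assert((uint8_t)(0x82 + 0x02 + 0x0d + csum) == 0);
    printf("checksum = 0x%02x\n", csum);
    return 0;
}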
@@ -544,7 +488,6 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
/* Called for ATOM_ENCODER_MODE_HDMI only */
if (!dig || !dig->afmt) {
- WARN_ON(1);
return;
}
if (!dig->afmt->enabled)
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 4a53402b1852..a42ba11a3bed 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -182,6 +182,8 @@
#define CP_COHER_BASE 0x85F8
#define CP_DEBUG 0xC1FC
#define R_0086D8_CP_ME_CNTL 0x86D8
+#define S_0086D8_CP_PFP_HALT(x) (((x) & 1)<<26)
+#define C_0086D8_CP_PFP_HALT(x) ((x) & 0xFBFFFFFF)
#define S_0086D8_CP_ME_HALT(x) (((x) & 1)<<28)
#define C_0086D8_CP_ME_HALT(x) ((x) & 0xEFFFFFFF)
#define CP_ME_RAM_DATA 0xC160
@@ -1143,19 +1145,10 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
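
For reference, the PM4 header layout implied by these macros packs the packet type into bits 31:30, the body dword count into bits 29:16, and (for type-3 packets) the opcode into bits 15:8, while type-0 packets carry the register offset >> 2 in bits 15:0. A small self-contained example that encodes and decodes a header with a hypothetical opcode:

#include <assert.h>
#include <stdint.h>

/* Decode helpers mirroring the bit layout used by PACKET0()/PACKET3() above. */
#define PKT_TYPE(h)    (((h) >> 30) & 0x3)
#define PKT_COUNT(h)   (((h) >> 16) & 0x3fff)
#define PKT3_OPCODE(h) (((h) >> 8) & 0xff)

#define MK_PACKET3(op, n) ((3u << 30) | (((op) & 0xffu) << 8) | (((n) & 0x3fffu) << 16))

int main(void)
{
    /* Hypothetical opcode 0x10 with a 4-dword body. */
    uint32_t header = MK_PACKET3(0x10, 4);

    assert(PKT_TYPE(header) == 3);
    assert(PKT_COUNT(header) == 4);
    assert(PKT3_OPCODE(header) == 0x10);
    return 0;
}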
@@ -1328,6 +1321,7 @@
#define G_008010_VC_BUSY(x) (((x) >> 11) & 1)
#define G_008010_DB03_CLEAN(x) (((x) >> 12) & 1)
#define G_008010_CB03_CLEAN(x) (((x) >> 13) & 1)
+#define G_008010_TA_BUSY(x) (((x) >> 14) & 1)
#define G_008010_VGT_BUSY_NO_DMA(x) (((x) >> 16) & 1)
#define G_008010_VGT_BUSY(x) (((x) >> 17) & 1)
#define G_008010_TA03_BUSY(x) (((x) >> 18) & 1)
@@ -1395,6 +1389,7 @@
#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1)
#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1)
#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1)
+#define G_000E50_IH_BUSY(x) (((x) >> 17) & 1)
#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1)
#define R_000E60_SRBM_SOFT_RESET 0x0E60
#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a08f657329a0..8263af3fd832 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -136,6 +136,15 @@ extern int radeon_lockup_timeout;
#define RADEON_RESET_GFX (1 << 0)
#define RADEON_RESET_COMPUTE (1 << 1)
#define RADEON_RESET_DMA (1 << 2)
+#define RADEON_RESET_CP (1 << 3)
+#define RADEON_RESET_GRBM (1 << 4)
+#define RADEON_RESET_DMA1 (1 << 5)
+#define RADEON_RESET_RLC (1 << 6)
+#define RADEON_RESET_SEM (1 << 7)
+#define RADEON_RESET_IH (1 << 8)
+#define RADEON_RESET_VMC (1 << 9)
+#define RADEON_RESET_MC (1 << 10)
+#define RADEON_RESET_DISPLAY (1 << 11)
/*
* Errata workarounds.
@@ -341,7 +350,6 @@ struct radeon_bo {
struct drm_gem_object gem_base;
struct ttm_bo_kmap_obj dma_buf_vmap;
- int vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -771,6 +779,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -1169,6 +1178,10 @@ struct radeon_asic {
bool (*gui_idle)(struct radeon_device *rdev);
/* wait for mc_idle */
int (*mc_wait_for_idle)(struct radeon_device *rdev);
+ /* get the reference clock */
+ u32 (*get_xclk)(struct radeon_device *rdev);
+ /* get the gpu clock counter */
+ uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
@@ -1179,7 +1192,9 @@ struct radeon_asic {
void (*fini)(struct radeon_device *rdev);
u32 pt_ring_index;
- void (*set_page)(struct radeon_device *rdev, uint64_t pe,
+ void (*set_page)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
} vm;
@@ -1757,6 +1772,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
(rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
/*
* BIOS helpers.
@@ -1801,7 +1817,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
@@ -1847,10 +1863,13 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
+#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
+#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1972,6 +1991,19 @@ static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
#endif
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx);
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt);
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc,
+ int nomm);
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+ uint32_t *vline_start_end,
+ uint32_t *vline_status);
+
#include "radeon_object.h"
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 0b202c07fe50..aba0a893ea98 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -934,6 +934,8 @@ static struct radeon_asic r600_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -946,7 +948,7 @@ static struct radeon_asic r600_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &r600_gpu_is_lockup,
+ .is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
@@ -1018,6 +1020,8 @@ static struct radeon_asic rs780_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1030,7 +1034,7 @@ static struct radeon_asic rs780_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &r600_gpu_is_lockup,
+ .is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
@@ -1102,6 +1106,8 @@ static struct radeon_asic rv770_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1114,7 +1120,7 @@ static struct radeon_asic rv770_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &r600_gpu_is_lockup,
+ .is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
@@ -1186,6 +1192,8 @@ static struct radeon_asic evergreen_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1198,7 +1206,7 @@ static struct radeon_asic evergreen_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1207,7 +1215,7 @@ static struct radeon_asic evergreen_asic = {
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
+ .is_lockup = &evergreen_dma_is_lockup,
}
},
.irq = {
@@ -1270,6 +1278,8 @@ static struct radeon_asic sumo_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1282,7 +1292,7 @@ static struct radeon_asic sumo_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1291,7 +1301,7 @@ static struct radeon_asic sumo_asic = {
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
+ .is_lockup = &evergreen_dma_is_lockup,
}
},
.irq = {
@@ -1354,6 +1364,8 @@ static struct radeon_asic btc_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1366,7 +1378,7 @@ static struct radeon_asic btc_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1375,7 +1387,7 @@ static struct radeon_asic btc_asic = {
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
+ .is_lockup = &evergreen_dma_is_lockup,
}
},
.irq = {
@@ -1438,6 +1450,8 @@ static struct radeon_asic cayman_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1445,7 +1459,7 @@ static struct radeon_asic cayman_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@@ -1457,7 +1471,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1468,7 +1482,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1479,7 +1493,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
@@ -1565,6 +1579,8 @@ static struct radeon_asic trinity_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1572,7 +1588,7 @@ static struct radeon_asic trinity_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@@ -1584,7 +1600,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1595,7 +1611,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1606,7 +1622,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
@@ -1692,6 +1708,8 @@ static struct radeon_asic si_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &si_get_xclk,
+ .get_gpu_clock_counter = &si_get_gpu_clock_counter,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1699,7 +1717,7 @@ static struct radeon_asic si_asic = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &si_vm_set_page,
},
.ring = {
@@ -1711,7 +1729,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &si_gpu_is_lockup,
+ .is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1722,7 +1740,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &si_gpu_is_lockup,
+ .is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1733,7 +1751,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &si_gpu_is_lockup,
+ .is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
@@ -1744,7 +1762,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
+ .is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
},
[CAYMAN_RING_TYPE_DMA1_INDEX] = {
@@ -1755,7 +1773,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
+ .is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
}
},
@@ -1944,9 +1962,13 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
+ case CHIP_OLAND:
rdev->asic = &si_asic;
/* set num crtcs */
- rdev->num_crtc = 6;
+ if (rdev->family == CHIP_OLAND)
+ rdev->num_crtc = 2;
+ else
+ rdev->num_crtc = 6;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 15d70e613076..3535f73ad3e2 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -319,7 +319,7 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
bool emit_wait);
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
@@ -389,7 +389,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
-uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
+u32 r600_get_xclk(struct radeon_device *rdev);
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -407,6 +408,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
+u32 rv770_get_xclk(struct radeon_device *rdev);
/*
* evergreen
@@ -422,7 +424,8 @@ int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -473,13 +476,16 @@ int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void cayman_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -496,23 +502,27 @@ int si_init(struct radeon_device *rdev);
void si_fini(struct radeon_device *rdev);
int si_suspend(struct radeon_device *rdev);
int si_resume(struct radeon_device *rdev);
-bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int si_asic_reset(struct radeon_device *rdev);
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void si_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-uint64_t si_get_gpu_clock(struct radeon_device *rdev);
int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+u32 si_get_xclk(struct radeon_device *rdev);
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 15f5ded65e0c..d96070bf8388 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -43,6 +43,12 @@ struct atpx_verify_interface {
u32 function_bits; /* supported functions bit vector */
} __packed;
+struct atpx_px_params {
+ u16 size; /* structure size in bytes (includes size field) */
+ u32 valid_flags; /* which flags are valid */
+ u32 flags; /* flags */
+} __packed;
+
struct atpx_power_control {
u16 size;
u8 dgpu_state;
@@ -123,9 +129,61 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
}
/**
+ * radeon_atpx_validate - validate ATPX functions
+ *
+ * @atpx: radeon atpx struct
+ *
+ * Validate that required functions are enabled (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int radeon_atpx_validate(struct radeon_atpx *atpx)
+{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ atpx->functions.power_cntl = true;
+
+ if (atpx->functions.px_params) {
+ union acpi_object *info;
+ struct atpx_px_params output;
+ size_t size;
+ u32 valid_bits;
+
+ info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
+ if (!info)
+ return -EIO;
+
+ memset(&output, 0, sizeof(output));
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ printk("ATPX buffer is too small: %zu\n", size);
+ kfree(info);
+ return -EINVAL;
+ }
+ size = min(sizeof(output), size);
+
+ memcpy(&output, info->buffer.pointer, size);
+
+ valid_bits = output.flags & output.valid_flags;
+ /* if separate mux flag is set, mux controls are required */
+ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+ atpx->functions.i2c_mux_cntl = true;
+ atpx->functions.disp_mux_cntl = true;
+ }
+ /* if any outputs are muxed, mux controls are required */
+ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+ ATPX_TV_SIGNAL_MUXED |
+ ATPX_DFP_SIGNAL_MUXED))
+ atpx->functions.disp_mux_cntl = true;
+
+ kfree(info);
+ }
+ return 0;
+}
+
+/**
* radeon_atpx_verify_interface - verify ATPX
*
- * @handle: acpi handle
* @atpx: radeon atpx struct
*
* Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
@@ -406,8 +464,19 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
*/
static int radeon_atpx_init(void)
{
+ int r;
+
/* set up the ATPX handle */
- return radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
+ r = radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
+ if (r)
+ return r;
+
+ /* validate the atpx setup */
+ r = radeon_atpx_validate(&radeon_atpx_priv.atpx);
+ if (r)
+ return r;
+
+ return 0;
}
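The PX-parameters handling above follows a common pattern for variable-size ACPI output buffers: read the leading u16 size field, reject buffers below the minimum, clamp to the local struct, then copy. A minimal hedged sketch of that pattern, using a hypothetical helper name that is not part of the patch:

static int example_parse_px_params(const void *buf, struct atpx_px_params *out)
{
	size_t size = *(const u16 *)buf;	/* first field is the total size */

	if (size < 10)	/* must at least cover size + valid_flags + flags */
		return -EINVAL;
	memset(out, 0, sizeof(*out));
	memcpy(out, buf, min(sizeof(*out), size));	/* never overrun the local struct */
	return 0;
}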
/**
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 9143fc45e35b..efc4f6441ef4 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -27,6 +27,8 @@
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5407459e56d2..70d38241b083 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -29,9 +29,6 @@
#include "radeon_reg.h"
#include "radeon.h"
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt);
-
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
struct drm_device *ddev = p->rdev->ddev;
@@ -128,18 +125,6 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return 0;
}
-static void radeon_cs_sync_to(struct radeon_cs_parser *p,
- struct radeon_fence *fence)
-{
- struct radeon_fence *other;
-
- if (!fence)
- return;
-
- other = p->ib.sync_to[fence->ring];
- p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
int i;
@@ -148,7 +133,7 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
if (!p->relocs[i].robj)
continue;
- radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
+ radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
}
}
@@ -203,7 +188,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].kdata = NULL;
p->chunks[i].chunk_id = user_chunk.chunk_id;
-
+ p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
p->chunk_relocs_idx = i;
}
@@ -226,9 +211,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
}
- p->chunks[i].length_dw = user_chunk.length_dw;
- p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
-
cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
(p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
@@ -478,8 +460,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
- radeon_cs_sync_to(parser, vm->fence);
- radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));
+ radeon_ib_sync_to(&parser->ib, vm->fence);
+ radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
+ rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
@@ -648,3 +631,152 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
idx_value = ibc->kpage[new_page][pg_offset/4];
return idx_value;
}
+
+/**
+ * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @p: parser structure holding the parsing context.
+ * @pkt: where to store packet information
+ *
+ * Assumes that chunk_ib_idx is properly set. Returns -EINVAL
+ * if the packet is bigger than the remaining ib size, or if the packet type is unknown.
+ **/
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_device *rdev = p->rdev;
+ uint32_t header;
+
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ header = radeon_get_ib_value(p, idx);
+ pkt->idx = idx;
+ pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
+ pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
+ pkt->one_reg_wr = 0;
+ switch (pkt->type) {
+ case RADEON_PACKET_TYPE0:
+ if (rdev->family < CHIP_R600) {
+ pkt->reg = R100_CP_PACKET0_GET_REG(header);
+ pkt->one_reg_wr =
+ RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
+ } else
+ pkt->reg = R600_CP_PACKET0_GET_REG(header);
+ break;
+ case RADEON_PACKET_TYPE3:
+ pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
+ break;
+ case RADEON_PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
+ * @p: structure holding the parser context.
+ *
+ * Check if the next packet is NOP relocation packet3.
+ **/
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r)
+ return false;
+ if (p3reloc.type != RADEON_PACKET_TYPE3)
+ return false;
+ if (p3reloc.opcode != RADEON_PACKET3_NOP)
+ return false;
+ return true;
+}
+
+/**
+ * radeon_cs_dump_packet() - dump raw packet context
+ * @p: structure holding the parser context.
+ * @pkt: structure holding the packet.
+ *
+ * Used mostly for debugging and error reporting.
+ **/
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ volatile uint32_t *ib;
+ unsigned i;
+ unsigned idx;
+
+ ib = p->ib.ptr;
+ idx = pkt->idx;
+ for (i = 0; i <= (pkt->count + 1); i++, idx++)
+ DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+}
+
+/**
+ * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
+ * @p: parser structure holding the parsing context.
+ * @cs_reloc: where to store the resulting relocation
+ * @nomm: use the legacy (non-MM) relocation handling
+ *
+ * Check if next packet is relocation packet3, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc,
+ int nomm)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r)
+ return r;
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != RADEON_PACKET_TYPE3 ||
+ p3reloc.opcode != RADEON_PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ radeon_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ radeon_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ if (nomm) {
+ *cs_reloc = p->relocs;
+ (*cs_reloc)->lobj.gpu_offset =
+ (u64)relocs_chunk->kdata[idx + 3] << 32;
+ (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+ } else
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
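A hedged usage sketch of the new generic helper: a checker could walk an IB chunk by repeatedly calling radeon_cs_packet_parse() and advancing past each packet. example_walk_ib() and check_packet3() are hypothetical names, not part of the patch:

static int example_walk_ib(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r;

	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		if (pkt.type == RADEON_PACKET_TYPE3) {
			r = check_packet3(p, &pkt);	/* hypothetical per-opcode checker */
			if (r)
				return r;
		}
		/* header dword + (count + 1) payload dwords */
		p->idx += pkt.count + 2;
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}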
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 0d67674b64b1..b097d5b4ff39 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -246,8 +246,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int i = 0;
struct drm_crtc *crtc_p;
- /* avivo cursor image can't end on 128 pixel boundary or
+ /*
+ * avivo cursor image can't end on 128 pixel boundary or
* go past the end of the frame if both crtcs are enabled
+ *
+ * NOTE: It is safe to access crtc->enabled of other crtcs
+ * without holding either the mode_config lock or the other
+ * crtc's lock as long as write access to this flag _always_
+ * grabs all locks.
*/
list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
if (crtc_p->enabled)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0d6562bb0c93..44b8034a400d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -93,6 +93,7 @@ static const char radeon_family_name[][16] = {
"TAHITI",
"PITCAIRN",
"VERDE",
+ "OLAND",
"LAST",
};
@@ -758,6 +759,11 @@ int radeon_atombios_init(struct radeon_device *rdev)
atom_card_info->pll_write = cail_pll_write;
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
+ if (!rdev->mode_info.atom_context) {
+ radeon_atombios_fini(rdev);
+ return -ENOMEM;
+ }
+
mutex_init(&rdev->mode_info.atom_context->mutex);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
@@ -777,9 +783,11 @@ void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
kfree(rdev->mode_info.atom_context->scratch);
- kfree(rdev->mode_info.atom_context);
}
+ kfree(rdev->mode_info.atom_context);
+ rdev->mode_info.atom_context = NULL;
kfree(rdev->mode_info.atom_card_info);
+ rdev->mode_info.atom_card_info = NULL;
}
/* COMBIOS */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 05c96fa0b051..e38fd559f1ab 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1089,12 +1089,12 @@ radeon_framebuffer_init(struct drm_device *dev,
{
int ret;
rfb->obj = obj;
+ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
if (ret) {
rfb->obj = NULL;
return ret;
}
- drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d9bf96ee299a..167758488ed6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -118,20 +118,32 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle);
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags);
-struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg);
+int radeon_gem_prime_pin(struct drm_gem_object *obj);
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor);
void radeon_debugfs_cleanup(struct drm_minor *minor);
#endif
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+#endif
int radeon_no_wb;
-int radeon_modeset = -1;
+int radeon_modeset = 1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
@@ -199,6 +211,14 @@ module_param_named(msi, radeon_msi, int, 0444);
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+static struct pci_device_id pciidlist[] = {
+ radeon_PCI_IDS
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+#ifdef CONFIG_DRM_RADEON_UMS
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -227,14 +247,6 @@ static int radeon_resume(struct drm_device *dev)
return 0;
}
-static struct pci_device_id pciidlist[] = {
- radeon_PCI_IDS
-};
-
-#if defined(CONFIG_DRM_RADEON_KMS)
-MODULE_DEVICE_TABLE(pci, pciidlist);
-#endif
-
static const struct file_operations radeon_driver_old_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -284,6 +296,8 @@ static struct drm_driver driver_old = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+#endif
+
static struct drm_driver kms_driver;
static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
@@ -397,8 +411,13 @@ static struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = radeon_gem_prime_export,
- .gem_prime_import = radeon_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = radeon_gem_prime_pin,
+ .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
+ .gem_prime_vmap = radeon_gem_prime_vmap,
+ .gem_prime_vunmap = radeon_gem_prime_vunmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -411,10 +430,12 @@ static struct drm_driver kms_driver = {
static struct drm_driver *driver;
static struct pci_driver *pdriver;
+#ifdef CONFIG_DRM_RADEON_UMS
static struct pci_driver radeon_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
};
+#endif
static struct pci_driver radeon_kms_pci_driver = {
.name = DRIVER_NAME,
@@ -427,28 +448,6 @@ static struct pci_driver radeon_kms_pci_driver = {
static int __init radeon_init(void)
{
- driver = &driver_old;
- pdriver = &radeon_pci_driver;
- driver->num_ioctls = radeon_max_ioctl;
-#ifdef CONFIG_VGA_CONSOLE
- if (vgacon_text_force() && radeon_modeset == -1) {
- DRM_INFO("VGACON disable radeon kernel modesetting.\n");
- driver = &driver_old;
- pdriver = &radeon_pci_driver;
- driver->driver_features &= ~DRIVER_MODESET;
- radeon_modeset = 0;
- }
-#endif
- /* if enabled by default */
- if (radeon_modeset == -1) {
-#ifdef CONFIG_DRM_RADEON_KMS
- DRM_INFO("radeon defaulting to kernel modesetting.\n");
- radeon_modeset = 1;
-#else
- DRM_INFO("radeon defaulting to userspace modesetting.\n");
- radeon_modeset = 0;
-#endif
- }
if (radeon_modeset == 1) {
DRM_INFO("radeon kernel modesetting enabled.\n");
driver = &kms_driver;
@@ -456,9 +455,21 @@ static int __init radeon_init(void)
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = radeon_max_kms_ioctl;
radeon_register_atpx_handler();
+
+ } else {
+#ifdef CONFIG_DRM_RADEON_UMS
+ DRM_INFO("radeon userspace modesetting enabled.\n");
+ driver = &driver_old;
+ pdriver = &radeon_pci_driver;
+ driver->driver_features &= ~DRIVER_MODESET;
+ driver->num_ioctls = radeon_max_ioctl;
+#else
+ DRM_ERROR("No UMS support in radeon module!\n");
+ return -EINVAL;
+#endif
}
- /* if the vga console setting is enabled still
- * let modprobe override it */
+
+ /* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e7fdf163a8ca..b369d42f7de5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -113,6 +113,9 @@
#define DRIVER_MINOR 33
#define DRIVER_PATCHLEVEL 0
+/* The rest of the file is DEPRECATED! */
+#ifdef CONFIG_DRM_RADEON_UMS
+
enum radeon_cp_microcode_version {
UCODE_R100,
UCODE_R200,
@@ -418,8 +421,6 @@ extern int radeon_driver_open(struct drm_device *dev,
struct drm_file *file_priv);
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -462,15 +463,6 @@ extern void r600_blit_swap(struct drm_device *dev,
int sx, int sy, int dx, int dy,
int w, int h, int src_pitch, int dst_pitch, int cpp);
-/* atpx handler */
-#if defined(CONFIG_VGA_SWITCHEROO)
-void radeon_register_atpx_handler(void);
-void radeon_unregister_atpx_handler(void);
-#else
-static inline void radeon_register_atpx_handler(void) {}
-static inline void radeon_unregister_atpx_handler(void) {}
-#endif
-
/* Flags for stats.boxes
*/
#define RADEON_BOX_DMA_IDLE 0x1
@@ -2167,4 +2159,6 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
} while (0)
+#endif /* CONFIG_DRM_RADEON_UMS */
+
#endif /* __RADEON_DRV_H__ */
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index d1fafeabea09..2d91123f2759 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -91,6 +91,7 @@ enum radeon_family {
CHIP_TAHITI,
CHIP_PITCAIRN,
CHIP_VERDE,
+ CHIP_OLAND,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index cc8489d8c6d1..b1746741bc59 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -187,9 +187,10 @@ out_unref:
return ret;
}
-static int radeonfb_create(struct radeon_fbdev *rfbdev,
+static int radeonfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
@@ -293,28 +294,13 @@ out_unref:
}
if (fb && ret) {
drm_gem_object_unreference(gobj);
+ drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
return ret;
}
-static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = radeonfb_create(rfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
@@ -339,6 +325,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
rfb->obj = NULL;
}
drm_fb_helper_fini(&rfbdev->helper);
+ drm_framebuffer_unregister_private(&rfb->base);
drm_framebuffer_cleanup(&rfb->base);
return 0;
@@ -347,7 +334,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
.gamma_get = radeon_crtc_fb_gamma_get,
- .fb_probe = radeon_fb_find_or_create_single,
+ .fb_probe = radeonfb_create,
};
int radeon_fbdev_init(struct radeon_device *rdev)
@@ -377,6 +364,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
}
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(rdev->ddev);
+
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 6e24f84755b5..2c1341f63dc5 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -929,6 +929,7 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
*/
static int radeon_vm_update_pdes(struct radeon_device *rdev,
struct radeon_vm *vm,
+ struct radeon_ib *ib,
uint64_t start, uint64_t end)
{
static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
@@ -971,7 +972,7 @@ retry:
((last_pt + incr * count) != pt)) {
if (count) {
- radeon_asic_vm_set_page(rdev, last_pde,
+ radeon_asic_vm_set_page(rdev, ib, last_pde,
last_pt, count, incr,
RADEON_VM_PAGE_VALID);
}
@@ -985,7 +986,7 @@ retry:
}
if (count) {
- radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+ radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
incr, RADEON_VM_PAGE_VALID);
}
@@ -1009,6 +1010,7 @@ retry:
*/
static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_vm *vm,
+ struct radeon_ib *ib,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
@@ -1038,7 +1040,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
if ((last_pte + 8 * count) != pte) {
if (count) {
- radeon_asic_vm_set_page(rdev, last_pte,
+ radeon_asic_vm_set_page(rdev, ib, last_pte,
last_dst, count,
RADEON_GPU_PAGE_SIZE,
flags);
@@ -1056,7 +1058,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
}
if (count) {
- radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
+ radeon_asic_vm_set_page(rdev, ib, last_pte,
+ last_dst, count,
RADEON_GPU_PAGE_SIZE, flags);
}
}
@@ -1080,8 +1083,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct ttm_mem_reg *mem)
{
unsigned ridx = rdev->asic->vm.pt_ring_index;
- struct radeon_ring *ring = &rdev->ring[ridx];
- struct radeon_semaphore *sem = NULL;
+ struct radeon_ib ib;
struct radeon_bo_va *bo_va;
unsigned nptes, npdes, ndw;
uint64_t addr;
@@ -1124,25 +1126,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
bo_va->valid = false;
}
- if (vm->fence && radeon_fence_signaled(vm->fence)) {
- radeon_fence_unref(&vm->fence);
- }
-
- if (vm->fence && vm->fence->ring != ridx) {
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- return r;
- }
- }
-
nptes = radeon_bo_ngpu_pages(bo);
/* assume two extra pdes in case the mapping overlaps the borders */
npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
- /* estimate number of dw needed */
- /* semaphore, fence and padding */
- ndw = 32;
+ /* padding, etc. */
+ ndw = 64;
if (RADEON_VM_BLOCK_SIZE > 11)
/* reserve space for one header for every 2k dwords */
@@ -1161,33 +1151,31 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
/* reserve space for pde addresses */
ndw += npdes * 2;
- r = radeon_ring_lock(rdev, ring, ndw);
- if (r) {
- return r;
- }
+ /* update too big for an IB */
+ if (ndw > 0xfffff)
+ return -ENOMEM;
- if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
- radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
- radeon_fence_note_sync(vm->fence, ridx);
- }
+ r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+ ib.length_dw = 0;
- r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
+ r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
if (r) {
- radeon_ring_unlock_undo(rdev, ring);
+ radeon_ib_free(rdev, &ib);
return r;
}
- radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
+ radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
addr, bo_va->flags);
- radeon_fence_unref(&vm->fence);
- r = radeon_fence_emit(rdev, &vm->fence, ridx);
+ radeon_ib_sync_to(&ib, vm->fence);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
- radeon_ring_unlock_undo(rdev, ring);
+ radeon_ib_free(rdev, &ib);
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, vm->fence);
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(ib.fence);
+ radeon_ib_free(rdev, &ib);
radeon_fence_unref(&vm->last_flush);
return 0;
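The rewritten radeon_vm_bo_update_pte() above replaces the ring/semaphore dance with an IB. A condensed, hedged sketch of the new lifecycle (build, sync, schedule, hand the IB fence to the VM), assuming rdev, ridx, vm and ndw are set up as in the function:

	struct radeon_ib ib;
	int r;

	r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;
	/* radeon_asic_vm_set_page() calls append page-table writes to ib.ptr */
	radeon_ib_sync_to(&ib, vm->fence);	/* order behind previous VM work */
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);	/* IB fence becomes the VM fence */
	radeon_ib_free(rdev, &ib);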
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index e7710339a6a7..8d68e972789a 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -28,6 +28,8 @@
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
* Michel Dänzer <michel@daenzer.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 9c312f9afb68..c75cb2c6ba71 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -185,11 +185,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (info->request == RADEON_INFO_TIMESTAMP) {
if (rdev->family >= CHIP_R600) {
value_ptr64 = (uint64_t*)((unsigned long)info->value);
- if (rdev->family >= CHIP_TAHITI) {
- value64 = si_get_gpu_clock(rdev);
- } else {
- value64 = r600_get_gpu_clock(rdev);
- }
+ value64 = radeon_get_gpu_clock_counter(rdev);
if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
@@ -282,7 +278,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
/* return clock value in KHz */
- value = rdev->clock.spll.reference_freq * 10;
+ if (rdev->asic->get_xclk)
+ value = radeon_get_xclk(rdev) * 10;
+ else
+ value = rdev->clock.spll.reference_freq * 10;
break;
case RADEON_INFO_NUM_BACKENDS:
if (rdev->family >= CHIP_TAHITI)
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index b9f067241633..d54d2d7c9031 100644
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -27,6 +27,8 @@
*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0bfa656aa87d..338fd6a74e87 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -169,7 +169,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
/* starting with BTC, there is one state that is used for both
* MH and SH. Difference is that we always use the high clock index for
- * mclk.
+ * mclk and vddci.
*/
if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
(rdev->family >= CHIP_BARTS) &&
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 26c23bb651c6..4940af7e75e6 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -28,199 +28,71 @@
#include "radeon.h"
#include <drm/radeon_drm.h>
-#include <linux/dma-buf.h>
-
-static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
- struct radeon_bo *bo = attachment->dmabuf->priv;
- struct drm_device *dev = bo->rdev->ddev;
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
int npages = bo->tbo.num_pages;
- struct sg_table *sg;
- int nents;
-
- mutex_lock(&dev->struct_mutex);
- sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
- nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
- mutex_unlock(&dev->struct_mutex);
- return sg;
-}
-
-static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg, enum dma_data_direction dir)
-{
- dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
- sg_free_table(sg);
- kfree(sg);
-}
-
-static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct radeon_bo *bo = dma_buf->priv;
-
- if (bo->gem_base.export_dma_buf == dma_buf) {
- DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
- bo->gem_base.export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(&bo->gem_base);
- }
-}
-
-static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
-static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
+ return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
-static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
{
- return -EINVAL;
-}
-
-static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
-{
- struct radeon_bo *bo = dma_buf->priv;
- struct drm_device *dev = bo->rdev->ddev;
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret;
- mutex_lock(&dev->struct_mutex);
- if (bo->vmapping_count) {
- bo->vmapping_count++;
- goto out_unlock;
- }
-
ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
&bo->dma_buf_vmap);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
+ if (ret)
return ERR_PTR(ret);
- }
- bo->vmapping_count = 1;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
+
return bo->dma_buf_vmap.virtual;
}
-static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
- struct radeon_bo *bo = dma_buf->priv;
- struct drm_device *dev = bo->rdev->ddev;
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
- mutex_lock(&dev->struct_mutex);
- bo->vmapping_count--;
- if (bo->vmapping_count == 0) {
- ttm_bo_kunmap(&bo->dma_buf_vmap);
- }
- mutex_unlock(&dev->struct_mutex);
+ ttm_bo_kunmap(&bo->dma_buf_vmap);
}
-const static struct dma_buf_ops radeon_dmabuf_ops = {
- .map_dma_buf = radeon_gem_map_dma_buf,
- .unmap_dma_buf = radeon_gem_unmap_dma_buf,
- .release = radeon_gem_dmabuf_release,
- .kmap = radeon_gem_kmap,
- .kmap_atomic = radeon_gem_kmap_atomic,
- .kunmap = radeon_gem_kunmap,
- .kunmap_atomic = radeon_gem_kunmap_atomic,
- .mmap = radeon_gem_prime_mmap,
- .vmap = radeon_gem_prime_vmap,
- .vunmap = radeon_gem_prime_vunmap,
-};
-
-static int radeon_prime_create(struct drm_device *dev,
- size_t size,
- struct sg_table *sg,
- struct radeon_bo **pbo)
+
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_bo *bo;
int ret;
ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
- RADEON_GEM_DOMAIN_GTT, sg, pbo);
+ RADEON_GEM_DOMAIN_GTT, sg, &bo);
if (ret)
- return ret;
- bo = *pbo;
+ return ERR_PTR(ret);
bo->gem_base.driver_private = bo;
mutex_lock(&rdev->gem.mutex);
list_add_tail(&bo->list, &rdev->gem.objects);
mutex_unlock(&rdev->gem.mutex);
- return 0;
+ return &bo->gem_base;
}
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags)
+int radeon_gem_prime_pin(struct drm_gem_object *obj)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret = 0;
ret = radeon_bo_reserve(bo, false);
if (unlikely(ret != 0))
- return ERR_PTR(ret);
+ return ret;
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
if (ret) {
radeon_bo_unreserve(bo);
- return ERR_PTR(ret);
+ return ret;
}
radeon_bo_unreserve(bo);
- return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
-}
-struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sg;
- struct radeon_bo *bo;
- int ret;
-
- if (dma_buf->ops == &radeon_dmabuf_ops) {
- bo = dma_buf->priv;
- if (bo->gem_base.dev == dev) {
- drm_gem_object_reference(&bo->gem_base);
- dma_buf_put(dma_buf);
- return &bo->gem_base;
- }
- }
-
- /* need to attach */
- attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach))
- return ERR_CAST(attach);
-
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
- if (ret)
- goto fail_unmap;
-
- bo->gem_base.import_attach = attach;
-
- return &bo->gem_base;
-
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
- dma_buf_detach(dma_buf, attach);
- return ERR_PTR(ret);
+ return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 5d8f735d6aaf..7e2c2b7cf188 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3706,4 +3706,19 @@
#define RV530_GB_PIPE_SELECT2 0x4124
+#define RADEON_CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define RADEON_CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define RADEON_CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define RADEON_CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define R100_CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define R600_CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define RADEON_PACKET_TYPE0 0
+#define RADEON_PACKET_TYPE1 1
+#define RADEON_PACKET_TYPE2 2
+#define RADEON_PACKET_TYPE3 3
+
+#define RADEON_PACKET3_NOP 0x10
+
+#define RADEON_VLINE_STAT (1 << 12)
+
#endif
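A small hedged example of the macros that now live in radeon_reg.h; example_is_pkt3_nop() is a hypothetical helper, and a header value such as 0xC0001000 decodes to a type-3 NOP with a zero count field:

static bool example_is_pkt3_nop(u32 header)
{
	if (RADEON_CP_PACKET_GET_TYPE(header) != RADEON_PACKET_TYPE3)
		return false;
	/* bits 15:8 carry the packet3 opcode */
	return RADEON_CP_PACKET3_GET_OPCODE(header) == RADEON_PACKET3_NOP;
}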
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index cd72062d5a91..8d58e268ff6d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -109,6 +109,25 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
}
/**
+ * radeon_ib_sync_to - sync to fence before executing the IB
+ *
+ * @ib: IB object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence before executing the IB
+ */
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
+{
+ struct radeon_fence *other;
+
+ if (!fence)
+ return;
+
+ other = ib->sync_to[fence->ring];
+ ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
+}
+
+/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 8e9057b6a365..4d20910899d4 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -25,6 +25,8 @@
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Kevin E. Martin <martin@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index 590309a710b1..6927a200daf4 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -205,17 +205,6 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 1b2444f4d8f4..d63fe1d0f53f 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -43,6 +43,31 @@ static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+/**
+ * rv770_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r7xx-cayman).
+ */
+u32 rv770_get_xclk(struct radeon_device *rdev)
+{
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 tmp = RREG32(CG_CLKPIN_CNTL);
+
+ if (tmp & MUX_TCLK_TO_XCLK)
+ return TCLK;
+
+ if (tmp & XTALIN_DIVIDE)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
+
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 20e29d23d348..c55f950a4af7 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -128,6 +128,10 @@
#define GUI_ACTIVE (1<<31)
#define GRBM_STATUS2 0x8014
+#define CG_CLKPIN_CNTL 0x660
+# define MUX_TCLK_TO_XCLK (1 << 8)
+# define XTALIN_DIVIDE (1 << 9)
+
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x3FF0000
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ae8b48205a6c..80979ed951eb 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -38,6 +38,7 @@
#define SI_CE_UCODE_SIZE 2144
#define SI_RLC_UCODE_SIZE 2048
#define SI_MC_UCODE_SIZE 7769
+#define OLAND_MC_UCODE_SIZE 7863
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
@@ -54,6 +55,11 @@ MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
+MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
+MODULE_FIRMWARE("radeon/OLAND_me.bin");
+MODULE_FIRMWARE("radeon/OLAND_ce.bin");
+MODULE_FIRMWARE("radeon/OLAND_mc.bin");
+MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -61,6 +67,35 @@ extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+/**
+ * si_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (SI).
+ */
+u32 si_get_xclk(struct radeon_device *rdev)
+{
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 tmp;
+
+ tmp = RREG32(CG_CLKPIN_CNTL_2);
+ if (tmp & MUX_TCLK_TO_XCLK)
+ return TCLK;
+
+ tmp = RREG32(CG_CLKPIN_CNTL);
+ if (tmp & XTALIN_DIVIDE)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
/* get temperature in millidegrees */
int si_get_temp(struct radeon_device *rdev)
@@ -200,6 +235,45 @@ static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
{0x0000009f, 0x00a37400}
};
+static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+ {0x0000006f, 0x03044000},
+ {0x00000070, 0x0480c018},
+ {0x00000071, 0x00000040},
+ {0x00000072, 0x01000000},
+ {0x00000074, 0x000000ff},
+ {0x00000075, 0x00143400},
+ {0x00000076, 0x08ec0800},
+ {0x00000077, 0x040000cc},
+ {0x00000079, 0x00000000},
+ {0x0000007a, 0x21000409},
+ {0x0000007c, 0x00000000},
+ {0x0000007d, 0xe8000000},
+ {0x0000007e, 0x044408a8},
+ {0x0000007f, 0x00000003},
+ {0x00000080, 0x00000000},
+ {0x00000081, 0x01000000},
+ {0x00000082, 0x02000000},
+ {0x00000083, 0x00000000},
+ {0x00000084, 0xe3f3e4f4},
+ {0x00000085, 0x00052024},
+ {0x00000087, 0x00000000},
+ {0x00000088, 0x66036603},
+ {0x00000089, 0x01000000},
+ {0x0000008b, 0x1c0a0000},
+ {0x0000008c, 0xff010000},
+ {0x0000008e, 0xffffefff},
+ {0x0000008f, 0xfff3efff},
+ {0x00000090, 0xfff3efbf},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00a17730}
+};
+
/* ucode loading */
static int si_mc_load_microcode(struct radeon_device *rdev)
{
@@ -228,6 +302,11 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
+ case CHIP_OLAND:
+ io_mc_regs = (u32 *)&oland_io_mc_regs;
+ ucode_size = OLAND_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -322,6 +401,15 @@ static int si_init_microcode(struct radeon_device *rdev)
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
break;
+ case CHIP_OLAND:
+ chip_name = "OLAND";
+ rlc_chip_name = "OLAND";
+ pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+ me_req_size = SI_PM4_UCODE_SIZE * 4;
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+ break;
default: BUG();
}
@@ -1125,7 +1213,8 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
}
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
- } else if (rdev->family == CHIP_VERDE) {
+ } else if ((rdev->family == CHIP_VERDE) ||
+ (rdev->family == CHIP_OLAND)) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0: /* non-AA compressed depth or any compressed stencil */
@@ -1570,6 +1659,23 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
break;
+ case CHIP_OLAND:
+ rdev->config.si.max_shader_engines = 1;
+ rdev->config.si.max_tile_pipes = 4;
+ rdev->config.si.max_cu_per_sh = 6;
+ rdev->config.si.max_sh_per_se = 1;
+ rdev->config.si.max_backends_per_se = 2;
+ rdev->config.si.max_texture_channel_caches = 4;
+ rdev->config.si.max_gprs = 256;
+ rdev->config.si.max_gs_threads = 16;
+ rdev->config.si.max_hw_contexts = 8;
+
+ rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+ rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+ break;
}
/* Initialize HDP */
@@ -2106,154 +2212,275 @@ static int si_cp_resume(struct radeon_device *rdev)
return 0;
}
-bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
{
- u32 srbm_status;
- u32 grbm_status, grbm_status2;
- u32 grbm_status_se0, grbm_status_se1;
-
- srbm_status = RREG32(SRBM_STATUS);
- grbm_status = RREG32(GRBM_STATUS);
- grbm_status2 = RREG32(GRBM_STATUS2);
- grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
- grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
- if (!(grbm_status & GUI_ACTIVE)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
+ u32 reset_mask = 0;
+ u32 tmp;
-static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
-{
- u32 grbm_reset = 0;
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ BCI_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ GDS_BUSY | SPI_BUSY |
+ IA_BUSY | IA_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(GRBM_STATUS2));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(SRBM_STATUS));
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+ /* GRBM_STATUS2 */
+ tmp = RREG32(GRBM_STATUS2);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
- /* reset all the gfx blocks */
- grbm_reset = (SOFT_RESET_CP |
- SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_GDS |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_BCI |
- SOFT_RESET_SPI |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VGT |
- SOFT_RESET_IA);
-
- dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
- WREG32(GRBM_SOFT_RESET, grbm_reset);
- (void)RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- (void)RREG32(GRBM_SOFT_RESET);
-
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(GRBM_STATUS2));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(SRBM_STATUS));
-}
+ /* DMA_STATUS_REG 0 */
+ tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
-static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
-{
- u32 tmp;
+ /* DMA_STATUS_REG 1 */
+ tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA1;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- return;
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
- dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (tmp & DMA1_BUSY)
+ reset_mask |= RADEON_RESET_DMA1;
- /* dma0 */
- tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
- /* dma1 */
- tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
- dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ return reset_mask;
}
-static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
if (reset_mask == 0)
- return 0;
+ return;
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ evergreen_print_gpu_status_regs(rdev);
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ }
+ if (reset_mask & RADEON_RESET_DMA1) {
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ }
+
+ udelay(50);
+
evergreen_mc_stop(rdev, &save);
- if (radeon_mc_wait_for_idle(rdev)) {
+ if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- si_gpu_soft_reset_gfx(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
+ grbm_soft_reset = SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_BCI |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
if (reset_mask & RADEON_RESET_DMA)
- si_gpu_soft_reset_dma(rdev);
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DMA1)
+ srbm_soft_reset |= SOFT_RESET_DMA1;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ grbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
/* Wait a little for things to settle down */
udelay(50);
evergreen_mc_resume(rdev, &save);
- return 0;
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
}
int si_asic_reset(struct radeon_device *rdev)
{
- return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
+ u32 reset_mask;
+
+ reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ si_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * si_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * si_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/* MC */
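si_gfx_is_lockup() and si_dma_is_lockup() above share one shape: compute the reset mask once, and only prod the ring when bits relevant to that engine are set. A hedged sketch of that shared pattern; example_engine_is_lockup() is a hypothetical name, not part of the patch:

static bool example_engine_is_lockup(struct radeon_device *rdev,
				     struct radeon_ring *ring,
				     u32 engine_mask)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);

	if (!(reset_mask & engine_mask)) {
		/* engine looks idle: record progress, no lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force activity and re-test */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}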
@@ -2855,19 +3082,19 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
do {
pkt.idx = idx;
- pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
- pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt.one_reg_wr = 0;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
ret = -EINVAL;
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
idx += 1;
break;
- case PACKET_TYPE3:
- pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ case RADEON_PACKET_TYPE3:
+ pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
if (ib->is_const_ib)
ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
else {
@@ -2920,19 +3147,21 @@ void si_vm_fini(struct radeon_device *rdev)
* si_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using the CP (cayman-si).
+ * Update the page tables using the CP (SI).
*/
-void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void si_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
@@ -2943,11 +3172,11 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
if (ndw > 0x3FFE)
ndw = 0x3FFE;
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1)));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe));
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
+ ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1));
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
for (; ndw > 2; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -2959,8 +3188,8 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
@@ -2972,9 +3201,9 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
ndw = 0xFFFFE;
/* for non-physically contiguous pages (system) */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -2986,8 +3215,8 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
@@ -3001,20 +3230,22 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
else
value = 0;
/* for physically contiguous pages (vram) */
- radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
- radeon_ring_write(ring, pe); /* dst addr */
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
- radeon_ring_write(ring, r600_flags); /* mask */
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, value); /* value */
- radeon_ring_write(ring, upper_32_bits(value));
- radeon_ring_write(ring, incr); /* increment size */
- radeon_ring_write(ring, 0);
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
pe += ndw * 4;
addr += (ndw / 2) * incr;
count -= ndw / 2;
}
}
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}
}
@@ -4378,14 +4609,14 @@ void si_fini(struct radeon_device *rdev)
}
/**
- * si_get_gpu_clock - return GPU clock counter snapshot
+ * si_get_gpu_clock_counter - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (SI).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t si_get_gpu_clock(struct radeon_device *rdev)
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
{
uint64_t clock;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index c056aae814f0..23fc08fc8e7f 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -58,9 +58,22 @@
#define VGA_HDP_CONTROL 0x328
#define VGA_MEMORY_DISABLE (1 << 4)
+#define CG_CLKPIN_CNTL 0x660
+# define XTALIN_DIVIDE (1 << 1)
+#define CG_CLKPIN_CNTL_2 0x664
+# define MUX_TCLK_TO_XCLK (1 << 8)
+
#define DMIF_ADDR_CONFIG 0xBD4
#define SRBM_STATUS 0xE50
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define IH_BUSY (1 << 17)
#define SRBM_SOFT_RESET 0x0E60
#define SOFT_RESET_BIF (1 << 1)
@@ -81,6 +94,10 @@
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
#define VM_L2_CNTL 0x1400
#define ENABLE_L2_CACHE (1 << 0)
#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
@@ -783,16 +800,7 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -801,7 +809,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index d1d5306ebf24..f6e0b5395051 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -313,9 +313,9 @@ static int shmob_drm_pm_resume(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
- mutex_lock(&sdev->ddev->mode_config.mutex);
+ drm_modeset_lock_all(sdev->ddev);
shmob_drm_crtc_resume(&sdev->crtc);
- mutex_unlock(&sdev->ddev->mode_config.mutex);
+ drm_modeset_unlock_all(sdev->ddev);
drm_kms_helper_poll_enable(sdev->ddev);
return 0;
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 841065b998a1..5a5325e6b759 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -58,7 +58,6 @@ static int sis_driver_unload(struct drm_device *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
- idr_remove_all(&dev_priv->object_idr);
idr_destroy(&dev_priv->object_idr);
kfree(dev_priv);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 2b2f78c428af..9a43d98e5003 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -128,17 +128,10 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
if (retval)
goto fail_alloc;
-again:
- if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
- retval = -ENOMEM;
- goto fail_idr;
- }
-
- retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
- if (retval == -EAGAIN)
- goto again;
- if (retval)
+ retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
+ if (retval < 0)
goto fail_idr;
+ user_key = retval;
list_add(&item->owner_list, &file_priv->obj_list);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index be1daf7344d3..c92955df0658 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -4,6 +4,7 @@ config DRM_TEGRA
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
+ select DRM_HDMI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index d365c6dff0fb..de94707b9dbe 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -12,32 +12,262 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-
-#include <mach/clk.h>
+#include <linux/clk/tegra.h>
#include "drm.h"
#include "dc.h"
-struct tegra_dc_window {
- fixed20_12 x;
- fixed20_12 y;
- fixed20_12 w;
- fixed20_12 h;
- unsigned int outx;
- unsigned int outy;
- unsigned int outw;
- unsigned int outh;
- unsigned int stride;
- unsigned int fmt;
+struct tegra_plane {
+ struct drm_plane base;
+ unsigned int index;
+};
+
+static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct tegra_plane, base);
+}
+
+static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, uint32_t src_x,
+ uint32_t src_y, uint32_t src_w, uint32_t src_h)
+{
+ struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct tegra_dc_window window;
+ unsigned int i;
+
+ memset(&window, 0, sizeof(window));
+ window.src.x = src_x >> 16;
+ window.src.y = src_y >> 16;
+ window.src.w = src_w >> 16;
+ window.src.h = src_h >> 16;
+ window.dst.x = crtc_x;
+ window.dst.y = crtc_y;
+ window.dst.w = crtc_w;
+ window.dst.h = crtc_h;
+ window.format = tegra_dc_format(fb->pixel_format);
+ window.bits_per_pixel = fb->bits_per_pixel;
+
+ for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+ struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+
+ window.base[i] = gem->paddr + fb->offsets[i];
+
+ /*
+ * Tegra doesn't support different strides for the U and V planes,
+ * so print an error if the user tries to scan out a framebuffer
+ * with such a configuration.
+ */
+ if (i >= 2) {
+ if (fb->pitches[i] != window.stride[1])
+ DRM_ERROR("unsupported UV-plane configuration\n");
+ } else {
+ window.stride[i] = fb->pitches[i];
+ }
+ }
+
+ return tegra_dc_setup_window(dc, p->index, &window);
+}
+
+static int tegra_plane_disable(struct drm_plane *plane)
+{
+ struct tegra_dc *dc = to_tegra_dc(plane->crtc);
+ struct tegra_plane *p = to_tegra_plane(plane);
+ unsigned long value;
+
+ value = WINDOW_A_SELECT << p->index;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+ value &= ~WIN_ENABLE;
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, WIN_A_UPDATE << p->index, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, WIN_A_ACT_REQ << p->index, DC_CMD_STATE_CONTROL);
+
+ return 0;
+}
+
+static void tegra_plane_destroy(struct drm_plane *plane)
+{
+ tegra_plane_disable(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs tegra_plane_funcs = {
+ .update_plane = tegra_plane_update,
+ .disable_plane = tegra_plane_disable,
+ .destroy = tegra_plane_destroy,
};
+static const uint32_t plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+};
+
+static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
+{
+ unsigned int i;
+ int err = 0;
+
+ for (i = 0; i < 2; i++) {
+ struct tegra_plane *plane;
+
+ plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return -ENOMEM;
+
+ plane->index = 1 + i;
+
+ err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
+ &tegra_plane_funcs, plane_formats,
+ ARRAY_SIZE(plane_formats), false);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, 0);
+ unsigned long value;
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ value = fb->offsets[0] + y * fb->pitches[0] +
+ x * fb->bits_per_pixel / 8;
+
+ tegra_dc_writel(dc, gem->paddr + value, DC_WINBUF_START_ADDR);
+ tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
+
+ value = GENERAL_UPDATE | WIN_A_UPDATE;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+
+ value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+
+ return 0;
+}
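The value written to DC_WINBUF_START_ADDR above is simply the GEM object's physical address plus a byte offset into the framebuffer. A small sketch of that offset calculation; the helper name and the example numbers are illustrative only:

/*
 * Byte offset of pixel (x, y) into a linear framebuffer.  For a
 * 1280x720 XRGB8888 buffer (pitch 5120 bytes, 32 bits per pixel),
 * x = 100, y = 50 gives 50 * 5120 + 100 * 4 = 256400 bytes.
 */
static unsigned long fb_byte_offset(unsigned int x, unsigned int y,
				    unsigned int pitch,
				    unsigned int bits_per_pixel)
{
	return y * pitch + x * bits_per_pixel / 8;
}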
+
+void tegra_dc_enable_vblank(struct tegra_dc *dc)
+{
+ unsigned long value, flags;
+
+ spin_lock_irqsave(&dc->lock, flags);
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value |= VBLANK_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ spin_unlock_irqrestore(&dc->lock, flags);
+}
+
+void tegra_dc_disable_vblank(struct tegra_dc *dc)
+{
+ unsigned long value, flags;
+
+ spin_lock_irqsave(&dc->lock, flags);
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value &= ~VBLANK_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ spin_unlock_irqrestore(&dc->lock, flags);
+}
+
+static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
+{
+ struct drm_device *drm = dc->base.dev;
+ struct drm_crtc *crtc = &dc->base;
+ struct drm_gem_cma_object *gem;
+ unsigned long flags, base;
+
+ if (!dc->event)
+ return;
+
+ gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
+
+ /* check if new start address has been latched */
+ tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
+ base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
+ tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
+
+ if (base == gem->paddr + crtc->fb->offsets[0]) {
+ spin_lock_irqsave(&drm->event_lock, flags);
+ drm_send_vblank_event(drm, dc->pipe, dc->event);
+ drm_vblank_put(drm, dc->pipe);
+ dc->event = NULL;
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+}
+
+void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct drm_device *drm = crtc->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+
+ if (dc->event && dc->event->base.file_priv == file) {
+ dc->event->base.destroy(&dc->event->base);
+ drm_vblank_put(drm, dc->pipe);
+ dc->event = NULL;
+ }
+
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct drm_device *drm = crtc->dev;
+
+ if (dc->event)
+ return -EBUSY;
+
+ if (event) {
+ event->pipe = dc->pipe;
+ dc->event = event;
+ drm_vblank_get(drm, dc->pipe);
+ }
+
+ tegra_dc_set_base(dc, 0, 0, fb);
+ crtc->fb = fb;
+
+ return 0;
+}
+
static const struct drm_crtc_funcs tegra_crtc_funcs = {
+ .page_flip = tegra_dc_page_flip,
.set_config = drm_crtc_helper_set_config,
.destroy = drm_crtc_cleanup,
};
-static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void tegra_crtc_disable(struct drm_crtc *crtc)
{
+ struct drm_device *drm = crtc->dev;
+ struct drm_plane *plane;
+
+ list_for_each_entry(plane, &drm->mode_config.plane_list, head) {
+ if (plane->crtc == crtc) {
+ tegra_plane_disable(plane);
+ plane->crtc = NULL;
+
+ if (plane->fb) {
+ drm_framebuffer_unreference(plane->fb);
+ plane->fb = NULL;
+ }
+ }
+ }
}
static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -47,10 +277,11 @@ static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
+static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
unsigned int bpp)
{
fixed20_12 outf = dfixed_init(out);
+ fixed20_12 inf = dfixed_init(in);
u32 dda_inc;
int max;
@@ -80,9 +311,10 @@ static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
return dda_inc;
}
-static inline u32 compute_initial_dda(fixed20_12 in)
+static inline u32 compute_initial_dda(unsigned int in)
{
- return dfixed_frac(in);
+ fixed20_12 inf = dfixed_init(in);
+ return dfixed_frac(inf);
}
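The dfixed_* helpers from drm_fixed.h operate on 20.12 fixed point, with the upper 20 bits holding the integer part and the lower 12 bits the fraction, so the plain unsigned parameters here are just shifted into that representation. A short sketch of what the conversion does, assuming the standard drm_fixed.h definitions:

/*
 * 20.12 fixed point: value = n << 12.  For example n = 3 becomes
 * 0x3000 (integer part 3, fractional part 0), so compute_initial_dda()
 * of an integer source coordinate always returns 0.
 */
static u32 to_fixed20_12(unsigned int n)
{
	return n << 12;			/* mirrors dfixed_const(n) */
}

static u32 fixed20_12_frac(u32 value)
{
	return value & 0xfff;		/* mirrors dfixed_frac() */
}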
static int tegra_dc_set_timings(struct tegra_dc *dc,
@@ -153,18 +385,198 @@ static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
return 0;
}
+static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar)
+{
+ switch (format) {
+ case WIN_COLOR_DEPTH_YCbCr422:
+ case WIN_COLOR_DEPTH_YUV422:
+ if (planar)
+ *planar = false;
+
+ return true;
+
+ case WIN_COLOR_DEPTH_YCbCr420P:
+ case WIN_COLOR_DEPTH_YUV420P:
+ case WIN_COLOR_DEPTH_YCbCr422P:
+ case WIN_COLOR_DEPTH_YUV422P:
+ case WIN_COLOR_DEPTH_YCbCr422R:
+ case WIN_COLOR_DEPTH_YUV422R:
+ case WIN_COLOR_DEPTH_YCbCr422RA:
+ case WIN_COLOR_DEPTH_YUV422RA:
+ if (planar)
+ *planar = true;
+
+ return true;
+ }
+
+ return false;
+}
+
+int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
+ const struct tegra_dc_window *window)
+{
+ unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
+ unsigned long value;
+ bool yuv, planar;
+
+ /*
+ * For YUV planar modes, the number of bytes per pixel takes into
+ * account only the luma component and therefore is 1.
+ */
+ yuv = tegra_dc_format_is_yuv(window->format, &planar);
+ if (!yuv)
+ bpp = window->bits_per_pixel / 8;
+ else
+ bpp = planar ? 1 : 2;
+
+ value = WINDOW_A_SELECT << index;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ tegra_dc_writel(dc, window->format, DC_WIN_COLOR_DEPTH);
+ tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
+
+ value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x);
+ tegra_dc_writel(dc, value, DC_WIN_POSITION);
+
+ value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w);
+ tegra_dc_writel(dc, value, DC_WIN_SIZE);
+
+ h_offset = window->src.x * bpp;
+ v_offset = window->src.y;
+ h_size = window->src.w * bpp;
+ v_size = window->src.h;
+
+ value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
+ tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
+
+ /*
+ * For DDA computations the number of bytes per pixel for YUV planar
+ * modes needs to take into account all Y, U and V components.
+ */
+ if (yuv && planar)
+ bpp = 2;
+
+ h_dda = compute_dda_inc(window->src.w, window->dst.w, false, bpp);
+ v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp);
+
+ value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
+ tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
+
+ h_dda = compute_initial_dda(window->src.x);
+ v_dda = compute_initial_dda(window->src.y);
+
+ tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+ tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+
+ tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+ tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+
+ tegra_dc_writel(dc, window->base[0], DC_WINBUF_START_ADDR);
+
+ if (yuv && planar) {
+ tegra_dc_writel(dc, window->base[1], DC_WINBUF_START_ADDR_U);
+ tegra_dc_writel(dc, window->base[2], DC_WINBUF_START_ADDR_V);
+ value = window->stride[1] << 16 | window->stride[0];
+ tegra_dc_writel(dc, value, DC_WIN_LINE_STRIDE);
+ } else {
+ tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
+ }
+
+ tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+ tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
+ value = WIN_ENABLE;
+
+ if (yuv) {
+ /* setup default colorspace conversion coefficients */
+ tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF);
+ tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB);
+ tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR);
+ tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR);
+ tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG);
+ tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG);
+ tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB);
+ tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB);
+
+ value |= CSC_ENABLE;
+ } else if (window->bits_per_pixel < 24) {
+ value |= COLOR_EXPAND;
+ }
+
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ /*
+ * Disable blending and assume Window A is the bottom-most window,
+ * Window C is the top-most window and Window B is in the middle.
+ */
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_NOKEY);
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_1WIN);
+
+ switch (index) {
+ case 0:
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
+ break;
+
+ case 1:
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
+ break;
+
+ case 2:
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_3WIN_XY);
+ break;
+ }
+
+ tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
+
+ return 0;
+}
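A worked illustration of the bytes-per-pixel handling in this function; the numbers below are examples and not taken from the patch:

/*
 * For a source rectangle starting at x = 16 with width 640:
 *
 *   XRGB8888 (bpp = 4):         h_offset = 64, h_size = 2560
 *   packed YUV 4:2:2 (bpp = 2): h_offset = 32, h_size = 1280
 *   planar YUV 4:2:0 (bpp = 1): h_offset = 16, h_size = 640
 *
 * For the DDA increments the planar case switches back to bpp = 2 so
 * that scaling also accounts for the chroma planes.
 */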
+
+unsigned int tegra_dc_format(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ return WIN_COLOR_DEPTH_B8G8R8A8;
+
+ case DRM_FORMAT_RGB565:
+ return WIN_COLOR_DEPTH_B5G6R5;
+
+ case DRM_FORMAT_UYVY:
+ return WIN_COLOR_DEPTH_YCbCr422;
+
+ case DRM_FORMAT_YUV420:
+ return WIN_COLOR_DEPTH_YCbCr420P;
+
+ case DRM_FORMAT_YUV422:
+ return WIN_COLOR_DEPTH_YCbCr422P;
+
+ default:
+ break;
+ }
+
+ WARN(1, "unsupported pixel format %u, using default\n", format);
+ return WIN_COLOR_DEPTH_B8G8R8A8;
+}
+
static int tegra_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted,
int x, int y, struct drm_framebuffer *old_fb)
{
- struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb);
+ struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned int h_dda, v_dda, bpp;
- struct tegra_dc_window win;
+ struct tegra_dc_window window;
unsigned long div, value;
int err;
+ drm_vblank_pre_modeset(crtc->dev, dc->pipe);
+
err = tegra_crtc_setup_clk(crtc, mode, &div);
if (err) {
dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
@@ -192,83 +604,33 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
/* setup window parameters */
- memset(&win, 0, sizeof(win));
- win.x.full = dfixed_const(0);
- win.y.full = dfixed_const(0);
- win.w.full = dfixed_const(mode->hdisplay);
- win.h.full = dfixed_const(mode->vdisplay);
- win.outx = 0;
- win.outy = 0;
- win.outw = mode->hdisplay;
- win.outh = mode->vdisplay;
-
- switch (crtc->fb->pixel_format) {
- case DRM_FORMAT_XRGB8888:
- win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
- break;
-
- case DRM_FORMAT_RGB565:
- win.fmt = WIN_COLOR_DEPTH_B5G6R5;
- break;
-
- default:
- win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
- WARN_ON(1);
- break;
- }
-
- bpp = crtc->fb->bits_per_pixel / 8;
- win.stride = crtc->fb->pitches[0];
-
- /* program window registers */
- value = WINDOW_A_SELECT;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
-
- tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
- tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
-
- value = V_POSITION(win.outy) | H_POSITION(win.outx);
- tegra_dc_writel(dc, value, DC_WIN_POSITION);
-
- value = V_SIZE(win.outh) | H_SIZE(win.outw);
- tegra_dc_writel(dc, value, DC_WIN_SIZE);
-
- value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) |
- H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp);
- tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
-
- h_dda = compute_dda_inc(win.w, win.outw, false, bpp);
- v_dda = compute_dda_inc(win.h, win.outh, true, bpp);
-
- value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
- tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
-
- h_dda = compute_initial_dda(win.x);
- v_dda = compute_initial_dda(win.y);
-
- tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
- tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
-
- tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
- tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
-
- tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR);
- tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE);
- tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp,
- DC_WINBUF_ADDR_H_OFFSET);
- tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET);
-
- value = WIN_ENABLE;
-
- if (bpp < 24)
- value |= COLOR_EXPAND;
+ memset(&window, 0, sizeof(window));
+ window.src.x = 0;
+ window.src.y = 0;
+ window.src.w = mode->hdisplay;
+ window.src.h = mode->vdisplay;
+ window.dst.x = 0;
+ window.dst.y = 0;
+ window.dst.w = mode->hdisplay;
+ window.dst.h = mode->vdisplay;
+ window.format = tegra_dc_format(crtc->fb->pixel_format);
+ window.bits_per_pixel = crtc->fb->bits_per_pixel;
+ window.stride[0] = crtc->fb->pitches[0];
+ window.base[0] = gem->paddr;
+
+ err = tegra_dc_setup_window(dc, 0, &window);
+ if (err < 0)
+ dev_err(dc->dev, "failed to enable root plane\n");
- tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+ return 0;
+}
- tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY);
- tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN);
+static int tegra_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
- return 0;
+ return tegra_dc_set_base(dc, x, y, crtc->fb);
}
static void tegra_crtc_prepare(struct drm_crtc *crtc)
@@ -315,31 +677,24 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
-
- value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
}
static void tegra_crtc_commit(struct drm_crtc *crtc)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned long update_mask;
unsigned long value;
- update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
-
- tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
+ value = GENERAL_UPDATE | WIN_A_UPDATE;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
- value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
- value |= FRAME_END_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
- value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
- value |= FRAME_END_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+ value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+ drm_vblank_post_modeset(crtc->dev, dc->pipe);
}
static void tegra_crtc_load_lut(struct drm_crtc *crtc)
@@ -347,15 +702,16 @@ static void tegra_crtc_load_lut(struct drm_crtc *crtc)
}
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
- .dpms = tegra_crtc_dpms,
+ .disable = tegra_crtc_disable,
.mode_fixup = tegra_crtc_mode_fixup,
.mode_set = tegra_crtc_mode_set,
+ .mode_set_base = tegra_crtc_mode_set_base,
.prepare = tegra_crtc_prepare,
.commit = tegra_crtc_commit,
.load_lut = tegra_crtc_load_lut,
};
-static irqreturn_t tegra_drm_irq(int irq, void *data)
+static irqreturn_t tegra_dc_irq(int irq, void *data)
{
struct tegra_dc *dc = data;
unsigned long status;
@@ -374,6 +730,7 @@ static irqreturn_t tegra_drm_irq(int irq, void *data)
dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
*/
drm_handle_vblank(dc->base.dev, dc->pipe);
+ tegra_dc_finish_page_flip(dc);
}
if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
@@ -588,7 +945,7 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
DUMP_REG(DC_WIN_BLEND_1WIN);
DUMP_REG(DC_WIN_BLEND_2WIN_X);
DUMP_REG(DC_WIN_BLEND_2WIN_Y);
- DUMP_REG(DC_WIN_BLEND32WIN_XY);
+ DUMP_REG(DC_WIN_BLEND_3WIN_XY);
DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
DUMP_REG(DC_WINBUF_START_ADDR);
DUMP_REG(DC_WINBUF_START_ADDR_NS);
@@ -690,13 +1047,17 @@ static int tegra_dc_drm_init(struct host1x_client *client,
return err;
}
+ err = tegra_dc_add_planes(drm, dc);
+ if (err < 0)
+ return err;
+
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_dc_debugfs_init(dc, drm->primary);
if (err < 0)
dev_err(dc->dev, "debugfs setup failed: %d\n", err);
}
- err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0,
+ err = devm_request_irq(dc->dev, dc->irq, tegra_dc_irq, 0,
dev_name(dc->dev), dc);
if (err < 0) {
dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
@@ -745,6 +1106,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
if (!dc)
return -ENOMEM;
+ spin_lock_init(&dc->lock);
INIT_LIST_HEAD(&dc->list);
dc->dev = &pdev->dev;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 99977b5d5c36..79eaec9aac77 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -58,6 +58,8 @@
#define DC_CMD_SIGNAL_RAISE3 0x03e
#define DC_CMD_STATE_ACCESS 0x040
+#define READ_MUX (1 << 0)
+#define WRITE_MUX (1 << 2)
#define DC_CMD_STATE_CONTROL 0x041
#define GENERAL_ACT_REQ (1 << 0)
@@ -290,8 +292,18 @@
#define DC_DISP_SD_HW_K_VALUES 0x4dd
#define DC_DISP_SD_MAN_K_VALUES 0x4de
+#define DC_WIN_CSC_YOF 0x611
+#define DC_WIN_CSC_KYRGB 0x612
+#define DC_WIN_CSC_KUR 0x613
+#define DC_WIN_CSC_KVR 0x614
+#define DC_WIN_CSC_KUG 0x615
+#define DC_WIN_CSC_KVG 0x616
+#define DC_WIN_CSC_KUB 0x617
+#define DC_WIN_CSC_KVB 0x618
+
#define DC_WIN_WIN_OPTIONS 0x700
#define COLOR_EXPAND (1 << 6)
+#define CSC_ENABLE (1 << 18)
#define WIN_ENABLE (1 << 30)
#define DC_WIN_BYTE_SWAP 0x701
@@ -359,7 +371,7 @@
#define DC_WIN_BLEND_1WIN 0x710
#define DC_WIN_BLEND_2WIN_X 0x711
#define DC_WIN_BLEND_2WIN_Y 0x712
-#define DC_WIN_BLEND32WIN_XY 0x713
+#define DC_WIN_BLEND_3WIN_XY 0x713
#define DC_WIN_HP_FETCH_CONTROL 0x714
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 3a503c9e4686..9d452df5bcad 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -11,7 +11,6 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <mach/clk.h>
#include <linux/dma-mapping.h>
#include <asm/dma-iommu.h>
@@ -40,6 +39,10 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (err < 0)
return err;
+ err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (err < 0)
+ return err;
+
err = tegra_drm_fb_init(drm);
if (err < 0)
return err;
@@ -89,13 +92,112 @@ static const struct file_operations tegra_drm_fops = {
.llseek = noop_llseek,
};
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (dc->pipe == pipe)
+ return crtc;
+ }
+
+ return NULL;
+}
+
+static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ /* TODO: implement real hardware counter using syncpoints */
+ return drm_vblank_count(dev, crtc);
+}
+
+static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (!crtc)
+ return -ENODEV;
+
+ tegra_dc_enable_vblank(dc);
+
+ return 0;
+}
+
+static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (crtc)
+ tegra_dc_disable_vblank(dc);
+}
+
+static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ tegra_dc_cancel_page_flip(crtc, file);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct drm_device *drm = node->minor->dev;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&drm->mode_config.fb_lock);
+
+ list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
+ seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
+ fb->base.id, fb->width, fb->height, fb->depth,
+ fb->bits_per_pixel,
+ atomic_read(&fb->refcount.refcount));
+ }
+
+ mutex_unlock(&drm->mode_config.fb_lock);
+
+ return 0;
+}
+
+static struct drm_info_list tegra_debugfs_list[] = {
+ { "framebuffers", tegra_debugfs_framebuffers, 0 },
+};
+
+static int tegra_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+static void tegra_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list), minor);
+}
+#endif
+
struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
.open = tegra_drm_open,
+ .preclose = tegra_drm_preclose,
.lastclose = tegra_drm_lastclose,
+ .get_vblank_counter = tegra_drm_get_vblank_counter,
+ .enable_vblank = tegra_drm_enable_vblank,
+ .disable_vblank = tegra_drm_disable_vblank,
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = tegra_debugfs_init,
+ .debugfs_cleanup = tegra_debugfs_cleanup,
+#endif
+
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 741b5dc2742c..6dd75a2600eb 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -18,16 +18,6 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fixed.h>
-struct tegra_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_cma_object *obj;
-};
-
-static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb)
-{
- return container_of(fb, struct tegra_framebuffer, base);
-}
-
struct host1x {
struct drm_device *drm;
struct device *dev;
@@ -44,7 +34,6 @@ struct host1x {
struct list_head clients;
struct drm_fbdev_cma *fbdev;
- struct tegra_framebuffer fb;
};
struct host1x_client;
@@ -75,6 +64,7 @@ struct tegra_output;
struct tegra_dc {
struct host1x_client client;
+ spinlock_t lock;
struct host1x *host1x;
struct device *dev;
@@ -94,6 +84,9 @@ struct tegra_dc {
struct drm_info_list *debugfs_files;
struct drm_minor *minor;
struct dentry *debugfs;
+
+ /* page-flip handling */
+ struct drm_pending_vblank_event *event;
};
static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
@@ -118,6 +111,34 @@ static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
return readl(dc->regs + (reg << 2));
}
+struct tegra_dc_window {
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } src;
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } dst;
+ unsigned int bits_per_pixel;
+ unsigned int format;
+ unsigned int stride[2];
+ unsigned long base[3];
+};
+
+/* from dc.c */
+extern unsigned int tegra_dc_format(uint32_t format);
+extern int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
+ const struct tegra_dc_window *window);
+extern void tegra_dc_enable_vblank(struct tegra_dc *dc);
+extern void tegra_dc_disable_vblank(struct tegra_dc *dc);
+extern void tegra_dc_cancel_page_flip(struct drm_crtc *crtc,
+ struct drm_file *file);
+
struct tegra_output_ops {
int (*enable)(struct tegra_output *output);
int (*disable)(struct tegra_output *output);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 97993c6835fd..03914953cb1c 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -39,10 +39,6 @@ int tegra_drm_fb_init(struct drm_device *drm)
if (IS_ERR(fbdev))
return PTR_ERR(fbdev);
-#ifndef CONFIG_FRAMEBUFFER_CONSOLE
- drm_fbdev_cma_restore_mode(fbdev);
-#endif
-
host1x->fbdev = fbdev;
return 0;
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 266af7879240..bb747f6cd1a4 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -10,12 +10,14 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/gpio.h>
+#include <linux/hdmi.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/clk/tegra.h>
-#include <mach/clk.h>
+#include <drm/drm_edid.h>
#include "hdmi.h"
#include "drm.h"
@@ -401,54 +403,65 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
return 0;
}
-static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi,
- unsigned int offset, u8 type,
- u8 version, void *data, size_t size)
+static inline unsigned long tegra_hdmi_subpack(const u8 *ptr, size_t size)
{
- unsigned long value;
- u8 *ptr = data;
- u32 subpack[2];
+ unsigned long value = 0;
size_t i;
- u8 csum;
- /* first byte of data is the checksum */
- csum = type + version + size - 1;
+ for (i = size; i > 0; i--)
+ value = (value << 8) | ptr[i - 1];
- for (i = 1; i < size; i++)
- csum += ptr[i];
+ return value;
+}
- ptr[0] = 0x100 - csum;
+static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
+ size_t size)
+{
+ const u8 *ptr = data;
+ unsigned long offset;
+ unsigned long value;
+ size_t i, j;
- value = INFOFRAME_HEADER_TYPE(type) |
- INFOFRAME_HEADER_VERSION(version) |
- INFOFRAME_HEADER_LEN(size - 1);
- tegra_hdmi_writel(hdmi, value, offset);
+ switch (ptr[0]) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ offset = HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER;
+ break;
- /* The audio inforame only has one set of subpack registers. The hdmi
- * block pads the rest of the data as per the spec so we have to fixup
- * the length before filling in the subpacks.
- */
- if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
- size = 6;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ offset = HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER;
+ break;
- /* each subpack 7 bytes devided into:
- * subpack_low - bytes 0 - 3
- * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
- */
- for (i = 0; i < size; i++) {
- size_t index = i % 7;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ offset = HDMI_NV_PDISP_HDMI_GENERIC_HEADER;
+ break;
- if (index == 0)
- memset(subpack, 0x0, sizeof(subpack));
+ default:
+ dev_err(hdmi->dev, "unsupported infoframe type: %02x\n",
+ ptr[0]);
+ return;
+ }
- ((u8 *)subpack)[index] = ptr[i];
+ value = INFOFRAME_HEADER_TYPE(ptr[0]) |
+ INFOFRAME_HEADER_VERSION(ptr[1]) |
+ INFOFRAME_HEADER_LEN(ptr[2]);
+ tegra_hdmi_writel(hdmi, value, offset);
+ offset++;
- if (index == 6 || (i + 1 == size)) {
- unsigned int reg = offset + 1 + (i / 7) * 2;
+ /*
+ * Each subpack contains 7 bytes, divided into:
+ * - subpack_low: bytes 0 - 3
+ * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
+ */
+ for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ size_t rem = size - i, num = min_t(size_t, rem, 4);
- tegra_hdmi_writel(hdmi, subpack[0], reg);
- tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
- }
+ value = tegra_hdmi_subpack(&ptr[i], num);
+ tegra_hdmi_writel(hdmi, value, offset++);
+
+ num = min_t(size_t, rem - num, 3);
+
+ value = tegra_hdmi_subpack(&ptr[i + 4], num);
+ tegra_hdmi_writel(hdmi, value, offset++);
}
}
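For reference, a worked example of how a packed infoframe maps onto the registers written above; the values are illustrative:

/*
 * A packed AVI infoframe is 17 bytes: type, version, length, checksum
 * and 13 payload bytes.  Bytes 0-2 go into the header register; the
 * remaining 14 bytes are split into two 7-byte subpacks, each written
 * as a 4-byte "low" word and a 3-byte "high" word, so the frame uses
 * 1 + 4 registers in total.
 *
 * tegra_hdmi_subpack() accumulates bytes little-endian, e.g.
 * tegra_hdmi_subpack((u8 []){ 0x82, 0x02, 0x0d }, 3) == 0x000d0282,
 * so ptr[0] lands in the least significant byte of the register value.
 */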
@@ -456,9 +469,8 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
struct drm_display_mode *mode)
{
struct hdmi_avi_infoframe frame;
- unsigned int h_front_porch;
- unsigned int hsize = 16;
- unsigned int vsize = 9;
+ u8 buffer[17];
+ ssize_t err;
if (hdmi->dvi) {
tegra_hdmi_writel(hdmi, 0,
@@ -466,69 +478,19 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
return;
}
- h_front_porch = mode->hsync_start - mode->hdisplay;
- memset(&frame, 0, sizeof(frame));
- frame.r = HDMI_AVI_R_SAME;
-
- switch (mode->vdisplay) {
- case 480:
- if (mode->hdisplay == 640) {
- frame.m = HDMI_AVI_M_4_3;
- frame.vic = 1;
- } else {
- frame.m = HDMI_AVI_M_16_9;
- frame.vic = 3;
- }
- break;
-
- case 576:
- if (((hsize * 10) / vsize) > 14) {
- frame.m = HDMI_AVI_M_16_9;
- frame.vic = 18;
- } else {
- frame.m = HDMI_AVI_M_4_3;
- frame.vic = 17;
- }
- break;
-
- case 720:
- case 1470: /* stereo mode */
- frame.m = HDMI_AVI_M_16_9;
-
- if (h_front_porch == 110)
- frame.vic = 4;
- else
- frame.vic = 19;
- break;
-
- case 1080:
- case 2205: /* stereo mode */
- frame.m = HDMI_AVI_M_16_9;
-
- switch (h_front_porch) {
- case 88:
- frame.vic = 16;
- break;
-
- case 528:
- frame.vic = 31;
- break;
-
- default:
- frame.vic = 32;
- break;
- }
- break;
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
- default:
- frame.m = HDMI_AVI_M_16_9;
- frame.vic = 0;
- break;
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to pack AVI infoframe: %zd\n", err);
+ return;
}
- tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
- HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION,
- &frame, sizeof(frame));
+ tegra_hdmi_write_infopack(hdmi, buffer, err);
tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
@@ -537,6 +499,8 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
{
struct hdmi_audio_infoframe frame;
+ u8 buffer[14];
+ ssize_t err;
if (hdmi->dvi) {
tegra_hdmi_writel(hdmi, 0,
@@ -544,14 +508,29 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
return;
}
- memset(&frame, 0, sizeof(frame));
- frame.cc = HDMI_AUDIO_CC_2;
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n",
+ err);
+ return;
+ }
+
+ frame.channels = 2;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to pack audio infoframe: %zd\n",
+ err);
+ return;
+ }
- tegra_hdmi_write_infopack(hdmi,
- HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
- HDMI_INFOFRAME_TYPE_AUDIO,
- HDMI_AUDIO_VERSION,
- &frame, sizeof(frame));
+ /*
+ * The audio infoframe has only one set of subpack registers, so the
+ * infoframe needs to be truncated. One set of subpack registers can
+ * contain 7 bytes. Including the 3-byte header, only the first 10
+ * bytes can be programmed.
+ */
+ tegra_hdmi_write_infopack(hdmi, buffer, min(10, err));
tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
@@ -559,8 +538,10 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
{
- struct hdmi_stereo_infoframe frame;
+ struct hdmi_vendor_infoframe frame;
unsigned long value;
+ u8 buffer[10];
+ ssize_t err;
if (!hdmi->stereo) {
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
@@ -570,22 +551,32 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
}
memset(&frame, 0, sizeof(frame));
- frame.regid0 = 0x03;
- frame.regid1 = 0x0c;
- frame.regid2 = 0x00;
- frame.hdmi_video_format = 2;
+
+ frame.type = HDMI_INFOFRAME_TYPE_VENDOR;
+ frame.version = 0x01;
+ frame.length = 6;
+
+ frame.data[0] = 0x03; /* regid0 */
+ frame.data[1] = 0x0c; /* regid1 */
+ frame.data[2] = 0x00; /* regid2 */
+ frame.data[3] = 0x02 << 5; /* video format */
/* TODO: 74 MHz limit? */
if (1) {
- frame._3d_structure = 0;
+ frame.data[4] = 0x00 << 4; /* 3D structure */
} else {
- frame._3d_structure = 8;
- frame._3d_ext_data = 0;
+ frame.data[4] = 0x08 << 4; /* 3D structure */
+ frame.data[5] = 0x00 << 4; /* 3D ext. data */
+ }
+
+ err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to pack vendor infoframe: %zd\n",
+ err);
+ return;
}
- tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
- HDMI_INFOFRAME_TYPE_VENDOR,
- HDMI_VENDOR_VERSION, &frame, 6);
+ tegra_hdmi_write_infopack(hdmi, buffer, err);
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
value |= GENERIC_CTRL_ENABLE;
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 1477f36eb45a..52ac36e08ccb 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -10,195 +10,6 @@
#ifndef TEGRA_HDMI_H
#define TEGRA_HDMI_H 1
-#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
-#define HDMI_INFOFRAME_TYPE_AVI 0x82
-#define HDMI_INFOFRAME_TYPE_SPD 0x83
-#define HDMI_INFOFRAME_TYPE_AUDIO 0x84
-#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
-#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
-
-/* all fields little endian */
-struct hdmi_avi_infoframe {
- /* PB0 */
- u8 csum;
-
- /* PB1 */
- unsigned s:2; /* scan information */
- unsigned b:2; /* bar info data valid */
- unsigned a:1; /* active info present */
- unsigned y:2; /* RGB or YCbCr */
- unsigned res1:1;
-
- /* PB2 */
- unsigned r:4; /* active format aspect ratio */
- unsigned m:2; /* picture aspect ratio */
- unsigned c:2; /* colorimetry */
-
- /* PB3 */
- unsigned sc:2; /* scan information */
- unsigned q:2; /* quantization range */
- unsigned ec:3; /* extended colorimetry */
- unsigned itc:1; /* it content */
-
- /* PB4 */
- unsigned vic:7; /* video format id code */
- unsigned res4:1;
-
- /* PB5 */
- unsigned pr:4; /* pixel repetition factor */
- unsigned cn:2; /* it content type*/
- unsigned yq:2; /* ycc quantization range */
-
- /* PB6-7 */
- u16 top_bar_end_line;
-
- /* PB8-9 */
- u16 bot_bar_start_line;
-
- /* PB10-11 */
- u16 left_bar_end_pixel;
-
- /* PB12-13 */
- u16 right_bar_start_pixel;
-} __packed;
-
-#define HDMI_AVI_VERSION 0x02
-
-#define HDMI_AVI_Y_RGB 0x0
-#define HDMI_AVI_Y_YCBCR_422 0x1
-#define HDMI_AVI_Y_YCBCR_444 0x2
-
-#define HDMI_AVI_B_VERT 0x1
-#define HDMI_AVI_B_HORIZ 0x2
-
-#define HDMI_AVI_S_NONE 0x0
-#define HDMI_AVI_S_OVERSCAN 0x1
-#define HDMI_AVI_S_UNDERSCAN 0x2
-
-#define HDMI_AVI_C_NONE 0x0
-#define HDMI_AVI_C_SMPTE 0x1
-#define HDMI_AVI_C_ITU_R 0x2
-#define HDMI_AVI_C_EXTENDED 0x4
-
-#define HDMI_AVI_M_4_3 0x1
-#define HDMI_AVI_M_16_9 0x2
-
-#define HDMI_AVI_R_SAME 0x8
-#define HDMI_AVI_R_4_3_CENTER 0x9
-#define HDMI_AVI_R_16_9_CENTER 0xa
-#define HDMI_AVI_R_14_9_CENTER 0xb
-
-/* all fields little endian */
-struct hdmi_audio_infoframe {
- /* PB0 */
- u8 csum;
-
- /* PB1 */
- unsigned cc:3; /* channel count */
- unsigned res1:1;
- unsigned ct:4; /* coding type */
-
- /* PB2 */
- unsigned ss:2; /* sample size */
- unsigned sf:3; /* sample frequency */
- unsigned res2:3;
-
- /* PB3 */
- unsigned cxt:5; /* coding extention type */
- unsigned res3:3;
-
- /* PB4 */
- u8 ca; /* channel/speaker allocation */
-
- /* PB5 */
- unsigned res5:3;
- unsigned lsv:4; /* level shift value */
- unsigned dm_inh:1; /* downmix inhibit */
-
- /* PB6-10 reserved */
- u8 res6;
- u8 res7;
- u8 res8;
- u8 res9;
- u8 res10;
-} __packed;
-
-#define HDMI_AUDIO_VERSION 0x01
-
-#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
-#define HDMI_AUDIO_CC_2 0x1
-#define HDMI_AUDIO_CC_3 0x2
-#define HDMI_AUDIO_CC_4 0x3
-#define HDMI_AUDIO_CC_5 0x4
-#define HDMI_AUDIO_CC_6 0x5
-#define HDMI_AUDIO_CC_7 0x6
-#define HDMI_AUDIO_CC_8 0x7
-
-#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */
-#define HDMI_AUDIO_CT_PCM 0x1
-#define HDMI_AUDIO_CT_AC3 0x2
-#define HDMI_AUDIO_CT_MPEG1 0x3
-#define HDMI_AUDIO_CT_MP3 0x4
-#define HDMI_AUDIO_CT_MPEG2 0x5
-#define HDMI_AUDIO_CT_AAC_LC 0x6
-#define HDMI_AUDIO_CT_DTS 0x7
-#define HDMI_AUDIO_CT_ATRAC 0x8
-#define HDMI_AUDIO_CT_DSD 0x9
-#define HDMI_AUDIO_CT_E_AC3 0xa
-#define HDMI_AUDIO_CT_DTS_HD 0xb
-#define HDMI_AUDIO_CT_MLP 0xc
-#define HDMI_AUDIO_CT_DST 0xd
-#define HDMI_AUDIO_CT_WMA_PRO 0xe
-#define HDMI_AUDIO_CT_CXT 0xf
-
-#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
-#define HDMI_AUIDO_SF_32K 0x1
-#define HDMI_AUDIO_SF_44_1K 0x2
-#define HDMI_AUDIO_SF_48K 0x3
-#define HDMI_AUDIO_SF_88_2K 0x4
-#define HDMI_AUDIO_SF_96K 0x5
-#define HDMI_AUDIO_SF_176_4K 0x6
-#define HDMI_AUDIO_SF_192K 0x7
-
-#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
-#define HDMI_AUDIO_SS_16BIT 0x1
-#define HDMI_AUDIO_SS_20BIT 0x2
-#define HDMI_AUDIO_SS_24BIT 0x3
-
-#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */
-#define HDMI_AUDIO_CXT_HE_AAC 0x1
-#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2
-#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
-
-/* all fields little endian */
-struct hdmi_stereo_infoframe {
- /* PB0 */
- u8 csum;
-
- /* PB1 */
- u8 regid0;
-
- /* PB2 */
- u8 regid1;
-
- /* PB3 */
- u8 regid2;
-
- /* PB4 */
- unsigned res1:5;
- unsigned hdmi_video_format:3;
-
- /* PB5 */
- unsigned res2:4;
- unsigned _3d_structure:4;
-
- /* PB6*/
- unsigned res3:4;
- unsigned _3d_ext_data:4;
-} __packed;
-
-#define HDMI_VENDOR_VERSION 0x01
-
/* register definitions */
#define HDMI_CTXSW 0x00
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
new file mode 100644
index 000000000000..d24d04013476
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -0,0 +1,13 @@
+config DRM_TILCDC
+ tristate "DRM Support for TI LCDC Display Controller"
+ depends on DRM && OF && ARM
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select OF_VIDEOMODE
+ select OF_DISPLAY_TIMING
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ Choose this option if you have a TI SoC with LCDC display
+ controller, for example AM33xx in beagle-bone, DA8xx, or
+ OMAP-L1xx. This driver replaces the FB_DA8XX fbdev driver.
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
new file mode 100644
index 000000000000..deda656b10e7
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -0,0 +1,10 @@
+ccflags-y := -Iinclude/drm -Werror
+
+tilcdc-y := \
+ tilcdc_crtc.o \
+ tilcdc_tfp410.o \
+ tilcdc_slave.o \
+ tilcdc_panel.o \
+ tilcdc_drv.o
+
+obj-$(CONFIG_DRM_TILCDC) += tilcdc.o
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
new file mode 100644
index 000000000000..5dd3c7d031d5
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -0,0 +1,602 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kfifo.h>
+
+#include "tilcdc_drv.h"
+#include "tilcdc_regs.h"
+
+struct tilcdc_crtc {
+ struct drm_crtc base;
+
+ const struct tilcdc_panel_info *info;
+ uint32_t dirty;
+ dma_addr_t start, end;
+ struct drm_pending_vblank_event *event;
+ int dpms;
+ wait_queue_head_t frame_done_wq;
+ bool frame_done;
+
+ /* fb currently set to scanout 0/1: */
+ struct drm_framebuffer *scanout[2];
+
+ /* for deferred fb unref's: */
+ DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *);
+ struct work_struct work;
+};
+#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
+
+static void unref_worker(struct work_struct *work)
+{
+ struct tilcdc_crtc *tilcdc_crtc = container_of(work, struct tilcdc_crtc, work);
+ struct drm_device *dev = tilcdc_crtc->base.dev;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.mutex);
+ while (kfifo_get(&tilcdc_crtc->unref_fifo, &fb))
+ drm_framebuffer_unreference(fb);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void set_scanout(struct drm_crtc *crtc, int n)
+{
+ static const uint32_t base_reg[] = {
+ LCDC_DMA_FB_BASE_ADDR_0_REG, LCDC_DMA_FB_BASE_ADDR_1_REG,
+ };
+ static const uint32_t ceil_reg[] = {
+ LCDC_DMA_FB_CEILING_ADDR_0_REG, LCDC_DMA_FB_CEILING_ADDR_1_REG,
+ };
+ static const uint32_t stat[] = {
+ LCDC_END_OF_FRAME0, LCDC_END_OF_FRAME1,
+ };
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+
+ pm_runtime_get_sync(dev->dev);
+ tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
+ tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
+ if (tilcdc_crtc->scanout[n]) {
+ if (kfifo_put(&tilcdc_crtc->unref_fifo,
+ (const struct drm_framebuffer **)&tilcdc_crtc->scanout[n])) {
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ queue_work(priv->wq, &tilcdc_crtc->work);
+ } else {
+ dev_err(dev->dev, "unref fifo full!\n");
+ drm_framebuffer_unreference(tilcdc_crtc->scanout[n]);
+ }
+ }
+ tilcdc_crtc->scanout[n] = crtc->fb;
+ drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
+ tilcdc_crtc->dirty &= ~stat[n];
+ pm_runtime_put_sync(dev->dev);
+}
+
+static void update_scanout(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct drm_gem_cma_object *gem;
+ unsigned int depth, bpp;
+
+ drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+ tilcdc_crtc->start = gem->paddr + fb->offsets[0] +
+ (crtc->y * fb->pitches[0]) + (crtc->x * bpp/8);
+
+ tilcdc_crtc->end = tilcdc_crtc->start +
+ (crtc->mode.vdisplay * fb->pitches[0]);
+
+ if (tilcdc_crtc->dpms == DRM_MODE_DPMS_ON) {
+ /* already enabled, so just mark the frames that need
+ * updating and they will be updated on vblank:
+ */
+ tilcdc_crtc->dirty |= LCDC_END_OF_FRAME0 | LCDC_END_OF_FRAME1;
+ drm_vblank_get(dev, 0);
+ } else {
+ /* not enabled yet, so update registers immediately: */
+ set_scanout(crtc, 0);
+ set_scanout(crtc, 1);
+ }
+}
+
+static void start(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ if (priv->rev == 2) {
+ tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
+ msleep(1);
+ tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
+ msleep(1);
+ }
+
+ tilcdc_set(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+}
+
+static void stop(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+}
+
+static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+ WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON);
+
+ drm_crtc_cleanup(crtc);
+ WARN_ON(!kfifo_is_empty(&tilcdc_crtc->unref_fifo));
+ kfifo_free(&tilcdc_crtc->unref_fifo);
+ kfree(tilcdc_crtc);
+}
+
+static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+
+ if (tilcdc_crtc->event) {
+ dev_err(dev->dev, "already pending page flip!\n");
+ return -EBUSY;
+ }
+
+ crtc->fb = fb;
+ tilcdc_crtc->event = event;
+ update_scanout(crtc);
+
+ return 0;
+}
+
+static void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ /* we really only care about on or off: */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (tilcdc_crtc->dpms == mode)
+ return;
+
+ tilcdc_crtc->dpms = mode;
+
+ pm_runtime_get_sync(dev->dev);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ pm_runtime_forbid(dev->dev);
+ start(crtc);
+ } else {
+ tilcdc_crtc->frame_done = false;
+ stop(crtc);
+
+ /* if necessary, wait for the framedone irq, which will still come
+ * before putting things to sleep.
+ */
+ if (priv->rev == 2) {
+ int ret = wait_event_timeout(
+ tilcdc_crtc->frame_done_wq,
+ tilcdc_crtc->frame_done,
+ msecs_to_jiffies(50));
+ if (ret == 0)
+ dev_err(dev->dev, "timeout waiting for framedone\n");
+ }
+ pm_runtime_allow(dev->dev);
+ }
+
+ pm_runtime_put_sync(dev->dev);
+}
+
+static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void tilcdc_crtc_prepare(struct drm_crtc *crtc)
+{
+ tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void tilcdc_crtc_commit(struct drm_crtc *crtc)
+{
+ tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ const struct tilcdc_panel_info *info = tilcdc_crtc->info;
+ uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
+ int ret;
+
+ ret = tilcdc_crtc_mode_valid(crtc, mode);
+ if (WARN_ON(ret))
+ return ret;
+
+ if (WARN_ON(!info))
+ return -EINVAL;
+
+ pm_runtime_get_sync(dev->dev);
+
+ /* Configure the Burst Size and fifo threshold of DMA: */
+ reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
+ switch (info->dma_burst_sz) {
+ case 1:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
+ break;
+ case 2:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
+ break;
+ case 4:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
+ break;
+ case 8:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
+ break;
+ case 16:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ reg |= (info->fifo_th << 8);
+ tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
+
+ /* Configure timings: */
+ hbp = mode->htotal - mode->hsync_end;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsw = mode->vsync_end - mode->vsync_start;
+
+ DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
+ mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
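+
+ /* Illustrative example, using a hypothetical 800x480 mode (numbers assumed
+ * for the sake of the arithmetic, not taken from the driver): with
+ * hdisplay=800, hsync_start=840, hsync_end=888, htotal=928 and
+ * vdisplay=480, vsync_start=483, vsync_end=493, vtotal=525, the
+ * expressions above give hfp=40, hsw=48, hbp=40, vfp=3, vsw=10, vbp=32,
+ * which are the values packed into the RASTER_TIMING registers below.
+ */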
+
+ /* Configure the AC Bias Period and Number of Transitions per Interrupt: */
+ reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
+ reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
+ LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
+ if (priv->rev == 2) {
+ reg |= (hfp & 0x300) >> 8;
+ reg |= (hbp & 0x300) >> 4;
+ reg |= (hsw & 0x3c0) << 21;
+ }
+ tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);
+
+ reg = (((mode->hdisplay >> 4) - 1) << 4) |
+ ((hbp & 0xff) << 24) |
+ ((hfp & 0xff) << 16) |
+ ((hsw & 0x3f) << 10);
+ if (priv->rev == 2)
+ reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
+ tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);
+
+ reg = ((mode->vdisplay - 1) & 0x3ff) |
+ ((vbp & 0xff) << 24) |
+ ((vfp & 0xff) << 16) |
+ ((vsw & 0x3f) << 10);
+ tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);
+
+ /* Configure display type: */
+ reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
+ ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
+ LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK | 0x000ff000);
+ reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
+ if (info->tft_alt_mode)
+ reg |= LCDC_TFT_ALT_ENABLE;
+ if (priv->rev == 2) {
+ unsigned int depth, bpp;
+
+ drm_fb_get_bpp_depth(crtc->fb->pixel_format, &depth, &bpp);
+ switch (bpp) {
+ case 16:
+ break;
+ case 32:
+ reg |= LCDC_V2_TFT_24BPP_UNPACK;
+ /* fallthrough */
+ case 24:
+ reg |= LCDC_V2_TFT_24BPP_MODE;
+ break;
+ default:
+ dev_err(dev->dev, "invalid pixel format\n");
+ return -EINVAL;
+ }
+ }
+ reg |= info->fdd << 12; /* FIFO DMA request delay (the 0x000ff000 bits cleared above) */
+ tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);
+
+ if (info->invert_pxl_clk)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
+
+ if (info->sync_ctrl)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
+
+ if (info->sync_edge)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
+
+ if (info->raster_order)
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
+
+
+ update_scanout(crtc);
+ tilcdc_crtc_update_clk(crtc);
+
+ pm_runtime_put_sync(dev->dev);
+
+ return 0;
+}
+
+static int tilcdc_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ update_scanout(crtc);
+ return 0;
+}
+
+static void tilcdc_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
+ .destroy = tilcdc_crtc_destroy,
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = tilcdc_crtc_page_flip,
+};
+
+static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
+ .dpms = tilcdc_crtc_dpms,
+ .mode_fixup = tilcdc_crtc_mode_fixup,
+ .prepare = tilcdc_crtc_prepare,
+ .commit = tilcdc_crtc_commit,
+ .mode_set = tilcdc_crtc_mode_set,
+ .mode_set_base = tilcdc_crtc_mode_set_base,
+ .load_lut = tilcdc_crtc_load_lut,
+};
+
+int tilcdc_crtc_max_width(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int max_width = 0;
+
+ if (priv->rev == 1)
+ max_width = 1024;
+ else if (priv->rev == 2)
+ max_width = 2048;
+
+ return max_width;
+}
+
+int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct tilcdc_drm_private *priv = crtc->dev->dev_private;
+ unsigned int bandwidth;
+
+ if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
+ return MODE_VIRTUAL_X;
+
+ /* width must be multiple of 16 */
+ if (mode->hdisplay & 0xf)
+ return MODE_VIRTUAL_X;
+
+ if (mode->vdisplay > 2048)
+ return MODE_VIRTUAL_Y;
+
+ /* filter out modes that would require too much memory bandwidth: */
+ bandwidth = mode->hdisplay * mode->vdisplay * drm_mode_vrefresh(mode);
+ if (bandwidth > priv->max_bandwidth)
+ return MODE_BAD;
+
+ return MODE_OK;
+}
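+
+/* A rough sense of the bandwidth check above (hypothetical numbers): with the
+ * default max_bandwidth of 1280 * 1024 * 60 (~78.6M pixels/s, see the fallback
+ * in tilcdc_load()), a 1024x768 mode is accepted up to ~100 Hz refresh, while
+ * a 1280x1024 mode is only accepted up to 60 Hz.
+ */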
+
+void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
+ const struct tilcdc_panel_info *info)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ tilcdc_crtc->info = info;
+}
+
+void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int dpms = tilcdc_crtc->dpms;
+ unsigned int lcd_clk, div;
+ int ret;
+
+ pm_runtime_get_sync(dev->dev);
+
+ if (dpms == DRM_MODE_DPMS_ON)
+ tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ /* in raster mode, minimum divisor is 2: */
+ ret = clk_set_rate(priv->disp_clk, crtc->mode.clock * 1000 * 2);
+ if (ret) {
+ dev_err(dev->dev, "failed to set display clock rate to: %d\n",
+ crtc->mode.clock);
+ goto out;
+ }
+
+ lcd_clk = clk_get_rate(priv->clk);
+ div = lcd_clk / (crtc->mode.clock * 1000);
+
+ DBG("lcd_clk=%u, mode clock=%d, div=%u", lcd_clk, crtc->mode.clock, div);
+ DBG("fck=%lu, dpll_disp_ck=%lu", clk_get_rate(priv->clk), clk_get_rate(priv->disp_clk));
+
+ /* Configure the LCD clock divisor. */
+ tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(div) |
+ LCDC_RASTER_MODE);
+
+ if (priv->rev == 2)
+ tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
+ LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
+ LCDC_V2_CORE_CLK_EN);
+
+ if (dpms == DRM_MODE_DPMS_ON)
+ tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+
+out:
+ pm_runtime_put_sync(dev->dev);
+}
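+
+/* Worked example of the divisor logic above (hypothetical rates): for a
+ * 74250 kHz mode clock the display PLL is asked for 148.5 MHz
+ * (clock * 1000 * 2); if the functional clock then reads back as 148.5 MHz,
+ * div comes out as 148500000 / 74250000 = 2, the minimum raster-mode divisor.
+ */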
+
+irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ uint32_t stat = tilcdc_read_irqstatus(dev);
+
+ if ((stat & LCDC_SYNC_LOST) && (stat & LCDC_FIFO_UNDERFLOW)) {
+ stop(crtc);
+ dev_err(dev->dev, "error: %08x\n", stat);
+ tilcdc_clear_irqstatus(dev, stat);
+ start(crtc);
+ } else if (stat & LCDC_PL_LOAD_DONE) {
+ tilcdc_clear_irqstatus(dev, stat);
+ } else {
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+ uint32_t dirty = tilcdc_crtc->dirty & stat;
+
+ tilcdc_clear_irqstatus(dev, stat);
+
+ if (dirty & LCDC_END_OF_FRAME0)
+ set_scanout(crtc, 0);
+
+ if (dirty & LCDC_END_OF_FRAME1)
+ set_scanout(crtc, 1);
+
+ drm_handle_vblank(dev, 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = tilcdc_crtc->event;
+ tilcdc_crtc->event = NULL;
+ if (event)
+ drm_send_vblank_event(dev, 0, event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (dirty && !tilcdc_crtc->dirty)
+ drm_vblank_put(dev, 0);
+ }
+
+ if (priv->rev == 2) {
+ if (stat & LCDC_FRAME_DONE) {
+ tilcdc_crtc->frame_done = true;
+ wake_up(&tilcdc_crtc->frame_done_wq);
+ }
+ tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ /* Destroy the pending vertical blanking event associated with the
+ * pending page flip, if any, and disable vertical blanking interrupts.
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = tilcdc_crtc->event;
+ if (event && event->base.file_priv == file) {
+ tilcdc_crtc->event = NULL;
+ event->base.destroy(&event->base);
+ drm_vblank_put(dev, 0);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
+{
+ struct tilcdc_crtc *tilcdc_crtc;
+ struct drm_crtc *crtc;
+ int ret;
+
+ tilcdc_crtc = kzalloc(sizeof(*tilcdc_crtc), GFP_KERNEL);
+ if (!tilcdc_crtc) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ crtc = &tilcdc_crtc->base;
+
+ tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
+ init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
+
+ ret = kfifo_alloc(&tilcdc_crtc->unref_fifo, 16, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev->dev, "could not allocate unref FIFO\n");
+ goto fail;
+ }
+
+ INIT_WORK(&tilcdc_crtc->work, unref_worker);
+
+ ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
+ if (ret < 0)
+ goto fail;
+
+ drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);
+
+ return crtc;
+
+fail:
+ tilcdc_crtc_destroy(crtc);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
new file mode 100644
index 000000000000..c5b592dc1970
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* LCDC DRM driver, based on da8xx-fb */
+
+#include "tilcdc_drv.h"
+#include "tilcdc_regs.h"
+#include "tilcdc_tfp410.h"
+#include "tilcdc_slave.h"
+#include "tilcdc_panel.h"
+
+#include "drm_fb_helper.h"
+
+static LIST_HEAD(module_list);
+
+void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
+ const struct tilcdc_module_ops *funcs)
+{
+ mod->name = name;
+ mod->funcs = funcs;
+ INIT_LIST_HEAD(&mod->list);
+ list_add(&mod->list, &module_list);
+}
+
+void tilcdc_module_cleanup(struct tilcdc_module *mod)
+{
+ list_del(&mod->list);
+}
+
+static struct of_device_id tilcdc_of_match[];
+
+static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ return drm_fb_cma_create(dev, file_priv, mode_cmd);
+}
+
+static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ if (priv->fbdev)
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = tilcdc_fb_create,
+ .output_poll_changed = tilcdc_fb_output_poll_changed,
+};
+
+static int modeset_init(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct tilcdc_module *mod;
+
+ drm_mode_config_init(dev);
+
+ priv->crtc = tilcdc_crtc_create(dev);
+
+ list_for_each_entry(mod, &module_list, list) {
+ DBG("loading module: %s", mod->name);
+ mod->funcs->modeset_init(mod, dev);
+ }
+
+ if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
+ /* oh nos! */
+ dev_err(dev->dev, "no encoders/connectors found\n");
+ return -ENXIO;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = tilcdc_crtc_max_width(priv->crtc);
+ dev->mode_config.max_height = 2048;
+ dev->mode_config.funcs = &mode_config_funcs;
+
+ return 0;
+}
+
+#ifdef CONFIG_CPU_FREQ
+static int cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct tilcdc_drm_private *priv = container_of(nb,
+ struct tilcdc_drm_private, freq_transition);
+ if (val == CPUFREQ_POSTCHANGE) {
+ if (priv->lcd_fck_rate != clk_get_rate(priv->clk)) {
+ priv->lcd_fck_rate = clk_get_rate(priv->clk);
+ tilcdc_crtc_update_clk(priv->crtc);
+ }
+ }
+
+ return 0;
+}
+#endif
+
+/*
+ * DRM operations:
+ */
+
+static int tilcdc_unload(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct tilcdc_module *mod, *cur;
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+ drm_vblank_cleanup(dev);
+
+ pm_runtime_get_sync(dev->dev);
+ drm_irq_uninstall(dev);
+ pm_runtime_put_sync(dev->dev);
+
+#ifdef CONFIG_CPU_FREQ
+ cpufreq_unregister_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
+ if (priv->clk)
+ clk_put(priv->clk);
+
+ if (priv->mmio)
+ iounmap(priv->mmio);
+
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
+ dev->dev_private = NULL;
+
+ pm_runtime_disable(dev->dev);
+
+ list_for_each_entry_safe(mod, cur, &module_list, list) {
+ DBG("destroying module: %s", mod->name);
+ mod->funcs->destroy(mod);
+ }
+
+ kfree(priv);
+
+ return 0;
+}
+
+static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct device_node *node = pdev->dev.of_node;
+ struct tilcdc_drm_private *priv;
+ struct resource *res;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("tilcdc", 0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev->dev, "failed to get memory resource\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ priv->mmio = ioremap_nocache(res->start, resource_size(res));
+ if (!priv->mmio) {
+ dev_err(dev->dev, "failed to ioremap\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ priv->clk = clk_get(dev->dev, "fck");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev->dev, "failed to get functional clock\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
+ if (IS_ERR(priv->disp_clk)) {
+ dev_err(dev->dev, "failed to get display clock\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+#ifdef CONFIG_CPU_FREQ
+ priv->lcd_fck_rate = clk_get_rate(priv->clk);
+ priv->freq_transition.notifier_call = cpufreq_transition;
+ ret = cpufreq_register_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
+ dev_err(dev->dev, "failed to register cpufreq notifier\n");
+ goto fail;
+ }
+#endif
+
+ if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
+ priv->max_bandwidth = 1280 * 1024 * 60;
+
+ pm_runtime_enable(dev->dev);
+
+ /* Determine LCD IP Version */
+ pm_runtime_get_sync(dev->dev);
+ switch (tilcdc_read(dev, LCDC_PID_REG)) {
+ case 0x4c100102:
+ priv->rev = 1;
+ break;
+ case 0x4f200800:
+ case 0x4f201000:
+ priv->rev = 2;
+ break;
+ default:
+ dev_warn(dev->dev, "Unknown PID Reg value 0x%08x, "
+ "defaulting to LCD revision 1\n",
+ tilcdc_read(dev, LCDC_PID_REG));
+ priv->rev = 1;
+ break;
+ }
+
+ pm_runtime_put_sync(dev->dev);
+
+ ret = modeset_init(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize mode setting\n");
+ goto fail;
+ }
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize vblank\n");
+ goto fail;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+ ret = drm_irq_install(dev);
+ pm_runtime_put_sync(dev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to install IRQ handler\n");
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ priv->fbdev = drm_fbdev_cma_init(dev, 16,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+fail:
+ tilcdc_unload(dev);
+ return ret;
+}
+
+static void tilcdc_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ tilcdc_crtc_cancel_page_flip(priv->crtc, file);
+}
+
+static void tilcdc_lastclose(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ drm_fbdev_cma_restore_mode(priv->fbdev);
+}
+
+static irqreturn_t tilcdc_irq(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = arg;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ return tilcdc_crtc_irq(priv->crtc);
+}
+
+static void tilcdc_irq_preinstall(struct drm_device *dev)
+{
+ tilcdc_clear_irqstatus(dev, 0xffffffff);
+}
+
+static int tilcdc_irq_postinstall(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ /* enable FIFO underflow irq: */
+ if (priv->rev == 1) {
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
+ } else {
+ tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA);
+ }
+
+ return 0;
+}
+
+static void tilcdc_irq_uninstall(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ /* disable irqs that we might have enabled: */
+ if (priv->rev == 1) {
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
+ tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_V1_END_OF_FRAME_INT_ENA);
+ } else {
+ tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG,
+ LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
+ LCDC_V2_END_OF_FRAME0_INT_ENA | LCDC_V2_END_OF_FRAME1_INT_ENA |
+ LCDC_FRAME_DONE);
+ }
+
+}
+
+static void enable_vblank(struct drm_device *dev, bool enable)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ u32 reg, mask;
+
+ if (priv->rev == 1) {
+ reg = LCDC_DMA_CTRL_REG;
+ mask = LCDC_V1_END_OF_FRAME_INT_ENA;
+ } else {
+ reg = LCDC_INT_ENABLE_SET_REG;
+ mask = LCDC_V2_END_OF_FRAME0_INT_ENA |
+ LCDC_V2_END_OF_FRAME1_INT_ENA | LCDC_FRAME_DONE;
+ }
+
+ if (enable)
+ tilcdc_set(dev, reg, mask);
+ else
+ tilcdc_clear(dev, reg, mask);
+}
+
+static int tilcdc_enable_vblank(struct drm_device *dev, int crtc)
+{
+ enable_vblank(dev, true);
+ return 0;
+}
+
+static void tilcdc_disable_vblank(struct drm_device *dev, int crtc)
+{
+ enable_vblank(dev, false);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_PM_SLEEP)
+static const struct {
+ const char *name;
+ uint8_t rev;
+ uint8_t save;
+ uint32_t reg;
+} registers[] = {
+#define REG(rev, save, reg) { #reg, rev, save, reg }
+ /* exists in revision 1: */
+ REG(1, false, LCDC_PID_REG),
+ REG(1, true, LCDC_CTRL_REG),
+ REG(1, false, LCDC_STAT_REG),
+ REG(1, true, LCDC_RASTER_CTRL_REG),
+ REG(1, true, LCDC_RASTER_TIMING_0_REG),
+ REG(1, true, LCDC_RASTER_TIMING_1_REG),
+ REG(1, true, LCDC_RASTER_TIMING_2_REG),
+ REG(1, true, LCDC_DMA_CTRL_REG),
+ REG(1, true, LCDC_DMA_FB_BASE_ADDR_0_REG),
+ REG(1, true, LCDC_DMA_FB_CEILING_ADDR_0_REG),
+ REG(1, true, LCDC_DMA_FB_BASE_ADDR_1_REG),
+ REG(1, true, LCDC_DMA_FB_CEILING_ADDR_1_REG),
+ /* new in revision 2: */
+ REG(2, false, LCDC_RAW_STAT_REG),
+ REG(2, false, LCDC_MASKED_STAT_REG),
+ REG(2, false, LCDC_INT_ENABLE_SET_REG),
+ REG(2, false, LCDC_INT_ENABLE_CLR_REG),
+ REG(2, false, LCDC_END_OF_INT_IND_REG),
+ REG(2, true, LCDC_CLK_ENABLE_REG),
+ REG(2, true, LCDC_INT_ENABLE_SET_REG),
+#undef REG
+};
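+
+/* Entries flagged with save are the ones captured into priv->saved_register[]
+ * by tilcdc_pm_suspend() and written back by tilcdc_pm_resume() further down;
+ * the rev field lets the same table be shared between IP revisions 1 and 2.
+ */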
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static int tilcdc_regs_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ unsigned i;
+
+ pm_runtime_get_sync(dev->dev);
+
+ seq_printf(m, "revision: %d\n", priv->rev);
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++)
+ if (priv->rev >= registers[i].rev)
+ seq_printf(m, "%s:\t %08x\n", registers[i].name,
+ tilcdc_read(dev, registers[i].reg));
+
+ pm_runtime_put_sync(dev->dev);
+
+ return 0;
+}
+
+static int tilcdc_mm_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ return drm_mm_dump_table(m, dev->mm_private);
+}
+
+static struct drm_info_list tilcdc_debugfs_list[] = {
+ { "regs", tilcdc_regs_show, 0 },
+ { "mm", tilcdc_mm_show, 0 },
+ { "fb", drm_fb_cma_debugfs_show, 0 },
+};
+
+static int tilcdc_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct tilcdc_module *mod;
+ int ret;
+
+ ret = drm_debugfs_create_files(tilcdc_debugfs_list,
+ ARRAY_SIZE(tilcdc_debugfs_list),
+ minor->debugfs_root, minor);
+
+ list_for_each_entry(mod, &module_list, list)
+ if (mod->funcs->debugfs_init)
+ mod->funcs->debugfs_init(mod, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install tilcdc_debugfs_list\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void tilcdc_debugfs_cleanup(struct drm_minor *minor)
+{
+ struct tilcdc_module *mod;
+ drm_debugfs_remove_files(tilcdc_debugfs_list,
+ ARRAY_SIZE(tilcdc_debugfs_list), minor);
+
+ list_for_each_entry(mod, &module_list, list)
+ if (mod->funcs->debugfs_cleanup)
+ mod->funcs->debugfs_cleanup(mod, minor);
+}
+#endif
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .fasync = drm_fasync,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver tilcdc_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+ .load = tilcdc_load,
+ .unload = tilcdc_unload,
+ .preclose = tilcdc_preclose,
+ .lastclose = tilcdc_lastclose,
+ .irq_handler = tilcdc_irq,
+ .irq_preinstall = tilcdc_irq_preinstall,
+ .irq_postinstall = tilcdc_irq_postinstall,
+ .irq_uninstall = tilcdc_irq_uninstall,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = tilcdc_enable_vblank,
+ .disable_vblank = tilcdc_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_cma_dumb_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = tilcdc_debugfs_init,
+ .debugfs_cleanup = tilcdc_debugfs_cleanup,
+#endif
+ .fops = &fops,
+ .name = "tilcdc",
+ .desc = "TI LCD Controller DRM",
+ .date = "20121205",
+ .major = 1,
+ .minor = 0,
+};
+
+/*
+ * Power management:
+ */
+
+#ifdef CONFIG_PM_SLEEP
+static int tilcdc_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ unsigned i, n = 0;
+
+ drm_kms_helper_poll_disable(ddev);
+
+ /* Save register state: */
+ for (i = 0; i < ARRAY_SIZE(registers); i++)
+ if (registers[i].save && (priv->rev >= registers[i].rev))
+ priv->saved_register[n++] = tilcdc_read(ddev, registers[i].reg);
+
+ return 0;
+}
+
+static int tilcdc_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ unsigned i, n = 0;
+
+ /* Restore register state: */
+ for (i = 0; i < ARRAY_SIZE(registers); i++)
+ if (registers[i].save && (priv->rev >= registers[i].rev))
+ tilcdc_write(ddev, registers[i].reg, priv->saved_register[n++]);
+
+ drm_kms_helper_poll_enable(ddev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tilcdc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tilcdc_pm_suspend, tilcdc_pm_resume)
+};
+
+/*
+ * Platform driver:
+ */
+
+static int tilcdc_pdev_probe(struct platform_device *pdev)
+{
+ /* bail out early if no DT data: */
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ return drm_platform_init(&tilcdc_driver, pdev);
+}
+
+static int tilcdc_pdev_remove(struct platform_device *pdev)
+{
+ drm_platform_exit(&tilcdc_driver, pdev);
+
+ return 0;
+}
+
+static struct of_device_id tilcdc_of_match[] = {
+ { .compatible = "ti,am33xx-tilcdc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tilcdc_of_match);
+
+static struct platform_driver tilcdc_platform_driver = {
+ .probe = tilcdc_pdev_probe,
+ .remove = tilcdc_pdev_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tilcdc",
+ .pm = &tilcdc_pm_ops,
+ .of_match_table = tilcdc_of_match,
+ },
+};
+
+static int __init tilcdc_drm_init(void)
+{
+ DBG("init");
+ tilcdc_tfp410_init();
+ tilcdc_slave_init();
+ tilcdc_panel_init();
+ return platform_driver_register(&tilcdc_platform_driver);
+}
+
+static void __exit tilcdc_drm_fini(void)
+{
+ DBG("fini");
+ tilcdc_tfp410_fini();
+ tilcdc_slave_fini();
+ tilcdc_panel_fini();
+ platform_driver_unregister(&tilcdc_platform_driver);
+}
+
+late_initcall(tilcdc_drm_init);
+module_exit(tilcdc_drm_fini);
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
+MODULE_DESCRIPTION("TI LCD Controller DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
new file mode 100644
index 000000000000..8242b5a4307b
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_DRV_H__
+#define __TILCDC_DRV_H__
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/list.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+struct tilcdc_drm_private {
+ void __iomem *mmio;
+
+ struct clk *disp_clk; /* display dpll */
+ struct clk *clk; /* functional clock */
+ int rev; /* IP revision */
+
+ /* don't attempt resolutions w/ higher W * H * Hz: */
+ uint32_t max_bandwidth;
+
+ /* register contents saved across suspend/resume: */
+ u32 saved_register[12];
+
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+ unsigned int lcd_fck_rate;
+#endif
+
+ struct workqueue_struct *wq;
+
+ struct drm_fbdev_cma *fbdev;
+
+ struct drm_crtc *crtc;
+
+ unsigned int num_encoders;
+ struct drm_encoder *encoders[8];
+
+ unsigned int num_connectors;
+ struct drm_connector *connectors[8];
+};
+
+/* Sub-module for display. Since we don't know at compile time what panels
+ * or display adapter(s) might be present (for example, an off-chip dvi/tfp410
+ * or hdmi encoder, or various lcd panels), the connector/encoder(s) are split
+ * into separate drivers. If they are probed and found to be present, they
+ * register themselves via tilcdc_module_init() (a minimal sketch follows the
+ * declarations below).
+ */
+struct tilcdc_module;
+
+struct tilcdc_module_ops {
+ /* create appropriate encoders/connectors: */
+ int (*modeset_init)(struct tilcdc_module *mod, struct drm_device *dev);
+ void (*destroy)(struct tilcdc_module *mod);
+#ifdef CONFIG_DEBUG_FS
+ /* create debugfs nodes (can be NULL): */
+ int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor);
+ /* cleanup debugfs nodes (can be NULL): */
+ void (*debugfs_cleanup)(struct tilcdc_module *mod, struct drm_minor *minor);
+#endif
+};
+
+struct tilcdc_module {
+ const char *name;
+ struct list_head list;
+ const struct tilcdc_module_ops *funcs;
+};
+
+void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
+ const struct tilcdc_module_ops *funcs);
+void tilcdc_module_cleanup(struct tilcdc_module *mod);
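+
+/* Minimal sketch of how a display sub-module hooks in via the API above.
+ * "foo" and its ops are hypothetical; tilcdc_panel.c and tilcdc_slave.c
+ * below are the real users.
+ *
+ * static int foo_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+ * {
+ * // create encoder(s)/connector(s) and add them to priv->encoders[]
+ * // and priv->connectors[]
+ * return 0;
+ * }
+ *
+ * static void foo_destroy(struct tilcdc_module *mod)
+ * {
+ * tilcdc_module_cleanup(mod);
+ * }
+ *
+ * static const struct tilcdc_module_ops foo_ops = {
+ * .modeset_init = foo_modeset_init,
+ * .destroy = foo_destroy,
+ * };
+ *
+ * static struct tilcdc_module foo_module;
+ *
+ * // typically called from the sub-module's platform driver probe:
+ * tilcdc_module_init(&foo_module, "foo", &foo_ops);
+ */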
+
+
+/* Panel config that needs to be set in the crtc, but is not coming from
+ * the mode timings. The display module is expected to call
+ * tilcdc_crtc_set_panel_info() to set this during modeset.
+ */
+struct tilcdc_panel_info {
+
+ /* AC Bias Pin Frequency */
+ uint32_t ac_bias;
+
+ /* AC Bias Pin Transitions per Interrupt */
+ uint32_t ac_bias_intrpt;
+
+ /* DMA burst size */
+ uint32_t dma_burst_sz;
+
+ /* Bits per pixel */
+ uint32_t bpp;
+
+ /* FIFO DMA Request Delay */
+ uint32_t fdd;
+
+ /* TFT Alternative Signal Mapping (Only for active) */
+ bool tft_alt_mode;
+
+ /* Invert pixel clock */
+ bool invert_pxl_clk;
+
+ /* Horizontal and Vertical Sync Edge: 0=rising 1=falling */
+ uint32_t sync_edge;
+
+ /* Horizontal and Vertical Sync: Control: 0=ignore */
+ uint32_t sync_ctrl;
+
+ /* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */
+ uint32_t raster_order;
+
+ /* DMA FIFO threshold */
+ uint32_t fifo_th;
+};
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev);
+void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
+irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc);
+void tilcdc_crtc_update_clk(struct drm_crtc *crtc);
+void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
+ const struct tilcdc_panel_info *info);
+int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
+int tilcdc_crtc_max_width(struct drm_crtc *crtc);
+
+#endif /* __TILCDC_DRV_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
new file mode 100644
index 000000000000..580b74e2022b
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/backlight.h>
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
+
+#include "tilcdc_drv.h"
+
+struct panel_module {
+ struct tilcdc_module base;
+ struct tilcdc_panel_info *info;
+ struct display_timings *timings;
+ struct backlight_device *backlight;
+};
+#define to_panel_module(x) container_of(x, struct panel_module, base)
+
+
+/*
+ * Encoder:
+ */
+
+struct panel_encoder {
+ struct drm_encoder base;
+ struct panel_module *mod;
+};
+#define to_panel_encoder(x) container_of(x, struct panel_encoder, base)
+
+
+static void panel_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(panel_encoder);
+}
+
+static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
+ struct backlight_device *backlight = panel_encoder->mod->backlight;
+
+ if (!backlight)
+ return;
+
+ backlight->props.power = mode == DRM_MODE_DPMS_ON
+ ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ backlight_update_status(backlight);
+}
+
+static bool panel_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* nothing needed */
+ return true;
+}
+
+static void panel_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
+ panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ tilcdc_crtc_set_panel_info(encoder->crtc, panel_encoder->mod->info);
+}
+
+static void panel_encoder_commit(struct drm_encoder *encoder)
+{
+ panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void panel_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* nothing needed */
+}
+
+static const struct drm_encoder_funcs panel_encoder_funcs = {
+ .destroy = panel_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
+ .dpms = panel_encoder_dpms,
+ .mode_fixup = panel_encoder_mode_fixup,
+ .prepare = panel_encoder_prepare,
+ .commit = panel_encoder_commit,
+ .mode_set = panel_encoder_mode_set,
+};
+
+static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
+ struct panel_module *mod)
+{
+ struct panel_encoder *panel_encoder;
+ struct drm_encoder *encoder;
+ int ret;
+
+ panel_encoder = kzalloc(sizeof(*panel_encoder), GFP_KERNEL);
+ if (!panel_encoder) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ panel_encoder->mod = mod;
+
+ encoder = &panel_encoder->base;
+ encoder->possible_crtcs = 1;
+
+ ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ if (ret < 0)
+ goto fail;
+
+ drm_encoder_helper_add(encoder, &panel_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ panel_encoder_destroy(encoder);
+ return NULL;
+}
+
+/*
+ * Connector:
+ */
+
+struct panel_connector {
+ struct drm_connector base;
+
+ struct drm_encoder *encoder; /* our connected encoder */
+ struct panel_module *mod;
+};
+#define to_panel_connector(x) container_of(x, struct panel_connector, base)
+
+
+static void panel_connector_destroy(struct drm_connector *connector)
+{
+ struct panel_connector *panel_connector = to_panel_connector(connector);
+ drm_connector_cleanup(connector);
+ kfree(panel_connector);
+}
+
+static enum drm_connector_status panel_connector_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ return connector_status_connected;
+}
+
+static int panel_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct panel_connector *panel_connector = to_panel_connector(connector);
+ struct display_timings *timings = panel_connector->mod->timings;
+ int i;
+
+ for (i = 0; i < timings->num_timings; i++) {
+ struct drm_display_mode *mode = drm_mode_create(dev);
+ struct videomode vm;
+
+ if (videomode_from_timing(timings, &vm, i))
+ break;
+
+ drm_display_mode_from_videomode(&vm, mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+ if (timings->native_mode == i)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return i;
+}
+
+static int panel_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tilcdc_drm_private *priv = connector->dev->dev_private;
+ /* our only constraints are what the crtc can generate: */
+ return tilcdc_crtc_mode_valid(priv->crtc, mode);
+}
+
+static struct drm_encoder *panel_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct panel_connector *panel_connector = to_panel_connector(connector);
+ return panel_connector->encoder;
+}
+
+static const struct drm_connector_funcs panel_connector_funcs = {
+ .destroy = panel_connector_destroy,
+ .dpms = drm_helper_connector_dpms,
+ .detect = panel_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
+ .get_modes = panel_connector_get_modes,
+ .mode_valid = panel_connector_mode_valid,
+ .best_encoder = panel_connector_best_encoder,
+};
+
+static struct drm_connector *panel_connector_create(struct drm_device *dev,
+ struct panel_module *mod, struct drm_encoder *encoder)
+{
+ struct panel_connector *panel_connector;
+ struct drm_connector *connector;
+ int ret;
+
+ panel_connector = kzalloc(sizeof(*panel_connector), GFP_KERNEL);
+ if (!panel_connector) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ panel_connector->encoder = encoder;
+ panel_connector->mod = mod;
+
+ connector = &panel_connector->base;
+
+ drm_connector_init(dev, connector, &panel_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ drm_connector_helper_add(connector, &panel_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret)
+ goto fail;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ panel_connector_destroy(connector);
+ return NULL;
+}
+
+/*
+ * Module:
+ */
+
+static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+{
+ struct panel_module *panel_mod = to_panel_module(mod);
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ encoder = panel_encoder_create(dev, panel_mod);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = panel_connector_create(dev, panel_mod, encoder);
+ if (!connector)
+ return -ENOMEM;
+
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
+
+ return 0;
+}
+
+static void panel_destroy(struct tilcdc_module *mod)
+{
+ struct panel_module *panel_mod = to_panel_module(mod);
+
+ if (panel_mod->timings) {
+ display_timings_release(panel_mod->timings);
+ kfree(panel_mod->timings);
+ }
+
+ tilcdc_module_cleanup(mod);
+ kfree(panel_mod->info);
+ kfree(panel_mod);
+}
+
+static const struct tilcdc_module_ops panel_module_ops = {
+ .modeset_init = panel_modeset_init,
+ .destroy = panel_destroy,
+};
+
+/*
+ * Device:
+ */
+
+/* maybe move this somewhere common if it is needed by other outputs? */
+static struct tilcdc_panel_info * of_get_panel_info(struct device_node *np)
+{
+ struct device_node *info_np;
+ struct tilcdc_panel_info *info;
+ int ret = 0;
+
+ if (!np) {
+ pr_err("%s: no devicenode given\n", __func__);
+ return NULL;
+ }
+
+ info_np = of_get_child_by_name(np, "panel-info");
+ if (!info_np) {
+ pr_err("%s: could not find panel-info node\n", __func__);
+ return NULL;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ pr_err("%s: allocation failed\n", __func__);
+ return NULL;
+ }
+
+ ret |= of_property_read_u32(info_np, "ac-bias", &info->ac_bias);
+ ret |= of_property_read_u32(info_np, "ac-bias-intrpt", &info->ac_bias_intrpt);
+ ret |= of_property_read_u32(info_np, "dma-burst-sz", &info->dma_burst_sz);
+ ret |= of_property_read_u32(info_np, "bpp", &info->bpp);
+ ret |= of_property_read_u32(info_np, "fdd", &info->fdd);
+ ret |= of_property_read_u32(info_np, "sync-edge", &info->sync_edge);
+ ret |= of_property_read_u32(info_np, "sync-ctrl", &info->sync_ctrl);
+ ret |= of_property_read_u32(info_np, "raster-order", &info->raster_order);
+ ret |= of_property_read_u32(info_np, "fifo-th", &info->fifo_th);
+
+ /* optional: */
+ info->tft_alt_mode = of_property_read_bool(info_np, "tft-alt-mode");
+ info->invert_pxl_clk = of_property_read_bool(info_np, "invert-pxl-clk");
+
+ if (ret) {
+ pr_err("%s: error reading panel-info properties\n", __func__);
+ kfree(info);
+ return NULL;
+ }
+
+ return info;
+}
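+
+/* For reference, a panel-info node matching the properties read above might
+ * look roughly like the following (property values are hypothetical, chosen
+ * to mirror the defaults used elsewhere in this driver):
+ *
+ * panel-info {
+ * ac-bias = <255>;
+ * ac-bias-intrpt = <0>;
+ * dma-burst-sz = <16>;
+ * bpp = <16>;
+ * fdd = <0x80>;
+ * sync-edge = <0>;
+ * sync-ctrl = <1>;
+ * raster-order = <0>;
+ * fifo-th = <0>;
+ * };
+ */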
+
+static struct of_device_id panel_of_match[];
+
+static int panel_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct panel_module *panel_mod;
+ struct tilcdc_module *mod;
+ struct pinctrl *pinctrl;
+ int ret = -EINVAL;
+
+
+ /* bail out early if no DT data: */
+ if (!node) {
+ dev_err(&pdev->dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ panel_mod = kzalloc(sizeof(*panel_mod), GFP_KERNEL);
+ if (!panel_mod)
+ return -ENOMEM;
+
+ mod = &panel_mod->base;
+
+ tilcdc_module_init(mod, "panel", &panel_module_ops);
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "pins are not configured\n");
+
+
+ panel_mod->timings = of_get_display_timings(node);
+ if (!panel_mod->timings) {
+ dev_err(&pdev->dev, "could not get panel timings\n");
+ goto fail;
+ }
+
+ panel_mod->info = of_get_panel_info(node);
+ if (!panel_mod->info) {
+ dev_err(&pdev->dev, "could not get panel info\n");
+ goto fail;
+ }
+
+ panel_mod->backlight = of_find_backlight_by_node(node);
+ if (panel_mod->backlight)
+ dev_info(&pdev->dev, "found backlight\n");
+
+ return 0;
+
+fail:
+ panel_destroy(mod);
+ return ret;
+}
+
+static int panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct of_device_id panel_of_match[] = {
+ { .compatible = "ti,tilcdc,panel", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, panel_of_match);
+
+struct platform_driver panel_driver = {
+ .probe = panel_probe,
+ .remove = panel_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "panel",
+ .of_match_table = panel_of_match,
+ },
+};
+
+int __init tilcdc_panel_init(void)
+{
+ return platform_driver_register(&panel_driver);
+}
+
+void __exit tilcdc_panel_fini(void)
+{
+ platform_driver_unregister(&panel_driver);
+}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.h b/drivers/gpu/drm/tilcdc/tilcdc_panel.h
new file mode 100644
index 000000000000..7db40aacc74a
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_PANEL_H__
+#define __TILCDC_PANEL_H__
+
+/* sub-module for generic lcd panel output */
+
+int tilcdc_panel_init(void);
+void tilcdc_panel_fini(void);
+
+#endif /* __TILCDC_PANEL_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
new file mode 100644
index 000000000000..17fd1b45428a
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_REGS_H__
+#define __TILCDC_REGS_H__
+
+/* LCDC register definitions, based on da8xx-fb */
+
+#include <linux/bitops.h>
+
+#include "tilcdc_drv.h"
+
+/* LCDC Status Register */
+#define LCDC_END_OF_FRAME1 BIT(9)
+#define LCDC_END_OF_FRAME0 BIT(8)
+#define LCDC_PL_LOAD_DONE BIT(6)
+#define LCDC_FIFO_UNDERFLOW BIT(5)
+#define LCDC_SYNC_LOST BIT(2)
+#define LCDC_FRAME_DONE BIT(0)
+
+/* LCDC DMA Control Register */
+#define LCDC_DMA_BURST_SIZE(x) ((x) << 4)
+#define LCDC_DMA_BURST_1 0x0
+#define LCDC_DMA_BURST_2 0x1
+#define LCDC_DMA_BURST_4 0x2
+#define LCDC_DMA_BURST_8 0x3
+#define LCDC_DMA_BURST_16 0x4
+#define LCDC_V1_END_OF_FRAME_INT_ENA BIT(2)
+#define LCDC_V2_END_OF_FRAME0_INT_ENA BIT(8)
+#define LCDC_V2_END_OF_FRAME1_INT_ENA BIT(9)
+#define LCDC_DUAL_FRAME_BUFFER_ENABLE BIT(0)
+
+/* LCDC Control Register */
+#define LCDC_CLK_DIVISOR(x) ((x) << 8)
+#define LCDC_RASTER_MODE 0x01
+
+/* LCDC Raster Control Register */
+#define LCDC_PALETTE_LOAD_MODE(x) ((x) << 20)
+#define PALETTE_AND_DATA 0x00
+#define PALETTE_ONLY 0x01
+#define DATA_ONLY 0x02
+
+#define LCDC_MONO_8BIT_MODE BIT(9)
+#define LCDC_RASTER_ORDER BIT(8)
+#define LCDC_TFT_MODE BIT(7)
+#define LCDC_V1_UNDERFLOW_INT_ENA BIT(6)
+#define LCDC_V2_UNDERFLOW_INT_ENA BIT(5)
+#define LCDC_V1_PL_INT_ENA BIT(4)
+#define LCDC_V2_PL_INT_ENA BIT(6)
+#define LCDC_MONOCHROME_MODE BIT(1)
+#define LCDC_RASTER_ENABLE BIT(0)
+#define LCDC_TFT_ALT_ENABLE BIT(23)
+#define LCDC_STN_565_ENABLE BIT(24)
+#define LCDC_V2_DMA_CLK_EN BIT(2)
+#define LCDC_V2_LIDD_CLK_EN BIT(1)
+#define LCDC_V2_CORE_CLK_EN BIT(0)
+#define LCDC_V2_LPP_B10 26
+#define LCDC_V2_TFT_24BPP_MODE BIT(25)
+#define LCDC_V2_TFT_24BPP_UNPACK BIT(26)
+
+/* LCDC Raster Timing 2 Register */
+#define LCDC_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16)
+#define LCDC_AC_BIAS_FREQUENCY(x) ((x) << 8)
+#define LCDC_SYNC_CTRL BIT(25)
+#define LCDC_SYNC_EDGE BIT(24)
+#define LCDC_INVERT_PIXEL_CLOCK BIT(22)
+#define LCDC_INVERT_HSYNC BIT(21)
+#define LCDC_INVERT_VSYNC BIT(20)
+
+/* LCDC Block */
+#define LCDC_PID_REG 0x0
+#define LCDC_CTRL_REG 0x4
+#define LCDC_STAT_REG 0x8
+#define LCDC_RASTER_CTRL_REG 0x28
+#define LCDC_RASTER_TIMING_0_REG 0x2c
+#define LCDC_RASTER_TIMING_1_REG 0x30
+#define LCDC_RASTER_TIMING_2_REG 0x34
+#define LCDC_DMA_CTRL_REG 0x40
+#define LCDC_DMA_FB_BASE_ADDR_0_REG 0x44
+#define LCDC_DMA_FB_CEILING_ADDR_0_REG 0x48
+#define LCDC_DMA_FB_BASE_ADDR_1_REG 0x4c
+#define LCDC_DMA_FB_CEILING_ADDR_1_REG 0x50
+
+/* Interrupt Registers available only in Version 2 */
+#define LCDC_RAW_STAT_REG 0x58
+#define LCDC_MASKED_STAT_REG 0x5c
+#define LCDC_INT_ENABLE_SET_REG 0x60
+#define LCDC_INT_ENABLE_CLR_REG 0x64
+#define LCDC_END_OF_INT_IND_REG 0x68
+
+/* Clock registers available only on Version 2 */
+#define LCDC_CLK_ENABLE_REG 0x6c
+#define LCDC_CLK_RESET_REG 0x70
+#define LCDC_CLK_MAIN_RESET BIT(3)
+
+
+/*
+ * Helpers:
+ */
+
+static inline void tilcdc_write(struct drm_device *dev, u32 reg, u32 data)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ iowrite32(data, priv->mmio + reg);
+}
+
+static inline u32 tilcdc_read(struct drm_device *dev, u32 reg)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ return ioread32(priv->mmio + reg);
+}
+
+static inline void tilcdc_set(struct drm_device *dev, u32 reg, u32 mask)
+{
+ tilcdc_write(dev, reg, tilcdc_read(dev, reg) | mask);
+}
+
+static inline void tilcdc_clear(struct drm_device *dev, u32 reg, u32 mask)
+{
+ tilcdc_write(dev, reg, tilcdc_read(dev, reg) & ~mask);
+}
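+
+/* These are simple read-modify-write helpers; for example, the crtc code uses
+ * tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE) to start the
+ * raster engine and tilcdc_clear() with the same mask to stop it.
+ */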
+
+/* the register to read/clear irqstatus differs between v1 and v2 of the IP */
+static inline u32 tilcdc_irqstatus_reg(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ return (priv->rev == 2) ? LCDC_MASKED_STAT_REG : LCDC_STAT_REG;
+}
+
+static inline u32 tilcdc_read_irqstatus(struct drm_device *dev)
+{
+ return tilcdc_read(dev, tilcdc_irqstatus_reg(dev));
+}
+
+static inline void tilcdc_clear_irqstatus(struct drm_device *dev, u32 mask)
+{
+ tilcdc_write(dev, tilcdc_irqstatus_reg(dev), mask);
+}
+
+#endif /* __TILCDC_REGS_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
new file mode 100644
index 000000000000..568dc1c08e6c
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/of_i2c.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "tilcdc_drv.h"
+
+struct slave_module {
+ struct tilcdc_module base;
+ struct i2c_adapter *i2c;
+};
+#define to_slave_module(x) container_of(x, struct slave_module, base)
+
+static const struct tilcdc_panel_info slave_info = {
+ .bpp = 16,
+ .ac_bias = 255,
+ .ac_bias_intrpt = 0,
+ .dma_burst_sz = 16,
+ .fdd = 0x80,
+ .tft_alt_mode = 0,
+ .sync_edge = 0,
+ .sync_ctrl = 1,
+ .raster_order = 0,
+};
+
+
+/*
+ * Encoder:
+ */
+
+struct slave_encoder {
+ struct drm_encoder_slave base;
+ struct slave_module *mod;
+};
+#define to_slave_encoder(x) container_of(to_encoder_slave(x), struct slave_encoder, base)
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+ return to_encoder_slave(enc)->slave_funcs;
+}
+
+static void slave_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct slave_encoder *slave_encoder = to_slave_encoder(encoder);
+ if (get_slave_funcs(encoder))
+ get_slave_funcs(encoder)->destroy(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(slave_encoder);
+}
+
+static void slave_encoder_prepare(struct drm_encoder *encoder)
+{
+ drm_i2c_encoder_prepare(encoder);
+ tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info);
+}
+
+static const struct drm_encoder_funcs slave_encoder_funcs = {
+ .destroy = slave_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = {
+ .dpms = drm_i2c_encoder_dpms,
+ .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .prepare = slave_encoder_prepare,
+ .commit = drm_i2c_encoder_commit,
+ .mode_set = drm_i2c_encoder_mode_set,
+ .save = drm_i2c_encoder_save,
+ .restore = drm_i2c_encoder_restore,
+};
+
+static const struct i2c_board_info info = {
+ I2C_BOARD_INFO("tda998x", 0x70)
+};
+
+static struct drm_encoder *slave_encoder_create(struct drm_device *dev,
+ struct slave_module *mod)
+{
+ struct slave_encoder *slave_encoder;
+ struct drm_encoder *encoder;
+ int ret;
+
+ slave_encoder = kzalloc(sizeof(*slave_encoder), GFP_KERNEL);
+ if (!slave_encoder) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ slave_encoder->mod = mod;
+
+ encoder = &slave_encoder->base.base;
+ encoder->possible_crtcs = 1;
+
+ ret = drm_encoder_init(dev, encoder, &slave_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret)
+ goto fail;
+
+ drm_encoder_helper_add(encoder, &slave_encoder_helper_funcs);
+
+ ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), mod->i2c, &info);
+ if (ret)
+ goto fail;
+
+ return encoder;
+
+fail:
+ slave_encoder_destroy(encoder);
+ return NULL;
+}
+
+/*
+ * Connector:
+ */
+
+struct slave_connector {
+ struct drm_connector base;
+
+ struct drm_encoder *encoder; /* our connected encoder */
+ struct slave_module *mod;
+};
+#define to_slave_connector(x) container_of(x, struct slave_connector, base)
+
+static void slave_connector_destroy(struct drm_connector *connector)
+{
+ struct slave_connector *slave_connector = to_slave_connector(connector);
+ drm_connector_cleanup(connector);
+ kfree(slave_connector);
+}
+
+static enum drm_connector_status slave_connector_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+ return get_slave_funcs(encoder)->detect(encoder, connector);
+}
+
+static int slave_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+ return get_slave_funcs(encoder)->get_modes(encoder, connector);
+}
+
+static int slave_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+ struct tilcdc_drm_private *priv = connector->dev->dev_private;
+ int ret;
+
+ ret = tilcdc_crtc_mode_valid(priv->crtc, mode);
+ if (ret != MODE_OK)
+ return ret;
+
+ return get_slave_funcs(encoder)->mode_valid(encoder, mode);
+}
+
+static struct drm_encoder *slave_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct slave_connector *slave_connector = to_slave_connector(connector);
+ return slave_connector->encoder;
+}
+
+static int slave_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property, uint64_t value)
+{
+ struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+ return get_slave_funcs(encoder)->set_property(encoder,
+ connector, property, value);
+}
+
+static const struct drm_connector_funcs slave_connector_funcs = {
+ .destroy = slave_connector_destroy,
+ .dpms = drm_helper_connector_dpms,
+ .detect = slave_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = slave_connector_set_property,
+};
+
+static const struct drm_connector_helper_funcs slave_connector_helper_funcs = {
+ .get_modes = slave_connector_get_modes,
+ .mode_valid = slave_connector_mode_valid,
+ .best_encoder = slave_connector_best_encoder,
+};
+
+static struct drm_connector *slave_connector_create(struct drm_device *dev,
+ struct slave_module *mod, struct drm_encoder *encoder)
+{
+ struct slave_connector *slave_connector;
+ struct drm_connector *connector;
+ int ret;
+
+ slave_connector = kzalloc(sizeof(*slave_connector), GFP_KERNEL);
+ if (!slave_connector) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ slave_connector->encoder = encoder;
+ slave_connector->mod = mod;
+
+ connector = &slave_connector->base;
+
+ drm_connector_init(dev, connector, &slave_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(connector, &slave_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ get_slave_funcs(encoder)->create_resources(encoder, connector);
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret)
+ goto fail;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ slave_connector_destroy(connector);
+ return NULL;
+}
+
+/*
+ * Module:
+ */
+
+static int slave_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+{
+ struct slave_module *slave_mod = to_slave_module(mod);
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ encoder = slave_encoder_create(dev, slave_mod);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = slave_connector_create(dev, slave_mod, encoder);
+ if (!connector)
+ return -ENOMEM;
+
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
+
+ return 0;
+}
+
+static void slave_destroy(struct tilcdc_module *mod)
+{
+ struct slave_module *slave_mod = to_slave_module(mod);
+
+ tilcdc_module_cleanup(mod);
+ kfree(slave_mod);
+}
+
+static const struct tilcdc_module_ops slave_module_ops = {
+ .modeset_init = slave_modeset_init,
+ .destroy = slave_destroy,
+};
+
+/*
+ * Device:
+ */
+
+static struct of_device_id slave_of_match[];
+
+static int slave_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *i2c_node;
+ struct slave_module *slave_mod;
+ struct tilcdc_module *mod;
+ struct pinctrl *pinctrl;
+ uint32_t i2c_phandle;
+ int ret = -EINVAL;
+
+ /* bail out early if no DT data: */
+ if (!node) {
+ dev_err(&pdev->dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL);
+ if (!slave_mod)
+ return -ENOMEM;
+
+ mod = &slave_mod->base;
+
+ tilcdc_module_init(mod, "slave", &slave_module_ops);
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "pins are not configured\n");
+
+ if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
+ dev_err(&pdev->dev, "could not get i2c bus phandle\n");
+ goto fail;
+ }
+
+ i2c_node = of_find_node_by_phandle(i2c_phandle);
+ if (!i2c_node) {
+ dev_err(&pdev->dev, "could not get i2c bus node\n");
+ goto fail;
+ }
+
+ slave_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
+ if (!slave_mod->i2c) {
+ dev_err(&pdev->dev, "could not get i2c\n");
+ goto fail;
+ }
+
+ of_node_put(i2c_node);
+
+ return 0;
+
+fail:
+ slave_destroy(mod);
+ return ret;
+}
+
+static int slave_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct of_device_id slave_of_match[] = {
+ { .compatible = "ti,tilcdc,slave", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, slave_of_match);
+
+struct platform_driver slave_driver = {
+ .probe = slave_probe,
+ .remove = slave_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "slave",
+ .of_match_table = slave_of_match,
+ },
+};
+
+int __init tilcdc_slave_init(void)
+{
+ return platform_driver_register(&slave_driver);
+}
+
+void __exit tilcdc_slave_fini(void)
+{
+ platform_driver_unregister(&slave_driver);
+}
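
Note: slave_probe() above (and tfp410_probe() later in this series) resolves the "i2c" phandle property to an adapter in three steps. A minimal sketch of that lookup, using only the helpers the probe already calls (the wrapper name is hypothetical):

    static struct i2c_adapter *get_encoder_i2c(struct device_node *node)
    {
        struct device_node *i2c_node;
        struct i2c_adapter *adap;
        u32 phandle;

        /* the "i2c" property carries a phandle to the bus node */
        if (of_property_read_u32(node, "i2c", &phandle))
            return NULL;

        i2c_node = of_find_node_by_phandle(phandle);
        if (!i2c_node)
            return NULL;

        /* map the DT node to its registered adapter, if it has probed */
        adap = of_find_i2c_adapter_by_node(i2c_node);
        of_node_put(i2c_node);
        return adap;
    }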
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.h b/drivers/gpu/drm/tilcdc/tilcdc_slave.h
new file mode 100644
index 000000000000..2f8504848320
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_SLAVE_H__
+#define __TILCDC_SLAVE_H__
+
+/* sub-module for i2c slave encoder output */
+
+int tilcdc_slave_init(void);
+void tilcdc_slave_fini(void);
+
+#endif /* __TILCDC_SLAVE_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
new file mode 100644
index 000000000000..58d487ba2414
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/of_i2c.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "tilcdc_drv.h"
+
+struct tfp410_module {
+ struct tilcdc_module base;
+ struct i2c_adapter *i2c;
+ int gpio;
+};
+#define to_tfp410_module(x) container_of(x, struct tfp410_module, base)
+
+
+static const struct tilcdc_panel_info dvi_info = {
+ .ac_bias = 255,
+ .ac_bias_intrpt = 0,
+ .dma_burst_sz = 16,
+ .bpp = 16,
+ .fdd = 0x80,
+ .tft_alt_mode = 0,
+ .sync_edge = 0,
+ .sync_ctrl = 1,
+ .raster_order = 0,
+};
+
+/*
+ * Encoder:
+ */
+
+struct tfp410_encoder {
+ struct drm_encoder base;
+ struct tfp410_module *mod;
+ int dpms;
+};
+#define to_tfp410_encoder(x) container_of(x, struct tfp410_encoder, base)
+
+
+static void tfp410_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(tfp410_encoder);
+}
+
+static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
+
+ if (tfp410_encoder->dpms == mode)
+ return;
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ DBG("Power on");
+ gpio_direction_output(tfp410_encoder->mod->gpio, 1);
+ } else {
+ DBG("Power off");
+ gpio_direction_output(tfp410_encoder->mod->gpio, 0);
+ }
+
+ tfp410_encoder->dpms = mode;
+}
+
+static bool tfp410_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* nothing needed */
+ return true;
+}
+
+static void tfp410_encoder_prepare(struct drm_encoder *encoder)
+{
+ tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ tilcdc_crtc_set_panel_info(encoder->crtc, &dvi_info);
+}
+
+static void tfp410_encoder_commit(struct drm_encoder *encoder)
+{
+ tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void tfp410_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* nothing needed */
+}
+
+static const struct drm_encoder_funcs tfp410_encoder_funcs = {
+ .destroy = tfp410_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = {
+ .dpms = tfp410_encoder_dpms,
+ .mode_fixup = tfp410_encoder_mode_fixup,
+ .prepare = tfp410_encoder_prepare,
+ .commit = tfp410_encoder_commit,
+ .mode_set = tfp410_encoder_mode_set,
+};
+
+static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
+ struct tfp410_module *mod)
+{
+ struct tfp410_encoder *tfp410_encoder;
+ struct drm_encoder *encoder;
+ int ret;
+
+ tfp410_encoder = kzalloc(sizeof(*tfp410_encoder), GFP_KERNEL);
+ if (!tfp410_encoder) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ tfp410_encoder->dpms = DRM_MODE_DPMS_OFF;
+ tfp410_encoder->mod = mod;
+
+ encoder = &tfp410_encoder->base;
+ encoder->possible_crtcs = 1;
+
+ ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret < 0)
+ goto fail;
+
+ drm_encoder_helper_add(encoder, &tfp410_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ tfp410_encoder_destroy(encoder);
+ return NULL;
+}
+
+/*
+ * Connector:
+ */
+
+struct tfp410_connector {
+ struct drm_connector base;
+
+ struct drm_encoder *encoder; /* our connected encoder */
+ struct tfp410_module *mod;
+};
+#define to_tfp410_connector(x) container_of(x, struct tfp410_connector, base)
+
+
+static void tfp410_connector_destroy(struct drm_connector *connector)
+{
+ struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+ drm_connector_cleanup(connector);
+ kfree(tfp410_connector);
+}
+
+static enum drm_connector_status tfp410_connector_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+
+ if (drm_probe_ddc(tfp410_connector->mod->i2c))
+ return connector_status_connected;
+
+ return connector_status_unknown;
+}
+
+static int tfp410_connector_get_modes(struct drm_connector *connector)
+{
+ struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, tfp410_connector->mod->i2c);
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ if (edid) {
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return ret;
+}
+
+static int tfp410_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tilcdc_drm_private *priv = connector->dev->dev_private;
+ /* our only constraints are what the crtc can generate: */
+ return tilcdc_crtc_mode_valid(priv->crtc, mode);
+}
+
+static struct drm_encoder *tfp410_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+ return tfp410_connector->encoder;
+}
+
+static const struct drm_connector_funcs tfp410_connector_funcs = {
+ .destroy = tfp410_connector_destroy,
+ .dpms = drm_helper_connector_dpms,
+ .detect = tfp410_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = {
+ .get_modes = tfp410_connector_get_modes,
+ .mode_valid = tfp410_connector_mode_valid,
+ .best_encoder = tfp410_connector_best_encoder,
+};
+
+static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
+ struct tfp410_module *mod, struct drm_encoder *encoder)
+{
+ struct tfp410_connector *tfp410_connector;
+ struct drm_connector *connector;
+ int ret;
+
+ tfp410_connector = kzalloc(sizeof(*tfp410_connector), GFP_KERNEL);
+ if (!tfp410_connector) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ tfp410_connector->encoder = encoder;
+ tfp410_connector->mod = mod;
+
+ connector = &tfp410_connector->base;
+
+ drm_connector_init(dev, connector, &tfp410_connector_funcs,
+ DRM_MODE_CONNECTOR_DVID);
+ drm_connector_helper_add(connector, &tfp410_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret)
+ goto fail;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ tfp410_connector_destroy(connector);
+ return NULL;
+}
+
+/*
+ * Module:
+ */
+
+static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+{
+ struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ encoder = tfp410_encoder_create(dev, tfp410_mod);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = tfp410_connector_create(dev, tfp410_mod, encoder);
+ if (!connector)
+ return -ENOMEM;
+
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
+
+ return 0;
+}
+
+static void tfp410_destroy(struct tilcdc_module *mod)
+{
+ struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
+
+ if (tfp410_mod->i2c)
+ i2c_put_adapter(tfp410_mod->i2c);
+
+ if (!IS_ERR_VALUE(tfp410_mod->gpio))
+ gpio_free(tfp410_mod->gpio);
+
+ tilcdc_module_cleanup(mod);
+ kfree(tfp410_mod);
+}
+
+static const struct tilcdc_module_ops tfp410_module_ops = {
+ .modeset_init = tfp410_modeset_init,
+ .destroy = tfp410_destroy,
+};
+
+/*
+ * Device:
+ */
+
+static struct of_device_id tfp410_of_match[];
+
+static int tfp410_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *i2c_node;
+ struct tfp410_module *tfp410_mod;
+ struct tilcdc_module *mod;
+ struct pinctrl *pinctrl;
+ uint32_t i2c_phandle;
+ int ret = -EINVAL;
+
+ /* bail out early if no DT data: */
+ if (!node) {
+ dev_err(&pdev->dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ tfp410_mod = kzalloc(sizeof(*tfp410_mod), GFP_KERNEL);
+ if (!tfp410_mod)
+ return -ENOMEM;
+
+ mod = &tfp410_mod->base;
+
+ tilcdc_module_init(mod, "tfp410", &tfp410_module_ops);
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "pins are not configured\n");
+
+ if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
+ dev_err(&pdev->dev, "could not get i2c bus phandle\n");
+ goto fail;
+ }
+
+ i2c_node = of_find_node_by_phandle(i2c_phandle);
+ if (!i2c_node) {
+ dev_err(&pdev->dev, "could not get i2c bus node\n");
+ goto fail;
+ }
+
+ tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
+ if (!tfp410_mod->i2c) {
+ dev_err(&pdev->dev, "could not get i2c\n");
+ goto fail;
+ }
+
+ of_node_put(i2c_node);
+
+ tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
+ 0, NULL);
+ if (IS_ERR_VALUE(tfp410_mod->gpio)) {
+ dev_warn(&pdev->dev, "No power down GPIO\n");
+ } else {
+ ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
+ if (ret) {
+ dev_err(&pdev->dev, "could not get DVI_PDn gpio\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ tfp410_destroy(mod);
+ return ret;
+}
+
+static int tfp410_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct of_device_id tfp410_of_match[] = {
+ { .compatible = "ti,tilcdc,tfp410", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tfp410_of_match);
+
+struct platform_driver tfp410_driver = {
+ .probe = tfp410_probe,
+ .remove = tfp410_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tfp410",
+ .of_match_table = tfp410_of_match,
+ },
+};
+
+int __init tilcdc_tfp410_init(void)
+{
+ return platform_driver_register(&tfp410_driver);
+}
+
+void __exit tilcdc_tfp410_fini(void)
+{
+ platform_driver_unregister(&tfp410_driver);
+}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
new file mode 100644
index 000000000000..5b800f1f6aa5
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_TFP410_H__
+#define __TILCDC_TFP410_H__
+
+/* sub-module for tfp410 dvi adaptor */
+
+int tilcdc_tfp410_init(void);
+void tilcdc_tfp410_fini(void);
+
+#endif /* __TILCDC_TFP410_H__ */
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 52b20b12c83a..9b07b7d44a58 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -158,7 +158,8 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+ bool interruptible)
{
if (interruptible) {
return wait_event_interruptible(bo->event_queue,
@@ -168,7 +169,6 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
return 0;
}
}
-EXPORT_SYMBOL(ttm_bo_wait_unreserved);
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
@@ -213,14 +213,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
return put_count;
}
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence, uint32_t sequence)
{
- struct ttm_bo_global *glob = bo->glob;
int ret;
- while (unlikely(atomic_read(&bo->reserved) != 0)) {
+ while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
/**
* Deadlock avoidance for multi-bo reserving.
*/
@@ -241,26 +240,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
if (no_wait)
return -EBUSY;
- spin_unlock(&glob->lru_lock);
ret = ttm_bo_wait_unreserved(bo, interruptible);
- spin_lock(&glob->lru_lock);
if (unlikely(ret))
return ret;
}
- atomic_set(&bo->reserved, 1);
if (use_sequence) {
+ bool wake_up = false;
/**
* Wake up waiters that may need to recheck for deadlock,
* if we decreased the sequence number.
*/
if (unlikely((bo->val_seq - sequence < (1 << 31))
|| !bo->seq_valid))
- wake_up_all(&bo->event_queue);
+ wake_up = true;
+ /*
+ * In the worst case, with weak memory ordering, these values can be
+ * seen in the wrong order. However, since we call wake_up_all
+ * in that case, this should not pose a problem; the worst case is
+ * that someone accidentally hits -EAGAIN in ttm_bo_reserve when
+ * they see an old value of val_seq. That can only happen if
+ * seq_valid was written before val_seq was, and it just means some
+ * slightly increased cpu usage.
+ */
bo->val_seq = sequence;
bo->seq_valid = true;
+ if (wake_up)
+ wake_up_all(&bo->event_queue);
} else {
bo->seq_valid = false;
}
@@ -289,17 +298,64 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
int put_count = 0;
int ret;
- spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
- sequence);
- if (likely(ret == 0))
+ ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+ sequence);
+ if (likely(ret == 0)) {
+ spin_lock(&glob->lru_lock);
put_count = ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
+ ttm_bo_list_ref_sub(bo, put_count, true);
+ }
- ttm_bo_list_ref_sub(bo, put_count, true);
+ return ret;
+}
+
+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+ bool interruptible, uint32_t sequence)
+{
+ bool wake_up = false;
+ int ret;
+
+ while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+ WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+
+ ret = ttm_bo_wait_unreserved(bo, interruptible);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+ wake_up = true;
+
+ /**
+ * Wake up waiters that may need to recheck for deadlock,
+ * if we decreased the sequence number.
+ */
+ bo->val_seq = sequence;
+ bo->seq_valid = true;
+ if (wake_up)
+ wake_up_all(&bo->event_queue);
+
+ return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+ bool interruptible, uint32_t sequence)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ int put_count, ret;
+
+ ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+ if (likely(!ret)) {
+ spin_lock(&glob->lru_lock);
+ put_count = ttm_bo_del_from_lru(bo);
+ spin_unlock(&glob->lru_lock);
+ ttm_bo_list_ref_sub(bo, put_count, true);
+ }
return ret;
}
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
@@ -511,7 +567,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int ret;
spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
@@ -604,7 +660,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return ret;
spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
/*
* We raced, and lost, someone else holds the reservation now,
@@ -668,7 +724,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+ ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+ if (remove_all && ret) {
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_reserve_nolru(entry, false, false,
+ false, 0);
+ spin_lock(&glob->lru_lock);
+ }
+
if (!ret)
ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
!remove_all);
@@ -816,7 +879,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
if (!ret)
break;
}
@@ -1797,7 +1860,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &glob->swap_lru, swap) {
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
if (!ret)
break;
}
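
Note on the reservation rework above: ttm_bo_reserve_nolru() no longer needs lru_lock because atomic_xchg() both tests and claims the reserved flag; the LRU removal then happens under the lock only on success. A stripped-down sketch of the fast path (deadlock avoidance via val_seq omitted; not the exact upstream code):

    static int reserve_sketch(struct ttm_buffer_object *bo,
                              bool interruptible, bool no_wait)
    {
        /* old value != 0 means someone else already holds the reservation */
        while (atomic_xchg(&bo->reserved, 1) != 0) {
            if (no_wait)
                return -EBUSY;
            /* sleep until the holder clears the flag and wakes event_queue */
            if (interruptible) {
                int ret = wait_event_interruptible(bo->event_queue,
                        atomic_read(&bo->reserved) == 0);
                if (ret)
                    return ret;
            } else {
                wait_event(bo->event_queue,
                           atomic_read(&bo->reserved) == 0);
            }
        }
        return 0;
    }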
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index cd9e4523dc56..7b90def15674 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -82,22 +82,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
}
}
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
- struct ttm_buffer_object *bo)
-{
- struct ttm_bo_global *glob = bo->glob;
- int ret;
-
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
- ret = ttm_bo_wait_unreserved(bo, true);
- spin_lock(&glob->lru_lock);
- if (unlikely(ret != 0))
- ttm_eu_backoff_reservation_locked(list);
- return ret;
-}
-
-
void ttm_eu_backoff_reservation(struct list_head *list)
{
struct ttm_validate_buffer *entry;
@@ -145,47 +129,65 @@ int ttm_eu_reserve_buffers(struct list_head *list)
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
-retry:
spin_lock(&glob->lru_lock);
val_seq = entry->bo->bdev->val_seq++;
+retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
-retry_this_bo:
- ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+ /* already slowpath reserved? */
+ if (entry->reserved)
+ continue;
+
+ ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
switch (ret) {
case 0:
break;
case -EBUSY:
- ret = ttm_eu_wait_unreserved_locked(list, bo);
- if (unlikely(ret != 0)) {
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
- return ret;
- }
- goto retry_this_bo;
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_reserve_nolru(bo, true, false,
+ true, val_seq);
+ spin_lock(&glob->lru_lock);
+ if (!ret)
+ break;
+
+ if (unlikely(ret != -EAGAIN))
+ goto err;
+
+ /* fallthrough */
case -EAGAIN:
ttm_eu_backoff_reservation_locked(list);
+
+ /*
+ * temporarily increase the sequence number every retry
+ * to prevent us from seeing our old reservation
+ * sequence when someone else has reserved the buffer
+ * but hasn't updated the seq_valid/val_seq members yet.
+ */
+ val_seq = entry->bo->bdev->val_seq++;
+
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
- ret = ttm_bo_wait_unreserved(bo, true);
+ ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
if (unlikely(ret != 0))
return ret;
+ spin_lock(&glob->lru_lock);
+ entry->reserved = true;
+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ ret = -EBUSY;
+ goto err;
+ }
goto retry;
default:
- ttm_eu_backoff_reservation_locked(list);
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
- return ret;
+ goto err;
}
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- ttm_eu_backoff_reservation_locked(list);
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
- return -EBUSY;
+ ret = -EBUSY;
+ goto err;
}
}
@@ -194,6 +196,12 @@ retry_this_bo:
ttm_eu_list_ref_sub(list);
return 0;
+
+err:
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
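
Condensed, the new ttm_eu_reserve_buffers() flow is: try every buffer without blocking; on contention back off all reservations already taken, bump val_seq so stale sequence values cannot be mistaken for ours, slow-path reserve the contended buffer, and restart the list walk. A rough fragment of that loop (-EBUSY handling, locking and error unwinding omitted; see the patch above for the real code):

    retry:
        list_for_each_entry(entry, list, head) {
            if (entry->reserved)    /* kept from a previous slowpath pass */
                continue;
            ret = ttm_bo_reserve_nolru(entry->bo, true, true, true, val_seq);
            if (ret == -EAGAIN) {
                ttm_eu_backoff_reservation_locked(list);
                val_seq = entry->bo->bdev->val_seq++;   /* fresh ticket */
                ret = ttm_bo_reserve_slowpath_nolru(entry->bo, true, val_seq);
                if (ret)
                    return ret;
                entry->reserved = true;
                goto retry;
            } else if (ret) {
                goto err;
            }
            entry->reserved = true;
        }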
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7d759a430294..5e93a52d4f2c 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -296,7 +296,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
- swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
+ swap_space = file_inode(swap_storage)->i_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
from_page = shmem_read_mapping_page(swap_space, i);
@@ -345,7 +345,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
} else
swap_storage = persistent_swap_storage;
- swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
+ swap_space = file_inode(swap_storage)->i_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
from_page = ttm->pages[i];
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 87aa5f5d3c88..cc6d90f28c71 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -75,6 +75,8 @@ struct udl_framebuffer {
struct drm_framebuffer base;
struct udl_gem_object *obj;
bool active_16; /* active on the 16-bit channel */
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
};
#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index d4ab3beaada0..9f4be3d4a02e 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -22,9 +22,9 @@
#include <drm/drm_fb_helper.h>
-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
+#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */
-static int fb_defio = 1; /* Optionally enable experimental fb_defio mmap support */
+static int fb_defio = 0; /* Optionally enable experimental fb_defio mmap support */
static int fb_bpp = 16;
module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
@@ -153,6 +153,9 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
struct urb *urb;
int aligned_x;
int bpp = (fb->base.bits_per_pixel / 8);
+ int x2, y2;
+ bool store_for_later = false;
+ unsigned long flags;
if (!fb->active_16)
return 0;
@@ -169,8 +172,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
}
}
- start_cycles = get_cycles();
-
aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
x = aligned_x;
@@ -180,19 +181,53 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
(y + height > fb->base.height))
return -EINVAL;
+ /* if we are in an atomic context just store the damage info;
+ we can't test in_atomic() once the spin lock is held */
+ if (in_atomic())
+ store_for_later = true;
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+
+ spin_lock_irqsave(&fb->dirty_lock, flags);
+
+ if (fb->y1 < y)
+ y = fb->y1;
+ if (fb->y2 > y2)
+ y2 = fb->y2;
+ if (fb->x1 < x)
+ x = fb->x1;
+ if (fb->x2 > x2)
+ x2 = fb->x2;
+
+ if (store_for_later) {
+ fb->x1 = x;
+ fb->x2 = x2;
+ fb->y1 = y;
+ fb->y2 = y2;
+ spin_unlock_irqrestore(&fb->dirty_lock, flags);
+ return 0;
+ }
+
+ fb->x1 = fb->y1 = INT_MAX;
+ fb->x2 = fb->y2 = 0;
+
+ spin_unlock_irqrestore(&fb->dirty_lock, flags);
+ start_cycles = get_cycles();
+
urb = udl_get_urb(dev);
if (!urb)
return 0;
cmd = urb->transfer_buffer;
- for (i = y; i < y + height ; i++) {
+ for (i = y; i <= y2 ; i++) {
const int line_offset = fb->base.pitches[0] * i;
const int byte_offset = line_offset + (x * bpp);
const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
if (udl_render_hline(dev, bpp, &urb,
(char *) fb->obj->vmapping,
&cmd, byte_offset, dev_byte_offset,
- width * bpp,
+ (x2 - x + 1) * bpp,
&bytes_identical, &bytes_sent))
goto error;
}
@@ -422,7 +457,6 @@ static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
static const struct drm_framebuffer_funcs udlfb_funcs = {
.destroy = udl_user_framebuffer_destroy,
.dirty = udl_user_framebuffer_dirty,
- .create_handle = NULL,
};
@@ -434,16 +468,18 @@ udl_framebuffer_init(struct drm_device *dev,
{
int ret;
+ spin_lock_init(&ufb->dirty_lock);
ufb->obj = obj;
- ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
+ ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
return ret;
}
-static int udlfb_create(struct udl_fbdev *ufbdev,
+static int udlfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
struct drm_device *dev = ufbdev->helper.dev;
struct fb_info *info;
struct device *device = &dev->usbdev->dev;
@@ -521,27 +557,10 @@ out:
return ret;
}
-static int udl_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = udlfb_create(ufbdev, sizes);
- if (ret)
- return ret;
-
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
.gamma_set = udl_crtc_fb_gamma_set,
.gamma_get = udl_crtc_fb_gamma_get,
- .fb_probe = udl_fb_find_or_create_single,
+ .fb_probe = udlfb_create,
};
static void udl_fbdev_destroy(struct drm_device *dev,
@@ -556,6 +575,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
framebuffer_release(info);
}
drm_fb_helper_fini(&ufbdev->helper);
+ drm_framebuffer_unregister_private(&ufbdev->ufb.base);
drm_framebuffer_cleanup(&ufbdev->ufb.base);
drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
}
@@ -583,6 +603,10 @@ int udl_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
return 0;
}
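
Note on the damage handling added above: callers in atomic context only merge their rectangle into the one stored in the framebuffer, while process-context callers take over the accumulated rectangle and reset it before rendering. The accumulate-or-flush step, slightly simplified into a helper (a sketch using the same fields as the patch; the function name is hypothetical):

    /* merge new damage into fb and decide whether to render now */
    static bool merge_damage(struct udl_framebuffer *fb, bool defer,
                             int *x, int *y, int *x2, int *y2)
    {
        unsigned long flags;

        spin_lock_irqsave(&fb->dirty_lock, flags);

        /* union of the requested rect and anything stored earlier */
        *x  = min(*x,  fb->x1);
        *y  = min(*y,  fb->y1);
        *x2 = max(*x2, fb->x2);
        *y2 = max(*y2, fb->y2);

        if (defer) {            /* atomic context: keep it for later */
            fb->x1 = *x;  fb->x2 = *x2;
            fb->y1 = *y;  fb->y2 = *y2;
        } else {                /* process context: consume and reset */
            fb->x1 = fb->y1 = INT_MAX;
            fb->x2 = fb->y2 = 0;
        }

        spin_unlock_irqrestore(&fb->dirty_lock, flags);
        return !defer;          /* true: caller should render now */
    }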
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index afd212c99216..3816270ba49b 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -137,7 +137,7 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
if (obj->pages == NULL)
return -ENOMEM;
- inode = obj->base.filp->f_path.dentry->d_inode;
+ inode = file_inode(obj->base.filp);
mapping = inode->i_mapping;
gfpmask |= mapping_gfp_mask(mapping);
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 142fee5f983f..f343db73e095 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -75,15 +75,19 @@ static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
}
#endif
-static inline u16 pixel32_to_be16p(const uint8_t *pixel)
+static inline u16 pixel32_to_be16(const uint32_t pixel)
{
- uint32_t pix = *(uint32_t *)pixel;
- u16 retval;
+ return (((pixel >> 3) & 0x001f) |
+ ((pixel >> 5) & 0x07e0) |
+ ((pixel >> 8) & 0xf800));
+}
- retval = (((pix >> 3) & 0x001f) |
- ((pix >> 5) & 0x07e0) |
- ((pix >> 8) & 0xf800));
- return retval;
+static bool pixel_repeats(const void *pixel, const uint32_t repeat, int bpp)
+{
+ if (bpp == 2)
+ return *(const uint16_t *)pixel == repeat;
+ else
+ return *(const uint32_t *)pixel == repeat;
}
/*
@@ -152,29 +156,33 @@ static void udl_compress_hline16(
prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
while (pixel < cmd_pixel_end) {
- const u8 * const repeating_pixel = pixel;
-
- if (bpp == 2)
- *(uint16_t *)cmd = cpu_to_be16p((uint16_t *)pixel);
- else if (bpp == 4)
- *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16p(pixel));
+ const u8 *const start = pixel;
+ u32 repeating_pixel;
+
+ if (bpp == 2) {
+ repeating_pixel = *(uint16_t *)pixel;
+ *(uint16_t *)cmd = cpu_to_be16(repeating_pixel);
+ } else {
+ repeating_pixel = *(uint32_t *)pixel;
+ *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16(repeating_pixel));
+ }
cmd += 2;
pixel += bpp;
if (unlikely((pixel < cmd_pixel_end) &&
- (!memcmp(pixel, repeating_pixel, bpp)))) {
+ (pixel_repeats(pixel, repeating_pixel, bpp)))) {
/* go back and fill in raw pixel count */
- *raw_pixels_count_byte = (((repeating_pixel -
+ *raw_pixels_count_byte = (((start -
raw_pixel_start) / bpp) + 1) & 0xFF;
- while ((pixel < cmd_pixel_end)
- && (!memcmp(pixel, repeating_pixel, bpp))) {
+ while ((pixel < cmd_pixel_end) &&
+ (pixel_repeats(pixel, repeating_pixel, bpp))) {
pixel += bpp;
}
/* immediately after raw data is repeat byte */
- *cmd++ = (((pixel - repeating_pixel) / bpp) - 1) & 0xFF;
+ *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
/* Then start another raw pixel span */
raw_pixel_start = pixel;
@@ -223,6 +231,8 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
+ BUG_ON(!(bpp == 2 || bpp == 4));
+
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
line_end = next_pixel + byte_width;
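
For reference, pixel32_to_be16() above converts an XRGB8888 pixel to RGB565 by keeping the top 5/6/5 bits of each channel. A worked example (illustration only, not part of the patch):

    /*
     * pixel = 0x00FF8040  (R = 0xff, G = 0x80, B = 0x40)
     *   (pixel >> 8) & 0xf800 = 0xf800   red   -> bits 15:11
     *   (pixel >> 5) & 0x07e0 = 0x0400   green -> bits 10:5
     *   (pixel >> 3) & 0x001f = 0x0008   blue  -> bits 4:0
     * pixel32_to_be16(0x00FF8040) == 0xfc08 (host order, before cpu_to_be16)
     */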
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index c0f1cc7f5ca9..d0ab3fb32acd 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -120,7 +120,6 @@ int via_driver_unload(struct drm_device *dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
- idr_remove_all(&dev_priv->object_idr);
idr_destroy(&dev_priv->object_idr);
kfree(dev_priv);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 0d55432e02a2..0ab93ff09873 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -148,17 +148,10 @@ int via_mem_alloc(struct drm_device *dev, void *data,
if (retval)
goto fail_alloc;
-again:
- if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
- retval = -ENOMEM;
- goto fail_idr;
- }
-
- retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
- if (retval == -EAGAIN)
- goto again;
- if (retval)
+ retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
+ if (retval < 0)
goto fail_idr;
+ user_key = retval;
list_add(&item->owner_list, &file_priv->obj_list);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 161f8b2549aa..07dfd823cc30 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -829,7 +829,7 @@ static void vmw_lastclose(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
set.crtc = crtc;
- ret = crtc->funcs->set_config(&set);
+ ret = drm_mode_set_config_internal(&set);
WARN_ON(ret != 0);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index d9fbbe191071..c509d40c4897 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -131,7 +131,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_vmw_rect __user *clips_ptr;
struct drm_vmw_rect *clips = NULL;
- struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
struct vmw_framebuffer *vfb;
struct vmw_resource *res;
uint32_t num_clips;
@@ -163,19 +163,15 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
goto out_no_copy;
}
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (unlikely(ret != 0)) {
- ret = -ERESTARTSYS;
- goto out_no_mode_mutex;
- }
+ drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, arg->fb_id);
+ if (!fb) {
DRM_ERROR("Invalid framebuffer id.\n");
ret = -EINVAL;
goto out_no_fb;
}
- vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
+ vfb = vmw_framebuffer_to_vfb(fb);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
@@ -199,9 +195,9 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
out_no_surface:
ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
+ drm_framebuffer_unreference(fb);
out_no_fb:
- mutex_unlock(&dev->mode_config.mutex);
-out_no_mode_mutex:
+ drm_modeset_unlock_all(dev);
out_no_copy:
kfree(clips);
out_clips:
@@ -220,7 +216,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_vmw_rect __user *clips_ptr;
struct drm_vmw_rect *clips = NULL;
- struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
struct vmw_framebuffer *vfb;
uint32_t num_clips;
int ret;
@@ -251,24 +247,20 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
goto out_no_copy;
}
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (unlikely(ret != 0)) {
- ret = -ERESTARTSYS;
- goto out_no_mode_mutex;
- }
+ drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, arg->fb_id);
+ if (!fb) {
DRM_ERROR("Invalid framebuffer id.\n");
ret = -EINVAL;
goto out_no_fb;
}
- vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
+ vfb = vmw_framebuffer_to_vfb(fb);
if (!vfb->dmabuf) {
DRM_ERROR("Framebuffer not dmabuf backed.\n");
ret = -EINVAL;
- goto out_no_fb;
+ goto out_no_ttm_lock;
}
ret = ttm_read_lock(&vmaster->lock, true);
@@ -281,9 +273,9 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
+ drm_framebuffer_unreference(fb);
out_no_fb:
- mutex_unlock(&dev->mode_config.mutex);
-out_no_mode_mutex:
+ drm_modeset_unlock_all(dev);
out_no_copy:
kfree(clips);
out_clips:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 54743943d8b3..3e3c7ab33ca2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -180,16 +180,29 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
struct vmw_dma_buffer *dmabuf = NULL;
int ret;
+ /*
+ * FIXME: Unclear whether there's any global state touched by the
+ * cursor_set function, especially vmw_cursor_update_position looks
+ * suspicious. For now take the easy route and reacquire all locks. We
+ * can do this since the caller in the drm core doesn't check anything
+ * which is protected by any locks.
+ */
+ mutex_unlock(&crtc->mutex);
+ drm_modeset_lock_all(dev_priv->dev);
+
/* A lot of the code assumes this */
- if (handle && (width != 64 || height != 64))
- return -EINVAL;
+ if (handle && (width != 64 || height != 64)) {
+ ret = -EINVAL;
+ goto out;
+ }
if (handle) {
ret = vmw_user_lookup_handle(dev_priv, tfile,
handle, &surface, &dmabuf);
if (ret) {
DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
}
@@ -197,7 +210,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
if (surface && !surface->snooper.image) {
DRM_ERROR("surface not suitable for cursor\n");
vmw_surface_unreference(&surface);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* takedown old cursor */
@@ -225,14 +239,20 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
du->hotspot_x, du->hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
- return 0;
+ ret = 0;
+ goto out;
}
vmw_cursor_update_position(dev_priv, true,
du->cursor_x + du->hotspot_x,
du->cursor_y + du->hotspot_y);
- return 0;
+ ret = 0;
+out:
+ drm_modeset_unlock_all(dev_priv->dev);
+ mutex_lock(&crtc->mutex);
+
+ return ret;
}
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -244,10 +264,23 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
du->cursor_x = x + crtc->x;
du->cursor_y = y + crtc->y;
+ /*
+ * FIXME: Unclear whether there's any global state touched by the
+ * cursor_set function, especially vmw_cursor_update_position looks
+ * suspicious. For now take the easy route and reacquire all locks. We
+ * can do this since the caller in the drm core doesn't check anything
+ * which is protected by any locks.
+ */
+ mutex_unlock(&crtc->mutex);
+ drm_modeset_lock_all(dev_priv->dev);
+
vmw_cursor_update_position(dev_priv, shown,
du->cursor_x + du->hotspot_x,
du->cursor_y + du->hotspot_y);
+ drm_modeset_unlock_all(dev_priv->dev);
+ mutex_lock(&crtc->mutex);
+
return 0;
}
@@ -373,16 +406,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
* Generic framebuffer code
*/
-int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- if (handle)
- *handle = 0;
-
- return 0;
-}
-
/*
* Surface framebuffer code
*/
@@ -610,7 +633,6 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
.dirty = vmw_framebuffer_surface_dirty,
- .create_handle = vmw_framebuffer_create_handle,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
@@ -681,14 +703,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- ret = drm_framebuffer_init(dev, &vfbs->base.base,
- &vmw_framebuffer_surface_funcs);
- if (ret)
- goto out_err2;
-
if (!vmw_surface_reference(surface)) {
DRM_ERROR("failed to reference surface %p\n", surface);
- goto out_err3;
+ ret = -EINVAL;
+ goto out_err2;
}
/* XXX get the first 3 from the surface info */
@@ -707,10 +725,15 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
*out = &vfbs->base;
+ ret = drm_framebuffer_init(dev, &vfbs->base.base,
+ &vmw_framebuffer_surface_funcs);
+ if (ret)
+ goto out_err3;
+
return 0;
out_err3:
- drm_framebuffer_cleanup(&vfbs->base.base);
+ vmw_surface_unreference(&surface);
out_err2:
kfree(vfbs);
out_err1:
@@ -960,7 +983,6 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
.destroy = vmw_framebuffer_dmabuf_destroy,
.dirty = vmw_framebuffer_dmabuf_dirty,
- .create_handle = vmw_framebuffer_create_handle,
};
/**
@@ -1053,14 +1075,10 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
goto out_err1;
}
- ret = drm_framebuffer_init(dev, &vfbd->base.base,
- &vmw_framebuffer_dmabuf_funcs);
- if (ret)
- goto out_err2;
-
if (!vmw_dmabuf_reference(dmabuf)) {
DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
- goto out_err3;
+ ret = -EINVAL;
+ goto out_err2;
}
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
@@ -1077,10 +1095,15 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
vfbd->base.user_handle = mode_cmd->handle;
*out = &vfbd->base;
+ ret = drm_framebuffer_init(dev, &vfbd->base.base,
+ &vmw_framebuffer_dmabuf_funcs);
+ if (ret)
+ goto out_err3;
+
return 0;
out_err3:
- drm_framebuffer_cleanup(&vfbd->base.base);
+ vmw_dmabuf_unreference(&dmabuf);
out_err2:
kfree(vfbd);
out_err1:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e01a17b407b2..bc784254e78e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -177,17 +177,16 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
BUG_ON(res->id != -1);
- do {
- if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
- return -ENOMEM;
-
- write_lock(&dev_priv->resource_lock);
- ret = idr_get_new_above(idr, res, 1, &res->id);
- write_unlock(&dev_priv->resource_lock);
+ idr_preload(GFP_KERNEL);
+ write_lock(&dev_priv->resource_lock);
- } while (ret == -EAGAIN);
+ ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ res->id = ret;
- return ret;
+ write_unlock(&dev_priv->resource_lock);
+ idr_preload_end();
+ return ret < 0 ? ret : 0;
}
/**
@@ -959,13 +958,13 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (new_backup && new_backup != res->backup) {
if (res->backup) {
- BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+ BUG_ON(!ttm_bo_is_reserved(&res->backup->base));
list_del_init(&res->mob_head);
vmw_dmabuf_unreference(&res->backup);
}
res->backup = vmw_dmabuf_reference(new_backup);
- BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+ BUG_ON(!ttm_bo_is_reserved(&new_backup->base));
list_add_tail(&res->mob_head, &new_backup->res_list);
}
if (new_backup)
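
Both idr conversions in this series (via_mem_alloc() earlier and vmw_resource_alloc_id() here) replace the idr_pre_get()/idr_get_new_above() retry loop with the newer interface: idr_alloc() returns the new id directly, and idr_preload()/idr_preload_end() preallocate so the GFP_NOWAIT allocation can run under a spinlock. The general pattern, as a sketch with hypothetical names:

    static int assign_id(struct idr *my_idr, spinlock_t *my_lock, void *obj)
    {
        int id;

        idr_preload(GFP_KERNEL);    /* may sleep: preallocate outside the lock */
        spin_lock(my_lock);

        /* ids start at 1; end == 0 means no upper bound */
        id = idr_alloc(my_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(my_lock);
        idr_preload_end();

        return id;  /* negative errno on failure, otherwise the new id */
    }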
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
deleted file mode 100644
index 419917955bf6..000000000000
--- a/drivers/gpu/stub/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-config STUB_POULSBO
- tristate "Intel GMA500 Stub Driver"
- depends on PCI
- depends on NET # for THERMAL
- # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
- # but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select BACKLIGHT_CLASS_DEVICE if ACPI
- select VIDEO_OUTPUT_CONTROL if ACPI
- select INPUT if ACPI
- select ACPI_VIDEO if ACPI
- select THERMAL if ACPI
- help
- Choose this option if you have a system that has Intel GMA500
- (Poulsbo) integrated graphics. If M is selected, the module will
- be called Poulsbo. This driver is a stub driver for Poulsbo that
- will call poulsbo.ko to enable the acpi backlight control sysfs
- entry file because there have no poulsbo native driver can support
- intel opregion.
diff --git a/drivers/gpu/stub/Makefile b/drivers/gpu/stub/Makefile
deleted file mode 100644
index cd940cc9d36d..000000000000
--- a/drivers/gpu/stub/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_STUB_POULSBO) += poulsbo.o
diff --git a/drivers/gpu/stub/poulsbo.c b/drivers/gpu/stub/poulsbo.c
deleted file mode 100644
index 7edfd27b8dee..000000000000
--- a/drivers/gpu/stub/poulsbo.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Intel Poulsbo Stub driver
- *
- * Copyright (C) 2010 Novell <jlee@novell.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/acpi.h>
-#include <acpi/video.h>
-
-#define DRIVER_NAME "poulsbo"
-
-enum {
- CHIP_PSB_8108 = 0,
- CHIP_PSB_8109 = 1,
-};
-
-static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
- {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
- {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
- {0, 0, 0}
-};
-
-static int poulsbo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- return acpi_video_register();
-}
-
-static void poulsbo_remove(struct pci_dev *pdev)
-{
- acpi_video_unregister();
-}
-
-static struct pci_driver poulsbo_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = poulsbo_probe,
- .remove = poulsbo_remove,
-};
-
-static int __init poulsbo_init(void)
-{
- return pci_register_driver(&poulsbo_driver);
-}
-
-static void __exit poulsbo_exit(void)
-{
- pci_unregister_driver(&poulsbo_driver);
-}
-
-module_init(poulsbo_init);
-module_exit(poulsbo_exit);
-
-MODULE_AUTHOR("Lee, Chun-Yi <jlee@novell.com>");
-MODULE_DESCRIPTION("Poulsbo Stub Driver");
-MODULE_LICENSE("GPL");
-
-MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index fa60add0ff63..cf787e1d9322 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -25,6 +25,7 @@
#include <linux/fb.h>
#include <linux/pci.h>
+#include <linux/console.h>
#include <linux/vga_switcheroo.h>
#include <linux/vgaarb.h>
@@ -337,8 +338,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (new_client->fb_info) {
struct fb_event event;
+ console_lock();
event.info = new_client->fb_info;
fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
+ console_unlock();
}
ret = vgasr_priv.handler->switchto(new_client->id);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index e7d6a13ec6a6..5f07d85c4189 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -320,7 +320,7 @@ config HID_LOGITECH_DJ
Say Y if you want support for Logitech Unifying receivers and devices.
Unifying receivers are capable of pairing up to 6 Logitech compliant
devices to the same receiver. Without this driver it will be handled by
- generic USB_HID driver and all incomming events will be multiplexed
+ generic USB_HID driver and all incoming events will be multiplexed
into a single mouse and a single keyboard device.
config LOGITECH_FF
@@ -596,6 +596,12 @@ config HID_SPEEDLINK
---help---
Support for Speedlink Vicious and Divine Cezanne mouse.
+config HID_STEELSERIES
+ tristate "Steelseries SRW-S1 steering wheel support"
+ depends on USB_HID
+ ---help---
+ Support for Steelseries SRW-S1 steering wheel
+
config HID_SUNPLUS
tristate "Sunplus wireless desktop"
depends on USB_HID
@@ -655,6 +661,16 @@ config HID_TOPSEED
Say Y if you have a TopSeed Cyberlink or BTC Emprex or Conceptronic
CLLRCMCE remote control.
+config HID_THINGM
+ tristate "ThingM blink(1) USB RGB LED"
+ depends on USB_HID
+ depends on LEDS_CLASS
+ ---help---
+ Support for the ThingM blink(1) USB RGB LED. This driver registers a
+ Linux LED class instance, plus additional sysfs attributes to control
+ RGB colors, fade time and playing. The device is exposed through hidraw
+ to access other functions.
+
config HID_THRUSTMASTER
tristate "ThrustMaster devices support"
depends on USB_HID
@@ -719,7 +735,7 @@ config HID_ZYDACRON
config HID_SENSOR_HUB
tristate "HID Sensors framework support"
- depends on USB_HID
+ depends on USB_HID && GENERIC_HARDIRQS
select MFD_CORE
default n
---help---
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index b62215716b2f..72d1b0bc0a97 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -101,8 +101,10 @@ obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o
+obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
+obj-$(CONFIG_HID_THINGM) += hid-thingm.o
obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 0a239885e67c..7c5507e94820 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -146,17 +146,6 @@ static struct hid_driver a4_driver = {
.probe = a4_probe,
.remove = a4_remove,
};
+module_hid_driver(a4_driver);
-static int __init a4_init(void)
-{
- return hid_register_driver(&a4_driver);
-}
-
-static void __exit a4_exit(void)
-{
- hid_unregister_driver(&a4_driver);
-}
-
-module_init(a4_init);
-module_exit(a4_exit);
MODULE_LICENSE("GPL");
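
The init/exit boilerplate removed here (and in the hid drivers that follow) is replaced by module_hid_driver(), a convenience macro from <linux/hid.h>. Roughly, it expands to module_driver() with the HID register/unregister pair, generating the same module_init()/module_exit() functions that are being deleted:

    /* approximate expansion; see include/linux/hid.h for the real definition */
    #define module_hid_driver(__hid_driver) \
        module_driver(__hid_driver, hid_register_driver, \
                      hid_unregister_driver)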
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index d0f7662aacca..320a958d4139 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -555,23 +555,6 @@ static struct hid_driver apple_driver = {
.input_mapping = apple_input_mapping,
.input_mapped = apple_input_mapped,
};
+module_hid_driver(apple_driver);
-static int __init apple_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&apple_driver);
- if (ret)
- pr_err("can't register apple driver\n");
-
- return ret;
-}
-
-static void __exit apple_exit(void)
-{
- hid_unregister_driver(&apple_driver);
-}
-
-module_init(apple_init);
-module_exit(apple_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-aureal.c b/drivers/hid/hid-aureal.c
index 7968187ddf7b..340ba9d394a0 100644
--- a/drivers/hid/hid-aureal.c
+++ b/drivers/hid/hid-aureal.c
@@ -37,17 +37,6 @@ static struct hid_driver aureal_driver = {
.id_table = aureal_devices,
.report_fixup = aureal_report_fixup,
};
+module_hid_driver(aureal_driver);
-static int __init aureal_init(void)
-{
- return hid_register_driver(&aureal_driver);
-}
-
-static void __exit aureal_exit(void)
-{
- hid_unregister_driver(&aureal_driver);
-}
-
-module_init(aureal_init);
-module_exit(aureal_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index 5be858dd9a15..62f0cee032ba 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -192,19 +192,7 @@ static struct hid_driver ax_driver = {
.probe = ax_probe,
.remove = ax_remove,
};
-
-static int __init ax_init(void)
-{
- return hid_register_driver(&ax_driver);
-}
-
-static void __exit ax_exit(void)
-{
- hid_unregister_driver(&ax_driver);
-}
-
-module_init(ax_init);
-module_exit(ax_exit);
+module_hid_driver(ax_driver);
MODULE_AUTHOR("Sergei Kolzun");
MODULE_DESCRIPTION("Force feedback support for ACRUX game controllers");
diff --git a/drivers/hid/hid-belkin.c b/drivers/hid/hid-belkin.c
index a1a5a12c3a6b..cc4cf138bef5 100644
--- a/drivers/hid/hid-belkin.c
+++ b/drivers/hid/hid-belkin.c
@@ -86,17 +86,6 @@ static struct hid_driver belkin_driver = {
.input_mapping = belkin_input_mapping,
.probe = belkin_probe,
};
+module_hid_driver(belkin_driver);
-static int __init belkin_init(void)
-{
- return hid_register_driver(&belkin_driver);
-}
-
-static void __exit belkin_exit(void)
-{
- hid_unregister_driver(&belkin_driver);
-}
-
-module_init(belkin_init);
-module_exit(belkin_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index af034d3d9256..1bdcccc54a1d 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -69,17 +69,6 @@ static struct hid_driver ch_driver = {
.report_fixup = ch_report_fixup,
.input_mapping = ch_input_mapping,
};
+module_hid_driver(ch_driver);
-static int __init ch_init(void)
-{
- return hid_register_driver(&ch_driver);
-}
-
-static void __exit ch_exit(void)
-{
- hid_unregister_driver(&ch_driver);
-}
-
-module_init(ch_init);
-module_exit(ch_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index a2abb8e15727..b613d5a79684 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -70,17 +70,6 @@ static struct hid_driver ch_driver = {
.id_table = ch_devices,
.input_mapping = ch_input_mapping,
};
+module_hid_driver(ch_driver);
-static int __init ch_init(void)
-{
- return hid_register_driver(&ch_driver);
-}
-
-static void __exit ch_exit(void)
-{
- hid_unregister_driver(&ch_driver);
-}
-
-module_init(ch_init);
-module_exit(ch_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index eb2ee11b6412..512b01c04ea7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -729,7 +729,7 @@ static int hid_scan_report(struct hid_device *hid)
item.type == HID_ITEM_TYPE_MAIN &&
item.tag == HID_MAIN_ITEM_TAG_BEGIN_COLLECTION &&
(item_udata(&item) & 0xff) == HID_COLLECTION_PHYSICAL &&
- hid->bus == BUS_USB)
+ (hid->bus == BUS_USB || hid->bus == BUS_I2C))
hid->group = HID_GROUP_SENSOR_HUB;
}
@@ -1195,6 +1195,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
{
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
+ struct hid_driver *hdrv;
unsigned int a;
int rsize, csize = size;
u8 *cdata = data;
@@ -1231,6 +1232,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
if (hid->claimed != HID_CLAIMED_HIDRAW) {
for (a = 0; a < report->maxfield; a++)
hid_input_field(hid, report->field[a], cdata, interrupt);
+ hdrv = hid->driver;
+ if (hdrv && hdrv->report)
+ hdrv->report(hid, report);
}
if (hid->claimed & HID_CLAIMED_INPUT)
@@ -1599,6 +1603,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
@@ -1697,7 +1702,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
@@ -2070,6 +2077,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2228,6 +2236,14 @@ bool hid_ignore(struct hid_device *hdev)
hdev->type != HID_TYPE_USBMOUSE)
return true;
break;
+ case USB_VENDOR_ID_VELLEMAN:
+ /* These are not HID devices. They are handled by comedi. */
+ if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) ||
+ (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
+ return true;
+ break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 3e159a50dac7..c4ef3bc726e3 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -144,17 +144,6 @@ static struct hid_driver cp_driver = {
.event = cp_event,
.probe = cp_probe,
};
+module_hid_driver(cp_driver);
-static int __init cp_init(void)
-{
- return hid_register_driver(&cp_driver);
-}
-
-static void __exit cp_exit(void)
-{
- hid_unregister_driver(&cp_driver);
-}
-
-module_init(cp_init);
-module_exit(cp_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index e832f44ae383..0fe8f65ef01a 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -297,17 +297,6 @@ static struct hid_driver dr_driver = {
.report_fixup = dr_report_fixup,
.probe = dr_probe,
};
+module_hid_driver(dr_driver);
-static int __init dr_init(void)
-{
- return hid_register_driver(&dr_driver);
-}
-
-static void __exit dr_exit(void)
-{
- hid_unregister_driver(&dr_driver);
-}
-
-module_init(dr_init);
-module_exit(dr_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 79d0c61e7214..d0bd13b62dc2 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -41,17 +41,6 @@ static struct hid_driver elecom_driver = {
.id_table = elecom_devices,
.report_fixup = elecom_report_fixup
};
+module_hid_driver(elecom_driver);
-static int __init elecom_init(void)
-{
- return hid_register_driver(&elecom_driver);
-}
-
-static void __exit elecom_exit(void)
-{
- hid_unregister_driver(&elecom_driver);
-}
-
-module_init(elecom_init);
-module_exit(elecom_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
index 2630d483d262..2e093ab99b43 100644
--- a/drivers/hid/hid-emsff.c
+++ b/drivers/hid/hid-emsff.c
@@ -150,18 +150,7 @@ static struct hid_driver ems_driver = {
.id_table = ems_devices,
.probe = ems_probe,
};
+module_hid_driver(ems_driver);
-static int ems_init(void)
-{
- return hid_register_driver(&ems_driver);
-}
-
-static void ems_exit(void)
-{
- hid_unregister_driver(&ems_driver);
-}
-
-module_init(ems_init);
-module_exit(ems_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ezkey.c b/drivers/hid/hid-ezkey.c
index 6540af2871a7..212ac6be2451 100644
--- a/drivers/hid/hid-ezkey.c
+++ b/drivers/hid/hid-ezkey.c
@@ -76,17 +76,6 @@ static struct hid_driver ez_driver = {
.input_mapping = ez_input_mapping,
.event = ez_event,
};
+module_hid_driver(ez_driver);
-static int __init ez_init(void)
-{
- return hid_register_driver(&ez_driver);
-}
-
-static void __exit ez_exit(void)
-{
- hid_unregister_driver(&ez_driver);
-}
-
-module_init(ez_init);
-module_exit(ez_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c
index f1e1bcf67427..04d2e6aca778 100644
--- a/drivers/hid/hid-gaff.c
+++ b/drivers/hid/hid-gaff.c
@@ -176,17 +176,6 @@ static struct hid_driver ga_driver = {
.id_table = ga_devices,
.probe = ga_probe,
};
+module_hid_driver(ga_driver);
-static int __init ga_init(void)
-{
- return hid_register_driver(&ga_driver);
-}
-
-static void __exit ga_exit(void)
-{
- hid_unregister_driver(&ga_driver);
-}
-
-module_init(ga_init);
-module_exit(ga_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c
index a8b3148e03a2..e288a4a06fe8 100644
--- a/drivers/hid/hid-generic.c
+++ b/drivers/hid/hid-generic.c
@@ -34,19 +34,7 @@ static struct hid_driver hid_generic = {
.name = "hid-generic",
.id_table = hid_table,
};
-
-static int __init hid_init(void)
-{
- return hid_register_driver(&hid_generic);
-}
-
-static void __exit hid_exit(void)
-{
- hid_unregister_driver(&hid_generic);
-}
-
-module_init(hid_init);
-module_exit(hid_exit);
+module_hid_driver(hid_generic);
MODULE_AUTHOR("Henrik Rydberg");
MODULE_DESCRIPTION("HID generic driver");
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index 4442c30ef531..288d61c9748e 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -88,17 +88,6 @@ static struct hid_driver gyration_driver = {
.input_mapping = gyration_input_mapping,
.event = gyration_event,
};
+module_hid_driver(gyration_driver);
-static int __init gyration_init(void)
-{
- return hid_register_driver(&gyration_driver);
-}
-
-static void __exit gyration_exit(void)
-{
- hid_unregister_driver(&gyration_driver);
-}
-
-module_init(gyration_init);
-module_exit(gyration_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index e0a5d1739fc3..6e1a4a4fc0c1 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -167,17 +167,6 @@ static struct hid_driver holtek_kbd_driver = {
.report_fixup = holtek_kbd_report_fixup,
.probe = holtek_kbd_probe
};
+module_hid_driver(holtek_kbd_driver);
-static int __init holtek_kbd_init(void)
-{
- return hid_register_driver(&holtek_kbd_driver);
-}
-
-static void __exit holtek_kbd_exit(void)
-{
- hid_unregister_driver(&holtek_kbd_driver);
-}
-
-module_exit(holtek_kbd_exit);
-module_init(holtek_kbd_init);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
index ff295e60059b..f34d1186a3e1 100644
--- a/drivers/hid/hid-holtekff.c
+++ b/drivers/hid/hid-holtekff.c
@@ -224,17 +224,4 @@ static struct hid_driver holtek_driver = {
.id_table = holtek_devices,
.probe = holtek_probe,
};
-
-static int __init holtek_init(void)
-{
- return hid_register_driver(&holtek_driver);
-}
-
-static void __exit holtek_exit(void)
-{
- hid_unregister_driver(&holtek_driver);
-}
-
-module_init(holtek_init);
-module_exit(holtek_exit);
-
+module_hid_driver(holtek_driver);
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 3d62781b8993..aa3fec0d9dc6 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -568,8 +568,7 @@ static int mousevsc_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Mouse guid */
- { VMBUS_DEVICE(0x9E, 0xB6, 0xA8, 0xCF, 0x4A, 0x5B, 0xc0, 0x4c,
- 0xB9, 0x8B, 0x8B, 0xA1, 0xA1, 0xF3, 0xF9, 0x5A) },
+ { HV_MOUSE_GUID, },
{ },
};
diff --git a/drivers/hid/hid-icade.c b/drivers/hid/hid-icade.c
index 1d6565e37ba3..09dcc04595f3 100644
--- a/drivers/hid/hid-icade.c
+++ b/drivers/hid/hid-icade.c
@@ -235,25 +235,8 @@ static struct hid_driver icade_driver = {
.input_mapped = icade_input_mapped,
.input_mapping = icade_input_mapping,
};
+module_hid_driver(icade_driver);
-static int __init icade_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&icade_driver);
- if (ret)
- pr_err("can't register icade driver\n");
-
- return ret;
-}
-
-static void __exit icade_exit(void)
-{
- hid_unregister_driver(&icade_driver);
-}
-
-module_init(icade_init);
-module_exit(icade_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
MODULE_DESCRIPTION("ION iCade input driver");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 34e25471aeaa..92e47e5c9564 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -445,6 +445,9 @@
#define USB_VENDOR_ID_JESS 0x0c45
#define USB_DEVICE_ID_JESS_YUREX 0x1010
+#define USB_VENDOR_ID_JESS2 0x0f30
+#define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
+
#define USB_VENDOR_ID_KBGEAR 0x084e
#define USB_DEVICE_ID_KBGEAR_JAMSTUDIO 0x1001
@@ -525,8 +528,8 @@
#define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283
#define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286
#define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940 0xc287
-#define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
#define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293
+#define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295
#define USB_DEVICE_ID_LOGITECH_DFP_WHEEL 0xc298
#define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299
@@ -554,6 +557,9 @@
#define USB_VENDOR_ID_MADCATZ 0x0738
#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+#define USB_VENDOR_ID_MASTERKIT 0x16c0
+#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df
+
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
@@ -597,6 +603,9 @@
#define USB_VENDOR_ID_NEC 0x073e
#define USB_DEVICE_ID_NEC_USB_GAME_PAD 0x0301
+#define USB_VENDOR_ID_NEXIO 0x1870
+#define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d
+
#define USB_VENDOR_ID_NEXTWINDOW 0x1926
#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
@@ -709,6 +718,7 @@
#define USB_VENDOR_ID_SONY 0x054c
#define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
+#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
@@ -726,6 +736,9 @@
#define USB_VENDOR_ID_STANTUM_SITRONIX 0x1403
#define USB_DEVICE_ID_MTP_SITRONIX 0x5001
+#define USB_VENDOR_ID_STEELSERIES 0x1038
+#define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410
+
#define USB_VENDOR_ID_SUN 0x0430
#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
@@ -747,6 +760,9 @@
#define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010
#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013
+#define USB_VENDOR_ID_THINGM 0x27b8
+#define USB_DEVICE_ID_BLINK1 0x01ed
+
#define USB_VENDOR_ID_THRUSTMASTER 0x044f
#define USB_VENDOR_ID_TIVO 0x150a
@@ -794,6 +810,12 @@
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19
+#define USB_VENDOR_ID_VELLEMAN 0x10cf
+#define USB_DEVICE_ID_VELLEMAN_K8055_FIRST 0x5500
+#define USB_DEVICE_ID_VELLEMAN_K8055_LAST 0x5503
+#define USB_DEVICE_ID_VELLEMAN_K8061_FIRST 0x8061
+#define USB_DEVICE_ID_VELLEMAN_K8061_LAST 0x8068
+
#define USB_VENDOR_ID_VERNIER 0x08f7
#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002
diff --git a/drivers/hid/hid-kensington.c b/drivers/hid/hid-kensington.c
index a5b4016e9bd7..fe9a99dd8d08 100644
--- a/drivers/hid/hid-kensington.c
+++ b/drivers/hid/hid-kensington.c
@@ -47,17 +47,6 @@ static struct hid_driver ks_driver = {
.id_table = ks_devices,
.input_mapping = ks_input_mapping,
};
+module_hid_driver(ks_driver);
-static int __init ks_init(void)
-{
- return hid_register_driver(&ks_driver);
-}
-
-static void __exit ks_exit(void)
-{
- hid_unregister_driver(&ks_driver);
-}
-
-module_init(ks_init);
-module_exit(ks_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-keytouch.c b/drivers/hid/hid-keytouch.c
index 07cd825f6f01..3074671b7d6a 100644
--- a/drivers/hid/hid-keytouch.c
+++ b/drivers/hid/hid-keytouch.c
@@ -49,18 +49,7 @@ static struct hid_driver keytouch_driver = {
.id_table = keytouch_devices,
.report_fixup = keytouch_report_fixup,
};
+module_hid_driver(keytouch_driver);
-static int __init keytouch_init(void)
-{
- return hid_register_driver(&keytouch_driver);
-}
-
-static void __exit keytouch_exit(void)
-{
- hid_unregister_driver(&keytouch_driver);
-}
-
-module_init(keytouch_init);
-module_exit(keytouch_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jiri Kosina");
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index b4f0d8216fd0..ef72daecfa16 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -419,17 +419,6 @@ static struct hid_driver kye_driver = {
.probe = kye_probe,
.report_fixup = kye_report_fixup,
};
+module_hid_driver(kye_driver);
-static int __init kye_init(void)
-{
- return hid_register_driver(&kye_driver);
-}
-
-static void __exit kye_exit(void)
-{
- hid_unregister_driver(&kye_driver);
-}
-
-module_init(kye_init);
-module_exit(kye_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lcpower.c b/drivers/hid/hid-lcpower.c
index 22bc14abdfa3..6424cfdb7737 100644
--- a/drivers/hid/hid-lcpower.c
+++ b/drivers/hid/hid-lcpower.c
@@ -54,17 +54,6 @@ static struct hid_driver ts_driver = {
.id_table = ts_devices,
.input_mapping = ts_input_mapping,
};
+module_hid_driver(ts_driver);
-static int __init ts_init(void)
-{
- return hid_register_driver(&ts_driver);
-}
-
-static void __exit ts_exit(void)
-{
- hid_unregister_driver(&ts_driver);
-}
-
-module_init(ts_init);
-module_exit(ts_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
index cea016e94f43..956c3b135f64 100644
--- a/drivers/hid/hid-lenovo-tpkbd.c
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -468,18 +468,6 @@ static struct hid_driver tpkbd_driver = {
.probe = tpkbd_probe,
.remove = tpkbd_remove,
};
-
-static int __init tpkbd_init(void)
-{
- return hid_register_driver(&tpkbd_driver);
-}
-
-static void __exit tpkbd_exit(void)
-{
- hid_unregister_driver(&tpkbd_driver);
-}
-
-module_init(tpkbd_init);
-module_exit(tpkbd_exit);
+module_hid_driver(tpkbd_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index a2f8e88b9fa2..6f12ecd36c88 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -21,8 +21,10 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <linux/usb.h>
#include <linux/wait.h>
+#include "usbhid/usbhid.h"
#include "hid-ids.h"
#include "hid-lg.h"
@@ -40,17 +42,86 @@
#define LG_FF3 0x1000
#define LG_FF4 0x2000
-/* Size of the original descriptor of the Driving Force Pro wheel */
+/* Size of the original descriptors of the Driving Force (and Pro) wheels */
+#define DF_RDESC_ORIG_SIZE 130
#define DFP_RDESC_ORIG_SIZE 97
+#define MOMO_RDESC_ORIG_SIZE 87
-/* Fixed report descriptor for Logitech Driving Force Pro wheel controller
+/* Fixed report descriptors for Logitech Driving Force (and Pro)
+ * wheel controllers
*
- * The original descriptor hides the separate throttle and brake axes in
+ * The original descriptors hide the separate throttle and brake axes in
* a custom vendor usage page, providing only a combined value as
* GenericDesktop.Y.
- * This descriptor removes the combined Y axis and instead reports
+ * These descriptors remove the combined Y axis and instead report
* separate throttle (Y) and brake (RZ).
*/
+static __u8 df_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystik), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0A, /* Report Size (10), */
+0x14, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x34, /* Physical Minimum (0), */
+0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x0C, /* Report Count (12), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage (Buttons), */
+0x19, 0x01, /* Usage Minimum (1), */
+0x29, 0x0c, /* Usage Maximum (12), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x02, /* Report Count (2), */
+0x06, 0x00, 0xFF, /* Usage Page (Vendor: 65280), */
+0x09, 0x01, /* Usage (?: 1), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x81, 0x02, /* Input (Variable), */
+0x25, 0x07, /* Logical Maximum (7), */
+0x46, 0x3B, 0x01, /* Physical Maximum (315), */
+0x75, 0x04, /* Report Size (4), */
+0x65, 0x14, /* Unit (Degrees), */
+0x09, 0x39, /* Usage (Hat Switch), */
+0x81, 0x42, /* Input (Variable, Null State), */
+0x75, 0x01, /* Report Size (1), */
+0x95, 0x04, /* Report Count (4), */
+0x65, 0x00, /* Unit (none), */
+0x06, 0x00, 0xFF, /* Usage Page (Vendor: 65280), */
+0x09, 0x01, /* Usage (?: 1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x35, /* Usage (Rz), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x95, 0x07, /* Report Count (7), */
+0x75, 0x08, /* Report Size (8), */
+0x09, 0x03, /* Usage (?: 3), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
static __u8 dfp_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystik), */
@@ -99,6 +170,51 @@ static __u8 dfp_rdesc_fixed[] = {
0xC0 /* End Collection */
};
+static __u8 momo_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystik), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0A, /* Report Size (10), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x35, 0x00, /* Physical Minimum (0), */
+0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x08, /* Report Count (8), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x08, /* Usage Maximum (08h), */
+0x81, 0x02, /* Input (Variable), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x75, 0x0E, /* Report Size (14), */
+0x95, 0x01, /* Report Count (1), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x09, 0x00, /* Usage (00h), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x75, 0x08, /* Report Size (8), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x32, /* Usage (Z), */
+0x81, 0x02, /* Input (Variable), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x01, /* Usage (01h), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x09, 0x02, /* Usage (02h), */
+0x95, 0x07, /* Report Count (7), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
/*
* Certain Logitech keyboards send in report #3 keys which are far
@@ -109,6 +225,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
+ struct usb_device_descriptor *udesc;
+ __u16 bcdDevice, rev_maj, rev_min;
if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
rdesc[84] == 0x8c && rdesc[85] == 0x02) {
@@ -124,17 +242,39 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
"fixing up rel/abs in Logitech report descriptor\n");
rdesc[33] = rdesc[50] = 0x02;
}
- if ((drv_data->quirks & LG_FF4) && *rsize >= 101 &&
- rdesc[41] == 0x95 && rdesc[42] == 0x0B &&
- rdesc[47] == 0x05 && rdesc[48] == 0x09) {
- hid_info(hdev, "fixing up Logitech Speed Force Wireless button descriptor\n");
- rdesc[41] = 0x05;
- rdesc[42] = 0x09;
- rdesc[47] = 0x95;
- rdesc[48] = 0x0B;
- }
switch (hdev->product) {
+
+ /* Several wheels report as this id when operating in emulation mode. */
+ case USB_DEVICE_ID_LOGITECH_WHEEL:
+ udesc = &(hid_to_usb_dev(hdev)->descriptor);
+ if (!udesc) {
+ hid_err(hdev, "NULL USB device descriptor\n");
+ break;
+ }
+ bcdDevice = le16_to_cpu(udesc->bcdDevice);
+ rev_maj = bcdDevice >> 8;
+ rev_min = bcdDevice & 0xff;
+
+ /* Update the report descriptor for only the Driving Force wheel */
+ if (rev_maj == 1 && rev_min == 2 &&
+ *rsize == DF_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Driving Force report descriptor\n");
+ rdesc = df_rdesc_fixed;
+ *rsize = sizeof(df_rdesc_fixed);
+ }
+ break;
+
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
+ if (*rsize == MOMO_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Momo Force (Red) report descriptor\n");
+ rdesc = momo_rdesc_fixed;
+ *rsize = sizeof(momo_rdesc_fixed);
+ }
+ break;
+
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
if (*rsize == DFP_RDESC_ORIG_SIZE) {
hid_info(hdev,
@@ -143,6 +283,17 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*rsize = sizeof(dfp_rdesc_fixed);
}
break;
+
+ case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
+ if (*rsize >= 101 && rdesc[41] == 0x95 && rdesc[42] == 0x0B &&
+ rdesc[47] == 0x05 && rdesc[48] == 0x09) {
+ hid_info(hdev, "fixing up Logitech Speed Force Wireless report descriptor\n");
+ rdesc[41] = 0x05;
+ rdesc[42] = 0x09;
+ rdesc[47] = 0x95;
+ rdesc[48] = 0x0B;
+ }
+ break;
}
return rdesc;
@@ -328,6 +479,26 @@ static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi,
usage->type == EV_REL || usage->type == EV_ABS))
clear_bit(usage->code, *bit);
+ /* Ensure that Logitech wheels are not given a default fuzz/flat value */
+ if (usage->type == EV_ABS && (usage->code == ABS_X ||
+ usage->code == ABS_Y || usage->code == ABS_Z ||
+ usage->code == ABS_RZ)) {
+ switch (hdev->product) {
+ case USB_DEVICE_ID_LOGITECH_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+ field->application = HID_GD_MULTIAXIS;
+ break;
+ default:
+ break;
+ }
+ }
+
return 0;
}
@@ -465,7 +636,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO),
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL),
- .driver_data = LG_FF4 },
+ .driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
.driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
@@ -503,17 +674,6 @@ static struct hid_driver lg_driver = {
.probe = lg_probe,
.remove = lg_remove,
};
+module_hid_driver(lg_driver);
-static int __init lg_init(void)
-{
- return hid_register_driver(&lg_driver);
-}
-
-static void __exit lg_exit(void)
-{
- hid_unregister_driver(&lg_driver);
-}
-
-module_init(lg_init);
-module_exit(lg_exit);
MODULE_LICENSE("GPL");
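
[Editor's note] In the lg_report_fixup() hunk above, several wheels share the USB_DEVICE_ID_LOGITECH_WHEEL product id while running in emulation mode, so the original Driving Force is told apart by the USB device release number. bcdDevice is binary-coded decimal: the high byte is the major revision, the low byte the minor. A small worked sketch of the decode used above; the 0x0102 value is inferred from the rev_maj == 1 && rev_min == 2 test, it is not stated in the patch:

/* Sketch: decoding bcdDevice the way the hunk above does. */
#include <linux/types.h>
#include <linux/usb.h>

static bool is_driving_force(struct usb_device_descriptor *udesc)
{
	__u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);	/* e.g. 0x0102 */
	__u16 rev_maj = bcdDevice >> 8;				/* -> 0x01 */
	__u16 rev_min = bcdDevice & 0xff;			/* -> 0x02 */

	return rev_maj == 1 && rev_min == 2;
}
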
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index d7947c701f30..65a6ec8d3742 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -43,11 +43,6 @@
#define G27_REV_MAJ 0x12
#define G27_REV_MIN 0x38
-#define DFP_X_MIN 0
-#define DFP_X_MAX 16383
-#define DFP_PEDAL_MIN 0
-#define DFP_PEDAL_MAX 255
-
#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
@@ -598,18 +593,6 @@ int lg4ff_init(struct hid_device *hid)
return error;
dbg_hid("sysfs interface created\n");
- /* Set default axes parameters */
- switch (lg4ff_devices[i].product_id) {
- case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
- dbg_hid("Setting axes parameters for Driving Force Pro\n");
- input_set_abs_params(dev, ABS_X, DFP_X_MIN, DFP_X_MAX, 0, 0);
- input_set_abs_params(dev, ABS_Y, DFP_PEDAL_MIN, DFP_PEDAL_MAX, 0, 0);
- input_set_abs_params(dev, ABS_RZ, DFP_PEDAL_MIN, DFP_PEDAL_MAX, 0, 0);
- break;
- default:
- break;
- }
-
/* Set the maximum range to start with */
entry->range = entry->max_range;
if (entry->set_range != NULL)
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 25ddf3e3aec6..f7f113ba083e 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -569,23 +569,6 @@ static struct hid_driver magicmouse_driver = {
.raw_event = magicmouse_raw_event,
.input_mapping = magicmouse_input_mapping,
};
+module_hid_driver(magicmouse_driver);
-static int __init magicmouse_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&magicmouse_driver);
- if (ret)
- pr_err("can't register magicmouse driver\n");
-
- return ret;
-}
-
-static void __exit magicmouse_exit(void)
-{
- hid_unregister_driver(&magicmouse_driver);
-}
-
-module_init(magicmouse_init);
-module_exit(magicmouse_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 6fcd466d0825..29d27f65a118 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -221,17 +221,6 @@ static struct hid_driver ms_driver = {
.event = ms_event,
.probe = ms_probe,
};
+module_hid_driver(ms_driver);
-static int __init ms_init(void)
-{
- return hid_register_driver(&ms_driver);
-}
-
-static void __exit ms_exit(void)
-{
- hid_unregister_driver(&ms_driver);
-}
-
-module_init(ms_init);
-module_exit(ms_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
index cd3643e06fa6..9e14c00eb1b6 100644
--- a/drivers/hid/hid-monterey.c
+++ b/drivers/hid/hid-monterey.c
@@ -63,17 +63,6 @@ static struct hid_driver mr_driver = {
.report_fixup = mr_report_fixup,
.input_mapping = mr_input_mapping,
};
+module_hid_driver(mr_driver);
-static int __init mr_init(void)
-{
- return hid_register_driver(&mr_driver);
-}
-
-static void __exit mr_exit(void)
-{
- hid_unregister_driver(&mr_driver);
-}
-
-module_init(mr_init);
-module_exit(mr_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 61543c02ea0b..7a1ebb867cf4 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -54,6 +54,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_NO_AREA (1 << 9)
#define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
#define MT_QUIRK_HOVERING (1 << 11)
+#define MT_QUIRK_CONTACT_CNT_ACCURATE (1 << 12)
struct mt_slot {
__s32 x, y, cx, cy, p, w, h;
@@ -83,8 +84,11 @@ struct mt_device {
struct mt_class mtclass; /* our mt device class */
struct mt_fields *fields; /* temporary placeholder for storing the
multitouch fields */
+ int cc_index; /* contact count field index in the report */
+ int cc_value_index; /* contact count value index in the field */
unsigned last_field_index; /* last field index of the report */
unsigned last_slot_field; /* the last field of a slot */
+ unsigned mt_report_id; /* the report ID of the multitouch device */
__s8 inputmode; /* InputMode HID feature, -1 if non-existent */
__s8 inputmode_index; /* InputMode HID feature index in the report */
__s8 maxcontact_report_id; /* Maximum Contact Number HID feature,
@@ -111,6 +115,9 @@ struct mt_device {
#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0007
#define MT_CLS_DUAL_NSMU_CONTACTID 0x0008
#define MT_CLS_INRANGE_CONTACTNUMBER 0x0009
+#define MT_CLS_NSMU 0x000a
+#define MT_CLS_DUAL_CONTACT_NUMBER 0x0010
+#define MT_CLS_DUAL_CONTACT_ID 0x0011
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -144,6 +151,9 @@ static int cypress_compute_slot(struct mt_device *td)
static struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE },
+ { .name = MT_CLS_NSMU,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP },
{ .name = MT_CLS_SERIAL,
.quirks = MT_QUIRK_ALWAYS_VALID},
@@ -170,6 +180,16 @@ static struct mt_class mt_classes[] = {
{ .name = MT_CLS_INRANGE_CONTACTNUMBER,
.quirks = MT_QUIRK_VALID_IS_INRANGE |
MT_QUIRK_SLOT_IS_CONTACTNUMBER },
+ { .name = MT_CLS_DUAL_CONTACT_NUMBER,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+ .maxcontacts = 2 },
+ { .name = MT_CLS_DUAL_CONTACT_ID,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_SLOT_IS_CONTACTID,
+ .maxcontacts = 2 },
/*
* vendor specific classes
@@ -250,6 +270,9 @@ static ssize_t mt_set_quirks(struct device *dev,
td->mtclass.quirks = val;
+ if (td->cc_index < 0)
+ td->mtclass.quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
+
return count;
}
@@ -301,6 +324,7 @@ static void mt_feature_mapping(struct hid_device *hdev,
*quirks |= MT_QUIRK_ALWAYS_VALID;
*quirks |= MT_QUIRK_IGNORE_DUPLICATES;
*quirks |= MT_QUIRK_HOVERING;
+ *quirks |= MT_QUIRK_CONTACT_CNT_ACCURATE;
*quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP;
*quirks &= ~MT_QUIRK_VALID_IS_INRANGE;
*quirks &= ~MT_QUIRK_VALID_IS_CONFIDENCE;
@@ -428,6 +452,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
mt_store_field(usage, td, hi);
td->last_field_index = field->index;
td->touches_by_report++;
+ td->mt_report_id = field->report->id;
return 1;
case HID_DG_WIDTH:
hid_map_usage(hi, usage, bit, max,
@@ -459,6 +484,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->last_field_index = field->index;
return 1;
case HID_DG_CONTACTCOUNT:
+ td->cc_index = field->index;
+ td->cc_value_index = usage->usage_index;
td->last_field_index = field->index;
return 1;
case HID_DG_CONTACTMAX:
@@ -523,6 +550,10 @@ static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
*/
static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
{
+ if ((td->mtclass.quirks & MT_QUIRK_CONTACT_CNT_ACCURATE) &&
+ td->num_received >= td->num_expected)
+ return;
+
if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
int slotnum = mt_compute_slot(td, input);
struct mt_slot *s = &td->curdata;
@@ -578,6 +609,16 @@ static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
static int mt_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
+ /* the hidinput part is handled later, in mt_report(); only hiddev remains here */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
struct mt_device *td = hid_get_drvdata(hid);
__s32 quirks = td->mtclass.quirks;
@@ -623,20 +664,13 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
td->curdata.h = value;
break;
case HID_DG_CONTACTCOUNT:
- /*
- * Includes multi-packet support where subsequent
- * packets are sent with zero contactcount.
- */
- if (value)
- td->num_expected = value;
break;
case HID_DG_TOUCH:
/* do nothing */
break;
default:
- /* fallback to the generic hidinput handling */
- return 0;
+ return;
}
if (usage->usage_index + 1 == field->report_count) {
@@ -650,12 +684,43 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
}
}
+}
- /* we have handled the hidinput part, now remains hiddev */
- if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
- hid->hiddev_hid_event(hid, field, usage, value);
+static void mt_report(struct hid_device *hid, struct hid_report *report)
+{
+ struct mt_device *td = hid_get_drvdata(hid);
+ struct hid_field *field;
+ unsigned count;
+ int r, n;
- return 1;
+ if (report->id != td->mt_report_id)
+ return;
+
+ if (!(hid->claimed & HID_CLAIMED_INPUT))
+ return;
+
+ /*
+ * Includes multi-packet support where subsequent
+ * packets are sent with zero contactcount.
+ */
+ if (td->cc_index >= 0) {
+ struct hid_field *field = report->field[td->cc_index];
+ int value = field->value[td->cc_value_index];
+ if (value)
+ td->num_expected = value;
+ }
+
+ for (r = 0; r < report->maxfield; r++) {
+ field = report->field[r];
+ count = field->report_count;
+
+ if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
+ continue;
+
+ for (n = 0; n < count; n++)
+ mt_process_mt_event(hid, field, &field->usage[n],
+ field->value[n]);
+ }
}
static void mt_set_input_mode(struct hid_device *hdev)
@@ -711,6 +776,7 @@ static void mt_post_parse_default_settings(struct mt_device *td)
quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP;
quirks &= ~MT_QUIRK_VALID_IS_INRANGE;
quirks &= ~MT_QUIRK_VALID_IS_CONFIDENCE;
+ quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
}
td->mtclass.quirks = quirks;
@@ -719,11 +785,15 @@ static void mt_post_parse_default_settings(struct mt_device *td)
static void mt_post_parse(struct mt_device *td)
{
struct mt_fields *f = td->fields;
+ struct mt_class *cls = &td->mtclass;
if (td->touches_by_report > 0) {
int field_count_per_touch = f->length / td->touches_by_report;
td->last_slot_field = f->usages[field_count_per_touch - 1];
}
+
+ if (td->cc_index < 0)
+ cls->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
}
static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
@@ -781,6 +851,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
td->mtclass = *mtclass;
td->inputmode = -1;
td->maxcontact_report_id = -1;
+ td->cc_index = -1;
hid_set_drvdata(hdev, td);
td->fields = kzalloc(sizeof(struct mt_fields), GFP_KERNEL);
@@ -875,7 +946,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_3M3266) },
/* ActionStar panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
USB_DEVICE_ID_ACTIONSTAR_1011) },
@@ -888,14 +959,14 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_ATMEL_MXT_DIGITIZER) },
/* Baanto multitouch devices */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_BAANTO,
USB_DEVICE_ID_BAANTO_MT_190W2) },
/* Cando panels */
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
- { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
+ { .driver_data = MT_CLS_DUAL_CONTACT_NUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
@@ -906,12 +977,12 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
/* Chunghwa Telecom touch panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
/* CVTouch panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
USB_DEVICE_ID_CVTOUCH_SCREEN) },
@@ -982,7 +1053,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4) },
/* Elo TouchSystems IntelliTouch Plus panel */
- { .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
+ { .driver_data = MT_CLS_DUAL_CONTACT_ID,
MT_USB_DEVICE(USB_VENDOR_ID_ELO,
USB_DEVICE_ID_ELO_TS2515) },
@@ -1000,12 +1071,12 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) },
/* Gametel game controller */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_BT_DEVICE(USB_VENDOR_ID_FRUCTEL,
USB_DEVICE_ID_GAMETEL_MT_MODE) },
/* GoodTouch panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
USB_DEVICE_ID_GOODTOUCH_000f) },
@@ -1023,7 +1094,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_IDEACOM_IDC6651) },
/* Ilitek dual touch panel */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
USB_DEVICE_ID_ILITEK_MULTITOUCH) },
@@ -1056,6 +1127,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_TURBOX,
USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
+ /* Nexio panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ MT_USB_DEVICE(USB_VENDOR_ID_NEXIO,
+ USB_DEVICE_ID_NEXIO_MULTITOUCH_420)},
+
/* Panasonic panels */
{ .driver_data = MT_CLS_PANASONIC,
MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
@@ -1065,7 +1141,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_PANABOARD_UBT880) },
/* Novatek Panel */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
USB_DEVICE_ID_NOVATEK_PCT) },
@@ -1111,7 +1187,7 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_CONFIDENCE,
MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
USB_DEVICE_ID_MTP_STM)},
- { .driver_data = MT_CLS_CONFIDENCE,
+ { .driver_data = MT_CLS_DEFAULT,
MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
USB_DEVICE_ID_MTP_SITRONIX)},
@@ -1121,48 +1197,48 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_TOPSEED2_PERIPAD_701) },
/* Touch International panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_TOUCH_INTL,
USB_DEVICE_ID_TOUCH_INTL_MULTI_TOUCH) },
/* Unitec panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
USB_DEVICE_ID_UNITEC_USB_TOUCH_0709) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
/* XAT */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XAT,
USB_DEVICE_ID_XAT_CSR) },
/* Xiroku */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_SPX) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_MPX) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_SPX1) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_MPX1) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR1) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_SPX2) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_MPX2) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR2) },
@@ -1193,21 +1269,10 @@ static struct hid_driver mt_driver = {
.feature_mapping = mt_feature_mapping,
.usage_table = mt_grabbed_usages,
.event = mt_event,
+ .report = mt_report,
#ifdef CONFIG_PM
.reset_resume = mt_reset_resume,
.resume = mt_resume,
#endif
};
-
-static int __init mt_init(void)
-{
- return hid_register_driver(&mt_driver);
-}
-
-static void __exit mt_exit(void)
-{
- hid_unregister_driver(&mt_driver);
-}
-
-module_init(mt_init);
-module_exit(mt_exit);
+module_hid_driver(mt_driver);
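
[Editor's note] The hid-multitouch.c changes above move contact handling out of ->event and into the new ->report hook: mt_event() now only forwards to hiddev and returns 1, while mt_report() first reads the Contact Count field remembered at mapping time (cc_index / cc_value_index) and only then walks the report's variable fields through mt_process_mt_event(). As I read it, that ordering guarantees num_expected is known before any slot is completed, and MT_QUIRK_CONTACT_CNT_ACCURATE lets mt_complete_slot() drop anything past that count. A condensed trace of one frame under those assumptions:

/*
 * Worked trace (sketch), assuming cc_index >= 0 and
 * MT_QUIRK_CONTACT_CNT_ACCURATE set:
 *
 *   report arrives with Contact Count = 2
 *     -> mt_report():        num_expected = 2
 *     -> contact A fields    -> mt_complete_slot(): num_received = 1
 *     -> contact B fields    -> mt_complete_slot(): num_received = 2
 *     -> any further contact in the same report is ignored
 *        (num_received >= num_expected guard at the top of
 *        mt_complete_slot())
 *     -> mt_sync_frame() after the last field of the report
 */
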
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 86a969f63292..7757e82416e7 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -858,12 +858,43 @@ not_claimed_input:
return 1;
}
+static void ntrig_input_configured(struct hid_device *hid,
+ struct hid_input *hidinput)
+
+{
+ struct input_dev *input = hidinput->input;
+
+ if (hidinput->report->maxfield < 1)
+ return;
+
+ switch (hidinput->report->field[0]->application) {
+ case HID_DG_PEN:
+ input->name = "N-Trig Pen";
+ break;
+ case HID_DG_TOUCHSCREEN:
+ /* These keys are redundant for fingers, clear them
+ * to prevent incorrect identification */
+ __clear_bit(BTN_TOOL_PEN, input->keybit);
+ __clear_bit(BTN_TOOL_FINGER, input->keybit);
+ __clear_bit(BTN_0, input->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+ /*
+ * The physical touchscreen (single touch)
+ * input has a value for physical, whereas
+ * the multitouch only has logical input
+ * fields.
+ */
+ input->name = (hidinput->report->field[0]->physical) ?
+ "N-Trig Touchscreen" :
+ "N-Trig MultiTouch";
+ break;
+ }
+}
+
static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
struct ntrig_data *nd;
- struct hid_input *hidinput;
- struct input_dev *input;
struct hid_report *report;
if (id->driver_data)
@@ -901,38 +932,6 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_free;
}
-
- list_for_each_entry(hidinput, &hdev->inputs, list) {
- if (hidinput->report->maxfield < 1)
- continue;
-
- input = hidinput->input;
- switch (hidinput->report->field[0]->application) {
- case HID_DG_PEN:
- input->name = "N-Trig Pen";
- break;
- case HID_DG_TOUCHSCREEN:
- /* These keys are redundant for fingers, clear them
- * to prevent incorrect identification */
- __clear_bit(BTN_TOOL_PEN, input->keybit);
- __clear_bit(BTN_TOOL_FINGER, input->keybit);
- __clear_bit(BTN_0, input->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
- /*
- * The physical touchscreen (single touch)
- * input has a value for physical, whereas
- * the multitouch only has logical input
- * fields.
- */
- input->name =
- (hidinput->report->field[0]
- ->physical) ?
- "N-Trig Touchscreen" :
- "N-Trig MultiTouch";
- break;
- }
- }
-
/* This is needed for devices with more recent firmware versions */
report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0x0a];
if (report) {
@@ -1023,20 +1022,10 @@ static struct hid_driver ntrig_driver = {
.remove = ntrig_remove,
.input_mapping = ntrig_input_mapping,
.input_mapped = ntrig_input_mapped,
+ .input_configured = ntrig_input_configured,
.usage_table = ntrig_grabbed_usages,
.event = ntrig_event,
};
+module_hid_driver(ntrig_driver);
-static int __init ntrig_init(void)
-{
- return hid_register_driver(&ntrig_driver);
-}
-
-static void __exit ntrig_exit(void)
-{
- hid_unregister_driver(&ntrig_driver);
-}
-
-module_init(ntrig_init);
-module_exit(ntrig_exit);
MODULE_LICENSE("GPL");
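
[Editor's note] The hid-ntrig.c change above lifts the per-input naming out of the probe() loop and into a new .input_configured callback. As I understand the hidinput core, that callback runs once for each hid_input while it is being set up, so the pen/touchscreen names are applied before the input device is registered rather than patched afterwards. A minimal sketch of the callback shape, using only what the hunk shows; the "foo" names are illustrative:

/* Sketch: minimal .input_configured hook. */
static void foo_input_configured(struct hid_device *hdev,
				 struct hid_input *hidinput)
{
	if (hidinput->report->maxfield < 1)
		return;

	/* rename the input device that hidinput built for this report */
	hidinput->input->name = "foo device";
}

/* hooked up like the hunk above:
 *	static struct hid_driver foo_driver = {
 *		...
 *		.input_configured = foo_input_configured,
 *	};
 */
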
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 0ffa1d2d64f0..6620f15fec22 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -50,17 +50,6 @@ static struct hid_driver ortek_driver = {
.id_table = ortek_devices,
.report_fixup = ortek_report_fixup
};
+module_hid_driver(ortek_driver);
-static int __init ortek_init(void)
-{
- return hid_register_driver(&ortek_driver);
-}
-
-static void __exit ortek_exit(void)
-{
- hid_unregister_driver(&ortek_driver);
-}
-
-module_init(ortek_init);
-module_exit(ortek_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
index 4c521de4e7e6..736b2502df4f 100644
--- a/drivers/hid/hid-petalynx.c
+++ b/drivers/hid/hid-petalynx.c
@@ -103,17 +103,6 @@ static struct hid_driver pl_driver = {
.input_mapping = pl_input_mapping,
.probe = pl_probe,
};
+module_hid_driver(pl_driver);
-static int __init pl_init(void)
-{
- return hid_register_driver(&pl_driver);
-}
-
-static void __exit pl_exit(void)
-{
- hid_unregister_driver(&pl_driver);
-}
-
-module_init(pl_init);
-module_exit(pl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
index 86df26e58aba..31cd93fc3d4b 100644
--- a/drivers/hid/hid-picolcd_core.c
+++ b/drivers/hid/hid-picolcd_core.c
@@ -672,18 +672,7 @@ static struct hid_driver picolcd_driver = {
.reset_resume = picolcd_reset_resume,
#endif
};
+module_hid_driver(picolcd_driver);
-static int __init picolcd_init(void)
-{
- return hid_register_driver(&picolcd_driver);
-}
-
-static void __exit picolcd_exit(void)
-{
- hid_unregister_driver(&picolcd_driver);
-}
-
-module_init(picolcd_init);
-module_exit(picolcd_exit);
MODULE_DESCRIPTION("Minibox graphics PicoLCD Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index 47ed74c46b6b..b0199d27787b 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -14,6 +14,8 @@
* 0e8f:0003 "GASIA USB Gamepad"
* - another version of the König gamepad
*
+ * 0f30:0111 "Saitek Color Rumble Pad"
+ *
* Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com>
*/
@@ -51,6 +53,7 @@
struct plff_device {
struct hid_report *report;
+ s32 maxval;
s32 *strong;
s32 *weak;
};
@@ -66,8 +69,8 @@ static int hid_plff_play(struct input_dev *dev, void *data,
right = effect->u.rumble.weak_magnitude;
debug("called with 0x%04x 0x%04x", left, right);
- left = left * 0x7f / 0xffff;
- right = right * 0x7f / 0xffff;
+ left = left * plff->maxval / 0xffff;
+ right = right * plff->maxval / 0xffff;
*plff->strong = left;
*plff->weak = right;
@@ -87,6 +90,7 @@ static int plff_init(struct hid_device *hid)
struct list_head *report_ptr = report_list;
struct input_dev *dev;
int error;
+ s32 maxval;
s32 *strong;
s32 *weak;
@@ -123,6 +127,7 @@ static int plff_init(struct hid_device *hid)
return -ENODEV;
}
+ maxval = 0x7f;
if (report->field[0]->report_count >= 4) {
report->field[0]->value[0] = 0x00;
report->field[0]->value[1] = 0x00;
@@ -135,6 +140,8 @@ static int plff_init(struct hid_device *hid)
report->field[1]->value[0] = 0x00;
strong = &report->field[2]->value[0];
weak = &report->field[3]->value[0];
+ if (hid->vendor == USB_VENDOR_ID_JESS2)
+ maxval = 0xff;
debug("detected 4-field device");
} else {
hid_err(hid, "not enough fields or values\n");
@@ -158,6 +165,7 @@ static int plff_init(struct hid_device *hid)
plff->report = report;
plff->strong = strong;
plff->weak = weak;
+ plff->maxval = maxval;
*strong = 0x00;
*weak = 0x00;
@@ -207,6 +215,7 @@ static const struct hid_device_id pl_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR),
.driver_data = 1 }, /* Twin USB Joystick */
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003), },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD), },
{ }
};
MODULE_DEVICE_TABLE(hid, pl_devices);
@@ -216,17 +225,6 @@ static struct hid_driver pl_driver = {
.id_table = pl_devices,
.probe = pl_probe,
};
+module_hid_driver(pl_driver);
-static int __init pl_init(void)
-{
- return hid_register_driver(&pl_driver);
-}
-
-static void __exit pl_exit(void)
-{
- hid_unregister_driver(&pl_driver);
-}
-
-module_init(pl_init);
-module_exit(pl_exit);
MODULE_LICENSE("GPL");
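
[Editor's note] The hid-pl.c change above makes the rumble scaling per-device: magnitudes from the ff core are 0..0xffff and are scaled into the report's value range, which stays at 0x7f by default and becomes 0xff for the JESS2 / Saitek Color Rumble Pad 4-field layout. A worked example of the expression used in hid_plff_play() above:

/* Sketch: the scaling from hid_plff_play(), with concrete numbers. */
#include <linux/types.h>

static inline s32 plff_scale(u16 magnitude, s32 maxval)
{
	return (s32)magnitude * maxval / 0xffff;
}

/*
 * plff_scale(0xffff, 0x7f) == 0x7f	full strength, default devices
 * plff_scale(0xffff, 0xff) == 0xff	full strength, JESS2 pad
 * plff_scale(0x8000, 0xff) == 0x7f	half strength, JESS2 pad (truncated)
 */
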
diff --git a/drivers/hid/hid-primax.c b/drivers/hid/hid-primax.c
index c15adb0c98a1..3a1c3c4c50dc 100644
--- a/drivers/hid/hid-primax.c
+++ b/drivers/hid/hid-primax.c
@@ -75,18 +75,7 @@ static struct hid_driver px_driver = {
.id_table = px_devices,
.raw_event = px_raw_event,
};
+module_hid_driver(px_driver);
-static int __init px_init(void)
-{
- return hid_register_driver(&px_driver);
-}
-
-static void __exit px_exit(void)
-{
- hid_unregister_driver(&px_driver);
-}
-
-module_init(px_init);
-module_exit(px_exit);
MODULE_AUTHOR("Terry Lambert <tlambert@google.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index ec8ca3336315..4e1c4bcbdc03 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -889,23 +889,6 @@ static struct hid_driver pk_driver = {
.probe = pk_probe,
.remove = pk_remove,
};
+module_hid_driver(pk_driver);
-static int pk_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&pk_driver);
- if (ret)
- pr_err("can't register prodikeys driver\n");
-
- return ret;
-}
-
-static void pk_exit(void)
-{
- hid_unregister_driver(&pk_driver);
-}
-
-module_init(pk_init);
-module_exit(pk_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ps3remote.c b/drivers/hid/hid-ps3remote.c
index 03811e539d71..f1239d3c5b14 100644
--- a/drivers/hid/hid-ps3remote.c
+++ b/drivers/hid/hid-ps3remote.c
@@ -198,18 +198,7 @@ static struct hid_driver ps3remote_driver = {
.report_fixup = ps3remote_fixup,
.input_mapping = ps3remote_mapping,
};
+module_hid_driver(ps3remote_driver);
-static int __init ps3remote_init(void)
-{
- return hid_register_driver(&ps3remote_driver);
-}
-
-static void __exit ps3remote_exit(void)
-{
- hid_unregister_driver(&ps3remote_driver);
-}
-
-module_init(ps3remote_init);
-module_exit(ps3remote_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Dillow <dave@thedillows.org>, Antonio Ospite <ospite@studenti.unina.it>");
diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
index 5084fb4b7e91..6adc0fa08d96 100644
--- a/drivers/hid/hid-roccat-lua.c
+++ b/drivers/hid/hid-roccat-lua.c
@@ -208,19 +208,7 @@ static struct hid_driver lua_driver = {
.probe = lua_probe,
.remove = lua_remove
};
-
-static int __init lua_init(void)
-{
- return hid_register_driver(&lua_driver);
-}
-
-static void __exit lua_exit(void)
-{
- hid_unregister_driver(&lua_driver);
-}
-
-module_init(lua_init);
-module_exit(lua_exit);
+module_hid_driver(lua_driver);
MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat Lua driver");
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index b685b04dbf9d..d7437ef5c695 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -378,7 +378,7 @@ EXPORT_SYMBOL_GPL(roccat_disconnect);
static long roccat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct roccat_device *device;
unsigned int minor = iminor(inode);
long retval = 0;
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 45aea77bb611..37961c7e397d 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -54,17 +54,6 @@ static struct hid_driver saitek_driver = {
.id_table = saitek_devices,
.report_fixup = saitek_report_fixup
};
+module_hid_driver(saitek_driver);
-static int __init saitek_init(void)
-{
- return hid_register_driver(&saitek_driver);
-}
-
-static void __exit saitek_exit(void)
-{
- hid_unregister_driver(&saitek_driver);
-}
-
-module_init(saitek_init);
-module_exit(saitek_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index a5821d317229..7cbb067d4a9e 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -196,17 +196,6 @@ static struct hid_driver samsung_driver = {
.input_mapping = samsung_input_mapping,
.probe = samsung_probe,
};
+module_hid_driver(samsung_driver);
-static int __init samsung_init(void)
-{
- return hid_register_driver(&samsung_driver);
-}
-
-static void __exit samsung_exit(void)
-{
- hid_unregister_driver(&samsung_driver);
-}
-
-module_init(samsung_init);
-module_exit(samsung_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 0bc58bd8d4f5..6679788bf75a 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -605,16 +605,12 @@ static void sensor_hub_remove(struct hid_device *hdev)
}
static const struct hid_device_id sensor_hub_devices[] = {
- { HID_DEVICE(BUS_USB, HID_GROUP_SENSOR_HUB, HID_ANY_ID, HID_ANY_ID) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
+ HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, sensor_hub_devices);
-static const struct hid_usage_id sensor_hub_grabbed_usages[] = {
- { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
- { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1 }
-};
-
static struct hid_driver sensor_hub_driver = {
.name = "hid-sensor-hub",
.id_table = sensor_hub_devices,
@@ -627,19 +623,7 @@ static struct hid_driver sensor_hub_driver = {
.reset_resume = sensor_hub_reset_resume,
#endif
};
-
-static int __init sensor_hub_init(void)
-{
- return hid_register_driver(&sensor_hub_driver);
-}
-
-static void __exit sensor_hub_exit(void)
-{
- hid_unregister_driver(&sensor_hub_driver);
-}
-
-module_init(sensor_hub_init);
-module_exit(sensor_hub_exit);
+module_hid_driver(sensor_hub_driver);
MODULE_DESCRIPTION("HID Sensor Hub driver");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
index 42257acfeb73..28f774003f03 100644
--- a/drivers/hid/hid-sjoy.c
+++ b/drivers/hid/hid-sjoy.c
@@ -177,19 +177,8 @@ static struct hid_driver sjoy_driver = {
.id_table = sjoy_devices,
.probe = sjoy_probe,
};
+module_hid_driver(sjoy_driver);
-static int __init sjoy_init(void)
-{
- return hid_register_driver(&sjoy_driver);
-}
-
-static void __exit sjoy_exit(void)
-{
- hid_unregister_driver(&sjoy_driver);
-}
-
-module_init(sjoy_init);
-module_exit(sjoy_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jussi Kivilinna");
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 7f33ebf299c2..312098e4af4f 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -33,6 +33,28 @@ static const u8 sixaxis_rdesc_fixup[] = {
0x03, 0x46, 0xFF, 0x03, 0x09, 0x01, 0x81, 0x02
};
+static const u8 sixaxis_rdesc_fixup2[] = {
+ 0x05, 0x01, 0x09, 0x04, 0xa1, 0x01, 0xa1, 0x02,
+ 0x85, 0x01, 0x75, 0x08, 0x95, 0x01, 0x15, 0x00,
+ 0x26, 0xff, 0x00, 0x81, 0x03, 0x75, 0x01, 0x95,
+ 0x13, 0x15, 0x00, 0x25, 0x01, 0x35, 0x00, 0x45,
+ 0x01, 0x05, 0x09, 0x19, 0x01, 0x29, 0x13, 0x81,
+ 0x02, 0x75, 0x01, 0x95, 0x0d, 0x06, 0x00, 0xff,
+ 0x81, 0x03, 0x15, 0x00, 0x26, 0xff, 0x00, 0x05,
+ 0x01, 0x09, 0x01, 0xa1, 0x00, 0x75, 0x08, 0x95,
+ 0x04, 0x35, 0x00, 0x46, 0xff, 0x00, 0x09, 0x30,
+ 0x09, 0x31, 0x09, 0x32, 0x09, 0x35, 0x81, 0x02,
+ 0xc0, 0x05, 0x01, 0x95, 0x13, 0x09, 0x01, 0x81,
+ 0x02, 0x95, 0x0c, 0x81, 0x01, 0x75, 0x10, 0x95,
+ 0x04, 0x26, 0xff, 0x03, 0x46, 0xff, 0x03, 0x09,
+ 0x01, 0x81, 0x02, 0xc0, 0xa1, 0x02, 0x85, 0x02,
+ 0x75, 0x08, 0x95, 0x30, 0x09, 0x01, 0xb1, 0x02,
+ 0xc0, 0xa1, 0x02, 0x85, 0xee, 0x75, 0x08, 0x95,
+ 0x30, 0x09, 0x01, 0xb1, 0x02, 0xc0, 0xa1, 0x02,
+ 0x85, 0xef, 0x75, 0x08, 0x95, 0x30, 0x09, 0x01,
+ 0xb1, 0x02, 0xc0, 0xc0,
+};
+
struct sony_sc {
unsigned long quirks;
};
@@ -43,9 +65,19 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
{
struct sony_sc *sc = hid_get_drvdata(hdev);
- if ((sc->quirks & VAIO_RDESC_CONSTANT) &&
- *rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) {
- hid_info(hdev, "Fixing up Sony Vaio VGX report descriptor\n");
+ /*
+ * Some Sony RF receivers wrongly declare the mouse pointer as
+ * a constant non-data variable.
+ */
+ if ((sc->quirks & VAIO_RDESC_CONSTANT) && *rsize >= 56 &&
+ /* usage page: generic desktop controls */
+ /* rdesc[0] == 0x05 && rdesc[1] == 0x01 && */
+ /* usage: mouse */
+ rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+ /* input (usage page for x,y axes): constant, variable, relative */
+ rdesc[54] == 0x81 && rdesc[55] == 0x07) {
+ hid_info(hdev, "Fixing up Sony RF Receiver report descriptor\n");
+ /* input: data, variable, relative */
rdesc[55] = 0x06;
}
@@ -56,6 +88,12 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev, "Fixing up Sony Sixaxis report descriptor\n");
memcpy((void *)&rdesc[83], (void *)&sixaxis_rdesc_fixup,
sizeof(sixaxis_rdesc_fixup));
+ } else if (sc->quirks & SIXAXIS_CONTROLLER_USB &&
+ *rsize > sizeof(sixaxis_rdesc_fixup2)) {
+ hid_info(hdev, "Sony Sixaxis clone detected. Using original report descriptor (size: %d clone; %d new)\n",
+ *rsize, (int)sizeof(sixaxis_rdesc_fixup2));
+ *rsize = sizeof(sixaxis_rdesc_fixup2);
+ memcpy(rdesc, &sixaxis_rdesc_fixup2, *rsize);
}
return rdesc;
}
@@ -217,6 +255,8 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = SIXAXIS_CONTROLLER_BT },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE),
.driver_data = VAIO_RDESC_CONSTANT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
+ .driver_data = VAIO_RDESC_CONSTANT },
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);
@@ -229,17 +269,6 @@ static struct hid_driver sony_driver = {
.report_fixup = sony_report_fixup,
.raw_event = sony_raw_event
};
+module_hid_driver(sony_driver);
-static int __init sony_init(void)
-{
- return hid_register_driver(&sony_driver);
-}
-
-static void __exit sony_exit(void)
-{
- hid_unregister_driver(&sony_driver);
-}
-
-module_init(sony_init);
-module_exit(sony_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
index 602013741718..e94371a059cb 100644
--- a/drivers/hid/hid-speedlink.c
+++ b/drivers/hid/hid-speedlink.c
@@ -73,17 +73,6 @@ static struct hid_driver speedlink_driver = {
.input_mapping = speedlink_input_mapping,
.event = speedlink_event,
};
+module_hid_driver(speedlink_driver);
-static int __init speedlink_init(void)
-{
- return hid_register_driver(&speedlink_driver);
-}
-
-static void __exit speedlink_exit(void)
-{
- hid_unregister_driver(&speedlink_driver);
-}
-
-module_init(speedlink_init);
-module_exit(speedlink_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
new file mode 100644
index 000000000000..2ed995cda44a
--- /dev/null
+++ b/drivers/hid/hid-steelseries.c
@@ -0,0 +1,393 @@
+/*
+ * HID driver for Steelseries SRW-S1
+ *
+ * Copyright (c) 2013 Simon Wood
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/usb.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#define SRWS1_NUMBER_LEDS 15
+struct steelseries_srws1_data {
+ __u16 led_state;
+ /* the last element is used for setting all leds simultaneously */
+ struct led_classdev *led[SRWS1_NUMBER_LEDS + 1];
+};
+#endif
+
+/* Fixed report descriptor for Steelseries SRW-S1 wheel controller
+ *
+ * The original descriptor hides the sensitivity and assists dials in
+ * a custom vendor usage page. This inserts a patch to make them
+ * appear in the 'Generic Desktop' usage.
+ */
+
+static __u8 steelseries_srws1_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop) */
+0x09, 0x08, /* Usage (MultiAxis), Changed */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x05, 0x01, /* Changed Usage Page (Desktop), */
+0x09, 0x30, /* Changed Usage (X), */
+0x16, 0xF8, 0xF8, /* Logical Minimum (-1800), */
+0x26, 0x08, 0x07, /* Logical Maximum (1800), */
+0x65, 0x14, /* Unit (Degrees), */
+0x55, 0x0F, /* Unit Exponent (15), */
+0x75, 0x10, /* Report Size (16), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x31, /* Changed Usage (Y), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x75, 0x0C, /* Report Size (12), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x32, /* Changed Usage (Z), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x75, 0x0C, /* Report Size (12), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x39, /* Usage (Hat Switch), */
+0x25, 0x07, /* Logical Maximum (7), */
+0x35, 0x00, /* Physical Minimum (0), */
+0x46, 0x3B, 0x01, /* Physical Maximum (315), */
+0x65, 0x14, /* Unit (Degrees), */
+0x75, 0x04, /* Report Size (4), */
+0x95, 0x01, /* Report Count (1), */
+0x81, 0x02, /* Input (Variable), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x65, 0x00, /* Unit, */
+0x75, 0x01, /* Report Size (1), */
+0x95, 0x03, /* Report Count (3), */
+0x81, 0x01, /* Input (Constant), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x11, /* Usage Maximum (11h), */
+0x95, 0x11, /* Report Count (17), */
+0x81, 0x02, /* Input (Variable), */
+ /* ---- Dial patch starts here ---- */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x33, /* Usage (RX), */
+0x75, 0x04, /* Report Size (4), */
+0x95, 0x02, /* Report Count (2), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x25, 0x0b, /* Logical Maximum (b), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x35, /* Usage (RZ), */
+0x75, 0x04, /* Report Size (4), */
+0x95, 0x01, /* Report Count (1), */
+0x25, 0x03, /* Logical Maximum (3), */
+0x81, 0x02, /* Input (Variable), */
+ /* ---- Dial patch ends here ---- */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x01, /* Usage (01h), */
+0x75, 0x04, /* Changed Report Size (4), */
+0x95, 0x0D, /* Changed Report Count (13), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x09, 0x02, /* Usage (02h), */
+0x75, 0x08, /* Report Size (8), */
+0x95, 0x10, /* Report Count (16), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds)
+{
+ struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ __s32 *value = report->field[0]->value;
+
+ value[0] = 0x40;
+ value[1] = leds & 0xFF;
+ value[2] = leds >> 8;
+ value[3] = 0x00;
+ value[4] = 0x00;
+ value[5] = 0x00;
+ value[6] = 0x00;
+ value[7] = 0x00;
+ value[8] = 0x00;
+ value[9] = 0x00;
+ value[10] = 0x00;
+ value[11] = 0x00;
+ value[12] = 0x00;
+ value[13] = 0x00;
+ value[14] = 0x00;
+ value[15] = 0x00;
+
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+
+ /* Note: LED change does not show on device until the device is read/polled */
+}
+
+static void steelseries_srws1_led_all_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hid = container_of(dev, struct hid_device, dev);
+ struct steelseries_srws1_data *drv_data = hid_get_drvdata(hid);
+
+ if (!drv_data) {
+ hid_err(hid, "Device data not found.");
+ return;
+ }
+
+ if (value == LED_OFF)
+ drv_data->led_state = 0;
+ else
+ drv_data->led_state = (1 << (SRWS1_NUMBER_LEDS + 1)) - 1;
+
+ steelseries_srws1_set_leds(hid, drv_data->led_state);
+}
+
+static enum led_brightness steelseries_srws1_led_all_get_brightness(struct led_classdev *led_cdev)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hid = container_of(dev, struct hid_device, dev);
+ struct steelseries_srws1_data *drv_data;
+
+ drv_data = hid_get_drvdata(hid);
+
+ if (!drv_data) {
+ hid_err(hid, "Device data not found.");
+ return LED_OFF;
+ }
+
+ return (drv_data->led_state >> SRWS1_NUMBER_LEDS) ? LED_FULL : LED_OFF;
+}
+
+static void steelseries_srws1_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hid = container_of(dev, struct hid_device, dev);
+ struct steelseries_srws1_data *drv_data = hid_get_drvdata(hid);
+ int i, state = 0;
+
+ if (!drv_data) {
+ hid_err(hid, "Device data not found.");
+ return;
+ }
+
+ for (i = 0; i < SRWS1_NUMBER_LEDS; i++) {
+ if (led_cdev != drv_data->led[i])
+ continue;
+
+ state = (drv_data->led_state >> i) & 1;
+ if (value == LED_OFF && state) {
+ drv_data->led_state &= ~(1 << i);
+ steelseries_srws1_set_leds(hid, drv_data->led_state);
+ } else if (value != LED_OFF && !state) {
+ drv_data->led_state |= 1 << i;
+ steelseries_srws1_set_leds(hid, drv_data->led_state);
+ }
+ break;
+ }
+}
+
+static enum led_brightness steelseries_srws1_led_get_brightness(struct led_classdev *led_cdev)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hid = container_of(dev, struct hid_device, dev);
+ struct steelseries_srws1_data *drv_data;
+ int i, value = 0;
+
+ drv_data = hid_get_drvdata(hid);
+
+ if (!drv_data) {
+ hid_err(hid, "Device data not found.");
+ return LED_OFF;
+ }
+
+ for (i = 0; i < SRWS1_NUMBER_LEDS; i++)
+ if (led_cdev == drv_data->led[i]) {
+ value = (drv_data->led_state >> i) & 1;
+ break;
+ }
+
+ return value ? LED_FULL : LED_OFF;
+}
+
+static int steelseries_srws1_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret, i;
+ struct led_classdev *led;
+ size_t name_sz;
+ char *name;
+
+ struct steelseries_srws1_data *drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+
+ if (drv_data == NULL) {
+ hid_err(hdev, "can't alloc SRW-S1 memory\n");
+ return -ENOMEM;
+ }
+
+ hid_set_drvdata(hdev, drv_data);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ goto err_free;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ goto err_free;
+ }
+
+ /* register led subsystem */
+ drv_data->led_state = 0;
+ for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++)
+ drv_data->led[i] = NULL;
+
+ steelseries_srws1_set_leds(hdev, 0);
+
+ name_sz = strlen(hdev->uniq) + 16;
+
+ /* 'ALL', for setting all LEDs simultaneously */
+ led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ if (!led) {
+ hid_err(hdev, "can't allocate memory for LED ALL\n");
+ goto err_led;
+ }
+
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz, "SRWS1::%s::RPMALL", hdev->uniq);
+ led->name = name;
+ led->brightness = 0;
+ led->max_brightness = 1;
+ led->brightness_get = steelseries_srws1_led_all_get_brightness;
+ led->brightness_set = steelseries_srws1_led_all_set_brightness;
+
+ drv_data->led[SRWS1_NUMBER_LEDS] = led;
+ ret = led_classdev_register(&hdev->dev, led);
+ if (ret)
+ goto err_led;
+
+ /* Each individual LED */
+ for (i = 0; i < SRWS1_NUMBER_LEDS; i++) {
+ led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ if (!led) {
+ hid_err(hdev, "can't allocate memory for LED %d\n", i);
+ goto err_led;
+ }
+
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz, "SRWS1::%s::RPM%d", hdev->uniq, i+1);
+ led->name = name;
+ led->brightness = 0;
+ led->max_brightness = 1;
+ led->brightness_get = steelseries_srws1_led_get_brightness;
+ led->brightness_set = steelseries_srws1_led_set_brightness;
+
+ drv_data->led[i] = led;
+ ret = led_classdev_register(&hdev->dev, led);
+
+ if (ret) {
+ hid_err(hdev, "failed to register LED %d. Aborting.\n", i);
+err_led:
+ /* Deregister all LEDs (if any) */
+ for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) {
+ led = drv_data->led[i];
+ drv_data->led[i] = NULL;
+ if (!led)
+ continue;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+ goto out; /* but let the driver continue without LEDs */
+ }
+ }
+out:
+ return 0;
+err_free:
+ kfree(drv_data);
+ return ret;
+}
+
+static void steelseries_srws1_remove(struct hid_device *hdev)
+{
+ int i;
+ struct led_classdev *led;
+
+ struct steelseries_srws1_data *drv_data = hid_get_drvdata(hdev);
+
+ if (drv_data) {
+ /* Deregister LEDs (if any) */
+ for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) {
+ led = drv_data->led[i];
+ drv_data->led[i] = NULL;
+ if (!led)
+ continue;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+
+ }
+
+ hid_hw_stop(hdev);
+ kfree(drv_data);
+ return;
+}
+#endif
+
+static __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize >= 115 && rdesc[11] == 0x02 && rdesc[13] == 0xc8
+ && rdesc[29] == 0xbb && rdesc[40] == 0xc5) {
+ hid_info(hdev, "Fixing up Steelseries SRW-S1 report descriptor\n");
+ rdesc = steelseries_srws1_rdesc_fixed;
+ *rsize = sizeof(steelseries_srws1_rdesc_fixed);
+ }
+ return rdesc;
+}
+
+static const struct hid_device_id steelseries_srws1_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, steelseries_srws1_devices);
+
+static struct hid_driver steelseries_srws1_driver = {
+ .name = "steelseries_srws1",
+ .id_table = steelseries_srws1_devices,
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+ .probe = steelseries_srws1_probe,
+ .remove = steelseries_srws1_remove,
+#endif
+ .report_fixup = steelseries_srws1_report_fixup
+};
+
+static int __init steelseries_srws1_init(void)
+{
+ return hid_register_driver(&steelseries_srws1_driver);
+}
+
+static void __exit steelseries_srws1_exit(void)
+{
+ hid_unregister_driver(&steelseries_srws1_driver);
+}
+
+module_init(steelseries_srws1_init);
+module_exit(steelseries_srws1_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
index 45b4b066a262..87fc91e1c8de 100644
--- a/drivers/hid/hid-sunplus.c
+++ b/drivers/hid/hid-sunplus.c
@@ -63,17 +63,6 @@ static struct hid_driver sp_driver = {
.report_fixup = sp_report_fixup,
.input_mapping = sp_input_mapping,
};
+module_hid_driver(sp_driver);
-static int __init sp_init(void)
-{
- return hid_register_driver(&sp_driver);
-}
-
-static void __exit sp_exit(void)
-{
- hid_unregister_driver(&sp_driver);
-}
-
-module_init(sp_init);
-module_exit(sp_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c
new file mode 100644
index 000000000000..2055a52e9a20
--- /dev/null
+++ b/drivers/hid/hid-thingm.c
@@ -0,0 +1,272 @@
+/*
+ * ThingM blink(1) USB RGB LED driver
+ *
+ * Copyright 2013 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/hid.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+
+#define BLINK1_CMD_SIZE 9
+
+#define blink1_rgb_to_r(rgb) ((rgb & 0xFF0000) >> 16)
+#define blink1_rgb_to_g(rgb) ((rgb & 0x00FF00) >> 8)
+#define blink1_rgb_to_b(rgb) ((rgb & 0x0000FF) >> 0)
+
+/**
+ * struct blink1_data - blink(1) device specific data
+ * @hdev: HID device.
+ * @led_cdev: LED class instance.
+ * @rgb: 8-bit per channel RGB notation.
+ * @fade: fade time in hundredths of a second.
+ * @brightness: brightness coefficient.
+ * @play: play/pause in-memory patterns.
+ */
+struct blink1_data {
+ struct hid_device *hdev;
+ struct led_classdev led_cdev;
+ u32 rgb;
+ u16 fade;
+ u8 brightness;
+ bool play;
+};
+
+static int blink1_send_command(struct blink1_data *data,
+ u8 buf[BLINK1_CMD_SIZE])
+{
+ int ret;
+
+ hid_dbg(data->hdev, "command: %d%c%.2x%.2x%.2x%.2x%.2x%.2x%.2x\n",
+ buf[0], buf[1], buf[2], buf[3], buf[4],
+ buf[5], buf[6], buf[7], buf[8]);
+
+ ret = data->hdev->hid_output_raw_report(data->hdev, buf,
+ BLINK1_CMD_SIZE, HID_FEATURE_REPORT);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int blink1_update_color(struct blink1_data *data)
+{
+ u8 buf[BLINK1_CMD_SIZE] = { 1, 'n', 0, 0, 0, 0, 0, 0, 0 };
+
+ if (data->brightness) {
+ unsigned int coef = DIV_ROUND_CLOSEST(255, data->brightness);
+
+ buf[2] = DIV_ROUND_CLOSEST(blink1_rgb_to_r(data->rgb), coef);
+ buf[3] = DIV_ROUND_CLOSEST(blink1_rgb_to_g(data->rgb), coef);
+ buf[4] = DIV_ROUND_CLOSEST(blink1_rgb_to_b(data->rgb), coef);
+ }
+
+ if (data->fade) {
+ buf[1] = 'c';
+ buf[5] = (data->fade & 0xFF00) >> 8;
+ buf[6] = (data->fade & 0x00FF);
+ }
+
+ return blink1_send_command(data, buf);
+}
+
+static void blink1_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct blink1_data *data = dev_get_drvdata(led_cdev->dev->parent);
+
+ data->brightness = brightness;
+ if (blink1_update_color(data))
+ hid_err(data->hdev, "failed to update color\n");
+}
+
+static enum led_brightness blink1_led_get(struct led_classdev *led_cdev)
+{
+ struct blink1_data *data = dev_get_drvdata(led_cdev->dev->parent);
+
+ return data->brightness;
+}
+
+static ssize_t blink1_show_rgb(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%.6X\n", data->rgb);
+}
+
+static ssize_t blink1_store_rgb(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+ long unsigned int rgb;
+ int ret;
+
+ ret = kstrtoul(buf, 16, &rgb);
+ if (ret)
+ return ret;
+
+ /* RGB triplet notation is 24-bit hexadecimal */
+ if (rgb > 0xFFFFFF)
+ return -EINVAL;
+
+ data->rgb = rgb;
+ ret = blink1_update_color(data);
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR(rgb, S_IRUGO | S_IWUSR, blink1_show_rgb, blink1_store_rgb);
+
+static ssize_t blink1_show_fade(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%d\n", data->fade * 10);
+}
+
+static ssize_t blink1_store_fade(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+ long unsigned int fade;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &fade);
+ if (ret)
+ return ret;
+
+ /* blink(1) accepts 16-bit fade time, number of 10ms ticks */
+ fade = DIV_ROUND_CLOSEST(fade, 10);
+ if (fade > 65535)
+ return -EINVAL;
+
+ data->fade = fade;
+
+ return count;
+}
+
+static DEVICE_ATTR(fade, S_IRUGO | S_IWUSR,
+ blink1_show_fade, blink1_store_fade);
+
+static ssize_t blink1_show_play(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%d\n", data->play);
+}
+
+static ssize_t blink1_store_play(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+ u8 cmd[BLINK1_CMD_SIZE] = { 1, 'p', 0, 0, 0, 0, 0, 0, 0 };
+ long unsigned int play;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &play);
+ if (ret)
+ return ret;
+
+ data->play = !!play;
+ cmd[2] = data->play;
+ ret = blink1_send_command(data, cmd);
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR(play, S_IRUGO | S_IWUSR,
+ blink1_show_play, blink1_store_play);
+
+static const struct attribute_group blink1_sysfs_group = {
+ .attrs = (struct attribute *[]) {
+ &dev_attr_rgb.attr,
+ &dev_attr_fade.attr,
+ &dev_attr_play.attr,
+ NULL
+ },
+};
+
+static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct blink1_data *data;
+ struct led_classdev *led;
+ char led_name[13];
+ int ret;
+
+ data = devm_kzalloc(&hdev->dev, sizeof(struct blink1_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ hid_set_drvdata(hdev, data);
+ data->hdev = hdev;
+ data->rgb = 0xFFFFFF; /* set a default white color */
+
+ ret = hid_parse(hdev);
+ if (ret)
+ goto error;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret)
+ goto error;
+
+ /* blink(1) serial numbers range is 0x1A001000 to 0x1A002FFF */
+ led = &data->led_cdev;
+ snprintf(led_name, sizeof(led_name), "blink1::%s", hdev->uniq + 4);
+ led->name = led_name;
+ led->brightness_set = blink1_led_set;
+ led->brightness_get = blink1_led_get;
+ ret = led_classdev_register(&hdev->dev, led);
+ if (ret)
+ goto stop;
+
+ ret = sysfs_create_group(&led->dev->kobj, &blink1_sysfs_group);
+ if (ret)
+ goto remove_led;
+
+ return 0;
+
+remove_led:
+ led_classdev_unregister(led);
+stop:
+ hid_hw_stop(hdev);
+error:
+ return ret;
+}
+
+static void thingm_remove(struct hid_device *hdev)
+{
+ struct blink1_data *data = hid_get_drvdata(hdev);
+ struct led_classdev *led = &data->led_cdev;
+
+ sysfs_remove_group(&led->dev->kobj, &blink1_sysfs_group);
+ led_classdev_unregister(led);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id thingm_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, thingm_table);
+
+static struct hid_driver thingm_driver = {
+ .name = "thingm",
+ .probe = thingm_probe,
+ .remove = thingm_remove,
+ .id_table = thingm_table,
+};
+
+module_hid_driver(thingm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vivien Didelot <vivien.didelot@savoirfairelinux.com>");
+MODULE_DESCRIPTION("ThingM blink(1) USB RGB LED driver");
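
In blink1_update_color() above, the stored RGB value is dimmed by an integer coefficient, coef = DIV_ROUND_CLOSEST(255, brightness), and each 8-bit channel is divided by that coefficient before being placed in the 'n'/'c' feature report. A stand-alone rendering of that arithmetic (the DIV_ROUND_CLOSEST here mirrors the kernel macro for non-negative operands; the RGB and brightness values are just examples):

/* Stand-alone sketch of the channel scaling done by blink1_update_color()
 * above. Example values only; nothing here touches the device. */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned int rgb = 0x80FF40;		/* stored R/G/B */
	unsigned int brightness = 128;		/* LED class brightness */
	unsigned int coef = DIV_ROUND_CLOSEST(255, brightness);

	unsigned int r = DIV_ROUND_CLOSEST((rgb & 0xFF0000) >> 16, coef);
	unsigned int g = DIV_ROUND_CLOSEST((rgb & 0x00FF00) >> 8, coef);
	unsigned int b = DIV_ROUND_CLOSEST((rgb & 0x0000FF) >> 0, coef);

	/* coef = 2, so each channel is roughly halved: 0x40, 0x80, 0x20 */
	printf("coef=%u r=0x%02X g=0x%02X b=0x%02X\n", coef, r, g, b);
	return 0;
}

With brightness 128 the coefficient is 2, so every channel is roughly halved; at full brightness (255) the coefficient is 1 and the colour passes through unchanged.
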
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
index 9f85f827607f..d790d8d71f7f 100644
--- a/drivers/hid/hid-tivo.c
+++ b/drivers/hid/hid-tivo.c
@@ -73,18 +73,7 @@ static struct hid_driver tivo_driver = {
.id_table = tivo_devices,
.input_mapping = tivo_input_mapping,
};
+module_hid_driver(tivo_driver);
-static int __init tivo_init(void)
-{
- return hid_register_driver(&tivo_driver);
-}
-
-static void __exit tivo_exit(void)
-{
- hid_unregister_driver(&tivo_driver);
-}
-
-module_init(tivo_init);
-module_exit(tivo_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 83a933b9c2e9..e4fcf3f702a5 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -261,17 +261,6 @@ static struct hid_driver tm_driver = {
.id_table = tm_devices,
.probe = tm_probe,
};
+module_hid_driver(tm_driver);
-static int __init tm_init(void)
-{
- return hid_register_driver(&tm_driver);
-}
-
-static void __exit tm_exit(void)
-{
- hid_unregister_driver(&tm_driver);
-}
-
-module_init(tm_init);
-module_exit(tm_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 613ff7b1d746..8a5b843e9dd6 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -76,17 +76,6 @@ static struct hid_driver ts_driver = {
.id_table = ts_devices,
.input_mapping = ts_input_mapping,
};
+module_hid_driver(ts_driver);
-static int __init ts_init(void)
-{
- return hid_register_driver(&ts_driver);
-}
-
-static void __exit ts_exit(void)
-{
- hid_unregister_driver(&ts_driver);
-}
-
-module_init(ts_init);
-module_exit(ts_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-twinhan.c b/drivers/hid/hid-twinhan.c
index f23456b1fd4b..c08c36443f83 100644
--- a/drivers/hid/hid-twinhan.c
+++ b/drivers/hid/hid-twinhan.c
@@ -131,17 +131,6 @@ static struct hid_driver twinhan_driver = {
.id_table = twinhan_devices,
.input_mapping = twinhan_input_mapping,
};
+module_hid_driver(twinhan_driver);
-static int __init twinhan_init(void)
-{
- return hid_register_driver(&twinhan_driver);
-}
-
-static void __exit twinhan_exit(void)
-{
- hid_unregister_driver(&twinhan_driver);
-}
-
-module_init(twinhan_init);
-module_exit(twinhan_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 2e56a1fd2375..fb8b516ff0ed 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -650,17 +650,6 @@ static struct hid_driver uclogic_driver = {
.id_table = uclogic_devices,
.report_fixup = uclogic_report_fixup,
};
+module_hid_driver(uclogic_driver);
-static int __init uclogic_init(void)
-{
- return hid_register_driver(&uclogic_driver);
-}
-
-static void __exit uclogic_exit(void)
-{
- hid_unregister_driver(&uclogic_driver);
-}
-
-module_init(uclogic_init);
-module_exit(uclogic_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 2f60da9ed066..a4a8bb0da688 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -953,23 +953,7 @@ static struct hid_driver wacom_driver = {
.raw_event = wacom_raw_event,
.input_mapped = wacom_input_mapped,
};
+module_hid_driver(wacom_driver);
-static int __init wacom_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&wacom_driver);
- if (ret)
- pr_err("can't register wacom driver\n");
- return ret;
-}
-
-static void __exit wacom_exit(void)
-{
- hid_unregister_driver(&wacom_driver);
-}
-
-module_init(wacom_init);
-module_exit(wacom_exit);
MODULE_DESCRIPTION("Driver for Wacom Graphire Bluetooth and Wacom Intuos4 WL");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-waltop.c b/drivers/hid/hid-waltop.c
index bb536ab5941e..059931d7b392 100644
--- a/drivers/hid/hid-waltop.c
+++ b/drivers/hid/hid-waltop.c
@@ -779,17 +779,6 @@ static struct hid_driver waltop_driver = {
.report_fixup = waltop_report_fixup,
.raw_event = waltop_raw_event,
};
+module_hid_driver(waltop_driver);
-static int __init waltop_init(void)
-{
- return hid_register_driver(&waltop_driver);
-}
-
-static void __exit waltop_exit(void)
-{
- hid_unregister_driver(&waltop_driver);
-}
-
-module_init(waltop_init);
-module_exit(waltop_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 84e2fbec5fbb..0fb8ab93db68 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1294,25 +1294,8 @@ static struct hid_driver wiimote_hid_driver = {
.remove = wiimote_hid_remove,
.raw_event = wiimote_hid_event,
};
+module_hid_driver(wiimote_hid_driver);
-static int __init wiimote_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&wiimote_hid_driver);
- if (ret)
- pr_err("Can't register wiimote hid driver\n");
-
- return ret;
-}
-
-static void __exit wiimote_exit(void)
-{
- hid_unregister_driver(&wiimote_hid_driver);
-}
-
-module_init(wiimote_init);
-module_exit(wiimote_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION(WIIMOTE_NAME " Device Driver");
diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
index eec329197c16..90124ffaa2a5 100644
--- a/drivers/hid/hid-wiimote-debug.c
+++ b/drivers/hid/hid-wiimote-debug.c
@@ -31,7 +31,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
unsigned long flags;
ssize_t ret;
char buf[16];
- __u16 size;
+ __u16 size = 0;
if (s == 0)
return -EINVAL;
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
index 38ae87772e96..0472191d4a72 100644
--- a/drivers/hid/hid-wiimote-ext.c
+++ b/drivers/hid/hid-wiimote-ext.c
@@ -403,14 +403,14 @@ static void handler_nunchuck(struct wiimote_ext *ext, const __u8 *payload)
if (ext->motionp) {
input_report_key(ext->input,
- wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x04));
+ wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x04));
input_report_key(ext->input,
- wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x08));
+ wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x08));
} else {
input_report_key(ext->input,
- wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x01));
+ wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x01));
input_report_key(ext->input,
- wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x02));
+ wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x02));
}
input_sync(ext->input);
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index f6ba81df71bd..af66452592e9 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -152,17 +152,6 @@ static struct hid_driver zp_driver = {
.id_table = zp_devices,
.probe = zp_probe,
};
+module_hid_driver(zp_driver);
-static int __init zp_init(void)
-{
- return hid_register_driver(&zp_driver);
-}
-
-static void __exit zp_exit(void)
-{
- hid_unregister_driver(&zp_driver);
-}
-
-module_init(zp_init);
-module_exit(zp_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
index 1ad85f2257b4..e4cddeccd6b5 100644
--- a/drivers/hid/hid-zydacron.c
+++ b/drivers/hid/hid-zydacron.c
@@ -219,17 +219,6 @@ static struct hid_driver zc_driver = {
.probe = zc_probe,
.remove = zc_remove,
};
+module_hid_driver(zc_driver);
-static int __init zc_init(void)
-{
- return hid_register_driver(&zc_driver);
-}
-
-static void __exit zc_exit(void)
-{
- hid_unregister_driver(&zc_driver);
-}
-
-module_init(zc_init);
-module_exit(zc_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 413a73187d33..a7451632ceb4 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -108,7 +108,7 @@ out:
* This function is to be called with the minors_lock mutex held */
static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
struct hid_device *dev;
__u8 *buf;
int ret = 0;
@@ -176,7 +176,7 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
* mutex held. */
static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
struct hid_device *dev;
__u8 *buf;
int ret = 0, len;
@@ -340,7 +340,7 @@ unlock:
static long hidraw_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
unsigned int minor = iminor(inode);
long ret = 0;
struct hidraw *dev;
@@ -581,6 +581,7 @@ int __init hidraw_init(void)
if (result < 0)
goto error_class;
+ printk(KERN_INFO "hidraw: raw HID events driver (C) Jiri Kosina\n");
out:
return result;
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index e766b5614ef5..ec7930217a6d 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -34,6 +34,7 @@
#include <linux/kernel.h>
#include <linux/hid.h>
#include <linux/mutex.h>
+#include <linux/acpi.h>
#include <linux/i2c/i2c-hid.h>
@@ -139,6 +140,8 @@ struct i2c_hid {
unsigned long flags; /* device flags */
wait_queue_head_t wait; /* For waiting the interrupt */
+
+ struct i2c_hid_platform_data pdata;
};
static int __i2c_hid_command(struct i2c_client *client,
@@ -821,6 +824,70 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
return 0;
}
+#ifdef CONFIG_ACPI
+static int i2c_hid_acpi_pdata(struct i2c_client *client,
+ struct i2c_hid_platform_data *pdata)
+{
+ static u8 i2c_hid_guid[] = {
+ 0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
+ 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
+ };
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object params[4], *obj;
+ struct acpi_object_list input;
+ struct acpi_device *adev;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev))
+ return -ENODEV;
+
+ input.count = ARRAY_SIZE(params);
+ input.pointer = params;
+
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = sizeof(i2c_hid_guid);
+ params[0].buffer.pointer = i2c_hid_guid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = 1;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = 1; /* HID function */
+ params[3].type = ACPI_TYPE_INTEGER;
+ params[3].integer.value = 0;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) {
+ dev_err(&client->dev, "device _DSM execution failed\n");
+ return -ENODEV;
+ }
+
+ obj = (union acpi_object *)buf.pointer;
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ dev_err(&client->dev, "device _DSM returned invalid type: %d\n",
+ obj->type);
+ kfree(buf.pointer);
+ return -EINVAL;
+ }
+
+ pdata->hid_descriptor_address = obj->integer.value;
+
+ kfree(buf.pointer);
+ return 0;
+}
+
+static const struct acpi_device_id i2c_hid_acpi_match[] = {
+ {"ACPI0C50", 0 },
+ {"PNP0C50", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
+#else
+static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
+ struct i2c_hid_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+
static int i2c_hid_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
@@ -832,11 +899,6 @@ static int i2c_hid_probe(struct i2c_client *client,
dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
- if (!platform_data) {
- dev_err(&client->dev, "HID register address not provided\n");
- return -EINVAL;
- }
-
if (!client->irq) {
dev_err(&client->dev,
"HID over i2c has not been provided an Int IRQ\n");
@@ -847,11 +909,22 @@ static int i2c_hid_probe(struct i2c_client *client,
if (!ihid)
return -ENOMEM;
+ if (!platform_data) {
+ ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
+ if (ret) {
+ dev_err(&client->dev,
+ "HID register address not provided\n");
+ goto err;
+ }
+ } else {
+ ihid->pdata = *platform_data;
+ }
+
i2c_set_clientdata(client, ihid);
ihid->client = client;
- hidRegister = platform_data->hid_descriptor_address;
+ hidRegister = ihid->pdata.hid_descriptor_address;
ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
init_waitqueue_head(&ihid->wait);
@@ -884,6 +957,7 @@ static int i2c_hid_probe(struct i2c_client *client,
hid->hid_get_raw_report = i2c_hid_get_raw_report;
hid->hid_output_raw_report = i2c_hid_output_raw_report;
hid->dev.parent = &client->dev;
+ ACPI_HANDLE_SET(&hid->dev, ACPI_HANDLE(&client->dev));
hid->bus = BUS_I2C;
hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
@@ -975,6 +1049,7 @@ static struct i2c_driver i2c_hid_driver = {
.name = "i2c_hid",
.owner = THIS_MODULE,
.pm = &i2c_hid_pm,
+ .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
},
.probe = i2c_hid_probe,
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 714cd8cc9579..fc307e0422af 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -11,6 +11,7 @@
*/
#include <linux/atomic.h>
+#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
@@ -276,6 +277,94 @@ static struct hid_ll_driver uhid_hid_driver = {
.parse = uhid_hid_parse,
};
+#ifdef CONFIG_COMPAT
+
+/* Apparently we haven't stepped on these rakes enough times yet. */
+struct uhid_create_req_compat {
+ __u8 name[128];
+ __u8 phys[64];
+ __u8 uniq[64];
+
+ compat_uptr_t rd_data;
+ __u16 rd_size;
+
+ __u16 bus;
+ __u32 vendor;
+ __u32 product;
+ __u32 version;
+ __u32 country;
+} __attribute__((__packed__));
+
+static int uhid_event_from_user(const char __user *buffer, size_t len,
+ struct uhid_event *event)
+{
+ if (is_compat_task()) {
+ u32 type;
+
+ if (get_user(type, buffer))
+ return -EFAULT;
+
+ if (type == UHID_CREATE) {
+ /*
+ * This is our messed up request with compat pointer.
+ * It is largish (more than 256 bytes) so we better
+ * allocate it from the heap.
+ */
+ struct uhid_create_req_compat *compat;
+
+ compat = kmalloc(sizeof(*compat), GFP_KERNEL);
+ if (!compat)
+ return -ENOMEM;
+
+ buffer += sizeof(type);
+ len -= sizeof(type);
+ if (copy_from_user(compat, buffer,
+ min(len, sizeof(*compat)))) {
+ kfree(compat);
+ return -EFAULT;
+ }
+
+ /* Shuffle the data over to proper structure */
+ event->type = type;
+
+ memcpy(event->u.create.name, compat->name,
+ sizeof(compat->name));
+ memcpy(event->u.create.phys, compat->phys,
+ sizeof(compat->phys));
+ memcpy(event->u.create.uniq, compat->uniq,
+ sizeof(compat->uniq));
+
+ event->u.create.rd_data = compat_ptr(compat->rd_data);
+ event->u.create.rd_size = compat->rd_size;
+
+ event->u.create.bus = compat->bus;
+ event->u.create.vendor = compat->vendor;
+ event->u.create.product = compat->product;
+ event->u.create.version = compat->version;
+ event->u.create.country = compat->country;
+
+ kfree(compat);
+ return 0;
+ }
+ /* All others can be copied directly */
+ }
+
+ if (copy_from_user(event, buffer, min(len, sizeof(*event))))
+ return -EFAULT;
+
+ return 0;
+}
+#else
+static int uhid_event_from_user(const char __user *buffer, size_t len,
+ struct uhid_event *event)
+{
+ if (copy_from_user(event, buffer, min(len, sizeof(*event))))
+ return -EFAULT;
+
+ return 0;
+}
+#endif
+
static int uhid_dev_create(struct uhid_device *uhid,
const struct uhid_event *ev)
{
@@ -498,10 +587,10 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
len = min(count, sizeof(uhid->input_buf));
- if (copy_from_user(&uhid->input_buf, buffer, len)) {
- ret = -EFAULT;
+
+ ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
+ if (ret)
goto unlock;
- }
switch (uhid->input_buf.type) {
case UHID_CREATE:
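
The UHID_CREATE compat path added above exists because the request embeds a user-space pointer (rd_data): a 32-bit process lays the structure out with a 4-byte pointer while a 64-bit kernel expects 8 bytes, so the size and the offsets of every following member disagree. The packed uhid_create_req_compat mirrors the 32-bit layout and compat_ptr() widens the pointer. A trimmed-down illustration of the mismatch (struct members reduced and renamed for the demo; compile as a 64-bit program):

/* Demo of why the packed compat variant is needed: the only difference
 * is the width of the report-descriptor pointer, yet it changes the
 * structure size and the offsets of everything that follows it. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct create_req_native {		/* what 64-bit userspace passes */
	uint8_t  name[128];
	uint8_t *rd_data;
	uint16_t rd_size;
	uint16_t bus;
} __attribute__((__packed__));

struct create_req_compat {		/* what 32-bit userspace passes */
	uint8_t  name[128];
	uint32_t rd_data;		/* compat_uptr_t */
	uint16_t rd_size;
	uint16_t bus;
} __attribute__((__packed__));

int main(void)
{
	printf("native: size %zu, bus at %zu\n",
	       sizeof(struct create_req_native),
	       offsetof(struct create_req_native, bus));
	printf("compat: size %zu, bus at %zu\n",
	       sizeof(struct create_req_compat),
	       offsetof(struct create_req_compat, bus));
	return 0;
}

On x86-64 the demo prints a 4-byte difference in both the size and the offsets, which is exactly the skew that the copy-and-shuffle in uhid_event_from_user() papers over.
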
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
index 2d58f939d27f..833dd1afbf46 100644
--- a/drivers/hsi/hsi.c
+++ b/drivers/hsi/hsi.c
@@ -420,7 +420,7 @@ static int hsi_event_notifier_call(struct notifier_block *nb,
/**
* hsi_register_port_event - Register a client to receive port events
* @cl: HSI client that wants to receive port events
- * @cb: Event handler callback
+ * @handler: Event handler callback
*
* Clients should register a callback to be able to receive
* events from the ports. Registration should happen after
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 773a2f25a8f0..0b122f8c7005 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -55,7 +55,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
[channel->monitor_grp].pending);
} else {
- vmbus_set_event(channel->offermsg.child_relid);
+ vmbus_set_event(channel);
}
}
@@ -181,7 +181,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
PAGE_SHIFT;
- open_msg->server_contextarea_gpadlhandle = 0;
+ open_msg->target_vp = newchannel->target_vp;
if (userdatalen > MAX_USER_DEFINED_BYTES) {
err = -EINVAL;
@@ -564,6 +564,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
int ret;
+ bool signal = false;
/* Setup the descriptor */
@@ -580,9 +581,9 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -606,6 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
+ bool signal = false;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
@@ -641,9 +643,9 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -665,6 +667,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
+ bool signal = false;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -703,9 +706,9 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -732,6 +735,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
u32 packetlen;
u32 userlen;
int ret;
+ bool signal = false;
*buffer_actual_len = 0;
*requestid = 0;
@@ -758,8 +762,10 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
/* Copy over the packet to the user buffer */
ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
- (desc.offset8 << 3));
+ (desc.offset8 << 3), &signal);
+ if (signal)
+ vmbus_setevent(channel);
return 0;
}
@@ -774,8 +780,8 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
{
struct vmpacket_descriptor desc;
u32 packetlen;
- u32 userlen;
int ret;
+ bool signal = false;
*buffer_actual_len = 0;
*requestid = 0;
@@ -788,7 +794,6 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
packetlen = desc.len8 << 3;
- userlen = packetlen - (desc.offset8 << 3);
*buffer_actual_len = packetlen;
@@ -802,7 +807,11 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
*requestid = desc.trans_id;
/* Copy over the entire packet to the user buffer */
- ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0);
+ ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
+ &signal);
+
+ if (signal)
+ vmbus_setevent(channel);
return 0;
}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2f84c5cff8d4..53a8600162a5 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -257,6 +257,70 @@ static void vmbus_process_offer(struct work_struct *work)
}
}
+enum {
+ IDE = 0,
+ SCSI,
+ NIC,
+ MAX_PERF_CHN,
+};
+
+/*
+ * This is an array of device_ids (device types) that are performance critical.
+ * We attempt to distribute the interrupt load for these devices across
+ * all available CPUs.
+ */
+static const struct hv_vmbus_device_id hp_devs[] = {
+ /* IDE */
+ { HV_IDE_GUID, },
+ /* Storage - SCSI */
+ { HV_SCSI_GUID, },
+ /* Network */
+ { HV_NIC_GUID, },
+};
+
+
+/*
+ * We use this state to statically distribute the channel interrupt load.
+ */
+static u32 next_vp;
+
+/*
+ * Starting with Win8, we can statically distribute the incoming
+ * channel interrupt load by binding a channel to a VCPU. We
+ * implement a simple round-robin scheme here for distributing
+ * the interrupt load.
+ * We will bind channels that are not performance critical to cpu 0 and
+ * performance critical channels (IDE, SCSI and Network) will be uniformly
+ * distributed across all available CPUs.
+ */
+static u32 get_vp_index(uuid_le *type_guid)
+{
+ u32 cur_cpu;
+ int i;
+ bool perf_chn = false;
+ u32 max_cpus = num_online_cpus();
+
+ for (i = IDE; i < MAX_PERF_CHN; i++) {
+ if (!memcmp(type_guid->b, hp_devs[i].guid,
+ sizeof(uuid_le))) {
+ perf_chn = true;
+ break;
+ }
+ }
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
+ /*
+ * Prior to win8, all channel interrupts are
+ * delivered on cpu 0.
+ * Also if the channel is not a performance critical
+ * channel, bind it to cpu 0.
+ */
+ return 0;
+ }
+ cur_cpu = (++next_vp % max_cpus);
+ return hv_context.vp_index[cur_cpu];
+}
+
/*
* vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
*
@@ -275,6 +339,35 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
return;
}
+ /*
+ * By default we setup state to enable batched
+ * reading. A specific service can choose to
+ * disable this prior to opening the channel.
+ */
+ newchannel->batched_reading = true;
+
+ /*
+ * Setup state for signalling the host.
+ */
+ newchannel->sig_event = (struct hv_input_signal_event *)
+ (ALIGN((unsigned long)
+ &newchannel->sig_buf,
+ HV_HYPERCALL_PARAM_ALIGN));
+
+ newchannel->sig_event->connectionid.asu32 = 0;
+ newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
+ newchannel->sig_event->flag_number = 0;
+ newchannel->sig_event->rsvdz = 0;
+
+ if (vmbus_proto_version != VERSION_WS2008) {
+ newchannel->is_dedicated_interrupt =
+ (offer->is_dedicated_interrupt != 0);
+ newchannel->sig_event->connectionid.u.id =
+ offer->connection_id;
+ }
+
+ newchannel->target_vp = get_vp_index(&offer->offer.if_type);
+
memcpy(&newchannel->offermsg, offer,
sizeof(struct vmbus_channel_offer_channel));
newchannel->monitor_grp = (u8)offer->monitorid / 32;
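
get_vp_index() above spreads only the performance-critical offers (IDE, SCSI, network) across CPUs, using nothing more than a global counter taken modulo the number of online CPUs, and the chosen Linux CPU can then be translated into Hyper-V's VP numbering via the hv_context.vp_index[] table that hv_synic_init() fills in further down. A compact sketch of the round-robin part (CPU count and channel list are invented for the demo):

/* Round-robin sketch matching the scheme in get_vp_index() above:
 * performance-critical channels cycle through the online CPUs,
 * everything else stays on CPU 0. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int next_vp;

static unsigned int pick_cpu(bool perf_chn, unsigned int max_cpus)
{
	if (!perf_chn)
		return 0;			/* non-critical: CPU 0 */
	return ++next_vp % max_cpus;		/* critical: next CPU */
}

int main(void)
{
	const char *chan[] = { "netvsc", "storvsc", "heartbeat", "netvsc" };
	bool perf[]        = { true,     true,      false,       true    };
	unsigned int i, cpus = 4;

	/* prints cpu 1, 2, 0, 3 with four online CPUs */
	for (i = 0; i < 4; i++)
		printf("%-9s -> cpu %u\n", chan[i], pick_cpu(perf[i], cpus));
	return 0;
}
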
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 650c9f0b6642..253a74ba245c 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"
@@ -40,15 +41,99 @@ struct vmbus_connection vmbus_connection = {
};
/*
+ * Negotiated protocol version with the host.
+ */
+__u32 vmbus_proto_version;
+EXPORT_SYMBOL_GPL(vmbus_proto_version);
+
+static __u32 vmbus_get_next_version(__u32 current_version)
+{
+ switch (current_version) {
+ case (VERSION_WIN7):
+ return VERSION_WS2008;
+
+ case (VERSION_WIN8):
+ return VERSION_WIN7;
+
+ case (VERSION_WS2008):
+ default:
+ return VERSION_INVAL;
+ }
+}
+
+static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
+ __u32 version)
+{
+ int ret = 0;
+ struct vmbus_channel_initiate_contact *msg;
+ unsigned long flags;
+ int t;
+
+ init_completion(&msginfo->waitevent);
+
+ msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
+
+ msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
+ msg->vmbus_version_requested = version;
+ msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
+ msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
+ msg->monitor_page2 = virt_to_phys(
+ (void *)((unsigned long)vmbus_connection.monitor_pages +
+ PAGE_SIZE));
+
+ /*
+ * Add to list before we send the request since we may
+ * receive the response before returning from this routine
+ */
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&msginfo->msglistentry,
+ &vmbus_connection.chn_msg_list);
+
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ ret = vmbus_post_msg(msg,
+ sizeof(struct vmbus_channel_initiate_contact));
+ if (ret != 0) {
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+ flags);
+ return ret;
+ }
+
+ /* Wait for the connection response */
+ t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+ if (t == 0) {
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
+ flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+ flags);
+ return -ETIMEDOUT;
+ }
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ /* Check if successful */
+ if (msginfo->response.version_response.version_supported) {
+ vmbus_connection.conn_state = CONNECTED;
+ } else {
+ return -ECONNREFUSED;
+ }
+
+ return ret;
+}
+
+/*
* vmbus_connect - Sends a connect request on the partition service connection
*/
int vmbus_connect(void)
{
int ret = 0;
- int t;
struct vmbus_channel_msginfo *msginfo = NULL;
- struct vmbus_channel_initiate_contact *msg;
- unsigned long flags;
+ __u32 version;
/* Initialize the vmbus connection */
vmbus_connection.conn_state = CONNECTING;
@@ -99,69 +184,38 @@ int vmbus_connect(void)
goto cleanup;
}
- init_completion(&msginfo->waitevent);
-
- msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
-
- msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
- msg->vmbus_version_requested = VMBUS_REVISION_NUMBER;
- msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
- msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
- msg->monitor_page2 = virt_to_phys(
- (void *)((unsigned long)vmbus_connection.monitor_pages +
- PAGE_SIZE));
-
/*
- * Add to list before we send the request since we may
- * receive the response before returning from this routine
+ * Negotiate a compatible VMBUS version number with the
+ * host. We start with the highest number we can support
+ * and work our way down until we negotiate a compatible
+ * version.
*/
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_add_tail(&msginfo->msglistentry,
- &vmbus_connection.chn_msg_list);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ version = VERSION_CURRENT;
- ret = vmbus_post_msg(msg,
- sizeof(struct vmbus_channel_initiate_contact));
- if (ret != 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- goto cleanup;
- }
+ do {
+ ret = vmbus_negotiate_version(msginfo, version);
+ if (ret == 0)
+ break;
- /* Wait for the connection response */
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
- if (t == 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
- flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ version = vmbus_get_next_version(version);
+ } while (version != VERSION_INVAL);
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
- /* Check if successful */
- if (msginfo->response.version_response.version_supported) {
- vmbus_connection.conn_state = CONNECTED;
- } else {
- pr_err("Unable to connect, "
- "Version %d not supported by Hyper-V\n",
- VMBUS_REVISION_NUMBER);
- ret = -ECONNREFUSED;
+ if (version == VERSION_INVAL)
goto cleanup;
- }
+
+ vmbus_proto_version = version;
+ pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n",
+ host_info_eax, host_info_ebx >> 16,
+ host_info_ebx & 0xFFFF, host_info_ecx,
+ host_info_edx >> 24, host_info_edx & 0xFFFFFF,
+ version >> 16, version & 0xFFFF);
kfree(msginfo);
return 0;
cleanup:
+ pr_err("Unable to connect to host\n");
vmbus_connection.conn_state = DISCONNECTED;
if (vmbus_connection.work_queue)
@@ -212,6 +266,9 @@ static void process_chn_event(u32 relid)
{
struct vmbus_channel *channel;
unsigned long flags;
+ void *arg;
+ bool read_state;
+ u32 bytes_to_read;
/*
* Find the channel based on this relid and invokes the
@@ -234,10 +291,29 @@ static void process_chn_event(u32 relid)
*/
spin_lock_irqsave(&channel->inbound_lock, flags);
- if (channel->onchannel_callback != NULL)
- channel->onchannel_callback(channel->channel_callback_context);
- else
+ if (channel->onchannel_callback != NULL) {
+ arg = channel->channel_callback_context;
+ read_state = channel->batched_reading;
+ /*
+ * This callback reads the messages sent by the host.
+ * We can optimize host to guest signaling by ensuring:
+ * 1. While reading the channel, we disable interrupts from
+ * host.
+ * 2. Ensure that we process all posted messages from the host
+ * before returning from this callback.
+ * 3. Once we return, enable signaling from the host. Once this
+ * state is set we check to see if additional packets are
+ * available to read. In this case we repeat the process.
+ */
+
+ do {
+ hv_begin_read(&channel->inbound);
+ channel->onchannel_callback(arg);
+ bytes_to_read = hv_end_read(&channel->inbound);
+ } while (read_state && (bytes_to_read != 0));
+ } else {
pr_err("no channel callback for relid - %u\n", relid);
+ }
spin_unlock_irqrestore(&channel->inbound_lock, flags);
}
@@ -248,10 +324,32 @@ static void process_chn_event(u32 relid)
void vmbus_on_event(unsigned long data)
{
u32 dword;
- u32 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+ u32 maxdword;
int bit;
u32 relid;
- u32 *recv_int_page = vmbus_connection.recv_int_page;
+ u32 *recv_int_page = NULL;
+ void *page_addr;
+ int cpu = smp_processor_id();
+ union hv_synic_event_flags *event;
+
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7)) {
+ maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+ recv_int_page = vmbus_connection.recv_int_page;
+ } else {
+ /*
+ * When the host is win8 and beyond, the event page
+ * can be directly checked to get the id of the channel
+ * that has the interrupt pending.
+ */
+ maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
+ page_addr = hv_context.synic_event_page[cpu];
+ event = (union hv_synic_event_flags *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ recv_int_page = event->flags32;
+ }
+
+
/* Check events */
if (!recv_int_page)
@@ -307,12 +405,16 @@ int vmbus_post_msg(void *buffer, size_t buflen)
/*
* vmbus_set_event - Send an event notification to the parent
*/
-int vmbus_set_event(u32 child_relid)
+int vmbus_set_event(struct vmbus_channel *channel)
{
- /* Each u32 represents 32 channels */
- sync_set_bit(child_relid & 31,
- (unsigned long *)vmbus_connection.send_int_page +
- (child_relid >> 5));
+ u32 child_relid = channel->offermsg.child_relid;
+
+ if (!channel->is_dedicated_interrupt) {
+ /* Each u32 represents 32 channels */
+ sync_set_bit(child_relid & 31,
+ (unsigned long *)vmbus_connection.send_int_page +
+ (child_relid >> 5));
+ }
- return hv_signal_event();
+ return hv_signal_event(channel->sig_event);
}
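
vmbus_connect() above now starts at VERSION_CURRENT and, through vmbus_get_next_version(), retries the INITIATE_CONTACT handshake with progressively older protocol versions until the host accepts one or the list is exhausted; the winner is kept in vmbus_proto_version and logged as major.minor (version >> 16, version & 0xFFFF). A minimal sketch of that downgrade loop, with stand-in version constants and a simulated host:

/* Downgrade-loop sketch mirroring vmbus_connect()/vmbus_get_next_version()
 * above. The version numbers are placeholders in the same
 * (major << 16 | minor) encoding the driver prints; the host's
 * acceptance test is simulated. */
#include <stdio.h>

#define VER_INVAL 0xFFFFFFFFu

static unsigned int next_version(unsigned int v)
{
	switch (v) {
	case (2 << 16) | 4: return (1 << 16) | 1;	/* newest -> older  */
	case (1 << 16) | 1: return (0 << 16) | 13;	/* older  -> oldest */
	default:            return VER_INVAL;
	}
}

static int host_accepts(unsigned int v)
{
	return v == ((1 << 16) | 1);	/* pretend the host only speaks 1.1 */
}

int main(void)
{
	unsigned int version = (2 << 16) | 4;	/* start at the newest */

	do {
		if (host_accepts(version))
			break;
		version = next_version(version);
	} while (version != VER_INVAL);

	if (version != VER_INVAL)
		printf("negotiated %u.%u\n", version >> 16, version & 0xFFFF);
	else
		printf("no compatible version\n");
	return 0;
}
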
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 3648f8f0f368..1c5481da6e4a 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -27,6 +27,7 @@
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
+#include <linux/interrupt.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"
@@ -34,13 +35,16 @@
struct hv_context hv_context = {
.synic_initialized = false,
.hypercall_page = NULL,
- .signal_event_param = NULL,
- .signal_event_buffer = NULL,
};
/*
* query_hypervisor_info - Get version info of the windows hypervisor
*/
+unsigned int host_info_eax;
+unsigned int host_info_ebx;
+unsigned int host_info_ecx;
+unsigned int host_info_edx;
+
static int query_hypervisor_info(void)
{
unsigned int eax;
@@ -70,13 +74,10 @@ static int query_hypervisor_info(void)
edx = 0;
op = HVCPUID_VERSION;
cpuid(op, &eax, &ebx, &ecx, &edx);
- pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
- eax,
- ebx >> 16,
- ebx & 0xFFFF,
- ecx,
- edx >> 24,
- edx & 0xFFFFFF);
+ host_info_eax = eax;
+ host_info_ebx = ebx;
+ host_info_ecx = ecx;
+ host_info_edx = edx;
}
return max_leaf;
}
@@ -137,6 +138,10 @@ int hv_init(void)
memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
memset(hv_context.synic_message_page, 0,
sizeof(void *) * NR_CPUS);
+ memset(hv_context.vp_index, 0,
+ sizeof(int) * NR_CPUS);
+ memset(hv_context.event_dpc, 0,
+ sizeof(void *) * NR_CPUS);
max_leaf = query_hypervisor_info();
@@ -168,24 +173,6 @@ int hv_init(void)
hv_context.hypercall_page = virtaddr;
- /* Setup the global signal event param for the signal event hypercall */
- hv_context.signal_event_buffer =
- kmalloc(sizeof(struct hv_input_signal_event_buffer),
- GFP_KERNEL);
- if (!hv_context.signal_event_buffer)
- goto cleanup;
-
- hv_context.signal_event_param =
- (struct hv_input_signal_event *)
- (ALIGN((unsigned long)
- hv_context.signal_event_buffer,
- HV_HYPERCALL_PARAM_ALIGN));
- hv_context.signal_event_param->connectionid.asu32 = 0;
- hv_context.signal_event_param->connectionid.u.id =
- VMBUS_EVENT_CONNECTION_ID;
- hv_context.signal_event_param->flag_number = 0;
- hv_context.signal_event_param->rsvdz = 0;
-
return 0;
cleanup:
@@ -213,10 +200,6 @@ void hv_cleanup(void)
/* Reset our OS id */
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
- kfree(hv_context.signal_event_buffer);
- hv_context.signal_event_buffer = NULL;
- hv_context.signal_event_param = NULL;
-
if (hv_context.hypercall_page) {
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -273,13 +256,12 @@ int hv_post_message(union hv_connection_id connection_id,
*
* This involves a hypercall.
*/
-u16 hv_signal_event(void)
+u16 hv_signal_event(void *con_id)
{
u16 status;
- status = do_hypercall(HVCALL_SIGNAL_EVENT,
- hv_context.signal_event_param,
- NULL) & 0xFFFF;
+ status = (do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL) & 0xFFFF);
+
return status;
}
@@ -297,6 +279,7 @@ void hv_synic_init(void *irqarg)
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
union hv_synic_scontrol sctrl;
+ u64 vp_index;
u32 irq_vector = *((u32 *)(irqarg));
int cpu = smp_processor_id();
@@ -307,6 +290,15 @@ void hv_synic_init(void *irqarg)
/* Check the version */
rdmsrl(HV_X64_MSR_SVERSION, version);
+ hv_context.event_dpc[cpu] = (struct tasklet_struct *)
+ kmalloc(sizeof(struct tasklet_struct),
+ GFP_ATOMIC);
+ if (hv_context.event_dpc[cpu] == NULL) {
+ pr_err("Unable to allocate event dpc\n");
+ goto cleanup;
+ }
+ tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+
hv_context.synic_message_page[cpu] =
(void *)get_zeroed_page(GFP_ATOMIC);
@@ -345,7 +337,7 @@ void hv_synic_init(void *irqarg)
shared_sint.as_uint64 = 0;
shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
shared_sint.masked = false;
- shared_sint.auto_eoi = false;
+ shared_sint.auto_eoi = true;
wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
@@ -356,6 +348,14 @@ void hv_synic_init(void *irqarg)
wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
hv_context.synic_initialized = true;
+
+ /*
+ * Setup the mapping between Hyper-V's notion
+ * of cpuid and Linux' notion of cpuid.
+ * This array will be indexed using Linux cpuid.
+ */
+ rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
+ hv_context.vp_index[cpu] = (u32)vp_index;
return;
cleanup:
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index dd289fd179ca..37873213e24f 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -29,7 +29,6 @@
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
-#include <linux/mman.h>
#include <linux/percpu_counter.h>
#include <linux/hyperv.h>
@@ -415,10 +414,17 @@ struct dm_info_msg {
static bool hot_add;
static bool do_hot_add;
+/*
+ * Delay reporting memory pressure by
+ * the specified number of seconds.
+ */
+static uint pressure_report_delay = 30;
module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
+module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
static int dm_ring_size = (5 * PAGE_SIZE);
@@ -517,6 +523,34 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
}
}
+unsigned long compute_balloon_floor(void)
+{
+ unsigned long min_pages;
+#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+ /* Simple continuous piecewise linear function:
+ * max MiB -> min MiB gradient
+ * 0 0
+ * 16 16
+ * 32 24
+ * 128 72 (1/2)
+ * 512 168 (1/4)
+ * 2048 360 (1/8)
+ * 8192 552 (1/32)
+ * 32768 1320
+ * 131072 4392
+ */
+ if (totalram_pages < MB2PAGES(128))
+ min_pages = MB2PAGES(8) + (totalram_pages >> 1);
+ else if (totalram_pages < MB2PAGES(512))
+ min_pages = MB2PAGES(40) + (totalram_pages >> 2);
+ else if (totalram_pages < MB2PAGES(2048))
+ min_pages = MB2PAGES(104) + (totalram_pages >> 3);
+ else
+ min_pages = MB2PAGES(296) + (totalram_pages >> 5);
+#undef MB2PAGES
+ return min_pages;
+}
+
/*
* Post our status as it relates memory pressure to the
* host. Host expects the guests to post this status
@@ -530,15 +564,30 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
static void post_status(struct hv_dynmem_device *dm)
{
struct dm_status status;
+ struct sysinfo val;
-
+ if (pressure_report_delay > 0) {
+ --pressure_report_delay;
+ return;
+ }
+ si_meminfo(&val);
memset(&status, 0, sizeof(struct dm_status));
status.hdr.type = DM_STATUS_REPORT;
status.hdr.size = sizeof(struct dm_status);
status.hdr.trans_id = atomic_inc_return(&trans_id);
-
- status.num_committed = vm_memory_committed();
+ /*
+ * The host expects the guest to report free memory.
+ * Further, the host expects the pressure information to
+ * include the ballooned out pages.
+ * For a given amount of memory that we are managing, we
+ * need to compute a floor below which we should not balloon.
+ * Compute this and add it to the pressure report.
+ */
+ status.num_avail = val.freeram;
+ status.num_committed = vm_memory_committed() +
+ dm->num_pages_ballooned +
+ compute_balloon_floor();
vmbus_sendpacket(dm->dev->channel, &status,
sizeof(struct dm_status),
@@ -547,8 +596,6 @@ static void post_status(struct hv_dynmem_device *dm)
}
-
-
static void free_balloon_pages(struct hv_dynmem_device *dm,
union dm_mem_page_range *range_array)
{
@@ -1013,9 +1060,7 @@ static int balloon_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Dynamic Memory Class ID */
/* 525074DC-8985-46e2-8057-A307DC18A502 */
- { VMBUS_DEVICE(0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46,
- 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
- },
+ { HV_DM_GUID, },
{ },
};
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index a0667de7a04c..1d4cbd8e8261 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -49,6 +49,16 @@ static struct hv_util_service util_kvp = {
.util_deinit = hv_kvp_deinit,
};
+static void perform_shutdown(struct work_struct *dummy)
+{
+ orderly_poweroff(true);
+}
+
+/*
+ * Perform the shutdown operation in a thread context.
+ */
+static DECLARE_WORK(shutdown_work, perform_shutdown);
+
static void shutdown_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
@@ -106,7 +116,7 @@ static void shutdown_onchannelcallback(void *context)
}
if (execute_shutdown == true)
- orderly_poweroff(true);
+ schedule_work(&shutdown_work);
}
/*
@@ -274,6 +284,16 @@ static int util_probe(struct hv_device *dev,
}
}
+ /*
+ * The set of services managed by the util driver is not performance
+ * critical and does not need batched reading. Furthermore, some services
+ * such as KVP can only handle one message from the host at a time.
+ * Turn off batched reading for all util drivers before we open the
+ * channel.
+ */
+
+ set_channel_read_state(dev->channel, false);
+
ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
srv->util_cb, dev->channel);
if (ret)
@@ -304,21 +324,21 @@ static int util_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Shutdown guid */
- { VMBUS_DEVICE(0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49,
- 0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB)
- .driver_data = (unsigned long)&util_shutdown },
+ { HV_SHUTDOWN_GUID,
+ .driver_data = (unsigned long)&util_shutdown
+ },
/* Time synch guid */
- { VMBUS_DEVICE(0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
- 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
- .driver_data = (unsigned long)&util_timesynch },
+ { HV_TS_GUID,
+ .driver_data = (unsigned long)&util_timesynch
+ },
/* Heartbeat guid */
- { VMBUS_DEVICE(0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
- 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
- .driver_data = (unsigned long)&util_heartbeat },
+ { HV_HEART_BEAT_GUID,
+ .driver_data = (unsigned long)&util_heartbeat
+ },
/* KVP guid */
- { VMBUS_DEVICE(0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
- 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6)
- .driver_data = (unsigned long)&util_kvp },
+ { HV_KVP_GUID,
+ .driver_data = (unsigned long)&util_kvp
+ },
{ },
};
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index d8d1fadb398a..12f2f9e989f7 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -101,15 +101,6 @@ enum hv_message_type {
/* Define invalid partition identifier. */
#define HV_PARTITION_ID_INVALID ((u64)0x0)
-/* Define connection identifier type. */
-union hv_connection_id {
- u32 asu32;
- struct {
- u32 id:24;
- u32 reserved:8;
- } u;
-};
-
/* Define port identifier type. */
union hv_port_id {
u32 asu32;
@@ -338,13 +329,6 @@ struct hv_input_post_message {
u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};
-/* Definition of the hv_signal_event hypercall input structure. */
-struct hv_input_signal_event {
- union hv_connection_id connectionid;
- u16 flag_number;
- u16 rsvdz;
-};
-
/*
* Versioning definitions used for guests reporting themselves to the
* hypervisor, and vice versa.
@@ -498,11 +482,6 @@ static const uuid_le VMBUS_SERVICE_ID = {
-struct hv_input_signal_event_buffer {
- u64 align8;
- struct hv_input_signal_event event;
-};
-
struct hv_context {
/* We only support running on top of Hyper-V
* So at this point this really can only contain the Hyper-V ID
@@ -513,16 +492,24 @@ struct hv_context {
bool synic_initialized;
- /*
- * This is used as an input param to HvCallSignalEvent hypercall. The
- * input param is immutable in our usage and must be dynamic mem (vs
- * stack or global). */
- struct hv_input_signal_event_buffer *signal_event_buffer;
- /* 8-bytes aligned of the buffer above */
- struct hv_input_signal_event *signal_event_param;
-
void *synic_message_page[NR_CPUS];
void *synic_event_page[NR_CPUS];
+ /*
+ * Hypervisor's notion of virtual processor ID is different from
+ * Linux's notion of CPU ID. This information can only be retrieved
+ * in the context of the calling CPU. Set up a map for easy access
+ * to this information:
+ *
+ * vp_index[a] is the Hyper-V's processor ID corresponding to
+ * Linux cpuid 'a'.
+ */
+ u32 vp_index[NR_CPUS];
+ /*
+ * Starting with win8, we can take channel interrupts on any CPU;
+ * we will manage the tasklet that handles events on a per CPU
+ * basis.
+ */
+ struct tasklet_struct *event_dpc[NR_CPUS];
};
extern struct hv_context hv_context;
@@ -538,12 +525,19 @@ extern int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
void *payload, size_t payload_size);
-extern u16 hv_signal_event(void);
+extern u16 hv_signal_event(void *con_id);
extern void hv_synic_init(void *irqarg);
extern void hv_synic_cleanup(void *arg);
+/*
+ * Host version information.
+ */
+extern unsigned int host_info_eax;
+extern unsigned int host_info_ebx;
+extern unsigned int host_info_ecx;
+extern unsigned int host_info_edx;
/* Interface */
@@ -555,7 +549,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
struct scatterlist *sglist,
- u32 sgcount);
+ u32 sgcount, bool *signal);
int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
u32 buflen);
@@ -563,13 +557,16 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info,
void *buffer,
u32 buflen,
- u32 offset);
+ u32 offset, bool *signal);
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *ring_info);
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
+void hv_begin_read(struct hv_ring_buffer_info *rbi);
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi);
+
/*
* Maximum channels is determined by the size of the interrupt page
* which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
@@ -657,7 +654,7 @@ int vmbus_connect(void);
int vmbus_post_msg(void *buffer, size_t buflen);
-int vmbus_set_event(u32 child_relid);
+int vmbus_set_event(struct vmbus_channel *channel);
void vmbus_on_event(unsigned long data);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 7233c88f01b8..cafa72ffdc30 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -29,6 +29,105 @@
#include "hyperv_vmbus.h"
+void hv_begin_read(struct hv_ring_buffer_info *rbi)
+{
+ rbi->ring_buffer->interrupt_mask = 1;
+ smp_mb();
+}
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi)
+{
+ u32 read;
+ u32 write;
+
+ rbi->ring_buffer->interrupt_mask = 0;
+ smp_mb();
+
+ /*
+ * Now check to see if the ring buffer is still empty.
+ * If it is not, we raced and we need to process new
+ * incoming messages.
+ */
+ hv_get_ringbuffer_availbytes(rbi, &read, &write);
+
+ return read;
+}
+
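A minimal sketch of how a consumer might drive hv_begin_read()/hv_end_read() to close the race window described below; process_pending_messages() is a hypothetical helper standing in for the real channel callback:

static void example_drain_ring(struct hv_ring_buffer_info *rbi)
{
	do {
		/* Mask host notifications while we drain the ring. */
		hv_begin_read(rbi);

		/* Drain everything currently in the ring. */
		process_pending_messages(rbi);	/* hypothetical helper */

		/*
		 * Unmask and re-check: a non-zero return means new data
		 * arrived while the mask was set, so drain again rather
		 * than miss it.
		 */
	} while (hv_end_read(rbi) != 0);
}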
+/*
+ * When we write to the ring buffer, check if the host needs to
+ * be signaled. Here are the details of this protocol:
+ *
+ * 1. The host guarantees that while it is draining the
+ * ring buffer, it will set the interrupt_mask to
+ * indicate it does not need to be interrupted when
+ * new data is placed.
+ *
+ * 2. The host guarantees that it will completely drain
+ * the ring buffer before exiting the read loop. Further,
+ * once the ring buffer is empty, it will clear the
+ * interrupt_mask and re-check to see if new data has
+ * arrived.
+ */
+
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+{
+ if (rbi->ring_buffer->interrupt_mask)
+ return false;
+
+ /*
+ * This is the only case we need to signal: when the
+ * ring transitions from being empty to non-empty.
+ */
+ if (old_write == rbi->ring_buffer->read_index)
+ return true;
+
+ return false;
+}
+
+/*
+ * To optimize flow management on the send side,
+ * when the sender is blocked because of lack of
+ * sufficient space in the ring buffer, the consumer
+ * of the ring buffer can signal the producer.
+ * This is controlled by the following parameters:
+ *
+ * 1. pending_send_sz: This is the size in bytes that the
+ * producer is trying to send.
+ * 2. The feature bit feat_pending_send_sz is set to indicate whether
+ * the consumer of the ring will signal when the ring
+ * state transitions from being full to a state where
+ * there is room for the producer to send the pending packet.
+ */
+
+static bool hv_need_to_signal_on_read(u32 old_rd,
+ struct hv_ring_buffer_info *rbi)
+{
+ u32 prev_write_sz;
+ u32 cur_write_sz;
+ u32 r_size;
+ u32 write_loc = rbi->ring_buffer->write_index;
+ u32 read_loc = rbi->ring_buffer->read_index;
+ u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+
+ /*
+ * If the other end is not blocked on write, don't bother.
+ */
+ if (pending_sz == 0)
+ return false;
+
+ r_size = rbi->ring_datasize;
+ cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
+ read_loc - write_loc;
+
+ prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
+ old_rd - write_loc;
+
+
+ if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+ return true;
+
+ return false;
+}
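To make the threshold test above concrete, a numeric example (values chosen purely for illustration):

/*
 * Example only: r_size = 4096, pending_sz = 2048, write_loc = 3000.
 *
 *   before the read: old_rd   =  500
 *     prev_write_sz = 4096 - (3000 - 500)  = 1596   (< pending_sz)
 *   after the read:  read_loc = 1200
 *     cur_write_sz  = 4096 - (3000 - 1200) = 2296   (>= pending_sz)
 *
 * The free space crossed the pending_send_sz threshold during this
 * read, so the blocked producer gets signaled.
 */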
/*
* hv_get_next_write_location()
@@ -239,19 +338,6 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
}
}
-
-/*
- *
- * hv_get_ringbuffer_interrupt_mask()
- *
- * Get the interrupt mask for the specified ring buffer
- *
- */
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
-{
- return rbi->ring_buffer->interrupt_mask;
-}
-
/*
*
* hv_ringbuffer_init()
@@ -298,7 +384,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
*
*/
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct scatterlist *sglist, u32 sgcount)
+ struct scatterlist *sglist, u32 sgcount, bool *signal)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -307,6 +393,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
struct scatterlist *sg;
u32 next_write_location;
+ u32 old_write;
u64 prev_indices = 0;
unsigned long flags;
@@ -335,6 +422,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
/* Write to the ring buffer */
next_write_location = hv_get_next_write_location(outring_info);
+ old_write = next_write_location;
+
for_each_sg(sglist, sg, sgcount, i)
{
next_write_location = hv_copyto_ringbuffer(outring_info,
@@ -351,14 +440,16 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
&prev_indices,
sizeof(u64));
- /* Make sure we flush all writes before updating the writeIndex */
- smp_wmb();
+ /* Issue a full memory barrier before updating the write index */
+ smp_mb();
/* Now, update the write location */
hv_set_next_write_location(outring_info, next_write_location);
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+ *signal = hv_need_to_signal(old_write, outring_info);
return 0;
}
@@ -414,13 +505,14 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
*
*/
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
- u32 buflen, u32 offset)
+ u32 buflen, u32 offset, bool *signal)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
u32 next_read_location = 0;
u64 prev_indices = 0;
unsigned long flags;
+ u32 old_read;
if (buflen <= 0)
return -EINVAL;
@@ -431,6 +523,8 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
&bytes_avail_toread,
&bytes_avail_towrite);
+ old_read = bytes_avail_toread;
+
/* Make sure there is something to read */
if (bytes_avail_toread < buflen) {
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -461,5 +555,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
+ *signal = hv_need_to_signal_on_read(old_read, inring_info);
+
return 0;
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8e1a9ec53003..cf19dfa5ead1 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -33,6 +33,7 @@
#include <acpi/acpi_bus.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
+#include <linux/kernel_stat.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include "hyperv_vmbus.h"
@@ -41,7 +42,6 @@
static struct acpi_device *hv_acpi_dev;
static struct tasklet_struct msg_dpc;
-static struct tasklet_struct event_dpc;
static struct completion probe_event;
static int irq;
@@ -454,21 +454,40 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
union hv_synic_event_flags *event;
bool handled = false;
+ page_addr = hv_context.synic_event_page[cpu];
+ if (page_addr == NULL)
+ return IRQ_NONE;
+
+ event = (union hv_synic_event_flags *)page_addr +
+ VMBUS_MESSAGE_SINT;
/*
* Check for events before checking for messages. This is the order
* in which events and messages are checked in Windows guests on
* Hyper-V, and the Windows team suggested we do the same.
*/
- page_addr = hv_context.synic_event_page[cpu];
- event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7)) {
- /* Since we are a child, we only need to check bit 0 */
- if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
+ /* Since we are a child, we only need to check bit 0 */
+ if (sync_test_and_clear_bit(0,
+ (unsigned long *) &event->flags32[0])) {
+ handled = true;
+ }
+ } else {
+ /*
+ * Our host is win8 or above. The signaling mechanism
+ * has changed and we can directly look at the event page.
+ * If bit n is set then we have an interrupt on the channel
+ * whose id is n.
+ */
handled = true;
- tasklet_schedule(&event_dpc);
}
+ if (handled)
+ tasklet_schedule(hv_context.event_dpc[cpu]);
+
+
page_addr = hv_context.synic_message_page[cpu];
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
@@ -485,6 +504,19 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
}
/*
+ * vmbus interrupt flow handler:
+ * vmbus interrupts can concurrently occur on multiple CPUs and
+ * can be handled concurrently.
+ */
+
+static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
+{
+ kstat_incr_irqs_this_cpu(irq, desc);
+
+ desc->action->handler(irq, desc->action->dev_id);
+}
+
+/*
* vmbus_bus_init -Main vmbus driver initialization routine.
*
* Here, we
@@ -506,7 +538,6 @@ static int vmbus_bus_init(int irq)
}
tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
- tasklet_init(&event_dpc, vmbus_on_event, 0);
ret = bus_register(&hv_bus);
if (ret)
@@ -520,6 +551,13 @@ static int vmbus_bus_init(int irq)
goto err_unregister;
}
+ /*
+ * Vmbus interrupts can be handled concurrently on
+ * different CPUs. Establish an appropriate interrupt flow
+ * handler that can support this model.
+ */
+ irq_set_handler(irq, vmbus_flow_handler);
+
vector = IRQ0_VECTOR + irq;
/*
@@ -575,8 +613,6 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
ret = driver_register(&hv_driver->driver);
- vmbus_request_offers();
-
return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 9b94a78ca776..a3725de92384 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -106,6 +106,8 @@ config I2C_I801
Panther Point (PCH)
Lynx Point (PCH)
Lynx Point-LP (PCH)
+ Avoton (SOC)
+ Wellsburg (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -121,6 +123,16 @@ config I2C_ISCH
This driver can also be built as a module. If so, the module
will be called i2c-isch.
+config I2C_ISMT
+ tristate "Intel iSMT SMBus Controller"
+ depends on PCI && X86
+ help
+ If you say yes to this option, support will be included for the Intel
+ iSMT SMBus host controller interface.
+
+ This driver can also be built as a module. If so, the module will be
+ called i2c-ismt.
+
config I2C_PIIX4
tristate "Intel PIIX4 and compatible (ATI/AMD/Serverworks/Broadcom/SMSC)"
depends on PCI
@@ -186,11 +198,11 @@ config I2C_SIS5595
will be called i2c-sis5595.
config I2C_SIS630
- tristate "SiS 630/730"
+ tristate "SiS 630/730/964"
depends on PCI
help
If you say yes to this option, support will be included for the
- SiS630 and SiS730 SMBus (a subset of I2C) interface.
+ SiS630, SiS730 and SiS964 SMBus (a subset of I2C) interface.
This driver can also be built as a module. If so, the module
will be called i2c-sis630.
@@ -319,6 +331,18 @@ config I2C_AU1550
This driver can also be built as a module. If so, the module
will be called i2c-au1550.
+config I2C_BCM2835
+ tristate "Broadcom BCM2835 I2C controller"
+ depends on ARCH_BCM2835
+ help
+ If you say yes to this option, support will be included for the
+ BCM2835 I2C controller.
+
+ If you don't know what to do here, say N.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-bcm2835.
+
config I2C_BLACKFIN_TWI
tristate "Blackfin TWI I2C support"
depends on BLACKFIN
@@ -802,6 +826,7 @@ config I2C_PARPORT_LIGHT
config I2C_TAOS_EVM
tristate "TAOS evaluation module"
+ depends on TTY
select SERIO
select SERIO_SERPORT
default n
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 6181f3ff263f..8f4fc23b85b1 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_I2C_AMD756_S4882) += i2c-amd756-s4882.o
obj-$(CONFIG_I2C_AMD8111) += i2c-amd8111.o
obj-$(CONFIG_I2C_I801) += i2c-i801.o
obj-$(CONFIG_I2C_ISCH) += i2c-isch.o
+obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o
obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
obj-$(CONFIG_I2C_NFORCE2_S4985) += i2c-nforce2-s4985.o
obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
# Embedded system I2C/SMBus host controller drivers
obj-$(CONFIG_I2C_AT91) += i2c-at91.o
obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
+obj-$(CONFIG_I2C_BCM2835) += i2c-bcm2835.o
obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o
obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index ebc224154695..75195e3f5ddb 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -553,13 +553,6 @@ static struct at91_twi_pdata at91sam9g10_config = {
.has_dma_support = false,
};
-static struct at91_twi_pdata at91sam9x5_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_dma_support = true,
-};
-
static const struct platform_device_id at91_twi_devtypes[] = {
{
.name = "i2c-at91rm9200",
@@ -582,8 +575,18 @@ static const struct platform_device_id at91_twi_devtypes[] = {
};
#if defined(CONFIG_OF)
+static struct at91_twi_pdata at91sam9x5_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_dma_support = true,
+};
+
static const struct of_device_id atmel_twi_dt_ids[] = {
{
+ .compatible = "atmel,at91rm9200-i2c",
+ .data = &at91rm9200_config,
+ } , {
.compatible = "atmel,at91sam9260-i2c",
.data = &at91sam9260_config,
} , {
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index b278298787d7..b5b89239d622 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -376,7 +376,6 @@ static int i2c_au1550_remove(struct platform_device *pdev)
{
struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
i2c_del_adapter(&priv->adap);
i2c_au1550_disable(priv);
iounmap(priv->psc_base);
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
new file mode 100644
index 000000000000..ea4b08fc3353
--- /dev/null
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -0,0 +1,342 @@
+/*
+ * BCM2835 master mode driver
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define BCM2835_I2C_C 0x0
+#define BCM2835_I2C_S 0x4
+#define BCM2835_I2C_DLEN 0x8
+#define BCM2835_I2C_A 0xc
+#define BCM2835_I2C_FIFO 0x10
+#define BCM2835_I2C_DIV 0x14
+#define BCM2835_I2C_DEL 0x18
+#define BCM2835_I2C_CLKT 0x1c
+
+#define BCM2835_I2C_C_READ BIT(0)
+#define BCM2835_I2C_C_CLEAR BIT(4) /* bits 4 and 5 both clear */
+#define BCM2835_I2C_C_ST BIT(7)
+#define BCM2835_I2C_C_INTD BIT(8)
+#define BCM2835_I2C_C_INTT BIT(9)
+#define BCM2835_I2C_C_INTR BIT(10)
+#define BCM2835_I2C_C_I2CEN BIT(15)
+
+#define BCM2835_I2C_S_TA BIT(0)
+#define BCM2835_I2C_S_DONE BIT(1)
+#define BCM2835_I2C_S_TXW BIT(2)
+#define BCM2835_I2C_S_RXR BIT(3)
+#define BCM2835_I2C_S_TXD BIT(4)
+#define BCM2835_I2C_S_RXD BIT(5)
+#define BCM2835_I2C_S_TXE BIT(6)
+#define BCM2835_I2C_S_RXF BIT(7)
+#define BCM2835_I2C_S_ERR BIT(8)
+#define BCM2835_I2C_S_CLKT BIT(9)
+#define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */
+
+#define BCM2835_I2C_TIMEOUT (msecs_to_jiffies(1000))
+
+struct bcm2835_i2c_dev {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ struct i2c_adapter adapter;
+ struct completion completion;
+ u32 msg_err;
+ u8 *msg_buf;
+ size_t msg_buf_remaining;
+};
+
+static inline void bcm2835_i2c_writel(struct bcm2835_i2c_dev *i2c_dev,
+ u32 reg, u32 val)
+{
+ writel(val, i2c_dev->regs + reg);
+}
+
+static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg)
+{
+ return readl(i2c_dev->regs + reg);
+}
+
+static void bcm2835_fill_txfifo(struct bcm2835_i2c_dev *i2c_dev)
+{
+ u32 val;
+
+ while (i2c_dev->msg_buf_remaining) {
+ val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
+ if (!(val & BCM2835_I2C_S_TXD))
+ break;
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_FIFO,
+ *i2c_dev->msg_buf);
+ i2c_dev->msg_buf++;
+ i2c_dev->msg_buf_remaining--;
+ }
+}
+
+static void bcm2835_drain_rxfifo(struct bcm2835_i2c_dev *i2c_dev)
+{
+ u32 val;
+
+ while (i2c_dev->msg_buf_remaining) {
+ val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
+ if (!(val & BCM2835_I2C_S_RXD))
+ break;
+ *i2c_dev->msg_buf = bcm2835_i2c_readl(i2c_dev,
+ BCM2835_I2C_FIFO);
+ i2c_dev->msg_buf++;
+ i2c_dev->msg_buf_remaining--;
+ }
+}
+
+static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data)
+{
+ struct bcm2835_i2c_dev *i2c_dev = data;
+ u32 val, err;
+
+ val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_S, val);
+
+ err = val & (BCM2835_I2C_S_CLKT | BCM2835_I2C_S_ERR);
+ if (err) {
+ i2c_dev->msg_err = err;
+ complete(&i2c_dev->completion);
+ return IRQ_HANDLED;
+ }
+
+ if (val & BCM2835_I2C_S_RXD) {
+ bcm2835_drain_rxfifo(i2c_dev);
+ if (!(val & BCM2835_I2C_S_DONE))
+ return IRQ_HANDLED;
+ }
+
+ if (val & BCM2835_I2C_S_DONE) {
+ if (i2c_dev->msg_buf_remaining)
+ i2c_dev->msg_err = BCM2835_I2C_S_LEN;
+ else
+ i2c_dev->msg_err = 0;
+ complete(&i2c_dev->completion);
+ return IRQ_HANDLED;
+ }
+
+ if (val & BCM2835_I2C_S_TXD) {
+ bcm2835_fill_txfifo(i2c_dev);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int bcm2835_i2c_xfer_msg(struct bcm2835_i2c_dev *i2c_dev,
+ struct i2c_msg *msg)
+{
+ u32 c;
+ int time_left;
+
+ i2c_dev->msg_buf = msg->buf;
+ i2c_dev->msg_buf_remaining = msg->len;
+ INIT_COMPLETION(i2c_dev->completion);
+
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
+
+ if (msg->flags & I2C_M_RD) {
+ c = BCM2835_I2C_C_READ | BCM2835_I2C_C_INTR;
+ } else {
+ c = BCM2835_I2C_C_INTT;
+ bcm2835_fill_txfifo(i2c_dev);
+ }
+ c |= BCM2835_I2C_C_ST | BCM2835_I2C_C_INTD | BCM2835_I2C_C_I2CEN;
+
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_A, msg->addr);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DLEN, msg->len);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
+
+ time_left = wait_for_completion_timeout(&i2c_dev->completion,
+ BCM2835_I2C_TIMEOUT);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
+ if (!time_left) {
+ dev_err(i2c_dev->dev, "i2c transfer timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (likely(!i2c_dev->msg_err))
+ return 0;
+
+ if ((i2c_dev->msg_err & BCM2835_I2C_S_ERR) &&
+ (msg->flags & I2C_M_IGNORE_NAK))
+ return 0;
+
+ dev_err(i2c_dev->dev, "i2c transfer failed: %x\n", i2c_dev->msg_err);
+
+ if (i2c_dev->msg_err & BCM2835_I2C_S_ERR)
+ return -EREMOTEIO;
+ else
+ return -EIO;
+}
+
+static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ struct bcm2835_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < num; i++) {
+ ret = bcm2835_i2c_xfer_msg(i2c_dev, &msgs[i]);
+ if (ret)
+ break;
+ }
+
+ return ret ?: i;
+}
+
+static u32 bcm2835_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm bcm2835_i2c_algo = {
+ .master_xfer = bcm2835_i2c_xfer,
+ .functionality = bcm2835_i2c_func,
+};
+
+static int bcm2835_i2c_probe(struct platform_device *pdev)
+{
+ struct bcm2835_i2c_dev *i2c_dev;
+ struct resource *mem, *requested, *irq;
+ u32 bus_clk_rate, divider;
+ int ret;
+ struct i2c_adapter *adap;
+
+ i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
+ if (!i2c_dev) {
+ dev_err(&pdev->dev, "Cannot allocate i2c_dev\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, i2c_dev);
+ i2c_dev->dev = &pdev->dev;
+ init_completion(&i2c_dev->completion);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No mem resource\n");
+ return -ENODEV;
+ }
+
+ requested = devm_request_mem_region(&pdev->dev, mem->start,
+ resource_size(mem),
+ dev_name(&pdev->dev));
+ if (!requested) {
+ dev_err(&pdev->dev, "Could not claim register region\n");
+ return -EBUSY;
+ }
+
+ i2c_dev->regs = devm_ioremap(&pdev->dev, mem->start,
+ resource_size(mem));
+ if (!i2c_dev->regs) {
+ dev_err(&pdev->dev, "Could not map registers\n");
+ return -ENOMEM;
+ }
+
+ i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2c_dev->clk)) {
+ dev_err(&pdev->dev, "Could not get clock\n");
+ return PTR_ERR(i2c_dev->clk);
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &bus_clk_rate);
+ if (ret < 0) {
+ dev_warn(&pdev->dev,
+ "Could not read clock-frequency property\n");
+ bus_clk_rate = 100000;
+ }
+
+ divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), bus_clk_rate);
+ /*
+ * Per the datasheet, the register is always interpreted as an even
+ * number, by rounding down. In other words, the LSB is ignored. So,
+ * if the LSB is set, increment the divider to avoid any issue.
+ */
+ if (divider & 1)
+ divider++;
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return -ENODEV;
+ }
+ i2c_dev->irq = irq->start;
+
+ ret = request_irq(i2c_dev->irq, bcm2835_i2c_isr, IRQF_SHARED,
+ dev_name(&pdev->dev), i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request IRQ\n");
+ return -ENODEV;
+ }
+
+ adap = &i2c_dev->adapter;
+ i2c_set_adapdata(adap, i2c_dev);
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON;
+ strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name));
+ adap->algo = &bcm2835_i2c_algo;
+ adap->dev.parent = &pdev->dev;
+
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0);
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ free_irq(i2c_dev->irq, i2c_dev);
+
+ return ret;
+}
+
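For illustration of the divider rounding in probe above (the clock rates here are hypothetical, not taken from the datasheet):

/*
 * Example only:
 *   core clock 250 MHz, clock-frequency 400 kHz:
 *     DIV_ROUND_UP(250000000, 400000) = 625 (odd) -> written as 626
 *   core clock 250 MHz, default 100 kHz:
 *     DIV_ROUND_UP(250000000, 100000) = 2500 (already even)
 */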
+static int bcm2835_i2c_remove(struct platform_device *pdev)
+{
+ struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ free_irq(i2c_dev->irq, i2c_dev);
+ i2c_del_adapter(&i2c_dev->adapter);
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835_i2c_of_match[] = {
+ { .compatible = "brcm,bcm2835-i2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_i2c_of_match);
+
+static struct platform_driver bcm2835_i2c_driver = {
+ .probe = bcm2835_i2c_probe,
+ .remove = bcm2835_i2c_remove,
+ .driver = {
+ .name = "i2c-bcm2835",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm2835_i2c_of_match,
+ },
+};
+module_platform_driver(bcm2835_i2c_driver);
+
+MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>");
+MODULE_DESCRIPTION("BCM2835 I2C bus adapter");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-bcm2835");
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 0cf780fd6ef1..05080c449c6b 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -724,8 +724,6 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev)
{
struct bfin_twi_iface *iface = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
-
i2c_del_adapter(&(iface->adap));
free_irq(iface->irq, iface);
peripheral_free_list((unsigned short *)pdev->dev.platform_data);
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 2e79c1024191..3823623baa48 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -682,7 +682,6 @@ static int cpm_i2c_probe(struct platform_device *ofdev)
out_shut:
cpm_i2c_shutdown(cpm);
out_free:
- dev_set_drvdata(&ofdev->dev, NULL);
kfree(cpm);
return result;
@@ -696,7 +695,6 @@ static int cpm_i2c_remove(struct platform_device *ofdev)
cpm_i2c_shutdown(cpm);
- dev_set_drvdata(&ofdev->dev, NULL);
kfree(cpm);
return 0;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 6a0a55319449..7d1e590a7bb6 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -755,7 +755,6 @@ err_mem_ioremap:
clk_put(dev->clk);
dev->clk = NULL;
err_free_mem:
- platform_set_drvdata(pdev, NULL);
put_device(&pdev->dev);
kfree(dev);
err_release_region:
@@ -771,7 +770,6 @@ static int davinci_i2c_remove(struct platform_device *pdev)
i2c_davinci_cpufreq_deregister(dev);
- platform_set_drvdata(pdev, NULL);
i2c_del_adapter(&dev->adapter);
put_device(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index f5258c205de5..94fd81875409 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -413,11 +413,23 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
rx_limit = dev->rx_fifo_depth - dw_readl(dev, DW_IC_RXFLR);
while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
+ u32 cmd = 0;
+
+ /*
+ * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
+ * manually set the stop bit. However, it cannot be
+ * detected from the registers, so we always set it
+ * when writing/reading the last byte.
+ */
+ if (dev->msg_write_idx == dev->msgs_num - 1 &&
+ buf_len == 1)
+ cmd |= BIT(9);
+
if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
- dw_writel(dev, 0x100, DW_IC_DATA_CMD);
+ dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
rx_limit--;
} else
- dw_writel(dev, *buf++, DW_IC_DATA_CMD);
+ dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
tx_limit--; buf_len--;
}
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 6add851e9dee..7c5e383c350d 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -319,7 +319,6 @@ err_free_irq:
free_irq(pdev->irq, dev);
err_iounmap:
iounmap(dev->base);
- pci_set_drvdata(pdev, NULL);
put_device(&pdev->dev);
kfree(dev);
err_release_region:
@@ -336,7 +335,6 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
- pci_set_drvdata(pdev, NULL);
i2c_del_adapter(&dev->adapter);
put_device(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 343357a2b5b4..0ceb6e1b0f65 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -37,8 +37,10 @@
#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
#include "i2c-designware-core.h"
static struct i2c_algorithm i2c_dw_algo = {
@@ -50,6 +52,42 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
return clk_get_rate(dev->clk)/1000;
}
+#ifdef CONFIG_ACPI
+static int dw_i2c_acpi_configure(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct acpi_device *adev;
+ int busno, ret;
+
+ if (!ACPI_HANDLE(&pdev->dev))
+ return -ENODEV;
+
+ ret = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+ if (ret)
+ return -ENODEV;
+
+ dev->adapter.nr = -1;
+ if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &busno))
+ dev->adapter.nr = busno;
+
+ dev->tx_fifo_depth = 32;
+ dev->rx_fifo_depth = 32;
+ return 0;
+}
+
+static const struct acpi_device_id dw_i2c_acpi_match[] = {
+ { "INT33C2", 0 },
+ { "INT33C3", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
+#else
+static inline int dw_i2c_acpi_configure(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif
+
static int dw_i2c_probe(struct platform_device *pdev)
{
struct dw_i2c_dev *dev;
@@ -114,18 +152,22 @@ static int dw_i2c_probe(struct platform_device *pdev)
r = -EBUSY;
goto err_unuse_clocks;
}
- {
+
+ /* Try first if we can configure the device from ACPI */
+ r = dw_i2c_acpi_configure(pdev);
+ if (r) {
u32 param1 = i2c_dw_read_comp_param(dev);
dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
+ dev->adapter.nr = pdev->id;
}
r = i2c_dw_init(dev);
if (r)
goto err_iounmap;
i2c_dw_disable_int(dev);
- r = request_irq(dev->irq, i2c_dw_isr, IRQF_DISABLED, pdev->name, dev);
+ r = request_irq(dev->irq, i2c_dw_isr, IRQF_SHARED, pdev->name, dev);
if (r) {
dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
goto err_iounmap;
@@ -140,14 +182,19 @@ static int dw_i2c_probe(struct platform_device *pdev)
adap->algo = &i2c_dw_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
+ ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev));
- adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
if (r) {
dev_err(&pdev->dev, "failure adding adapter\n");
goto err_free_irq;
}
of_i2c_register_devices(adap);
+ acpi_i2c_register_devices(adap);
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
return 0;
@@ -160,7 +207,6 @@ err_unuse_clocks:
clk_put(dev->clk);
dev->clk = NULL;
err_free_mem:
- platform_set_drvdata(pdev, NULL);
put_device(&pdev->dev);
kfree(dev);
err_release_region:
@@ -174,7 +220,8 @@ static int dw_i2c_remove(struct platform_device *pdev)
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
struct resource *mem;
- platform_set_drvdata(pdev, NULL);
+ pm_runtime_get_sync(&pdev->dev);
+
i2c_del_adapter(&dev->adapter);
put_device(&pdev->dev);
@@ -186,6 +233,9 @@ static int dw_i2c_remove(struct platform_device *pdev)
free_irq(dev->irq, dev);
kfree(dev);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
return 0;
@@ -233,6 +283,7 @@ static struct platform_driver dw_i2c_driver = {
.name = "i2c_designware",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(dw_i2c_of_match),
+ .acpi_match_table = ACPI_PTR(dw_i2c_acpi_match),
.pm = &dw_i2c_dev_pm_ops,
},
};
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 5e7886e7136e..0f3752967c4b 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -869,8 +869,6 @@ static void pch_i2c_remove(struct pci_dev *pdev)
for (i = 0; i < adap_info->ch_num; i++)
adap_info->pch_data[i].pch_base_address = NULL;
- pci_set_drvdata(pdev, NULL);
-
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 3351cc7ed11f..436b0f254916 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -436,8 +436,6 @@ err_unmap:
err:
kfree(dev);
- platform_set_drvdata(pdev, NULL);
-
return ret;
}
@@ -453,8 +451,6 @@ static int highlander_i2c_remove(struct platform_device *pdev)
iounmap(dev->base);
kfree(dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 3092387f6ef4..e1cf2e0e1f23 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -53,6 +53,11 @@
Panther Point (PCH) 0x1e22 32 hard yes yes yes
Lynx Point (PCH) 0x8c22 32 hard yes yes yes
Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes
+ Avoton (SOC) 0x1f3c 32 hard yes yes yes
+ Wellsburg (PCH) 0x8d22 32 hard yes yes yes
+ Wellsburg (PCH) MS 0x8d7d 32 hard yes yes yes
+ Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes
+ Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -162,9 +167,14 @@
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
+#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c
#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
+#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22
+#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0 0x8d7d
+#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e
+#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
struct i801_mux_config {
@@ -798,6 +808,11 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
{ 0, }
};
@@ -1103,6 +1118,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0:
case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1:
case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2:
+ case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0:
+ case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1:
+ case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2:
priv->features |= FEATURE_IDF;
/* fall through */
default:
@@ -1236,7 +1254,6 @@ static void i801_remove(struct pci_dev *dev)
free_irq(dev->irq, priv);
pci_release_region(dev, SMBBAR);
- pci_set_drvdata(dev, NULL);
kfree(priv);
/*
* do not call pci_disable_device(dev) since it can cause hard hangs on
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 33a2abb6c063..405a2e240454 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -773,7 +773,6 @@ error_cleanup:
if (dev->vaddr)
iounmap(dev->vaddr);
- dev_set_drvdata(&ofdev->dev, NULL);
kfree(dev);
return ret;
}
@@ -785,8 +784,6 @@ static int iic_remove(struct platform_device *ofdev)
{
struct ibm_iic_private *dev = dev_get_drvdata(&ofdev->dev);
- dev_set_drvdata(&ofdev->dev, NULL);
-
i2c_del_adapter(&dev->adap);
if (dev->irq) {
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index a71ece63e917..82f20c60bb7b 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -605,7 +605,6 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
/* remove adapter */
dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n");
i2c_del_adapter(&i2c_imx->adapter);
- platform_set_drvdata(pdev, NULL);
/* setup chip registers to defaults */
writeb(0, i2c_imx->base + IMX_I2C_IADR);
diff --git a/drivers/i2c/busses/i2c-intel-mid.c b/drivers/i2c/busses/i2c-intel-mid.c
index de3736bf6465..323fa018ffd5 100644
--- a/drivers/i2c/busses/i2c-intel-mid.c
+++ b/drivers/i2c/busses/i2c-intel-mid.c
@@ -1069,7 +1069,6 @@ static int intel_mid_i2c_probe(struct pci_dev *dev,
fail3:
free_irq(dev->irq, mrst);
fail2:
- pci_set_drvdata(dev, NULL);
kfree(mrst);
fail1:
iounmap(base);
@@ -1087,7 +1086,6 @@ static void intel_mid_i2c_remove(struct pci_dev *dev)
dev_err(&dev->dev, "Failed to delete i2c adapter");
free_irq(dev->irq, mrst);
- pci_set_drvdata(dev, NULL);
iounmap(mrst->base);
kfree(mrst);
pci_release_region(dev, 0);
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 2f99613fd677..bc993331c695 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -415,8 +415,6 @@ iop3xx_i2c_remove(struct platform_device *pdev)
kfree(adapter_data);
kfree(padapter);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 4099f79c2280..8c38aaa7417c 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -40,6 +40,7 @@
/* SCH SMBus address offsets */
#define SMBHSTCNT (0 + sch_smba)
#define SMBHSTSTS (1 + sch_smba)
+#define SMBHSTCLK (2 + sch_smba)
#define SMBHSTADD (4 + sch_smba) /* TSA */
#define SMBHSTCMD (5 + sch_smba)
#define SMBHSTDAT0 (6 + sch_smba)
@@ -58,6 +59,9 @@
static unsigned short sch_smba;
static struct i2c_adapter sch_adapter;
+static int backbone_speed = 33000; /* backbone speed in kHz */
+module_param(backbone_speed, int, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(backbone_speed, "Backbone speed in kHz, (default = 33000)");
/*
* Start the i2c transaction -- the i2c_access will prepare the transaction
@@ -156,6 +160,19 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
dev_dbg(&sch_adapter.dev, "SMBus busy (%02x)\n", temp);
return -EAGAIN;
}
+ temp = inw(SMBHSTCLK);
+ if (!temp) {
+ /*
+ * We can't determine whether we have a 33 or 25 MHz clock for
+ * SMBus, so assume 33 MHz and calculate a bus clock of
+ * 100 kHz. If we actually run at 25 MHz the bus will
+ * run at ~75 kHz instead, which should do no harm.
+ */
+ dev_notice(&sch_adapter.dev,
+ "Clock divider unitialized. Setting defaults\n");
+ outw(backbone_speed / (4 * 100), SMBHSTCLK);
+ }
+
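To illustrate the default divider written above (the bus-clock relationship is inferred from the comment, not from the SCH datasheet):

/*
 * Example only, with the default backbone_speed of 33000 kHz:
 *   divider = 33000 / (4 * 100) = 82        (integer division)
 * Assuming SMBus clock = backbone / (4 * divider):
 *   33 MHz backbone -> 33000 / (4 * 82) ~= 100.6 kHz
 *   25 MHz backbone -> 25000 / (4 * 82) ~=  76.2 kHz  (the "~75 kHz" case)
 */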
dev_dbg(&sch_adapter.dev, "access size: %d %s\n", size,
(read_write)?"READ":"WRITE");
switch (size) {
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
new file mode 100644
index 000000000000..e9205ee8cf94
--- /dev/null
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -0,0 +1,963 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Supports the SMBus Message Transport (SMT) in the Intel Atom Processor
+ * S12xx Product Family.
+ *
+ * Features supported by this driver:
+ * Hardware PEC yes
+ * Block buffer yes
+ * Block process call transaction no
+ * Slave mode no
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+/* PCI Address Constants */
+#define SMBBAR 0
+
+/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
+#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
+#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
+
+#define ISMT_DESC_ENTRIES 32 /* number of descriptor entries */
+#define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
+
+/* Hardware Descriptor Constants - Control Field */
+#define ISMT_DESC_CWRL 0x01 /* Command/Write Length */
+#define ISMT_DESC_BLK 0X04 /* Perform Block Transaction */
+#define ISMT_DESC_FAIR 0x08 /* Set fairness flag upon successful arbit. */
+#define ISMT_DESC_PEC 0x10 /* Packet Error Code */
+#define ISMT_DESC_I2C 0x20 /* I2C Enable */
+#define ISMT_DESC_INT 0x40 /* Interrupt */
+#define ISMT_DESC_SOE 0x80 /* Stop On Error */
+
+/* Hardware Descriptor Constants - Status Field */
+#define ISMT_DESC_SCS 0x01 /* Success */
+#define ISMT_DESC_DLTO 0x04 /* Data Low Time Out */
+#define ISMT_DESC_NAK 0x08 /* NAK Received */
+#define ISMT_DESC_CRC 0x10 /* CRC Error */
+#define ISMT_DESC_CLTO 0x20 /* Clock Low Time Out */
+#define ISMT_DESC_COL 0x40 /* Collisions */
+#define ISMT_DESC_LPR 0x80 /* Large Packet Received */
+
+/* Macros */
+#define ISMT_DESC_ADDR_RW(addr, rw) (((addr) << 1) | (rw))
+
+/* iSMT General Register address offsets (SMBBAR + <addr>) */
+#define ISMT_GR_GCTRL 0x000 /* General Control */
+#define ISMT_GR_SMTICL 0x008 /* SMT Interrupt Cause Location */
+#define ISMT_GR_ERRINTMSK 0x010 /* Error Interrupt Mask */
+#define ISMT_GR_ERRAERMSK 0x014 /* Error AER Mask */
+#define ISMT_GR_ERRSTS 0x018 /* Error Status */
+#define ISMT_GR_ERRINFO 0x01c /* Error Information */
+
+/* iSMT Master Registers */
+#define ISMT_MSTR_MDBA 0x100 /* Master Descriptor Base Address */
+#define ISMT_MSTR_MCTRL 0x108 /* Master Control */
+#define ISMT_MSTR_MSTS 0x10c /* Master Status */
+#define ISMT_MSTR_MDS 0x110 /* Master Descriptor Size */
+#define ISMT_MSTR_RPOLICY 0x114 /* Retry Policy */
+
+/* iSMT Miscellaneous Registers */
+#define ISMT_SPGT 0x300 /* SMBus PHY Global Timing */
+
+/* General Control Register (GCTRL) bit definitions */
+#define ISMT_GCTRL_TRST 0x04 /* Target Reset */
+#define ISMT_GCTRL_KILL 0x08 /* Kill */
+#define ISMT_GCTRL_SRST 0x40 /* Soft Reset */
+
+/* Master Control Register (MCTRL) bit definitions */
+#define ISMT_MCTRL_SS 0x01 /* Start/Stop */
+#define ISMT_MCTRL_MEIE 0x10 /* Master Error Interrupt Enable */
+#define ISMT_MCTRL_FMHP 0x00ff0000 /* Firmware Master Head Ptr (FMHP) */
+
+/* Master Status Register (MSTS) bit definitions */
+#define ISMT_MSTS_HMTP 0xff0000 /* HW Master Tail Pointer (HMTP) */
+#define ISMT_MSTS_MIS 0x20 /* Master Interrupt Status (MIS) */
+#define ISMT_MSTS_MEIS 0x10 /* Master Error Int Status (MEIS) */
+#define ISMT_MSTS_IP 0x01 /* In Progress */
+
+/* Master Descriptor Size (MDS) bit definitions */
+#define ISMT_MDS_MASK 0xff /* Master Descriptor Size mask (MDS) */
+
+/* SMBus PHY Global Timing Register (SPGT) bit definitions */
+#define ISMT_SPGT_SPD_MASK 0xc0000000 /* SMBus Speed mask */
+#define ISMT_SPGT_SPD_80K 0x00 /* 80 kHz */
+#define ISMT_SPGT_SPD_100K (0x1 << 30) /* 100 kHz */
+#define ISMT_SPGT_SPD_400K (0x2 << 30) /* 400 kHz */
+#define ISMT_SPGT_SPD_1M (0x3 << 30) /* 1 MHz */
+
+
+/* MSI Control Register (MSICTL) bit definitions */
+#define ISMT_MSICTL_MSIE 0x01 /* MSI Enable */
+
+/* iSMT Hardware Descriptor */
+struct ismt_desc {
+ u8 tgtaddr_rw; /* target address & r/w bit */
+ u8 wr_len_cmd; /* write length in bytes or a command */
+ u8 rd_len; /* read length */
+ u8 control; /* control bits */
+ u8 status; /* status bits */
+ u8 retry; /* collision retry and retry count */
+ u8 rxbytes; /* received bytes */
+ u8 txbytes; /* transmitted bytes */
+ u32 dptr_low; /* lower 32 bit of the data pointer */
+ u32 dptr_high; /* upper 32 bit of the data pointer */
+} __packed;
+
+struct ismt_priv {
+ struct i2c_adapter adapter;
+ void *smba; /* PCI BAR */
+ struct pci_dev *pci_dev;
+ struct ismt_desc *hw; /* descriptor virt base addr */
+ dma_addr_t io_rng_dma; /* descriptor HW base addr */
+ u8 head; /* ring buffer head pointer */
+ struct completion cmp; /* interrupt completion */
+ u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* temp R/W data buffer */
+ bool using_msi; /* type of interrupt flag */
+};
+
+/**
+ * ismt_ids - PCI device IDs supported by this driver
+ */
+static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, ismt_ids);
+
+/* Bus speed control bits for slow debuggers - refer to the docs for usage */
+static unsigned int bus_speed;
+module_param(bus_speed, uint, S_IRUGO);
+MODULE_PARM_DESC(bus_speed, "Bus Speed in kHz (0 = BIOS default)");
+
+/**
+ * __ismt_desc_dump() - dump the contents of a specific descriptor
+ */
+static void __ismt_desc_dump(struct device *dev, const struct ismt_desc *desc)
+{
+
+ dev_dbg(dev, "Descriptor struct: %p\n", desc);
+ dev_dbg(dev, "\ttgtaddr_rw=0x%02X\n", desc->tgtaddr_rw);
+ dev_dbg(dev, "\twr_len_cmd=0x%02X\n", desc->wr_len_cmd);
+ dev_dbg(dev, "\trd_len= 0x%02X\n", desc->rd_len);
+ dev_dbg(dev, "\tcontrol= 0x%02X\n", desc->control);
+ dev_dbg(dev, "\tstatus= 0x%02X\n", desc->status);
+ dev_dbg(dev, "\tretry= 0x%02X\n", desc->retry);
+ dev_dbg(dev, "\trxbytes= 0x%02X\n", desc->rxbytes);
+ dev_dbg(dev, "\ttxbytes= 0x%02X\n", desc->txbytes);
+ dev_dbg(dev, "\tdptr_low= 0x%08X\n", desc->dptr_low);
+ dev_dbg(dev, "\tdptr_high= 0x%08X\n", desc->dptr_high);
+}
+/**
+ * ismt_desc_dump() - dump the contents of a descriptor for debug purposes
+ * @priv: iSMT private data
+ */
+static void ismt_desc_dump(struct ismt_priv *priv)
+{
+ struct device *dev = &priv->pci_dev->dev;
+ struct ismt_desc *desc = &priv->hw[priv->head];
+
+ dev_dbg(dev, "Dump of the descriptor struct: 0x%X\n", priv->head);
+ __ismt_desc_dump(dev, desc);
+}
+
+/**
+ * ismt_gen_reg_dump() - dump the iSMT General Registers
+ * @priv: iSMT private data
+ */
+static void ismt_gen_reg_dump(struct ismt_priv *priv)
+{
+ struct device *dev = &priv->pci_dev->dev;
+
+ dev_dbg(dev, "Dump of the iSMT General Registers\n");
+ dev_dbg(dev, " GCTRL.... : (0x%p)=0x%X\n",
+ priv->smba + ISMT_GR_GCTRL,
+ readl(priv->smba + ISMT_GR_GCTRL));
+ dev_dbg(dev, " SMTICL... : (0x%p)=0x%016llX\n",
+ priv->smba + ISMT_GR_SMTICL,
+ (long long unsigned int)readq(priv->smba + ISMT_GR_SMTICL));
+ dev_dbg(dev, " ERRINTMSK : (0x%p)=0x%X\n",
+ priv->smba + ISMT_GR_ERRINTMSK,
+ readl(priv->smba + ISMT_GR_ERRINTMSK));
+ dev_dbg(dev, " ERRAERMSK : (0x%p)=0x%X\n",
+ priv->smba + ISMT_GR_ERRAERMSK,
+ readl(priv->smba + ISMT_GR_ERRAERMSK));
+ dev_dbg(dev, " ERRSTS... : (0x%p)=0x%X\n",
+ priv->smba + ISMT_GR_ERRSTS,
+ readl(priv->smba + ISMT_GR_ERRSTS));
+ dev_dbg(dev, " ERRINFO.. : (0x%p)=0x%X\n",
+ priv->smba + ISMT_GR_ERRINFO,
+ readl(priv->smba + ISMT_GR_ERRINFO));
+}
+
+/**
+ * ismt_mstr_reg_dump() - dump the iSMT Master Registers
+ * @priv: iSMT private data
+ */
+static void ismt_mstr_reg_dump(struct ismt_priv *priv)
+{
+ struct device *dev = &priv->pci_dev->dev;
+
+ dev_dbg(dev, "Dump of the iSMT Master Registers\n");
+ dev_dbg(dev, " MDBA..... : (0x%p)=0x%016llX\n",
+ priv->smba + ISMT_MSTR_MDBA,
+ (long long unsigned int)readq(priv->smba + ISMT_MSTR_MDBA));
+ dev_dbg(dev, " MCTRL.... : (0x%p)=0x%X\n",
+ priv->smba + ISMT_MSTR_MCTRL,
+ readl(priv->smba + ISMT_MSTR_MCTRL));
+ dev_dbg(dev, " MSTS..... : (0x%p)=0x%X\n",
+ priv->smba + ISMT_MSTR_MSTS,
+ readl(priv->smba + ISMT_MSTR_MSTS));
+ dev_dbg(dev, " MDS...... : (0x%p)=0x%X\n",
+ priv->smba + ISMT_MSTR_MDS,
+ readl(priv->smba + ISMT_MSTR_MDS));
+ dev_dbg(dev, " RPOLICY.. : (0x%p)=0x%X\n",
+ priv->smba + ISMT_MSTR_RPOLICY,
+ readl(priv->smba + ISMT_MSTR_RPOLICY));
+ dev_dbg(dev, " SPGT..... : (0x%p)=0x%X\n",
+ priv->smba + ISMT_SPGT,
+ readl(priv->smba + ISMT_SPGT));
+}
+
+/**
+ * ismt_submit_desc() - add a descriptor to the ring
+ * @priv: iSMT private data
+ */
+static void ismt_submit_desc(struct ismt_priv *priv)
+{
+ uint fmhp;
+ uint val;
+
+ ismt_desc_dump(priv);
+ ismt_gen_reg_dump(priv);
+ ismt_mstr_reg_dump(priv);
+
+ /* Set the FMHP (Firmware Master Head Pointer)*/
+ fmhp = ((priv->head + 1) % ISMT_DESC_ENTRIES) << 16;
+ val = readl(priv->smba + ISMT_MSTR_MCTRL);
+ writel((val & ~ISMT_MCTRL_FMHP) | fmhp,
+ priv->smba + ISMT_MSTR_MCTRL);
+
+ /* Set the start bit */
+ val = readl(priv->smba + ISMT_MSTR_MCTRL);
+ writel(val | ISMT_MCTRL_SS,
+ priv->smba + ISMT_MSTR_MCTRL);
+}
+
+/**
+ * ismt_process_desc() - handle the completion of the descriptor
+ * @desc: the iSMT hardware descriptor
+ * @data: data buffer from the upper layer
+ * @priv: ismt_priv struct holding our dma buffer
+ * @size: SMBus transaction type
+ * @read_write: flag to indicate if this is a read or write
+ */
+static int ismt_process_desc(const struct ismt_desc *desc,
+ union i2c_smbus_data *data,
+ struct ismt_priv *priv, int size,
+ char read_write)
+{
+ u8 *dma_buffer = priv->dma_buffer;
+
+ dev_dbg(&priv->pci_dev->dev, "Processing completed descriptor\n");
+ __ismt_desc_dump(&priv->pci_dev->dev, desc);
+
+ if (desc->status & ISMT_DESC_SCS) {
+ if (read_write == I2C_SMBUS_WRITE &&
+ size != I2C_SMBUS_PROC_CALL)
+ return 0;
+
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ case I2C_SMBUS_BYTE_DATA:
+ data->byte = dma_buffer[0];
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ case I2C_SMBUS_PROC_CALL:
+ data->word = dma_buffer[0] | (dma_buffer[1] << 8);
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+ data->block[0] = desc->rxbytes;
+ break;
+ }
+ return 0;
+ }
+
+ if (likely(desc->status & ISMT_DESC_NAK))
+ return -ENXIO;
+
+ if (desc->status & ISMT_DESC_CRC)
+ return -EBADMSG;
+
+ if (desc->status & ISMT_DESC_COL)
+ return -EAGAIN;
+
+ if (desc->status & ISMT_DESC_LPR)
+ return -EPROTO;
+
+ if (desc->status & (ISMT_DESC_DLTO | ISMT_DESC_CLTO))
+ return -ETIMEDOUT;
+
+ return -EIO;
+}
+
+/**
+ * ismt_access() - process an SMBus command
+ * @adap: the i2c host adapter
+ * @addr: address of the i2c/SMBus target
+ * @flags: command options
+ * @read_write: read from or write to device
+ * @command: the i2c/SMBus command to issue
+ * @size: SMBus transaction type
+ * @data: read/write data buffer
+ */
+static int ismt_access(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ int ret;
+ dma_addr_t dma_addr = 0; /* address of the data buffer */
+ u8 dma_size = 0;
+ enum dma_data_direction dma_direction = 0;
+ struct ismt_desc *desc;
+ struct ismt_priv *priv = i2c_get_adapdata(adap);
+ struct device *dev = &priv->pci_dev->dev;
+
+ desc = &priv->hw[priv->head];
+
+ /* Initialize the descriptor */
+ memset(desc, 0, sizeof(struct ismt_desc));
+ desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
+
+ /* Initialize common control bits */
+ if (likely(priv->using_msi))
+ desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
+ else
+ desc->control = ISMT_DESC_FAIR;
+
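+ /* Honour the PEC flag except for Quick and raw I2C block transfers, which carry no PEC byte */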
+ if ((flags & I2C_CLIENT_PEC) && (size != I2C_SMBUS_QUICK)
+ && (size != I2C_SMBUS_I2C_BLOCK_DATA))
+ desc->control |= ISMT_DESC_PEC;
+
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ dev_dbg(dev, "I2C_SMBUS_QUICK\n");
+ break;
+
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_WRITE) {
+ /*
+ * Send Byte
+ * The command field contains the write data
+ */
+ dev_dbg(dev, "I2C_SMBUS_BYTE: WRITE\n");
+ desc->control |= ISMT_DESC_CWRL;
+ desc->wr_len_cmd = command;
+ } else {
+ /* Receive Byte */
+ dev_dbg(dev, "I2C_SMBUS_BYTE: READ\n");
+ dma_size = 1;
+ dma_direction = DMA_FROM_DEVICE;
+ desc->rd_len = 1;
+ }
+ break;
+
+ case I2C_SMBUS_BYTE_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ /*
+ * Write Byte
+ * Command plus 1 data byte
+ */
+ dev_dbg(dev, "I2C_SMBUS_BYTE_DATA: WRITE\n");
+ desc->wr_len_cmd = 2;
+ dma_size = 2;
+ dma_direction = DMA_TO_DEVICE;
+ priv->dma_buffer[0] = command;
+ priv->dma_buffer[1] = data->byte;
+ } else {
+ /* Read Byte */
+ dev_dbg(dev, "I2C_SMBUS_BYTE_DATA: READ\n");
+ desc->control |= ISMT_DESC_CWRL;
+ desc->wr_len_cmd = command;
+ desc->rd_len = 1;
+ dma_size = 1;
+ dma_direction = DMA_FROM_DEVICE;
+ }
+ break;
+
+ case I2C_SMBUS_WORD_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ /* Write Word */
+ dev_dbg(dev, "I2C_SMBUS_WORD_DATA: WRITE\n");
+ desc->wr_len_cmd = 3;
+ dma_size = 3;
+ dma_direction = DMA_TO_DEVICE;
+ priv->dma_buffer[0] = command;
+ priv->dma_buffer[1] = data->word & 0xff;
+ priv->dma_buffer[2] = data->word >> 8;
+ } else {
+ /* Read Word */
+ dev_dbg(dev, "I2C_SMBUS_WORD_DATA: READ\n");
+ desc->wr_len_cmd = command;
+ desc->control |= ISMT_DESC_CWRL;
+ desc->rd_len = 2;
+ dma_size = 2;
+ dma_direction = DMA_FROM_DEVICE;
+ }
+ break;
+
+ case I2C_SMBUS_PROC_CALL:
+ dev_dbg(dev, "I2C_SMBUS_PROC_CALL\n");
+ desc->wr_len_cmd = 3;
+ desc->rd_len = 2;
+ dma_size = 3;
+ dma_direction = DMA_BIDIRECTIONAL;
+ priv->dma_buffer[0] = command;
+ priv->dma_buffer[1] = data->word & 0xff;
+ priv->dma_buffer[2] = data->word >> 8;
+ break;
+
+ case I2C_SMBUS_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ /* Block Write */
+ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n");
+ dma_size = data->block[0] + 1;
+ dma_direction = DMA_TO_DEVICE;
+ desc->wr_len_cmd = dma_size;
+ desc->control |= ISMT_DESC_BLK;
+ priv->dma_buffer[0] = command;
+ memcpy(&priv->dma_buffer[1], &data->block[1], data->block[0]);
+ } else {
+ /* Block Read */
+ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n");
+ dma_size = I2C_SMBUS_BLOCK_MAX;
+ dma_direction = DMA_FROM_DEVICE;
+ desc->rd_len = dma_size;
+ desc->wr_len_cmd = command;
+ desc->control |= (ISMT_DESC_BLK | ISMT_DESC_CWRL);
+ }
+ break;
+
+ default:
+ dev_err(dev, "Unsupported transaction %d\n",
+ size);
+ return -EOPNOTSUPP;
+ }
+
+ /* map the data buffer */
+ if (dma_size != 0) {
+ dev_dbg(dev, " dev=%p\n", dev);
+ dev_dbg(dev, " data=%p\n", data);
+ dev_dbg(dev, " dma_buffer=%p\n", priv->dma_buffer);
+ dev_dbg(dev, " dma_size=%d\n", dma_size);
+ dev_dbg(dev, " dma_direction=%d\n", dma_direction);
+
+ dma_addr = dma_map_single(dev,
+ priv->dma_buffer,
+ dma_size,
+ dma_direction);
+
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Error in mapping dma buffer %p\n",
+ priv->dma_buffer);
+ return -EIO;
+ }
+
+ dev_dbg(dev, " dma_addr = 0x%016llX\n",
+ (unsigned long long)dma_addr);
+
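+ /* Program the DMA address into the descriptor, split into 32-bit halves */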
+ desc->dptr_low = lower_32_bits(dma_addr);
+ desc->dptr_high = upper_32_bits(dma_addr);
+ }
+
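+ /* Re-arm the completion before the descriptor is submitted */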
+ INIT_COMPLETION(priv->cmp);
+
+ /* Add the descriptor */
+ ismt_submit_desc(priv);
+
+ /* Now we wait for interrupt completion, 1s */
+ ret = wait_for_completion_timeout(&priv->cmp, HZ*1);
+
+ /* unmap the data buffer */
+ if (dma_size != 0)
+ dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
+
+ if (unlikely(!ret)) {
+ dev_err(dev, "completion wait timed out\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* do any post processing of the descriptor here */
+ ret = ismt_process_desc(desc, data, priv, size, read_write);
+
+out:
+ /* Update the ring pointer */
+ priv->head++;
+ priv->head %= ISMT_DESC_ENTRIES;
+
+ return ret;
+}
+
+/**
+ * ismt_func() - report which i2c commands are supported by this adapter
+ * @adap: the i2c host adapter
+ */
+static u32 ismt_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_QUICK |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_PROC_CALL |
+ I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_PEC;
+}
+
+/**
+ * smbus_algorithm - the adapter algorithm and supported functionality
+ * @smbus_xfer: the adapter algorithm
+ * @functionality: functionality supported by the adapter
+ */
+static const struct i2c_algorithm smbus_algorithm = {
+ .smbus_xfer = ismt_access,
+ .functionality = ismt_func,
+};
+
+/**
+ * ismt_handle_isr() - interrupt handler bottom half
+ * @priv: iSMT private data
+ */
+static irqreturn_t ismt_handle_isr(struct ismt_priv *priv)
+{
+ complete(&priv->cmp);
+
+ return IRQ_HANDLED;
+}
+
+
+/**
+ * ismt_do_interrupt() - IRQ interrupt handler
+ * @vec: interrupt vector
+ * @data: iSMT private data
+ */
+static irqreturn_t ismt_do_interrupt(int vec, void *data)
+{
+ u32 val;
+ struct ismt_priv *priv = data;
+
+ /*
+ * Check whether this interrupt is ours; return IRQ_NONE if not,
+ * since the interrupt line may be shared.
+ */
+ val = readl(priv->smba + ISMT_MSTR_MSTS);
+
+ if (!(val & (ISMT_MSTS_MIS | ISMT_MSTS_MEIS)))
+ return IRQ_NONE;
+ else
+ writel(val | ISMT_MSTS_MIS | ISMT_MSTS_MEIS,
+ priv->smba + ISMT_MSTR_MSTS);
+
+ return ismt_handle_isr(priv);
+}
+
+/**
+ * ismt_do_msi_interrupt() - MSI interrupt handler
+ * @vec: interrupt vector
+ * @data: iSMT private data
+ */
+static irqreturn_t ismt_do_msi_interrupt(int vec, void *data)
+{
+ return ismt_handle_isr(data);
+}
+
+/**
+ * ismt_hw_init() - initialize the iSMT hardware
+ * @priv: iSMT private data
+ */
+static void ismt_hw_init(struct ismt_priv *priv)
+{
+ u32 val;
+ struct device *dev = &priv->pci_dev->dev;
+
+ /* initialize the Master Descriptor Base Address (MDBA) */
+ writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);
+
+ /* initialize the Master Control Register (MCTRL) */
+ writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);
+
+ /* initialize the Master Status Register (MSTS) */
+ writel(0, priv->smba + ISMT_MSTR_MSTS);
+
+ /* initialize the Master Descriptor Size (MDS) */
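+ /* The MDS field is programmed with the descriptor ring size minus one */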
+ val = readl(priv->smba + ISMT_MSTR_MDS);
+ writel((val & ~ISMT_MDS_MASK) | (ISMT_DESC_ENTRIES - 1),
+ priv->smba + ISMT_MSTR_MDS);
+
+ /*
+ * Set the SMBus speed (could use this for slow HW debuggers)
+ */
+
+ val = readl(priv->smba + ISMT_SPGT);
+
+ switch (bus_speed) {
+ case 0:
+ break;
+
+ case 80:
+ dev_dbg(dev, "Setting SMBus clock to 80 kHz\n");
+ writel(((val & ~ISMT_SPGT_SPD_MASK) | ISMT_SPGT_SPD_80K),
+ priv->smba + ISMT_SPGT);
+ break;
+
+ case 100:
+ dev_dbg(dev, "Setting SMBus clock to 100 kHz\n");
+ writel(((val & ~ISMT_SPGT_SPD_MASK) | ISMT_SPGT_SPD_100K),
+ priv->smba + ISMT_SPGT);
+ break;
+
+ case 400:
+ dev_dbg(dev, "Setting SMBus clock to 400 kHz\n");
+ writel(((val & ~ISMT_SPGT_SPD_MASK) | ISMT_SPGT_SPD_400K),
+ priv->smba + ISMT_SPGT);
+ break;
+
+ case 1000:
+ dev_dbg(dev, "Setting SMBus clock to 1000 kHz\n");
+ writel(((val & ~ISMT_SPGT_SPD_MASK) | ISMT_SPGT_SPD_1M),
+ priv->smba + ISMT_SPGT);
+ break;
+
+ default:
+ dev_warn(dev, "Invalid SMBus clock speed, only 0, 80, 100, 400, and 1000 are valid\n");
+ break;
+ }
+
+ val = readl(priv->smba + ISMT_SPGT);
+
+ switch (val & ISMT_SPGT_SPD_MASK) {
+ case ISMT_SPGT_SPD_80K:
+ bus_speed = 80;
+ break;
+ case ISMT_SPGT_SPD_100K:
+ bus_speed = 100;
+ break;
+ case ISMT_SPGT_SPD_400K:
+ bus_speed = 400;
+ break;
+ case ISMT_SPGT_SPD_1M:
+ bus_speed = 1000;
+ break;
+ }
+ dev_dbg(dev, "SMBus clock is running at %d kHz\n", bus_speed);
+}
+
+/**
+ * ismt_dev_init() - initialize the iSMT data structures
+ * @priv: iSMT private data
+ */
+static int ismt_dev_init(struct ismt_priv *priv)
+{
+ /* allocate memory for the descriptor */
+ priv->hw = dmam_alloc_coherent(&priv->pci_dev->dev,
+ (ISMT_DESC_ENTRIES
+ * sizeof(struct ismt_desc)),
+ &priv->io_rng_dma,
+ GFP_KERNEL);
+ if (!priv->hw)
+ return -ENOMEM;
+
+ memset(priv->hw, 0, (ISMT_DESC_ENTRIES * sizeof(struct ismt_desc)));
+
+ priv->head = 0;
+ init_completion(&priv->cmp);
+
+ return 0;
+}
+
+/**
+ * ismt_int_init() - initialize interrupts
+ * @priv: iSMT private data
+ */
+static int ismt_int_init(struct ismt_priv *priv)
+{
+ int err;
+
+ /* Try using MSI interrupts */
+ err = pci_enable_msi(priv->pci_dev);
+ if (err) {
+ dev_warn(&priv->pci_dev->dev,
+ "Unable to use MSI interrupts, falling back to legacy\n");
+ goto intx;
+ }
+
+ err = devm_request_irq(&priv->pci_dev->dev,
+ priv->pci_dev->irq,
+ ismt_do_msi_interrupt,
+ 0,
+ "ismt-msi",
+ priv);
+ if (err) {
+ pci_disable_msi(priv->pci_dev);
+ goto intx;
+ }
+
+ priv->using_msi = true;
+ goto done;
+
+ /* Try using legacy interrupts */
+intx:
+ err = devm_request_irq(&priv->pci_dev->dev,
+ priv->pci_dev->irq,
+ ismt_do_interrupt,
+ IRQF_SHARED,
+ "ismt-intx",
+ priv);
+ if (err) {
+ dev_err(&priv->pci_dev->dev, "no usable interrupts\n");
+ return -ENODEV;
+ }
+
+ priv->using_msi = false;
+
+done:
+ return 0;
+}
+
+static struct pci_driver ismt_driver;
+
+/**
+ * ismt_probe() - probe for iSMT devices
+ * @pdev: PCI-Express device
+ * @id: PCI-Express device ID
+ */
+static int
+ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int err;
+ struct ismt_priv *priv;
+ unsigned long start, len;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, priv);
+ i2c_set_adapdata(&priv->adapter, priv);
+ priv->adapter.owner = THIS_MODULE;
+
+ priv->adapter.class = I2C_CLASS_HWMON;
+
+ priv->adapter.algo = &smbus_algorithm;
+
+ /* set up the sysfs linkage to our parent device */
+ priv->adapter.dev.parent = &pdev->dev;
+
+ /* number of retries on lost arbitration */
+ priv->adapter.retries = ISMT_MAX_RETRIES;
+
+ priv->pci_dev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable SMBus PCI device (%d)\n",
+ err);
+ return err;
+ }
+
+ /* enable bus mastering */
+ pci_set_master(pdev);
+
+ /* Determine the address of the SMBus area */
+ start = pci_resource_start(pdev, SMBBAR);
+ len = pci_resource_len(pdev, SMBBAR);
+ if (!start || !len) {
+ dev_err(&pdev->dev,
+ "SMBus base address uninitialized, upgrade BIOS\n");
+ return -ENODEV;
+ }
+
+ snprintf(priv->adapter.name, sizeof(priv->adapter.name),
+ "SMBus iSMT adapter at %lx", start);
+
+ dev_dbg(&priv->pci_dev->dev, " start=0x%lX\n", start);
+ dev_dbg(&priv->pci_dev->dev, " len=0x%lX\n", len);
+
+ err = acpi_check_resource_conflict(&pdev->resource[SMBBAR]);
+ if (err) {
+ dev_err(&pdev->dev, "ACPI resource conflict!\n");
+ return err;
+ }
+
+ err = pci_request_region(pdev, SMBBAR, ismt_driver.name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to request SMBus region 0x%lx-0x%lx\n",
+ start, start + len);
+ return err;
+ }
+
+ priv->smba = pcim_iomap(pdev, SMBBAR, len);
+ if (!priv->smba) {
+ dev_err(&pdev->dev, "Unable to ioremap SMBus BAR\n");
+ err = -ENODEV;
+ goto fail;
+ }
+
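+ /* Prefer 64-bit DMA addressing and fall back to 32-bit if that fails */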
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32)) != 0)) {
+ dev_err(&pdev->dev, "pci_set_dma_mask fail %p\n",
+ pdev);
+ err = -ENODEV;
+ goto fail;
+ }
+ }
+
+ err = ismt_dev_init(priv);
+ if (err)
+ goto fail;
+
+ ismt_hw_init(priv);
+
+ err = ismt_int_init(priv);
+ if (err)
+ goto fail;
+
+ err = i2c_add_adapter(&priv->adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add SMBus iSMT adapter\n");
+ err = -ENODEV;
+ goto fail;
+ }
+ return 0;
+
+fail:
+ pci_release_region(pdev, SMBBAR);
+ return err;
+}
+
+/**
+ * ismt_remove() - release driver resources
+ * @pdev: PCI-Express device
+ */
+static void ismt_remove(struct pci_dev *pdev)
+{
+ struct ismt_priv *priv = pci_get_drvdata(pdev);
+
+ i2c_del_adapter(&priv->adapter);
+ pci_release_region(pdev, SMBBAR);
+}
+
+/**
+ * ismt_suspend() - place the device in suspend
+ * @pdev: PCI-Express device
+ * @mesg: PM message
+ */
+#ifdef CONFIG_PM
+static int ismt_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
+ return 0;
+}
+
+/**
+ * ismt_resume() - PCI resume code
+ * @pdev: PCI-Express device
+ */
+static int ismt_resume(struct pci_dev *pdev)
+{
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ return pci_enable_device(pdev);
+}
+
+#else
+
+#define ismt_suspend NULL
+#define ismt_resume NULL
+
+#endif
+
+static struct pci_driver ismt_driver = {
+ .name = "ismt_smbus",
+ .id_table = ismt_ids,
+ .probe = ismt_probe,
+ .remove = ismt_remove,
+ .suspend = ismt_suspend,
+ .resume = ismt_resume,
+};
+
+module_pci_driver(ismt_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Bill E. Brown <bill.e.brown@intel.com>");
+MODULE_DESCRIPTION("Intel SMBus Message Transport (iSMT) driver");
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index a69459e5c3f3..5e705ee02f4a 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -696,7 +696,6 @@ static int fsl_i2c_probe(struct platform_device *op)
return result;
fail_add:
- dev_set_drvdata(&op->dev, NULL);
free_irq(i2c->irq, i2c);
fail_request:
irq_dispose_mapping(i2c->irq);
@@ -711,7 +710,6 @@ static int fsl_i2c_remove(struct platform_device *op)
struct mpc_i2c *i2c = dev_get_drvdata(&op->dev);
i2c_del_adapter(&i2c->adap);
- dev_set_drvdata(&op->dev, NULL);
if (i2c->irq)
free_irq(i2c->irq, i2c);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index d6abaf2cf2e3..120f24646696 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -39,6 +39,7 @@
#define MXS_I2C_CTRL0_SET (0x04)
#define MXS_I2C_CTRL0_SFTRST 0x80000000
+#define MXS_I2C_CTRL0_RUN 0x20000000
#define MXS_I2C_CTRL0_SEND_NAK_ON_LAST 0x02000000
#define MXS_I2C_CTRL0_RETAIN_CLOCK 0x00200000
#define MXS_I2C_CTRL0_POST_SEND_STOP 0x00100000
@@ -64,6 +65,13 @@
#define MXS_I2C_CTRL1_SLAVE_STOP_IRQ 0x02
#define MXS_I2C_CTRL1_SLAVE_IRQ 0x01
+#define MXS_I2C_DATA (0xa0)
+
+#define MXS_I2C_DEBUG0 (0xb0)
+#define MXS_I2C_DEBUG0_CLR (0xb8)
+
+#define MXS_I2C_DEBUG0_DMAREQ 0x80000000
+
#define MXS_I2C_IRQ_MASK (MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ | \
MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ | \
MXS_I2C_CTRL1_EARLY_TERM_IRQ | \
@@ -85,35 +93,6 @@
#define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \
MXS_I2C_CTRL0_MASTER_MODE)
-struct mxs_i2c_speed_config {
- uint32_t timing0;
- uint32_t timing1;
- uint32_t timing2;
-};
-
-/*
- * Timing values for the default 24MHz clock supplied into the i2c block.
- *
- * The bus can operate at 95kHz or at 400kHz with the following timing
- * register configurations. The 100kHz mode isn't present because it's
- * values are not stated in the i.MX233/i.MX28 datasheet. The 95kHz mode
- * shall be close enough replacement. Therefore when the bus is configured
- * for 100kHz operation, 95kHz timing settings are actually loaded.
- *
- * For details, see i.MX233 [25.4.2 - 25.4.4] and i.MX28 [27.5.2 - 27.5.4].
- */
-static const struct mxs_i2c_speed_config mxs_i2c_95kHz_config = {
- .timing0 = 0x00780030,
- .timing1 = 0x00800030,
- .timing2 = 0x00300030,
-};
-
-static const struct mxs_i2c_speed_config mxs_i2c_400kHz_config = {
- .timing0 = 0x000f0007,
- .timing1 = 0x001f000f,
- .timing2 = 0x00300030,
-};
-
/**
* struct mxs_i2c_dev - per device, private MXS-I2C data
*
@@ -129,7 +108,9 @@ struct mxs_i2c_dev {
struct completion cmd_complete;
int cmd_err;
struct i2c_adapter adapter;
- const struct mxs_i2c_speed_config *speed;
+
+ uint32_t timing0;
+ uint32_t timing1;
/* DMA support components */
int dma_channel;
@@ -145,9 +126,16 @@ static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
{
stmp_reset_block(i2c->regs);
- writel(i2c->speed->timing0, i2c->regs + MXS_I2C_TIMING0);
- writel(i2c->speed->timing1, i2c->regs + MXS_I2C_TIMING1);
- writel(i2c->speed->timing2, i2c->regs + MXS_I2C_TIMING2);
+ /*
+ * Configure timing for the I2C block. The I2C TIMING2 register has to
+ * be programmed with this particular magic number. The rest is derived
+ * from the XTAL speed and requested I2C speed.
+ *
+ * For details, see i.MX233 [25.4.2 - 25.4.4] and i.MX28 [27.5.2 - 27.5.4].
+ */
+ writel(i2c->timing0, i2c->regs + MXS_I2C_TIMING0);
+ writel(i2c->timing1, i2c->regs + MXS_I2C_TIMING1);
+ writel(0x00300030, i2c->regs + MXS_I2C_TIMING2);
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
}
@@ -298,6 +286,135 @@ write_init_pio_fail:
return -EINVAL;
}
+static int mxs_i2c_pio_wait_dmareq(struct mxs_i2c_dev *i2c)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (!(readl(i2c->regs + MXS_I2C_DEBUG0) &
+ MXS_I2C_DEBUG0_DMAREQ)) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cond_resched();
+ }
+
+ writel(MXS_I2C_DEBUG0_DMAREQ, i2c->regs + MXS_I2C_DEBUG0_CLR);
+
+ return 0;
+}
+
+static int mxs_i2c_pio_wait_cplt(struct mxs_i2c_dev *i2c)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ /*
+ * We do not use interrupts in PIO mode. Since the maximum
+ * transfer length in PIO mode is 8 bytes, the interrupt
+ * overhead would be too large and would negate the gain
+ * from using PIO mode in the first place.
+ */
+
+ while (!(readl(i2c->regs + MXS_I2C_CTRL1) &
+ MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ)) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cond_resched();
+ }
+
+ writel(MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ,
+ i2c->regs + MXS_I2C_CTRL1_CLR);
+
+ return 0;
+}
+
+static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msg, uint32_t flags)
+{
+ struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
+ uint32_t addr_data = msg->addr << 1;
+ uint32_t data = 0;
+ int i, shifts_left, ret;
+
+ /* Mute IRQs coming from this block. */
+ writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_CLR);
+
+ if (msg->flags & I2C_M_RD) {
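+ /* Set the read bit in the address byte (I2C_SMBUS_READ == 1) */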
+ addr_data |= I2C_SMBUS_READ;
+
+ /* SELECT command. */
+ writel(MXS_I2C_CTRL0_RUN | MXS_CMD_I2C_SELECT,
+ i2c->regs + MXS_I2C_CTRL0);
+
+ ret = mxs_i2c_pio_wait_dmareq(i2c);
+ if (ret)
+ return ret;
+
+ writel(addr_data, i2c->regs + MXS_I2C_DATA);
+
+ ret = mxs_i2c_pio_wait_cplt(i2c);
+ if (ret)
+ return ret;
+
+ /* READ command. */
+ writel(MXS_I2C_CTRL0_RUN | MXS_CMD_I2C_READ | flags |
+ MXS_I2C_CTRL0_XFER_COUNT(msg->len),
+ i2c->regs + MXS_I2C_CTRL0);
+
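+ /* Data arrives through a 32-bit PIO register; fetch a new word every four bytes and unpack it LSB first */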
+ for (i = 0; i < msg->len; i++) {
+ if ((i & 3) == 0) {
+ ret = mxs_i2c_pio_wait_dmareq(i2c);
+ if (ret)
+ return ret;
+ data = readl(i2c->regs + MXS_I2C_DATA);
+ }
+ msg->buf[i] = data & 0xff;
+ data >>= 8;
+ }
+ } else {
+ addr_data |= I2C_SMBUS_WRITE;
+
+ /* WRITE command. */
+ writel(MXS_I2C_CTRL0_RUN | MXS_CMD_I2C_WRITE | flags |
+ MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1),
+ i2c->regs + MXS_I2C_CTRL0);
+
+ /*
+ * The LSB of each 32-bit data word is the first byte sent
+ * across the bus; higher-order bytes follow. Hence the
+ * packing scheme below.
+ */
+ data = addr_data << 24;
+ for (i = 0; i < msg->len; i++) {
+ data >>= 8;
+ data |= (msg->buf[i] << 24);
+ if ((i & 3) == 2) {
+ ret = mxs_i2c_pio_wait_dmareq(i2c);
+ if (ret)
+ return ret;
+ writel(data, i2c->regs + MXS_I2C_DATA);
+ }
+ }
+
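+ /* Flush the partially filled word remaining after the loop above */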
+ shifts_left = 24 - (i & 3) * 8;
+ if (shifts_left) {
+ data >>= shifts_left;
+ ret = mxs_i2c_pio_wait_dmareq(i2c);
+ if (ret)
+ return ret;
+ writel(data, i2c->regs + MXS_I2C_DATA);
+ }
+ }
+
+ ret = mxs_i2c_pio_wait_cplt(i2c);
+ if (ret)
+ return ret;
+
+ /* Clear any dangling IRQs and re-enable interrupts. */
+ writel(MXS_I2C_IRQ_MASK, i2c->regs + MXS_I2C_CTRL1_CLR);
+ writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
+
+ return 0;
+}
+
/*
* Low level master read/write transaction.
*/
@@ -316,24 +433,37 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
if (msg->len == 0)
return -EINVAL;
- INIT_COMPLETION(i2c->cmd_complete);
- i2c->cmd_err = 0;
-
- ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
- if (ret)
- return ret;
+ /*
+ * The boundary for selecting between the PIO and DMA transfer
+ * methods is set to 8 bytes: transfers shorter than 8 bytes use
+ * PIO mode, while longer transfers use DMA. The 8-byte boundary
+ * is based on empirical measurement and a good deal of prior frobbing.
+ */
+ if (msg->len < 8) {
+ ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
+ if (ret)
+ mxs_i2c_reset(i2c);
+ } else {
+ i2c->cmd_err = 0;
+ INIT_COMPLETION(i2c->cmd_complete);
+ ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
+ if (ret)
+ return ret;
- ret = wait_for_completion_timeout(&i2c->cmd_complete,
+ ret = wait_for_completion_timeout(&i2c->cmd_complete,
msecs_to_jiffies(1000));
- if (ret == 0)
- goto timeout;
+ if (ret == 0)
+ goto timeout;
+
+ if (i2c->cmd_err == -ENXIO)
+ mxs_i2c_reset(i2c);
- if (i2c->cmd_err == -ENXIO)
- mxs_i2c_reset(i2c);
+ ret = i2c->cmd_err;
+ }
- dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err);
+ dev_dbg(i2c->dev, "Done with err=%d\n", ret);
- return i2c->cmd_err;
+ return ret;
timeout:
dev_dbg(i2c->dev, "Timeout!\n");
@@ -403,6 +533,43 @@ static bool mxs_i2c_dma_filter(struct dma_chan *chan, void *param)
return true;
}
+static void mxs_i2c_derive_timing(struct mxs_i2c_dev *i2c, int speed)
+{
+ /* The I2C block clock runs at 24 MHz */
+ const uint32_t clk = 24000000;
+ uint32_t base;
+ uint16_t high_count, low_count, rcv_count, xmit_count;
+ struct device *dev = i2c->dev;
+
+ if (speed > 540000) {
+ dev_warn(dev, "Speed too high (%d Hz), using 540 kHz\n", speed);
+ speed = 540000;
+ } else if (speed < 12000) {
+ dev_warn(dev, "Speed too low (%d Hz), using 12 kHz\n", speed);
+ speed = 12000;
+ }
+
+ /*
+ * The timing derivation algorithm. There is no documentation available
+ * for this algorithm; it was derived with an oscilloscope by fiddling
+ * with constants until the observed waveform was good enough at 20 kHz,
+ * 50 kHz, 100 kHz, 200 kHz, 300 kHz and 400 kHz. It is reasonable to
+ * assume the algorithm works for other frequencies as well.
+ *
+ * Note that the frequency had to be capped at both ends, as it is not
+ * possible to configure a completely arbitrary frequency for the I2C
+ * bus clock.
+ */
+ base = ((clk / speed) - 38) / 2;
+ high_count = base + 3;
+ low_count = base - 3;
+ rcv_count = (high_count * 3) / 4;
+ xmit_count = low_count / 4;
+
+ i2c->timing0 = (high_count << 16) | rcv_count;
+ i2c->timing1 = (low_count << 16) | xmit_count;
+}
+
static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
{
uint32_t speed;
@@ -422,12 +589,12 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
}
ret = of_property_read_u32(node, "clock-frequency", &speed);
- if (ret)
+ if (ret) {
dev_warn(dev, "No I2C speed selected, using 100kHz\n");
- else if (speed == 400000)
- i2c->speed = &mxs_i2c_400kHz_config;
- else if (speed != 100000)
- dev_warn(dev, "Unsupported I2C speed selected, using 100kHz\n");
+ speed = 100000;
+ }
+
+ mxs_i2c_derive_timing(i2c, speed);
return 0;
}
@@ -471,7 +638,6 @@ static int mxs_i2c_probe(struct platform_device *pdev)
return err;
i2c->dev = dev;
- i2c->speed = &mxs_i2c_95kHz_config;
init_completion(&i2c->cmd_complete);
@@ -531,8 +697,6 @@ static int mxs_i2c_remove(struct platform_device *pdev)
writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index adac8542771d..ac88f4000cc2 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -60,7 +60,7 @@
#include <linux/io.h>
MODULE_LICENSE("GPL");
-MODULE_AUTHOR ("Hans-Frieder Vogt <hfvogt@gmx.net>");
+MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
MODULE_DESCRIPTION("nForce2/3/4/5xx SMBus driver");
@@ -188,9 +188,9 @@ static int nforce2_check_status(struct i2c_adapter *adap)
}
/* Return negative errno on error */
-static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
+static s32 nforce2_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
- u8 command, int size, union i2c_smbus_data * data)
+ u8 command, int size, union i2c_smbus_data *data)
{
struct nforce2_smbus *smbus = adap->algo_data;
unsigned char protocol, pec;
@@ -202,56 +202,54 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
pec = (flags & I2C_CLIENT_PEC) ? NVIDIA_SMB_PRTCL_PEC : 0;
switch (size) {
+ case I2C_SMBUS_QUICK:
+ protocol |= NVIDIA_SMB_PRTCL_QUICK;
+ read_write = I2C_SMBUS_WRITE;
+ break;
- case I2C_SMBUS_QUICK:
- protocol |= NVIDIA_SMB_PRTCL_QUICK;
- read_write = I2C_SMBUS_WRITE;
- break;
-
- case I2C_SMBUS_BYTE:
- if (read_write == I2C_SMBUS_WRITE)
- outb_p(command, NVIDIA_SMB_CMD);
- protocol |= NVIDIA_SMB_PRTCL_BYTE;
- break;
-
- case I2C_SMBUS_BYTE_DATA:
- outb_p(command, NVIDIA_SMB_CMD);
- if (read_write == I2C_SMBUS_WRITE)
- outb_p(data->byte, NVIDIA_SMB_DATA);
- protocol |= NVIDIA_SMB_PRTCL_BYTE_DATA;
- break;
-
- case I2C_SMBUS_WORD_DATA:
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_WRITE)
outb_p(command, NVIDIA_SMB_CMD);
- if (read_write == I2C_SMBUS_WRITE) {
- outb_p(data->word, NVIDIA_SMB_DATA);
- outb_p(data->word >> 8, NVIDIA_SMB_DATA+1);
- }
- protocol |= NVIDIA_SMB_PRTCL_WORD_DATA | pec;
- break;
-
- case I2C_SMBUS_BLOCK_DATA:
- outb_p(command, NVIDIA_SMB_CMD);
- if (read_write == I2C_SMBUS_WRITE) {
- len = data->block[0];
- if ((len == 0) || (len > I2C_SMBUS_BLOCK_MAX)) {
- dev_err(&adap->dev,
- "Transaction failed "
- "(requested block size: %d)\n",
- len);
- return -EINVAL;
- }
- outb_p(len, NVIDIA_SMB_BCNT);
- for (i = 0; i < I2C_SMBUS_BLOCK_MAX; i++)
- outb_p(data->block[i + 1],
- NVIDIA_SMB_DATA+i);
+ protocol |= NVIDIA_SMB_PRTCL_BYTE;
+ break;
+
+ case I2C_SMBUS_BYTE_DATA:
+ outb_p(command, NVIDIA_SMB_CMD);
+ if (read_write == I2C_SMBUS_WRITE)
+ outb_p(data->byte, NVIDIA_SMB_DATA);
+ protocol |= NVIDIA_SMB_PRTCL_BYTE_DATA;
+ break;
+
+ case I2C_SMBUS_WORD_DATA:
+ outb_p(command, NVIDIA_SMB_CMD);
+ if (read_write == I2C_SMBUS_WRITE) {
+ outb_p(data->word, NVIDIA_SMB_DATA);
+ outb_p(data->word >> 8, NVIDIA_SMB_DATA + 1);
+ }
+ protocol |= NVIDIA_SMB_PRTCL_WORD_DATA | pec;
+ break;
+
+ case I2C_SMBUS_BLOCK_DATA:
+ outb_p(command, NVIDIA_SMB_CMD);
+ if (read_write == I2C_SMBUS_WRITE) {
+ len = data->block[0];
+ if ((len == 0) || (len > I2C_SMBUS_BLOCK_MAX)) {
+ dev_err(&adap->dev,
+ "Transaction failed (requested block size: %d)\n",
+ len);
+ return -EINVAL;
}
- protocol |= NVIDIA_SMB_PRTCL_BLOCK_DATA | pec;
- break;
+ outb_p(len, NVIDIA_SMB_BCNT);
+ for (i = 0; i < I2C_SMBUS_BLOCK_MAX; i++)
+ outb_p(data->block[i + 1],
+ NVIDIA_SMB_DATA + i);
+ }
+ protocol |= NVIDIA_SMB_PRTCL_BLOCK_DATA | pec;
+ break;
- default:
- dev_err(&adap->dev, "Unsupported transaction %d\n", size);
- return -EOPNOTSUPP;
+ default:
+ dev_err(&adap->dev, "Unsupported transaction %d\n", size);
+ return -EOPNOTSUPP;
}
outb_p((addr & 0x7f) << 1, NVIDIA_SMB_ADDR);
@@ -265,28 +263,28 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
return 0;
switch (size) {
-
- case I2C_SMBUS_BYTE:
- case I2C_SMBUS_BYTE_DATA:
- data->byte = inb_p(NVIDIA_SMB_DATA);
- break;
-
- case I2C_SMBUS_WORD_DATA:
- data->word = inb_p(NVIDIA_SMB_DATA) | (inb_p(NVIDIA_SMB_DATA+1) << 8);
- break;
-
- case I2C_SMBUS_BLOCK_DATA:
- len = inb_p(NVIDIA_SMB_BCNT);
- if ((len <= 0) || (len > I2C_SMBUS_BLOCK_MAX)) {
- dev_err(&adap->dev, "Transaction failed "
- "(received block size: 0x%02x)\n",
- len);
- return -EPROTO;
- }
- for (i = 0; i < len; i++)
- data->block[i+1] = inb_p(NVIDIA_SMB_DATA + i);
- data->block[0] = len;
- break;
+ case I2C_SMBUS_BYTE:
+ case I2C_SMBUS_BYTE_DATA:
+ data->byte = inb_p(NVIDIA_SMB_DATA);
+ break;
+
+ case I2C_SMBUS_WORD_DATA:
+ data->word = inb_p(NVIDIA_SMB_DATA) |
+ (inb_p(NVIDIA_SMB_DATA + 1) << 8);
+ break;
+
+ case I2C_SMBUS_BLOCK_DATA:
+ len = inb_p(NVIDIA_SMB_BCNT);
+ if ((len <= 0) || (len > I2C_SMBUS_BLOCK_MAX)) {
+ dev_err(&adap->dev,
+ "Transaction failed (received block size: 0x%02x)\n",
+ len);
+ return -EPROTO;
+ }
+ for (i = 0; i < len; i++)
+ data->block[i + 1] = inb_p(NVIDIA_SMB_DATA + i);
+ data->block[0] = len;
+ break;
}
return 0;
@@ -299,7 +297,7 @@ static u32 nforce2_func(struct i2c_adapter *adapter)
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_PEC |
- (((struct nforce2_smbus*)adapter->algo_data)->blockops ?
+ (((struct nforce2_smbus *)adapter->algo_data)->blockops ?
I2C_FUNC_SMBUS_BLOCK_DATA : 0);
}
@@ -327,7 +325,7 @@ static DEFINE_PCI_DEVICE_TABLE(nforce2_ids) = {
{ 0 }
};
-MODULE_DEVICE_TABLE (pci, nforce2_ids);
+MODULE_DEVICE_TABLE(pci, nforce2_ids);
static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
@@ -377,7 +375,8 @@ static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
release_region(smbus->base, smbus->size);
return error;
}
- dev_info(&smbus->adapter.dev, "nForce2 SMBus adapter at %#x\n", smbus->base);
+ dev_info(&smbus->adapter.dev, "nForce2 SMBus adapter at %#x\n",
+ smbus->base);
return 0;
}
@@ -388,11 +387,12 @@ static int nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id)
int res1, res2;
/* we support 2 SMBus adapters */
- if (!(smbuses = kzalloc(2*sizeof(struct nforce2_smbus), GFP_KERNEL)))
+ smbuses = kzalloc(2 * sizeof(struct nforce2_smbus), GFP_KERNEL);
+ if (!smbuses)
return -ENOMEM;
pci_set_drvdata(dev, smbuses);
- switch(dev->device) {
+ switch (dev->device) {
case PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS:
case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS:
case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS:
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 8b2ffcf45322..650293ff4d62 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -26,6 +26,7 @@
#include <linux/platform_data/i2c-nomadik.h>
#include <linux/of.h>
#include <linux/of_i2c.h>
+#include <linux/pinctrl/consumer.h>
#define DRIVER_NAME "nmk-i2c"
@@ -147,6 +148,10 @@ struct i2c_nmk_client {
* @stop: stop condition.
* @xfer_complete: acknowledge completion for a I2C message.
* @result: controller propogated result.
+ * @pinctrl: pinctrl handle.
+ * @pins_default: default state for the pins.
+ * @pins_idle: idle state for the pins.
+ * @pins_sleep: sleep state for the pins.
* @busy: Busy doing transfer.
*/
struct nmk_i2c_dev {
@@ -160,6 +165,11 @@ struct nmk_i2c_dev {
int stop;
struct completion xfer_complete;
int result;
+ /* Three pin states - default, idle & sleep */
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_idle;
+ struct pinctrl_state *pins_sleep;
bool busy;
};
@@ -402,8 +412,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
static int read_i2c(struct nmk_i2c_dev *dev, u16 flags)
{
u32 status = 0;
- u32 mcr;
- u32 irq_mask = 0;
+ u32 mcr, irq_mask;
int timeout;
mcr = load_i2c_mcr_reg(dev, flags);
@@ -472,8 +481,7 @@ static void fill_tx_fifo(struct nmk_i2c_dev *dev, int no_bytes)
static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
{
u32 status = 0;
- u32 mcr;
- u32 irq_mask = 0;
+ u32 mcr, irq_mask;
int timeout;
mcr = load_i2c_mcr_reg(dev, flags);
@@ -636,6 +644,15 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
goto out_clk;
}
+ /* Optionally enable pins to be muxed in and configured */
+ if (!IS_ERR(dev->pins_default)) {
+ status = pinctrl_select_state(dev->pinctrl,
+ dev->pins_default);
+ if (status)
+ dev_err(&dev->adev->dev,
+ "could not set default pins\n");
+ }
+
status = init_hw(dev);
if (status)
goto out;
@@ -663,6 +680,15 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
out:
clk_disable_unprepare(dev->clk);
out_clk:
+ /* Optionally let pins go into idle state */
+ if (!IS_ERR(dev->pins_idle)) {
+ status = pinctrl_select_state(dev->pinctrl,
+ dev->pins_idle);
+ if (status)
+ dev_err(&dev->adev->dev,
+ "could not set pins to idle state\n");
+ }
+
pm_runtime_put_sync(&dev->adev->dev);
dev->busy = false;
@@ -703,8 +729,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
struct nmk_i2c_dev *dev = arg;
u32 tft, rft;
u32 count;
- u32 misr;
- u32 src = 0;
+ u32 misr, src;
/* load Tx FIFO and Rx FIFO threshold values */
tft = readl(dev->virtbase + I2C_TFTR);
@@ -857,15 +882,41 @@ static int nmk_i2c_suspend(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
+ int ret;
if (nmk_i2c->busy)
return -EBUSY;
+ if (!IS_ERR(nmk_i2c->pins_sleep)) {
+ ret = pinctrl_select_state(nmk_i2c->pinctrl,
+ nmk_i2c->pins_sleep);
+ if (ret)
+ dev_err(dev, "could not set pins to sleep state\n");
+ }
+
return 0;
}
static int nmk_i2c_resume(struct device *dev)
{
+ struct amba_device *adev = to_amba_device(dev);
+ struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
+ int ret;
+
+ /* First go to the default state */
+ if (!IS_ERR(nmk_i2c->pins_default)) {
+ ret = pinctrl_select_state(nmk_i2c->pinctrl,
+ nmk_i2c->pins_default);
+ if (ret)
+ dev_err(dev, "could not set pins to default state\n");
+ }
+ /* Then let's idle the pins until the next transfer happens */
+ if (!IS_ERR(nmk_i2c->pins_idle)) {
+ ret = pinctrl_select_state(nmk_i2c->pinctrl,
+ nmk_i2c->pins_idle);
+ if (ret)
+ dev_err(dev, "could not set pins to idle state\n");
+ }
return 0;
}
#else
@@ -953,6 +1004,40 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
dev->adev = adev;
amba_set_drvdata(adev, dev);
+ dev->pinctrl = devm_pinctrl_get(&adev->dev);
+ if (IS_ERR(dev->pinctrl)) {
+ ret = PTR_ERR(dev->pinctrl);
+ goto err_pinctrl;
+ }
+
+ dev->pins_default = pinctrl_lookup_state(dev->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(dev->pins_default)) {
+ dev_err(&adev->dev, "could not get default pinstate\n");
+ } else {
+ ret = pinctrl_select_state(dev->pinctrl,
+ dev->pins_default);
+ if (ret)
+ dev_dbg(&adev->dev, "could not set default pinstate\n");
+ }
+
+ dev->pins_idle = pinctrl_lookup_state(dev->pinctrl,
+ PINCTRL_STATE_IDLE);
+ if (IS_ERR(dev->pins_idle)) {
+ dev_dbg(&adev->dev, "could not get idle pinstate\n");
+ } else {
+ /* If possible, let's go to idle until the first transfer */
+ ret = pinctrl_select_state(dev->pinctrl,
+ dev->pins_idle);
+ if (ret)
+ dev_dbg(&adev->dev, "could not set idle pinstate\n");
+ }
+
+ dev->pins_sleep = pinctrl_lookup_state(dev->pinctrl,
+ PINCTRL_STATE_SLEEP);
+ if (IS_ERR(dev->pins_sleep))
+ dev_dbg(&adev->dev, "could not get sleep pinstate\n");
+
dev->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
if (!dev->virtbase) {
ret = -ENOMEM;
@@ -1020,8 +1105,8 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
err_irq:
iounmap(dev->virtbase);
err_no_ioremap:
- amba_set_drvdata(adev, NULL);
kfree(dev);
+ err_pinctrl:
err_no_mem:
return ret;
@@ -1044,7 +1129,6 @@ static int nmk_i2c_remove(struct amba_device *adev)
release_mem_region(res->start, resource_size(res));
clk_put(dev->clk);
pm_runtime_disable(&adev->dev);
- amba_set_drvdata(adev, NULL);
kfree(dev);
return 0;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index a337d08a392d..0e1f8245e768 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -332,7 +332,7 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
&i2c->reg_io_width);
match = of_match_node(ocores_i2c_match, pdev->dev.of_node);
- if (match && (int)match->data == TYPE_GRLIB) {
+ if (match && (long)match->data == TYPE_GRLIB) {
dev_dbg(&pdev->dev, "GRLIB variant of i2c-ocores\n");
i2c->setreg = oc_setreg_grlib;
i2c->getreg = oc_getreg_grlib;
@@ -452,7 +452,6 @@ static int ocores_i2c_remove(struct platform_device *pdev)
/* remove adapter & data */
i2c_del_adapter(&i2c->adap);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 484ca771fdff..935585ef4d39 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -595,7 +595,7 @@ static int octeon_i2c_probe(struct platform_device *pdev)
result = i2c_add_adapter(&i2c->adap);
if (result < 0) {
dev_err(i2c->dev, "failed to add adapter\n");
- goto fail_add;
+ goto out;
}
dev_info(i2c->dev, "version %s\n", DRV_VERSION);
@@ -603,8 +603,6 @@ static int octeon_i2c_probe(struct platform_device *pdev)
return 0;
-fail_add:
- platform_set_drvdata(pdev, NULL);
out:
return result;
};
@@ -614,7 +612,6 @@ static int octeon_i2c_remove(struct platform_device *pdev)
struct octeon_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adap);
- platform_set_drvdata(pdev, NULL);
return 0;
};
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 3ee188679cf1..e02f9e36a7b2 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1260,7 +1260,6 @@ err_unuse_clocks:
pm_runtime_put(dev->dev);
pm_runtime_disable(&pdev->dev);
err_free_mem:
- platform_set_drvdata(pdev, NULL);
return r;
}
@@ -1270,8 +1269,6 @@ static int omap_i2c_remove(struct platform_device *pdev)
struct omap_i2c_dev *dev = platform_get_drvdata(pdev);
int ret;
- platform_set_drvdata(pdev, NULL);
-
i2c_del_adapter(&dev->adapter);
ret = pm_runtime_get_sync(&pdev->dev);
if (IS_ERR_VALUE(ret))
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index a30d2f613c03..aa00df14e30b 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -260,7 +260,6 @@ e_print:
static int i2c_pca_pf_remove(struct platform_device *pdev)
{
struct i2c_pca_pf_data *i2c = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
i2c_del_adapter(&i2c->adap);
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 083d68cfaf0b..f6389e2c9d02 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -349,7 +349,6 @@ static int pmcmsptwi_probe(struct platform_device *pldev)
return 0;
ret_unmap:
- platform_set_drvdata(pldev, NULL);
if (pmcmsptwi_data.irq) {
pmcmsptwi_writel(0,
pmcmsptwi_data.iobase + MSP_TWI_INT_MSK_REG_OFFSET);
@@ -374,7 +373,6 @@ static int pmcmsptwi_remove(struct platform_device *pldev)
i2c_del_adapter(&pmcmsptwi_adapter);
- platform_set_drvdata(pldev, NULL);
if (pmcmsptwi_data.irq) {
pmcmsptwi_writel(0,
pmcmsptwi_data.iobase + MSP_TWI_INT_MSK_REG_OFFSET);
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index ce4097012e97..5f39c6d8117a 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -761,7 +761,6 @@ out_clkget:
out_drvdata:
kfree(alg_data);
err_kzalloc:
- platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -776,7 +775,6 @@ static int i2c_pnx_remove(struct platform_device *pdev)
release_mem_region(alg_data->base, I2C_PNX_REGION_SIZE);
clk_put(alg_data->clk);
kfree(alg_data);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 0dd5b334d090..da54e673449d 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -221,7 +221,6 @@ static int i2c_powermac_remove(struct platform_device *dev)
printk(KERN_WARNING
"i2c-powermac.c: Failed to remove bus %s !\n",
adapter->name);
- platform_set_drvdata(dev, NULL);
memset(adapter, 0, sizeof(*adapter));
return 0;
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index d7c512d717a7..261d7db437e2 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -223,7 +223,6 @@ static int puv3_i2c_probe(struct platform_device *pdev)
return 0;
fail_add_adapter:
- platform_set_drvdata(pdev, NULL);
kfree(adapter);
fail_nomem:
release_mem_region(mem->start, resource_size(mem));
@@ -245,7 +244,6 @@ static int puv3_i2c_remove(struct platform_device *pdev)
}
put_device(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 3d4985695aed..9639be86e53f 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -128,7 +128,6 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
return 0;
err_dev_add:
- pci_set_drvdata(dev, NULL);
kfree(sds);
err_mem:
pci_disable_device(dev);
@@ -141,7 +140,6 @@ static void ce4100_i2c_remove(struct pci_dev *dev)
unsigned int i;
sds = pci_get_drvdata(dev);
- pci_set_drvdata(dev, NULL);
for (i = 0; i < ARRAY_SIZE(sds->pdev); i++)
platform_device_unregister(sds->pdev[i]);
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 1034d93fb838..1e88e8d66c55 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1215,12 +1215,10 @@ emalloc:
return ret;
}
-static int __exit i2c_pxa_remove(struct platform_device *dev)
+static int i2c_pxa_remove(struct platform_device *dev)
{
struct pxa_i2c *i2c = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
-
i2c_del_adapter(&i2c->adap);
if (!i2c->use_pio)
free_irq(i2c->irq, i2c);
@@ -1269,7 +1267,7 @@ static const struct dev_pm_ops i2c_pxa_dev_pm_ops = {
static struct platform_driver i2c_pxa_driver = {
.probe = i2c_pxa_probe,
- .remove = __exit_p(i2c_pxa_remove),
+ .remove = i2c_pxa_remove,
.driver = {
.name = "pxa2xx-i2c",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index c807a6d14f0c..f6b880ba1932 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -111,6 +111,8 @@ static const struct of_device_id s3c24xx_i2c_match[] = {
{ .compatible = "samsung,s3c2440-i2c", .data = (void *)QUIRK_S3C2440 },
{ .compatible = "samsung,s3c2440-hdmiphy-i2c",
.data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) },
+ { .compatible = "samsung,exynos5440-i2c",
+ .data = (void *)(QUIRK_S3C2440 | QUIRK_NO_GPIO) },
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
@@ -1000,8 +1002,8 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!i2c->pdata) {
- ret = -ENOMEM;
- goto err_noclk;
+ dev_err(&pdev->dev, "no memory for platform data\n");
+ return -ENOMEM;
}
i2c->quirks = s3c24xx_get_device_quirks(pdev);
@@ -1022,32 +1024,27 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
/* find the clock and enable it */
i2c->dev = &pdev->dev;
- i2c->clk = clk_get(&pdev->dev, "i2c");
+ i2c->clk = devm_clk_get(&pdev->dev, "i2c");
if (IS_ERR(i2c->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
- ret = -ENOENT;
- goto err_noclk;
+ return -ENOENT;
}
dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
- clk_prepare_enable(i2c->clk);
/* map the registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "cannot find IO resource\n");
- ret = -ENOENT;
- goto err_clk;
+ return -ENOENT;
}
i2c->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(i2c->regs)) {
- ret = PTR_ERR(i2c->regs);
- goto err_clk;
- }
+ if (IS_ERR(i2c->regs))
+ return PTR_ERR(i2c->regs);
dev_dbg(&pdev->dev, "registers %p (%p)\n",
i2c->regs, res);
@@ -1064,16 +1061,18 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (i2c->pdata->cfg_gpio) {
i2c->pdata->cfg_gpio(to_platform_device(i2c->dev));
} else if (IS_ERR(i2c->pctrl) && s3c24xx_i2c_parse_dt_gpio(i2c)) {
- ret = -EINVAL;
- goto err_clk;
+ return -EINVAL;
}
/* initialise the i2c controller */
+ clk_prepare_enable(i2c->clk);
ret = s3c24xx_i2c_init(i2c);
- if (ret != 0)
- goto err_clk;
-
+ clk_disable_unprepare(i2c->clk);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "I2C controller init failed\n");
+ return ret;
+ }
/* find the IRQ for this unit (note, this relies on the init call to
* ensure no current IRQs pending
*/
@@ -1081,21 +1080,21 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
- goto err_clk;
+ return ret;
}
- ret = request_irq(i2c->irq, s3c24xx_i2c_irq, 0,
- dev_name(&pdev->dev), i2c);
+ ret = devm_request_irq(&pdev->dev, i2c->irq, s3c24xx_i2c_irq, 0,
+ dev_name(&pdev->dev), i2c);
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
- goto err_clk;
+ return ret;
}
ret = s3c24xx_i2c_register_cpufreq(i2c);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
- goto err_irq;
+ return ret;
}
/* Note, previous versions of the driver used i2c_add_adapter()
@@ -1110,7 +1109,8 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
- goto err_cpufreq;
+ s3c24xx_i2c_deregister_cpufreq(i2c);
+ return ret;
}
of_i2c_register_devices(&i2c->adap);
@@ -1120,21 +1120,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
pm_runtime_enable(&i2c->adap.dev);
dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
- clk_disable_unprepare(i2c->clk);
return 0;
-
- err_cpufreq:
- s3c24xx_i2c_deregister_cpufreq(i2c);
-
- err_irq:
- free_irq(i2c->irq, i2c);
-
- err_clk:
- clk_disable_unprepare(i2c->clk);
- clk_put(i2c->clk);
-
- err_noclk:
- return ret;
}
/* s3c24xx_i2c_remove
@@ -1152,10 +1138,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
s3c24xx_i2c_deregister_cpufreq(i2c);
i2c_del_adapter(&i2c->adap);
- free_irq(i2c->irq, i2c);
clk_disable_unprepare(i2c->clk);
- clk_put(i2c->clk);
if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
s3c24xx_i2c_dt_gpio_free(i2c);
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index 008836409efe..7c1ca5aca088 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -365,7 +365,6 @@ static int s6i2c_remove(struct platform_device *pdev)
{
struct s6i2c_if *iface = platform_get_drvdata(pdev);
i2c_wr16(iface, S6_I2C_ENABLE, 0);
- platform_set_drvdata(pdev, NULL);
i2c_del_adapter(&iface->adap);
free_irq(iface->irq, iface);
clk_disable(iface->clk);
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 3a2253e1bf59..5351a2f34912 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -546,7 +546,6 @@ static int sh7760_i2c_remove(struct platform_device *pdev)
release_resource(id->ioarea);
kfree(id->ioarea);
kfree(id);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index b6e7a83a8296..debf745c0268 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -38,21 +38,21 @@
/* Transmit operation: */
/* */
/* 0 byte transmit */
-/* BUS: S A8 ACK P */
+/* BUS: S A8 ACK P(*) */
/* IRQ: DTE WAIT */
/* ICIC: */
/* ICCR: 0x94 0x90 */
/* ICDR: A8 */
/* */
/* 1 byte transmit */
-/* BUS: S A8 ACK D8(1) ACK P */
+/* BUS: S A8 ACK D8(1) ACK P(*) */
/* IRQ: DTE WAIT WAIT */
/* ICIC: -DTE */
/* ICCR: 0x94 0x90 */
/* ICDR: A8 D8(1) */
/* */
/* 2 byte transmit */
-/* BUS: S A8 ACK D8(1) ACK D8(2) ACK P */
+/* BUS: S A8 ACK D8(1) ACK D8(2) ACK P(*) */
/* IRQ: DTE WAIT WAIT WAIT */
/* ICIC: -DTE */
/* ICCR: 0x94 0x90 */
@@ -66,20 +66,20 @@
/* 0 byte receive - not supported since slave may hold SDA low */
/* */
/* 1 byte receive [TX] | [RX] */
-/* BUS: S A8 ACK | D8(1) ACK P */
+/* BUS: S A8 ACK | D8(1) ACK P(*) */
/* IRQ: DTE WAIT | WAIT DTE */
/* ICIC: -DTE | +DTE */
/* ICCR: 0x94 0x81 | 0xc0 */
/* ICDR: A8 | D8(1) */
/* */
/* 2 byte receive [TX]| [RX] */
-/* BUS: S A8 ACK | D8(1) ACK D8(2) ACK P */
+/* BUS: S A8 ACK | D8(1) ACK D8(2) ACK P(*) */
/* IRQ: DTE WAIT | WAIT WAIT DTE */
/* ICIC: -DTE | +DTE */
/* ICCR: 0x94 0x81 | 0xc0 */
/* ICDR: A8 | D8(1) D8(2) */
/* */
-/* 3 byte receive [TX] | [RX] */
+/* 3 byte receive [TX] | [RX] (*) */
/* BUS: S A8 ACK | D8(1) ACK D8(2) ACK D8(3) ACK P */
/* IRQ: DTE WAIT | WAIT WAIT WAIT DTE */
/* ICIC: -DTE | +DTE */
@@ -94,7 +94,7 @@
/* SDA ___\___XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXAAAAAAAAA___/ */
/* SCL \_/1\_/2\_/3\_/4\_/5\_/6\_/7\_/8\___/9\_____/ */
/* */
-/* S D7 D6 D5 D4 D3 D2 D1 D0 P */
+/* S D7 D6 D5 D4 D3 D2 D1 D0 P(*) */
/* ___ */
/* WAIT IRQ ________________________________/ \___________ */
/* TACK IRQ ____________________________________/ \_______ */
@@ -103,6 +103,11 @@
/* _______________________________________________ */
/* BUSY __/ \_ */
/* */
+/* (*) The STOP condition is only sent by the master at the end of the last */
+/* I2C message or if the I2C_M_STOP flag is set. Similarly, the BUSY bit is */
+/* only cleared after the STOP condition, so, between messages we have to */
+/* poll for the DTE bit. */
+/* */
enum sh_mobile_i2c_op {
OP_START = 0,
@@ -132,6 +137,7 @@ struct sh_mobile_i2c_data {
struct i2c_msg *msg;
int pos;
int sr;
+ bool send_stop;
};
#define IIC_FLAG_HAS_ICIC67 (1 << 0)
@@ -322,7 +328,7 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd,
break;
case OP_TX_STOP: /* write data and issue a stop afterwards */
iic_wr(pd, ICDR, data);
- iic_wr(pd, ICCR, 0x90);
+ iic_wr(pd, ICCR, pd->send_stop ? 0x90 : 0x94);
break;
case OP_TX_TO_RX: /* select read mode */
iic_wr(pd, ICCR, 0x81);
@@ -349,20 +355,14 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd,
return ret;
}
-static int sh_mobile_i2c_is_first_byte(struct sh_mobile_i2c_data *pd)
+static bool sh_mobile_i2c_is_first_byte(struct sh_mobile_i2c_data *pd)
{
- if (pd->pos == -1)
- return 1;
-
- return 0;
+ return pd->pos == -1;
}
-static int sh_mobile_i2c_is_last_byte(struct sh_mobile_i2c_data *pd)
+static bool sh_mobile_i2c_is_last_byte(struct sh_mobile_i2c_data *pd)
{
- if (pd->pos == (pd->msg->len - 1))
- return 1;
-
- return 0;
+ return pd->pos == pd->msg->len - 1;
}
static void sh_mobile_i2c_get_data(struct sh_mobile_i2c_data *pd,
@@ -475,22 +475,25 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
+static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
+ bool do_init)
{
if (usr_msg->len == 0 && (usr_msg->flags & I2C_M_RD)) {
dev_err(pd->dev, "Unsupported zero length i2c read\n");
return -EIO;
}
- /* Initialize channel registers */
- iic_set_clr(pd, ICCR, 0, ICCR_ICE);
+ if (do_init) {
+ /* Initialize channel registers */
+ iic_set_clr(pd, ICCR, 0, ICCR_ICE);
- /* Enable channel and configure rx ack */
- iic_set_clr(pd, ICCR, ICCR_ICE, 0);
+ /* Enable channel and configure rx ack */
+ iic_set_clr(pd, ICCR, ICCR_ICE, 0);
- /* Set the clock */
- iic_wr(pd, ICCL, pd->iccl & 0xff);
- iic_wr(pd, ICCH, pd->icch & 0xff);
+ /* Set the clock */
+ iic_wr(pd, ICCL, pd->iccl & 0xff);
+ iic_wr(pd, ICCH, pd->icch & 0xff);
+ }
pd->msg = usr_msg;
pd->pos = -1;
@@ -501,6 +504,61 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
return 0;
}
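+/* Poll for the DTE bit; bail out with -EIO on a NACK (TACK), -ETIMEDOUT on timeout */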
+static int poll_dte(struct sh_mobile_i2c_data *pd)
+{
+ int i;
+
+ for (i = 1000; i; i--) {
+ u_int8_t val = iic_rd(pd, ICSR);
+
+ if (val & ICSR_DTE)
+ break;
+
+ if (val & ICSR_TACK)
+ return -EIO;
+
+ udelay(10);
+ }
+
+ if (!i) {
+ dev_warn(pd->dev, "Timeout polling for DTE!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int poll_busy(struct sh_mobile_i2c_data *pd)
+{
+ int i;
+
+ for (i = 1000; i; i--) {
+ u_int8_t val = iic_rd(pd, ICSR);
+
+ dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr);
+
+ /* the interrupt handler may wake us up before the
+ * transfer is finished, so poll the hardware
+ * until we're done.
+ */
+ if (!(val & ICSR_BUSY)) {
+ /* handle missing acknowledge and arbitration lost */
+ if ((val | pd->sr) & (ICSR_TACK | ICSR_AL))
+ return -EIO;
+ break;
+ }
+
+ udelay(10);
+ }
+
+ if (!i) {
+ dev_err(pd->dev, "Polling timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs,
int num)
@@ -508,53 +566,39 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
struct i2c_msg *msg;
int err = 0;
- u_int8_t val;
- int i, k, retry_count;
+ int i, k;
activate_ch(pd);
/* Process all messages */
for (i = 0; i < num; i++) {
+ bool do_start = pd->send_stop || !i;
msg = &msgs[i];
+ pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
- err = start_ch(pd, msg);
+ err = start_ch(pd, msg, do_start);
if (err)
break;
- i2c_op(pd, OP_START, 0);
+ if (do_start)
+ i2c_op(pd, OP_START, 0);
/* The interrupt handler takes care of the rest... */
k = wait_event_timeout(pd->wait,
pd->sr & (ICSR_TACK | SW_DONE),
5 * HZ);
- if (!k)
+ if (!k) {
dev_err(pd->dev, "Transfer request timed out\n");
-
- retry_count = 1000;
-again:
- val = iic_rd(pd, ICSR);
-
- dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr);
-
- /* the interrupt handler may wake us up before the
- * transfer is finished, so poll the hardware
- * until we're done.
- */
- if (val & ICSR_BUSY) {
- udelay(10);
- if (retry_count--)
- goto again;
-
- err = -EIO;
- dev_err(pd->dev, "Polling timed out\n");
+ err = -ETIMEDOUT;
break;
}
- /* handle missing acknowledge and arbitration lost */
- if ((val | pd->sr) & (ICSR_TACK | ICSR_AL)) {
- err = -EIO;
+ if (pd->send_stop)
+ err = poll_busy(pd);
+ else
+ err = poll_dte(pd);
+ if (err < 0)
break;
- }
}
deactivate_ch(pd);
@@ -566,7 +610,7 @@ again:
static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
}
static struct i2c_algorithm sh_mobile_i2c_algorithm = {
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index de6dddb9f865..36a9556d7cfa 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -17,30 +17,25 @@
*/
/*
- Changes:
- 24.08.2002
- Fixed the typo in sis630_access (Thanks to Mark M. Hoffman)
- Changed sis630_transaction.(Thanks to Mark M. Hoffman)
- 18.09.2002
- Added SIS730 as supported.
- 21.09.2002
- Added high_clock module option.If this option is set
- used Host Master Clock 56KHz (default 14KHz).For now we save old Host
- Master Clock and after transaction completed restore (otherwise
- it's confuse BIOS and hung Machine).
- 24.09.2002
- Fixed typo in sis630_access
- Fixed logical error by restoring of Host Master Clock
- 31.07.2003
- Added block data read/write support.
-*/
-
-/*
Status: beta
Supports:
SIS 630
SIS 730
+ SIS 964
+
+ Notable differences between chips:
+ +------------------------+--------------------+-------------------+
+ | | SIS630/730 | SIS964 |
+ +------------------------+--------------------+-------------------+
+ | Clock | 14kHz/56kHz | 55.56kHz/27.78kHz |
+ | SMBus registers offset | 0x80 | 0xE0 |
+ | SMB_CNT | Bit 1 = Slave Busy | Bit 1 = Bus probe |
+ | (not used yet) | Bit 3 is reserved | Bit 3 = Last byte |
+ | SMB_PCOUNT | Offset + 0x06 | Offset + 0x14 |
+ | SMB_COUNT | 4:0 bits | 5:0 bits |
+ +------------------------+--------------------+-------------------+
+ (Other differences don't affect the functions provided by the driver)
Note: we assume there can only be one device, with one SMBus interface.
*/
@@ -55,22 +50,36 @@
#include <linux/acpi.h>
#include <linux/io.h>
-/* SIS630 SMBus registers */
-#define SMB_STS 0x80 /* status */
-#define SMB_EN 0x81 /* status enable */
-#define SMB_CNT 0x82
-#define SMBHOST_CNT 0x83
-#define SMB_ADDR 0x84
-#define SMB_CMD 0x85
-#define SMB_PCOUNT 0x86 /* processed count */
-#define SMB_COUNT 0x87
-#define SMB_BYTE 0x88 /* ~0x8F data byte field */
-#define SMBDEV_ADDR 0x90
-#define SMB_DB0 0x91
-#define SMB_DB1 0x92
-#define SMB_SAA 0x93
-
-/* register count for request_region */
+/* SIS964 id is defined here as we are the only file using it */
+#define PCI_DEVICE_ID_SI_964 0x0964
+
+/* SIS630/730/964 SMBus registers */
+#define SMB_STS 0x00 /* status */
+#define SMB_CNT 0x02 /* control */
+#define SMBHOST_CNT 0x03 /* host control */
+#define SMB_ADDR 0x04 /* address */
+#define SMB_CMD 0x05 /* command */
+#define SMB_COUNT 0x07 /* byte count */
+#define SMB_BYTE 0x08 /* ~0x8F data byte field */
+
+/* SMB_STS register */
+#define BYTE_DONE_STS 0x10 /* Byte Done Status / Block Array */
+#define SMBCOL_STS 0x04 /* Collision */
+#define SMBERR_STS 0x02 /* Device error */
+
+/* SMB_CNT register */
+#define MSTO_EN 0x40 /* Host Master Timeout Enable */
+#define SMBCLK_SEL 0x20 /* Host master clock selection */
+#define SMB_PROBE 0x02 /* Bus Probe/Slave busy */
+#define SMB_HOSTBUSY 0x01 /* Host Busy */
+
+/* SMBHOST_CNT register */
+#define SMB_KILL 0x20 /* Kill */
+#define SMB_START 0x10 /* Start */
+
+/* register count for request_region
+ * As we don't use SMB_PCOUNT, 20 is ok for SiS630 and SiS964
+ */
#define SIS630_SMB_IOREGION 20
/* PCI address constants */
@@ -96,65 +105,71 @@ static struct pci_driver sis630_driver;
static bool high_clock;
static bool force;
module_param(high_clock, bool, 0);
-MODULE_PARM_DESC(high_clock, "Set Host Master Clock to 56KHz (default 14KHz).");
+MODULE_PARM_DESC(high_clock,
+ "Set Host Master Clock to 56KHz (default 14KHz) (SIS630/730 only).");
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Forcibly enable the SIS630. DANGEROUS!");
-/* acpi base address */
-static unsigned short acpi_base;
+/* SMBus base address */
+static unsigned short smbus_base;
/* supported chips */
static int supported[] = {
PCI_DEVICE_ID_SI_630,
PCI_DEVICE_ID_SI_730,
+ PCI_DEVICE_ID_SI_760,
0 /* terminates the list */
};
static inline u8 sis630_read(u8 reg)
{
- return inb(acpi_base + reg);
+ return inb(smbus_base + reg);
}
static inline void sis630_write(u8 reg, u8 data)
{
- outb(data, acpi_base + reg);
+ outb(data, smbus_base + reg);
}
-static int sis630_transaction_start(struct i2c_adapter *adap, int size, u8 *oldclock)
+static int sis630_transaction_start(struct i2c_adapter *adap, int size,
+ u8 *oldclock)
{
- int temp;
+ int temp;
/* Make sure the SMBus host is ready to start transmitting. */
- if ((temp = sis630_read(SMB_CNT) & 0x03) != 0x00) {
- dev_dbg(&adap->dev, "SMBus busy (%02x).Resetting...\n",temp);
+ temp = sis630_read(SMB_CNT);
+ if ((temp & (SMB_PROBE | SMB_HOSTBUSY)) != 0x00) {
+ dev_dbg(&adap->dev, "SMBus busy (%02x). Resetting...\n", temp);
/* kill smbus transaction */
- sis630_write(SMBHOST_CNT, 0x20);
+ sis630_write(SMBHOST_CNT, SMB_KILL);
- if ((temp = sis630_read(SMB_CNT) & 0x03) != 0x00) {
+ temp = sis630_read(SMB_CNT);
+ if (temp & (SMB_PROBE | SMB_HOSTBUSY)) {
dev_dbg(&adap->dev, "Failed! (%02x)\n", temp);
return -EBUSY;
- } else {
+ } else {
dev_dbg(&adap->dev, "Successful!\n");
}
- }
+ }
/* save old clock, so we can prevent machine for hung */
*oldclock = sis630_read(SMB_CNT);
dev_dbg(&adap->dev, "saved clock 0x%02x\n", *oldclock);
- /* disable timeout interrupt , set Host Master Clock to 56KHz if requested */
+ /* disable timeout interrupt,
+ * set Host Master Clock to 56KHz if requested */
if (high_clock)
- sis630_write(SMB_CNT, 0x20);
+ sis630_write(SMB_CNT, SMBCLK_SEL);
else
- sis630_write(SMB_CNT, (*oldclock & ~0x40));
+ sis630_write(SMB_CNT, (*oldclock & ~MSTO_EN));
/* clear all sticky bits */
temp = sis630_read(SMB_STS);
sis630_write(SMB_STS, temp & 0x1e);
/* start the transaction by setting bit 4 and size */
- sis630_write(SMBHOST_CNT,0x10 | (size & 0x07));
+ sis630_write(SMBHOST_CNT, SMB_START | (size & 0x07));
return 0;
}
@@ -168,7 +183,7 @@ static int sis630_transaction_wait(struct i2c_adapter *adap, int size)
msleep(1);
temp = sis630_read(SMB_STS);
/* check if block transmitted */
- if (size == SIS630_BLOCK_DATA && (temp & 0x10))
+ if (size == SIS630_BLOCK_DATA && (temp & BYTE_DONE_STS))
break;
} while (!(temp & 0x0e) && (timeout++ < MAX_TIMEOUT));
@@ -178,19 +193,14 @@ static int sis630_transaction_wait(struct i2c_adapter *adap, int size)
result = -ETIMEDOUT;
}
- if (temp & 0x02) {
+ if (temp & SMBERR_STS) {
dev_dbg(&adap->dev, "Error: Failed bus transaction\n");
result = -ENXIO;
}
- if (temp & 0x04) {
+ if (temp & SMBCOL_STS) {
dev_err(&adap->dev, "Bus collision!\n");
- result = -EIO;
- /*
- TBD: Datasheet say:
- the software should clear this bit and restart SMBUS operation.
- Should we do it or user start request again?
- */
+ result = -EAGAIN;
}
return result;
@@ -198,21 +208,21 @@ static int sis630_transaction_wait(struct i2c_adapter *adap, int size)
static void sis630_transaction_end(struct i2c_adapter *adap, u8 oldclock)
{
- int temp = 0;
-
/* clear all status "sticky" bits */
- sis630_write(SMB_STS, temp);
+ sis630_write(SMB_STS, 0xFF);
- dev_dbg(&adap->dev, "SMB_CNT before clock restore 0x%02x\n", sis630_read(SMB_CNT));
+ dev_dbg(&adap->dev,
+ "SMB_CNT before clock restore 0x%02x\n", sis630_read(SMB_CNT));
/*
* restore old Host Master Clock if high_clock is set
* and oldclock was not 56KHz
*/
- if (high_clock && !(oldclock & 0x20))
- sis630_write(SMB_CNT,(sis630_read(SMB_CNT) & ~0x20));
+ if (high_clock && !(oldclock & SMBCLK_SEL))
+ sis630_write(SMB_CNT, sis630_read(SMB_CNT) & ~SMBCLK_SEL);
- dev_dbg(&adap->dev, "SMB_CNT after clock restore 0x%02x\n", sis630_read(SMB_CNT));
+ dev_dbg(&adap->dev,
+ "SMB_CNT after clock restore 0x%02x\n", sis630_read(SMB_CNT));
}
static int sis630_transaction(struct i2c_adapter *adap, int size)
@@ -229,7 +239,8 @@ static int sis630_transaction(struct i2c_adapter *adap, int size)
return result;
}
-static int sis630_block_data(struct i2c_adapter *adap, union i2c_smbus_data *data, int read_write)
+static int sis630_block_data(struct i2c_adapter *adap,
+ union i2c_smbus_data *data, int read_write)
{
int i, len = 0, rc = 0;
u8 oldclock = 0;
@@ -241,39 +252,43 @@ static int sis630_block_data(struct i2c_adapter *adap, union i2c_smbus_data *dat
else if (len > 32)
len = 32;
sis630_write(SMB_COUNT, len);
- for (i=1; i <= len; i++) {
- dev_dbg(&adap->dev, "set data 0x%02x\n", data->block[i]);
+ for (i = 1; i <= len; i++) {
+ dev_dbg(&adap->dev,
+ "set data 0x%02x\n", data->block[i]);
/* set data */
- sis630_write(SMB_BYTE+(i-1)%8, data->block[i]);
- if (i==8 || (len<8 && i==len)) {
- dev_dbg(&adap->dev, "start trans len=%d i=%d\n",len ,i);
+ sis630_write(SMB_BYTE + (i - 1) % 8, data->block[i]);
+ if (i == 8 || (len < 8 && i == len)) {
+ dev_dbg(&adap->dev,
+ "start trans len=%d i=%d\n", len, i);
/* first transaction */
rc = sis630_transaction_start(adap,
SIS630_BLOCK_DATA, &oldclock);
if (rc)
return rc;
- }
- else if ((i-1)%8 == 7 || i==len) {
- dev_dbg(&adap->dev, "trans_wait len=%d i=%d\n",len,i);
- if (i>8) {
- dev_dbg(&adap->dev, "clear smbary_sts len=%d i=%d\n",len,i);
+ } else if ((i - 1) % 8 == 7 || i == len) {
+ dev_dbg(&adap->dev,
+ "trans_wait len=%d i=%d\n", len, i);
+ if (i > 8) {
+ dev_dbg(&adap->dev,
+ "clear smbary_sts"
+ " len=%d i=%d\n", len, i);
/*
If this is not first transaction,
we must clear sticky bit.
clear SMBARY_STS
*/
- sis630_write(SMB_STS,0x10);
+ sis630_write(SMB_STS, BYTE_DONE_STS);
}
rc = sis630_transaction_wait(adap,
SIS630_BLOCK_DATA);
if (rc) {
- dev_dbg(&adap->dev, "trans_wait failed\n");
+ dev_dbg(&adap->dev,
+ "trans_wait failed\n");
break;
}
}
}
- }
- else {
+ } else {
/* read request */
data->block[0] = len = 0;
rc = sis630_transaction_start(adap,
@@ -294,18 +309,22 @@ static int sis630_block_data(struct i2c_adapter *adap, union i2c_smbus_data *dat
if (data->block[0] > 32)
data->block[0] = 32;
- dev_dbg(&adap->dev, "block data read len=0x%x\n", data->block[0]);
+ dev_dbg(&adap->dev,
+ "block data read len=0x%x\n", data->block[0]);
- for (i=0; i < 8 && len < data->block[0]; i++,len++) {
- dev_dbg(&adap->dev, "read i=%d len=%d\n", i, len);
- data->block[len+1] = sis630_read(SMB_BYTE+i);
+ for (i = 0; i < 8 && len < data->block[0]; i++, len++) {
+ dev_dbg(&adap->dev,
+ "read i=%d len=%d\n", i, len);
+ data->block[len + 1] = sis630_read(SMB_BYTE +
+ i);
}
- dev_dbg(&adap->dev, "clear smbary_sts len=%d i=%d\n",len,i);
+ dev_dbg(&adap->dev,
+ "clear smbary_sts len=%d i=%d\n", len, i);
/* clear SMBARY_STS */
- sis630_write(SMB_STS,0x10);
- } while(len < data->block[0]);
+ sis630_write(SMB_STS, BYTE_DONE_STS);
+ } while (len < data->block[0]);
}
sis630_transaction_end(adap, oldclock);
@@ -321,42 +340,47 @@ static s32 sis630_access(struct i2c_adapter *adap, u16 addr,
int status;
switch (size) {
- case I2C_SMBUS_QUICK:
- sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
- size = SIS630_QUICK;
- break;
- case I2C_SMBUS_BYTE:
- sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
- if (read_write == I2C_SMBUS_WRITE)
- sis630_write(SMB_CMD, command);
- size = SIS630_BYTE;
- break;
- case I2C_SMBUS_BYTE_DATA:
- sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
- sis630_write(SMB_CMD, command);
- if (read_write == I2C_SMBUS_WRITE)
- sis630_write(SMB_BYTE, data->byte);
- size = SIS630_BYTE_DATA;
- break;
- case I2C_SMBUS_PROC_CALL:
- case I2C_SMBUS_WORD_DATA:
- sis630_write(SMB_ADDR,((addr & 0x7f) << 1) | (read_write & 0x01));
+ case I2C_SMBUS_QUICK:
+ sis630_write(SMB_ADDR,
+ ((addr & 0x7f) << 1) | (read_write & 0x01));
+ size = SIS630_QUICK;
+ break;
+ case I2C_SMBUS_BYTE:
+ sis630_write(SMB_ADDR,
+ ((addr & 0x7f) << 1) | (read_write & 0x01));
+ if (read_write == I2C_SMBUS_WRITE)
sis630_write(SMB_CMD, command);
- if (read_write == I2C_SMBUS_WRITE) {
- sis630_write(SMB_BYTE, data->word & 0xff);
- sis630_write(SMB_BYTE + 1,(data->word & 0xff00) >> 8);
- }
- size = (size == I2C_SMBUS_PROC_CALL ? SIS630_PCALL : SIS630_WORD_DATA);
- break;
- case I2C_SMBUS_BLOCK_DATA:
- sis630_write(SMB_ADDR,((addr & 0x7f) << 1) | (read_write & 0x01));
- sis630_write(SMB_CMD, command);
- size = SIS630_BLOCK_DATA;
- return sis630_block_data(adap, data, read_write);
- default:
- dev_warn(&adap->dev, "Unsupported transaction %d\n",
- size);
- return -EOPNOTSUPP;
+ size = SIS630_BYTE;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ sis630_write(SMB_ADDR,
+ ((addr & 0x7f) << 1) | (read_write & 0x01));
+ sis630_write(SMB_CMD, command);
+ if (read_write == I2C_SMBUS_WRITE)
+ sis630_write(SMB_BYTE, data->byte);
+ size = SIS630_BYTE_DATA;
+ break;
+ case I2C_SMBUS_PROC_CALL:
+ case I2C_SMBUS_WORD_DATA:
+ sis630_write(SMB_ADDR,
+ ((addr & 0x7f) << 1) | (read_write & 0x01));
+ sis630_write(SMB_CMD, command);
+ if (read_write == I2C_SMBUS_WRITE) {
+ sis630_write(SMB_BYTE, data->word & 0xff);
+ sis630_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8);
+ }
+ size = (size == I2C_SMBUS_PROC_CALL ?
+ SIS630_PCALL : SIS630_WORD_DATA);
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ sis630_write(SMB_ADDR,
+ ((addr & 0x7f) << 1) | (read_write & 0x01));
+ sis630_write(SMB_CMD, command);
+ size = SIS630_BLOCK_DATA;
+ return sis630_block_data(adap, data, read_write);
+ default:
+ dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
+ return -EOPNOTSUPP;
}
status = sis630_transaction(adap, size);
@@ -368,15 +392,16 @@ static s32 sis630_access(struct i2c_adapter *adap, u16 addr,
return 0;
}
- switch(size) {
- case SIS630_BYTE:
- case SIS630_BYTE_DATA:
- data->byte = sis630_read(SMB_BYTE);
- break;
- case SIS630_PCALL:
- case SIS630_WORD_DATA:
- data->word = sis630_read(SMB_BYTE) + (sis630_read(SMB_BYTE + 1) << 8);
- break;
+ switch (size) {
+ case SIS630_BYTE:
+ case SIS630_BYTE_DATA:
+ data->byte = sis630_read(SMB_BYTE);
+ break;
+ case SIS630_PCALL:
+ case SIS630_WORD_DATA:
+ data->word = sis630_read(SMB_BYTE) +
+ (sis630_read(SMB_BYTE + 1) << 8);
+ break;
}
return 0;
@@ -384,9 +409,9 @@ static s32 sis630_access(struct i2c_adapter *adap, u16 addr,
static u32 sis630_func(struct i2c_adapter *adapter)
{
- return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL |
- I2C_FUNC_SMBUS_BLOCK_DATA;
+ return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_DATA;
}
static int sis630_setup(struct pci_dev *sis630_dev)
@@ -394,21 +419,23 @@ static int sis630_setup(struct pci_dev *sis630_dev)
unsigned char b;
struct pci_dev *dummy = NULL;
int retval, i;
+ /* acpi base address */
+ unsigned short acpi_base;
/* check for supported SiS devices */
- for (i=0; supported[i] > 0 ; i++) {
- if ((dummy = pci_get_device(PCI_VENDOR_ID_SI, supported[i], dummy)))
+ for (i = 0; supported[i] > 0; i++) {
+ dummy = pci_get_device(PCI_VENDOR_ID_SI, supported[i], dummy);
+ if (dummy)
break; /* found */
}
if (dummy) {
pci_dev_put(dummy);
- }
- else if (force) {
- dev_err(&sis630_dev->dev, "WARNING: Can't detect SIS630 compatible device, but "
+ } else if (force) {
+ dev_err(&sis630_dev->dev,
+ "WARNING: Can't detect SIS630 compatible device, but "
"loading because of force option enabled\n");
- }
- else {
+ } else {
return -ENODEV;
}
@@ -416,7 +443,7 @@ static int sis630_setup(struct pci_dev *sis630_dev)
Enable ACPI first , so we can accsess reg 74-75
in acpi io space and read acpi base addr
*/
- if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) {
+ if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, &b)) {
dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
retval = -ENODEV;
goto exit;
@@ -430,24 +457,35 @@ static int sis630_setup(struct pci_dev *sis630_dev)
}
/* Determine the ACPI base address */
- if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) {
- dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
+ if (pci_read_config_word(sis630_dev,
+ SIS630_ACPI_BASE_REG, &acpi_base)) {
+ dev_err(&sis630_dev->dev,
+ "Error: Can't determine ACPI base address\n");
retval = -ENODEV;
goto exit;
}
- dev_dbg(&sis630_dev->dev, "ACPI base at 0x%04x\n", acpi_base);
+ dev_dbg(&sis630_dev->dev, "ACPI base at 0x%04hx\n", acpi_base);
+
+ if (supported[i] == PCI_DEVICE_ID_SI_760)
+ smbus_base = acpi_base + 0xE0;
+ else
+ smbus_base = acpi_base + 0x80;
+
+ dev_dbg(&sis630_dev->dev, "SMBus base at 0x%04hx\n", smbus_base);
- retval = acpi_check_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION,
+ retval = acpi_check_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION,
sis630_driver.name);
if (retval)
goto exit;
/* Everything is happy, let's grab the memory and set things up. */
- if (!request_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION,
+ if (!request_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION,
sis630_driver.name)) {
- dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
- "in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
+ dev_err(&sis630_dev->dev,
+ "I/O Region 0x%04hx-0x%04hx for SMBus already in use.\n",
+ smbus_base + SMB_STS,
+ smbus_base + SMB_STS + SIS630_SMB_IOREGION - 1);
retval = -EBUSY;
goto exit;
}
@@ -456,7 +494,7 @@ static int sis630_setup(struct pci_dev *sis630_dev)
exit:
if (retval)
- acpi_base = 0;
+ smbus_base = 0;
return retval;
}
@@ -470,20 +508,24 @@ static struct i2c_adapter sis630_adapter = {
.owner = THIS_MODULE,
.class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
.algo = &smbus_algorithm,
+ .retries = 3
};
static DEFINE_PCI_DEVICE_TABLE(sis630_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_964) },
{ 0, }
};
-MODULE_DEVICE_TABLE (pci, sis630_ids);
+MODULE_DEVICE_TABLE(pci, sis630_ids);
static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
if (sis630_setup(dev)) {
- dev_err(&dev->dev, "SIS630 comp. bus not detected, module not inserted.\n");
+ dev_err(&dev->dev,
+ "SIS630 compatible bus not detected, "
+ "module not inserted.\n");
return -ENODEV;
}
@@ -491,17 +533,17 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
sis630_adapter.dev.parent = &dev->dev;
snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
- "SMBus SIS630 adapter at %04x", acpi_base + SMB_STS);
+ "SMBus SIS630 adapter at %04hx", smbus_base + SMB_STS);
return i2c_add_adapter(&sis630_adapter);
}
static void sis630_remove(struct pci_dev *dev)
{
- if (acpi_base) {
+ if (smbus_base) {
i2c_del_adapter(&sis630_adapter);
- release_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION);
- acpi_base = 0;
+ release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION);
+ smbus_base = 0;
}
}
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 60195b590637..0a6f941133f6 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -975,7 +975,6 @@ stu300_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
/* Turn off everything */
stu300_wr8(0x00, dev->virtbase + I2C_CR);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index 26c352a09298..6ffa56e08517 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -271,7 +271,6 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
exit_close:
serio_close(serio);
exit_kfree:
- serio_set_drvdata(serio, NULL);
kfree(taos);
exit:
return err;
@@ -285,7 +284,6 @@ static void taos_disconnect(struct serio *serio)
i2c_unregister_device(taos->client);
i2c_del_adapter(&taos->adapter);
serio_close(serio);
- serio_set_drvdata(serio, NULL);
kfree(taos);
dev_info(&serio->dev, "Disconnected from TAOS EVM\n");
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 1fb30099dac4..36704e3ab3fa 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -29,11 +29,10 @@
#include <linux/of_i2c.h>
#include <linux/of_device.h>
#include <linux/module.h>
+#include <linux/clk/tegra.h>
#include <asm/unaligned.h>
-#include <mach/clk.h>
-
#define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000))
#define BYTES_PER_FIFO_WORD 4
@@ -71,6 +70,8 @@
#define I2C_INT_TX_FIFO_DATA_REQ (1<<1)
#define I2C_INT_RX_FIFO_DATA_REQ (1<<0)
#define I2C_CLK_DIVISOR 0x06c
+#define I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT 16
+#define I2C_CLK_MULTIPLIER_STD_FAST_MODE 8
#define DVC_CTRL_REG1 0x000
#define DVC_CTRL_REG1_INTR_EN (1<<10)
@@ -117,10 +118,23 @@ enum msg_end_type {
/**
* struct tegra_i2c_hw_feature : Different HW support on Tegra
* @has_continue_xfer_support: Continue transfer supports.
+ * @has_per_pkt_xfer_complete_irq: Can enable/disable the transfer complete
+ * interrupt on a per-packet basis.
+ * @has_single_clk_source: The i2c controller has a single clock source.
+ * Tegra30 and earlier SoCs have two clock sources, i.e. div-clk and
+ * fast-clk.
+ * @clk_divisor_hs_mode: Clock divisor in HS mode.
+ * @clk_divisor_std_fast_mode: Clock divisor in standard/fast mode. Only
+ * applicable if there is no fast clock source, i.e. a single clock
+ * source.
*/
struct tegra_i2c_hw_feature {
bool has_continue_xfer_support;
+ bool has_per_pkt_xfer_complete_irq;
+ bool has_single_clk_source;
+ int clk_divisor_hs_mode;
+ int clk_divisor_std_fast_mode;
};
/**
@@ -366,11 +380,13 @@ static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev)
{
int ret;
- ret = clk_prepare_enable(i2c_dev->fast_clk);
- if (ret < 0) {
- dev_err(i2c_dev->dev,
- "Enabling fast clk failed, err %d\n", ret);
- return ret;
+ if (!i2c_dev->hw->has_single_clk_source) {
+ ret = clk_prepare_enable(i2c_dev->fast_clk);
+ if (ret < 0) {
+ dev_err(i2c_dev->dev,
+ "Enabling fast clk failed, err %d\n", ret);
+ return ret;
+ }
}
ret = clk_prepare_enable(i2c_dev->div_clk);
if (ret < 0) {
@@ -384,13 +400,16 @@ static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev)
static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev)
{
clk_disable_unprepare(i2c_dev->div_clk);
- clk_disable_unprepare(i2c_dev->fast_clk);
+ if (!i2c_dev->hw->has_single_clk_source)
+ clk_disable_unprepare(i2c_dev->fast_clk);
}
static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
{
u32 val;
int err = 0;
+ int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE;
+ u32 clk_divisor;
tegra_i2c_clock_enable(i2c_dev);
@@ -405,7 +424,15 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
(0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
i2c_writel(i2c_dev, val, I2C_CNFG);
i2c_writel(i2c_dev, 0, I2C_INT_MASK);
- clk_set_rate(i2c_dev->div_clk, i2c_dev->bus_clk_rate * 8);
+
+ clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1);
+ clk_set_rate(i2c_dev->div_clk, i2c_dev->bus_clk_rate * clk_multiplier);
+
+ /* Make sure the clock divisor is programmed correctly */
+ clk_divisor = i2c_dev->hw->clk_divisor_hs_mode;
+ clk_divisor |= i2c_dev->hw->clk_divisor_std_fast_mode <<
+ I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT;
+ i2c_writel(i2c_dev, clk_divisor, I2C_CLK_DIVISOR);
if (!i2c_dev->is_dvc) {
u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
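A quick worked example of the divisor math above, using the Tegra114 values quoted later in this diff (clk_divisor_std_fast_mode = 0x19, I2C_CLK_MULTIPLIER_STD_FAST_MODE = 8): clk_multiplier becomes 8 * (0x19 + 1) = 208, so a 100 kHz bus_clk_rate requests 100 kHz * 208 = 20.8 MHz from div-clk. On Tegra20/30 the divisor is 0, so the request stays at bus_clk_rate * 8, matching the removed line.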
@@ -547,6 +574,8 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
tegra_i2c_fill_tx_fifo(i2c_dev);
int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ if (i2c_dev->hw->has_per_pkt_xfer_complete_irq)
+ int_mask |= I2C_INT_PACKET_XFER_COMPLETE;
if (msg->flags & I2C_M_RD)
int_mask |= I2C_INT_RX_FIFO_DATA_REQ;
else if (i2c_dev->msg_buf_remaining)
@@ -558,7 +587,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
ret = wait_for_completion_timeout(&i2c_dev->msg_complete, TEGRA_I2C_TIMEOUT);
tegra_i2c_mask_irq(i2c_dev, int_mask);
- if (WARN_ON(ret == 0)) {
+ if (ret == 0) {
dev_err(i2c_dev->dev, "i2c transfer timed out\n");
tegra_i2c_init(i2c_dev);
@@ -634,15 +663,32 @@ static const struct i2c_algorithm tegra_i2c_algo = {
static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_continue_xfer_support = false,
+ .has_per_pkt_xfer_complete_irq = false,
+ .has_single_clk_source = false,
+ .clk_divisor_hs_mode = 3,
+ .clk_divisor_std_fast_mode = 0,
};
static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.has_continue_xfer_support = true,
+ .has_per_pkt_xfer_complete_irq = false,
+ .has_single_clk_source = false,
+ .clk_divisor_hs_mode = 3,
+ .clk_divisor_std_fast_mode = 0,
+};
+
+static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
+ .has_continue_xfer_support = true,
+ .has_per_pkt_xfer_complete_irq = true,
+ .has_single_clk_source = true,
+ .clk_divisor_hs_mode = 1,
+ .clk_divisor_std_fast_mode = 0x19,
};
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
static const struct of_device_id tegra_i2c_of_match[] = {
+ { .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, },
{ .compatible = "nvidia,tegra30-i2c", .data = &tegra30_i2c_hw, },
{ .compatible = "nvidia,tegra20-i2c", .data = &tegra20_i2c_hw, },
{ .compatible = "nvidia,tegra20-i2c-dvc", .data = &tegra20_i2c_hw, },
@@ -686,12 +732,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
return PTR_ERR(div_clk);
}
- fast_clk = devm_clk_get(&pdev->dev, "fast-clk");
- if (IS_ERR(fast_clk)) {
- dev_err(&pdev->dev, "missing bus clock");
- return PTR_ERR(fast_clk);
- }
-
i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
if (!i2c_dev) {
dev_err(&pdev->dev, "Could not allocate struct tegra_i2c_dev");
@@ -700,7 +740,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->base = base;
i2c_dev->div_clk = div_clk;
- i2c_dev->fast_clk = fast_clk;
i2c_dev->adapter.algo = &tegra_i2c_algo;
i2c_dev->irq = irq;
i2c_dev->cont_id = pdev->id;
@@ -731,6 +770,15 @@ static int tegra_i2c_probe(struct platform_device *pdev)
}
init_completion(&i2c_dev->msg_complete);
+ if (!i2c_dev->hw->has_single_clk_source) {
+ fast_clk = devm_clk_get(&pdev->dev, "fast-clk");
+ if (IS_ERR(fast_clk)) {
+ dev_err(&pdev->dev, "missing fast clock");
+ return PTR_ERR(fast_clk);
+ }
+ i2c_dev->fast_clk = fast_clk;
+ }
+
platform_set_drvdata(pdev, i2c_dev);
ret = tegra_i2c_init(i2c_dev);
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index eec20db6246f..f3a8790a07e8 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -125,8 +125,6 @@ static int i2c_versatile_remove(struct platform_device *dev)
{
struct i2c_versatile *i2c = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
-
i2c_del_adapter(&i2c->adap);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index f042f6da0ace..332c720fb3fe 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -784,8 +784,6 @@ static int xiic_i2c_remove(struct platform_device *pdev)
xiic_deinit(i2c);
- platform_set_drvdata(pdev, NULL);
-
free_irq(platform_get_irq(pdev, 0), i2c);
iounmap(i2c->base);
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 93f029e98c0d..7945b05d3ea0 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -256,7 +256,6 @@ static int xlr_i2c_remove(struct platform_device *pdev)
priv = platform_get_drvdata(pdev);
i2c_del_adapter(&priv->adap);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 3862a953239c..2d1d2c5653fb 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -542,7 +542,6 @@ static int scx200_remove(struct platform_device *pdev)
struct scx200_acb_iface *iface;
iface = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
scx200_cleanup_iface(iface);
return 0;
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e388590b44ab..991d38daa87d 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -935,25 +935,17 @@ out_list:
*/
int i2c_add_adapter(struct i2c_adapter *adapter)
{
- int id, res = 0;
-
-retry:
- if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
- return -ENOMEM;
+ int id;
mutex_lock(&core_lock);
- /* "above" here means "above or equal to", sigh */
- res = idr_get_new_above(&i2c_adapter_idr, adapter,
- __i2c_first_dynamic_bus_num, &id);
+ id = idr_alloc(&i2c_adapter_idr, adapter,
+ __i2c_first_dynamic_bus_num, 0, GFP_KERNEL);
mutex_unlock(&core_lock);
-
- if (res < 0) {
- if (res == -EAGAIN)
- goto retry;
- return res;
- }
+ if (id < 0)
+ return id;
adapter->nr = id;
+
return i2c_register_adapter(adapter);
}
EXPORT_SYMBOL(i2c_add_adapter);
@@ -984,33 +976,17 @@ EXPORT_SYMBOL(i2c_add_adapter);
int i2c_add_numbered_adapter(struct i2c_adapter *adap)
{
int id;
- int status;
if (adap->nr == -1) /* -1 means dynamically assign bus id */
return i2c_add_adapter(adap);
- if (adap->nr & ~MAX_IDR_MASK)
- return -EINVAL;
-
-retry:
- if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
- return -ENOMEM;
mutex_lock(&core_lock);
- /* "above" here means "above or equal to", sigh;
- * we need the "equal to" result to force the result
- */
- status = idr_get_new_above(&i2c_adapter_idr, adap, adap->nr, &id);
- if (status == 0 && id != adap->nr) {
- status = -EBUSY;
- idr_remove(&i2c_adapter_idr, id);
- }
+ id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1,
+ GFP_KERNEL);
mutex_unlock(&core_lock);
- if (status == -EAGAIN)
- goto retry;
-
- if (status == 0)
- status = i2c_register_adapter(adap);
- return status;
+ if (id < 0)
+ return id == -ENOSPC ? -EBUSY : id;
+ return i2c_register_adapter(adap);
}
EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
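The same conversion recurs in the InfiniBand hunks below (cm.c, sa_query.c, ucm.c, ucma.c, uverbs_cmd.c, iwch.h, c2_qp.c): the idr_pre_get()/idr_get_new() retry loop is replaced by a single idr_alloc() call, bracketed by idr_preload()/idr_preload_end() when the allocation happens under a spinlock. A minimal sketch of the new pattern (example_idr, example_lock and example_alloc_id are hypothetical names, not from this patch):

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

static int example_alloc_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preallocate IDR memory outside the lock */
	spin_lock(&example_lock);
	/* start = 0, end = 0 means "any id >= 0"; pass (n, n + 1) to
	 * demand exactly n, as i2c_add_numbered_adapter() does above */
	id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	return id;	/* new id on success, -ENOMEM or -ENOSPC on failure */
}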
@@ -1866,29 +1842,6 @@ s32 i2c_smbus_write_word_data(const struct i2c_client *client, u8 command,
EXPORT_SYMBOL(i2c_smbus_write_word_data);
/**
- * i2c_smbus_process_call - SMBus "process call" protocol
- * @client: Handle to slave device
- * @command: Byte interpreted by slave
- * @value: 16-bit "word" being written
- *
- * This executes the SMBus "process call" protocol, returning negative errno
- * else a 16-bit unsigned "word" received from the device.
- */
-s32 i2c_smbus_process_call(const struct i2c_client *client, u8 command,
- u16 value)
-{
- union i2c_smbus_data data;
- int status;
- data.word = value;
-
- status = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
- I2C_SMBUS_WRITE, command,
- I2C_SMBUS_PROC_CALL, &data);
- return (status < 0) ? status : data.word;
-}
-EXPORT_SYMBOL(i2c_smbus_process_call);
-
-/**
* i2c_smbus_read_block_data - SMBus "block read" protocol
* @client: Handle to slave device
* @command: Byte interpreted by slave
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 5ec2261574ec..c3ccdea3d180 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -148,7 +148,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
return -ENOMEM;
pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
- iminor(file->f_path.dentry->d_inode), count);
+ iminor(file_inode(file)), count);
ret = i2c_master_recv(client, tmp, count);
if (ret >= 0)
@@ -172,7 +172,7 @@ static ssize_t i2cdev_write(struct file *file, const char __user *buf,
return PTR_ERR(tmp);
pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
- iminor(file->f_path.dentry->d_inode), count);
+ iminor(file_inode(file)), count);
ret = i2c_master_send(client, tmp, count);
kfree(tmp);
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 9f50ef04a4bd..abc2e55aa243 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -250,7 +250,6 @@ static int i2c_mux_gpio_remove(struct platform_device *pdev)
for (i = 0; i < mux->data.n_gpios; i++)
gpio_free(mux->gpio_base + mux->data.gpios[i]);
- platform_set_drvdata(pdev, NULL);
i2c_put_adapter(mux->parent);
return 0;
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 3c4417a1d438..02906ca99b41 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -700,11 +700,6 @@ config BLK_DEV_IDE_TX4939
depends on SOC_TX4939
select BLK_DEV_IDEDMA_SFF
-config BLK_DEV_IDE_AT91
- tristate "Atmel AT91 (SAM9, CAP9, AT572D940HF) IDE support"
- depends on ARM && ARCH_AT91 && !ARCH_AT91RM9200 && !ARCH_AT91X40
- select IDE_TIMINGS
-
config BLK_DEV_IDE_ICSIDE
tristate "ICS IDE interface support"
depends on ARM && ARCH_ACORN
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index a3133d7b2a0c..2abcc4790f12 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -333,7 +333,7 @@ static int ide_settings_proc_open(struct inode *inode, struct file *file)
static ssize_t ide_settings_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- ide_drive_t *drive = (ide_drive_t *) PDE(file->f_path.dentry->d_inode)->data;
+ ide_drive_t *drive = (ide_drive_t *) PDE(file_inode(file))->data;
char name[MAX_LEN + 1];
int for_real = 0, mul_factor, div_factor;
unsigned long n;
@@ -558,7 +558,7 @@ static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
static ssize_t ide_driver_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- ide_drive_t *drive = (ide_drive_t *) PDE(file->f_path.dentry->d_inode)->data;
+ ide_drive_t *drive = (ide_drive_t *) PDE(file_inode(file))->data;
char name[32];
if (!capable(CAP_SYS_ADMIN))
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index fa080ebd568f..ffeebc7e9f1c 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -75,7 +75,7 @@ static unsigned long past_skip;
static struct pci_dev *fbd_dev;
-static spinlock_t i7300_idle_lock;
+static raw_spinlock_t i7300_idle_lock;
static int i7300_idle_active;
static u8 i7300_idle_thrtctl_saved;
@@ -457,7 +457,7 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
idle_begin_time = ktime_get();
}
- spin_lock_irqsave(&i7300_idle_lock, flags);
+ raw_spin_lock_irqsave(&i7300_idle_lock, flags);
if (val == IDLE_START) {
cpumask_set_cpu(smp_processor_id(), idle_cpumask);
@@ -506,7 +506,7 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
}
}
end:
- spin_unlock_irqrestore(&i7300_idle_lock, flags);
+ raw_spin_unlock_irqrestore(&i7300_idle_lock, flags);
return 0;
}
@@ -548,7 +548,7 @@ struct debugfs_file_info {
static int __init i7300_idle_init(void)
{
- spin_lock_init(&i7300_idle_lock);
+ raw_spin_lock_init(&i7300_idle_lock);
total_us = 0;
if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 394fea2ba1bc..784b97cb05b0 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -382,20 +382,21 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
- int ret, id;
+ int id;
static int next_id;
- do {
- spin_lock_irqsave(&cm.lock, flags);
- ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
- next_id, &id);
- if (!ret)
- next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
- spin_unlock_irqrestore(&cm.lock, flags);
- } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(&cm.lock, flags);
+
+ id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT);
+ if (id >= 0)
+ next_id = max(id + 1, 0);
+
+ spin_unlock_irqrestore(&cm.lock, flags);
+ idr_preload_end();
cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
- return ret;
+ return id < 0 ? id : 0;
}
static void cm_free_id(__be32 local_id)
@@ -3844,7 +3845,6 @@ static int __init ib_cm_init(void)
cm.remote_sidr_table = RB_ROOT;
idr_init(&cm.local_id_table);
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
- idr_pre_get(&cm.local_id_table, GFP_KERNEL);
INIT_LIST_HEAD(&cm.timewait_list);
ret = class_register(&cm_class);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d789eea32168..71c2c7116802 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2143,33 +2143,23 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
unsigned short snum)
{
struct rdma_bind_list *bind_list;
- int port, ret;
+ int ret;
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list)
return -ENOMEM;
- do {
- ret = idr_get_new_above(ps, bind_list, snum, &port);
- } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
- if (ret)
- goto err1;
-
- if (port != snum) {
- ret = -EADDRNOTAVAIL;
- goto err2;
- }
+ ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
+ if (ret < 0)
+ goto err;
bind_list->ps = ps;
- bind_list->port = (unsigned short) port;
+ bind_list->port = (unsigned short)ret;
cma_bind_port(bind_list, id_priv);
return 0;
-err2:
- idr_remove(ps, port);
-err1:
+err:
kfree(bind_list);
- return ret;
+ return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2214,10 +2204,9 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
{
struct rdma_id_private *cur_id;
struct sockaddr *addr, *cur_addr;
- struct hlist_node *node;
addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
- hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
+ hlist_for_each_entry(cur_id, &bind_list->owners, node) {
if (id_priv == cur_id)
continue;
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 176c8f90f2bb..9f5ad7cc33c8 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -118,14 +118,13 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
{
struct hlist_head *bucket;
struct ib_pool_fmr *fmr;
- struct hlist_node *pos;
if (!pool->cache_bucket)
return NULL;
bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
- hlist_for_each_entry(fmr, pos, bucket, cache_node)
+ hlist_for_each_entry(fmr, bucket, cache_node)
if (io_virtual_address == fmr->io_virtual_address &&
page_list_len == fmr->page_list_len &&
!memcmp(page_list, fmr->page_list,
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a8905abc56e4..934f45e79e5e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -611,19 +611,21 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
+ bool preload = gfp_mask & __GFP_WAIT;
unsigned long flags;
int ret, id;
-retry:
- if (!idr_pre_get(&query_idr, gfp_mask))
- return -ENOMEM;
+ if (preload)
+ idr_preload(gfp_mask);
spin_lock_irqsave(&idr_lock, flags);
- ret = idr_get_new(&query_idr, query, &id);
+
+ id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
+
spin_unlock_irqrestore(&idr_lock, flags);
- if (ret == -EAGAIN)
- goto retry;
- if (ret)
- return ret;
+ if (preload)
+ idr_preload_end();
+ if (id < 0)
+ return id;
query->mad_buf->timeout_ms = timeout_ms;
query->mad_buf->context[0] = query;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 49b15ac1987e..f2f63933e8a9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -176,7 +176,6 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
{
struct ib_ucm_context *ctx;
- int result;
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx)
@@ -187,17 +186,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
ctx->file = file;
INIT_LIST_HEAD(&ctx->events);
- do {
- result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
- if (!result)
- goto error;
-
- mutex_lock(&ctx_id_mutex);
- result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
- mutex_unlock(&ctx_id_mutex);
- } while (result == -EAGAIN);
-
- if (result)
+ mutex_lock(&ctx_id_mutex);
+ ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL);
+ mutex_unlock(&ctx_id_mutex);
+ if (ctx->id < 0)
goto error;
list_add_tail(&ctx->file_list, &file->ctxs);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 2709ff581392..5ca44cd9b00c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -145,7 +145,6 @@ static void ucma_put_ctx(struct ucma_context *ctx)
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
struct ucma_context *ctx;
- int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -156,17 +155,10 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
INIT_LIST_HEAD(&ctx->mc_list);
ctx->file = file;
- do {
- ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
- if (!ret)
- goto error;
-
- mutex_lock(&mut);
- ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
- mutex_unlock(&mut);
- } while (ret == -EAGAIN);
-
- if (ret)
+ mutex_lock(&mut);
+ ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
+ mutex_unlock(&mut);
+ if (ctx->id < 0)
goto error;
list_add_tail(&ctx->list, &file->ctx_list);
@@ -180,23 +172,15 @@ error:
static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
struct ucma_multicast *mc;
- int ret;
mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc)
return NULL;
- do {
- ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
- if (!ret)
- goto error;
-
- mutex_lock(&mut);
- ret = idr_get_new(&multicast_idr, mc, &mc->id);
- mutex_unlock(&mut);
- } while (ret == -EAGAIN);
-
- if (ret)
+ mutex_lock(&mut);
+ mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
+ mutex_unlock(&mut);
+ if (mc->id < 0)
goto error;
mc->ctx = ctx;
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 5bcb2afd3dcb..0fcd7aa26fa2 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -188,6 +188,8 @@ IB_UVERBS_DECLARE_CMD(alloc_pd);
IB_UVERBS_DECLARE_CMD(dealloc_pd);
IB_UVERBS_DECLARE_CMD(reg_mr);
IB_UVERBS_DECLARE_CMD(dereg_mr);
+IB_UVERBS_DECLARE_CMD(alloc_mw);
+IB_UVERBS_DECLARE_CMD(dealloc_mw);
IB_UVERBS_DECLARE_CMD(create_comp_channel);
IB_UVERBS_DECLARE_CMD(create_cq);
IB_UVERBS_DECLARE_CMD(resize_cq);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 0cb0007724a2..a7d00f6b3bc1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -48,6 +48,7 @@ struct uverbs_lock_class {
static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" };
+static struct uverbs_lock_class mw_lock_class = { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
@@ -124,18 +125,17 @@ static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
int ret;
-retry:
- if (!idr_pre_get(idr, GFP_KERNEL))
- return -ENOMEM;
-
+ idr_preload(GFP_KERNEL);
spin_lock(&ib_uverbs_idr_lock);
- ret = idr_get_new(idr, uobj, &uobj->id);
- spin_unlock(&ib_uverbs_idr_lock);
- if (ret == -EAGAIN)
- goto retry;
+ ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ uobj->id = ret;
- return ret;
+ spin_unlock(&ib_uverbs_idr_lock);
+ idr_preload_end();
+
+ return ret < 0 ? ret : 0;
}
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
@@ -730,7 +730,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
goto err_tree_mutex_unlock;
}
- inode = f.file->f_path.dentry->d_inode;
+ inode = file_inode(f.file);
xrcd = find_xrcd(file->device, inode);
if (!xrcd && !(cmd.oflags & O_CREAT)) {
/* no file descriptor. Need CREATE flag */
@@ -1049,6 +1049,126 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
return in_len;
}
+ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_alloc_mw cmd;
+ struct ib_uverbs_alloc_mw_resp resp;
+ struct ib_uobject *uobj;
+ struct ib_pd *pd;
+ struct ib_mw *mw;
+ int ret;
+
+ if (out_len < sizeof(resp))
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, buf, sizeof(cmd)))
+ return -EFAULT;
+
+ uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
+ if (!uobj)
+ return -ENOMEM;
+
+ init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
+ down_write(&uobj->mutex);
+
+ pd = idr_read_pd(cmd.pd_handle, file->ucontext);
+ if (!pd) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ mw = pd->device->alloc_mw(pd, cmd.mw_type);
+ if (IS_ERR(mw)) {
+ ret = PTR_ERR(mw);
+ goto err_put;
+ }
+
+ mw->device = pd->device;
+ mw->pd = pd;
+ mw->uobject = uobj;
+ atomic_inc(&pd->usecnt);
+
+ uobj->object = mw;
+ ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
+ if (ret)
+ goto err_unalloc;
+
+ memset(&resp, 0, sizeof(resp));
+ resp.rkey = mw->rkey;
+ resp.mw_handle = uobj->id;
+
+ if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ &resp, sizeof(resp))) {
+ ret = -EFAULT;
+ goto err_copy;
+ }
+
+ put_pd_read(pd);
+
+ mutex_lock(&file->mutex);
+ list_add_tail(&uobj->list, &file->ucontext->mw_list);
+ mutex_unlock(&file->mutex);
+
+ uobj->live = 1;
+
+ up_write(&uobj->mutex);
+
+ return in_len;
+
+err_copy:
+ idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+
+err_unalloc:
+ ib_dealloc_mw(mw);
+
+err_put:
+ put_pd_read(pd);
+
+err_free:
+ put_uobj_write(uobj);
+ return ret;
+}
+
+ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_dealloc_mw cmd;
+ struct ib_mw *mw;
+ struct ib_uobject *uobj;
+ int ret = -EINVAL;
+
+ if (copy_from_user(&cmd, buf, sizeof(cmd)))
+ return -EFAULT;
+
+ uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
+ if (!uobj)
+ return -EINVAL;
+
+ mw = uobj->object;
+
+ ret = ib_dealloc_mw(mw);
+ if (!ret)
+ uobj->live = 0;
+
+ put_uobj_write(uobj);
+
+ if (ret)
+ return ret;
+
+ idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+
+ mutex_lock(&file->mutex);
+ list_del(&uobj->list);
+ mutex_unlock(&file->mutex);
+
+ put_uobj(uobj);
+
+ return in_len;
+}
+
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 6f2ce6fa98f8..2c6f0f2ecd9d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -87,6 +87,8 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
[IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
[IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
+ [IB_USER_VERBS_CMD_ALLOC_MW] = ib_uverbs_alloc_mw,
+ [IB_USER_VERBS_CMD_DEALLOC_MW] = ib_uverbs_dealloc_mw,
[IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
[IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
[IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq,
@@ -201,6 +203,15 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uobj);
}
+ /* Remove MWs before QPs, in order to support type 2A MWs. */
+ list_for_each_entry_safe(uobj, tmp, &context->mw_list, list) {
+ struct ib_mw *mw = uobj->object;
+
+ idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+ ib_dealloc_mw(mw);
+ kfree(uobj);
+ }
+
list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
struct ib_qp *qp = uobj->object;
struct ib_uqp_object *uqp =
@@ -240,8 +251,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uevent);
}
- /* XXX Free MWs */
-
list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
struct ib_mr *mr = uobj->object;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 30f199e8579f..a8fdd3381405 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1099,18 +1099,19 @@ EXPORT_SYMBOL(ib_free_fast_reg_page_list);
/* Memory windows */
-struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
+struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
struct ib_mw *mw;
if (!pd->device->alloc_mw)
return ERR_PTR(-ENOSYS);
- mw = pd->device->alloc_mw(pd);
+ mw = pd->device->alloc_mw(pd, type);
if (!IS_ERR(mw)) {
mw->device = pd->device;
mw->pd = pd;
mw->uobject = NULL;
+ mw->type = type;
atomic_inc(&pd->usecnt);
}
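A minimal caller-side sketch of the new two-argument ib_alloc_mw() (hypothetical usage; IB_MW_TYPE_1 is the only type the cxgb3 provider below accepts):

	struct ib_mw *mw;

	mw = ib_alloc_mw(pd, IB_MW_TYPE_1);	/* pd: an existing struct ib_pd */
	if (IS_ERR(mw))
		return PTR_ERR(mw);
	/* ... use the window ... */
	ib_dealloc_mw(mw);			/* free the window when done */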
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 7275e727e0f5..d53cf519f42a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1238,15 +1238,4 @@ static struct pci_driver c2_pci_driver = {
.remove = c2_remove,
};
-static int __init c2_init_module(void)
-{
- return pci_register_driver(&c2_pci_driver);
-}
-
-static void __exit c2_exit_module(void)
-{
- pci_unregister_driver(&c2_pci_driver);
-}
-
-module_init(c2_init_module);
-module_exit(c2_exit_module);
+module_pci_driver(c2_pci_driver);
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 28cd5cb51859..0ab826b280b2 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -382,14 +382,17 @@ static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
{
int ret;
- do {
- spin_lock_irq(&c2dev->qp_table.lock);
- ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
- c2dev->qp_table.last++, &qp->qpn);
- spin_unlock_irq(&c2dev->qp_table.lock);
- } while ((ret == -EAGAIN) &&
- idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
- return ret;
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&c2dev->qp_table.lock);
+
+ ret = idr_alloc(&c2dev->qp_table.idr, qp, c2dev->qp_table.last++, 0,
+ GFP_NOWAIT);
+ if (ret >= 0)
+ qp->qpn = ret;
+
+ spin_unlock_irq(&c2dev->qp_table.lock);
+ idr_preload_end();
+ return ret < 0 ? ret : 0;
}
static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index a1c44578e039..837862287a29 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -153,19 +153,17 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
void *handle, u32 id)
{
int ret;
- int newid;
-
- do {
- if (!idr_pre_get(idr, GFP_KERNEL)) {
- return -ENOMEM;
- }
- spin_lock_irq(&rhp->lock);
- ret = idr_get_new_above(idr, handle, id, &newid);
- BUG_ON(newid != id);
- spin_unlock_irq(&rhp->lock);
- } while (ret == -EAGAIN);
-
- return ret;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&rhp->lock);
+
+ ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
+
+ spin_unlock_irq(&rhp->lock);
+ idr_preload_end();
+
+ BUG_ON(ret == -ENOSPC);
+ return ret < 0 ? ret : 0;
}
static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 0bdf09aa6f42..9c12da0cbd32 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -738,7 +738,7 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
return ibmr;
}
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
+static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
struct iwch_dev *rhp;
struct iwch_pd *php;
@@ -747,6 +747,9 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
u32 stag = 0;
int ret;
+ if (type != IB_MW_TYPE_1)
+ return ERR_PTR(-EINVAL);
+
php = to_iwch_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
@@ -783,8 +786,8 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
mmid = (mw->rkey) >> 8;
cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
remove_handle(rhp, &rhp->mmidr, mmid);
- kfree(mhp);
PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+ kfree(mhp);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 6de8463f453b..e5649e8b215d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -567,18 +567,19 @@ int iwch_bind_mw(struct ib_qp *qp,
if (mw_bind->send_flags & IB_SEND_SIGNALED)
t3_wr_flags = T3_COMPLETION_FLAG;
- sgl.addr = mw_bind->addr;
- sgl.lkey = mw_bind->mr->lkey;
- sgl.length = mw_bind->length;
+ sgl.addr = mw_bind->bind_info.addr;
+ sgl.lkey = mw_bind->bind_info.mr->lkey;
+ sgl.length = mw_bind->bind_info.length;
wqe->bind.reserved = 0;
wqe->bind.type = TPT_VATO;
/* TBD: check perms */
- wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
- wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
+ wqe->bind.perms = iwch_ib_to_tpt_bind_access(
+ mw_bind->bind_info.mw_access_flags);
+ wqe->bind.mr_stag = cpu_to_be32(mw_bind->bind_info.mr->lkey);
wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
- wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
- wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
+ wqe->bind.mw_len = cpu_to_be32(mw_bind->bind_info.length);
+ wqe->bind.mw_va = cpu_to_be64(mw_bind->bind_info.addr);
err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
if (err) {
spin_unlock_irqrestore(&qhp->lock, flag);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index c13745cde7fa..565bfb161c1a 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -143,14 +143,28 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
+static void deref_qp(struct c4iw_ep *ep)
+{
+ c4iw_qp_rem_ref(&ep->com.qp->ibqp);
+ clear_bit(QP_REFERENCED, &ep->com.flags);
+}
+
+static void ref_qp(struct c4iw_ep *ep)
+{
+ set_bit(QP_REFERENCED, &ep->com.flags);
+ c4iw_qp_add_ref(&ep->com.qp->ibqp);
+}
+
static void start_ep_timer(struct c4iw_ep *ep)
{
PDBG("%s ep %p\n", __func__, ep);
if (timer_pending(&ep->timer)) {
- PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
- del_timer_sync(&ep->timer);
- } else
- c4iw_get_ep(&ep->com);
+ pr_err("%s timer already started! ep %p\n",
+ __func__, ep);
+ return;
+ }
+ clear_bit(TIMEOUT, &ep->com.flags);
+ c4iw_get_ep(&ep->com);
ep->timer.expires = jiffies + ep_timeout_secs * HZ;
ep->timer.data = (unsigned long)ep;
ep->timer.function = ep_timeout;
@@ -159,14 +173,10 @@ static void start_ep_timer(struct c4iw_ep *ep)
static void stop_ep_timer(struct c4iw_ep *ep)
{
- PDBG("%s ep %p\n", __func__, ep);
- if (!timer_pending(&ep->timer)) {
- WARN(1, "%s timer stopped when its not running! "
- "ep %p state %u\n", __func__, ep, ep->com.state);
- return;
- }
+ PDBG("%s ep %p stopping\n", __func__, ep);
del_timer_sync(&ep->timer);
- c4iw_put_ep(&ep->com);
+ if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
+ c4iw_put_ep(&ep->com);
}
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
@@ -271,11 +281,13 @@ void _c4iw_free_ep(struct kref *kref)
ep = container_of(kref, struct c4iw_ep, com.kref);
PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
+ if (test_bit(QP_REFERENCED, &ep->com.flags))
+ deref_qp(ep);
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
+ remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
- remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
}
kfree(ep);
}
@@ -687,7 +699,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
memset(mpa, 0, sizeof(*mpa));
memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
mpa->flags = MPA_REJECT;
- mpa->revision = mpa_rev;
+ mpa->revision = ep->mpa_attr.version;
mpa->private_data_size = htons(plen);
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
@@ -863,7 +875,6 @@ static void close_complete_upcall(struct c4iw_ep *ep)
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
- ep->com.qp = NULL;
set_bit(CLOSE_UPCALL, &ep->com.history);
}
}
@@ -906,7 +917,6 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
- ep->com.qp = NULL;
set_bit(ABORT_UPCALL, &ep->com.history);
}
}
@@ -946,7 +956,6 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
if (status < 0) {
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
- ep->com.qp = NULL;
}
}
@@ -1291,11 +1300,13 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (mpa->revision > mpa_rev) {
printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
" Received = %d\n", __func__, mpa_rev, mpa->revision);
+ stop_ep_timer(ep);
abort_connection(ep, skb, GFP_KERNEL);
return;
}
if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
+ stop_ep_timer(ep);
abort_connection(ep, skb, GFP_KERNEL);
return;
}
@@ -1306,6 +1317,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
* Fail if there's too much private data.
*/
if (plen > MPA_MAX_PRIVATE_DATA) {
+ stop_ep_timer(ep);
abort_connection(ep, skb, GFP_KERNEL);
return;
}
@@ -1314,6 +1326,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
* If plen does not account for pkt size
*/
if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+ stop_ep_timer(ep);
abort_connection(ep, skb, GFP_KERNEL);
return;
}
@@ -1391,30 +1404,33 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
skb_pull(skb, sizeof(*hdr));
skb_trim(skb, dlen);
- ep->rcv_seq += dlen;
- BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
-
/* update RX credits */
update_rx_credits(ep, dlen);
switch (state_read(&ep->com)) {
case MPA_REQ_SENT:
+ ep->rcv_seq += dlen;
process_mpa_reply(ep, skb);
break;
case MPA_REQ_WAIT:
+ ep->rcv_seq += dlen;
process_mpa_request(ep, skb);
break;
- case MPA_REP_SENT:
+ case FPDU_MODE: {
+ struct c4iw_qp_attributes attrs;
+ BUG_ON(!ep->com.qp);
+ if (status)
+ pr_err("%s Unexpected streaming data." \
+ " qpid %u ep %p state %d tid %u status %d\n",
+ __func__, ep->com.qp->wq.sq.qid, ep,
+ state_read(&ep->com), ep->hwtid, status);
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
break;
+ }
default:
- pr_err("%s Unexpected streaming data." \
- " ep %p state %d tid %u status %d\n",
- __func__, ep, state_read(&ep->com), ep->hwtid, status);
-
- /*
- * The ep will timeout and inform the ULP of the failure.
- * See ep_timeout().
- */
break;
}
return 0;
@@ -1437,6 +1453,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case ABORTING:
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
__state_set(&ep->com, DEAD);
release = 1;
break;
@@ -1475,11 +1492,11 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
V_FW_OFLD_CONNECTION_WR_ASTID(atid));
req->tcb.cplrxdataack_cplpassacceptrpl =
htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
- req->tcb.tx_max = jiffies;
+ req->tcb.tx_max = (__force __be32) jiffies;
req->tcb.rcv_adv = htons(1);
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
- req->tcb.opt0 = TCAM_BYPASS(1) |
+ req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
(nocong ? NO_CONG(1) : 0) |
KEEP_ALIVE(1) |
DELACK(1) |
@@ -1490,20 +1507,20 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
SMAC_SEL(ep->smac_idx) |
DSCP(ep->tos) |
ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(rcv_win >> 10);
- req->tcb.opt2 = PACE(1) |
+ RCV_BUFSIZ(rcv_win >> 10));
+ req->tcb.opt2 = (__force __be32) (PACE(1) |
TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
RX_CHANNEL(0) |
CCTRL_ECN(enable_ecn) |
- RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
if (enable_tcp_timestamps)
- req->tcb.opt2 |= TSTAMPS_EN(1);
+ req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
if (enable_tcp_sack)
- req->tcb.opt2 |= SACK_EN(1);
+ req->tcb.opt2 |= (__force __be32) SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
- req->tcb.opt2 |= WND_SCALE_EN(1);
- req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
- req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
+ req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
+ req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
+ req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
set_bit(ACT_OFLD_CONN, &ep->com.history);
c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -1993,6 +2010,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
init_timer(&child_ep->timer);
cxgb4_insert_tid(t, child_ep, hwtid);
+ insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
accept_cr(child_ep, peer_ip, skb, req);
set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
goto out;
@@ -2018,7 +2036,6 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ntohs(req->tcp_opt));
set_emss(ep, ntohs(req->tcp_opt));
- insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
dst_confirm(ep->dst);
state_set(&ep->com, MPA_REQ_WAIT);
@@ -2163,7 +2180,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
break;
case MPA_REQ_SENT:
stop_ep_timer(ep);
- if (mpa_rev == 2 && ep->tried_with_mpa_v1)
+ if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
connect_reply_upcall(ep, -ECONNRESET);
else {
/*
@@ -2235,9 +2252,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
out:
if (release)
release_ep_resources(ep);
-
- /* retry with mpa-v1 */
- if (ep && ep->retry_with_mpa_v1) {
+ else if (ep->retry_with_mpa_v1) {
+ remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -2430,6 +2446,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_id->add_ref(cm_id);
ep->com.cm_id = cm_id;
ep->com.qp = qp;
+ ref_qp(ep);
/* bind QP to EP and move to RTS */
attrs.mpa_attr = ep->mpa_attr;
@@ -2460,7 +2477,6 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return 0;
err1:
ep->com.cm_id = NULL;
- ep->com.qp = NULL;
cm_id->rem_ref(cm_id);
err:
c4iw_put_ep(&ep->com);
@@ -2501,6 +2517,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->com.cm_id = cm_id;
ep->com.qp = get_qhp(dev, conn_param->qpn);
BUG_ON(!ep->com.qp);
+ ref_qp(ep);
PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
ep->com.qp, cm_id);
@@ -2756,7 +2773,8 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
struct c4iw_ep *ep;
int atid = be32_to_cpu(req->tid);
- ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid);
+ ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
+ (__force u32) req->tid);
if (!ep)
return;
@@ -2800,7 +2818,7 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
struct cpl_pass_accept_req *cpl;
int ret;
- rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie);
+ rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
BUG_ON(!rpl_skb);
if (req->retval) {
PDBG("%s passive open failure %d\n", __func__, req->retval);
@@ -2811,7 +2829,8 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
} else {
cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
- htonl(req->tid)));
+ (__force u32) htonl(
+ (__force u32) req->tid)));
ret = pass_accept_req(dev, rpl_skb);
if (!ret)
kfree_skb(rpl_skb);
@@ -2857,10 +2876,10 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
struct tcp_options_received tmp_opt;
/* Store values from cpl_rx_pkt in temporary location. */
- vlantag = cpl->vlan;
- len = cpl->len;
- l2info = cpl->l2info;
- hdr_len = cpl->hdr_len;
+ vlantag = (__force u16) cpl->vlan;
+ len = (__force u16) cpl->len;
+ l2info = (__force u32) cpl->l2info;
+ hdr_len = (__force u16) cpl->hdr_len;
intf = cpl->iff;
__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
@@ -2871,19 +2890,24 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
*/
memset(&tmp_opt, 0, sizeof(tmp_opt));
tcp_clear_options(&tmp_opt);
- tcp_parse_options(skb, &tmp_opt, 0, 0, NULL);
+ tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
- V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) |
+ V_SYN_MAC_IDX(G_RX_MACIDX(
+ (__force int) htonl(l2info))) |
F_SYN_XACT_MATCH);
- req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) |
- V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) |
- V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) |
- V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info))));
- req->vlan = vlantag;
- req->len = len;
+ req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+ (__force int) htonl(l2info))) |
+ V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+ (__force int) htons(hdr_len))) |
+ V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+ (__force int) htons(hdr_len))) |
+ V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
+ (__force int) htonl(l2info))));
+ req->vlan = (__force __be16) vlantag;
+ req->len = (__force __be16) len;
req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
PASS_OPEN_TOS(tos));
req->tcpopt.mss = htons(tmp_opt.mss_clamp);
@@ -2912,7 +2936,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
- req->le.filter = filter;
+ req->le.filter = (__force __be32) filter;
req->le.lport = lport;
req->le.pport = rport;
req->le.u.ipv4.lip = laddr;
@@ -2938,7 +2962,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
* TP will ignore any value > 0 for MSS index.
*/
req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
- req->cookie = cpu_to_be64((u64)skb);
+ req->cookie = (unsigned long)skb;
set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -2988,7 +3012,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
/*
* Calculate the server tid from filter hit index from cpl_rx_pkt.
*/
- stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base
+ stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
+ - dev->rdev.lldi.tids->sftid_base
+ dev->rdev.lldi.tids->nstids;
lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
@@ -3049,10 +3074,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
- window = htons(tcph->window);
+ window = (__force u16) htons((__force u16)tcph->window);
/* Calcuate filter portion for LE region. */
- filter = cpu_to_be32(select_ntuple(dev, dst, e));
+ filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
/*
* Synthesize the cpl_pass_accept_req. We have everything except the
@@ -3175,11 +3200,16 @@ static DECLARE_WORK(skb_work, process_work);
static void ep_timeout(unsigned long arg)
{
struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+ int kickit = 0;
spin_lock(&timeout_lock);
- list_add_tail(&ep->entry, &timeout_list);
+ if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
+ list_add_tail(&ep->entry, &timeout_list);
+ kickit = 1;
+ }
spin_unlock(&timeout_lock);
- queue_work(workq, &skb_work);
+ if (kickit)
+ queue_work(workq, &skb_work);
}
/*
@@ -3268,8 +3298,14 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
/*
* Wake up any threads in rdma_init() or rdma_fini().
+ * However, if we are on MPAv2 and want to retry with MPAv1
+ * then, don't wake up yet.
*/
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
+ if (ep->com.state != MPA_REQ_SENT)
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ } else
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
sched(dev, skb);
return 0;
}
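The ep_timeout() hunk above uses an atomic test_and_set_bit() so an endpoint is placed on the timeout list and the worker is kicked at most once; a minimal sketch of that idiom, with hypothetical my_obj/MY_PENDING/pending_* names standing in for the cxgb4 structures, might look like this:

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical object carrying a flags word and a list linkage. */
struct my_obj {
	unsigned long flags;
	struct list_head entry;
};

#define MY_PENDING	0	/* bit number, not a mask */

static LIST_HEAD(pending_list);
static DEFINE_SPINLOCK(pending_lock);
static struct work_struct pending_work;

static void my_timer_fired(struct my_obj *obj)
{
	int kick = 0;

	spin_lock(&pending_lock);
	/* Only the first expiry queues the object and schedules the worker. */
	if (!test_and_set_bit(MY_PENDING, &obj->flags)) {
		list_add_tail(&obj->entry, &pending_list);
		kick = 1;
	}
	spin_unlock(&pending_lock);

	if (kick)
		schedule_work(&pending_work);
}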
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index ba11c76c0b5a..80069ad595c1 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -533,7 +533,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
"qpmask 0x%x cqshift %lu cqmask 0x%x\n",
(unsigned)pci_resource_len(rdev->lldi.pdev, 2),
- (void *)pci_resource_start(rdev->lldi.pdev, 2),
+ (void *)(unsigned long)pci_resource_start(rdev->lldi.pdev, 2),
rdev->lldi.db_reg,
rdev->lldi.gts_reg,
rdev->qpshift, rdev->qpmask,
@@ -797,7 +797,8 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
"RSS %#llx, FL %#llx, len %u\n",
pci_name(ctx->lldi.pdev), gl->va,
(unsigned long long)be64_to_cpu(*rsp),
- (unsigned long long)be64_to_cpu(*(u64 *)gl->va),
+ (unsigned long long)be64_to_cpu(
+ *(__force __be64 *)gl->va),
gl->tot_len);
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index cf2f6b47617a..1a840b2211dd 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -46,9 +46,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
(qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
- PDBG("%s AE received after RTS - "
- "qp state %d qpid 0x%x status 0x%x\n", __func__,
- qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
+ pr_err("%s AE after RTS - qpid 0x%x opcode %d status 0x%x "\
+ "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ __func__, CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+ CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
+ CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
return;
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 9c1644fb0259..7eec5e13fa8c 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -260,20 +260,21 @@ static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
void *handle, u32 id, int lock)
{
int ret;
- int newid;
- do {
- if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
- return -ENOMEM;
- if (lock)
- spin_lock_irq(&rhp->lock);
- ret = idr_get_new_above(idr, handle, id, &newid);
- BUG_ON(!ret && newid != id);
- if (lock)
- spin_unlock_irq(&rhp->lock);
- } while (ret == -EAGAIN);
+ if (lock) {
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&rhp->lock);
+ }
+
+ ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);
+
+ if (lock) {
+ spin_unlock_irq(&rhp->lock);
+ idr_preload_end();
+ }
- return ret;
+ BUG_ON(ret == -ENOSPC);
+ return ret < 0 ? ret : 0;
}
static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
@@ -716,6 +717,8 @@ enum c4iw_ep_flags {
ABORT_REQ_IN_PROGRESS = 1,
RELEASE_RESOURCES = 2,
CLOSE_SENT = 3,
+ TIMEOUT = 4,
+ QP_REFERENCED = 5,
};
enum c4iw_ep_history {
@@ -866,7 +869,7 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
int page_list_len);
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
int c4iw_dealloc_mw(struct ib_mw *mw);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
u64 length, u64 virt, int acc,
struct ib_udata *udata);
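The _insert_handle() rewrite above is one instance of the tree-wide conversion from the idr_pre_get()/idr_get_new_above() retry loop to idr_preload()/idr_alloc(); the same shape recurs in the ehca, ipath, mlx4, ocrdma and qib hunks below. A minimal sketch of the new pattern, for a hypothetical table guarded by a spinlock and allocating one specific id as _insert_handle() does, could be:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int my_insert(void *ptr, u32 id)
{
	int ret;

	idr_preload(GFP_KERNEL);	/* preallocate so the GFP_NOWAIT idr_alloc() below is unlikely to fail */
	spin_lock(&my_lock);

	/* end = id + 1 requests exactly this id; idr_alloc() returns the id or a -errno */
	ret = idr_alloc(&my_idr, ptr, id, id + 1, GFP_NOWAIT);

	spin_unlock(&my_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}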
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index afd81790ab3c..903a92d6f91d 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -650,7 +650,7 @@ err:
return ERR_PTR(err);
}
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
@@ -659,6 +659,9 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
u32 stag = 0;
int ret;
+ if (type != IB_MW_TYPE_1)
+ return ERR_PTR(-EINVAL);
+
php = to_c4iw_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 05bfe53bff64..17ba4f8bc12d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1383,6 +1383,7 @@ err:
qhp->ep = NULL;
set_state(qhp, C4IW_QP_STATE_ERROR);
free = 1;
+ abort = 1;
wake_up(&qhp->wait);
BUG_ON(!ep);
flush_qp(qhp);
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 8f5290147e8a..212150c25ea0 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -128,7 +128,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
void *vpage;
u32 counter;
u64 rpage, cqx_fec, h_ret;
- int ipz_rc, ret, i;
+ int ipz_rc, i;
unsigned long flags;
if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
@@ -163,32 +163,19 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
adapter_handle = shca->ipz_hca_handle;
param.eq_handle = shca->eq.ipz_eq_handle;
- do {
- if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
- cq = ERR_PTR(-ENOMEM);
- ehca_err(device, "Can't reserve idr nr. device=%p",
- device);
- goto create_cq_exit1;
- }
-
- write_lock_irqsave(&ehca_cq_idr_lock, flags);
- ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
- write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
- } while (ret == -EAGAIN);
+ idr_preload(GFP_KERNEL);
+ write_lock_irqsave(&ehca_cq_idr_lock, flags);
+ my_cq->token = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
+ write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ idr_preload_end();
- if (ret) {
+ if (my_cq->token < 0) {
cq = ERR_PTR(-ENOMEM);
ehca_err(device, "Can't allocate new idr entry. device=%p",
device);
goto create_cq_exit1;
}
- if (my_cq->token > 0x1FFFFFF) {
- cq = ERR_PTR(-ENOMEM);
- ehca_err(device, "Invalid number of cq. device=%p", device);
- goto create_cq_exit2;
- }
-
/*
* CQs maximum depth is 4GB-64, but we need additional 20 as buffer
* for receiving errors CQEs.
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 8f7f282ead65..22f79afa7fc1 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -95,7 +95,7 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
int ehca_dereg_mr(struct ib_mr *mr);
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd);
+struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
struct ib_mw_bind *mw_bind);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 87844869dcc2..bcfb0c183620 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -688,7 +688,7 @@ dereg_mr_exit0:
/*----------------------------------------------------------------------*/
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
+struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
struct ib_mw *ib_mw;
u64 h_ret;
@@ -698,6 +698,9 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
container_of(pd->device, struct ehca_shca, ib_device);
struct ehca_mw_hipzout_parms hipzout;
+ if (type != IB_MW_TYPE_1)
+ return ERR_PTR(-EINVAL);
+
e_mw = ehca_mw_new();
if (!e_mw) {
ib_mw = ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 149393915ae5..00d6861a6a18 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -636,30 +636,26 @@ static struct ehca_qp *internal_create_qp(
my_qp->send_cq =
container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
- do {
- if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
- ret = -ENOMEM;
- ehca_err(pd->device, "Can't reserve idr resources.");
- goto create_qp_exit0;
- }
+ idr_preload(GFP_KERNEL);
+ write_lock_irqsave(&ehca_qp_idr_lock, flags);
- write_lock_irqsave(&ehca_qp_idr_lock, flags);
- ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
- write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
- } while (ret == -EAGAIN);
+ ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
+ if (ret >= 0)
+ my_qp->token = ret;
- if (ret) {
- ret = -ENOMEM;
- ehca_err(pd->device, "Can't allocate new idr entry.");
+ write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ idr_preload_end();
+ if (ret < 0) {
+ if (ret == -ENOSPC) {
+ ret = -EINVAL;
+ ehca_err(pd->device, "Invalid number of qp");
+ } else {
+ ret = -ENOMEM;
+ ehca_err(pd->device, "Can't allocate new idr entry.");
+ }
goto create_qp_exit0;
}
- if (my_qp->token > 0x1FFFFFF) {
- ret = -EINVAL;
- ehca_err(pd->device, "Invalid number of qp");
- goto create_qp_exit1;
- }
-
if (has_srq)
parms.srq_token = my_qp->token;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 7b371f545ece..bd0caedafe99 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -194,11 +194,6 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
struct ipath_devdata *dd;
int ret;
- if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
- dd = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
dd = vzalloc(sizeof(*dd));
if (!dd) {
dd = ERR_PTR(-ENOMEM);
@@ -206,9 +201,10 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
}
dd->ipath_unit = -1;
+ idr_preload(GFP_KERNEL);
spin_lock_irqsave(&ipath_devs_lock, flags);
- ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
+ ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);
if (ret < 0) {
printk(KERN_ERR IPATH_DRV_NAME
": Could not allocate unit ID: error %d\n", -ret);
@@ -216,6 +212,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
dd = ERR_PTR(ret);
goto bail_unlock;
}
+ dd->ipath_unit = ret;
dd->pcidev = pdev;
pci_set_drvdata(pdev, dd);
@@ -224,7 +221,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
bail_unlock:
spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
+ idr_preload_end();
bail:
return dd;
}
@@ -2503,11 +2500,6 @@ static int __init infinipath_init(void)
* the PCI subsystem.
*/
idr_init(&unit_table);
- if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
- printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
- ret = -ENOMEM;
- goto bail;
- }
ret = pci_register_driver(&ipath_driver);
if (ret < 0) {
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 3eb7e454849b..aed8afee56da 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1864,9 +1864,9 @@ static int ipath_assign_port(struct file *fp,
goto done_chk_sdma;
}
- i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
+ i_minor = iminor(file_inode(fp)) - IPATH_USER_MINOR_BASE;
ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
- (long)fp->f_path.dentry->d_inode->i_rdev, i_minor);
+ (long)file_inode(fp)->i_rdev, i_minor);
if (i_minor)
ret = find_free_port(i_minor - 1, fp, uinfo);
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index a4de9d58e9b4..a479375a8fd8 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -113,7 +113,7 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
struct infinipath_counters counters;
struct ipath_devdata *dd;
- dd = file->f_path.dentry->d_inode->i_private;
+ dd = file_inode(file)->i_private;
dd->ipath_f_read_counters(dd, &counters);
return simple_read_from_buffer(buf, count, ppos, &counters,
@@ -154,7 +154,7 @@ static ssize_t flash_read(struct file *file, char __user *buf,
goto bail;
}
- dd = file->f_path.dentry->d_inode->i_private;
+ dd = file_inode(file)->i_private;
if (ipath_eeprom_read(dd, pos, tmp, count)) {
ipath_dev_err(dd, "failed to read from flash\n");
ret = -ENXIO;
@@ -207,7 +207,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
goto bail_tmp;
}
- dd = file->f_path.dentry->d_inode->i_private;
+ dd = file_inode(file)->i_private;
if (ipath_eeprom_write(dd, pos, tmp, count)) {
ret = -ENXIO;
ipath_dev_err(dd, "failed to write to flash\n");
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index dbc99d41605c..e0d79b2395e4 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -203,7 +203,7 @@ static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
- int ret, id;
+ int ret;
static int next_id;
struct id_map_entry *ent;
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
@@ -220,25 +220,23 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
ent->dev = to_mdev(ibdev);
INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
- do {
- spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
- ret = idr_get_new_above(&sriov->pv_id_table, ent,
- next_id, &id);
- if (!ret) {
- next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
- ent->pv_cm_id = (u32)id;
- sl_id_map_add(ibdev, ent);
- }
+ idr_preload(GFP_KERNEL);
+ spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
- spin_unlock(&sriov->id_map_lock);
- } while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL));
- /*the function idr_get_new_above can return -ENOSPC, so don't insert in that case.*/
- if (!ret) {
- spin_lock(&sriov->id_map_lock);
+ ret = idr_alloc(&sriov->pv_id_table, ent, next_id, 0, GFP_NOWAIT);
+ if (ret >= 0) {
+ next_id = max(ret + 1, 0);
+ ent->pv_cm_id = (u32)ret;
+ sl_id_map_add(ibdev, ent);
list_add_tail(&ent->list, &sriov->cm_list);
- spin_unlock(&sriov->id_map_lock);
- return ent;
}
+
+ spin_unlock(&sriov->id_map_lock);
+ idr_preload_end();
+
+ if (ret >= 0)
+ return ent;
+
/*error flow*/
kfree(ent);
mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 0a903c129f0a..934792c477bc 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1999,16 +1999,17 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
goto demux_err;
err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
if (err)
- goto demux_err;
+ goto free_pv;
}
mlx4_ib_master_tunnels(dev, 1);
return 0;
+free_pv:
+ free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
demux_err:
- while (i > 0) {
+ while (--i >= 0) {
free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
- --i;
}
mlx4_ib_device_unregister_sysfs(dev);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e7d81c0d1ac5..23d734349d8e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -137,6 +137,14 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
props->device_cap_flags |= IB_DEVICE_XRC;
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
+ if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
+ if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
+ else
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
+ }
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
@@ -1434,6 +1442,17 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
}
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
+ dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
+ ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
+ ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
+ ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
+
+ ibdev->ib_dev.uverbs_cmd_mask |=
+ (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
+ }
+
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
@@ -1601,8 +1620,7 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
}
out:
- if (dm)
- kfree(dm);
+ kfree(dm);
return;
}
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index dcd845bc30f0..f61ec26500c4 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -116,6 +116,11 @@ struct mlx4_ib_mr {
struct ib_umem *umem;
};
+struct mlx4_ib_mw {
+ struct ib_mw ibmw;
+ struct mlx4_mw mmw;
+};
+
struct mlx4_ib_fast_reg_page_list {
struct ib_fast_reg_page_list ibfrpl;
__be64 *mapped_page_list;
@@ -533,6 +538,11 @@ static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
return container_of(ibmr, struct mlx4_ib_mr, ibmr);
}
+static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct mlx4_ib_mw, ibmw);
+}
+
static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
@@ -581,6 +591,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr);
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+ struct ib_mw_bind *mw_bind);
+int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
int max_page_list_len);
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
@@ -652,12 +666,12 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
u8 *mac, int *is_mcast, u8 port);
-static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
+static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
- return 1;
+ return true;
return !!(ah->av.ib.g_slid & 0x80);
}
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index bbaf6176f207..e471f089ff00 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -41,9 +41,19 @@ static u32 convert_access(int acc)
(acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) |
(acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) |
(acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) |
+ (acc & IB_ACCESS_MW_BIND ? MLX4_PERM_BIND_MW : 0) |
MLX4_PERM_LOCAL_READ;
}
+static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
+{
+ switch (type) {
+ case IB_MW_TYPE_1: return MLX4_MW_TYPE_1;
+ case IB_MW_TYPE_2: return MLX4_MW_TYPE_2;
+ default: return -1;
+ }
+}
+
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx4_ib_mr *mr;
@@ -68,7 +78,7 @@ struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
return &mr->ibmr;
err_mr:
- mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
+ (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
err_free:
kfree(mr);
@@ -163,7 +173,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return &mr->ibmr;
err_mr:
- mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
+ (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
err_umem:
ib_umem_release(mr->umem);
@@ -177,8 +187,11 @@ err_free:
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);
+ int ret;
- mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
+ ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
+ if (ret)
+ return ret;
if (mr->umem)
ib_umem_release(mr->umem);
kfree(mr);
@@ -186,6 +199,70 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
return 0;
}
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+{
+ struct mlx4_ib_dev *dev = to_mdev(pd->device);
+ struct mlx4_ib_mw *mw;
+ int err;
+
+ mw = kmalloc(sizeof(*mw), GFP_KERNEL);
+ if (!mw)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
+ to_mlx4_type(type), &mw->mmw);
+ if (err)
+ goto err_free;
+
+ err = mlx4_mw_enable(dev->dev, &mw->mmw);
+ if (err)
+ goto err_mw;
+
+ mw->ibmw.rkey = mw->mmw.key;
+
+ return &mw->ibmw;
+
+err_mw:
+ mlx4_mw_free(dev->dev, &mw->mmw);
+
+err_free:
+ kfree(mw);
+
+ return ERR_PTR(err);
+}
+
+int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+ struct ib_mw_bind *mw_bind)
+{
+ struct ib_send_wr wr;
+ struct ib_send_wr *bad_wr;
+ int ret;
+
+ memset(&wr, 0, sizeof(wr));
+ wr.opcode = IB_WR_BIND_MW;
+ wr.wr_id = mw_bind->wr_id;
+ wr.send_flags = mw_bind->send_flags;
+ wr.wr.bind_mw.mw = mw;
+ wr.wr.bind_mw.bind_info = mw_bind->bind_info;
+ wr.wr.bind_mw.rkey = ib_inc_rkey(mw->rkey);
+
+ ret = mlx4_ib_post_send(qp, &wr, &bad_wr);
+ if (!ret)
+ mw->rkey = wr.wr.bind_mw.rkey;
+
+ return ret;
+}
+
+int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct mlx4_ib_mw *mw = to_mmw(ibmw);
+
+ mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
+ kfree(mw);
+
+ return 0;
+}
+
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
int max_page_list_len)
{
@@ -212,7 +289,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
return &mr->ibmr;
err_mr:
- mlx4_mr_free(dev->dev, &mr->mmr);
+ (void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
kfree(mr);
@@ -291,7 +368,7 @@ struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
return &fmr->ibfmr;
err_mr:
- mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
+ (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
err_free:
kfree(fmr);
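From a kernel verbs consumer's point of view, the mlx4 entry points added above are reached through ib_alloc_mw(), ib_bind_mw() and ib_dealloc_mw(); a rough sketch of binding a type 1 window, assuming pd, qp and a registered mr already exist and with abbreviated error handling, might look like this:

#include <linux/err.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int bind_type1_window(struct ib_pd *pd, struct ib_qp *qp,
			     struct ib_mr *mr, u64 addr, u64 len)
{
	struct ib_mw *mw;
	struct ib_mw_bind bind;
	int ret;

	mw = ib_alloc_mw(pd, IB_MW_TYPE_1);	/* the type argument is what this series adds */
	if (IS_ERR(mw))
		return PTR_ERR(mw);

	memset(&bind, 0, sizeof(bind));
	bind.wr_id = 1;
	bind.send_flags = IB_SEND_SIGNALED;
	/* mr is assumed to have been registered with IB_ACCESS_MW_BIND */
	bind.bind_info.mr = mr;
	bind.bind_info.addr = addr;
	bind.bind_info.length = len;
	bind.bind_info.mw_access_flags = IB_ACCESS_REMOTE_WRITE;

	ret = ib_bind_mw(qp, mw, &bind);	/* posts the bind on the QP's send queue */
	if (ret)
		ib_dealloc_mw(mw);

	return ret;
}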
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 19e0637220b9..35cced2a4da8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -104,6 +104,7 @@ static const __be32 mlx4_ib_opcode[] = {
[IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
[IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
+ [IB_WR_BIND_MW] = cpu_to_be32(MLX4_OPCODE_BIND_MW),
};
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -1746,11 +1747,11 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
int header_size;
int spc;
int i;
- int is_eth;
- int is_vlan = 0;
- int is_grh;
- u16 vlan;
int err = 0;
+ u16 vlan = 0xffff;
+ bool is_eth;
+ bool is_vlan = false;
+ bool is_grh;
send_size = 0;
for (i = 0; i < wr->num_sge; ++i)
@@ -1953,9 +1954,12 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq
static __be32 convert_access(int acc)
{
- return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC) : 0) |
- (acc & IB_ACCESS_REMOTE_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ) : 0) |
+ return (acc & IB_ACCESS_REMOTE_ATOMIC ?
+ cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC) : 0) |
+ (acc & IB_ACCESS_REMOTE_WRITE ?
+ cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ?
+ cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) |
(acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}
@@ -1981,12 +1985,28 @@ static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
fseg->reserved[1] = 0;
}
+static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
+{
+ bseg->flags1 =
+ convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
+ cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ |
+ MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
+ MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
+ bseg->flags2 = 0;
+ if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
+ bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
+ if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
+ bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
+ bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
+ bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
+ bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
+ bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
+}
+
static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
- iseg->flags = 0;
- iseg->mem_key = cpu_to_be32(rkey);
- iseg->guest_id = 0;
- iseg->pa = 0;
+ memset(iseg, 0, sizeof(*iseg));
+ iseg->mem_key = cpu_to_be32(rkey);
}
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
@@ -2291,6 +2311,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
break;
+ case IB_WR_BIND_MW:
+ ctrl->srcrb_flags |=
+ cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
+ set_bind_seg(wqe, wr);
+ wqe += sizeof(struct mlx4_wqe_bind_seg);
+ size += sizeof(struct mlx4_wqe_bind_seg) / 16;
+ break;
default:
/* No extra segments required for sends */
break;
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index 5b2a01dfb907..97516eb363b7 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -732,7 +732,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
dev->ports_parent =
kobject_create_and_add("ports",
kobject_get(dev->iov_parent));
- if (!dev->iov_parent) {
+ if (!dev->ports_parent) {
ret = -ENOMEM;
goto err_ports;
}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 07e4fbad987a..8f67fe2e91e6 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -55,7 +55,8 @@ static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
/**
* nes_alloc_mw
*/
-static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
+static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type)
+{
struct nes_pd *nespd = to_nespd(ibpd);
struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
struct nes_device *nesdev = nesvnic->nesdev;
@@ -71,6 +72,9 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
u32 driver_key = 0;
u8 stag_key = 0;
+ if (type != IB_MW_TYPE_1)
+ return ERR_PTR(-EINVAL);
+
get_random_bytes(&next_stag_index, sizeof(next_stag_index));
stag_key = (u8)next_stag_index;
@@ -244,20 +248,19 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
if (ibmw_bind->send_flags & IB_SEND_SIGNALED)
wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
- if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_WRITE) {
+ if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_WRITE)
wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE;
- }
- if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_READ) {
+ if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_READ)
wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_READ;
- }
set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, ibmw_bind->mr->lkey);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX,
+ ibmw_bind->bind_info.mr->lkey);
set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey);
set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX,
- ibmw_bind->length);
+ ibmw_bind->bind_info.length);
wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0;
- u64temp = (u64)ibmw_bind->addr;
+ u64temp = (u64)ibmw_bind->bind_info.addr;
set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp);
head++;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index c4e0131f1b57..48928c8e7774 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -51,18 +51,6 @@ static DEFINE_IDR(ocrdma_dev_id);
static union ib_gid ocrdma_zero_sgid;
-static int ocrdma_get_instance(void)
-{
- int instance = 0;
-
- /* Assign an unused number */
- if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL))
- return -1;
- if (idr_get_new(&ocrdma_dev_id, NULL, &instance))
- return -1;
- return instance;
-}
-
void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
{
u8 mac_addr[6];
@@ -416,7 +404,7 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
goto idr_err;
memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
- dev->id = ocrdma_get_instance();
+ dev->id = idr_alloc(&ocrdma_dev_id, NULL, 0, 0, GFP_KERNEL);
if (dev->id < 0)
goto idr_err;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 959a5c4ff812..4f7aa301b3b1 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1524,7 +1524,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
}
}
- i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
+ i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
if (i_minor)
ret = find_free_ctxt(i_minor - 1, fp, uinfo);
else
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 65a2a23f6f8a..644bd6f6467c 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -45,7 +45,7 @@
static struct super_block *qib_super;
-#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
+#define private2dd(file) (file_inode(file)->i_private)
static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, const struct file_operations *fops,
@@ -171,7 +171,7 @@ static const struct file_operations cntr_ops[] = {
};
/*
- * Could use file->f_dentry->d_inode->i_ino to figure out which file,
+ * Could use file_inode(file)->i_ino to figure out which file,
* instead of separate routine for each, but for now, this works...
*/
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index ddf066d9abb6..50e33aa0b4e3 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1060,22 +1060,23 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
struct qib_devdata *dd;
int ret;
- if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
- dd = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
if (!dd) {
dd = ERR_PTR(-ENOMEM);
goto bail;
}
+ idr_preload(GFP_KERNEL);
spin_lock_irqsave(&qib_devs_lock, flags);
- ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
- if (ret >= 0)
+
+ ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
+ if (ret >= 0) {
+ dd->unit = ret;
list_add(&dd->list, &qib_dev_list);
+ }
+
spin_unlock_irqrestore(&qib_devs_lock, flags);
+ idr_preload_end();
if (ret < 0) {
qib_early_err(&pdev->dev,
@@ -1180,11 +1181,6 @@ static int __init qlogic_ib_init(void)
* the PCI subsystem.
*/
idr_init(&qib_unit_table);
- if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
- pr_err("idr_pre_get() failed\n");
- ret = -ENOMEM;
- goto bail_cq_wq;
- }
ret = pci_register_driver(&qib_driver);
if (ret < 0) {
@@ -1199,7 +1195,6 @@ static int __init qlogic_ib_init(void)
bail_unit:
idr_destroy(&qib_unit_table);
-bail_cq_wq:
destroy_workqueue(qib_cq_wq);
bail_dev:
qib_dev_cleanup();
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 35275099cafd..a6a2cc2ba260 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -268,8 +268,9 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
qpp = &q->next)
if (q == qp) {
atomic_dec(&qp->refcount);
- *qpp = qp->next;
- rcu_assign_pointer(qp->next, NULL);
+ rcu_assign_pointer(*qpp,
+ rcu_dereference_protected(qp->next,
+ lockdep_is_held(&dev->qpt_lock)));
break;
}
}
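The qib_qp.c hunk above unlinks a QP from an RCU-protected hash chain while holding the table lock, using rcu_dereference_protected() to document the locking instead of a plain pointer copy; a stripped-down version of that unlink, with hypothetical node/table_lock names, could read:

#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct node {
	struct node __rcu *next;
};

static DEFINE_SPINLOCK(table_lock);

/* Caller holds table_lock; concurrent readers walk the chain under rcu_read_lock(). */
static void unlink_node(struct node __rcu **pprev, struct node *victim)
{
	rcu_assign_pointer(*pprev,
			   rcu_dereference_protected(victim->next,
						     lockdep_is_held(&table_lock)));
	/* victim itself must only be freed after a grace period, e.g. via kfree_rcu(). */
}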
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 07ca6fd5546b..eb71aaa26a9a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -117,6 +117,8 @@ enum {
#define IPOIB_OP_CM (0)
#endif
+#define IPOIB_QPN_MASK ((__force u32) cpu_to_be32(0xFFFFFF))
+
/* structs */
struct ipoib_header {
@@ -760,4 +762,6 @@ extern int ipoib_debug_level;
#define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff)
+extern const char ipoib_driver_version[];
+
#endif /* _IPOIB_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index ca131335417b..c4b3940845e6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -39,7 +39,24 @@
static void ipoib_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver));
+ struct ipoib_dev_priv *priv = netdev_priv(netdev);
+ struct ib_device_attr *attr;
+
+ attr = kmalloc(sizeof(*attr), GFP_KERNEL);
+ if (attr && !ib_query_device(priv->ca, attr))
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", (int)(attr->fw_ver >> 32),
+ (int)(attr->fw_ver >> 16) & 0xffff,
+ (int)attr->fw_ver & 0xffff);
+ kfree(attr);
+
+ strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device),
+ sizeof(drvinfo->bus_info));
+
+ strlcpy(drvinfo->version, ipoib_driver_version,
+ sizeof(drvinfo->version));
+
+ strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
}
static int ipoib_get_coalesce(struct net_device *dev,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6fdc9e78da0d..8534afd04e7c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -49,9 +49,14 @@
#include <linux/jhash.h>
#include <net/arp.h>
+#define DRV_VERSION "1.0.0"
+
+const char ipoib_driver_version[] = DRV_VERSION;
+
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
@@ -505,6 +510,9 @@ static void path_rec_completion(int status,
spin_unlock_irqrestore(&priv->lock, flags);
+ if (IS_ERR_OR_NULL(ah))
+ ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
+
if (old_ah)
ipoib_put_ah(old_ah);
@@ -844,10 +852,10 @@ static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
* different subnets.
*/
/* qpn octets[1:4) & port GUID octets[12:20) */
- u32 *daddr_32 = (u32 *) daddr;
+ u32 *d32 = (u32 *) daddr;
u32 hv;
- hv = jhash_3words(daddr_32[3], daddr_32[4], 0xFFFFFF & daddr_32[0], 0);
+ hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
return hv & htbl->mask;
}
@@ -1688,6 +1696,8 @@ static void ipoib_remove_one(struct ib_device *device)
return;
dev_list = ib_get_client_data(device, &ipoib_client);
+ if (!dev_list)
+ return;
list_for_each_entry_safe(priv, tmp, dev_list, list) {
ib_unregister_event_handler(&priv->event_handler);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index ef7d3be46c31..5babdb35bda7 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -94,7 +94,7 @@
/* support up to 512KB in one RDMA */
#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
-#define ISER_DEF_CMD_PER_LUN 128
+#define ISER_DEF_CMD_PER_LUN ISCSI_DEF_XMIT_CMDS_MAX
/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2033a928d34d..be1edb04b085 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -369,10 +369,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
regd_buf = &iser_task->rdma_regd[cmd_dir];
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
- if (aligned_len != mem->dma_nents) {
+ if (aligned_len != mem->dma_nents ||
+ (!ib_conn->fmr_pool && mem->dma_nents > 1)) {
iscsi_conn->fmr_unalign_cnt++;
- iser_warn("rdma alignment violation %d/%d aligned\n",
- aligned_len, mem->size);
+ iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
+ aligned_len, mem->size);
iser_data_buf_dump(mem, ibdev);
/* unmap the command data before accessing it */
@@ -404,7 +405,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
} else { /* use FMR for multiple dma entries */
iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
- if (err) {
+ if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
mem->dma_nents,
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 95a49affee44..4debadc53106 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -242,10 +242,14 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
IB_ACCESS_REMOTE_READ);
ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
- if (IS_ERR(ib_conn->fmr_pool)) {
- ret = PTR_ERR(ib_conn->fmr_pool);
+ ret = PTR_ERR(ib_conn->fmr_pool);
+ if (IS_ERR(ib_conn->fmr_pool) && ret != -ENOSYS) {
ib_conn->fmr_pool = NULL;
goto out_err;
+ } else if (ret == -ENOSYS) {
+ ib_conn->fmr_pool = NULL;
+ iser_warn("FMRs are not supported, using unaligned mode\n");
+ ret = 0;
}
memset(&init_attr, 0, sizeof init_attr);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index d5088ce78290..7ccf3284dda3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -700,23 +700,24 @@ static int srp_reconnect_target(struct srp_target_port *target)
struct Scsi_Host *shost = target->scsi_host;
int i, ret;
- if (target->state != SRP_TARGET_LIVE)
- return -EAGAIN;
-
scsi_target_block(&shost->shost_gendev);
srp_disconnect_target(target);
/*
- * Now get a new local CM ID so that we avoid confusing the
- * target in case things are really fouled up.
+ * Now get a new local CM ID so that we avoid confusing the target in
+ * case things are really fouled up. Doing so also ensures that all CM
+ * callbacks will have finished before a new QP is allocated.
*/
ret = srp_new_cm_id(target);
- if (ret)
- goto unblock;
-
- ret = srp_create_target_ib(target);
- if (ret)
- goto unblock;
+ /*
+ * Whether or not creating a new CM ID succeeded, create a new
+ * QP. This guarantees that all completion callback function
+ * invocations have finished before request resetting starts.
+ */
+ if (ret == 0)
+ ret = srp_create_target_ib(target);
+ else
+ srp_create_target_ib(target);
for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
struct srp_request *req = &target->req_ring[i];
@@ -728,11 +729,12 @@ static int srp_reconnect_target(struct srp_target_port *target)
for (i = 0; i < SRP_SQ_SIZE; ++i)
list_add(&target->tx_ring[i]->list, &target->free_tx);
- ret = srp_connect_target(target);
+ if (ret == 0)
+ ret = srp_connect_target(target);
-unblock:
scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
SDEV_TRANSPORT_OFFLINE);
+ target->transport_offline = !!ret;
if (ret)
goto err;
@@ -1352,6 +1354,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
unsigned long flags;
int len;
+ if (unlikely(target->transport_offline)) {
+ scmnd->result = DID_NO_CONNECT << 16;
+ scmnd->scsi_done(scmnd);
+ return 0;
+ }
+
spin_lock_irqsave(&target->lock, flags);
iu = __srp_get_tx_iu(target, SRP_IU_CMD);
if (!iu)
@@ -1695,6 +1703,9 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
+ if (!target->connected || target->qp_in_error)
+ return -1;
+
init_completion(&target->tsk_mgmt_done);
spin_lock_irq(&target->lock);
@@ -1736,7 +1747,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
- if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
+ if (!req || !srp_claim_req(target, req, scmnd))
return FAILED;
srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
SRP_TSK_ABORT_TASK);
@@ -1754,8 +1765,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
- if (target->qp_in_error)
- return FAILED;
if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
SRP_TSK_LUN_RESET))
return FAILED;
@@ -1972,7 +1981,6 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
spin_unlock(&host->target_lock);
target->state = SRP_TARGET_LIVE;
- target->connected = false;
scsi_scan_target(&target->scsi_host->shost_gendev,
0, target->scsi_id, SCAN_WILD_CARD, 0);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index de2d0b3c0bfe..66fbedda4571 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -140,6 +140,7 @@ struct srp_target_port {
unsigned int cmd_sg_cnt;
unsigned int indirect_size;
bool allow_ext_sg;
+ bool transport_offline;
/* Everything above this point is used in the hot path of
* command processing. Try to keep them packed into cachelines.
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index d89e7d392d1e..0e138ebcc768 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -30,7 +30,7 @@
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/input/matrix_keypad.h>
-#include <mach/clk.h>
+#include <linux/clk/tegra.h>
#define KBC_MAX_GPIO 24
#define KBC_MAX_KPENT 8
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index 369a39de4ff3..f9179b2585a9 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -100,9 +100,6 @@ static int max8925_onkey_probe(struct platform_device *pdev)
input->dev.parent = &pdev->dev;
input_set_capability(input, EV_KEY, KEY_POWER);
- irq[0] += chip->irq_base;
- irq[1] += chip->irq_base;
-
error = request_threaded_irq(irq[0], NULL, max8925_onkey_handler,
IRQF_ONESHOT, "onkey-down", info);
if (error < 0) {
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 560c243bfcaf..6e9cc765e0dc 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -36,6 +36,7 @@ config SERIO_I8042
config SERIO_SERPORT
tristate "Serial port line discipline"
default y
+ depends on TTY
help
Say Y here if you plan to use an input device (mouse, joystick,
tablet, 6dof) that communicates over the RS232 serial (COM) port.
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 01068987809d..5c514d0711d1 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -158,7 +158,7 @@ config TEGRA_IOMMU_GART
config TEGRA_IOMMU_SMMU
bool "Tegra SMMU IOMMU Support"
- depends on ARCH_TEGRA_3x_SOC && TEGRA_AHB
+ depends on ARCH_TEGRA && TEGRA_AHB
select IOMMU_API
help
Enables support for remapping discontiguous physical memory
@@ -187,4 +187,78 @@ config EXYNOS_IOMMU_DEBUG
Say N unless you need kernel log message for IOMMU debugging
+config SHMOBILE_IPMMU
+ bool
+
+config SHMOBILE_IPMMU_TLB
+ bool
+
+config SHMOBILE_IOMMU
+ bool "IOMMU for Renesas IPMMU/IPMMUI"
+ default n
+ depends on (ARM && ARCH_SHMOBILE)
+ select IOMMU_API
+ select ARM_DMA_USE_IOMMU
+ select SHMOBILE_IPMMU
+ select SHMOBILE_IPMMU_TLB
+ help
+ Support for Renesas IPMMU/IPMMUI. This option enables
+ remapping of DMA memory accesses from all of the IP blocks
+ on the ICB.
+
+ Warning: Drivers (including userspace drivers of UIO
+ devices) of the IP blocks on the ICB *must* use addresses
+ allocated from the IPMMU (iova) for DMA with this option
+ enabled.
+
+ If unsure, say N.
+
+choice
+ prompt "IPMMU/IPMMUI address space size"
+ default SHMOBILE_IOMMU_ADDRSIZE_2048MB
+ depends on SHMOBILE_IOMMU
+ help
+ This option sets IPMMU/IPMMUI address space size by
+ adjusting the 1st level page table size. The page table size
+ is calculated as follows:
+
+ page table size = number of page table entries * 4 bytes
+ number of page table entries = address space size / 1 MiB
+
+ For example, when the address space size is 2048 MiB, the
+ 1st level page table size is 8192 bytes.
+
+ config SHMOBILE_IOMMU_ADDRSIZE_2048MB
+ bool "2 GiB"
+
+ config SHMOBILE_IOMMU_ADDRSIZE_1024MB
+ bool "1 GiB"
+
+ config SHMOBILE_IOMMU_ADDRSIZE_512MB
+ bool "512 MiB"
+
+ config SHMOBILE_IOMMU_ADDRSIZE_256MB
+ bool "256 MiB"
+
+ config SHMOBILE_IOMMU_ADDRSIZE_128MB
+ bool "128 MiB"
+
+ config SHMOBILE_IOMMU_ADDRSIZE_64MB
+ bool "64 MiB"
+
+ config SHMOBILE_IOMMU_ADDRSIZE_32MB
+ bool "32 MiB"
+
+endchoice
+
+config SHMOBILE_IOMMU_L1SIZE
+ int
+ default 8192 if SHMOBILE_IOMMU_ADDRSIZE_2048MB
+ default 4096 if SHMOBILE_IOMMU_ADDRSIZE_1024MB
+ default 2048 if SHMOBILE_IOMMU_ADDRSIZE_512MB
+ default 1024 if SHMOBILE_IOMMU_ADDRSIZE_256MB
+ default 512 if SHMOBILE_IOMMU_ADDRSIZE_128MB
+ default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB
+ default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index f66b816d455c..ef0e5207ad69 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
+obj-$(CONFIG_SHMOBILE_IOMMU) += shmobile-iommu.o
+obj-$(CONFIG_SHMOBILE_IPMMU) += shmobile-ipmmu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index d33eaaf783ad..98f555dafb55 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3187,8 +3187,7 @@ int __init amd_iommu_init_dma_ops(void)
free_domains:
for_each_iommu(iommu) {
- if (iommu->default_dom)
- dma_ops_domain_free(iommu->default_dom);
+ dma_ops_domain_free(iommu->default_dom);
}
return ret;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index faf10ba1ed9a..b6ecddb63cd0 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1876,11 +1876,6 @@ static int amd_iommu_init_dma(void)
struct amd_iommu *iommu;
int ret;
- init_device_table_dma();
-
- for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
-
if (iommu_pass_through)
ret = amd_iommu_init_passthrough();
else
@@ -1889,6 +1884,11 @@ static int amd_iommu_init_dma(void)
if (ret)
return ret;
+ init_device_table_dma();
+
+ for_each_iommu(iommu)
+ iommu_flush_all_caches(iommu);
+
amd_iommu_init_api();
amd_iommu_init_notifier();
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 174bb654453d..dc7e478b7e5f 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1042,7 +1042,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
qi->desc = page_address(desc_page);
- qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
+ qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
if (!qi->desc_status) {
free_page((unsigned long) qi->desc);
kfree(qi);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 7fe44f83cc37..238a3caa949a 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -511,7 +511,7 @@ int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
return ret;
}
-bool exynos_sysmmu_disable(struct device *dev)
+static bool exynos_sysmmu_disable(struct device *dev)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
bool disabled;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 43d5c8b8e7ad..0099667a397e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4255,13 +4255,19 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
{
/*
* Mobile 4 Series Chipset neglects to set RWBF capability,
- * but needs it:
+ * but needs it. Same seems to hold for the desktop versions.
*/
printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
rwbf_quirk = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK (0xf << 8)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ddbdacad7768..b972d430d92b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -734,7 +734,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
size_t orig_size = size;
int ret = 0;
- if (unlikely(domain->ops->map == NULL))
+ if (unlikely(domain->ops->map == NULL ||
+ domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;
/* find out the minimum page size supported */
@@ -808,7 +809,8 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
size_t unmapped_page, unmapped = 0;
unsigned int min_pagesz;
- if (unlikely(domain->ops->unmap == NULL))
+ if (unlikely(domain->ops->unmap == NULL ||
+ domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;
/* find out the minimum page size supported */
@@ -850,6 +852,26 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
}
EXPORT_SYMBOL_GPL(iommu_unmap);
+
+int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
+ phys_addr_t paddr, u64 size)
+{
+ if (unlikely(domain->ops->domain_window_enable == NULL))
+ return -ENODEV;
+
+ return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size);
+}
+EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
+
+void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
+{
+ if (unlikely(domain->ops->domain_window_disable == NULL))
+ return;
+
+ return domain->ops->domain_window_disable(domain, wnd_nr);
+}
+EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
+
static int __init iommu_init(void)
{
iommu_group_kset = kset_create_and_add("iommu_groups",
@@ -861,13 +883,15 @@ static int __init iommu_init(void)
return 0;
}
-subsys_initcall(iommu_init);
+arch_initcall(iommu_init);
int iommu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
struct iommu_domain_geometry *geometry;
+ bool *paging;
int ret = 0;
+ u32 *count;
switch (attr) {
case DOMAIN_ATTR_GEOMETRY:
@@ -875,6 +899,19 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
*geometry = domain->geometry;
break;
+ case DOMAIN_ATTR_PAGING:
+ paging = data;
+ *paging = (domain->ops->pgsize_bitmap != 0UL);
+ break;
+ case DOMAIN_ATTR_WINDOWS:
+ count = data;
+
+ if (domain->ops->domain_get_windows != NULL)
+ *count = domain->ops->domain_get_windows(domain);
+ else
+ ret = -ENODEV;
+
+ break;
default:
if (!domain->ops->domain_get_attr)
return -EINVAL;
@@ -889,9 +926,26 @@ EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
int iommu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
- if (!domain->ops->domain_set_attr)
- return -EINVAL;
+ int ret = 0;
+ u32 *count;
- return domain->ops->domain_set_attr(domain, attr, data);
+ switch (attr) {
+ case DOMAIN_ATTR_WINDOWS:
+ count = data;
+
+ if (domain->ops->domain_set_windows != NULL)
+ ret = domain->ops->domain_set_windows(domain, *count);
+ else
+ ret = -ENODEV;
+
+ break;
+ default:
+ if (domain->ops->domain_set_attr == NULL)
+ return -EINVAL;
+
+ ret = domain->ops->domain_set_attr(domain, attr, data);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
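
Editor's note: the hunks above export two window helpers (iommu_domain_window_enable/disable) and route DOMAIN_ATTR_WINDOWS through the generic get/set attribute paths. Below is a minimal caller sketch, not part of the patch; the device pointer, window count and the 16 MiB region at 0x80000000 are illustrative assumptions only.

/*
 * Editor's sketch (not part of the patch): a hypothetical consumer of the
 * window API added above.  "my_dev", the window count and the mapped
 * physical region are assumptions, not taken from the patch.
 */
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/sizes.h>

static int my_setup_windowed_domain(struct device *my_dev)
{
	struct iommu_domain *dom;
	u32 windows = 4;	/* assumed number of DMA windows */
	int ret;

	dom = iommu_domain_alloc(my_dev->bus);
	if (!dom)
		return -ENOMEM;

	/* Reserve the windows via the new DOMAIN_ATTR_WINDOWS attribute. */
	ret = iommu_domain_set_attr(dom, DOMAIN_ATTR_WINDOWS, &windows);
	if (ret)
		goto out_free;

	/* Point window 0 at a 16 MiB physical region. */
	ret = iommu_domain_window_enable(dom, 0, 0x80000000, SZ_16M);
	if (ret)
		goto out_free;

	ret = iommu_attach_device(dom, my_dev);
	if (ret)
		iommu_domain_window_disable(dom, 0);
out_free:
	if (ret)
		iommu_domain_free(dom);
	return ret;
}
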
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index d33c980e9c20..6ac02fa5910f 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -146,7 +146,7 @@ static int iommu_enable(struct omap_iommu *obj)
struct platform_device *pdev = to_platform_device(obj->dev);
struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (!obj || !pdata)
+ if (!pdata)
return -EINVAL;
if (!arch_iommu)
@@ -172,7 +172,7 @@ static void iommu_disable(struct omap_iommu *obj)
struct platform_device *pdev = to_platform_device(obj->dev);
struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (!obj || !pdata)
+ if (!pdata)
return;
arch_iommu->disable(obj);
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
new file mode 100644
index 000000000000..b6e8b57cf0a8
--- /dev/null
+++ b/drivers/iommu/shmobile-iommu.c
@@ -0,0 +1,395 @@
+/*
+ * IOMMU for IPMMU/IPMMUI
+ * Copyright (C) 2012 Hideki EIRAKU
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <asm/dma-iommu.h>
+#include "shmobile-ipmmu.h"
+
+#define L1_SIZE CONFIG_SHMOBILE_IOMMU_L1SIZE
+#define L1_LEN (L1_SIZE / 4)
+#define L1_ALIGN L1_SIZE
+#define L2_SIZE SZ_1K
+#define L2_LEN (L2_SIZE / 4)
+#define L2_ALIGN L2_SIZE
+
+struct shmobile_iommu_domain_pgtable {
+ uint32_t *pgtable;
+ dma_addr_t handle;
+};
+
+struct shmobile_iommu_archdata {
+ struct list_head attached_list;
+ struct dma_iommu_mapping *iommu_mapping;
+ spinlock_t attach_lock;
+ struct shmobile_iommu_domain *attached;
+ int num_attached_devices;
+ struct shmobile_ipmmu *ipmmu;
+};
+
+struct shmobile_iommu_domain {
+ struct shmobile_iommu_domain_pgtable l1, l2[L1_LEN];
+ spinlock_t map_lock;
+ spinlock_t attached_list_lock;
+ struct list_head attached_list;
+};
+
+static struct shmobile_iommu_archdata *ipmmu_archdata;
+static struct kmem_cache *l1cache, *l2cache;
+
+static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable,
+ struct kmem_cache *cache, size_t size)
+{
+ pgtable->pgtable = kmem_cache_zalloc(cache, GFP_ATOMIC);
+ if (!pgtable->pgtable)
+ return -ENOMEM;
+ pgtable->handle = dma_map_single(NULL, pgtable->pgtable, size,
+ DMA_TO_DEVICE);
+ return 0;
+}
+
+static void pgtable_free(struct shmobile_iommu_domain_pgtable *pgtable,
+ struct kmem_cache *cache, size_t size)
+{
+ dma_unmap_single(NULL, pgtable->handle, size, DMA_TO_DEVICE);
+ kmem_cache_free(cache, pgtable->pgtable);
+}
+
+static uint32_t pgtable_read(struct shmobile_iommu_domain_pgtable *pgtable,
+ unsigned int index)
+{
+ return pgtable->pgtable[index];
+}
+
+static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable,
+ unsigned int index, unsigned int count, uint32_t val)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ pgtable->pgtable[index + i] = val;
+ dma_sync_single_for_device(NULL, pgtable->handle + index * sizeof(val),
+ sizeof(val) * count, DMA_TO_DEVICE);
+}
+
+static int shmobile_iommu_domain_init(struct iommu_domain *domain)
+{
+ struct shmobile_iommu_domain *sh_domain;
+ int i, ret;
+
+ sh_domain = kmalloc(sizeof(*sh_domain), GFP_KERNEL);
+ if (!sh_domain)
+ return -ENOMEM;
+ ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE);
+ if (ret < 0) {
+ kfree(sh_domain);
+ return ret;
+ }
+ for (i = 0; i < L1_LEN; i++)
+ sh_domain->l2[i].pgtable = NULL;
+ spin_lock_init(&sh_domain->map_lock);
+ spin_lock_init(&sh_domain->attached_list_lock);
+ INIT_LIST_HEAD(&sh_domain->attached_list);
+ domain->priv = sh_domain;
+ return 0;
+}
+
+static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
+{
+ struct shmobile_iommu_domain *sh_domain = domain->priv;
+ int i;
+
+ for (i = 0; i < L1_LEN; i++) {
+ if (sh_domain->l2[i].pgtable)
+ pgtable_free(&sh_domain->l2[i], l2cache, L2_SIZE);
+ }
+ pgtable_free(&sh_domain->l1, l1cache, L1_SIZE);
+ kfree(sh_domain);
+ domain->priv = NULL;
+}
+
+static int shmobile_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
+ struct shmobile_iommu_domain *sh_domain = domain->priv;
+ int ret = -EBUSY;
+
+ if (!archdata)
+ return -ENODEV;
+ spin_lock(&sh_domain->attached_list_lock);
+ spin_lock(&archdata->attach_lock);
+ if (archdata->attached != sh_domain) {
+ if (archdata->attached)
+ goto err;
+ ipmmu_tlb_set(archdata->ipmmu, sh_domain->l1.handle, L1_SIZE,
+ 0);
+ ipmmu_tlb_flush(archdata->ipmmu);
+ archdata->attached = sh_domain;
+ archdata->num_attached_devices = 0;
+ list_add(&archdata->attached_list, &sh_domain->attached_list);
+ }
+ archdata->num_attached_devices++;
+ ret = 0;
+err:
+ spin_unlock(&archdata->attach_lock);
+ spin_unlock(&sh_domain->attached_list_lock);
+ return ret;
+}
+
+static void shmobile_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
+ struct shmobile_iommu_domain *sh_domain = domain->priv;
+
+ if (!archdata)
+ return;
+ spin_lock(&sh_domain->attached_list_lock);
+ spin_lock(&archdata->attach_lock);
+ archdata->num_attached_devices--;
+ if (!archdata->num_attached_devices) {
+ ipmmu_tlb_set(archdata->ipmmu, 0, 0, 0);
+ ipmmu_tlb_flush(archdata->ipmmu);
+ archdata->attached = NULL;
+ list_del(&archdata->attached_list);
+ }
+ spin_unlock(&archdata->attach_lock);
+ spin_unlock(&sh_domain->attached_list_lock);
+}
+
+static void domain_tlb_flush(struct shmobile_iommu_domain *sh_domain)
+{
+ struct shmobile_iommu_archdata *archdata;
+
+ spin_lock(&sh_domain->attached_list_lock);
+ list_for_each_entry(archdata, &sh_domain->attached_list, attached_list)
+ ipmmu_tlb_flush(archdata->ipmmu);
+ spin_unlock(&sh_domain->attached_list_lock);
+}
+
+static int l2alloc(struct shmobile_iommu_domain *sh_domain,
+ unsigned int l1index)
+{
+ int ret;
+
+ if (!sh_domain->l2[l1index].pgtable) {
+ ret = pgtable_alloc(&sh_domain->l2[l1index], l2cache, L2_SIZE);
+ if (ret < 0)
+ return ret;
+ }
+ pgtable_write(&sh_domain->l1, l1index, 1,
+ sh_domain->l2[l1index].handle | 0x1);
+ return 0;
+}
+
+static void l2realfree(struct shmobile_iommu_domain_pgtable *l2)
+{
+ if (l2->pgtable)
+ pgtable_free(l2, l2cache, L2_SIZE);
+}
+
+static void l2free(struct shmobile_iommu_domain *sh_domain,
+ unsigned int l1index,
+ struct shmobile_iommu_domain_pgtable *l2)
+{
+ pgtable_write(&sh_domain->l1, l1index, 1, 0);
+ if (sh_domain->l2[l1index].pgtable) {
+ *l2 = sh_domain->l2[l1index];
+ sh_domain->l2[l1index].pgtable = NULL;
+ }
+}
+
+static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
+ struct shmobile_iommu_domain *sh_domain = domain->priv;
+ unsigned int l1index, l2index;
+ int ret;
+
+ l1index = iova >> 20;
+ switch (size) {
+ case SZ_4K:
+ l2index = (iova >> 12) & 0xff;
+ spin_lock(&sh_domain->map_lock);
+ ret = l2alloc(sh_domain, l1index);
+ if (!ret)
+ pgtable_write(&sh_domain->l2[l1index], l2index, 1,
+ paddr | 0xff2);
+ spin_unlock(&sh_domain->map_lock);
+ break;
+ case SZ_64K:
+ l2index = (iova >> 12) & 0xf0;
+ spin_lock(&sh_domain->map_lock);
+ ret = l2alloc(sh_domain, l1index);
+ if (!ret)
+ pgtable_write(&sh_domain->l2[l1index], l2index, 0x10,
+ paddr | 0xff1);
+ spin_unlock(&sh_domain->map_lock);
+ break;
+ case SZ_1M:
+ spin_lock(&sh_domain->map_lock);
+ l2free(sh_domain, l1index, &l2);
+ pgtable_write(&sh_domain->l1, l1index, 1, paddr | 0xc02);
+ spin_unlock(&sh_domain->map_lock);
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (!ret)
+ domain_tlb_flush(sh_domain);
+ l2realfree(&l2);
+ return ret;
+}
+
+static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
+ struct shmobile_iommu_domain *sh_domain = domain->priv;
+ unsigned int l1index, l2index;
+ uint32_t l2entry = 0;
+ size_t ret = 0;
+
+ l1index = iova >> 20;
+ if (!(iova & 0xfffff) && size >= SZ_1M) {
+ spin_lock(&sh_domain->map_lock);
+ l2free(sh_domain, l1index, &l2);
+ spin_unlock(&sh_domain->map_lock);
+ ret = SZ_1M;
+ goto done;
+ }
+ l2index = (iova >> 12) & 0xff;
+ spin_lock(&sh_domain->map_lock);
+ if (sh_domain->l2[l1index].pgtable)
+ l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
+ switch (l2entry & 3) {
+ case 1:
+ if (l2index & 0xf)
+ break;
+ pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, 0);
+ ret = SZ_64K;
+ break;
+ case 2:
+ pgtable_write(&sh_domain->l2[l1index], l2index, 1, 0);
+ ret = SZ_4K;
+ break;
+ }
+ spin_unlock(&sh_domain->map_lock);
+done:
+ if (ret)
+ domain_tlb_flush(sh_domain);
+ l2realfree(&l2);
+ return ret;
+}
+
+static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
+ unsigned long iova)
+{
+ struct shmobile_iommu_domain *sh_domain = domain->priv;
+ uint32_t l1entry = 0, l2entry = 0;
+ unsigned int l1index, l2index;
+
+ l1index = iova >> 20;
+ l2index = (iova >> 12) & 0xff;
+ spin_lock(&sh_domain->map_lock);
+ if (sh_domain->l2[l1index].pgtable)
+ l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
+ else
+ l1entry = pgtable_read(&sh_domain->l1, l1index);
+ spin_unlock(&sh_domain->map_lock);
+ switch (l2entry & 3) {
+ case 1:
+ return (l2entry & ~0xffff) | (iova & 0xffff);
+ case 2:
+ return (l2entry & ~0xfff) | (iova & 0xfff);
+ default:
+ if ((l1entry & 3) == 2)
+ return (l1entry & ~0xfffff) | (iova & 0xfffff);
+ return 0;
+ }
+}
+
+static int find_dev_name(struct shmobile_ipmmu *ipmmu, const char *dev_name)
+{
+ unsigned int i, n = ipmmu->num_dev_names;
+
+ for (i = 0; i < n; i++) {
+ if (strcmp(ipmmu->dev_names[i], dev_name) == 0)
+ return 1;
+ }
+ return 0;
+}
+
+static int shmobile_iommu_add_device(struct device *dev)
+{
+ struct shmobile_iommu_archdata *archdata = ipmmu_archdata;
+ struct dma_iommu_mapping *mapping;
+
+ if (!find_dev_name(archdata->ipmmu, dev_name(dev)))
+ return 0;
+ mapping = archdata->iommu_mapping;
+ if (!mapping) {
+ mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+ L1_LEN << 20, 0);
+ if (IS_ERR(mapping))
+ return PTR_ERR(mapping);
+ archdata->iommu_mapping = mapping;
+ }
+ dev->archdata.iommu = archdata;
+ if (arm_iommu_attach_device(dev, mapping))
+ pr_err("arm_iommu_attach_device failed\n");
+ return 0;
+}
+
+static struct iommu_ops shmobile_iommu_ops = {
+ .domain_init = shmobile_iommu_domain_init,
+ .domain_destroy = shmobile_iommu_domain_destroy,
+ .attach_dev = shmobile_iommu_attach_device,
+ .detach_dev = shmobile_iommu_detach_device,
+ .map = shmobile_iommu_map,
+ .unmap = shmobile_iommu_unmap,
+ .iova_to_phys = shmobile_iommu_iova_to_phys,
+ .add_device = shmobile_iommu_add_device,
+ .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K,
+};
+
+int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
+{
+ static struct shmobile_iommu_archdata *archdata;
+
+ l1cache = kmem_cache_create("shmobile-iommu-pgtable1", L1_SIZE,
+ L1_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
+ if (!l1cache)
+ return -ENOMEM;
+ l2cache = kmem_cache_create("shmobile-iommu-pgtable2", L2_SIZE,
+ L2_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
+ if (!l2cache) {
+ kmem_cache_destroy(l1cache);
+ return -ENOMEM;
+ }
+ archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
+ if (!archdata) {
+ kmem_cache_destroy(l1cache);
+ kmem_cache_destroy(l2cache);
+ return -ENOMEM;
+ }
+ spin_lock_init(&archdata->attach_lock);
+ archdata->attached = NULL;
+ archdata->ipmmu = ipmmu;
+ ipmmu_archdata = archdata;
+ bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
+ return 0;
+}
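
Editor's note: for reference, a minimal sketch of how shmobile_iommu_map() above decomposes an IOVA into first- and second-level table slots for its three supported sizes. The helper itself is illustrative only; the index arithmetic mirrors the driver code.

/*
 * Editor's sketch (not part of the patch): IOVA decomposition used by
 * shmobile_iommu_map() above.  1 MiB entries live in the L1 table; 4 KiB
 * and 64 KiB entries live in a per-megabyte L2 table (a 64 KiB mapping
 * writes 16 identical entries, hence the 0xf0 alignment of its slot).
 */
#include <linux/kernel.h>

static void example_iova_split(unsigned long iova)
{
	unsigned int l1index = iova >> 20;           /* 1 MiB section slot */
	unsigned int l2_4k   = (iova >> 12) & 0xff;  /* 4 KiB small-page slot */
	unsigned int l2_64k  = (iova >> 12) & 0xf0;  /* first of 16 large-page slots */

	pr_info("iova %#lx -> l1 %u, l2(4K) %u, l2(64K) %u\n",
		iova, l1index, l2_4k, l2_64k);
}
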
diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c
new file mode 100644
index 000000000000..8321f89596c4
--- /dev/null
+++ b/drivers/iommu/shmobile-ipmmu.c
@@ -0,0 +1,136 @@
+/*
+ * IPMMU/IPMMUI
+ * Copyright (C) 2012 Hideki EIRAKU
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/platform_data/sh_ipmmu.h>
+#include "shmobile-ipmmu.h"
+
+#define IMCTR1 0x000
+#define IMCTR2 0x004
+#define IMASID 0x010
+#define IMTTBR 0x014
+#define IMTTBCR 0x018
+
+#define IMCTR1_TLBEN (1 << 0)
+#define IMCTR1_FLUSH (1 << 1)
+
+static void ipmmu_reg_write(struct shmobile_ipmmu *ipmmu, unsigned long reg_off,
+ unsigned long data)
+{
+ iowrite32(data, ipmmu->ipmmu_base + reg_off);
+}
+
+void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu)
+{
+ if (!ipmmu)
+ return;
+
+ mutex_lock(&ipmmu->flush_lock);
+ if (ipmmu->tlb_enabled)
+ ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
+ else
+ ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
+ mutex_unlock(&ipmmu->flush_lock);
+}
+
+void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
+ int asid)
+{
+ if (!ipmmu)
+ return;
+
+ mutex_lock(&ipmmu->flush_lock);
+ switch (size) {
+ default:
+ ipmmu->tlb_enabled = 0;
+ break;
+ case 0x2000:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 1);
+ ipmmu->tlb_enabled = 1;
+ break;
+ case 0x1000:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 2);
+ ipmmu->tlb_enabled = 1;
+ break;
+ case 0x800:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 3);
+ ipmmu->tlb_enabled = 1;
+ break;
+ case 0x400:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 4);
+ ipmmu->tlb_enabled = 1;
+ break;
+ case 0x200:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 5);
+ ipmmu->tlb_enabled = 1;
+ break;
+ case 0x100:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 6);
+ ipmmu->tlb_enabled = 1;
+ break;
+ case 0x80:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 7);
+ ipmmu->tlb_enabled = 1;
+ break;
+ }
+ ipmmu_reg_write(ipmmu, IMTTBR, phys);
+ ipmmu_reg_write(ipmmu, IMASID, asid);
+ mutex_unlock(&ipmmu->flush_lock);
+}
+
+static int ipmmu_probe(struct platform_device *pdev)
+{
+ struct shmobile_ipmmu *ipmmu;
+ struct resource *res;
+ struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot get platform resources\n");
+ return -ENOENT;
+ }
+ ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL);
+ if (!ipmmu) {
+ dev_err(&pdev->dev, "cannot allocate device data\n");
+ return -ENOMEM;
+ }
+ mutex_init(&ipmmu->flush_lock);
+ ipmmu->dev = &pdev->dev;
+ ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!ipmmu->ipmmu_base) {
+ dev_err(&pdev->dev, "ioremap_nocache failed\n");
+ return -ENOMEM;
+ }
+ ipmmu->dev_names = pdata->dev_names;
+ ipmmu->num_dev_names = pdata->num_dev_names;
+ platform_set_drvdata(pdev, ipmmu);
+ ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */
+ ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */
+ ipmmu_iommu_init(ipmmu);
+ return 0;
+}
+
+static struct platform_driver ipmmu_driver = {
+ .probe = ipmmu_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ipmmu",
+ },
+};
+
+static int __init ipmmu_init(void)
+{
+ return platform_driver_register(&ipmmu_driver);
+}
+subsys_initcall(ipmmu_init);
diff --git a/drivers/iommu/shmobile-ipmmu.h b/drivers/iommu/shmobile-ipmmu.h
new file mode 100644
index 000000000000..4d53684673e1
--- /dev/null
+++ b/drivers/iommu/shmobile-ipmmu.h
@@ -0,0 +1,34 @@
+/* shmobile-ipmmu.h
+ *
+ * Copyright (C) 2012 Hideki EIRAKU
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __SHMOBILE_IPMMU_H__
+#define __SHMOBILE_IPMMU_H__
+
+struct shmobile_ipmmu {
+ struct device *dev;
+ void __iomem *ipmmu_base;
+ int tlb_enabled;
+ struct mutex flush_lock;
+ const char * const *dev_names;
+ unsigned int num_dev_names;
+};
+
+#ifdef CONFIG_SHMOBILE_IPMMU_TLB
+void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu);
+void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
+ int asid);
+int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu);
+#else
+static inline int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __SHMOBILE_IPMMU_H__ */
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 8219f1d596ee..86437575f94d 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -430,13 +430,11 @@ const struct dev_pm_ops tegra_gart_pm_ops = {
.resume = tegra_gart_resume,
};
-#ifdef CONFIG_OF
static struct of_device_id tegra_gart_of_match[] = {
{ .compatible = "nvidia,tegra20-gart", },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
-#endif
static struct platform_driver tegra_gart_driver = {
.probe = tegra_gart_probe,
@@ -445,7 +443,7 @@ static struct platform_driver tegra_gart_driver = {
.owner = THIS_MODULE,
.name = "tegra-gart",
.pm = &tegra_gart_pm_ops,
- .of_match_table = of_match_ptr(tegra_gart_of_match),
+ .of_match_table = tegra_gart_of_match,
},
};
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index f08dbcd2f175..b34e5fd7fd9e 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1,7 +1,7 @@
/*
* IOMMU API for SMMU in Tegra30
*
- * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -294,7 +294,11 @@ struct smmu_debugfs_info {
* Per SMMU device - IOMMU device
*/
struct smmu_device {
- void __iomem *regs[NUM_SMMU_REG_BANKS];
+ void __iomem *regbase; /* register offset base */
+ void __iomem **regs; /* register block start address array */
+ void __iomem **rege; /* register block end address array */
+ int nregs; /* number of register blocks */
+
unsigned long iovmm_base; /* remappable base address */
unsigned long page_count; /* total remappable size */
spinlock_t lock;
@@ -324,38 +328,37 @@ static struct smmu_device *smmu_handle; /* unique for a system */
/*
* SMMU register accessors
*/
+static inline bool smmu_valid_reg(struct smmu_device *smmu,
+ void __iomem *addr)
+{
+ int i;
+
+ for (i = 0; i < smmu->nregs; i++) {
+ if (addr < smmu->regs[i])
+ break;
+ if (addr <= smmu->rege[i])
+ return true;
+ }
+
+ return false;
+}
+
static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
{
- BUG_ON(offs < 0x10);
- if (offs < 0x3c)
- return readl(smmu->regs[0] + offs - 0x10);
- BUG_ON(offs < 0x1f0);
- if (offs < 0x200)
- return readl(smmu->regs[1] + offs - 0x1f0);
- BUG_ON(offs < 0x228);
- if (offs < 0x284)
- return readl(smmu->regs[2] + offs - 0x228);
- BUG();
+ void __iomem *addr = smmu->regbase + offs;
+
+ BUG_ON(!smmu_valid_reg(smmu, addr));
+
+ return readl(addr);
}
static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
{
- BUG_ON(offs < 0x10);
- if (offs < 0x3c) {
- writel(val, smmu->regs[0] + offs - 0x10);
- return;
- }
- BUG_ON(offs < 0x1f0);
- if (offs < 0x200) {
- writel(val, smmu->regs[1] + offs - 0x1f0);
- return;
- }
- BUG_ON(offs < 0x228);
- if (offs < 0x284) {
- writel(val, smmu->regs[2] + offs - 0x228);
- return;
- }
- BUG();
+ void __iomem *addr = smmu->regbase + offs;
+
+ BUG_ON(!smmu_valid_reg(smmu, addr));
+
+ writel(val, addr);
}
#define VA_PAGE_TO_PA(va, page) \
@@ -965,7 +968,6 @@ static ssize_t smmu_debugfs_stats_write(struct file *file,
{
struct smmu_debugfs_info *info;
struct smmu_device *smmu;
- struct dentry *dent;
int i;
enum {
_OFF = 0,
@@ -993,8 +995,7 @@ static ssize_t smmu_debugfs_stats_write(struct file *file,
if (i == ARRAY_SIZE(command))
return -EINVAL;
- dent = file->f_dentry;
- info = dent->d_inode->i_private;
+ info = file_inode(file)->i_private;
smmu = info->smmu;
offs = SMMU_CACHE_CONFIG(info->cache);
@@ -1029,15 +1030,11 @@ static ssize_t smmu_debugfs_stats_write(struct file *file,
static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
{
- struct smmu_debugfs_info *info;
- struct smmu_device *smmu;
- struct dentry *dent;
+ struct smmu_debugfs_info *info = s->private;
+ struct smmu_device *smmu = info->smmu;
int i;
const char * const stats[] = { "hit", "miss", };
- dent = d_find_alias(s->private);
- info = dent->d_inode->i_private;
- smmu = info->smmu;
for (i = 0; i < ARRAY_SIZE(stats); i++) {
u32 val;
@@ -1051,14 +1048,12 @@ static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
stats[i], val, offs);
}
seq_printf(s, "\n");
- dput(dent);
-
return 0;
}
static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
{
- return single_open(file, smmu_debugfs_stats_show, inode);
+ return single_open(file, smmu_debugfs_stats_show, inode->i_private);
}
static const struct file_operations smmu_debugfs_stats_fops = {
@@ -1171,7 +1166,13 @@ static int tegra_smmu_probe(struct platform_device *pdev)
return -ENOMEM;
}
- for (i = 0; i < ARRAY_SIZE(smmu->regs); i++) {
+ smmu->nregs = pdev->num_resources;
+ smmu->regs = devm_kzalloc(dev, 2 * smmu->nregs * sizeof(*smmu->regs),
+ GFP_KERNEL);
+ smmu->rege = smmu->regs + smmu->nregs;
+ if (!smmu->regs)
+ return -ENOMEM;
+ for (i = 0; i < smmu->nregs; i++) {
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
@@ -1180,7 +1181,10 @@ static int tegra_smmu_probe(struct platform_device *pdev)
smmu->regs[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(smmu->regs[i]))
return PTR_ERR(smmu->regs[i]);
+ smmu->rege[i] = smmu->regs[i] + resource_size(res) - 1;
}
+ /* Same as "mc" 1st regiter block start address */
+ smmu->regbase = (void __iomem *)((u32)smmu->regs[0] & PAGE_MASK);
err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
if (err)
@@ -1217,6 +1221,7 @@ static int tegra_smmu_probe(struct platform_device *pdev)
as->pte_attr = _PTE_ATTR;
spin_lock_init(&as->lock);
+ spin_lock_init(&as->client_lock);
INIT_LIST_HEAD(&as->client);
}
spin_lock_init(&smmu->lock);
@@ -1255,13 +1260,11 @@ const struct dev_pm_ops tegra_smmu_pm_ops = {
.resume = tegra_smmu_resume,
};
-#ifdef CONFIG_OF
static struct of_device_id tegra_smmu_of_match[] = {
{ .compatible = "nvidia,tegra30-smmu", },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
-#endif
static struct platform_driver tegra_smmu_driver = {
.probe = tegra_smmu_probe,
@@ -1270,7 +1273,7 @@ static struct platform_driver tegra_smmu_driver = {
.owner = THIS_MODULE,
.name = "tegra-smmu",
.pm = &tegra_smmu_pm_ops,
- .of_match_table = of_match_ptr(tegra_smmu_of_match),
+ .of_match_table = tegra_smmu_of_match,
},
};
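
Editor's note: the tegra-smmu hunks above replace the hard-coded offset windows with per-resource start/end arrays keyed off a single regbase. Below is a condensed sketch of that validation model, not part of the patch, assuming the register blocks are probed in ascending address order as in tegra_smmu_probe().

/*
 * Editor's sketch (not part of the patch): the range check behind
 * smmu_read()/smmu_write() above.  regs[]/rege[] hold the start and
 * (inclusive) end of each ioremapped register block, sorted by address.
 */
#include <linux/io.h>
#include <linux/types.h>

struct example_smmu {
	void __iomem *regbase;	/* common offset base */
	void __iomem **regs;	/* block start addresses */
	void __iomem **rege;	/* block end addresses */
	int nregs;
};

static bool example_valid_reg(struct example_smmu *smmu, void __iomem *addr)
{
	int i;

	for (i = 0; i < smmu->nregs; i++) {
		if (addr < smmu->regs[i])
			break;			/* fell into a hole between blocks */
		if (addr <= smmu->rege[i])
			return true;
	}
	return false;
}
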
diff --git a/drivers/ipack/devices/Kconfig b/drivers/ipack/devices/Kconfig
index 0b82fdc198c0..907a8cb48f2a 100644
--- a/drivers/ipack/devices/Kconfig
+++ b/drivers/ipack/devices/Kconfig
@@ -1,6 +1,6 @@
config SERIAL_IPOCTAL
tristate "IndustryPack IP-OCTAL uart support"
- depends on IPACK_BUS
+ depends on IPACK_BUS && TTY
help
This driver supports the IPOCTAL serial port device for the IndustryPack bus.
default n
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index 576d53d92677..141094e7c06e 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -20,7 +20,6 @@
#include <linux/serial.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
-#include <linux/atomic.h>
#include <linux/io.h>
#include <linux/ipack.h>
#include "ipoctal.h"
@@ -38,21 +37,19 @@ struct ipoctal_channel {
spinlock_t lock;
unsigned int pointer_read;
unsigned int pointer_write;
- atomic_t open;
struct tty_port tty_port;
union scc2698_channel __iomem *regs;
union scc2698_block __iomem *block_regs;
unsigned int board_id;
- unsigned char *board_write;
u8 isr_rx_rdy_mask;
u8 isr_tx_rdy_mask;
+ unsigned int rx_enable;
};
struct ipoctal {
struct ipack_device *dev;
unsigned int board_id;
struct ipoctal_channel channel[NR_CHANNELS];
- unsigned char write;
struct tty_driver *tty_drv;
u8 __iomem *mem8_space;
u8 __iomem *int_space;
@@ -64,28 +61,23 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
channel = dev_get_drvdata(tty->dev);
+ /*
+ * Enable RX. TX will be enabled when
+ * there is something to send
+ */
iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+ channel->rx_enable = 1;
return 0;
}
static int ipoctal_open(struct tty_struct *tty, struct file *file)
{
- int res;
struct ipoctal_channel *channel;
channel = dev_get_drvdata(tty->dev);
-
- if (atomic_read(&channel->open))
- return -EBUSY;
-
tty->driver_data = channel;
- res = tty_port_open(&channel->tty_port, tty, file);
- if (res)
- return res;
-
- atomic_inc(&channel->open);
- return 0;
+ return tty_port_open(&channel->tty_port, tty, file);
}
static void ipoctal_reset_stats(struct ipoctal_stats *stats)
@@ -111,9 +103,7 @@ static void ipoctal_close(struct tty_struct *tty, struct file *filp)
struct ipoctal_channel *channel = tty->driver_data;
tty_port_close(&channel->tty_port, tty, filp);
-
- if (atomic_dec_and_test(&channel->open))
- ipoctal_free_channel(channel);
+ ipoctal_free_channel(channel);
}
static int ipoctal_get_icount(struct tty_struct *tty,
@@ -133,15 +123,16 @@ static int ipoctal_get_icount(struct tty_struct *tty,
return 0;
}
-static void ipoctal_irq_rx(struct ipoctal_channel *channel,
- struct tty_struct *tty, u8 sr)
+static void ipoctal_irq_rx(struct ipoctal_channel *channel, u8 sr)
{
+ struct tty_port *port = &channel->tty_port;
unsigned char value;
- unsigned char flag = TTY_NORMAL;
+ unsigned char flag;
u8 isr;
do {
value = ioread8(&channel->regs->r.rhr);
+ flag = TTY_NORMAL;
/* Error: count statistics */
if (sr & SR_ERROR) {
iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@@ -149,7 +140,7 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel,
if (sr & SR_OVERRUN_ERROR) {
channel->stats.overrun_err++;
/* Overrun doesn't affect the current character*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
}
if (sr & SR_PARITY_ERROR) {
channel->stats.parity_err++;
@@ -165,7 +156,7 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel,
flag = TTY_BREAK;
}
}
- tty_insert_flip_char(tty, value, flag);
+ tty_insert_flip_char(port, value, flag);
/* Check if there are more characters in RX FIFO
* If there are more, the isr register for this channel
@@ -175,7 +166,7 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel,
sr = ioread8(&channel->regs->r.sr);
} while (isr & channel->isr_rx_rdy_mask);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
static void ipoctal_irq_tx(struct ipoctal_channel *channel)
@@ -183,10 +174,8 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
unsigned char value;
unsigned int *pointer_write = &channel->pointer_write;
- if (channel->nb_bytes <= 0) {
- channel->nb_bytes = 0;
+ if (channel->nb_bytes == 0)
return;
- }
value = channel->tty_port.xmit_buf[*pointer_write];
iowrite8(value, &channel->regs->w.thr);
@@ -194,55 +183,38 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
(*pointer_write)++;
*pointer_write = *pointer_write % PAGE_SIZE;
channel->nb_bytes--;
-
- if ((channel->nb_bytes == 0) &&
- (waitqueue_active(&channel->queue))) {
-
- if (channel->board_id != IPACK1_DEVICE_ID_SBS_OCTAL_485) {
- *channel->board_write = 1;
- wake_up_interruptible(&channel->queue);
- }
- }
}
static void ipoctal_irq_channel(struct ipoctal_channel *channel)
{
u8 isr, sr;
- struct tty_struct *tty;
- /* If there is no client, skip the check */
- if (!atomic_read(&channel->open))
- return;
-
- tty = tty_port_tty_get(&channel->tty_port);
- if (!tty)
- return;
+ spin_lock(&channel->lock);
/* The HW is organized in pair of channels. See which register we need
* to read from */
isr = ioread8(&channel->block_regs->r.isr);
sr = ioread8(&channel->regs->r.sr);
- /* In case of RS-485, change from TX to RX when finishing TX.
- * Half-duplex. */
- if ((channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) &&
- (sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
+ if ((sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
- iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
- iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
- *channel->board_write = 1;
- wake_up_interruptible(&channel->queue);
+ /* In case of RS-485, change from TX to RX when finishing TX.
+ * Half-duplex. */
+ if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
+ iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
+ iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+ channel->rx_enable = 1;
+ }
}
/* RX data */
if ((isr & channel->isr_rx_rdy_mask) && (sr & SR_RX_READY))
- ipoctal_irq_rx(channel, tty, sr);
+ ipoctal_irq_rx(channel, sr);
/* TX of each character */
if ((isr & channel->isr_tx_rdy_mask) && (sr & SR_TX_READY))
ipoctal_irq_tx(channel);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ spin_unlock(&channel->lock);
}
static irqreturn_t ipoctal_irq_handler(void *arg)
@@ -250,14 +222,14 @@ static irqreturn_t ipoctal_irq_handler(void *arg)
unsigned int i;
struct ipoctal *ipoctal = (struct ipoctal *) arg;
- /* Check all channels */
- for (i = 0; i < NR_CHANNELS; i++)
- ipoctal_irq_channel(&ipoctal->channel[i]);
-
/* Clear the IPack device interrupt */
readw(ipoctal->int_space + ACK_INT_REQ0);
readw(ipoctal->int_space + ACK_INT_REQ1);
+ /* Check all channels */
+ for (i = 0; i < NR_CHANNELS; i++)
+ ipoctal_irq_channel(&ipoctal->channel[i]);
+
return IRQ_HANDLED;
}
@@ -311,7 +283,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal->mem8_space =
devm_ioremap_nocache(&ipoctal->dev->dev,
region->start, 0x8000);
- if (!addr) {
+ if (!ipoctal->mem8_space) {
dev_err(&ipoctal->dev->dev,
"Unable to map slot [%d:%d] MEM8 space!\n",
bus_nr, slot);
@@ -324,7 +296,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
struct ipoctal_channel *channel = &ipoctal->channel[i];
channel->regs = chan_regs + i;
channel->block_regs = block_regs + (i >> 1);
- channel->board_write = &ipoctal->write;
channel->board_id = ipoctal->board_id;
if (i & 1) {
channel->isr_tx_rdy_mask = ISR_TxRDY_B;
@@ -335,6 +306,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
}
iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+ channel->rx_enable = 0;
iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
iowrite8(MR1_CHRL_8_BITS | MR1_ERROR_CHAR | MR1_RxINT_RxRDY,
@@ -407,8 +379,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal_reset_stats(&channel->stats);
channel->nb_bytes = 0;
- init_waitqueue_head(&channel->queue);
-
spin_lock_init(&channel->lock);
channel->pointer_read = 0;
channel->pointer_write = 0;
@@ -419,12 +389,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
continue;
}
dev_set_drvdata(tty_dev, channel);
-
- /*
- * Enable again the RX. TX will be enabled when
- * there is something to send
- */
- iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
}
return 0;
@@ -464,6 +428,7 @@ static int ipoctal_write_tty(struct tty_struct *tty,
/* As the IP-OCTAL 485 only supports half duplex, do it manually */
if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
iowrite8(CR_DISABLE_RX, &channel->regs->w.cr);
+ channel->rx_enable = 0;
iowrite8(CR_CMD_ASSERT_RTSN, &channel->regs->w.cr);
}
@@ -472,10 +437,6 @@ static int ipoctal_write_tty(struct tty_struct *tty,
* operations
*/
iowrite8(CR_ENABLE_TX, &channel->regs->w.cr);
- wait_event_interruptible(channel->queue, *channel->board_write);
- iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
-
- *channel->board_write = 0;
return char_copied;
}
@@ -627,8 +588,9 @@ static void ipoctal_set_termios(struct tty_struct *tty,
iowrite8(mr2, &channel->regs->w.mr);
iowrite8(csr, &channel->regs->w.csr);
- /* Enable again the RX */
- iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+ /* Re-enable RX only if it was enabled before */
+ if (channel->rx_enable)
+ iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
}
static void ipoctal_hangup(struct tty_struct *tty)
@@ -648,6 +610,7 @@ static void ipoctal_hangup(struct tty_struct *tty)
tty_port_hangup(&channel->tty_port);
iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+ channel->rx_enable = 0;
iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@@ -657,6 +620,22 @@ static void ipoctal_hangup(struct tty_struct *tty)
wake_up_interruptible(&channel->tty_port.open_wait);
}
+static void ipoctal_shutdown(struct tty_struct *tty)
+{
+ struct ipoctal_channel *channel = tty->driver_data;
+
+ if (channel == NULL)
+ return;
+
+ iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+ channel->rx_enable = 0;
+ iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
+ iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
+ iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
+ iowrite8(CR_CMD_RESET_MR, &channel->regs->w.cr);
+ clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags);
+}
+
static const struct tty_operations ipoctal_fops = {
.ioctl = NULL,
.open = ipoctal_open,
@@ -667,6 +646,7 @@ static const struct tty_operations ipoctal_fops = {
.chars_in_buffer = ipoctal_chars_in_buffer,
.get_icount = ipoctal_get_icount,
.hangup = ipoctal_hangup,
+ .shutdown = ipoctal_shutdown,
};
static int ipoctal_probe(struct ipack_device *dev)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 62ca575701d3..a350969e5efe 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -1,3 +1,30 @@
+config IRQCHIP
+ def_bool y
+ depends on OF_IRQ
+
+config ARM_GIC
+ bool
+ select IRQ_DOMAIN
+ select MULTI_IRQ_HANDLER
+
+config GIC_NON_BANKED
+ bool
+
+config ARM_VIC
+ bool
+ select IRQ_DOMAIN
+ select MULTI_IRQ_HANDLER
+
+config ARM_VIC_NR
+ int
+ default 4 if ARCH_S5PV210
+ default 3 if ARCH_S5PC100
+ default 2
+ depends on ARM_VIC
+ help
+ The maximum number of VICs available in the system, for
+ power management.
+
config VERSATILE_FPGA_IRQ
bool
select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index bf4609a5bd9d..e65fbf2cdf71 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,4 +1,9 @@
+obj-$(CONFIG_IRQCHIP) += irqchip.o
+
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
+obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o
-obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
+obj-$(CONFIG_ARM_GIC) += irq-gic.o
+obj-$(CONFIG_ARM_VIC) += irq-vic.o
+obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
new file mode 100644
index 000000000000..04d86a9803f4
--- /dev/null
+++ b/drivers/irqchip/exynos-combiner.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Combiner irqchip for EXYNOS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <asm/mach/irq.h>
+
+#include <plat/cpu.h>
+
+#include "irqchip.h"
+
+#define COMBINER_ENABLE_SET 0x0
+#define COMBINER_ENABLE_CLEAR 0x4
+#define COMBINER_INT_STATUS 0xC
+
+static DEFINE_SPINLOCK(irq_controller_lock);
+
+struct combiner_chip_data {
+ unsigned int irq_offset;
+ unsigned int irq_mask;
+ void __iomem *base;
+};
+
+static struct irq_domain *combiner_irq_domain;
+static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
+
+static inline void __iomem *combiner_base(struct irq_data *data)
+{
+ struct combiner_chip_data *combiner_data =
+ irq_data_get_irq_chip_data(data);
+
+ return combiner_data->base;
+}
+
+static void combiner_mask_irq(struct irq_data *data)
+{
+ u32 mask = 1 << (data->hwirq % 32);
+
+ __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
+}
+
+static void combiner_unmask_irq(struct irq_data *data)
+{
+ u32 mask = 1 << (data->hwirq % 32);
+
+ __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
+}
+
+static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned int cascade_irq, combiner_irq;
+ unsigned long status;
+
+ chained_irq_enter(chip, desc);
+
+ spin_lock(&irq_controller_lock);
+ status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
+ spin_unlock(&irq_controller_lock);
+ status &= chip_data->irq_mask;
+
+ if (status == 0)
+ goto out;
+
+ combiner_irq = __ffs(status);
+
+ cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
+ if (unlikely(cascade_irq >= NR_IRQS))
+ do_bad_IRQ(cascade_irq, desc);
+ else
+ generic_handle_irq(cascade_irq);
+
+ out:
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip combiner_chip = {
+ .name = "COMBINER",
+ .irq_mask = combiner_mask_irq,
+ .irq_unmask = combiner_unmask_irq,
+};
+
+static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
+{
+ unsigned int max_nr;
+
+ if (soc_is_exynos5250())
+ max_nr = EXYNOS5_MAX_COMBINER_NR;
+ else
+ max_nr = EXYNOS4_MAX_COMBINER_NR;
+
+ if (combiner_nr >= max_nr)
+ BUG();
+ if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
+ BUG();
+ irq_set_chained_handler(irq, combiner_handle_cascade_irq);
+}
+
+static void __init combiner_init_one(unsigned int combiner_nr,
+ void __iomem *base)
+{
+ combiner_data[combiner_nr].base = base;
+ combiner_data[combiner_nr].irq_offset = irq_find_mapping(
+ combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
+ combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
+
+ /* Disable all interrupts */
+ __raw_writel(combiner_data[combiner_nr].irq_mask,
+ base + COMBINER_ENABLE_CLEAR);
+}
+
+#ifdef CONFIG_OF
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (d->of_node != controller)
+ return -EINVAL;
+
+ if (intsize < 2)
+ return -EINVAL;
+
+ *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
+ *out_type = 0;
+
+ return 0;
+}
+#else
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ return -EINVAL;
+}
+#endif
+
+static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
+ irq_set_chip_data(irq, &combiner_data[hw >> 3]);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+ return 0;
+}
+
+static struct irq_domain_ops combiner_irq_domain_ops = {
+ .xlate = combiner_irq_domain_xlate,
+ .map = combiner_irq_domain_map,
+};
+
+void __init combiner_init(void __iomem *combiner_base,
+ struct device_node *np)
+{
+ int i, irq, irq_base;
+ unsigned int max_nr, nr_irq;
+
+ if (np) {
+ if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
+ pr_warning("%s: number of combiners not specified, "
+ "setting default as %d.\n",
+ __func__, EXYNOS4_MAX_COMBINER_NR);
+ max_nr = EXYNOS4_MAX_COMBINER_NR;
+ }
+ } else {
+ max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
+ EXYNOS4_MAX_COMBINER_NR;
+ }
+ nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
+
+ irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
+ if (IS_ERR_VALUE(irq_base)) {
+ irq_base = COMBINER_IRQ(0, 0);
+ pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
+ }
+
+ combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
+ &combiner_irq_domain_ops, &combiner_data);
+ if (WARN_ON(!combiner_irq_domain)) {
+ pr_warning("%s: irq domain init failed\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < max_nr; i++) {
+ combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
+ irq = IRQ_SPI(i);
+#ifdef CONFIG_OF
+ if (np)
+ irq = irq_of_parse_and_map(np, i);
+#endif
+ combiner_cascade_irq(i, irq);
+ }
+}
+
+#ifdef CONFIG_OF
+static int __init combiner_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ void __iomem *combiner_base;
+
+ combiner_base = of_iomap(np, 0);
+ if (!combiner_base) {
+ pr_err("%s: failed to map combiner registers\n", __func__);
+ return -ENXIO;
+ }
+
+ combiner_init(combiner_base, np);
+
+ return 0;
+}
+IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
+ combiner_of_init);
+#endif
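
Editor's note: a short illustration of the hwirq encoding used by the combiner driver above; MAX_IRQ_IN_COMBINER is assumed to be 8, consistent with the 0xff per-combiner mask and the "hw >> 3" lookup in combiner_irq_domain_map(). The helpers are illustrative only.

/*
 * Editor's sketch (not part of the patch): the two-cell specifier
 * <combiner pin> collapses to a linear hwirq, and the combiner that owns
 * a hwirq is recovered by dividing by the (assumed) 8 inputs per combiner.
 */
#define EXAMPLE_IRQS_PER_COMBINER	8

static unsigned long example_combiner_hwirq(unsigned int combiner_nr,
					    unsigned int pin)
{
	return combiner_nr * EXAMPLE_IRQS_PER_COMBINER + pin;
}

static unsigned int example_combiner_index(unsigned long hwirq)
{
	return hwirq / EXAMPLE_IRQS_PER_COMBINER;	/* same as hwirq >> 3 */
}
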
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
new file mode 100644
index 000000000000..644d72468423
--- /dev/null
+++ b/drivers/irqchip/irq-gic.c
@@ -0,0 +1,845 @@
+/*
+ * linux/arch/arm/common/gic.c
+ *
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Interrupt architecture for the GIC:
+ *
+ * o There is one Interrupt Distributor, which receives interrupts
+ * from system devices and sends them to the Interrupt Controllers.
+ *
+ * o There is one CPU Interface per CPU, which sends interrupts sent
+ * by the Distributor, and interrupts generated locally, to the
+ * associated CPU. The base address of the CPU interface is usually
+ * aliased so that the same address points to different chips depending
+ * on the CPU it is accessed from.
+ *
+ * Note that IRQs 0-31 are special - they are local to each CPU.
+ * As such, the enable set/clear, pending set/clear and active bit
+ * registers are banked per-cpu for these sources.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/irq.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
+
+union gic_base {
+ void __iomem *common_base;
+ void __percpu __iomem **percpu_base;
+};
+
+struct gic_chip_data {
+ union gic_base dist_base;
+ union gic_base cpu_base;
+#ifdef CONFIG_CPU_PM
+ u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+ u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+ u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+ u32 __percpu *saved_ppi_enable;
+ u32 __percpu *saved_ppi_conf;
+#endif
+ struct irq_domain *domain;
+ unsigned int gic_irqs;
+#ifdef CONFIG_GIC_NON_BANKED
+ void __iomem *(*get_base)(union gic_base *);
+#endif
+};
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
+/*
+ * The GIC mapping of CPU interfaces does not necessarily match
+ * the logical CPU numbering. Let's use a mapping as returned
+ * by the GIC itself.
+ */
+#define NR_GIC_CPU_IF 8
+static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+
+/*
+ * Supported arch specific GIC irq extension.
+ * Default make them NULL.
+ */
+struct irq_chip gic_arch_extn = {
+ .irq_eoi = NULL,
+ .irq_mask = NULL,
+ .irq_unmask = NULL,
+ .irq_retrigger = NULL,
+ .irq_set_type = NULL,
+ .irq_set_wake = NULL,
+};
+
+#ifndef MAX_GIC_NR
+#define MAX_GIC_NR 1
+#endif
+
+static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
+
+#ifdef CONFIG_GIC_NON_BANKED
+static void __iomem *gic_get_percpu_base(union gic_base *base)
+{
+ return *__this_cpu_ptr(base->percpu_base);
+}
+
+static void __iomem *gic_get_common_base(union gic_base *base)
+{
+ return base->common_base;
+}
+
+static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+{
+ return data->get_base(&data->dist_base);
+}
+
+static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
+{
+ return data->get_base(&data->cpu_base);
+}
+
+static inline void gic_set_base_accessor(struct gic_chip_data *data,
+ void __iomem *(*f)(union gic_base *))
+{
+ data->get_base = f;
+}
+#else
+#define gic_data_dist_base(d) ((d)->dist_base.common_base)
+#define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
+#define gic_set_base_accessor(d,f)
+#endif
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+ return gic_data_dist_base(gic_data);
+}
+
+static inline void __iomem *gic_cpu_base(struct irq_data *d)
+{
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+ return gic_data_cpu_base(gic_data);
+}
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+ return d->hwirq;
+}
+
+/*
+ * Routines to acknowledge, disable and enable interrupts
+ */
+static void gic_mask_irq(struct irq_data *d)
+{
+ u32 mask = 1 << (gic_irq(d) % 32);
+
+ raw_spin_lock(&irq_controller_lock);
+ writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
+ if (gic_arch_extn.irq_mask)
+ gic_arch_extn.irq_mask(d);
+ raw_spin_unlock(&irq_controller_lock);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+ u32 mask = 1 << (gic_irq(d) % 32);
+
+ raw_spin_lock(&irq_controller_lock);
+ if (gic_arch_extn.irq_unmask)
+ gic_arch_extn.irq_unmask(d);
+ writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
+ raw_spin_unlock(&irq_controller_lock);
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+ if (gic_arch_extn.irq_eoi) {
+ raw_spin_lock(&irq_controller_lock);
+ gic_arch_extn.irq_eoi(d);
+ raw_spin_unlock(&irq_controller_lock);
+ }
+
+ writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+ void __iomem *base = gic_dist_base(d);
+ unsigned int gicirq = gic_irq(d);
+ u32 enablemask = 1 << (gicirq % 32);
+ u32 enableoff = (gicirq / 32) * 4;
+ u32 confmask = 0x2 << ((gicirq % 16) * 2);
+ u32 confoff = (gicirq / 16) * 4;
+ bool enabled = false;
+ u32 val;
+
+ /* Interrupt configuration for SGIs can't be changed */
+ if (gicirq < 16)
+ return -EINVAL;
+
+ if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ raw_spin_lock(&irq_controller_lock);
+
+ if (gic_arch_extn.irq_set_type)
+ gic_arch_extn.irq_set_type(d, type);
+
+ val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+ if (type == IRQ_TYPE_LEVEL_HIGH)
+ val &= ~confmask;
+ else if (type == IRQ_TYPE_EDGE_RISING)
+ val |= confmask;
+
+ /*
+ * As recommended by the spec, disable the interrupt before changing
+ * the configuration
+ */
+ if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
+ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
+ enabled = true;
+ }
+
+ writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
+
+ if (enabled)
+ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+
+ raw_spin_unlock(&irq_controller_lock);
+
+ return 0;
+}
+
+static int gic_retrigger(struct irq_data *d)
+{
+ if (gic_arch_extn.irq_retrigger)
+ return gic_arch_extn.irq_retrigger(d);
+
+ return -ENXIO;
+}
+
+#ifdef CONFIG_SMP
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+ unsigned int shift = (gic_irq(d) % 4) * 8;
+ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ u32 val, mask, bit;
+
+ if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ mask = 0xff << shift;
+ bit = gic_cpu_map[cpu] << shift;
+
+ raw_spin_lock(&irq_controller_lock);
+ val = readl_relaxed(reg) & ~mask;
+ writel_relaxed(val | bit, reg);
+ raw_spin_unlock(&irq_controller_lock);
+
+ return IRQ_SET_MASK_OK;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int gic_set_wake(struct irq_data *d, unsigned int on)
+{
+ int ret = -ENXIO;
+
+ if (gic_arch_extn.irq_set_wake)
+ ret = gic_arch_extn.irq_set_wake(d, on);
+
+ return ret;
+}
+
+#else
+#define gic_set_wake NULL
+#endif
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+ u32 irqstat, irqnr;
+ struct gic_chip_data *gic = &gic_data[0];
+ void __iomem *cpu_base = gic_data_cpu_base(gic);
+
+ do {
+ irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+ irqnr = irqstat & ~0x1c00;
+
+ if (likely(irqnr > 15 && irqnr < 1021)) {
+ irqnr = irq_find_mapping(gic->domain, irqnr);
+ handle_IRQ(irqnr, regs);
+ continue;
+ }
+ if (irqnr < 16) {
+ writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+#ifdef CONFIG_SMP
+ handle_IPI(irqnr, regs);
+#endif
+ continue;
+ }
+ break;
+ } while (1);
+}
+
+static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct gic_chip_data *chip_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned int cascade_irq, gic_irq;
+ unsigned long status;
+
+ chained_irq_enter(chip, desc);
+
+ raw_spin_lock(&irq_controller_lock);
+ status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
+ raw_spin_unlock(&irq_controller_lock);
+
+ gic_irq = (status & 0x3ff);
+ if (gic_irq == 1023)
+ goto out;
+
+ cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
+ if (unlikely(gic_irq < 32 || gic_irq > 1020))
+ do_bad_IRQ(cascade_irq, desc);
+ else
+ generic_handle_irq(cascade_irq);
+
+ out:
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip gic_chip = {
+ .name = "GIC",
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_retrigger = gic_retrigger,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = gic_set_affinity,
+#endif
+ .irq_set_wake = gic_set_wake,
+};
+
+void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
+{
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+ if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
+ BUG();
+ irq_set_chained_handler(irq, gic_handle_cascade_irq);
+}
+
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+ void __iomem *base = gic_data_dist_base(gic);
+ u32 mask, i;
+
+ for (i = mask = 0; i < 32; i += 4) {
+ mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+ mask |= mask >> 16;
+ mask |= mask >> 8;
+ if (mask)
+ break;
+ }
+
+ if (!mask)
+ pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+ return mask;
+}
+
+static void __init gic_dist_init(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ u32 cpumask;
+ unsigned int gic_irqs = gic->gic_irqs;
+ void __iomem *base = gic_data_dist_base(gic);
+
+ writel_relaxed(0, base + GIC_DIST_CTRL);
+
+ /*
+ * Set all global interrupts to be level triggered, active low.
+ */
+ for (i = 32; i < gic_irqs; i += 16)
+ writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
+
+ /*
+ * Set all global interrupts to this CPU only.
+ */
+ cpumask = gic_get_cpumask(gic);
+ cpumask |= cpumask << 8;
+ cpumask |= cpumask << 16;
+ for (i = 32; i < gic_irqs; i += 4)
+ writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
+
+ /*
+ * Set priority on all global interrupts.
+ */
+ for (i = 32; i < gic_irqs; i += 4)
+ writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
+
+ /*
+ * Disable all interrupts. Leave the PPI and SGIs alone
+ * as these enables are banked registers.
+ */
+ for (i = 32; i < gic_irqs; i += 32)
+ writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
+
+ writel_relaxed(1, base + GIC_DIST_CTRL);
+}
+
+static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
+{
+ void __iomem *dist_base = gic_data_dist_base(gic);
+ void __iomem *base = gic_data_cpu_base(gic);
+ unsigned int cpu_mask, cpu = smp_processor_id();
+ int i;
+
+ /*
+ * Get what the GIC says our CPU mask is.
+ */
+ BUG_ON(cpu >= NR_GIC_CPU_IF);
+ cpu_mask = gic_get_cpumask(gic);
+ gic_cpu_map[cpu] = cpu_mask;
+
+ /*
+ * Clear our mask from the other map entries in case they're
+ * still undefined.
+ */
+ for (i = 0; i < NR_GIC_CPU_IF; i++)
+ if (i != cpu)
+ gic_cpu_map[i] &= ~cpu_mask;
+
+ /*
+ * Deal with the banked PPI and SGI interrupts - disable all
+ * PPI interrupts, ensure all SGI interrupts are enabled.
+ */
+ writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+ writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+ /*
+ * Set priority on PPI and SGI interrupts
+ */
+ for (i = 0; i < 32; i += 4)
+ writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+
+ writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
+ writel_relaxed(1, base + GIC_CPU_CTRL);
+}
+
+#ifdef CONFIG_CPU_PM
+/*
+ * Saves the GIC distributor registers during suspend or idle. Must be called
+ * with interrupts disabled but before powering down the GIC. After calling
+ * this function, no interrupts will be delivered by the GIC, and another
+ * platform-specific wakeup source must be enabled.
+ */
+static void gic_dist_save(unsigned int gic_nr)
+{
+ unsigned int gic_irqs;
+ void __iomem *dist_base;
+ int i;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+
+ if (!dist_base)
+ return;
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+ gic_data[gic_nr].saved_spi_conf[i] =
+ readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ gic_data[gic_nr].saved_spi_target[i] =
+ readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+ gic_data[gic_nr].saved_spi_enable[i] =
+ readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+}
+
+/*
+ * Restores the GIC distributor registers during resume or when coming out of
+ * idle. Must be called before enabling interrupts. If a level interrupt
+ * that occurred while the GIC was suspended is still present, it will be
+ * handled normally, but any edge interrupts that occurred will not be seen by
+ * the GIC and need to be handled by the platform-specific wakeup source.
+ */
+static void gic_dist_restore(unsigned int gic_nr)
+{
+ unsigned int gic_irqs;
+ unsigned int i;
+ void __iomem *dist_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+
+ if (!dist_base)
+ return;
+
+ writel_relaxed(0, dist_base + GIC_DIST_CTRL);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
+ dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ writel_relaxed(0xa0a0a0a0,
+ dist_base + GIC_DIST_PRI + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
+ dist_base + GIC_DIST_TARGET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
+ dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ writel_relaxed(1, dist_base + GIC_DIST_CTRL);
+}
+
+static void gic_cpu_save(unsigned int gic_nr)
+{
+ int i;
+ u32 *ptr;
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+ if (!dist_base || !cpu_base)
+ return;
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+}
+
+static void gic_cpu_restore(unsigned int gic_nr)
+{
+ int i;
+ u32 *ptr;
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+ if (!dist_base || !cpu_base)
+ return;
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+ writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
+
+ writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
+ writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
+}
+
+static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+{
+ int i;
+
+ for (i = 0; i < MAX_GIC_NR; i++) {
+#ifdef CONFIG_GIC_NON_BANKED
+ /* Skip over unused GICs */
+ if (!gic_data[i].get_base)
+ continue;
+#endif
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ gic_cpu_save(i);
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ gic_cpu_restore(i);
+ break;
+ case CPU_CLUSTER_PM_ENTER:
+ gic_dist_save(i);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ gic_dist_restore(i);
+ break;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gic_notifier_block = {
+ .notifier_call = gic_notifier,
+};
+
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+ gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+ sizeof(u32));
+ BUG_ON(!gic->saved_ppi_enable);
+
+ gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
+ sizeof(u32));
+ BUG_ON(!gic->saved_ppi_conf);
+
+ if (gic == &gic_data[0])
+ cpu_pm_register_notifier(&gic_notifier_block);
+}
+#else
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+}
+#endif
+
+#ifdef CONFIG_SMP
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+{
+ int cpu;
+ unsigned long map = 0;
+
+ /* Convert our logical CPU mask into a physical one. */
+ for_each_cpu(cpu, mask)
+ map |= 1 << cpu_logical_map(cpu);
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ dsb();
+
+ /* this always happens on GIC0 */
+ writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+#endif
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (hw < 32) {
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, &gic_chip,
+ handle_percpu_devid_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+ } else {
+ irq_set_chip_and_handler(irq, &gic_chip,
+ handle_fasteoi_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (d->of_node != controller)
+ return -EINVAL;
+ if (intsize < 3)
+ return -EINVAL;
+
+ /* Get the interrupt number and add 16 to skip over SGIs */
+ *out_hwirq = intspec[1] + 16;
+
+ /* For SPIs, we need to add 16 more to get the GIC irq ID number */
+ if (!intspec[0])
+ *out_hwirq += 16;
+
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
+
+const struct irq_domain_ops gic_irq_domain_ops = {
+ .map = gic_irq_domain_map,
+ .xlate = gic_irq_domain_xlate,
+};
+
+void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+ void __iomem *dist_base, void __iomem *cpu_base,
+ u32 percpu_offset, struct device_node *node)
+{
+ irq_hw_number_t hwirq_base;
+ struct gic_chip_data *gic;
+ int gic_irqs, irq_base, i;
+
+ BUG_ON(gic_nr >= MAX_GIC_NR);
+
+ gic = &gic_data[gic_nr];
+#ifdef CONFIG_GIC_NON_BANKED
+ if (percpu_offset) { /* Franken-GIC without banked registers... */
+ unsigned int cpu;
+
+ gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
+ gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
+ if (WARN_ON(!gic->dist_base.percpu_base ||
+ !gic->cpu_base.percpu_base)) {
+ free_percpu(gic->dist_base.percpu_base);
+ free_percpu(gic->cpu_base.percpu_base);
+ return;
+ }
+
+ for_each_possible_cpu(cpu) {
+ unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+ *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
+ *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
+ }
+
+ gic_set_base_accessor(gic, gic_get_percpu_base);
+ } else
+#endif
+ { /* Normal, sane GIC... */
+ WARN(percpu_offset,
+ "GIC_NON_BANKED not enabled, ignoring %08x offset!",
+ percpu_offset);
+ gic->dist_base.common_base = dist_base;
+ gic->cpu_base.common_base = cpu_base;
+ gic_set_base_accessor(gic, gic_get_common_base);
+ }
+
+ /*
+ * Initialize the CPU interface map to all CPUs.
+ * It will be refined as each CPU probes its ID.
+ */
+ for (i = 0; i < NR_GIC_CPU_IF; i++)
+ gic_cpu_map[i] = 0xff;
+
+ /*
+ * For primary GICs, skip over SGIs.
+ * For secondary GICs, skip over PPIs, too.
+ */
+ if (gic_nr == 0 && (irq_start & 31) > 0) {
+ hwirq_base = 16;
+ if (irq_start != -1)
+ irq_start = (irq_start & ~31) + 16;
+ } else {
+ hwirq_base = 32;
+ }
+
+ /*
+ * Find out how many interrupts are supported.
+ * The GIC only supports up to 1020 interrupt sources.
+ */
+ gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
+ gic_irqs = (gic_irqs + 1) * 32;
+ if (gic_irqs > 1020)
+ gic_irqs = 1020;
+ gic->gic_irqs = gic_irqs;
+
+ gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+ irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
+ if (IS_ERR_VALUE(irq_base)) {
+ WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
+ irq_start);
+ irq_base = irq_start;
+ }
+ gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
+ hwirq_base, &gic_irq_domain_ops, gic);
+ if (WARN_ON(!gic->domain))
+ return;
+
+#ifdef CONFIG_SMP
+ set_smp_cross_call(gic_raise_softirq);
+#endif
+
+ set_handle_irq(gic_handle_irq);
+
+ gic_chip.flags |= gic_arch_extn.flags;
+ gic_dist_init(gic);
+ gic_cpu_init(gic);
+ gic_pm_init(gic);
+}
+
+void __cpuinit gic_secondary_init(unsigned int gic_nr)
+{
+ BUG_ON(gic_nr >= MAX_GIC_NR);
+
+ gic_cpu_init(&gic_data[gic_nr]);
+}
+
+#ifdef CONFIG_OF
+static int gic_cnt __initdata = 0;
+
+int __init gic_of_init(struct device_node *node, struct device_node *parent)
+{
+ void __iomem *cpu_base;
+ void __iomem *dist_base;
+ u32 percpu_offset;
+ int irq;
+
+ if (WARN_ON(!node))
+ return -ENODEV;
+
+ dist_base = of_iomap(node, 0);
+ WARN(!dist_base, "unable to map gic dist registers\n");
+
+ cpu_base = of_iomap(node, 1);
+ WARN(!cpu_base, "unable to map gic cpu registers\n");
+
+ if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
+ percpu_offset = 0;
+
+ gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+
+ if (parent) {
+ irq = irq_of_parse_and_map(node, 0);
+ gic_cascade_irq(gic_cnt, irq);
+ }
+ gic_cnt++;
+ return 0;
+}
+IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
+IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
+IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
+
+#endif
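For reference, a non-DT platform would bring up its primary GIC through gic_init_bases() directly; the sketch below is illustrative only, and the register addresses, mapping sizes and Linux IRQ base are hypothetical values, not taken from this patch:

    static void __init board_gic_init(void)
    {
            /* Hypothetical distributor and CPU interface addresses. */
            void __iomem *dist_base = ioremap(0x2c001000, SZ_4K);
            void __iomem *cpu_base  = ioremap(0x2c002000, SZ_4K);

            if (!dist_base || !cpu_base)
                    return;

            /* Primary GIC (nr 0), Linux IRQs from 32, banked registers, no DT node. */
            gic_init_bases(0, 32, dist_base, cpu_base, 0, NULL);
    }

DT-based platforms reach the same code through gic_of_init(), via the IRQCHIP_DECLARE() entries just above.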
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
new file mode 100644
index 000000000000..3cf97aaebe40
--- /dev/null
+++ b/drivers/irqchip/irq-vic.c
@@ -0,0 +1,489 @@
+/*
+ * linux/arch/arm/common/vic.c
+ *
+ * Copyright (C) 1999 - 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/irqchip/arm-vic.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
+
+#define VIC_IRQ_STATUS 0x00
+#define VIC_FIQ_STATUS 0x04
+#define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */
+#define VIC_INT_SOFT 0x18
+#define VIC_INT_SOFT_CLEAR 0x1c
+#define VIC_PROTECT 0x20
+#define VIC_PL190_VECT_ADDR 0x30 /* PL190 only */
+#define VIC_PL190_DEF_VECT_ADDR 0x34 /* PL190 only */
+
+#define VIC_VECT_ADDR0 0x100 /* 0 to 15 (0..31 PL192) */
+#define VIC_VECT_CNTL0 0x200 /* 0 to 15 (0..31 PL192) */
+#define VIC_ITCR 0x300 /* VIC test control register */
+
+#define VIC_VECT_CNTL_ENABLE (1 << 5)
+
+#define VIC_PL192_VECT_ADDR 0xF00
+
+/**
+ * struct vic_device - VIC PM device
+ * @irq: The IRQ number for the base of the VIC.
+ * @base: The register base for the VIC.
+ * @valid_sources: A bitmask of valid interrupts
+ * @resume_sources: A bitmask of interrupts for resume.
+ * @resume_irqs: The IRQs enabled for resume.
+ * @int_select: Save for VIC_INT_SELECT.
+ * @int_enable: Save for VIC_INT_ENABLE.
+ * @soft_int: Save for VIC_INT_SOFT.
+ * @protect: Save for VIC_PROTECT.
+ * @domain: The IRQ domain for the VIC.
+ */
+struct vic_device {
+ void __iomem *base;
+ int irq;
+ u32 valid_sources;
+ u32 resume_sources;
+ u32 resume_irqs;
+ u32 int_select;
+ u32 int_enable;
+ u32 soft_int;
+ u32 protect;
+ struct irq_domain *domain;
+};
+
+/* we cannot allocate memory when VICs are initially registered */
+static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
+
+static int vic_id;
+
+static void vic_handle_irq(struct pt_regs *regs);
+
+/**
+ * vic_init2 - common initialisation code
+ * @base: Base of the VIC.
+ *
+ * Common initialisation code for registration
+ * and resume.
+*/
+static void vic_init2(void __iomem *base)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+ writel(VIC_VECT_CNTL_ENABLE | i, reg);
+ }
+
+ writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+}
+
+#ifdef CONFIG_PM
+static void resume_one_vic(struct vic_device *vic)
+{
+ void __iomem *base = vic->base;
+
+ printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base);
+
+ /* re-initialise static settings */
+ vic_init2(base);
+
+ writel(vic->int_select, base + VIC_INT_SELECT);
+ writel(vic->protect, base + VIC_PROTECT);
+
+ /* set the enabled ints and then clear the non-enabled */
+ writel(vic->int_enable, base + VIC_INT_ENABLE);
+ writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR);
+
+ /* and the same for the soft-int register */
+
+ writel(vic->soft_int, base + VIC_INT_SOFT);
+ writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void vic_resume(void)
+{
+ int id;
+
+ for (id = vic_id - 1; id >= 0; id--)
+ resume_one_vic(vic_devices + id);
+}
+
+static void suspend_one_vic(struct vic_device *vic)
+{
+ void __iomem *base = vic->base;
+
+ printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base);
+
+ vic->int_select = readl(base + VIC_INT_SELECT);
+ vic->int_enable = readl(base + VIC_INT_ENABLE);
+ vic->soft_int = readl(base + VIC_INT_SOFT);
+ vic->protect = readl(base + VIC_PROTECT);
+
+ /* set the interrupts (if any) that are used for
+ * resuming the system */
+
+ writel(vic->resume_irqs, base + VIC_INT_ENABLE);
+ writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static int vic_suspend(void)
+{
+ int id;
+
+ for (id = 0; id < vic_id; id++)
+ suspend_one_vic(vic_devices + id);
+
+ return 0;
+}
+
+struct syscore_ops vic_syscore_ops = {
+ .suspend = vic_suspend,
+ .resume = vic_resume,
+};
+
+/**
+ * vic_pm_init - initcall to register VIC PM
+ *
+ * This is called via late_initcall() to register
+ * the resources for the VICs due to the early
+ * nature of the VIC's registration.
+*/
+static int __init vic_pm_init(void)
+{
+ if (vic_id > 0)
+ register_syscore_ops(&vic_syscore_ops);
+
+ return 0;
+}
+late_initcall(vic_pm_init);
+#endif /* CONFIG_PM */
+
+static struct irq_chip vic_chip;
+
+static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct vic_device *v = d->host_data;
+
+ /* Skip invalid IRQs, only register handlers for the real ones */
+ if (!(v->valid_sources & (1 << hwirq)))
+ return -ENOTSUPP;
+ irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
+ irq_set_chip_data(irq, v->base);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ return 0;
+}
+
+/*
+ * Handle each interrupt in a single VIC. Returns non-zero if we've
+ * handled at least one interrupt. This reads the status register
+ * before handling each interrupt, which is necessary given that
+ * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
+ */
+static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
+{
+ u32 stat, irq;
+ int handled = 0;
+
+ while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
+ irq = ffs(stat) - 1;
+ handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
+ handled = 1;
+ }
+
+ return handled;
+}
+
+/*
+ * Keep iterating over all registered VICs until there are no pending
+ * interrupts.
+ */
+static asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
+{
+ int i, handled;
+
+ do {
+ for (i = 0, handled = 0; i < vic_id; ++i)
+ handled |= handle_one_vic(&vic_devices[i], regs);
+ } while (handled);
+}
+
+static struct irq_domain_ops vic_irqdomain_ops = {
+ .map = vic_irqdomain_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * vic_register() - Register a VIC.
+ * @base: The base address of the VIC.
+ * @irq: The base IRQ for the VIC.
+ * @valid_sources: bitmask of valid interrupts
+ * @resume_sources: bitmask of interrupts allowed for resume sources.
+ * @node: The device tree node associated with the VIC.
+ *
+ * Register the VIC with the system device tree so that it can be notified
+ * of suspend and resume requests and ensure that the correct actions are
+ * taken to re-instate the settings on resume.
+ *
+ * This also configures the IRQ domain for the VIC.
+ */
+static void __init vic_register(void __iomem *base, unsigned int irq,
+ u32 valid_sources, u32 resume_sources,
+ struct device_node *node)
+{
+ struct vic_device *v;
+ int i;
+
+ if (vic_id >= ARRAY_SIZE(vic_devices)) {
+ printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
+ return;
+ }
+
+ v = &vic_devices[vic_id];
+ v->base = base;
+ v->valid_sources = valid_sources;
+ v->resume_sources = resume_sources;
+ v->irq = irq;
+ set_handle_irq(vic_handle_irq);
+ vic_id++;
+ v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
+ &vic_irqdomain_ops, v);
+ /* create an IRQ mapping for each valid IRQ */
+ for (i = 0; i < fls(valid_sources); i++)
+ if (valid_sources & (1 << i))
+ irq_create_mapping(v->domain, i);
+}
+
+static void vic_ack_irq(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+ /* moreover, clear the soft-triggered bit, in case it was the reason */
+ writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void vic_mask_irq(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static void vic_unmask_irq(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE);
+}
+
+#if defined(CONFIG_PM)
+static struct vic_device *vic_from_irq(unsigned int irq)
+{
+ struct vic_device *v = vic_devices;
+ unsigned int base_irq = irq & ~31;
+ int id;
+
+ for (id = 0; id < vic_id; id++, v++) {
+ if (v->irq == base_irq)
+ return v;
+ }
+
+ return NULL;
+}
+
+static int vic_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct vic_device *v = vic_from_irq(d->irq);
+ unsigned int off = d->hwirq;
+ u32 bit = 1 << off;
+
+ if (!v)
+ return -EINVAL;
+
+ if (!(bit & v->resume_sources))
+ return -EINVAL;
+
+ if (on)
+ v->resume_irqs |= bit;
+ else
+ v->resume_irqs &= ~bit;
+
+ return 0;
+}
+#else
+#define vic_set_wake NULL
+#endif /* CONFIG_PM */
+
+static struct irq_chip vic_chip = {
+ .name = "VIC",
+ .irq_ack = vic_ack_irq,
+ .irq_mask = vic_mask_irq,
+ .irq_unmask = vic_unmask_irq,
+ .irq_set_wake = vic_set_wake,
+};
+
+static void __init vic_disable(void __iomem *base)
+{
+ writel(0, base + VIC_INT_SELECT);
+ writel(0, base + VIC_INT_ENABLE);
+ writel(~0, base + VIC_INT_ENABLE_CLEAR);
+ writel(0, base + VIC_ITCR);
+ writel(~0, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void __init vic_clear_interrupts(void __iomem *base)
+{
+ unsigned int i;
+
+ writel(0, base + VIC_PL190_VECT_ADDR);
+ for (i = 0; i < 19; i++) {
+ unsigned int value;
+
+ value = readl(base + VIC_PL190_VECT_ADDR);
+ writel(value, base + VIC_PL190_VECT_ADDR);
+ }
+}
+
+/*
+ * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
+ * The original cell has 32 interrupts, while the modified one has 64,
+ * replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case
+ * the probe function is called twice, with base set to offset 0x000
+ * and 0x020 within the page. We call this "second block".
+ */
+static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
+ u32 vic_sources, struct device_node *node)
+{
+ unsigned int i;
+ int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
+
+ /* Disable all interrupts initially. */
+ vic_disable(base);
+
+ /*
+ * Make sure we clear all existing interrupts. The vector registers
+ * in this cell are after the second block of general registers,
+ * so we can address them using standard offsets, but only from
+ * the second base address, which is 0x20 in the page
+ */
+ if (vic_2nd_block) {
+ vic_clear_interrupts(base);
+
+ /* ST has 16 vectors as well, but we don't enable them for now */
+ for (i = 0; i < 16; i++) {
+ void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+ writel(0, reg);
+ }
+
+ writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+ }
+
+ vic_register(base, irq_start, vic_sources, 0, node);
+}
+
+void __init __vic_init(void __iomem *base, int irq_start,
+ u32 vic_sources, u32 resume_sources,
+ struct device_node *node)
+{
+ unsigned int i;
+ u32 cellid = 0;
+ enum amba_vendor vendor;
+
+ /* Identify which VIC cell this one is, by reading the ID */
+ for (i = 0; i < 4; i++) {
+ void __iomem *addr;
+ addr = (void __iomem *)((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
+ cellid |= (readl(addr) & 0xff) << (8 * i);
+ }
+ vendor = (cellid >> 12) & 0xff;
+ printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n",
+ base, cellid, vendor);
+
+ switch(vendor) {
+ case AMBA_VENDOR_ST:
+ vic_init_st(base, irq_start, vic_sources, node);
+ return;
+ default:
+ printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
+ /* fall through */
+ case AMBA_VENDOR_ARM:
+ break;
+ }
+
+ /* Disable all interrupts initially. */
+ vic_disable(base);
+
+ /* Make sure we clear all existing interrupts */
+ vic_clear_interrupts(base);
+
+ vic_init2(base);
+
+ vic_register(base, irq_start, vic_sources, resume_sources, node);
+}
+
+/**
+ * vic_init() - initialise a vectored interrupt controller
+ * @base: iomem base address
+ * @irq_start: starting interrupt number, must be a multiple of 32
+ * @vic_sources: bitmask of interrupt sources to allow
+ * @resume_sources: bitmask of interrupt sources to allow for resume
+ */
+void __init vic_init(void __iomem *base, unsigned int irq_start,
+ u32 vic_sources, u32 resume_sources)
+{
+ __vic_init(base, irq_start, vic_sources, resume_sources, NULL);
+}
+
+#ifdef CONFIG_OF
+int __init vic_of_init(struct device_node *node, struct device_node *parent)
+{
+ void __iomem *regs;
+
+ if (WARN(parent, "non-root VICs are not supported"))
+ return -EINVAL;
+
+ regs = of_iomap(node, 0);
+ if (WARN_ON(!regs))
+ return -EIO;
+
+ /*
+ * Passing 0 as first IRQ makes the simple domain allocate descriptors
+ */
+ __vic_init(regs, 0, ~0, ~0, node);
+
+ return 0;
+}
+IRQCHIP_DECLARE(arm_pl190_vic, "arm,pl190-vic", vic_of_init);
+IRQCHIP_DECLARE(arm_pl192_vic, "arm,pl192-vic", vic_of_init);
+IRQCHIP_DECLARE(arm_versatile_vic, "arm,versatile-vic", vic_of_init);
+#endif /* CONFIG_OF */
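A minimal sketch of how a non-DT platform registers a VIC with the driver above; VA_VIC0_BASE and IRQ_VIC0_START are hypothetical platform symbols, and the source/resume masks are illustrative:

    static void __init board_vic_init(void)
    {
            /* Expose all 32 sources, allow none of them as resume/wakeup sources. */
            vic_init(VA_VIC0_BASE, IRQ_VIC0_START, ~0U, 0);
    }

DT platforms instead go through vic_of_init(), which maps the registers with of_iomap() and lets the simple IRQ domain allocate descriptors.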
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
new file mode 100644
index 000000000000..f496afce29de
--- /dev/null
+++ b/drivers/irqchip/irqchip.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2012 Thomas Petazzoni
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/of_irq.h>
+
+#include "irqchip.h"
+
+/*
+ * This special of_device_id is the sentinel at the end of the
+ * of_device_id[] array of all irqchips. It is automatically placed at
+ * the end of the array by the linker, thanks to being part of a
+ * special section.
+ */
+static const struct of_device_id
+irqchip_of_match_end __used __section(__irqchip_of_end);
+
+extern struct of_device_id __irqchip_begin[];
+
+void __init irqchip_init(void)
+{
+ of_irq_init(__irqchip_begin);
+}
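On ARM, irqchip_init() is expected to be wired in as the platform's init_irq hook; a hedged sketch, where the machine name and compatible string are made up for illustration:

    static const char * const example_dt_compat[] = {
            "vendor,example-board",         /* hypothetical compatible string */
            NULL,
    };

    DT_MACHINE_START(EXAMPLE_DT, "Example DT board")
            .init_irq  = irqchip_init,
            .dt_compat = example_dt_compat,
    MACHINE_END

of_irq_init() then walks the table starting at __irqchip_begin, matching each interrupt-controller node against the compatible strings registered with IRQCHIP_DECLARE() and calling the associated init functions in parent-before-child order.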
diff --git a/drivers/irqchip/irqchip.h b/drivers/irqchip/irqchip.h
new file mode 100644
index 000000000000..e445ba2d6add
--- /dev/null
+++ b/drivers/irqchip/irqchip.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2012 Thomas Petazzoni
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _IRQCHIP_H
+#define _IRQCHIP_H
+
+/*
+ * This macro must be used by the different irqchip drivers to declare
+ * the association between their DT compatible string and their
+ * initialization function.
+ *
+ * @name: name that must be unique across all IRQCHIP_DECLARE of the
+ * same file.
+ * @compstr: compatible string of the irqchip driver
+ * @fn: initialization function
+ */
+#define IRQCHIP_DECLARE(name,compstr,fn) \
+ static const struct of_device_id irqchip_of_match_##name \
+ __used __section(__irqchip_of_table) \
+ = { .compatible = compstr, .data = fn }
+
+#endif
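Usage follows the pattern already added elsewhere in this series; for instance, the GIC driver above declares:

    IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);

Each declaration drops a struct of_device_id into the __irqchip_of_table linker section; the sentinel in irqchip.c terminates the array beginning at __irqchip_begin, which irqchip_init() hands to of_irq_init().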
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 80e1d2fd9d4c..8527743b5cef 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -25,6 +25,8 @@
#include <linux/of_irq.h>
#include <linux/spinlock.h>
+#include "irqchip.h"
+
static DEFINE_SPINLOCK(lock);
/* spear300 shared irq registers offsets and masks */
@@ -300,6 +302,7 @@ int __init spear300_shirq_of_init(struct device_node *np,
return shirq_init(spear300_shirq_blocks,
ARRAY_SIZE(spear300_shirq_blocks), np);
}
+IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init);
int __init spear310_shirq_of_init(struct device_node *np,
struct device_node *parent)
@@ -307,6 +310,7 @@ int __init spear310_shirq_of_init(struct device_node *np,
return shirq_init(spear310_shirq_blocks,
ARRAY_SIZE(spear310_shirq_blocks), np);
}
+IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init);
int __init spear320_shirq_of_init(struct device_node *np,
struct device_node *parent)
@@ -314,3 +318,4 @@ int __init spear320_shirq_of_init(struct device_node *np,
return shirq_init(spear320_shirq_blocks,
ARRAY_SIZE(spear320_shirq_blocks), np);
}
+IRQCHIP_DECLARE(spear320_shirq, "st,spear320-shirq", spear320_shirq_of_init);
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 86cd75a0e84d..ef661acdda17 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -22,6 +22,7 @@ if ISDN
menuconfig ISDN_I4L
tristate "Old ISDN4Linux (deprecated)"
+ depends on TTY
---help---
This driver allows you to use an ISDN adapter for networking
connections and as dialin/out device. The isdn-tty's have a built
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index 15c3ffd9d860..f04686580040 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -18,6 +18,7 @@ config CAPI_TRACE
config ISDN_CAPI_MIDDLEWARE
bool "CAPI2.0 Middleware support"
+ depends on TTY
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
index b18a92c32184..dde5e09e6267 100644
--- a/drivers/isdn/gigaset/Kconfig
+++ b/drivers/isdn/gigaset/Kconfig
@@ -1,5 +1,6 @@
menuconfig ISDN_DRV_GIGASET
tristate "Siemens Gigaset support"
+ depends on TTY
select CRC_CCITT
select BITREVERSE
help
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 20b7e7a1190f..e2b539675b66 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -134,7 +134,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
if (cs->port.count == 1) {
tty_port_tty_set(&cs->port, tty);
- tty->low_latency = 1;
+ cs->port.low_latency = 1;
}
mutex_unlock(&cs->mutex);
@@ -546,16 +546,8 @@ void gigaset_if_free(struct cardstate *cs)
void gigaset_if_receive(struct cardstate *cs,
unsigned char *buffer, size_t len)
{
- struct tty_struct *tty = tty_port_tty_get(&cs->port);
-
- if (tty == NULL) {
- gig_dbg(DEBUG_IF, "receive on closed device");
- return;
- }
-
- tty_insert_flip_string(tty, buffer, len);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(&cs->port, buffer, len);
+ tty_flip_buffer_push(&cs->port);
}
EXPORT_SYMBOL_GPL(gigaset_if_receive);
diff --git a/drivers/isdn/hardware/eicon/divacapi.h b/drivers/isdn/hardware/eicon/divacapi.h
index 3942efbbfb58..a315a2914d70 100644
--- a/drivers/isdn/hardware/eicon/divacapi.h
+++ b/drivers/isdn/hardware/eicon/divacapi.h
@@ -422,11 +422,11 @@ struct _DIVA_CAPI_ADAPTER {
#define LAPD 6 /* lapd (Q.921) */
#define X25_L2 7 /* x.25 layer-2 */
#define V120_L2 8 /* V.120 layer-2 protocol */
-#define V42_IN 9 /* V.42 layer-2 protocol, incomming */
+#define V42_IN 9 /* V.42 layer-2 protocol, incoming */
#define V42 10 /* V.42 layer-2 protocol */
#define MDM_ATP 11 /* AT Parser built in the L2 */
#define X75_V42BIS 12 /* ISO7776 (X.75 SLP) modified to support V.42 bis compression */
-#define RTPL2_IN 13 /* RTP layer-2 protocol, incomming */
+#define RTPL2_IN 13 /* RTP layer-2 protocol, incoming */
#define RTPL2 14 /* RTP layer-2 protocol */
#define V120_V42BIS 15 /* V.120 layer-2 protocol supporting V.42 bis compression */
@@ -1125,7 +1125,7 @@ extern word li_total_channels;
| Direction | word | Enable compression/decompression for |
| | | 0: All direction |
| | | 1: disable outgoing data |
- | | | 2: disable incomming data |
+ | | | 2: disable incoming data |
| | | 3: disable both direction (default) |
+---------------------+------+-----------------------------------------+
| Number of code | word | Parameter P1 of V.42bis in accordance |
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index af4fd3d036c1..3a4165c61196 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -145,7 +145,7 @@ void remove_divas_proc(void)
static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
+ diva_os_xdi_adapter_t *a = PDE(file_inode(file))->data;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
if ((count == 1) || (count == 2)) {
@@ -172,7 +172,7 @@ static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
static ssize_t d_l1_down_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
+ diva_os_xdi_adapter_t *a = PDE(file_inode(file))->data;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
if ((count == 1) || (count == 2)) {
@@ -251,7 +251,7 @@ static const struct file_operations grp_opt_proc_fops = {
static ssize_t info_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
+ diva_os_xdi_adapter_t *a = PDE(file_inode(file))->data;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
char c[4];
diff --git a/drivers/isdn/hardware/eicon/pc.h b/drivers/isdn/hardware/eicon/pc.h
index 889dc984bbca..329c0c26abfb 100644
--- a/drivers/isdn/hardware/eicon/pc.h
+++ b/drivers/isdn/hardware/eicon/pc.h
@@ -419,11 +419,11 @@ struct dual
#define LAPD 6 /* lapd (Q.921) */
#define X25_L2 7 /* x.25 layer-2 */
#define V120_L2 8 /* V.120 layer-2 protocol */
-#define V42_IN 9 /* V.42 layer-2 protocol, incomming */
+#define V42_IN 9 /* V.42 layer-2 protocol, incoming */
#define V42 10 /* V.42 layer-2 protocol */
#define MDM_ATP 11 /* AT Parser built in the L2 */
#define X75_V42BIS 12 /* x.75 with V.42bis */
-#define RTPL2_IN 13 /* RTP layer-2 protocol, incomming */
+#define RTPL2_IN 13 /* RTP layer-2 protocol, incoming */
#define RTPL2 14 /* RTP layer-2 protocol */
#define V120_V42BIS 15 /* V.120 asynchronous mode supporting V.42bis compression */
#define LISTENER 27 /* Layer 2 to listen line */
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index eadc1cd34a20..b8611e3e5e74 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -76,6 +76,7 @@ config MISDN_NETJET
tristate "Support for NETJet cards"
depends on MISDN
depends on PCI
+ depends on TTY
select MISDN_IPAC
select ISDN_HDLC
select ISDN_I4L
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 88e4f0ee073c..9a3ce93665c5 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -173,7 +173,7 @@ hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
{
struct log_data *inf;
int len;
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
struct procdata *pd = NULL;
hysdn_card *card;
@@ -319,7 +319,7 @@ static unsigned int
hysdn_log_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
hysdn_card *card;
struct procdata *pd = NULL;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index e2a945ee9f05..9bb12ba3191f 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -876,7 +876,7 @@ isdn_readbchan(int di, int channel, u_char *buf, u_char *fp, int len, wait_queue
* of the mapping (di,ch)<->minor, happen during the sleep? --he
*/
int
-isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
+isdn_readbchan_tty(int di, int channel, struct tty_port *port, int cisco_hack)
{
int count;
int count_pull;
@@ -891,7 +891,7 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
if (skb_queue_empty(&dev->drv[di]->rpqueue[channel]))
return 0;
- len = tty_buffer_request_room(tty, dev->drv[di]->rcvcount[channel]);
+ len = tty_buffer_request_room(port, dev->drv[di]->rcvcount[channel]);
if (len == 0)
return len;
@@ -912,7 +912,7 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
while ((count_pull < skb->len) && (len > 0)) {
/* push every character but the last to the tty buffer directly */
if (count_put)
- tty_insert_flip_char(tty, last, TTY_NORMAL);
+ tty_insert_flip_char(port, last, TTY_NORMAL);
len--;
if (dev->drv[di]->DLEflag & DLEmask) {
last = DLE;
@@ -940,7 +940,7 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
}
count_put = count_pull;
if (count_put > 1)
- tty_insert_flip_string(tty, skb->data, count_put - 1);
+ tty_insert_flip_string(port, skb->data, count_put - 1);
last = skb->data[count_put - 1];
len -= count_put;
#ifdef CONFIG_ISDN_AUDIO
@@ -952,16 +952,16 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
* Now we can dequeue it.
*/
if (cisco_hack)
- tty_insert_flip_char(tty, last, 0xFF);
+ tty_insert_flip_char(port, last, 0xFF);
else
- tty_insert_flip_char(tty, last, TTY_NORMAL);
+ tty_insert_flip_char(port, last, TTY_NORMAL);
#ifdef CONFIG_ISDN_AUDIO
ISDN_AUDIO_SKB_LOCK(skb) = 0;
#endif
skb = skb_dequeue(&dev->drv[di]->rpqueue[channel]);
dev_kfree_skb(skb);
} else {
- tty_insert_flip_char(tty, last, TTY_NORMAL);
+ tty_insert_flip_char(port, last, TTY_NORMAL);
/* Not yet emptied this buff, so it
* must stay in the queue, for further calls
* but we pull off the data we got until now.
@@ -1058,7 +1058,7 @@ isdn_info_update(void)
static ssize_t
isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off)
{
- uint minor = iminor(file->f_path.dentry->d_inode);
+ uint minor = iminor(file_inode(file));
int len = 0;
int drvidx;
int chidx;
@@ -1165,7 +1165,7 @@ out:
static ssize_t
isdn_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
{
- uint minor = iminor(file->f_path.dentry->d_inode);
+ uint minor = iminor(file_inode(file));
int drvidx;
int chidx;
int retval;
@@ -1228,7 +1228,7 @@ static unsigned int
isdn_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
int drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL);
mutex_lock(&isdn_mutex);
@@ -1269,7 +1269,7 @@ out:
static int
isdn_ioctl(struct file *file, uint cmd, ulong arg)
{
- uint minor = iminor(file->f_path.dentry->d_inode);
+ uint minor = iminor(file_inode(file));
isdn_ctrl c;
int drvidx;
int ret;
diff --git a/drivers/isdn/i4l/isdn_common.h b/drivers/isdn/i4l/isdn_common.h
index 9a471f62e1d4..2260ef07ab9c 100644
--- a/drivers/isdn/i4l/isdn_common.h
+++ b/drivers/isdn/i4l/isdn_common.h
@@ -37,7 +37,7 @@ extern void isdn_timer_ctrl(int tf, int onoff);
extern void isdn_unexclusive_channel(int di, int ch);
extern int isdn_getnum(char **);
extern int isdn_readbchan(int, int, u_char *, u_char *, int, wait_queue_head_t *);
-extern int isdn_readbchan_tty(int, int, struct tty_struct *, int);
+extern int isdn_readbchan_tty(int, int, struct tty_port *, int);
extern int isdn_get_free_channel(int, int, int, int, int, char *);
extern int isdn_writebuf_skb_stub(int, int, int, struct sk_buff *);
extern int register_isdn(isdn_if *i);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 61d78fa03b1a..38ceac5053a0 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -668,7 +668,7 @@ isdn_ppp_poll(struct file *file, poll_table *wait)
if (is->debug & 0x2)
printk(KERN_DEBUG "isdn_ppp_poll: minor: %d\n",
- iminor(file->f_path.dentry->d_inode));
+ iminor(file_inode(file)));
/* just registers wait_queue hook. This doesn't really wait. */
poll_wait(file, &is->wq, wait);
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index e09dc8a5e743..d8a7d8323414 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -60,18 +60,14 @@ static int si2bit[8] =
static int
isdn_tty_try_read(modem_info *info, struct sk_buff *skb)
{
+ struct tty_port *port = &info->port;
int c;
int len;
- struct tty_struct *tty;
char last;
if (!info->online)
return 0;
- tty = info->port.tty;
- if (!tty)
- return 0;
-
if (!(info->mcr & UART_MCR_RTS))
return 0;
@@ -81,7 +77,7 @@ isdn_tty_try_read(modem_info *info, struct sk_buff *skb)
#endif
;
- c = tty_buffer_request_room(tty, len);
+ c = tty_buffer_request_room(port, len);
if (c < len)
return 0;
@@ -91,25 +87,25 @@ isdn_tty_try_read(modem_info *info, struct sk_buff *skb)
unsigned char *dp = skb->data;
while (--l) {
if (*dp == DLE)
- tty_insert_flip_char(tty, DLE, 0);
- tty_insert_flip_char(tty, *dp++, 0);
+ tty_insert_flip_char(port, DLE, 0);
+ tty_insert_flip_char(port, *dp++, 0);
}
if (*dp == DLE)
- tty_insert_flip_char(tty, DLE, 0);
+ tty_insert_flip_char(port, DLE, 0);
last = *dp;
} else {
#endif
if (len > 1)
- tty_insert_flip_string(tty, skb->data, len - 1);
+ tty_insert_flip_string(port, skb->data, len - 1);
last = skb->data[len - 1];
#ifdef CONFIG_ISDN_AUDIO
}
#endif
if (info->emu.mdmreg[REG_CPPP] & BIT_CPPP)
- tty_insert_flip_char(tty, last, 0xFF);
+ tty_insert_flip_char(port, last, 0xFF);
else
- tty_insert_flip_char(tty, last, TTY_NORMAL);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_char(port, last, TTY_NORMAL);
+ tty_flip_buffer_push(port);
kfree_skb(skb);
return 1;
@@ -126,7 +122,6 @@ isdn_tty_readmodem(void)
int midx;
int i;
int r;
- struct tty_struct *tty;
modem_info *info;
for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
@@ -144,20 +139,21 @@ isdn_tty_readmodem(void)
if ((info->vonline & 1) && (info->emu.vpar[1]))
isdn_audio_eval_silence(info);
#endif
- tty = info->port.tty;
- if (tty) {
- if (info->mcr & UART_MCR_RTS) {
- /* CISCO AsyncPPP Hack */
- if (!(info->emu.mdmreg[REG_CPPP] & BIT_CPPP))
- r = isdn_readbchan_tty(info->isdn_driver, info->isdn_channel, tty, 0);
- else
- r = isdn_readbchan_tty(info->isdn_driver, info->isdn_channel, tty, 1);
- if (r)
- tty_flip_buffer_push(tty);
- } else
- r = 1;
+ if (info->mcr & UART_MCR_RTS) {
+ /* CISCO AsyncPPP Hack */
+ if (!(info->emu.mdmreg[REG_CPPP] & BIT_CPPP))
+ r = isdn_readbchan_tty(info->isdn_driver,
+ info->isdn_channel,
+ &info->port, 0);
+ else
+ r = isdn_readbchan_tty(info->isdn_driver,
+ info->isdn_channel,
+ &info->port, 1);
+ if (r)
+ tty_flip_buffer_push(&info->port);
} else
r = 1;
+
if (r) {
info->rcvsched = 0;
resched = 1;
@@ -2229,7 +2225,7 @@ isdn_tty_stat_callback(int i, isdn_ctrl *c)
void
isdn_tty_at_cout(char *msg, modem_info *info)
{
- struct tty_struct *tty;
+ struct tty_port *port = &info->port;
atemu *m = &info->emu;
char *p;
char c;
@@ -2246,15 +2242,14 @@ isdn_tty_at_cout(char *msg, modem_info *info)
l = strlen(msg);
spin_lock_irqsave(&info->readlock, flags);
- tty = info->port.tty;
- if ((info->port.flags & ASYNC_CLOSING) || (!tty)) {
+ if (port->flags & ASYNC_CLOSING) {
spin_unlock_irqrestore(&info->readlock, flags);
return;
}
/* use queue instead of direct, if online and */
/* data is in queue or buffer is full */
- if (info->online && ((tty_buffer_request_room(tty, l) < l) ||
+ if (info->online && ((tty_buffer_request_room(port, l) < l) ||
!skb_queue_empty(&dev->drv[info->isdn_driver]->rpqueue[info->isdn_channel]))) {
skb = alloc_skb(l, GFP_ATOMIC);
if (!skb) {
@@ -2285,7 +2280,7 @@ isdn_tty_at_cout(char *msg, modem_info *info)
if (skb) {
*sp++ = c;
} else {
- if (tty_insert_flip_char(tty, c, TTY_NORMAL) == 0)
+ if (tty_insert_flip_char(port, c, TTY_NORMAL) == 0)
break;
}
}
@@ -2299,7 +2294,7 @@ isdn_tty_at_cout(char *msg, modem_info *info)
} else {
spin_unlock_irqrestore(&info->readlock, flags);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
}
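The tty conversions in this file and in isdn_common.c all reduce to the same tty_port flip-buffer pattern; a minimal sketch, where port, data and len stand in for the driver's own receive state:

    /* Queue received bytes on the tty_port and push them to the line discipline. */
    tty_insert_flip_string(port, data, len);
    tty_flip_buffer_push(port);

Working against the tty_port also drops the tty_port_tty_get()/tty_kref_put() dance that the gigaset receive path needed before.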
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index f8e405c383a0..2c0d2c2bf946 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -689,7 +689,7 @@ l1oip_socket_thread(void *data)
hc->sin_remote.sin_addr.s_addr = htonl(hc->remoteip);
hc->sin_remote.sin_port = htons((unsigned short)hc->remoteport);
- /* bind to incomming port */
+ /* bind to incoming port */
if (socket->ops->bind(socket, (struct sockaddr *)&hc->sin_local,
sizeof(hc->sin_local))) {
printk(KERN_ERR "%s: Failed to bind socket to port %d.\n",
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index abe2d699b6f3..8b07f83d48ad 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -483,7 +483,6 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
struct sock *sk = sock->sk;
- struct hlist_node *node;
struct sock *csk;
int err = 0;
@@ -508,7 +507,7 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (sk->sk_protocol < ISDN_P_B_START) {
read_lock_bh(&data_sockets.lock);
- sk_for_each(csk, node, &data_sockets.head) {
+ sk_for_each(csk, &data_sockets.head) {
if (sk == csk)
continue;
if (_pms(csk)->dev != _pms(sk)->dev)
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index deda591f70b9..9cb4b621fbc3 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -64,12 +64,11 @@ unlock:
static void
send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
{
- struct hlist_node *node;
struct sock *sk;
struct sk_buff *cskb = NULL;
read_lock(&sl->lock);
- sk_for_each(sk, node, &sl->head) {
+ sk_for_each(sk, &sl->head) {
if (sk->sk_state != MISDN_BOUND)
continue;
if (!cskb)
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 4469b441b785..ec50824c02ec 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -193,9 +193,18 @@ config LEDS_LP3944
To compile this driver as a module, choose M here: the
module will be called leds-lp3944.
+config LEDS_LP55XX_COMMON
+ tristate "Common Driver for TI/National LP5521 and LP5523/55231"
+ depends on LEDS_LP5521 || LEDS_LP5523
+ select FW_LOADER
+ help
+ This option supports common operations for LP5521 and LP5523/55231
+ devices.
+
config LEDS_LP5521
tristate "LED Support for N.S. LP5521 LED driver chip"
depends on LEDS_CLASS && I2C
+ select LEDS_LP55XX_COMMON
help
If you say yes here you get support for the National Semiconductor
LP5521 LED driver. It is 3 channel chip with programmable engines.
@@ -205,6 +214,7 @@ config LEDS_LP5521
config LEDS_LP5523
tristate "LED Support for TI/National LP5523/55231 LED driver chip"
depends on LEDS_CLASS && I2C
+ select LEDS_LP55XX_COMMON
help
If you say yes here you get support for TI/National Semiconductor
LP5523/55231 LED driver.
@@ -310,7 +320,7 @@ config LEDS_DAC124S085
config LEDS_PWM
tristate "PWM driven LED Support"
depends on LEDS_CLASS
- depends on HAVE_PWM
+ depends on PWM
help
This option enables support for pwm driven LEDs
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 3fb9641b6194..215e7e3b6173 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
+obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 6be2edd41173..f5b9ea315790 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -128,8 +128,10 @@ static void pm860x_led_set(struct led_classdev *cdev,
static int pm860x_led_dt_init(struct platform_device *pdev,
struct pm860x_led *data)
{
- struct device_node *nproot = pdev->dev.parent->of_node, *np;
+ struct device_node *nproot, *np;
int iset = 0;
+
+ nproot = of_node_get(pdev->dev.parent->of_node);
if (!nproot)
return -ENODEV;
nproot = of_find_node_by_name(nproot, "leds");
@@ -145,6 +147,7 @@ static int pm860x_led_dt_init(struct platform_device *pdev,
break;
}
}
+ of_node_put(nproot);
return 0;
}
#else
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 214145483836..a036a19040fe 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -187,6 +187,40 @@ static void lm3530_als_configure(struct lm3530_platform_data *pdata,
(pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
}
+static int lm3530_led_enable(struct lm3530_data *drvdata)
+{
+ int ret;
+
+ if (drvdata->enable)
+ return 0;
+
+ ret = regulator_enable(drvdata->regulator);
+ if (ret) {
+ dev_err(drvdata->led_dev.dev, "Failed to enable vin:%d\n", ret);
+ return ret;
+ }
+
+ drvdata->enable = true;
+ return 0;
+}
+
+static void lm3530_led_disable(struct lm3530_data *drvdata)
+{
+ int ret;
+
+ if (!drvdata->enable)
+ return;
+
+ ret = regulator_disable(drvdata->regulator);
+ if (ret) {
+ dev_err(drvdata->led_dev.dev, "Failed to disable vin:%d\n",
+ ret);
+ return;
+ }
+
+ drvdata->enable = false;
+}
+
static int lm3530_init_registers(struct lm3530_data *drvdata)
{
int ret = 0;
@@ -245,15 +279,9 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
reg_val[12] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */
reg_val[13] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */
- if (!drvdata->enable) {
- ret = regulator_enable(drvdata->regulator);
- if (ret) {
- dev_err(&drvdata->client->dev,
- "Enable regulator failed\n");
- return ret;
- }
- drvdata->enable = true;
- }
+ ret = lm3530_led_enable(drvdata);
+ if (ret)
+ return ret;
for (i = 0; i < LM3530_REG_MAX; i++) {
/* do not update brightness register when pwm mode */
@@ -305,13 +333,8 @@ static void lm3530_brightness_set(struct led_classdev *led_cdev,
else
drvdata->brightness = brt_val;
- if (brt_val == 0) {
- err = regulator_disable(drvdata->regulator);
- if (err)
- dev_err(&drvdata->client->dev,
- "Disable regulator failed\n");
- drvdata->enable = false;
- }
+ if (brt_val == 0)
+ lm3530_led_disable(drvdata);
break;
case LM3530_BL_MODE_ALS:
break;
@@ -458,8 +481,7 @@ static int lm3530_remove(struct i2c_client *client)
device_remove_file(drvdata->led_dev.dev, &dev_attr_mode);
- if (drvdata->enable)
- regulator_disable(drvdata->regulator);
+ lm3530_led_disable(drvdata);
led_classdev_unregister(&drvdata->led_dev);
return 0;
}
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index 65d79284c488..4117235ba618 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -380,7 +380,7 @@ static void lm355x_indicator_brightness_set(struct led_classdev *cdev,
/* indicator pattern only for lm3556*/
static ssize_t lm3556_indicator_pattern_store(struct device *dev,
- struct device_attribute *devAttr,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t ret;
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 07b3dde90613..9f428d9dfe91 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -176,7 +176,7 @@ out:
/* torch pin config for lm3642*/
static ssize_t lm3642_torch_pin_store(struct device *dev,
- struct device_attribute *devAttr,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t ret;
@@ -233,7 +233,7 @@ static void lm3642_torch_brightness_set(struct led_classdev *cdev,
/* strobe pin config for lm3642*/
static ssize_t lm3642_strobe_pin_store(struct device *dev,
- struct device_attribute *devAttr,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t ret;
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index cb8a5220200b..1001347ba70b 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -2,8 +2,10 @@
* LP5521 LED chip driver.
*
* Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2012 Texas Instruments
*
* Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ * Milo(Woogyom) Kim <milo.kim@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -20,33 +22,21 @@
* 02110-1301 USA
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/mutex.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/ctype.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/leds.h>
-#include <linux/leds-lp5521.h>
-#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
-#define LP5521_PROGRAM_LENGTH 32 /* in bytes */
-
-#define LP5521_MAX_LEDS 3 /* Maximum number of LEDs */
-#define LP5521_MAX_ENGINES 3 /* Maximum number of engines */
-
-#define LP5521_ENG_MASK_BASE 0x30 /* 00110000 */
-#define LP5521_ENG_STATUS_MASK 0x07 /* 00000111 */
+#include "leds-lp55xx-common.h"
-#define LP5521_CMD_LOAD 0x15 /* 00010101 */
-#define LP5521_CMD_RUN 0x2a /* 00101010 */
-#define LP5521_CMD_DIRECT 0x3f /* 00111111 */
-#define LP5521_CMD_DISABLED 0x00 /* 00000000 */
+#define LP5521_PROGRAM_LENGTH 32
+#define LP5521_MAX_LEDS 3
+#define LP5521_CMD_DIRECT 0x3F
/* Registers */
#define LP5521_REG_ENABLE 0x00
@@ -58,22 +48,14 @@
#define LP5521_REG_G_CURRENT 0x06
#define LP5521_REG_B_CURRENT 0x07
#define LP5521_REG_CONFIG 0x08
-#define LP5521_REG_R_CHANNEL_PC 0x09
-#define LP5521_REG_G_CHANNEL_PC 0x0A
-#define LP5521_REG_B_CHANNEL_PC 0x0B
#define LP5521_REG_STATUS 0x0C
#define LP5521_REG_RESET 0x0D
-#define LP5521_REG_GPO 0x0E
#define LP5521_REG_R_PROG_MEM 0x10
#define LP5521_REG_G_PROG_MEM 0x30
#define LP5521_REG_B_PROG_MEM 0x50
-#define LP5521_PROG_MEM_BASE LP5521_REG_R_PROG_MEM
-#define LP5521_PROG_MEM_SIZE 0x20
-
/* Base register to set LED current */
#define LP5521_REG_LED_CURRENT_BASE LP5521_REG_R_CURRENT
-
/* Base register to set the brightness */
#define LP5521_REG_LED_PWM_BASE LP5521_REG_R_PWM
@@ -92,440 +74,287 @@
/* default R channel current register value */
#define LP5521_REG_R_CURR_DEFAULT 0xAF
-/* Pattern Mode */
-#define PATTERN_OFF 0
+/* Reset register value */
+#define LP5521_RESET 0xFF
-struct lp5521_engine {
- int id;
- u8 mode;
- u8 prog_page;
- u8 engine_mask;
-};
+/* Program Memory Operations */
+#define LP5521_MODE_R_M 0x30 /* Operation Mode Register */
+#define LP5521_MODE_G_M 0x0C
+#define LP5521_MODE_B_M 0x03
+#define LP5521_LOAD_R 0x10
+#define LP5521_LOAD_G 0x04
+#define LP5521_LOAD_B 0x01
-struct lp5521_led {
- int id;
- u8 chan_nr;
- u8 led_current;
- u8 max_current;
- struct led_classdev cdev;
- struct work_struct brightness_work;
- u8 brightness;
-};
+#define LP5521_R_IS_LOADING(mode) \
+ ((mode & LP5521_MODE_R_M) == LP5521_LOAD_R)
+#define LP5521_G_IS_LOADING(mode) \
+ ((mode & LP5521_MODE_G_M) == LP5521_LOAD_G)
+#define LP5521_B_IS_LOADING(mode) \
+ ((mode & LP5521_MODE_B_M) == LP5521_LOAD_B)
-struct lp5521_chip {
- struct lp5521_platform_data *pdata;
- struct mutex lock; /* Serialize control */
- struct i2c_client *client;
- struct lp5521_engine engines[LP5521_MAX_ENGINES];
- struct lp5521_led leds[LP5521_MAX_LEDS];
- u8 num_channels;
- u8 num_leds;
-};
+#define LP5521_EXEC_R_M 0x30 /* Enable Register */
+#define LP5521_EXEC_G_M 0x0C
+#define LP5521_EXEC_B_M 0x03
+#define LP5521_EXEC_M 0x3F
+#define LP5521_RUN_R 0x20
+#define LP5521_RUN_G 0x08
+#define LP5521_RUN_B 0x02
-static inline struct lp5521_led *cdev_to_led(struct led_classdev *cdev)
+static inline void lp5521_wait_opmode_done(void)
{
- return container_of(cdev, struct lp5521_led, cdev);
+ /* operation mode change needs to be longer than 153 us */
+ usleep_range(200, 300);
}
-static inline struct lp5521_chip *engine_to_lp5521(struct lp5521_engine *engine)
+static inline void lp5521_wait_enable_done(void)
{
- return container_of(engine, struct lp5521_chip,
- engines[engine->id - 1]);
+ /* it takes more than 488 us to update the ENABLE register */
+ usleep_range(500, 600);
}
-static inline struct lp5521_chip *led_to_lp5521(struct lp5521_led *led)
+static void lp5521_set_led_current(struct lp55xx_led *led, u8 led_current)
{
- return container_of(led, struct lp5521_chip,
- leds[led->id]);
+ led->led_current = led_current;
+ lp55xx_write(led->chip, LP5521_REG_LED_CURRENT_BASE + led->chan_nr,
+ led_current);
}
-static void lp5521_led_brightness_work(struct work_struct *work);
-
-static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value)
+static void lp5521_load_engine(struct lp55xx_chip *chip)
{
- return i2c_smbus_write_byte_data(client, reg, value);
-}
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ u8 mask[] = {
+ [LP55XX_ENGINE_1] = LP5521_MODE_R_M,
+ [LP55XX_ENGINE_2] = LP5521_MODE_G_M,
+ [LP55XX_ENGINE_3] = LP5521_MODE_B_M,
+ };
-static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
-{
- s32 ret;
+ u8 val[] = {
+ [LP55XX_ENGINE_1] = LP5521_LOAD_R,
+ [LP55XX_ENGINE_2] = LP5521_LOAD_G,
+ [LP55XX_ENGINE_3] = LP5521_LOAD_B,
+ };
- ret = i2c_smbus_read_byte_data(client, reg);
- if (ret < 0)
- return ret;
+ lp55xx_update_bits(chip, LP5521_REG_OP_MODE, mask[idx], val[idx]);
- *buf = ret;
- return 0;
+ lp5521_wait_opmode_done();
}
-static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode)
+static void lp5521_stop_engine(struct lp55xx_chip *chip)
{
- struct lp5521_chip *chip = engine_to_lp5521(engine);
- struct i2c_client *client = chip->client;
- int ret;
- u8 engine_state;
-
- /* Only transition between RUN and DIRECT mode are handled here */
- if (mode == LP5521_CMD_LOAD)
- return 0;
-
- if (mode == LP5521_CMD_DISABLED)
- mode = LP5521_CMD_DIRECT;
-
- ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state);
- if (ret < 0)
- return ret;
-
- /* set mode only for this engine */
- engine_state &= ~(engine->engine_mask);
- mode &= engine->engine_mask;
- engine_state |= mode;
- return lp5521_write(client, LP5521_REG_OP_MODE, engine_state);
+ lp55xx_write(chip, LP5521_REG_OP_MODE, 0);
+ lp5521_wait_opmode_done();
}
-static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
+static void lp5521_run_engine(struct lp55xx_chip *chip, bool start)
{
- struct lp5521_chip *chip = engine_to_lp5521(eng);
- struct i2c_client *client = chip->client;
int ret;
- int addr;
u8 mode;
+ u8 exec;
- /* move current engine to direct mode and remember the state */
- ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
- if (ret)
- return ret;
-
- /* Mode change requires min 500 us delay. 1 - 2 ms with margin */
- usleep_range(1000, 2000);
- ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode);
- if (ret)
- return ret;
-
- /* For loading, all the engines to load mode */
- lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
- /* Mode change requires min 500 us delay. 1 - 2 ms with margin */
- usleep_range(1000, 2000);
- lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
- /* Mode change requires min 500 us delay. 1 - 2 ms with margin */
- usleep_range(1000, 2000);
-
- addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE;
- i2c_smbus_write_i2c_block_data(client,
- addr,
- LP5521_PROG_MEM_SIZE,
- pattern);
-
- return lp5521_write(client, LP5521_REG_OP_MODE, mode);
-}
-
-static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
-{
- return lp5521_write(chip->client,
- LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
- curr);
-}
-
-static void lp5521_init_engine(struct lp5521_chip *chip)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
- chip->engines[i].id = i + 1;
- chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
- chip->engines[i].prog_page = i;
+ /* stop engine */
+ if (!start) {
+ lp5521_stop_engine(chip);
+ lp55xx_write(chip, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
+ lp5521_wait_opmode_done();
+ return;
}
-}
-
-static int lp5521_configure(struct i2c_client *client)
-{
- struct lp5521_chip *chip = i2c_get_clientdata(client);
- int ret;
- u8 cfg;
-
- lp5521_init_engine(chip);
-
- /* Set all PWMs to direct control mode */
- ret = lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
-
- cfg = chip->pdata->update_config ?
- : (LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);
- ret |= lp5521_write(client, LP5521_REG_CONFIG, cfg);
-
- /* Initialize all channels PWM to zero -> leds off */
- ret |= lp5521_write(client, LP5521_REG_R_PWM, 0);
- ret |= lp5521_write(client, LP5521_REG_G_PWM, 0);
- ret |= lp5521_write(client, LP5521_REG_B_PWM, 0);
-
- /* Set engines are set to run state when OP_MODE enables engines */
- ret |= lp5521_write(client, LP5521_REG_ENABLE,
- LP5521_ENABLE_RUN_PROGRAM);
- /* enable takes 500us. 1 - 2 ms leaves some margin */
- usleep_range(1000, 2000);
-
- return ret;
-}
-
-static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf)
-{
- int ret;
- u8 status;
-
- ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status);
- if (ret < 0)
- return ret;
-
- /* Check that ext clock is really in use if requested */
- if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT)
- if ((status & LP5521_EXT_CLK_USED) == 0)
- return -EIO;
- return 0;
-}
-
-static void lp5521_set_brightness(struct led_classdev *cdev,
- enum led_brightness brightness)
-{
- struct lp5521_led *led = cdev_to_led(cdev);
- led->brightness = (u8)brightness;
- schedule_work(&led->brightness_work);
-}
-
-static void lp5521_led_brightness_work(struct work_struct *work)
-{
- struct lp5521_led *led = container_of(work,
- struct lp5521_led,
- brightness_work);
- struct lp5521_chip *chip = led_to_lp5521(led);
- struct i2c_client *client = chip->client;
- mutex_lock(&chip->lock);
- lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr,
- led->brightness);
- mutex_unlock(&chip->lock);
-}
-
-/* Detect the chip by setting its ENABLE register and reading it back. */
-static int lp5521_detect(struct i2c_client *client)
-{
- int ret;
- u8 buf;
+ /*
+ * To run the engine,
+	 * the operation mode and enable registers should be updated at the same time
+ */
- ret = lp5521_write(client, LP5521_REG_ENABLE, LP5521_ENABLE_DEFAULT);
+ ret = lp55xx_read(chip, LP5521_REG_OP_MODE, &mode);
if (ret)
- return ret;
- /* enable takes 500us. 1 - 2 ms leaves some margin */
- usleep_range(1000, 2000);
- ret = lp5521_read(client, LP5521_REG_ENABLE, &buf);
- if (ret)
- return ret;
- if (buf != LP5521_ENABLE_DEFAULT)
- return -ENODEV;
+ return;
- return 0;
-}
+ ret = lp55xx_read(chip, LP5521_REG_ENABLE, &exec);
+ if (ret)
+ return;
-/* Set engine mode and create appropriate sysfs attributes, if required. */
-static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
-{
- int ret = 0;
+	/* change operation mode to RUN only for engines that are in LOAD mode */
+ if (LP5521_R_IS_LOADING(mode)) {
+ mode = (mode & ~LP5521_MODE_R_M) | LP5521_RUN_R;
+ exec = (exec & ~LP5521_EXEC_R_M) | LP5521_RUN_R;
+ }
- /* if in that mode already do nothing, except for run */
- if (mode == engine->mode && mode != LP5521_CMD_RUN)
- return 0;
+ if (LP5521_G_IS_LOADING(mode)) {
+ mode = (mode & ~LP5521_MODE_G_M) | LP5521_RUN_G;
+ exec = (exec & ~LP5521_EXEC_G_M) | LP5521_RUN_G;
+ }
- if (mode == LP5521_CMD_RUN) {
- ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN);
- } else if (mode == LP5521_CMD_LOAD) {
- lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
- lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
- } else if (mode == LP5521_CMD_DISABLED) {
- lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
+ if (LP5521_B_IS_LOADING(mode)) {
+ mode = (mode & ~LP5521_MODE_B_M) | LP5521_RUN_B;
+ exec = (exec & ~LP5521_EXEC_B_M) | LP5521_RUN_B;
}
- engine->mode = mode;
+ lp55xx_write(chip, LP5521_REG_OP_MODE, mode);
+ lp5521_wait_opmode_done();
- return ret;
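+	/* then set the EXEC bits in ENABLE so the loaded engines start running */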
+ lp55xx_update_bits(chip, LP5521_REG_ENABLE, LP5521_EXEC_M, exec);
+ lp5521_wait_enable_done();
}
-static int lp5521_do_store_load(struct lp5521_engine *engine,
- const char *buf, size_t len)
+static int lp5521_update_program_memory(struct lp55xx_chip *chip,
+ const u8 *data, size_t size)
{
- struct lp5521_chip *chip = engine_to_lp5521(engine);
- struct i2c_client *client = chip->client;
- int ret, nrchars, offset = 0, i = 0;
- char c[3];
- unsigned cmd;
+ enum lp55xx_engine_index idx = chip->engine_idx;
u8 pattern[LP5521_PROGRAM_LENGTH] = {0};
+ u8 addr[] = {
+ [LP55XX_ENGINE_1] = LP5521_REG_R_PROG_MEM,
+ [LP55XX_ENGINE_2] = LP5521_REG_G_PROG_MEM,
+ [LP55XX_ENGINE_3] = LP5521_REG_B_PROG_MEM,
+ };
+ unsigned cmd;
+ char c[3];
+ int program_size;
+ int nrchars;
+ int offset = 0;
+ int ret;
+ int i;
- while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
+ /* clear program memory before updating */
+ for (i = 0; i < LP5521_PROGRAM_LENGTH; i++)
+ lp55xx_write(chip, addr[idx] + i, 0);
+
+ i = 0;
+ while ((offset < size - 1) && (i < LP5521_PROGRAM_LENGTH)) {
/* separate sscanfs because length is working only for %s */
- ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
- if (ret != 2)
- goto fail;
+ ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
+ if (ret != 1)
+ goto err;
+
ret = sscanf(c, "%2x", &cmd);
if (ret != 1)
- goto fail;
- pattern[i] = (u8)cmd;
+ goto err;
+ pattern[i] = (u8)cmd;
offset += nrchars;
i++;
}
/* Each instruction is 16bit long. Check that length is even */
if (i % 2)
- goto fail;
+ goto err;
- mutex_lock(&chip->lock);
- if (engine->mode == LP5521_CMD_LOAD)
- ret = lp5521_load_program(engine, pattern);
- else
- ret = -EINVAL;
- mutex_unlock(&chip->lock);
+ program_size = i;
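+	/* write the parsed pattern bytes into this engine's program memory */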
+ for (i = 0; i < program_size; i++)
+ lp55xx_write(chip, addr[idx] + i, pattern[i]);
- if (ret) {
- dev_err(&client->dev, "failed loading pattern\n");
- return ret;
- }
+ return 0;
- return len;
-fail:
- dev_err(&client->dev, "wrong pattern format\n");
+err:
+ dev_err(&chip->cl->dev, "wrong pattern format\n");
return -EINVAL;
}
-static ssize_t store_engine_load(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
+static void lp5521_firmware_loaded(struct lp55xx_chip *chip)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5521_chip *chip = i2c_get_clientdata(client);
- return lp5521_do_store_load(&chip->engines[nr - 1], buf, len);
-}
+ const struct firmware *fw = chip->fw;
-#define store_load(nr) \
-static ssize_t store_engine##nr##_load(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
-{ \
- return store_engine_load(dev, attr, buf, len, nr); \
-}
-store_load(1)
-store_load(2)
-store_load(3)
-
-static ssize_t show_engine_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf, int nr)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5521_chip *chip = i2c_get_clientdata(client);
- switch (chip->engines[nr - 1].mode) {
- case LP5521_CMD_RUN:
- return sprintf(buf, "run\n");
- case LP5521_CMD_LOAD:
- return sprintf(buf, "load\n");
- case LP5521_CMD_DISABLED:
- return sprintf(buf, "disabled\n");
- default:
- return sprintf(buf, "disabled\n");
+ if (fw->size > LP5521_PROGRAM_LENGTH) {
+ dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
+ fw->size);
+ return;
}
-}
-#define show_mode(nr) \
-static ssize_t show_engine##nr##_mode(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- return show_engine_mode(dev, attr, buf, nr); \
+ /*
+	 * Program memory sequence
+ * 1) set engine mode to "LOAD"
+ * 2) write firmware data into program memory
+ */
+
+ lp5521_load_engine(chip);
+ lp5521_update_program_memory(chip, fw->data, fw->size);
}
-show_mode(1)
-show_mode(2)
-show_mode(3)
-static ssize_t store_engine_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
+static int lp5521_post_init_device(struct lp55xx_chip *chip)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5521_chip *chip = i2c_get_clientdata(client);
- struct lp5521_engine *engine = &chip->engines[nr - 1];
- mutex_lock(&chip->lock);
+ int ret;
+ u8 val;
- if (!strncmp(buf, "run", 3))
- lp5521_set_mode(engine, LP5521_CMD_RUN);
- else if (!strncmp(buf, "load", 4))
- lp5521_set_mode(engine, LP5521_CMD_LOAD);
- else if (!strncmp(buf, "disabled", 8))
- lp5521_set_mode(engine, LP5521_CMD_DISABLED);
+ /*
+ * Make sure that the chip is reset by reading back the r channel
+	 * current reg. This dummy read is required on some platforms -
+ * otherwise further access to the R G B channels in the
+ * LP5521_REG_ENABLE register will not have any effect - strange!
+ */
+ ret = lp55xx_read(chip, LP5521_REG_R_CURRENT, &val);
+ if (ret) {
+ dev_err(&chip->cl->dev, "error in resetting chip\n");
+ return ret;
+ }
+ if (val != LP5521_REG_R_CURR_DEFAULT) {
+ dev_err(&chip->cl->dev,
+ "unexpected data in register (expected 0x%x got 0x%x)\n",
+ LP5521_REG_R_CURR_DEFAULT, val);
+ ret = -EINVAL;
+ return ret;
+ }
+ usleep_range(10000, 20000);
- mutex_unlock(&chip->lock);
- return len;
-}
+ /* Set all PWMs to direct control mode */
+ ret = lp55xx_write(chip, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
-#define store_mode(nr) \
-static ssize_t store_engine##nr##_mode(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
-{ \
- return store_engine_mode(dev, attr, buf, len, nr); \
-}
-store_mode(1)
-store_mode(2)
-store_mode(3)
+ val = chip->pdata->update_config ?
+ : (LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);
+ ret = lp55xx_write(chip, LP5521_REG_CONFIG, val);
+ if (ret)
+ return ret;
-static ssize_t show_max_current(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lp5521_led *led = cdev_to_led(led_cdev);
+ /* Initialize all channels PWM to zero -> leds off */
+ lp55xx_write(chip, LP5521_REG_R_PWM, 0);
+ lp55xx_write(chip, LP5521_REG_G_PWM, 0);
+ lp55xx_write(chip, LP5521_REG_B_PWM, 0);
- return sprintf(buf, "%d\n", led->max_current);
-}
+	/* Engines are set to run state when OP_MODE enables them */
+ ret = lp55xx_write(chip, LP5521_REG_ENABLE, LP5521_ENABLE_RUN_PROGRAM);
+ if (ret)
+ return ret;
-static ssize_t show_current(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lp5521_led *led = cdev_to_led(led_cdev);
+ lp5521_wait_enable_done();
- return sprintf(buf, "%d\n", led->led_current);
+ return 0;
}
-static ssize_t store_current(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static int lp5521_run_selftest(struct lp55xx_chip *chip, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lp5521_led *led = cdev_to_led(led_cdev);
- struct lp5521_chip *chip = led_to_lp5521(led);
- ssize_t ret;
- unsigned long curr;
+ struct lp55xx_platform_data *pdata = chip->pdata;
+ int ret;
+ u8 status;
- if (kstrtoul(buf, 0, &curr))
- return -EINVAL;
+ ret = lp55xx_read(chip, LP5521_REG_STATUS, &status);
+ if (ret < 0)
+ return ret;
- if (curr > led->max_current)
- return -EINVAL;
+ if (pdata->clock_mode != LP55XX_CLOCK_EXT)
+ return 0;
- mutex_lock(&chip->lock);
- ret = lp5521_set_led_current(chip, led->id, curr);
- mutex_unlock(&chip->lock);
+ /* Check that ext clock is really in use if requested */
+ if ((status & LP5521_EXT_CLK_USED) == 0)
+ return -EIO;
- if (ret < 0)
- return ret;
+ return 0;
+}
- led->led_current = (u8)curr;
+static void lp5521_led_brightness_work(struct work_struct *work)
+{
+ struct lp55xx_led *led = container_of(work, struct lp55xx_led,
+ brightness_work);
+ struct lp55xx_chip *chip = led->chip;
- return len;
+ mutex_lock(&chip->lock);
+ lp55xx_write(chip, LP5521_REG_LED_PWM_BASE + led->chan_nr,
+ led->brightness);
+ mutex_unlock(&chip->lock);
}
static ssize_t lp5521_selftest(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5521_chip *chip = i2c_get_clientdata(client);
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
int ret;
mutex_lock(&chip->lock);
@@ -534,133 +363,11 @@ static ssize_t lp5521_selftest(struct device *dev,
return sprintf(buf, "%s\n", ret ? "FAIL" : "OK");
}
-static void lp5521_clear_program_memory(struct i2c_client *cl)
-{
- int i;
- u8 rgb_mem[] = {
- LP5521_REG_R_PROG_MEM,
- LP5521_REG_G_PROG_MEM,
- LP5521_REG_B_PROG_MEM,
- };
-
- for (i = 0; i < ARRAY_SIZE(rgb_mem); i++) {
- lp5521_write(cl, rgb_mem[i], 0);
- lp5521_write(cl, rgb_mem[i] + 1, 0);
- }
-}
-
-static void lp5521_write_program_memory(struct i2c_client *cl,
- u8 base, u8 *rgb, int size)
-{
- int i;
-
- if (!rgb || size <= 0)
- return;
-
- for (i = 0; i < size; i++)
- lp5521_write(cl, base + i, *(rgb + i));
-
- lp5521_write(cl, base + i, 0);
- lp5521_write(cl, base + i + 1, 0);
-}
-
-static inline struct lp5521_led_pattern *lp5521_get_pattern
- (struct lp5521_chip *chip, u8 offset)
-{
- struct lp5521_led_pattern *ptn;
- ptn = chip->pdata->patterns + (offset - 1);
- return ptn;
-}
-
-static void lp5521_run_led_pattern(int mode, struct lp5521_chip *chip)
-{
- struct lp5521_led_pattern *ptn;
- struct i2c_client *cl = chip->client;
- int num_patterns = chip->pdata->num_patterns;
-
- if (mode > num_patterns || !(chip->pdata->patterns))
- return;
-
- if (mode == PATTERN_OFF) {
- lp5521_write(cl, LP5521_REG_ENABLE, LP5521_ENABLE_DEFAULT);
- usleep_range(1000, 2000);
- lp5521_write(cl, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
- } else {
- ptn = lp5521_get_pattern(chip, mode);
- if (!ptn)
- return;
-
- lp5521_write(cl, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
- usleep_range(1000, 2000);
-
- lp5521_clear_program_memory(cl);
-
- lp5521_write_program_memory(cl, LP5521_REG_R_PROG_MEM,
- ptn->r, ptn->size_r);
- lp5521_write_program_memory(cl, LP5521_REG_G_PROG_MEM,
- ptn->g, ptn->size_g);
- lp5521_write_program_memory(cl, LP5521_REG_B_PROG_MEM,
- ptn->b, ptn->size_b);
-
- lp5521_write(cl, LP5521_REG_OP_MODE, LP5521_CMD_RUN);
- usleep_range(1000, 2000);
- lp5521_write(cl, LP5521_REG_ENABLE, LP5521_ENABLE_RUN_PROGRAM);
- }
-}
-
-static ssize_t store_led_pattern(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct lp5521_chip *chip = i2c_get_clientdata(to_i2c_client(dev));
- unsigned long val;
- int ret;
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- lp5521_run_led_pattern(val, chip);
-
- return len;
-}
-
-/* led class device attributes */
-static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current);
-static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
-
-static struct attribute *lp5521_led_attributes[] = {
- &dev_attr_led_current.attr,
- &dev_attr_max_current.attr,
- NULL,
-};
-
-static struct attribute_group lp5521_led_attribute_group = {
- .attrs = lp5521_led_attributes
-};
-
/* device attributes */
-static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR,
- show_engine1_mode, store_engine1_mode);
-static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR,
- show_engine2_mode, store_engine2_mode);
-static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR,
- show_engine3_mode, store_engine3_mode);
-static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load);
-static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load);
-static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load);
static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
-static DEVICE_ATTR(led_pattern, S_IWUSR, NULL, store_led_pattern);
static struct attribute *lp5521_attributes[] = {
- &dev_attr_engine1_mode.attr,
- &dev_attr_engine2_mode.attr,
- &dev_attr_engine3_mode.attr,
&dev_attr_selftest.attr,
- &dev_attr_engine1_load.attr,
- &dev_attr_engine2_load.attr,
- &dev_attr_engine3_load.attr,
- &dev_attr_led_pattern.attr,
NULL
};
@@ -668,217 +375,91 @@ static const struct attribute_group lp5521_group = {
.attrs = lp5521_attributes,
};
-static int lp5521_register_sysfs(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
- return sysfs_create_group(&dev->kobj, &lp5521_group);
-}
-
-static void lp5521_unregister_sysfs(struct i2c_client *client)
-{
- struct lp5521_chip *chip = i2c_get_clientdata(client);
- struct device *dev = &client->dev;
- int i;
-
- sysfs_remove_group(&dev->kobj, &lp5521_group);
-
- for (i = 0; i < chip->num_leds; i++)
- sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
- &lp5521_led_attribute_group);
-}
-
-static int lp5521_init_led(struct lp5521_led *led,
- struct i2c_client *client,
- int chan, struct lp5521_platform_data *pdata)
-{
- struct device *dev = &client->dev;
- char name[32];
- int res;
-
- if (chan >= LP5521_MAX_LEDS)
- return -EINVAL;
-
- if (pdata->led_config[chan].led_current == 0)
- return 0;
-
- led->led_current = pdata->led_config[chan].led_current;
- led->max_current = pdata->led_config[chan].max_current;
- led->chan_nr = pdata->led_config[chan].chan_nr;
-
- if (led->chan_nr >= LP5521_MAX_LEDS) {
- dev_err(dev, "Use channel numbers between 0 and %d\n",
- LP5521_MAX_LEDS - 1);
- return -EINVAL;
- }
-
- led->cdev.brightness_set = lp5521_set_brightness;
- if (pdata->led_config[chan].name) {
- led->cdev.name = pdata->led_config[chan].name;
- } else {
- snprintf(name, sizeof(name), "%s:channel%d",
- pdata->label ?: client->name, chan);
- led->cdev.name = name;
- }
-
- res = led_classdev_register(dev, &led->cdev);
- if (res < 0) {
- dev_err(dev, "couldn't register led on channel %d\n", chan);
- return res;
- }
-
- res = sysfs_create_group(&led->cdev.dev->kobj,
- &lp5521_led_attribute_group);
- if (res < 0) {
- dev_err(dev, "couldn't register current attribute\n");
- led_classdev_unregister(&led->cdev);
- return res;
- }
- return 0;
-}
+/* Chip specific configurations */
+static struct lp55xx_device_config lp5521_cfg = {
+ .reset = {
+ .addr = LP5521_REG_RESET,
+ .val = LP5521_RESET,
+ },
+ .enable = {
+ .addr = LP5521_REG_ENABLE,
+ .val = LP5521_ENABLE_DEFAULT,
+ },
+ .max_channel = LP5521_MAX_LEDS,
+ .post_init_device = lp5521_post_init_device,
+ .brightness_work_fn = lp5521_led_brightness_work,
+ .set_led_current = lp5521_set_led_current,
+ .firmware_cb = lp5521_firmware_loaded,
+ .run_engine = lp5521_run_engine,
+ .dev_attr_group = &lp5521_group,
+};
static int lp5521_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct lp5521_chip *chip;
- struct lp5521_platform_data *pdata;
- int ret, i, led;
- u8 buf;
-
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- i2c_set_clientdata(client, chip);
- chip->client = client;
-
- pdata = client->dev.platform_data;
+ int ret;
+ struct lp55xx_chip *chip;
+ struct lp55xx_led *led;
+ struct lp55xx_platform_data *pdata = client->dev.platform_data;
if (!pdata) {
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
- mutex_init(&chip->lock);
-
- chip->pdata = pdata;
-
- if (pdata->setup_resources) {
- ret = pdata->setup_resources();
- if (ret < 0)
- return ret;
- }
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
- if (pdata->enable) {
- pdata->enable(0);
- usleep_range(1000, 2000); /* Keep enable down at least 1ms */
- pdata->enable(1);
- usleep_range(1000, 2000); /* 500us abs min. */
- }
+ led = devm_kzalloc(&client->dev,
+ sizeof(*led) * pdata->num_channels, GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
- lp5521_write(client, LP5521_REG_RESET, 0xff);
- usleep_range(10000, 20000); /*
- * Exact value is not available. 10 - 20ms
- * appears to be enough for reset.
- */
+ chip->cl = client;
+ chip->pdata = pdata;
+ chip->cfg = &lp5521_cfg;
- /*
- * Make sure that the chip is reset by reading back the r channel
- * current reg. This is dummy read is required on some platforms -
- * otherwise further access to the R G B channels in the
- * LP5521_REG_ENABLE register will not have any effect - strange!
- */
- ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
- if (ret) {
- dev_err(&client->dev, "error in resetting chip\n");
- goto fail2;
- }
- if (buf != LP5521_REG_R_CURR_DEFAULT) {
- dev_err(&client->dev,
- "unexpected data in register (expected 0x%x got 0x%x)\n",
- LP5521_REG_R_CURR_DEFAULT, buf);
- ret = -EINVAL;
- goto fail2;
- }
- usleep_range(10000, 20000);
+ mutex_init(&chip->lock);
- ret = lp5521_detect(client);
+ i2c_set_clientdata(client, led);
- if (ret) {
- dev_err(&client->dev, "Chip not found\n");
- goto fail2;
- }
+ ret = lp55xx_init_device(chip);
+ if (ret)
+ goto err_init;
dev_info(&client->dev, "%s programmable led chip found\n", id->name);
- ret = lp5521_configure(client);
- if (ret < 0) {
- dev_err(&client->dev, "error configuring chip\n");
- goto fail1;
- }
-
- /* Initialize leds */
- chip->num_channels = pdata->num_channels;
- chip->num_leds = 0;
- led = 0;
- for (i = 0; i < pdata->num_channels; i++) {
- /* Do not initialize channels that are not connected */
- if (pdata->led_config[i].led_current == 0)
- continue;
-
- ret = lp5521_init_led(&chip->leds[led], client, i, pdata);
- if (ret) {
- dev_err(&client->dev, "error initializing leds\n");
- goto fail2;
- }
- chip->num_leds++;
-
- chip->leds[led].id = led;
- /* Set initial LED current */
- lp5521_set_led_current(chip, led,
- chip->leds[led].led_current);
-
- INIT_WORK(&(chip->leds[led].brightness_work),
- lp5521_led_brightness_work);
-
- led++;
- }
+ ret = lp55xx_register_leds(led, chip);
+ if (ret)
+ goto err_register_leds;
- ret = lp5521_register_sysfs(client);
+ ret = lp55xx_register_sysfs(chip);
if (ret) {
dev_err(&client->dev, "registering sysfs failed\n");
- goto fail2;
- }
- return ret;
-fail2:
- for (i = 0; i < chip->num_leds; i++) {
- led_classdev_unregister(&chip->leds[i].cdev);
- cancel_work_sync(&chip->leds[i].brightness_work);
+ goto err_register_sysfs;
}
-fail1:
- if (pdata->enable)
- pdata->enable(0);
- if (pdata->release_resources)
- pdata->release_resources();
+
+ return 0;
+
+err_register_sysfs:
+ lp55xx_unregister_leds(led, chip);
+err_register_leds:
+ lp55xx_deinit_device(chip);
+err_init:
return ret;
}
static int lp5521_remove(struct i2c_client *client)
{
- struct lp5521_chip *chip = i2c_get_clientdata(client);
- int i;
+ struct lp55xx_led *led = i2c_get_clientdata(client);
+ struct lp55xx_chip *chip = led->chip;
- lp5521_run_led_pattern(PATTERN_OFF, chip);
- lp5521_unregister_sysfs(client);
-
- for (i = 0; i < chip->num_leds; i++) {
- led_classdev_unregister(&chip->leds[i].cdev);
- cancel_work_sync(&chip->leds[i].brightness_work);
- }
+ lp5521_stop_engine(chip);
+ lp55xx_unregister_sysfs(chip);
+ lp55xx_unregister_leds(led, chip);
+ lp55xx_deinit_device(chip);
- if (chip->pdata->enable)
- chip->pdata->enable(0);
- if (chip->pdata->release_resources)
- chip->pdata->release_resources();
return 0;
}
@@ -900,5 +481,6 @@ static struct i2c_driver lp5521_driver = {
module_i2c_driver(lp5521_driver);
MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
+MODULE_AUTHOR("Milo Kim <milo.kim@ti.com>");
MODULE_DESCRIPTION("LP5521 LED engine");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 7f5be8948cde..229f734040af 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -2,8 +2,10 @@
* lp5523.c - LP5523 LED Driver
*
* Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2012 Texas Instruments
*
* Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ * Milo(Woogyom) Kim <milo.kim@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -20,502 +22,351 @@
* 02110-1301 USA
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/mutex.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/ctype.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/leds.h>
-#include <linux/leds-lp5523.h>
-#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
+#include "leds-lp55xx-common.h"
+
+#define LP5523_PROGRAM_LENGTH 32
+#define LP5523_MAX_LEDS 9
+
+/* Registers */
#define LP5523_REG_ENABLE 0x00
#define LP5523_REG_OP_MODE 0x01
-#define LP5523_REG_RATIOMETRIC_MSB 0x02
-#define LP5523_REG_RATIOMETRIC_LSB 0x03
#define LP5523_REG_ENABLE_LEDS_MSB 0x04
#define LP5523_REG_ENABLE_LEDS_LSB 0x05
-#define LP5523_REG_LED_CNTRL_BASE 0x06
#define LP5523_REG_LED_PWM_BASE 0x16
#define LP5523_REG_LED_CURRENT_BASE 0x26
#define LP5523_REG_CONFIG 0x36
-#define LP5523_REG_CHANNEL1_PC 0x37
-#define LP5523_REG_CHANNEL2_PC 0x38
-#define LP5523_REG_CHANNEL3_PC 0x39
-#define LP5523_REG_STATUS 0x3a
-#define LP5523_REG_GPO 0x3b
-#define LP5523_REG_VARIABLE 0x3c
-#define LP5523_REG_RESET 0x3d
-#define LP5523_REG_TEMP_CTRL 0x3e
-#define LP5523_REG_TEMP_READ 0x3f
-#define LP5523_REG_TEMP_WRITE 0x40
+#define LP5523_REG_STATUS 0x3A
+#define LP5523_REG_RESET 0x3D
#define LP5523_REG_LED_TEST_CTRL 0x41
#define LP5523_REG_LED_TEST_ADC 0x42
-#define LP5523_REG_ENG1_VARIABLE 0x45
-#define LP5523_REG_ENG2_VARIABLE 0x46
-#define LP5523_REG_ENG3_VARIABLE 0x47
-#define LP5523_REG_MASTER_FADER1 0x48
-#define LP5523_REG_MASTER_FADER2 0x49
-#define LP5523_REG_MASTER_FADER3 0x4a
-#define LP5523_REG_CH1_PROG_START 0x4c
-#define LP5523_REG_CH2_PROG_START 0x4d
-#define LP5523_REG_CH3_PROG_START 0x4e
-#define LP5523_REG_PROG_PAGE_SEL 0x4f
+#define LP5523_REG_PROG_PAGE_SEL 0x4F
#define LP5523_REG_PROG_MEM 0x50
-#define LP5523_CMD_LOAD 0x15 /* 00010101 */
-#define LP5523_CMD_RUN 0x2a /* 00101010 */
-#define LP5523_CMD_DISABLED 0x00 /* 00000000 */
-
+/* Bit description in registers */
#define LP5523_ENABLE 0x40
#define LP5523_AUTO_INC 0x40
#define LP5523_PWR_SAVE 0x20
#define LP5523_PWM_PWR_SAVE 0x04
-#define LP5523_CP_1 0x08
-#define LP5523_CP_1_5 0x10
#define LP5523_CP_AUTO 0x18
-#define LP5523_INT_CLK 0x01
#define LP5523_AUTO_CLK 0x02
+
#define LP5523_EN_LEDTEST 0x80
#define LP5523_LEDTEST_DONE 0x80
-
-#define LP5523_DEFAULT_CURRENT 50 /* microAmps */
-#define LP5523_PROGRAM_LENGTH 32 /* in bytes */
-#define LP5523_PROGRAM_PAGES 6
+#define LP5523_RESET 0xFF
#define LP5523_ADC_SHORTCIRC_LIM 80
-
-#define LP5523_LEDS 9
-#define LP5523_ENGINES 3
-
-#define LP5523_ENG_MASK_BASE 0x30 /* 00110000 */
-
-#define LP5523_ENG_STATUS_MASK 0x07 /* 00000111 */
-
-#define LP5523_IRQ_FLAGS IRQF_TRIGGER_FALLING
-
#define LP5523_EXT_CLK_USED 0x08
-#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led)))
-#define SHIFT_MASK(id) (((id) - 1) * 2)
+/* Memory Page Selection */
+#define LP5523_PAGE_ENG1 0
+#define LP5523_PAGE_ENG2 1
+#define LP5523_PAGE_ENG3 2
+
+/* Program Memory Operations */
+#define LP5523_MODE_ENG1_M 0x30 /* Operation Mode Register */
+#define LP5523_MODE_ENG2_M 0x0C
+#define LP5523_MODE_ENG3_M 0x03
+#define LP5523_LOAD_ENG1 0x10
+#define LP5523_LOAD_ENG2 0x04
+#define LP5523_LOAD_ENG3 0x01
+
+#define LP5523_ENG1_IS_LOADING(mode) \
+ ((mode & LP5523_MODE_ENG1_M) == LP5523_LOAD_ENG1)
+#define LP5523_ENG2_IS_LOADING(mode) \
+ ((mode & LP5523_MODE_ENG2_M) == LP5523_LOAD_ENG2)
+#define LP5523_ENG3_IS_LOADING(mode) \
+ ((mode & LP5523_MODE_ENG3_M) == LP5523_LOAD_ENG3)
+
+#define LP5523_EXEC_ENG1_M 0x30 /* Enable Register */
+#define LP5523_EXEC_ENG2_M 0x0C
+#define LP5523_EXEC_ENG3_M 0x03
+#define LP5523_EXEC_M 0x3F
+#define LP5523_RUN_ENG1 0x20
+#define LP5523_RUN_ENG2 0x08
+#define LP5523_RUN_ENG3 0x02
enum lp5523_chip_id {
LP5523,
LP55231,
};
-struct lp5523_engine {
- int id;
- u8 mode;
- u8 prog_page;
- u8 mux_page;
- u16 led_mux;
- u8 engine_mask;
-};
-
-struct lp5523_led {
- int id;
- u8 chan_nr;
- u8 led_current;
- u8 max_current;
- struct led_classdev cdev;
- struct work_struct brightness_work;
- u8 brightness;
-};
-
-struct lp5523_chip {
- struct mutex lock; /* Serialize control */
- struct i2c_client *client;
- struct lp5523_engine engines[LP5523_ENGINES];
- struct lp5523_led leds[LP5523_LEDS];
- struct lp5523_platform_data *pdata;
- u8 num_channels;
- u8 num_leds;
-};
-
-static inline struct lp5523_led *cdev_to_led(struct led_classdev *cdev)
+static inline void lp5523_wait_opmode_done(void)
{
- return container_of(cdev, struct lp5523_led, cdev);
-}
-
-static inline struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine)
-{
- return container_of(engine, struct lp5523_chip,
- engines[engine->id - 1]);
-}
-
-static inline struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
-{
- return container_of(led, struct lp5523_chip,
- leds[led->id]);
+ usleep_range(1000, 2000);
}
-static void lp5523_set_mode(struct lp5523_engine *engine, u8 mode);
-static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode);
-static int lp5523_load_program(struct lp5523_engine *engine, const u8 *pattern);
-
-static void lp5523_led_brightness_work(struct work_struct *work);
-
-static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
+static void lp5523_set_led_current(struct lp55xx_led *led, u8 led_current)
{
- return i2c_smbus_write_byte_data(client, reg, value);
+ led->led_current = led_current;
+ lp55xx_write(led->chip, LP5523_REG_LED_CURRENT_BASE + led->chan_nr,
+ led_current);
}
-static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
-{
- s32 ret = i2c_smbus_read_byte_data(client, reg);
-
- if (ret < 0)
- return ret;
-
- *buf = ret;
- return 0;
-}
-
-static int lp5523_detect(struct i2c_client *client)
+static int lp5523_post_init_device(struct lp55xx_chip *chip)
{
int ret;
- u8 buf;
- ret = lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
- if (ret)
- return ret;
- ret = lp5523_read(client, LP5523_REG_ENABLE, &buf);
+ ret = lp55xx_write(chip, LP5523_REG_ENABLE, LP5523_ENABLE);
if (ret)
return ret;
- if (buf == 0x40)
- return 0;
- else
- return -ENODEV;
-}
-static int lp5523_configure(struct i2c_client *client)
-{
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- int ret = 0;
- u8 status;
-
- /* one pattern per engine setting led mux start and stop addresses */
- static const u8 pattern[][LP5523_PROGRAM_LENGTH] = {
- { 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
- { 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
- { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
- };
-
- ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
/* Chip startup time is 500 us, 1 - 2 ms gives some margin */
usleep_range(1000, 2000);
- ret |= lp5523_write(client, LP5523_REG_CONFIG,
+ ret = lp55xx_write(chip, LP5523_REG_CONFIG,
LP5523_AUTO_INC | LP5523_PWR_SAVE |
LP5523_CP_AUTO | LP5523_AUTO_CLK |
LP5523_PWM_PWR_SAVE);
+ if (ret)
+ return ret;
/* turn on all leds */
- ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01);
- ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
-
- /* hardcode 32 bytes of memory for each engine from program memory */
- ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00);
- ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10);
- ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20);
-
- /* write led mux address space for each channel */
- ret |= lp5523_load_program(&chip->engines[0], pattern[0]);
- ret |= lp5523_load_program(&chip->engines[1], pattern[1]);
- ret |= lp5523_load_program(&chip->engines[2], pattern[2]);
-
- if (ret) {
- dev_err(&client->dev, "could not load mux programs\n");
- return -1;
- }
-
- /* set all engines exec state and mode to run 00101010 */
- ret |= lp5523_write(client, LP5523_REG_ENABLE,
- (LP5523_CMD_RUN | LP5523_ENABLE));
-
- ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN);
-
- if (ret) {
- dev_err(&client->dev, "could not start mux programs\n");
- return -1;
- }
-
- /* Let the programs run for couple of ms and check the engine status */
- usleep_range(3000, 6000);
- ret = lp5523_read(client, LP5523_REG_STATUS, &status);
- if (ret < 0)
+ ret = lp55xx_write(chip, LP5523_REG_ENABLE_LEDS_MSB, 0x01);
+ if (ret)
return ret;
- status &= LP5523_ENG_STATUS_MASK;
-
- if (status == LP5523_ENG_STATUS_MASK) {
- dev_dbg(&client->dev, "all engines configured\n");
- } else {
- dev_info(&client->dev, "status == %x\n", status);
- dev_err(&client->dev, "cound not configure LED engine\n");
- return -1;
- }
-
- dev_info(&client->dev, "disabling engines\n");
-
- ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
-
- return ret;
+ return lp55xx_write(chip, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
}
-static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode)
+static void lp5523_load_engine(struct lp55xx_chip *chip)
{
- struct lp5523_chip *chip = engine_to_lp5523(engine);
- struct i2c_client *client = chip->client;
- int ret;
- u8 engine_state;
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ u8 mask[] = {
+ [LP55XX_ENGINE_1] = LP5523_MODE_ENG1_M,
+ [LP55XX_ENGINE_2] = LP5523_MODE_ENG2_M,
+ [LP55XX_ENGINE_3] = LP5523_MODE_ENG3_M,
+ };
- ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state);
- if (ret)
- goto fail;
+ u8 val[] = {
+ [LP55XX_ENGINE_1] = LP5523_LOAD_ENG1,
+ [LP55XX_ENGINE_2] = LP5523_LOAD_ENG2,
+ [LP55XX_ENGINE_3] = LP5523_LOAD_ENG3,
+ };
- engine_state &= ~(engine->engine_mask);
+ u8 page_sel[] = {
+ [LP55XX_ENGINE_1] = LP5523_PAGE_ENG1,
+ [LP55XX_ENGINE_2] = LP5523_PAGE_ENG2,
+ [LP55XX_ENGINE_3] = LP5523_PAGE_ENG3,
+ };
- /* set mode only for this engine */
- mode &= engine->engine_mask;
+ lp55xx_update_bits(chip, LP5523_REG_OP_MODE, mask[idx], val[idx]);
- engine_state |= mode;
+ lp5523_wait_opmode_done();
- ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state);
-fail:
- return ret;
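+	/* select the program memory page assigned to this engine */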
+ lp55xx_write(chip, LP5523_REG_PROG_PAGE_SEL, page_sel[idx]);
}
-static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux)
+static void lp5523_stop_engine(struct lp55xx_chip *chip)
{
- struct lp5523_chip *chip = engine_to_lp5523(engine);
- struct i2c_client *client = chip->client;
- int ret = 0;
-
- ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+ lp55xx_write(chip, LP5523_REG_OP_MODE, 0);
+ lp5523_wait_opmode_done();
+}
- ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page);
- ret |= lp5523_write(client, LP5523_REG_PROG_MEM,
- (u8)(mux >> 8));
- ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux));
- engine->led_mux = mux;
+static void lp5523_turn_off_channels(struct lp55xx_chip *chip)
+{
+ int i;
- return ret;
+ for (i = 0; i < LP5523_MAX_LEDS; i++)
+ lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + i, 0);
}
-static int lp5523_load_program(struct lp5523_engine *engine, const u8 *pattern)
+static void lp5523_run_engine(struct lp55xx_chip *chip, bool start)
{
- struct lp5523_chip *chip = engine_to_lp5523(engine);
- struct i2c_client *client = chip->client;
+ int ret;
+ u8 mode;
+ u8 exec;
- int ret = 0;
+ /* stop engine */
+ if (!start) {
+ lp5523_stop_engine(chip);
+ lp5523_turn_off_channels(chip);
+ return;
+ }
- ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+ /*
+ * To run the engine,
+	 * the operation mode and enable registers should be updated at the same time
+ */
- ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL,
- engine->prog_page);
- ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM,
- LP5523_PROGRAM_LENGTH, pattern);
+ ret = lp55xx_read(chip, LP5523_REG_OP_MODE, &mode);
+ if (ret)
+ return;
- return ret;
-}
+ ret = lp55xx_read(chip, LP5523_REG_ENABLE, &exec);
+ if (ret)
+ return;
-static int lp5523_run_program(struct lp5523_engine *engine)
-{
- struct lp5523_chip *chip = engine_to_lp5523(engine);
- struct i2c_client *client = chip->client;
- int ret;
+	/* change operation mode to RUN only for engines that are in LOAD mode */
+ if (LP5523_ENG1_IS_LOADING(mode)) {
+ mode = (mode & ~LP5523_MODE_ENG1_M) | LP5523_RUN_ENG1;
+ exec = (exec & ~LP5523_EXEC_ENG1_M) | LP5523_RUN_ENG1;
+ }
- ret = lp5523_write(client, LP5523_REG_ENABLE,
- LP5523_CMD_RUN | LP5523_ENABLE);
- if (ret)
- goto fail;
+ if (LP5523_ENG2_IS_LOADING(mode)) {
+ mode = (mode & ~LP5523_MODE_ENG2_M) | LP5523_RUN_ENG2;
+ exec = (exec & ~LP5523_EXEC_ENG2_M) | LP5523_RUN_ENG2;
+ }
- ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN);
-fail:
- return ret;
+ if (LP5523_ENG3_IS_LOADING(mode)) {
+ mode = (mode & ~LP5523_MODE_ENG3_M) | LP5523_RUN_ENG3;
+ exec = (exec & ~LP5523_EXEC_ENG3_M) | LP5523_RUN_ENG3;
+ }
+
+ lp55xx_write(chip, LP5523_REG_OP_MODE, mode);
+ lp5523_wait_opmode_done();
+
+ lp55xx_update_bits(chip, LP5523_REG_ENABLE, LP5523_EXEC_M, exec);
}
-static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
+static int lp5523_update_program_memory(struct lp55xx_chip *chip,
+ const u8 *data, size_t size)
{
+ u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
+ unsigned cmd;
+ char c[3];
+ int update_size;
+ int nrchars;
+ int offset = 0;
+ int ret;
int i;
- u16 tmp_mux = 0;
-
- len = min_t(int, len, LP5523_LEDS);
- for (i = 0; i < len; i++) {
- switch (buf[i]) {
- case '1':
- tmp_mux |= (1 << i);
- break;
- case '0':
- break;
- case '\n':
- i = len;
- break;
- default:
- return -1;
- }
- }
- *mux = tmp_mux;
- return 0;
-}
+ /* clear program memory before updating */
+ for (i = 0; i < LP5523_PROGRAM_LENGTH; i++)
+ lp55xx_write(chip, LP5523_REG_PROG_MEM + i, 0);
-static void lp5523_mux_to_array(u16 led_mux, char *array)
-{
- int i, pos = 0;
- for (i = 0; i < LP5523_LEDS; i++)
- pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i));
+ i = 0;
+ while ((offset < size - 1) && (i < LP5523_PROGRAM_LENGTH)) {
+		/* separate sscanfs because the length modifier works only for %s */
+ ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
+ if (ret != 1)
+ goto err;
- array[pos] = '\0';
-}
+ ret = sscanf(c, "%2x", &cmd);
+ if (ret != 1)
+ goto err;
-/*--------------------------------------------------------------*/
-/* Sysfs interface */
-/*--------------------------------------------------------------*/
+ pattern[i] = (u8)cmd;
+ offset += nrchars;
+ i++;
+ }
-static ssize_t show_engine_leds(struct device *dev,
- struct device_attribute *attr,
- char *buf, int nr)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- char mux[LP5523_LEDS + 1];
+ /* Each instruction is 16bit long. Check that length is even */
+ if (i % 2)
+ goto err;
- lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux);
+ update_size = i;
+ for (i = 0; i < update_size; i++)
+ lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
- return sprintf(buf, "%s\n", mux);
-}
+ return 0;
-#define show_leds(nr) \
-static ssize_t show_engine##nr##_leds(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- return show_engine_leds(dev, attr, buf, nr); \
+err:
+ dev_err(&chip->cl->dev, "wrong pattern format\n");
+ return -EINVAL;
}
-show_leds(1)
-show_leds(2)
-show_leds(3)
-static ssize_t store_engine_leds(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
+static void lp5523_firmware_loaded(struct lp55xx_chip *chip)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- u16 mux = 0;
- ssize_t ret;
+ const struct firmware *fw = chip->fw;
- if (lp5523_mux_parse(buf, &mux, len))
- return -EINVAL;
-
- mutex_lock(&chip->lock);
- ret = -EINVAL;
- if (chip->engines[nr - 1].mode != LP5523_CMD_LOAD)
- goto leave;
+ if (fw->size > LP5523_PROGRAM_LENGTH) {
+ dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
+ fw->size);
+ return;
+ }
- if (lp5523_load_mux(&chip->engines[nr - 1], mux))
- goto leave;
+ /*
+	 * Program memory sequence
+ * 1) set engine mode to "LOAD"
+ * 2) write firmware data into program memory
+ */
- ret = len;
-leave:
- mutex_unlock(&chip->lock);
- return ret;
+ lp5523_load_engine(chip);
+ lp5523_update_program_memory(chip, fw->data, fw->size);
}
-#define store_leds(nr) \
-static ssize_t store_engine##nr##_leds(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
-{ \
- return store_engine_leds(dev, attr, buf, len, nr); \
-}
-store_leds(1)
-store_leds(2)
-store_leds(3)
-
static ssize_t lp5523_selftest(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5523_chip *chip = i2c_get_clientdata(client);
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ struct lp55xx_platform_data *pdata = chip->pdata;
int i, ret, pos = 0;
- int led = 0;
u8 status, adc, vdd;
mutex_lock(&chip->lock);
- ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret < 0)
goto fail;
/* Check that ext clock is really in use if requested */
- if ((chip->pdata) && (chip->pdata->clock_mode == LP5523_CLOCK_EXT))
+ if (pdata->clock_mode == LP55XX_CLOCK_EXT) {
if ((status & LP5523_EXT_CLK_USED) == 0)
goto fail;
+ }
/* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */
- lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL,
- LP5523_EN_LEDTEST | 16);
+ lp55xx_write(chip, LP5523_REG_LED_TEST_CTRL, LP5523_EN_LEDTEST | 16);
usleep_range(3000, 6000); /* ADC conversion time is typically 2.7 ms */
- ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret < 0)
goto fail;
if (!(status & LP5523_LEDTEST_DONE))
usleep_range(3000, 6000); /* Was not ready. Wait little bit */
- ret = lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
+ ret = lp55xx_read(chip, LP5523_REG_LED_TEST_ADC, &vdd);
if (ret < 0)
goto fail;
vdd--; /* There may be some fluctuation in measurement */
- for (i = 0; i < LP5523_LEDS; i++) {
+ for (i = 0; i < LP5523_MAX_LEDS; i++) {
/* Skip non-existing channels */
- if (chip->pdata->led_config[i].led_current == 0)
+ if (pdata->led_config[i].led_current == 0)
continue;
/* Set default current */
- lp5523_write(chip->client,
- LP5523_REG_LED_CURRENT_BASE + i,
- chip->pdata->led_config[i].led_current);
+ lp55xx_write(chip, LP5523_REG_LED_CURRENT_BASE + i,
+ pdata->led_config[i].led_current);
- lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff);
+ lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + i, 0xff);
/* let current stabilize 2 - 4ms before measurements start */
usleep_range(2000, 4000);
- lp5523_write(chip->client,
- LP5523_REG_LED_TEST_CTRL,
+ lp55xx_write(chip, LP5523_REG_LED_TEST_CTRL,
LP5523_EN_LEDTEST | i);
/* ADC conversion time is 2.7 ms typically */
usleep_range(3000, 6000);
- ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret < 0)
goto fail;
if (!(status & LP5523_LEDTEST_DONE))
usleep_range(3000, 6000);/* Was not ready. Wait. */
- ret = lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
+
+ ret = lp55xx_read(chip, LP5523_REG_LED_TEST_ADC, &adc);
if (ret < 0)
goto fail;
if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
pos += sprintf(buf + pos, "LED %d FAIL\n", i);
- lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00);
+ lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + i, 0x00);
/* Restore current */
- lp5523_write(chip->client,
- LP5523_REG_LED_CURRENT_BASE + i,
- chip->leds[led].led_current);
+ lp55xx_write(chip, LP5523_REG_LED_CURRENT_BASE + i,
+ led->led_current);
led++;
}
if (pos == 0)
@@ -530,249 +381,22 @@ release_lock:
return pos;
}
-static void lp5523_set_brightness(struct led_classdev *cdev,
- enum led_brightness brightness)
-{
- struct lp5523_led *led = cdev_to_led(cdev);
-
- led->brightness = (u8)brightness;
-
- schedule_work(&led->brightness_work);
-}
-
static void lp5523_led_brightness_work(struct work_struct *work)
{
- struct lp5523_led *led = container_of(work,
- struct lp5523_led,
+ struct lp55xx_led *led = container_of(work, struct lp55xx_led,
brightness_work);
- struct lp5523_chip *chip = led_to_lp5523(led);
- struct i2c_client *client = chip->client;
+ struct lp55xx_chip *chip = led->chip;
mutex_lock(&chip->lock);
-
- lp5523_write(client, LP5523_REG_LED_PWM_BASE + led->chan_nr,
+ lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + led->chan_nr,
led->brightness);
-
- mutex_unlock(&chip->lock);
-}
-
-static int lp5523_do_store_load(struct lp5523_engine *engine,
- const char *buf, size_t len)
-{
- struct lp5523_chip *chip = engine_to_lp5523(engine);
- struct i2c_client *client = chip->client;
- int ret, nrchars, offset = 0, i = 0;
- char c[3];
- unsigned cmd;
- u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
-
- if (engine->mode != LP5523_CMD_LOAD)
- return -EINVAL;
-
- while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) {
- /* separate sscanfs because length is working only for %s */
- ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
- ret = sscanf(c, "%2x", &cmd);
- if (ret != 1)
- goto fail;
- pattern[i] = (u8)cmd;
-
- offset += nrchars;
- i++;
- }
-
- /* Each instruction is 16bit long. Check that length is even */
- if (i % 2)
- goto fail;
-
- mutex_lock(&chip->lock);
- ret = lp5523_load_program(engine, pattern);
mutex_unlock(&chip->lock);
-
- if (ret) {
- dev_err(&client->dev, "failed loading pattern\n");
- return ret;
- }
-
- return len;
-fail:
- dev_err(&client->dev, "wrong pattern format\n");
- return -EINVAL;
-}
-
-static ssize_t store_engine_load(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- return lp5523_do_store_load(&chip->engines[nr - 1], buf, len);
}
-#define store_load(nr) \
-static ssize_t store_engine##nr##_load(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
-{ \
- return store_engine_load(dev, attr, buf, len, nr); \
-}
-store_load(1)
-store_load(2)
-store_load(3)
-
-static ssize_t show_engine_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf, int nr)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- switch (chip->engines[nr - 1].mode) {
- case LP5523_CMD_RUN:
- return sprintf(buf, "run\n");
- case LP5523_CMD_LOAD:
- return sprintf(buf, "load\n");
- case LP5523_CMD_DISABLED:
- return sprintf(buf, "disabled\n");
- default:
- return sprintf(buf, "disabled\n");
- }
-}
-
-#define show_mode(nr) \
-static ssize_t show_engine##nr##_mode(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- return show_engine_mode(dev, attr, buf, nr); \
-}
-show_mode(1)
-show_mode(2)
-show_mode(3)
-
-static ssize_t store_engine_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- struct lp5523_engine *engine = &chip->engines[nr - 1];
- mutex_lock(&chip->lock);
-
- if (!strncmp(buf, "run", 3))
- lp5523_set_mode(engine, LP5523_CMD_RUN);
- else if (!strncmp(buf, "load", 4))
- lp5523_set_mode(engine, LP5523_CMD_LOAD);
- else if (!strncmp(buf, "disabled", 8))
- lp5523_set_mode(engine, LP5523_CMD_DISABLED);
-
- mutex_unlock(&chip->lock);
- return len;
-}
-
-#define store_mode(nr) \
-static ssize_t store_engine##nr##_mode(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
-{ \
- return store_engine_mode(dev, attr, buf, len, nr); \
-}
-store_mode(1)
-store_mode(2)
-store_mode(3)
-
-static ssize_t show_max_current(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lp5523_led *led = cdev_to_led(led_cdev);
-
- return sprintf(buf, "%d\n", led->max_current);
-}
-
-static ssize_t show_current(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lp5523_led *led = cdev_to_led(led_cdev);
-
- return sprintf(buf, "%d\n", led->led_current);
-}
-
-static ssize_t store_current(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lp5523_led *led = cdev_to_led(led_cdev);
- struct lp5523_chip *chip = led_to_lp5523(led);
- ssize_t ret;
- unsigned long curr;
-
- if (kstrtoul(buf, 0, &curr))
- return -EINVAL;
-
- if (curr > led->max_current)
- return -EINVAL;
-
- mutex_lock(&chip->lock);
- ret = lp5523_write(chip->client,
- LP5523_REG_LED_CURRENT_BASE + led->chan_nr,
- (u8)curr);
- mutex_unlock(&chip->lock);
-
- if (ret < 0)
- return ret;
-
- led->led_current = (u8)curr;
-
- return len;
-}
-
-/* led class device attributes */
-static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current);
-static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
-
-static struct attribute *lp5523_led_attributes[] = {
- &dev_attr_led_current.attr,
- &dev_attr_max_current.attr,
- NULL,
-};
-
-static struct attribute_group lp5523_led_attribute_group = {
- .attrs = lp5523_led_attributes
-};
-
-/* device attributes */
-static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR,
- show_engine1_mode, store_engine1_mode);
-static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR,
- show_engine2_mode, store_engine2_mode);
-static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR,
- show_engine3_mode, store_engine3_mode);
-static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUSR,
- show_engine1_leds, store_engine1_leds);
-static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUSR,
- show_engine2_leds, store_engine2_leds);
-static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUSR,
- show_engine3_leds, store_engine3_leds);
-static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load);
-static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load);
-static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load);
static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
static struct attribute *lp5523_attributes[] = {
- &dev_attr_engine1_mode.attr,
- &dev_attr_engine2_mode.attr,
- &dev_attr_engine3_mode.attr,
&dev_attr_selftest.attr,
- &dev_attr_engine1_load.attr,
- &dev_attr_engine1_leds.attr,
- &dev_attr_engine2_load.attr,
- &dev_attr_engine2_leds.attr,
- &dev_attr_engine3_load.attr,
- &dev_attr_engine3_leds.attr,
NULL,
};
@@ -780,252 +404,91 @@ static const struct attribute_group lp5523_group = {
.attrs = lp5523_attributes,
};
-static int lp5523_register_sysfs(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
- int ret;
-
- ret = sysfs_create_group(&dev->kobj, &lp5523_group);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static void lp5523_unregister_sysfs(struct i2c_client *client)
-{
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- struct device *dev = &client->dev;
- int i;
-
- sysfs_remove_group(&dev->kobj, &lp5523_group);
-
- for (i = 0; i < chip->num_leds; i++)
- sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
- &lp5523_led_attribute_group);
-}
-
-/*--------------------------------------------------------------*/
-/* Set chip operating mode */
-/*--------------------------------------------------------------*/
-static void lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
-{
- /* if in that mode already do nothing, except for run */
- if (mode == engine->mode && mode != LP5523_CMD_RUN)
- return;
-
- switch (mode) {
- case LP5523_CMD_RUN:
- lp5523_run_program(engine);
- break;
- case LP5523_CMD_LOAD:
- lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
- lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
- break;
- case LP5523_CMD_DISABLED:
- lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
- break;
- default:
- return;
- }
-
- engine->mode = mode;
-}
-
-/*--------------------------------------------------------------*/
-/* Probe, Attach, Remove */
-/*--------------------------------------------------------------*/
-static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
-{
- if (id < 1 || id > LP5523_ENGINES)
- return -1;
- engine->id = id;
- engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
- engine->prog_page = id - 1;
- engine->mux_page = id + 2;
-
- return 0;
-}
+/* Chip specific configurations */
+static struct lp55xx_device_config lp5523_cfg = {
+ .reset = {
+ .addr = LP5523_REG_RESET,
+ .val = LP5523_RESET,
+ },
+ .enable = {
+ .addr = LP5523_REG_ENABLE,
+ .val = LP5523_ENABLE,
+ },
+ .max_channel = LP5523_MAX_LEDS,
+ .post_init_device = lp5523_post_init_device,
+ .brightness_work_fn = lp5523_led_brightness_work,
+ .set_led_current = lp5523_set_led_current,
+ .firmware_cb = lp5523_firmware_loaded,
+ .run_engine = lp5523_run_engine,
+ .dev_attr_group = &lp5523_group,
+};
-static int lp5523_init_led(struct lp5523_led *led, struct device *dev,
- int chan, struct lp5523_platform_data *pdata,
- const char *chip_name)
+static int lp5523_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
- char name[32];
- int res;
+ int ret;
+ struct lp55xx_chip *chip;
+ struct lp55xx_led *led;
+ struct lp55xx_platform_data *pdata = client->dev.platform_data;
- if (chan >= LP5523_LEDS)
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data\n");
return -EINVAL;
-
- if (pdata->led_config[chan].led_current) {
- led->led_current = pdata->led_config[chan].led_current;
- led->max_current = pdata->led_config[chan].max_current;
- led->chan_nr = pdata->led_config[chan].chan_nr;
-
- if (led->chan_nr >= LP5523_LEDS) {
- dev_err(dev, "Use channel numbers between 0 and %d\n",
- LP5523_LEDS - 1);
- return -EINVAL;
- }
-
- if (pdata->led_config[chan].name) {
- led->cdev.name = pdata->led_config[chan].name;
- } else {
- snprintf(name, sizeof(name), "%s:channel%d",
- pdata->label ? : chip_name, chan);
- led->cdev.name = name;
- }
-
- led->cdev.brightness_set = lp5523_set_brightness;
- res = led_classdev_register(dev, &led->cdev);
- if (res < 0) {
- dev_err(dev, "couldn't register led on channel %d\n",
- chan);
- return res;
- }
- res = sysfs_create_group(&led->cdev.dev->kobj,
- &lp5523_led_attribute_group);
- if (res < 0) {
- dev_err(dev, "couldn't register current attribute\n");
- led_classdev_unregister(&led->cdev);
- return res;
- }
- } else {
- led->led_current = 0;
}
- return 0;
-}
-
-static int lp5523_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct lp5523_chip *chip;
- struct lp5523_platform_data *pdata;
- int ret, i, led;
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- i2c_set_clientdata(client, chip);
- chip->client = client;
+ led = devm_kzalloc(&client->dev,
+ sizeof(*led) * pdata->num_channels, GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
- pdata = client->dev.platform_data;
-
- if (!pdata) {
- dev_err(&client->dev, "no platform data\n");
- return -EINVAL;
- }
+ chip->cl = client;
+ chip->pdata = pdata;
+ chip->cfg = &lp5523_cfg;
mutex_init(&chip->lock);
- chip->pdata = pdata;
+ i2c_set_clientdata(client, led);
- if (pdata->setup_resources) {
- ret = pdata->setup_resources();
- if (ret < 0)
- return ret;
- }
-
- if (pdata->enable) {
- pdata->enable(0);
- usleep_range(1000, 2000); /* Keep enable down at least 1ms */
- pdata->enable(1);
- usleep_range(1000, 2000); /* 500us abs min. */
- }
-
- lp5523_write(client, LP5523_REG_RESET, 0xff);
- usleep_range(10000, 20000); /*
- * Exact value is not available. 10 - 20ms
- * appears to be enough for reset.
- */
- ret = lp5523_detect(client);
+ ret = lp55xx_init_device(chip);
if (ret)
- goto fail1;
+ goto err_init;
dev_info(&client->dev, "%s Programmable led chip found\n", id->name);
- /* Initialize engines */
- for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
- ret = lp5523_init_engine(&chip->engines[i], i + 1);
- if (ret) {
- dev_err(&client->dev, "error initializing engine\n");
- goto fail1;
- }
- }
- ret = lp5523_configure(client);
- if (ret < 0) {
- dev_err(&client->dev, "error configuring chip\n");
- goto fail1;
- }
-
- /* Initialize leds */
- chip->num_channels = pdata->num_channels;
- chip->num_leds = 0;
- led = 0;
- for (i = 0; i < pdata->num_channels; i++) {
- /* Do not initialize channels that are not connected */
- if (pdata->led_config[i].led_current == 0)
- continue;
-
- INIT_WORK(&chip->leds[led].brightness_work,
- lp5523_led_brightness_work);
-
- ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata,
- id->name);
- if (ret) {
- dev_err(&client->dev, "error initializing leds\n");
- goto fail2;
- }
- chip->num_leds++;
-
- chip->leds[led].id = led;
- /* Set LED current */
- lp5523_write(client,
- LP5523_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
- chip->leds[led].led_current);
-
- led++;
- }
+ ret = lp55xx_register_leds(led, chip);
+ if (ret)
+ goto err_register_leds;
- ret = lp5523_register_sysfs(client);
+ ret = lp55xx_register_sysfs(chip);
if (ret) {
dev_err(&client->dev, "registering sysfs failed\n");
- goto fail2;
+ goto err_register_sysfs;
}
- return ret;
-fail2:
- for (i = 0; i < chip->num_leds; i++) {
- led_classdev_unregister(&chip->leds[i].cdev);
- flush_work(&chip->leds[i].brightness_work);
- }
-fail1:
- if (pdata->enable)
- pdata->enable(0);
- if (pdata->release_resources)
- pdata->release_resources();
+
+ return 0;
+
+err_register_sysfs:
+ lp55xx_unregister_leds(led, chip);
+err_register_leds:
+ lp55xx_deinit_device(chip);
+err_init:
return ret;
}
static int lp5523_remove(struct i2c_client *client)
{
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- int i;
-
- /* Disable engine mode */
- lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
+ struct lp55xx_led *led = i2c_get_clientdata(client);
+ struct lp55xx_chip *chip = led->chip;
- lp5523_unregister_sysfs(client);
-
- for (i = 0; i < chip->num_leds; i++) {
- led_classdev_unregister(&chip->leds[i].cdev);
- flush_work(&chip->leds[i].brightness_work);
- }
+ lp5523_stop_engine(chip);
+ lp55xx_unregister_sysfs(chip);
+ lp55xx_unregister_leds(led, chip);
+ lp55xx_deinit_device(chip);
- if (chip->pdata->enable)
- chip->pdata->enable(0);
- if (chip->pdata->release_resources)
- chip->pdata->release_resources();
return 0;
}
@@ -1049,5 +512,6 @@ static struct i2c_driver lp5523_driver = {
module_i2c_driver(lp5523_driver);
MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
+MODULE_AUTHOR("Milo Kim <milo.kim@ti.com>");
MODULE_DESCRIPTION("LP5523 LED engine");
MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
new file mode 100644
index 000000000000..d9eb84157423
--- /dev/null
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -0,0 +1,523 @@
+/*
+ * LP5521/LP5523/LP55231 Common Driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from leds-lp5521.c, leds-lp5523.c
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_data/leds-lp55xx.h>
+
+#include "leds-lp55xx-common.h"
+
+static struct lp55xx_led *cdev_to_lp55xx_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct lp55xx_led, cdev);
+}
+
+static struct lp55xx_led *dev_to_lp55xx_led(struct device *dev)
+{
+ return cdev_to_lp55xx_led(dev_get_drvdata(dev));
+}
+
+static void lp55xx_reset_device(struct lp55xx_chip *chip)
+{
+ struct lp55xx_device_config *cfg = chip->cfg;
+ u8 addr = cfg->reset.addr;
+ u8 val = cfg->reset.val;
+
+ /* no error checking here because no ACK from the device after reset */
+ lp55xx_write(chip, addr, val);
+}
+
+static int lp55xx_detect_device(struct lp55xx_chip *chip)
+{
+ struct lp55xx_device_config *cfg = chip->cfg;
+ u8 addr = cfg->enable.addr;
+ u8 val = cfg->enable.val;
+ int ret;
+
+ ret = lp55xx_write(chip, addr, val);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 2000);
+
+ ret = lp55xx_read(chip, addr, &val);
+ if (ret)
+ return ret;
+
+ if (val != cfg->enable.val)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int lp55xx_post_init_device(struct lp55xx_chip *chip)
+{
+ struct lp55xx_device_config *cfg = chip->cfg;
+
+ if (!cfg->post_init_device)
+ return 0;
+
+ return cfg->post_init_device(chip);
+}
+
+static ssize_t lp55xx_show_current(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lp55xx_led *led = dev_to_lp55xx_led(dev);
+
+ return sprintf(buf, "%d\n", led->led_current);
+}
+
+static ssize_t lp55xx_store_current(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lp55xx_led *led = dev_to_lp55xx_led(dev);
+ struct lp55xx_chip *chip = led->chip;
+ unsigned long curr;
+
+ if (kstrtoul(buf, 0, &curr))
+ return -EINVAL;
+
+ if (curr > led->max_current)
+ return -EINVAL;
+
+ if (!chip->cfg->set_led_current)
+ return len;
+
+ mutex_lock(&chip->lock);
+ chip->cfg->set_led_current(led, (u8)curr);
+ mutex_unlock(&chip->lock);
+
+ return len;
+}
+
+static ssize_t lp55xx_show_max_current(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lp55xx_led *led = dev_to_lp55xx_led(dev);
+
+ return sprintf(buf, "%d\n", led->max_current);
+}
+
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, lp55xx_show_current,
+ lp55xx_store_current);
+static DEVICE_ATTR(max_current, S_IRUGO , lp55xx_show_max_current, NULL);
+
+static struct attribute *lp55xx_led_attributes[] = {
+ &dev_attr_led_current.attr,
+ &dev_attr_max_current.attr,
+ NULL,
+};
+
+static struct attribute_group lp55xx_led_attr_group = {
+ .attrs = lp55xx_led_attributes
+};
+
+static void lp55xx_set_brightness(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct lp55xx_led *led = cdev_to_lp55xx_led(cdev);
+
+ led->brightness = (u8)brightness;
+ schedule_work(&led->brightness_work);
+}
+
+static int lp55xx_init_led(struct lp55xx_led *led,
+ struct lp55xx_chip *chip, int chan)
+{
+ struct lp55xx_platform_data *pdata = chip->pdata;
+ struct lp55xx_device_config *cfg = chip->cfg;
+ struct device *dev = &chip->cl->dev;
+ char name[32];
+ int ret;
+ int max_channel = cfg->max_channel;
+
+ if (chan >= max_channel) {
+ dev_err(dev, "invalid channel: %d / %d\n", chan, max_channel);
+ return -EINVAL;
+ }
+
+ if (pdata->led_config[chan].led_current == 0)
+ return 0;
+
+ led->led_current = pdata->led_config[chan].led_current;
+ led->max_current = pdata->led_config[chan].max_current;
+ led->chan_nr = pdata->led_config[chan].chan_nr;
+
+ if (led->chan_nr >= max_channel) {
+ dev_err(dev, "Use channel numbers between 0 and %d\n",
+ max_channel - 1);
+ return -EINVAL;
+ }
+
+ led->cdev.brightness_set = lp55xx_set_brightness;
+
+ if (pdata->led_config[chan].name) {
+ led->cdev.name = pdata->led_config[chan].name;
+ } else {
+ snprintf(name, sizeof(name), "%s:channel%d",
+ pdata->label ? : chip->cl->name, chan);
+ led->cdev.name = name;
+ }
+
+ /*
+ * register led class device for each channel and
+ * add device attributes
+ */
+
+ ret = led_classdev_register(dev, &led->cdev);
+ if (ret) {
+ dev_err(dev, "led register err: %d\n", ret);
+ return ret;
+ }
+
+ ret = sysfs_create_group(&led->cdev.dev->kobj, &lp55xx_led_attr_group);
+ if (ret) {
+ dev_err(dev, "led sysfs err: %d\n", ret);
+ led_classdev_unregister(&led->cdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
+{
+ struct lp55xx_chip *chip = context;
+ struct device *dev = &chip->cl->dev;
+
+ if (!fw) {
+ dev_err(dev, "firmware request failed\n");
+ goto out;
+ }
+
+ /* handling firmware data is chip dependent */
+ mutex_lock(&chip->lock);
+
+ chip->fw = fw;
+ if (chip->cfg->firmware_cb)
+ chip->cfg->firmware_cb(chip);
+
+ mutex_unlock(&chip->lock);
+
+out:
+ /* firmware should be released for other channel use */
+ release_firmware(chip->fw);
+}
+
+static int lp55xx_request_firmware(struct lp55xx_chip *chip)
+{
+ const char *name = chip->cl->name;
+ struct device *dev = &chip->cl->dev;
+
+ return request_firmware_nowait(THIS_MODULE, true, name, dev,
+ GFP_KERNEL, chip, lp55xx_firmware_loaded);
+}
+
+static ssize_t lp55xx_show_engine_select(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+
+ return sprintf(buf, "%d\n", chip->engine_idx);
+}
+
+static ssize_t lp55xx_store_engine_select(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ /* select the engine to be run */
+
+ switch (val) {
+ case LP55XX_ENGINE_1:
+ case LP55XX_ENGINE_2:
+ case LP55XX_ENGINE_3:
+ mutex_lock(&chip->lock);
+ chip->engine_idx = val;
+ ret = lp55xx_request_firmware(chip);
+ mutex_unlock(&chip->lock);
+ break;
+ default:
+ dev_err(dev, "%lu: invalid engine index. (1, 2, 3)\n", val);
+ return -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(dev, "request firmware err: %d\n", ret);
+ return ret;
+ }
+
+ return len;
+}
+
+static inline void lp55xx_run_engine(struct lp55xx_chip *chip, bool start)
+{
+ if (chip->cfg->run_engine)
+ chip->cfg->run_engine(chip, start);
+}
+
+static ssize_t lp55xx_store_engine_run(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ /* run or stop the selected engine */
+
+ if (val <= 0) {
+ lp55xx_run_engine(chip, false);
+ return len;
+ }
+
+ mutex_lock(&chip->lock);
+ lp55xx_run_engine(chip, true);
+ mutex_unlock(&chip->lock);
+
+ return len;
+}
+
+static DEVICE_ATTR(select_engine, S_IRUGO | S_IWUSR,
+ lp55xx_show_engine_select, lp55xx_store_engine_select);
+static DEVICE_ATTR(run_engine, S_IWUSR, NULL, lp55xx_store_engine_run);
+
+static struct attribute *lp55xx_engine_attributes[] = {
+ &dev_attr_select_engine.attr,
+ &dev_attr_run_engine.attr,
+ NULL,
+};
+
+static const struct attribute_group lp55xx_engine_attr_group = {
+ .attrs = lp55xx_engine_attributes,
+};
+
+int lp55xx_write(struct lp55xx_chip *chip, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(chip->cl, reg, val);
+}
+EXPORT_SYMBOL_GPL(lp55xx_write);
+
+int lp55xx_read(struct lp55xx_chip *chip, u8 reg, u8 *val)
+{
+ s32 ret;
+
+ ret = i2c_smbus_read_byte_data(chip->cl, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lp55xx_read);
+
+int lp55xx_update_bits(struct lp55xx_chip *chip, u8 reg, u8 mask, u8 val)
+{
+ int ret;
+ u8 tmp;
+
+ ret = lp55xx_read(chip, reg, &tmp);
+ if (ret)
+ return ret;
+
+ tmp &= ~mask;
+ tmp |= val & mask;
+
+ return lp55xx_write(chip, reg, tmp);
+}
+EXPORT_SYMBOL_GPL(lp55xx_update_bits);
+
+int lp55xx_init_device(struct lp55xx_chip *chip)
+{
+ struct lp55xx_platform_data *pdata;
+ struct lp55xx_device_config *cfg;
+ struct device *dev = &chip->cl->dev;
+ int ret = 0;
+
+ WARN_ON(!chip);
+
+ pdata = chip->pdata;
+ cfg = chip->cfg;
+
+ if (!pdata || !cfg)
+ return -EINVAL;
+
+ if (pdata->setup_resources) {
+ ret = pdata->setup_resources();
+ if (ret < 0) {
+ dev_err(dev, "setup resoure err: %d\n", ret);
+ goto err;
+ }
+ }
+
+ if (pdata->enable) {
+ pdata->enable(0);
+ usleep_range(1000, 2000); /* Keep enable down at least 1ms */
+ pdata->enable(1);
+ usleep_range(1000, 2000); /* 500us abs min. */
+ }
+
+ lp55xx_reset_device(chip);
+
+ /*
+ * Exact value is not available. 10 - 20ms
+ * appears to be enough for reset.
+ */
+ usleep_range(10000, 20000);
+
+ ret = lp55xx_detect_device(chip);
+ if (ret) {
+ dev_err(dev, "device detection err: %d\n", ret);
+ goto err;
+ }
+
+ /* chip specific initialization */
+ ret = lp55xx_post_init_device(chip);
+ if (ret) {
+ dev_err(dev, "post init device err: %d\n", ret);
+ goto err_post_init;
+ }
+
+ return 0;
+
+err_post_init:
+ lp55xx_deinit_device(chip);
+err:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lp55xx_init_device);
+
+void lp55xx_deinit_device(struct lp55xx_chip *chip)
+{
+ struct lp55xx_platform_data *pdata = chip->pdata;
+
+ if (pdata->enable)
+ pdata->enable(0);
+
+ if (pdata->release_resources)
+ pdata->release_resources();
+}
+EXPORT_SYMBOL_GPL(lp55xx_deinit_device);
+
+int lp55xx_register_leds(struct lp55xx_led *led, struct lp55xx_chip *chip)
+{
+ struct lp55xx_platform_data *pdata = chip->pdata;
+ struct lp55xx_device_config *cfg = chip->cfg;
+ int num_channels = pdata->num_channels;
+ struct lp55xx_led *each;
+ u8 led_current;
+ int ret;
+ int i;
+
+ if (!cfg->brightness_work_fn) {
+ dev_err(&chip->cl->dev, "empty brightness configuration\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_channels; i++) {
+
+ /* do not initialize channels that are not connected */
+ if (pdata->led_config[i].led_current == 0)
+ continue;
+
+ led_current = pdata->led_config[i].led_current;
+ each = led + i;
+ ret = lp55xx_init_led(each, chip, i);
+ if (ret)
+ goto err_init_led;
+
+ INIT_WORK(&each->brightness_work, cfg->brightness_work_fn);
+
+ chip->num_leds++;
+ each->chip = chip;
+
+ /* setting led current at each channel */
+ if (cfg->set_led_current)
+ cfg->set_led_current(each, led_current);
+ }
+
+ return 0;
+
+err_init_led:
+ lp55xx_unregister_leds(led, chip);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lp55xx_register_leds);
+
+void lp55xx_unregister_leds(struct lp55xx_led *led, struct lp55xx_chip *chip)
+{
+ int i;
+ struct lp55xx_led *each;
+
+ for (i = 0; i < chip->num_leds; i++) {
+ each = led + i;
+ led_classdev_unregister(&each->cdev);
+ flush_work(&each->brightness_work);
+ }
+}
+EXPORT_SYMBOL_GPL(lp55xx_unregister_leds);
+
+int lp55xx_register_sysfs(struct lp55xx_chip *chip)
+{
+ struct device *dev = &chip->cl->dev;
+ struct lp55xx_device_config *cfg = chip->cfg;
+ int ret;
+
+ if (!cfg->run_engine || !cfg->firmware_cb)
+ goto dev_specific_attrs;
+
+ ret = sysfs_create_group(&dev->kobj, &lp55xx_engine_attr_group);
+ if (ret)
+ return ret;
+
+dev_specific_attrs:
+ return cfg->dev_attr_group ?
+ sysfs_create_group(&dev->kobj, cfg->dev_attr_group) : 0;
+}
+EXPORT_SYMBOL_GPL(lp55xx_register_sysfs);
+
+void lp55xx_unregister_sysfs(struct lp55xx_chip *chip)
+{
+ struct device *dev = &chip->cl->dev;
+ struct lp55xx_device_config *cfg = chip->cfg;
+
+ if (cfg->dev_attr_group)
+ sysfs_remove_group(&dev->kobj, cfg->dev_attr_group);
+
+ sysfs_remove_group(&dev->kobj, &lp55xx_engine_attr_group);
+}
+EXPORT_SYMBOL_GPL(lp55xx_unregister_sysfs);
+
+MODULE_AUTHOR("Milo Kim <milo.kim@ti.com>");
+MODULE_DESCRIPTION("LP55xx Common Driver");
+MODULE_LICENSE("GPL");
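The common layer never touches a brightness register itself: lp55xx_set_brightness() only caches the requested value and schedules the per-LED brightness_work, and lp55xx_register_leds() bails out unless the chip driver supplies cfg->brightness_work_fn. A minimal sketch of what such a chip-side work function could look like, assuming a hypothetical per-channel PWM register at 0x16 (the address is illustrative, not taken from this patch):

static void example_brightness_work(struct work_struct *work)
{
	struct lp55xx_led *led = container_of(work, struct lp55xx_led,
					      brightness_work);
	struct lp55xx_chip *chip = led->chip;

	mutex_lock(&chip->lock);
	/* 0x16 stands in for a chip-specific PWM register base */
	lp55xx_write(chip, 0x16 + led->chan_nr, led->brightness);
	mutex_unlock(&chip->lock);
}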
diff --git a/drivers/leds/leds-lp55xx-common.h b/drivers/leds/leds-lp55xx-common.h
new file mode 100644
index 000000000000..ece4761a1302
--- /dev/null
+++ b/drivers/leds/leds-lp55xx-common.h
@@ -0,0 +1,134 @@
+/*
+ * LP55XX Common Driver Header
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * Derived from leds-lp5521.c, leds-lp5523.c
+ */
+
+#ifndef _LEDS_LP55XX_COMMON_H
+#define _LEDS_LP55XX_COMMON_H
+
+enum lp55xx_engine_index {
+ LP55XX_ENGINE_INVALID,
+ LP55XX_ENGINE_1,
+ LP55XX_ENGINE_2,
+ LP55XX_ENGINE_3,
+};
+
+struct lp55xx_led;
+struct lp55xx_chip;
+
+/*
+ * struct lp55xx_reg
+ * @addr : Register address
+ * @val : Register value
+ */
+struct lp55xx_reg {
+ u8 addr;
+ u8 val;
+};
+
+/*
+ * struct lp55xx_device_config
+ * @reset : Chip specific reset command
+ * @enable : Chip specific enable command
+ * @max_channel : Maximum number of channels
+ * @post_init_device : Chip specific initialization code
+ * @brightness_work_fn : Brightness work function
+ * @set_led_current : LED current set function
+ * @firmware_cb : Callback invoked when the firmware is loaded
+ * @run_engine : Run internal engine for pattern
+ * @dev_attr_group : Device specific attributes
+ */
+struct lp55xx_device_config {
+ const struct lp55xx_reg reset;
+ const struct lp55xx_reg enable;
+ const int max_channel;
+
+ /* define if the device has a specific initialization process */
+ int (*post_init_device) (struct lp55xx_chip *chip);
+
+ /* access brightness register */
+ void (*brightness_work_fn)(struct work_struct *work);
+
+ /* current setting function */
+ void (*set_led_current) (struct lp55xx_led *led, u8 led_current);
+
+ /* access program memory when the firmware is loaded */
+ void (*firmware_cb)(struct lp55xx_chip *chip);
+
+ /* used for running firmware LED patterns */
+ void (*run_engine) (struct lp55xx_chip *chip, bool start);
+
+ /* additional device specific attributes */
+ const struct attribute_group *dev_attr_group;
+};
+
+/*
+ * struct lp55xx_chip
+ * @cl : I2C communication for access registers
+ * @pdata : Platform specific data
+ * @lock : Lock for user-space interface
+ * @num_leds : Number of registered LEDs
+ * @cfg : Device specific configuration data
+ * @engine_idx : Selected engine number
+ * @fw : Firmware data for running a LED pattern
+ */
+struct lp55xx_chip {
+ struct i2c_client *cl;
+ struct lp55xx_platform_data *pdata;
+ struct mutex lock; /* lock for user-space interface */
+ int num_leds;
+ struct lp55xx_device_config *cfg;
+ enum lp55xx_engine_index engine_idx;
+ const struct firmware *fw;
+};
+
+/*
+ * struct lp55xx_led
+ * @chan_nr : Channel number
+ * @cdev : LED class device
+ * @led_current : Current setting at each led channel
+ * @max_current : Maximum current at each led channel
+ * @brightness_work : Workqueue for brightness control
+ * @brightness : Brightness value
+ * @chip : The lp55xx chip data
+ */
+struct lp55xx_led {
+ int chan_nr;
+ struct led_classdev cdev;
+ u8 led_current;
+ u8 max_current;
+ struct work_struct brightness_work;
+ u8 brightness;
+ struct lp55xx_chip *chip;
+};
+
+/* register access */
+extern int lp55xx_write(struct lp55xx_chip *chip, u8 reg, u8 val);
+extern int lp55xx_read(struct lp55xx_chip *chip, u8 reg, u8 *val);
+extern int lp55xx_update_bits(struct lp55xx_chip *chip, u8 reg,
+ u8 mask, u8 val);
+
+/* common device init/deinit functions */
+extern int lp55xx_init_device(struct lp55xx_chip *chip);
+extern void lp55xx_deinit_device(struct lp55xx_chip *chip);
+
+/* common LED class device functions */
+extern int lp55xx_register_leds(struct lp55xx_led *led,
+ struct lp55xx_chip *chip);
+extern void lp55xx_unregister_leds(struct lp55xx_led *led,
+ struct lp55xx_chip *chip);
+
+/* common device attributes functions */
+extern int lp55xx_register_sysfs(struct lp55xx_chip *chip);
+extern void lp55xx_unregister_sysfs(struct lp55xx_chip *chip);
+
+#endif /* _LEDS_LP55XX_COMMON_H */
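All chip-specific behaviour is routed through struct lp55xx_device_config; the reworked lp5523_probe() above simply points chip->cfg at its own table. A rough sketch of such a table, reusing the example work function sketched after leds-lp55xx-common.c; the register addresses and channel count are placeholders, not values from this patch:

static struct lp55xx_device_config example_cfg = {
	.reset  = { .addr = 0x3d, .val = 0xff },	/* hypothetical registers */
	.enable = { .addr = 0x00, .val = 0x40 },
	.max_channel = 9,
	/* mandatory: the common code schedules this for brightness updates */
	.brightness_work_fn = example_brightness_work,
	/*
	 * post_init_device, set_led_current, firmware_cb, run_engine and
	 * dev_attr_group are optional and may be left NULL
	 */
};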
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 4353942c5fd1..7c2cb384e7ae 100644
--- a/drivers/leds/leds-lp8788.c
+++ b/drivers/leds/leds-lp8788.c
@@ -130,9 +130,10 @@ static int lp8788_led_probe(struct platform_device *pdev)
struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
struct lp8788_led_platform_data *led_pdata;
struct lp8788_led *led;
+ struct device *dev = &pdev->dev;
int ret;
- led = devm_kzalloc(lp->dev, sizeof(struct lp8788_led), GFP_KERNEL);
+ led = devm_kzalloc(dev, sizeof(struct lp8788_led), GFP_KERNEL);
if (!led)
return -ENOMEM;
@@ -154,13 +155,13 @@ static int lp8788_led_probe(struct platform_device *pdev)
ret = lp8788_led_init_device(led, led_pdata);
if (ret) {
- dev_err(lp->dev, "led init device err: %d\n", ret);
+ dev_err(dev, "led init device err: %d\n", ret);
return ret;
}
- ret = led_classdev_register(lp->dev, &led->led_dev);
+ ret = led_classdev_register(dev, &led->led_dev);
if (ret) {
- dev_err(lp->dev, "led register err: %d\n", ret);
+ dev_err(dev, "led register err: %d\n", ret);
return ret;
}
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index cee8a5b483ac..0c597bdd23f9 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -186,7 +186,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
int err = 0;
if (*delay_on == 0 && *delay_off == 0) {
- /* led subsystem ask us for a blink rate */
+ /* led subsystem asks us for a blink rate */
*delay_on = 1000;
*delay_off = 1000;
}
@@ -311,7 +311,6 @@ static int pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
break;
case PCA9532_TYPE_N2100_BEEP:
if (data->idev != NULL) {
- input_unregister_device(data->idev);
cancel_work_sync(&data->work);
data->idev = NULL;
}
@@ -382,7 +381,7 @@ static int pca9532_configure(struct i2c_client *client,
BUG_ON(data->idev);
led->state = PCA9532_PWM1;
pca9532_setled(led);
- data->idev = input_allocate_device();
+ data->idev = devm_input_allocate_device(&client->dev);
if (data->idev == NULL) {
err = -ENOMEM;
goto exit;
@@ -401,7 +400,6 @@ static int pca9532_configure(struct i2c_client *client,
INIT_WORK(&data->work, pca9532_input_work);
err = input_register_device(data->idev);
if (err) {
- input_free_device(data->idev);
cancel_work_sync(&data->work);
data->idev = NULL;
goto exit;
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 2157524f277c..a1ea5f6a8d39 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/fb.h>
#include <linux/leds.h>
#include <linux/err.h>
@@ -30,6 +31,11 @@ struct led_pwm_data {
unsigned int period;
};
+struct led_pwm_priv {
+ int num_leds;
+ struct led_pwm_data leds[0];
+};
+
static void led_pwm_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
@@ -47,88 +53,152 @@ static void led_pwm_set(struct led_classdev *led_cdev,
}
}
-static int led_pwm_probe(struct platform_device *pdev)
+static inline size_t sizeof_pwm_leds_priv(int num_leds)
{
- struct led_pwm_platform_data *pdata = pdev->dev.platform_data;
- struct led_pwm *cur_led;
- struct led_pwm_data *leds_data, *led_dat;
- int i, ret = 0;
+ return sizeof(struct led_pwm_priv) +
+ (sizeof(struct led_pwm_data) * num_leds);
+}
+
+static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *child;
+ struct led_pwm_priv *priv;
+ int count, ret;
- if (!pdata)
- return -EBUSY;
+ /* count LEDs in this device, so we know how much to allocate */
+ count = of_get_child_count(node);
+ if (!count)
+ return NULL;
- leds_data = devm_kzalloc(&pdev->dev,
- sizeof(struct led_pwm_data) * pdata->num_leds,
- GFP_KERNEL);
- if (!leds_data)
- return -ENOMEM;
+ priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count),
+ GFP_KERNEL);
+ if (!priv)
+ return NULL;
- for (i = 0; i < pdata->num_leds; i++) {
- cur_led = &pdata->leds[i];
- led_dat = &leds_data[i];
+ for_each_child_of_node(node, child) {
+ struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
- led_dat->pwm = pwm_request(cur_led->pwm_id,
- cur_led->name);
+ led_dat->cdev.name = of_get_property(child, "label",
+ NULL) ? : child->name;
+
+ led_dat->pwm = devm_of_pwm_get(&pdev->dev, child, NULL);
if (IS_ERR(led_dat->pwm)) {
- ret = PTR_ERR(led_dat->pwm);
- dev_err(&pdev->dev, "unable to request PWM %d\n",
- cur_led->pwm_id);
+ dev_err(&pdev->dev, "unable to request PWM for %s\n",
+ led_dat->cdev.name);
goto err;
}
+ /* Get the period from the PWM core */
+ led_dat->period = pwm_get_period(led_dat->pwm);
+
+ led_dat->cdev.default_trigger = of_get_property(child,
+ "linux,default-trigger", NULL);
+ of_property_read_u32(child, "max-brightness",
+ &led_dat->cdev.max_brightness);
- led_dat->cdev.name = cur_led->name;
- led_dat->cdev.default_trigger = cur_led->default_trigger;
- led_dat->active_low = cur_led->active_low;
- led_dat->period = cur_led->pwm_period_ns;
led_dat->cdev.brightness_set = led_pwm_set;
led_dat->cdev.brightness = LED_OFF;
- led_dat->cdev.max_brightness = cur_led->max_brightness;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
if (ret < 0) {
- pwm_free(led_dat->pwm);
+ dev_err(&pdev->dev, "failed to register for %s\n",
+ led_dat->cdev.name);
+ of_node_put(child);
goto err;
}
+ priv->num_leds++;
}
- platform_set_drvdata(pdev, leds_data);
+ return priv;
+err:
+ while (priv->num_leds--)
+ led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
- return 0;
+ return NULL;
+}
-err:
- if (i > 0) {
- for (i = i - 1; i >= 0; i--) {
- led_classdev_unregister(&leds_data[i].cdev);
- pwm_free(leds_data[i].pwm);
+static int led_pwm_probe(struct platform_device *pdev)
+{
+ struct led_pwm_platform_data *pdata = pdev->dev.platform_data;
+ struct led_pwm_priv *priv;
+ int i, ret = 0;
+
+ if (pdata && pdata->num_leds) {
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof_pwm_leds_priv(pdata->num_leds),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ struct led_pwm *cur_led = &pdata->leds[i];
+ struct led_pwm_data *led_dat = &priv->leds[i];
+
+ led_dat->pwm = devm_pwm_get(&pdev->dev, cur_led->name);
+ if (IS_ERR(led_dat->pwm)) {
+ ret = PTR_ERR(led_dat->pwm);
+ dev_err(&pdev->dev,
+ "unable to request PWM for %s\n",
+ cur_led->name);
+ goto err;
+ }
+
+ led_dat->cdev.name = cur_led->name;
+ led_dat->cdev.default_trigger = cur_led->default_trigger;
+ led_dat->active_low = cur_led->active_low;
+ led_dat->period = cur_led->pwm_period_ns;
+ led_dat->cdev.brightness_set = led_pwm_set;
+ led_dat->cdev.brightness = LED_OFF;
+ led_dat->cdev.max_brightness = cur_led->max_brightness;
+ led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+
+ ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
+ if (ret < 0)
+ goto err;
}
+ priv->num_leds = pdata->num_leds;
+ } else {
+ priv = led_pwm_create_of(pdev);
+ if (!priv)
+ return -ENODEV;
}
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+err:
+ while (i--)
+ led_classdev_unregister(&priv->leds[i].cdev);
+
return ret;
}
static int led_pwm_remove(struct platform_device *pdev)
{
+ struct led_pwm_priv *priv = platform_get_drvdata(pdev);
int i;
- struct led_pwm_platform_data *pdata = pdev->dev.platform_data;
- struct led_pwm_data *leds_data;
- leds_data = platform_get_drvdata(pdev);
-
- for (i = 0; i < pdata->num_leds; i++) {
- led_classdev_unregister(&leds_data[i].cdev);
- pwm_free(leds_data[i].pwm);
- }
+ for (i = 0; i < priv->num_leds; i++)
+ led_classdev_unregister(&priv->leds[i].cdev);
return 0;
}
+static const struct of_device_id of_pwm_leds_match[] = {
+ { .compatible = "pwm-leds", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_pwm_leds_match);
+
static struct platform_driver led_pwm_driver = {
.probe = led_pwm_probe,
.remove = led_pwm_remove,
.driver = {
.name = "leds_pwm",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_pwm_leds_match),
},
};
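The probe now handles both device-tree and legacy platform-data configurations; the pdata path still binds to a "leds_pwm" platform device and now requests its PWMs by LED name through devm_pwm_get(). A board-file style sketch of that path, assuming the usual struct led_pwm / led_pwm_platform_data definitions from <linux/leds_pwm.h> (the header is not part of this diff); boards would additionally need a matching PWM lookup entry so the by-name request can resolve:

static struct led_pwm board_pwm_leds[] = {
	{
		.name            = "board:green:status",
		.default_trigger = "heartbeat",
		.active_low      = 0,
		.max_brightness  = 255,
		.pwm_period_ns   = 1000000,	/* 1 kHz */
	},
};

static struct led_pwm_platform_data board_pwm_led_pdata = {
	.num_leds = ARRAY_SIZE(board_pwm_leds),
	.leds     = board_pwm_leds,
};

static struct platform_device board_pwm_led_device = {
	.name = "leds_pwm",
	.id   = -1,
	.dev  = {
		.platform_data = &board_pwm_led_pdata,
	},
};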
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index e0fff1ca5923..d3c2b7e68fbc 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -133,24 +133,24 @@ static int r_tpu_enable(struct r_tpu_priv *p, enum led_brightness brightness)
rate = clk_get_rate(p->clk);
/* pick the lowest acceptable rate */
- for (k = 0; k < ARRAY_SIZE(prescaler); k++)
- if ((rate / prescaler[k]) < p->min_rate)
+ for (k = ARRAY_SIZE(prescaler) - 1; k >= 0; k--)
+ if ((rate / prescaler[k]) >= p->min_rate)
break;
- if (!k) {
+ if (k < 0) {
dev_err(&p->pdev->dev, "clock rate mismatch\n");
goto err0;
}
dev_dbg(&p->pdev->dev, "rate = %lu, prescaler %u\n",
- rate, prescaler[k - 1]);
+ rate, prescaler[k]);
/* clear TCNT on TGRB match, count on rising edge, set prescaler */
- r_tpu_write(p, TCR, 0x0040 | (k - 1));
+ r_tpu_write(p, TCR, 0x0040 | k);
/* output 0 until TGRA, output 1 until TGRB */
r_tpu_write(p, TIOR, 0x0002);
- rate /= prescaler[k - 1] * p->refresh_rate;
+ rate /= prescaler[k] * p->refresh_rate;
r_tpu_write(p, TGRB, rate);
dev_dbg(&p->pdev->dev, "TRGB = 0x%04lx\n", rate);
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index ec9b287ecfbf..64e204e714f6 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -63,8 +63,7 @@ MODULE_LICENSE("GPL");
/*
* PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives.
*/
-static const struct pci_device_id ich7_lpc_pci_id[] =
-{
+static DEFINE_PCI_DEVICE_TABLE(ich7_lpc_pci_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_30) },
diff --git a/drivers/leds/leds-sunfire.c b/drivers/leds/leds-sunfire.c
index 07ff5a3a6cee..89792990088d 100644
--- a/drivers/leds/leds-sunfire.c
+++ b/drivers/leds/leds-sunfire.c
@@ -3,6 +3,8 @@
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -14,9 +16,6 @@
#include <asm/fhc.h>
#include <asm/upa.h>
-#define DRIVER_NAME "leds-sunfire"
-#define PFX DRIVER_NAME ": "
-
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun Fire LED driver");
MODULE_LICENSE("GPL");
@@ -130,14 +129,14 @@ static int sunfire_led_generic_probe(struct platform_device *pdev,
int i, err;
if (pdev->num_resources != 1) {
- printk(KERN_ERR PFX "Wrong number of resources %d, should be 1\n",
+ dev_err(&pdev->dev, "Wrong number of resources %d, should be 1\n",
pdev->num_resources);
return -EINVAL;
}
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (!p) {
- printk(KERN_ERR PFX "Could not allocate struct sunfire_drvdata\n");
+ dev_err(&pdev->dev, "Could not allocate struct sunfire_drvdata\n");
return -ENOMEM;
}
@@ -152,7 +151,7 @@ static int sunfire_led_generic_probe(struct platform_device *pdev,
err = led_classdev_register(&pdev->dev, lp);
if (err) {
- printk(KERN_ERR PFX "Could not register %s LED\n",
+ dev_err(&pdev->dev, "Could not register %s LED\n",
lp->name);
for (i--; i >= 0; i--)
led_classdev_unregister(&p->leds[i].led_cdev);
@@ -188,7 +187,7 @@ static struct led_type clockboard_led_types[NUM_LEDS_PER_BOARD] = {
{
.name = "clockboard-right",
.handler = clockboard_right_set,
- .default_trigger= "heartbeat",
+ .default_trigger = "heartbeat",
},
};
@@ -209,7 +208,7 @@ static struct led_type fhc_led_types[NUM_LEDS_PER_BOARD] = {
{
.name = "fhc-right",
.handler = fhc_right_set,
- .default_trigger= "heartbeat",
+ .default_trigger = "heartbeat",
},
};
@@ -244,13 +243,13 @@ static int __init sunfire_leds_init(void)
int err = platform_driver_register(&sunfire_clockboard_led_driver);
if (err) {
- printk(KERN_ERR PFX "Could not register clock board LED driver\n");
+ pr_err("Could not register clock board LED driver\n");
return err;
}
err = platform_driver_register(&sunfire_fhc_led_driver);
if (err) {
- printk(KERN_ERR PFX "Could not register FHC LED driver\n");
+ pr_err("Could not register FHC LED driver\n");
platform_driver_unregister(&sunfire_clockboard_led_driver);
}
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index b26a63bae16b..070ba0741b21 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -667,8 +667,68 @@ static void tca6507_remove_gpio(struct tca6507_chip *tca)
}
#endif /* CONFIG_GPIOLIB */
+#ifdef CONFIG_OF
+static struct tca6507_platform_data *
+tca6507_led_dt_init(struct i2c_client *client)
+{
+ struct device_node *np = client->dev.of_node, *child;
+ struct tca6507_platform_data *pdata;
+ struct led_info *tca_leds;
+ int count;
+
+ count = of_get_child_count(np);
+ if (!count || count > NUM_LEDS)
+ return ERR_PTR(-ENODEV);
+
+ tca_leds = devm_kzalloc(&client->dev,
+ sizeof(struct led_info) * count, GFP_KERNEL);
+ if (!tca_leds)
+ return ERR_PTR(-ENOMEM);
+
+ for_each_child_of_node(np, child) {
+ struct led_info led;
+ u32 reg;
+ int ret;
+
+ led.name =
+ of_get_property(child, "label", NULL) ? : child->name;
+ led.default_trigger =
+ of_get_property(child, "linux,default-trigger", NULL);
+
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret != 0)
+ continue;
+
+ tca_leds[reg] = led;
+ }
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct tca6507_platform_data), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->leds.leds = tca_leds;
+ pdata->leds.num_leds = count;
+
+ return pdata;
+}
+
+static const struct of_device_id of_tca6507_leds_match[] = {
+ { .compatible = "ti,tca6507", },
+ {},
+};
+
+#else
+static struct tca6507_platform_data *
+tca6507_led_dt_init(struct i2c_client *client)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+#define of_tca6507_leds_match NULL
+#endif
+
static int tca6507_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct tca6507_chip *tca;
struct i2c_adapter *adapter;
@@ -683,9 +743,12 @@ static int tca6507_probe(struct i2c_client *client,
return -EIO;
if (!pdata || pdata->leds.num_leds != NUM_LEDS) {
- dev_err(&client->dev, "Need %d entries in platform-data list\n",
- NUM_LEDS);
- return -ENODEV;
+ pdata = tca6507_led_dt_init(client);
+ if (IS_ERR(pdata)) {
+ dev_err(&client->dev, "Need %d entries in platform-data list\n",
+ NUM_LEDS);
+ return PTR_ERR(pdata);
+ }
}
tca = devm_kzalloc(&client->dev, sizeof(*tca), GFP_KERNEL);
if (!tca)
@@ -750,6 +813,7 @@ static struct i2c_driver tca6507_driver = {
.driver = {
.name = "leds-tca6507",
.owner = THIS_MODULE,
+ .of_match_table = of_tca6507_leds_match,
},
.probe = tca6507_probe,
.remove = tca6507_remove,
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index 74a24cf897c3..6bd5c679d877 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -157,7 +157,7 @@ static int wm831x_status_blink_set(struct led_classdev *led_cdev,
return ret;
}
-static const char *led_src_texts[] = {
+static const char * const led_src_texts[] = {
"otp",
"power",
"charger",
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index 6cdcdb0d3d58..89875ea19ade 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
config LGUEST
tristate "Linux hypervisor example code"
- depends on X86_32 && EVENTFD
+ depends on X86_32 && EVENTFD && TTY
select HVC_DRIVER
---help---
This is a very simple module which allows you to run
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index fc92ccbd71dc..b3256ff0d426 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -396,7 +396,7 @@ static const char *lg_bus_name(struct virtio_device *vdev)
}
/* The ops structure which hooks everything together. */
-static struct virtio_config_ops lguest_config_ops = {
+static const struct virtio_config_ops lguest_config_ops = {
.get_features = lg_get_features,
.finalize_features = lg_finalize_features,
.get = lg_get,
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index 35ef6e2582b8..3024685e4cca 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -681,7 +681,7 @@ static int __init wf_pm112_init(void)
/* Count the number of CPU cores */
nr_cores = 0;
- for (cpu = NULL; (cpu = of_find_node_by_type(cpu, "cpu")) != NULL; )
+ for_each_node_by_type(cpu, "cpu")
++nr_cores;
printk(KERN_INFO "windfarm: initializing for dual-core desktop G5\n");
diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c
index 6e5585357cd3..2f506b9d5a52 100644
--- a/drivers/macintosh/windfarm_pm72.c
+++ b/drivers/macintosh/windfarm_pm72.c
@@ -804,7 +804,7 @@ static int __init wf_pm72_init(void)
/* Count the number of CPU cores */
nr_chips = 0;
- for (cpu = NULL; (cpu = of_find_node_by_type(cpu, "cpu")) != NULL; )
+ for_each_node_by_type(cpu, "cpu")
++nr_chips;
if (nr_chips > NR_CHIPS)
nr_chips = NR_CHIPS;
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 844003fb4ef0..0b9a79b2f48a 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -696,7 +696,7 @@ static int __init wf_rm31_init(void)
/* Count the number of CPU cores */
nr_chips = 0;
- for (cpu = NULL; (cpu = of_find_node_by_type(cpu, "cpu")) != NULL; )
+ for_each_node_by_type(cpu, "cpu")
++nr_chips;
if (nr_chips > NR_CHIPS)
nr_chips = NR_CHIPS;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 91a02eeeb319..e30b490055aa 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -210,7 +210,7 @@ config DM_DEBUG
config DM_BUFIO
tristate
- depends on BLK_DEV_DM && EXPERIMENTAL
+ depends on BLK_DEV_DM
---help---
This interface allows you to do buffered I/O on a device and acts
as a cache, holding recently-read blocks in memory and performing
@@ -218,7 +218,7 @@ config DM_BUFIO
config DM_BIO_PRISON
tristate
- depends on BLK_DEV_DM && EXPERIMENTAL
+ depends on BLK_DEV_DM
---help---
Some bio locking schemes used by other device-mapper targets
including thin provisioning.
@@ -251,8 +251,8 @@ config DM_SNAPSHOT
Allow volume managers to take writable snapshots of a device.
config DM_THIN_PROVISIONING
- tristate "Thin provisioning target (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "Thin provisioning target"
+ depends on BLK_DEV_DM
select DM_PERSISTENT_DATA
select DM_BIO_PRISON
---help---
@@ -268,6 +268,37 @@ config DM_DEBUG_BLOCK_STACK_TRACING
If unsure, say N.
+config DM_CACHE
+ tristate "Cache target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM
+ default n
+ select DM_PERSISTENT_DATA
+ select DM_BIO_PRISON
+ ---help---
+ dm-cache attempts to improve performance of a block device by
+ moving frequently used data to a smaller, higher performance
+ device. Different 'policy' plugins can be used to change the
+ algorithms used to select which blocks are promoted, demoted,
+ cleaned etc. It supports writeback and writethrough modes.
+
+config DM_CACHE_MQ
+ tristate "MQ Cache Policy (EXPERIMENTAL)"
+ depends on DM_CACHE
+ default y
+ ---help---
+ A cache policy that uses a multiqueue ordered by recent hit
+ count to select which blocks should be promoted and demoted.
+ This is meant to be a general purpose policy. It prioritises
+ reads over writes.
+
+config DM_CACHE_CLEANER
+ tristate "Cleaner Cache Policy (EXPERIMENTAL)"
+ depends on DM_CACHE
+ default y
+ ---help---
+ A simple cache policy that writes back all data to the
+ origin. Used when decommissioning a dm-cache.
+
config DM_MIRROR
tristate "Mirror target"
depends on BLK_DEV_DM
@@ -302,8 +333,8 @@ config DM_RAID
in one of the available parity distribution methods.
config DM_LOG_USERSPACE
- tristate "Mirror userspace logging (EXPERIMENTAL)"
- depends on DM_MIRROR && EXPERIMENTAL && NET
+ tristate "Mirror userspace logging"
+ depends on DM_MIRROR && NET
select CONNECTOR
---help---
The userspace logging module provides a mechanism for
@@ -350,8 +381,8 @@ config DM_MULTIPATH_ST
If unsure, say N.
config DM_DELAY
- tristate "I/O delaying target (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "I/O delaying target"
+ depends on BLK_DEV_DM
---help---
A target that delays reads and/or writes and can send
them to different devices. Useful for testing.
@@ -365,14 +396,14 @@ config DM_UEVENT
Generate udev events for DM events.
config DM_FLAKEY
- tristate "Flakey target (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "Flakey target"
+ depends on BLK_DEV_DM
---help---
A target that intermittently fails I/O for debugging purposes.
config DM_VERITY
- tristate "Verity target support (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "Verity target support"
+ depends on BLK_DEV_DM
select CRYPTO
select CRYPTO_HASH
select DM_BUFIO
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 94dce8b49324..7ceeaefc0e95 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -11,6 +11,9 @@ dm-mirror-y += dm-raid1.o
dm-log-userspace-y \
+= dm-log-userspace-base.o dm-log-userspace-transfer.o
dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
+dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
+dm-cache-mq-y += dm-cache-policy-mq.o
+dm-cache-cleaner-y += dm-cache-policy-cleaner.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o
@@ -44,6 +47,9 @@ obj-$(CONFIG_DM_ZERO) += dm-zero.o
obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
+obj-$(CONFIG_DM_CACHE) += dm-cache.o
+obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
+obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7155945f8eb8..4fd9d6aeff6a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -337,7 +337,7 @@ static int read_page(struct file *file, unsigned long index,
struct page *page)
{
int ret = 0;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct buffer_head *bh;
sector_t block;
@@ -755,7 +755,7 @@ static void bitmap_file_unmap(struct bitmap_storage *store)
free_buffers(sb_page);
if (file) {
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
invalidate_mapping_pages(inode->i_mapping, 0, -1);
fput(file);
}
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index aefb78e3cbf9..85f0b7074257 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -14,14 +14,6 @@
/*----------------------------------------------------------------*/
-struct dm_bio_prison_cell {
- struct hlist_node list;
- struct dm_bio_prison *prison;
- struct dm_cell_key key;
- struct bio *holder;
- struct bio_list bios;
-};
-
struct dm_bio_prison {
spinlock_t lock;
mempool_t *cell_pool;
@@ -87,6 +79,19 @@ void dm_bio_prison_destroy(struct dm_bio_prison *prison)
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
+struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
+{
+ return mempool_alloc(prison->cell_pool, gfp);
+}
+EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);
+
+void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell)
+{
+ mempool_free(cell, prison->cell_pool);
+}
+EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
+
static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
{
const unsigned long BIG_PRIME = 4294967291UL;
@@ -106,100 +111,103 @@ static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
struct dm_cell_key *key)
{
struct dm_bio_prison_cell *cell;
- struct hlist_node *tmp;
- hlist_for_each_entry(cell, tmp, bucket, list)
+ hlist_for_each_entry(cell, bucket, list)
if (keys_equal(&cell->key, key))
return cell;
return NULL;
}
-/*
- * This may block if a new cell needs allocating. You must ensure that
- * cells will be unlocked even if the calling thread is blocked.
- *
- * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
- */
-int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
- struct bio *inmate, struct dm_bio_prison_cell **ref)
+static void __setup_new_cell(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *holder,
+ uint32_t hash,
+ struct dm_bio_prison_cell *cell)
{
- int r = 1;
- unsigned long flags;
- uint32_t hash = hash_key(prison, key);
- struct dm_bio_prison_cell *cell, *cell2;
-
- BUG_ON(hash > prison->nr_buckets);
-
- spin_lock_irqsave(&prison->lock, flags);
-
- cell = __search_bucket(prison->cells + hash, key);
- if (cell) {
- bio_list_add(&cell->bios, inmate);
- goto out;
- }
+ memcpy(&cell->key, key, sizeof(cell->key));
+ cell->holder = holder;
+ bio_list_init(&cell->bios);
+ hlist_add_head(&cell->list, prison->cells + hash);
+}
- /*
- * Allocate a new cell
- */
- spin_unlock_irqrestore(&prison->lock, flags);
- cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
- spin_lock_irqsave(&prison->lock, flags);
+static int __bio_detain(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *inmate,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result)
+{
+ uint32_t hash = hash_key(prison, key);
+ struct dm_bio_prison_cell *cell;
- /*
- * We've been unlocked, so we have to double check that
- * nobody else has inserted this cell in the meantime.
- */
cell = __search_bucket(prison->cells + hash, key);
if (cell) {
- mempool_free(cell2, prison->cell_pool);
- bio_list_add(&cell->bios, inmate);
- goto out;
+ if (inmate)
+ bio_list_add(&cell->bios, inmate);
+ *cell_result = cell;
+ return 1;
}
- /*
- * Use new cell.
- */
- cell = cell2;
-
- cell->prison = prison;
- memcpy(&cell->key, key, sizeof(cell->key));
- cell->holder = inmate;
- bio_list_init(&cell->bios);
- hlist_add_head(&cell->list, prison->cells + hash);
+ __setup_new_cell(prison, key, inmate, hash, cell_prealloc);
+ *cell_result = cell_prealloc;
+ return 0;
+}
- r = 0;
+static int bio_detain(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *inmate,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result)
+{
+ int r;
+ unsigned long flags;
-out:
+ spin_lock_irqsave(&prison->lock, flags);
+ r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
spin_unlock_irqrestore(&prison->lock, flags);
- *ref = cell;
-
return r;
}
+
+int dm_bio_detain(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *inmate,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result)
+{
+ return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
+}
EXPORT_SYMBOL_GPL(dm_bio_detain);
+int dm_get_cell(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result)
+{
+ return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
+}
+EXPORT_SYMBOL_GPL(dm_get_cell);
+
/*
* @inmates must have been initialised prior to this call
*/
-static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
- struct dm_bio_prison *prison = cell->prison;
-
hlist_del(&cell->list);
if (inmates) {
- bio_list_add(inmates, cell->holder);
+ if (cell->holder)
+ bio_list_add(inmates, cell->holder);
bio_list_merge(inmates, &cell->bios);
}
-
- mempool_free(cell, prison->cell_pool);
}
-void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
+void dm_cell_release(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *bios)
{
unsigned long flags;
- struct dm_bio_prison *prison = cell->prison;
spin_lock_irqsave(&prison->lock, flags);
__cell_release(cell, bios);
@@ -210,20 +218,18 @@ EXPORT_SYMBOL_GPL(dm_cell_release);
/*
* Sometimes we don't want the holder, just the additional bios.
*/
-static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
- struct dm_bio_prison *prison = cell->prison;
-
hlist_del(&cell->list);
bio_list_merge(inmates, &cell->bios);
-
- mempool_free(cell, prison->cell_pool);
}
-void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
+void dm_cell_release_no_holder(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
unsigned long flags;
- struct dm_bio_prison *prison = cell->prison;
spin_lock_irqsave(&prison->lock, flags);
__cell_release_no_holder(cell, inmates);
@@ -231,9 +237,9 @@ void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
-void dm_cell_error(struct dm_bio_prison_cell *cell)
+void dm_cell_error(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell)
{
- struct dm_bio_prison *prison = cell->prison;
struct bio_list bios;
struct bio *bio;
unsigned long flags;
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
index 53d1a7a84e2f..3f833190eadf 100644
--- a/drivers/md/dm-bio-prison.h
+++ b/drivers/md/dm-bio-prison.h
@@ -22,7 +22,6 @@
* subsequently unlocked the bios become available.
*/
struct dm_bio_prison;
-struct dm_bio_prison_cell;
/* FIXME: this needs to be more abstract */
struct dm_cell_key {
@@ -31,21 +30,62 @@ struct dm_cell_key {
dm_block_t block;
};
+/*
+ * Treat this as opaque, only in header so callers can manage allocation
+ * themselves.
+ */
+struct dm_bio_prison_cell {
+ struct hlist_node list;
+ struct dm_cell_key key;
+ struct bio *holder;
+ struct bio_list bios;
+};
+
struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);
/*
- * This may block if a new cell needs allocating. You must ensure that
- * cells will be unlocked even if the calling thread is blocked.
+ * These two functions just wrap a mempool. This is a transitory step:
+ * Eventually all bio prison clients should manage their own cell memory.
*
- * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
+ * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
+ * in interrupt context or passed GFP_NOWAIT.
*/
-int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
- struct bio *inmate, struct dm_bio_prison_cell **ref);
+struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
+ gfp_t gfp);
+void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell);
-void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
-void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
-void dm_cell_error(struct dm_bio_prison_cell *cell);
+/*
+ * Creates, or retrieves a cell for the given key.
+ *
+ * Returns 1 if pre-existing cell returned, zero if new cell created using
+ * @cell_prealloc.
+ */
+int dm_get_cell(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result);
+
+/*
+ * An atomic op that combines retrieving a cell, and adding a bio to it.
+ *
+ * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
+ */
+int dm_bio_detain(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *inmate,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result);
+
+void dm_cell_release(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *bios);
+void dm_cell_release_no_holder(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates);
+void dm_cell_error(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell);
/*----------------------------------------------------------------*/
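The behavioural change here is that dm_bio_detain() no longer allocates cell memory itself; callers pre-allocate a cell and are told whether it was consumed. A sketch of the calling pattern a prison client would follow under the new interface (the wrapper function and its names are illustrative):

static int example_lock_block(struct dm_bio_prison *prison,
			      struct dm_cell_key *key, struct bio *bio,
			      struct dm_bio_prison_cell **cell_out)
{
	int r;
	struct dm_bio_prison_cell *prealloc;

	/* may sleep with GFP_NOIO; allocate before taking any locks */
	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);

	r = dm_bio_detain(prison, key, bio, prealloc, cell_out);
	if (r)
		/* cell already existed; the preallocated one went unused */
		dm_bio_prison_free_cell(prison, prealloc);

	return r;	/* 1: bio queued behind existing holder, 0: new holder */
}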
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 651ca79881dd..3c955e10a618 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -859,9 +859,8 @@ static void __check_watermark(struct dm_bufio_client *c)
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
struct dm_buffer *b;
- struct hlist_node *hn;
- hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
+ hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
hash_list) {
dm_bufio_cond_resched();
if (b->block == block)
@@ -1193,7 +1192,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
struct dm_io_request io_req = {
- .bi_rw = REQ_FLUSH,
+ .bi_rw = WRITE_FLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h
new file mode 100644
index 000000000000..bed4ad4e1b7c
--- /dev/null
+++ b/drivers/md/dm-cache-block-types.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_BLOCK_TYPES_H
+#define DM_CACHE_BLOCK_TYPES_H
+
+#include "persistent-data/dm-block-manager.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * It's helpful to get sparse to differentiate between indexes into the
+ * origin device, indexes into the cache device, and indexes into the
+ * discard bitset.
+ */
+
+typedef dm_block_t __bitwise__ dm_oblock_t;
+typedef uint32_t __bitwise__ dm_cblock_t;
+typedef dm_block_t __bitwise__ dm_dblock_t;
+
+static inline dm_oblock_t to_oblock(dm_block_t b)
+{
+ return (__force dm_oblock_t) b;
+}
+
+static inline dm_block_t from_oblock(dm_oblock_t b)
+{
+ return (__force dm_block_t) b;
+}
+
+static inline dm_cblock_t to_cblock(uint32_t b)
+{
+ return (__force dm_cblock_t) b;
+}
+
+static inline uint32_t from_cblock(dm_cblock_t b)
+{
+ return (__force uint32_t) b;
+}
+
+static inline dm_dblock_t to_dblock(dm_block_t b)
+{
+ return (__force dm_dblock_t) b;
+}
+
+static inline dm_block_t from_dblock(dm_dblock_t b)
+{
+ return (__force dm_block_t) b;
+}
+
+#endif /* DM_CACHE_BLOCK_TYPES_H */
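The __bitwise__ typedefs let sparse treat origin, cache and discard indexes as distinct types, so accidentally mixing them up produces a warning; all conversions have to go through the helpers above. For example, a hypothetical helper that advances an origin-block index:

static inline dm_oblock_t example_next_oblock(dm_oblock_t b)
{
	/* arithmetic is done on the plain dm_block_t and converted back */
	return to_oblock(from_oblock(b) + 1);
}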
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
new file mode 100644
index 000000000000..fbd3625f2748
--- /dev/null
+++ b/drivers/md/dm-cache-metadata.c
@@ -0,0 +1,1146 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-metadata.h"
+
+#include "persistent-data/dm-array.h"
+#include "persistent-data/dm-bitset.h"
+#include "persistent-data/dm-space-map.h"
+#include "persistent-data/dm-space-map-disk.h"
+#include "persistent-data/dm-transaction-manager.h"
+
+#include <linux/device-mapper.h>
+
+/*----------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX "cache metadata"
+
+#define CACHE_SUPERBLOCK_MAGIC 06142003
+#define CACHE_SUPERBLOCK_LOCATION 0
+#define CACHE_VERSION 1
+#define CACHE_METADATA_CACHE_SIZE 64
+
+/*
+ * 3 for btree insert +
+ * 2 for btree lookup used within space map
+ */
+#define CACHE_MAX_CONCURRENT_LOCKS 5
+#define SPACE_MAP_ROOT_SIZE 128
+
+enum superblock_flag_bits {
+ /* for spotting crashes that would invalidate the dirty bitset */
+ CLEAN_SHUTDOWN,
+};
+
+/*
+ * Each mapping from cache block -> origin block carries a set of flags.
+ */
+enum mapping_bits {
+ /*
+ * A valid mapping. Because we're using an array we clear this
+ * flag for a non-existent mapping.
+ */
+ M_VALID = 1,
+
+ /*
+ * The data on the cache is different from that on the origin.
+ */
+ M_DIRTY = 2
+};
+
+struct cache_disk_superblock {
+ __le32 csum;
+ __le32 flags;
+ __le64 blocknr;
+
+ __u8 uuid[16];
+ __le64 magic;
+ __le32 version;
+
+ __u8 policy_name[CACHE_POLICY_NAME_SIZE];
+ __le32 policy_hint_size;
+
+ __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
+ __le64 mapping_root;
+ __le64 hint_root;
+
+ __le64 discard_root;
+ __le64 discard_block_size;
+ __le64 discard_nr_blocks;
+
+ __le32 data_block_size;
+ __le32 metadata_block_size;
+ __le32 cache_blocks;
+
+ __le32 compat_flags;
+ __le32 compat_ro_flags;
+ __le32 incompat_flags;
+
+ __le32 read_hits;
+ __le32 read_misses;
+ __le32 write_hits;
+ __le32 write_misses;
+} __packed;
+
+struct dm_cache_metadata {
+ struct block_device *bdev;
+ struct dm_block_manager *bm;
+ struct dm_space_map *metadata_sm;
+ struct dm_transaction_manager *tm;
+
+ struct dm_array_info info;
+ struct dm_array_info hint_info;
+ struct dm_disk_bitset discard_info;
+
+ struct rw_semaphore root_lock;
+ dm_block_t root;
+ dm_block_t hint_root;
+ dm_block_t discard_root;
+
+ sector_t discard_block_size;
+ dm_dblock_t discard_nr_blocks;
+
+ sector_t data_block_size;
+ dm_cblock_t cache_blocks;
+ bool changed:1;
+ bool clean_when_opened:1;
+
+ char policy_name[CACHE_POLICY_NAME_SIZE];
+ size_t policy_hint_size;
+ struct dm_cache_statistics stats;
+};
+
+/*-------------------------------------------------------------------
+ * superblock validator
+ *-----------------------------------------------------------------*/
+
+#define SUPERBLOCK_CSUM_XOR 9031977
+
+static void sb_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t sb_block_size)
+{
+ struct cache_disk_superblock *disk_super = dm_block_data(b);
+
+ disk_super->blocknr = cpu_to_le64(dm_block_location(b));
+ disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
+ sb_block_size - sizeof(__le32),
+ SUPERBLOCK_CSUM_XOR));
+}
+
+static int sb_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t sb_block_size)
+{
+ struct cache_disk_superblock *disk_super = dm_block_data(b);
+ __le32 csum_le;
+
+ if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
+ DMERR("sb_check failed: blocknr %llu: wanted %llu",
+ le64_to_cpu(disk_super->blocknr),
+ (unsigned long long)dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
+ DMERR("sb_check failed: magic %llu: wanted %llu",
+ le64_to_cpu(disk_super->magic),
+ (unsigned long long)CACHE_SUPERBLOCK_MAGIC);
+ return -EILSEQ;
+ }
+
+ csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
+ sb_block_size - sizeof(__le32),
+ SUPERBLOCK_CSUM_XOR));
+ if (csum_le != disk_super->csum) {
+ DMERR("sb_check failed: csum %u: wanted %u",
+ le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static struct dm_block_validator sb_validator = {
+ .name = "superblock",
+ .prepare_for_write = sb_prepare_for_write,
+ .check = sb_check
+};
+
+/*----------------------------------------------------------------*/
+
+static int superblock_read_lock(struct dm_cache_metadata *cmd,
+ struct dm_block **sblock)
+{
+ return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+static int superblock_lock_zero(struct dm_cache_metadata *cmd,
+ struct dm_block **sblock)
+{
+ return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+static int superblock_lock(struct dm_cache_metadata *cmd,
+ struct dm_block **sblock)
+{
+ return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+/*----------------------------------------------------------------*/
+
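+/*
+ * Reads the superblock location without a validator and reports whether it
+ * is entirely zeroes; __open_or_format_metadata() uses this to spot an
+ * unformatted metadata device.
+ */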
+static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
+{
+ int r;
+ unsigned i;
+ struct dm_block *b;
+ __le64 *data_le, zero = cpu_to_le64(0);
+ unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+
+ /*
+ * We can't use a validator here - it may be all zeroes.
+ */
+ r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
+ if (r)
+ return r;
+
+ data_le = dm_block_data(b);
+ *result = 1;
+ for (i = 0; i < sb_block_size; i++) {
+ if (data_le[i] != zero) {
+ *result = 0;
+ break;
+ }
+ }
+
+ return dm_bm_unlock(b);
+}
+
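+/*
+ * The mapping array holds one packed __le64 per cache block (see
+ * pack_value() below).  If the policy supplies hints, a parallel array of
+ * __le32 hint values is set up alongside it.
+ */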
+static void __setup_mapping_info(struct dm_cache_metadata *cmd)
+{
+ struct dm_btree_value_type vt;
+
+ vt.context = NULL;
+ vt.size = sizeof(__le64);
+ vt.inc = NULL;
+ vt.dec = NULL;
+ vt.equal = NULL;
+ dm_array_info_init(&cmd->info, cmd->tm, &vt);
+
+ if (cmd->policy_hint_size) {
+ vt.size = sizeof(__le32);
+ dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
+ }
+}
+
+static int __write_initial_superblock(struct dm_cache_metadata *cmd)
+{
+ int r;
+ struct dm_block *sblock;
+ size_t metadata_len;
+ struct cache_disk_superblock *disk_super;
+ sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
+
+ /* FIXME: see if we can lose the max sectors limit */
+ if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
+ bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
+
+ r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_pre_commit(cmd->tm);
+ if (r < 0)
+ return r;
+
+ r = superblock_lock_zero(cmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+ disk_super->flags = 0;
+ memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
+ disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
+ disk_super->version = cpu_to_le32(CACHE_VERSION);
+ memset(disk_super->policy_name, 0, CACHE_POLICY_NAME_SIZE);
+ disk_super->policy_hint_size = 0;
+
+ r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
+ metadata_len);
+ if (r < 0)
+ goto bad_locked;
+
+ disk_super->mapping_root = cpu_to_le64(cmd->root);
+ disk_super->hint_root = cpu_to_le64(cmd->hint_root);
+ disk_super->discard_root = cpu_to_le64(cmd->discard_root);
+ disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
+ disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
+ disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
+ disk_super->cache_blocks = cpu_to_le32(0);
+ memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
+
+ disk_super->read_hits = cpu_to_le32(0);
+ disk_super->read_misses = cpu_to_le32(0);
+ disk_super->write_hits = cpu_to_le32(0);
+ disk_super->write_misses = cpu_to_le32(0);
+
+ return dm_tm_commit(cmd->tm, sblock);
+
+bad_locked:
+ dm_bm_unlock(sblock);
+ return r;
+}
+
+static int __format_metadata(struct dm_cache_metadata *cmd)
+{
+ int r;
+
+ r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ &cmd->tm, &cmd->metadata_sm);
+ if (r < 0) {
+ DMERR("tm_create_with_sm failed");
+ return r;
+ }
+
+ __setup_mapping_info(cmd);
+
+ r = dm_array_empty(&cmd->info, &cmd->root);
+ if (r < 0)
+ goto bad;
+
+ dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
+
+ r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
+ if (r < 0)
+ goto bad;
+
+ cmd->discard_block_size = 0;
+ cmd->discard_nr_blocks = 0;
+
+ r = __write_initial_superblock(cmd);
+ if (r)
+ goto bad;
+
+ cmd->clean_when_opened = true;
+ return 0;
+
+bad:
+ dm_tm_destroy(cmd->tm);
+ dm_sm_destroy(cmd->metadata_sm);
+
+ return r;
+}
+
+static int __check_incompat_features(struct cache_disk_superblock *disk_super,
+ struct dm_cache_metadata *cmd)
+{
+ uint32_t features;
+
+ features = le32_to_cpu(disk_super->incompat_flags) & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
+ if (features) {
+ DMERR("could not access metadata due to unsupported optional features (%lx).",
+ (unsigned long)features);
+ return -EINVAL;
+ }
+
+ /*
+ * Check for read-only metadata to skip the following RDWR checks.
+ */
+ if (get_disk_ro(cmd->bdev->bd_disk))
+ return 0;
+
+ features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
+ if (features) {
+ DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
+ (unsigned long)features);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __open_metadata(struct dm_cache_metadata *cmd)
+{
+ int r;
+ struct dm_block *sblock;
+ struct cache_disk_superblock *disk_super;
+ unsigned long sb_flags;
+
+ r = superblock_read_lock(cmd, &sblock);
+ if (r < 0) {
+ DMERR("couldn't read lock superblock");
+ return r;
+ }
+
+ disk_super = dm_block_data(sblock);
+
+ r = __check_incompat_features(disk_super, cmd);
+ if (r < 0)
+ goto bad;
+
+ r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ disk_super->metadata_space_map_root,
+ sizeof(disk_super->metadata_space_map_root),
+ &cmd->tm, &cmd->metadata_sm);
+ if (r < 0) {
+ DMERR("tm_open_with_sm failed");
+ goto bad;
+ }
+
+ __setup_mapping_info(cmd);
+ dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
+ sb_flags = le32_to_cpu(disk_super->flags);
+ cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
+ return dm_bm_unlock(sblock);
+
+bad:
+ dm_bm_unlock(sblock);
+ return r;
+}
+
+static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
+ bool format_device)
+{
+ int r, unformatted;
+
+ r = __superblock_all_zeroes(cmd->bm, &unformatted);
+ if (r)
+ return r;
+
+ if (unformatted)
+ return format_device ? __format_metadata(cmd) : -EPERM;
+
+ return __open_metadata(cmd);
+}
+
+static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
+ bool may_format_device)
+{
+ int r;
+ cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE,
+ CACHE_METADATA_CACHE_SIZE,
+ CACHE_MAX_CONCURRENT_LOCKS);
+ if (IS_ERR(cmd->bm)) {
+ DMERR("could not create block manager");
+ return PTR_ERR(cmd->bm);
+ }
+
+ r = __open_or_format_metadata(cmd, may_format_device);
+ if (r)
+ dm_block_manager_destroy(cmd->bm);
+
+ return r;
+}
+
+static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
+{
+ dm_sm_destroy(cmd->metadata_sm);
+ dm_tm_destroy(cmd->tm);
+ dm_block_manager_destroy(cmd->bm);
+}
+
+typedef unsigned long (*flags_mutator)(unsigned long);
+
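+/*
+ * A flags_mutator transforms the superblock flags word; the two mutators
+ * below set or clear the CLEAN_SHUTDOWN bit when committing or reopening
+ * the metadata.
+ */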
+static void update_flags(struct cache_disk_superblock *disk_super,
+ flags_mutator mutator)
+{
+ uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
+ disk_super->flags = cpu_to_le32(sb_flags);
+}
+
+static unsigned long set_clean_shutdown(unsigned long flags)
+{
+ set_bit(CLEAN_SHUTDOWN, &flags);
+ return flags;
+}
+
+static unsigned long clear_clean_shutdown(unsigned long flags)
+{
+ clear_bit(CLEAN_SHUTDOWN, &flags);
+ return flags;
+}
+
+static void read_superblock_fields(struct dm_cache_metadata *cmd,
+ struct cache_disk_superblock *disk_super)
+{
+ cmd->root = le64_to_cpu(disk_super->mapping_root);
+ cmd->hint_root = le64_to_cpu(disk_super->hint_root);
+ cmd->discard_root = le64_to_cpu(disk_super->discard_root);
+ cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
+ cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
+ cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
+ cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
+ strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
+ cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
+
+ cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
+ cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
+ cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
+ cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);
+
+ cmd->changed = false;
+}
+
+/*
+ * The mutator updates the superblock flags.
+ */
+static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
+ flags_mutator mutator)
+{
+ int r;
+ struct cache_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ r = superblock_lock(cmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+ update_flags(disk_super, mutator);
+ read_superblock_fields(cmd, disk_super);
+
+ return dm_bm_flush_and_unlock(cmd->bm, sblock);
+}
+
+static int __begin_transaction(struct dm_cache_metadata *cmd)
+{
+ int r;
+ struct cache_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ /*
+ * We re-read the superblock every time. Shouldn't need to do this
+ * really.
+ */
+ r = superblock_read_lock(cmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+ read_superblock_fields(cmd, disk_super);
+ dm_bm_unlock(sblock);
+
+ return 0;
+}
+
+static int __commit_transaction(struct dm_cache_metadata *cmd,
+ flags_mutator mutator)
+{
+ int r;
+ size_t metadata_len;
+ struct cache_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ /*
+ * We need to know if the cache_disk_superblock exceeds a 512-byte sector.
+ */
+ BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);
+
+ r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
+ &cmd->discard_root);
+ if (r)
+ return r;
+
+ r = dm_tm_pre_commit(cmd->tm);
+ if (r < 0)
+ return r;
+
+ r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
+ if (r < 0)
+ return r;
+
+ r = superblock_lock(cmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+
+ if (mutator)
+ update_flags(disk_super, mutator);
+
+ disk_super->mapping_root = cpu_to_le64(cmd->root);
+ disk_super->hint_root = cpu_to_le64(cmd->hint_root);
+ disk_super->discard_root = cpu_to_le64(cmd->discard_root);
+ disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
+ disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
+ disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
+ strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
+
+ disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
+ disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
+ disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
+ disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
+
+ r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
+ metadata_len);
+ if (r < 0) {
+ dm_bm_unlock(sblock);
+ return r;
+ }
+
+ return dm_tm_commit(cmd->tm, sblock);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The mappings are held in a dm-array that has 64-bit values stored in
+ * little-endian format.  The index is the cblock, the high 48 bits of the
+ * value are the oblock and the low 16 bits the flags.
+ */
+#define FLAGS_MASK ((1 << 16) - 1)
+
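+/*
+ * Purely illustrative example: oblock 5 with flags M_VALID | M_DIRTY packs
+ * to the value 0x50003, stored on disk as a little-endian __le64.
+ */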
+static __le64 pack_value(dm_oblock_t block, unsigned flags)
+{
+ uint64_t value = from_oblock(block);
+ value <<= 16;
+ value = value | (flags & FLAGS_MASK);
+ return cpu_to_le64(value);
+}
+
+static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
+{
+ uint64_t value = le64_to_cpu(value_le);
+ uint64_t b = value >> 16;
+ *block = to_oblock(b);
+ *flags = value & FLAGS_MASK;
+}
+
+/*----------------------------------------------------------------*/
+
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
+{
+ int r;
+ struct dm_cache_metadata *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ DMERR("could not allocate metadata struct");
+ return NULL;
+ }
+
+ init_rwsem(&cmd->root_lock);
+ cmd->bdev = bdev;
+ cmd->data_block_size = data_block_size;
+ cmd->cache_blocks = 0;
+ cmd->policy_hint_size = policy_hint_size;
+ cmd->changed = true;
+
+ r = __create_persistent_data_objects(cmd, may_format_device);
+ if (r) {
+ kfree(cmd);
+ return ERR_PTR(r);
+ }
+
+ r = __begin_transaction_flags(cmd, clear_clean_shutdown);
+ if (r < 0) {
+ dm_cache_metadata_close(cmd);
+ return ERR_PTR(r);
+ }
+
+ return cmd;
+}
+
+void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
+{
+ __destroy_persistent_data_objects(cmd);
+ kfree(cmd);
+}
+
+int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
+{
+ int r;
+ __le64 null_mapping = pack_value(0, 0);
+
+ down_write(&cmd->root_lock);
+ __dm_bless_for_disk(&null_mapping);
+ r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
+ from_cblock(new_cache_size),
+ &null_mapping, &cmd->root);
+ if (!r)
+ cmd->cache_blocks = new_cache_size;
+ cmd->changed = true;
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
+ sector_t discard_block_size,
+ dm_dblock_t new_nr_entries)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = dm_bitset_resize(&cmd->discard_info,
+ cmd->discard_root,
+ from_dblock(cmd->discard_nr_blocks),
+ from_dblock(new_nr_entries),
+ false, &cmd->discard_root);
+ if (!r) {
+ cmd->discard_block_size = discard_block_size;
+ cmd->discard_nr_blocks = new_nr_entries;
+ }
+
+ cmd->changed = true;
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
+{
+ return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
+ from_dblock(b), &cmd->discard_root);
+}
+
+static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
+{
+ return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
+ from_dblock(b), &cmd->discard_root);
+}
+
+static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b,
+ bool *is_discarded)
+{
+ return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root,
+ from_dblock(b), &cmd->discard_root,
+ is_discarded);
+}
+
+static int __discard(struct dm_cache_metadata *cmd,
+ dm_dblock_t dblock, bool discard)
+{
+ int r;
+
+ r = (discard ? __set_discard : __clear_discard)(cmd, dblock);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+ return 0;
+}
+
+int dm_cache_set_discard(struct dm_cache_metadata *cmd,
+ dm_dblock_t dblock, bool discard)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = __discard(cmd, dblock, discard);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+static int __load_discards(struct dm_cache_metadata *cmd,
+ load_discard_fn fn, void *context)
+{
+ int r = 0;
+ dm_block_t b;
+ bool discard;
+
+ for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
+ dm_dblock_t dblock = to_dblock(b);
+
+ if (cmd->clean_when_opened) {
+ r = __is_discarded(cmd, dblock, &discard);
+ if (r)
+ return r;
+ } else
+ discard = false;
+
+ r = fn(context, cmd->discard_block_size, dblock, discard);
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+int dm_cache_load_discards(struct dm_cache_metadata *cmd,
+ load_discard_fn fn, void *context)
+{
+ int r;
+
+ down_read(&cmd->root_lock);
+ r = __load_discards(cmd, fn, context);
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
+{
+ dm_cblock_t r;
+
+ down_read(&cmd->root_lock);
+ r = cmd->cache_blocks;
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
+{
+ int r;
+ __le64 value = pack_value(0, 0);
+
+ __dm_bless_for_disk(&value);
+ r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
+ &value, &cmd->root);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+ return 0;
+}
+
+int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = __remove(cmd, cblock);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+static int __insert(struct dm_cache_metadata *cmd,
+ dm_cblock_t cblock, dm_oblock_t oblock)
+{
+ int r;
+ __le64 value = pack_value(oblock, M_VALID);
+ __dm_bless_for_disk(&value);
+
+ r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
+ &value, &cmd->root);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+ return 0;
+}
+
+int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
+ dm_cblock_t cblock, dm_oblock_t oblock)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = __insert(cmd, cblock, oblock);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
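+/*
+ * The thunk bundles the caller's callback and the per-walk state (whether
+ * the dirty flags and hints can be trusted) into a single context pointer
+ * for dm_array_walk().
+ */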
+struct thunk {
+ load_mapping_fn fn;
+ void *context;
+
+ struct dm_cache_metadata *cmd;
+ bool respect_dirty_flags;
+ bool hints_valid;
+};
+
+static bool hints_array_initialized(struct dm_cache_metadata *cmd)
+{
+ return cmd->hint_root && cmd->policy_hint_size;
+}
+
+static bool hints_array_available(struct dm_cache_metadata *cmd,
+ const char *policy_name)
+{
+ bool policy_names_match = !strncmp(cmd->policy_name, policy_name,
+ sizeof(cmd->policy_name));
+
+ return cmd->clean_when_opened && policy_names_match &&
+ hints_array_initialized(cmd);
+}
+
+static int __load_mapping(void *context, uint64_t cblock, void *leaf)
+{
+ int r = 0;
+ bool dirty;
+ __le64 value;
+ __le32 hint_value = 0;
+ dm_oblock_t oblock;
+ unsigned flags;
+ struct thunk *thunk = context;
+ struct dm_cache_metadata *cmd = thunk->cmd;
+
+ memcpy(&value, leaf, sizeof(value));
+ unpack_value(value, &oblock, &flags);
+
+ if (flags & M_VALID) {
+ if (thunk->hints_valid) {
+ r = dm_array_get_value(&cmd->hint_info, cmd->hint_root,
+ cblock, &hint_value);
+ if (r && r != -ENODATA)
+ return r;
+ }
+
+ dirty = thunk->respect_dirty_flags ? (flags & M_DIRTY) : true;
+ r = thunk->fn(thunk->context, oblock, to_cblock(cblock),
+ dirty, le32_to_cpu(hint_value), thunk->hints_valid);
+ }
+
+ return r;
+}
+
+static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+ load_mapping_fn fn, void *context)
+{
+ struct thunk thunk;
+
+ thunk.fn = fn;
+ thunk.context = context;
+
+ thunk.cmd = cmd;
+ thunk.respect_dirty_flags = cmd->clean_when_opened;
+ thunk.hints_valid = hints_array_available(cmd, policy_name);
+
+ return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk);
+}
+
+int dm_cache_load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+ load_mapping_fn fn, void *context)
+{
+ int r;
+
+ down_read(&cmd->root_lock);
+ r = __load_mappings(cmd, policy_name, fn, context);
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
+{
+ int r = 0;
+ __le64 value;
+ dm_oblock_t oblock;
+ unsigned flags;
+
+ memcpy(&value, leaf, sizeof(value));
+ unpack_value(value, &oblock, &flags);
+
+ return r;
+}
+
+static int __dump_mappings(struct dm_cache_metadata *cmd)
+{
+ return dm_array_walk(&cmd->info, cmd->root, __dump_mapping, NULL);
+}
+
+void dm_cache_dump(struct dm_cache_metadata *cmd)
+{
+ down_read(&cmd->root_lock);
+ __dump_mappings(cmd);
+ up_read(&cmd->root_lock);
+}
+
+int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
+{
+ int r;
+
+ down_read(&cmd->root_lock);
+ r = cmd->changed;
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
+{
+ int r;
+ unsigned flags;
+ dm_oblock_t oblock;
+ __le64 value;
+
+ r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
+ if (r)
+ return r;
+
+ unpack_value(value, &oblock, &flags);
+
+ if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
+ /* nothing to be done */
+ return 0;
+
+ value = pack_value(oblock, flags | (dirty ? M_DIRTY : 0));
+ __dm_bless_for_disk(&value);
+
+ r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
+ &value, &cmd->root);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+ return 0;
+
+}
+
+int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
+ dm_cblock_t cblock, bool dirty)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = __dirty(cmd, cblock, dirty);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
+ struct dm_cache_statistics *stats)
+{
+ down_read(&cmd->root_lock);
+ memcpy(stats, &cmd->stats, sizeof(*stats));
+ up_read(&cmd->root_lock);
+}
+
+void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
+ struct dm_cache_statistics *stats)
+{
+ down_write(&cmd->root_lock);
+ memcpy(&cmd->stats, stats, sizeof(*stats));
+ up_write(&cmd->root_lock);
+}
+
+int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
+{
+ int r;
+ flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
+ clear_clean_shutdown);
+
+ down_write(&cmd->root_lock);
+ r = __commit_transaction(cmd, mutator);
+ if (r)
+ goto out;
+
+ r = __begin_transaction(cmd);
+
+out:
+ up_write(&cmd->root_lock);
+ return r;
+}
+
+int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
+ dm_block_t *result)
+{
+ int r = -EINVAL;
+
+ down_read(&cmd->root_lock);
+ r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
+ dm_block_t *result)
+{
+ int r = -EINVAL;
+
+ down_read(&cmd->root_lock);
+ r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
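+/*
+ * If the policy differs from the one recorded in the metadata, any existing
+ * hint array is thrown away and an empty one is created, sized for the new
+ * policy's hint size and the current number of cache blocks.
+ */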
+static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
+{
+ int r;
+ __le32 value;
+ size_t hint_size;
+ const char *policy_name = dm_cache_policy_get_name(policy);
+
+ if (!policy_name[0] ||
+ (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
+ return -EINVAL;
+
+ if (strcmp(cmd->policy_name, policy_name)) {
+ strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
+
+ hint_size = dm_cache_policy_get_hint_size(policy);
+ if (!hint_size)
+ return 0; /* short-circuit hints initialization */
+ cmd->policy_hint_size = hint_size;
+
+ if (cmd->hint_root) {
+ r = dm_array_del(&cmd->hint_info, cmd->hint_root);
+ if (r)
+ return r;
+ }
+
+ r = dm_array_empty(&cmd->hint_info, &cmd->hint_root);
+ if (r)
+ return r;
+
+ value = cpu_to_le32(0);
+ __dm_bless_for_disk(&value);
+ r = dm_array_resize(&cmd->hint_info, cmd->hint_root, 0,
+ from_cblock(cmd->cache_blocks),
+ &value, &cmd->hint_root);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = begin_hints(cmd, policy);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
+ uint32_t hint)
+{
+ int r;
+ __le32 value = cpu_to_le32(hint);
+ __dm_bless_for_disk(&value);
+
+ r = dm_array_set_value(&cmd->hint_info, cmd->hint_root,
+ from_cblock(cblock), &value, &cmd->hint_root);
+ cmd->changed = true;
+
+ return r;
+}
+
+int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
+ uint32_t hint)
+{
+ int r;
+
+ if (!hints_array_initialized(cmd))
+ return 0;
+
+ down_write(&cmd->root_lock);
+ r = save_hint(cmd, cblock, hint);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
new file mode 100644
index 000000000000..135864ea0eee
--- /dev/null
+++ b/drivers/md/dm-cache-metadata.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_METADATA_H
+#define DM_CACHE_METADATA_H
+
+#include "dm-cache-block-types.h"
+#include "dm-cache-policy-internal.h"
+
+/*----------------------------------------------------------------*/
+
+#define DM_CACHE_METADATA_BLOCK_SIZE 4096
+
+/* FIXME: remove this restriction */
+/*
+ * The metadata device is currently limited in size.
+ *
+ * We have one block of index, which can hold 255 index entries. Each
+ * index entry contains allocation info about 16k metadata blocks.
+ */
+#define DM_CACHE_METADATA_MAX_SECTORS (255 * (1 << 14) * (DM_CACHE_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
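+/*
+ * With 4KiB metadata blocks this works out to 255 * 16k blocks of 8 sectors
+ * each, i.e. a little under 16GiB of metadata device.
+ */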
+
+/*
+ * A metadata device larger than 16GB triggers a warning.
+ */
+#define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Ext[234]-style compat feature flags.
+ *
+ * A new feature which old metadata will still be compatible with should
+ * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
+ *
+ * A new feature that is not compatible with old code should define a
+ * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
+ * that flag.
+ *
+ * A new feature that is not compatible with old code accessing the
+ * metadata RDWR should define a DM_CACHE_FEATURE_COMPAT_RO_* flag and
+ * guard the relevant code with that flag.
+ *
+ * As these various flags are defined they should be added to the
+ * following masks.
+ */
+#define DM_CACHE_FEATURE_COMPAT_SUPP 0UL
+#define DM_CACHE_FEATURE_COMPAT_RO_SUPP 0UL
+#define DM_CACHE_FEATURE_INCOMPAT_SUPP 0UL
+
+/*
+ * Reopens or creates a new, empty metadata volume.
+ * Returns an ERR_PTR on failure.
+ */
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size);
+
+void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
+
+/*
+ * The metadata needs to know how many cache blocks there are. We don't
+ * care about the origin, assuming the core target is giving us valid
+ * origin blocks to map to.
+ */
+int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
+dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
+
+int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
+ sector_t discard_block_size,
+ dm_dblock_t new_nr_entries);
+
+typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
+ dm_dblock_t dblock, bool discarded);
+int dm_cache_load_discards(struct dm_cache_metadata *cmd,
+ load_discard_fn fn, void *context);
+
+int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);
+
+int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
+int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
+int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);
+
+typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
+ dm_cblock_t cblock, bool dirty,
+ uint32_t hint, bool hint_valid);
+int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
+ const char *policy_name,
+ load_mapping_fn fn,
+ void *context);
+
+int dm_cache_set_dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty);
+
+struct dm_cache_statistics {
+ uint32_t read_hits;
+ uint32_t read_misses;
+ uint32_t write_hits;
+ uint32_t write_misses;
+};
+
+void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
+ struct dm_cache_statistics *stats);
+void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
+ struct dm_cache_statistics *stats);
+
+int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);
+
+int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
+ dm_block_t *result);
+
+int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
+ dm_block_t *result);
+
+void dm_cache_dump(struct dm_cache_metadata *cmd);
+
+/*
+ * The policy is invited to save a 32bit hint value for every cblock (eg,
+ * for a hit count). These are stored against the policy name. If
+ * policies are changed, then hints will be lost. If the machine crashes,
+ * hints will be lost.
+ *
+ * The hints are indexed by the cblock, but many policies will not
+ * necessarily have a fast way of accessing them efficiently via cblock.  So
+ * rather than querying the policy for each cblock, we let it walk its data
+ * structures and fill in the hints in whatever order it wishes.
+ */
+
+int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
+
+/*
+ * Requests hints for every cblock and stores them in the metadata device.
+ */
+int dm_cache_save_hint(struct dm_cache_metadata *cmd,
+ dm_cblock_t cblock, uint32_t hint);
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_CACHE_METADATA_H */
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
new file mode 100644
index 000000000000..cc05d70b3cb8
--- /dev/null
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * writeback cache policy supporting flushing out dirty cache blocks.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy.h"
+#include "dm.h"
+
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/*----------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX "cache cleaner"
+#define CLEANER_VERSION "1.0.0"
+
+/* Cache entry struct. */
+struct wb_cache_entry {
+ struct list_head list;
+ struct hlist_node hlist;
+
+ dm_oblock_t oblock;
+ dm_cblock_t cblock;
+ bool dirty:1;
+ bool pending:1;
+};
+
+struct hash {
+ struct hlist_head *table;
+ dm_block_t hash_bits;
+ unsigned nr_buckets;
+};
+
+struct policy {
+ struct dm_cache_policy policy;
+ spinlock_t lock;
+
+ struct list_head free;
+ struct list_head clean;
+ struct list_head clean_pending;
+ struct list_head dirty;
+
+ /*
+ * We know exactly how many cblocks will be needed,
+ * so we can allocate them up front.
+ */
+ dm_cblock_t cache_size, nr_cblocks_allocated;
+ struct wb_cache_entry *cblocks;
+ struct hash chash;
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Low-level functions.
+ */
+static unsigned next_power(unsigned n, unsigned min)
+{
+ return roundup_pow_of_two(max(n, min));
+}
+
+static struct policy *to_policy(struct dm_cache_policy *p)
+{
+ return container_of(p, struct policy, policy);
+}
+
+static struct list_head *list_pop(struct list_head *q)
+{
+ struct list_head *r = q->next;
+
+ list_del(r);
+
+ return r;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/* Allocate/free various resources. */
+static int alloc_hash(struct hash *hash, unsigned elts)
+{
+ hash->nr_buckets = next_power(elts >> 4, 16);
+ hash->hash_bits = ffs(hash->nr_buckets) - 1;
+ hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);
+
+ return hash->table ? 0 : -ENOMEM;
+}
+
+static void free_hash(struct hash *hash)
+{
+ vfree(hash->table);
+}
+
+static int alloc_cache_blocks_with_hash(struct policy *p, dm_cblock_t cache_size)
+{
+ int r = -ENOMEM;
+
+ p->cblocks = vzalloc(sizeof(*p->cblocks) * from_cblock(cache_size));
+ if (p->cblocks) {
+ unsigned u = from_cblock(cache_size);
+
+ while (u--)
+ list_add(&p->cblocks[u].list, &p->free);
+
+ p->nr_cblocks_allocated = 0;
+
+ /* Cache entries hash. */
+ r = alloc_hash(&p->chash, from_cblock(cache_size));
+ if (r)
+ vfree(p->cblocks);
+ }
+
+ return r;
+}
+
+static void free_cache_blocks_and_hash(struct policy *p)
+{
+ free_hash(&p->chash);
+ vfree(p->cblocks);
+}
+
+static struct wb_cache_entry *alloc_cache_entry(struct policy *p)
+{
+ struct wb_cache_entry *e;
+
+ BUG_ON(from_cblock(p->nr_cblocks_allocated) >= from_cblock(p->cache_size));
+
+ e = list_entry(list_pop(&p->free), struct wb_cache_entry, list);
+ p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) + 1);
+
+ return e;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/* Hash functions (lookup, insert, remove). */
+static struct wb_cache_entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock)
+{
+ struct hash *hash = &p->chash;
+ unsigned h = hash_64(from_oblock(oblock), hash->hash_bits);
+ struct wb_cache_entry *cur;
+ struct hlist_head *bucket = &hash->table[h];
+
+ hlist_for_each_entry(cur, bucket, hlist) {
+ if (cur->oblock == oblock) {
+ /* Move to the front of the bucket for faster access. */
+ hlist_del(&cur->hlist);
+ hlist_add_head(&cur->hlist, bucket);
+ return cur;
+ }
+ }
+
+ return NULL;
+}
+
+static void insert_cache_hash_entry(struct policy *p, struct wb_cache_entry *e)
+{
+ unsigned h = hash_64(from_oblock(e->oblock), p->chash.hash_bits);
+
+ hlist_add_head(&e->hlist, &p->chash.table[h]);
+}
+
+static void remove_cache_hash_entry(struct wb_cache_entry *e)
+{
+ hlist_del(&e->hlist);
+}
+
+/* Public interface (see dm-cache-policy.h). */
+static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result)
+{
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ result->op = POLICY_MISS;
+
+ if (can_block)
+ spin_lock_irqsave(&p->lock, flags);
+
+ else if (!spin_trylock_irqsave(&p->lock, flags))
+ return -EWOULDBLOCK;
+
+ e = lookup_cache_entry(p, oblock);
+ if (e) {
+ result->op = POLICY_HIT;
+ result->cblock = e->cblock;
+
+ }
+
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ return 0;
+}
+
+static int wb_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ int r;
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ if (!spin_trylock_irqsave(&p->lock, flags))
+ return -EWOULDBLOCK;
+
+ e = lookup_cache_entry(p, oblock);
+ if (e) {
+ *cblock = e->cblock;
+ r = 0;
+
+ } else
+ r = -ENOENT;
+
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ return r;
+}
+
+static void __set_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock, bool set)
+{
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+
+ e = lookup_cache_entry(p, oblock);
+ BUG_ON(!e);
+
+ if (set) {
+ if (!e->dirty) {
+ e->dirty = true;
+ list_move(&e->list, &p->dirty);
+ }
+
+ } else {
+ if (e->dirty) {
+ e->pending = false;
+ e->dirty = false;
+ list_move(&e->list, &p->clean);
+ }
+ }
+}
+
+static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ __set_clear_dirty(pe, oblock, true);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ __set_clear_dirty(pe, oblock, false);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
+{
+ insert_cache_hash_entry(p, e);
+ if (e->dirty)
+ list_add(&e->list, &p->dirty);
+ else
+ list_add(&e->list, &p->clean);
+}
+
+static int wb_load_mapping(struct dm_cache_policy *pe,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ uint32_t hint, bool hint_valid)
+{
+ int r;
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e = alloc_cache_entry(p);
+
+ if (e) {
+ e->cblock = cblock;
+ e->oblock = oblock;
+ e->dirty = false; /* blocks default to clean */
+ add_cache_entry(p, e);
+ r = 0;
+
+ } else
+ r = -ENOMEM;
+
+ return r;
+}
+
+static void wb_destroy(struct dm_cache_policy *pe)
+{
+ struct policy *p = to_policy(pe);
+
+ free_cache_blocks_and_hash(p);
+ kfree(p);
+}
+
+static struct wb_cache_entry *__wb_force_remove_mapping(struct policy *p, dm_oblock_t oblock)
+{
+ struct wb_cache_entry *r = lookup_cache_entry(p, oblock);
+
+ BUG_ON(!r);
+
+ remove_cache_hash_entry(r);
+ list_del(&r->list);
+
+ return r;
+}
+
+static void wb_remove_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ e = __wb_force_remove_mapping(p, oblock);
+ list_add_tail(&e->list, &p->free);
+ BUG_ON(!from_cblock(p->nr_cblocks_allocated));
+ p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) - 1);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void wb_force_mapping(struct dm_cache_policy *pe,
+ dm_oblock_t current_oblock, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ e = __wb_force_remove_mapping(p, current_oblock);
+ e->oblock = oblock;
+ add_cache_entry(p, e);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static struct wb_cache_entry *get_next_dirty_entry(struct policy *p)
+{
+ struct list_head *l;
+ struct wb_cache_entry *r;
+
+ if (list_empty(&p->dirty))
+ return NULL;
+
+ l = list_pop(&p->dirty);
+ r = container_of(l, struct wb_cache_entry, list);
+ list_add(l, &p->clean_pending);
+
+ return r;
+}
+
+static int wb_writeback_work(struct dm_cache_policy *pe,
+ dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
+{
+ int r = -ENOENT;
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+
+ e = get_next_dirty_entry(p);
+ if (e) {
+ *oblock = e->oblock;
+ *cblock = e->cblock;
+ r = 0;
+ }
+
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ return r;
+}
+
+static dm_cblock_t wb_residency(struct dm_cache_policy *pe)
+{
+ return to_policy(pe)->nr_cblocks_allocated;
+}
+
+/* Init the policy plugin interface function pointers. */
+static void init_policy_functions(struct policy *p)
+{
+ p->policy.destroy = wb_destroy;
+ p->policy.map = wb_map;
+ p->policy.lookup = wb_lookup;
+ p->policy.set_dirty = wb_set_dirty;
+ p->policy.clear_dirty = wb_clear_dirty;
+ p->policy.load_mapping = wb_load_mapping;
+ p->policy.walk_mappings = NULL;
+ p->policy.remove_mapping = wb_remove_mapping;
+ p->policy.writeback_work = wb_writeback_work;
+ p->policy.force_mapping = wb_force_mapping;
+ p->policy.residency = wb_residency;
+ p->policy.tick = NULL;
+}
+
+static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ int r;
+ struct policy *p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+ if (!p)
+ return NULL;
+
+ init_policy_functions(p);
+ INIT_LIST_HEAD(&p->free);
+ INIT_LIST_HEAD(&p->clean);
+ INIT_LIST_HEAD(&p->clean_pending);
+ INIT_LIST_HEAD(&p->dirty);
+
+ p->cache_size = cache_size;
+ spin_lock_init(&p->lock);
+
+ /* Allocate cache entry structs and add them to free list. */
+ r = alloc_cache_blocks_with_hash(p, cache_size);
+ if (!r)
+ return &p->policy;
+
+ kfree(p);
+
+ return NULL;
+}
+/*----------------------------------------------------------------------------*/
+
+static struct dm_cache_policy_type wb_policy_type = {
+ .name = "cleaner",
+ .hint_size = 0,
+ .owner = THIS_MODULE,
+ .create = wb_create
+};
+
+static int __init wb_init(void)
+{
+ int r = dm_cache_policy_register(&wb_policy_type);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+ else
+ DMINFO("version " CLEANER_VERSION " loaded");
+
+ return r;
+}
+
+static void __exit wb_exit(void)
+{
+ dm_cache_policy_unregister(&wb_policy_type);
+}
+
+module_init(wb_init);
+module_exit(wb_exit);
+
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("cleaner cache policy");
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
new file mode 100644
index 000000000000..52a75beeced5
--- /dev/null
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_POLICY_INTERNAL_H
+#define DM_CACHE_POLICY_INTERNAL_H
+
+#include "dm-cache-policy.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Little inline functions that simplify calling the policy methods.
+ */
+static inline int policy_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result)
+{
+ return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, result);
+}
+
+static inline int policy_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ BUG_ON(!p->lookup);
+ return p->lookup(p, oblock, cblock);
+}
+
+static inline void policy_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ if (p->set_dirty)
+ p->set_dirty(p, oblock);
+}
+
+static inline void policy_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ if (p->clear_dirty)
+ p->clear_dirty(p, oblock);
+}
+
+static inline int policy_load_mapping(struct dm_cache_policy *p,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ uint32_t hint, bool hint_valid)
+{
+ return p->load_mapping(p, oblock, cblock, hint, hint_valid);
+}
+
+static inline int policy_walk_mappings(struct dm_cache_policy *p,
+ policy_walk_fn fn, void *context)
+{
+ return p->walk_mappings ? p->walk_mappings(p, fn, context) : 0;
+}
+
+static inline int policy_writeback_work(struct dm_cache_policy *p,
+ dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
+{
+ return p->writeback_work ? p->writeback_work(p, oblock, cblock) : -ENOENT;
+}
+
+static inline void policy_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ return p->remove_mapping(p, oblock);
+}
+
+static inline void policy_force_mapping(struct dm_cache_policy *p,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ return p->force_mapping(p, current_oblock, new_oblock);
+}
+
+static inline dm_cblock_t policy_residency(struct dm_cache_policy *p)
+{
+ return p->residency(p);
+}
+
+static inline void policy_tick(struct dm_cache_policy *p)
+{
+ if (p->tick)
+ return p->tick(p);
+}
+
+static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
+{
+ ssize_t sz = 0;
+ if (p->emit_config_values)
+ return p->emit_config_values(p, result, maxlen);
+
+ DMEMIT("0");
+ return 0;
+}
+
+static inline int policy_set_config_value(struct dm_cache_policy *p,
+ const char *key, const char *value)
+{
+ return p->set_config_value ? p->set_config_value(p, key, value) : -EINVAL;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Creates a new cache policy given a policy name, a cache size, an origin size and the block size.
+ */
+struct dm_cache_policy *dm_cache_policy_create(const char *name, dm_cblock_t cache_size,
+ sector_t origin_size, sector_t block_size);
+
+/*
+ * Destroys the policy. This drops references to the policy module as well
+ * as calling it's destroy method. So always use this rather than calling
+ * the policy->destroy method directly.
+ */
+void dm_cache_policy_destroy(struct dm_cache_policy *p);
+
+/*
+ * In case we've forgotten.
+ */
+const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
+
+size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_CACHE_POLICY_INTERNAL_H */
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
new file mode 100644
index 000000000000..964153255076
--- /dev/null
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -0,0 +1,1195 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy.h"
+#include "dm.h"
+
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#define DM_MSG_PREFIX "cache-policy-mq"
+#define MQ_VERSION "1.0.0"
+
+static struct kmem_cache *mq_entry_cache;
+
+/*----------------------------------------------------------------*/
+
+static unsigned next_power(unsigned n, unsigned min)
+{
+ return roundup_pow_of_two(max(n, min));
+}
+
+/*----------------------------------------------------------------*/
+
+static unsigned long *alloc_bitset(unsigned nr_entries)
+{
+ size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+ return vzalloc(s);
+}
+
+static void free_bitset(unsigned long *bits)
+{
+ vfree(bits);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Large, sequential ios are probably better left on the origin device since
+ * spindles tend to have good bandwidth.
+ *
+ * The io_tracker tries to spot when the io is in one of these sequential
+ * modes.
+ *
+ * The two thresholds for switching between random and sequential io mode
+ * default as follows and can be adjusted via the constructor and message
+ * interfaces.
+ */
+#define RANDOM_THRESHOLD_DEFAULT 4
+#define SEQUENTIAL_THRESHOLD_DEFAULT 512
+
+enum io_pattern {
+ PATTERN_SEQUENTIAL,
+ PATTERN_RANDOM
+};
+
+struct io_tracker {
+ enum io_pattern pattern;
+
+ unsigned nr_seq_samples;
+ unsigned nr_rand_samples;
+ unsigned thresholds[2];
+
+ dm_oblock_t last_end_oblock;
+};
+
+static void iot_init(struct io_tracker *t,
+ int sequential_threshold, int random_threshold)
+{
+ t->pattern = PATTERN_RANDOM;
+ t->nr_seq_samples = 0;
+ t->nr_rand_samples = 0;
+ t->last_end_oblock = 0;
+ t->thresholds[PATTERN_RANDOM] = random_threshold;
+ t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
+}
+
+static enum io_pattern iot_pattern(struct io_tracker *t)
+{
+ return t->pattern;
+}
+
+static void iot_update_stats(struct io_tracker *t, struct bio *bio)
+{
+ if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+ t->nr_seq_samples++;
+ else {
+ /*
+ * Just one non-sequential IO is enough to reset the
+ * counters.
+ */
+ if (t->nr_seq_samples) {
+ t->nr_seq_samples = 0;
+ t->nr_rand_samples = 0;
+ }
+
+ t->nr_rand_samples++;
+ }
+
+ t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+}
+
+static void iot_check_for_pattern_switch(struct io_tracker *t)
+{
+ switch (t->pattern) {
+ case PATTERN_SEQUENTIAL:
+ if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
+ t->pattern = PATTERN_RANDOM;
+ t->nr_seq_samples = t->nr_rand_samples = 0;
+ }
+ break;
+
+ case PATTERN_RANDOM:
+ if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
+ t->pattern = PATTERN_SEQUENTIAL;
+ t->nr_seq_samples = t->nr_rand_samples = 0;
+ }
+ break;
+ }
+}
+
+static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
+{
+ iot_update_stats(t, bio);
+ iot_check_for_pattern_switch(t);
+}
+
+/*----------------------------------------------------------------*/
+
+
+/*
+ * This queue is divided up into different levels, allowing us to push
+ * entries to the back of any of the levels.  Think of it as a partially
+ * sorted queue.
+ */
+#define NR_QUEUE_LEVELS 16u
+
+struct queue {
+ struct list_head qs[NR_QUEUE_LEVELS];
+};
+
+static void queue_init(struct queue *q)
+{
+ unsigned i;
+
+ for (i = 0; i < NR_QUEUE_LEVELS; i++)
+ INIT_LIST_HEAD(q->qs + i);
+}
+
+/*
+ * Insert an entry to the back of the given level.
+ */
+static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
+{
+ list_add_tail(elt, q->qs + level);
+}
+
+static void queue_remove(struct list_head *elt)
+{
+ list_del(elt);
+}
+
+/*
+ * Shifts all regions down one level. This has no effect on the order of
+ * the queue.
+ */
+static void queue_shift_down(struct queue *q)
+{
+ unsigned level;
+
+ for (level = 1; level < NR_QUEUE_LEVELS; level++)
+ list_splice_init(q->qs + level, q->qs + level - 1);
+}
+
+/*
+ * Gives us the oldest entry of the lowest populated level.  If the first
+ * level is emptied then we shift down one level.
+ */
+static struct list_head *queue_pop(struct queue *q)
+{
+ unsigned level;
+ struct list_head *r;
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ if (!list_empty(q->qs + level)) {
+ r = q->qs[level].next;
+ list_del(r);
+
+ /* have we just emptied the bottom level? */
+ if (level == 0 && list_empty(q->qs))
+ queue_shift_down(q);
+
+ return r;
+ }
+
+ return NULL;
+}
+
+static struct list_head *list_pop(struct list_head *lh)
+{
+ struct list_head *r = lh->next;
+
+ BUG_ON(!r);
+ list_del_init(r);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Describes a cache entry. Used in both the cache and the pre_cache.
+ */
+struct entry {
+ struct hlist_node hlist;
+ struct list_head list;
+ dm_oblock_t oblock;
+ dm_cblock_t cblock; /* valid iff in_cache */
+
+ /*
+ * FIXME: pack these better
+ */
+ bool in_cache:1;
+ unsigned hit_count;
+ unsigned generation;
+ unsigned tick;
+};
+
+struct mq_policy {
+ struct dm_cache_policy policy;
+
+ /* protects everything */
+ struct mutex lock;
+ dm_cblock_t cache_size;
+ struct io_tracker tracker;
+
+ /*
+ * We maintain two queues of entries. The cache proper contains
+ * the currently active mappings. Whereas the pre_cache tracks
+ * blocks that are being hit frequently and potential candidates
+ * for promotion to the cache.
+ */
+ struct queue pre_cache;
+ struct queue cache;
+
+ /*
+ * Keeps track of time, incremented by the core. We use this to
+ * avoid attributing multiple hits within the same tick.
+ *
+ * Access to tick_protected should be done with the spin lock held.
+ * It's copied to tick at the start of the map function (within the
+ * mutex).
+ */
+ spinlock_t tick_lock;
+ unsigned tick_protected;
+ unsigned tick;
+
+ /*
+ * A count of the number of times the map function has been called
+ * and found an entry in the pre_cache or cache. Currently used to
+ * calculate the generation.
+ */
+ unsigned hit_count;
+
+ /*
+ * A generation is a longish period that is used to trigger some
+ * book-keeping effects, eg. decrementing hit counts on entries.
+ * This is needed to allow the cache to evolve as io patterns
+ * change.
+ */
+ unsigned generation;
+ unsigned generation_period; /* in lookups (will probably change) */
+
+ /*
+ * Entries in the pre_cache whose hit count passes the promotion
+ * threshold move to the cache proper. Working out the correct
+ * value for promote_threshold is crucial to this policy.
+ */
+ unsigned promote_threshold;
+
+ /*
+ * We need cache_size entries for the cache, and choose to have
+ * cache_size entries for the pre_cache too. One motivation for
+ * using the same size is to make the hit counts directly
+ * comparable between pre_cache and cache.
+ */
+ unsigned nr_entries;
+ unsigned nr_entries_allocated;
+ struct list_head free;
+
+ /*
+ * Cache blocks may be unallocated. We store this info in a
+ * bitset.
+ */
+ unsigned long *allocation_bitset;
+ unsigned nr_cblocks_allocated;
+ unsigned find_free_nr_words;
+ unsigned find_free_last_word;
+
+ /*
+ * The hash table allows us to quickly find an entry by origin
+ * block. Both pre_cache and cache entries are in here.
+ */
+ unsigned nr_buckets;
+ dm_block_t hash_bits;
+ struct hlist_head *table;
+};
+
+/*----------------------------------------------------------------*/
+/* Free/alloc mq cache entry structures. */
+static void takeout_queue(struct list_head *lh, struct queue *q)
+{
+ unsigned level;
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ list_splice(q->qs + level, lh);
+}
+
+static void free_entries(struct mq_policy *mq)
+{
+ struct entry *e, *tmp;
+
+ takeout_queue(&mq->free, &mq->pre_cache);
+ takeout_queue(&mq->free, &mq->cache);
+
+ list_for_each_entry_safe(e, tmp, &mq->free, list)
+ kmem_cache_free(mq_entry_cache, e);
+}
+
+static int alloc_entries(struct mq_policy *mq, unsigned elts)
+{
+ unsigned u = mq->nr_entries;
+
+ INIT_LIST_HEAD(&mq->free);
+ mq->nr_entries_allocated = 0;
+
+ while (u--) {
+ struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL);
+
+ if (!e) {
+ free_entries(mq);
+ return -ENOMEM;
+ }
+
+
+ list_add(&e->list, &mq->free);
+ }
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Simple hash table implementation. Should replace with the standard hash
+ * table that's making its way upstream.
+ */
+static void hash_insert(struct mq_policy *mq, struct entry *e)
+{
+ unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);
+
+ hlist_add_head(&e->hlist, mq->table + h);
+}
+
+static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
+{
+ unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
+ struct hlist_head *bucket = mq->table + h;
+ struct entry *e;
+
+ hlist_for_each_entry(e, bucket, hlist)
+ if (e->oblock == oblock) {
+ hlist_del(&e->hlist);
+ hlist_add_head(&e->hlist, bucket);
+ return e;
+ }
+
+ return NULL;
+}
+
+static void hash_remove(struct entry *e)
+{
+ hlist_del(&e->hlist);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Allocates a new entry structure. The memory is allocated in one lump,
+ * so we just hand it out here.  Returns NULL if all entries have
+ * already been allocated. Cannot fail otherwise.
+ */
+static struct entry *alloc_entry(struct mq_policy *mq)
+{
+ struct entry *e;
+
+ if (mq->nr_entries_allocated >= mq->nr_entries) {
+ BUG_ON(!list_empty(&mq->free));
+ return NULL;
+ }
+
+ e = list_entry(list_pop(&mq->free), struct entry, list);
+ INIT_LIST_HEAD(&e->list);
+ INIT_HLIST_NODE(&e->hlist);
+
+ mq->nr_entries_allocated++;
+ return e;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Mark cache blocks allocated or not in the bitset.
+ */
+static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock)
+{
+ BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
+ BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset));
+
+ set_bit(from_cblock(cblock), mq->allocation_bitset);
+ mq->nr_cblocks_allocated++;
+}
+
+static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock)
+{
+ BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
+ BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset));
+
+ clear_bit(from_cblock(cblock), mq->allocation_bitset);
+ mq->nr_cblocks_allocated--;
+}
+
+static bool any_free_cblocks(struct mq_policy *mq)
+{
+ return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
+}
+
+/*
+ * Fills result out with a cache block that isn't in use, or returns
+ * -ENOSPC.  This does _not_ mark the cblock as allocated; the caller is
+ * responsible for that.
+ */
+static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end,
+ dm_cblock_t *result, unsigned *last_word)
+{
+ int r = -ENOSPC;
+ unsigned w;
+
+ for (w = begin; w < end; w++) {
+ /*
+ * ffz is undefined if no zero exists
+ */
+ if (mq->allocation_bitset[w] != ~0UL) {
+ *last_word = w;
+ *result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w]));
+ if (from_cblock(*result) < from_cblock(mq->cache_size))
+ r = 0;
+
+ break;
+ }
+ }
+
+ return r;
+}
+
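+/*
+ * The search resumes from the word the last allocation came from
+ * (find_free_last_word) and wraps back to word 0 if nothing free is found
+ * beyond it.
+ */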
+static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result)
+{
+ int r;
+
+ if (!any_free_cblocks(mq))
+ return -ENOSPC;
+
+ r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words, result, &mq->find_free_last_word);
+ if (r == -ENOSPC && mq->find_free_last_word)
+ r = __find_free_cblock(mq, 0, mq->find_free_last_word, result, &mq->find_free_last_word);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Now we get to the meat of the policy. This section deals with deciding
+ * when to add entries to the pre_cache and cache, and move between
+ * them.
+ */
+
+/*
+ * The queue level is based on the log2 of the hit count.
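+ * For example, hit counts of 1, 2-3, 4-7, ... map to levels 0, 1, 2, ...
+ * capped at NR_QUEUE_LEVELS - 1.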
+ */
+static unsigned queue_level(struct entry *e)
+{
+ return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
+}
+
+/*
+ * Inserts the entry into the pre_cache or the cache. Ensures the cache
+ * block is marked as allocated if necessary.  Inserts into the hash table.  Sets the
+ * tick which records when the entry was last moved about.
+ */
+static void push(struct mq_policy *mq, struct entry *e)
+{
+ e->tick = mq->tick;
+ hash_insert(mq, e);
+
+ if (e->in_cache) {
+ alloc_cblock(mq, e->cblock);
+ queue_push(&mq->cache, queue_level(e), &e->list);
+ } else
+ queue_push(&mq->pre_cache, queue_level(e), &e->list);
+}
+
+/*
+ * Removes an entry from pre_cache or cache. Removes from the hash table.
+ * Frees off the cache block if necessary.
+ */
+static void del(struct mq_policy *mq, struct entry *e)
+{
+ queue_remove(&e->list);
+ hash_remove(e);
+ if (e->in_cache)
+ free_cblock(mq, e->cblock);
+}
+
+/*
+ * Like del, except it removes the first entry in the queue (ie. the least
+ * recently used).
+ */
+static struct entry *pop(struct mq_policy *mq, struct queue *q)
+{
+ struct entry *e = container_of(queue_pop(q), struct entry, list);
+
+ if (e) {
+ hash_remove(e);
+
+ if (e->in_cache)
+ free_cblock(mq, e->cblock);
+ }
+
+ return e;
+}
+
+/*
+ * Has this entry already been updated?
+ */
+static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
+{
+ return mq->tick == e->tick;
+}
+
+/*
+ * The promotion threshold is adjusted every generation. As are the counts
+ * of the entries.
+ *
+ * At the moment the threshold is taken by averaging the hit counts of some
+ * of the entries in the cache (the first 20 entries, starting from the
+ * lowest occupied levels).
+ *
+ * We can be much cleverer than this though. For example, each promotion
+ * could bump up the threshold helping to prevent churn. Much more to do
+ * here.
+ */
+
+#define MAX_TO_AVERAGE 20
+
+static void check_generation(struct mq_policy *mq)
+{
+ unsigned total = 0, nr = 0, count = 0, level;
+ struct list_head *head;
+ struct entry *e;
+
+ if ((mq->hit_count >= mq->generation_period) &&
+ (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) {
+
+ mq->hit_count = 0;
+ mq->generation++;
+
+ for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
+ head = mq->cache.qs + level;
+ list_for_each_entry(e, head, list) {
+ nr++;
+ total += e->hit_count;
+
+ if (++count >= MAX_TO_AVERAGE)
+ break;
+ }
+ }
+
+ mq->promote_threshold = nr ? total / nr : 1;
+ if (mq->promote_threshold * nr < total)
+ mq->promote_threshold++;
+ }
+}
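+
+/*
+ * As an illustration: with a 4096 block cache the generation rolls over
+ * after 4096 hits (and only once the cache is fully populated); if the 20
+ * sampled entries have 38 hits between them the new promote_threshold
+ * becomes 2, i.e. the mean rounded up.
+ */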
+
+/*
+ * Whenever we use an entry we bump up its hit counter, and push it to the
+ * back of its current level.
+ */
+static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
+{
+ if (updated_this_tick(mq, e))
+ return;
+
+ e->hit_count++;
+ mq->hit_count++;
+ check_generation(mq);
+
+ /* generation adjustment, to stop the counts increasing forever. */
+ /* FIXME: divide? */
+ /* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
+ e->generation = mq->generation;
+
+ del(mq, e);
+ push(mq, e);
+}
+
+/*
+ * Demote the least recently used entry from the cache to the pre_cache.
+ * Returns the new cache entry to use, and the old origin block it was
+ * mapped to.
+ *
+ * We drop the hit count on the demoted entry back to 1 to stop it bouncing
+ * straight back into the cache if it's subsequently hit. There are
+ * various options here, and more experimentation would be good:
+ *
+ * - just forget about the demoted entry completely (ie. don't insert it
+ *   into the pre_cache).
+ * - divide the hit count rather than setting it to some hard coded value.
+ * - set the hit count to a hard coded value other than 1, eg, is it better
+ * if it goes in at level 2?
+ */
+static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
+{
+ dm_cblock_t result;
+ struct entry *demoted = pop(mq, &mq->cache);
+
+ BUG_ON(!demoted);
+ result = demoted->cblock;
+ *oblock = demoted->oblock;
+ demoted->in_cache = false;
+ demoted->hit_count = 1;
+ push(mq, demoted);
+
+ return result;
+}
+
+/*
+ * We modify the basic promotion_threshold depending on the specific io.
+ *
+ * If the origin block has been discarded then there's no cost to copy it
+ * to the cache.
+ *
+ * We bias towards reads, since they can be demoted at no cost if they
+ * haven't been dirtied.
+ */
+#define DISCARDED_PROMOTE_THRESHOLD 1
+#define READ_PROMOTE_THRESHOLD 4
+#define WRITE_PROMOTE_THRESHOLD 8
+
+static unsigned adjusted_promote_threshold(struct mq_policy *mq,
+ bool discarded_oblock, int data_dir)
+{
+ if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE)
+ /*
+ * We don't need to do any copying at all, so give this a
+ * very low threshold. In practice this only triggers
+ * during initial population after a format.
+ */
+ return DISCARDED_PROMOTE_THRESHOLD;
+
+ return data_dir == READ ?
+ (mq->promote_threshold + READ_PROMOTE_THRESHOLD) :
+ (mq->promote_threshold + WRITE_PROMOTE_THRESHOLD);
+}
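+
+/*
+ * For example, with a promote_threshold of 2 a read of a non-discarded
+ * block needs 2 + READ_PROMOTE_THRESHOLD = 6 hits before should_promote()
+ * below passes, a write needs 10, and a write to a discarded block (while
+ * free cache blocks remain) is promoted after a single hit.
+ */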
+
+static bool should_promote(struct mq_policy *mq, struct entry *e,
+ bool discarded_oblock, int data_dir)
+{
+ return e->hit_count >=
+ adjusted_promote_threshold(mq, discarded_oblock, data_dir);
+}
+
+static int cache_entry_found(struct mq_policy *mq,
+ struct entry *e,
+ struct policy_result *result)
+{
+ requeue_and_update_tick(mq, e);
+
+ if (e->in_cache) {
+ result->op = POLICY_HIT;
+ result->cblock = e->cblock;
+ }
+
+ return 0;
+}
+
+/*
+ * Moves an entry from the pre_cache to the cache.  The main work is
+ * finding which cache block to use.
+ */
+static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
+ struct policy_result *result)
+{
+ dm_cblock_t cblock;
+
+ if (find_free_cblock(mq, &cblock) == -ENOSPC) {
+ result->op = POLICY_REPLACE;
+ cblock = demote_cblock(mq, &result->old_oblock);
+ } else
+ result->op = POLICY_NEW;
+
+ result->cblock = e->cblock = cblock;
+
+ del(mq, e);
+ e->in_cache = true;
+ push(mq, e);
+
+ return 0;
+}
+
+static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
+ bool can_migrate, bool discarded_oblock,
+ int data_dir, struct policy_result *result)
+{
+ int r = 0;
+ bool updated = updated_this_tick(mq, e);
+
+ requeue_and_update_tick(mq, e);
+
+ if ((!discarded_oblock && updated) ||
+ !should_promote(mq, e, discarded_oblock, data_dir))
+ result->op = POLICY_MISS;
+ else if (!can_migrate)
+ r = -EWOULDBLOCK;
+ else
+ r = pre_cache_to_cache(mq, e, result);
+
+ return r;
+}
+
+static void insert_in_pre_cache(struct mq_policy *mq,
+ dm_oblock_t oblock)
+{
+ struct entry *e = alloc_entry(mq);
+
+ if (!e)
+ /*
+ * There's no spare entry structure, so we grab the least
+ * used one from the pre_cache.
+ */
+ e = pop(mq, &mq->pre_cache);
+
+ if (unlikely(!e)) {
+ DMWARN("couldn't pop from pre cache");
+ return;
+ }
+
+ e->in_cache = false;
+ e->oblock = oblock;
+ e->hit_count = 1;
+ e->generation = mq->generation;
+ push(mq, e);
+}
+
+static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
+ struct policy_result *result)
+{
+ struct entry *e;
+ dm_cblock_t cblock;
+
+ if (find_free_cblock(mq, &cblock) == -ENOSPC) {
+ result->op = POLICY_MISS;
+ insert_in_pre_cache(mq, oblock);
+ return;
+ }
+
+ e = alloc_entry(mq);
+ if (unlikely(!e)) {
+ result->op = POLICY_MISS;
+ return;
+ }
+
+ e->oblock = oblock;
+ e->cblock = cblock;
+ e->in_cache = true;
+ e->hit_count = 1;
+ e->generation = mq->generation;
+ push(mq, e);
+
+ result->op = POLICY_NEW;
+ result->cblock = e->cblock;
+}
+
+static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
+ bool can_migrate, bool discarded_oblock,
+ int data_dir, struct policy_result *result)
+{
+ if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) {
+ if (can_migrate)
+ insert_in_cache(mq, oblock, result);
+ else
+ return -EWOULDBLOCK;
+ } else {
+ insert_in_pre_cache(mq, oblock);
+ result->op = POLICY_MISS;
+ }
+
+ return 0;
+}
+
+/*
+ * Looks the oblock up in the hash table, then decides whether to put in
+ * pre_cache, or cache etc.
+ */
+static int map(struct mq_policy *mq, dm_oblock_t oblock,
+ bool can_migrate, bool discarded_oblock,
+ int data_dir, struct policy_result *result)
+{
+ int r = 0;
+ struct entry *e = hash_lookup(mq, oblock);
+
+ if (e && e->in_cache)
+ r = cache_entry_found(mq, e, result);
+ else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
+ result->op = POLICY_MISS;
+ else if (e)
+ r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
+ data_dir, result);
+ else
+ r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
+ data_dir, result);
+
+ if (r == -EWOULDBLOCK)
+ result->op = POLICY_MISS;
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Public interface, via the policy struct. See dm-cache-policy.h for a
+ * description of these.
+ */
+
+static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
+{
+ return container_of(p, struct mq_policy, policy);
+}
+
+static void mq_destroy(struct dm_cache_policy *p)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ free_bitset(mq->allocation_bitset);
+ kfree(mq->table);
+ free_entries(mq);
+ kfree(mq);
+}
+
+static void copy_tick(struct mq_policy *mq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mq->tick_lock, flags);
+ mq->tick = mq->tick_protected;
+ spin_unlock_irqrestore(&mq->tick_lock, flags);
+}
+
+static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result)
+{
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+
+ result->op = POLICY_MISS;
+
+ if (can_block)
+ mutex_lock(&mq->lock);
+ else if (!mutex_trylock(&mq->lock))
+ return -EWOULDBLOCK;
+
+ copy_tick(mq);
+
+ iot_examine_bio(&mq->tracker, bio);
+ r = map(mq, oblock, can_migrate, discarded_oblock,
+ bio_data_dir(bio), result);
+
+ mutex_unlock(&mq->lock);
+
+ return r;
+}
+
+static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+ struct entry *e;
+
+ if (!mutex_trylock(&mq->lock))
+ return -EWOULDBLOCK;
+
+ e = hash_lookup(mq, oblock);
+ if (e && e->in_cache) {
+ *cblock = e->cblock;
+ r = 0;
+ } else
+ r = -ENOENT;
+
+ mutex_unlock(&mq->lock);
+
+ return r;
+}
+
+static int mq_load_mapping(struct dm_cache_policy *p,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ uint32_t hint, bool hint_valid)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+ struct entry *e;
+
+ e = alloc_entry(mq);
+ if (!e)
+ return -ENOMEM;
+
+ e->cblock = cblock;
+ e->oblock = oblock;
+ e->in_cache = true;
+ e->hit_count = hint_valid ? hint : 1;
+ e->generation = mq->generation;
+ push(mq, e);
+
+ return 0;
+}
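+
+/*
+ * Note that the 32-bit hint persisted for each mapping is simply the
+ * entry's hit count: mq_walk_mappings() below passes e->hit_count as the
+ * hint, and mq_load_mapping() above seeds reloaded entries with it.
+ */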
+
+static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
+ void *context)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+ int r = 0;
+ struct entry *e;
+ unsigned level;
+
+ mutex_lock(&mq->lock);
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ list_for_each_entry(e, &mq->cache.qs[level], list) {
+ r = fn(context, e->cblock, e->oblock, e->hit_count);
+ if (r)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&mq->lock);
+
+ return r;
+}
+
+static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+{
+ struct entry *e = hash_lookup(mq, oblock);
+
+ BUG_ON(!e || !e->in_cache);
+
+ del(mq, e);
+ e->in_cache = false;
+ push(mq, e);
+}
+
+static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ remove_mapping(mq, oblock);
+ mutex_unlock(&mq->lock);
+}
+
+static void force_mapping(struct mq_policy *mq,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ struct entry *e = hash_lookup(mq, current_oblock);
+
+ BUG_ON(!e || !e->in_cache);
+
+ del(mq, e);
+ e->oblock = new_oblock;
+ push(mq, e);
+}
+
+static void mq_force_mapping(struct dm_cache_policy *p,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ force_mapping(mq, current_oblock, new_oblock);
+ mutex_unlock(&mq->lock);
+}
+
+static dm_cblock_t mq_residency(struct dm_cache_policy *p)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ /* FIXME: lock mutex, not sure we can block here */
+ return to_cblock(mq->nr_cblocks_allocated);
+}
+
+static void mq_tick(struct dm_cache_policy *p)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mq->tick_lock, flags);
+ mq->tick_protected++;
+ spin_unlock_irqrestore(&mq->tick_lock, flags);
+}
+
+static int mq_set_config_value(struct dm_cache_policy *p,
+ const char *key, const char *value)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+ enum io_pattern pattern;
+ unsigned long tmp;
+
+ if (!strcasecmp(key, "random_threshold"))
+ pattern = PATTERN_RANDOM;
+ else if (!strcasecmp(key, "sequential_threshold"))
+ pattern = PATTERN_SEQUENTIAL;
+ else
+ return -EINVAL;
+
+ if (kstrtoul(value, 10, &tmp))
+ return -EINVAL;
+
+ mq->tracker.thresholds[pattern] = tmp;
+
+ return 0;
+}
+
+static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
+{
+ ssize_t sz = 0;
+ struct mq_policy *mq = to_mq_policy(p);
+
+ DMEMIT("4 random_threshold %u sequential_threshold %u",
+ mq->tracker.thresholds[PATTERN_RANDOM],
+ mq->tracker.thresholds[PATTERN_SEQUENTIAL]);
+
+ return 0;
+}
+
+/* Init the policy plugin interface function pointers. */
+static void init_policy_functions(struct mq_policy *mq)
+{
+ mq->policy.destroy = mq_destroy;
+ mq->policy.map = mq_map;
+ mq->policy.lookup = mq_lookup;
+ mq->policy.load_mapping = mq_load_mapping;
+ mq->policy.walk_mappings = mq_walk_mappings;
+ mq->policy.remove_mapping = mq_remove_mapping;
+ mq->policy.writeback_work = NULL;
+ mq->policy.force_mapping = mq_force_mapping;
+ mq->policy.residency = mq_residency;
+ mq->policy.tick = mq_tick;
+ mq->policy.emit_config_values = mq_emit_config_values;
+ mq->policy.set_config_value = mq_set_config_value;
+}
+
+static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ int r;
+ struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+
+ if (!mq)
+ return NULL;
+
+ init_policy_functions(mq);
+ iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
+
+ mq->cache_size = cache_size;
+ mq->tick_protected = 0;
+ mq->tick = 0;
+ mq->hit_count = 0;
+ mq->generation = 0;
+ mq->promote_threshold = 0;
+ mutex_init(&mq->lock);
+ spin_lock_init(&mq->tick_lock);
+ mq->find_free_nr_words = dm_div_up(from_cblock(mq->cache_size), BITS_PER_LONG);
+ mq->find_free_last_word = 0;
+
+ queue_init(&mq->pre_cache);
+ queue_init(&mq->cache);
+ mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
+
+ mq->nr_entries = 2 * from_cblock(cache_size);
+ r = alloc_entries(mq, mq->nr_entries);
+ if (r)
+ goto bad_cache_alloc;
+
+ mq->nr_entries_allocated = 0;
+ mq->nr_cblocks_allocated = 0;
+
+ mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
+ mq->hash_bits = ffs(mq->nr_buckets) - 1;
+ mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
+ if (!mq->table)
+ goto bad_alloc_table;
+
+ mq->allocation_bitset = alloc_bitset(from_cblock(cache_size));
+ if (!mq->allocation_bitset)
+ goto bad_alloc_bitset;
+
+ return &mq->policy;
+
+bad_alloc_bitset:
+ kfree(mq->table);
+bad_alloc_table:
+ free_entries(mq);
+bad_cache_alloc:
+ kfree(mq);
+
+ return NULL;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_cache_policy_type mq_policy_type = {
+ .name = "mq",
+ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = mq_create
+};
+
+static struct dm_cache_policy_type default_policy_type = {
+ .name = "default",
+ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = mq_create
+};
+
+static int __init mq_init(void)
+{
+ int r;
+
+ mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
+ sizeof(struct entry),
+ __alignof__(struct entry),
+ 0, NULL);
+ if (!mq_entry_cache)
+ goto bad;
+
+ r = dm_cache_policy_register(&mq_policy_type);
+ if (r) {
+ DMERR("register failed %d", r);
+ goto bad_register_mq;
+ }
+
+ r = dm_cache_policy_register(&default_policy_type);
+ if (!r) {
+ DMINFO("version " MQ_VERSION " loaded");
+ return 0;
+ }
+
+ DMERR("register failed (as default) %d", r);
+
+ dm_cache_policy_unregister(&mq_policy_type);
+bad_register_mq:
+ kmem_cache_destroy(mq_entry_cache);
+bad:
+ return -ENOMEM;
+}
+
+static void __exit mq_exit(void)
+{
+ dm_cache_policy_unregister(&mq_policy_type);
+ dm_cache_policy_unregister(&default_policy_type);
+
+ kmem_cache_destroy(mq_entry_cache);
+}
+
+module_init(mq_init);
+module_exit(mq_exit);
+
+MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("mq cache policy");
+
+MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
new file mode 100644
index 000000000000..2cbf5fdaac52
--- /dev/null
+++ b/drivers/md/dm-cache-policy.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy-internal.h"
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/*----------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX "cache-policy"
+
+static DEFINE_SPINLOCK(register_lock);
+static LIST_HEAD(register_list);
+
+static struct dm_cache_policy_type *__find_policy(const char *name)
+{
+ struct dm_cache_policy_type *t;
+
+ list_for_each_entry(t, &register_list, list)
+ if (!strcmp(t->name, name))
+ return t;
+
+ return NULL;
+}
+
+static struct dm_cache_policy_type *__get_policy_once(const char *name)
+{
+ struct dm_cache_policy_type *t = __find_policy(name);
+
+ if (t && !try_module_get(t->owner)) {
+ DMWARN("couldn't get module %s", name);
+ t = ERR_PTR(-EINVAL);
+ }
+
+ return t;
+}
+
+static struct dm_cache_policy_type *get_policy_once(const char *name)
+{
+ struct dm_cache_policy_type *t;
+
+ spin_lock(&register_lock);
+ t = __get_policy_once(name);
+ spin_unlock(&register_lock);
+
+ return t;
+}
+
+static struct dm_cache_policy_type *get_policy(const char *name)
+{
+ struct dm_cache_policy_type *t;
+
+ t = get_policy_once(name);
+ if (IS_ERR(t))
+ return NULL;
+
+ if (t)
+ return t;
+
+ request_module("dm-cache-%s", name);
+
+ t = get_policy_once(name);
+ if (IS_ERR(t))
+ return NULL;
+
+ return t;
+}
+
+static void put_policy(struct dm_cache_policy_type *t)
+{
+ module_put(t->owner);
+}
+
+int dm_cache_policy_register(struct dm_cache_policy_type *type)
+{
+ int r;
+
+ /* One size fits all for now */
+ if (type->hint_size != 0 && type->hint_size != 4) {
+ DMWARN("hint size must be 0 or 4 but %llu supplied.", (unsigned long long) type->hint_size);
+ return -EINVAL;
+ }
+
+ spin_lock(&register_lock);
+ if (__find_policy(type->name)) {
+ DMWARN("attempt to register policy under duplicate name %s", type->name);
+ r = -EINVAL;
+ } else {
+ list_add(&type->list, &register_list);
+ r = 0;
+ }
+ spin_unlock(&register_lock);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_register);
+
+void dm_cache_policy_unregister(struct dm_cache_policy_type *type)
+{
+ spin_lock(&register_lock);
+ list_del_init(&type->list);
+ spin_unlock(&register_lock);
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_unregister);
+
+struct dm_cache_policy *dm_cache_policy_create(const char *name,
+ dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ struct dm_cache_policy *p = NULL;
+ struct dm_cache_policy_type *type;
+
+ type = get_policy(name);
+ if (!type) {
+ DMWARN("unknown policy type");
+ return NULL;
+ }
+
+ p = type->create(cache_size, origin_size, cache_block_size);
+ if (!p) {
+ put_policy(type);
+ return NULL;
+ }
+ p->private = type;
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_create);
+
+void dm_cache_policy_destroy(struct dm_cache_policy *p)
+{
+ struct dm_cache_policy_type *t = p->private;
+
+ p->destroy(p);
+ put_policy(t);
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_destroy);
+
+const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
+{
+ struct dm_cache_policy_type *t = p->private;
+
+ return t->name;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
+
+size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p)
+{
+ struct dm_cache_policy_type *t = p->private;
+
+ return t->hint_size;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_get_hint_size);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
new file mode 100644
index 000000000000..f0f51b260544
--- /dev/null
+++ b/drivers/md/dm-cache-policy.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_POLICY_H
+#define DM_CACHE_POLICY_H
+
+#include "dm-cache-block-types.h"
+
+#include <linux/device-mapper.h>
+
+/*----------------------------------------------------------------*/
+
+/* FIXME: make it clear which methods are optional. Get debug policy to
+ * double check this at start.
+ */
+
+/*
+ * The cache policy makes the important decisions about which blocks get to
+ * live on the faster cache device.
+ *
+ * When the core target has to remap a bio it calls the 'map' method of the
+ * policy. This returns an instruction telling the core target what to do.
+ *
+ * POLICY_HIT:
+ * That block is in the cache. Remap to the cache and carry on.
+ *
+ * POLICY_MISS:
+ * This block is on the origin device. Remap and carry on.
+ *
+ * POLICY_NEW:
+ * This block is currently on the origin device, but the policy wants to
+ * move it. The core should:
+ *
+ * - hold any further io to this origin block
+ * - copy the origin to the given cache block
+ * - release all the held blocks
+ * - remap the original block to the cache
+ *
+ * POLICY_REPLACE:
+ * This block is currently on the origin device. The policy wants to
+ * move it to the cache, with the added complication that the destination
+ * cache block needs a writeback first. The core should:
+ *
+ * - hold any further io to this origin block
+ * - hold any further io to the origin block that's being written back
+ * - writeback
+ * - copy new block to cache
+ * - release held blocks
+ * - remap bio to cache and reissue.
+ *
+ * Should the core run into trouble while processing a POLICY_NEW or
+ * POLICY_REPLACE instruction it will roll back the policy's mapping using
+ * remove_mapping() or force_mapping().  These methods must not fail.  This
+ * approach avoids having transactional semantics in the policy (ie. the
+ * core informing the policy when a migration is complete), and hence makes
+ * it easier to write new policies.
+ *
+ * In general policy methods should never block, except in the case of the
+ * map function when can_migrate is set. So be careful to implement using
+ * bounded, preallocated memory.
+ */
+enum policy_operation {
+ POLICY_HIT,
+ POLICY_MISS,
+ POLICY_NEW,
+ POLICY_REPLACE
+};
+
+/*
+ * This is the instruction passed back to the core target.
+ */
+struct policy_result {
+ enum policy_operation op;
+ dm_oblock_t old_oblock; /* POLICY_REPLACE */
+ dm_cblock_t cblock; /* POLICY_HIT, POLICY_NEW, POLICY_REPLACE */
+};
+
+typedef int (*policy_walk_fn)(void *context, dm_cblock_t cblock,
+ dm_oblock_t oblock, uint32_t hint);
+
+/*
+ * The cache policy object. Just a bunch of methods. It is envisaged that
+ * this structure will be embedded in a bigger, policy specific structure
+ * (ie. use container_of()).
+ */
+struct dm_cache_policy {
+
+ /*
+ * FIXME: make it clear which methods are optional, and which may
+ * block.
+ */
+
+ /*
+ * Destroys this object.
+ */
+ void (*destroy)(struct dm_cache_policy *p);
+
+ /*
+ * See large comment above.
+ *
+ * oblock - the origin block we're interested in.
+ *
+ * can_block - indicates whether the current thread is allowed to
+ * block. -EWOULDBLOCK returned if it can't and would.
+ *
+ * can_migrate - gives permission for POLICY_NEW or POLICY_REPLACE
+ * instructions. If denied and the policy would have
+ * returned one of these instructions it should
+ * return -EWOULDBLOCK.
+ *
+ * discarded_oblock - indicates whether the whole origin block is
+ * in a discarded state (FIXME: better to tell the
+ * policy about this sooner, so it can recycle that
+ * cache block if it wants.)
+ * bio - the bio that triggered this call.
+ * result - gets filled in with the instruction.
+ *
+ * May only return 0, or -EWOULDBLOCK (if !can_migrate)
+ */
+ int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result);
+
+ /*
+ * Sometimes we want to see if a block is in the cache, without
+ * triggering any update of stats (ie. it's not a real hit).
+ *
+ * Must not block.
+ *
+ * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
+ * (-EWOULDBLOCK would be typical).
+ */
+ int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
+
+ /*
+ * oblock must be a mapped block. Must not block.
+ */
+ void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
+ void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
+
+ /*
+ * Called when a cache target is first created. Used to load a
+ * mapping from the metadata device into the policy.
+ */
+ int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
+ dm_cblock_t cblock, uint32_t hint, bool hint_valid);
+
+ int (*walk_mappings)(struct dm_cache_policy *p, policy_walk_fn fn,
+ void *context);
+
+ /*
+ * Override functions used on the error paths of the core target.
+ * They must succeed.
+ */
+ void (*remove_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock);
+ void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
+ dm_oblock_t new_oblock);
+
+ int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
+
+
+ /*
+ * How full is the cache?
+ */
+ dm_cblock_t (*residency)(struct dm_cache_policy *p);
+
+ /*
+ * Because of where we sit in the block layer, we can be asked to
+ * map a lot of little bios that are all in the same block (no
+ * queue merging has occurred). To stop the policy being fooled by
+ * these, the core target sends regular tick() calls to the policy.
+ * The policy should only count an entry as hit once per tick.
+ */
+ void (*tick)(struct dm_cache_policy *p);
+
+ /*
+ * Configuration.
+ */
+ int (*emit_config_values)(struct dm_cache_policy *p,
+ char *result, unsigned maxlen);
+ int (*set_config_value)(struct dm_cache_policy *p,
+ const char *key, const char *value);
+
+ /*
+ * Book keeping ptr for the policy register, not for general use.
+ */
+ void *private;
+};
+
+/*----------------------------------------------------------------*/
+
+/*
+ * We maintain a little register of the different policy types.
+ */
+#define CACHE_POLICY_NAME_SIZE 16
+
+struct dm_cache_policy_type {
+ /* For use by the register code only. */
+ struct list_head list;
+
+ /*
+ * Policy writers should fill in these fields. The name field is
+ * what gets passed on the target line to select your policy.
+ */
+ char name[CACHE_POLICY_NAME_SIZE];
+
+ /*
+ * Policies may store a hint for each cache block.
+ * Currently the size of this hint must be 0 or 4 bytes but we
+ * expect to relax this in future.
+ */
+ size_t hint_size;
+
+ struct module *owner;
+ struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t block_size);
+};
+
+int dm_cache_policy_register(struct dm_cache_policy_type *type);
+void dm_cache_policy_unregister(struct dm_cache_policy_type *type);
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_CACHE_POLICY_H */
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
new file mode 100644
index 000000000000..0f4e84b15c30
--- /dev/null
+++ b/drivers/md/dm-cache-target.c
@@ -0,0 +1,2584 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+#include "dm-bio-prison.h"
+#include "dm-cache-metadata.h"
+
+#include <linux/dm-io.h>
+#include <linux/dm-kcopyd.h>
+#include <linux/init.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#define DM_MSG_PREFIX "cache"
+
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
+ "A percentage of time allocated for copying to and/or from cache");
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Glossary:
+ *
+ * oblock: index of an origin block
+ * cblock: index of a cache block
+ * promotion: movement of a block from origin to cache
+ * demotion: movement of a block from cache to origin
+ * migration: movement of a block between the origin and cache device,
+ * either direction
+ */
+
+/*----------------------------------------------------------------*/
+
+static size_t bitset_size_in_bytes(unsigned nr_entries)
+{
+ return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+}
+
+static unsigned long *alloc_bitset(unsigned nr_entries)
+{
+ size_t s = bitset_size_in_bytes(nr_entries);
+ return vzalloc(s);
+}
+
+static void clear_bitset(void *bitset, unsigned nr_entries)
+{
+ size_t s = bitset_size_in_bytes(nr_entries);
+ memset(bitset, 0, s);
+}
+
+static void free_bitset(unsigned long *bits)
+{
+ vfree(bits);
+}
+
+/*----------------------------------------------------------------*/
+
+#define PRISON_CELLS 1024
+#define MIGRATION_POOL_SIZE 128
+#define COMMIT_PERIOD HZ
+#define MIGRATION_COUNT_WINDOW 10
+
+/*
+ * The block size of the device holding cache data must be >= 32KB
+ */
+#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
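+/* i.e. 64 sectors, given the kernel's 512 byte sectors */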
+
+/*
+ * FIXME: the cache is read/write for the time being.
+ */
+enum cache_mode {
+ CM_WRITE, /* metadata may be changed */
+ CM_READ_ONLY, /* metadata may not be changed */
+};
+
+struct cache_features {
+ enum cache_mode mode;
+ bool write_through:1;
+};
+
+struct cache_stats {
+ atomic_t read_hit;
+ atomic_t read_miss;
+ atomic_t write_hit;
+ atomic_t write_miss;
+ atomic_t demotion;
+ atomic_t promotion;
+ atomic_t copies_avoided;
+ atomic_t cache_cell_clash;
+ atomic_t commit_count;
+ atomic_t discard_count;
+};
+
+struct cache {
+ struct dm_target *ti;
+ struct dm_target_callbacks callbacks;
+
+ /*
+ * Metadata is written to this device.
+ */
+ struct dm_dev *metadata_dev;
+
+ /*
+ * The slower of the two data devices. Typically a spindle.
+ */
+ struct dm_dev *origin_dev;
+
+ /*
+ * The faster of the two data devices. Typically an SSD.
+ */
+ struct dm_dev *cache_dev;
+
+ /*
+ * Cache features such as write-through.
+ */
+ struct cache_features features;
+
+ /*
+ * Size of the origin device in _complete_ blocks and native sectors.
+ */
+ dm_oblock_t origin_blocks;
+ sector_t origin_sectors;
+
+ /*
+ * Size of the cache device in blocks.
+ */
+ dm_cblock_t cache_size;
+
+ /*
+ * Fields for converting from sectors to blocks.
+ */
+ uint32_t sectors_per_block;
+ int sectors_per_block_shift;
+
+ struct dm_cache_metadata *cmd;
+
+ spinlock_t lock;
+ struct bio_list deferred_bios;
+ struct bio_list deferred_flush_bios;
+ struct list_head quiesced_migrations;
+ struct list_head completed_migrations;
+ struct list_head need_commit_migrations;
+ sector_t migration_threshold;
+ atomic_t nr_migrations;
+ wait_queue_head_t migration_wait;
+
+ /*
+ * cache_size entries, dirty if set
+ */
+ dm_cblock_t nr_dirty;
+ unsigned long *dirty_bitset;
+
+ /*
+ * origin_blocks entries, discarded if set.
+ */
+ sector_t discard_block_size; /* a power of 2 times sectors per block */
+ dm_dblock_t discard_nr_blocks;
+ unsigned long *discard_bitset;
+
+ struct dm_kcopyd_client *copier;
+ struct workqueue_struct *wq;
+ struct work_struct worker;
+
+ struct delayed_work waker;
+ unsigned long last_commit_jiffies;
+
+ struct dm_bio_prison *prison;
+ struct dm_deferred_set *all_io_ds;
+
+ mempool_t *migration_pool;
+ struct dm_cache_migration *next_migration;
+
+ struct dm_cache_policy *policy;
+ unsigned policy_nr_args;
+
+ bool need_tick_bio:1;
+ bool sized:1;
+ bool quiescing:1;
+ bool commit_requested:1;
+ bool loaded_mappings:1;
+ bool loaded_discards:1;
+
+ struct cache_stats stats;
+
+ /*
+ * Rather than reconstructing the table line for the status we just
+ * save it and regurgitate.
+ */
+ unsigned nr_ctr_args;
+ const char **ctr_args;
+};
+
+struct per_bio_data {
+ bool tick:1;
+ unsigned req_nr:2;
+ struct dm_deferred_entry *all_io_entry;
+};
+
+struct dm_cache_migration {
+ struct list_head list;
+ struct cache *cache;
+
+ unsigned long start_jiffies;
+ dm_oblock_t old_oblock;
+ dm_oblock_t new_oblock;
+ dm_cblock_t cblock;
+
+ bool err:1;
+ bool writeback:1;
+ bool demote:1;
+ bool promote:1;
+
+ struct dm_bio_prison_cell *old_ocell;
+ struct dm_bio_prison_cell *new_ocell;
+};
+
+/*
+ * Processing a bio in the worker thread may require these memory
+ * allocations. We prealloc to avoid deadlocks (the same worker thread
+ * frees them back to the mempool).
+ */
+struct prealloc {
+ struct dm_cache_migration *mg;
+ struct dm_bio_prison_cell *cell1;
+ struct dm_bio_prison_cell *cell2;
+};
+
+static void wake_worker(struct cache *cache)
+{
+ queue_work(cache->wq, &cache->worker);
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
+{
+ /* FIXME: change to use a local slab. */
+ return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
+}
+
+static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
+{
+ dm_bio_prison_free_cell(cache->prison, cell);
+}
+
+static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
+{
+ if (!p->mg) {
+ p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+ if (!p->mg)
+ return -ENOMEM;
+ }
+
+ if (!p->cell1) {
+ p->cell1 = alloc_prison_cell(cache);
+ if (!p->cell1)
+ return -ENOMEM;
+ }
+
+ if (!p->cell2) {
+ p->cell2 = alloc_prison_cell(cache);
+ if (!p->cell2)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
+{
+ if (p->cell2)
+ free_prison_cell(cache, p->cell2);
+
+ if (p->cell1)
+ free_prison_cell(cache, p->cell1);
+
+ if (p->mg)
+ mempool_free(p->mg, cache->migration_pool);
+}
+
+static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
+{
+ struct dm_cache_migration *mg = p->mg;
+
+ BUG_ON(!mg);
+ p->mg = NULL;
+
+ return mg;
+}
+
+/*
+ * You must have a cell within the prealloc struct to return.  If not, this
+ * function will BUG() rather than returning NULL.
+ */
+static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
+{
+ struct dm_bio_prison_cell *r = NULL;
+
+ if (p->cell1) {
+ r = p->cell1;
+ p->cell1 = NULL;
+
+ } else if (p->cell2) {
+ r = p->cell2;
+ p->cell2 = NULL;
+ } else
+ BUG();
+
+ return r;
+}
+
+/*
+ * You can't have more than two cells in a prealloc struct. BUG() will be
+ * called if you try to overfill.
+ */
+static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
+{
+ if (!p->cell2)
+ p->cell2 = cell;
+
+ else if (!p->cell1)
+ p->cell1 = cell;
+
+ else
+ BUG();
+}
+
+/*----------------------------------------------------------------*/
+
+static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
+{
+ key->virtual = 0;
+ key->dev = 0;
+ key->block = from_oblock(oblock);
+}
+
+/*
+ * The caller hands in a preallocated cell, and a free function for it.
+ * The cell will be freed if there's an error, or if it wasn't used because
+ * a cell with that key already exists.
+ */
+typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
+
+static int bio_detain(struct cache *cache, dm_oblock_t oblock,
+ struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
+ cell_free_fn free_fn, void *free_context,
+ struct dm_bio_prison_cell **cell_result)
+{
+ int r;
+ struct dm_cell_key key;
+
+ build_key(oblock, &key);
+ r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
+ if (r)
+ free_fn(free_context, cell_prealloc);
+
+ return r;
+}
+
+static int get_cell(struct cache *cache,
+ dm_oblock_t oblock,
+ struct prealloc *structs,
+ struct dm_bio_prison_cell **cell_result)
+{
+ int r;
+ struct dm_cell_key key;
+ struct dm_bio_prison_cell *cell_prealloc;
+
+ cell_prealloc = prealloc_get_cell(structs);
+
+ build_key(oblock, &key);
+ r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
+ if (r)
+ prealloc_put_cell(structs, cell_prealloc);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+static bool is_dirty(struct cache *cache, dm_cblock_t b)
+{
+ return test_bit(from_cblock(b), cache->dirty_bitset);
+}
+
+static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
+ cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
+ policy_set_dirty(cache->policy, oblock);
+ }
+}
+
+static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
+ policy_clear_dirty(cache->policy, oblock);
+ cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
+ if (!from_cblock(cache->nr_dirty))
+ dm_table_event(cache->ti->table);
+ }
+}
+
+/*----------------------------------------------------------------*/
+
+static bool block_size_is_power_of_two(struct cache *cache)
+{
+ return cache->sectors_per_block_shift >= 0;
+}
+
+static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
+{
+ sector_t discard_blocks = cache->discard_block_size;
+ dm_block_t b = from_oblock(oblock);
+
+ if (!block_size_is_power_of_two(cache))
+ (void) sector_div(discard_blocks, cache->sectors_per_block);
+ else
+ discard_blocks >>= cache->sectors_per_block_shift;
+
+ (void) sector_div(b, discard_blocks);
+
+ return to_dblock(b);
+}
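+
+/*
+ * For example, with 512 sector cache blocks and a discard_block_size of
+ * 1024 sectors there are two origin blocks per discard block, so origin
+ * block 7 maps to discard block 3.
+ */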
+
+static void set_discard(struct cache *cache, dm_dblock_t b)
+{
+ unsigned long flags;
+
+ atomic_inc(&cache->stats.discard_count);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ set_bit(from_dblock(b), cache->discard_bitset);
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static void clear_discard(struct cache *cache, dm_dblock_t b)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ clear_bit(from_dblock(b), cache->discard_bitset);
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static bool is_discarded(struct cache *cache, dm_dblock_t b)
+{
+ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ r = test_bit(from_dblock(b), cache->discard_bitset);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ return r;
+}
+
+static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
+{
+ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
+ cache->discard_bitset);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+static void load_stats(struct cache *cache)
+{
+ struct dm_cache_statistics stats;
+
+ dm_cache_metadata_get_stats(cache->cmd, &stats);
+ atomic_set(&cache->stats.read_hit, stats.read_hits);
+ atomic_set(&cache->stats.read_miss, stats.read_misses);
+ atomic_set(&cache->stats.write_hit, stats.write_hits);
+ atomic_set(&cache->stats.write_miss, stats.write_misses);
+}
+
+static void save_stats(struct cache *cache)
+{
+ struct dm_cache_statistics stats;
+
+ stats.read_hits = atomic_read(&cache->stats.read_hit);
+ stats.read_misses = atomic_read(&cache->stats.read_miss);
+ stats.write_hits = atomic_read(&cache->stats.write_hit);
+ stats.write_misses = atomic_read(&cache->stats.write_miss);
+
+ dm_cache_metadata_set_stats(cache->cmd, &stats);
+}
+
+/*----------------------------------------------------------------
+ * Per bio data
+ *--------------------------------------------------------------*/
+static struct per_bio_data *get_per_bio_data(struct bio *bio)
+{
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ BUG_ON(!pb);
+ return pb;
+}
+
+static struct per_bio_data *init_per_bio_data(struct bio *bio)
+{
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ pb->tick = false;
+ pb->req_nr = dm_bio_get_target_bio_nr(bio);
+ pb->all_io_entry = NULL;
+
+ return pb;
+}
+
+/*----------------------------------------------------------------
+ * Remapping
+ *--------------------------------------------------------------*/
+static void remap_to_origin(struct cache *cache, struct bio *bio)
+{
+ bio->bi_bdev = cache->origin_dev->bdev;
+}
+
+static void remap_to_cache(struct cache *cache, struct bio *bio,
+ dm_cblock_t cblock)
+{
+ sector_t bi_sector = bio->bi_sector;
+
+ bio->bi_bdev = cache->cache_dev->bdev;
+ if (!block_size_is_power_of_two(cache))
+ bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
+ sector_div(bi_sector, cache->sectors_per_block);
+ else
+ bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
+ (bi_sector & (cache->sectors_per_block - 1));
+}
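+
+/*
+ * For example, with sectors_per_block = 512 (shift 9) a bio at origin
+ * sector 0x12345 keeps its offset within the block (0x145) and is remapped
+ * to cache sector (from_cblock(cblock) << 9) | 0x145.
+ */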
+
+static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
+{
+ unsigned long flags;
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ if (cache->need_tick_bio &&
+ !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
+ pb->tick = true;
+ cache->need_tick_bio = false;
+ }
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock)
+{
+ check_if_tick_bio_needed(cache, bio);
+ remap_to_origin(cache, bio);
+ if (bio_data_dir(bio) == WRITE)
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
+}
+
+static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ remap_to_cache(cache, bio, cblock);
+ if (bio_data_dir(bio) == WRITE) {
+ set_dirty(cache, oblock, cblock);
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
+ }
+}
+
+static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
+{
+ sector_t block_nr = bio->bi_sector;
+
+ if (!block_size_is_power_of_two(cache))
+ (void) sector_div(block_nr, cache->sectors_per_block);
+ else
+ block_nr >>= cache->sectors_per_block_shift;
+
+ return to_oblock(block_nr);
+}
+
+static int bio_triggers_commit(struct cache *cache, struct bio *bio)
+{
+ return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+}
+
+static void issue(struct cache *cache, struct bio *bio)
+{
+ unsigned long flags;
+
+ if (!bio_triggers_commit(cache, bio)) {
+ generic_make_request(bio);
+ return;
+ }
+
+ /*
+ * Batch together any bios that trigger commits and then issue a
+ * single commit for them in do_worker().
+ */
+ spin_lock_irqsave(&cache->lock, flags);
+ cache->commit_requested = true;
+ bio_list_add(&cache->deferred_flush_bios, bio);
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+/*----------------------------------------------------------------
+ * Migration processing
+ *
+ * Migration covers moving data from the origin device to the cache, or
+ * vice versa.
+ *--------------------------------------------------------------*/
+static void free_migration(struct dm_cache_migration *mg)
+{
+ mempool_free(mg, mg->cache->migration_pool);
+}
+
+static void inc_nr_migrations(struct cache *cache)
+{
+ atomic_inc(&cache->nr_migrations);
+}
+
+static void dec_nr_migrations(struct cache *cache)
+{
+ atomic_dec(&cache->nr_migrations);
+
+ /*
+ * Wake the worker in case we're suspending the target.
+ */
+ wake_up(&cache->migration_wait);
+}
+
+static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+ bool holder)
+{
+ (holder ? dm_cell_release : dm_cell_release_no_holder)
+ (cache->prison, cell, &cache->deferred_bios);
+ free_prison_cell(cache, cell);
+}
+
+static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+ bool holder)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ __cell_defer(cache, cell, holder);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void cleanup_migration(struct dm_cache_migration *mg)
+{
+ dec_nr_migrations(mg->cache);
+ free_migration(mg);
+}
+
+static void migration_failure(struct dm_cache_migration *mg)
+{
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback) {
+ DMWARN_LIMIT("writeback failed; couldn't copy block");
+ set_dirty(cache, mg->old_oblock, mg->cblock);
+ cell_defer(cache, mg->old_ocell, false);
+
+ } else if (mg->demote) {
+ DMWARN_LIMIT("demotion failed; couldn't copy block");
+ policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
+
+ cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+ if (mg->promote)
+ cell_defer(cache, mg->new_ocell, 1);
+ } else {
+ DMWARN_LIMIT("promotion failed; couldn't copy block");
+ policy_remove_mapping(cache->policy, mg->new_oblock);
+ cell_defer(cache, mg->new_ocell, 1);
+ }
+
+ cleanup_migration(mg);
+}
+
+static void migration_success_pre_commit(struct dm_cache_migration *mg)
+{
+ unsigned long flags;
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback) {
+ cell_defer(cache, mg->old_ocell, false);
+ clear_dirty(cache, mg->old_oblock, mg->cblock);
+ cleanup_migration(mg);
+ return;
+
+ } else if (mg->demote) {
+ if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
+ DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
+ policy_force_mapping(cache->policy, mg->new_oblock,
+ mg->old_oblock);
+ if (mg->promote)
+ cell_defer(cache, mg->new_ocell, true);
+ cleanup_migration(mg);
+ return;
+ }
+ } else {
+ if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
+ DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
+ policy_remove_mapping(cache->policy, mg->new_oblock);
+ cleanup_migration(mg);
+ return;
+ }
+ }
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_add_tail(&mg->list, &cache->need_commit_migrations);
+ cache->commit_requested = true;
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static void migration_success_post_commit(struct dm_cache_migration *mg)
+{
+ unsigned long flags;
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback) {
+ DMWARN("writeback unexpectedly triggered commit");
+ return;
+
+ } else if (mg->demote) {
+ cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+
+ if (mg->promote) {
+ mg->demote = false;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_add_tail(&mg->list, &cache->quiesced_migrations);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ } else
+ cleanup_migration(mg);
+
+ } else {
+ cell_defer(cache, mg->new_ocell, true);
+ clear_dirty(cache, mg->new_oblock, mg->cblock);
+ cleanup_migration(mg);
+ }
+}
+
+static void copy_complete(int read_err, unsigned long write_err, void *context)
+{
+ unsigned long flags;
+ struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
+ struct cache *cache = mg->cache;
+
+ if (read_err || write_err)
+ mg->err = true;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_add_tail(&mg->list, &cache->completed_migrations);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void issue_copy_real(struct dm_cache_migration *mg)
+{
+ int r;
+ struct dm_io_region o_region, c_region;
+ struct cache *cache = mg->cache;
+
+ o_region.bdev = cache->origin_dev->bdev;
+ o_region.count = cache->sectors_per_block;
+
+ c_region.bdev = cache->cache_dev->bdev;
+ c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+ c_region.count = cache->sectors_per_block;
+
+ if (mg->writeback || mg->demote) {
+ /* demote */
+ o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
+ r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
+ } else {
+ /* promote */
+ o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
+ r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
+ }
+
+ if (r < 0)
+ migration_failure(mg);
+}
+
+static void avoid_copy(struct dm_cache_migration *mg)
+{
+ atomic_inc(&mg->cache->stats.copies_avoided);
+ migration_success_pre_commit(mg);
+}
+
+static void issue_copy(struct dm_cache_migration *mg)
+{
+ bool avoid;
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback || mg->demote)
+ avoid = !is_dirty(cache, mg->cblock) ||
+ is_discarded_oblock(cache, mg->old_oblock);
+ else
+ avoid = is_discarded_oblock(cache, mg->new_oblock);
+
+ avoid ? avoid_copy(mg) : issue_copy_real(mg);
+}
+
+static void complete_migration(struct dm_cache_migration *mg)
+{
+ if (mg->err)
+ migration_failure(mg);
+ else
+ migration_success_pre_commit(mg);
+}
+
+static void process_migrations(struct cache *cache, struct list_head *head,
+ void (*fn)(struct dm_cache_migration *))
+{
+ unsigned long flags;
+ struct list_head list;
+ struct dm_cache_migration *mg, *tmp;
+
+ INIT_LIST_HEAD(&list);
+ spin_lock_irqsave(&cache->lock, flags);
+ list_splice_init(head, &list);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ list_for_each_entry_safe(mg, tmp, &list, list)
+ fn(mg);
+}
+
+static void __queue_quiesced_migration(struct dm_cache_migration *mg)
+{
+ list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
+}
+
+static void queue_quiesced_migration(struct dm_cache_migration *mg)
+{
+ unsigned long flags;
+ struct cache *cache = mg->cache;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ __queue_quiesced_migration(mg);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
+{
+ unsigned long flags;
+ struct dm_cache_migration *mg, *tmp;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_for_each_entry_safe(mg, tmp, work, list)
+ __queue_quiesced_migration(mg);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void check_for_quiesced_migrations(struct cache *cache,
+ struct per_bio_data *pb)
+{
+ struct list_head work;
+
+ if (!pb->all_io_entry)
+ return;
+
+ INIT_LIST_HEAD(&work);
+ if (pb->all_io_entry)
+ dm_deferred_entry_dec(pb->all_io_entry, &work);
+
+ if (!list_empty(&work))
+ queue_quiesced_migrations(cache, &work);
+}
+
+static void quiesce_migration(struct dm_cache_migration *mg)
+{
+ if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
+ queue_quiesced_migration(mg);
+}
+
+static void promote(struct cache *cache, struct prealloc *structs,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ struct dm_bio_prison_cell *cell)
+{
+ struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+ mg->err = false;
+ mg->writeback = false;
+ mg->demote = false;
+ mg->promote = true;
+ mg->cache = cache;
+ mg->new_oblock = oblock;
+ mg->cblock = cblock;
+ mg->old_ocell = NULL;
+ mg->new_ocell = cell;
+ mg->start_jiffies = jiffies;
+
+ inc_nr_migrations(cache);
+ quiesce_migration(mg);
+}
+
+static void writeback(struct cache *cache, struct prealloc *structs,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ struct dm_bio_prison_cell *cell)
+{
+ struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+ mg->err = false;
+ mg->writeback = true;
+ mg->demote = false;
+ mg->promote = false;
+ mg->cache = cache;
+ mg->old_oblock = oblock;
+ mg->cblock = cblock;
+ mg->old_ocell = cell;
+ mg->new_ocell = NULL;
+ mg->start_jiffies = jiffies;
+
+ inc_nr_migrations(cache);
+ quiesce_migration(mg);
+}
+
+static void demote_then_promote(struct cache *cache, struct prealloc *structs,
+ dm_oblock_t old_oblock, dm_oblock_t new_oblock,
+ dm_cblock_t cblock,
+ struct dm_bio_prison_cell *old_ocell,
+ struct dm_bio_prison_cell *new_ocell)
+{
+ struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+ mg->err = false;
+ mg->writeback = false;
+ mg->demote = true;
+ mg->promote = true;
+ mg->cache = cache;
+ mg->old_oblock = old_oblock;
+ mg->new_oblock = new_oblock;
+ mg->cblock = cblock;
+ mg->old_ocell = old_ocell;
+ mg->new_ocell = new_ocell;
+ mg->start_jiffies = jiffies;
+
+ inc_nr_migrations(cache);
+ quiesce_migration(mg);
+}
+
+/*----------------------------------------------------------------
+ * bio processing
+ *--------------------------------------------------------------*/
+static void defer_bio(struct cache *cache, struct bio *bio)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_add(&cache->deferred_bios, bio);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void process_flush_bio(struct cache *cache, struct bio *bio)
+{
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ BUG_ON(bio->bi_size);
+ if (!pb->req_nr)
+ remap_to_origin(cache, bio);
+ else
+ remap_to_cache(cache, bio, 0);
+
+ issue(cache, bio);
+}
+
+/*
+ * People generally discard large parts of a device, eg. the whole device
+ * when formatting.  Splitting these large discards up into cache-block-sized
+ * ios and then quiescing (always necessary for discard) takes too long.
+ *
+ * We keep it simple, and allow any size of discard to come in, and just
+ * mark off blocks on the discard bitset. No passdown occurs!
+ *
+ * To implement passdown we need to change the bio_prison such that a cell
+ * can have a key that spans many blocks.
+ */
+static void process_discard_bio(struct cache *cache, struct bio *bio)
+{
+ dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+ cache->discard_block_size);
+ dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+ dm_block_t b;
+
+ (void) sector_div(end_block, cache->discard_block_size);
+
+ for (b = start_block; b < end_block; b++)
+ set_discard(cache, to_dblock(b));
+
+ bio_endio(bio, 0);
+}
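+
+/*
+ * For example, a discard covering sectors 1000-8999 with a
+ * discard_block_size of 1024 marks discard blocks 1-7; the partially
+ * covered blocks at either end are left alone since they may still hold
+ * live data.
+ */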
+
+static bool spare_migration_bandwidth(struct cache *cache)
+{
+ sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
+ cache->sectors_per_block;
+ return current_volume < cache->migration_threshold;
+}
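+
+/*
+ * E.g. with 512 sector cache blocks and a migration_threshold of, say,
+ * 4096 sectors, a new migration is only allowed while fewer than seven are
+ * already in flight (the "+ 1" accounts for the one being considered).
+ */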
+
+static bool is_writethrough_io(struct cache *cache, struct bio *bio,
+ dm_cblock_t cblock)
+{
+ return bio_data_dir(bio) == WRITE &&
+ cache->features.write_through && !is_dirty(cache, cblock);
+}
+
+static void inc_hit_counter(struct cache *cache, struct bio *bio)
+{
+ atomic_inc(bio_data_dir(bio) == READ ?
+ &cache->stats.read_hit : &cache->stats.write_hit);
+}
+
+static void inc_miss_counter(struct cache *cache, struct bio *bio)
+{
+ atomic_inc(bio_data_dir(bio) == READ ?
+ &cache->stats.read_miss : &cache->stats.write_miss);
+}
+
+static void process_bio(struct cache *cache, struct prealloc *structs,
+ struct bio *bio)
+{
+ int r;
+ bool release_cell = true;
+ dm_oblock_t block = get_bio_block(cache, bio);
+ struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
+ struct policy_result lookup_result;
+ struct per_bio_data *pb = get_per_bio_data(bio);
+ bool discarded_block = is_discarded_oblock(cache, block);
+ bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
+
+ /*
+ * Check to see if that block is currently migrating.
+ */
+ cell_prealloc = prealloc_get_cell(structs);
+ r = bio_detain(cache, block, bio, cell_prealloc,
+ (cell_free_fn) prealloc_put_cell,
+ structs, &new_ocell);
+ if (r > 0)
+ return;
+
+ r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
+ bio, &lookup_result);
+
+ if (r == -EWOULDBLOCK)
+ /* migration has been denied */
+ lookup_result.op = POLICY_MISS;
+
+ switch (lookup_result.op) {
+ case POLICY_HIT:
+ inc_hit_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+ if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
+ /*
+ * No need to mark anything dirty in write through mode.
+ */
+ pb->req_nr == 0 ?
+ remap_to_cache(cache, bio, lookup_result.cblock) :
+ remap_to_origin_clear_discard(cache, bio, block);
+ } else
+ remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+
+ issue(cache, bio);
+ break;
+
+ case POLICY_MISS:
+ inc_miss_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+ if (pb->req_nr != 0) {
+ /*
+ * This is a duplicate writethrough io that is no
+ * longer needed because the block has been demoted.
+ */
+ bio_endio(bio, 0);
+ } else {
+ remap_to_origin_clear_discard(cache, bio, block);
+ issue(cache, bio);
+ }
+ break;
+
+ case POLICY_NEW:
+ atomic_inc(&cache->stats.promotion);
+ promote(cache, structs, block, lookup_result.cblock, new_ocell);
+ release_cell = false;
+ break;
+
+ case POLICY_REPLACE:
+ cell_prealloc = prealloc_get_cell(structs);
+ r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
+ (cell_free_fn) prealloc_put_cell,
+ structs, &old_ocell);
+ if (r > 0) {
+ /*
+ * We have to be careful to avoid lock inversion of
+ * the cells. So we back off, and wait for the
+ * old_ocell to become free.
+ */
+ policy_force_mapping(cache->policy, block,
+ lookup_result.old_oblock);
+ atomic_inc(&cache->stats.cache_cell_clash);
+ break;
+ }
+ atomic_inc(&cache->stats.demotion);
+ atomic_inc(&cache->stats.promotion);
+
+ demote_then_promote(cache, structs, lookup_result.old_oblock,
+ block, lookup_result.cblock,
+ old_ocell, new_ocell);
+ release_cell = false;
+ break;
+
+ default:
+ DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
+ (unsigned) lookup_result.op);
+ bio_io_error(bio);
+ }
+
+ if (release_cell)
+ cell_defer(cache, new_ocell, false);
+}
+
+static int need_commit_due_to_time(struct cache *cache)
+{
+ return jiffies < cache->last_commit_jiffies ||
+ jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
+}
+
+static int commit_if_needed(struct cache *cache)
+{
+ if (dm_cache_changed_this_transaction(cache->cmd) &&
+ (cache->commit_requested || need_commit_due_to_time(cache))) {
+ atomic_inc(&cache->stats.commit_count);
+ cache->last_commit_jiffies = jiffies;
+ cache->commit_requested = false;
+ return dm_cache_commit(cache->cmd, false);
+ }
+
+ return 0;
+}
+
+static void process_deferred_bios(struct cache *cache)
+{
+ unsigned long flags;
+ struct bio_list bios;
+ struct bio *bio;
+ struct prealloc structs;
+
+ memset(&structs, 0, sizeof(structs));
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_merge(&bios, &cache->deferred_bios);
+ bio_list_init(&cache->deferred_bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ while (!bio_list_empty(&bios)) {
+ /*
+ * If we've got no free migration structs, and processing
+ * this bio might require one, we pause until there are some
+ * prepared mappings to process.
+ */
+ if (prealloc_data_structs(cache, &structs)) {
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_merge(&cache->deferred_bios, &bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+ break;
+ }
+
+ bio = bio_list_pop(&bios);
+
+ if (bio->bi_rw & REQ_FLUSH)
+ process_flush_bio(cache, bio);
+ else if (bio->bi_rw & REQ_DISCARD)
+ process_discard_bio(cache, bio);
+ else
+ process_bio(cache, &structs, bio);
+ }
+
+ prealloc_free_structs(cache, &structs);
+}
+
+static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
+{
+ unsigned long flags;
+ struct bio_list bios;
+ struct bio *bio;
+
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_merge(&bios, &cache->deferred_flush_bios);
+ bio_list_init(&cache->deferred_flush_bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ while ((bio = bio_list_pop(&bios)))
+ submit_bios ? generic_make_request(bio) : bio_io_error(bio);
+}
+
+static void writeback_some_dirty_blocks(struct cache *cache)
+{
+ int r = 0;
+ dm_oblock_t oblock;
+ dm_cblock_t cblock;
+ struct prealloc structs;
+ struct dm_bio_prison_cell *old_ocell;
+
+ memset(&structs, 0, sizeof(structs));
+
+ while (spare_migration_bandwidth(cache)) {
+ if (prealloc_data_structs(cache, &structs))
+ break;
+
+ r = policy_writeback_work(cache->policy, &oblock, &cblock);
+ if (r)
+ break;
+
+ r = get_cell(cache, oblock, &structs, &old_ocell);
+ if (r) {
+ policy_set_dirty(cache->policy, oblock);
+ break;
+ }
+
+ writeback(cache, &structs, oblock, cblock, old_ocell);
+ }
+
+ prealloc_free_structs(cache, &structs);
+}
+
+/*----------------------------------------------------------------
+ * Main worker loop
+ *--------------------------------------------------------------*/
+static void start_quiescing(struct cache *cache)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ cache->quiescing = 1;
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static void stop_quiescing(struct cache *cache)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ cache->quiescing = 0;
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static bool is_quiescing(struct cache *cache)
+{
+ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ r = cache->quiescing;
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ return r;
+}
+
+static void wait_for_migrations(struct cache *cache)
+{
+ wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
+}
+
+static void stop_worker(struct cache *cache)
+{
+ cancel_delayed_work(&cache->waker);
+ flush_workqueue(cache->wq);
+}
+
+static void requeue_deferred_io(struct cache *cache)
+{
+ struct bio *bio;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
+ bio_list_merge(&bios, &cache->deferred_bios);
+ bio_list_init(&cache->deferred_bios);
+
+ while ((bio = bio_list_pop(&bios)))
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+}
+
+static int more_work(struct cache *cache)
+{
+ if (is_quiescing(cache))
+ return !list_empty(&cache->quiesced_migrations) ||
+ !list_empty(&cache->completed_migrations) ||
+ !list_empty(&cache->need_commit_migrations);
+ else
+ return !bio_list_empty(&cache->deferred_bios) ||
+ !bio_list_empty(&cache->deferred_flush_bios) ||
+ !list_empty(&cache->quiesced_migrations) ||
+ !list_empty(&cache->completed_migrations) ||
+ !list_empty(&cache->need_commit_migrations);
+}
+
+static void do_worker(struct work_struct *ws)
+{
+ struct cache *cache = container_of(ws, struct cache, worker);
+
+ do {
+ if (!is_quiescing(cache))
+ process_deferred_bios(cache);
+
+ process_migrations(cache, &cache->quiesced_migrations, issue_copy);
+ process_migrations(cache, &cache->completed_migrations, complete_migration);
+
+ writeback_some_dirty_blocks(cache);
+
+ if (commit_if_needed(cache)) {
+ process_deferred_flush_bios(cache, false);
+
+ /*
+ * FIXME: rollback metadata or just go into a
+ * failure mode and error everything
+ */
+ } else {
+ process_deferred_flush_bios(cache, true);
+ process_migrations(cache, &cache->need_commit_migrations,
+ migration_success_post_commit);
+ }
+ } while (more_work(cache));
+}
+
+/*
+ * We want to commit periodically so that not too much
+ * unwritten metadata builds up.
+ */
+static void do_waker(struct work_struct *ws)
+{
+ struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
+ wake_worker(cache);
+ queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
+}
+
+/*----------------------------------------------------------------*/
+
+static int is_congested(struct dm_dev *dev, int bdi_bits)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+ return bdi_congested(&q->backing_dev_info, bdi_bits);
+}
+
+static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
+{
+ struct cache *cache = container_of(cb, struct cache, callbacks);
+
+ return is_congested(cache->origin_dev, bdi_bits) ||
+ is_congested(cache->cache_dev, bdi_bits);
+}
+
+/*----------------------------------------------------------------
+ * Target methods
+ *--------------------------------------------------------------*/
+
+/*
+ * This function gets called on the error paths of the constructor, so we
+ * have to cope with a partially initialised struct.
+ */
+static void destroy(struct cache *cache)
+{
+ unsigned i;
+
+ if (cache->next_migration)
+ mempool_free(cache->next_migration, cache->migration_pool);
+
+ if (cache->migration_pool)
+ mempool_destroy(cache->migration_pool);
+
+ if (cache->all_io_ds)
+ dm_deferred_set_destroy(cache->all_io_ds);
+
+ if (cache->prison)
+ dm_bio_prison_destroy(cache->prison);
+
+ if (cache->wq)
+ destroy_workqueue(cache->wq);
+
+ if (cache->dirty_bitset)
+ free_bitset(cache->dirty_bitset);
+
+ if (cache->discard_bitset)
+ free_bitset(cache->discard_bitset);
+
+ if (cache->copier)
+ dm_kcopyd_client_destroy(cache->copier);
+
+ if (cache->cmd)
+ dm_cache_metadata_close(cache->cmd);
+
+ if (cache->metadata_dev)
+ dm_put_device(cache->ti, cache->metadata_dev);
+
+ if (cache->origin_dev)
+ dm_put_device(cache->ti, cache->origin_dev);
+
+ if (cache->cache_dev)
+ dm_put_device(cache->ti, cache->cache_dev);
+
+ if (cache->policy)
+ dm_cache_policy_destroy(cache->policy);
+
+ for (i = 0; i < cache->nr_ctr_args; i++)
+ kfree(cache->ctr_args[i]);
+ kfree(cache->ctr_args);
+
+ kfree(cache);
+}
+
+static void cache_dtr(struct dm_target *ti)
+{
+ struct cache *cache = ti->private;
+
+ destroy(cache);
+}
+
+static sector_t get_dev_size(struct dm_dev *dev)
+{
+ return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Construct a cache device mapping.
+ *
+ * cache <metadata dev> <cache dev> <origin dev> <block size>
+ * <#feature args> [<feature arg>]*
+ * <policy> <#policy args> [<policy arg>]*
+ *
+ * metadata dev : fast device holding the persistent metadata
+ * cache dev : fast device holding cached data blocks
+ * origin dev : slow device holding original data blocks
+ * block size : cache unit size in sectors
+ *
+ * #feature args : number of feature arguments passed
+ * feature args : writethrough. (The default is writeback.)
+ *
+ * policy : the replacement policy to use
+ * #policy args : an even number of policy arguments corresponding
+ * to key/value pairs passed to the policy
+ * policy args : key/value pairs passed to the policy
+ * E.g. 'sequential_threshold 1024'
+ * See cache-policies.txt for details.
+ *
+ * Optional feature arguments are:
+ * writethrough : write through caching that prohibits cache block
+ * content from being different from origin block content.
+ * Without this argument, the default behaviour is to write
+ * back cache block contents later for performance reasons,
+ * so they may differ from the corresponding origin blocks.
+ */
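+
+/*
+ * Illustrative example (device paths, table length and policy name are
+ * placeholders, not part of the interface):
+ *
+ *   dmsetup create my-cache --table \
+ *     '0 209715200 cache /dev/mapper/fast-meta /dev/mapper/fast-blocks /dev/mapper/slow 512 1 writethrough default 0'
+ *
+ * i.e. a 512 sector (256 KiB) cache block size, writethrough mode and the
+ * default policy with no policy arguments.
+ */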
+struct cache_args {
+ struct dm_target *ti;
+
+ struct dm_dev *metadata_dev;
+
+ struct dm_dev *cache_dev;
+ sector_t cache_sectors;
+
+ struct dm_dev *origin_dev;
+ sector_t origin_sectors;
+
+ uint32_t block_size;
+
+ const char *policy_name;
+ int policy_argc;
+ const char **policy_argv;
+
+ struct cache_features features;
+};
+
+static void destroy_cache_args(struct cache_args *ca)
+{
+ if (ca->metadata_dev)
+ dm_put_device(ca->ti, ca->metadata_dev);
+
+ if (ca->cache_dev)
+ dm_put_device(ca->ti, ca->cache_dev);
+
+ if (ca->origin_dev)
+ dm_put_device(ca->ti, ca->origin_dev);
+
+ kfree(ca);
+}
+
+static bool at_least_one_arg(struct dm_arg_set *as, char **error)
+{
+ if (!as->argc) {
+ *error = "Insufficient args";
+ return false;
+ }
+
+ return true;
+}
+
+static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ int r;
+ sector_t metadata_dev_size;
+ char b[BDEVNAME_SIZE];
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
+ &ca->metadata_dev);
+ if (r) {
+ *error = "Error opening metadata device";
+ return r;
+ }
+
+ metadata_dev_size = get_dev_size(ca->metadata_dev);
+ if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
+ DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
+ bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS_WARNING);
+
+ return 0;
+}
+
+static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ int r;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
+ &ca->cache_dev);
+ if (r) {
+ *error = "Error opening cache device";
+ return r;
+ }
+ ca->cache_sectors = get_dev_size(ca->cache_dev);
+
+ return 0;
+}
+
+static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ int r;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
+ &ca->origin_dev);
+ if (r) {
+ *error = "Error opening origin device";
+ return r;
+ }
+
+ ca->origin_sectors = get_dev_size(ca->origin_dev);
+ if (ca->ti->len > ca->origin_sectors) {
+ *error = "Device size larger than cached device";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ unsigned long tmp;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
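+ /*
+ * The block size must be at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS and
+ * a multiple of it; the mask test relies on that minimum being a
+ * power of two.
+ */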
+ if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
+ tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
+ tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
+ *error = "Invalid data block size";
+ return -EINVAL;
+ }
+
+ if (tmp > ca->cache_sectors) {
+ *error = "Data block size is larger than the cache device";
+ return -EINVAL;
+ }
+
+ ca->block_size = tmp;
+
+ return 0;
+}
+
+static void init_features(struct cache_features *cf)
+{
+ cf->mode = CM_WRITE;
+ cf->write_through = false;
+}
+
+static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ static struct dm_arg _args[] = {
+ {0, 1, "Invalid number of cache feature arguments"},
+ };
+
+ int r;
+ unsigned argc;
+ const char *arg;
+ struct cache_features *cf = &ca->features;
+
+ init_features(cf);
+
+ r = dm_read_arg_group(_args, as, &argc, error);
+ if (r)
+ return -EINVAL;
+
+ while (argc--) {
+ arg = dm_shift_arg(as);
+
+ if (!strcasecmp(arg, "writeback"))
+ cf->write_through = false;
+
+ else if (!strcasecmp(arg, "writethrough"))
+ cf->write_through = true;
+
+ else {
+ *error = "Unrecognised cache feature requested";
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ static struct dm_arg _args[] = {
+ {0, 1024, "Invalid number of policy arguments"},
+ };
+
+ int r;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ ca->policy_name = dm_shift_arg(as);
+
+ r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
+ if (r)
+ return -EINVAL;
+
+ ca->policy_argv = (const char **)as->argv;
+ dm_consume_args(as, ca->policy_argc);
+
+ return 0;
+}
+
+static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
+ char **error)
+{
+ int r;
+ struct dm_arg_set as;
+
+ as.argc = argc;
+ as.argv = argv;
+
+ r = parse_metadata_dev(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_cache_dev(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_origin_dev(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_block_size(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_features(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_policy(ca, &as, error);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct kmem_cache *migration_cache;
+
+static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
+{
+ int r = 0;
+
+ if (argc & 1) {
+ DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
+ return -EINVAL;
+ }
+
+ while (argc) {
+ r = policy_set_config_value(p, argv[0], argv[1]);
+ if (r) {
+ DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
+ argv[0], argv[1]);
+ return r;
+ }
+
+ argc -= 2;
+ argv += 2;
+ }
+
+ return r;
+}
+
+static int create_cache_policy(struct cache *cache, struct cache_args *ca,
+ char **error)
+{
+ int r;
+
+ cache->policy = dm_cache_policy_create(ca->policy_name,
+ cache->cache_size,
+ cache->origin_sectors,
+ cache->sectors_per_block);
+ if (!cache->policy) {
+ *error = "Error creating cache's policy";
+ return -ENOMEM;
+ }
+
+ r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
+ if (r)
+ dm_cache_policy_destroy(cache->policy);
+
+ return r;
+}
+
+/*
+ * We want the discard block size to be a power of two, at least as large
+ * as the cache block size, and to give no more than 2^14 discard blocks
+ * across the origin.
+ */
+#define MAX_DISCARD_BLOCKS (1 << 14)
+
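+/*
+ * Illustrative arithmetic: with 512-sector cache blocks and a 1 TiB origin
+ * (2^31 sectors), the size starts at 512 and doubles until
+ * 2^31 / size <= 2^14, giving a discard block size of 2^17 sectors (64 MiB).
+ */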
+static bool too_many_discard_blocks(sector_t discard_block_size,
+ sector_t origin_size)
+{
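+ /* sector_div() divides origin_size in place; the remainder is not needed. */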
+ (void) sector_div(origin_size, discard_block_size);
+
+ return origin_size > MAX_DISCARD_BLOCKS;
+}
+
+static sector_t calculate_discard_block_size(sector_t cache_block_size,
+ sector_t origin_size)
+{
+ sector_t discard_block_size;
+
+ discard_block_size = roundup_pow_of_two(cache_block_size);
+
+ if (origin_size)
+ while (too_many_discard_blocks(discard_block_size, origin_size))
+ discard_block_size *= 2;
+
+ return discard_block_size;
+}
+
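+/* Default migration_threshold, in sectors (2048 * 100 = 100 MiB with 512-byte sectors). */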
+#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
+
+static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio);
+
+static int cache_create(struct cache_args *ca, struct cache **result)
+{
+ int r = 0;
+ char **error = &ca->ti->error;
+ struct cache *cache;
+ struct dm_target *ti = ca->ti;
+ dm_block_t origin_blocks;
+ struct dm_cache_metadata *cmd;
+ bool may_format = ca->features.mode == CM_WRITE;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ if (!cache)
+ return -ENOMEM;
+
+ cache->ti = ca->ti;
+ ti->private = cache;
+ ti->per_bio_data_size = sizeof(struct per_bio_data);
+ ti->num_flush_bios = 2;
+ ti->flush_supported = true;
+
+ ti->num_discard_bios = 1;
+ ti->discards_supported = true;
+ ti->discard_zeroes_data_unsupported = true;
+
+ memcpy(&cache->features, &ca->features, sizeof(cache->features));
+
+ if (cache->features.write_through)
+ ti->num_write_bios = cache_num_write_bios;
+
+ cache->callbacks.congested_fn = cache_is_congested;
+ dm_table_add_target_callbacks(ti->table, &cache->callbacks);
+
+ cache->metadata_dev = ca->metadata_dev;
+ cache->origin_dev = ca->origin_dev;
+ cache->cache_dev = ca->cache_dev;
+
+ ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
+
+ /* FIXME: factor out this whole section */
+ origin_blocks = cache->origin_sectors = ca->origin_sectors;
+ (void) sector_div(origin_blocks, ca->block_size);
+ cache->origin_blocks = to_oblock(origin_blocks);
+
+ cache->sectors_per_block = ca->block_size;
+ if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
+ r = -EINVAL;
+ goto bad;
+ }
+
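+ /*
+ * A block size that is not a power of two cannot use shift-based
+ * arithmetic, so record a shift of -1 and fall back to division.
+ */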
+ if (ca->block_size & (ca->block_size - 1)) {
+ dm_block_t cache_size = ca->cache_sectors;
+
+ cache->sectors_per_block_shift = -1;
+ (void) sector_div(cache_size, ca->block_size);
+ cache->cache_size = to_cblock(cache_size);
+ } else {
+ cache->sectors_per_block_shift = __ffs(ca->block_size);
+ cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
+ }
+
+ r = create_cache_policy(cache, ca, error);
+ if (r)
+ goto bad;
+ cache->policy_nr_args = ca->policy_argc;
+
+ cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
+ ca->block_size, may_format,
+ dm_cache_policy_get_hint_size(cache->policy));
+ if (IS_ERR(cmd)) {
+ *error = "Error creating metadata object";
+ r = PTR_ERR(cmd);
+ goto bad;
+ }
+ cache->cmd = cmd;
+
+ spin_lock_init(&cache->lock);
+ bio_list_init(&cache->deferred_bios);
+ bio_list_init(&cache->deferred_flush_bios);
+ INIT_LIST_HEAD(&cache->quiesced_migrations);
+ INIT_LIST_HEAD(&cache->completed_migrations);
+ INIT_LIST_HEAD(&cache->need_commit_migrations);
+ cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
+ atomic_set(&cache->nr_migrations, 0);
+ init_waitqueue_head(&cache->migration_wait);
+
+ cache->nr_dirty = 0;
+ cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
+ if (!cache->dirty_bitset) {
+ *error = "could not allocate dirty bitset";
+ r = -ENOMEM;
+ goto bad;
+ }
+ clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
+
+ cache->discard_block_size =
+ calculate_discard_block_size(cache->sectors_per_block,
+ cache->origin_sectors);
+ cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
+ cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
+ if (!cache->discard_bitset) {
+ *error = "could not allocate discard bitset";
+ r = -ENOMEM;
+ goto bad;
+ }
+ clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
+
+ cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
+ if (IS_ERR(cache->copier)) {
+ *error = "could not create kcopyd client";
+ r = PTR_ERR(cache->copier);
+ goto bad;
+ }
+
+ cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+ if (!cache->wq) {
+ *error = "could not create workqueue for metadata object";
+ r = -ENOMEM;
+ goto bad;
+ }
+ INIT_WORK(&cache->worker, do_worker);
+ INIT_DELAYED_WORK(&cache->waker, do_waker);
+ cache->last_commit_jiffies = jiffies;
+
+ cache->prison = dm_bio_prison_create(PRISON_CELLS);
+ if (!cache->prison) {
+ *error = "could not create bio prison";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ cache->all_io_ds = dm_deferred_set_create();
+ if (!cache->all_io_ds) {
+ *error = "could not create all_io deferred set";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
+ migration_cache);
+ if (!cache->migration_pool) {
+ *error = "Error creating cache's migration mempool";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ cache->next_migration = NULL;
+
+ cache->need_tick_bio = true;
+ cache->sized = false;
+ cache->quiescing = false;
+ cache->commit_requested = false;
+ cache->loaded_mappings = false;
+ cache->loaded_discards = false;
+
+ load_stats(cache);
+
+ atomic_set(&cache->stats.demotion, 0);
+ atomic_set(&cache->stats.promotion, 0);
+ atomic_set(&cache->stats.copies_avoided, 0);
+ atomic_set(&cache->stats.cache_cell_clash, 0);
+ atomic_set(&cache->stats.commit_count, 0);
+ atomic_set(&cache->stats.discard_count, 0);
+
+ *result = cache;
+ return 0;
+
+bad:
+ destroy(cache);
+ return r;
+}
+
+static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
+{
+ unsigned i;
+ const char **copy;
+
+ copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+ for (i = 0; i < argc; i++) {
+ copy[i] = kstrdup(argv[i], GFP_KERNEL);
+ if (!copy[i]) {
+ while (i--)
+ kfree(copy[i]);
+ kfree(copy);
+ return -ENOMEM;
+ }
+ }
+
+ cache->nr_ctr_args = argc;
+ cache->ctr_args = copy;
+
+ return 0;
+}
+
+static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int r = -EINVAL;
+ struct cache_args *ca;
+ struct cache *cache = NULL;
+
+ ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+ if (!ca) {
+ ti->error = "Error allocating memory for cache";
+ return -ENOMEM;
+ }
+ ca->ti = ti;
+
+ r = parse_cache_args(ca, argc, argv, &ti->error);
+ if (r)
+ goto out;
+
+ r = cache_create(ca, &cache);
+ if (r)
+ goto out;
+
+ r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
+ if (r) {
+ destroy(cache);
+ goto out;
+ }
+
+ ti->private = cache;
+
+out:
+ destroy_cache_args(ca);
+ return r;
+}
+
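+/*
+ * Only used in writethrough mode (see cache_create). A write to a block
+ * that is cached and clean must be sent to both the cache and the origin,
+ * so two bios are requested; if the policy lookup fails we assume the
+ * worst and also ask for two.
+ */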
+static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio)
+{
+ int r;
+ struct cache *cache = ti->private;
+ dm_oblock_t block = get_bio_block(cache, bio);
+ dm_cblock_t cblock;
+
+ r = policy_lookup(cache->policy, block, &cblock);
+ if (r < 0)
+ return 2; /* assume the worst */
+
+ return (!r && !is_dirty(cache, cblock)) ? 2 : 1;
+}
+
+static int cache_map(struct dm_target *ti, struct bio *bio)
+{
+ struct cache *cache = ti->private;
+
+ int r;
+ dm_oblock_t block = get_bio_block(cache, bio);
+ bool can_migrate = false;
+ bool discarded_block;
+ struct dm_bio_prison_cell *cell;
+ struct policy_result lookup_result;
+ struct per_bio_data *pb;
+
+ if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+ /*
+ * This can only occur if the io goes to a partial block at
+ * the end of the origin device. We don't cache these.
+ * Just remap to the origin and carry on.
+ */
+ remap_to_origin_clear_discard(cache, bio, block);
+ return DM_MAPIO_REMAPPED;
+ }
+
+ pb = init_per_bio_data(bio);
+
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
+ defer_bio(cache, bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /*
+ * Check to see if that block is currently migrating.
+ */
+ cell = alloc_prison_cell(cache);
+ if (!cell) {
+ defer_bio(cache, bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ r = bio_detain(cache, block, bio, cell,
+ (cell_free_fn) free_prison_cell,
+ cache, &cell);
+ if (r) {
+ if (r < 0)
+ defer_bio(cache, bio);
+
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ discarded_block = is_discarded_oblock(cache, block);
+
+ r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
+ bio, &lookup_result);
+ if (r == -EWOULDBLOCK) {
+ cell_defer(cache, cell, true);
+ return DM_MAPIO_SUBMITTED;
+
+ } else if (r) {
+ DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
+ bio_io_error(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ switch (lookup_result.op) {
+ case POLICY_HIT:
+ inc_hit_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+ if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
+ /*
+ * No need to mark anything dirty in write through mode.
+ */
+ if (pb->req_nr == 0)
+ remap_to_cache(cache, bio, lookup_result.cblock);
+ else
+ remap_to_origin_clear_discard(cache, bio, block);
+ cell_defer(cache, cell, false);
+ } else {
+ remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+ cell_defer(cache, cell, false);
+ }
+ break;
+
+ case POLICY_MISS:
+ inc_miss_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+ if (pb->req_nr != 0) {
+ /*
+ * This is a duplicate writethrough io that is no
+ * longer needed because the block has been demoted.
+ */
+ bio_endio(bio, 0);
+ cell_defer(cache, cell, false);
+ return DM_MAPIO_SUBMITTED;
+ } else {
+ remap_to_origin_clear_discard(cache, bio, block);
+ cell_defer(cache, cell, false);
+ }
+ break;
+
+ default:
+ DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
+ (unsigned) lookup_result.op);
+ bio_io_error(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
+{
+ struct cache *cache = ti->private;
+ unsigned long flags;
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ if (pb->tick) {
+ policy_tick(cache->policy);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ cache->need_tick_bio = true;
+ spin_unlock_irqrestore(&cache->lock, flags);
+ }
+
+ check_for_quiesced_migrations(cache, pb);
+
+ return 0;
+}
+
+static int write_dirty_bitset(struct cache *cache)
+{
+ unsigned i;
+ int r;
+
+ for (i = 0; i < from_cblock(cache->cache_size); i++) {
+ r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
+ is_dirty(cache, to_cblock(i)));
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int write_discard_bitset(struct cache *cache)
+{
+ unsigned i;
+ int r;
+
+ r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
+ cache->discard_nr_blocks);
+ if (r) {
+ DMERR("could not resize on-disk discard bitset");
+ return r;
+ }
+
+ for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
+ r = dm_cache_set_discard(cache->cmd, to_dblock(i),
+ is_discarded(cache, to_dblock(i)));
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
+ uint32_t hint)
+{
+ struct cache *cache = context;
+ return dm_cache_save_hint(cache->cmd, cblock, hint);
+}
+
+static int write_hints(struct cache *cache)
+{
+ int r;
+
+ r = dm_cache_begin_hints(cache->cmd, cache->policy);
+ if (r) {
+ DMERR("dm_cache_begin_hints failed");
+ return r;
+ }
+
+ r = policy_walk_mappings(cache->policy, save_hint, cache);
+ if (r)
+ DMERR("policy_walk_mappings failed");
+
+ return r;
+}
+
+/*
+ * returns true on success
+ */
+static bool sync_metadata(struct cache *cache)
+{
+ int r1, r2, r3, r4;
+
+ r1 = write_dirty_bitset(cache);
+ if (r1)
+ DMERR("could not write dirty bitset");
+
+ r2 = write_discard_bitset(cache);
+ if (r2)
+ DMERR("could not write discard bitset");
+
+ save_stats(cache);
+
+ r3 = write_hints(cache);
+ if (r3)
+ DMERR("could not write hints");
+
+ /*
+ * If writing the above metadata failed, we still commit, but don't
+ * set the clean shutdown flag. This will effectively force every
+ * dirty bit to be set on reload.
+ */
+ r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
+ if (r4)
+ DMERR("could not write cache metadata. Data loss may occur.");
+
+ return !r1 && !r2 && !r3 && !r4;
+}
+
+static void cache_postsuspend(struct dm_target *ti)
+{
+ struct cache *cache = ti->private;
+
+ start_quiescing(cache);
+ wait_for_migrations(cache);
+ stop_worker(cache);
+ requeue_deferred_io(cache);
+ stop_quiescing(cache);
+
+ (void) sync_metadata(cache);
+}
+
+static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
+ bool dirty, uint32_t hint, bool hint_valid)
+{
+ int r;
+ struct cache *cache = context;
+
+ r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
+ if (r)
+ return r;
+
+ if (dirty)
+ set_dirty(cache, oblock, cblock);
+ else
+ clear_dirty(cache, oblock, cblock);
+
+ return 0;
+}
+
+static int load_discard(void *context, sector_t discard_block_size,
+ dm_dblock_t dblock, bool discard)
+{
+ struct cache *cache = context;
+
+ /* FIXME: handle mis-matched block size */
+
+ if (discard)
+ set_discard(cache, dblock);
+ else
+ clear_discard(cache, dblock);
+
+ return 0;
+}
+
+static int cache_preresume(struct dm_target *ti)
+{
+ int r = 0;
+ struct cache *cache = ti->private;
+ sector_t actual_cache_size = get_dev_size(cache->cache_dev);
+ (void) sector_div(actual_cache_size, cache->sectors_per_block);
+
+ /*
+ * Check to see if the cache has resized.
+ */
+ if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
+ cache->cache_size = to_cblock(actual_cache_size);
+
+ r = dm_cache_resize(cache->cmd, cache->cache_size);
+ if (r) {
+ DMERR("could not resize cache metadata");
+ return r;
+ }
+
+ cache->sized = true;
+ }
+
+ if (!cache->loaded_mappings) {
+ r = dm_cache_load_mappings(cache->cmd,
+ dm_cache_policy_get_name(cache->policy),
+ load_mapping, cache);
+ if (r) {
+ DMERR("could not load cache mappings");
+ return r;
+ }
+
+ cache->loaded_mappings = true;
+ }
+
+ if (!cache->loaded_discards) {
+ r = dm_cache_load_discards(cache->cmd, load_discard, cache);
+ if (r) {
+ DMERR("could not load origin discards");
+ return r;
+ }
+
+ cache->loaded_discards = true;
+ }
+
+ return r;
+}
+
+static void cache_resume(struct dm_target *ti)
+{
+ struct cache *cache = ti->private;
+
+ cache->need_tick_bio = true;
+ do_waker(&cache->waker.work);
+}
+
+/*
+ * Status format:
+ *
+ * <#used metadata blocks>/<#total metadata blocks>
+ * <#read hits> <#read misses> <#write hits> <#write misses>
+ * <#demotions> <#promotions> <#blocks in cache> <#dirty>
+ * <#features> <features>*
+ * <#core args> <core args>
+ * <#policy args> <policy args>*
+ */
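+
+/*
+ * Illustrative STATUSTYPE_INFO output (all values made up; any policy
+ * key/value pairs follow at the end):
+ *
+ *   17/2048 386 32 441 82 10 17 345 3 1 writethrough 2 migration_threshold 204800
+ */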
+static void cache_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+{
+ int r = 0;
+ unsigned i;
+ ssize_t sz = 0;
+ dm_block_t nr_free_blocks_metadata = 0;
+ dm_block_t nr_blocks_metadata = 0;
+ char buf[BDEVNAME_SIZE];
+ struct cache *cache = ti->private;
+ dm_cblock_t residency;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ /* Commit to ensure statistics aren't out-of-date */
+ if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
+ r = dm_cache_commit(cache->cmd, false);
+ if (r)
+ DMERR("could not commit metadata for accurate status");
+ }
+
+ r = dm_cache_get_free_metadata_block_count(cache->cmd,
+ &nr_free_blocks_metadata);
+ if (r) {
+ DMERR("could not get metadata free block count");
+ goto err;
+ }
+
+ r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
+ if (r) {
+ DMERR("could not get metadata device size");
+ goto err;
+ }
+
+ residency = policy_residency(cache->policy);
+
+ DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
+ (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
+ (unsigned long long)nr_blocks_metadata,
+ (unsigned) atomic_read(&cache->stats.read_hit),
+ (unsigned) atomic_read(&cache->stats.read_miss),
+ (unsigned) atomic_read(&cache->stats.write_hit),
+ (unsigned) atomic_read(&cache->stats.write_miss),
+ (unsigned) atomic_read(&cache->stats.demotion),
+ (unsigned) atomic_read(&cache->stats.promotion),
+ (unsigned long long) from_cblock(residency),
+ cache->nr_dirty);
+
+ if (cache->features.write_through)
+ DMEMIT("1 writethrough ");
+ else
+ DMEMIT("0 ");
+
+ DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
+ if (sz < maxlen) {
+ r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
+ if (r)
+ DMERR("policy_emit_config_values returned %d", r);
+ }
+
+ break;
+
+ case STATUSTYPE_TABLE:
+ format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
+ DMEMIT("%s ", buf);
+ format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
+ DMEMIT("%s ", buf);
+ format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
+ DMEMIT("%s", buf);
+
+ for (i = 0; i < cache->nr_ctr_args - 1; i++)
+ DMEMIT(" %s", cache->ctr_args[i]);
+ if (cache->nr_ctr_args)
+ DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
+ }
+
+ return;
+
+err:
+ DMEMIT("Error");
+}
+
+#define NOT_CORE_OPTION 1
+
+static int process_config_option(struct cache *cache, char **argv)
+{
+ unsigned long tmp;
+
+ if (!strcasecmp(argv[0], "migration_threshold")) {
+ if (kstrtoul(argv[1], 10, &tmp))
+ return -EINVAL;
+
+ cache->migration_threshold = tmp;
+ return 0;
+ }
+
+ return NOT_CORE_OPTION;
+}
+
+/*
+ * Supports <key> <value>.
+ *
+ * The key migration_threshold is supported by the cache target core.
+ */
+static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int r;
+ struct cache *cache = ti->private;
+
+ if (argc != 2)
+ return -EINVAL;
+
+ r = process_config_option(cache, argv);
+ if (r == NOT_CORE_OPTION)
+ return policy_set_config_value(cache->policy, argv[0], argv[1]);
+
+ return r;
+}
+
+static int cache_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ int r = 0;
+ struct cache *cache = ti->private;
+
+ r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
+ if (!r)
+ r = fn(ti, cache->origin_dev, 0, ti->len, data);
+
+ return r;
+}
+
+/*
+ * We assume I/O is going to the origin (which is the volume
+ * more likely to have restrictions e.g. by being striped).
+ * (Looking up the exact location of the data would be expensive
+ * and could always be out of date by the time the bio is submitted.)
+ */
+static int cache_bvec_merge(struct dm_target *ti,
+ struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct cache *cache = ti->private;
+ struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = cache->origin_dev->bdev;
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
+{
+ /*
+ * FIXME: these limits may be incompatible with the cache device
+ */
+ limits->max_discard_sectors = cache->discard_block_size * 1024;
+ limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+}
+
+static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct cache *cache = ti->private;
+
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+ set_discard_limits(cache, limits);
+}
+
+/*----------------------------------------------------------------*/
+
+static struct target_type cache_target = {
+ .name = "cache",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = cache_ctr,
+ .dtr = cache_dtr,
+ .map = cache_map,
+ .end_io = cache_end_io,
+ .postsuspend = cache_postsuspend,
+ .preresume = cache_preresume,
+ .resume = cache_resume,
+ .status = cache_status,
+ .message = cache_message,
+ .iterate_devices = cache_iterate_devices,
+ .merge = cache_bvec_merge,
+ .io_hints = cache_io_hints,
+};
+
+static int __init dm_cache_init(void)
+{
+ int r;
+
+ r = dm_register_target(&cache_target);
+ if (r) {
+ DMERR("cache target registration failed: %d", r);
+ return r;
+ }
+
+ migration_cache = KMEM_CACHE(dm_cache_migration, 0);
+ if (!migration_cache) {
+ dm_unregister_target(&cache_target);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit dm_cache_exit(void)
+{
+ dm_unregister_target(&cache_target);
+ kmem_cache_destroy(migration_cache);
+}
+
+module_init(dm_cache_init);
+module_exit(dm_cache_exit);
+
+MODULE_DESCRIPTION(DM_NAME " cache target");
+MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f7369f9d8595..13c15480d940 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1234,20 +1234,6 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
return 0;
}
-/*
- * Encode key into its hex representation
- */
-static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
-{
- unsigned int i;
-
- for (i = 0; i < size; i++) {
- sprintf(hex, "%02x", *key);
- hex += 2;
- key++;
- }
-}
-
static void crypt_free_tfms(struct crypt_config *cc)
{
unsigned i;
@@ -1651,7 +1637,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (opt_params == 1 && opt_string &&
!strcasecmp(opt_string, "allow_discards"))
- ti->num_discard_requests = 1;
+ ti->num_discard_bios = 1;
else if (opt_params) {
ret = -EINVAL;
ti->error = "Invalid feature arguments";
@@ -1679,7 +1665,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
ti->discard_zeroes_data_unsupported = true;
return 0;
@@ -1717,11 +1703,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
-static int crypt_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void crypt_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct crypt_config *cc = ti->private;
- unsigned int sz = 0;
+ unsigned i, sz = 0;
switch (type) {
case STATUSTYPE_INFO:
@@ -1731,27 +1717,20 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_TABLE:
DMEMIT("%s ", cc->cipher_string);
- if (cc->key_size > 0) {
- if ((maxlen - sz) < ((cc->key_size << 1) + 1))
- return -ENOMEM;
-
- crypt_encode_key(result + sz, cc->key, cc->key_size);
- sz += cc->key_size << 1;
- } else {
- if (sz >= maxlen)
- return -ENOMEM;
- result[sz++] = '-';
- }
+ if (cc->key_size > 0)
+ for (i = 0; i < cc->key_size; i++)
+ DMEMIT("%02x", cc->key[i]);
+ else
+ DMEMIT("-");
DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
cc->dev->name, (unsigned long long)cc->start);
- if (ti->num_discard_requests)
+ if (ti->num_discard_bios)
DMEMIT(" 1 allow_discards");
break;
}
- return 0;
}
static void crypt_postsuspend(struct dm_target *ti)
@@ -1845,7 +1824,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 12, 0},
+ .version = {1, 12, 1},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index cc1bd048acb2..496d5f3646a5 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -198,8 +198,8 @@ out:
mutex_init(&dc->timer_lock);
atomic_set(&dc->may_delay, 1);
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
ti->private = dc;
return 0;
@@ -293,8 +293,8 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
return delay_bio(dc, dc->read_delay, bio);
}
-static int delay_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void delay_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct delay_c *dc = ti->private;
int sz = 0;
@@ -314,8 +314,6 @@ static int delay_status(struct dm_target *ti, status_type_t type,
dc->write_delay);
break;
}
-
- return 0;
}
static int delay_iterate_devices(struct dm_target *ti,
@@ -337,7 +335,7 @@ out:
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 2, 0},
+ .version = {1, 2, 1},
.module = THIS_MODULE,
.ctr = delay_ctr,
.dtr = delay_dtr,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 9721f2ffb1a2..7fcf21cb4ff8 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -216,8 +216,8 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
ti->per_bio_data_size = sizeof(struct per_bio_data);
ti->private = fc;
return 0;
@@ -337,8 +337,8 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
return error;
}
-static int flakey_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void flakey_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
unsigned sz = 0;
struct flakey_c *fc = ti->private;
@@ -368,7 +368,6 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
break;
}
- return 0;
}
static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
@@ -411,7 +410,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 3, 0},
+ .version = {1, 3, 1},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 0666b5d14b88..aa04f0224642 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1067,6 +1067,7 @@ static void retrieve_status(struct dm_table *table,
num_targets = dm_table_get_num_targets(table);
for (i = 0; i < num_targets; i++) {
struct dm_target *ti = dm_table_get_target(table, i);
+ size_t l;
remaining = len - (outptr - outbuf);
if (remaining <= sizeof(struct dm_target_spec)) {
@@ -1093,14 +1094,17 @@ static void retrieve_status(struct dm_table *table,
if (ti->type->status) {
if (param->flags & DM_NOFLUSH_FLAG)
status_flags |= DM_STATUS_NOFLUSH_FLAG;
- if (ti->type->status(ti, type, status_flags, outptr, remaining)) {
- param->flags |= DM_BUFFER_FULL_FLAG;
- break;
- }
+ ti->type->status(ti, type, status_flags, outptr, remaining);
} else
outptr[0] = '\0';
- outptr += strlen(outptr) + 1;
+ l = strlen(outptr) + 1;
+ if (l == remaining) {
+ param->flags |= DM_BUFFER_FULL_FLAG;
+ break;
+ }
+
+ outptr += l;
used = param->data_start + (outptr - outbuf);
outptr = align_ptr(outptr);
@@ -1410,6 +1414,22 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
return 0;
}
+static bool buffer_test_overflow(char *result, unsigned maxlen)
+{
+ return !maxlen || strlen(result) + 1 >= maxlen;
+}
+
+/*
+ * Process device-mapper dependent messages.
+ * Returns a number <= 1 if message was processed by device mapper.
+ * Returns 2 if message should be delivered to the target.
+ */
+static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
+ char *result, unsigned maxlen)
+{
+ return 2;
+}
+
/*
* Pass a message to the target that's at the supplied device offset.
*/
@@ -1421,6 +1441,8 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
struct dm_table *table;
struct dm_target *ti;
struct dm_target_msg *tmsg = (void *) param + param->data_start;
+ size_t maxlen;
+ char *result = get_result_buffer(param, param_size, &maxlen);
md = find_device(param);
if (!md)
@@ -1444,6 +1466,10 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
goto out_argv;
}
+ r = message_for_md(md, argc, argv, result, maxlen);
+ if (r <= 1)
+ goto out_argv;
+
table = dm_get_live_table(md);
if (!table)
goto out_argv;
@@ -1469,44 +1495,68 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
out_argv:
kfree(argv);
out:
- param->data_size = 0;
+ if (r >= 0)
+ __dev_status(md, param);
+
+ if (r == 1) {
+ param->flags |= DM_DATA_OUT_FLAG;
+ if (buffer_test_overflow(result, maxlen))
+ param->flags |= DM_BUFFER_FULL_FLAG;
+ else
+ param->data_size = param->data_start + strlen(result) + 1;
+ r = 0;
+ }
+
dm_put(md);
return r;
}
+/*
+ * The ioctl parameter block consists of two parts, a dm_ioctl struct
+ * followed by a data buffer. This flag is set if the second part,
+ * which has a variable size, is not used by the function processing
+ * the ioctl.
+ */
+#define IOCTL_FLAGS_NO_PARAMS 1
+
/*-----------------------------------------------------------------
* Implementation of open/close/ioctl on the special char
* device.
*---------------------------------------------------------------*/
-static ioctl_fn lookup_ioctl(unsigned int cmd)
+static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
{
static struct {
int cmd;
+ int flags;
ioctl_fn fn;
} _ioctls[] = {
- {DM_VERSION_CMD, NULL}, /* version is dealt with elsewhere */
- {DM_REMOVE_ALL_CMD, remove_all},
- {DM_LIST_DEVICES_CMD, list_devices},
-
- {DM_DEV_CREATE_CMD, dev_create},
- {DM_DEV_REMOVE_CMD, dev_remove},
- {DM_DEV_RENAME_CMD, dev_rename},
- {DM_DEV_SUSPEND_CMD, dev_suspend},
- {DM_DEV_STATUS_CMD, dev_status},
- {DM_DEV_WAIT_CMD, dev_wait},
-
- {DM_TABLE_LOAD_CMD, table_load},
- {DM_TABLE_CLEAR_CMD, table_clear},
- {DM_TABLE_DEPS_CMD, table_deps},
- {DM_TABLE_STATUS_CMD, table_status},
-
- {DM_LIST_VERSIONS_CMD, list_versions},
-
- {DM_TARGET_MSG_CMD, target_message},
- {DM_DEV_SET_GEOMETRY_CMD, dev_set_geometry}
+ {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
+ {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
+ {DM_LIST_DEVICES_CMD, 0, list_devices},
+
+ {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
+ {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
+ {DM_DEV_RENAME_CMD, 0, dev_rename},
+ {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
+ {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
+ {DM_DEV_WAIT_CMD, 0, dev_wait},
+
+ {DM_TABLE_LOAD_CMD, 0, table_load},
+ {DM_TABLE_CLEAR_CMD, IOCTL_FLAGS_NO_PARAMS, table_clear},
+ {DM_TABLE_DEPS_CMD, 0, table_deps},
+ {DM_TABLE_STATUS_CMD, 0, table_status},
+
+ {DM_LIST_VERSIONS_CMD, 0, list_versions},
+
+ {DM_TARGET_MSG_CMD, 0, target_message},
+ {DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry}
};
- return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn;
+ if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
+ return NULL;
+
+ *ioctl_flags = _ioctls[cmd].flags;
+ return _ioctls[cmd].fn;
}
/*
@@ -1543,7 +1593,8 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
return r;
}
-#define DM_PARAMS_VMALLOC 0x0001 /* Params alloced with vmalloc not kmalloc */
+#define DM_PARAMS_KMALLOC 0x0001 /* Params alloced with kmalloc */
+#define DM_PARAMS_VMALLOC 0x0002 /* Params alloced with vmalloc */
#define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */
static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
@@ -1551,66 +1602,80 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
if (param_flags & DM_WIPE_BUFFER)
memset(param, 0, param_size);
+ if (param_flags & DM_PARAMS_KMALLOC)
+ kfree(param);
if (param_flags & DM_PARAMS_VMALLOC)
vfree(param);
- else
- kfree(param);
}
-static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param, int *param_flags)
+static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
+ int ioctl_flags,
+ struct dm_ioctl **param, int *param_flags)
{
- struct dm_ioctl tmp, *dmi;
+ struct dm_ioctl *dmi;
int secure_data;
+ const size_t minimum_data_size = sizeof(*param_kernel) - sizeof(param_kernel->data);
- if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data)))
+ if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
- if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data)))
+ if (param_kernel->data_size < minimum_data_size)
return -EINVAL;
- secure_data = tmp.flags & DM_SECURE_DATA_FLAG;
+ secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;
*param_flags = secure_data ? DM_WIPE_BUFFER : 0;
+ if (ioctl_flags & IOCTL_FLAGS_NO_PARAMS) {
+ dmi = param_kernel;
+ dmi->data_size = minimum_data_size;
+ goto data_copied;
+ }
+
/*
* Try to avoid low memory issues when a device is suspended.
* Use kmalloc() rather than vmalloc() when we can.
*/
dmi = NULL;
- if (tmp.data_size <= KMALLOC_MAX_SIZE)
- dmi = kmalloc(tmp.data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ if (param_kernel->data_size <= KMALLOC_MAX_SIZE) {
+ dmi = kmalloc(param_kernel->data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ if (dmi)
+ *param_flags |= DM_PARAMS_KMALLOC;
+ }
if (!dmi) {
- dmi = __vmalloc(tmp.data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
- *param_flags |= DM_PARAMS_VMALLOC;
+ dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
+ if (dmi)
+ *param_flags |= DM_PARAMS_VMALLOC;
}
if (!dmi) {
- if (secure_data && clear_user(user, tmp.data_size))
+ if (secure_data && clear_user(user, param_kernel->data_size))
return -EFAULT;
return -ENOMEM;
}
- if (copy_from_user(dmi, user, tmp.data_size))
+ if (copy_from_user(dmi, user, param_kernel->data_size))
goto bad;
+data_copied:
/*
* Abort if something changed the ioctl data while it was being copied.
*/
- if (dmi->data_size != tmp.data_size) {
+ if (dmi->data_size != param_kernel->data_size) {
DMERR("rejecting ioctl: data size modified while processing parameters");
goto bad;
}
/* Wipe the user buffer so we do not return it to userspace */
- if (secure_data && clear_user(user, tmp.data_size))
+ if (secure_data && clear_user(user, param_kernel->data_size))
goto bad;
*param = dmi;
return 0;
bad:
- free_params(dmi, tmp.data_size, *param_flags);
+ free_params(dmi, param_kernel->data_size, *param_flags);
return -EFAULT;
}
@@ -1621,6 +1686,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
param->flags &= ~DM_BUFFER_FULL_FLAG;
param->flags &= ~DM_UEVENT_GENERATED_FLAG;
param->flags &= ~DM_SECURE_DATA_FLAG;
+ param->flags &= ~DM_DATA_OUT_FLAG;
/* Ignores parameters */
if (cmd == DM_REMOVE_ALL_CMD ||
@@ -1648,11 +1714,13 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
{
int r = 0;
+ int ioctl_flags;
int param_flags;
unsigned int cmd;
struct dm_ioctl *uninitialized_var(param);
ioctl_fn fn = NULL;
size_t input_param_size;
+ struct dm_ioctl param_kernel;
/* only root can play with this */
if (!capable(CAP_SYS_ADMIN))
@@ -1677,7 +1745,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
if (cmd == DM_VERSION_CMD)
return 0;
- fn = lookup_ioctl(cmd);
+ fn = lookup_ioctl(cmd, &ioctl_flags);
if (!fn) {
DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
return -ENOTTY;
@@ -1686,7 +1754,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
/*
* Copy the parameters into kernel space.
*/
- r = copy_params(user, &param, &param_flags);
+ r = copy_params(user, &param_kernel, ioctl_flags, &param, &param_flags);
if (r)
return r;
@@ -1699,6 +1767,10 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
param->data_size = sizeof(*param);
r = fn(param, input_param_size);
+ if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
+ unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
+ DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
+
/*
* Copy the results back to userland.
*/
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 68c02673263b..d581fe5d2faf 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -22,6 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/delay.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>
@@ -51,6 +52,8 @@ struct dm_kcopyd_client {
struct workqueue_struct *kcopyd_wq;
struct work_struct kcopyd_work;
+ struct dm_kcopyd_throttle *throttle;
+
/*
* We maintain three lists of jobs:
*
@@ -68,6 +71,117 @@ struct dm_kcopyd_client {
static struct page_list zero_page_list;
+static DEFINE_SPINLOCK(throttle_spinlock);
+
+/*
+ * IO/IDLE accounting slowly decays after (1 << ACCOUNT_INTERVAL_SHIFT) period.
+ * When total_period >= (1 << ACCOUNT_INTERVAL_SHIFT) the counters are divided
+ * by 2.
+ */
+#define ACCOUNT_INTERVAL_SHIFT SHIFT_HZ
+
+/*
+ * Sleep this number of milliseconds.
+ *
+ * The value was decided experimentally.
+ * Smaller values seem to cause an increased copy rate above the limit.
+ * The reason for this is unknown but possibly due to jiffies rounding errors
+ * or read/write cache inside the disk.
+ */
+#define SLEEP_MSEC 100
+
+/*
+ * Maximum number of sleep events. There is a theoretical livelock if more
+ * kcopyd clients do work simultaneously which this limit avoids.
+ */
+#define MAX_SLEEPS 10
+
+static void io_job_start(struct dm_kcopyd_throttle *t)
+{
+ unsigned throttle, now, difference;
+ int slept = 0, skew;
+
+ if (unlikely(!t))
+ return;
+
+try_again:
+ spin_lock_irq(&throttle_spinlock);
+
+ throttle = ACCESS_ONCE(t->throttle);
+
+ if (likely(throttle >= 100))
+ goto skip_limit;
+
+ now = jiffies;
+ difference = now - t->last_jiffies;
+ t->last_jiffies = now;
+ if (t->num_io_jobs)
+ t->io_period += difference;
+ t->total_period += difference;
+
+ /*
+ * Maintain sane values if we got a temporary overflow.
+ */
+ if (unlikely(t->io_period > t->total_period))
+ t->io_period = t->total_period;
+
+ if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
+ int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
+ t->total_period >>= shift;
+ t->io_period >>= shift;
+ }
+
+ skew = t->io_period - throttle * t->total_period / 100;
+
+ if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
+ slept++;
+ spin_unlock_irq(&throttle_spinlock);
+ msleep(SLEEP_MSEC);
+ goto try_again;
+ }
+
+skip_limit:
+ t->num_io_jobs++;
+
+ spin_unlock_irq(&throttle_spinlock);
+}
+
+static void io_job_finish(struct dm_kcopyd_throttle *t)
+{
+ unsigned long flags;
+
+ if (unlikely(!t))
+ return;
+
+ spin_lock_irqsave(&throttle_spinlock, flags);
+
+ t->num_io_jobs--;
+
+ if (likely(ACCESS_ONCE(t->throttle) >= 100))
+ goto skip_limit;
+
+ if (!t->num_io_jobs) {
+ unsigned now, difference;
+
+ now = jiffies;
+ difference = now - t->last_jiffies;
+ t->last_jiffies = now;
+
+ t->io_period += difference;
+ t->total_period += difference;
+
+ /*
+ * Maintain sane values if we got a temporary overflow.
+ */
+ if (unlikely(t->io_period > t->total_period))
+ t->io_period = t->total_period;
+ }
+
+skip_limit:
+ spin_unlock_irqrestore(&throttle_spinlock, flags);
+}
+
+
static void wake(struct dm_kcopyd_client *kc)
{
queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
@@ -348,6 +462,8 @@ static void complete_io(unsigned long error, void *context)
struct kcopyd_job *job = (struct kcopyd_job *) context;
struct dm_kcopyd_client *kc = job->kc;
+ io_job_finish(kc->throttle);
+
if (error) {
if (job->rw & WRITE)
job->write_err |= error;
@@ -389,6 +505,8 @@ static int run_io_job(struct kcopyd_job *job)
.client = job->kc->io_client,
};
+ io_job_start(job->kc->throttle);
+
if (job->rw == READ)
r = dm_io(&io_req, 1, &job->source, NULL);
else
@@ -695,7 +813,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
/*-----------------------------------------------------------------
* Client setup
*---------------------------------------------------------------*/
-struct dm_kcopyd_client *dm_kcopyd_client_create(void)
+struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
int r = -ENOMEM;
struct dm_kcopyd_client *kc;
@@ -708,6 +826,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(void)
INIT_LIST_HEAD(&kc->complete_jobs);
INIT_LIST_HEAD(&kc->io_jobs);
INIT_LIST_HEAD(&kc->pages_jobs);
+ kc->throttle = throttle;
kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
if (!kc->job_pool)
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 328cad5617ab..4f99d267340c 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -53,9 +53,9 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
- ti->num_write_same_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
+ ti->num_write_same_bios = 1;
ti->private = lc;
return 0;
@@ -95,8 +95,8 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
-static int linear_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void linear_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct linear_c *lc = (struct linear_c *) ti->private;
@@ -110,7 +110,6 @@ static int linear_status(struct dm_target *ti, status_type_t type,
(unsigned long long)lc->start);
break;
}
- return 0;
}
static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
@@ -155,7 +154,7 @@ static int linear_iterate_devices(struct dm_target *ti,
static struct target_type linear_target = {
.name = "linear",
- .version = {1, 2, 0},
+ .version = {1, 2, 1},
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 573bd04591bf..51bb81676be3 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -905,8 +905,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
goto bad;
}
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
return 0;
@@ -1378,8 +1378,8 @@ static void multipath_resume(struct dm_target *ti)
* [priority selector-name num_ps_args [ps_args]*
* num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
*/
-static int multipath_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void multipath_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
int sz = 0;
unsigned long flags;
@@ -1485,8 +1485,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
}
spin_unlock_irqrestore(&m->lock, flags);
-
- return 0;
}
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
@@ -1695,7 +1693,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 5, 0},
+ .version = {1, 5, 1},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9e58dbd8d8cb..9a01d1e4c783 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1151,7 +1151,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
mutex_lock(&rs->md.reconfig_mutex);
ret = md_run(&rs->md);
@@ -1201,8 +1201,8 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
-static int raid_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void raid_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct raid_set *rs = ti->private;
unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
@@ -1344,8 +1344,6 @@ static int raid_status(struct dm_target *ti, status_type_t type,
DMEMIT(" -");
}
}
-
- return 0;
}
static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
@@ -1405,7 +1403,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 4, 1},
+ .version = {1, 4, 2},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index fa519185ebba..d053098c6a91 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -82,6 +82,9 @@ struct mirror_set {
struct mirror mirror[0];
};
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
+ "A percentage of time allocated for raid resynchronization");
+
static void wakeup_mirrord(void *context)
{
struct mirror_set *ms = context;
@@ -1072,8 +1075,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto err_free_context;
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
ti->discard_zeroes_data_unsupported = true;
@@ -1111,7 +1114,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err_destroy_wq;
}
- ms->kcopyd_client = dm_kcopyd_client_create();
+ ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(ms->kcopyd_client)) {
r = PTR_ERR(ms->kcopyd_client);
goto err_destroy_wq;
@@ -1347,8 +1350,8 @@ static char device_status_char(struct mirror *m)
}
-static int mirror_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void mirror_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
unsigned int m, sz = 0;
struct mirror_set *ms = (struct mirror_set *) ti->private;
@@ -1383,8 +1386,6 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
if (ms->features & DM_RAID1_HANDLE_ERRORS)
DMEMIT(" 1 handle_errors");
}
-
- return 0;
}
static int mirror_iterate_devices(struct dm_target *ti,
@@ -1403,7 +1404,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 13, 1},
+ .version = {1, 13, 2},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
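
The raid1_resync_throttle declaration above is the per-target face of the new kcopyd throttle: each user picks its own module-parameter name (raid1_resync_throttle here, snapshot_copy_throttle in dm-snap.c and dm-thin.c), yet every call site passes &dm_kcopyd_throttle, so the macro presumably defines a file-local variable of that fixed name and exposes its throttle percentage as a writable module parameter. Once loaded, the value should then be adjustable through the module's parameters directory in sysfs (for dm-raid1, typically /sys/module/dm_mirror/parameters/raid1_resync_throttle). The expansion sketched below is an inference from those call sites, not a copy of dm-kcopyd.h; treat the field name, default and permissions as assumptions.

    /* Assumed expansion, for orientation only; see include/linux/dm-kcopyd.h
     * for the real definition. 100 is taken to mean "unthrottled". */
    #define EXAMPLE_DM_KCOPYD_THROTTLE(name, description)                    \
        static struct dm_kcopyd_throttle dm_kcopyd_throttle = {              \
            .throttle = 100,                                                 \
        };                                                                   \
        module_param_named(name, dm_kcopyd_throttle.throttle, uint, 0644);   \
        MODULE_PARM_DESC(name, description)
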
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 59fc18ae52c2..c0e07026a8d1 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -124,6 +124,9 @@ struct dm_snapshot {
#define RUNNING_MERGE 0
#define SHUTDOWN_MERGE 1
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
+ "A percentage of time allocated for copy on write");
+
struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
return s->origin;
@@ -227,12 +230,11 @@ static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
struct dm_snap_tracked_chunk *c;
- struct hlist_node *hn;
int found = 0;
spin_lock_irq(&s->tracked_chunk_lock);
- hlist_for_each_entry(c, hn,
+ hlist_for_each_entry(c,
&s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
if (c->chunk == chunk) {
found = 1;
@@ -1038,7 +1040,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int i;
int r = -EINVAL;
char *origin_path, *cow_path;
- unsigned args_used, num_flush_requests = 1;
+ unsigned args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
if (argc != 4) {
@@ -1048,7 +1050,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
if (dm_target_is_snapshot_merge(ti)) {
- num_flush_requests = 2;
+ num_flush_bios = 2;
origin_mode = FMODE_WRITE;
}
@@ -1109,7 +1111,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
- s->kcopyd_client = dm_kcopyd_client_create();
+ s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
r = PTR_ERR(s->kcopyd_client);
ti->error = "Could not create kcopyd client";
@@ -1128,7 +1130,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
spin_lock_init(&s->tracked_chunk_lock);
ti->private = s;
- ti->num_flush_requests = num_flush_requests;
+ ti->num_flush_bios = num_flush_bios;
ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
/* Add snapshot to the list of snapshots for this origin */
@@ -1692,7 +1694,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
if (bio->bi_rw & REQ_FLUSH) {
- if (!dm_bio_get_target_request_nr(bio))
+ if (!dm_bio_get_target_bio_nr(bio))
bio->bi_bdev = s->origin->bdev;
else
bio->bi_bdev = s->cow->bdev;
@@ -1837,8 +1839,8 @@ static void snapshot_merge_resume(struct dm_target *ti)
start_merge(s);
}
-static int snapshot_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void snapshot_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
unsigned sz = 0;
struct dm_snapshot *snap = ti->private;
@@ -1884,8 +1886,6 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
maxlen - sz);
break;
}
-
- return 0;
}
static int snapshot_iterate_devices(struct dm_target *ti,
@@ -2105,7 +2105,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->private = dev;
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
return 0;
}
@@ -2139,8 +2139,8 @@ static void origin_resume(struct dm_target *ti)
ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
}
-static int origin_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void origin_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct dm_dev *dev = ti->private;
@@ -2153,8 +2153,6 @@ static int origin_status(struct dm_target *ti, status_type_t type,
snprintf(result, maxlen, "%s", dev->name);
break;
}
-
- return 0;
}
static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
@@ -2181,7 +2179,7 @@ static int origin_iterate_devices(struct dm_target *ti,
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 8, 0},
+ .version = {1, 8, 1},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
@@ -2194,7 +2192,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 11, 0},
+ .version = {1, 11, 1},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2307,3 +2305,5 @@ module_exit(dm_snapshot_exit);
MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("dm-snapshot-origin");
+MODULE_ALIAS("dm-snapshot-merge");
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index c89cde86d400..d8837d313f54 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -160,9 +160,9 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
return r;
- ti->num_flush_requests = stripes;
- ti->num_discard_requests = stripes;
- ti->num_write_same_requests = stripes;
+ ti->num_flush_bios = stripes;
+ ti->num_discard_bios = stripes;
+ ti->num_write_same_bios = stripes;
sc->chunk_size = chunk_size;
if (chunk_size & (chunk_size - 1))
@@ -276,19 +276,19 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
{
struct stripe_c *sc = ti->private;
uint32_t stripe;
- unsigned target_request_nr;
+ unsigned target_bio_nr;
if (bio->bi_rw & REQ_FLUSH) {
- target_request_nr = dm_bio_get_target_request_nr(bio);
- BUG_ON(target_request_nr >= sc->stripes);
- bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
+ target_bio_nr = dm_bio_get_target_bio_nr(bio);
+ BUG_ON(target_bio_nr >= sc->stripes);
+ bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
return DM_MAPIO_REMAPPED;
}
if (unlikely(bio->bi_rw & REQ_DISCARD) ||
unlikely(bio->bi_rw & REQ_WRITE_SAME)) {
- target_request_nr = dm_bio_get_target_request_nr(bio);
- BUG_ON(target_request_nr >= sc->stripes);
- return stripe_map_range(sc, bio, target_request_nr);
+ target_bio_nr = dm_bio_get_target_bio_nr(bio);
+ BUG_ON(target_bio_nr >= sc->stripes);
+ return stripe_map_range(sc, bio, target_bio_nr);
}
stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
@@ -312,8 +312,8 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
*
*/
-static int stripe_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void stripe_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
char buffer[sc->stripes + 1];
@@ -340,7 +340,6 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
(unsigned long long)sc->stripe[i].physical_start);
break;
}
- return 0;
}
static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
@@ -428,7 +427,7 @@ static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 5, 0},
+ .version = {1, 5, 1},
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
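
In dm-stripe the rename is more than cosmetic in intent: num_flush_bios, num_discard_bios and num_write_same_bios are set to the stripe count, so core dm clones one bio per stripe for those operations, and dm_bio_get_target_bio_nr() tells stripe_map() which duplicate it is handling so it can be routed to the matching backing device. A generic sketch of that dispatch, with assumed names and a fixed stripe count for illustration:

    /* Sketch: routing the Nth duplicated flush, assuming the constructor set
     * ti->num_flush_bios = EXAMPLE_STRIPES. All names here are illustrative. */
    #include <linux/device-mapper.h>

    #define EXAMPLE_STRIPES 4

    struct example_sc {
        struct dm_dev *dev[EXAMPLE_STRIPES];
    };

    static int example_map(struct dm_target *ti, struct bio *bio)
    {
        struct example_sc *sc = ti->private;

        if (bio->bi_rw & REQ_FLUSH) {
            unsigned n = dm_bio_get_target_bio_nr(bio);  /* 0 .. num_flush_bios - 1 */

            bio->bi_bdev = sc->dev[n]->bdev;
            return DM_MAPIO_REMAPPED;
        }
        /* normal data path elided */
        return DM_MAPIO_REMAPPED;
    }
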
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index daf25d0890b3..e50dad0c65f4 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -217,7 +217,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
if (alloc_targets(t, num_targets)) {
kfree(t);
- t = NULL;
return -ENOMEM;
}
@@ -823,8 +822,8 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
- if (!tgt->num_discard_requests && tgt->discards_supported)
- DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.",
+ if (!tgt->num_discard_bios && tgt->discards_supported)
+ DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
dm_device_name(t->md), type);
return 0;
@@ -1360,7 +1359,7 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
- if (!ti->num_flush_requests)
+ if (!ti->num_flush_bios)
continue;
if (ti->flush_supported)
@@ -1439,7 +1438,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
- if (!ti->num_write_same_requests)
+ if (!ti->num_write_same_bios)
return false;
if (!ti->type->iterate_devices ||
@@ -1657,7 +1656,7 @@ bool dm_table_supports_discards(struct dm_table *t)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
- if (!ti->num_discard_requests)
+ if (!ti->num_discard_bios)
continue;
if (ti->discards_supported)
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 617d21a77256..37ba5db71cd9 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -116,7 +116,7 @@ static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
/*
* Return error for discards instead of -EOPNOTSUPP
*/
- tt->num_discard_requests = 1;
+ tt->num_discard_bios = 1;
return 0;
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 4d6e85367b84..00cee02f8fc9 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -280,7 +280,7 @@ static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
*t = v & ((1 << 24) - 1);
}
-static void data_block_inc(void *context, void *value_le)
+static void data_block_inc(void *context, const void *value_le)
{
struct dm_space_map *sm = context;
__le64 v_le;
@@ -292,7 +292,7 @@ static void data_block_inc(void *context, void *value_le)
dm_sm_inc_block(sm, b);
}
-static void data_block_dec(void *context, void *value_le)
+static void data_block_dec(void *context, const void *value_le)
{
struct dm_space_map *sm = context;
__le64 v_le;
@@ -304,7 +304,7 @@ static void data_block_dec(void *context, void *value_le)
dm_sm_dec_block(sm, b);
}
-static int data_block_equal(void *context, void *value1_le, void *value2_le)
+static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
{
__le64 v1_le, v2_le;
uint64_t b1, b2;
@@ -318,7 +318,7 @@ static int data_block_equal(void *context, void *value1_le, void *value2_le)
return b1 == b2;
}
-static void subtree_inc(void *context, void *value)
+static void subtree_inc(void *context, const void *value)
{
struct dm_btree_info *info = context;
__le64 root_le;
@@ -329,7 +329,7 @@ static void subtree_inc(void *context, void *value)
dm_tm_inc(info->tm, root);
}
-static void subtree_dec(void *context, void *value)
+static void subtree_dec(void *context, const void *value)
{
struct dm_btree_info *info = context;
__le64 root_le;
@@ -341,7 +341,7 @@ static void subtree_dec(void *context, void *value)
DMERR("btree delete failed\n");
}
-static int subtree_equal(void *context, void *value1_le, void *value2_le)
+static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
__le64 v1_le, v2_le;
memcpy(&v1_le, value1_le, sizeof(v1_le));
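
The dm-thin-metadata hunks adapt the btree value-type callbacks to a persistent-data API change: inc, dec and equal now receive const pointers to the packed little-endian values. A sketch of a value type written against the const-qualified prototypes; the field names are assumed to match drivers/md/persistent-data/dm-btree.h and the reference-count bodies are elided.

    /* Sketch only: a dm_btree_value_type using the new const signatures. */
    #include <linux/types.h>
    #include <linux/string.h>
    #include "dm-btree.h"       /* private header in drivers/md/persistent-data */

    static void example_inc(void *context, const void *value_le) { /* take a ref */ }
    static void example_dec(void *context, const void *value_le) { /* drop a ref */ }

    static int example_equal(void *context, const void *v1_le, const void *v2_le)
    {
        return !memcmp(v1_le, v2_le, sizeof(__le64));
    }

    static struct dm_btree_value_type example_vt = {
        .context = NULL,
        .size    = sizeof(__le64),
        .inc     = example_inc,
        .dec     = example_dec,
        .equal   = example_equal,
    };
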
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 5409607d4875..009339d62828 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -26,6 +26,9 @@
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
+ "A percentage of time allocated for copy on write");
+
/*
* The block size of the device holding pool data must be
* between 64KB and 1GB.
@@ -227,6 +230,78 @@ struct thin_c {
/*----------------------------------------------------------------*/
/*
+ * wake_worker() is used when new work is queued and when pool_resume is
+ * ready to continue deferred IO processing.
+ */
+static void wake_worker(struct pool *pool)
+{
+ queue_work(pool->wq, &pool->worker);
+}
+
+/*----------------------------------------------------------------*/
+
+static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
+ struct dm_bio_prison_cell **cell_result)
+{
+ int r;
+ struct dm_bio_prison_cell *cell_prealloc;
+
+ /*
+ * Allocate a cell from the prison's mempool.
+ * This might block but it can't fail.
+ */
+ cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
+
+ r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
+ if (r)
+ /*
+ * We reused an old cell; we can get rid of
+ * the new one.
+ */
+ dm_bio_prison_free_cell(pool->prison, cell_prealloc);
+
+ return r;
+}
+
+static void cell_release(struct pool *pool,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *bios)
+{
+ dm_cell_release(pool->prison, cell, bios);
+ dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+static void cell_release_no_holder(struct pool *pool,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *bios)
+{
+ dm_cell_release_no_holder(pool->prison, cell, bios);
+ dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+static void cell_defer_no_holder_no_free(struct thin_c *tc,
+ struct dm_bio_prison_cell *cell)
+{
+ struct pool *pool = tc->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ wake_worker(pool);
+}
+
+static void cell_error(struct pool *pool,
+ struct dm_bio_prison_cell *cell)
+{
+ dm_cell_error(pool->prison, cell);
+ dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
* A global list of pools that uses a struct mapped_device as a key.
*/
static struct dm_thin_pool_table {
@@ -330,14 +405,20 @@ static void requeue_io(struct thin_c *tc)
* target.
*/
+static bool block_size_is_power_of_two(struct pool *pool)
+{
+ return pool->sectors_per_block_shift >= 0;
+}
+
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
+ struct pool *pool = tc->pool;
sector_t block_nr = bio->bi_sector;
- if (tc->pool->sectors_per_block_shift < 0)
- (void) sector_div(block_nr, tc->pool->sectors_per_block);
+ if (block_size_is_power_of_two(pool))
+ block_nr >>= pool->sectors_per_block_shift;
else
- block_nr >>= tc->pool->sectors_per_block_shift;
+ (void) sector_div(block_nr, pool->sectors_per_block);
return block_nr;
}
@@ -348,12 +429,12 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
sector_t bi_sector = bio->bi_sector;
bio->bi_bdev = tc->pool_dev->bdev;
- if (tc->pool->sectors_per_block_shift < 0)
- bio->bi_sector = (block * pool->sectors_per_block) +
- sector_div(bi_sector, pool->sectors_per_block);
- else
+ if (block_size_is_power_of_two(pool))
bio->bi_sector = (block << pool->sectors_per_block_shift) |
(bi_sector & (pool->sectors_per_block - 1));
+ else
+ bio->bi_sector = (block * pool->sectors_per_block) +
+ sector_div(bi_sector, pool->sectors_per_block);
}
static void remap_to_origin(struct thin_c *tc, struct bio *bio)
@@ -420,15 +501,6 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
issue(tc, bio);
}
-/*
- * wake_worker() is used when new work is queued and when pool_resume is
- * ready to continue deferred IO processing.
- */
-static void wake_worker(struct pool *pool)
-{
- queue_work(pool->wq, &pool->worker);
-}
-
/*----------------------------------------------------------------*/
/*
@@ -515,14 +587,14 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
unsigned long flags;
spin_lock_irqsave(&pool->lock, flags);
- dm_cell_release(cell, &pool->deferred_bios);
+ cell_release(pool, cell, &pool->deferred_bios);
spin_unlock_irqrestore(&tc->pool->lock, flags);
wake_worker(pool);
}
/*
- * Same as cell_defer except it omits the original holder of the cell.
+ * Same as cell_defer above, except it omits the original holder of the cell.
*/
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
@@ -530,7 +602,7 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
unsigned long flags;
spin_lock_irqsave(&pool->lock, flags);
- dm_cell_release_no_holder(cell, &pool->deferred_bios);
+ cell_release_no_holder(pool, cell, &pool->deferred_bios);
spin_unlock_irqrestore(&pool->lock, flags);
wake_worker(pool);
@@ -540,13 +612,15 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
if (m->bio)
m->bio->bi_end_io = m->saved_bi_end_io;
- dm_cell_error(m->cell);
+ cell_error(m->tc->pool, m->cell);
list_del(&m->list);
mempool_free(m, m->tc->pool->mapping_pool);
}
+
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
+ struct pool *pool = tc->pool;
struct bio *bio;
int r;
@@ -555,7 +629,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
bio->bi_end_io = m->saved_bi_end_io;
if (m->err) {
- dm_cell_error(m->cell);
+ cell_error(pool, m->cell);
goto out;
}
@@ -567,7 +641,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
if (r) {
DMERR_LIMIT("dm_thin_insert_block() failed");
- dm_cell_error(m->cell);
+ cell_error(pool, m->cell);
goto out;
}
@@ -585,7 +659,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
out:
list_del(&m->list);
- mempool_free(m, tc->pool->mapping_pool);
+ mempool_free(m, pool->mapping_pool);
}
static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
@@ -736,7 +810,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
if (r < 0) {
mempool_free(m, pool->mapping_pool);
DMERR_LIMIT("dm_kcopyd_copy() failed");
- dm_cell_error(cell);
+ cell_error(pool, cell);
}
}
}
@@ -802,7 +876,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
if (r < 0) {
mempool_free(m, pool->mapping_pool);
DMERR_LIMIT("dm_kcopyd_zero() failed");
- dm_cell_error(cell);
+ cell_error(pool, cell);
}
}
}
@@ -908,13 +982,13 @@ static void retry_on_resume(struct bio *bio)
spin_unlock_irqrestore(&pool->lock, flags);
}
-static void no_space(struct dm_bio_prison_cell *cell)
+static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
{
struct bio *bio;
struct bio_list bios;
bio_list_init(&bios);
- dm_cell_release(cell, &bios);
+ cell_release(pool, cell, &bios);
while ((bio = bio_list_pop(&bios)))
retry_on_resume(bio);
@@ -932,7 +1006,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
struct dm_thin_new_mapping *m;
build_virtual_key(tc->td, block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
+ if (bio_detain(tc->pool, &key, bio, &cell))
return;
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -944,7 +1018,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
* on this block.
*/
build_data_key(tc->td, lookup_result.block, &key2);
- if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
+ if (bio_detain(tc->pool, &key2, bio, &cell2)) {
cell_defer_no_holder(tc, cell);
break;
}
@@ -1020,13 +1094,13 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
break;
case -ENOSPC:
- no_space(cell);
+ no_space(tc->pool, cell);
break;
default:
DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
__func__, r);
- dm_cell_error(cell);
+ cell_error(tc->pool, cell);
break;
}
}
@@ -1044,7 +1118,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
* of being broken so we have nothing further to do here.
*/
build_data_key(tc->td, lookup_result->block, &key);
- if (dm_bio_detain(pool->prison, &key, bio, &cell))
+ if (bio_detain(pool, &key, bio, &cell))
return;
if (bio_data_dir(bio) == WRITE && bio->bi_size)
@@ -1065,12 +1139,13 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
{
int r;
dm_block_t data_block;
+ struct pool *pool = tc->pool;
/*
* Remap empty bios (flushes) immediately, without provisioning.
*/
if (!bio->bi_size) {
- inc_all_io_entry(tc->pool, bio);
+ inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
remap_and_issue(tc, bio, 0);
@@ -1097,14 +1172,14 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
break;
case -ENOSPC:
- no_space(cell);
+ no_space(pool, cell);
break;
default:
DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
__func__, r);
- set_pool_mode(tc->pool, PM_READ_ONLY);
- dm_cell_error(cell);
+ set_pool_mode(pool, PM_READ_ONLY);
+ cell_error(pool, cell);
break;
}
}
@@ -1112,6 +1187,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
static void process_bio(struct thin_c *tc, struct bio *bio)
{
int r;
+ struct pool *pool = tc->pool;
dm_block_t block = get_bio_block(tc, bio);
struct dm_bio_prison_cell *cell;
struct dm_cell_key key;
@@ -1122,7 +1198,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
* being provisioned so we have nothing further to do here.
*/
build_virtual_key(tc->td, block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
+ if (bio_detain(pool, &key, bio, &cell))
return;
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1130,9 +1206,9 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
case 0:
if (lookup_result.shared) {
process_shared_bio(tc, bio, block, &lookup_result);
- cell_defer_no_holder(tc, cell);
+ cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
} else {
- inc_all_io_entry(tc->pool, bio);
+ inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
remap_and_issue(tc, bio, lookup_result.block);
@@ -1141,7 +1217,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
case -ENODATA:
if (bio_data_dir(bio) == READ && tc->origin_dev) {
- inc_all_io_entry(tc->pool, bio);
+ inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
remap_to_origin_and_issue(tc, bio);
@@ -1378,7 +1454,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
dm_block_t block = get_bio_block(tc, bio);
struct dm_thin_device *td = tc->td;
struct dm_thin_lookup_result result;
- struct dm_bio_prison_cell *cell1, *cell2;
+ struct dm_bio_prison_cell cell1, cell2;
+ struct dm_bio_prison_cell *cell_result;
struct dm_cell_key key;
thin_hook_bio(tc, bio);
@@ -1420,18 +1497,18 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
}
build_virtual_key(tc->td, block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
return DM_MAPIO_SUBMITTED;
build_data_key(tc->td, result.block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
- cell_defer_no_holder(tc, cell1);
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
+ cell_defer_no_holder_no_free(tc, &cell1);
return DM_MAPIO_SUBMITTED;
}
inc_all_io_entry(tc->pool, bio);
- cell_defer_no_holder(tc, cell2);
- cell_defer_no_holder(tc, cell1);
+ cell_defer_no_holder_no_free(tc, &cell2);
+ cell_defer_no_holder_no_free(tc, &cell1);
remap(tc, bio, result.block);
return DM_MAPIO_REMAPPED;
@@ -1636,7 +1713,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
goto bad_prison;
}
- pool->copier = dm_kcopyd_client_create();
+ pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(pool->copier)) {
r = PTR_ERR(pool->copier);
*error = "Error creating pool's kcopyd client";
@@ -1938,7 +2015,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
pt->data_dev = data_dev;
pt->low_water_blocks = low_water_blocks;
pt->adjusted_pf = pt->requested_pf = pf;
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
/*
* Only need to enable discards if the pool should pass
@@ -1946,7 +2023,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
* processing will cause mappings to be removed from the btree.
*/
if (pf.discard_enabled && pf.discard_passdown) {
- ti->num_discard_requests = 1;
+ ti->num_discard_bios = 1;
/*
* Setting 'discards_supported' circumvents the normal
@@ -2299,8 +2376,8 @@ static void emit_flags(struct pool_features *pf, char *result,
* <transaction id> <used metadata sectors>/<total metadata sectors>
* <used data sectors>/<total data sectors> <held metadata root>
*/
-static int pool_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void pool_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
int r;
unsigned sz = 0;
@@ -2326,32 +2403,41 @@ static int pool_status(struct dm_target *ti, status_type_t type,
if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
(void) commit_or_fallback(pool);
- r = dm_pool_get_metadata_transaction_id(pool->pmd,
- &transaction_id);
- if (r)
- return r;
+ r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
+ if (r) {
+ DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
+ goto err;
+ }
- r = dm_pool_get_free_metadata_block_count(pool->pmd,
- &nr_free_blocks_metadata);
- if (r)
- return r;
+ r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
+ if (r) {
+ DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
+ goto err;
+ }
r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
- if (r)
- return r;
+ if (r) {
+ DMERR("dm_pool_get_metadata_dev_size returned %d", r);
+ goto err;
+ }
- r = dm_pool_get_free_block_count(pool->pmd,
- &nr_free_blocks_data);
- if (r)
- return r;
+ r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
+ if (r) {
+ DMERR("dm_pool_get_free_block_count returned %d", r);
+ goto err;
+ }
r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
- if (r)
- return r;
+ if (r) {
+ DMERR("dm_pool_get_data_dev_size returned %d", r);
+ goto err;
+ }
r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
- if (r)
- return r;
+ if (r) {
+ DMERR("dm_pool_get_metadata_snap returned %d", r);
+ goto err;
+ }
DMEMIT("%llu %llu/%llu %llu/%llu ",
(unsigned long long)transaction_id,
@@ -2388,8 +2474,10 @@ static int pool_status(struct dm_target *ti, status_type_t type,
emit_flags(&pt->requested_pf, result, sz, maxlen);
break;
}
+ return;
- return 0;
+err:
+ DMEMIT("Error");
}
static int pool_iterate_devices(struct dm_target *ti,
@@ -2414,11 +2502,6 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
-static bool block_size_is_power_of_two(struct pool *pool)
-{
- return pool->sectors_per_block_shift >= 0;
-}
-
static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
{
struct pool *pool = pt->pool;
@@ -2432,15 +2515,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
if (pt->adjusted_pf.discard_passdown) {
data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
limits->discard_granularity = data_limits->discard_granularity;
- } else if (block_size_is_power_of_two(pool))
+ } else
limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
- else
- /*
- * Use largest power of 2 that is a factor of sectors_per_block
- * but at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS.
- */
- limits->discard_granularity = max(1 << (ffs(pool->sectors_per_block) - 1),
- DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT;
}
static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2468,7 +2544,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 6, 0},
+ .version = {1, 6, 1},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2588,17 +2664,17 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (r)
goto bad_thin_open;
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
ti->flush_supported = true;
ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
/* In case the pool supports discards, pass them on. */
if (tc->pool->pf.discard_enabled) {
ti->discards_supported = true;
- ti->num_discard_requests = 1;
+ ti->num_discard_bios = 1;
ti->discard_zeroes_data_unsupported = true;
- /* Discard requests must be split on a block boundary */
- ti->split_discard_requests = true;
+ /* Discard bios must be split on a block boundary */
+ ti->split_discard_bios = true;
}
dm_put(pool_md);
@@ -2676,8 +2752,8 @@ static void thin_postsuspend(struct dm_target *ti)
/*
* <nr mapped sectors> <highest mapped sector>
*/
-static int thin_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void thin_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
int r;
ssize_t sz = 0;
@@ -2687,7 +2763,7 @@ static int thin_status(struct dm_target *ti, status_type_t type,
if (get_pool_mode(tc->pool) == PM_FAIL) {
DMEMIT("Fail");
- return 0;
+ return;
}
if (!tc->td)
@@ -2696,12 +2772,16 @@ static int thin_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
r = dm_thin_get_mapped_count(tc->td, &mapped);
- if (r)
- return r;
+ if (r) {
+ DMERR("dm_thin_get_mapped_count returned %d", r);
+ goto err;
+ }
r = dm_thin_get_highest_mapped_block(tc->td, &highest);
- if (r < 0)
- return r;
+ if (r < 0) {
+ DMERR("dm_thin_get_highest_mapped_block returned %d", r);
+ goto err;
+ }
DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
if (r)
@@ -2721,7 +2801,10 @@ static int thin_status(struct dm_target *ti, status_type_t type,
}
}
- return 0;
+ return;
+
+err:
+ DMEMIT("Error");
}
static int thin_iterate_devices(struct dm_target *ti,
@@ -2748,7 +2831,7 @@ static int thin_iterate_devices(struct dm_target *ti,
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 7, 0},
+ .version = {1, 7, 1},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
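
The largest behavioural change in dm-thin.c is around the bio prison: cells are no longer allocated inside dm_bio_detain(), so the new bio_detain() wrapper preallocates one with dm_bio_prison_alloc_cell() (GFP_NOIO, may block, cannot fail) and frees it again when the bio joined an existing cell, while the read fast path in thin_bio_map() goes further and uses cells on the stack, released through cell_defer_no_holder_no_free() so nothing is returned to the mempool. Condensed to its core, the detain idiom introduced above looks like this (a restatement of the hunks, taking the prison directly rather than a struct pool):

    /* Condensed sketch of the preallocate-then-detain idiom. */
    #include <linux/bio.h>
    #include "dm-bio-prison.h"  /* private header in drivers/md */

    static int example_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
                              struct bio *bio, struct dm_bio_prison_cell **cell)
    {
        struct dm_bio_prison_cell *spare;
        int r;

        spare = dm_bio_prison_alloc_cell(prison, GFP_NOIO);  /* may block, never fails */
        r = dm_bio_detain(prison, key, bio, spare, cell);
        if (r)  /* joined an existing cell; the spare was not consumed */
            dm_bio_prison_free_cell(prison, spare);
        return r;
    }
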
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 52cde982164a..6ad538375c3c 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -508,8 +508,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
/*
* Status: V (valid) or C (corruption found)
*/
-static int verity_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void verity_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct dm_verity *v = ti->private;
unsigned sz = 0;
@@ -540,8 +540,6 @@ static int verity_status(struct dm_target *ti, status_type_t type,
DMEMIT("%02x", v->salt[x]);
break;
}
-
- return 0;
}
static int verity_ioctl(struct dm_target *ti, unsigned cmd,
@@ -860,7 +858,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 1, 0},
+ .version = {1, 1, 1},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 69a5c3b3b340..c99003e0d47a 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -25,7 +25,7 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/*
* Silently drop discards, avoiding -EOPNOTSUPP.
*/
- ti->num_discard_requests = 1;
+ ti->num_discard_bios = 1;
return 0;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 314a0e2faf79..7e469260fe5e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -163,7 +163,6 @@ struct mapped_device {
* io objects are allocated from here.
*/
mempool_t *io_pool;
- mempool_t *tio_pool;
struct bio_set *bs;
@@ -197,7 +196,6 @@ struct mapped_device {
*/
struct dm_md_mempools {
mempool_t *io_pool;
- mempool_t *tio_pool;
struct bio_set *bs;
};
@@ -205,12 +203,6 @@ struct dm_md_mempools {
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
-/*
- * Unused now, and needs to be deleted. But since io_pool is overloaded and it's
- * still used for _io_cache, I'm leaving this for a later cleanup
- */
-static struct kmem_cache *_rq_bio_info_cache;
-
static int __init local_init(void)
{
int r = -ENOMEM;
@@ -224,13 +216,9 @@ static int __init local_init(void)
if (!_rq_tio_cache)
goto out_free_io_cache;
- _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
- if (!_rq_bio_info_cache)
- goto out_free_rq_tio_cache;
-
r = dm_uevent_init();
if (r)
- goto out_free_rq_bio_info_cache;
+ goto out_free_rq_tio_cache;
_major = major;
r = register_blkdev(_major, _name);
@@ -244,8 +232,6 @@ static int __init local_init(void)
out_uevent_exit:
dm_uevent_exit();
-out_free_rq_bio_info_cache:
- kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
@@ -256,7 +242,6 @@ out_free_io_cache:
static void local_exit(void)
{
- kmem_cache_destroy(_rq_bio_info_cache);
kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
@@ -318,7 +303,6 @@ static void __exit dm_exit(void)
/*
* Should be empty by this point.
*/
- idr_remove_all(&_minor_idr);
idr_destroy(&_minor_idr);
}
@@ -449,12 +433,12 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
gfp_t gfp_mask)
{
- return mempool_alloc(md->tio_pool, gfp_mask);
+ return mempool_alloc(md->io_pool, gfp_mask);
}
static void free_rq_tio(struct dm_rq_target_io *tio)
{
- mempool_free(tio, tio->md->tio_pool);
+ mempool_free(tio, tio->md->io_pool);
}
static int md_in_flight(struct mapped_device *md)
@@ -627,7 +611,6 @@ static void dec_pending(struct dm_io *io, int error)
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
- trace_block_bio_complete(md->queue, bio, io_error);
bio_endio(bio, io_error);
}
}
@@ -987,12 +970,13 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
-static void __map_bio(struct dm_target *ti, struct dm_target_io *tio)
+static void __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
struct mapped_device *md;
struct bio *clone = &tio->clone;
+ struct dm_target *ti = tio->ti;
clone->bi_end_io = clone_endio;
clone->bi_private = tio;
@@ -1033,32 +1017,54 @@ struct clone_info {
unsigned short idx;
};
+static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
+{
+ bio->bi_sector = sector;
+ bio->bi_size = to_bytes(len);
+}
+
+static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
+{
+ bio->bi_idx = idx;
+ bio->bi_vcnt = idx + bv_count;
+ bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+}
+
+static void clone_bio_integrity(struct bio *bio, struct bio *clone,
+ unsigned short idx, unsigned len, unsigned offset,
+ unsigned trim)
+{
+ if (!bio_integrity(bio))
+ return;
+
+ bio_integrity_clone(clone, bio, GFP_NOIO);
+
+ if (trim)
+ bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
+}
+
/*
* Creates a little bio that just does part of a bvec.
*/
-static void split_bvec(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned short idx, unsigned int offset,
- unsigned int len, struct bio_set *bs)
+static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
+ sector_t sector, unsigned short idx,
+ unsigned offset, unsigned len)
{
struct bio *clone = &tio->clone;
struct bio_vec *bv = bio->bi_io_vec + idx;
*clone->bi_io_vec = *bv;
- clone->bi_sector = sector;
+ bio_setup_sector(clone, sector, len);
+
clone->bi_bdev = bio->bi_bdev;
clone->bi_rw = bio->bi_rw;
clone->bi_vcnt = 1;
- clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
clone->bi_io_vec->bv_len = clone->bi_size;
clone->bi_flags |= 1 << BIO_CLONED;
- if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
- bio_integrity_trim(clone,
- bio_sector_offset(bio, idx, offset), len);
- }
+ clone_bio_integrity(bio, clone, idx, len, offset, 1);
}
/*
@@ -1066,29 +1072,23 @@ static void split_bvec(struct dm_target_io *tio, struct bio *bio,
*/
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
sector_t sector, unsigned short idx,
- unsigned short bv_count, unsigned int len,
- struct bio_set *bs)
+ unsigned short bv_count, unsigned len)
{
struct bio *clone = &tio->clone;
+ unsigned trim = 0;
__bio_clone(clone, bio);
- clone->bi_sector = sector;
- clone->bi_idx = idx;
- clone->bi_vcnt = idx + bv_count;
- clone->bi_size = to_bytes(len);
- clone->bi_flags &= ~(1 << BIO_SEG_VALID);
-
- if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
-
- if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
- bio_integrity_trim(clone,
- bio_sector_offset(bio, idx, 0), len);
- }
+ bio_setup_sector(clone, sector, len);
+ bio_setup_bv(clone, idx, bv_count);
+
+ if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
+ trim = 1;
+ clone_bio_integrity(bio, clone, idx, len, 0, trim);
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
- struct dm_target *ti, int nr_iovecs)
+ struct dm_target *ti, int nr_iovecs,
+ unsigned target_bio_nr)
{
struct dm_target_io *tio;
struct bio *clone;
@@ -1099,96 +1099,104 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
tio->io = ci->io;
tio->ti = ti;
memset(&tio->info, 0, sizeof(tio->info));
- tio->target_request_nr = 0;
+ tio->target_bio_nr = target_bio_nr;
return tio;
}
-static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
- unsigned request_nr, sector_t len)
+static void __clone_and_map_simple_bio(struct clone_info *ci,
+ struct dm_target *ti,
+ unsigned target_bio_nr, sector_t len)
{
- struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs);
+ struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
struct bio *clone = &tio->clone;
- tio->target_request_nr = request_nr;
-
/*
* Discard requests require the bio's inline iovecs be initialized.
* ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
* and discard, so no need for concern about wasted bvec allocations.
*/
-
__bio_clone(clone, ci->bio);
- if (len) {
- clone->bi_sector = ci->sector;
- clone->bi_size = to_bytes(len);
- }
+ if (len)
+ bio_setup_sector(clone, ci->sector, len);
- __map_bio(ti, tio);
+ __map_bio(tio);
}
-static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
- unsigned num_requests, sector_t len)
+static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+ unsigned num_bios, sector_t len)
{
- unsigned request_nr;
+ unsigned target_bio_nr;
- for (request_nr = 0; request_nr < num_requests; request_nr++)
- __issue_target_request(ci, ti, request_nr, len);
+ for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
+ __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
}
-static int __clone_and_map_empty_flush(struct clone_info *ci)
+static int __send_empty_flush(struct clone_info *ci)
{
unsigned target_nr = 0;
struct dm_target *ti;
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
- __issue_target_requests(ci, ti, ti->num_flush_requests, 0);
+ __send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
return 0;
}
-/*
- * Perform all io with a single clone.
- */
-static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
+ sector_t sector, int nr_iovecs,
+ unsigned short idx, unsigned short bv_count,
+ unsigned offset, unsigned len,
+ unsigned split_bvec)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
+ unsigned target_bio_nr;
+ unsigned num_target_bios = 1;
- tio = alloc_tio(ci, ti, bio->bi_max_vecs);
- clone_bio(tio, bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx,
- ci->sector_count, ci->md->bs);
- __map_bio(ti, tio);
- ci->sector_count = 0;
+ /*
+ * Does the target want to receive duplicate copies of the bio?
+ */
+ if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
+ num_target_bios = ti->num_write_bios(ti, bio);
+
+ for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
+ tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
+ if (split_bvec)
+ clone_split_bio(tio, bio, sector, idx, offset, len);
+ else
+ clone_bio(tio, bio, sector, idx, bv_count, len);
+ __map_bio(tio);
+ }
}
-typedef unsigned (*get_num_requests_fn)(struct dm_target *ti);
+typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
-static unsigned get_num_discard_requests(struct dm_target *ti)
+static unsigned get_num_discard_bios(struct dm_target *ti)
{
- return ti->num_discard_requests;
+ return ti->num_discard_bios;
}
-static unsigned get_num_write_same_requests(struct dm_target *ti)
+static unsigned get_num_write_same_bios(struct dm_target *ti)
{
- return ti->num_write_same_requests;
+ return ti->num_write_same_bios;
}
typedef bool (*is_split_required_fn)(struct dm_target *ti);
static bool is_split_required_for_discard(struct dm_target *ti)
{
- return ti->split_discard_requests;
+ return ti->split_discard_bios;
}
-static int __clone_and_map_changing_extent_only(struct clone_info *ci,
- get_num_requests_fn get_num_requests,
- is_split_required_fn is_split_required)
+static int __send_changing_extent_only(struct clone_info *ci,
+ get_num_bios_fn get_num_bios,
+ is_split_required_fn is_split_required)
{
struct dm_target *ti;
sector_t len;
- unsigned num_requests;
+ unsigned num_bios;
do {
ti = dm_table_find_target(ci->map, ci->sector);
@@ -1201,8 +1209,8 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
* reconfiguration might also have changed that since the
* check was performed.
*/
- num_requests = get_num_requests ? get_num_requests(ti) : 0;
- if (!num_requests)
+ num_bios = get_num_bios ? get_num_bios(ti) : 0;
+ if (!num_bios)
return -EOPNOTSUPP;
if (is_split_required && !is_split_required(ti))
@@ -1210,7 +1218,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
else
len = min(ci->sector_count, max_io_len(ci->sector, ti));
- __issue_target_requests(ci, ti, num_requests, len);
+ __send_duplicate_bios(ci, ti, num_bios, len);
ci->sector += len;
} while (ci->sector_count -= len);
@@ -1218,108 +1226,129 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
return 0;
}
-static int __clone_and_map_discard(struct clone_info *ci)
+static int __send_discard(struct clone_info *ci)
{
- return __clone_and_map_changing_extent_only(ci, get_num_discard_requests,
- is_split_required_for_discard);
+ return __send_changing_extent_only(ci, get_num_discard_bios,
+ is_split_required_for_discard);
}
-static int __clone_and_map_write_same(struct clone_info *ci)
+static int __send_write_same(struct clone_info *ci)
{
- return __clone_and_map_changing_extent_only(ci, get_num_write_same_requests, NULL);
+ return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
}
-static int __clone_and_map(struct clone_info *ci)
+/*
+ * Find maximum number of sectors / bvecs we can process with a single bio.
+ */
+static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
{
struct bio *bio = ci->bio;
- struct dm_target *ti;
- sector_t len = 0, max;
- struct dm_target_io *tio;
+ sector_t bv_len, total_len = 0;
- if (unlikely(bio->bi_rw & REQ_DISCARD))
- return __clone_and_map_discard(ci);
- else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
- return __clone_and_map_write_same(ci);
+ for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
+ bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
-
- max = max_io_len(ci->sector, ti);
+ if (bv_len > max)
+ break;
- if (ci->sector_count <= max) {
- /*
- * Optimise for the simple case where we can do all of
- * the remaining io with a single clone.
- */
- __clone_and_map_simple(ci, ti);
+ max -= bv_len;
+ total_len += bv_len;
+ }
- } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
- /*
- * There are some bvecs that don't span targets.
- * Do as many of these as possible.
- */
- int i;
- sector_t remaining = max;
- sector_t bv_len;
+ return total_len;
+}
- for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
- bv_len = to_sector(bio->bi_io_vec[i].bv_len);
+static int __split_bvec_across_targets(struct clone_info *ci,
+ struct dm_target *ti, sector_t max)
+{
+ struct bio *bio = ci->bio;
+ struct bio_vec *bv = bio->bi_io_vec + ci->idx;
+ sector_t remaining = to_sector(bv->bv_len);
+ unsigned offset = 0;
+ sector_t len;
- if (bv_len > remaining)
- break;
+ do {
+ if (offset) {
+ ti = dm_table_find_target(ci->map, ci->sector);
+ if (!dm_target_is_valid(ti))
+ return -EIO;
- remaining -= bv_len;
- len += bv_len;
+ max = max_io_len(ci->sector, ti);
}
- tio = alloc_tio(ci, ti, bio->bi_max_vecs);
- clone_bio(tio, bio, ci->sector, ci->idx, i - ci->idx, len,
- ci->md->bs);
- __map_bio(ti, tio);
+ len = min(remaining, max);
+
+ __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
+ bv->bv_offset + offset, len, 1);
ci->sector += len;
ci->sector_count -= len;
- ci->idx = i;
+ offset += to_bytes(len);
+ } while (remaining -= len);
- } else {
- /*
- * Handle a bvec that must be split between two or more targets.
- */
- struct bio_vec *bv = bio->bi_io_vec + ci->idx;
- sector_t remaining = to_sector(bv->bv_len);
- unsigned int offset = 0;
+ ci->idx++;
- do {
- if (offset) {
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
+ return 0;
+}
- max = max_io_len(ci->sector, ti);
- }
+/*
+ * Select the correct strategy for processing a non-flush bio.
+ */
+static int __split_and_process_non_flush(struct clone_info *ci)
+{
+ struct bio *bio = ci->bio;
+ struct dm_target *ti;
+ sector_t len, max;
+ int idx;
- len = min(remaining, max);
+ if (unlikely(bio->bi_rw & REQ_DISCARD))
+ return __send_discard(ci);
+ else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+ return __send_write_same(ci);
- tio = alloc_tio(ci, ti, 1);
- split_bvec(tio, bio, ci->sector, ci->idx,
- bv->bv_offset + offset, len, ci->md->bs);
+ ti = dm_table_find_target(ci->map, ci->sector);
+ if (!dm_target_is_valid(ti))
+ return -EIO;
- __map_bio(ti, tio);
+ max = max_io_len(ci->sector, ti);
+
+ /*
+ * Optimise for the simple case where we can do all of
+ * the remaining io with a single clone.
+ */
+ if (ci->sector_count <= max) {
+ __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
+ ci->idx, bio->bi_vcnt - ci->idx, 0,
+ ci->sector_count, 0);
+ ci->sector_count = 0;
+ return 0;
+ }
+
+ /*
+ * There are some bvecs that don't span targets.
+ * Do as many of these as possible.
+ */
+ if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
+ len = __len_within_target(ci, max, &idx);
- ci->sector += len;
- ci->sector_count -= len;
- offset += to_bytes(len);
- } while (remaining -= len);
+ __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
+ ci->idx, idx - ci->idx, 0, len, 0);
- ci->idx++;
+ ci->sector += len;
+ ci->sector_count -= len;
+ ci->idx = idx;
+
+ return 0;
}
- return 0;
+ /*
+ * Handle a bvec that must be split between two or more targets.
+ */
+ return __split_bvec_across_targets(ci, ti, max);
}
/*
- * Split the bio into several clones and submit it to targets.
+ * Entry point to split a bio into clones and submit them to the targets.
*/
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
@@ -1343,16 +1372,17 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
ci.idx = bio->bi_idx;
start_io_acct(ci.io);
+
if (bio->bi_rw & REQ_FLUSH) {
ci.bio = &ci.md->flush_bio;
ci.sector_count = 0;
- error = __clone_and_map_empty_flush(&ci);
+ error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
while (ci.sector_count && !error)
- error = __clone_and_map(&ci);
+ error = __split_and_process_non_flush(&ci);
}
/* drop the extra reference count */
@@ -1756,62 +1786,38 @@ static void free_minor(int minor)
*/
static int specific_minor(int minor)
{
- int r, m;
+ int r;
if (minor >= (1 << MINORBITS))
return -EINVAL;
- r = idr_pre_get(&_minor_idr, GFP_KERNEL);
- if (!r)
- return -ENOMEM;
-
+ idr_preload(GFP_KERNEL);
spin_lock(&_minor_lock);
- if (idr_find(&_minor_idr, minor)) {
- r = -EBUSY;
- goto out;
- }
+ r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
- r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
- if (r)
- goto out;
-
- if (m != minor) {
- idr_remove(&_minor_idr, m);
- r = -EBUSY;
- goto out;
- }
-
-out:
spin_unlock(&_minor_lock);
- return r;
+ idr_preload_end();
+ if (r < 0)
+ return r == -ENOSPC ? -EBUSY : r;
+ return 0;
}
static int next_free_minor(int *minor)
{
- int r, m;
-
- r = idr_pre_get(&_minor_idr, GFP_KERNEL);
- if (!r)
- return -ENOMEM;
+ int r;
+ idr_preload(GFP_KERNEL);
spin_lock(&_minor_lock);
- r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
- if (r)
- goto out;
+ r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
- if (m >= (1 << MINORBITS)) {
- idr_remove(&_minor_idr, m);
- r = -ENOSPC;
- goto out;
- }
-
- *minor = m;
-
-out:
spin_unlock(&_minor_lock);
- return r;
+ idr_preload_end();
+ if (r < 0)
+ return r;
+ *minor = r;
+ return 0;
}
static const struct block_device_operations dm_blk_dops;
@@ -1949,8 +1955,6 @@ static void free_dev(struct mapped_device *md)
unlock_fs(md);
bdput(md->bdev);
destroy_workqueue(md->wq);
- if (md->tio_pool)
- mempool_destroy(md->tio_pool);
if (md->io_pool)
mempool_destroy(md->io_pool);
if (md->bs)
@@ -1973,24 +1977,33 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) {
- /*
- * The md already has necessary mempools. Reload just the
- * bioset because front_pad may have changed because
- * a different table was loaded.
- */
- bioset_free(md->bs);
- md->bs = p->bs;
- p->bs = NULL;
+ if (md->io_pool && md->bs) {
+ /* The md already has necessary mempools. */
+ if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
+ /*
+ * Reload bioset because front_pad may have changed
+ * because a different table was loaded.
+ */
+ bioset_free(md->bs);
+ md->bs = p->bs;
+ p->bs = NULL;
+ } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
+ /*
+ * There's no need to reload with request-based dm
+ * because the size of front_pad doesn't change.
+ * Note for future: If you are to reload bioset,
+ * prep-ed requests in the queue may refer
+ * to bio from the old bioset, so you must walk
+ * through the queue to unprep.
+ */
+ }
goto out;
}
- BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
+ BUG_ON(!p || md->io_pool || md->bs);
md->io_pool = p->io_pool;
p->io_pool = NULL;
- md->tio_pool = p->tio_pool;
- p->tio_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
@@ -2421,7 +2434,7 @@ static void dm_queue_flush(struct mapped_device *md)
*/
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
- struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
+ struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
struct queue_limits limits;
int r;
@@ -2444,10 +2457,12 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
dm_table_put(live_map);
}
- r = dm_calculate_queue_limits(table, &limits);
- if (r) {
- map = ERR_PTR(r);
- goto out;
+ if (!live_map) {
+ r = dm_calculate_queue_limits(table, &limits);
+ if (r) {
+ map = ERR_PTR(r);
+ goto out;
+ }
}
map = __bind(md, table, &limits);
@@ -2745,52 +2760,42 @@ EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
- struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
- unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
+ struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
+ struct kmem_cache *cachep;
+ unsigned int pool_size;
+ unsigned int front_pad;
if (!pools)
return NULL;
- per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
+ if (type == DM_TYPE_BIO_BASED) {
+ cachep = _io_cache;
+ pool_size = 16;
+ front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+ } else if (type == DM_TYPE_REQUEST_BASED) {
+ cachep = _rq_tio_cache;
+ pool_size = MIN_IOS;
+ front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
+ /* per_bio_data_size is not used. See __bind_mempools(). */
+ WARN_ON(per_bio_data_size != 0);
+ } else
+ goto out;
- pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
- mempool_create_slab_pool(MIN_IOS, _io_cache) :
- mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
+ pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
if (!pools->io_pool)
- goto free_pools_and_out;
-
- pools->tio_pool = NULL;
- if (type == DM_TYPE_REQUEST_BASED) {
- pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
- if (!pools->tio_pool)
- goto free_io_pool_and_out;
- }
+ goto out;
- pools->bs = (type == DM_TYPE_BIO_BASED) ?
- bioset_create(pool_size,
- per_bio_data_size + offsetof(struct dm_target_io, clone)) :
- bioset_create(pool_size,
- offsetof(struct dm_rq_clone_bio_info, clone));
+ pools->bs = bioset_create(pool_size, front_pad);
if (!pools->bs)
- goto free_tio_pool_and_out;
+ goto out;
if (integrity && bioset_integrity_create(pools->bs, pool_size))
- goto free_bioset_and_out;
+ goto out;
return pools;
-free_bioset_and_out:
- bioset_free(pools->bs);
-
-free_tio_pool_and_out:
- if (pools->tio_pool)
- mempool_destroy(pools->tio_pool);
-
-free_io_pool_and_out:
- mempool_destroy(pools->io_pool);
-
-free_pools_and_out:
- kfree(pools);
+out:
+ dm_free_md_mempools(pools);
return NULL;
}
@@ -2803,9 +2808,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
if (pools->io_pool)
mempool_destroy(pools->io_pool);
- if (pools->tio_pool)
- mempool_destroy(pools->tio_pool);
-
if (pools->bs)
bioset_free(pools->bs);
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig
index ceb359050a59..19b268795415 100644
--- a/drivers/md/persistent-data/Kconfig
+++ b/drivers/md/persistent-data/Kconfig
@@ -1,6 +1,6 @@
config DM_PERSISTENT_DATA
tristate
- depends on BLK_DEV_DM && EXPERIMENTAL
+ depends on BLK_DEV_DM
select LIBCRC32C
select DM_BUFIO
---help---
diff --git a/drivers/md/persistent-data/Makefile b/drivers/md/persistent-data/Makefile
index d8e7cb767c1e..ff528792c358 100644
--- a/drivers/md/persistent-data/Makefile
+++ b/drivers/md/persistent-data/Makefile
@@ -1,5 +1,7 @@
obj-$(CONFIG_DM_PERSISTENT_DATA) += dm-persistent-data.o
dm-persistent-data-objs := \
+ dm-array.o \
+ dm-bitset.o \
dm-block-manager.o \
dm-space-map-common.o \
dm-space-map-disk.o \
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
new file mode 100644
index 000000000000..172147eb1d40
--- /dev/null
+++ b/drivers/md/persistent-data/dm-array.c
@@ -0,0 +1,808 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-array.h"
+#include "dm-space-map.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "array"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The array is implemented as a fully populated btree, which points to
+ * blocks that contain the packed values. This is more space efficient
+ * than just using a btree since we don't store 1 key per value.
+ */
+struct array_block {
+ __le32 csum;
+ __le32 max_entries;
+ __le32 nr_entries;
+ __le32 value_size;
+ __le64 blocknr; /* Block this node is supposed to live in. */
+} __packed;
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Validator methods. As usual we calculate a checksum, and also write the
+ * block location into the header (paranoia about ssds remapping areas by
+ * mistake).
+ */
+#define CSUM_XOR 595846735
+
+static void array_block_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t size_of_block)
+{
+ struct array_block *bh_le = dm_block_data(b);
+
+ bh_le->blocknr = cpu_to_le64(dm_block_location(b));
+ bh_le->csum = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
+ size_of_block - sizeof(__le32),
+ CSUM_XOR));
+}
+
+static int array_block_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t size_of_block)
+{
+ struct array_block *bh_le = dm_block_data(b);
+ __le32 csum_disk;
+
+ if (dm_block_location(b) != le64_to_cpu(bh_le->blocknr)) {
+ DMERR_LIMIT("array_block_check failed: blocknr %llu != wanted %llu",
+ (unsigned long long) le64_to_cpu(bh_le->blocknr),
+ (unsigned long long) dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ csum_disk = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
+ size_of_block - sizeof(__le32),
+ CSUM_XOR));
+ if (csum_disk != bh_le->csum) {
+ DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
+ (unsigned) le32_to_cpu(csum_disk),
+ (unsigned) le32_to_cpu(bh_le->csum));
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static struct dm_block_validator array_validator = {
+ .name = "array",
+ .prepare_for_write = array_block_prepare_for_write,
+ .check = array_block_check
+};
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Functions for manipulating the array blocks.
+ */
+
+/*
+ * Returns a pointer to a value within an array block.
+ *
+ * index - The index into _this_ specific block.
+ */
+static void *element_at(struct dm_array_info *info, struct array_block *ab,
+ unsigned index)
+{
+ unsigned char *entry = (unsigned char *) (ab + 1);
+
+ entry += index * info->value_type.size;
+
+ return entry;
+}
+
+/*
+ * Utility function that calls one of the value_type methods on every value
+ * in an array block.
+ */
+static void on_entries(struct dm_array_info *info, struct array_block *ab,
+ void (*fn)(void *, const void *))
+{
+ unsigned i, nr_entries = le32_to_cpu(ab->nr_entries);
+
+ for (i = 0; i < nr_entries; i++)
+ fn(info->value_type.context, element_at(info, ab, i));
+}
+
+/*
+ * Increment every value in an array block.
+ */
+static void inc_ablock_entries(struct dm_array_info *info, struct array_block *ab)
+{
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ if (vt->inc)
+ on_entries(info, ab, vt->inc);
+}
+
+/*
+ * Decrement every value in an array block.
+ */
+static void dec_ablock_entries(struct dm_array_info *info, struct array_block *ab)
+{
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ if (vt->dec)
+ on_entries(info, ab, vt->dec);
+}
+
+/*
+ * Each array block can hold this many values.
+ */
+static uint32_t calc_max_entries(size_t value_size, size_t size_of_block)
+{
+ return (size_of_block - sizeof(struct array_block)) / value_size;
+}
+
+/*
+ * Allocate a new array block. The caller will need to unlock block.
+ */
+static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
+ uint32_t max_entries,
+ struct dm_block **block, struct array_block **ab)
+{
+ int r;
+
+ r = dm_tm_new_block(info->btree_info.tm, &array_validator, block);
+ if (r)
+ return r;
+
+ (*ab) = dm_block_data(*block);
+ (*ab)->max_entries = cpu_to_le32(max_entries);
+ (*ab)->nr_entries = cpu_to_le32(0);
+ (*ab)->value_size = cpu_to_le32(info->value_type.size);
+
+ return 0;
+}
+
+/*
+ * Pad an array block out with a particular value. Every instance will
+ * cause an increment of the value_type. new_nr must always be more than
+ * the current number of entries.
+ */
+static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
+ const void *value, unsigned new_nr)
+{
+ unsigned i;
+ uint32_t nr_entries;
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
+ BUG_ON(new_nr < le32_to_cpu(ab->nr_entries));
+
+ nr_entries = le32_to_cpu(ab->nr_entries);
+ for (i = nr_entries; i < new_nr; i++) {
+ if (vt->inc)
+ vt->inc(vt->context, value);
+ memcpy(element_at(info, ab, i), value, vt->size);
+ }
+ ab->nr_entries = cpu_to_le32(new_nr);
+}
+
+/*
+ * Remove some entries from the back of an array block. Every value
+ * removed will be decremented. new_nr must be <= the current number of
+ * entries.
+ */
+static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
+ unsigned new_nr)
+{
+ unsigned i;
+ uint32_t nr_entries;
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
+ BUG_ON(new_nr > le32_to_cpu(ab->nr_entries));
+
+ nr_entries = le32_to_cpu(ab->nr_entries);
+ for (i = nr_entries; i > new_nr; i--)
+ if (vt->dec)
+ vt->dec(vt->context, element_at(info, ab, i - 1));
+ ab->nr_entries = cpu_to_le32(new_nr);
+}
+
+/*
+ * Read locks a block, and coerces it to an array block. The caller must
+ * unlock 'block' when finished.
+ */
+static int get_ablock(struct dm_array_info *info, dm_block_t b,
+ struct dm_block **block, struct array_block **ab)
+{
+ int r;
+
+ r = dm_tm_read_lock(info->btree_info.tm, b, &array_validator, block);
+ if (r)
+ return r;
+
+ *ab = dm_block_data(*block);
+ return 0;
+}
+
+/*
+ * Unlocks an array block.
+ */
+static int unlock_ablock(struct dm_array_info *info, struct dm_block *block)
+{
+ return dm_tm_unlock(info->btree_info.tm, block);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Btree manipulation.
+ */
+
+/*
+ * Looks up an array block in the btree, and then read locks it.
+ *
+ * index is the index of the array_block (ie. the array index
+ * / max_entries).
+ */
+static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
+ unsigned index, struct dm_block **block,
+ struct array_block **ab)
+{
+ int r;
+ uint64_t key = index;
+ __le64 block_le;
+
+ r = dm_btree_lookup(&info->btree_info, root, &key, &block_le);
+ if (r)
+ return r;
+
+ return get_ablock(info, le64_to_cpu(block_le), block, ab);
+}
+
+/*
+ * Insert an array block into the btree. The block is _not_ unlocked.
+ */
+static int insert_ablock(struct dm_array_info *info, uint64_t index,
+ struct dm_block *block, dm_block_t *root)
+{
+ __le64 block_le = cpu_to_le64(dm_block_location(block));
+
+ __dm_bless_for_disk(block_le);
+ return dm_btree_insert(&info->btree_info, *root, &index, &block_le, root);
+}
+
+/*
+ * Looks up an array block in the btree. Then shadows it, and updates the
+ * btree to point to this new shadow. 'root' is an input/output parameter
+ * for both the current root block, and the new one.
+ */
+static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
+ unsigned index, struct dm_block **block,
+ struct array_block **ab)
+{
+ int r, inc;
+ uint64_t key = index;
+ dm_block_t b;
+ __le64 block_le;
+
+ /*
+ * lookup
+ */
+ r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
+ if (r)
+ return r;
+ b = le64_to_cpu(block_le);
+
+ /*
+ * shadow
+ */
+ r = dm_tm_shadow_block(info->btree_info.tm, b,
+ &array_validator, block, &inc);
+ if (r)
+ return r;
+
+ *ab = dm_block_data(*block);
+ if (inc)
+ inc_ablock_entries(info, *ab);
+
+ /*
+ * Reinsert.
+ *
+ * The shadow op will often be a noop. Only insert if it really
+ * copied data.
+ */
+ if (dm_block_location(*block) != b)
+ r = insert_ablock(info, index, *block, root);
+
+ return r;
+}
+
+/*
+ * Allocate a new array block and fill it with some values.
+ */
+static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
+ uint32_t max_entries,
+ unsigned block_index, uint32_t nr,
+ const void *value, dm_block_t *root)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+
+ r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
+ if (r)
+ return r;
+
+ fill_ablock(info, ab, value, nr);
+ r = insert_ablock(info, block_index, block, root);
+ unlock_ablock(info, block);
+
+ return r;
+}
+
+static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
+ unsigned begin_block, unsigned end_block,
+ unsigned max_entries, const void *value,
+ dm_block_t *root)
+{
+ int r = 0;
+
+ for (; !r && begin_block != end_block; begin_block++)
+ r = insert_new_ablock(info, size_of_block, max_entries, begin_block, max_entries, value, root);
+
+ return r;
+}
+
+/*
+ * There are a bunch of functions involved with resizing an array. This
+ * structure holds information that is commonly needed by them. Purely here
+ * to reduce parameter count.
+ */
+struct resize {
+ /*
+ * Describes the array.
+ */
+ struct dm_array_info *info;
+
+ /*
+ * The current root of the array. This gets updated.
+ */
+ dm_block_t root;
+
+ /*
+ * Metadata block size. Used to calculate the nr entries in an
+ * array block.
+ */
+ size_t size_of_block;
+
+ /*
+ * Maximum nr entries in an array block.
+ */
+ unsigned max_entries;
+
+ /*
+ * nr of completely full blocks in the array.
+ *
+ * 'old' refers to before the resize, 'new' after.
+ */
+ unsigned old_nr_full_blocks, new_nr_full_blocks;
+
+ /*
+ * Number of entries in the final block. 0 iff only full blocks in
+ * the array.
+ */
+ unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;
+
+ /*
+ * The default value used when growing the array.
+ */
+ const void *value;
+};
+
+/*
+ * Removes a consecutive set of array blocks from the btree. The values
+ * in the removed blocks are decremented as a side effect of the btree remove.
+ *
+ * begin_index - the index of the first array block to remove.
+ * end_index - the one-past-the-end value. ie. this block is not removed.
+ */
+static int drop_blocks(struct resize *resize, unsigned begin_index,
+ unsigned end_index)
+{
+ int r;
+
+ while (begin_index != end_index) {
+ uint64_t key = begin_index++;
+ r = dm_btree_remove(&resize->info->btree_info, resize->root,
+ &key, &resize->root);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+/*
+ * Calculates how many blocks are needed for the array.
+ */
+static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
+ unsigned nr_entries_in_last_block)
+{
+ return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
+}
+
+/*
+ * Shrink an array.
+ */
+static int shrink(struct resize *resize)
+{
+ int r;
+ unsigned begin, end;
+ struct dm_block *block;
+ struct array_block *ab;
+
+ /*
+ * Lose some blocks from the back?
+ */
+ if (resize->new_nr_full_blocks < resize->old_nr_full_blocks) {
+ begin = total_nr_blocks_needed(resize->new_nr_full_blocks,
+ resize->new_nr_entries_in_last_block);
+ end = total_nr_blocks_needed(resize->old_nr_full_blocks,
+ resize->old_nr_entries_in_last_block);
+
+ r = drop_blocks(resize, begin, end);
+ if (r)
+ return r;
+ }
+
+ /*
+ * Trim the new tail block
+ */
+ if (resize->new_nr_entries_in_last_block) {
+ r = shadow_ablock(resize->info, &resize->root,
+ resize->new_nr_full_blocks, &block, &ab);
+ if (r)
+ return r;
+
+ trim_ablock(resize->info, ab, resize->new_nr_entries_in_last_block);
+ unlock_ablock(resize->info, block);
+ }
+
+ return 0;
+}
+
+/*
+ * Grow an array.
+ */
+static int grow_extend_tail_block(struct resize *resize, uint32_t new_nr_entries)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+
+ r = shadow_ablock(resize->info, &resize->root,
+ resize->old_nr_full_blocks, &block, &ab);
+ if (r)
+ return r;
+
+ fill_ablock(resize->info, ab, resize->value, new_nr_entries);
+ unlock_ablock(resize->info, block);
+
+ return r;
+}
+
+static int grow_add_tail_block(struct resize *resize)
+{
+ return insert_new_ablock(resize->info, resize->size_of_block,
+ resize->max_entries,
+ resize->new_nr_full_blocks,
+ resize->new_nr_entries_in_last_block,
+ resize->value, &resize->root);
+}
+
+static int grow_needs_more_blocks(struct resize *resize)
+{
+ int r;
+
+ if (resize->old_nr_entries_in_last_block > 0) {
+ r = grow_extend_tail_block(resize, resize->max_entries);
+ if (r)
+ return r;
+ }
+
+ r = insert_full_ablocks(resize->info, resize->size_of_block,
+ resize->old_nr_full_blocks,
+ resize->new_nr_full_blocks,
+ resize->max_entries, resize->value,
+ &resize->root);
+ if (r)
+ return r;
+
+ if (resize->new_nr_entries_in_last_block)
+ r = grow_add_tail_block(resize);
+
+ return r;
+}
+
+static int grow(struct resize *resize)
+{
+ if (resize->new_nr_full_blocks > resize->old_nr_full_blocks)
+ return grow_needs_more_blocks(resize);
+
+ else if (resize->old_nr_entries_in_last_block)
+ return grow_extend_tail_block(resize, resize->new_nr_entries_in_last_block);
+
+ else
+ return grow_add_tail_block(resize);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * These are the value_type functions for the btree elements, which point
+ * to array blocks.
+ */
+static void block_inc(void *context, const void *value)
+{
+ __le64 block_le;
+ struct dm_array_info *info = context;
+
+ memcpy(&block_le, value, sizeof(block_le));
+ dm_tm_inc(info->btree_info.tm, le64_to_cpu(block_le));
+}
+
+static void block_dec(void *context, const void *value)
+{
+ int r;
+ uint64_t b;
+ __le64 block_le;
+ uint32_t ref_count;
+ struct dm_block *block;
+ struct array_block *ab;
+ struct dm_array_info *info = context;
+
+ memcpy(&block_le, value, sizeof(block_le));
+ b = le64_to_cpu(block_le);
+
+ r = dm_tm_ref(info->btree_info.tm, b, &ref_count);
+ if (r) {
+ DMERR_LIMIT("couldn't get reference count for block %llu",
+ (unsigned long long) b);
+ return;
+ }
+
+ if (ref_count == 1) {
+ /*
+ * We're about to drop the last reference to this ablock.
+ * So we need to decrement the ref count of the contents.
+ */
+ r = get_ablock(info, b, &block, &ab);
+ if (r) {
+ DMERR_LIMIT("couldn't get array block %llu",
+ (unsigned long long) b);
+ return;
+ }
+
+ dec_ablock_entries(info, ab);
+ unlock_ablock(info, block);
+ }
+
+ dm_tm_dec(info->btree_info.tm, b);
+}
+
+static int block_equal(void *context, const void *value1, const void *value2)
+{
+ return !memcmp(value1, value2, sizeof(__le64));
+}
+
+/*----------------------------------------------------------------*/
+
+void dm_array_info_init(struct dm_array_info *info,
+ struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt)
+{
+ struct dm_btree_value_type *bvt = &info->btree_info.value_type;
+
+ memcpy(&info->value_type, vt, sizeof(info->value_type));
+ info->btree_info.tm = tm;
+ info->btree_info.levels = 1;
+
+ bvt->context = info;
+ bvt->size = sizeof(__le64);
+ bvt->inc = block_inc;
+ bvt->dec = block_dec;
+ bvt->equal = block_equal;
+}
+EXPORT_SYMBOL_GPL(dm_array_info_init);
+
+int dm_array_empty(struct dm_array_info *info, dm_block_t *root)
+{
+ return dm_btree_empty(&info->btree_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_array_empty);
+
+static int array_resize(struct dm_array_info *info, dm_block_t root,
+ uint32_t old_size, uint32_t new_size,
+ const void *value, dm_block_t *new_root)
+{
+ int r;
+ struct resize resize;
+
+ if (old_size == new_size)
+ return 0;
+
+ resize.info = info;
+ resize.root = root;
+ resize.size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ resize.max_entries = calc_max_entries(info->value_type.size,
+ resize.size_of_block);
+
+ resize.old_nr_full_blocks = old_size / resize.max_entries;
+ resize.old_nr_entries_in_last_block = old_size % resize.max_entries;
+ resize.new_nr_full_blocks = new_size / resize.max_entries;
+ resize.new_nr_entries_in_last_block = new_size % resize.max_entries;
+ resize.value = value;
+
+ r = ((new_size > old_size) ? grow : shrink)(&resize);
+ if (r)
+ return r;
+
+ *new_root = resize.root;
+ return 0;
+}
+
+int dm_array_resize(struct dm_array_info *info, dm_block_t root,
+ uint32_t old_size, uint32_t new_size,
+ const void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value)
+{
+ int r = array_resize(info, root, old_size, new_size, value, new_root);
+ __dm_unbless_for_disk(value);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_resize);
+
+int dm_array_del(struct dm_array_info *info, dm_block_t root)
+{
+ return dm_btree_del(&info->btree_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_array_del);
+
+int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, void *value_le)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+ size_t size_of_block;
+ unsigned entry, max_entries;
+
+ size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ max_entries = calc_max_entries(info->value_type.size, size_of_block);
+
+ r = lookup_ablock(info, root, index / max_entries, &block, &ab);
+ if (r)
+ return r;
+
+ entry = index % max_entries;
+ if (entry >= le32_to_cpu(ab->nr_entries))
+ r = -ENODATA;
+ else
+ memcpy(value_le, element_at(info, ab, entry),
+ info->value_type.size);
+
+ unlock_ablock(info, block);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_get_value);
+
+static int array_set_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, const void *value, dm_block_t *new_root)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+ size_t size_of_block;
+ unsigned max_entries;
+ unsigned entry;
+ void *old_value;
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ max_entries = calc_max_entries(info->value_type.size, size_of_block);
+
+ r = shadow_ablock(info, &root, index / max_entries, &block, &ab);
+ if (r)
+ return r;
+ *new_root = root;
+
+ entry = index % max_entries;
+ if (entry >= le32_to_cpu(ab->nr_entries)) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ old_value = element_at(info, ab, entry);
+ if (vt->dec &&
+ (!vt->equal || !vt->equal(vt->context, old_value, value))) {
+ vt->dec(vt->context, old_value);
+ if (vt->inc)
+ vt->inc(vt->context, value);
+ }
+
+ memcpy(old_value, value, info->value_type.size);
+
+out:
+ unlock_ablock(info, block);
+ return r;
+}
+
+int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, const void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value)
+{
+ int r;
+
+ r = array_set_value(info, root, index, value, new_root);
+ __dm_unbless_for_disk(value);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_set_value);
+
+struct walk_info {
+ struct dm_array_info *info;
+ int (*fn)(void *context, uint64_t key, void *leaf);
+ void *context;
+};
+
+static int walk_ablock(void *context, uint64_t *keys, void *leaf)
+{
+ struct walk_info *wi = context;
+
+ int r;
+ unsigned i;
+ __le64 block_le;
+ unsigned nr_entries, max_entries;
+ struct dm_block *block;
+ struct array_block *ab;
+
+ memcpy(&block_le, leaf, sizeof(block_le));
+ r = get_ablock(wi->info, le64_to_cpu(block_le), &block, &ab);
+ if (r)
+ return r;
+
+ max_entries = le32_to_cpu(ab->max_entries);
+ nr_entries = le32_to_cpu(ab->nr_entries);
+ for (i = 0; i < nr_entries; i++) {
+ r = wi->fn(wi->context, keys[0] * max_entries + i,
+ element_at(wi->info, ab, i));
+
+ if (r)
+ break;
+ }
+
+ unlock_ablock(wi->info, block);
+ return r;
+}
+
+int dm_array_walk(struct dm_array_info *info, dm_block_t root,
+ int (*fn)(void *, uint64_t key, void *leaf),
+ void *context)
+{
+ struct walk_info wi;
+
+ wi.info = info;
+ wi.fn = fn;
+ wi.context = context;
+
+ return dm_btree_walk(&info->btree_info, root, walk_ablock, &wi);
+}
+EXPORT_SYMBOL_GPL(dm_array_walk);
+
+/*----------------------------------------------------------------*/
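As a hedged aside (not part of the patch), the arithmetic that lookup_ablock() and dm_array_get_value() above rely on can be shown in isolation: a logical array index is split into a btree key and an offset inside one array block. The helper name, the 4096 byte block size and the __le64 value size below are assumptions made purely for this illustration.

/*
 * Hypothetical helper, for illustration only: splits a logical index the
 * same way dm_array_get_value() does. Assumes 4096 byte metadata blocks
 * and 8 byte (__le64) values.
 */
static void example_index_split(uint32_t index)
{
	size_t size_of_block = 4096;
	/* Same formula as calc_max_entries(): (4096 - 24) / 8 = 509. */
	uint32_t max_entries = (size_of_block - sizeof(struct array_block)) /
			       sizeof(__le64);
	uint32_t block_index = index / max_entries;	/* key into the btree */
	uint32_t entry = index % max_entries;		/* offset within that ablock */

	/* e.g. index 1000 -> block_index 1, entry 491 */
	(void) block_index;
	(void) entry;
}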
diff --git a/drivers/md/persistent-data/dm-array.h b/drivers/md/persistent-data/dm-array.h
new file mode 100644
index 000000000000..ea177d6fa58f
--- /dev/null
+++ b/drivers/md/persistent-data/dm-array.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+#ifndef _LINUX_DM_ARRAY_H
+#define _LINUX_DM_ARRAY_H
+
+#include "dm-btree.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The dm-array is a persistent version of an array. It packs the data
+ * more efficiently than a btree, which results in less disk space use,
+ * and a performance boost. The element get and set operations are still
+ * O(ln(n)), but with a much smaller constant.
+ *
+ * The value type structure is reused from the btree type to support proper
+ * reference counting of values.
+ *
+ * The arrays implicitly know their length, and bounds are checked for
+ * lookups and updates.  It doesn't store this in an accessible place
+ * because it would waste a whole metadata block. Make sure you store the
+ * size along with the array root in your encompassing data.
+ *
+ * Array entries are indexed via an unsigned integer starting from zero.
+ * Arrays are not sparse; if you resize an array to have 'n' entries then
+ * 'n - 1' will be the last valid index.
+ *
+ * Typical use:
+ *
+ * a) initialise a dm_array_info structure. This describes the array
+ * values and ties it into a specific transaction manager. It holds no
+ * instance data; the same info can be used for many similar arrays if
+ * you wish.
+ *
+ * b) Get yourself a root. The root is the index of a block of data on the
+ * disk that holds a particular instance of an array. You may have a
+ *    pre-existing root in your metadata that you wish to use, or you may
+ * want to create a brand new, empty array with dm_array_empty().
+ *
+ * Like the other data structures in this library, dm_array objects are
+ * immutable between transactions. Update functions will return you the
+ * root for a _new_ array. If you've incremented the old root, via
+ * dm_tm_inc(), before calling the update function you may continue to use
+ * it in parallel with the new root.
+ *
+ * c) resize an array with dm_array_resize().
+ *
+ * d) Get a value from the array with dm_array_get_value().
+ *
+ * e) Set a value in the array with dm_array_set_value().
+ *
+ * f) Walk an array of values in index order with dm_array_walk(). More
+ * efficient than making many calls to dm_array_get_value().
+ *
+ * g) Destroy the array with dm_array_del(). This tells the transaction
+ * manager that you're no longer using this data structure so it can
+ *    recycle its blocks.  (dm_array_dec() would be a better name for it,
+ * but del is in keeping with dm_btree_del()).
+ */
+
+/*
+ * Describes an array. Don't initialise this structure yourself, use the
+ * init function below.
+ */
+struct dm_array_info {
+ struct dm_transaction_manager *tm;
+ struct dm_btree_value_type value_type;
+ struct dm_btree_info btree_info;
+};
+
+/*
+ * Sets up a dm_array_info structure. You don't need to do anything with
+ * this structure when you finish using it.
+ *
+ * info - the structure being filled in.
+ * tm - the transaction manager that should supervise this structure.
+ * vt - describes the leaf values.
+ */
+void dm_array_info_init(struct dm_array_info *info,
+ struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt);
+
+/*
+ * Create an empty, zero length array.
+ *
+ * info - describes the array
+ * root - on success this will be filled out with the root block
+ */
+int dm_array_empty(struct dm_array_info *info, dm_block_t *root);
+
+/*
+ * Resizes the array.
+ *
+ * info - describes the array
+ * root - the root block of the array on disk
+ * old_size - the caller is responsible for remembering the size of
+ * the array
+ * new_size - can be bigger or smaller than old_size
+ * value - if we're growing the array the new entries will have this value
+ * new_root - on success, points to the new root block
+ *
+ * If growing, the inc function for 'value' will be called the appropriate
+ * number of times. So if the caller is holding a reference they may want
+ * to drop it.
+ */
+int dm_array_resize(struct dm_array_info *info, dm_block_t root,
+ uint32_t old_size, uint32_t new_size,
+ const void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value);
+
+/*
+ * Frees a whole array. The value_type's decrement operation will be called
+ * for all values in the array.
+ */
+int dm_array_del(struct dm_array_info *info, dm_block_t root);
+
+/*
+ * Lookup a value in the array
+ *
+ * info - describes the array
+ * root - root block of the array
+ * index - array index
+ * value - the value to be read. Will be in on-disk format of course.
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, void *value);
+
+/*
+ * Set an entry in the array.
+ *
+ * info - describes the array
+ * root - root block of the array
+ * index - array index
+ * value - value to be written to disk. Make sure you confirm the value is
+ *	  in on-disk format with __dm_bless_for_disk() before calling.
+ * new_root - the new root block
+ *
+ * The old value being overwritten will be decremented, the new value
+ * incremented.
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, const void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value);
+
+/*
+ * Walk through all the entries in an array.
+ *
+ * info - describes the array
+ * root - root block of the array
+ * fn - called back for every element
+ * context - passed to the callback
+ */
+int dm_array_walk(struct dm_array_info *info, dm_block_t root,
+ int (*fn)(void *context, uint64_t key, void *leaf),
+ void *context);
+
+/*----------------------------------------------------------------*/
+
+#endif /* _LINUX_DM_ARRAY_H */
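To make the typical-use steps (a) to (g) above concrete, here is a minimal, hedged sketch of a caller (not part of the patch): it creates an empty array, grows it, overwrites one entry and reads it back. The function name example_array_use, the pre-existing transaction manager 'tm' and a value type 'vt' describing __le64 values are assumptions for the example only; error handling is reduced to early returns.

/*
 * Hypothetical caller, for illustration only.  Uses only the functions
 * declared in dm-array.h; 'tm' and 'vt' are assumed to exist elsewhere.
 */
static int example_array_use(struct dm_transaction_manager *tm,
			     struct dm_btree_value_type *vt)
{
	struct dm_array_info info;
	dm_block_t root, new_root;
	__le64 fill = cpu_to_le64(0);
	__le64 v = cpu_to_le64(42);
	int r;

	dm_array_info_init(&info, tm, vt);

	r = dm_array_empty(&info, &root);
	if (r)
		return r;

	/* Grow from 0 to 128 entries; new entries take the value 'fill'. */
	__dm_bless_for_disk(&fill);
	r = dm_array_resize(&info, root, 0, 128, &fill, &new_root);
	if (r)
		return r;
	root = new_root;

	/* Overwrite one entry; the old value is decremented for us. */
	__dm_bless_for_disk(&v);
	r = dm_array_set_value(&info, root, 5, &v, &new_root);
	if (r)
		return r;
	root = new_root;

	/* Read it back; the result is in on-disk (little endian) format. */
	return dm_array_get_value(&info, root, 5, &v);
}

Remember that, as the header says, the caller is responsible for storing both the returned root and the array size in its own metadata.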
diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c
new file mode 100644
index 000000000000..cd9a86d4cdf0
--- /dev/null
+++ b/drivers/md/persistent-data/dm-bitset.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-bitset.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "bitset"
+#define BITS_PER_ARRAY_ENTRY 64
+
+/*----------------------------------------------------------------*/
+
+static struct dm_btree_value_type bitset_bvt = {
+ .context = NULL,
+ .size = sizeof(__le64),
+ .inc = NULL,
+ .dec = NULL,
+ .equal = NULL,
+};
+
+/*----------------------------------------------------------------*/
+
+void dm_disk_bitset_init(struct dm_transaction_manager *tm,
+ struct dm_disk_bitset *info)
+{
+ dm_array_info_init(&info->array_info, tm, &bitset_bvt);
+ info->current_index_set = false;
+}
+EXPORT_SYMBOL_GPL(dm_disk_bitset_init);
+
+int dm_bitset_empty(struct dm_disk_bitset *info, dm_block_t *root)
+{
+ return dm_array_empty(&info->array_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_empty);
+
+int dm_bitset_resize(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t old_nr_entries, uint32_t new_nr_entries,
+ bool default_value, dm_block_t *new_root)
+{
+ uint32_t old_blocks = dm_div_up(old_nr_entries, BITS_PER_ARRAY_ENTRY);
+ uint32_t new_blocks = dm_div_up(new_nr_entries, BITS_PER_ARRAY_ENTRY);
+ __le64 value = default_value ? cpu_to_le64(~0) : cpu_to_le64(0);
+
+ __dm_bless_for_disk(&value);
+ return dm_array_resize(&info->array_info, root, old_blocks, new_blocks,
+ &value, new_root);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_resize);
+
+int dm_bitset_del(struct dm_disk_bitset *info, dm_block_t root)
+{
+ return dm_array_del(&info->array_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_del);
+
+int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
+ dm_block_t *new_root)
+{
+ int r;
+ __le64 value;
+
+ if (!info->current_index_set)
+ return 0;
+
+ value = cpu_to_le64(info->current_bits);
+
+ __dm_bless_for_disk(&value);
+ r = dm_array_set_value(&info->array_info, root, info->current_index,
+ &value, new_root);
+ if (r)
+ return r;
+
+ info->current_index_set = false;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_flush);
+
+static int read_bits(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t array_index)
+{
+ int r;
+ __le64 value;
+
+ r = dm_array_get_value(&info->array_info, root, array_index, &value);
+ if (r)
+ return r;
+
+ info->current_bits = le64_to_cpu(value);
+ info->current_index_set = true;
+ info->current_index = array_index;
+ return 0;
+}
+
+static int get_array_entry(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root)
+{
+ int r;
+ unsigned array_index = index / BITS_PER_ARRAY_ENTRY;
+
+ if (info->current_index_set) {
+ if (info->current_index == array_index)
+ return 0;
+
+ r = dm_bitset_flush(info, root, new_root);
+ if (r)
+ return r;
+ }
+
+ return read_bits(info, root, array_index);
+}
+
+int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root)
+{
+ int r;
+ unsigned b = index % BITS_PER_ARRAY_ENTRY;
+
+ r = get_array_entry(info, root, index, new_root);
+ if (r)
+ return r;
+
+ set_bit(b, (unsigned long *) &info->current_bits);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_set_bit);
+
+int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root)
+{
+ int r;
+ unsigned b = index % BITS_PER_ARRAY_ENTRY;
+
+ r = get_array_entry(info, root, index, new_root);
+ if (r)
+ return r;
+
+ clear_bit(b, (unsigned long *) &info->current_bits);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_clear_bit);
+
+int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root, bool *result)
+{
+ int r;
+ unsigned b = index % BITS_PER_ARRAY_ENTRY;
+
+ r = get_array_entry(info, root, index, new_root);
+ if (r)
+ return r;
+
+ *result = test_bit(b, (unsigned long *) &info->current_bits);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_test_bit);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-bitset.h b/drivers/md/persistent-data/dm-bitset.h
new file mode 100644
index 000000000000..e1b9bea14aa1
--- /dev/null
+++ b/drivers/md/persistent-data/dm-bitset.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+#ifndef _LINUX_DM_BITSET_H
+#define _LINUX_DM_BITSET_H
+
+#include "dm-array.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * This bitset type is a thin wrapper round a dm_array of 64-bit words.  It
+ * uses a tiny, one-word cache to reduce the number of array lookups and so
+ * increase performance.
+ *
+ * Like the dm-array that it's based on, the caller needs to keep track of
+ * the size of the bitset separately. The underlying dm-array implicitly
+ * knows how many words it's storing and will return -ENODATA if you try
+ * to access an out-of-bounds word.  However, an out-of-bounds bit in the
+ * final word will _not_ be detected; you have been warned.
+ *
+ * Bits are indexed from zero.
+ *
+ * Typical use:
+ *
+ * a) Initialise a dm_disk_bitset structure with dm_disk_bitset_init().
+ *    This describes the bitset and includes the cache.  It's not called
+ *    dm_bitset_info, in line with the other data structures, because it does
+ * include instance data.
+ *
+ * b) Get yourself a root. The root is the index of a block of data on the
+ *    disk that holds a particular instance of a bitset.  You may have a
+ *    pre-existing root in your metadata that you wish to use, or you may
+ * want to create a brand new, empty bitset with dm_bitset_empty().
+ *
+ * Like the other data structures in this library, dm_bitset objects are
+ * immutable between transactions. Update functions will return you the
+ * root for a _new_ bitset.  If you've incremented the old root, via
+ * dm_tm_inc(), before calling the update function you may continue to use
+ * it in parallel with the new root.
+ *
+ * Even read operations may trigger the cache to be flushed and as such
+ * return a root for a new, updated bitset.
+ *
+ * c) resize a bitset with dm_bitset_resize().
+ *
+ * d) Set a bit with dm_bitset_set_bit().
+ *
+ * e) Clear a bit with dm_bitset_clear_bit().
+ *
+ * f) Test a bit with dm_bitset_test_bit().
+ *
+ * g) Flush all updates from the cache with dm_bitset_flush().
+ *
+ * h) Destroy the bitset with dm_bitset_del(). This tells the transaction
+ * manager that you're no longer using this data structure so it can
+ *    recycle its blocks.  (dm_bitset_dec() would be a better name for it,
+ * but del is in keeping with dm_btree_del()).
+ */
+
+/*
+ * Opaque object. Unlike dm_array_info, you should have one of these per
+ * bitset. Initialise with dm_disk_bitset_init().
+ */
+struct dm_disk_bitset {
+ struct dm_array_info array_info;
+
+ uint32_t current_index;
+ uint64_t current_bits;
+
+ bool current_index_set:1;
+};
+
+/*
+ * Sets up a dm_disk_bitset structure. You don't need to do anything with
+ * this structure when you finish using it.
+ *
+ * tm - the transaction manager that should supervise this structure
+ * info - the structure being initialised
+ */
+void dm_disk_bitset_init(struct dm_transaction_manager *tm,
+ struct dm_disk_bitset *info);
+
+/*
+ * Create an empty, zero length bitset.
+ *
+ * info - describes the bitset
+ * new_root - on success, points to the new root block
+ */
+int dm_bitset_empty(struct dm_disk_bitset *info, dm_block_t *new_root);
+
+/*
+ * Resize the bitset.
+ *
+ * info - describes the bitset
+ * old_root - the root block of the array on disk
+ * old_nr_entries - the number of bits in the old bitset
+ * new_nr_entries - the number of bits you want in the new bitset
+ * default_value - the value for any new bits
+ * new_root - on success, points to the new root block
+ */
+int dm_bitset_resize(struct dm_disk_bitset *info, dm_block_t old_root,
+ uint32_t old_nr_entries, uint32_t new_nr_entries,
+ bool default_value, dm_block_t *new_root);
+
+/*
+ * Frees the bitset.
+ */
+int dm_bitset_del(struct dm_disk_bitset *info, dm_block_t root);
+
+/*
+ * Set a bit.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * index - the bit index
+ * new_root - on success, points to the new root block
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root);
+
+/*
+ * Clears a bit.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * index - the bit index
+ * new_root - on success, points to the new root block
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root);
+
+/*
+ * Tests a bit.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * index - the bit index
+ * new_root - on success, points to the new root block (cached values may have been written)
+ * result - the bit value you're after
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root, bool *result);
+
+/*
+ * Flush any cached changes to disk.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * new_root - on success, points to the new root block
+ */
+int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
+ dm_block_t *new_root);
+
+/*----------------------------------------------------------------*/
+
+#endif /* _LINUX_DM_BITSET_H */
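As with the array header, a short hedged sketch (not part of the patch) may help tie the steps above together: it tracks 1000 flags in a persistent bitset. The helper name example_bitset_use and the pre-existing transaction manager 'tm' are assumptions for the example; error paths are collapsed to early returns.

/*
 * Hypothetical caller, for illustration only.  Uses only the functions
 * declared in dm-bitset.h.
 */
static int example_bitset_use(struct dm_transaction_manager *tm)
{
	struct dm_disk_bitset info;
	dm_block_t root;
	bool dirty;
	int r;

	dm_disk_bitset_init(tm, &info);

	r = dm_bitset_empty(&info, &root);
	if (r)
		return r;

	/* Grow to 1000 bits, all initially clear. */
	r = dm_bitset_resize(&info, root, 0, 1000, false, &root);
	if (r)
		return r;

	/* Set bit 42; the update may only touch the one-word cache. */
	r = dm_bitset_set_bit(&info, root, 42, &root);
	if (r)
		return r;

	/* Push any cached word back to the array before the transaction commits. */
	r = dm_bitset_flush(&info, root, &root);
	if (r)
		return r;

	return dm_bitset_test_bit(&info, root, 42, &root, &dirty);
}

Note how every call takes the current root and hands back a new one; as the header warns, even the read in dm_bitset_test_bit() may flush the cache and produce an updated root.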
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 28c3ed072a79..81b513890e2b 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -613,6 +613,7 @@ int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
return dm_bufio_write_dirty_buffers(bm->bufio);
}
+EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
void dm_bm_set_read_only(struct dm_block_manager *bm)
{
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index accbb05f17b6..37d367bb9aa8 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -64,6 +64,7 @@ struct ro_spine {
void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
int exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
+void ro_pop(struct ro_spine *s);
struct btree_node *ro_node(struct ro_spine *s);
struct shadow_spine {
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index f199a0c4ed04..cf9fd676ae44 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -164,6 +164,13 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
return r;
}
+void ro_pop(struct ro_spine *s)
+{
+ BUG_ON(!s->count);
+ --s->count;
+ unlock_block(s->info, s->nodes[s->count]);
+}
+
struct btree_node *ro_node(struct ro_spine *s)
{
struct dm_block *block;
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 4caf66918cdb..35865425e4b4 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -807,3 +807,55 @@ int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
return r ? r : count;
}
EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
+
+/*
+ * FIXME: We shouldn't use a recursive algorithm when we have limited stack
+ * space. Also this only works for single level trees.
+ */
+static int walk_node(struct ro_spine *s, dm_block_t block,
+ int (*fn)(void *context, uint64_t *keys, void *leaf),
+ void *context)
+{
+ int r;
+ unsigned i, nr;
+ struct btree_node *n;
+ uint64_t keys;
+
+ r = ro_step(s, block);
+ n = ro_node(s);
+
+ nr = le32_to_cpu(n->header.nr_entries);
+ for (i = 0; i < nr; i++) {
+ if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
+ r = walk_node(s, value64(n, i), fn, context);
+ if (r)
+ goto out;
+ } else {
+ keys = le64_to_cpu(*key_ptr(n, i));
+ r = fn(context, &keys, value_ptr(n, i));
+ if (r)
+ goto out;
+ }
+ }
+
+out:
+ ro_pop(s);
+ return r;
+}
+
+int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
+ int (*fn)(void *context, uint64_t *keys, void *leaf),
+ void *context)
+{
+ int r;
+ struct ro_spine spine;
+
+ BUG_ON(info->levels > 1);
+
+ init_ro_spine(&spine, info);
+ r = walk_node(&spine, root, fn, context);
+ exit_ro_spine(&spine);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_walk);
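As a hedged illustration of the new dm_btree_walk() entry point (not part of the patch), the sketch below counts the leaves of a single-level btree. The helper names and the counting context are invented for the example; only the callback signature, which matches the declaration added to dm-btree.h, is taken from the patch.

/* Hypothetical walk context and callback, for illustration only. */
struct count_ctx {
	uint64_t nr_leaves;
};

static int count_leaf(void *context, uint64_t *keys, void *leaf)
{
	struct count_ctx *c = context;

	/* keys[0] is this leaf's key; leaf points at its value. */
	c->nr_leaves++;
	return 0;
}

static int example_count_leaves(struct dm_btree_info *info, dm_block_t root,
				uint64_t *result)
{
	struct count_ctx c = { 0 };
	int r = dm_btree_walk(info, root, count_leaf, &c);

	if (!r)
		*result = c.nr_leaves;
	return r;
}

walk_ablock() in dm-array.c above is the in-tree user of this interface and follows the same pattern.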
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index a2cd50441ca1..8672d159e0b5 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -58,21 +58,21 @@ struct dm_btree_value_type {
* somewhere.) This method is _not_ called for insertion of a new
* value: It is assumed the ref count is already 1.
*/
- void (*inc)(void *context, void *value);
+ void (*inc)(void *context, const void *value);
/*
* This value is being deleted. The btree takes care of freeing
* the memory pointed to by @value. Often the del function just
* needs to decrement a reference count somewhere.
*/
- void (*dec)(void *context, void *value);
+ void (*dec)(void *context, const void *value);
/*
* A test for equality between two values. When a value is
* overwritten with a new one, the old one has the dec method
* called _unless_ the new and old value are deemed equal.
*/
- int (*equal)(void *context, void *value1, void *value2);
+ int (*equal)(void *context, const void *value1, const void *value2);
};
/*
@@ -142,4 +142,13 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
uint64_t *result_keys);
+/*
+ * Iterate through a btree, calling fn() on each entry.
+ * It only works for single-level trees and is internally recursive, so
+ * monitor stack usage carefully.
+ */
+int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
+ int (*fn)(void *context, uint64_t *keys, void *leaf),
+ void *context);
+
#endif /* _LINUX_DM_BTREE_H */
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index d247a35da3c6..81da1a26042e 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -25,8 +25,8 @@ struct shadow_info {
/*
* It would be nice if we scaled with the size of transaction.
*/
-#define HASH_SIZE 256
-#define HASH_MASK (HASH_SIZE - 1)
+#define DM_HASH_SIZE 256
+#define DM_HASH_MASK (DM_HASH_SIZE - 1)
struct dm_transaction_manager {
int is_clone;
@@ -36,7 +36,7 @@ struct dm_transaction_manager {
struct dm_space_map *sm;
spinlock_t lock;
- struct hlist_head buckets[HASH_SIZE];
+ struct hlist_head buckets[DM_HASH_SIZE];
};
/*----------------------------------------------------------------*/
@@ -44,12 +44,11 @@ struct dm_transaction_manager {
static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
int r = 0;
- unsigned bucket = dm_hash_block(b, HASH_MASK);
+ unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
struct shadow_info *si;
- struct hlist_node *n;
spin_lock(&tm->lock);
- hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
+ hlist_for_each_entry(si, tm->buckets + bucket, hlist)
if (si->where == b) {
r = 1;
break;
@@ -71,7 +70,7 @@ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
si = kmalloc(sizeof(*si), GFP_NOIO);
if (si) {
si->where = b;
- bucket = dm_hash_block(b, HASH_MASK);
+ bucket = dm_hash_block(b, DM_HASH_MASK);
spin_lock(&tm->lock);
hlist_add_head(&si->hlist, tm->buckets + bucket);
spin_unlock(&tm->lock);
@@ -81,14 +80,14 @@ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
static void wipe_shadow_table(struct dm_transaction_manager *tm)
{
struct shadow_info *si;
- struct hlist_node *n, *tmp;
+ struct hlist_node *tmp;
struct hlist_head *bucket;
int i;
spin_lock(&tm->lock);
- for (i = 0; i < HASH_SIZE; i++) {
+ for (i = 0; i < DM_HASH_SIZE; i++) {
bucket = tm->buckets + i;
- hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
+ hlist_for_each_entry_safe(si, tmp, bucket, hlist)
kfree(si);
INIT_HLIST_HEAD(bucket);
@@ -115,7 +114,7 @@ static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
tm->sm = sm;
spin_lock_init(&tm->lock);
- for (i = 0; i < HASH_SIZE; i++)
+ for (i = 0; i < DM_HASH_SIZE; i++)
INIT_HLIST_HEAD(tm->buckets + i);
return tm;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 19d77a026639..5af2d2709081 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -184,8 +184,6 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
- trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
- bi, 0);
bio_endio(bi, 0);
bi = return_bi;
}
@@ -365,10 +363,9 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
short generation)
{
struct stripe_head *sh;
- struct hlist_node *hn;
pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
- hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
+ hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
if (sh->sector == sector && sh->generation == generation)
return sh;
pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
@@ -3917,8 +3914,6 @@ static void raid5_align_endio(struct bio *bi, int error)
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
- trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
- raid_bi, 0);
bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
@@ -4377,8 +4372,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if ( rw == WRITE )
md_write_end(mddev);
- trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
- bi, 0);
bio_endio(bi, 0);
}
}
@@ -4755,11 +4748,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
handled++;
}
remaining = raid5_dec_bi_active_stripes(raid_bio);
- if (remaining == 0) {
- trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
- raid_bio, 0);
+ if (remaining == 0)
bio_endio(raid_bio, 0);
- }
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
return handled;
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 8567a7a64104..7f5a7cac6dc7 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -134,6 +134,12 @@ config DVB_NET
You may want to disable the network support on embedded devices. If
unsure say Y.
+# This Kconfig option is used by both PCI and USB drivers
+config TTPCI_EEPROM
+ tristate
+ depends on I2C
+ default n
+
source "drivers/media/dvb-core/Kconfig"
comment "Media drivers"
@@ -157,17 +163,20 @@ source "drivers/media/firewire/Kconfig"
# Common driver options
source "drivers/media/common/Kconfig"
+comment "Media ancillary drivers (tuners, sensors, i2c, frontends)"
+
#
# Ancillary drivers (tuners, i2c, frontends)
#
config MEDIA_SUBDRV_AUTOSELECT
- bool "Autoselect tuners and i2c modules to build"
+ bool "Autoselect ancillary drivers (tuners, sensors, i2c, frontends)"
depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_CAMERA_SUPPORT
default y
help
- By default, a media driver auto-selects all possible i2c
- devices that are used by any of the supported devices.
+ By default, a media driver auto-selects all possible ancillary
+ devices such as tuners, sensors, video encoders/decoders and
+ frontends, that are used by any of the supported devices.
This is generally the right thing to do, except when there
are strict constraints with regards to the kernel size,
@@ -176,12 +185,10 @@ config MEDIA_SUBDRV_AUTOSELECT
Use this option with care, as deselecting ancillary drivers which
are, in fact, necessary will result in the lack of the needed
functionality for your device (it may not tune or may not have
- the need demodulers).
+ the needed demodulators).
If unsure say Y.
-comment "Media ancillary drivers (tuners, sensors, i2c, frontends)"
-
source "drivers/media/i2c/Kconfig"
source "drivers/media/tuners/Kconfig"
source "drivers/media/dvb-frontends/Kconfig"
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index d2a436ce77f8..56c25e6299e9 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -5,6 +5,17 @@ config MEDIA_COMMON_OPTIONS
comment "common driver options"
depends on MEDIA_COMMON_OPTIONS
+config VIDEO_CX2341X
+ tristate
+
+config VIDEO_BTCX
+ depends on PCI
+ tristate
+
+config VIDEO_TVEEPROM
+ tristate
+ depends on I2C
+
source "drivers/media/common/b2c2/Kconfig"
source "drivers/media/common/saa7146/Kconfig"
source "drivers/media/common/siano/Kconfig"
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index b8e2e3a33a31..8f8d18755d15 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -1 +1,4 @@
obj-y += b2c2/ saa7146/ siano/
+obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
+obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o
+obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
diff --git a/drivers/media/i2c/btcx-risc.c b/drivers/media/common/btcx-risc.c
index ac1b2687a20d..ac1b2687a20d 100644
--- a/drivers/media/i2c/btcx-risc.c
+++ b/drivers/media/common/btcx-risc.c
diff --git a/drivers/media/i2c/btcx-risc.h b/drivers/media/common/btcx-risc.h
index f8bc6e8e7b51..f8bc6e8e7b51 100644
--- a/drivers/media/i2c/btcx-risc.h
+++ b/drivers/media/common/btcx-risc.h
diff --git a/drivers/media/i2c/cx2341x.c b/drivers/media/common/cx2341x.c
index 103ef6bad2e2..103ef6bad2e2 100644
--- a/drivers/media/i2c/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index b3890bd49df6..eda01bc68ab2 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -105,7 +105,7 @@ void saa7146_buffer_finish(struct saa7146_dev *dev,
}
q->curr->vb.state = state;
- do_gettimeofday(&q->curr->vb.ts);
+ v4l2_get_timestamp(&q->curr->vb.ts);
wake_up(&q->curr->vb.done);
q->curr = NULL;
@@ -265,8 +265,7 @@ static int fops_release(struct file *file)
DEB_EE("file:%p\n", file);
- if (mutex_lock_interruptible(vdev->lock))
- return -ERESTARTSYS;
+ mutex_lock(vdev->lock);
if (vdev->vfl_type == VFL_TYPE_VBI) {
if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
diff --git a/drivers/media/i2c/tveeprom.c b/drivers/media/common/tveeprom.c
index 3b6cf034976a..cc1e172dfece 100644
--- a/drivers/media/i2c/tveeprom.c
+++ b/drivers/media/common/tveeprom.c
@@ -96,170 +96,170 @@ static struct HAUPPAUGE_TUNER
hauppauge_tuner[] =
{
/* 0-9 */
- { TUNER_ABSENT, "None" },
- { TUNER_ABSENT, "External" },
- { TUNER_ABSENT, "Unspecified" },
- { TUNER_PHILIPS_PAL, "Philips FI1216" },
- { TUNER_PHILIPS_SECAM, "Philips FI1216MF" },
- { TUNER_PHILIPS_NTSC, "Philips FI1236" },
- { TUNER_PHILIPS_PAL_I, "Philips FI1246" },
+ { TUNER_ABSENT, "None" },
+ { TUNER_ABSENT, "External" },
+ { TUNER_ABSENT, "Unspecified" },
+ { TUNER_PHILIPS_PAL, "Philips FI1216" },
+ { TUNER_PHILIPS_SECAM, "Philips FI1216MF" },
+ { TUNER_PHILIPS_NTSC, "Philips FI1236" },
+ { TUNER_PHILIPS_PAL_I, "Philips FI1246" },
{ TUNER_PHILIPS_PAL_DK, "Philips FI1256" },
- { TUNER_PHILIPS_PAL, "Philips FI1216 MK2" },
- { TUNER_PHILIPS_SECAM, "Philips FI1216MF MK2" },
+ { TUNER_PHILIPS_PAL, "Philips FI1216 MK2" },
+ { TUNER_PHILIPS_SECAM, "Philips FI1216MF MK2" },
/* 10-19 */
- { TUNER_PHILIPS_NTSC, "Philips FI1236 MK2" },
- { TUNER_PHILIPS_PAL_I, "Philips FI1246 MK2" },
+ { TUNER_PHILIPS_NTSC, "Philips FI1236 MK2" },
+ { TUNER_PHILIPS_PAL_I, "Philips FI1246 MK2" },
{ TUNER_PHILIPS_PAL_DK, "Philips FI1256 MK2" },
- { TUNER_TEMIC_NTSC, "Temic 4032FY5" },
- { TUNER_TEMIC_PAL, "Temic 4002FH5" },
- { TUNER_TEMIC_PAL_I, "Temic 4062FY5" },
- { TUNER_PHILIPS_PAL, "Philips FR1216 MK2" },
- { TUNER_PHILIPS_SECAM, "Philips FR1216MF MK2" },
- { TUNER_PHILIPS_NTSC, "Philips FR1236 MK2" },
- { TUNER_PHILIPS_PAL_I, "Philips FR1246 MK2" },
+ { TUNER_TEMIC_NTSC, "Temic 4032FY5" },
+ { TUNER_TEMIC_PAL, "Temic 4002FH5" },
+ { TUNER_TEMIC_PAL_I, "Temic 4062FY5" },
+ { TUNER_PHILIPS_PAL, "Philips FR1216 MK2" },
+ { TUNER_PHILIPS_SECAM, "Philips FR1216MF MK2" },
+ { TUNER_PHILIPS_NTSC, "Philips FR1236 MK2" },
+ { TUNER_PHILIPS_PAL_I, "Philips FR1246 MK2" },
/* 20-29 */
{ TUNER_PHILIPS_PAL_DK, "Philips FR1256 MK2" },
- { TUNER_PHILIPS_PAL, "Philips FM1216" },
- { TUNER_PHILIPS_SECAM, "Philips FM1216MF" },
- { TUNER_PHILIPS_NTSC, "Philips FM1236" },
- { TUNER_PHILIPS_PAL_I, "Philips FM1246" },
+ { TUNER_PHILIPS_PAL, "Philips FM1216" },
+ { TUNER_PHILIPS_SECAM, "Philips FM1216MF" },
+ { TUNER_PHILIPS_NTSC, "Philips FM1236" },
+ { TUNER_PHILIPS_PAL_I, "Philips FM1246" },
{ TUNER_PHILIPS_PAL_DK, "Philips FM1256" },
- { TUNER_TEMIC_4036FY5_NTSC, "Temic 4036FY5" },
- { TUNER_ABSENT, "Samsung TCPN9082D" },
- { TUNER_ABSENT, "Samsung TCPM9092P" },
- { TUNER_TEMIC_4006FH5_PAL, "Temic 4006FH5" },
+ { TUNER_TEMIC_4036FY5_NTSC, "Temic 4036FY5" },
+ { TUNER_ABSENT, "Samsung TCPN9082D" },
+ { TUNER_ABSENT, "Samsung TCPM9092P" },
+ { TUNER_TEMIC_4006FH5_PAL, "Temic 4006FH5" },
/* 30-39 */
- { TUNER_ABSENT, "Samsung TCPN9085D" },
- { TUNER_ABSENT, "Samsung TCPB9085P" },
- { TUNER_ABSENT, "Samsung TCPL9091P" },
- { TUNER_TEMIC_4039FR5_NTSC, "Temic 4039FR5" },
- { TUNER_PHILIPS_FQ1216ME, "Philips FQ1216 ME" },
- { TUNER_TEMIC_4066FY5_PAL_I, "Temic 4066FY5" },
- { TUNER_PHILIPS_NTSC, "Philips TD1536" },
- { TUNER_PHILIPS_NTSC, "Philips TD1536D" },
- { TUNER_PHILIPS_NTSC, "Philips FMR1236" }, /* mono radio */
- { TUNER_ABSENT, "Philips FI1256MP" },
+ { TUNER_ABSENT, "Samsung TCPN9085D" },
+ { TUNER_ABSENT, "Samsung TCPB9085P" },
+ { TUNER_ABSENT, "Samsung TCPL9091P" },
+ { TUNER_TEMIC_4039FR5_NTSC, "Temic 4039FR5" },
+ { TUNER_PHILIPS_FQ1216ME, "Philips FQ1216 ME" },
+ { TUNER_TEMIC_4066FY5_PAL_I, "Temic 4066FY5" },
+ { TUNER_PHILIPS_NTSC, "Philips TD1536" },
+ { TUNER_PHILIPS_NTSC, "Philips TD1536D" },
+ { TUNER_PHILIPS_NTSC, "Philips FMR1236" }, /* mono radio */
+ { TUNER_ABSENT, "Philips FI1256MP" },
/* 40-49 */
- { TUNER_ABSENT, "Samsung TCPQ9091P" },
- { TUNER_TEMIC_4006FN5_MULTI_PAL, "Temic 4006FN5" },
- { TUNER_TEMIC_4009FR5_PAL, "Temic 4009FR5" },
- { TUNER_TEMIC_4046FM5, "Temic 4046FM5" },
+ { TUNER_ABSENT, "Samsung TCPQ9091P" },
+ { TUNER_TEMIC_4006FN5_MULTI_PAL,"Temic 4006FN5" },
+ { TUNER_TEMIC_4009FR5_PAL, "Temic 4009FR5" },
+ { TUNER_TEMIC_4046FM5, "Temic 4046FM5" },
{ TUNER_TEMIC_4009FN5_MULTI_PAL_FM, "Temic 4009FN5" },
- { TUNER_ABSENT, "Philips TD1536D FH 44"},
- { TUNER_LG_NTSC_FM, "LG TP18NSR01F"},
- { TUNER_LG_PAL_FM, "LG TP18PSB01D"},
- { TUNER_LG_PAL, "LG TP18PSB11D"},
- { TUNER_LG_PAL_I_FM, "LG TAPC-I001D"},
+ { TUNER_ABSENT, "Philips TD1536D FH 44"},
+ { TUNER_LG_NTSC_FM, "LG TP18NSR01F"},
+ { TUNER_LG_PAL_FM, "LG TP18PSB01D"},
+ { TUNER_LG_PAL, "LG TP18PSB11D"},
+ { TUNER_LG_PAL_I_FM, "LG TAPC-I001D"},
/* 50-59 */
- { TUNER_LG_PAL_I, "LG TAPC-I701D"},
- { TUNER_ABSENT, "Temic 4042FI5"},
- { TUNER_MICROTUNE_4049FM5, "Microtune 4049 FM5"},
- { TUNER_ABSENT, "LG TPI8NSR11F"},
- { TUNER_ABSENT, "Microtune 4049 FM5 Alt I2C"},
- { TUNER_PHILIPS_FM1216ME_MK3, "Philips FQ1216ME MK3"},
- { TUNER_ABSENT, "Philips FI1236 MK3"},
- { TUNER_PHILIPS_FM1216ME_MK3, "Philips FM1216 ME MK3"},
- { TUNER_PHILIPS_FM1236_MK3, "Philips FM1236 MK3"},
- { TUNER_ABSENT, "Philips FM1216MP MK3"},
+ { TUNER_LG_PAL_I, "LG TAPC-I701D"},
+ { TUNER_ABSENT, "Temic 4042FI5"},
+ { TUNER_MICROTUNE_4049FM5, "Microtune 4049 FM5"},
+ { TUNER_ABSENT, "LG TPI8NSR11F"},
+ { TUNER_ABSENT, "Microtune 4049 FM5 Alt I2C"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "Philips FQ1216ME MK3"},
+ { TUNER_ABSENT, "Philips FI1236 MK3"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "Philips FM1216 ME MK3"},
+ { TUNER_PHILIPS_FM1236_MK3, "Philips FM1236 MK3"},
+ { TUNER_ABSENT, "Philips FM1216MP MK3"},
/* 60-69 */
- { TUNER_PHILIPS_FM1216ME_MK3, "LG S001D MK3"},
- { TUNER_ABSENT, "LG M001D MK3"},
- { TUNER_PHILIPS_FM1216ME_MK3, "LG S701D MK3"},
- { TUNER_ABSENT, "LG M701D MK3"},
- { TUNER_ABSENT, "Temic 4146FM5"},
- { TUNER_ABSENT, "Temic 4136FY5"},
- { TUNER_ABSENT, "Temic 4106FH5"},
- { TUNER_ABSENT, "Philips FQ1216LMP MK3"},
- { TUNER_LG_NTSC_TAPE, "LG TAPE H001F MK3"},
- { TUNER_LG_NTSC_TAPE, "LG TAPE H701F MK3"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "LG S001D MK3"},
+ { TUNER_ABSENT, "LG M001D MK3"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "LG S701D MK3"},
+ { TUNER_ABSENT, "LG M701D MK3"},
+ { TUNER_ABSENT, "Temic 4146FM5"},
+ { TUNER_ABSENT, "Temic 4136FY5"},
+ { TUNER_ABSENT, "Temic 4106FH5"},
+ { TUNER_ABSENT, "Philips FQ1216LMP MK3"},
+ { TUNER_LG_NTSC_TAPE, "LG TAPE H001F MK3"},
+ { TUNER_LG_NTSC_TAPE, "LG TAPE H701F MK3"},
/* 70-79 */
- { TUNER_ABSENT, "LG TALN H200T"},
- { TUNER_ABSENT, "LG TALN H250T"},
- { TUNER_ABSENT, "LG TALN M200T"},
- { TUNER_ABSENT, "LG TALN Z200T"},
- { TUNER_ABSENT, "LG TALN S200T"},
- { TUNER_ABSENT, "Thompson DTT7595"},
- { TUNER_ABSENT, "Thompson DTT7592"},
- { TUNER_ABSENT, "Silicon TDA8275C1 8290"},
- { TUNER_ABSENT, "Silicon TDA8275C1 8290 FM"},
- { TUNER_ABSENT, "Thompson DTT757"},
+ { TUNER_ABSENT, "LG TALN H200T"},
+ { TUNER_ABSENT, "LG TALN H250T"},
+ { TUNER_ABSENT, "LG TALN M200T"},
+ { TUNER_ABSENT, "LG TALN Z200T"},
+ { TUNER_ABSENT, "LG TALN S200T"},
+ { TUNER_ABSENT, "Thompson DTT7595"},
+ { TUNER_ABSENT, "Thompson DTT7592"},
+ { TUNER_ABSENT, "Silicon TDA8275C1 8290"},
+ { TUNER_ABSENT, "Silicon TDA8275C1 8290 FM"},
+ { TUNER_ABSENT, "Thompson DTT757"},
/* 80-89 */
- { TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK3"},
- { TUNER_LG_PAL_NEW_TAPC, "LG TAPC G701D"},
- { TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"},
- { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"},
- { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MI 3"},
- { TUNER_TCL_2002N, "TCL 2002N 6A"},
- { TUNER_PHILIPS_FM1236_MK3, "Philips FQ1236 MK3"},
- { TUNER_SAMSUNG_TCPN_2121P30A, "Samsung TCPN 2121P30A"},
- { TUNER_ABSENT, "Samsung TCPE 4121P30A"},
- { TUNER_PHILIPS_FM1216ME_MK3, "TCL MFPE05 2"},
+ { TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK3"},
+ { TUNER_LG_PAL_NEW_TAPC, "LG TAPC G701D"},
+ { TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"},
+ { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"},
+ { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MI 3"},
+ { TUNER_TCL_2002N, "TCL 2002N 6A"},
+ { TUNER_PHILIPS_FM1236_MK3, "Philips FQ1236 MK3"},
+ { TUNER_SAMSUNG_TCPN_2121P30A, "Samsung TCPN 2121P30A"},
+ { TUNER_ABSENT, "Samsung TCPE 4121P30A"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "TCL MFPE05 2"},
/* 90-99 */
- { TUNER_ABSENT, "LG TALN H202T"},
- { TUNER_PHILIPS_FQ1216AME_MK4, "Philips FQ1216AME MK4"},
- { TUNER_PHILIPS_FQ1236A_MK4, "Philips FQ1236A MK4"},
- { TUNER_ABSENT, "Philips FQ1286A MK4"},
- { TUNER_ABSENT, "Philips FQ1216ME MK5"},
- { TUNER_ABSENT, "Philips FQ1236 MK5"},
- { TUNER_SAMSUNG_TCPG_6121P30A, "Samsung TCPG 6121P30A"},
- { TUNER_TCL_2002MB, "TCL 2002MB_3H"},
- { TUNER_ABSENT, "TCL 2002MI_3H"},
- { TUNER_TCL_2002N, "TCL 2002N 5H"},
+ { TUNER_ABSENT, "LG TALN H202T"},
+ { TUNER_PHILIPS_FQ1216AME_MK4, "Philips FQ1216AME MK4"},
+ { TUNER_PHILIPS_FQ1236A_MK4, "Philips FQ1236A MK4"},
+ { TUNER_ABSENT, "Philips FQ1286A MK4"},
+ { TUNER_ABSENT, "Philips FQ1216ME MK5"},
+ { TUNER_ABSENT, "Philips FQ1236 MK5"},
+ { TUNER_SAMSUNG_TCPG_6121P30A, "Samsung TCPG 6121P30A"},
+ { TUNER_TCL_2002MB, "TCL 2002MB_3H"},
+ { TUNER_ABSENT, "TCL 2002MI_3H"},
+ { TUNER_TCL_2002N, "TCL 2002N 5H"},
/* 100-109 */
- { TUNER_PHILIPS_FMD1216ME_MK3, "Philips FMD1216ME"},
- { TUNER_TEA5767, "Philips TEA5768HL FM Radio"},
- { TUNER_ABSENT, "Panasonic ENV57H12D5"},
- { TUNER_PHILIPS_FM1236_MK3, "TCL MFNM05-4"},
+ { TUNER_PHILIPS_FMD1216ME_MK3, "Philips FMD1216ME"},
+ { TUNER_TEA5767, "Philips TEA5768HL FM Radio"},
+ { TUNER_ABSENT, "Panasonic ENV57H12D5"},
+ { TUNER_PHILIPS_FM1236_MK3, "TCL MFNM05-4"},
{ TUNER_PHILIPS_FM1236_MK3, "TCL MNM05-4"},
- { TUNER_PHILIPS_FM1216ME_MK3, "TCL MPE05-2"},
- { TUNER_ABSENT, "TCL MQNM05-4"},
- { TUNER_ABSENT, "LG TAPC-W701D"},
- { TUNER_ABSENT, "TCL 9886P-WM"},
- { TUNER_ABSENT, "TCL 1676NM-WM"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "TCL MPE05-2"},
+ { TUNER_ABSENT, "TCL MQNM05-4"},
+ { TUNER_ABSENT, "LG TAPC-W701D"},
+ { TUNER_ABSENT, "TCL 9886P-WM"},
+ { TUNER_ABSENT, "TCL 1676NM-WM"},
/* 110-119 */
- { TUNER_ABSENT, "Thompson DTT75105"},
- { TUNER_ABSENT, "Conexant_CX24109"},
- { TUNER_TCL_2002N, "TCL M2523_5N_E"},
- { TUNER_TCL_2002MB, "TCL M2523_3DB_E"},
- { TUNER_ABSENT, "Philips 8275A"},
- { TUNER_ABSENT, "Microtune MT2060"},
- { TUNER_PHILIPS_FM1236_MK3, "Philips FM1236 MK5"},
- { TUNER_PHILIPS_FM1216ME_MK3, "Philips FM1216ME MK5"},
- { TUNER_ABSENT, "TCL M2523_3DI_E"},
- { TUNER_ABSENT, "Samsung THPD5222FG30A"},
+ { TUNER_ABSENT, "Thompson DTT75105"},
+ { TUNER_ABSENT, "Conexant_CX24109"},
+ { TUNER_TCL_2002N, "TCL M2523_5N_E"},
+ { TUNER_TCL_2002MB, "TCL M2523_3DB_E"},
+ { TUNER_ABSENT, "Philips 8275A"},
+ { TUNER_ABSENT, "Microtune MT2060"},
+ { TUNER_PHILIPS_FM1236_MK3, "Philips FM1236 MK5"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "Philips FM1216ME MK5"},
+ { TUNER_ABSENT, "TCL M2523_3DI_E"},
+ { TUNER_ABSENT, "Samsung THPD5222FG30A"},
/* 120-129 */
- { TUNER_XC2028, "Xceive XC3028"},
+ { TUNER_XC2028, "Xceive XC3028"},
{ TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK5"},
- { TUNER_ABSENT, "Philips FQD1216LME"},
- { TUNER_ABSENT, "Conexant CX24118A"},
- { TUNER_ABSENT, "TCL DMF11WIP"},
- { TUNER_ABSENT, "TCL MFNM05_4H_E"},
- { TUNER_ABSENT, "TCL MNM05_4H_E"},
- { TUNER_ABSENT, "TCL MPE05_2H_E"},
- { TUNER_ABSENT, "TCL MQNM05_4_U"},
- { TUNER_ABSENT, "TCL M2523_5NH_E"},
+ { TUNER_ABSENT, "Philips FQD1216LME"},
+ { TUNER_ABSENT, "Conexant CX24118A"},
+ { TUNER_ABSENT, "TCL DMF11WIP"},
+ { TUNER_ABSENT, "TCL MFNM05_4H_E"},
+ { TUNER_ABSENT, "TCL MNM05_4H_E"},
+ { TUNER_ABSENT, "TCL MPE05_2H_E"},
+ { TUNER_ABSENT, "TCL MQNM05_4_U"},
+ { TUNER_ABSENT, "TCL M2523_5NH_E"},
/* 130-139 */
- { TUNER_ABSENT, "TCL M2523_3DBH_E"},
- { TUNER_ABSENT, "TCL M2523_3DIH_E"},
- { TUNER_ABSENT, "TCL MFPE05_2_U"},
+ { TUNER_ABSENT, "TCL M2523_3DBH_E"},
+ { TUNER_ABSENT, "TCL M2523_3DIH_E"},
+ { TUNER_ABSENT, "TCL MFPE05_2_U"},
{ TUNER_PHILIPS_FMD1216MEX_MK3, "Philips FMD1216MEX"},
- { TUNER_ABSENT, "Philips FRH2036B"},
- { TUNER_ABSENT, "Panasonic ENGF75_01GF"},
- { TUNER_ABSENT, "MaxLinear MXL5005"},
- { TUNER_ABSENT, "MaxLinear MXL5003"},
- { TUNER_ABSENT, "Xceive XC2028"},
- { TUNER_ABSENT, "Microtune MT2131"},
+ { TUNER_ABSENT, "Philips FRH2036B"},
+ { TUNER_ABSENT, "Panasonic ENGF75_01GF"},
+ { TUNER_ABSENT, "MaxLinear MXL5005"},
+ { TUNER_ABSENT, "MaxLinear MXL5003"},
+ { TUNER_ABSENT, "Xceive XC2028"},
+ { TUNER_ABSENT, "Microtune MT2131"},
/* 140-149 */
- { TUNER_ABSENT, "Philips 8275A_8295"},
- { TUNER_ABSENT, "TCL MF02GIP_5N_E"},
- { TUNER_ABSENT, "TCL MF02GIP_3DB_E"},
- { TUNER_ABSENT, "TCL MF02GIP_3DI_E"},
- { TUNER_ABSENT, "Microtune MT2266"},
- { TUNER_ABSENT, "TCL MF10WPP_4N_E"},
- { TUNER_ABSENT, "LG TAPQ_H702F"},
- { TUNER_ABSENT, "TCL M09WPP_4N_E"},
- { TUNER_ABSENT, "MaxLinear MXL5005_v2"},
- { TUNER_PHILIPS_TDA8290, "Philips 18271_8295"},
+ { TUNER_ABSENT, "Philips 8275A_8295"},
+ { TUNER_ABSENT, "TCL MF02GIP_5N_E"},
+ { TUNER_ABSENT, "TCL MF02GIP_3DB_E"},
+ { TUNER_ABSENT, "TCL MF02GIP_3DI_E"},
+ { TUNER_ABSENT, "Microtune MT2266"},
+ { TUNER_ABSENT, "TCL MF10WPP_4N_E"},
+ { TUNER_ABSENT, "LG TAPQ_H702F"},
+ { TUNER_ABSENT, "TCL M09WPP_4N_E"},
+ { TUNER_ABSENT, "MaxLinear MXL5005_v2"},
+ { TUNER_PHILIPS_TDA8290, "Philips 18271_8295"},
/* 150-159 */
{ TUNER_XC5000, "Xceive XC5000"},
{ TUNER_ABSENT, "Xceive XC3028L"},
@@ -784,9 +784,3 @@ int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len)
return 0;
}
EXPORT_SYMBOL(tveeprom_read);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 388c2eb4d747..399e1042d351 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -172,6 +172,7 @@
#define USB_PID_TWINHAN_VP7045_WARM 0x3206
#define USB_PID_TWINHAN_VP7021_COLD 0x3207
#define USB_PID_TWINHAN_VP7021_WARM 0x3208
+#define USB_PID_TWINHAN_VP7049 0x3219
#define USB_PID_TINYTWIN 0x3226
#define USB_PID_TINYTWIN_2 0xe402
#define USB_PID_TINYTWIN_3 0x9016
@@ -233,10 +234,15 @@
#define USB_PID_AVERMEDIA_A815M 0x815a
#define USB_PID_AVERMEDIA_A835 0xa835
#define USB_PID_AVERMEDIA_B835 0xb835
+#define USB_PID_AVERMEDIA_A835B_1835 0x1835
+#define USB_PID_AVERMEDIA_A835B_2835 0x2835
+#define USB_PID_AVERMEDIA_A835B_3835 0x3835
+#define USB_PID_AVERMEDIA_A835B_4835 0x4835
#define USB_PID_AVERMEDIA_1867 0x1867
#define USB_PID_AVERMEDIA_A867 0xa867
#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
+#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
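Editorial note: PID macros like the ones added above are normally consumed by a driver's USB ID table. A minimal, hypothetical sketch of that usage (the driver name "example" and the table are illustrative; USB_VID_AVERMEDIA is assumed to be defined in the same dvb-usb-ids.h header, and this is not code from this patch):

#include <linux/module.h>
#include <linux/usb.h>
#include "dvb-usb-ids.h"

/* Hypothetical ID table showing how a new PID is matched to a driver. */
static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_1835) },
	{ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_2835) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);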
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index 9be65a3b931f..0aac3096728e 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -156,6 +156,9 @@ struct dvb_ca_private {
/* Slot to start looking for data to read from in the next user-space read operation */
int next_read_slot;
+
+ /* mutex serializing ioctls */
+ struct mutex ioctl_mutex;
};
static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca);
@@ -1191,6 +1194,9 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file,
dprintk("%s\n", __func__);
+ if (mutex_lock_interruptible(&ca->ioctl_mutex))
+ return -ERESTARTSYS;
+
switch (cmd) {
case CA_RESET:
for (slot = 0; slot < ca->slot_count; slot++) {
@@ -1221,8 +1227,10 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file,
case CA_GET_SLOT_INFO: {
struct ca_slot_info *info = parg;
- if ((info->num > ca->slot_count) || (info->num < 0))
- return -EINVAL;
+ if ((info->num > ca->slot_count) || (info->num < 0)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
info->type = CA_CI_LINK;
info->flags = 0;
@@ -1241,6 +1249,8 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file,
break;
}
+out_unlock:
+ mutex_unlock(&ca->ioctl_mutex);
return err;
}
@@ -1695,6 +1705,8 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
mutex_init(&ca->slot_info[i].slot_lock);
}
+ mutex_init(&ca->ioctl_mutex);
+
if (signal_pending(current)) {
ret = -EINTR;
goto error;
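Editorial note: the dvb_ca_en50221 hunks above follow a common kernel pattern: take a per-device mutex at ioctl entry and convert early returns into goto-unlock exits so the lock is always dropped. A stripped-down sketch of that pattern, with made-up names (not the in-tree code):

#include <linux/errno.h>
#include <linux/mutex.h>

struct example_dev {
	struct mutex ioctl_mutex;	/* serializes ioctls on this device */
	int slot_count;
};

static int example_do_ioctl(struct example_dev *dev, unsigned int cmd, int num)
{
	int err = 0;

	/* sleep until the lock is free, but let signals abort the wait */
	if (mutex_lock_interruptible(&dev->ioctl_mutex))
		return -ERESTARTSYS;

	switch (cmd) {
	case 0:
		/* validate user input under the lock; exit via the unlock path */
		if (num < 0 || num > dev->slot_count) {
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		err = -ENOTTY;
		break;
	}

out_unlock:
	mutex_unlock(&dev->ioctl_mutex);
	return err;
}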
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 0223ad255cb4..6e50a7581568 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -603,6 +603,7 @@ static int dvb_frontend_thread(void *data)
enum dvbfe_algo algo;
bool re_tune = false;
+ bool semheld = false;
dev_dbg(fe->dvb->device, "%s:\n", __func__);
@@ -626,6 +627,8 @@ restart:
if (kthread_should_stop() || dvb_frontend_is_exiting(fe)) {
/* got signal or quitting */
+ if (!down_interruptible(&fepriv->sem))
+ semheld = true;
fepriv->exit = DVB_FE_NORMAL_EXIT;
break;
}
@@ -741,6 +744,8 @@ restart:
fepriv->exit = DVB_FE_NO_EXIT;
mb();
+ if (semheld)
+ up(&fepriv->sem);
dvb_frontend_wakeup(fe);
return 0;
}
@@ -1048,6 +1053,16 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D, 0, 0),
+
+ /* Statistics API */
+ _DTV_CMD(DTV_STAT_SIGNAL_STRENGTH, 0, 0),
+ _DTV_CMD(DTV_STAT_CNR, 0, 0),
+ _DTV_CMD(DTV_STAT_PRE_ERROR_BIT_COUNT, 0, 0),
+ _DTV_CMD(DTV_STAT_PRE_TOTAL_BIT_COUNT, 0, 0),
+ _DTV_CMD(DTV_STAT_POST_ERROR_BIT_COUNT, 0, 0),
+ _DTV_CMD(DTV_STAT_POST_TOTAL_BIT_COUNT, 0, 0),
+ _DTV_CMD(DTV_STAT_ERROR_BLOCK_COUNT, 0, 0),
+ _DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT, 0, 0),
};
static void dtv_property_dump(struct dvb_frontend *fe, struct dtv_property *tvp)
@@ -1438,7 +1453,35 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
tvp->u.data = c->lna;
break;
+ /* Fill quality measures */
+ case DTV_STAT_SIGNAL_STRENGTH:
+ tvp->u.st = c->strength;
+ break;
+ case DTV_STAT_CNR:
+ tvp->u.st = c->cnr;
+ break;
+ case DTV_STAT_PRE_ERROR_BIT_COUNT:
+ tvp->u.st = c->pre_bit_error;
+ break;
+ case DTV_STAT_PRE_TOTAL_BIT_COUNT:
+ tvp->u.st = c->pre_bit_count;
+ break;
+ case DTV_STAT_POST_ERROR_BIT_COUNT:
+ tvp->u.st = c->post_bit_error;
+ break;
+ case DTV_STAT_POST_TOTAL_BIT_COUNT:
+ tvp->u.st = c->post_bit_count;
+ break;
+ case DTV_STAT_ERROR_BLOCK_COUNT:
+ tvp->u.st = c->block_error;
+ break;
+ case DTV_STAT_TOTAL_BLOCK_COUNT:
+ tvp->u.st = c->block_count;
+ break;
default:
+ dev_dbg(fe->dvb->device,
+ "%s: FE property %d doesn't exist\n",
+ __func__, tvp->cmd);
return -EINVAL;
}
@@ -1823,16 +1866,20 @@ static int dvb_frontend_ioctl(struct file *file,
int err = -EOPNOTSUPP;
dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd));
- if (fepriv->exit != DVB_FE_NO_EXIT)
+ if (down_interruptible(&fepriv->sem))
+ return -ERESTARTSYS;
+
+ if (fepriv->exit != DVB_FE_NO_EXIT) {
+ up(&fepriv->sem);
return -ENODEV;
+ }
if ((file->f_flags & O_ACCMODE) == O_RDONLY &&
(_IOC_DIR(cmd) != _IOC_READ || cmd == FE_GET_EVENT ||
- cmd == FE_DISEQC_RECV_SLAVE_REPLY))
+ cmd == FE_DISEQC_RECV_SLAVE_REPLY)) {
+ up(&fepriv->sem);
return -EPERM;
-
- if (down_interruptible (&fepriv->sem))
- return -ERESTARTSYS;
+ }
if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY))
err = dvb_frontend_ioctl_properties(file, cmd, parg);
@@ -2246,7 +2293,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
printk("%s switch command: 0x%04lx\n", __func__, swcmd);
do_gettimeofday(&nexttime);
if (dvb_frontend_debug)
- memcpy(&tv[0], &nexttime, sizeof(struct timeval));
+ tv[0] = nexttime;
/* before sending a command, initialize by sending
* a 32ms 18V to the switch
*/
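Editorial note: the DTV_STAT_* properties added above return struct dtv_fe_stats values that a demod driver keeps updated in its property cache. A hedged sketch of how a driver might fill one entry (field and scale names follow the DVB v5 statistics API; the function name and the 18 dB value are placeholders, not from this patch):

#include <linux/types.h>
#include "dvb_frontend.h"

/* Fill the CNR statistic in the frontend's property cache.  A real driver
 * would derive the value from hardware registers. */
static void example_update_cnr(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	c->cnr.len = 1;
	c->cnr.stat[0].scale = FE_SCALE_DECIBEL;	/* value in 0.001 dB units */
	c->cnr.stat[0].svalue = 18000;			/* 18 dB, placeholder */
}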
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index 97112cd88a17..b34922a08156 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -393,6 +393,16 @@ struct dtv_frontend_properties {
u8 atscmh_sccc_code_mode_d;
u32 lna;
+
+ /* statistics data */
+ struct dtv_fe_stats strength;
+ struct dtv_fe_stats cnr;
+ struct dtv_fe_stats pre_bit_error;
+ struct dtv_fe_stats pre_bit_count;
+ struct dtv_fe_stats post_bit_error;
+ struct dtv_fe_stats post_bit_count;
+ struct dtv_fe_stats block_error;
+ struct dtv_fe_stats block_count;
};
struct dvb_frontend {
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index c2117688aa23..44225b186f6d 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -1345,26 +1345,35 @@ static int dvb_net_do_ioctl(struct file *file,
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_net *dvbnet = dvbdev->priv;
+ int ret = 0;
if (((file->f_flags&O_ACCMODE)==O_RDONLY))
return -EPERM;
+ if (mutex_lock_interruptible(&dvbnet->ioctl_mutex))
+ return -ERESTARTSYS;
+
switch (cmd) {
case NET_ADD_IF:
{
struct dvb_net_if *dvbnetif = parg;
int result;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto ioctl_error;
+ }
- if (!try_module_get(dvbdev->adapter->module))
- return -EPERM;
+ if (!try_module_get(dvbdev->adapter->module)) {
+ ret = -EPERM;
+ goto ioctl_error;
+ }
result=dvb_net_add_if(dvbnet, dvbnetif->pid, dvbnetif->feedtype);
if (result<0) {
module_put(dvbdev->adapter->module);
- return result;
+ ret = result;
+ goto ioctl_error;
}
dvbnetif->if_num=result;
break;
@@ -1376,8 +1385,10 @@ static int dvb_net_do_ioctl(struct file *file,
struct dvb_net_if *dvbnetif = parg;
if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
- !dvbnet->state[dvbnetif->if_num])
- return -EINVAL;
+ !dvbnet->state[dvbnetif->if_num]) {
+ ret = -EINVAL;
+ goto ioctl_error;
+ }
netdev = dvbnet->device[dvbnetif->if_num];
@@ -1388,16 +1399,18 @@ static int dvb_net_do_ioctl(struct file *file,
}
case NET_REMOVE_IF:
{
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if ((unsigned long) parg >= DVB_NET_DEVICES_MAX)
- return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto ioctl_error;
+ }
+ if ((unsigned long) parg >= DVB_NET_DEVICES_MAX) {
+ ret = -EINVAL;
+ goto ioctl_error;
+ }
ret = dvb_net_remove_if(dvbnet, (unsigned long) parg);
if (!ret)
module_put(dvbdev->adapter->module);
- return ret;
+ break;
}
/* binary compatibility cruft */
@@ -1406,16 +1419,21 @@ static int dvb_net_do_ioctl(struct file *file,
struct __dvb_net_if_old *dvbnetif = parg;
int result;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto ioctl_error;
+ }
- if (!try_module_get(dvbdev->adapter->module))
- return -EPERM;
+ if (!try_module_get(dvbdev->adapter->module)) {
+ ret = -EPERM;
+ goto ioctl_error;
+ }
result=dvb_net_add_if(dvbnet, dvbnetif->pid, DVB_NET_FEEDTYPE_MPE);
if (result<0) {
module_put(dvbdev->adapter->module);
- return result;
+ ret = result;
+ goto ioctl_error;
}
dvbnetif->if_num=result;
break;
@@ -1427,8 +1445,10 @@ static int dvb_net_do_ioctl(struct file *file,
struct __dvb_net_if_old *dvbnetif = parg;
if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
- !dvbnet->state[dvbnetif->if_num])
- return -EINVAL;
+ !dvbnet->state[dvbnetif->if_num]) {
+ ret = -EINVAL;
+ goto ioctl_error;
+ }
netdev = dvbnet->device[dvbnetif->if_num];
@@ -1437,9 +1457,13 @@ static int dvb_net_do_ioctl(struct file *file,
break;
}
default:
- return -ENOTTY;
+ ret = -ENOTTY;
+ break;
}
- return 0;
+
+ioctl_error:
+ mutex_unlock(&dvbnet->ioctl_mutex);
+ return ret;
}
static long dvb_net_ioctl(struct file *file,
@@ -1505,6 +1529,7 @@ int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
{
int i;
+ mutex_init(&dvbnet->ioctl_mutex);
dvbnet->demux = dmx;
for (i=0; i<DVB_NET_DEVICES_MAX; i++)
diff --git a/drivers/media/dvb-core/dvb_net.h b/drivers/media/dvb-core/dvb_net.h
index 1e53acd50cf4..ede78e8c8aa8 100644
--- a/drivers/media/dvb-core/dvb_net.h
+++ b/drivers/media/dvb-core/dvb_net.h
@@ -40,6 +40,7 @@ struct dvb_net {
int state[DVB_NET_DEVICES_MAX];
unsigned int exit:1;
struct dmx_demux *demux;
+ struct mutex ioctl_mutex;
};
void dvb_net_release(struct dvb_net *);
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index d33101aaf0b5..401ef64f92c6 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -418,10 +418,8 @@ int dvb_usercopy(struct file *file,
}
/* call driver */
- mutex_lock(&dvbdev_mutex);
if ((err = func(file, cmd, parg)) == -ENOIOCTLCMD)
err = -ENOTTY;
- mutex_unlock(&dvbdev_mutex);
if (err < 0)
goto out;
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 5efec73a32d2..6f809a70c78e 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -207,6 +207,13 @@ config DVB_SI21XX
help
A DVB-S tuner module. Say Y when you want to support this frontend.
+config DVB_TS2020
+ tristate "Montage Tehnology TS2020 based tuners"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A DVB-S/S2 silicon tuner. Say Y when you want to support this tuner.
+
config DVB_DS3000
tristate "Montage Tehnology DS3000 based"
depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index 7eb73bbd2e26..cebc0faffab5 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
obj-$(CONFIG_DVB_HD29L2) += hd29l2.o
obj-$(CONFIG_DVB_DS3000) += ds3000.o
+obj-$(CONFIG_DVB_TS2020) += ts2020.o
obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
obj-$(CONFIG_DVB_MB86A20S) += mb86a20s.o
obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 464ad878490b..c9cad989b8b9 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -318,6 +318,10 @@ static int af9033_init(struct dvb_frontend *fe)
len = ARRAY_SIZE(tuner_init_fc2580);
init = tuner_init_fc2580;
break;
+ case AF9033_TUNER_FC0012:
+ len = ARRAY_SIZE(tuner_init_fc0012);
+ init = tuner_init_fc0012;
+ break;
default:
dev_dbg(&state->i2c->dev, "%s: unsupported tuner ID=%d\n",
__func__, state->cfg.tuner);
@@ -331,6 +335,20 @@ static int af9033_init(struct dvb_frontend *fe)
goto err;
}
+ if (state->cfg.ts_mode == AF9033_TS_MODE_SERIAL) {
+ ret = af9033_wr_reg_mask(state, 0x00d91c, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg_mask(state, 0x00d917, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg_mask(state, 0x00d916, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+ }
+
state->bandwidth_hz = 0; /* force to program all parameters */
return 0;
diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
index bfa4313fde21..82bd8c1513b6 100644
--- a/drivers/media/dvb-frontends/af9033.h
+++ b/drivers/media/dvb-frontends/af9033.h
@@ -40,6 +40,7 @@ struct af9033_config {
*/
#define AF9033_TUNER_TUA9001 0x27 /* Infineon TUA 9001 */
#define AF9033_TUNER_FC0011 0x28 /* Fitipower FC0011 */
+#define AF9033_TUNER_FC0012 0x2e /* Fitipower FC0012 */
#define AF9033_TUNER_MXL5007T 0xa0 /* MaxLinear MxL5007T */
#define AF9033_TUNER_TDA18218 0xa1 /* NXP TDA 18218HN */
#define AF9033_TUNER_FC2580 0x32 /* FCI FC2580 */
diff --git a/drivers/media/dvb-frontends/af9033_priv.h b/drivers/media/dvb-frontends/af9033_priv.h
index 34dddcd77538..e9bd78265543 100644
--- a/drivers/media/dvb-frontends/af9033_priv.h
+++ b/drivers/media/dvb-frontends/af9033_priv.h
@@ -199,10 +199,9 @@ static const struct reg_val ofsm_init[] = {
{ 0x8000a6, 0x01 },
{ 0x8000a9, 0x00 },
{ 0x8000aa, 0x01 },
- { 0x8000ab, 0x01 },
{ 0x8000b0, 0x01 },
- { 0x8000c0, 0x05 },
- { 0x8000c4, 0x19 },
+ { 0x8000c4, 0x05 },
+ { 0x8000c8, 0x19 },
{ 0x80f000, 0x0f },
{ 0x80f016, 0x10 },
{ 0x80f017, 0x04 },
@@ -322,8 +321,9 @@ static const struct reg_val tuner_init_tua9001[] = {
{ 0x80009b, 0x05 },
{ 0x80009c, 0x80 },
{ 0x8000b3, 0x00 },
- { 0x8000c1, 0x01 },
- { 0x8000c2, 0x00 },
+ { 0x8000c5, 0x01 },
+ { 0x8000c6, 0x00 },
+ { 0x8000c9, 0x5d },
{ 0x80f007, 0x00 },
{ 0x80f01f, 0x82 },
{ 0x80f020, 0x00 },
@@ -339,14 +339,14 @@ static const struct reg_val tuner_init_tua9001[] = {
/* Fitipower fc0011 tuner init
AF9033_TUNER_FC0011 = 0x28 */
static const struct reg_val tuner_init_fc0011[] = {
- { 0x800046, AF9033_TUNER_FC0011 },
+ { 0x800046, 0x28 },
{ 0x800057, 0x00 },
{ 0x800058, 0x01 },
{ 0x80005f, 0x00 },
{ 0x800060, 0x00 },
{ 0x800068, 0xa5 },
{ 0x80006e, 0x01 },
- { 0x800071, 0x0A },
+ { 0x800071, 0x0a },
{ 0x800072, 0x02 },
{ 0x800074, 0x01 },
{ 0x800079, 0x01 },
@@ -354,7 +354,7 @@ static const struct reg_val tuner_init_fc0011[] = {
{ 0x800094, 0x00 },
{ 0x800095, 0x00 },
{ 0x800096, 0x00 },
- { 0x80009b, 0x2D },
+ { 0x80009b, 0x2d },
{ 0x80009c, 0x60 },
{ 0x80009d, 0x23 },
{ 0x8000a4, 0x50 },
@@ -362,39 +362,82 @@ static const struct reg_val tuner_init_fc0011[] = {
{ 0x8000b3, 0x01 },
{ 0x8000b7, 0x88 },
{ 0x8000b8, 0xa6 },
- { 0x8000c3, 0x01 },
- { 0x8000c4, 0x01 },
- { 0x8000c7, 0x69 },
- { 0x80F007, 0x00 },
- { 0x80F00A, 0x1B },
- { 0x80F00B, 0x1B },
- { 0x80F00C, 0x1B },
- { 0x80F00D, 0x1B },
- { 0x80F00E, 0xFF },
- { 0x80F00F, 0x01 },
- { 0x80F010, 0x00 },
- { 0x80F011, 0x02 },
- { 0x80F012, 0xFF },
- { 0x80F013, 0x01 },
- { 0x80F014, 0x00 },
- { 0x80F015, 0x02 },
- { 0x80F01B, 0xEF },
- { 0x80F01C, 0x01 },
- { 0x80F01D, 0x0f },
- { 0x80F01E, 0x02 },
- { 0x80F01F, 0x6E },
- { 0x80F020, 0x00 },
- { 0x80F025, 0xDE },
- { 0x80F026, 0x00 },
- { 0x80F027, 0x0A },
- { 0x80F028, 0x03 },
- { 0x80F029, 0x6E },
- { 0x80F02A, 0x00 },
- { 0x80F047, 0x00 },
- { 0x80F054, 0x00 },
- { 0x80F055, 0x00 },
- { 0x80F077, 0x01 },
- { 0x80F1E6, 0x00 },
+ { 0x8000c5, 0x01 },
+ { 0x8000c6, 0x01 },
+ { 0x8000c9, 0x69 },
+ { 0x80f007, 0x00 },
+ { 0x80f00a, 0x1b },
+ { 0x80f00b, 0x1b },
+ { 0x80f00c, 0x1b },
+ { 0x80f00d, 0x1b },
+ { 0x80f00e, 0xff },
+ { 0x80f00f, 0x01 },
+ { 0x80f010, 0x00 },
+ { 0x80f011, 0x02 },
+ { 0x80f012, 0xff },
+ { 0x80f013, 0x01 },
+ { 0x80f014, 0x00 },
+ { 0x80f015, 0x02 },
+ { 0x80f01b, 0xef },
+ { 0x80f01c, 0x01 },
+ { 0x80f01d, 0x0f },
+ { 0x80f01e, 0x02 },
+ { 0x80f01f, 0x6e },
+ { 0x80f020, 0x00 },
+ { 0x80f025, 0xde },
+ { 0x80f026, 0x00 },
+ { 0x80f027, 0x0a },
+ { 0x80f028, 0x03 },
+ { 0x80f029, 0x6e },
+ { 0x80f02a, 0x00 },
+ { 0x80f047, 0x00 },
+ { 0x80f054, 0x00 },
+ { 0x80f055, 0x00 },
+ { 0x80f077, 0x01 },
+ { 0x80f1e6, 0x00 },
+};
+
+/* Fitipower FC0012 tuner init
+ AF9033_TUNER_FC0012 = 0x2e */
+static const struct reg_val tuner_init_fc0012[] = {
+ { 0x800046, 0x2e },
+ { 0x800057, 0x00 },
+ { 0x800058, 0x01 },
+ { 0x800059, 0x01 },
+ { 0x80005f, 0x00 },
+ { 0x800060, 0x00 },
+ { 0x80006d, 0x00 },
+ { 0x800071, 0x05 },
+ { 0x800072, 0x02 },
+ { 0x800074, 0x01 },
+ { 0x800075, 0x03 },
+ { 0x800076, 0x02 },
+ { 0x800077, 0x01 },
+ { 0x800078, 0x00 },
+ { 0x800079, 0x00 },
+ { 0x80007a, 0x90 },
+ { 0x80007b, 0x90 },
+ { 0x800093, 0x00 },
+ { 0x800094, 0x01 },
+ { 0x800095, 0x02 },
+ { 0x800096, 0x01 },
+ { 0x800098, 0x0a },
+ { 0x80009b, 0x05 },
+ { 0x80009c, 0x80 },
+ { 0x8000b3, 0x00 },
+ { 0x8000c5, 0x01 },
+ { 0x8000c6, 0x00 },
+ { 0x8000c9, 0x5d },
+ { 0x80f007, 0x00 },
+ { 0x80f01f, 0xa0 },
+ { 0x80f020, 0x00 },
+ { 0x80f029, 0x82 },
+ { 0x80f02a, 0x00 },
+ { 0x80f047, 0x00 },
+ { 0x80f054, 0x00 },
+ { 0x80f055, 0x00 },
+ { 0x80f077, 0x01 },
+ { 0x80f1e6, 0x00 },
};
/* MaxLinear MxL5007T tuner init
@@ -482,11 +525,12 @@ static const struct reg_val tuner_init_fc2580[] = {
{ 0x800095, 0x00 },
{ 0x800096, 0x05 },
{ 0x8000b3, 0x01 },
- { 0x8000c3, 0x01 },
- { 0x8000c4, 0x00 },
+ { 0x8000c5, 0x01 },
+ { 0x8000c6, 0x00 },
+ { 0x8000d1, 0x01 },
{ 0x80f007, 0x00 },
{ 0x80f00c, 0x19 },
- { 0x80f00d, 0x1A },
+ { 0x80f00d, 0x1a },
{ 0x80f00e, 0x00 },
{ 0x80f00f, 0x02 },
{ 0x80f010, 0x00 },
diff --git a/drivers/media/dvb-frontends/bcm3510.h b/drivers/media/dvb-frontends/bcm3510.h
index f4575c0cc446..5bd56b1623bf 100644
--- a/drivers/media/dvb-frontends/bcm3510.h
+++ b/drivers/media/dvb-frontends/bcm3510.h
@@ -34,7 +34,7 @@ struct bcm3510_config
int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name);
};
-#if defined(CONFIG_DVB_BCM3510) || (defined(CONFIG_DVB_BCM3510_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_BCM3510)
extern struct dvb_frontend* bcm3510_attach(const struct bcm3510_config* config,
struct i2c_adapter* i2c);
#else
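Editorial note: several frontend headers below receive the same conversion. IS_ENABLED(CONFIG_FOO) is true for both =y and =m builds, which is what the old two-clause #if expressed. A minimal sketch of the attach/stub pattern with a made-up config symbol and function (not any specific in-tree header):

#include <linux/kconfig.h>	/* IS_ENABLED() */

struct dvb_frontend;
struct i2c_adapter;

/* CONFIG_EXAMPLE_DEMOD is a placeholder symbol.  When the driver is built,
 * callers get the real attach(); otherwise they get an inline stub that
 * reports "no frontend". */
#if IS_ENABLED(CONFIG_EXAMPLE_DEMOD)
extern struct dvb_frontend *example_demod_attach(struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend *example_demod_attach(struct i2c_adapter *i2c)
{
	return NULL;
}
#endif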
diff --git a/drivers/media/dvb-frontends/cx22700.h b/drivers/media/dvb-frontends/cx22700.h
index 4757a930ca05..382a7b1f3618 100644
--- a/drivers/media/dvb-frontends/cx22700.h
+++ b/drivers/media/dvb-frontends/cx22700.h
@@ -31,7 +31,7 @@ struct cx22700_config
u8 demod_address;
};
-#if defined(CONFIG_DVB_CX22700) || (defined(CONFIG_DVB_CX22700_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_CX22700)
extern struct dvb_frontend* cx22700_attach(const struct cx22700_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/cx24110.h b/drivers/media/dvb-frontends/cx24110.h
index fdcceee91f3a..527aff1f2723 100644
--- a/drivers/media/dvb-frontends/cx24110.h
+++ b/drivers/media/dvb-frontends/cx24110.h
@@ -46,7 +46,7 @@ static inline int cx24110_pll_write(struct dvb_frontend *fe, u32 val)
return 0;
}
-#if defined(CONFIG_DVB_CX24110) || (defined(CONFIG_DVB_CX24110_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_CX24110)
extern struct dvb_frontend* cx24110_attach(const struct cx24110_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index b48879186537..2916d7c74a1d 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -819,7 +819,7 @@ static int cx24116_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
static void cx24116_clone_params(struct dvb_frontend *fe)
{
struct cx24116_state *state = fe->demodulator_priv;
- memcpy(&state->dcur, &state->dnxt, sizeof(state->dcur));
+ state->dcur = state->dnxt;
}
/* Wait for LNB */
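Editorial note: the one-line cx24116 change above replaces a memcpy() of a whole struct with plain assignment; both copy the same bytes, but assignment lets the compiler type-check the operands. A trivial illustration with a made-up struct:

struct example_params {
	int frequency;
	int symbol_rate;
};

static void example_clone_params(struct example_params *dst,
				 const struct example_params *src)
{
	/* equivalent to memcpy(dst, src, sizeof(*dst)), but rejected by the
	 * compiler if dst and src ever drift to different types */
	*dst = *src;
}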
diff --git a/drivers/media/dvb-frontends/dib0070.h b/drivers/media/dvb-frontends/dib0070.h
index 45c31fae3967..0c6befcc9143 100644
--- a/drivers/media/dvb-frontends/dib0070.h
+++ b/drivers/media/dvb-frontends/dib0070.h
@@ -48,7 +48,7 @@ struct dib0070_config {
u8 vga_filter;
};
-#if defined(CONFIG_DVB_TUNER_DIB0070) || (defined(CONFIG_DVB_TUNER_DIB0070_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TUNER_DIB0070)
extern struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg);
extern u16 dib0070_wbd_offset(struct dvb_frontend *);
extern void dib0070_ctrl_agc_filter(struct dvb_frontend *, u8 open);
diff --git a/drivers/media/dvb-frontends/dib0090.h b/drivers/media/dvb-frontends/dib0090.h
index 781dc49de45b..6a090954fa10 100644
--- a/drivers/media/dvb-frontends/dib0090.h
+++ b/drivers/media/dvb-frontends/dib0090.h
@@ -75,7 +75,7 @@ struct dib0090_config {
u8 force_crystal_mode;
};
-#if defined(CONFIG_DVB_TUNER_DIB0090) || (defined(CONFIG_DVB_TUNER_DIB0090_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TUNER_DIB0090)
extern struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config);
extern struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config);
extern void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast);
diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
index 404f63a6f26b..9b6c3bbc983a 100644
--- a/drivers/media/dvb-frontends/dib3000.h
+++ b/drivers/media/dvb-frontends/dib3000.h
@@ -41,7 +41,7 @@ struct dib_fe_xfer_ops
int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
};
-#if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_DIB3000MB)
extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
#else
diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
index 39591bb172c1..9e7a2b170d55 100644
--- a/drivers/media/dvb-frontends/dib8000.h
+++ b/drivers/media/dvb-frontends/dib8000.h
@@ -37,7 +37,7 @@ struct dib8000_config {
#define DEFAULT_DIB8000_I2C_ADDRESS 18
-#if defined(CONFIG_DVB_DIB8000) || (defined(CONFIG_DVB_DIB8000_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_DIB8000)
extern struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
extern struct i2c_adapter *dib8000_get_i2c_master(struct dvb_frontend *, enum dibx000_i2c_interface, int);
diff --git a/drivers/media/dvb-frontends/dib9000.h b/drivers/media/dvb-frontends/dib9000.h
index de1cc91fd833..f3639f045ff0 100644
--- a/drivers/media/dvb-frontends/dib9000.h
+++ b/drivers/media/dvb-frontends/dib9000.h
@@ -27,7 +27,7 @@ struct dib9000_config {
#define DEFAULT_DIB9000_I2C_ADDRESS 18
-#if defined(CONFIG_DVB_DIB9000) || (defined(CONFIG_DVB_DIB9000_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_DIB9000)
extern struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, const struct dib9000_config *cfg);
extern int dib9000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr);
extern struct i2c_adapter *dib9000_get_tuner_interface(struct dvb_frontend *fe);
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index e71cc60851e7..9a2134792cfa 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -2965,7 +2965,7 @@ struct dvb_frontend *drxd_attach(const struct drxd_config *config,
return NULL;
memset(state, 0, sizeof(*state));
- memcpy(&state->ops, &drxd_ops, sizeof(struct dvb_frontend_ops));
+ state->ops = drxd_ops;
state->dev = dev;
state->config = *config;
state->i2c = i2c;
@@ -2976,10 +2976,13 @@ struct dvb_frontend *drxd_attach(const struct drxd_config *config,
if (Read16(state, 0, 0, 0) < 0)
goto error;
- memcpy(&state->frontend.ops, &drxd_ops,
- sizeof(struct dvb_frontend_ops));
+ state->frontend.ops = drxd_ops;
state->frontend.demodulator_priv = state;
ConfigureMPEGOutput(state, 0);
+ /* add a few initializations to allow gate control */
+ CDRXD(state, state->config.IF ? state->config.IF : 36000000);
+ InitHI(state);
+
return &state->frontend;
error:
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 60a529e3833f..1e344b033277 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -1,8 +1,8 @@
/*
- Montage Technology DS3000/TS2020 - DVBS/S2 Demodulator/Tuner driver
- Copyright (C) 2009 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
+ Montage Technology DS3000 - DVBS/S2 Demodulator driver
+ Copyright (C) 2009-2012 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
- Copyright (C) 2009 TurboSight.com
+ Copyright (C) 2009-2012 TurboSight.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -27,6 +27,7 @@
#include <linux/firmware.h>
#include "dvb_frontend.h"
+#include "ts2020.h"
#include "ds3000.h"
static int debug;
@@ -42,7 +43,6 @@ static int debug;
#define DS3000_DEFAULT_FIRMWARE "dvb-fe-ds3000.fw"
#define DS3000_SAMPLE_RATE 96000 /* in kHz */
-#define DS3000_XTAL_FREQ 27000 /* in kHz */
/* Register values to initialise the demod in DVB-S mode */
static u8 ds3000_dvbs_init_tab[] = {
@@ -256,22 +256,14 @@ static int ds3000_writereg(struct ds3000_state *state, int reg, int data)
return 0;
}
-static int ds3000_tuner_writereg(struct ds3000_state *state, int reg, int data)
+static int ds3000_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
- u8 buf[] = { reg, data };
- struct i2c_msg msg = { .addr = 0x60,
- .flags = 0, .buf = buf, .len = 2 };
- int err;
-
- dprintk("%s: write reg 0x%02x, value 0x%02x\n", __func__, reg, data);
+ struct ds3000_state *state = fe->demodulator_priv;
- ds3000_writereg(state, 0x03, 0x11);
- err = i2c_transfer(state->i2c, &msg, 1);
- if (err != 1) {
- printk("%s: writereg error(err == %i, reg == 0x%02x,"
- " value == 0x%02x)\n", __func__, err, reg, data);
- return -EREMOTEIO;
- }
+ if (enable)
+ ds3000_writereg(state, 0x03, 0x12);
+ else
+ ds3000_writereg(state, 0x03, 0x02);
return 0;
}
@@ -280,15 +272,14 @@ static int ds3000_tuner_writereg(struct ds3000_state *state, int reg, int data)
static int ds3000_writeFW(struct ds3000_state *state, int reg,
const u8 *data, u16 len)
{
- int i, ret = -EREMOTEIO;
+ int i, ret = 0;
struct i2c_msg msg;
u8 *buf;
buf = kmalloc(33, GFP_KERNEL);
if (buf == NULL) {
printk(KERN_ERR "Unable to kmalloc\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
*(buf) = reg;
@@ -308,8 +299,10 @@ static int ds3000_writeFW(struct ds3000_state *state, int reg,
printk(KERN_ERR "%s: write error(err == %i, "
"reg == 0x%02x\n", __func__, ret, reg);
ret = -EREMOTEIO;
+ goto error;
}
}
+ ret = 0;
error:
kfree(buf);
@@ -348,38 +341,6 @@ static int ds3000_readreg(struct ds3000_state *state, u8 reg)
return b1[0];
}
-static int ds3000_tuner_readreg(struct ds3000_state *state, u8 reg)
-{
- int ret;
- u8 b0[] = { reg };
- u8 b1[] = { 0 };
- struct i2c_msg msg[] = {
- {
- .addr = 0x60,
- .flags = 0,
- .buf = b0,
- .len = 1
- }, {
- .addr = 0x60,
- .flags = I2C_M_RD,
- .buf = b1,
- .len = 1
- }
- };
-
- ds3000_writereg(state, 0x03, 0x12);
- ret = i2c_transfer(state->i2c, msg, 2);
-
- if (ret != 2) {
- printk(KERN_ERR "%s: reg=0x%x(error=%d)\n", __func__, reg, ret);
- return ret;
- }
-
- dprintk("%s: read reg 0x%02x, value 0x%02x\n", __func__, reg, b1[0]);
-
- return b1[0];
-}
-
static int ds3000_load_firmware(struct dvb_frontend *fe,
const struct firmware *fw);
@@ -424,6 +385,7 @@ static int ds3000_load_firmware(struct dvb_frontend *fe,
const struct firmware *fw)
{
struct ds3000_state *state = fe->demodulator_priv;
+ int ret = 0;
dprintk("%s\n", __func__);
dprintk("Firmware is %zu bytes (%02x %02x .. %02x %02x)\n",
@@ -436,10 +398,10 @@ static int ds3000_load_firmware(struct dvb_frontend *fe,
/* Begin the firmware load process */
ds3000_writereg(state, 0xb2, 0x01);
/* write the entire firmware */
- ds3000_writeFW(state, 0xb0, fw->data, fw->size);
+ ret = ds3000_writeFW(state, 0xb0, fw->data, fw->size);
ds3000_writereg(state, 0xb2, 0x00);
- return 0;
+ return ret;
}
static int ds3000_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
@@ -498,6 +460,9 @@ static int ds3000_read_status(struct dvb_frontend *fe, fe_status_t* status)
return 1;
}
+ if (state->config->set_lock_led)
+ state->config->set_lock_led(fe, *status == 0 ? 0 : 1);
+
dprintk("%s: status = 0x%02x\n", __func__, lock);
return 0;
@@ -568,33 +533,11 @@ static int ds3000_read_ber(struct dvb_frontend *fe, u32* ber)
return 0;
}
-/* read TS2020 signal strength */
static int ds3000_read_signal_strength(struct dvb_frontend *fe,
u16 *signal_strength)
{
- struct ds3000_state *state = fe->demodulator_priv;
- u16 sig_reading, sig_strength;
- u8 rfgain, bbgain;
-
- dprintk("%s()\n", __func__);
-
- rfgain = ds3000_tuner_readreg(state, 0x3d) & 0x1f;
- bbgain = ds3000_tuner_readreg(state, 0x21) & 0x1f;
-
- if (rfgain > 15)
- rfgain = 15;
- if (bbgain > 13)
- bbgain = 13;
-
- sig_reading = rfgain * 2 + bbgain * 3;
-
- sig_strength = 40 + (64 - sig_reading) * 50 / 64 ;
-
- /* cook the value to be suitable for szap-s2 human readable output */
- *signal_strength = sig_strength * 1000;
-
- dprintk("%s: raw / cooked = 0x%04x / 0x%04x\n", __func__,
- sig_reading, *signal_strength);
+ if (fe->ops.tuner_ops.get_rf_strength)
+ fe->ops.tuner_ops.get_rf_strength(fe, signal_strength);
return 0;
}
@@ -878,6 +821,10 @@ static int ds3000_diseqc_send_burst(struct dvb_frontend *fe,
static void ds3000_release(struct dvb_frontend *fe)
{
struct ds3000_state *state = fe->demodulator_priv;
+
+ if (state->config->set_lock_led)
+ state->config->set_lock_led(fe, 0);
+
dprintk("%s\n", __func__);
kfree(state);
}
@@ -952,133 +899,17 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
int i;
fe_status_t status;
- u8 mlpf, mlpf_new, mlpf_max, mlpf_min, nlpf, div4;
s32 offset_khz;
- u16 value, ndiv;
- u32 f3db;
+ u32 frequency;
+ u16 value;
dprintk("%s() ", __func__);
if (state->config->set_ts_params)
state->config->set_ts_params(fe, 0);
/* Tune */
- /* unknown */
- ds3000_tuner_writereg(state, 0x07, 0x02);
- ds3000_tuner_writereg(state, 0x10, 0x00);
- ds3000_tuner_writereg(state, 0x60, 0x79);
- ds3000_tuner_writereg(state, 0x08, 0x01);
- ds3000_tuner_writereg(state, 0x00, 0x01);
- div4 = 0;
-
- /* calculate and set freq divider */
- if (c->frequency < 1146000) {
- ds3000_tuner_writereg(state, 0x10, 0x11);
- div4 = 1;
- ndiv = ((c->frequency * (6 + 8) * 4) +
- (DS3000_XTAL_FREQ / 2)) /
- DS3000_XTAL_FREQ - 1024;
- } else {
- ds3000_tuner_writereg(state, 0x10, 0x01);
- ndiv = ((c->frequency * (6 + 8) * 2) +
- (DS3000_XTAL_FREQ / 2)) /
- DS3000_XTAL_FREQ - 1024;
- }
-
- ds3000_tuner_writereg(state, 0x01, (ndiv & 0x0f00) >> 8);
- ds3000_tuner_writereg(state, 0x02, ndiv & 0x00ff);
-
- /* set pll */
- ds3000_tuner_writereg(state, 0x03, 0x06);
- ds3000_tuner_writereg(state, 0x51, 0x0f);
- ds3000_tuner_writereg(state, 0x51, 0x1f);
- ds3000_tuner_writereg(state, 0x50, 0x10);
- ds3000_tuner_writereg(state, 0x50, 0x00);
- msleep(5);
-
- /* unknown */
- ds3000_tuner_writereg(state, 0x51, 0x17);
- ds3000_tuner_writereg(state, 0x51, 0x1f);
- ds3000_tuner_writereg(state, 0x50, 0x08);
- ds3000_tuner_writereg(state, 0x50, 0x00);
- msleep(5);
-
- value = ds3000_tuner_readreg(state, 0x3d);
- value &= 0x0f;
- if ((value > 4) && (value < 15)) {
- value -= 3;
- if (value < 4)
- value = 4;
- value = ((value << 3) | 0x01) & 0x79;
- }
-
- ds3000_tuner_writereg(state, 0x60, value);
- ds3000_tuner_writereg(state, 0x51, 0x17);
- ds3000_tuner_writereg(state, 0x51, 0x1f);
- ds3000_tuner_writereg(state, 0x50, 0x08);
- ds3000_tuner_writereg(state, 0x50, 0x00);
-
- /* set low-pass filter period */
- ds3000_tuner_writereg(state, 0x04, 0x2e);
- ds3000_tuner_writereg(state, 0x51, 0x1b);
- ds3000_tuner_writereg(state, 0x51, 0x1f);
- ds3000_tuner_writereg(state, 0x50, 0x04);
- ds3000_tuner_writereg(state, 0x50, 0x00);
- msleep(5);
-
- f3db = ((c->symbol_rate / 1000) << 2) / 5 + 2000;
- if ((c->symbol_rate / 1000) < 5000)
- f3db += 3000;
- if (f3db < 7000)
- f3db = 7000;
- if (f3db > 40000)
- f3db = 40000;
-
- /* set low-pass filter baseband */
- value = ds3000_tuner_readreg(state, 0x26);
- mlpf = 0x2e * 207 / ((value << 1) + 151);
- mlpf_max = mlpf * 135 / 100;
- mlpf_min = mlpf * 78 / 100;
- if (mlpf_max > 63)
- mlpf_max = 63;
-
- /* rounded to the closest integer */
- nlpf = ((mlpf * f3db * 1000) + (2766 * DS3000_XTAL_FREQ / 2))
- / (2766 * DS3000_XTAL_FREQ);
- if (nlpf > 23)
- nlpf = 23;
- if (nlpf < 1)
- nlpf = 1;
-
- /* rounded to the closest integer */
- mlpf_new = ((DS3000_XTAL_FREQ * nlpf * 2766) +
- (1000 * f3db / 2)) / (1000 * f3db);
-
- if (mlpf_new < mlpf_min) {
- nlpf++;
- mlpf_new = ((DS3000_XTAL_FREQ * nlpf * 2766) +
- (1000 * f3db / 2)) / (1000 * f3db);
- }
-
- if (mlpf_new > mlpf_max)
- mlpf_new = mlpf_max;
-
- ds3000_tuner_writereg(state, 0x04, mlpf_new);
- ds3000_tuner_writereg(state, 0x06, nlpf);
- ds3000_tuner_writereg(state, 0x51, 0x1b);
- ds3000_tuner_writereg(state, 0x51, 0x1f);
- ds3000_tuner_writereg(state, 0x50, 0x04);
- ds3000_tuner_writereg(state, 0x50, 0x00);
- msleep(5);
-
- /* unknown */
- ds3000_tuner_writereg(state, 0x51, 0x1e);
- ds3000_tuner_writereg(state, 0x51, 0x1f);
- ds3000_tuner_writereg(state, 0x50, 0x01);
- ds3000_tuner_writereg(state, 0x50, 0x00);
- msleep(60);
-
- offset_khz = (ndiv - ndiv % 2 + 1024) * DS3000_XTAL_FREQ
- / (6 + 8) / (div4 + 1) / 2 - c->frequency;
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe);
/* ds3000 global reset */
ds3000_writereg(state, 0x07, 0x80);
@@ -1186,7 +1017,11 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
/* start ds3000 build-in uC */
ds3000_writereg(state, 0xb2, 0x00);
- ds3000_set_carrier_offset(fe, offset_khz);
+ if (fe->ops.tuner_ops.get_frequency) {
+ fe->ops.tuner_ops.get_frequency(fe, &frequency);
+ offset_khz = frequency - c->frequency;
+ ds3000_set_carrier_offset(fe, offset_khz);
+ }
for (i = 0; i < 30 ; i++) {
ds3000_read_status(fe, &status);
@@ -1218,6 +1053,11 @@ static int ds3000_tune(struct dvb_frontend *fe,
static enum dvbfe_algo ds3000_get_algo(struct dvb_frontend *fe)
{
+ struct ds3000_state *state = fe->demodulator_priv;
+
+ if (state->config->set_lock_led)
+ state->config->set_lock_led(fe, 0);
+
dprintk("%s()\n", __func__);
return DVBFE_ALGO_HW;
}
@@ -1237,10 +1077,6 @@ static int ds3000_initfe(struct dvb_frontend *fe)
ds3000_writereg(state, 0x08, 0x01 | ds3000_readreg(state, 0x08));
msleep(1);
- /* TS2020 init */
- ds3000_tuner_writereg(state, 0x42, 0x73);
- ds3000_tuner_writereg(state, 0x05, 0x01);
- ds3000_tuner_writereg(state, 0x62, 0xf5);
/* Load the firmware if required */
ret = ds3000_firmware_ondemand(fe);
if (ret != 0) {
@@ -1251,17 +1087,10 @@ static int ds3000_initfe(struct dvb_frontend *fe)
return 0;
}
-/* Put device to sleep */
-static int ds3000_sleep(struct dvb_frontend *fe)
-{
- dprintk("%s()\n", __func__);
- return 0;
-}
-
static struct dvb_frontend_ops ds3000_ops = {
- .delsys = { SYS_DVBS, SYS_DVBS2},
+ .delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
- .name = "Montage Technology DS3000/TS2020",
+ .name = "Montage Technology DS3000",
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 1011, /* kHz for QPSK frontends */
@@ -1279,7 +1108,7 @@ static struct dvb_frontend_ops ds3000_ops = {
.release = ds3000_release,
.init = ds3000_initfe,
- .sleep = ds3000_sleep,
+ .i2c_gate_ctrl = ds3000_i2c_gate_ctrl,
.read_status = ds3000_read_status,
.read_ber = ds3000_read_ber,
.read_signal_strength = ds3000_read_signal_strength,
@@ -1299,7 +1128,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
MODULE_DESCRIPTION("DVB Frontend module for Montage Technology "
- "DS3000/TS2020 hardware");
-MODULE_AUTHOR("Konstantin Dimitrov");
+ "DS3000 hardware");
+MODULE_AUTHOR("Konstantin Dimitrov <kosio.dimitrov@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DS3000_DEFAULT_FIRMWARE);
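Editorial note: after this split the DS3000 demod no longer drives the tuner registers itself; it goes through fe->ops.tuner_ops, which the separate ts2020 driver fills in at attach time. A simplified sketch of that call pattern, mirroring the hunks above (function name is illustrative):

#include <linux/types.h>
#include "dvb_frontend.h"

/* The demod side only asks the attached tuner driver for the reading;
 * it no longer knows the tuner's I2C register map. */
static int example_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
	*strength = 0;
	if (fe->ops.tuner_ops.get_rf_strength)
		fe->ops.tuner_ops.get_rf_strength(fe, strength);
	return 0;
}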
diff --git a/drivers/media/dvb-frontends/ds3000.h b/drivers/media/dvb-frontends/ds3000.h
index 1b736888ea37..478ad66c63d7 100644
--- a/drivers/media/dvb-frontends/ds3000.h
+++ b/drivers/media/dvb-frontends/ds3000.h
@@ -1,8 +1,8 @@
/*
- Montage Technology DS3000/TS2020 - DVBS/S2 Satellite demod/tuner driver
- Copyright (C) 2009 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
+ Montage Technology DS3000 - DVBS/S2 Demodulator driver
+ Copyright (C) 2009-2012 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
- Copyright (C) 2009 TurboSight.com
+ Copyright (C) 2009-2012 TurboSight.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -17,7 +17,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+ */
#ifndef DS3000_H
#define DS3000_H
@@ -30,6 +30,8 @@ struct ds3000_config {
u8 ci_mode;
/* Set device param to start dma */
int (*set_ts_params)(struct dvb_frontend *fe, int is_punctured);
+ /* Hook for Lock LED */
+ void (*set_lock_led)(struct dvb_frontend *fe, int offon);
};
#if defined(CONFIG_DVB_DS3000) || \
diff --git a/drivers/media/dvb-frontends/dvb-pll.h b/drivers/media/dvb-frontends/dvb-pll.h
index 4de754f76ce9..f4b5a0601c3a 100644
--- a/drivers/media/dvb-frontends/dvb-pll.h
+++ b/drivers/media/dvb-frontends/dvb-pll.h
@@ -38,7 +38,7 @@
* @param pll_desc_id dvb_pll_desc to use.
* @return Frontend pointer on success, NULL on failure
*/
-#if defined(CONFIG_DVB_PLL) || (defined(CONFIG_DVB_PLL_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_PLL)
extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
int pll_addr,
struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/isl6405.h b/drivers/media/dvb-frontends/isl6405.h
index 1c793d37576b..8abb70c26fd9 100644
--- a/drivers/media/dvb-frontends/isl6405.h
+++ b/drivers/media/dvb-frontends/isl6405.h
@@ -55,7 +55,7 @@
#define ISL6405_ENT2 0x20
#define ISL6405_ISEL2 0x40
-#if defined(CONFIG_DVB_ISL6405) || (defined(CONFIG_DVB_ISL6405_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_ISL6405)
/* override_set and override_clear control which system register bits (above)
* to always set & clear
*/
diff --git a/drivers/media/dvb-frontends/isl6421.h b/drivers/media/dvb-frontends/isl6421.h
index 47e4518a042d..e7ca7d12b50a 100644
--- a/drivers/media/dvb-frontends/isl6421.h
+++ b/drivers/media/dvb-frontends/isl6421.h
@@ -39,7 +39,7 @@
#define ISL6421_ISEL1 0x20
#define ISL6421_DCL 0x40
-#if defined(CONFIG_DVB_ISL6421) || (defined(CONFIG_DVB_ISL6421_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_ISL6421)
/* override_set and override_clear control which system register bits (above) to always set & clear */
extern struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 i2c_addr,
u8 override_set, u8 override_clear);
diff --git a/drivers/media/dvb-frontends/isl6423.h b/drivers/media/dvb-frontends/isl6423.h
index e1a37fba01ca..80dfd9cc4f41 100644
--- a/drivers/media/dvb-frontends/isl6423.h
+++ b/drivers/media/dvb-frontends/isl6423.h
@@ -42,7 +42,7 @@ struct isl6423_config {
u8 mod_extern;
};
-#if defined(CONFIG_DVB_ISL6423) || (defined(CONFIG_DVB_ISL6423_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_ISL6423)
extern struct dvb_frontend *isl6423_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/dvb-frontends/itd1000.h b/drivers/media/dvb-frontends/itd1000.h
index 5e18df071b88..edae0902f4fd 100644
--- a/drivers/media/dvb-frontends/itd1000.h
+++ b/drivers/media/dvb-frontends/itd1000.h
@@ -29,7 +29,7 @@ struct itd1000_config {
u8 i2c_address;
};
-#if defined(CONFIG_DVB_TUNER_ITD1000) || (defined(CONFIG_DVB_TUNER_ITD1000_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TUNER_ITD1000)
extern struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct itd1000_config *cfg);
#else
static inline struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct itd1000_config *cfg)
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index bc5a82082aaa..0e3387e00952 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -212,7 +212,7 @@ static int ix2505v_set_params(struct dvb_frontend *fe)
lpf = 0xb;
deb_info("Osc=%x b_w=%x lpf=%x\n", local_osc, b_w, lpf);
- deb_info("Data 0=[%x%x%x%x]\n", data[0], data[1], data[2], data[3]);
+ deb_info("Data 0=[%4phN]\n", data);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
diff --git a/drivers/media/dvb-frontends/l64781.h b/drivers/media/dvb-frontends/l64781.h
index 1305a9e7fb0b..6813b08a774d 100644
--- a/drivers/media/dvb-frontends/l64781.h
+++ b/drivers/media/dvb-frontends/l64781.h
@@ -31,7 +31,7 @@ struct l64781_config
u8 demod_address;
};
-#if defined(CONFIG_DVB_L64781) || (defined(CONFIG_DVB_L64781_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_L64781)
extern struct dvb_frontend* l64781_attach(const struct l64781_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/lgdt330x.h b/drivers/media/dvb-frontends/lgdt330x.h
index 9012504f0f2d..ca0eab562e1e 100644
--- a/drivers/media/dvb-frontends/lgdt330x.h
+++ b/drivers/media/dvb-frontends/lgdt330x.h
@@ -52,7 +52,7 @@ struct lgdt330x_config
int clock_polarity_flip;
};
-#if defined(CONFIG_DVB_LGDT330X) || (defined(CONFIG_DVB_LGDT330X_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_LGDT330X)
extern struct dvb_frontend* lgdt330x_attach(const struct lgdt330x_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 633815ed90ca..4da5272075cb 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -60,15 +60,13 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
#define info(format, arg...) \
printk(KERN_INFO "m88rs2000-fe: " format "\n" , ## arg)
-static int m88rs2000_writereg(struct m88rs2000_state *state, u8 tuner,
+static int m88rs2000_writereg(struct m88rs2000_state *state,
u8 reg, u8 data)
{
int ret;
- u8 addr = (tuner == 0) ? state->config->tuner_addr :
- state->config->demod_addr;
u8 buf[] = { reg, data };
struct i2c_msg msg = {
- .addr = addr,
+ .addr = state->config->demod_addr,
.flags = 0,
.buf = buf,
.len = 2
@@ -83,44 +81,20 @@ static int m88rs2000_writereg(struct m88rs2000_state *state, u8 tuner,
return (ret != 1) ? -EREMOTEIO : 0;
}
-static int m88rs2000_demod_write(struct m88rs2000_state *state, u8 reg, u8 data)
-{
- return m88rs2000_writereg(state, 1, reg, data);
-}
-
-static int m88rs2000_tuner_write(struct m88rs2000_state *state, u8 reg, u8 data)
-{
- m88rs2000_demod_write(state, 0x81, 0x84);
- udelay(10);
- return m88rs2000_writereg(state, 0, reg, data);
-
-}
-
-static int m88rs2000_write(struct dvb_frontend *fe, const u8 buf[], int len)
-{
- struct m88rs2000_state *state = fe->demodulator_priv;
-
- if (len != 2)
- return -EINVAL;
-
- return m88rs2000_writereg(state, 1, buf[0], buf[1]);
-}
-
-static u8 m88rs2000_readreg(struct m88rs2000_state *state, u8 tuner, u8 reg)
+static u8 m88rs2000_readreg(struct m88rs2000_state *state, u8 reg)
{
int ret;
u8 b0[] = { reg };
u8 b1[] = { 0 };
- u8 addr = (tuner == 0) ? state->config->tuner_addr :
- state->config->demod_addr;
+
struct i2c_msg msg[] = {
{
- .addr = addr,
+ .addr = state->config->demod_addr,
.flags = 0,
.buf = b0,
.len = 1
}, {
- .addr = addr,
+ .addr = state->config->demod_addr,
.flags = I2C_M_RD,
.buf = b1,
.len = 1
@@ -136,18 +110,6 @@ static u8 m88rs2000_readreg(struct m88rs2000_state *state, u8 tuner, u8 reg)
return b1[0];
}
-static u8 m88rs2000_demod_read(struct m88rs2000_state *state, u8 reg)
-{
- return m88rs2000_readreg(state, 1, reg);
-}
-
-static u8 m88rs2000_tuner_read(struct m88rs2000_state *state, u8 reg)
-{
- m88rs2000_demod_write(state, 0x81, 0x85);
- udelay(10);
- return m88rs2000_readreg(state, 0, reg);
-}
-
static int m88rs2000_set_symbolrate(struct dvb_frontend *fe, u32 srate)
{
struct m88rs2000_state *state = fe->demodulator_priv;
@@ -166,9 +128,9 @@ static int m88rs2000_set_symbolrate(struct dvb_frontend *fe, u32 srate)
b[0] = (u8) (temp >> 16) & 0xff;
b[1] = (u8) (temp >> 8) & 0xff;
b[2] = (u8) temp & 0xff;
- ret = m88rs2000_demod_write(state, 0x93, b[2]);
- ret |= m88rs2000_demod_write(state, 0x94, b[1]);
- ret |= m88rs2000_demod_write(state, 0x95, b[0]);
+ ret = m88rs2000_writereg(state, 0x93, b[2]);
+ ret |= m88rs2000_writereg(state, 0x94, b[1]);
+ ret |= m88rs2000_writereg(state, 0x95, b[0]);
deb_info("m88rs2000: m88rs2000_set_symbolrate\n");
return ret;
@@ -182,37 +144,37 @@ static int m88rs2000_send_diseqc_msg(struct dvb_frontend *fe,
int i;
u8 reg;
deb_info("%s\n", __func__);
- m88rs2000_demod_write(state, 0x9a, 0x30);
- reg = m88rs2000_demod_read(state, 0xb2);
+ m88rs2000_writereg(state, 0x9a, 0x30);
+ reg = m88rs2000_readreg(state, 0xb2);
reg &= 0x3f;
- m88rs2000_demod_write(state, 0xb2, reg);
+ m88rs2000_writereg(state, 0xb2, reg);
for (i = 0; i < m->msg_len; i++)
- m88rs2000_demod_write(state, 0xb3 + i, m->msg[i]);
+ m88rs2000_writereg(state, 0xb3 + i, m->msg[i]);
- reg = m88rs2000_demod_read(state, 0xb1);
+ reg = m88rs2000_readreg(state, 0xb1);
reg &= 0x87;
reg |= ((m->msg_len - 1) << 3) | 0x07;
reg &= 0x7f;
- m88rs2000_demod_write(state, 0xb1, reg);
+ m88rs2000_writereg(state, 0xb1, reg);
for (i = 0; i < 15; i++) {
- if ((m88rs2000_demod_read(state, 0xb1) & 0x40) == 0x0)
+ if ((m88rs2000_readreg(state, 0xb1) & 0x40) == 0x0)
break;
msleep(20);
}
- reg = m88rs2000_demod_read(state, 0xb1);
+ reg = m88rs2000_readreg(state, 0xb1);
if ((reg & 0x40) > 0x0) {
reg &= 0x7f;
reg |= 0x40;
- m88rs2000_demod_write(state, 0xb1, reg);
+ m88rs2000_writereg(state, 0xb1, reg);
}
- reg = m88rs2000_demod_read(state, 0xb2);
+ reg = m88rs2000_readreg(state, 0xb2);
reg &= 0x3f;
reg |= 0x80;
- m88rs2000_demod_write(state, 0xb2, reg);
- m88rs2000_demod_write(state, 0x9a, 0xb0);
+ m88rs2000_writereg(state, 0xb2, reg);
+ m88rs2000_writereg(state, 0x9a, 0xb0);
return 0;
@@ -224,14 +186,14 @@ static int m88rs2000_send_diseqc_burst(struct dvb_frontend *fe,
struct m88rs2000_state *state = fe->demodulator_priv;
u8 reg0, reg1;
deb_info("%s\n", __func__);
- m88rs2000_demod_write(state, 0x9a, 0x30);
+ m88rs2000_writereg(state, 0x9a, 0x30);
msleep(50);
- reg0 = m88rs2000_demod_read(state, 0xb1);
- reg1 = m88rs2000_demod_read(state, 0xb2);
+ reg0 = m88rs2000_readreg(state, 0xb1);
+ reg1 = m88rs2000_readreg(state, 0xb2);
/* TODO complete this section */
- m88rs2000_demod_write(state, 0xb2, reg1);
- m88rs2000_demod_write(state, 0xb1, reg0);
- m88rs2000_demod_write(state, 0x9a, 0xb0);
+ m88rs2000_writereg(state, 0xb2, reg1);
+ m88rs2000_writereg(state, 0xb1, reg0);
+ m88rs2000_writereg(state, 0x9a, 0xb0);
return 0;
}
@@ -240,9 +202,9 @@ static int m88rs2000_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
struct m88rs2000_state *state = fe->demodulator_priv;
u8 reg0, reg1;
- m88rs2000_demod_write(state, 0x9a, 0x30);
- reg0 = m88rs2000_demod_read(state, 0xb1);
- reg1 = m88rs2000_demod_read(state, 0xb2);
+ m88rs2000_writereg(state, 0x9a, 0x30);
+ reg0 = m88rs2000_readreg(state, 0xb1);
+ reg1 = m88rs2000_readreg(state, 0xb2);
reg1 &= 0x3f;
@@ -257,9 +219,9 @@ static int m88rs2000_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
default:
break;
}
- m88rs2000_demod_write(state, 0xb2, reg1);
- m88rs2000_demod_write(state, 0xb1, reg0);
- m88rs2000_demod_write(state, 0x9a, 0xb0);
+ m88rs2000_writereg(state, 0xb2, reg1);
+ m88rs2000_writereg(state, 0xb1, reg0);
+ m88rs2000_writereg(state, 0x9a, 0xb0);
return 0;
}
@@ -276,14 +238,6 @@ struct inittab m88rs2000_setup[] = {
{DEMOD_WRITE, 0x00, 0x00},
{DEMOD_WRITE, 0x9a, 0xb0},
{DEMOD_WRITE, 0x81, 0xc1},
- {TUNER_WRITE, 0x42, 0x73},
- {TUNER_WRITE, 0x05, 0x07},
- {TUNER_WRITE, 0x20, 0x27},
- {TUNER_WRITE, 0x07, 0x02},
- {TUNER_WRITE, 0x11, 0xff},
- {TUNER_WRITE, 0x60, 0xf9},
- {TUNER_WRITE, 0x08, 0x01},
- {TUNER_WRITE, 0x00, 0x41},
{DEMOD_WRITE, 0x81, 0x81},
{DEMOD_WRITE, 0x86, 0xc6},
{DEMOD_WRITE, 0x9a, 0x30},
@@ -301,23 +255,10 @@ struct inittab m88rs2000_shutdown[] = {
{DEMOD_WRITE, 0xf1, 0x89},
{DEMOD_WRITE, 0x00, 0x01},
{DEMOD_WRITE, 0x9a, 0xb0},
- {TUNER_WRITE, 0x00, 0x40},
{DEMOD_WRITE, 0x81, 0x81},
{0xff, 0xaa, 0xff}
};
-struct inittab tuner_reset[] = {
- {TUNER_WRITE, 0x42, 0x73},
- {TUNER_WRITE, 0x05, 0x07},
- {TUNER_WRITE, 0x20, 0x27},
- {TUNER_WRITE, 0x07, 0x02},
- {TUNER_WRITE, 0x11, 0xff},
- {TUNER_WRITE, 0x60, 0xf9},
- {TUNER_WRITE, 0x08, 0x01},
- {TUNER_WRITE, 0x00, 0x41},
- {0xff, 0xaa, 0xff}
-};
-
struct inittab fe_reset[] = {
{DEMOD_WRITE, 0x00, 0x01},
{DEMOD_WRITE, 0xf1, 0xbf},
@@ -389,11 +330,7 @@ static int m88rs2000_tab_set(struct m88rs2000_state *state,
for (i = 0; i < 255; i++) {
switch (tab[i].cmd) {
case 0x01:
- ret = m88rs2000_demod_write(state, tab[i].reg,
- tab[i].val);
- break;
- case 0x02:
- ret = m88rs2000_tuner_write(state, tab[i].reg,
+ ret = m88rs2000_writereg(state, tab[i].reg,
tab[i].val);
break;
case 0x10:
@@ -419,7 +356,7 @@ static int m88rs2000_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t volt)
struct m88rs2000_state *state = fe->demodulator_priv;
u8 data;
- data = m88rs2000_demod_read(state, 0xb2);
+ data = m88rs2000_readreg(state, 0xb2);
data |= 0x03; /* bit0 V/H, bit1 off/on */
switch (volt) {
@@ -434,23 +371,11 @@ static int m88rs2000_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t volt)
break;
}
- m88rs2000_demod_write(state, 0xb2, data);
+ m88rs2000_writereg(state, 0xb2, data);
return 0;
}
-static int m88rs2000_startup(struct m88rs2000_state *state)
-{
- int ret = 0;
- u8 reg;
-
- reg = m88rs2000_tuner_read(state, 0x00);
- if ((reg & 0x40) == 0)
- ret = -ENODEV;
-
- return ret;
-}
-
static int m88rs2000_init(struct dvb_frontend *fe)
{
struct m88rs2000_state *state = fe->demodulator_priv;
@@ -458,7 +383,11 @@ static int m88rs2000_init(struct dvb_frontend *fe)
deb_info("m88rs2000: init chip\n");
/* Setup frontend from shutdown/cold */
- ret = m88rs2000_tab_set(state, m88rs2000_setup);
+ if (state->config->inittab)
+ ret = m88rs2000_tab_set(state,
+ (struct inittab *)state->config->inittab);
+ else
+ ret = m88rs2000_tab_set(state, m88rs2000_setup);
return ret;
}
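
A minimal sketch of how a bridge driver could use the new config->inittab hook instead of the built-in m88rs2000_setup table. The register writes are copied from the default table above; the demod address, the array name, and the assumption that struct inittab and DEMOD_WRITE (see m88rs2000.h below) are visible to the caller are placeholders, not part of this patch.

static struct inittab my_board_inittab[] = {
	{DEMOD_WRITE, 0x00, 0x00},
	{DEMOD_WRITE, 0x9a, 0xb0},
	{DEMOD_WRITE, 0x81, 0xc1},
	{DEMOD_WRITE, 0x81, 0x81},
	{0xff, 0xaa, 0xff}		/* table terminator */
};

static const struct m88rs2000_config my_board_config = {
	.demod_addr = 0x68,		/* hypothetical I2C address */
	.inittab = (u8 *)my_board_inittab,
};
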
@@ -475,7 +404,7 @@ static int m88rs2000_sleep(struct dvb_frontend *fe)
static int m88rs2000_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct m88rs2000_state *state = fe->demodulator_priv;
- u8 reg = m88rs2000_demod_read(state, 0x8c);
+ u8 reg = m88rs2000_readreg(state, 0x8c);
*status = 0;
@@ -488,183 +417,64 @@ static int m88rs2000_read_status(struct dvb_frontend *fe, fe_status_t *status)
return 0;
}
-/* Extact code for these unknown but lmedm04 driver uses interupt callbacks */
-
static int m88rs2000_read_ber(struct dvb_frontend *fe, u32 *ber)
{
- deb_info("m88rs2000_read_ber %d\n", *ber);
- *ber = 0;
+ struct m88rs2000_state *state = fe->demodulator_priv;
+ u8 tmp0, tmp1;
+
+ m88rs2000_writereg(state, 0x9a, 0x30);
+ tmp0 = m88rs2000_readreg(state, 0xd8);
+ if ((tmp0 & 0x10) != 0) {
+ m88rs2000_writereg(state, 0x9a, 0xb0);
+ *ber = 0xffffffff;
+ return 0;
+ }
+
+ *ber = (m88rs2000_readreg(state, 0xd7) << 8) |
+ m88rs2000_readreg(state, 0xd6);
+
+ tmp1 = m88rs2000_readreg(state, 0xd9);
+ m88rs2000_writereg(state, 0xd9, (tmp1 & ~7) | 4);
+ /* needs to be written twice */
+ m88rs2000_writereg(state, 0xd8, (tmp0 & ~8) | 0x30);
+ m88rs2000_writereg(state, 0xd8, (tmp0 & ~8) | 0x30);
+ m88rs2000_writereg(state, 0x9a, 0xb0);
+
return 0;
}
static int m88rs2000_read_signal_strength(struct dvb_frontend *fe,
u16 *strength)
{
- *strength = 0;
- return 0;
-}
+ if (fe->ops.tuner_ops.get_rf_strength)
+ fe->ops.tuner_ops.get_rf_strength(fe, strength);
-static int m88rs2000_read_snr(struct dvb_frontend *fe, u16 *snr)
-{
- deb_info("m88rs2000_read_snr %d\n", *snr);
- *snr = 0;
return 0;
}
-static int m88rs2000_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
-{
- deb_info("m88rs2000_read_ber %d\n", *ucblocks);
- *ucblocks = 0;
- return 0;
-}
-
-static int m88rs2000_tuner_gate_ctrl(struct m88rs2000_state *state, u8 offset)
-{
- int ret;
- ret = m88rs2000_tuner_write(state, 0x51, 0x1f - offset);
- ret |= m88rs2000_tuner_write(state, 0x51, 0x1f);
- ret |= m88rs2000_tuner_write(state, 0x50, offset);
- ret |= m88rs2000_tuner_write(state, 0x50, 0x00);
- msleep(20);
- return ret;
-}
-
-static int m88rs2000_set_tuner_rf(struct dvb_frontend *fe)
+static int m88rs2000_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct m88rs2000_state *state = fe->demodulator_priv;
- int reg;
- reg = m88rs2000_tuner_read(state, 0x3d);
- reg &= 0x7f;
- if (reg < 0x16)
- reg = 0xa1;
- else if (reg == 0x16)
- reg = 0x99;
- else
- reg = 0xf9;
- m88rs2000_tuner_write(state, 0x60, reg);
- reg = m88rs2000_tuner_gate_ctrl(state, 0x08);
+ *snr = 512 * m88rs2000_readreg(state, 0x65);
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
- return reg;
+ return 0;
}
-static int m88rs2000_set_tuner(struct dvb_frontend *fe, u16 *offset)
+static int m88rs2000_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct m88rs2000_state *state = fe->demodulator_priv;
- int ret;
- u32 frequency = c->frequency;
- s32 offset_khz;
- s32 tmp;
- u32 symbol_rate = (c->symbol_rate / 1000);
- u32 f3db, gdiv28;
- u16 value, ndiv, lpf_coeff;
- u8 lpf_mxdiv, mlpf_max, mlpf_min, nlpf;
- u8 lo = 0x01, div4 = 0x0;
-
- /* Reset Tuner */
- ret = m88rs2000_tab_set(state, tuner_reset);
-
- /* Calculate frequency divider */
- if (frequency < 1060000) {
- lo |= 0x10;
- div4 = 0x1;
- ndiv = (frequency * 14 * 4) / FE_CRYSTAL_KHZ;
- } else
- ndiv = (frequency * 14 * 2) / FE_CRYSTAL_KHZ;
- ndiv = ndiv + ndiv % 2;
- ndiv = ndiv - 1024;
-
- ret = m88rs2000_tuner_write(state, 0x10, 0x80 | lo);
-
- /* Set frequency divider */
- ret |= m88rs2000_tuner_write(state, 0x01, (ndiv >> 8) & 0xf);
- ret |= m88rs2000_tuner_write(state, 0x02, ndiv & 0xff);
-
- ret |= m88rs2000_tuner_write(state, 0x03, 0x06);
- ret |= m88rs2000_tuner_gate_ctrl(state, 0x10);
- if (ret < 0)
- return -ENODEV;
-
- /* Tuner Frequency Range */
- ret = m88rs2000_tuner_write(state, 0x10, lo);
-
- ret |= m88rs2000_tuner_gate_ctrl(state, 0x08);
-
- /* Tuner RF */
- ret |= m88rs2000_set_tuner_rf(fe);
-
- gdiv28 = (FE_CRYSTAL_KHZ / 1000 * 1694 + 500) / 1000;
- ret |= m88rs2000_tuner_write(state, 0x04, gdiv28 & 0xff);
- ret |= m88rs2000_tuner_gate_ctrl(state, 0x04);
- if (ret < 0)
- return -ENODEV;
-
- value = m88rs2000_tuner_read(state, 0x26);
-
- f3db = (symbol_rate * 135) / 200 + 2000;
- f3db += FREQ_OFFSET_LOW_SYM_RATE;
- if (f3db < 7000)
- f3db = 7000;
- if (f3db > 40000)
- f3db = 40000;
-
- gdiv28 = gdiv28 * 207 / (value * 2 + 151);
- mlpf_max = gdiv28 * 135 / 100;
- mlpf_min = gdiv28 * 78 / 100;
- if (mlpf_max > 63)
- mlpf_max = 63;
-
- lpf_coeff = 2766;
-
- nlpf = (f3db * gdiv28 * 2 / lpf_coeff /
- (FE_CRYSTAL_KHZ / 1000) + 1) / 2;
- if (nlpf > 23)
- nlpf = 23;
- if (nlpf < 1)
- nlpf = 1;
-
- lpf_mxdiv = (nlpf * (FE_CRYSTAL_KHZ / 1000)
- * lpf_coeff * 2 / f3db + 1) / 2;
-
- if (lpf_mxdiv < mlpf_min) {
- nlpf++;
- lpf_mxdiv = (nlpf * (FE_CRYSTAL_KHZ / 1000)
- * lpf_coeff * 2 / f3db + 1) / 2;
- }
-
- if (lpf_mxdiv > mlpf_max)
- lpf_mxdiv = mlpf_max;
-
- ret = m88rs2000_tuner_write(state, 0x04, lpf_mxdiv);
- ret |= m88rs2000_tuner_write(state, 0x06, nlpf);
-
- ret |= m88rs2000_tuner_gate_ctrl(state, 0x04);
-
- ret |= m88rs2000_tuner_gate_ctrl(state, 0x01);
-
- msleep(80);
- /* calculate offset assuming 96000kHz*/
- offset_khz = (ndiv - ndiv % 2 + 1024) * FE_CRYSTAL_KHZ
- / 14 / (div4 + 1) / 2;
+ u8 tmp;
- offset_khz -= frequency;
+ *ucblocks = (m88rs2000_readreg(state, 0xd5) << 8) |
+ m88rs2000_readreg(state, 0xd4);
+ tmp = m88rs2000_readreg(state, 0xd8);
+ m88rs2000_writereg(state, 0xd8, tmp & ~0x20);
+ /* needs to be written twice */
+ m88rs2000_writereg(state, 0xd8, tmp | 0x20);
+ m88rs2000_writereg(state, 0xd8, tmp | 0x20);
- tmp = offset_khz;
- tmp *= 65536;
-
- tmp = (2 * tmp + 96000) / (2 * 96000);
- if (tmp < 0)
- tmp += 65536;
-
- *offset = tmp & 0xffff;
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
-
- return (ret < 0) ? -EINVAL : 0;
+ return 0;
}
static int m88rs2000_set_fec(struct m88rs2000_state *state,
@@ -692,7 +502,7 @@ static int m88rs2000_set_fec(struct m88rs2000_state *state,
default:
fec_set = 0x08;
}
- m88rs2000_demod_write(state, 0x76, fec_set);
+ m88rs2000_writereg(state, 0x76, fec_set);
return 0;
}
@@ -701,9 +511,9 @@ static int m88rs2000_set_fec(struct m88rs2000_state *state,
static fe_code_rate_t m88rs2000_get_fec(struct m88rs2000_state *state)
{
u8 reg;
- m88rs2000_demod_write(state, 0x9a, 0x30);
- reg = m88rs2000_demod_read(state, 0x76);
- m88rs2000_demod_write(state, 0x9a, 0xb0);
+ m88rs2000_writereg(state, 0x9a, 0x30);
+ reg = m88rs2000_readreg(state, 0x76);
+ m88rs2000_writereg(state, 0x9a, 0xb0);
switch (reg) {
case 0x88:
@@ -729,7 +539,9 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
struct m88rs2000_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
fe_status_t status;
- int i, ret;
+ int i, ret = 0;
+ s32 tmp;
+ u32 tuner_freq;
u16 offset = 0;
u8 reg;
@@ -743,17 +555,37 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
}
/* Set Tuner */
- ret = m88rs2000_set_tuner(fe, &offset);
+ if (fe->ops.tuner_ops.set_params)
+ ret = fe->ops.tuner_ops.set_params(fe);
+
+ if (ret < 0)
+ return -ENODEV;
+
+ if (fe->ops.tuner_ops.get_frequency)
+ ret = fe->ops.tuner_ops.get_frequency(fe, &tuner_freq);
+
if (ret < 0)
return -ENODEV;
- ret = m88rs2000_demod_write(state, 0x9a, 0x30);
+ offset = tuner_freq - c->frequency;
+
+ /* calculate offset assuming 96000 kHz */
+ tmp = offset;
+ tmp *= 65536;
+
+ tmp = (2 * tmp + 96000) / (2 * 96000);
+ if (tmp < 0)
+ tmp += 65536;
+
+ offset = tmp & 0xffff;
+
+ ret = m88rs2000_writereg(state, 0x9a, 0x30);
/* Unknown usually 0xc6 sometimes 0xc1 */
- reg = m88rs2000_demod_read(state, 0x86);
- ret |= m88rs2000_demod_write(state, 0x86, reg);
+ reg = m88rs2000_readreg(state, 0x86);
+ ret |= m88rs2000_writereg(state, 0x86, reg);
/* Offset lower nibble always 0 */
- ret |= m88rs2000_demod_write(state, 0x9c, (offset >> 8));
- ret |= m88rs2000_demod_write(state, 0x9d, offset & 0xf0);
+ ret |= m88rs2000_writereg(state, 0x9c, (offset >> 8));
+ ret |= m88rs2000_writereg(state, 0x9d, offset & 0xf0);
/* Reset Demod */
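
A standalone sketch of the fixed-point conversion done above: the tuner error in kHz is scaled by 65536/96000 with rounding and wrapped into 16 bits before being split across registers 0x9c/0x9d. The helper name is hypothetical; the arithmetic simply mirrors the in-line code.

/* Convert a tuner offset in kHz into the 16-bit register value,
 * assuming a 96000 kHz reference (sketch only).
 */
static u16 m88rs2000_offset_to_reg(s32 offset_khz)
{
	s32 tmp = offset_khz * 65536;

	tmp = (2 * tmp + 96000) / (2 * 96000);	/* scale with rounding */
	if (tmp < 0)
		tmp += 65536;			/* two's-complement wrap */

	return tmp & 0xffff;
}
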
@@ -762,16 +594,16 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
return -ENODEV;
/* Unknown */
- reg = m88rs2000_demod_read(state, 0x70);
- ret = m88rs2000_demod_write(state, 0x70, reg);
+ reg = m88rs2000_readreg(state, 0x70);
+ ret = m88rs2000_writereg(state, 0x70, reg);
/* Set FEC */
ret |= m88rs2000_set_fec(state, c->fec_inner);
- ret |= m88rs2000_demod_write(state, 0x85, 0x1);
- ret |= m88rs2000_demod_write(state, 0x8a, 0xbf);
- ret |= m88rs2000_demod_write(state, 0x8d, 0x1e);
- ret |= m88rs2000_demod_write(state, 0x90, 0xf1);
- ret |= m88rs2000_demod_write(state, 0x91, 0x08);
+ ret |= m88rs2000_writereg(state, 0x85, 0x1);
+ ret |= m88rs2000_writereg(state, 0x8a, 0xbf);
+ ret |= m88rs2000_writereg(state, 0x8d, 0x1e);
+ ret |= m88rs2000_writereg(state, 0x90, 0xf1);
+ ret |= m88rs2000_writereg(state, 0x91, 0x08);
if (ret < 0)
return -ENODEV;
@@ -787,27 +619,25 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
return -ENODEV;
for (i = 0; i < 25; i++) {
- reg = m88rs2000_demod_read(state, 0x8c);
+ reg = m88rs2000_readreg(state, 0x8c);
if ((reg & 0x7) == 0x7) {
status = FE_HAS_LOCK;
break;
}
state->no_lock_count++;
if (state->no_lock_count == 15) {
- reg = m88rs2000_demod_read(state, 0x70);
+ reg = m88rs2000_readreg(state, 0x70);
reg ^= 0x4;
- m88rs2000_demod_write(state, 0x70, reg);
+ m88rs2000_writereg(state, 0x70, reg);
state->no_lock_count = 0;
}
- if (state->no_lock_count == 20)
- m88rs2000_set_tuner_rf(fe);
msleep(20);
}
if (status & FE_HAS_LOCK) {
state->fec_inner = m88rs2000_get_fec(state);
/* Uknown suspect SNR level */
- reg = m88rs2000_demod_read(state, 0x65);
+ reg = m88rs2000_readreg(state, 0x65);
}
state->tuner_frequency = c->frequency;
@@ -830,9 +660,9 @@ static int m88rs2000_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
struct m88rs2000_state *state = fe->demodulator_priv;
if (enable)
- m88rs2000_demod_write(state, 0x81, 0x84);
+ m88rs2000_writereg(state, 0x81, 0x84);
else
- m88rs2000_demod_write(state, 0x81, 0x81);
+ m88rs2000_writereg(state, 0x81, 0x81);
udelay(10);
return 0;
}
@@ -863,7 +693,6 @@ static struct dvb_frontend_ops m88rs2000_ops = {
.release = m88rs2000_release,
.init = m88rs2000_init,
.sleep = m88rs2000_sleep,
- .write = m88rs2000_write,
.i2c_gate_ctrl = m88rs2000_i2c_gate_ctrl,
.read_status = m88rs2000_read_status,
.read_ber = m88rs2000_read_ber,
@@ -896,9 +725,6 @@ struct dvb_frontend *m88rs2000_attach(const struct m88rs2000_config *config,
state->symbol_rate = 0;
state->fec_inner = 0;
- if (m88rs2000_startup(state) < 0)
- goto error;
-
/* create dvb_frontend */
memcpy(&state->frontend.ops, &m88rs2000_ops,
sizeof(struct dvb_frontend_ops));
diff --git a/drivers/media/dvb-frontends/m88rs2000.h b/drivers/media/dvb-frontends/m88rs2000.h
index 59acdb696873..5a8023e5a4b8 100644
--- a/drivers/media/dvb-frontends/m88rs2000.h
+++ b/drivers/media/dvb-frontends/m88rs2000.h
@@ -26,8 +26,6 @@
struct m88rs2000_config {
/* Demodulator i2c address */
u8 demod_addr;
- /* Tuner address */
- u8 tuner_addr;
u8 *inittab;
@@ -55,12 +53,8 @@ static inline struct dvb_frontend *m88rs2000_attach(
}
#endif /* CONFIG_DVB_M88RS2000 */
-#define FE_CRYSTAL_KHZ 27000
-#define FREQ_OFFSET_LOW_SYM_RATE 3000
-
enum {
DEMOD_WRITE = 0x1,
- TUNER_WRITE,
WRITE_DELAY = 0x10,
};
#endif /* M88RS2000_H */
diff --git a/drivers/media/dvb-frontends/mb86a16.h b/drivers/media/dvb-frontends/mb86a16.h
index 6ea8c376394f..277ce061acf9 100644
--- a/drivers/media/dvb-frontends/mb86a16.h
+++ b/drivers/media/dvb-frontends/mb86a16.h
@@ -33,7 +33,7 @@ struct mb86a16_config {
-#if defined(CONFIG_DVB_MB86A16) || (defined(CONFIG_DVB_MB86A16_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_MB86A16)
extern struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
struct i2c_adapter *i2c_adap);
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index fade566927c3..f19cd7367040 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -1,11 +1,9 @@
/*
* Fujitu mb86a20s ISDB-T/ISDB-Tsb Module driver
*
- * Copyright (C) 2010 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2010-2013 Mauro Carvalho Chehab <mchehab@redhat.com>
* Copyright (C) 2009-2010 Douglas Landgraf <dougsland@redhat.com>
*
- * FIXME: Need to port to DVB v5.2 API
- *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
@@ -26,24 +24,15 @@ static int debug = 1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
-#define rc(args...) do { \
- printk(KERN_ERR "mb86a20s: " args); \
-} while (0)
-
-#define dprintk(args...) \
- do { \
- if (debug) { \
- printk(KERN_DEBUG "mb86a20s: %s: ", __func__); \
- printk(args); \
- } \
- } while (0)
-
struct mb86a20s_state {
struct i2c_adapter *i2c;
const struct mb86a20s_config *config;
+ u32 last_frequency;
struct dvb_frontend frontend;
+ u32 estimated_rate[3];
+
bool need_init;
};
@@ -52,6 +41,8 @@ struct regdata {
u8 data;
};
+#define BER_SAMPLING_RATE 1 /* Seconds */
+
/*
* Initialization sequence: Use whatevere default values that PV SBTVD
* does on its initialisation, obtained via USB snoop
@@ -94,41 +85,68 @@ static struct regdata mb86a20s_init[] = {
{ 0x04, 0x13 }, { 0x05, 0xff },
{ 0x04, 0x15 }, { 0x05, 0x4e },
{ 0x04, 0x16 }, { 0x05, 0x20 },
- { 0x52, 0x01 },
- { 0x50, 0xa7 }, { 0x51, 0xff },
+
+ /*
+ * On this demod, when the bit count reaches the count below,
+ * it collects the bit error count. The bit counters are initialized
+ * to 65535 here. This ensures that all of them will be quickly
+ * calculated when the device gets locked. As TMCC is parsed, the values
+ * will be adjusted later in the driver's code.
+ */
+ { 0x52, 0x01 }, /* Turn on BER before Viterbi */
+ { 0x50, 0xa7 }, { 0x51, 0x00 },
{ 0x50, 0xa8 }, { 0x51, 0xff },
{ 0x50, 0xa9 }, { 0x51, 0xff },
- { 0x50, 0xaa }, { 0x51, 0xff },
+ { 0x50, 0xaa }, { 0x51, 0x00 },
{ 0x50, 0xab }, { 0x51, 0xff },
{ 0x50, 0xac }, { 0x51, 0xff },
- { 0x50, 0xad }, { 0x51, 0xff },
+ { 0x50, 0xad }, { 0x51, 0x00 },
{ 0x50, 0xae }, { 0x51, 0xff },
{ 0x50, 0xaf }, { 0x51, 0xff },
- { 0x5e, 0x07 },
- { 0x50, 0xdc }, { 0x51, 0x01 },
- { 0x50, 0xdd }, { 0x51, 0xf4 },
- { 0x50, 0xde }, { 0x51, 0x01 },
- { 0x50, 0xdf }, { 0x51, 0xf4 },
- { 0x50, 0xe0 }, { 0x51, 0x01 },
- { 0x50, 0xe1 }, { 0x51, 0xf4 },
- { 0x50, 0xb0 }, { 0x51, 0x07 },
- { 0x50, 0xb2 }, { 0x51, 0xff },
- { 0x50, 0xb3 }, { 0x51, 0xff },
- { 0x50, 0xb4 }, { 0x51, 0xff },
- { 0x50, 0xb5 }, { 0x51, 0xff },
- { 0x50, 0xb6 }, { 0x51, 0xff },
- { 0x50, 0xb7 }, { 0x51, 0xff },
- { 0x50, 0x50 }, { 0x51, 0x02 },
- { 0x50, 0x51 }, { 0x51, 0x04 },
- { 0x45, 0x04 },
- { 0x48, 0x04 },
+
+ /*
+ * On this demod, post BER counts blocks. When the count reaches the
+ * value below, it collects the block error count. The block counters
+ * are initialized to 127 here. This ensures that all of them will be
+ * quickly calculated when the device gets locked. As TMCC is parsed, the
+ * values will be adjusted later in the driver's code.
+ */
+ { 0x5e, 0x07 }, /* Turn on BER after Viterbi */
+ { 0x50, 0xdc }, { 0x51, 0x00 },
+ { 0x50, 0xdd }, { 0x51, 0x7f },
+ { 0x50, 0xde }, { 0x51, 0x00 },
+ { 0x50, 0xdf }, { 0x51, 0x7f },
+ { 0x50, 0xe0 }, { 0x51, 0x00 },
+ { 0x50, 0xe1 }, { 0x51, 0x7f },
+
+ /*
+ * On this demod, when the block count reaches the count below,
+ * it collects the block error count. The block counters are initialized
+ * to 127 here. This ensures that all of them will be quickly
+ * calculated when the device gets locked. As TMCC is parsed, the values
+ * will be adjusted later in the driver's code.
+ */
+ { 0x50, 0xb0 }, { 0x51, 0x07 }, /* Enable PER */
+ { 0x50, 0xb2 }, { 0x51, 0x00 },
+ { 0x50, 0xb3 }, { 0x51, 0x7f },
+ { 0x50, 0xb4 }, { 0x51, 0x00 },
+ { 0x50, 0xb5 }, { 0x51, 0x7f },
+ { 0x50, 0xb6 }, { 0x51, 0x00 },
+ { 0x50, 0xb7 }, { 0x51, 0x7f },
+
+ { 0x50, 0x50 }, { 0x51, 0x02 }, /* MER manual mode */
+ { 0x50, 0x51 }, { 0x51, 0x04 }, /* MER symbol 4 */
+ { 0x45, 0x04 }, /* CN symbol 4 */
+ { 0x48, 0x04 }, /* CN manual mode */
+
{ 0x50, 0xd5 }, { 0x51, 0x01 }, /* Serial */
{ 0x50, 0xd6 }, { 0x51, 0x1f },
{ 0x50, 0xd2 }, { 0x51, 0x03 },
{ 0x50, 0xd7 }, { 0x51, 0x3f },
{ 0x28, 0x74 }, { 0x29, 0x00 }, { 0x28, 0x74 }, { 0x29, 0x40 },
{ 0x28, 0x46 }, { 0x29, 0x2c }, { 0x28, 0x46 }, { 0x29, 0x0c },
- { 0x04, 0x40 }, { 0x05, 0x01 },
+
+ { 0x04, 0x40 }, { 0x05, 0x00 },
{ 0x28, 0x00 }, { 0x29, 0x10 },
{ 0x28, 0x05 }, { 0x29, 0x02 },
{ 0x1c, 0x01 },
@@ -176,8 +194,24 @@ static struct regdata mb86a20s_reset_reception[] = {
{ 0x08, 0x00 },
};
+static struct regdata mb86a20s_per_ber_reset[] = {
+ { 0x53, 0x00 }, /* pre BER Counter reset */
+ { 0x53, 0x07 },
+
+ { 0x5f, 0x00 }, /* post BER Counter reset */
+ { 0x5f, 0x07 },
+
+ { 0x50, 0xb1 }, /* PER Counter reset */
+ { 0x51, 0x07 },
+ { 0x51, 0x00 },
+};
+
+/*
+ * I2C read/write functions and macros
+ */
+
static int mb86a20s_i2c_writereg(struct mb86a20s_state *state,
- u8 i2c_addr, int reg, int data)
+ u8 i2c_addr, u8 reg, u8 data)
{
u8 buf[] = { reg, data };
struct i2c_msg msg = {
@@ -187,8 +221,9 @@ static int mb86a20s_i2c_writereg(struct mb86a20s_state *state,
rc = i2c_transfer(state->i2c, &msg, 1);
if (rc != 1) {
- printk("%s: writereg error (rc == %i, reg == 0x%02x,"
- " data == 0x%02x)\n", __func__, rc, reg, data);
+ dev_err(&state->i2c->dev,
+ "%s: writereg error (rc == %i, reg == 0x%02x, data == 0x%02x)\n",
+ __func__, rc, reg, data);
return rc;
}
@@ -222,8 +257,9 @@ static int mb86a20s_i2c_readreg(struct mb86a20s_state *state,
rc = i2c_transfer(state->i2c, msg, 2);
if (rc != 2) {
- rc("%s: reg=0x%x (error=%d)\n", __func__, reg, rc);
- return rc;
+ dev_err(&state->i2c->dev, "%s: reg=0x%x (error=%d)\n",
+ __func__, reg, rc);
+ return (rc < 0) ? rc : -EIO;
}
return val;
@@ -237,100 +273,22 @@ static int mb86a20s_i2c_readreg(struct mb86a20s_state *state,
mb86a20s_i2c_writeregdata(state, state->config->demod_address, \
regdata, ARRAY_SIZE(regdata))
-static int mb86a20s_initfe(struct dvb_frontend *fe)
-{
- struct mb86a20s_state *state = fe->demodulator_priv;
- int rc;
- u8 regD5 = 1;
-
- dprintk("\n");
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
-
- /* Initialize the frontend */
- rc = mb86a20s_writeregdata(state, mb86a20s_init);
- if (rc < 0)
- goto err;
-
- if (!state->config->is_serial) {
- regD5 &= ~1;
-
- rc = mb86a20s_writereg(state, 0x50, 0xd5);
- if (rc < 0)
- goto err;
- rc = mb86a20s_writereg(state, 0x51, regD5);
- if (rc < 0)
- goto err;
- }
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
-
-err:
- if (rc < 0) {
- state->need_init = true;
- printk(KERN_INFO "mb86a20s: Init failed. Will try again later\n");
- } else {
- state->need_init = false;
- dprintk("Initialization succeeded.\n");
- }
- return rc;
-}
-
-static int mb86a20s_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
-{
- struct mb86a20s_state *state = fe->demodulator_priv;
- unsigned rf_max, rf_min, rf;
- u8 val;
-
- dprintk("\n");
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
-
- /* Does a binary search to get RF strength */
- rf_max = 0xfff;
- rf_min = 0;
- do {
- rf = (rf_max + rf_min) / 2;
- mb86a20s_writereg(state, 0x04, 0x1f);
- mb86a20s_writereg(state, 0x05, rf >> 8);
- mb86a20s_writereg(state, 0x04, 0x20);
- mb86a20s_writereg(state, 0x04, rf);
-
- val = mb86a20s_readreg(state, 0x02);
- if (val & 0x08)
- rf_min = (rf_max + rf_min) / 2;
- else
- rf_max = (rf_max + rf_min) / 2;
- if (rf_max - rf_min < 4) {
- *strength = (((rf_max + rf_min) / 2) * 65535) / 4095;
- break;
- }
- } while (1);
-
- dprintk("signal strength = %d\n", *strength);
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
-
- return 0;
-}
+/*
+ * Ancillary internal routines (likely compiled inline)
+ *
+ * The functions below assume that the I2C gate has already been opened
+ */
static int mb86a20s_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct mb86a20s_state *state = fe->demodulator_priv;
- u8 val;
+ int val;
- dprintk("\n");
*status = 0;
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
val = mb86a20s_readreg(state, 0x0a) & 0xf;
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
+ if (val < 0)
+ return val;
if (val >= 2)
*status |= FE_HAS_SIGNAL;
@@ -347,49 +305,56 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, fe_status_t *status)
if (val >= 8) /* Maybe 9? */
*status |= FE_HAS_LOCK;
- dprintk("val = %d, status = 0x%02x\n", val, *status);
+ dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n",
+ __func__, *status, val);
return 0;
}
-static int mb86a20s_set_frontend(struct dvb_frontend *fe)
+static int mb86a20s_read_signal_strength(struct dvb_frontend *fe)
{
struct mb86a20s_state *state = fe->demodulator_priv;
int rc;
-#if 0
- /*
- * FIXME: Properly implement the set frontend properties
- */
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
-#endif
-
- dprintk("\n");
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- dprintk("Calling tuner set parameters\n");
- fe->ops.tuner_ops.set_params(fe);
+ unsigned rf_max, rf_min, rf;
- /*
- * Make it more reliable: if, for some reason, the initial
- * device initialization doesn't happen, initialize it when
- * a SBTVD parameters are adjusted.
- *
- * Unfortunately, due to a hard to track bug at tda829x/tda18271,
- * the agc callback logic is not called during DVB attach time,
- * causing mb86a20s to not be initialized with Kworld SBTVD.
- * So, this hack is needed, in order to make Kworld SBTVD to work.
- */
- if (state->need_init)
- mb86a20s_initfe(fe);
+ /* Does a binary search to get RF strength */
+ rf_max = 0xfff;
+ rf_min = 0;
+ do {
+ rf = (rf_max + rf_min) / 2;
+ rc = mb86a20s_writereg(state, 0x04, 0x1f);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x05, rf >> 8);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x04, 0x20);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x04, rf);
+ if (rc < 0)
+ return rc;
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
- rc = mb86a20s_writeregdata(state, mb86a20s_reset_reception);
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
+ rc = mb86a20s_readreg(state, 0x02);
+ if (rc < 0)
+ return rc;
+ if (rc & 0x08)
+ rf_min = (rf_max + rf_min) / 2;
+ else
+ rf_max = (rf_max + rf_min) / 2;
+ if (rf_max - rf_min < 4) {
+ rf = (rf_max + rf_min) / 2;
+
+ /* Rescale it from 2^12 (4096) to 2^16 */
+ rf <<= (16 - 12);
+ dev_dbg(&state->i2c->dev,
+ "%s: signal strength = %d (%d < RF=%d < %d)\n",
+ __func__, rf, rf_min, rf >> 4, rf_max);
+ return rf;
+ }
+ } while (1);
- return rc;
+ return 0;
}
static int mb86a20s_get_modulation(struct mb86a20s_state *state,
@@ -410,7 +375,7 @@ static int mb86a20s_get_modulation(struct mb86a20s_state *state,
rc = mb86a20s_readreg(state, 0x6e);
if (rc < 0)
return rc;
- switch ((rc & 0x70) >> 4) {
+ switch ((rc >> 4) & 0x07) {
case 0:
return DQPSK;
case 1:
@@ -443,7 +408,7 @@ static int mb86a20s_get_fec(struct mb86a20s_state *state,
rc = mb86a20s_readreg(state, 0x6e);
if (rc < 0)
return rc;
- switch (rc) {
+ switch ((rc >> 4) & 0x07) {
case 0:
return FEC_1_2;
case 1:
@@ -478,24 +443,38 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
rc = mb86a20s_readreg(state, 0x6e);
if (rc < 0)
return rc;
- if (rc > 3)
- return -EINVAL; /* Not used */
- return rc;
+
+ switch ((rc >> 4) & 0x07) {
+ case 1:
+ return GUARD_INTERVAL_1_4;
+ case 2:
+ return GUARD_INTERVAL_1_8;
+ case 3:
+ return GUARD_INTERVAL_1_16;
+ case 4:
+ return GUARD_INTERVAL_1_32;
+
+ default:
+ case 0:
+ return GUARD_INTERVAL_AUTO;
+ }
}
static int mb86a20s_get_segment_count(struct mb86a20s_state *state,
unsigned layer)
{
int rc, count;
-
static unsigned char reg[] = {
[0] = 0x89, /* Layer A */
[1] = 0x8d, /* Layer B */
[2] = 0x91, /* Layer C */
};
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
if (layer >= ARRAY_SIZE(reg))
return -EINVAL;
+
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
return rc;
@@ -504,113 +483,1451 @@ static int mb86a20s_get_segment_count(struct mb86a20s_state *state,
return rc;
count = (rc >> 4) & 0x0f;
+ dev_dbg(&state->i2c->dev, "%s: segments: %d.\n", __func__, count);
+
return count;
}
+static void mb86a20s_reset_frontend_cache(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ /* Fixed parameters */
+ c->delivery_system = SYS_ISDBT;
+ c->bandwidth_hz = 6000000;
+
+ /* Initialize values that will be later autodetected */
+ c->isdbt_layer_enabled = 0;
+ c->transmission_mode = TRANSMISSION_MODE_AUTO;
+ c->guard_interval = GUARD_INTERVAL_AUTO;
+ c->isdbt_sb_mode = 0;
+ c->isdbt_sb_segment_count = 0;
+}
+
+/*
+ * Estimates the bit rate using the per-segment bit rate given by
+ * ABNT/NBR 15601 spec (table 4).
+ */
+static u32 isdbt_rate[3][5][4] = {
+ { /* DQPSK/QPSK */
+ { 280850, 312060, 330420, 340430 }, /* 1/2 */
+ { 374470, 416080, 440560, 453910 }, /* 2/3 */
+ { 421280, 468090, 495630, 510650 }, /* 3/4 */
+ { 468090, 520100, 550700, 567390 }, /* 5/6 */
+ { 491500, 546110, 578230, 595760 }, /* 7/8 */
+ }, { /* QAM16 */
+ { 561710, 624130, 660840, 680870 }, /* 1/2 */
+ { 748950, 832170, 881120, 907820 }, /* 2/3 */
+ { 842570, 936190, 991260, 1021300 }, /* 3/4 */
+ { 936190, 1040210, 1101400, 1134780 }, /* 5/6 */
+ { 983000, 1092220, 1156470, 1191520 }, /* 7/8 */
+ }, { /* QAM64 */
+ { 842570, 936190, 991260, 1021300 }, /* 1/2 */
+ { 1123430, 1248260, 1321680, 1361740 }, /* 2/3 */
+ { 1263860, 1404290, 1486900, 1531950 }, /* 3/4 */
+ { 1404290, 1560320, 1652110, 1702170 }, /* 5/6 */
+ { 1474500, 1638340, 1734710, 1787280 }, /* 7/8 */
+ }
+};
+
+static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
+ u32 modulation, u32 fec, u32 interleaving,
+ u32 segment)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ u32 rate;
+ int m, f, i;
+
+ /*
+ * If modulation/fec/interleaving is not detected, the default is
+ * to consider the lowest bit rate, to avoid taking too long
+ * to get BER.
+ */
+ switch (modulation) {
+ case DQPSK:
+ case QPSK:
+ default:
+ m = 0;
+ break;
+ case QAM_16:
+ m = 1;
+ break;
+ case QAM_64:
+ m = 2;
+ break;
+ }
+
+ switch (fec) {
+ default:
+ case FEC_1_2:
+ case FEC_AUTO:
+ f = 0;
+ break;
+ case FEC_2_3:
+ f = 1;
+ break;
+ case FEC_3_4:
+ f = 2;
+ break;
+ case FEC_5_6:
+ f = 3;
+ break;
+ case FEC_7_8:
+ f = 4;
+ break;
+ }
+
+ switch (interleaving) {
+ default:
+ case GUARD_INTERVAL_1_4:
+ i = 0;
+ break;
+ case GUARD_INTERVAL_1_8:
+ i = 1;
+ break;
+ case GUARD_INTERVAL_1_16:
+ i = 2;
+ break;
+ case GUARD_INTERVAL_1_32:
+ i = 3;
+ break;
+ }
+
+ /* Sample BER every BER_SAMPLING_RATE seconds */
+ rate = isdbt_rate[m][f][i] * segment * BER_SAMPLING_RATE;
+
+ /* Avoid sampling too quickly or overflowing the register */
+ if (rate < 256)
+ rate = 256;
+ else if (rate > (1 << 24) - 1)
+ rate = (1 << 24) - 1;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: layer %c bitrate: %d kbps; counter = %d (0x%06x)\n",
+ __func__, 'A' + layer, segment * isdbt_rate[m][f][i]/1000,
+ rate, rate);
+
+ state->estimated_rate[layer] = rate;
+}
+
+
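
A worked example of the table lookup and clamping above, using only values from isdbt_rate[]:

/* A layer carrying 13 segments of 64QAM with FEC 3/4 and guard
 * interval 1/8 uses isdbt_rate[2][2][1] = 1404290 bit/s per segment:
 *	13 * 1404290 = 18255770 bit/s
 * With BER_SAMPLING_RATE = 1 this exceeds the 24-bit counter limit
 * (2^24 - 1 = 16777215), so the stored rate is clamped to 16777215.
 */
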
static int mb86a20s_get_frontend(struct dvb_frontend *fe)
{
struct mb86a20s_state *state = fe->demodulator_priv;
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int i, rc;
- /* Fixed parameters */
- p->delivery_system = SYS_ISDBT;
- p->bandwidth_hz = 6000000;
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
+ /* Reset frontend cache to default values */
+ mb86a20s_reset_frontend_cache(fe);
/* Check for partial reception */
rc = mb86a20s_writereg(state, 0x6d, 0x85);
- if (rc >= 0)
- rc = mb86a20s_readreg(state, 0x6e);
- if (rc >= 0)
- p->isdbt_partial_reception = (rc & 0x10) ? 1 : 0;
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x6e);
+ if (rc < 0)
+ return rc;
+ c->isdbt_partial_reception = (rc & 0x10) ? 1 : 0;
/* Get per-layer data */
- p->isdbt_layer_enabled = 0;
+
for (i = 0; i < 3; i++) {
+ dev_dbg(&state->i2c->dev, "%s: getting data for layer %c.\n",
+ __func__, 'A' + i);
+
rc = mb86a20s_get_segment_count(state, i);
- if (rc >= 0 && rc < 14)
- p->layer[i].segment_count = rc;
- if (rc == 0x0f)
+ if (rc < 0)
+ goto noperlayer_error;
+ if (rc >= 0 && rc < 14) {
+ c->layer[i].segment_count = rc;
+ } else {
+ c->layer[i].segment_count = 0;
+ state->estimated_rate[i] = 0;
continue;
- p->isdbt_layer_enabled |= 1 << i;
+ }
+ c->isdbt_layer_enabled |= 1 << i;
rc = mb86a20s_get_modulation(state, i);
- if (rc >= 0)
- p->layer[i].modulation = rc;
+ if (rc < 0)
+ goto noperlayer_error;
+ dev_dbg(&state->i2c->dev, "%s: modulation %d.\n",
+ __func__, rc);
+ c->layer[i].modulation = rc;
rc = mb86a20s_get_fec(state, i);
- if (rc >= 0)
- p->layer[i].fec = rc;
+ if (rc < 0)
+ goto noperlayer_error;
+ dev_dbg(&state->i2c->dev, "%s: FEC %d.\n",
+ __func__, rc);
+ c->layer[i].fec = rc;
rc = mb86a20s_get_interleaving(state, i);
- if (rc >= 0)
- p->layer[i].interleaving = rc;
+ if (rc < 0)
+ goto noperlayer_error;
+ dev_dbg(&state->i2c->dev, "%s: interleaving %d.\n",
+ __func__, rc);
+ c->layer[i].interleaving = rc;
+ mb86a20s_layer_bitrate(fe, i, c->layer[i].modulation,
+ c->layer[i].fec,
+ c->layer[i].interleaving,
+ c->layer[i].segment_count);
}
- p->isdbt_sb_mode = 0;
rc = mb86a20s_writereg(state, 0x6d, 0x84);
- if ((rc >= 0) && ((rc & 0x60) == 0x20)) {
- p->isdbt_sb_mode = 1;
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x6e);
+ if (rc < 0)
+ return rc;
+ if ((rc & 0x60) == 0x20) {
+ c->isdbt_sb_mode = 1;
/* At least, one segment should exist */
- if (!p->isdbt_sb_segment_count)
- p->isdbt_sb_segment_count = 1;
- } else
- p->isdbt_sb_segment_count = 0;
+ if (!c->isdbt_sb_segment_count)
+ c->isdbt_sb_segment_count = 1;
+ }
/* Get transmission mode and guard interval */
- p->transmission_mode = TRANSMISSION_MODE_AUTO;
- p->guard_interval = GUARD_INTERVAL_AUTO;
rc = mb86a20s_readreg(state, 0x07);
- if (rc >= 0) {
- if ((rc & 0x60) == 0x20) {
- switch (rc & 0x0c >> 2) {
- case 0:
- p->transmission_mode = TRANSMISSION_MODE_2K;
- break;
- case 1:
- p->transmission_mode = TRANSMISSION_MODE_4K;
- break;
- case 2:
- p->transmission_mode = TRANSMISSION_MODE_8K;
- break;
- }
+ if (rc < 0)
+ return rc;
+ if ((rc & 0x60) == 0x20) {
+ switch ((rc & 0x0c) >> 2) {
+ case 0:
+ c->transmission_mode = TRANSMISSION_MODE_2K;
+ break;
+ case 1:
+ c->transmission_mode = TRANSMISSION_MODE_4K;
+ break;
+ case 2:
+ c->transmission_mode = TRANSMISSION_MODE_8K;
+ break;
+ }
+ }
+ if (!(rc & 0x10)) {
+ switch (rc & 0x3) {
+ case 0:
+ c->guard_interval = GUARD_INTERVAL_1_4;
+ break;
+ case 1:
+ c->guard_interval = GUARD_INTERVAL_1_8;
+ break;
+ case 2:
+ c->guard_interval = GUARD_INTERVAL_1_16;
+ break;
+ }
+ }
+ return 0;
+
+noperlayer_error:
+
+ /* per-layer info is incomplete; discard all per-layer data */
+ c->isdbt_layer_enabled = 0;
+
+ return rc;
+}
+
+static int mb86a20s_reset_counters(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int rc, val;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ /* Reset the counters, if the channel changed */
+ if (state->last_frequency != c->frequency) {
+ memset(&c->strength, 0, sizeof(c->strength));
+ memset(&c->cnr, 0, sizeof(c->cnr));
+ memset(&c->pre_bit_error, 0, sizeof(c->pre_bit_error));
+ memset(&c->pre_bit_count, 0, sizeof(c->pre_bit_count));
+ memset(&c->post_bit_error, 0, sizeof(c->post_bit_error));
+ memset(&c->post_bit_count, 0, sizeof(c->post_bit_count));
+ memset(&c->block_error, 0, sizeof(c->block_error));
+ memset(&c->block_count, 0, sizeof(c->block_count));
+
+ state->last_frequency = c->frequency;
+ }
+
+ /* Clear status for most stats */
+
+ /* BER/PER counter reset */
+ rc = mb86a20s_writeregdata(state, mb86a20s_per_ber_reset);
+ if (rc < 0)
+ goto err;
+
+ /* CNR counter reset */
+ rc = mb86a20s_readreg(state, 0x45);
+ if (rc < 0)
+ goto err;
+ val = rc;
+ rc = mb86a20s_writereg(state, 0x45, val | 0x10);
+ if (rc < 0)
+ goto err;
+ rc = mb86a20s_writereg(state, 0x45, val & 0x6f);
+ if (rc < 0)
+ goto err;
+
+ /* MER counter reset */
+ rc = mb86a20s_writereg(state, 0x50, 0x50);
+ if (rc < 0)
+ goto err;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ goto err;
+ val = rc;
+ rc = mb86a20s_writereg(state, 0x51, val | 0x01);
+ if (rc < 0)
+ goto err;
+ rc = mb86a20s_writereg(state, 0x51, val & 0x06);
+ if (rc < 0)
+ goto err;
+
+ goto ok;
+err:
+ dev_err(&state->i2c->dev,
+ "%s: Can't reset FE statistics (error %d).\n",
+ __func__, rc);
+ok:
+ return rc;
+}
+
+static int mb86a20s_get_pre_ber(struct dvb_frontend *fe,
+ unsigned layer,
+ u32 *error, u32 *count)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ int rc, val;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ if (layer >= 3)
+ return -EINVAL;
+
+ /* Check if the BER measures are already available */
+ rc = mb86a20s_readreg(state, 0x54);
+ if (rc < 0)
+ return rc;
+
+ /* Check if data is available for that layer */
+ if (!(rc & (1 << layer))) {
+ dev_dbg(&state->i2c->dev,
+ "%s: preBER for layer %c is not available yet.\n",
+ __func__, 'A' + layer);
+ return -EBUSY;
+ }
+
+ /* Read Bit Error Count */
+ rc = mb86a20s_readreg(state, 0x55 + layer * 3);
+ if (rc < 0)
+ return rc;
+ *error = rc << 16;
+ rc = mb86a20s_readreg(state, 0x56 + layer * 3);
+ if (rc < 0)
+ return rc;
+ *error |= rc << 8;
+ rc = mb86a20s_readreg(state, 0x57 + layer * 3);
+ if (rc < 0)
+ return rc;
+ *error |= rc;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: bit error before Viterbi for layer %c: %d.\n",
+ __func__, 'A' + layer, *error);
+
+ /* Read Bit Count */
+ rc = mb86a20s_writereg(state, 0x50, 0xa7 + layer * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *count = rc << 16;
+ rc = mb86a20s_writereg(state, 0x50, 0xa8 + layer * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *count |= rc << 8;
+ rc = mb86a20s_writereg(state, 0x50, 0xa9 + layer * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *count |= rc;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: bit count before Viterbi for layer %c: %d.\n",
+ __func__, 'A' + layer, *count);
+
+
+ /*
+ * As we get TMCC data from the frontend, we can better estimate the
+ * BER bit counters, so the BER measurement can run over a longer
+ * period. Use those data, if available, to update the bit count
+ * measure.
+ */
+
+ if (state->estimated_rate[layer]
+ && state->estimated_rate[layer] != *count) {
+ dev_dbg(&state->i2c->dev,
+ "%s: updating layer %c preBER counter to %d.\n",
+ __func__, 'A' + layer, state->estimated_rate[layer]);
+
+ /* Turn off BER before Viterbi */
+ rc = mb86a20s_writereg(state, 0x52, 0x00);
+
+ /* Update counter for this layer */
+ rc = mb86a20s_writereg(state, 0x50, 0xa7 + layer * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51,
+ state->estimated_rate[layer] >> 16);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x50, 0xa8 + layer * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51,
+ state->estimated_rate[layer] >> 8);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x50, 0xa9 + layer * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51,
+ state->estimated_rate[layer]);
+ if (rc < 0)
+ return rc;
+
+ /* Turn on BER before Viterbi */
+ rc = mb86a20s_writereg(state, 0x52, 0x01);
+
+ /* Reset all preBER counters */
+ rc = mb86a20s_writereg(state, 0x53, 0x00);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x53, 0x07);
+ } else {
+ /* Reset counter to collect new data */
+ rc = mb86a20s_readreg(state, 0x53);
+ if (rc < 0)
+ return rc;
+ val = rc;
+ rc = mb86a20s_writereg(state, 0x53, val & ~(1 << layer));
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x53, val | (1 << layer));
+ }
+
+ return rc;
+}
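
A hypothetical helper, shown only to make the register indirection above explicit: the 24-bit preBER bit-count threshold is programmed by selecting sub-registers 0xa7..0xa9 (offset by 3 per layer) through register 0x50 and writing one byte at a time through 0x51, exactly as the code above does.

static int mb86a20s_set_pre_ber_count(struct mb86a20s_state *state,
				      unsigned layer, u32 count)
{
	static const u8 subreg[] = { 0xa7, 0xa8, 0xa9 };	/* MSB..LSB */
	int i, rc;

	for (i = 0; i < 3; i++) {
		rc = mb86a20s_writereg(state, 0x50, subreg[i] + layer * 3);
		if (rc < 0)
			return rc;
		rc = mb86a20s_writereg(state, 0x51,
				       (count >> (8 * (2 - i))) & 0xff);
		if (rc < 0)
			return rc;
	}
	return 0;
}
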
+
+static int mb86a20s_get_post_ber(struct dvb_frontend *fe,
+ unsigned layer,
+ u32 *error, u32 *count)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ u32 counter, collect_rate;
+ int rc, val;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ if (layer >= 3)
+ return -EINVAL;
+
+ /* Check if the BER measures are already available */
+ rc = mb86a20s_readreg(state, 0x60);
+ if (rc < 0)
+ return rc;
+
+ /* Check if data is available for that layer */
+ if (!(rc & (1 << layer))) {
+ dev_dbg(&state->i2c->dev,
+ "%s: post BER for layer %c is not available yet.\n",
+ __func__, 'A' + layer);
+ return -EBUSY;
+ }
+
+ /* Read Bit Error Count */
+ rc = mb86a20s_readreg(state, 0x64 + layer * 3);
+ if (rc < 0)
+ return rc;
+ *error = rc << 16;
+ rc = mb86a20s_readreg(state, 0x65 + layer * 3);
+ if (rc < 0)
+ return rc;
+ *error |= rc << 8;
+ rc = mb86a20s_readreg(state, 0x66 + layer * 3);
+ if (rc < 0)
+ return rc;
+ *error |= rc;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: post bit error for layer %c: %d.\n",
+ __func__, 'A' + layer, *error);
+
+ /* Read Bit Count */
+ rc = mb86a20s_writereg(state, 0x50, 0xdc + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ counter = rc << 8;
+ rc = mb86a20s_writereg(state, 0x50, 0xdd + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ counter |= rc;
+ *count = counter * 204 * 8;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: post bit count for layer %c: %d.\n",
+ __func__, 'A' + layer, *count);
+
+ /*
+ * As we get TMCC data from the frontend, we can better estimate the
+ * BER bit counters, so the BER measurement can run over a longer
+ * period. Use those data, if available, to update the bit count
+ * measure.
+ */
+
+ if (!state->estimated_rate[layer])
+ goto reset_measurement;
+
+ collect_rate = state->estimated_rate[layer] / 204 / 8;
+ if (collect_rate < 32)
+ collect_rate = 32;
+ if (collect_rate > 65535)
+ collect_rate = 65535;
+ if (collect_rate != counter) {
+ dev_dbg(&state->i2c->dev,
+ "%s: updating postBER counter on layer %c to %d.\n",
+ __func__, 'A' + layer, collect_rate);
+
+ /* Turn off BER after Viterbi */
+ rc = mb86a20s_writereg(state, 0x5e, 0x00);
+
+ /* Update counter for this layer */
+ rc = mb86a20s_writereg(state, 0x50, 0xdc + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, collect_rate >> 8);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x50, 0xdd + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, collect_rate & 0xff);
+ if (rc < 0)
+ return rc;
+
+ /* Turn on BER after Viterbi */
+ rc = mb86a20s_writereg(state, 0x5e, 0x07);
+
+ /* Reset all postBER counters */
+ rc = mb86a20s_writereg(state, 0x5f, 0x00);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x5f, 0x07);
+
+ return rc;
+ }
+
+reset_measurement:
+ /* Reset counter to collect new data */
+ rc = mb86a20s_readreg(state, 0x5f);
+ if (rc < 0)
+ return rc;
+ val = rc;
+ rc = mb86a20s_writereg(state, 0x5f, val & ~(1 << layer));
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x5f, val | (1 << layer));
+
+ return rc;
+}
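
A worked example of the packet-based scaling above (the post-Viterbi counters count 204-byte RS packets, hence the * 204 * 8):

/* With an estimated layer rate of 16777215 bit/s (the clamped 24-bit
 * maximum), the postBER window becomes
 *	collect_rate = 16777215 / 204 / 8 = 10280 packets
 * and the reported bit count is 10280 * 204 * 8 = 16776960 bits,
 * i.e. roughly one second of payload at that rate.
 */
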
+
+static int mb86a20s_get_blk_error(struct dvb_frontend *fe,
+ unsigned layer,
+ u32 *error, u32 *count)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ int rc, val;
+ u32 collect_rate;
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ if (layer >= 3)
+ return -EINVAL;
+
+ /* Check if the PER measures are already available */
+ rc = mb86a20s_writereg(state, 0x50, 0xb8);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+
+ /* Check if data is available for that layer */
+
+ if (!(rc & (1 << layer))) {
+ dev_dbg(&state->i2c->dev,
+ "%s: block counts for layer %c aren't available yet.\n",
+ __func__, 'A' + layer);
+ return -EBUSY;
+ }
+
+ /* Read Packet error Count */
+ rc = mb86a20s_writereg(state, 0x50, 0xb9 + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *error = rc << 8;
+ rc = mb86a20s_writereg(state, 0x50, 0xba + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *error |= rc;
+ dev_dbg(&state->i2c->dev, "%s: block error for layer %c: %d.\n",
+ __func__, 'A' + layer, *error);
+
+ /* Read Bit Count */
+ rc = mb86a20s_writereg(state, 0x50, 0xb2 + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *count = rc << 8;
+ rc = mb86a20s_writereg(state, 0x50, 0xb3 + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *count |= rc;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: block count for layer %c: %d.\n",
+ __func__, 'A' + layer, *count);
+
+ /*
+ * As we get TMCC data from the frontend, we can better estimate the
+ * BER bit counters, so the BER measurement can run over a longer
+ * period. Use those data, if available, to update the bit count
+ * measure.
+ */
+
+ if (!state->estimated_rate[layer])
+ goto reset_measurement;
+
+ collect_rate = state->estimated_rate[layer] / 204 / 8;
+ if (collect_rate < 32)
+ collect_rate = 32;
+ if (collect_rate > 65535)
+ collect_rate = 65535;
+
+ if (collect_rate != *count) {
+ dev_dbg(&state->i2c->dev,
+ "%s: updating PER counter on layer %c to %d.\n",
+ __func__, 'A' + layer, collect_rate);
+
+ /* Stop PER measurement */
+ rc = mb86a20s_writereg(state, 0x50, 0xb0);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, 0x00);
+ if (rc < 0)
+ return rc;
+
+ /* Update this layer's counter */
+ rc = mb86a20s_writereg(state, 0x50, 0xb2 + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, collect_rate >> 8);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x50, 0xb3 + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, collect_rate & 0xff);
+ if (rc < 0)
+ return rc;
+
+ /* start PER measurement */
+ rc = mb86a20s_writereg(state, 0x50, 0xb0);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, 0x07);
+ if (rc < 0)
+ return rc;
+
+ /* Reset all counters to collect new data */
+ rc = mb86a20s_writereg(state, 0x50, 0xb1);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, 0x07);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, 0x00);
+
+ return rc;
+ }
+
+reset_measurement:
+ /* Reset counter to collect new data */
+ rc = mb86a20s_writereg(state, 0x50, 0xb1);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ val = rc;
+ rc = mb86a20s_writereg(state, 0x51, val | (1 << layer));
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, val & ~(1 << layer));
+
+ return rc;
+}
+
+struct linear_segments {
+ unsigned x, y;
+};
+
+/*
+ * All tables below return a dB/1000 measurement
+ */
+
+static struct linear_segments cnr_to_db_table[] = {
+ { 19648, 0},
+ { 18187, 1000},
+ { 16534, 2000},
+ { 14823, 3000},
+ { 13161, 4000},
+ { 11622, 5000},
+ { 10279, 6000},
+ { 9089, 7000},
+ { 8042, 8000},
+ { 7137, 9000},
+ { 6342, 10000},
+ { 5641, 11000},
+ { 5030, 12000},
+ { 4474, 13000},
+ { 3988, 14000},
+ { 3556, 15000},
+ { 3180, 16000},
+ { 2841, 17000},
+ { 2541, 18000},
+ { 2276, 19000},
+ { 2038, 20000},
+ { 1800, 21000},
+ { 1625, 22000},
+ { 1462, 23000},
+ { 1324, 24000},
+ { 1175, 25000},
+ { 1063, 26000},
+ { 980, 27000},
+ { 907, 28000},
+ { 840, 29000},
+ { 788, 30000},
+};
+
+static struct linear_segments cnr_64qam_table[] = {
+ { 3922688, 0},
+ { 3920384, 1000},
+ { 3902720, 2000},
+ { 3894784, 3000},
+ { 3882496, 4000},
+ { 3872768, 5000},
+ { 3858944, 6000},
+ { 3851520, 7000},
+ { 3838976, 8000},
+ { 3829248, 9000},
+ { 3818240, 10000},
+ { 3806976, 11000},
+ { 3791872, 12000},
+ { 3767040, 13000},
+ { 3720960, 14000},
+ { 3637504, 15000},
+ { 3498496, 16000},
+ { 3296000, 17000},
+ { 3031040, 18000},
+ { 2715392, 19000},
+ { 2362624, 20000},
+ { 1963264, 21000},
+ { 1649664, 22000},
+ { 1366784, 23000},
+ { 1120768, 24000},
+ { 890880, 25000},
+ { 723456, 26000},
+ { 612096, 27000},
+ { 518912, 28000},
+ { 448256, 29000},
+ { 388864, 30000},
+};
+
+static struct linear_segments cnr_16qam_table[] = {
+ { 5314816, 0},
+ { 5219072, 1000},
+ { 5118720, 2000},
+ { 4998912, 3000},
+ { 4875520, 4000},
+ { 4736000, 5000},
+ { 4604160, 6000},
+ { 4458752, 7000},
+ { 4300288, 8000},
+ { 4092928, 9000},
+ { 3836160, 10000},
+ { 3521024, 11000},
+ { 3155968, 12000},
+ { 2756864, 13000},
+ { 2347008, 14000},
+ { 1955072, 15000},
+ { 1593600, 16000},
+ { 1297920, 17000},
+ { 1043968, 18000},
+ { 839680, 19000},
+ { 672256, 20000},
+ { 523008, 21000},
+ { 424704, 22000},
+ { 345088, 23000},
+ { 280064, 24000},
+ { 221440, 25000},
+ { 179712, 26000},
+ { 151040, 27000},
+ { 128512, 28000},
+ { 110080, 29000},
+ { 95744, 30000},
+};
+
+static struct linear_segments cnr_qpsk_table[] = {
+ { 2834176, 0},
+ { 2683648, 1000},
+ { 2536960, 2000},
+ { 2391808, 3000},
+ { 2133248, 4000},
+ { 1906176, 5000},
+ { 1666560, 6000},
+ { 1422080, 7000},
+ { 1189632, 8000},
+ { 976384, 9000},
+ { 790272, 10000},
+ { 633344, 11000},
+ { 505600, 12000},
+ { 402944, 13000},
+ { 320768, 14000},
+ { 255488, 15000},
+ { 204032, 16000},
+ { 163072, 17000},
+ { 130304, 18000},
+ { 105216, 19000},
+ { 83456, 20000},
+ { 65024, 21000},
+ { 52480, 22000},
+ { 42752, 23000},
+ { 34560, 24000},
+ { 27136, 25000},
+ { 22016, 26000},
+ { 18432, 27000},
+ { 15616, 28000},
+ { 13312, 29000},
+ { 11520, 30000},
+};
+
+static u32 interpolate_value(u32 value, struct linear_segments *segments,
+ unsigned len)
+{
+ u64 tmp64;
+ u32 dx, dy;
+ int i, ret;
+
+ if (value >= segments[0].x)
+ return segments[0].y;
+ if (value < segments[len-1].x)
+ return segments[len-1].y;
+
+ for (i = 1; i < len - 1; i++) {
+ /* If value is identical, no need to interpolate */
+ if (value == segments[i].x)
+ return segments[i].y;
+ if (value > segments[i].x)
+ break;
+ }
+
+ /* Linear interpolation between the two (x,y) points */
+ dy = segments[i].y - segments[i - 1].y;
+ dx = segments[i - 1].x - segments[i].x;
+ tmp64 = value - segments[i].x;
+ tmp64 *= dy;
+ do_div(tmp64, dx);
+ ret = segments[i].y - tmp64;
+
+ return ret;
+}
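
A minimal usage sketch of interpolate_value() with the cnr_to_db_table above; the function name and the raw reading are made up, with the value picked to hit an exact table entry.

static u32 example_cnr_to_mdb(void)
{
	u32 cnr_linear = 5641;	/* hypothetical raw CNR reading */

	/* 5641 is an exact entry in cnr_to_db_table, so this returns
	 * 11000, i.e. 11.000 dB; readings between entries are linearly
	 * interpolated. */
	return interpolate_value(cnr_linear, cnr_to_db_table,
				 ARRAY_SIZE(cnr_to_db_table));
}
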
+
+static int mb86a20s_get_main_CNR(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 cnr_linear, cnr;
+ int rc, val;
+
+ /* Check if CNR is available */
+ rc = mb86a20s_readreg(state, 0x45);
+ if (rc < 0)
+ return rc;
+
+ if (!(rc & 0x40)) {
+ dev_info(&state->i2c->dev, "%s: CNR is not available yet.\n",
+ __func__);
+ return -EBUSY;
+ }
+ val = rc;
+
+ rc = mb86a20s_readreg(state, 0x46);
+ if (rc < 0)
+ return rc;
+ cnr_linear = rc << 8;
+
+ rc = mb86a20s_readreg(state, 0x46);
+ if (rc < 0)
+ return rc;
+ cnr_linear |= rc;
+
+ cnr = interpolate_value(cnr_linear,
+ cnr_to_db_table, ARRAY_SIZE(cnr_to_db_table));
+
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = cnr;
+
+ dev_dbg(&state->i2c->dev, "%s: CNR is %d.%03d dB (%d)\n",
+ __func__, cnr / 1000, cnr % 1000, cnr_linear);
+
+ /* CNR counter reset */
+ rc = mb86a20s_writereg(state, 0x45, val | 0x10);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x45, val & 0x6f);
+
+ return rc;
+}
+
+static int mb86a20s_get_blk_error_layer_CNR(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 mer, cnr;
+ int rc, val, i;
+ struct linear_segments *segs;
+ unsigned segs_len;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ /* Check if the measures are already available */
+ rc = mb86a20s_writereg(state, 0x50, 0x5b);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+
+ /* Check if data is available */
+ if (!(rc & 0x01)) {
+ dev_info(&state->i2c->dev,
+ "%s: MER measures aren't available yet.\n", __func__);
+ return -EBUSY;
+ }
+
+ /* Read all layers */
+ for (i = 0; i < 3; i++) {
+ if (!(c->isdbt_layer_enabled & (1 << i))) {
+ c->cnr.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ continue;
+ }
+
+ rc = mb86a20s_writereg(state, 0x50, 0x52 + i * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ mer = rc << 16;
+ rc = mb86a20s_writereg(state, 0x50, 0x53 + i * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ mer |= rc << 8;
+ rc = mb86a20s_writereg(state, 0x50, 0x54 + i * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ mer |= rc;
+
+ switch (c->layer[i].modulation) {
+ case DQPSK:
+ case QPSK:
+ segs = cnr_qpsk_table;
+ segs_len = ARRAY_SIZE(cnr_qpsk_table);
+ break;
+ case QAM_16:
+ segs = cnr_16qam_table;
+ segs_len = ARRAY_SIZE(cnr_16qam_table);
+ break;
+ default:
+ case QAM_64:
+ segs = cnr_64qam_table;
+ segs_len = ARRAY_SIZE(cnr_64qam_table);
+ break;
}
- if (!(rc & 0x10)) {
- switch (rc & 0x3) {
- case 0:
- p->guard_interval = GUARD_INTERVAL_1_4;
- break;
- case 1:
- p->guard_interval = GUARD_INTERVAL_1_8;
- break;
- case 2:
- p->guard_interval = GUARD_INTERVAL_1_16;
- break;
+ cnr = interpolate_value(mer, segs, segs_len);
+
+ c->cnr.stat[1 + i].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[1 + i].svalue = cnr;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: CNR for layer %c is %d.%03d dB (MER = %d).\n",
+ __func__, 'A' + i, cnr / 1000, cnr % 1000, mer);
+
+ }
+
+ /* Start a new MER measurement */
+ /* MER counter reset */
+ rc = mb86a20s_writereg(state, 0x50, 0x50);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ val = rc;
+
+ rc = mb86a20s_writereg(state, 0x51, val | 0x01);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, val & 0x06);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static void mb86a20s_stats_not_ready(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int i;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ /* Fill the length of each status counter */
+
+ /* Only global stats */
+ c->strength.len = 1;
+
+ /* Per-layer stats - 3 layers + global */
+ c->cnr.len = 4;
+ c->pre_bit_error.len = 4;
+ c->pre_bit_count.len = 4;
+ c->post_bit_error.len = 4;
+ c->post_bit_count.len = 4;
+ c->block_error.len = 4;
+ c->block_count.len = 4;
+
+ /* Signal is always available */
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = 0;
+
+ /* Put all of them at FE_SCALE_NOT_AVAILABLE */
+ for (i = 0; i < 4; i++) {
+ c->cnr.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->pre_bit_error.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->pre_bit_count.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+}
+
+static int mb86a20s_get_stats(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int rc = 0, i;
+ u32 bit_error = 0, bit_count = 0;
+ u32 t_pre_bit_error = 0, t_pre_bit_count = 0;
+ u32 t_post_bit_error = 0, t_post_bit_count = 0;
+ u32 block_error = 0, block_count = 0;
+ u32 t_block_error = 0, t_block_count = 0;
+ int active_layers = 0, pre_ber_layers = 0, post_ber_layers = 0;
+ int per_layers = 0;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ mb86a20s_get_main_CNR(fe);
+
+ /* Get per-layer stats */
+ mb86a20s_get_blk_error_layer_CNR(fe);
+
+ for (i = 0; i < 3; i++) {
+ if (c->isdbt_layer_enabled & (1 << i)) {
+ /* Layer is active and has rc segments */
+ active_layers++;
+
+ /* Handle BER before Viterbi */
+ rc = mb86a20s_get_pre_ber(fe, i,
+ &bit_error, &bit_count);
+ if (rc >= 0) {
+ c->pre_bit_error.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->pre_bit_error.stat[1 + i].uvalue += bit_error;
+ c->pre_bit_count.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->pre_bit_count.stat[1 + i].uvalue += bit_count;
+ } else if (rc != -EBUSY) {
+ /*
+ * If an I/O error happened,
+ * measures are now unavailable
+ */
+ c->pre_bit_error.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->pre_bit_count.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ dev_err(&state->i2c->dev,
+ "%s: Can't get BER for layer %c (error %d).\n",
+ __func__, 'A' + i, rc);
}
+ if (c->pre_bit_error.stat[1 + i].scale != FE_SCALE_NOT_AVAILABLE)
+ pre_ber_layers++;
+
+ /* Handle BER after Viterbi */
+ rc = mb86a20s_get_post_ber(fe, i,
+ &bit_error, &bit_count);
+ if (rc >= 0) {
+ c->post_bit_error.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[1 + i].uvalue += bit_error;
+ c->post_bit_count.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[1 + i].uvalue += bit_count;
+ } else if (rc != -EBUSY) {
+ /*
+ * If an I/O error happened,
+ * measures are now unavailable
+ */
+ c->post_bit_error.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ dev_err(&state->i2c->dev,
+ "%s: Can't get BER for layer %c (error %d).\n",
+ __func__, 'A' + i, rc);
+ }
+ if (c->post_bit_error.stat[1 + i].scale != FE_SCALE_NOT_AVAILABLE)
+ post_ber_layers++;
+
+ /* Handle Block errors for PER/UCB reports */
+ rc = mb86a20s_get_blk_error(fe, i,
+ &block_error,
+ &block_count);
+ if (rc >= 0) {
+ c->block_error.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[1 + i].uvalue += block_error;
+ c->block_count.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[1 + i].uvalue += block_count;
+ } else if (rc != -EBUSY) {
+ /*
+ * If an I/O error happened,
+ * measures are now unavailable
+ */
+ c->block_error.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ dev_err(&state->i2c->dev,
+ "%s: Can't get PER for layer %c (error %d).\n",
+ __func__, 'A' + i, rc);
+
+ }
+ if (c->block_error.stat[1 + i].scale != FE_SCALE_NOT_AVAILABLE)
+ per_layers++;
+
+ /* Update total preBER */
+ t_pre_bit_error += c->pre_bit_error.stat[1 + i].uvalue;
+ t_pre_bit_count += c->pre_bit_count.stat[1 + i].uvalue;
+
+ /* Update total postBER */
+ t_post_bit_error += c->post_bit_error.stat[1 + i].uvalue;
+ t_post_bit_count += c->post_bit_count.stat[1 + i].uvalue;
+
+ /* Update total PER */
+ t_block_error += c->block_error.stat[1 + i].uvalue;
+ t_block_count += c->block_count.stat[1 + i].uvalue;
}
}
+ /*
+ * Start showing global count if at least one error count is
+ * available.
+ */
+ if (pre_ber_layers) {
+ /*
+ * At least one per-layer BER measure was read. We can now
+ * calculate the total BER
+ *
+ * Total Bit Error/Count is calculated as the sum of the
+ * bit errors on all active layers.
+ */
+ c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->pre_bit_error.stat[0].uvalue = t_pre_bit_error;
+ c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->pre_bit_count.stat[0].uvalue = t_pre_bit_count;
+ } else {
+ c->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ }
+
+ /*
+ * Start showing global count if at least one error count is
+ * available.
+ */
+ if (post_ber_layers) {
+ /*
+ * At least one per-layer BER measure was read. We can now
+ * calculate the total BER
+ *
+ * Total Bit Error/Count is calculated as the sum of the
+ * bit errors on all active layers.
+ */
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = t_post_bit_error;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue = t_post_bit_count;
+ } else {
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ }
+
+ if (per_layers) {
+ /*
+ * At least one per-layer UCB measure was read. We can now
+ * calculate the total UCB
+ *
+ * Total block Error/Count is calculated as the sum of the
+ * block errors on all active layers.
+ */
+ c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[0].uvalue = t_block_error;
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[0].uvalue = t_block_count;
+ } else {
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ }
+
+ return rc;
+}
+
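The bookkeeping above follows the DVBv5 statistics convention: stat[0] holds the global counter while stat[1 + layer] holds the per-layer (ISDB-T layers A, B and C) counters, and the global slot is only marked FE_SCALE_COUNTER once at least one layer reported a value. A minimal standalone sketch of that aggregation, using simplified stand-in types rather than the kernel's struct dtv_stats, is:

/* Minimal sketch of the stat[] aggregation used above; struct stat_slot is a
 * simplified stand-in for the kernel's struct dtv_stats, not the real type. */
#include <stdint.h>
#include <stdio.h>

#define SCALE_NOT_AVAILABLE 0
#define SCALE_COUNTER       1
#define NUM_LAYERS          3	/* ISDB-T layers A, B and C */

struct stat_slot { int scale; uint64_t uvalue; };

static void aggregate(struct stat_slot *s)	/* s[0] global, s[1..3] per layer */
{
	uint64_t total = 0;
	int valid = 0, i;

	for (i = 0; i < NUM_LAYERS; i++) {
		if (s[1 + i].scale != SCALE_NOT_AVAILABLE) {
			total += s[1 + i].uvalue;
			valid++;
		}
	}
	/* Expose a global counter only when at least one layer was read */
	s[0].scale  = valid ? SCALE_COUNTER : SCALE_NOT_AVAILABLE;
	s[0].uvalue = total;
}

int main(void)
{
	struct stat_slot err[1 + NUM_LAYERS] = {
		{ 0, 0 },
		{ SCALE_COUNTER, 120 },
		{ SCALE_COUNTER, 30 },
		{ SCALE_NOT_AVAILABLE, 0 },
	};

	aggregate(err);
	printf("global: scale=%d errors=%llu\n", err[0].scale,
	       (unsigned long long)err[0].uvalue);
	return 0;
}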
+/*
+ * The functions below are called via DVB callbacks, so they need to
+ * properly use the I2C gate control
+ */
+
+static int mb86a20s_initfe(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ int rc;
+ u8 regD5 = 1;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ /* Initialize the frontend */
+ rc = mb86a20s_writeregdata(state, mb86a20s_init);
+ if (rc < 0)
+ goto err;
+
+ if (!state->config->is_serial) {
+ regD5 &= ~1;
+
+ rc = mb86a20s_writereg(state, 0x50, 0xd5);
+ if (rc < 0)
+ goto err;
+ rc = mb86a20s_writereg(state, 0x51, regD5);
+ if (rc < 0)
+ goto err;
+ }
+
+err:
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
+ if (rc < 0) {
+ state->need_init = true;
+ dev_info(&state->i2c->dev,
+ "mb86a20s: Init failed. Will try again later\n");
+ } else {
+ state->need_init = false;
+ dev_dbg(&state->i2c->dev, "Initialization succeeded.\n");
+ }
+ return rc;
+}
+
+static int mb86a20s_set_frontend(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ int rc;
+#if 0
+ /*
+ * FIXME: Properly implement the set frontend properties
+ */
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+#endif
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ /*
+ * Gate should already be open, but it doesn't hurt to
+ * double-check.
+ */
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ fe->ops.tuner_ops.set_params(fe);
+
+ /*
+ * Make it more reliable: if, for some reason, the initial
+ * device initialization doesn't happen, initialize it when
+ * the SBTVD parameters are adjusted.
+ *
+ * Unfortunately, due to a hard-to-track bug in tda829x/tda18271,
+ * the AGC callback logic is not called during DVB attach time,
+ * causing mb86a20s not to be initialized with the Kworld SBTVD.
+ * So this hack is needed in order to make the Kworld SBTVD work.
+ */
+ if (state->need_init)
+ mb86a20s_initfe(fe);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ rc = mb86a20s_writeregdata(state, mb86a20s_reset_reception);
+ mb86a20s_reset_counters(fe);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ return rc;
+}
+
+static int mb86a20s_read_status_and_stats(struct dvb_frontend *fe,
+ fe_status_t *status)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int rc;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ /* Get lock */
+ rc = mb86a20s_read_status(fe, status);
+ if (!(*status & FE_HAS_LOCK)) {
+ mb86a20s_stats_not_ready(fe);
+ mb86a20s_reset_frontend_cache(fe);
+ }
+ if (rc < 0) {
+ dev_err(&state->i2c->dev,
+ "%s: Can't read frontend lock status\n", __func__);
+ goto error;
+ }
+
+ /* Get signal strength */
+ rc = mb86a20s_read_signal_strength(fe);
+ if (rc < 0) {
+ dev_err(&state->i2c->dev,
+ "%s: Can't reset VBER registers.\n", __func__);
+ mb86a20s_stats_not_ready(fe);
+ mb86a20s_reset_frontend_cache(fe);
+
+ rc = 0; /* Status is OK */
+ goto error;
+ }
+ /* Fill signal strength */
+ c->strength.stat[0].uvalue = rc;
+
+ if (*status & FE_HAS_LOCK) {
+ /* Get TMCC info */
+ rc = mb86a20s_get_frontend(fe);
+ if (rc < 0) {
+ dev_err(&state->i2c->dev,
+ "%s: Can't get FE TMCC data.\n", __func__);
+ rc = 0; /* Status is OK */
+ goto error;
+ }
+
+ /* Get statistics */
+ rc = mb86a20s_get_stats(fe);
+ if (rc < 0 && rc != -EBUSY) {
+ dev_err(&state->i2c->dev,
+ "%s: Can't get FE statistics.\n", __func__);
+ rc = 0;
+ goto error;
+ }
+ rc = 0; /* Don't return EBUSY to userspace */
+ }
+ goto ok;
+
+error:
+ mb86a20s_stats_not_ready(fe);
+
+ok:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ return rc;
+}
+
+static int mb86a20s_read_signal_strength_from_cache(struct dvb_frontend *fe,
+ u16 *strength)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ *strength = c->strength.stat[0].uvalue;
+
return 0;
}
+static int mb86a20s_get_frontend_dummy(struct dvb_frontend *fe)
+{
+ /*
+ * get_frontend is now handled together with the other stats
+ * retrieval, when read_status() is called, as some statistics
+ * depend on the layer detection.
+ */
+ return 0;
+}
+
static int mb86a20s_tune(struct dvb_frontend *fe,
bool re_tune,
unsigned int mode_flags,
unsigned int *delay,
fe_status_t *status)
{
+ struct mb86a20s_state *state = fe->demodulator_priv;
int rc = 0;
- dprintk("\n");
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
if (re_tune)
rc = mb86a20s_set_frontend(fe);
if (!(mode_flags & FE_TUNE_MODE_ONESHOT))
- mb86a20s_read_status(fe, status);
+ mb86a20s_read_status_and_stats(fe, status);
return rc;
}
@@ -619,7 +1936,7 @@ static void mb86a20s_release(struct dvb_frontend *fe)
{
struct mb86a20s_state *state = fe->demodulator_priv;
- dprintk("\n");
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
kfree(state);
}
@@ -629,15 +1946,16 @@ static struct dvb_frontend_ops mb86a20s_ops;
struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
struct i2c_adapter *i2c)
{
+ struct mb86a20s_state *state;
u8 rev;
- /* allocate memory for the internal state */
- struct mb86a20s_state *state =
- kzalloc(sizeof(struct mb86a20s_state), GFP_KERNEL);
+ dev_dbg(&i2c->dev, "%s called.\n", __func__);
- dprintk("\n");
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(struct mb86a20s_state), GFP_KERNEL);
if (state == NULL) {
- rc("Unable to kzalloc\n");
+ dev_err(&i2c->dev,
+ "%s: unable to allocate memory for state\n", __func__);
goto error;
}
@@ -654,9 +1972,11 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
rev = mb86a20s_readreg(state, 0);
if (rev == 0x13) {
- printk(KERN_INFO "Detected a Fujitsu mb86a20s frontend\n");
+ dev_info(&i2c->dev,
+ "Detected a Fujitsu mb86a20s frontend\n");
} else {
- printk(KERN_ERR "Frontend revision %d is unknown - aborting.\n",
+ dev_dbg(&i2c->dev,
+ "Frontend revision %d is unknown - aborting.\n",
rev);
goto error;
}
@@ -690,9 +2010,9 @@ static struct dvb_frontend_ops mb86a20s_ops = {
.init = mb86a20s_initfe,
.set_frontend = mb86a20s_set_frontend,
- .get_frontend = mb86a20s_get_frontend,
- .read_status = mb86a20s_read_status,
- .read_signal_strength = mb86a20s_read_signal_strength,
+ .get_frontend = mb86a20s_get_frontend_dummy,
+ .read_status = mb86a20s_read_status_and_stats,
+ .read_signal_strength = mb86a20s_read_signal_strength_from_cache,
.tune = mb86a20s_tune,
};
diff --git a/drivers/media/dvb-frontends/mt312.h b/drivers/media/dvb-frontends/mt312.h
index 29e3bb5496b8..5706621ad79d 100644
--- a/drivers/media/dvb-frontends/mt312.h
+++ b/drivers/media/dvb-frontends/mt312.h
@@ -36,7 +36,7 @@ struct mt312_config {
unsigned int voltage_inverted:1;
};
-#if defined(CONFIG_DVB_MT312) || (defined(CONFIG_DVB_MT312_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_MT312)
struct dvb_frontend *mt312_attach(const struct mt312_config *config,
struct i2c_adapter *i2c);
#else
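The header changes in this series all replace the open-coded guard defined(CONFIG_xxx) || (defined(CONFIG_xxx_MODULE) && defined(MODULE)) with IS_ENABLED(CONFIG_xxx), which is true whenever the option is built in or built as a module. A condensed sketch of the kconfig.h machinery behind it is shown below (adapted from include/linux/kconfig.h); note that, unlike the old guard, IS_ENABLED() does not additionally require the including file itself to be part of a module build.

/* Condensed sketch of the trick behind IS_ENABLED(); the real macros live
 * in include/linux/kconfig.h. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
/* Evaluates to 1 if 'x' is #defined to 1, else 0 */
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define IS_BUILTIN(option)	__is_defined(option)
#define IS_MODULE(option)	__is_defined(option##_MODULE)
#define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))

/* Example: pretend the frontend is configured as a module (=m) */
#define CONFIG_DVB_MT312_MODULE 1

#if IS_ENABLED(CONFIG_DVB_MT312)
const int mt312_available = 1;	/* the attach() prototype would be declared here */
#else
const int mt312_available = 0;	/* the inline stub returning NULL would go here */
#endif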
diff --git a/drivers/media/dvb-frontends/mt352.h b/drivers/media/dvb-frontends/mt352.h
index ca2562d6f289..451d904e1500 100644
--- a/drivers/media/dvb-frontends/mt352.h
+++ b/drivers/media/dvb-frontends/mt352.h
@@ -51,7 +51,7 @@ struct mt352_config
int (*demod_init)(struct dvb_frontend* fe);
};
-#if defined(CONFIG_DVB_MT352) || (defined(CONFIG_DVB_MT352_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_MT352)
extern struct dvb_frontend* mt352_attach(const struct mt352_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/nxt200x.h b/drivers/media/dvb-frontends/nxt200x.h
index f3c84583770f..b518d545609e 100644
--- a/drivers/media/dvb-frontends/nxt200x.h
+++ b/drivers/media/dvb-frontends/nxt200x.h
@@ -42,7 +42,7 @@ struct nxt200x_config
int (*set_ts_params)(struct dvb_frontend* fe, int is_punctured);
};
-#if defined(CONFIG_DVB_NXT200X) || (defined(CONFIG_DVB_NXT200X_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_NXT200X)
extern struct dvb_frontend* nxt200x_attach(const struct nxt200x_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/nxt6000.h b/drivers/media/dvb-frontends/nxt6000.h
index 878eb38a075e..b5867c2ae681 100644
--- a/drivers/media/dvb-frontends/nxt6000.h
+++ b/drivers/media/dvb-frontends/nxt6000.h
@@ -33,7 +33,7 @@ struct nxt6000_config
u8 clock_inversion:1;
};
-#if defined(CONFIG_DVB_NXT6000) || (defined(CONFIG_DVB_NXT6000_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_NXT6000)
extern struct dvb_frontend* nxt6000_attach(const struct nxt6000_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/or51132.h b/drivers/media/dvb-frontends/or51132.h
index 1b8e04d973c8..938958386cb1 100644
--- a/drivers/media/dvb-frontends/or51132.h
+++ b/drivers/media/dvb-frontends/or51132.h
@@ -34,7 +34,7 @@ struct or51132_config
int (*set_ts_params)(struct dvb_frontend* fe, int is_punctured);
};
-#if defined(CONFIG_DVB_OR51132) || (defined(CONFIG_DVB_OR51132_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_OR51132)
extern struct dvb_frontend* or51132_attach(const struct or51132_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index c625b57b4333..10cfc0579168 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -22,6 +22,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
/*
* This driver needs external firmware. Please use the command
* "<kerneldir>/Documentation/dvb/get_dvb_firmware or51211" to
@@ -44,9 +46,7 @@
static int debug;
#define dprintk(args...) \
- do { \
- if (debug) printk(KERN_DEBUG "or51211: " args); \
- } while (0)
+ do { if (debug) pr_debug(args); } while (0)
static u8 run_buf[] = {0x7f,0x01};
static u8 cmd_buf[] = {0x04,0x01,0x50,0x80,0x06}; // ATSC
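With pr_fmt() defined as above, every pr_warn()/pr_info()/pr_debug() in or51211.c is automatically prefixed with the module name and the calling function, which is why the call sites below can drop their hand-rolled "or51211: ..." prefixes. A userspace mimic of the expansion, using ordinary printf-style output instead of printk(), is:

/* Userspace mimic of the pr_fmt() prefixing, for illustration only; in the
 * kernel, pr_warn() expands to printk(KERN_WARNING pr_fmt(fmt), ...). */
#include <stdio.h>

#define KBUILD_MODNAME "or51211"
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

static int i2c_writebytes_demo(int addr)
{
	/* Prints: "or51211: i2c_writebytes_demo: error (addr 50, err == -5)" */
	pr_warn("error (addr %02x, err == %i)\n", addr, -5);
	return -1;
}

int main(void)
{
	return i2c_writebytes_demo(0x50) == -1 ? 0 : 1;
}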
@@ -80,8 +80,7 @@ static int i2c_writebytes (struct or51211_state* state, u8 reg, const u8 *buf,
msg.buf = (u8 *)buf;
if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) {
- printk(KERN_WARNING "or51211: i2c_writebytes error "
- "(addr %02x, err == %i)\n", reg, err);
+ pr_warn("error (addr %02x, err == %i)\n", reg, err);
return -EREMOTEIO;
}
@@ -98,8 +97,7 @@ static int i2c_readbytes(struct or51211_state *state, u8 reg, u8 *buf, int len)
msg.buf = buf;
if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) {
- printk(KERN_WARNING "or51211: i2c_readbytes error "
- "(addr %02x, err == %i)\n", reg, err);
+ pr_warn("error (addr %02x, err == %i)\n", reg, err);
return -EREMOTEIO;
}
@@ -118,11 +116,11 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
/* Get eprom data */
tudata[0] = 17;
if (i2c_writebytes(state,0x50,tudata,1)) {
- printk(KERN_WARNING "or51211:load_firmware error eprom addr\n");
+ pr_warn("error eprom addr\n");
return -1;
}
if (i2c_readbytes(state,0x50,&tudata[145],192)) {
- printk(KERN_WARNING "or51211: load_firmware error eprom\n");
+ pr_warn("error eprom\n");
return -1;
}
@@ -136,32 +134,32 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
state->config->reset(fe);
if (i2c_writebytes(state,state->config->demod_address,tudata,585)) {
- printk(KERN_WARNING "or51211: load_firmware error 1\n");
+ pr_warn("error 1\n");
return -1;
}
msleep(1);
if (i2c_writebytes(state,state->config->demod_address,
&fw->data[393],8125)) {
- printk(KERN_WARNING "or51211: load_firmware error 2\n");
+ pr_warn("error 2\n");
return -1;
}
msleep(1);
if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) {
- printk(KERN_WARNING "or51211: load_firmware error 3\n");
+ pr_warn("error 3\n");
return -1;
}
/* Wait at least 5 msec */
msleep(10);
if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) {
- printk(KERN_WARNING "or51211: load_firmware error 4\n");
+ pr_warn("error 4\n");
return -1;
}
msleep(10);
- printk("or51211: Done.\n");
+ pr_info("Done.\n");
return 0;
};
@@ -173,14 +171,14 @@ static int or51211_setmode(struct dvb_frontend* fe, int mode)
state->config->setmode(fe, mode);
if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) {
- printk(KERN_WARNING "or51211: setmode error 1\n");
+ pr_warn("error 1\n");
return -1;
}
/* Wait at least 5 msec */
msleep(10);
if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) {
- printk(KERN_WARNING "or51211: setmode error 2\n");
+ pr_warn("error 2\n");
return -1;
}
@@ -196,7 +194,7 @@ static int or51211_setmode(struct dvb_frontend* fe, int mode)
* normal +/-150kHz Carrier acquisition range
*/
if (i2c_writebytes(state,state->config->demod_address,cmd_buf,3)) {
- printk(KERN_WARNING "or51211: setmode error 3\n");
+ pr_warn("error 3\n");
return -1;
}
@@ -206,14 +204,14 @@ static int or51211_setmode(struct dvb_frontend* fe, int mode)
rec_buf[3] = 0x00;
msleep(20);
if (i2c_writebytes(state,state->config->demod_address,rec_buf,3)) {
- printk(KERN_WARNING "or51211: setmode error 5\n");
+ pr_warn("error 5\n");
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,&rec_buf[10],2)) {
- printk(KERN_WARNING "or51211: setmode error 6");
+ pr_warn("error 6\n");
return -1;
}
- dprintk("setmode rec status %02x %02x\n",rec_buf[10],rec_buf[11]);
+ dprintk("rec status %02x %02x\n", rec_buf[10], rec_buf[11]);
return 0;
}
@@ -248,15 +246,15 @@ static int or51211_read_status(struct dvb_frontend* fe, fe_status_t* status)
/* Receiver Status */
if (i2c_writebytes(state,state->config->demod_address,snd_buf,3)) {
- printk(KERN_WARNING "or51132: read_status write error\n");
+ pr_warn("write error\n");
return -1;
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
- printk(KERN_WARNING "or51132: read_status read error\n");
+ pr_warn("read error\n");
return -1;
}
- dprintk("read_status %x %x\n",rec_buf[0],rec_buf[1]);
+ dprintk("%x %x\n", rec_buf[0], rec_buf[1]);
if (rec_buf[0] & 0x01) { /* Receiver Lock */
*status |= FE_HAS_SIGNAL;
@@ -306,20 +304,18 @@ static int or51211_read_snr(struct dvb_frontend* fe, u16* snr)
snd_buf[2] = 0x04;
if (i2c_writebytes(state,state->config->demod_address,snd_buf,3)) {
- printk(KERN_WARNING "%s: error writing snr reg\n",
- __func__);
+ pr_warn("error writing snr reg\n");
return -1;
}
if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
- printk(KERN_WARNING "%s: read_status read error\n",
- __func__);
+ pr_warn("read_status read error\n");
return -1;
}
state->snr = calculate_snr(rec_buf[0], 89599047);
*snr = (state->snr) >> 16;
- dprintk("%s: noise = 0x%02x, snr = %d.%02d dB\n", __func__, rec_buf[0],
+ dprintk("noise = 0x%02x, snr = %d.%02d dB\n", rec_buf[0],
state->snr >> 24, (((state->snr>>8) & 0xffff) * 100) >> 16);
return 0;
@@ -375,25 +371,24 @@ static int or51211_init(struct dvb_frontend* fe)
if (!state->initialized) {
/* Request the firmware, this will block until it uploads */
- printk(KERN_INFO "or51211: Waiting for firmware upload "
- "(%s)...\n", OR51211_DEFAULT_FIRMWARE);
+ pr_info("Waiting for firmware upload (%s)...\n",
+ OR51211_DEFAULT_FIRMWARE);
ret = config->request_firmware(fe, &fw,
OR51211_DEFAULT_FIRMWARE);
- printk(KERN_INFO "or51211:Got Hotplug firmware\n");
+ pr_info("Got Hotplug firmware\n");
if (ret) {
- printk(KERN_WARNING "or51211: No firmware uploaded "
- "(timeout or file not found?)\n");
+ pr_warn("No firmware uploaded "
+ "(timeout or file not found?)\n");
return ret;
}
ret = or51211_load_firmware(fe, fw);
release_firmware(fw);
if (ret) {
- printk(KERN_WARNING "or51211: Writing firmware to "
- "device failed!\n");
+ pr_warn("Writing firmware to device failed!\n");
return ret;
}
- printk(KERN_INFO "or51211: Firmware upload complete.\n");
+ pr_info("Firmware upload complete.\n");
/* Set operation mode in Receiver 1 register;
* type 1:
@@ -406,7 +401,7 @@ static int or51211_init(struct dvb_frontend* fe)
*/
if (i2c_writebytes(state,state->config->demod_address,
cmd_buf,3)) {
- printk(KERN_WARNING "or51211: Load DVR Error 5\n");
+ pr_warn("Load DVR Error 5\n");
return -1;
}
@@ -419,13 +414,13 @@ static int or51211_init(struct dvb_frontend* fe)
msleep(30);
if (i2c_writebytes(state,state->config->demod_address,
rec_buf,3)) {
- printk(KERN_WARNING "or51211: Load DVR Error A\n");
+ pr_warn("Load DVR Error A\n");
return -1;
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,
&rec_buf[10],2)) {
- printk(KERN_WARNING "or51211: Load DVR Error B\n");
+ pr_warn("Load DVR Error B\n");
return -1;
}
@@ -436,13 +431,13 @@ static int or51211_init(struct dvb_frontend* fe)
msleep(20);
if (i2c_writebytes(state,state->config->demod_address,
rec_buf,3)) {
- printk(KERN_WARNING "or51211: Load DVR Error C\n");
+ pr_warn("Load DVR Error C\n");
return -1;
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,
&rec_buf[12],2)) {
- printk(KERN_WARNING "or51211: Load DVR Error D\n");
+ pr_warn("Load DVR Error D\n");
return -1;
}
@@ -454,16 +449,14 @@ static int or51211_init(struct dvb_frontend* fe)
get_ver_buf[4] = i+1;
if (i2c_writebytes(state,state->config->demod_address,
get_ver_buf,5)) {
- printk(KERN_WARNING "or51211:Load DVR Error 6"
- " - %d\n",i);
+ pr_warn("Load DVR Error 6 - %d\n", i);
return -1;
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,
&rec_buf[i*2],2)) {
- printk(KERN_WARNING "or51211:Load DVR Error 7"
- " - %d\n",i);
+ pr_warn("Load DVR Error 7 - %d\n", i);
return -1;
}
/* If we didn't receive the right index, try again */
@@ -471,15 +464,11 @@ static int or51211_init(struct dvb_frontend* fe)
i--;
}
}
- dprintk("read_fwbits %x %x %x %x %x %x %x %x %x %x\n",
- rec_buf[0], rec_buf[1], rec_buf[2], rec_buf[3],
- rec_buf[4], rec_buf[5], rec_buf[6], rec_buf[7],
- rec_buf[8], rec_buf[9]);
+ dprintk("read_fwbits %10ph\n", rec_buf);
- printk(KERN_INFO "or51211: ver TU%02x%02x%02x VSB mode %02x"
- " Status %02x\n",
- rec_buf[2], rec_buf[4],rec_buf[6],
- rec_buf[12],rec_buf[10]);
+ pr_info("ver TU%02x%02x%02x VSB mode %02x Status %02x\n",
+ rec_buf[2], rec_buf[4], rec_buf[6], rec_buf[12],
+ rec_buf[10]);
rec_buf[0] = 0x04;
rec_buf[1] = 0x00;
@@ -488,13 +477,13 @@ static int or51211_init(struct dvb_frontend* fe)
msleep(20);
if (i2c_writebytes(state,state->config->demod_address,
rec_buf,3)) {
- printk(KERN_WARNING "or51211: Load DVR Error 8\n");
+ pr_warn("Load DVR Error 8\n");
return -1;
}
msleep(20);
if (i2c_readbytes(state,state->config->demod_address,
&rec_buf[8],2)) {
- printk(KERN_WARNING "or51211: Load DVR Error 9\n");
+ pr_warn("Load DVR Error 9\n");
return -1;
}
state->initialized = 1;
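The "%10ph" conversion above relies on the kernel's %*ph printk extension, which hex-dumps the given number of bytes from a buffer and so replaces the ten individual "%x" arguments. Plain printf() has no such specifier; the loop below merely reproduces the equivalent output for a 10-byte buffer:

/* printf() has no %ph; this loop just reproduces what the kernel's "%10ph"
 * extension prints for a 10-byte buffer (space-separated hex bytes). */
#include <stdio.h>

int main(void)
{
	unsigned char rec_buf[10] = { 0x00, 0x01, 0x02, 0x03, 0x04,
				      0x05, 0x06, 0x07, 0x08, 0x09 };
	int i;

	printf("read_fwbits");
	for (i = 0; i < 10; i++)
		printf(" %02x", rec_buf[i]);
	printf("\n");
	return 0;
}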
diff --git a/drivers/media/dvb-frontends/or51211.h b/drivers/media/dvb-frontends/or51211.h
index 3ce0508b898e..9a8ae936b62d 100644
--- a/drivers/media/dvb-frontends/or51211.h
+++ b/drivers/media/dvb-frontends/or51211.h
@@ -37,7 +37,7 @@ struct or51211_config
void (*sleep)(struct dvb_frontend * fe);
};
-#if defined(CONFIG_DVB_OR51211) || (defined(CONFIG_DVB_OR51211_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_OR51211)
extern struct dvb_frontend* or51211_attach(const struct or51211_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/s5h1420.h b/drivers/media/dvb-frontends/s5h1420.h
index ff308136d865..210049b5cf30 100644
--- a/drivers/media/dvb-frontends/s5h1420.h
+++ b/drivers/media/dvb-frontends/s5h1420.h
@@ -40,7 +40,7 @@ struct s5h1420_config
u8 serial_mpeg:1;
};
-#if defined(CONFIG_DVB_S5H1420) || (defined(CONFIG_DVB_S5H1420_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_S5H1420)
extern struct dvb_frontend *s5h1420_attach(const struct s5h1420_config *config,
struct i2c_adapter *i2c);
extern struct i2c_adapter *s5h1420_get_tuner_i2c_adapter(struct dvb_frontend *fe);
diff --git a/drivers/media/dvb-frontends/sp8870.h b/drivers/media/dvb-frontends/sp8870.h
index a764a793c7d8..065ec67d4e30 100644
--- a/drivers/media/dvb-frontends/sp8870.h
+++ b/drivers/media/dvb-frontends/sp8870.h
@@ -35,7 +35,7 @@ struct sp8870_config
int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name);
};
-#if defined(CONFIG_DVB_SP8870) || (defined(CONFIG_DVB_SP8870_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_SP8870)
extern struct dvb_frontend* sp8870_attach(const struct sp8870_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/sp887x.h b/drivers/media/dvb-frontends/sp887x.h
index 04eff6e0eef3..2cdc4e8bc9cd 100644
--- a/drivers/media/dvb-frontends/sp887x.h
+++ b/drivers/media/dvb-frontends/sp887x.h
@@ -17,7 +17,7 @@ struct sp887x_config
int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name);
};
-#if defined(CONFIG_DVB_SP887X) || (defined(CONFIG_DVB_SP887X_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_SP887X)
extern struct dvb_frontend* sp887x_attach(const struct sp887x_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/stb0899_drv.h b/drivers/media/dvb-frontends/stb0899_drv.h
index 98b200ce0c34..8d26ff6eb1db 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.h
+++ b/drivers/media/dvb-frontends/stb0899_drv.h
@@ -142,7 +142,7 @@ struct stb0899_config {
int (*tuner_set_rfsiggain)(struct dvb_frontend *fe, u32 rf_gain);
};
-#if defined(CONFIG_DVB_STB0899) || (defined(CONFIG_DVB_STB0899_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_STB0899)
extern struct dvb_frontend *stb0899_attach(struct stb0899_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/stb6100.h b/drivers/media/dvb-frontends/stb6100.h
index 2ab096614b3f..3a1e40f3b8be 100644
--- a/drivers/media/dvb-frontends/stb6100.h
+++ b/drivers/media/dvb-frontends/stb6100.h
@@ -94,7 +94,7 @@ struct stb6100_state {
u32 reference;
};
-#if defined(CONFIG_DVB_STB6100) || (defined(CONFIG_DVB_STB6100_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_STB6100)
extern struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
const struct stb6100_config *config,
diff --git a/drivers/media/dvb-frontends/stv0297.h b/drivers/media/dvb-frontends/stv0297.h
index 3f8f9468f387..c8ff3639ce00 100644
--- a/drivers/media/dvb-frontends/stv0297.h
+++ b/drivers/media/dvb-frontends/stv0297.h
@@ -42,7 +42,7 @@ struct stv0297_config
u8 stop_during_read:1;
};
-#if defined(CONFIG_DVB_STV0297) || (defined(CONFIG_DVB_STV0297_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_STV0297)
extern struct dvb_frontend* stv0297_attach(const struct stv0297_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index 92a6075cd82f..b57ecf42e75a 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -420,7 +420,7 @@ static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long
do_gettimeofday (&nexttime);
if (debug_legacy_dish_switch)
- memcpy (&tv[0], &nexttime, sizeof (struct timeval));
+ tv[0] = nexttime;
stv0299_writeregI (state, 0x0c, reg0x0c | 0x50); /* set LNB to 18V */
dvb_frontend_sleep_until(&nexttime, 32000);
diff --git a/drivers/media/dvb-frontends/stv0299.h b/drivers/media/dvb-frontends/stv0299.h
index ba219b767a69..06f70fc8327b 100644
--- a/drivers/media/dvb-frontends/stv0299.h
+++ b/drivers/media/dvb-frontends/stv0299.h
@@ -95,7 +95,7 @@ struct stv0299_config
int (*set_ts_params)(struct dvb_frontend *fe, int is_punctured);
};
-#if defined(CONFIG_DVB_STV0299) || (defined(CONFIG_DVB_STV0299_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_STV0299)
extern struct dvb_frontend *stv0299_attach(const struct stv0299_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index b551ca350e00..e5a87b57d855 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -524,11 +524,8 @@ void stv0900_set_tuner(struct dvb_frontend *fe, u32 frequency,
struct dvb_frontend_ops *frontend_ops = NULL;
struct dvb_tuner_ops *tuner_ops = NULL;
- if (&fe->ops)
- frontend_ops = &fe->ops;
-
- if (&frontend_ops->tuner_ops)
- tuner_ops = &frontend_ops->tuner_ops;
+ frontend_ops = &fe->ops;
+ tuner_ops = &frontend_ops->tuner_ops;
if (tuner_ops->set_frequency) {
if ((tuner_ops->set_frequency(fe, frequency)) < 0)
@@ -552,11 +549,8 @@ void stv0900_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth)
struct dvb_frontend_ops *frontend_ops = NULL;
struct dvb_tuner_ops *tuner_ops = NULL;
- if (&fe->ops)
- frontend_ops = &fe->ops;
-
- if (&frontend_ops->tuner_ops)
- tuner_ops = &frontend_ops->tuner_ops;
+ frontend_ops = &fe->ops;
+ tuner_ops = &frontend_ops->tuner_ops;
if (tuner_ops->set_bandwidth) {
if ((tuner_ops->set_bandwidth(fe, bandwidth)) < 0)
@@ -1558,6 +1552,27 @@ static int stv0900_status(struct stv0900_internal *intp,
return locked;
}
+static int stv0900_set_mis(struct stv0900_internal *intp,
+ enum fe_stv0900_demod_num demod, int mis)
+{
+ enum fe_stv0900_error error = STV0900_NO_ERROR;
+
+ dprintk("%s\n", __func__);
+
+ if (mis < 0 || mis > 255) {
+ dprintk("Disable MIS filtering\n");
+ stv0900_write_bits(intp, FILTER_EN, 0);
+ } else {
+ dprintk("Enable MIS filtering - %d\n", mis);
+ stv0900_write_bits(intp, FILTER_EN, 1);
+ stv0900_write_reg(intp, ISIENTRY, mis);
+ stv0900_write_reg(intp, ISIBITENA, 0xff);
+ }
+
+ return error;
+}
+
static enum dvbfe_search stv0900_search(struct dvb_frontend *fe)
{
struct stv0900_state *state = fe->demodulator_priv;
@@ -1578,6 +1593,8 @@ static enum dvbfe_search stv0900_search(struct dvb_frontend *fe)
if (state->config->set_ts_params)
state->config->set_ts_params(fe, 0);
+ stv0900_set_mis(intp, demod, c->stream_id);
+
p_result.locked = FALSE;
p_search.path = demod;
p_search.frequency = c->frequency;
@@ -1935,6 +1952,9 @@ struct dvb_frontend *stv0900_attach(const struct stv0900_config *config,
if (err_stv0900)
goto error;
+ if (state->internal->chip_id >= 0x30)
+ state->frontend.ops.info.caps |= FE_CAN_MULTISTREAM;
+
break;
default:
goto error;
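The new stv0900_set_mis() helper plus the FE_CAN_MULTISTREAM capability advertise DVB-S2 multistream support: when userspace supplies a stream id in the 0-255 range the ISI filter registers are programmed, otherwise filtering is disabled. A hedged userspace sketch of requesting a stream id through the DVBv5 property API follows; the device node and ISI value are example values only, and a real application would set the rest of the tuning parameters in the same property list.

/* Hedged sketch: select ISI 5 on adapter 0 via the DVBv5 property API.
 * The device node and stream id are example values only. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dvb/frontend.h>

int main(void)
{
	struct dtv_property props[] = {
		{ .cmd = DTV_STREAM_ID, .u.data = 5 },	/* ISI to filter on */
	};
	struct dtv_properties cmdseq = { .num = 1, .props = props };
	int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);

	if (fd < 0) {
		perror("open frontend");
		return 1;
	}
	if (ioctl(fd, FE_SET_PROPERTY, &cmdseq) < 0)
		perror("FE_SET_PROPERTY");
	close(fd);
	return 0;
}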
diff --git a/drivers/media/dvb-frontends/stv0900_reg.h b/drivers/media/dvb-frontends/stv0900_reg.h
index 731afe93a823..511ed2a2d987 100644
--- a/drivers/media/dvb-frontends/stv0900_reg.h
+++ b/drivers/media/dvb-frontends/stv0900_reg.h
@@ -3446,8 +3446,11 @@ extern s32 shiftx(s32 x, int demod, s32 shift);
#define R0900_P1_PDELCTRL1 0xf550
#define PDELCTRL1 REGx(R0900_P1_PDELCTRL1)
#define F0900_P1_INV_MISMASK 0xf5500080
+#define INV_MISMASK FLDx(F0900_P1_INV_MISMASK)
#define F0900_P1_FILTER_EN 0xf5500020
+#define FILTER_EN FLDx(F0900_P1_FILTER_EN)
#define F0900_P1_EN_MIS00 0xf5500002
+#define EN_MIS00 FLDx(F0900_P1_EN_MIS00)
#define F0900_P1_ALGOSWRST 0xf5500001
#define ALGOSWRST FLDx(F0900_P1_ALGOSWRST)
diff --git a/drivers/media/dvb-frontends/stv0900_sw.c b/drivers/media/dvb-frontends/stv0900_sw.c
index 4af20780fb9c..0a40edfad739 100644
--- a/drivers/media/dvb-frontends/stv0900_sw.c
+++ b/drivers/media/dvb-frontends/stv0900_sw.c
@@ -1167,11 +1167,8 @@ static u32 stv0900_get_tuner_freq(struct dvb_frontend *fe)
struct dvb_tuner_ops *tuner_ops = NULL;
u32 freq = 0;
- if (&fe->ops)
- frontend_ops = &fe->ops;
-
- if (&frontend_ops->tuner_ops)
- tuner_ops = &frontend_ops->tuner_ops;
+ frontend_ops = &fe->ops;
+ tuner_ops = &frontend_ops->tuner_ops;
if (tuner_ops->get_frequency) {
if ((tuner_ops->get_frequency(fe, &freq)) < 0)
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 13caec013902..f36eeefb76a6 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -4267,7 +4267,7 @@ err:
return -1;
}
-static int stv090x_set_tspath(struct stv090x_state *state)
+static int stv0900_set_tspath(struct stv090x_state *state)
{
u32 reg;
@@ -4538,6 +4538,121 @@ err:
return -1;
}
+static int stv0903_set_tspath(struct stv090x_state *state)
+{
+ u32 reg;
+
+ if (state->internal->dev_ver >= 0x20) {
+ switch (state->config->ts1_mode) {
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ case STV090x_TSMODE_DVBCI:
+ stv090x_write_reg(state, STV090x_TSGENERAL, 0x00);
+ break;
+
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ default:
+ stv090x_write_reg(state, STV090x_TSGENERAL, 0x0c);
+ break;
+ }
+ } else {
+ switch (state->config->ts1_mode) {
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ case STV090x_TSMODE_DVBCI:
+ stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x10);
+ break;
+
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ default:
+ stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x14);
+ break;
+ }
+ }
+
+ switch (state->config->ts1_mode) {
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_DVBCI:
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ default:
+ break;
+ }
+
+ if (state->config->ts1_clk > 0) {
+ u32 speed;
+
+ switch (state->config->ts1_mode) {
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ case STV090x_TSMODE_DVBCI:
+ default:
+ speed = state->internal->mclk /
+ (state->config->ts1_clk / 4);
+ if (speed < 0x08)
+ speed = 0x08;
+ if (speed > 0xFF)
+ speed = 0xFF;
+ break;
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ speed = state->internal->mclk /
+ (state->config->ts1_clk / 32);
+ if (speed < 0x20)
+ speed = 0x20;
+ if (speed > 0xFF)
+ speed = 0xFF;
+ break;
+ }
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
+ STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
+ goto err;
+ if (stv090x_write_reg(state, STV090x_P1_TSSPEED, speed) < 0)
+ goto err;
+ }
+
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
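When ts1_clk is set, the manual-speed branch above derives the TSSPEED register from the master clock and the requested TS clock, then clamps it to the range accepted by the parallel (0x08-0xff) or serial (0x20-0xff) interface. A worked example of that arithmetic, with made-up clock values, is:

/* Worked example of the TSSPEED computation above; the clock values are
 * made up for illustration, not taken from a real board. */
#include <stdio.h>

int main(void)
{
	unsigned int mclk = 135000000;		/* demod master clock, Hz */
	unsigned int ts1_clk = 9000000;		/* requested TS clock, Hz */
	unsigned int speed;

	/* parallel / DVB-CI branch: divide the requested TS clock by 4 */
	speed = mclk / (ts1_clk / 4);
	if (speed < 0x08)
		speed = 0x08;
	if (speed > 0xff)
		speed = 0xff;

	printf("P1_TSSPEED = 0x%02x\n", speed);	/* 135e6 / 2.25e6 = 60 -> 0x3c */
	return 0;
}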
static int stv090x_init(struct dvb_frontend *fe)
{
struct stv090x_state *state = fe->demodulator_priv;
@@ -4600,8 +4715,13 @@ static int stv090x_init(struct dvb_frontend *fe)
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
- if (stv090x_set_tspath(state) < 0)
- goto err;
+ if (state->device == STV0900) {
+ if (stv0900_set_tspath(state) < 0)
+ goto err;
+ } else {
+ if (stv0903_set_tspath(state) < 0)
+ goto err;
+ }
return 0;
@@ -4642,23 +4762,26 @@ static int stv090x_setup(struct dvb_frontend *fe)
/* Stop Demod */
if (stv090x_write_reg(state, STV090x_P1_DMDISTATE, 0x5c) < 0)
goto err;
- if (stv090x_write_reg(state, STV090x_P2_DMDISTATE, 0x5c) < 0)
- goto err;
+ if (state->device == STV0900)
+ if (stv090x_write_reg(state, STV090x_P2_DMDISTATE, 0x5c) < 0)
+ goto err;
msleep(5);
/* Set No Tuner Mode */
if (stv090x_write_reg(state, STV090x_P1_TNRCFG, 0x6c) < 0)
goto err;
- if (stv090x_write_reg(state, STV090x_P2_TNRCFG, 0x6c) < 0)
- goto err;
+ if (state->device == STV0900)
+ if (stv090x_write_reg(state, STV090x_P2_TNRCFG, 0x6c) < 0)
+ goto err;
/* I2C repeater OFF */
STV090x_SETFIELD_Px(reg, ENARPT_LEVEL_FIELD, config->repeater_level);
if (stv090x_write_reg(state, STV090x_P1_I2CRPT, reg) < 0)
goto err;
- if (stv090x_write_reg(state, STV090x_P2_I2CRPT, reg) < 0)
- goto err;
+ if (state->device == STV0900)
+ if (stv090x_write_reg(state, STV090x_P2_I2CRPT, reg) < 0)
+ goto err;
if (stv090x_write_reg(state, STV090x_NCOARSE, 0x13) < 0) /* set PLL divider */
goto err;
diff --git a/drivers/media/dvb-frontends/stv090x.h b/drivers/media/dvb-frontends/stv090x.h
index 29cdc2b71314..0bd6adcfee8a 100644
--- a/drivers/media/dvb-frontends/stv090x.h
+++ b/drivers/media/dvb-frontends/stv090x.h
@@ -103,7 +103,7 @@ struct stv090x_config {
void (*tuner_i2c_lock) (struct dvb_frontend *fe, int lock);
};
-#if defined(CONFIG_DVB_STV090x) || (defined(CONFIG_DVB_STV090x_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_STV090x)
extern struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/stv6110x.h b/drivers/media/dvb-frontends/stv6110x.h
index 47516753929a..bc4766db29c5 100644
--- a/drivers/media/dvb-frontends/stv6110x.h
+++ b/drivers/media/dvb-frontends/stv6110x.h
@@ -53,7 +53,7 @@ struct stv6110x_devctl {
};
-#if defined(CONFIG_DVB_STV6110x) || (defined(CONFIG_DVB_STV6110x_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_STV6110x)
extern struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
const struct stv6110x_config *config,
diff --git a/drivers/media/dvb-frontends/tda1002x.h b/drivers/media/dvb-frontends/tda1002x.h
index 04d19418bf20..e404b6e44802 100644
--- a/drivers/media/dvb-frontends/tda1002x.h
+++ b/drivers/media/dvb-frontends/tda1002x.h
@@ -57,7 +57,7 @@ struct tda10023_config {
u16 deltaf;
};
-#if defined(CONFIG_DVB_TDA10021) || (defined(CONFIG_DVB_TDA10021_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA10021)
extern struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
struct i2c_adapter* i2c, u8 pwm);
#else
@@ -69,8 +69,7 @@ static inline struct dvb_frontend* tda10021_attach(const struct tda1002x_config*
}
#endif // CONFIG_DVB_TDA10021
-#if defined(CONFIG_DVB_TDA10023) || \
- (defined(CONFIG_DVB_TDA10023_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA10023)
extern struct dvb_frontend *tda10023_attach(
const struct tda10023_config *config,
struct i2c_adapter *i2c, u8 pwm);
diff --git a/drivers/media/dvb-frontends/tda1004x.h b/drivers/media/dvb-frontends/tda1004x.h
index 4e27ffb0f14e..dd283fbb61c0 100644
--- a/drivers/media/dvb-frontends/tda1004x.h
+++ b/drivers/media/dvb-frontends/tda1004x.h
@@ -117,7 +117,7 @@ struct tda1004x_state {
enum tda1004x_demod demod_type;
};
-#if defined(CONFIG_DVB_TDA1004X) || (defined(CONFIG_DVB_TDA1004X_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA1004X)
extern struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config,
struct i2c_adapter* i2c);
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 16a4bc54dbe7..2521f7e23018 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -30,7 +30,7 @@ static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
u8 buf[len+1];
struct i2c_msg msg[1] = {
{
- .addr = priv->cfg.i2c_address,
+ .addr = priv->cfg.demod_i2c_addr,
.flags = 0,
.len = sizeof(buf),
.buf = buf,
@@ -59,12 +59,12 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
u8 buf[len];
struct i2c_msg msg[2] = {
{
- .addr = priv->cfg.i2c_address,
+ .addr = priv->cfg.demod_i2c_addr,
.flags = 0,
.len = 1,
.buf = &reg,
}, {
- .addr = priv->cfg.i2c_address,
+ .addr = priv->cfg.demod_i2c_addr,
.flags = I2C_M_RD,
.len = sizeof(buf),
.buf = buf,
@@ -1064,7 +1064,7 @@ static int tda10071_init(struct dvb_frontend *fe)
cmd.args[2] = 0x00;
cmd.args[3] = 0x00;
cmd.args[4] = 0x00;
- cmd.args[5] = 0x14;
+ cmd.args[5] = (priv->cfg.tuner_i2c_addr) ? priv->cfg.tuner_i2c_addr : 0x14;
cmd.args[6] = 0x00;
cmd.args[7] = 0x03;
cmd.args[8] = 0x02;
@@ -1202,6 +1202,20 @@ struct dvb_frontend *tda10071_attach(const struct tda10071_config *config,
goto error;
}
+ /* make sure demod i2c address is specified */
+ if (!config->demod_i2c_addr) {
+ dev_dbg(&i2c->dev, "%s: invalid demod i2c address!\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* make sure tuner i2c address is specified */
+ if (!config->tuner_i2c_addr) {
+ dev_dbg(&i2c->dev, "%s: invalid tuner i2c address!\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
/* setup the priv */
priv->i2c = i2c;
memcpy(&priv->cfg, config, sizeof(struct tda10071_config));
diff --git a/drivers/media/dvb-frontends/tda10071.h b/drivers/media/dvb-frontends/tda10071.h
index 21163c4b555c..bff1c38df802 100644
--- a/drivers/media/dvb-frontends/tda10071.h
+++ b/drivers/media/dvb-frontends/tda10071.h
@@ -28,7 +28,13 @@ struct tda10071_config {
* Default: none, must set
* Values: 0x55,
*/
- u8 i2c_address;
+ u8 demod_i2c_addr;
+
+ /* Tuner I2C address.
+ * Default: none, must set
+ * Values: 0x14, 0x54, ...
+ */
+ u8 tuner_i2c_addr;
/* Max bytes I2C provider can write at once.
* Note: Buffer is taken from the stack currently!
diff --git a/drivers/media/dvb-frontends/tda10086.h b/drivers/media/dvb-frontends/tda10086.h
index 61148c558d8d..458fe91c1b88 100644
--- a/drivers/media/dvb-frontends/tda10086.h
+++ b/drivers/media/dvb-frontends/tda10086.h
@@ -46,7 +46,7 @@ struct tda10086_config
enum tda10086_xtal xtal_freq;
};
-#if defined(CONFIG_DVB_TDA10086) || (defined(CONFIG_DVB_TDA10086_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA10086)
extern struct dvb_frontend* tda10086_attach(const struct tda10086_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/tda665x.h b/drivers/media/dvb-frontends/tda665x.h
index ec7927aa75ae..03a0da6d5cf2 100644
--- a/drivers/media/dvb-frontends/tda665x.h
+++ b/drivers/media/dvb-frontends/tda665x.h
@@ -31,7 +31,7 @@ struct tda665x_config {
u32 ref_divider;
};
-#if defined(CONFIG_DVB_TDA665x) || (defined(CONFIG_DVB_TDA665x_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA665x)
extern struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
const struct tda665x_config *config,
diff --git a/drivers/media/dvb-frontends/tda8083.h b/drivers/media/dvb-frontends/tda8083.h
index 5a03c14a10e8..de6b1860dfdd 100644
--- a/drivers/media/dvb-frontends/tda8083.h
+++ b/drivers/media/dvb-frontends/tda8083.h
@@ -35,7 +35,7 @@ struct tda8083_config
u8 demod_address;
};
-#if defined(CONFIG_DVB_TDA8083) || (defined(CONFIG_DVB_TDA8083_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA8083)
extern struct dvb_frontend* tda8083_attach(const struct tda8083_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/tda8261.h b/drivers/media/dvb-frontends/tda8261.h
index 006e45351b94..55cf4ffcbfdf 100644
--- a/drivers/media/dvb-frontends/tda8261.h
+++ b/drivers/media/dvb-frontends/tda8261.h
@@ -34,7 +34,7 @@ struct tda8261_config {
enum tda8261_step step_size;
};
-#if defined(CONFIG_DVB_TDA8261) || (defined(CONFIG_DVB_TDA8261_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA8261)
extern struct dvb_frontend *tda8261_attach(struct dvb_frontend *fe,
const struct tda8261_config *config,
diff --git a/drivers/media/dvb-frontends/tda8261_cfg.h b/drivers/media/dvb-frontends/tda8261_cfg.h
index 1af1ee49b542..46710744173b 100644
--- a/drivers/media/dvb-frontends/tda8261_cfg.h
+++ b/drivers/media/dvb-frontends/tda8261_cfg.h
@@ -78,7 +78,7 @@ static int tda8261_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
return err;
}
*bandwidth = t_state.bandwidth;
+ printk("%s: Bandwidth=%d\n", __func__, t_state.bandwidth);
}
- printk("%s: Bandwidth=%d\n", __func__, t_state.bandwidth);
return 0;
}
diff --git a/drivers/media/dvb-frontends/tda826x.h b/drivers/media/dvb-frontends/tda826x.h
index 89e97926ab23..5f0f20e7e4f8 100644
--- a/drivers/media/dvb-frontends/tda826x.h
+++ b/drivers/media/dvb-frontends/tda826x.h
@@ -35,7 +35,7 @@
* @param has_loopthrough Set to 1 if the card has a loopthrough RF connector.
* @return FE pointer on success, NULL on failure.
*/
-#if defined(CONFIG_DVB_TDA826X) || (defined(CONFIG_DVB_TDA826X_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA826X)
extern struct dvb_frontend* tda826x_attach(struct dvb_frontend *fe, int addr,
struct i2c_adapter *i2c,
int has_loopthrough);
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
new file mode 100644
index 000000000000..ad7ad857ab2a
--- /dev/null
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -0,0 +1,373 @@
+/*
+ Montage Technology TS2020 - Silicon Tuner driver
+ Copyright (C) 2009-2012 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
+
+ Copyright (C) 2009-2012 TurboSight.com
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "dvb_frontend.h"
+#include "ts2020.h"
+
+#define TS2020_XTAL_FREQ 27000 /* in kHz */
+#define FREQ_OFFSET_LOW_SYM_RATE 3000
+
+struct ts2020_priv {
+ /* i2c details */
+ int i2c_address;
+ struct i2c_adapter *i2c;
+ u8 clk_out_div;
+ u32 frequency;
+};
+
+static int ts2020_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+static int ts2020_writereg(struct dvb_frontend *fe, int reg, int data)
+{
+ struct ts2020_priv *priv = fe->tuner_priv;
+ u8 buf[] = { reg, data };
+ struct i2c_msg msg[] = {
+ {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .buf = buf,
+ .len = 2
+ }
+ };
+ int err;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ err = i2c_transfer(priv->i2c, msg, 1);
+ if (err != 1) {
+ printk(KERN_ERR
+ "%s: writereg error(err == %i, reg == 0x%02x, value == 0x%02x)\n",
+ __func__, err, reg, data);
+ return -EREMOTEIO;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ return 0;
+}
+
+static int ts2020_readreg(struct dvb_frontend *fe, u8 reg)
+{
+ struct ts2020_priv *priv = fe->tuner_priv;
+ int ret;
+ u8 b0[] = { reg };
+ u8 b1[] = { 0 };
+ struct i2c_msg msg[] = {
+ {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .buf = b0,
+ .len = 1
+ }, {
+ .addr = priv->i2c_address,
+ .flags = I2C_M_RD,
+ .buf = b1,
+ .len = 1
+ }
+ };
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ ret = i2c_transfer(priv->i2c, msg, 2);
+
+ if (ret != 2) {
+ printk(KERN_ERR "%s: reg=0x%x(error=%d)\n",
+ __func__, reg, ret);
+ return ret;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ return b1[0];
+}
+
+static int ts2020_sleep(struct dvb_frontend *fe)
+{
+ struct ts2020_priv *priv = fe->tuner_priv;
+ int ret;
+ u8 buf[] = { 10, 0 };
+ struct i2c_msg msg = {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .buf = buf,
+ .len = 2
+ };
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ ret = i2c_transfer(priv->i2c, &msg, 1);
+ if (ret != 1)
+ printk(KERN_ERR "%s: i2c error\n", __func__);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ return (ret == 1) ? 0 : ret;
+}
+
+static int ts2020_init(struct dvb_frontend *fe)
+{
+ struct ts2020_priv *priv = fe->tuner_priv;
+
+ ts2020_writereg(fe, 0x42, 0x73);
+ ts2020_writereg(fe, 0x05, priv->clk_out_div);
+ ts2020_writereg(fe, 0x20, 0x27);
+ ts2020_writereg(fe, 0x07, 0x02);
+ ts2020_writereg(fe, 0x11, 0xff);
+ ts2020_writereg(fe, 0x60, 0xf9);
+ ts2020_writereg(fe, 0x08, 0x01);
+ ts2020_writereg(fe, 0x00, 0x41);
+
+ return 0;
+}
+
+static int ts2020_tuner_gate_ctrl(struct dvb_frontend *fe, u8 offset)
+{
+ int ret;
+ ret = ts2020_writereg(fe, 0x51, 0x1f - offset);
+ ret |= ts2020_writereg(fe, 0x51, 0x1f);
+ ret |= ts2020_writereg(fe, 0x50, offset);
+ ret |= ts2020_writereg(fe, 0x50, 0x00);
+ msleep(20);
+ return ret;
+}
+
+static int ts2020_set_tuner_rf(struct dvb_frontend *fe)
+{
+ int reg;
+
+ reg = ts2020_readreg(fe, 0x3d);
+ reg &= 0x7f;
+ if (reg < 0x16)
+ reg = 0xa1;
+ else if (reg == 0x16)
+ reg = 0x99;
+ else
+ reg = 0xf9;
+
+ ts2020_writereg(fe, 0x60, reg);
+ reg = ts2020_tuner_gate_ctrl(fe, 0x08);
+
+ return reg;
+}
+
+static int ts2020_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct ts2020_priv *priv = fe->tuner_priv;
+ int ret;
+ u32 frequency = c->frequency;
+ s32 offset_khz;
+ u32 symbol_rate = (c->symbol_rate / 1000);
+ u32 f3db, gdiv28;
+ u16 value, ndiv, lpf_coeff;
+ u8 lpf_mxdiv, mlpf_max, mlpf_min, nlpf;
+ u8 lo = 0x01, div4 = 0x0;
+
+ /* Calculate frequency divider */
+ if (frequency < 1060000) {
+ lo |= 0x10;
+ div4 = 0x1;
+ ndiv = (frequency * 14 * 4) / TS2020_XTAL_FREQ;
+ } else
+ ndiv = (frequency * 14 * 2) / TS2020_XTAL_FREQ;
+ ndiv = ndiv + ndiv % 2;
+ ndiv = ndiv - 1024;
+
+ ret = ts2020_writereg(fe, 0x10, 0x80 | lo);
+
+ /* Set frequency divider */
+ ret |= ts2020_writereg(fe, 0x01, (ndiv >> 8) & 0xf);
+ ret |= ts2020_writereg(fe, 0x02, ndiv & 0xff);
+
+ ret |= ts2020_writereg(fe, 0x03, 0x06);
+ ret |= ts2020_tuner_gate_ctrl(fe, 0x10);
+ if (ret < 0)
+ return -ENODEV;
+
+ /* Tuner Frequency Range */
+ ret = ts2020_writereg(fe, 0x10, lo);
+
+ ret |= ts2020_tuner_gate_ctrl(fe, 0x08);
+
+ /* Tuner RF */
+ ret |= ts2020_set_tuner_rf(fe);
+
+ gdiv28 = (TS2020_XTAL_FREQ / 1000 * 1694 + 500) / 1000;
+ ret |= ts2020_writereg(fe, 0x04, gdiv28 & 0xff);
+ ret |= ts2020_tuner_gate_ctrl(fe, 0x04);
+ if (ret < 0)
+ return -ENODEV;
+
+ value = ts2020_readreg(fe, 0x26);
+
+ f3db = (symbol_rate * 135) / 200 + 2000;
+ f3db += FREQ_OFFSET_LOW_SYM_RATE;
+ if (f3db < 7000)
+ f3db = 7000;
+ if (f3db > 40000)
+ f3db = 40000;
+
+ gdiv28 = gdiv28 * 207 / (value * 2 + 151);
+ mlpf_max = gdiv28 * 135 / 100;
+ mlpf_min = gdiv28 * 78 / 100;
+ if (mlpf_max > 63)
+ mlpf_max = 63;
+
+ lpf_coeff = 2766;
+
+ nlpf = (f3db * gdiv28 * 2 / lpf_coeff /
+ (TS2020_XTAL_FREQ / 1000) + 1) / 2;
+ if (nlpf > 23)
+ nlpf = 23;
+ if (nlpf < 1)
+ nlpf = 1;
+
+ lpf_mxdiv = (nlpf * (TS2020_XTAL_FREQ / 1000)
+ * lpf_coeff * 2 / f3db + 1) / 2;
+
+ if (lpf_mxdiv < mlpf_min) {
+ nlpf++;
+ lpf_mxdiv = (nlpf * (TS2020_XTAL_FREQ / 1000)
+ * lpf_coeff * 2 / f3db + 1) / 2;
+ }
+
+ if (lpf_mxdiv > mlpf_max)
+ lpf_mxdiv = mlpf_max;
+
+ ret = ts2020_writereg(fe, 0x04, lpf_mxdiv);
+ ret |= ts2020_writereg(fe, 0x06, nlpf);
+
+ ret |= ts2020_tuner_gate_ctrl(fe, 0x04);
+
+ ret |= ts2020_tuner_gate_ctrl(fe, 0x01);
+
+ msleep(80);
+ /* calculate offset assuming 96000kHz */
+ offset_khz = (ndiv - ndiv % 2 + 1024) * TS2020_XTAL_FREQ
+ / (6 + 8) / (div4 + 1) / 2;
+
+ priv->frequency = offset_khz;
+
+ return (ret < 0) ? -EINVAL : 0;
+}
+
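The divider math in ts2020_set_params() can be followed with a concrete number. Assuming a 1550000 kHz carrier (above the 1060000 kHz threshold, so div4 stays 0), the sketch below reproduces the ndiv value, the two divider register writes, and the frequency the driver reports back; the carrier value is purely illustrative.

/* Worked example of the ts2020_set_params() divider math for an assumed
 * 1550000 kHz carrier; purely illustrative, mirrors the code above. */
#include <stdio.h>

#define TS2020_XTAL_FREQ 27000	/* kHz */

int main(void)
{
	unsigned int frequency = 1550000;	/* kHz, > 1060000 so div4 = 0 */
	unsigned int div4 = 0;
	unsigned int ndiv;
	int offset_khz;

	ndiv = (frequency * 14 * 2) / TS2020_XTAL_FREQ;	/* 43400000 / 27000 = 1607 */
	ndiv = ndiv + ndiv % 2;				/* round up to even: 1608 */
	ndiv = ndiv - 1024;				/* 584 */

	printf("reg 0x01 = 0x%02x\n", (ndiv >> 8) & 0xf);	/* 0x02 */
	printf("reg 0x02 = 0x%02x\n", ndiv & 0xff);		/* 0x48 */

	/* same formula the driver uses to report the tuned frequency back */
	offset_khz = (ndiv - ndiv % 2 + 1024) * TS2020_XTAL_FREQ
		/ (6 + 8) / (div4 + 1) / 2;
	printf("offset_khz = %d\n", offset_khz);		/* 1550571 */
	return 0;
}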
+static int ts2020_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct ts2020_priv *priv = fe->tuner_priv;
+ *frequency = priv->frequency;
+ return 0;
+}
+
+/* read TS2020 signal strength */
+static int ts2020_read_signal_strength(struct dvb_frontend *fe,
+ u16 *signal_strength)
+{
+ u16 sig_reading, sig_strength;
+ u8 rfgain, bbgain;
+
+ rfgain = ts2020_readreg(fe, 0x3d) & 0x1f;
+ bbgain = ts2020_readreg(fe, 0x21) & 0x1f;
+
+ if (rfgain > 15)
+ rfgain = 15;
+ if (bbgain > 13)
+ bbgain = 13;
+
+ sig_reading = rfgain * 2 + bbgain * 3;
+
+ sig_strength = 40 + (64 - sig_reading) * 50 / 64;
+
+ /* cook the value to be suitable for szap-s2 human readable output */
+ *signal_strength = sig_strength * 1000;
+
+ return 0;
+}
+
+static struct dvb_tuner_ops ts2020_tuner_ops = {
+ .info = {
+ .name = "TS2020",
+ .frequency_min = 950000,
+ .frequency_max = 2150000
+ },
+ .init = ts2020_init,
+ .release = ts2020_release,
+ .sleep = ts2020_sleep,
+ .set_params = ts2020_set_params,
+ .get_frequency = ts2020_get_frequency,
+ .get_rf_strength = ts2020_read_signal_strength,
+};
+
+struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
+ const struct ts2020_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct ts2020_priv *priv = NULL;
+ u8 buf;
+
+ priv = kzalloc(sizeof(struct ts2020_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+
+ priv->i2c_address = config->tuner_address;
+ priv->i2c = i2c;
+ priv->clk_out_div = config->clk_out_div;
+ fe->tuner_priv = priv;
+
+ /* Wake Up the tuner */
+ if ((0x03 & ts2020_readreg(fe, 0x00)) == 0x00) {
+ ts2020_writereg(fe, 0x00, 0x01);
+ msleep(2);
+ }
+
+ ts2020_writereg(fe, 0x00, 0x03);
+ msleep(2);
+
+ /* Check the tuner version */
+ buf = ts2020_readreg(fe, 0x00);
+ if ((buf == 0x01) || (buf == 0x41) || (buf == 0x81))
+ printk(KERN_INFO "%s: Find tuner TS2020!\n", __func__);
+ else {
+ printk(KERN_ERR "%s: Read tuner reg[0] = %d\n", __func__, buf);
+ kfree(priv);
+ return NULL;
+ }
+
+ memcpy(&fe->ops.tuner_ops, &ts2020_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ return fe;
+}
+EXPORT_SYMBOL(ts2020_attach);
+
+MODULE_AUTHOR("Konstantin Dimitrov <kosio.dimitrov@gmail.com>");
+MODULE_DESCRIPTION("Montage Technology TS2020 - Silicon tuner driver module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/ts2020.h b/drivers/media/dvb-frontends/ts2020.h
new file mode 100644
index 000000000000..c7e64afa614a
--- /dev/null
+++ b/drivers/media/dvb-frontends/ts2020.h
@@ -0,0 +1,50 @@
+/*
+ Montage Technology TS2020 - Silicon Tuner driver
+ Copyright (C) 2009-2012 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
+
+ Copyright (C) 2009-2012 TurboSight.com
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef TS2020_H
+#define TS2020_H
+
+#include <linux/dvb/frontend.h>
+
+struct ts2020_config {
+ u8 tuner_address;
+ u8 clk_out_div;
+};
+
+#if defined(CONFIG_DVB_TS2020) || \
+ (defined(CONFIG_DVB_TS2020_MODULE) && defined(MODULE))
+
+extern struct dvb_frontend *ts2020_attach(
+ struct dvb_frontend *fe,
+ const struct ts2020_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *ts2020_attach(
+ struct dvb_frontend *fe,
+ const struct ts2020_config *config,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif /* TS2020_H */
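A hedged, kernel-side sketch of how a card driver would attach this tuner behind an already-attached demodulator frontend; the I2C address and clock divider are example values only:

/* Hedged attach sketch; the tuner address and clk_out_div are example
 * values, and 'adapter' stands for the host driver's i2c_adapter. */
static const struct ts2020_config example_ts2020_config = {
	.tuner_address = 0x60,
	.clk_out_div = 1,
};

static int example_attach_tuner(struct dvb_frontend *fe,
				struct i2c_adapter *adapter)
{
	if (!ts2020_attach(fe, &example_ts2020_config, adapter))
		return -ENODEV;	/* tuner not found or disabled in Kconfig */
	return 0;
}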
diff --git a/drivers/media/dvb-frontends/tua6100.h b/drivers/media/dvb-frontends/tua6100.h
index f83dbd5e42ae..83a9c30e67ca 100644
--- a/drivers/media/dvb-frontends/tua6100.h
+++ b/drivers/media/dvb-frontends/tua6100.h
@@ -34,7 +34,7 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-#if defined(CONFIG_DVB_TUA6100) || (defined(CONFIG_DVB_TUA6100_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TUA6100)
extern struct dvb_frontend *tua6100_attach(struct dvb_frontend *fe, int addr, struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend* tua6100_attach(struct dvb_frontend *fe, int addr, struct i2c_adapter *i2c)
diff --git a/drivers/media/dvb-frontends/ves1820.h b/drivers/media/dvb-frontends/ves1820.h
index e902ed634ec3..c073f353ac38 100644
--- a/drivers/media/dvb-frontends/ves1820.h
+++ b/drivers/media/dvb-frontends/ves1820.h
@@ -41,7 +41,7 @@ struct ves1820_config
u8 selagc:1;
};
-#if defined(CONFIG_DVB_VES1820) || (defined(CONFIG_DVB_VES1820_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_VES1820)
extern struct dvb_frontend* ves1820_attach(const struct ves1820_config* config,
struct i2c_adapter* i2c, u8 pwm);
#else
diff --git a/drivers/media/dvb-frontends/ves1x93.h b/drivers/media/dvb-frontends/ves1x93.h
index 8a5a49e808f6..2307caea6aec 100644
--- a/drivers/media/dvb-frontends/ves1x93.h
+++ b/drivers/media/dvb-frontends/ves1x93.h
@@ -40,7 +40,7 @@ struct ves1x93_config
u8 invert_pwm:1;
};
-#if defined(CONFIG_DVB_VES1X93) || (defined(CONFIG_DVB_VES1X93_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_VES1X93)
extern struct dvb_frontend* ves1x93_attach(const struct ves1x93_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/zl10353.h b/drivers/media/dvb-frontends/zl10353.h
index 6e3ca9eed048..50c1004aef36 100644
--- a/drivers/media/dvb-frontends/zl10353.h
+++ b/drivers/media/dvb-frontends/zl10353.h
@@ -47,7 +47,7 @@ struct zl10353_config
u8 pll_0; /* default: 0x15 */
};
-#if defined(CONFIG_DVB_ZL10353) || (defined(CONFIG_DVB_ZL10353_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_ZL10353)
extern struct dvb_frontend* zl10353_attach(const struct zl10353_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 24d78e28e493..7b771baa2212 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -1,16 +1,4 @@
#
-# Generic video config states
-#
-
-config VIDEO_BTCX
- depends on PCI
- tristate
-
-config VIDEO_TVEEPROM
- tristate
- depends on I2C
-
-#
# Multimedia Video device configuration
#
@@ -317,20 +305,6 @@ config VIDEO_SAA717X
source "drivers/media/i2c/cx25840/Kconfig"
-comment "MPEG video encoders"
-
-config VIDEO_CX2341X
- tristate "Conexant CX2341x MPEG encoders"
- depends on VIDEO_V4L2
- ---help---
- Support for the Conexant CX23416 MPEG encoders
- and CX23415 MPEG encoder/decoders.
-
- This module currently supports the encoding functions only.
-
- To compile this driver as a module, choose M here: the
- module will be called cx2341x.
-
comment "Video encoders"
config VIDEO_SAA7127
@@ -421,6 +395,13 @@ config VIDEO_OV7670
OV7670 VGA camera. It currently only works with the M88ALP01
controller.
+config VIDEO_OV9650
+ tristate "OmniVision OV9650/OV9652 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a V4L2 sensor-level driver for the Omnivision
+ OV9650 and OV9652 camera sensors.
+
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
@@ -477,7 +458,7 @@ config VIDEO_MT9V032
config VIDEO_TCM825X
tristate "TCM825x camera sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_INT_DEVICE
depends on MEDIA_CAMERA_SUPPORT
---help---
This is a driver for the Toshiba TCM825x VGA camera sensor.
@@ -516,6 +497,13 @@ config VIDEO_S5K4ECGX
source "drivers/media/i2c/smiapp/Kconfig"
+config VIDEO_S5C73M3
+ tristate "Samsung S5C73M3 sensor support"
+ depends on I2C && SPI && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a V4L2 sensor-level driver for Samsung S5C73M3
+ 8 Mpixel camera.
+
comment "Flash devices"
config VIDEO_ADP1653
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index b1d62dfd49b8..cfefd30cc1bc 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -47,8 +47,8 @@ obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o
obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
+obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
-obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
@@ -58,10 +58,9 @@ obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o
+obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3/
obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_VIDEO_AS3645A) += as3645a.o
obj-$(CONFIG_VIDEO_SMIAPP_PLL) += smiapp-pll.o
-obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o
-obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 64d71fb87a96..34f39d3b3e3e 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -402,9 +402,6 @@ static const struct v4l2_subdev_video_ops adv7180_video_ops = {
static const struct v4l2_subdev_core_ops adv7180_core_ops = {
.g_chip_ident = adv7180_g_chip_ident,
.s_std = adv7180_s_std,
- .queryctrl = v4l2_subdev_queryctrl,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
};
static const struct v4l2_subdev_ops adv7180_ops = {
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index 2b5aa676a84e..9fc2b985df0e 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -43,6 +43,7 @@ MODULE_PARM_DESC(debug, "Debug level 0-1");
struct adv7343_state {
struct v4l2_subdev sd;
struct v4l2_ctrl_handler hdl;
+ const struct adv7343_platform_data *pdata;
u8 reg00;
u8 reg01;
u8 reg02;
@@ -215,12 +216,23 @@ static int adv7343_setoutput(struct v4l2_subdev *sd, u32 output_type)
/* Enable Appropriate DAC */
val = state->reg00 & 0x03;
- if (output_type == ADV7343_COMPOSITE_ID)
- val |= ADV7343_COMPOSITE_POWER_VALUE;
- else if (output_type == ADV7343_COMPONENT_ID)
- val |= ADV7343_COMPONENT_POWER_VALUE;
+ /* configure default configuration */
+ if (!state->pdata)
+ if (output_type == ADV7343_COMPOSITE_ID)
+ val |= ADV7343_COMPOSITE_POWER_VALUE;
+ else if (output_type == ADV7343_COMPONENT_ID)
+ val |= ADV7343_COMPONENT_POWER_VALUE;
+ else
+ val |= ADV7343_SVIDEO_POWER_VALUE;
else
- val |= ADV7343_SVIDEO_POWER_VALUE;
+ val = state->pdata->mode_config.sleep_mode << 0 |
+ state->pdata->mode_config.pll_control << 1 |
+ state->pdata->mode_config.dac_3 << 2 |
+ state->pdata->mode_config.dac_2 << 3 |
+ state->pdata->mode_config.dac_1 << 4 |
+ state->pdata->mode_config.dac_6 << 5 |
+ state->pdata->mode_config.dac_5 << 6 |
+ state->pdata->mode_config.dac_4 << 7;
err = adv7343_write(sd, ADV7343_POWER_MODE_REG, val);
if (err < 0)
@@ -238,6 +250,17 @@ static int adv7343_setoutput(struct v4l2_subdev *sd, u32 output_type)
/* configure SD DAC Output 2 and SD DAC Output 1 bit to zero */
val = state->reg82 & (SD_DAC_1_DI & SD_DAC_2_DI);
+
+ if (state->pdata && state->pdata->sd_config.sd_dac_out1)
+ val = val | (state->pdata->sd_config.sd_dac_out1 << 1);
+ else if (state->pdata && !state->pdata->sd_config.sd_dac_out1)
+ val = val & ~(state->pdata->sd_config.sd_dac_out1 << 1);
+
+ if (state->pdata && state->pdata->sd_config.sd_dac_out2)
+ val = val | (state->pdata->sd_config.sd_dac_out2 << 2);
+ else if (state->pdata && !state->pdata->sd_config.sd_dac_out2)
+ val = val & ~(state->pdata->sd_config.sd_dac_out2 << 2);
+
err = adv7343_write(sd, ADV7343_SD_MODE_REG2, val);
if (err < 0)
goto setoutput_exit;
@@ -397,10 +420,14 @@ static int adv7343_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
- state = kzalloc(sizeof(struct adv7343_state), GFP_KERNEL);
+ state = devm_kzalloc(&client->dev, sizeof(struct adv7343_state),
+ GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
+ /* Copy board specific information here */
+ state->pdata = client->dev.platform_data;
+
state->reg00 = 0x80;
state->reg01 = 0x00;
state->reg02 = 0x20;
@@ -431,16 +458,13 @@ static int adv7343_probe(struct i2c_client *client,
int err = state->hdl.error;
v4l2_ctrl_handler_free(&state->hdl);
- kfree(state);
return err;
}
v4l2_ctrl_handler_setup(&state->hdl);
err = adv7343_initialize(&state->sd);
- if (err) {
+ if (err)
v4l2_ctrl_handler_free(&state->hdl);
- kfree(state);
- }
return err;
}
@@ -451,7 +475,6 @@ static int adv7343_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- kfree(state);
return 0;
}
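
For the adv7343 change above, when platform data is supplied the power-mode register is no longer derived from the selected output type but packed directly from the board-specific mode_config bit fields. Below is a stand-alone sketch of that packing with the same bit positions as the patch; the struct is a local stand-in for the platform-data type and the chosen field values are made up.

/* Local stand-in for the adv7343 platform data mode_config fields; the
 * bit positions match the packing in adv7343_setoutput() above. */
#include <stdio.h>

struct mode_config {
	unsigned sleep_mode:1, pll_control:1;
	unsigned dac_1:1, dac_2:1, dac_3:1, dac_4:1, dac_5:1, dac_6:1;
};

int main(void)
{
	/* Example board: DAC1..DAC3 powered up, everything else off. */
	struct mode_config m = { .dac_1 = 1, .dac_2 = 1, .dac_3 = 1 };

	unsigned char val = m.sleep_mode << 0 | m.pll_control << 1 |
			    m.dac_3 << 2 | m.dac_2 << 3 | m.dac_1 << 4 |
			    m.dac_6 << 5 | m.dac_5 << 6 | m.dac_4 << 7;

	printf("ADV7343_POWER_MODE_REG value: 0x%02x\n", val); /* 0x1c */
	return 0;
}
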
diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c
index 38ce76ed1924..9ae977b5983a 100644
--- a/drivers/media/i2c/cx25840/cx25840-ir.c
+++ b/drivers/media/i2c/cx25840/cx25840-ir.c
@@ -1251,13 +1251,11 @@ int cx25840_ir_probe(struct v4l2_subdev *sd)
cx25840_write4(ir_state->c, CX25840_IR_IRQEN_REG, 0);
mutex_init(&ir_state->rx_params_lock);
- memcpy(&default_params, &default_rx_params,
- sizeof(struct v4l2_subdev_ir_parameters));
+ default_params = default_rx_params;
v4l2_subdev_call(sd, ir, rx_s_parameters, &default_params);
mutex_init(&ir_state->tx_params_lock);
- memcpy(&default_params, &default_tx_params,
- sizeof(struct v4l2_subdev_ir_parameters));
+ default_params = default_tx_params;
v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
return 0;
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 6bf01ad62765..73b7688cbebd 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -13,6 +13,7 @@
#include <asm/div64.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <media/mt9v011.h>
MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
@@ -48,68 +49,9 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
#define MT9V011_VERSION 0x8232
#define MT9V011_REV_B_VERSION 0x8243
-/* supported controls */
-static struct v4l2_queryctrl mt9v011_qctrl[] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = (1 << 12) - 1 - 0x0020,
- .step = 1,
- .default_value = 0x0020,
- .flags = 0,
- }, {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 2047,
- .step = 1,
- .default_value = 0x01fc,
- .flags = 0,
- }, {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = -1 << 9,
- .maximum = (1 << 9) - 1,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = -1 << 9,
- .maximum = (1 << 9) - 1,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Vflip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- }
-};
-
struct mt9v011 {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler ctrls;
unsigned width, height;
unsigned xtal;
unsigned hflip:1;
@@ -381,99 +323,6 @@ static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
set_read_mode(sd);
return 0;
-};
-
-static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct mt9v011 *core = to_mt9v011(sd);
-
- v4l2_dbg(1, debug, sd, "g_ctrl called\n");
-
- switch (ctrl->id) {
- case V4L2_CID_GAIN:
- ctrl->value = core->global_gain;
- return 0;
- case V4L2_CID_EXPOSURE:
- ctrl->value = core->exposure;
- return 0;
- case V4L2_CID_RED_BALANCE:
- ctrl->value = core->red_bal;
- return 0;
- case V4L2_CID_BLUE_BALANCE:
- ctrl->value = core->blue_bal;
- return 0;
- case V4L2_CID_HFLIP:
- ctrl->value = core->hflip ? 1 : 0;
- return 0;
- case V4L2_CID_VFLIP:
- ctrl->value = core->vflip ? 1 : 0;
- return 0;
- }
- return -EINVAL;
-}
-
-static int mt9v011_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- int i;
-
- v4l2_dbg(1, debug, sd, "queryctrl called\n");
-
- for (i = 0; i < ARRAY_SIZE(mt9v011_qctrl); i++)
- if (qc->id && qc->id == mt9v011_qctrl[i].id) {
- memcpy(qc, &(mt9v011_qctrl[i]),
- sizeof(*qc));
- return 0;
- }
-
- return -EINVAL;
-}
-
-
-static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct mt9v011 *core = to_mt9v011(sd);
- u8 i, n;
- n = ARRAY_SIZE(mt9v011_qctrl);
-
- for (i = 0; i < n; i++) {
- if (ctrl->id != mt9v011_qctrl[i].id)
- continue;
- if (ctrl->value < mt9v011_qctrl[i].minimum ||
- ctrl->value > mt9v011_qctrl[i].maximum)
- return -ERANGE;
- v4l2_dbg(1, debug, sd, "s_ctrl: id=%d, value=%d\n",
- ctrl->id, ctrl->value);
- break;
- }
-
- switch (ctrl->id) {
- case V4L2_CID_GAIN:
- core->global_gain = ctrl->value;
- break;
- case V4L2_CID_EXPOSURE:
- core->exposure = ctrl->value;
- break;
- case V4L2_CID_RED_BALANCE:
- core->red_bal = ctrl->value;
- break;
- case V4L2_CID_BLUE_BALANCE:
- core->blue_bal = ctrl->value;
- break;
- case V4L2_CID_HFLIP:
- core->hflip = ctrl->value;
- set_read_mode(sd);
- return 0;
- case V4L2_CID_VFLIP:
- core->vflip = ctrl->value;
- set_read_mode(sd);
- return 0;
- default:
- return -EINVAL;
- }
-
- set_balance(sd);
-
- return 0;
}
static int mt9v011_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
@@ -599,10 +448,46 @@ static int mt9v011_g_chip_ident(struct v4l2_subdev *sd,
version);
}
-static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
- .queryctrl = mt9v011_queryctrl,
- .g_ctrl = mt9v011_g_ctrl,
+static int mt9v011_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mt9v011 *core =
+ container_of(ctrl->handler, struct mt9v011, ctrls);
+ struct v4l2_subdev *sd = &core->sd;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ core->global_gain = ctrl->val;
+ break;
+ case V4L2_CID_EXPOSURE:
+ core->exposure = ctrl->val;
+ break;
+ case V4L2_CID_RED_BALANCE:
+ core->red_bal = ctrl->val;
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ core->blue_bal = ctrl->val;
+ break;
+ case V4L2_CID_HFLIP:
+ core->hflip = ctrl->val;
+ set_read_mode(sd);
+ return 0;
+ case V4L2_CID_VFLIP:
+ core->vflip = ctrl->val;
+ set_read_mode(sd);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ set_balance(sd);
+ return 0;
+}
+
+static struct v4l2_ctrl_ops mt9v011_ctrl_ops = {
.s_ctrl = mt9v011_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
.reset = mt9v011_reset,
.g_chip_ident = mt9v011_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -658,6 +543,30 @@ static int mt9v011_probe(struct i2c_client *c,
return -EINVAL;
}
+ v4l2_ctrl_handler_init(&core->ctrls, 5);
+ v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
+ V4L2_CID_GAIN, 0, (1 << 12) - 1 - 0x20, 1, 0x20);
+ v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 2047, 1, 0x01fc);
+ v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
+ V4L2_CID_RED_BALANCE, -(1 << 9), (1 << 9) - 1, 1, 0);
+ v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, -(1 << 9), (1 << 9) - 1, 1, 0);
+ v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ if (core->ctrls.error) {
+ int ret = core->ctrls.error;
+
+ v4l2_err(sd, "control initialization error %d\n", ret);
+ v4l2_ctrl_handler_free(&core->ctrls);
+ kfree(core);
+ return ret;
+ }
+ core->sd.ctrl_handler = &core->ctrls;
+
core->global_gain = 0x0024;
core->exposure = 0x01fc;
core->width = 640;
@@ -681,12 +590,14 @@ static int mt9v011_probe(struct i2c_client *c,
static int mt9v011_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
+ struct mt9v011 *core = to_mt9v011(sd);
v4l2_dbg(1, debug, sd,
"mt9v011.c: removing mt9v011 adapter on address 0x%x\n",
c->addr << 1);
v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&core->ctrls);
kfree(to_mt9v011(sd));
return 0;
}
diff --git a/drivers/media/i2c/noon010pc30.c b/drivers/media/i2c/noon010pc30.c
index 440c12962bae..8554b47f993a 100644
--- a/drivers/media/i2c/noon010pc30.c
+++ b/drivers/media/i2c/noon010pc30.c
@@ -660,13 +660,6 @@ static const struct v4l2_ctrl_ops noon010_ctrl_ops = {
static const struct v4l2_subdev_core_ops noon010_core_ops = {
.s_power = noon010_s_power,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
.log_status = noon010_log_status,
};
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index e7c82b297514..05ed5b8e7f88 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -18,6 +18,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-mediabus.h>
#include <media/ov7670.h>
@@ -47,6 +48,8 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
*/
#define OV7670_I2C_ADDR 0x42
+#define PLL_FACTOR 4
+
/* Registers */
#define REG_GAIN 0x00 /* Gain lower 8 bits (rest in vref) */
#define REG_BLUE 0x01 /* blue gain */
@@ -164,6 +167,12 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define REG_GFIX 0x69 /* Fix gain control */
+#define REG_DBLV 0x6b /* PLL control and debugging */
+#define DBLV_BYPASS 0x00 /* Bypass PLL */
+#define DBLV_X4 0x01 /* clock x4 */
+#define DBLV_X6 0x10 /* clock x6 */
+#define DBLV_X8 0x11 /* clock x8 */
+
#define REG_REG76 0x76 /* OV's name */
#define R76_BLKPCOR 0x80 /* Black pixel correction enable */
#define R76_WHTPCOR 0x40 /* White pixel correction enable */
@@ -183,6 +192,30 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define REG_HAECC7 0xaa /* Hist AEC/AGC control 7 */
#define REG_BD60MAX 0xab /* 60hz banding step limit */
+enum ov7670_model {
+ MODEL_OV7670 = 0,
+ MODEL_OV7675,
+};
+
+struct ov7670_win_size {
+ int width;
+ int height;
+ unsigned char com7_bit;
+ int hstart; /* Start/stop values for the camera. Note */
+ int hstop; /* that they do not always make complete */
+ int vstart; /* sense to humans, but evidently the sensor */
+ int vstop; /* will do the right thing... */
+ struct regval_list *regs; /* Regs to tweak */
+};
+
+struct ov7670_devtype {
+ /* formats supported for each model */
+ struct ov7670_win_size *win_sizes;
+ unsigned int n_win_sizes;
+ /* callbacks for frame rate control */
+ int (*set_framerate)(struct v4l2_subdev *, struct v4l2_fract *);
+ void (*get_framerate)(struct v4l2_subdev *, struct v4l2_fract *);
+};
/*
* Information we maintain about a known sensor.
@@ -190,14 +223,31 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
struct ov7670_format_struct; /* coming later */
struct ov7670_info {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+ struct {
+ /* gain cluster */
+ struct v4l2_ctrl *auto_gain;
+ struct v4l2_ctrl *gain;
+ };
+ struct {
+ /* exposure cluster */
+ struct v4l2_ctrl *auto_exposure;
+ struct v4l2_ctrl *exposure;
+ };
+ struct {
+ /* saturation/hue cluster */
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *hue;
+ };
struct ov7670_format_struct *fmt; /* Current format */
- unsigned char sat; /* Saturation value */
- int hue; /* Hue value */
int min_width; /* Filter out smaller sizes */
int min_height; /* Filter out smaller sizes */
int clock_speed; /* External clock speed (MHz) */
u8 clkrc; /* Clock divider value */
bool use_smbus; /* Use smbus I/O instead of I2C */
+ bool pll_bypass;
+ bool pclk_hb_disable;
+ const struct ov7670_devtype *devtype; /* Device specifics */
};
static inline struct ov7670_info *to_state(struct v4l2_subdev *sd)
@@ -205,6 +255,11 @@ static inline struct ov7670_info *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct ov7670_info, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct ov7670_info, hdl)->sd;
+}
+
/*
@@ -353,7 +408,7 @@ static struct regval_list ov7670_fmt_yuv422[] = {
{ REG_RGB444, 0 }, /* No RGB444 please */
{ REG_COM1, 0 }, /* CCIR601 */
{ REG_COM15, COM15_R00FF },
- { REG_COM9, 0x18 }, /* 4x gain ceiling; 0x8 is reserved bit */
+ { REG_COM9, 0x48 }, /* 32x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0x80 }, /* "matrix coefficient 1" */
{ 0x50, 0x80 }, /* "matrix coefficient 2" */
{ 0x51, 0 }, /* vb */
@@ -652,65 +707,178 @@ static struct regval_list ov7670_qcif_regs[] = {
{ 0xff, 0xff },
};
-static struct ov7670_win_size {
- int width;
- int height;
- unsigned char com7_bit;
- int hstart; /* Start/stop values for the camera. Note */
- int hstop; /* that they do not always make complete */
- int vstart; /* sense to humans, but evidently the sensor */
- int vstop; /* will do the right thing... */
- struct regval_list *regs; /* Regs to tweak */
-/* h/vref stuff */
-} ov7670_win_sizes[] = {
+static struct ov7670_win_size ov7670_win_sizes[] = {
/* VGA */
{
.width = VGA_WIDTH,
.height = VGA_HEIGHT,
.com7_bit = COM7_FMT_VGA,
- .hstart = 158, /* These values from */
- .hstop = 14, /* Omnivision */
+ .hstart = 158, /* These values from */
+ .hstop = 14, /* Omnivision */
.vstart = 10,
.vstop = 490,
- .regs = NULL,
+ .regs = NULL,
},
/* CIF */
{
.width = CIF_WIDTH,
.height = CIF_HEIGHT,
.com7_bit = COM7_FMT_CIF,
- .hstart = 170, /* Empirically determined */
+ .hstart = 170, /* Empirically determined */
.hstop = 90,
.vstart = 14,
.vstop = 494,
- .regs = NULL,
+ .regs = NULL,
},
/* QVGA */
{
.width = QVGA_WIDTH,
.height = QVGA_HEIGHT,
.com7_bit = COM7_FMT_QVGA,
- .hstart = 168, /* Empirically determined */
+ .hstart = 168, /* Empirically determined */
.hstop = 24,
.vstart = 12,
.vstop = 492,
- .regs = NULL,
+ .regs = NULL,
},
/* QCIF */
{
.width = QCIF_WIDTH,
.height = QCIF_HEIGHT,
.com7_bit = COM7_FMT_VGA, /* see comment above */
- .hstart = 456, /* Empirically determined */
+ .hstart = 456, /* Empirically determined */
.hstop = 24,
.vstart = 14,
.vstop = 494,
- .regs = ov7670_qcif_regs,
- },
+ .regs = ov7670_qcif_regs,
+ }
+};
+
+static struct ov7670_win_size ov7675_win_sizes[] = {
+ /*
+ * Currently, only VGA is supported. Theoretically it could be possible
+ * to support CIF, QVGA and QCIF too; taking the ov7670 values as a
+ * base and tweaking them empirically would likely be required.
+ */
+ {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .com7_bit = COM7_FMT_VGA,
+ .hstart = 158, /* These values from */
+ .hstop = 14, /* Omnivision */
+ .vstart = 14, /* Empirically determined */
+ .vstop = 494,
+ .regs = NULL,
+ }
};
-#define N_WIN_SIZES (ARRAY_SIZE(ov7670_win_sizes))
+static void ov7675_get_framerate(struct v4l2_subdev *sd,
+ struct v4l2_fract *tpf)
+{
+ struct ov7670_info *info = to_state(sd);
+ u32 clkrc = info->clkrc;
+ int pll_factor;
+
+ if (info->pll_bypass)
+ pll_factor = 1;
+ else
+ pll_factor = PLL_FACTOR;
+
+ clkrc++;
+ if (info->fmt->mbus_code == V4L2_MBUS_FMT_SBGGR8_1X8)
+ clkrc = (clkrc >> 1);
+
+ tpf->numerator = 1;
+ tpf->denominator = (5 * pll_factor * info->clock_speed) /
+ (4 * clkrc);
+}
+
+static int ov7675_set_framerate(struct v4l2_subdev *sd,
+ struct v4l2_fract *tpf)
+{
+ struct ov7670_info *info = to_state(sd);
+ u32 clkrc;
+ int pll_factor;
+ int ret;
+
+ /*
+ * The formula is fps = 5/4*pixclk for YUV/RGB and
+ * fps = 5/2*pixclk for RAW.
+ *
+ * pixclk = clock_speed / (clkrc + 1) * PLLfactor
+ *
+ */
+ if (info->pll_bypass) {
+ pll_factor = 1;
+ ret = ov7670_write(sd, REG_DBLV, DBLV_BYPASS);
+ } else {
+ pll_factor = PLL_FACTOR;
+ ret = ov7670_write(sd, REG_DBLV, DBLV_X4);
+ }
+ if (ret < 0)
+ return ret;
+
+ if (tpf->numerator == 0 || tpf->denominator == 0) {
+ clkrc = 0;
+ } else {
+ clkrc = (5 * pll_factor * info->clock_speed * tpf->numerator) /
+ (4 * tpf->denominator);
+ if (info->fmt->mbus_code == V4L2_MBUS_FMT_SBGGR8_1X8)
+ clkrc = (clkrc << 1);
+ clkrc--;
+ }
+
+ /*
+ * The datasheet claims that clkrc = 0 will divide the input clock by 1
+ * but we've checked with an oscilloscope that it divides by 2 instead.
+ * So, if clkrc = 0 just bypass the divider.
+ */
+ if (clkrc <= 0)
+ clkrc = CLK_EXT;
+ else if (clkrc > CLK_SCALE)
+ clkrc = CLK_SCALE;
+ info->clkrc = clkrc;
+
+ /* Recalculate frame rate */
+ ov7675_get_framerate(sd, tpf);
+
+ ret = ov7670_write(sd, REG_CLKRC, info->clkrc);
+ if (ret < 0)
+ return ret;
+
+ return ov7670_write(sd, REG_DBLV, DBLV_X4);
+}
+
+static void ov7670_get_framerate_legacy(struct v4l2_subdev *sd,
+ struct v4l2_fract *tpf)
+{
+ struct ov7670_info *info = to_state(sd);
+
+ tpf->numerator = 1;
+ tpf->denominator = info->clock_speed;
+ if ((info->clkrc & CLK_EXT) == 0 && (info->clkrc & CLK_SCALE) > 1)
+ tpf->denominator /= (info->clkrc & CLK_SCALE);
+}
+
+static int ov7670_set_framerate_legacy(struct v4l2_subdev *sd,
+ struct v4l2_fract *tpf)
+{
+ struct ov7670_info *info = to_state(sd);
+ int div;
+ if (tpf->numerator == 0 || tpf->denominator == 0)
+ div = 1; /* Reset to full rate */
+ else
+ div = (tpf->numerator * info->clock_speed) / tpf->denominator;
+ if (div == 0)
+ div = 1;
+ else if (div > CLK_SCALE)
+ div = CLK_SCALE;
+ info->clkrc = (info->clkrc & 0x80) | div;
+ tpf->numerator = 1;
+ tpf->denominator = info->clock_speed / div;
+ return ov7670_write(sd, REG_CLKRC, info->clkrc);
+}
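
The comment in ov7675_set_framerate() above gives the frame rate formula: fps = 5/4 * pixclk for YUV/RGB, with pixclk = clock_speed / (clkrc + 1) * PLL factor. The short program below runs the same integer math outside the driver for one example input; the 24 MHz clock and the 30 fps request are assumptions rather than values from the patch, and the divider handling is simplified to the non-RAW, PLL-enabled case.

/* Worked example of the ov7675 frame rate math for a YUV format with the
 * PLL enabled (factor 4).  Simplified sketch, not driver code. */
#include <stdio.h>

int main(void)
{
	unsigned int clock_speed = 24;	/* MHz, assumed external clock */
	unsigned int pll_factor = 4;	/* DBLV_X4, as in the patch */
	unsigned int num = 1, den = 30;	/* requested 1/30 s per frame */

	/* Same rounding as ov7675_set_framerate() for the YUV case. */
	unsigned int clkrc = (5 * pll_factor * clock_speed * num) / (4 * den);
	if (clkrc > 0)
		clkrc--;

	/* And the reverse mapping used by ov7675_get_framerate(). */
	unsigned int fps = (5 * pll_factor * clock_speed) / (4 * (clkrc + 1));

	printf("clkrc = %u -> ~%u fps\n", clkrc, fps);	/* clkrc = 3 -> 30 fps */
	return 0;
}
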
/*
* Store a set of start/stop values into the camera.
@@ -759,8 +927,11 @@ static int ov7670_try_fmt_internal(struct v4l2_subdev *sd,
struct ov7670_format_struct **ret_fmt,
struct ov7670_win_size **ret_wsize)
{
- int index;
+ int index, i;
struct ov7670_win_size *wsize;
+ struct ov7670_info *info = to_state(sd);
+ unsigned int n_win_sizes = info->devtype->n_win_sizes;
+ unsigned int win_sizes_limit = n_win_sizes;
for (index = 0; index < N_OV7670_FMTS; index++)
if (ov7670_formats[index].mbus_code == fmt->code)
@@ -776,15 +947,30 @@ static int ov7670_try_fmt_internal(struct v4l2_subdev *sd,
* Fields: the OV devices claim to be progressive.
*/
fmt->field = V4L2_FIELD_NONE;
+
+ /*
+ * Don't consider values that don't match min_height and min_width
+ * constraints.
+ */
+ if (info->min_width || info->min_height)
+ for (i = 0; i < n_win_sizes; i++) {
+ wsize = info->devtype->win_sizes + i;
+
+ if (wsize->width < info->min_width ||
+ wsize->height < info->min_height) {
+ win_sizes_limit = i;
+ break;
+ }
+ }
/*
* Round requested image size down to the nearest
* we support, but not below the smallest.
*/
- for (wsize = ov7670_win_sizes; wsize < ov7670_win_sizes + N_WIN_SIZES;
- wsize++)
+ for (wsize = info->devtype->win_sizes;
+ wsize < info->devtype->win_sizes + win_sizes_limit; wsize++)
if (fmt->width >= wsize->width && fmt->height >= wsize->height)
break;
- if (wsize >= ov7670_win_sizes + N_WIN_SIZES)
+ if (wsize >= info->devtype->win_sizes + win_sizes_limit)
wsize--; /* Take the smallest one */
if (ret_wsize != NULL)
*ret_wsize = wsize;
@@ -868,10 +1054,8 @@ static int ov7670_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
memset(cp, 0, sizeof(struct v4l2_captureparm));
cp->capability = V4L2_CAP_TIMEPERFRAME;
- cp->timeperframe.numerator = 1;
- cp->timeperframe.denominator = info->clock_speed;
- if ((info->clkrc & CLK_EXT) == 0 && (info->clkrc & CLK_SCALE) > 1)
- cp->timeperframe.denominator /= (info->clkrc & CLK_SCALE);
+ info->devtype->get_framerate(sd, &cp->timeperframe);
+
return 0;
}
@@ -880,25 +1064,13 @@ static int ov7670_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
struct v4l2_captureparm *cp = &parms->parm.capture;
struct v4l2_fract *tpf = &cp->timeperframe;
struct ov7670_info *info = to_state(sd);
- int div;
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (cp->extendedmode != 0)
return -EINVAL;
- if (tpf->numerator == 0 || tpf->denominator == 0)
- div = 1; /* Reset to full rate */
- else
- div = (tpf->numerator * info->clock_speed) / tpf->denominator;
- if (div == 0)
- div = 1;
- else if (div > CLK_SCALE)
- div = CLK_SCALE;
- info->clkrc = (info->clkrc & 0x80) | div;
- tpf->numerator = 1;
- tpf->denominator = info->clock_speed / div;
- return ov7670_write(sd, REG_CLKRC, info->clkrc);
+ return info->devtype->set_framerate(sd, tpf);
}
@@ -931,13 +1103,14 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
int i;
int num_valid = -1;
__u32 index = fsize->index;
+ unsigned int n_win_sizes = info->devtype->n_win_sizes;
/*
* If a minimum width/height was requested, filter out the capture
* windows that fall outside that.
*/
- for (i = 0; i < N_WIN_SIZES; i++) {
- struct ov7670_win_size *win = &ov7670_win_sizes[index];
+ for (i = 0; i < n_win_sizes; i++) {
+ struct ov7670_win_size *win = &info->devtype->win_sizes[index];
if (info->min_width && win->width < info->min_width)
continue;
if (info->min_height && win->height < info->min_height)
@@ -1042,23 +1215,23 @@ static int ov7670_cosine(int theta)
static void ov7670_calc_cmatrix(struct ov7670_info *info,
- int matrix[CMATRIX_LEN])
+ int matrix[CMATRIX_LEN], int sat, int hue)
{
int i;
/*
* Apply the current saturation setting first.
*/
for (i = 0; i < CMATRIX_LEN; i++)
- matrix[i] = (info->fmt->cmatrix[i]*info->sat) >> 7;
+ matrix[i] = (info->fmt->cmatrix[i] * sat) >> 7;
/*
* Then, if need be, rotate the hue value.
*/
- if (info->hue != 0) {
+ if (hue != 0) {
int sinth, costh, tmpmatrix[CMATRIX_LEN];
memcpy(tmpmatrix, matrix, CMATRIX_LEN*sizeof(int));
- sinth = ov7670_sine(info->hue);
- costh = ov7670_cosine(info->hue);
+ sinth = ov7670_sine(hue);
+ costh = ov7670_cosine(hue);
matrix[0] = (matrix[3]*sinth + matrix[0]*costh)/1000;
matrix[1] = (matrix[4]*sinth + matrix[1]*costh)/1000;
@@ -1071,60 +1244,21 @@ static void ov7670_calc_cmatrix(struct ov7670_info *info,
-static int ov7670_s_sat(struct v4l2_subdev *sd, int value)
-{
- struct ov7670_info *info = to_state(sd);
- int matrix[CMATRIX_LEN];
- int ret;
-
- info->sat = value;
- ov7670_calc_cmatrix(info, matrix);
- ret = ov7670_store_cmatrix(sd, matrix);
- return ret;
-}
-
-static int ov7670_g_sat(struct v4l2_subdev *sd, __s32 *value)
-{
- struct ov7670_info *info = to_state(sd);
-
- *value = info->sat;
- return 0;
-}
-
-static int ov7670_s_hue(struct v4l2_subdev *sd, int value)
+static int ov7670_s_sat_hue(struct v4l2_subdev *sd, int sat, int hue)
{
struct ov7670_info *info = to_state(sd);
int matrix[CMATRIX_LEN];
int ret;
- if (value < -180 || value > 180)
- return -EINVAL;
- info->hue = value;
- ov7670_calc_cmatrix(info, matrix);
+ ov7670_calc_cmatrix(info, matrix, sat, hue);
ret = ov7670_store_cmatrix(sd, matrix);
return ret;
}
-static int ov7670_g_hue(struct v4l2_subdev *sd, __s32 *value)
-{
- struct ov7670_info *info = to_state(sd);
-
- *value = info->hue;
- return 0;
-}
-
-
/*
* Some weird registers seem to store values in a sign/magnitude format!
*/
-static unsigned char ov7670_sm_to_abs(unsigned char v)
-{
- if ((v & 0x80) == 0)
- return v + 128;
- return 128 - (v & 0x7f);
-}
-
static unsigned char ov7670_abs_to_sm(unsigned char v)
{
@@ -1146,40 +1280,11 @@ static int ov7670_s_brightness(struct v4l2_subdev *sd, int value)
return ret;
}
-static int ov7670_g_brightness(struct v4l2_subdev *sd, __s32 *value)
-{
- unsigned char v = 0;
- int ret = ov7670_read(sd, REG_BRIGHT, &v);
-
- *value = ov7670_sm_to_abs(v);
- return ret;
-}
-
static int ov7670_s_contrast(struct v4l2_subdev *sd, int value)
{
return ov7670_write(sd, REG_CONTRAS, (unsigned char) value);
}
-static int ov7670_g_contrast(struct v4l2_subdev *sd, __s32 *value)
-{
- unsigned char v = 0;
- int ret = ov7670_read(sd, REG_CONTRAS, &v);
-
- *value = v;
- return ret;
-}
-
-static int ov7670_g_hflip(struct v4l2_subdev *sd, __s32 *value)
-{
- int ret;
- unsigned char v = 0;
-
- ret = ov7670_read(sd, REG_MVFP, &v);
- *value = (v & MVFP_MIRROR) == MVFP_MIRROR;
- return ret;
-}
-
-
static int ov7670_s_hflip(struct v4l2_subdev *sd, int value)
{
unsigned char v = 0;
@@ -1195,19 +1300,6 @@ static int ov7670_s_hflip(struct v4l2_subdev *sd, int value)
return ret;
}
-
-
-static int ov7670_g_vflip(struct v4l2_subdev *sd, __s32 *value)
-{
- int ret;
- unsigned char v = 0;
-
- ret = ov7670_read(sd, REG_MVFP, &v);
- *value = (v & MVFP_FLIP) == MVFP_FLIP;
- return ret;
-}
-
-
static int ov7670_s_vflip(struct v4l2_subdev *sd, int value)
{
unsigned char v = 0;
@@ -1256,16 +1348,6 @@ static int ov7670_s_gain(struct v4l2_subdev *sd, int value)
/*
* Tweak autogain.
*/
-static int ov7670_g_autogain(struct v4l2_subdev *sd, __s32 *value)
-{
- int ret;
- unsigned char com8;
-
- ret = ov7670_read(sd, REG_COM8, &com8);
- *value = (com8 & COM8_AGC) != 0;
- return ret;
-}
-
static int ov7670_s_autogain(struct v4l2_subdev *sd, int value)
{
int ret;
@@ -1282,22 +1364,6 @@ static int ov7670_s_autogain(struct v4l2_subdev *sd, int value)
return ret;
}
-/*
- * Exposure is spread all over the place: top 6 bits in AECHH, middle
- * 8 in AECH, and two stashed in COM1 just for the hell of it.
- */
-static int ov7670_g_exp(struct v4l2_subdev *sd, __s32 *value)
-{
- int ret;
- unsigned char com1, aech, aechh;
-
- ret = ov7670_read(sd, REG_COM1, &com1) +
- ov7670_read(sd, REG_AECH, &aech) +
- ov7670_read(sd, REG_AECHH, &aechh);
- *value = ((aechh & 0x3f) << 10) | (aech << 2) | (com1 & 0x03);
- return ret;
-}
-
static int ov7670_s_exp(struct v4l2_subdev *sd, int value)
{
int ret;
@@ -1324,20 +1390,6 @@ static int ov7670_s_exp(struct v4l2_subdev *sd, int value)
/*
* Tweak autoexposure.
*/
-static int ov7670_g_autoexp(struct v4l2_subdev *sd, __s32 *value)
-{
- int ret;
- unsigned char com8;
- enum v4l2_exposure_auto_type *atype = (enum v4l2_exposure_auto_type *) value;
-
- ret = ov7670_read(sd, REG_COM8, &com8);
- if (com8 & COM8_AEC)
- *atype = V4L2_EXPOSURE_AUTO;
- else
- *atype = V4L2_EXPOSURE_MANUAL;
- return ret;
-}
-
static int ov7670_s_autoexp(struct v4l2_subdev *sd,
enum v4l2_exposure_auto_type value)
{
@@ -1356,90 +1408,60 @@ static int ov7670_s_autoexp(struct v4l2_subdev *sd,
}
-
-static int ov7670_queryctrl(struct v4l2_subdev *sd,
- struct v4l2_queryctrl *qc)
+static int ov7670_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
- /* Fill in min, max, step and default value for these controls. */
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(qc, 0, 127, 1, 64);
- case V4L2_CID_VFLIP:
- case V4L2_CID_HFLIP:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(qc, 0, 256, 1, 128);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, -180, 180, 5, 0);
- case V4L2_CID_GAIN:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
- case V4L2_CID_AUTOGAIN:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- case V4L2_CID_EXPOSURE:
- return v4l2_ctrl_query_fill(qc, 0, 65535, 1, 500);
- case V4L2_CID_EXPOSURE_AUTO:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
- }
- return -EINVAL;
-}
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct ov7670_info *info = to_state(sd);
-static int ov7670_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- return ov7670_g_brightness(sd, &ctrl->value);
- case V4L2_CID_CONTRAST:
- return ov7670_g_contrast(sd, &ctrl->value);
- case V4L2_CID_SATURATION:
- return ov7670_g_sat(sd, &ctrl->value);
- case V4L2_CID_HUE:
- return ov7670_g_hue(sd, &ctrl->value);
- case V4L2_CID_VFLIP:
- return ov7670_g_vflip(sd, &ctrl->value);
- case V4L2_CID_HFLIP:
- return ov7670_g_hflip(sd, &ctrl->value);
- case V4L2_CID_GAIN:
- return ov7670_g_gain(sd, &ctrl->value);
case V4L2_CID_AUTOGAIN:
- return ov7670_g_autogain(sd, &ctrl->value);
- case V4L2_CID_EXPOSURE:
- return ov7670_g_exp(sd, &ctrl->value);
- case V4L2_CID_EXPOSURE_AUTO:
- return ov7670_g_autoexp(sd, &ctrl->value);
+ return ov7670_g_gain(sd, &info->gain->val);
}
return -EINVAL;
}
-static int ov7670_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int ov7670_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct ov7670_info *info = to_state(sd);
+
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- return ov7670_s_brightness(sd, ctrl->value);
+ return ov7670_s_brightness(sd, ctrl->val);
case V4L2_CID_CONTRAST:
- return ov7670_s_contrast(sd, ctrl->value);
+ return ov7670_s_contrast(sd, ctrl->val);
case V4L2_CID_SATURATION:
- return ov7670_s_sat(sd, ctrl->value);
- case V4L2_CID_HUE:
- return ov7670_s_hue(sd, ctrl->value);
+ return ov7670_s_sat_hue(sd,
+ info->saturation->val, info->hue->val);
case V4L2_CID_VFLIP:
- return ov7670_s_vflip(sd, ctrl->value);
+ return ov7670_s_vflip(sd, ctrl->val);
case V4L2_CID_HFLIP:
- return ov7670_s_hflip(sd, ctrl->value);
- case V4L2_CID_GAIN:
- return ov7670_s_gain(sd, ctrl->value);
+ return ov7670_s_hflip(sd, ctrl->val);
case V4L2_CID_AUTOGAIN:
- return ov7670_s_autogain(sd, ctrl->value);
- case V4L2_CID_EXPOSURE:
- return ov7670_s_exp(sd, ctrl->value);
+ /* Only set manual gain if auto gain is not explicitly
+ turned on. */
+ if (!ctrl->val) {
+ /* ov7670_s_gain turns off auto gain */
+ return ov7670_s_gain(sd, info->gain->val);
+ }
+ return ov7670_s_autogain(sd, ctrl->val);
case V4L2_CID_EXPOSURE_AUTO:
- return ov7670_s_autoexp(sd,
- (enum v4l2_exposure_auto_type) ctrl->value);
+ /* Only set manual exposure if auto exposure is not explicitly
+ turned on. */
+ if (ctrl->val == V4L2_EXPOSURE_MANUAL) {
+ /* ov7670_s_exp turns off auto exposure */
+ return ov7670_s_exp(sd, info->exposure->val);
+ }
+ return ov7670_s_autoexp(sd, ctrl->val);
}
return -EINVAL;
}
+static const struct v4l2_ctrl_ops ov7670_ctrl_ops = {
+ .s_ctrl = ov7670_s_ctrl,
+ .g_volatile_ctrl = ov7670_g_volatile_ctrl,
+};
+
static int ov7670_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
{
@@ -1482,9 +1504,6 @@ static int ov7670_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *r
static const struct v4l2_subdev_core_ops ov7670_core_ops = {
.g_chip_ident = ov7670_g_chip_ident,
- .g_ctrl = ov7670_g_ctrl,
- .s_ctrl = ov7670_s_ctrl,
- .queryctrl = ov7670_queryctrl,
.reset = ov7670_reset,
.init = ov7670_init,
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1510,9 +1529,25 @@ static const struct v4l2_subdev_ops ov7670_ops = {
/* ----------------------------------------------------------------------- */
+static const struct ov7670_devtype ov7670_devdata[] = {
+ [MODEL_OV7670] = {
+ .win_sizes = ov7670_win_sizes,
+ .n_win_sizes = ARRAY_SIZE(ov7670_win_sizes),
+ .set_framerate = ov7670_set_framerate_legacy,
+ .get_framerate = ov7670_get_framerate_legacy,
+ },
+ [MODEL_OV7675] = {
+ .win_sizes = ov7675_win_sizes,
+ .n_win_sizes = ARRAY_SIZE(ov7675_win_sizes),
+ .set_framerate = ov7675_set_framerate,
+ .get_framerate = ov7675_get_framerate,
+ },
+};
+
static int ov7670_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct v4l2_fract tpf;
struct v4l2_subdev *sd;
struct ov7670_info *info;
int ret;
@@ -1537,6 +1572,16 @@ static int ov7670_probe(struct i2c_client *client,
if (config->clock_speed)
info->clock_speed = config->clock_speed;
+
+ /*
+ * PLL bypass should be allowed for the ov7670 too, once it is migrated
+ * to the new frame rate formula.
+ */
+ if (config->pll_bypass && id->driver_data != MODEL_OV7670)
+ info->pll_bypass = true;
+
+ if (config->pclk_hb_disable)
+ info->pclk_hb_disable = true;
}
/* Make sure it's an ov7670 */
@@ -1551,9 +1596,58 @@ static int ov7670_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
+ info->devtype = &ov7670_devdata[id->driver_data];
info->fmt = &ov7670_formats[0];
- info->sat = 128; /* Review this */
- info->clkrc = info->clock_speed / 30;
+ info->clkrc = 0;
+
+ /* Set default frame rate to 30 fps */
+ tpf.numerator = 1;
+ tpf.denominator = 30;
+ info->devtype->set_framerate(sd, &tpf);
+
+ if (info->pclk_hb_disable)
+ ov7670_write(sd, REG_COM10, COM10_PCLK_HB);
+
+ v4l2_ctrl_handler_init(&info->hdl, 10);
+ v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ info->saturation = v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 256, 1, 128);
+ info->hue = v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_HUE, -180, 180, 5, 0);
+ info->gain = v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 128);
+ info->auto_gain = v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ info->exposure = v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 65535, 1, 500);
+ info->auto_exposure = v4l2_ctrl_new_std_menu(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL, 0,
+ V4L2_EXPOSURE_AUTO);
+ sd->ctrl_handler = &info->hdl;
+ if (info->hdl.error) {
+ int err = info->hdl.error;
+
+ v4l2_ctrl_handler_free(&info->hdl);
+ kfree(info);
+ return err;
+ }
+ /*
+ * We have checked empirically that the hardware allows reading back the
+ * gain value chosen by auto gain, but that's not the case for auto exposure.
+ */
+ v4l2_ctrl_auto_cluster(2, &info->auto_gain, 0, true);
+ v4l2_ctrl_auto_cluster(2, &info->auto_exposure,
+ V4L2_EXPOSURE_MANUAL, false);
+ v4l2_ctrl_cluster(2, &info->saturation);
+ v4l2_ctrl_handler_setup(&info->hdl);
+
return 0;
}
@@ -1561,14 +1655,17 @@ static int ov7670_probe(struct i2c_client *client,
static int ov7670_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov7670_info *info = to_state(sd);
v4l2_device_unregister_subdev(sd);
- kfree(to_state(sd));
+ v4l2_ctrl_handler_free(&info->hdl);
+ kfree(info);
return 0;
}
static const struct i2c_device_id ov7670_id[] = {
- { "ov7670", 0 },
+ { "ov7670", MODEL_OV7670 },
+ { "ov7675", MODEL_OV7675 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ov7670_id);
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
new file mode 100644
index 000000000000..1dbb8118a285
--- /dev/null
+++ b/drivers/media/i2c/ov9650.c
@@ -0,0 +1,1562 @@
+/*
+ * Omnivision OV9650/OV9652 CMOS Image Sensor driver
+ *
+ * Copyright (C) 2013, Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ *
+ * Register definitions and initial settings based on a driver written
+ * by Vladimir Fonov.
+ * Copyright (c) 2010, Vladimir Fonov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+#include <media/ov9650.h>
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+#define DRIVER_NAME "OV9650"
+
+/*
+ * OV9650/OV9652 register definitions
+ */
+#define REG_GAIN 0x00 /* Gain control, AGC[7:0] */
+#define REG_BLUE 0x01 /* AWB - Blue channel gain */
+#define REG_RED 0x02 /* AWB - Red channel gain */
+#define REG_VREF 0x03 /* [7:6] - AGC[9:8], [5:3]/[2:0] */
+#define VREF_GAIN_MASK 0xc0 /* - VREF end/start low 3 bits */
+#define REG_COM1 0x04
+#define COM1_CCIR656 0x40
+#define REG_B_AVE 0x05
+#define REG_GB_AVE 0x06
+#define REG_GR_AVE 0x07
+#define REG_R_AVE 0x08
+#define REG_COM2 0x09
+#define REG_PID 0x0a /* Product ID MSB */
+#define REG_VER 0x0b /* Product ID LSB */
+#define REG_COM3 0x0c
+#define COM3_SWAP 0x40
+#define COM3_VARIOPIXEL1 0x04
+#define REG_COM4 0x0d /* Vario Pixels */
+#define COM4_VARIOPIXEL2 0x80
+#define REG_COM5 0x0e /* System clock options */
+#define COM5_SLAVE_MODE 0x10
+#define COM5_SYSTEMCLOCK48MHZ 0x80
+#define REG_COM6 0x0f /* HREF & ADBLC options */
+#define REG_AECH 0x10 /* Exposure value, AEC[9:2] */
+#define REG_CLKRC 0x11 /* Clock control */
+#define CLK_EXT 0x40 /* Use external clock directly */
+#define CLK_SCALE 0x3f /* Mask for internal clock scale */
+#define REG_COM7 0x12 /* SCCB reset, output format */
+#define COM7_RESET 0x80
+#define COM7_FMT_MASK 0x38
+#define COM7_FMT_VGA 0x40
+#define COM7_FMT_CIF 0x20
+#define COM7_FMT_QVGA 0x10
+#define COM7_FMT_QCIF 0x08
+#define COM7_RGB 0x04
+#define COM7_YUV 0x00
+#define COM7_BAYER 0x01
+#define COM7_PBAYER 0x05
+#define REG_COM8 0x13 /* AGC/AEC options */
+#define COM8_FASTAEC 0x80 /* Enable fast AGC/AEC */
+#define COM8_AECSTEP 0x40 /* Unlimited AEC step size */
+#define COM8_BFILT 0x20 /* Band filter enable */
+#define COM8_AGC 0x04 /* Auto gain enable */
+#define COM8_AWB 0x02 /* White balance enable */
+#define COM8_AEC 0x01 /* Auto exposure enable */
+#define REG_COM9 0x14 /* Gain ceiling */
+#define COM9_GAIN_CEIL_MASK 0x70 /* */
+#define REG_COM10 0x15 /* PCLK, HREF, HSYNC signals polarity */
+#define COM10_HSYNC 0x40 /* HSYNC instead of HREF */
+#define COM10_PCLK_HB 0x20 /* Suppress PCLK on horiz blank */
+#define COM10_HREF_REV 0x08 /* Reverse HREF */
+#define COM10_VS_LEAD 0x04 /* VSYNC on clock leading edge */
+#define COM10_VS_NEG 0x02 /* VSYNC negative */
+#define COM10_HS_NEG 0x01 /* HSYNC negative */
+#define REG_HSTART 0x17 /* Horiz start high bits */
+#define REG_HSTOP 0x18 /* Horiz stop high bits */
+#define REG_VSTART 0x19 /* Vert start high bits */
+#define REG_VSTOP 0x1a /* Vert stop high bits */
+#define REG_PSHFT 0x1b /* Pixel delay after HREF */
+#define REG_MIDH 0x1c /* Manufacturer ID MSB */
+#define REG_MIDL 0x1d /* Manufacturer ID LSB */
+#define REG_MVFP 0x1e /* Image mirror/flip */
+#define MVFP_MIRROR 0x20 /* Mirror image */
+#define MVFP_FLIP 0x10 /* Vertical flip */
+#define REG_BOS 0x20 /* B channel Offset */
+#define REG_GBOS 0x21 /* Gb channel Offset */
+#define REG_GROS 0x22 /* Gr channel Offset */
+#define REG_ROS 0x23 /* R channel Offset */
+#define REG_AEW 0x24 /* AGC upper limit */
+#define REG_AEB 0x25 /* AGC lower limit */
+#define REG_VPT 0x26 /* AGC/AEC fast mode op region */
+#define REG_BBIAS 0x27 /* B channel output bias */
+#define REG_GBBIAS 0x28 /* Gb channel output bias */
+#define REG_GRCOM 0x29 /* Analog BLC & regulator */
+#define REG_EXHCH 0x2a /* Dummy pixel insert MSB */
+#define REG_EXHCL 0x2b /* Dummy pixel insert LSB */
+#define REG_RBIAS 0x2c /* R channel output bias */
+#define REG_ADVFL 0x2d /* LSB of dummy line insert */
+#define REG_ADVFH 0x2e /* MSB of dummy line insert */
+#define REG_YAVE 0x2f /* Y/G channel average value */
+#define REG_HSYST 0x30 /* HSYNC rising edge delay LSB*/
+#define REG_HSYEN 0x31 /* HSYNC falling edge delay LSB*/
+#define REG_HREF 0x32 /* HREF pieces */
+#define REG_CHLF 0x33 /* reserved */
+#define REG_ADC 0x37 /* reserved */
+#define REG_ACOM 0x38 /* reserved */
+#define REG_OFON 0x39 /* Power down register */
+#define OFON_PWRDN 0x08 /* Power down bit */
+#define REG_TSLB 0x3a /* YUVU format */
+#define TSLB_YUYV_MASK 0x0c /* UYVY or VYUY - see com13 */
+#define REG_COM11 0x3b /* Night mode, banding filter enable */
+#define COM11_NIGHT 0x80 /* Night mode enable */
+#define COM11_NMFR 0x60 /* Two bit NM frame rate */
+#define COM11_BANDING 0x01 /* Banding filter */
+#define COM11_AEC_REF_MASK 0x18 /* AEC reference area selection */
+#define REG_COM12 0x3c /* HREF option, UV average */
+#define COM12_HREF 0x80 /* HREF always */
+#define REG_COM13 0x3d /* Gamma selection, Color matrix en. */
+#define COM13_GAMMA 0x80 /* Gamma enable */
+#define COM13_UVSAT 0x40 /* UV saturation auto adjustment */
+#define COM13_UVSWAP 0x01 /* V before U - w/TSLB */
+#define REG_COM14 0x3e /* Edge enhancement options */
+#define COM14_EDGE_EN 0x02
+#define COM14_EEF_X2 0x01
+#define REG_EDGE 0x3f /* Edge enhancement factor */
+#define EDGE_FACTOR_MASK 0x0f
+#define REG_COM15 0x40 /* Output range, RGB 555/565 */
+#define COM15_R10F0 0x00 /* Data range 10 to F0 */
+#define COM15_R01FE 0x80 /* 01 to FE */
+#define COM15_R00FF 0xc0 /* 00 to FF */
+#define COM15_RGB565 0x10 /* RGB565 output */
+#define COM15_RGB555 0x30 /* RGB555 output */
+#define COM15_SWAPRB 0x04 /* Swap R&B */
+#define REG_COM16 0x41 /* Color matrix coeff options */
+#define REG_COM17 0x42 /* Single frame out, banding filter */
+/* n = 1...9, 0x4f..0x57 */
+#define REG_MTX(__n) (0x4f + (__n) - 1)
+#define REG_MTXS 0x58
+/* Lens Correction Option 1...5, __n = 1...5 */
+#define REG_LCC(__n) (0x62 + (__n) - 1)
+#define LCC5_LCC_ENABLE 0x01 /* LCC5, enable lens correction */
+#define LCC5_LCC_COLOR 0x04
+#define REG_MANU 0x67 /* Manual U value */
+#define REG_MANV 0x68 /* Manual V value */
+#define REG_HV 0x69 /* Manual banding filter MSB */
+#define REG_MBD 0x6a /* Manual banding filter value */
+#define REG_DBLV 0x6b /* reserved */
+#define REG_GSP 0x6c /* Gamma curve */
+#define GSP_LEN 15
+#define REG_GST 0x7c /* Gamma curve */
+#define GST_LEN 15
+#define REG_COM21 0x8b
+#define REG_COM22 0x8c /* Edge enhancement, denoising */
+#define COM22_WHTPCOR 0x02 /* White pixel correction enable */
+#define COM22_WHTPCOROPT 0x01 /* White pixel correction option */
+#define COM22_DENOISE 0x10 /* De-noise enable */
+#define REG_COM23 0x8d /* Color bar test, color gain */
+#define COM23_TEST_MODE 0x10
+#define REG_DBLC1 0x8f /* Digital BLC */
+#define REG_DBLC_B 0x90 /* Digital BLC B channel offset */
+#define REG_DBLC_R 0x91 /* Digital BLC R channel offset */
+#define REG_DM_LNL 0x92 /* Dummy line low 8 bits */
+#define REG_DM_LNH 0x93 /* Dummy line high 8 bits */
+#define REG_LCCFB 0x9d /* Lens Correction B channel */
+#define REG_LCCFR 0x9e /* Lens Correction R channel */
+#define REG_DBLC_GB 0x9f /* Digital BLC GB chan offset */
+#define REG_DBLC_GR 0xa0 /* Digital BLC GR chan offset */
+#define REG_AECHM 0xa1 /* Exposure value - bits AEC[15:10] */
+#define REG_BD50ST 0xa2 /* Banding filter value for 50Hz */
+#define REG_BD60ST 0xa3 /* Banding filter value for 60Hz */
+#define REG_NULL 0xff /* Array end token */
+
+#define DEF_CLKRC 0x80
+
+#define OV965X_ID(_msb, _lsb) ((_msb) << 8 | (_lsb))
+#define OV9650_ID 0x9650
+#define OV9652_ID 0x9652
+
+struct ov965x_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct {
+ struct v4l2_ctrl *auto_exp;
+ struct v4l2_ctrl *exposure;
+ };
+ struct {
+ struct v4l2_ctrl *auto_wb;
+ struct v4l2_ctrl *blue_balance;
+ struct v4l2_ctrl *red_balance;
+ };
+ struct {
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
+ struct {
+ struct v4l2_ctrl *auto_gain;
+ struct v4l2_ctrl *gain;
+ };
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *sharpness;
+ struct v4l2_ctrl *light_freq;
+ u8 update;
+};
+
+struct ov965x_framesize {
+ u16 width;
+ u16 height;
+ u16 max_exp_lines;
+ const u8 *regs;
+};
+
+struct ov965x_interval {
+ struct v4l2_fract interval;
+ /* Maximum resolution for this interval */
+ struct v4l2_frmsize_discrete size;
+ u8 clkrc_div;
+};
+
+enum gpio_id {
+ GPIO_PWDN,
+ GPIO_RST,
+ NUM_GPIOS,
+};
+
+struct ov965x {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ enum v4l2_mbus_type bus_type;
+ int gpios[NUM_GPIOS];
+ /* External master clock frequency */
+ unsigned long mclk_frequency;
+
+ /* Protects the struct fields below */
+ struct mutex lock;
+
+ struct i2c_client *client;
+
+ /* Exposure row interval in us */
+ unsigned int exp_row_interval;
+
+ unsigned short id;
+ const struct ov965x_framesize *frame_size;
+ /* YUYV sequence (pixel format) control register */
+ u8 tslb_reg;
+ struct v4l2_mbus_framefmt format;
+
+ struct ov965x_ctrls ctrls;
+ /* Pointer to frame rate control data structure */
+ const struct ov965x_interval *fiv;
+
+ int streaming;
+ int power;
+
+ u8 apply_frame_fmt;
+};
+
+struct i2c_rv {
+ u8 addr;
+ u8 value;
+};
+
+static const struct i2c_rv ov965x_init_regs[] = {
+ { REG_COM2, 0x10 }, /* Set soft sleep mode */
+ { REG_COM5, 0x00 }, /* System clock options */
+ { REG_COM2, 0x01 }, /* Output drive, soft sleep mode */
+ { REG_COM10, 0x00 }, /* Slave mode, HREF vs HSYNC, signals negate */
+ { REG_EDGE, 0xa6 }, /* Edge enhancement threshold and factor */
+ { REG_COM16, 0x02 }, /* Color matrix coeff double option */
+ { REG_COM17, 0x08 }, /* Single frame out, banding filter */
+ { 0x16, 0x06 },
+ { REG_CHLF, 0xc0 }, /* Reserved */
+ { 0x34, 0xbf },
+ { 0xa8, 0x80 },
+ { 0x96, 0x04 },
+ { 0x8e, 0x00 },
+ { REG_COM12, 0x77 }, /* HREF option, UV average */
+ { 0x8b, 0x06 },
+ { 0x35, 0x91 },
+ { 0x94, 0x88 },
+ { 0x95, 0x88 },
+ { REG_COM15, 0xc1 }, /* Output range, RGB 555/565 */
+ { REG_GRCOM, 0x2f }, /* Analog BLC & regulator */
+ { REG_COM6, 0x43 }, /* HREF & ADBLC options */
+ { REG_COM8, 0xe5 }, /* AGC/AEC options */
+ { REG_COM13, 0x90 }, /* Gamma selection, colour matrix, UV delay */
+ { REG_HV, 0x80 }, /* Manual banding filter MSB */
+ { 0x5c, 0x96 }, /* Reserved up to 0xa5 */
+ { 0x5d, 0x96 },
+ { 0x5e, 0x10 },
+ { 0x59, 0xeb },
+ { 0x5a, 0x9c },
+ { 0x5b, 0x55 },
+ { 0x43, 0xf0 },
+ { 0x44, 0x10 },
+ { 0x45, 0x55 },
+ { 0x46, 0x86 },
+ { 0x47, 0x64 },
+ { 0x48, 0x86 },
+ { 0x5f, 0xe0 },
+ { 0x60, 0x8c },
+ { 0x61, 0x20 },
+ { 0xa5, 0xd9 },
+ { 0xa4, 0x74 }, /* reserved */
+ { REG_COM23, 0x02 }, /* Color gain analog/_digital_ */
+ { REG_COM8, 0xe7 }, /* Enable AGC, AWB, AEC */
+ { REG_COM22, 0x23 }, /* Edge enhancement, denoising */
+ { 0xa9, 0xb8 },
+ { 0xaa, 0x92 },
+ { 0xab, 0x0a },
+ { REG_DBLC1, 0xdf }, /* Digital BLC */
+ { REG_DBLC_B, 0x00 }, /* Digital BLC B chan offset */
+ { REG_DBLC_R, 0x00 }, /* Digital BLC R chan offset */
+ { REG_DBLC_GB, 0x00 }, /* Digital BLC GB chan offset */
+ { REG_DBLC_GR, 0x00 },
+ { REG_COM9, 0x3a }, /* Gain ceiling 16x */
+ { REG_NULL, 0 }
+};
+
+#define NUM_FMT_REGS 14
+/*
+ * COM7, COM3, COM4, HSTART, HSTOP, HREF, VSTART, VSTOP, VREF,
+ * EXHCH, EXHCL, ADC, OCOM, OFON
+ */
+static const u8 frame_size_reg_addr[NUM_FMT_REGS] = {
+ 0x12, 0x0c, 0x0d, 0x17, 0x18, 0x32, 0x19, 0x1a, 0x03,
+ 0x2a, 0x2b, 0x37, 0x38, 0x39,
+};
+
+static const u8 ov965x_sxga_regs[NUM_FMT_REGS] = {
+ 0x00, 0x00, 0x00, 0x1e, 0xbe, 0xbf, 0x01, 0x81, 0x12,
+ 0x10, 0x34, 0x81, 0x93, 0x51,
+};
+
+static const u8 ov965x_vga_regs[NUM_FMT_REGS] = {
+ 0x40, 0x04, 0x80, 0x26, 0xc6, 0xed, 0x01, 0x3d, 0x00,
+ 0x10, 0x40, 0x91, 0x12, 0x43,
+};
+
+/* Determined empirically. */
+static const u8 ov965x_qvga_regs[NUM_FMT_REGS] = {
+ 0x10, 0x04, 0x80, 0x25, 0xc5, 0xbf, 0x00, 0x80, 0x12,
+ 0x10, 0x40, 0x91, 0x12, 0x43,
+};
+
+static const struct ov965x_framesize ov965x_framesizes[] = {
+ {
+ .width = SXGA_WIDTH,
+ .height = SXGA_HEIGHT,
+ .regs = ov965x_sxga_regs,
+ .max_exp_lines = 1048,
+ }, {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .regs = ov965x_vga_regs,
+ .max_exp_lines = 498,
+ }, {
+ .width = QVGA_WIDTH,
+ .height = QVGA_HEIGHT,
+ .regs = ov965x_qvga_regs,
+ .max_exp_lines = 248,
+ },
+};
+
+struct ov965x_pixfmt {
+ enum v4l2_mbus_pixelcode code;
+ u32 colorspace;
+ /* REG_TSLB value, only bits [3:2] may be set. */
+ u8 tslb_reg;
+};
+
+static const struct ov965x_pixfmt ov965x_formats[] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 0x00},
+ { V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG, 0x04},
+ { V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG, 0x0c},
+ { V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG, 0x08},
+};
+
+/*
+ * This table specifies possible frame resolution and interval
+ * combinations. Default CLKRC[5:0] divider values are valid
+ * only for 24 MHz external clock frequency.
+ */
+static struct ov965x_interval ov965x_intervals[] = {
+ {{ 100, 625 }, { SXGA_WIDTH, SXGA_HEIGHT }, 0 }, /* 6.25 fps */
+ {{ 10, 125 }, { VGA_WIDTH, VGA_HEIGHT }, 1 }, /* 12.5 fps */
+ {{ 10, 125 }, { QVGA_WIDTH, QVGA_HEIGHT }, 3 }, /* 12.5 fps */
+ {{ 1, 25 }, { VGA_WIDTH, VGA_HEIGHT }, 0 }, /* 25 fps */
+ {{ 1, 25 }, { QVGA_WIDTH, QVGA_HEIGHT }, 1 }, /* 25 fps */
+};
+
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct ov965x, ctrls.handler)->sd;
+}
+
+static inline struct ov965x *to_ov965x(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ov965x, sd);
+}
+
+static int ov965x_read(struct i2c_client *client, u8 addr, u8 *val)
+{
+ u8 buf = addr;
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &buf
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret == 1) {
+ msg.flags = I2C_M_RD;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ if (ret == 1)
+ *val = buf;
+ }
+
+ v4l2_dbg(2, debug, client, "%s: 0x%02x @ 0x%02x. (%d)\n",
+ __func__, *val, addr, ret);
+
+ return ret == 1 ? 0 : ret;
+}
+
+static int ov965x_write(struct i2c_client *client, u8 addr, u8 val)
+{
+ u8 buf[2] = { addr, val };
+
+ int ret = i2c_master_send(client, buf, 2);
+
+ v4l2_dbg(2, debug, client, "%s: 0x%02x @ 0x%02X (%d)\n",
+ __func__, val, addr, ret);
+
+ return ret == 2 ? 0 : ret;
+}
+
+static int ov965x_write_array(struct i2c_client *client,
+ const struct i2c_rv *regs)
+{
+ int i, ret = 0;
+
+ for (i = 0; ret == 0 && regs[i].addr != REG_NULL; i++)
+ ret = ov965x_write(client, regs[i].addr, regs[i].value);
+
+ return ret;
+}
+
+static int ov965x_set_default_gamma_curve(struct ov965x *ov965x)
+{
+ static const u8 gamma_curve[] = {
+ /* Values taken from OV application note. */
+ 0x40, 0x30, 0x4b, 0x60, 0x70, 0x70, 0x70, 0x70,
+ 0x60, 0x60, 0x50, 0x48, 0x3a, 0x2e, 0x28, 0x22,
+ 0x04, 0x07, 0x10, 0x28, 0x36, 0x44, 0x52, 0x60,
+ 0x6c, 0x78, 0x8c, 0x9e, 0xbb, 0xd2, 0xe6
+ };
+ u8 addr = REG_GSP;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gamma_curve); i++) {
+ int ret = ov965x_write(ov965x->client, addr, gamma_curve[i]);
+ if (ret < 0)
+ return ret;
+ addr++;
+ }
+
+ return 0;
+};
+
+static int ov965x_set_color_matrix(struct ov965x *ov965x)
+{
+ static const u8 mtx[] = {
+ /* MTX1..MTX9, MTXS */
+ 0x3a, 0x3d, 0x03, 0x12, 0x26, 0x38, 0x40, 0x40, 0x40, 0x0d
+ };
+ u8 addr = REG_MTX(1);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtx); i++) {
+ int ret = ov965x_write(ov965x->client, addr, mtx[i]);
+ if (ret < 0)
+ return ret;
+ addr++;
+ }
+
+ return 0;
+}
+
+static void ov965x_gpio_set(int gpio, int val)
+{
+ if (gpio_is_valid(gpio))
+ gpio_set_value(gpio, val);
+}
+
+static void __ov965x_set_power(struct ov965x *ov965x, int on)
+{
+ if (on) {
+ ov965x_gpio_set(ov965x->gpios[GPIO_PWDN], 0);
+ ov965x_gpio_set(ov965x->gpios[GPIO_RST], 0);
+ usleep_range(25000, 26000);
+ } else {
+ ov965x_gpio_set(ov965x->gpios[GPIO_RST], 1);
+ ov965x_gpio_set(ov965x->gpios[GPIO_PWDN], 1);
+ }
+
+ ov965x->streaming = 0;
+}
+
+static int ov965x_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov965x *ov965x = to_ov965x(sd);
+ struct i2c_client *client = ov965x->client;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, client, "%s: on: %d\n", __func__, on);
+
+ mutex_lock(&ov965x->lock);
+ if (ov965x->power == !on) {
+ __ov965x_set_power(ov965x, on);
+ if (on) {
+ ret = ov965x_write_array(client,
+ ov965x_init_regs);
+ ov965x->apply_frame_fmt = 1;
+ ov965x->ctrls.update = 1;
+ }
+ }
+ if (!ret)
+ ov965x->power += on ? 1 : -1;
+
+ WARN_ON(ov965x->power < 0);
+ mutex_unlock(&ov965x->lock);
+ return ret;
+}
+
+/*
+ * V4L2 controls
+ */
+
+static void ov965x_update_exposure_ctrl(struct ov965x *ov965x)
+{
+ struct v4l2_ctrl *ctrl = ov965x->ctrls.exposure;
+ unsigned long fint, trow;
+ int min, max, def;
+ u8 clkrc;
+
+ mutex_lock(&ov965x->lock);
+ if (WARN_ON(!ctrl || !ov965x->frame_size)) {
+ mutex_unlock(&ov965x->lock);
+ return;
+ }
+ clkrc = DEF_CLKRC + ov965x->fiv->clkrc_div;
+ /* Calculate internal clock frequency */
+ fint = ov965x->mclk_frequency * ((clkrc >> 7) + 1) /
+ ((2 * ((clkrc & 0x3f) + 1)));
+ /* and the row interval (in us). */
+ trow = (2 * 1520 * 1000000UL) / fint;
+ max = ov965x->frame_size->max_exp_lines * trow;
+ ov965x->exp_row_interval = trow;
+ mutex_unlock(&ov965x->lock);
+
+ v4l2_dbg(1, debug, &ov965x->sd, "clkrc: %#x, fi: %lu, tr: %lu, %d\n",
+ clkrc, fint, trow, max);
+
+ /* Update exposure time range to match current frame format. */
+ min = (trow + 100) / 100;
+ max = (max - 100) / 100;
+ def = min + (max - min) / 2;
+
+ if (v4l2_ctrl_modify_range(ctrl, min, max, 1, def))
+ v4l2_err(&ov965x->sd, "Exposure ctrl range update failed\n");
+}
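
A worked instance of the exposure-range math in ov965x_update_exposure_ctrl() above, run as a stand-alone program. The 24 MHz master clock is an assumed example; the SXGA max_exp_lines value of 1048 and the DEF_CLKRC base come from this patch.

/* Exposure range calculation for SXGA at the default clock divider.
 * Stand-alone arithmetic only; mclk is an assumed example value. */
#include <stdio.h>

int main(void)
{
	unsigned long mclk = 24000000UL;	/* assumed master clock, Hz */
	unsigned char clkrc = 0x80;		/* DEF_CLKRC, clkrc_div = 0 */
	unsigned int max_exp_lines = 1048;	/* SXGA entry above */

	unsigned long fint = mclk * ((clkrc >> 7) + 1) /
			     (2 * ((clkrc & 0x3f) + 1));
	unsigned long trow = (2 * 1520 * 1000000UL) / fint;	/* us per row */
	unsigned long max_exp = max_exp_lines * trow;		/* us */

	/* Range handed to v4l2_ctrl_modify_range(), in 100 us units. */
	printf("fint = %lu Hz, trow = %lu us, range = %lu..%lu\n",
	       fint, trow, (trow + 100) / 100, (max_exp - 100) / 100);
	return 0;
}
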
+
+static int ov965x_set_banding_filter(struct ov965x *ov965x, int value)
+{
+ unsigned long mbd, light_freq;
+ int ret;
+ u8 reg;
+
+ ret = ov965x_read(ov965x->client, REG_COM8, &reg);
+ if (!ret) {
+ if (value == V4L2_CID_POWER_LINE_FREQUENCY_DISABLED)
+ reg &= ~COM8_BFILT;
+ else
+ reg |= COM8_BFILT;
+ ret = ov965x_write(ov965x->client, REG_COM8, reg);
+ }
+ if (value == V4L2_CID_POWER_LINE_FREQUENCY_DISABLED)
+ return 0;
+ if (WARN_ON(ov965x->fiv == NULL))
+ return -EINVAL;
+ /* Set minimal exposure time for 50/60 HZ lighting */
+ if (value == V4L2_CID_POWER_LINE_FREQUENCY_50HZ)
+ light_freq = 50;
+ else
+ light_freq = 60;
+ mbd = (1000UL * ov965x->fiv->interval.denominator *
+ ov965x->frame_size->max_exp_lines) /
+ ov965x->fiv->interval.numerator;
+ mbd = ((mbd / (light_freq * 2)) + 500) / 1000UL;
+
+ return ov965x_write(ov965x->client, REG_MBD, mbd);
+}
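
The manual banding filter value written to REG_MBD above appears to work out to roughly the number of sensor rows per half-cycle of the mains frequency. A quick check using the patch's VGA entries (the 1/25 s interval and 498 max exposure lines) with 50 Hz lighting assumed:

/* Banding filter value for the VGA, 25 fps case under 50 Hz lighting,
 * following the arithmetic above.  Illustration only. */
#include <stdio.h>

int main(void)
{
	unsigned long num = 1, den = 25;	/* frame interval 1/25 s */
	unsigned long max_exp_lines = 498;	/* VGA entry above */
	unsigned long light_freq = 50;		/* Hz, assumed */

	unsigned long mbd = (1000UL * den * max_exp_lines) / num;
	mbd = ((mbd / (light_freq * 2)) + 500) / 1000UL;

	printf("REG_MBD = %lu (0x%02lx)\n", mbd, mbd);	/* 125 (0x7d) */
	return 0;
}
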
+
+static int ov965x_set_white_balance(struct ov965x *ov965x, int awb)
+{
+ int ret;
+ u8 reg;
+
+ ret = ov965x_read(ov965x->client, REG_COM8, &reg);
+ if (!ret) {
+ reg = awb ? reg | COM8_AWB : reg & ~COM8_AWB;
+ ret = ov965x_write(ov965x->client, REG_COM8, reg);
+ }
+ if (!ret && !awb) {
+ ret = ov965x_write(ov965x->client, REG_BLUE,
+ ov965x->ctrls.blue_balance->val);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_write(ov965x->client, REG_RED,
+ ov965x->ctrls.red_balance->val);
+ }
+ return ret;
+}
+
+#define NUM_BR_LEVELS 7
+#define NUM_BR_REGS 3
+
+static int ov965x_set_brightness(struct ov965x *ov965x, int val)
+{
+ static const u8 regs[NUM_BR_LEVELS + 1][NUM_BR_REGS] = {
+ { REG_AEW, REG_AEB, REG_VPT },
+ { 0x1c, 0x12, 0x50 }, /* -3 */
+ { 0x3d, 0x30, 0x71 }, /* -2 */
+ { 0x50, 0x44, 0x92 }, /* -1 */
+ { 0x70, 0x64, 0xc3 }, /* 0 */
+ { 0x90, 0x84, 0xd4 }, /* +1 */
+ { 0xc4, 0xbf, 0xf9 }, /* +2 */
+ { 0xd8, 0xd0, 0xfa }, /* +3 */
+ };
+ int i, ret = 0;
+
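+ /*
+ * Map val in [-3, 3] to rows 1..NUM_BR_LEVELS of regs[]; row 0
+ * holds the AEW/AEB/VPT register addresses written below.
+ */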
+ val += (NUM_BR_LEVELS / 2 + 1);
+ if (val > NUM_BR_LEVELS)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_BR_REGS && !ret; i++)
+ ret = ov965x_write(ov965x->client, regs[0][i],
+ regs[val][i]);
+ return ret;
+}
+
+static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain)
+{
+ struct i2c_client *client = ov965x->client;
+ struct ov965x_ctrls *ctrls = &ov965x->ctrls;
+ int ret = 0;
+ u8 reg;
+ /*
+ * For manual mode we need to disable AGC first, so that the
+ * gain value in REG_VREF/REG_GAIN is not overwritten.
+ */
+ if (ctrls->auto_gain->is_new) {
+ ret = ov965x_read(client, REG_COM8, &reg);
+ if (ret < 0)
+ return ret;
+ if (ctrls->auto_gain->val)
+ reg |= COM8_AGC;
+ else
+ reg &= ~COM8_AGC;
+ ret = ov965x_write(client, REG_COM8, reg);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ctrls->gain->is_new && !auto_gain) {
+ unsigned int gain = ctrls->gain->val;
+ unsigned int rgain;
+ int m;
+ /*
+ * Convert gain control value to the sensor's gain
+ * registers (VREF[7:6], GAIN[7:0]) format.
+ */
+ for (m = 6; m >= 0; m--)
+ if (gain >= (1 << m) * 16)
+ break;
+ rgain = (gain - ((1 << m) * 16)) / (1 << m);
+ rgain |= (((1 << m) - 1) << 4);
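+ /*
+ * Worked example: gain = 40 (2.5x) gives m = 1,
+ * rgain = (40 - 32) / 2 = 4, then rgain |= 0x10, so GAIN = 0x14
+ * and the value reported back below is 2 * (16 + 4) = 40 again.
+ */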
+
+ ret = ov965x_write(client, REG_GAIN, rgain & 0xff);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_read(client, REG_VREF, &reg);
+ if (ret < 0)
+ return ret;
+ reg &= ~VREF_GAIN_MASK;
+ reg |= (((rgain >> 8) & 0x3) << 6);
+ ret = ov965x_write(client, REG_VREF, reg);
+ if (ret < 0)
+ return ret;
+ /* Return updated control's value to userspace */
+ ctrls->gain->val = (1 << m) * (16 + (rgain & 0xf));
+ }
+
+ return ret;
+}
+
+static int ov965x_set_sharpness(struct ov965x *ov965x, unsigned int value)
+{
+ u8 com14, edge;
+ int ret;
+
+ ret = ov965x_read(ov965x->client, REG_COM14, &com14);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_read(ov965x->client, REG_EDGE, &edge);
+ if (ret < 0)
+ return ret;
+ com14 = value ? com14 | COM14_EDGE_EN : com14 & ~COM14_EDGE_EN;
+ /* Avoid unsigned wrap-around when sharpness is 0 (edge enhancement disabled) */
+ if (value > 0)
+ value--;
+ if (value > 0x0f) {
+ com14 |= COM14_EEF_X2;
+ value >>= 1;
+ } else {
+ com14 &= ~COM14_EEF_X2;
+ }
+ ret = ov965x_write(ov965x->client, REG_COM14, com14);
+ if (ret < 0)
+ return ret;
+
+ edge &= ~EDGE_FACTOR_MASK;
+ edge |= ((u8)value & 0x0f);
+
+ return ov965x_write(ov965x->client, REG_EDGE, edge);
+}
+
+static int ov965x_set_exposure(struct ov965x *ov965x, int exp)
+{
+ struct i2c_client *client = ov965x->client;
+ struct ov965x_ctrls *ctrls = &ov965x->ctrls;
+ bool auto_exposure = (exp == V4L2_EXPOSURE_AUTO);
+ int ret;
+ u8 reg;
+
+ if (ctrls->auto_exp->is_new) {
+ ret = ov965x_read(client, REG_COM8, &reg);
+ if (ret < 0)
+ return ret;
+ if (auto_exposure)
+ reg |= (COM8_AEC | COM8_AGC);
+ else
+ reg &= ~(COM8_AEC | COM8_AGC);
+ ret = ov965x_write(client, REG_COM8, reg);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!auto_exposure && ctrls->exposure->is_new) {
+ unsigned int exposure = (ctrls->exposure->val * 100)
+ / ov965x->exp_row_interval;
+ /*
+ * Manual exposure value
+ * [b15:b0] - AECHM (b15:b10), AECH (b9:b2), COM1 (b1:b0)
+ */
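+ /*
+ * Example split for exposure = 0x1234: COM1[1:0] = 0x0,
+ * AECH = 0x8d, AECHM = 0x04; reassembled:
+ * (0x04 << 10) | (0x8d << 2) | 0x0 = 0x1234.
+ */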
+ ret = ov965x_write(client, REG_COM1, exposure & 0x3);
+ if (!ret)
+ ret = ov965x_write(client, REG_AECH,
+ (exposure >> 2) & 0xff);
+ if (!ret)
+ ret = ov965x_write(client, REG_AECHM,
+ (exposure >> 10) & 0x3f);
+ /* Update the value to minimize rounding errors */
+ ctrls->exposure->val = ((exposure * ov965x->exp_row_interval)
+ + 50) / 100;
+ if (ret < 0)
+ return ret;
+ }
+
+ v4l2_ctrl_activate(ov965x->ctrls.brightness, !exp);
+ return 0;
+}
+
+static int ov965x_set_flip(struct ov965x *ov965x)
+{
+ u8 mvfp = 0;
+
+ if (ov965x->ctrls.hflip->val)
+ mvfp |= MVFP_MIRROR;
+
+ if (ov965x->ctrls.vflip->val)
+ mvfp |= MVFP_FLIP;
+
+ return ov965x_write(ov965x->client, REG_MVFP, mvfp);
+}
+
+#define NUM_SAT_LEVELS 5
+#define NUM_SAT_REGS 6
+
+static int ov965x_set_saturation(struct ov965x *ov965x, int val)
+{
+ static const u8 regs[NUM_SAT_LEVELS][NUM_SAT_REGS] = {
+ /* MTX(1)...MTX(6) */
+ { 0x1d, 0x1f, 0x02, 0x09, 0x13, 0x1c }, /* -2 */
+ { 0x2e, 0x31, 0x02, 0x0e, 0x1e, 0x2d }, /* -1 */
+ { 0x3a, 0x3d, 0x03, 0x12, 0x26, 0x38 }, /* 0 */
+ { 0x46, 0x49, 0x04, 0x16, 0x2e, 0x43 }, /* +1 */
+ { 0x57, 0x5c, 0x05, 0x1b, 0x39, 0x54 }, /* +2 */
+ };
+ u8 addr = REG_MTX(1);
+ int i, ret = 0;
+
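+ /* Map val in [-2, 2] to rows 0..NUM_SAT_LEVELS - 1 of regs[]. */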
+ val += (NUM_SAT_LEVELS / 2);
+ if (val >= NUM_SAT_LEVELS)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_SAT_REGS && !ret; i++)
+ ret = ov965x_write(ov965x->client, addr + i, regs[val][i]);
+
+ return ret;
+}
+
+static int ov965x_set_test_pattern(struct ov965x *ov965x, int value)
+{
+ int ret;
+ u8 reg;
+
+ ret = ov965x_read(ov965x->client, REG_COM23, &reg);
+ if (ret < 0)
+ return ret;
+ reg = value ? reg | COM23_TEST_MODE : reg & ~COM23_TEST_MODE;
+ return ov965x_write(ov965x->client, REG_COM23, reg);
+}
+
+static int __g_volatile_ctrl(struct ov965x *ov965x, struct v4l2_ctrl *ctrl)
+{
+ struct i2c_client *client = ov965x->client;
+ unsigned int exposure, gain, m;
+ u8 reg0, reg1, reg2;
+ int ret;
+
+ if (!ov965x->power)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ if (!ctrl->val)
+ return 0;
+ ret = ov965x_read(client, REG_GAIN, &reg0);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_read(client, REG_VREF, &reg1);
+ if (ret < 0)
+ return ret;
+ gain = ((reg1 >> 6) << 8) | reg0;
+ m = 0x01 << fls(gain >> 4);
+ ov965x->ctrls.gain->val = m * (16 + (gain & 0xf));
+ break;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ if (ctrl->val == V4L2_EXPOSURE_MANUAL)
+ return 0;
+ ret = ov965x_read(client, REG_COM1, &reg0);
+ if (!ret)
+ ret = ov965x_read(client, REG_AECH, &reg1);
+ if (!ret)
+ ret = ov965x_read(client, REG_AECHM, &reg2);
+ if (ret < 0)
+ return ret;
+ exposure = ((reg2 & 0x3f) << 10) | (reg1 << 2) |
+ (reg0 & 0x3);
+ ov965x->ctrls.exposure->val = ((exposure *
+ ov965x->exp_row_interval) + 50) / 100;
+ break;
+ }
+
+ return 0;
+}
+
+static int ov965x_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct ov965x *ov965x = to_ov965x(sd);
+ int ret;
+
+ v4l2_dbg(1, debug, sd, "g_ctrl: %s\n", ctrl->name);
+
+ mutex_lock(&ov965x->lock);
+ ret = __g_volatile_ctrl(ov965x, ctrl);
+ mutex_unlock(&ov965x->lock);
+ return ret;
+}
+
+static int ov965x_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct ov965x *ov965x = to_ov965x(sd);
+ int ret = -EINVAL;
+
+ v4l2_dbg(1, debug, sd, "s_ctrl: %s, value: %d. power: %d\n",
+ ctrl->name, ctrl->val, ov965x->power);
+
+ mutex_lock(&ov965x->lock);
+ /*
+ * If the device is not powered up yet, postpone applying the
+ * control's value to the hardware until it is ready to accept
+ * commands.
+ */
+ if (ov965x->power == 0) {
+ mutex_unlock(&ov965x->lock);
+ return 0;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = ov965x_set_white_balance(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_BRIGHTNESS:
+ ret = ov965x_set_brightness(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = ov965x_set_exposure(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_AUTOGAIN:
+ ret = ov965x_set_gain(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_HFLIP:
+ ret = ov965x_set_flip(ov965x);
+ break;
+
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ ret = ov965x_set_banding_filter(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_SATURATION:
+ ret = ov965x_set_saturation(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_SHARPNESS:
+ ret = ov965x_set_sharpness(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov965x_set_test_pattern(ov965x, ctrl->val);
+ break;
+ }
+
+ mutex_unlock(&ov965x->lock);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov965x_ctrl_ops = {
+ .g_volatile_ctrl = ov965x_g_volatile_ctrl,
+ .s_ctrl = ov965x_s_ctrl,
+};
+
+static const char * const test_pattern_menu[] = {
+ "Disabled",
+ "Color bars",
+ NULL
+};
+
+static int ov965x_initialize_controls(struct ov965x *ov965x)
+{
+ const struct v4l2_ctrl_ops *ops = &ov965x_ctrl_ops;
+ struct ov965x_ctrls *ctrls = &ov965x->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(hdl, 16);
+ if (ret < 0)
+ return ret;
+
+ /* Auto/manual white balance */
+ ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 1);
+ ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
+ 0, 0xff, 1, 0x80);
+ ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
+ 0, 0xff, 1, 0x80);
+ /* Auto/manual exposure */
+ ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO);
+ /* Exposure time, in 100 us units. The min/max range is updated dynamically. */
+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_EXPOSURE_ABSOLUTE,
+ 2, 1500, 1, 500);
+ /* Auto/manual gain */
+ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
+ 0, 1, 1, 1);
+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
+ 16, 64 * (16 + 15), 1, 64 * 16);
+
+ ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
+ -2, 2, 1, 0);
+ ctrls->brightness = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS,
+ -3, 3, 1, 0);
+ ctrls->sharpness = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SHARPNESS,
+ 0, 32, 1, 6);
+
+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ ctrls->light_freq = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, ~0x7,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
+
+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(test_pattern_menu) - 1, 0, 0,
+ test_pattern_menu);
+ if (hdl->error) {
+ ret = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+ }
+
+ ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_wb, 0, false);
+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_gain, 0, true);
+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_exp, 1, true);
+ v4l2_ctrl_cluster(2, &ctrls->hflip);
+
+ ov965x->sd.ctrl_handler = hdl;
+ return 0;
+}
+
+/*
+ * V4L2 subdev video and pad level operations
+ */
+static void ov965x_get_default_format(struct v4l2_mbus_framefmt *mf)
+{
+ mf->width = ov965x_framesizes[0].width;
+ mf->height = ov965x_framesizes[0].height;
+ mf->colorspace = ov965x_formats[0].colorspace;
+ mf->code = ov965x_formats[0].code;
+ mf->field = V4L2_FIELD_NONE;
+}
+
+static int ov965x_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(ov965x_formats))
+ return -EINVAL;
+
+ code->code = ov965x_formats[code->index].code;
+ return 0;
+}
+
+static int ov965x_enum_frame_sizes(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int i = ARRAY_SIZE(ov965x_formats);
+
+ if (fse->index >= ARRAY_SIZE(ov965x_framesizes))
+ return -EINVAL;
+
+ while (--i)
+ if (fse->code == ov965x_formats[i].code)
+ break;
+
+ fse->code = ov965x_formats[i].code;
+
+ fse->min_width = ov965x_framesizes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->max_height = ov965x_framesizes[fse->index].height;
+ fse->min_height = fse->max_height;
+
+ return 0;
+}
+
+static int ov965x_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct ov965x *ov965x = to_ov965x(sd);
+
+ mutex_lock(&ov965x->lock);
+ fi->interval = ov965x->fiv->interval;
+ mutex_unlock(&ov965x->lock);
+
+ return 0;
+}
+
+static int __ov965x_set_frame_interval(struct ov965x *ov965x,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct v4l2_mbus_framefmt *mbus_fmt = &ov965x->format;
+ const struct ov965x_interval *fiv = &ov965x_intervals[0];
+ u64 req_int, err, min_err = ~0ULL;
+ unsigned int i;
+
+ if (fi->interval.denominator == 0)
+ return -EINVAL;
+
+ req_int = (u64)(fi->interval.numerator * 10000) /
+ fi->interval.denominator;
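+ /*
+ * req_int and err below are in 100 us units; e.g. a requested
+ * 1/30 s interval gives req_int = 333.
+ */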
+
+ for (i = 0; i < ARRAY_SIZE(ov965x_intervals); i++) {
+ const struct ov965x_interval *iv = &ov965x_intervals[i];
+
+ if (mbus_fmt->width != iv->size.width ||
+ mbus_fmt->height != iv->size.height)
+ continue;
+ err = abs64((u64)(iv->interval.numerator * 10000) /
+ iv->interval.denominator - req_int);
+ if (err < min_err) {
+ fiv = iv;
+ min_err = err;
+ }
+ }
+ ov965x->fiv = fiv;
+
+ v4l2_dbg(1, debug, &ov965x->sd, "Changed frame interval to %u us\n",
+ fiv->interval.numerator * 1000000 / fiv->interval.denominator);
+
+ return 0;
+}
+
+static int ov965x_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct ov965x *ov965x = to_ov965x(sd);
+ int ret;
+
+ v4l2_dbg(1, debug, sd, "Setting %d/%d frame interval\n",
+ fi->interval.numerator, fi->interval.denominator);
+
+ mutex_lock(&ov965x->lock);
+ ret = __ov965x_set_frame_interval(ov965x, fi);
+ ov965x->apply_frame_fmt = 1;
+ mutex_unlock(&ov965x->lock);
+ return ret;
+}
+
+static int ov965x_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov965x *ov965x = to_ov965x(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, 0);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mutex_lock(&ov965x->lock);
+ fmt->format = ov965x->format;
+ mutex_unlock(&ov965x->lock);
+
+ return 0;
+}
+
+static void __ov965x_try_frame_size(struct v4l2_mbus_framefmt *mf,
+ const struct ov965x_framesize **size)
+{
+ const struct ov965x_framesize *fsize = &ov965x_framesizes[0],
+ *match = NULL;
+ int i = ARRAY_SIZE(ov965x_framesizes);
+ unsigned int min_err = UINT_MAX;
+
+ while (i--) {
+ int err = abs(fsize->width - mf->width)
+ + abs(fsize->height - mf->height);
+ if (err < min_err) {
+ min_err = err;
+ match = fsize;
+ }
+ fsize++;
+ }
+ if (!match)
+ match = &ov965x_framesizes[0];
+ mf->width = match->width;
+ mf->height = match->height;
+ if (size)
+ *size = match;
+}
+
+static int ov965x_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ unsigned int index = ARRAY_SIZE(ov965x_formats);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct ov965x *ov965x = to_ov965x(sd);
+ const struct ov965x_framesize *size = NULL;
+ int ret = 0;
+
+ __ov965x_try_frame_size(mf, &size);
+
+ while (--index)
+ if (ov965x_formats[index].code == mf->code)
+ break;
+
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->code = ov965x_formats[index].code;
+ mf->field = V4L2_FIELD_NONE;
+
+ mutex_lock(&ov965x->lock);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ if (fh != NULL) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ }
+ } else {
+ if (ov965x->streaming) {
+ ret = -EBUSY;
+ } else {
+ ov965x->frame_size = size;
+ ov965x->format = fmt->format;
+ ov965x->tslb_reg = ov965x_formats[index].tslb_reg;
+ ov965x->apply_frame_fmt = 1;
+ }
+ }
+
+ if (!ret && fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ struct v4l2_subdev_frame_interval fiv = {
+ .interval = { 0, 1 }
+ };
+ /* Reset to minimum possible frame interval */
+ __ov965x_set_frame_interval(ov965x, &fiv);
+ }
+ mutex_unlock(&ov965x->lock);
+
+ if (!ret)
+ ov965x_update_exposure_ctrl(ov965x);
+
+ return ret;
+}
+
+static int ov965x_set_frame_size(struct ov965x *ov965x)
+{
+ int i, ret = 0;
+
+ for (i = 0; ret == 0 && i < NUM_FMT_REGS; i++)
+ ret = ov965x_write(ov965x->client, frame_size_reg_addr[i],
+ ov965x->frame_size->regs[i]);
+ return ret;
+}
+
+static int __ov965x_set_params(struct ov965x *ov965x)
+{
+ struct i2c_client *client = ov965x->client;
+ struct ov965x_ctrls *ctrls = &ov965x->ctrls;
+ int ret = 0;
+ u8 reg;
+
+ if (ov965x->apply_frame_fmt) {
+ reg = DEF_CLKRC + ov965x->fiv->clkrc_div;
+ ret = ov965x_write(client, REG_CLKRC, reg);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_set_frame_size(ov965x);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_read(client, REG_TSLB, &reg);
+ if (ret < 0)
+ return ret;
+ reg &= ~TSLB_YUYV_MASK;
+ reg |= ov965x->tslb_reg;
+ ret = ov965x_write(client, REG_TSLB, reg);
+ if (ret < 0)
+ return ret;
+ }
+ ret = ov965x_set_default_gamma_curve(ov965x);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_set_color_matrix(ov965x);
+ if (ret < 0)
+ return ret;
+ /*
+ * Select the manual banding filter; the filter will
+ * be enabled later if required.
+ */
+ ret = ov965x_read(client, REG_COM11, &reg);
+ if (ret < 0)
+ return ret;
+ reg |= COM11_BANDING;
+ ret = ov965x_write(client, REG_COM11, reg);
+ if (ret < 0)
+ return ret;
+ /*
+ * Banding filter (REG_MBD value) needs to match selected
+ * resolution and frame rate, so it's always updated here.
+ */
+ return ov965x_set_banding_filter(ov965x, ctrls->light_freq->val);
+}
+
+static int ov965x_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov965x *ov965x = to_ov965x(sd);
+ struct ov965x_ctrls *ctrls = &ov965x->ctrls;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, client, "%s: on: %d\n", __func__, on);
+
+ mutex_lock(&ov965x->lock);
+ if (ov965x->streaming == !on) {
+ if (on)
+ ret = __ov965x_set_params(ov965x);
+
+ if (!ret && ctrls->update) {
+ /*
+ * ov965x_s_ctrl callback takes the mutex
+ * so it needs to be released here.
+ */
+ mutex_unlock(&ov965x->lock);
+ ret = v4l2_ctrl_handler_setup(&ctrls->handler);
+
+ mutex_lock(&ov965x->lock);
+ if (!ret)
+ ctrls->update = 0;
+ }
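+ /*
+ * COM2 0x11 presumably sets the sensor's soft sleep bit to stop
+ * streaming, while 0x01 clears it; both keep the same output
+ * drive setting.
+ */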
+ if (!ret)
+ ret = ov965x_write(client, REG_COM2,
+ on ? 0x01 : 0x11);
+ }
+ if (!ret)
+ ov965x->streaming += on ? 1 : -1;
+
+ WARN_ON(ov965x->streaming < 0);
+ mutex_unlock(&ov965x->lock);
+
+ return ret;
+}
+
+/*
+ * V4L2 subdev internal operations
+ */
+static int ov965x_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(fh, 0);
+
+ ov965x_get_default_format(mf);
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops ov965x_pad_ops = {
+ .enum_mbus_code = ov965x_enum_mbus_code,
+ .enum_frame_size = ov965x_enum_frame_sizes,
+ .get_fmt = ov965x_get_fmt,
+ .set_fmt = ov965x_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops ov965x_video_ops = {
+ .s_stream = ov965x_s_stream,
+ .g_frame_interval = ov965x_g_frame_interval,
+ .s_frame_interval = ov965x_s_frame_interval,
+};
+
+static const struct v4l2_subdev_internal_ops ov965x_sd_internal_ops = {
+ .open = ov965x_open,
+};
+
+static const struct v4l2_subdev_core_ops ov965x_core_ops = {
+ .s_power = ov965x_s_power,
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops ov965x_subdev_ops = {
+ .core = &ov965x_core_ops,
+ .pad = &ov965x_pad_ops,
+ .video = &ov965x_video_ops,
+};
+
+/*
+ * Reset and power down GPIOs configuration
+ */
+static int ov965x_configure_gpios(struct ov965x *ov965x,
+ const struct ov9650_platform_data *pdata)
+{
+ int ret, i;
+
+ ov965x->gpios[GPIO_PWDN] = pdata->gpio_pwdn;
+ ov965x->gpios[GPIO_RST] = pdata->gpio_reset;
+
+ for (i = 0; i < ARRAY_SIZE(ov965x->gpios); i++) {
+ int gpio = ov965x->gpios[i];
+
+ if (!gpio_is_valid(gpio))
+ continue;
+ ret = devm_gpio_request_one(&ov965x->client->dev, gpio,
+ GPIOF_OUT_INIT_HIGH, "OV965X");
+ if (ret < 0)
+ return ret;
+ v4l2_dbg(1, debug, &ov965x->sd, "set gpio %d to 1\n", gpio);
+
+ gpio_set_value(gpio, 1);
+ gpio_export(gpio, 0);
+ ov965x->gpios[i] = gpio;
+ }
+
+ return 0;
+}
+
+static int ov965x_detect_sensor(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov965x *ov965x = to_ov965x(sd);
+ u8 pid, ver;
+ int ret;
+
+ mutex_lock(&ov965x->lock);
+ __ov965x_set_power(ov965x, 1);
+ usleep_range(25000, 26000);
+
+ /* Check sensor revision */
+ ret = ov965x_read(client, REG_PID, &pid);
+ if (!ret)
+ ret = ov965x_read(client, REG_VER, &ver);
+
+ __ov965x_set_power(ov965x, 0);
+
+ if (!ret) {
+ ov965x->id = OV965X_ID(pid, ver);
+ if (ov965x->id == OV9650_ID || ov965x->id == OV9652_ID) {
+ v4l2_info(sd, "Found OV%04X sensor\n", ov965x->id);
+ } else {
+ v4l2_err(sd, "Sensor detection failed (%04X, %d)\n",
+ ov965x->id, ret);
+ ret = -ENODEV;
+ }
+ }
+ mutex_unlock(&ov965x->lock);
+
+ return ret;
+}
+
+static int ov965x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct ov9650_platform_data *pdata = client->dev.platform_data;
+ struct v4l2_subdev *sd;
+ struct ov965x *ov965x;
+ int ret;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "platform data not specified\n");
+ return -EINVAL;
+ }
+
+ if (pdata->mclk_frequency == 0) {
+ dev_err(&client->dev, "MCLK frequency not specified\n");
+ return -EINVAL;
+ }
+
+ ov965x = devm_kzalloc(&client->dev, sizeof(*ov965x), GFP_KERNEL);
+ if (!ov965x)
+ return -ENOMEM;
+
+ mutex_init(&ov965x->lock);
+ ov965x->client = client;
+ ov965x->mclk_frequency = pdata->mclk_frequency;
+
+ sd = &ov965x->sd;
+ v4l2_i2c_subdev_init(sd, client, &ov965x_subdev_ops);
+ strlcpy(sd->name, DRIVER_NAME, sizeof(sd->name));
+
+ sd->internal_ops = &ov965x_sd_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ ret = ov965x_configure_gpios(ov965x, pdata);
+ if (ret < 0)
+ return ret;
+
+ ov965x->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+ ret = media_entity_init(&sd->entity, 1, &ov965x->pad, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = ov965x_initialize_controls(ov965x);
+ if (ret < 0)
+ goto err_me;
+
+ ov965x_get_default_format(&ov965x->format);
+ ov965x->frame_size = &ov965x_framesizes[0];
+ ov965x->fiv = &ov965x_intervals[0];
+
+ ret = ov965x_detect_sensor(sd);
+ if (ret < 0)
+ goto err_ctrls;
+
+ /* Update exposure time min/max to match frame format */
+ ov965x_update_exposure_ctrl(ov965x);
+
+ return 0;
+err_ctrls:
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+err_me:
+ media_entity_cleanup(&sd->entity);
+ return ret;
+}
+
+static int ov965x_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
+
+static const struct i2c_device_id ov965x_id[] = {
+ { "OV9650", 0 },
+ { "OV9652", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, ov965x_id);
+
+static struct i2c_driver ov965x_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = ov965x_probe,
+ .remove = ov965x_remove,
+ .id_table = ov965x_id,
+};
+
+module_i2c_driver(ov965x_i2c_driver);
+
+MODULE_AUTHOR("Sylwester Nawrocki <sylvester.nawrocki@gmail.com>");
+MODULE_DESCRIPTION("OV9650/OV9652 CMOS Image Sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/s5c73m3/Makefile b/drivers/media/i2c/s5c73m3/Makefile
new file mode 100644
index 000000000000..fa4df342d1f1
--- /dev/null
+++ b/drivers/media/i2c/s5c73m3/Makefile
@@ -0,0 +1,2 @@
+s5c73m3-objs := s5c73m3-core.o s5c73m3-spi.o s5c73m3-ctrls.o
+obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3.o
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
new file mode 100644
index 000000000000..5dbb65e1f6b7
--- /dev/null
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -0,0 +1,1704 @@
+/*
+ * Samsung LSI S5C73M3 8M pixel camera driver
+ *
+ * Copyright (C) 2012, Samsung Electronics, Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sizes.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/videodev2.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+#include <media/s5c73m3.h>
+
+#include "s5c73m3.h"
+
+int s5c73m3_dbg;
+module_param_named(debug, s5c73m3_dbg, int, 0644);
+
+static int boot_from_rom = 1;
+module_param(boot_from_rom, int, 0644);
+
+static int update_fw;
+module_param(update_fw, int, 0644);
+
+#define S5C73M3_EMBEDDED_DATA_MAXLEN SZ_4K
+
+static const char * const s5c73m3_supply_names[S5C73M3_MAX_SUPPLIES] = {
+ "vdd-int", /* Digital Core supply (1.2V), CAM_ISP_CORE_1.2V */
+ "vdda", /* Analog Core supply (1.2V), CAM_SENSOR_CORE_1.2V */
+ "vdd-reg", /* Regulator input supply (2.8V), CAM_SENSOR_A2.8V */
+ "vddio-host", /* Digital Host I/O power supply (1.8V...2.8V),
+ CAM_ISP_SENSOR_1.8V */
+ "vddio-cis", /* Digital CIS I/O power (1.2V...1.8V),
+ CAM_ISP_MIPI_1.2V */
+ "vdd-af", /* Lens, CAM_AF_2.8V */
+};
+
+static const struct s5c73m3_frame_size s5c73m3_isp_resolutions[] = {
+ { 320, 240, COMM_CHG_MODE_YUV_320_240 },
+ { 352, 288, COMM_CHG_MODE_YUV_352_288 },
+ { 640, 480, COMM_CHG_MODE_YUV_640_480 },
+ { 880, 720, COMM_CHG_MODE_YUV_880_720 },
+ { 960, 720, COMM_CHG_MODE_YUV_960_720 },
+ { 1008, 672, COMM_CHG_MODE_YUV_1008_672 },
+ { 1184, 666, COMM_CHG_MODE_YUV_1184_666 },
+ { 1280, 720, COMM_CHG_MODE_YUV_1280_720 },
+ { 1536, 864, COMM_CHG_MODE_YUV_1536_864 },
+ { 1600, 1200, COMM_CHG_MODE_YUV_1600_1200 },
+ { 1632, 1224, COMM_CHG_MODE_YUV_1632_1224 },
+ { 1920, 1080, COMM_CHG_MODE_YUV_1920_1080 },
+ { 1920, 1440, COMM_CHG_MODE_YUV_1920_1440 },
+ { 2304, 1296, COMM_CHG_MODE_YUV_2304_1296 },
+ { 3264, 2448, COMM_CHG_MODE_YUV_3264_2448 },
+};
+
+static const struct s5c73m3_frame_size s5c73m3_jpeg_resolutions[] = {
+ { 640, 480, COMM_CHG_MODE_JPEG_640_480 },
+ { 800, 450, COMM_CHG_MODE_JPEG_800_450 },
+ { 800, 600, COMM_CHG_MODE_JPEG_800_600 },
+ { 1024, 768, COMM_CHG_MODE_JPEG_1024_768 },
+ { 1280, 720, COMM_CHG_MODE_JPEG_1280_720 },
+ { 1280, 960, COMM_CHG_MODE_JPEG_1280_960 },
+ { 1600, 900, COMM_CHG_MODE_JPEG_1600_900 },
+ { 1600, 1200, COMM_CHG_MODE_JPEG_1600_1200 },
+ { 2048, 1152, COMM_CHG_MODE_JPEG_2048_1152 },
+ { 2048, 1536, COMM_CHG_MODE_JPEG_2048_1536 },
+ { 2560, 1440, COMM_CHG_MODE_JPEG_2560_1440 },
+ { 2560, 1920, COMM_CHG_MODE_JPEG_2560_1920 },
+ { 3264, 1836, COMM_CHG_MODE_JPEG_3264_1836 },
+ { 3264, 2176, COMM_CHG_MODE_JPEG_3264_2176 },
+ { 3264, 2448, COMM_CHG_MODE_JPEG_3264_2448 },
+};
+
+static const struct s5c73m3_frame_size * const s5c73m3_resolutions[] = {
+ [RES_ISP] = s5c73m3_isp_resolutions,
+ [RES_JPEG] = s5c73m3_jpeg_resolutions
+};
+
+static const int s5c73m3_resolutions_len[] = {
+ [RES_ISP] = ARRAY_SIZE(s5c73m3_isp_resolutions),
+ [RES_JPEG] = ARRAY_SIZE(s5c73m3_jpeg_resolutions)
+};
+
+static const struct s5c73m3_interval s5c73m3_intervals[] = {
+ { COMM_FRAME_RATE_FIXED_7FPS, {142857, 1000000}, {3264, 2448} },
+ { COMM_FRAME_RATE_FIXED_15FPS, {66667, 1000000}, {3264, 2448} },
+ { COMM_FRAME_RATE_FIXED_20FPS, {50000, 1000000}, {2304, 1296} },
+ { COMM_FRAME_RATE_FIXED_30FPS, {33333, 1000000}, {2304, 1296} },
+};
+
+#define S5C73M3_DEFAULT_FRAME_INTERVAL 3 /* 30 fps */
+
+static void s5c73m3_fill_mbus_fmt(struct v4l2_mbus_framefmt *mf,
+ const struct s5c73m3_frame_size *fs,
+ u32 code)
+{
+ mf->width = fs->width;
+ mf->height = fs->height;
+ mf->code = code;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->field = V4L2_FIELD_NONE;
+}
+
+static int s5c73m3_i2c_write(struct i2c_client *client, u16 addr, u16 data)
+{
+ u8 buf[4] = { addr >> 8, addr & 0xff, data >> 8, data & 0xff };
+
+ int ret = i2c_master_send(client, buf, sizeof(buf));
+
+ v4l_dbg(4, s5c73m3_dbg, client, "%s: addr 0x%04x, data 0x%04x\n",
+ __func__, addr, data);
+
+ if (ret == 4)
+ return 0;
+
+ return ret < 0 ? ret : -EREMOTEIO;
+}
+
+static int s5c73m3_i2c_read(struct i2c_client *client, u16 addr, u16 *data)
+{
+ int ret;
+ u8 rbuf[2], wbuf[2] = { addr >> 8, addr & 0xff };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(wbuf),
+ .buf = wbuf
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(rbuf),
+ .buf = rbuf
+ }
+ };
+ /*
+ * Issue a repeated START after writing the two address bytes and
+ * a single STOP only after reading the data bytes.
+ */
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret == 2) {
+ *data = be16_to_cpup((u16 *)rbuf);
+ v4l2_dbg(4, s5c73m3_dbg, client,
+ "%s: addr: 0x%04x, data: 0x%04x\n",
+ __func__, addr, *data);
+ return 0;
+ }
+
+ v4l2_err(client, "I2C read failed: addr: %04x, (%d)\n", addr, ret);
+
+ return ret >= 0 ? -EREMOTEIO : ret;
+}
+
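+ /*
+ * Registers are accessed indirectly: the 32-bit address is loaded
+ * into REG_CMDWR_ADDRH/REG_CMDWR_ADDRL (or the RD variants) only
+ * when the respective half changes, data goes through
+ * REG_CMDBUF_ADDR, and the cached address advances by 2 after
+ * each 16-bit transfer.
+ */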
+int s5c73m3_write(struct s5c73m3 *state, u32 addr, u16 data)
+{
+ struct i2c_client *client = state->i2c_client;
+ int ret;
+
+ if ((addr ^ state->i2c_write_address) & 0xffff0000) {
+ ret = s5c73m3_i2c_write(client, REG_CMDWR_ADDRH, addr >> 16);
+ if (ret < 0) {
+ state->i2c_write_address = 0;
+ return ret;
+ }
+ }
+
+ if ((addr ^ state->i2c_write_address) & 0xffff) {
+ ret = s5c73m3_i2c_write(client, REG_CMDWR_ADDRL, addr & 0xffff);
+ if (ret < 0) {
+ state->i2c_write_address = 0;
+ return ret;
+ }
+ }
+
+ state->i2c_write_address = addr;
+
+ ret = s5c73m3_i2c_write(client, REG_CMDBUF_ADDR, data);
+ if (ret < 0)
+ return ret;
+
+ state->i2c_write_address += 2;
+
+ return ret;
+}
+
+int s5c73m3_read(struct s5c73m3 *state, u32 addr, u16 *data)
+{
+ struct i2c_client *client = state->i2c_client;
+ int ret;
+
+ if ((addr ^ state->i2c_read_address) & 0xffff0000) {
+ ret = s5c73m3_i2c_write(client, REG_CMDRD_ADDRH, addr >> 16);
+ if (ret < 0) {
+ state->i2c_read_address = 0;
+ return ret;
+ }
+ }
+
+ if ((addr ^ state->i2c_read_address) & 0xffff) {
+ ret = s5c73m3_i2c_write(client, REG_CMDRD_ADDRL, addr & 0xffff);
+ if (ret < 0) {
+ state->i2c_read_address = 0;
+ return ret;
+ }
+ }
+
+ state->i2c_read_address = addr;
+
+ ret = s5c73m3_i2c_read(client, REG_CMDBUF_ADDR, data);
+ if (ret < 0)
+ return ret;
+
+ state->i2c_read_address += 2;
+
+ return ret;
+}
+
+static int s5c73m3_check_status(struct s5c73m3 *state, unsigned int value)
+{
+ unsigned long start = jiffies;
+ unsigned long end = start + msecs_to_jiffies(2000);
+ int ret = 0;
+ u16 status;
+ int count = 0;
+
+ while (time_is_after_jiffies(end)) {
+ ret = s5c73m3_read(state, REG_STATUS, &status);
+ if (ret < 0 || status == value)
+ break;
+ usleep_range(500, 1000);
+ ++count;
+ }
+
+ if (count > 0)
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd,
+ "status check took %dms\n",
+ jiffies_to_msecs(jiffies - start));
+
+ if (ret == 0 && status != value) {
+ u16 i2c_status = 0;
+ u16 i2c_seq_status = 0;
+
+ s5c73m3_read(state, REG_I2C_STATUS, &i2c_status);
+ s5c73m3_read(state, REG_I2C_SEQ_STATUS, &i2c_seq_status);
+
+ v4l2_err(&state->sensor_sd,
+ "wrong status %#x, expected: %#x, i2c_status: %#x/%#x\n",
+ status, value, i2c_status, i2c_seq_status);
+
+ return -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+int s5c73m3_isp_command(struct s5c73m3 *state, u16 command, u16 data)
+{
+ int ret;
+
+ ret = s5c73m3_check_status(state, REG_STATUS_ISP_COMMAND_COMPLETED);
+ if (ret < 0)
+ return ret;
+
+ ret = s5c73m3_write(state, 0x00095000, command);
+ if (ret < 0)
+ return ret;
+
+ ret = s5c73m3_write(state, 0x00095002, data);
+ if (ret < 0)
+ return ret;
+
+ return s5c73m3_write(state, REG_STATUS, 0x0001);
+}
+
+static int s5c73m3_isp_comm_result(struct s5c73m3 *state, u16 command,
+ u16 *data)
+{
+ return s5c73m3_read(state, COMM_RESULT_OFFSET + command, data);
+}
+
+static int s5c73m3_set_af_softlanding(struct s5c73m3 *state)
+{
+ unsigned long start = jiffies;
+ u16 af_softlanding;
+ int count = 0;
+ int ret;
+ const char *msg;
+
+ ret = s5c73m3_isp_command(state, COMM_AF_SOFTLANDING,
+ COMM_AF_SOFTLANDING_ON);
+ if (ret < 0) {
+ v4l2_info(&state->sensor_sd, "AF soft-landing failed\n");
+ return ret;
+ }
+
+ for (;;) {
+ ret = s5c73m3_isp_comm_result(state, COMM_AF_SOFTLANDING,
+ &af_softlanding);
+ if (ret < 0) {
+ msg = "failed";
+ break;
+ }
+ if (af_softlanding == COMM_AF_SOFTLANDING_RES_COMPLETE) {
+ msg = "succeeded";
+ break;
+ }
+ if (++count > 100) {
+ ret = -ETIME;
+ msg = "timed out";
+ break;
+ }
+ msleep(25);
+ }
+
+ v4l2_info(&state->sensor_sd, "AF soft-landing %s after %dms\n",
+ msg, jiffies_to_msecs(jiffies - start));
+
+ return ret;
+}
+
+static int s5c73m3_load_fw(struct v4l2_subdev *sd)
+{
+ struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
+ struct i2c_client *client = state->i2c_client;
+ const struct firmware *fw;
+ int ret;
+ char fw_name[20];
+
+ snprintf(fw_name, sizeof(fw_name), "SlimISP_%.2s.bin",
+ state->fw_file_version);
+ ret = request_firmware(&fw, fw_name, &client->dev);
+ if (ret < 0) {
+ v4l2_err(sd, "Firmware request failed (%s)\n", fw_name);
+ return -EINVAL;
+ }
+
+ v4l2_info(sd, "Loading firmware (%s, %zu B)\n", fw_name, fw->size);
+
+ ret = s5c73m3_spi_write(state, fw->data, fw->size, 64);
+
+ if (ret >= 0)
+ state->isp_ready = 1;
+ else
+ v4l2_err(sd, "SPI write failed\n");
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int s5c73m3_set_frame_size(struct s5c73m3 *state)
+{
+ const struct s5c73m3_frame_size *prev_size =
+ state->sensor_pix_size[RES_ISP];
+ const struct s5c73m3_frame_size *cap_size =
+ state->sensor_pix_size[RES_JPEG];
+ unsigned int chg_mode;
+
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd,
+ "Preview size: %dx%d, reg_val: 0x%x\n",
+ prev_size->width, prev_size->height, prev_size->reg_val);
+
+ chg_mode = prev_size->reg_val | COMM_CHG_MODE_NEW;
+
+ if (state->mbus_code == S5C73M3_JPEG_FMT) {
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd,
+ "Capture size: %dx%d, reg_val: 0x%x\n",
+ cap_size->width, cap_size->height, cap_size->reg_val);
+ chg_mode |= cap_size->reg_val;
+ }
+
+ return s5c73m3_isp_command(state, COMM_CHG_MODE, chg_mode);
+}
+
+static int s5c73m3_set_frame_rate(struct s5c73m3 *state)
+{
+ int ret;
+
+ if (state->ctrls.stabilization->val)
+ return 0;
+
+ if (WARN_ON(state->fiv == NULL))
+ return -EINVAL;
+
+ ret = s5c73m3_isp_command(state, COMM_FRAME_RATE, state->fiv->fps_reg);
+ if (!ret)
+ state->apply_fiv = 0;
+
+ return ret;
+}
+
+static int __s5c73m3_s_stream(struct s5c73m3 *state, struct v4l2_subdev *sd,
+ int on)
+{
+ u16 mode;
+ int ret;
+
+ if (on && state->apply_fmt) {
+ if (state->mbus_code == S5C73M3_JPEG_FMT)
+ mode = COMM_IMG_OUTPUT_INTERLEAVED;
+ else
+ mode = COMM_IMG_OUTPUT_YUV;
+
+ ret = s5c73m3_isp_command(state, COMM_IMG_OUTPUT, mode);
+ if (!ret)
+ ret = s5c73m3_set_frame_size(state);
+ if (ret)
+ return ret;
+ state->apply_fmt = 0;
+ }
+
+ ret = s5c73m3_isp_command(state, COMM_SENSOR_STREAMING, !!on);
+ if (ret)
+ return ret;
+
+ state->streaming = !!on;
+
+ if (!on)
+ return ret;
+
+ if (state->apply_fiv) {
+ ret = s5c73m3_set_frame_rate(state);
+ if (ret < 0)
+ v4l2_err(sd, "Error setting frame rate(%d)\n", ret);
+ }
+
+ return s5c73m3_check_status(state, REG_STATUS_ISP_COMMAND_COMPLETED);
+}
+
+static int s5c73m3_oif_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ int ret;
+
+ mutex_lock(&state->lock);
+ ret = __s5c73m3_s_stream(state, sd, on);
+ mutex_unlock(&state->lock);
+
+ return ret;
+}
+
+static int s5c73m3_system_status_wait(struct s5c73m3 *state, u32 value,
+ unsigned int delay, unsigned int steps)
+{
+ u16 reg = 0;
+
+ while (steps-- > 0) {
+ int ret = s5c73m3_read(state, 0x30100010, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg == value)
+ return 0;
+ usleep_range(delay, delay + 25);
+ }
+ return -ETIMEDOUT;
+}
+
+static int s5c73m3_read_fw_version(struct s5c73m3 *state)
+{
+ struct v4l2_subdev *sd = &state->sensor_sd;
+ int i, ret;
+ u16 data[2];
+ int offset;
+
+ offset = state->isp_ready ? 0x60 : 0;
+
+ for (i = 0; i < S5C73M3_SENSOR_FW_LEN / 2; i++) {
+ ret = s5c73m3_read(state, offset + i * 2, data);
+ if (ret < 0)
+ return ret;
+ state->sensor_fw[i * 2] = (char)(*data & 0xff);
+ state->sensor_fw[i * 2 + 1] = (char)(*data >> 8);
+ }
+ state->sensor_fw[S5C73M3_SENSOR_FW_LEN] = '\0';
+
+ for (i = 0; i < S5C73M3_SENSOR_TYPE_LEN / 2; i++) {
+ ret = s5c73m3_read(state, offset + 6 + i * 2, data);
+ if (ret < 0)
+ return ret;
+ state->sensor_type[i * 2] = (char)(*data & 0xff);
+ state->sensor_type[i * 2 + 1] = (char)(*data >> 8);
+ }
+ state->sensor_type[S5C73M3_SENSOR_TYPE_LEN] = '\0';
+
+ ret = s5c73m3_read(state, offset + 0x14, data);
+ if (ret >= 0) {
+ ret = s5c73m3_read(state, offset + 0x16, data + 1);
+ if (ret >= 0)
+ state->fw_size = data[0] + (data[1] << 16);
+ }
+
+ v4l2_info(sd, "Sensor type: %s, FW version: %s\n",
+ state->sensor_type, state->sensor_fw);
+ return ret;
+}
+
+static int s5c73m3_fw_update_from(struct s5c73m3 *state)
+{
+ struct v4l2_subdev *sd = &state->sensor_sd;
+ u16 status = COMM_FW_UPDATE_NOT_READY;
+ int ret;
+ int count = 0;
+
+ v4l2_warn(sd, "Updating F-ROM firmware.\n");
+ do {
+ if (status == COMM_FW_UPDATE_NOT_READY) {
+ ret = s5c73m3_isp_command(state, COMM_FW_UPDATE, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = s5c73m3_read(state, 0x00095906, &status);
+ if (ret < 0)
+ return ret;
+ switch (status) {
+ case COMM_FW_UPDATE_FAIL:
+ v4l2_warn(sd, "Updating F-ROM firmware failed.\n");
+ return -EIO;
+ case COMM_FW_UPDATE_SUCCESS:
+ v4l2_warn(sd, "Updating F-ROM firmware finished.\n");
+ return 0;
+ }
+ ++count;
+ msleep(20);
+ } while (count < 500);
+
+ v4l2_warn(sd, "Updating F-ROM firmware timed out.\n");
+ return -ETIMEDOUT;
+}
+
+static int s5c73m3_spi_boot(struct s5c73m3 *state, bool load_fw)
+{
+ struct v4l2_subdev *sd = &state->sensor_sd;
+ int ret;
+
+ /* Run ARM MCU */
+ ret = s5c73m3_write(state, 0x30000004, 0xffff);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(400, 500);
+
+ /* Check booting status */
+ ret = s5c73m3_system_status_wait(state, 0x0c, 100, 3);
+ if (ret < 0) {
+ v4l2_err(sd, "booting failed: %d\n", ret);
+ return ret;
+ }
+
+ /* P,M,S and Boot Mode */
+ ret = s5c73m3_write(state, 0x30100014, 0x2146);
+ if (ret < 0)
+ return ret;
+
+ ret = s5c73m3_write(state, 0x30100010, 0x210c);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(200, 250);
+
+ /* Check SPI status */
+ ret = s5c73m3_system_status_wait(state, 0x210d, 100, 300);
+ if (ret < 0)
+ v4l2_err(sd, "SPI not ready: %d\n", ret);
+
+ /* Firmware download over SPI */
+ if (load_fw)
+ s5c73m3_load_fw(sd);
+
+ /* MCU reset */
+ ret = s5c73m3_write(state, 0x30000004, 0xfffd);
+ if (ret < 0)
+ return ret;
+
+ /* Remap */
+ ret = s5c73m3_write(state, 0x301000a4, 0x0183);
+ if (ret < 0)
+ return ret;
+
+ /* MCU restart */
+ ret = s5c73m3_write(state, 0x30000004, 0xffff);
+ if (ret < 0 || !load_fw)
+ return ret;
+
+ ret = s5c73m3_read_fw_version(state);
+ if (ret < 0)
+ return ret;
+
+ if (load_fw && update_fw) {
+ ret = s5c73m3_fw_update_from(state);
+ update_fw = 0;
+ }
+
+ return ret;
+}
+
+static int s5c73m3_set_timing_register_for_vdd(struct s5c73m3 *state)
+{
+ static const u32 regs[][2] = {
+ { 0x30100018, 0x0618 },
+ { 0x3010001c, 0x10c1 },
+ { 0x30100020, 0x249e }
+ };
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = s5c73m3_write(state, regs[i][0], regs[i][1]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void s5c73m3_set_fw_file_version(struct s5c73m3 *state)
+{
+ switch (state->sensor_fw[0]) {
+ case 'G':
+ case 'O':
+ state->fw_file_version[0] = 'G';
+ break;
+ case 'S':
+ case 'Z':
+ state->fw_file_version[0] = 'Z';
+ break;
+ }
+
+ switch (state->sensor_fw[1]) {
+ case 'C'...'F':
+ state->fw_file_version[1] = state->sensor_fw[1];
+ break;
+ }
+}
+
+static int s5c73m3_get_fw_version(struct s5c73m3 *state)
+{
+ struct v4l2_subdev *sd = &state->sensor_sd;
+ int ret;
+
+ /* Run ARM MCU */
+ ret = s5c73m3_write(state, 0x30000004, 0xffff);
+ if (ret < 0)
+ return ret;
+ usleep_range(400, 500);
+
+ /* Check booting status */
+ ret = s5c73m3_system_status_wait(state, 0x0c, 100, 3);
+ if (ret < 0) {
+ v4l2_err(sd, "%s: booting failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ /* Change I/O Driver Current in order to read from F-ROM */
+ ret = s5c73m3_write(state, 0x30100120, 0x0820);
+ ret = s5c73m3_write(state, 0x30100124, 0x0820);
+
+ /* Offset Setting */
+ ret = s5c73m3_write(state, 0x00010418, 0x0008);
+
+ /* P,M,S and Boot Mode */
+ ret = s5c73m3_write(state, 0x30100014, 0x2146);
+ if (ret < 0)
+ return ret;
+ ret = s5c73m3_write(state, 0x30100010, 0x230c);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(200, 250);
+
+ /* Check SPI status */
+ ret = s5c73m3_system_status_wait(state, 0x230e, 100, 300);
+ if (ret < 0)
+ v4l2_err(sd, "SPI not ready: %d\n", ret);
+
+ /* ARM reset */
+ ret = s5c73m3_write(state, 0x30000004, 0xfffd);
+ if (ret < 0)
+ return ret;
+
+ /* Remap */
+ ret = s5c73m3_write(state, 0x301000a4, 0x0183);
+ if (ret < 0)
+ return ret;
+
+ s5c73m3_set_timing_register_for_vdd(state);
+
+ ret = s5c73m3_read_fw_version(state);
+
+ s5c73m3_set_fw_file_version(state);
+
+ return ret;
+}
+
+static int s5c73m3_rom_boot(struct s5c73m3 *state, bool load_fw)
+{
+ static const u32 boot_regs[][2] = {
+ { 0x3100010c, 0x0044 },
+ { 0x31000108, 0x000d },
+ { 0x31000304, 0x0001 },
+ { 0x00010000, 0x5800 },
+ { 0x00010002, 0x0002 },
+ { 0x31000000, 0x0001 },
+ { 0x30100014, 0x1b85 },
+ { 0x30100010, 0x230c }
+ };
+ struct v4l2_subdev *sd = &state->sensor_sd;
+ int i, ret;
+
+ /* Run ARM MCU */
+ ret = s5c73m3_write(state, 0x30000004, 0xffff);
+ if (ret < 0)
+ return ret;
+ usleep_range(400, 450);
+
+ /* Check booting status */
+ ret = s5c73m3_system_status_wait(state, 0x0c, 100, 4);
+ if (ret < 0) {
+ v4l2_err(sd, "Booting failed: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(boot_regs); i++) {
+ ret = s5c73m3_write(state, boot_regs[i][0], boot_regs[i][1]);
+ if (ret < 0)
+ return ret;
+ }
+ msleep(200);
+
+ /* Check the binary read status */
+ ret = s5c73m3_system_status_wait(state, 0x230e, 1000, 150);
+ if (ret < 0) {
+ v4l2_err(sd, "Binary read failed: %d\n", ret);
+ return ret;
+ }
+
+ /* ARM reset */
+ ret = s5c73m3_write(state, 0x30000004, 0xfffd);
+ if (ret < 0)
+ return ret;
+ /* Remap */
+ ret = s5c73m3_write(state, 0x301000a4, 0x0183);
+ if (ret < 0)
+ return ret;
+ /* MCU re-start */
+ ret = s5c73m3_write(state, 0x30000004, 0xffff);
+ if (ret < 0)
+ return ret;
+
+ state->isp_ready = 1;
+
+ return s5c73m3_read_fw_version(state);
+}
+
+static int s5c73m3_isp_init(struct s5c73m3 *state)
+{
+ int ret;
+
+ state->i2c_read_address = 0;
+ state->i2c_write_address = 0;
+
+ ret = s5c73m3_i2c_write(state->i2c_client, AHB_MSB_ADDR_PTR, 0x3310);
+ if (ret < 0)
+ return ret;
+
+ if (boot_from_rom)
+ return s5c73m3_rom_boot(state, true);
+ else
+ return s5c73m3_spi_boot(state, true);
+}
+
+static const struct s5c73m3_frame_size *s5c73m3_find_frame_size(
+ struct v4l2_mbus_framefmt *fmt,
+ enum s5c73m3_resolution_types idx)
+{
+ const struct s5c73m3_frame_size *fs;
+ const struct s5c73m3_frame_size *best_fs;
+ int best_dist = INT_MAX;
+ int i;
+
+ fs = s5c73m3_resolutions[idx];
+ best_fs = NULL;
+ for (i = 0; i < s5c73m3_resolutions_len[idx]; ++i) {
+ int dist = abs(fs->width - fmt->width) +
+ abs(fs->height - fmt->height);
+ if (dist < best_dist) {
+ best_dist = dist;
+ best_fs = fs;
+ }
+ ++fs;
+ }
+
+ return best_fs;
+}
+
+static void s5c73m3_oif_try_format(struct s5c73m3 *state,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt,
+ const struct s5c73m3_frame_size **fs)
+{
+ u32 code;
+
+ switch (fmt->pad) {
+ case OIF_ISP_PAD:
+ *fs = s5c73m3_find_frame_size(&fmt->format, RES_ISP);
+ code = S5C73M3_ISP_FMT;
+ break;
+ case OIF_JPEG_PAD:
+ *fs = s5c73m3_find_frame_size(&fmt->format, RES_JPEG);
+ code = S5C73M3_JPEG_FMT;
+ break;
+ case OIF_SOURCE_PAD:
+ default:
+ if (fmt->format.code == S5C73M3_JPEG_FMT)
+ code = S5C73M3_JPEG_FMT;
+ else
+ code = S5C73M3_ISP_FMT;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ *fs = state->oif_pix_size[RES_ISP];
+ else
+ *fs = s5c73m3_find_frame_size(
+ v4l2_subdev_get_try_format(fh,
+ OIF_ISP_PAD),
+ RES_ISP);
+ break;
+ }
+
+ s5c73m3_fill_mbus_fmt(&fmt->format, *fs, code);
+}
+
+static void s5c73m3_try_format(struct s5c73m3 *state,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt,
+ const struct s5c73m3_frame_size **fs)
+{
+ u32 code;
+
+ if (fmt->pad == S5C73M3_ISP_PAD) {
+ *fs = s5c73m3_find_frame_size(&fmt->format, RES_ISP);
+ code = S5C73M3_ISP_FMT;
+ } else {
+ *fs = s5c73m3_find_frame_size(&fmt->format, RES_JPEG);
+ code = S5C73M3_JPEG_FMT;
+ }
+
+ s5c73m3_fill_mbus_fmt(&fmt->format, *fs, code);
+}
+
+static int s5c73m3_oif_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+
+ if (fi->pad != OIF_SOURCE_PAD)
+ return -EINVAL;
+
+ mutex_lock(&state->lock);
+ fi->interval = state->fiv->interval;
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static int __s5c73m3_set_frame_interval(struct s5c73m3 *state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ const struct s5c73m3_frame_size *prev_size =
+ state->sensor_pix_size[RES_ISP];
+ const struct s5c73m3_interval *fiv = &s5c73m3_intervals[0];
+ unsigned int ret, min_err = UINT_MAX;
+ unsigned int i, fr_time;
+
+ if (fi->interval.denominator == 0)
+ return -EINVAL;
+
+ fr_time = fi->interval.numerator * 1000 / fi->interval.denominator;
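+ /*
+ * fr_time is in milliseconds; e.g. a requested 1/30 s interval
+ * gives 33, which matches the 30 fps table entry (33333 / 1000).
+ */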
+
+ for (i = 0; i < ARRAY_SIZE(s5c73m3_intervals); i++) {
+ const struct s5c73m3_interval *iv = &s5c73m3_intervals[i];
+
+ if (prev_size->width > iv->size.width ||
+ prev_size->height > iv->size.height)
+ continue;
+
+ ret = abs(iv->interval.numerator / 1000 - fr_time);
+ if (ret < min_err) {
+ fiv = iv;
+ min_err = ret;
+ }
+ }
+ state->fiv = fiv;
+
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd,
+ "Changed frame interval to %u us\n", fiv->interval.numerator);
+ return 0;
+}
+
+static int s5c73m3_oif_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ int ret;
+
+ if (fi->pad != OIF_SOURCE_PAD)
+ return -EINVAL;
+
+ v4l2_dbg(1, s5c73m3_dbg, sd, "Setting %d/%d frame interval\n",
+ fi->interval.numerator, fi->interval.denominator);
+
+ mutex_lock(&state->lock);
+
+ ret = __s5c73m3_set_frame_interval(state, fi);
+ if (!ret) {
+ if (state->streaming)
+ ret = s5c73m3_set_frame_rate(state);
+ else
+ state->apply_fiv = 1;
+ }
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ const struct s5c73m3_interval *fi;
+ int ret = 0;
+
+ if (fie->pad != OIF_SOURCE_PAD)
+ return -EINVAL;
+ if (fie->index >= ARRAY_SIZE(s5c73m3_intervals))
+ return -EINVAL;
+
+ mutex_lock(&state->lock);
+ fi = &s5c73m3_intervals[fie->index];
+ if (fie->width > fi->size.width || fie->height > fi->size.height)
+ ret = -EINVAL;
+ else
+ fie->interval = fi->interval;
+ mutex_unlock(&state->lock);
+
+ return ret;
+}
+
+static int s5c73m3_oif_get_pad_code(int pad, int index)
+{
+ if (pad == OIF_SOURCE_PAD) {
+ if (index > 1)
+ return -EINVAL;
+ return (index == 0) ? S5C73M3_ISP_FMT : S5C73M3_JPEG_FMT;
+ }
+
+ if (index > 0)
+ return -EINVAL;
+
+ return (pad == OIF_ISP_PAD) ? S5C73M3_ISP_FMT : S5C73M3_JPEG_FMT;
+}
+
+static int s5c73m3_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
+ const struct s5c73m3_frame_size *fs;
+ u32 code;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ fmt->format = *v4l2_subdev_get_try_format(fh, fmt->pad);
+ return 0;
+ }
+
+ mutex_lock(&state->lock);
+
+ switch (fmt->pad) {
+ case S5C73M3_ISP_PAD:
+ code = S5C73M3_ISP_FMT;
+ fs = state->sensor_pix_size[RES_ISP];
+ break;
+ case S5C73M3_JPEG_PAD:
+ code = S5C73M3_JPEG_FMT;
+ fs = state->sensor_pix_size[RES_JPEG];
+ break;
+ default:
+ mutex_unlock(&state->lock);
+ return -EINVAL;
+ }
+ s5c73m3_fill_mbus_fmt(&fmt->format, fs, code);
+
+ mutex_unlock(&state->lock);
+ return 0;
+}
+
+static int s5c73m3_oif_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ const struct s5c73m3_frame_size *fs;
+ u32 code;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ fmt->format = *v4l2_subdev_get_try_format(fh, fmt->pad);
+ return 0;
+ }
+
+ mutex_lock(&state->lock);
+
+ switch (fmt->pad) {
+ case OIF_ISP_PAD:
+ code = S5C73M3_ISP_FMT;
+ fs = state->oif_pix_size[RES_ISP];
+ break;
+ case OIF_JPEG_PAD:
+ code = S5C73M3_JPEG_FMT;
+ fs = state->oif_pix_size[RES_JPEG];
+ break;
+ case OIF_SOURCE_PAD:
+ code = state->mbus_code;
+ fs = state->oif_pix_size[RES_ISP];
+ break;
+ default:
+ mutex_unlock(&state->lock);
+ return -EINVAL;
+ }
+ s5c73m3_fill_mbus_fmt(&fmt->format, fs, code);
+
+ mutex_unlock(&state->lock);
+ return 0;
+}
+
+static int s5c73m3_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ const struct s5c73m3_frame_size *frame_size = NULL;
+ struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
+ struct v4l2_mbus_framefmt *mf;
+ int ret = 0;
+
+ mutex_lock(&state->lock);
+
+ s5c73m3_try_format(state, fh, fmt, &frame_size);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ } else {
+ switch (fmt->pad) {
+ case S5C73M3_ISP_PAD:
+ state->sensor_pix_size[RES_ISP] = frame_size;
+ break;
+ case S5C73M3_JPEG_PAD:
+ state->sensor_pix_size[RES_JPEG] = frame_size;
+ break;
+ default:
+ ret = -EBUSY;
+ }
+
+ if (state->streaming)
+ ret = -EBUSY;
+ else
+ state->apply_fmt = 1;
+ }
+
+ mutex_unlock(&state->lock);
+
+ return ret;
+}
+
+static int s5c73m3_oif_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ const struct s5c73m3_frame_size *frame_size = NULL;
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ struct v4l2_mbus_framefmt *mf;
+ int ret = 0;
+
+ mutex_lock(&state->lock);
+
+ s5c73m3_oif_try_format(state, fh, fmt, &frame_size);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ } else {
+ switch (fmt->pad) {
+ case OIF_ISP_PAD:
+ state->oif_pix_size[RES_ISP] = frame_size;
+ break;
+ case OIF_JPEG_PAD:
+ state->oif_pix_size[RES_JPEG] = frame_size;
+ break;
+ case OIF_SOURCE_PAD:
+ state->mbus_code = fmt->format.code;
+ break;
+ default:
+ ret = -EBUSY;
+ }
+
+ if (state->streaming)
+ ret = -EBUSY;
+ else
+ state->apply_fmt = 1;
+ }
+
+ mutex_unlock(&state->lock);
+
+ return ret;
+}
+
+static int s5c73m3_oif_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ int i;
+
+ if (pad != OIF_SOURCE_PAD || fd == NULL)
+ return -EINVAL;
+
+ mutex_lock(&state->lock);
+ fd->num_entries = 2;
+ for (i = 0; i < fd->num_entries; i++)
+ fd->entry[i] = state->frame_desc.entry[i];
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static int s5c73m3_oif_set_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ struct v4l2_mbus_frame_desc *frame_desc = &state->frame_desc;
+ int i;
+
+ if (pad != OIF_SOURCE_PAD || fd == NULL)
+ return -EINVAL;
+
+ fd->entry[0].length = 10 * SZ_1M;
+ fd->entry[1].length = max_t(u32, fd->entry[1].length,
+ S5C73M3_EMBEDDED_DATA_MAXLEN);
+ fd->num_entries = 2;
+
+ mutex_lock(&state->lock);
+ for (i = 0; i < fd->num_entries; i++)
+ frame_desc->entry[i] = fd->entry[i];
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static int s5c73m3_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const int codes[] = {
+ [S5C73M3_ISP_PAD] = S5C73M3_ISP_FMT,
+ [S5C73M3_JPEG_PAD] = S5C73M3_JPEG_FMT};
+
+ if (code->index > 0 || code->pad >= S5C73M3_NUM_PADS)
+ return -EINVAL;
+
+ code->code = codes[code->pad];
+
+ return 0;
+}
+
+static int s5c73m3_oif_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ int ret;
+
+ ret = s5c73m3_oif_get_pad_code(code->pad, code->index);
+ if (ret < 0)
+ return ret;
+
+ code->code = ret;
+
+ return 0;
+}
+
+static int s5c73m3_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int idx;
+
+ if (fse->pad == S5C73M3_ISP_PAD) {
+ if (fse->code != S5C73M3_ISP_FMT)
+ return -EINVAL;
+ idx = RES_ISP;
+ } else {
+ if (fse->code != S5C73M3_JPEG_FMT)
+ return -EINVAL;
+ idx = RES_JPEG;
+ }
+
+ if (fse->index >= s5c73m3_resolutions_len[idx])
+ return -EINVAL;
+
+ fse->min_width = s5c73m3_resolutions[idx][fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->max_height = s5c73m3_resolutions[idx][fse->index].height;
+ fse->min_height = fse->max_height;
+
+ return 0;
+}
+
+static int s5c73m3_oif_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int idx;
+
+ if (fse->pad == OIF_SOURCE_PAD) {
+ if (fse->index > 0)
+ return -EINVAL;
+
+ switch (fse->code) {
+ case S5C73M3_JPEG_FMT:
+ case S5C73M3_ISP_FMT: {
+ struct v4l2_mbus_framefmt *mf =
+ v4l2_subdev_get_try_format(fh, OIF_ISP_PAD);
+
+ fse->max_width = fse->min_width = mf->width;
+ fse->max_height = fse->min_height = mf->height;
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (fse->code != s5c73m3_oif_get_pad_code(fse->pad, 0))
+ return -EINVAL;
+
+ if (fse->pad == OIF_JPEG_PAD)
+ idx = RES_JPEG;
+ else
+ idx = RES_ISP;
+
+ if (fse->index >= s5c73m3_resolutions_len[idx])
+ return -EINVAL;
+
+ fse->min_width = s5c73m3_resolutions[idx][fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->max_height = s5c73m3_resolutions[idx][fse->index].height;
+ fse->min_height = fse->max_height;
+
+ return 0;
+}
+
+static int s5c73m3_oif_log_status(struct v4l2_subdev *sd)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+
+ v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
+
+ v4l2_info(sd, "power: %d, apply_fmt: %d\n", state->power,
+ state->apply_fmt);
+
+ return 0;
+}
+
+static int s5c73m3_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(fh, S5C73M3_ISP_PAD);
+ s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
+ S5C73M3_ISP_FMT);
+
+ mf = v4l2_subdev_get_try_format(fh, S5C73M3_JPEG_PAD);
+ s5c73m3_fill_mbus_fmt(mf, &s5c73m3_jpeg_resolutions[1],
+ S5C73M3_JPEG_FMT);
+
+ return 0;
+}
+
+static int s5c73m3_oif_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(fh, OIF_ISP_PAD);
+ s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
+ S5C73M3_ISP_FMT);
+
+ mf = v4l2_subdev_get_try_format(fh, OIF_JPEG_PAD);
+ s5c73m3_fill_mbus_fmt(mf, &s5c73m3_jpeg_resolutions[1],
+ S5C73M3_JPEG_FMT);
+
+ mf = v4l2_subdev_get_try_format(fh, OIF_SOURCE_PAD);
+ s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
+ S5C73M3_ISP_FMT);
+ return 0;
+}
+
+static int s5c73m3_gpio_set_value(struct s5c73m3 *priv, int id, u32 val)
+{
+ if (!gpio_is_valid(priv->gpio[id].gpio))
+ return 0;
+ gpio_set_value(priv->gpio[id].gpio, !!val);
+ return 1;
+}
+
+static int s5c73m3_gpio_assert(struct s5c73m3 *priv, int id)
+{
+ return s5c73m3_gpio_set_value(priv, id, priv->gpio[id].level);
+}
+
+static int s5c73m3_gpio_deassert(struct s5c73m3 *priv, int id)
+{
+ return s5c73m3_gpio_set_value(priv, id, !priv->gpio[id].level);
+}
+
+static int __s5c73m3_power_on(struct s5c73m3 *state)
+{
+ int i, ret;
+
+ for (i = 0; i < S5C73M3_MAX_SUPPLIES; i++) {
+ ret = regulator_enable(state->supplies[i].consumer);
+ if (ret)
+ goto err;
+ }
+
+ s5c73m3_gpio_deassert(state, STBY);
+ usleep_range(100, 200);
+
+ s5c73m3_gpio_deassert(state, RST);
+ usleep_range(50, 100);
+
+ return 0;
+err:
+ for (--i; i >= 0; i--)
+ regulator_disable(state->supplies[i].consumer);
+ return ret;
+}
+
+static int __s5c73m3_power_off(struct s5c73m3 *state)
+{
+ int i, ret;
+
+ if (s5c73m3_gpio_assert(state, RST))
+ usleep_range(10, 50);
+
+ if (s5c73m3_gpio_assert(state, STBY))
+ usleep_range(100, 200);
+ state->streaming = 0;
+ state->isp_ready = 0;
+
+ for (i = S5C73M3_MAX_SUPPLIES - 1; i >= 0; i--) {
+ ret = regulator_disable(state->supplies[i].consumer);
+ if (ret)
+ goto err;
+ }
+ return 0;
+err:
+ for (++i; i < S5C73M3_MAX_SUPPLIES; i++)
+ regulator_enable(state->supplies[i].consumer);
+
+ return ret;
+}
+
+static int s5c73m3_oif_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ int ret = 0;
+
+ mutex_lock(&state->lock);
+
+ if (on && !state->power) {
+ ret = __s5c73m3_power_on(state);
+ if (!ret)
+ ret = s5c73m3_isp_init(state);
+ if (!ret) {
+ state->apply_fiv = 1;
+ state->apply_fmt = 1;
+ }
+ } else if (!on == state->power) {
+ ret = s5c73m3_set_af_softlanding(state);
+ if (!ret)
+ ret = __s5c73m3_power_off(state);
+ else
+ v4l2_err(sd, "Soft landing lens failed\n");
+ }
+ if (!ret)
+ state->power += on ? 1 : -1;
+
+ v4l2_dbg(1, s5c73m3_dbg, sd, "%s: power: %d\n",
+ __func__, state->power);
+
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+static int s5c73m3_oif_registered(struct v4l2_subdev *sd)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ int ret;
+
+ ret = v4l2_device_register_subdev(sd->v4l2_dev, &state->sensor_sd);
+ if (ret) {
+ v4l2_err(sd->v4l2_dev, "Failed to register %s\n",
+ state->oif_sd.name);
+ return ret;
+ }
+
+ ret = media_entity_create_link(&state->sensor_sd.entity,
+ S5C73M3_ISP_PAD, &state->oif_sd.entity, OIF_ISP_PAD,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+
+ ret = media_entity_create_link(&state->sensor_sd.entity,
+ S5C73M3_JPEG_PAD, &state->oif_sd.entity, OIF_JPEG_PAD,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+
+ mutex_lock(&state->lock);
+ ret = __s5c73m3_power_on(state);
+ if (ret == 0)
+ s5c73m3_get_fw_version(state);
+
+ __s5c73m3_power_off(state);
+ mutex_unlock(&state->lock);
+
+ v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n",
+ __func__, ret ? "failed" : "succeeded", ret);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_internal_ops s5c73m3_internal_ops = {
+ .open = s5c73m3_open,
+};
+
+static const struct v4l2_subdev_pad_ops s5c73m3_pad_ops = {
+ .enum_mbus_code = s5c73m3_enum_mbus_code,
+ .enum_frame_size = s5c73m3_enum_frame_size,
+ .get_fmt = s5c73m3_get_fmt,
+ .set_fmt = s5c73m3_set_fmt,
+};
+
+static const struct v4l2_subdev_ops s5c73m3_subdev_ops = {
+ .pad = &s5c73m3_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops oif_internal_ops = {
+ .registered = s5c73m3_oif_registered,
+ .open = s5c73m3_oif_open,
+};
+
+static const struct v4l2_subdev_pad_ops s5c73m3_oif_pad_ops = {
+ .enum_mbus_code = s5c73m3_oif_enum_mbus_code,
+ .enum_frame_size = s5c73m3_oif_enum_frame_size,
+ .enum_frame_interval = s5c73m3_oif_enum_frame_interval,
+ .get_fmt = s5c73m3_oif_get_fmt,
+ .set_fmt = s5c73m3_oif_set_fmt,
+ .get_frame_desc = s5c73m3_oif_get_frame_desc,
+ .set_frame_desc = s5c73m3_oif_set_frame_desc,
+};
+
+static const struct v4l2_subdev_core_ops s5c73m3_oif_core_ops = {
+ .s_power = s5c73m3_oif_set_power,
+ .log_status = s5c73m3_oif_log_status,
+};
+
+static const struct v4l2_subdev_video_ops s5c73m3_oif_video_ops = {
+ .s_stream = s5c73m3_oif_s_stream,
+ .g_frame_interval = s5c73m3_oif_g_frame_interval,
+ .s_frame_interval = s5c73m3_oif_s_frame_interval,
+};
+
+static const struct v4l2_subdev_ops oif_subdev_ops = {
+ .core = &s5c73m3_oif_core_ops,
+ .pad = &s5c73m3_oif_pad_ops,
+ .video = &s5c73m3_oif_video_ops,
+};
+
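+/*
+ * Request the GPIO as an output at the given initial level and export it
+ * to sysfs; invalid GPIO numbers are silently skipped.
+ */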
+static int s5c73m3_configure_gpio(int nr, int val, const char *name)
+{
+ unsigned long flags = val ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+ int ret;
+
+ if (!gpio_is_valid(nr))
+ return 0;
+ ret = gpio_request_one(nr, flags, name);
+ if (!ret)
+ gpio_export(nr, 0);
+ return ret;
+}
+
+static int s5c73m3_free_gpios(struct s5c73m3 *state)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(state->gpio); i++) {
+ if (!gpio_is_valid(state->gpio[i].gpio))
+ continue;
+ gpio_free(state->gpio[i].gpio);
+ state->gpio[i].gpio = -EINVAL;
+ }
+ return 0;
+}
+
+static int s5c73m3_configure_gpios(struct s5c73m3 *state,
+ const struct s5c73m3_platform_data *pdata)
+{
+ const struct s5c73m3_gpio *gpio = &pdata->gpio_stby;
+ int ret;
+
+ state->gpio[STBY].gpio = -EINVAL;
+ state->gpio[RST].gpio = -EINVAL;
+
+ ret = s5c73m3_configure_gpio(gpio->gpio, gpio->level, "S5C73M3_STBY");
+ if (ret) {
+ s5c73m3_free_gpios(state);
+ return ret;
+ }
+ state->gpio[STBY] = *gpio;
+ if (gpio_is_valid(gpio->gpio))
+ gpio_set_value(gpio->gpio, 0);
+
+ gpio = &pdata->gpio_reset;
+ ret = s5c73m3_configure_gpio(gpio->gpio, gpio->level, "S5C73M3_RST");
+ if (ret) {
+ s5c73m3_free_gpios(state);
+ return ret;
+ }
+ state->gpio[RST] = *gpio;
+ if (gpio_is_valid(gpio->gpio))
+ gpio_set_value(gpio->gpio, 0);
+
+ return 0;
+}
+
+static int s5c73m3_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ const struct s5c73m3_platform_data *pdata = client->dev.platform_data;
+ struct v4l2_subdev *sd;
+ struct v4l2_subdev *oif_sd;
+ struct s5c73m3 *state;
+ int ret, i;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "Platform data not specified\n");
+ return -EINVAL;
+ }
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mutex_init(&state->lock);
+ sd = &state->sensor_sd;
+ oif_sd = &state->oif_sd;
+
+ v4l2_subdev_init(sd, &s5c73m3_subdev_ops);
+ sd->owner = client->driver->driver.owner;
+ v4l2_set_subdevdata(sd, state);
+ strlcpy(sd->name, "S5C73M3", sizeof(sd->name));
+
+ sd->internal_ops = &s5c73m3_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ state->sensor_pads[S5C73M3_JPEG_PAD].flags = MEDIA_PAD_FL_SOURCE;
+ state->sensor_pads[S5C73M3_ISP_PAD].flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+
+ ret = media_entity_init(&sd->entity, S5C73M3_NUM_PADS,
+ state->sensor_pads, 0);
+ if (ret < 0)
+ return ret;
+
+ v4l2_i2c_subdev_init(oif_sd, client, &oif_subdev_ops);
+ strcpy(oif_sd->name, "S5C73M3-OIF");
+
+ oif_sd->internal_ops = &oif_internal_ops;
+ oif_sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ state->oif_pads[OIF_ISP_PAD].flags = MEDIA_PAD_FL_SINK;
+ state->oif_pads[OIF_JPEG_PAD].flags = MEDIA_PAD_FL_SINK;
+ state->oif_pads[OIF_SOURCE_PAD].flags = MEDIA_PAD_FL_SOURCE;
+ oif_sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+
+ ret = media_entity_init(&oif_sd->entity, OIF_NUM_PADS,
+ state->oif_pads, 0);
+ if (ret < 0)
+ return ret;
+
+ state->mclk_frequency = pdata->mclk_frequency;
+ state->bus_type = pdata->bus_type;
+
+ ret = s5c73m3_configure_gpios(state, pdata);
+ if (ret)
+ goto out_err1;
+
+ for (i = 0; i < S5C73M3_MAX_SUPPLIES; i++)
+ state->supplies[i].supply = s5c73m3_supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, S5C73M3_MAX_SUPPLIES,
+ state->supplies);
+ if (ret) {
+ dev_err(dev, "failed to get regulators\n");
+ goto out_err2;
+ }
+
+ ret = s5c73m3_init_controls(state);
+ if (ret)
+ goto out_err2;
+
+ state->sensor_pix_size[RES_ISP] = &s5c73m3_isp_resolutions[1];
+ state->sensor_pix_size[RES_JPEG] = &s5c73m3_jpeg_resolutions[1];
+ state->oif_pix_size[RES_ISP] = state->sensor_pix_size[RES_ISP];
+ state->oif_pix_size[RES_JPEG] = state->sensor_pix_size[RES_JPEG];
+
+ state->mbus_code = S5C73M3_ISP_FMT;
+
+ state->fiv = &s5c73m3_intervals[S5C73M3_DEFAULT_FRAME_INTERVAL];
+
+ state->fw_file_version[0] = 'G';
+ state->fw_file_version[1] = 'C';
+
+ ret = s5c73m3_register_spi_driver(state);
+ if (ret < 0)
+ goto out_err2;
+
+ state->i2c_client = client;
+
+ v4l2_info(sd, "%s: completed succesfully\n", __func__);
+ return 0;
+
+out_err2:
+ s5c73m3_free_gpios(state);
+out_err1:
+ media_entity_cleanup(&sd->entity);
+ return ret;
+}
+
+static int s5c73m3_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
+
+ v4l2_device_unregister_subdev(sd);
+
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ media_entity_cleanup(&sd->entity);
+
+ s5c73m3_unregister_spi_driver(state);
+ s5c73m3_free_gpios(state);
+
+ return 0;
+}
+
+static const struct i2c_device_id s5c73m3_id[] = {
+ { DRIVER_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, s5c73m3_id);
+
+static struct i2c_driver s5c73m3_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = s5c73m3_probe,
+ .remove = s5c73m3_remove,
+ .id_table = s5c73m3_id,
+};
+
+module_i2c_driver(s5c73m3_i2c_driver);
+
+MODULE_DESCRIPTION("Samsung S5C73M3 camera driver");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
new file mode 100644
index 000000000000..8001cde1db1e
--- /dev/null
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
@@ -0,0 +1,563 @@
+/*
+ * Samsung LSI S5C73M3 8M pixel camera driver
+ *
+ * Copyright (C) 2012, Samsung Electronics, Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sizes.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/videodev2.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+#include <media/s5c73m3.h>
+
+#include "s5c73m3.h"
+
+static int s5c73m3_get_af_status(struct s5c73m3 *state, struct v4l2_ctrl *ctrl)
+{
+ u16 reg = REG_AF_STATUS_UNFOCUSED;
+
+ int ret = s5c73m3_read(state, REG_AF_STATUS, &reg);
+
+ switch (reg) {
+ case REG_CAF_STATUS_FIND_SEARCH_DIR:
+ case REG_AF_STATUS_FOCUSING:
+ case REG_CAF_STATUS_FOCUSING:
+ ctrl->val = V4L2_AUTO_FOCUS_STATUS_BUSY;
+ break;
+ case REG_CAF_STATUS_FOCUSED:
+ case REG_AF_STATUS_FOCUSED:
+ ctrl->val = V4L2_AUTO_FOCUS_STATUS_REACHED;
+ break;
+ default:
+ v4l2_info(&state->sensor_sd, "Unknown AF status %#x\n", reg);
+ /* Fall through */
+ case REG_CAF_STATUS_UNFOCUSED:
+ case REG_AF_STATUS_UNFOCUSED:
+ case REG_AF_STATUS_INVALID:
+ ctrl->val = V4L2_AUTO_FOCUS_STATUS_FAILED;
+ break;
+ }
+
+ return ret;
+}
+
+static int s5c73m3_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sensor_sd(ctrl);
+ struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
+ int ret;
+
+ if (state->power == 0)
+ return -EBUSY;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FOCUS_AUTO:
+ ret = s5c73m3_get_af_status(state, state->ctrls.af_status);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ return 0;
+}
+
+static int s5c73m3_set_colorfx(struct s5c73m3 *state, int val)
+{
+ static const unsigned short colorfx[][2] = {
+ { V4L2_COLORFX_NONE, COMM_IMAGE_EFFECT_NONE },
+ { V4L2_COLORFX_BW, COMM_IMAGE_EFFECT_MONO },
+ { V4L2_COLORFX_SEPIA, COMM_IMAGE_EFFECT_SEPIA },
+ { V4L2_COLORFX_NEGATIVE, COMM_IMAGE_EFFECT_NEGATIVE },
+ { V4L2_COLORFX_AQUA, COMM_IMAGE_EFFECT_AQUA },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(colorfx); i++) {
+ if (colorfx[i][0] != val)
+ continue;
+
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd,
+ "Setting %s color effect\n",
+ v4l2_ctrl_get_menu(state->ctrls.colorfx->id)[i]);
+
+ return s5c73m3_isp_command(state, COMM_IMAGE_EFFECT,
+ colorfx[i][1]);
+ }
+ return -EINVAL;
+}
+
+/* Set exposure metering/exposure bias */
+static int s5c73m3_set_exposure(struct s5c73m3 *state, int auto_exp)
+{
+ struct v4l2_subdev *sd = &state->sensor_sd;
+ struct s5c73m3_ctrls *ctrls = &state->ctrls;
+ int ret = 0;
+
+ if (ctrls->exposure_metering->is_new) {
+ u16 metering;
+
+ switch (ctrls->exposure_metering->val) {
+ case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED:
+ metering = COMM_METERING_CENTER;
+ break;
+ case V4L2_EXPOSURE_METERING_SPOT:
+ metering = COMM_METERING_SPOT;
+ break;
+ default:
+ metering = COMM_METERING_AVERAGE;
+ break;
+ }
+
+ ret = s5c73m3_isp_command(state, COMM_METERING, metering);
+ }
+
+ if (!ret && ctrls->exposure_bias->is_new) {
+ u16 exp_bias = ctrls->exposure_bias->val;
+ ret = s5c73m3_isp_command(state, COMM_EV, exp_bias);
+ }
+
+ v4l2_dbg(1, s5c73m3_dbg, sd,
+ "%s: exposure bias: %#x, metering: %#x (%d)\n", __func__,
+ ctrls->exposure_bias->val, ctrls->exposure_metering->val, ret);
+
+ return ret;
+}
+
+static int s5c73m3_set_white_balance(struct s5c73m3 *state, int val)
+{
+ static const unsigned short wb[][2] = {
+ { V4L2_WHITE_BALANCE_INCANDESCENT, COMM_AWB_MODE_INCANDESCENT},
+ { V4L2_WHITE_BALANCE_FLUORESCENT, COMM_AWB_MODE_FLUORESCENT1},
+ { V4L2_WHITE_BALANCE_FLUORESCENT_H, COMM_AWB_MODE_FLUORESCENT2},
+ { V4L2_WHITE_BALANCE_CLOUDY, COMM_AWB_MODE_CLOUDY},
+ { V4L2_WHITE_BALANCE_DAYLIGHT, COMM_AWB_MODE_DAYLIGHT},
+ { V4L2_WHITE_BALANCE_AUTO, COMM_AWB_MODE_AUTO},
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wb); i++) {
+ if (wb[i][0] != val)
+ continue;
+
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd,
+ "Setting white balance to: %s\n",
+ v4l2_ctrl_get_menu(state->ctrls.auto_wb->id)[i]);
+
+ return s5c73m3_isp_command(state, COMM_AWB_MODE, wb[i][1]);
+ }
+
+ return -EINVAL;
+}
+
+static int s5c73m3_af_run(struct s5c73m3 *state, bool on)
+{
+ struct s5c73m3_ctrls *c = &state->ctrls;
+
+ if (!on)
+ return s5c73m3_isp_command(state, COMM_AF_CON,
+ COMM_AF_CON_STOP);
+
+ if (c->focus_auto->val)
+ return s5c73m3_isp_command(state, COMM_AF_MODE,
+ COMM_AF_MODE_PREVIEW_CAF_START);
+
+ return s5c73m3_isp_command(state, COMM_AF_CON, COMM_AF_CON_START);
+}
+
+static int s5c73m3_3a_lock(struct s5c73m3 *state, struct v4l2_ctrl *ctrl)
+{
+ bool awb_lock = ctrl->val & V4L2_LOCK_WHITE_BALANCE;
+ bool ae_lock = ctrl->val & V4L2_LOCK_EXPOSURE;
+ bool af_lock = ctrl->val & V4L2_LOCK_FOCUS;
+ int ret = 0;
+
+ if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_EXPOSURE) {
+ ret = s5c73m3_isp_command(state, COMM_AE_CON,
+ ae_lock ? COMM_AE_STOP : COMM_AE_START);
+ if (ret)
+ return ret;
+ }
+
+ if (((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_WHITE_BALANCE)
+ && state->ctrls.auto_wb->val) {
+ ret = s5c73m3_isp_command(state, COMM_AWB_CON,
+ awb_lock ? COMM_AWB_STOP : COMM_AWB_START);
+ if (ret)
+ return ret;
+ }
+
+ if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS)
+ ret = s5c73m3_af_run(state, !af_lock);
+
+ return ret;
+}
+
+static int s5c73m3_set_auto_focus(struct s5c73m3 *state, int caf)
+{
+ struct s5c73m3_ctrls *c = &state->ctrls;
+ int ret = 1;
+
+ if (c->af_distance->is_new) {
+ u16 mode = (c->af_distance->val == V4L2_AUTO_FOCUS_RANGE_MACRO)
+ ? COMM_AF_MODE_MACRO : COMM_AF_MODE_NORMAL;
+ ret = s5c73m3_isp_command(state, COMM_AF_MODE, mode);
+ if (ret != 0)
+ return ret;
+ }
+
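+ /* ret == 0 only when the AF range has just been reprogrammed above. */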
+ if (!ret || (c->focus_auto->is_new && c->focus_auto->val) ||
+ c->af_start->is_new)
+ ret = s5c73m3_af_run(state, 1);
+ else if ((c->focus_auto->is_new && !c->focus_auto->val) ||
+ c->af_stop->is_new)
+ ret = s5c73m3_af_run(state, 0);
+ else
+ ret = 0;
+
+ return ret;
+}
+
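+/*
+ * Contrast/saturation/sharpness: map control values -2..2 onto the
+ * register encoding 0x0000..0x0004 (0, 1, 2, -1, -2).
+ */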
+static int s5c73m3_set_contrast(struct s5c73m3 *state, int val)
+{
+ u16 reg = (val < 0) ? -val + 2 : val;
+ return s5c73m3_isp_command(state, COMM_CONTRAST, reg);
+}
+
+static int s5c73m3_set_saturation(struct s5c73m3 *state, int val)
+{
+ u16 reg = (val < 0) ? -val + 2 : val;
+ return s5c73m3_isp_command(state, COMM_SATURATION, reg);
+}
+
+static int s5c73m3_set_sharpness(struct s5c73m3 *state, int val)
+{
+ u16 reg = (val < 0) ? -val + 2 : val;
+ return s5c73m3_isp_command(state, COMM_SHARPNESS, reg);
+}
+
+static int s5c73m3_set_iso(struct s5c73m3 *state, int val)
+{
+ u32 iso;
+
+ if (val == V4L2_ISO_SENSITIVITY_MANUAL)
+ iso = state->ctrls.iso->val + 1;
+ else
+ iso = 0;
+
+ return s5c73m3_isp_command(state, COMM_ISO, iso);
+}
+
+static int s5c73m3_set_stabilization(struct s5c73m3 *state, int val)
+{
+ struct v4l2_subdev *sd = &state->sensor_sd;
+
+ v4l2_dbg(1, s5c73m3_dbg, sd, "Image stabilization: %d\n", val);
+
+ return s5c73m3_isp_command(state, COMM_FRAME_RATE, val ?
+ COMM_FRAME_RATE_ANTI_SHAKE : COMM_FRAME_RATE_AUTO_SET);
+}
+
+static int s5c73m3_set_jpeg_quality(struct s5c73m3 *state, int quality)
+{
+ int reg;
+
+ if (quality <= 65)
+ reg = COMM_IMAGE_QUALITY_NORMAL;
+ else if (quality <= 75)
+ reg = COMM_IMAGE_QUALITY_FINE;
+ else
+ reg = COMM_IMAGE_QUALITY_SUPERFINE;
+
+ return s5c73m3_isp_command(state, COMM_IMAGE_QUALITY, reg);
+}
+
+static int s5c73m3_set_scene_program(struct s5c73m3 *state, int val)
+{
+ static const unsigned short scene_lookup[] = {
+ COMM_SCENE_MODE_NONE, /* V4L2_SCENE_MODE_NONE */
+ COMM_SCENE_MODE_AGAINST_LIGHT,/* V4L2_SCENE_MODE_BACKLIGHT */
+ COMM_SCENE_MODE_BEACH, /* V4L2_SCENE_MODE_BEACH_SNOW */
+ COMM_SCENE_MODE_CANDLE, /* V4L2_SCENE_MODE_CANDLE_LIGHT */
+ COMM_SCENE_MODE_DAWN, /* V4L2_SCENE_MODE_DAWN_DUSK */
+ COMM_SCENE_MODE_FALL, /* V4L2_SCENE_MODE_FALL_COLORS */
+ COMM_SCENE_MODE_FIRE, /* V4L2_SCENE_MODE_FIREWORKS */
+ COMM_SCENE_MODE_LANDSCAPE, /* V4L2_SCENE_MODE_LANDSCAPE */
+ COMM_SCENE_MODE_NIGHT, /* V4L2_SCENE_MODE_NIGHT */
+ COMM_SCENE_MODE_INDOOR, /* V4L2_SCENE_MODE_PARTY_INDOOR */
+ COMM_SCENE_MODE_PORTRAIT, /* V4L2_SCENE_MODE_PORTRAIT */
+ COMM_SCENE_MODE_SPORTS, /* V4L2_SCENE_MODE_SPORTS */
+ COMM_SCENE_MODE_SUNSET, /* V4L2_SCENE_MODE_SUNSET */
+ COMM_SCENE_MODE_TEXT, /* V4L2_SCENE_MODE_TEXT */
+ };
+
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd, "Setting %s scene mode\n",
+ v4l2_ctrl_get_menu(state->ctrls.scene_mode->id)[val]);
+
+ return s5c73m3_isp_command(state, COMM_SCENE_MODE, scene_lookup[val]);
+}
+
+static int s5c73m3_set_power_line_freq(struct s5c73m3 *state, int val)
+{
+ unsigned int pwr_line_freq = COMM_FLICKER_NONE;
+
+ switch (val) {
+ case V4L2_CID_POWER_LINE_FREQUENCY_DISABLED:
+ pwr_line_freq = COMM_FLICKER_NONE;
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_50HZ:
+ pwr_line_freq = COMM_FLICKER_AUTO_50HZ;
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_60HZ:
+ pwr_line_freq = COMM_FLICKER_AUTO_60HZ;
+ break;
+ default:
+ case V4L2_CID_POWER_LINE_FREQUENCY_AUTO:
+ pwr_line_freq = COMM_FLICKER_NONE;
+ }
+
+ return s5c73m3_isp_command(state, COMM_FLICKER_MODE, pwr_line_freq);
+}
+
+static int s5c73m3_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sensor_sd(ctrl);
+ struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
+ int ret = 0;
+
+ v4l2_dbg(1, s5c73m3_dbg, sd, "set_ctrl: %s, value: %d\n",
+ ctrl->name, ctrl->val);
+
+ mutex_lock(&state->lock);
+ /*
+ * If the device is not powered up by the host driver do
+ * not apply any controls to H/W at this time. Instead
+ * the controls will be restored right after power-up.
+ */
+ if (state->power == 0)
+ goto unlock;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_3A_LOCK:
+ ret = s5c73m3_3a_lock(state, ctrl);
+ break;
+
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ ret = s5c73m3_set_white_balance(state, ctrl->val);
+ break;
+
+ case V4L2_CID_CONTRAST:
+ ret = s5c73m3_set_contrast(state, ctrl->val);
+ break;
+
+ case V4L2_CID_COLORFX:
+ ret = s5c73m3_set_colorfx(state, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = s5c73m3_set_exposure(state, ctrl->val);
+ break;
+
+ case V4L2_CID_FOCUS_AUTO:
+ ret = s5c73m3_set_auto_focus(state, ctrl->val);
+ break;
+
+ case V4L2_CID_IMAGE_STABILIZATION:
+ ret = s5c73m3_set_stabilization(state, ctrl->val);
+ break;
+
+ case V4L2_CID_ISO_SENSITIVITY:
+ ret = s5c73m3_set_iso(state, ctrl->val);
+ break;
+
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ ret = s5c73m3_set_jpeg_quality(state, ctrl->val);
+ break;
+
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ ret = s5c73m3_set_power_line_freq(state, ctrl->val);
+ break;
+
+ case V4L2_CID_SATURATION:
+ ret = s5c73m3_set_saturation(state, ctrl->val);
+ break;
+
+ case V4L2_CID_SCENE_MODE:
+ ret = s5c73m3_set_scene_program(state, ctrl->val);
+ break;
+
+ case V4L2_CID_SHARPNESS:
+ ret = s5c73m3_set_sharpness(state, ctrl->val);
+ break;
+
+ case V4L2_CID_WIDE_DYNAMIC_RANGE:
+ ret = s5c73m3_isp_command(state, COMM_WDR, !!ctrl->val);
+ break;
+
+ case V4L2_CID_ZOOM_ABSOLUTE:
+ ret = s5c73m3_isp_command(state, COMM_ZOOM_STEP, ctrl->val);
+ break;
+ }
+unlock:
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops s5c73m3_ctrl_ops = {
+ .g_volatile_ctrl = s5c73m3_g_volatile_ctrl,
+ .s_ctrl = s5c73m3_s_ctrl,
+};
+
+/* Supported manual ISO values */
+static const s64 iso_qmenu[] = {
+ /* COMM_ISO: 0x0001...0x0004 */
+ 100, 200, 400, 800,
+};
+
+/* Supported exposure bias values (-2.0EV...+2.0EV) */
+static const s64 ev_bias_qmenu[] = {
+ /* COMM_EV: 0x0000...0x0008 */
+ -2000, -1500, -1000, -500, 0, 500, 1000, 1500, 2000
+};
+
+int s5c73m3_init_controls(struct s5c73m3 *state)
+{
+ const struct v4l2_ctrl_ops *ops = &s5c73m3_ctrl_ops;
+ struct s5c73m3_ctrls *ctrls = &state->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+
+ int ret = v4l2_ctrl_handler_init(hdl, 22);
+ if (ret)
+ return ret;
+
+ /* White balance */
+ ctrls->auto_wb = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
+ 9, ~0x15e, V4L2_WHITE_BALANCE_AUTO);
+
+ /* Exposure (only automatic exposure) */
+ ctrls->auto_exposure = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO, 0, ~0x01, V4L2_EXPOSURE_AUTO);
+
+ ctrls->exposure_bias = v4l2_ctrl_new_int_menu(hdl, ops,
+ V4L2_CID_AUTO_EXPOSURE_BIAS,
+ ARRAY_SIZE(ev_bias_qmenu) - 1,
+ ARRAY_SIZE(ev_bias_qmenu)/2 - 1,
+ ev_bias_qmenu);
+
+ ctrls->exposure_metering = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_METERING,
+ 2, ~0x7, V4L2_EXPOSURE_METERING_AVERAGE);
+
+ /* Auto focus */
+ ctrls->focus_auto = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_FOCUS_AUTO, 0, 1, 1, 0);
+
+ ctrls->af_start = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_AUTO_FOCUS_START, 0, 1, 1, 0);
+
+ ctrls->af_stop = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_AUTO_FOCUS_STOP, 0, 1, 1, 0);
+
+ ctrls->af_status = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_AUTO_FOCUS_STATUS, 0,
+ (V4L2_AUTO_FOCUS_STATUS_BUSY |
+ V4L2_AUTO_FOCUS_STATUS_REACHED |
+ V4L2_AUTO_FOCUS_STATUS_FAILED),
+ 0, V4L2_AUTO_FOCUS_STATUS_IDLE);
+
+ ctrls->af_distance = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_AUTO_FOCUS_RANGE,
+ V4L2_AUTO_FOCUS_RANGE_MACRO,
+ ~(1 << V4L2_AUTO_FOCUS_RANGE_NORMAL |
+ 1 << V4L2_AUTO_FOCUS_RANGE_MACRO),
+ V4L2_AUTO_FOCUS_RANGE_NORMAL);
+ /* ISO sensitivity */
+ ctrls->auto_iso = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_ISO_SENSITIVITY_AUTO, 1, 0,
+ V4L2_ISO_SENSITIVITY_AUTO);
+
+ ctrls->iso = v4l2_ctrl_new_int_menu(hdl, ops,
+ V4L2_CID_ISO_SENSITIVITY, ARRAY_SIZE(iso_qmenu) - 1,
+ ARRAY_SIZE(iso_qmenu)/2 - 1, iso_qmenu);
+
+ ctrls->contrast = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_CONTRAST, -2, 2, 1, 0);
+
+ ctrls->saturation = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_SATURATION, -2, 2, 1, 0);
+
+ ctrls->sharpness = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_SHARPNESS, -2, 2, 1, 0);
+
+ ctrls->zoom = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_ZOOM_ABSOLUTE, 0, 30, 1, 0);
+
+ ctrls->colorfx = v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_COLORFX,
+ V4L2_COLORFX_AQUA, ~0x40f, V4L2_COLORFX_NONE);
+
+ ctrls->wdr = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_WIDE_DYNAMIC_RANGE, 0, 1, 1, 0);
+
+ ctrls->stabilization = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_IMAGE_STABILIZATION, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO);
+
+ ctrls->jpeg_quality = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 1, 100, 1, 80);
+
+ ctrls->scene_mode = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_SCENE_MODE, V4L2_SCENE_MODE_TEXT, ~0x3fff,
+ V4L2_SCENE_MODE_NONE);
+
+ ctrls->aaa_lock = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_3A_LOCK, 0, 0x7, 0, 0);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+ }
+
+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_exposure, 0, false);
+ ctrls->auto_iso->flags |= V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_UPDATE;
+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_iso, 0, false);
+ ctrls->af_status->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ v4l2_ctrl_cluster(6, &ctrls->focus_auto);
+
+ state->sensor_sd.ctrl_handler = hdl;
+
+ return 0;
+}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
new file mode 100644
index 000000000000..6f3a9c00fe65
--- /dev/null
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
@@ -0,0 +1,156 @@
+/*
+ * Samsung LSI S5C73M3 8M pixel camera driver
+ *
+ * Copyright (C) 2012, Samsung Electronics, Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sizes.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include "s5c73m3.h"
+
+#define S5C73M3_SPI_DRV_NAME "S5C73M3-SPI"
+
+enum spi_direction {
+ SPI_DIR_RX,
+ SPI_DIR_TX
+};
+
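+/* Perform a single synchronous SPI transfer of len bytes in the given direction. */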
+static int spi_xmit(struct spi_device *spi_dev, void *addr, const int len,
+ enum spi_direction dir)
+{
+ struct spi_message msg;
+ int r;
+ struct spi_transfer xfer = {
+ .len = len,
+ };
+
+ if (dir == SPI_DIR_TX)
+ xfer.tx_buf = addr;
+ else
+ xfer.rx_buf = addr;
+
+ if (spi_dev == NULL) {
+ dev_err(&spi_dev->dev, "SPI device is uninitialized\n");
+ return -ENODEV;
+ }
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ r = spi_sync(spi_dev, &msg);
+ if (r < 0)
+ dev_err(&spi_dev->dev, "%s spi_sync failed %d\n", __func__, r);
+
+ return r;
+}
+
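+/*
+ * Send len bytes in tx_size chunks plus any remainder, followed by a
+ * 32-byte block of zeros padding/terminating the transfer.
+ */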
+int s5c73m3_spi_write(struct s5c73m3 *state, const void *addr,
+ const unsigned int len, const unsigned int tx_size)
+{
+ struct spi_device *spi_dev = state->spi_dev;
+ u32 count = len / tx_size;
+ u32 extra = len % tx_size;
+ unsigned int i, j = 0;
+ u8 padding[32];
+ int r = 0;
+
+ memset(padding, 0, sizeof(padding));
+
+ for (i = 0; i < count ; i++) {
+ r = spi_xmit(spi_dev, (void *)addr + j, tx_size, SPI_DIR_TX);
+ if (r < 0)
+ return r;
+ j += tx_size;
+ }
+
+ if (extra > 0) {
+ r = spi_xmit(spi_dev, (void *)addr + j, extra, SPI_DIR_TX);
+ if (r < 0)
+ return r;
+ }
+
+ return spi_xmit(spi_dev, padding, sizeof(padding), SPI_DIR_TX);
+}
+
+int s5c73m3_spi_read(struct s5c73m3 *state, void *addr,
+ const unsigned int len, const unsigned int tx_size)
+{
+ struct spi_device *spi_dev = state->spi_dev;
+ u32 count = len / tx_size;
+ u32 extra = len % tx_size;
+ unsigned int i, j = 0;
+ int r = 0;
+
+ for (i = 0; i < count ; i++) {
+ r = spi_xmit(spi_dev, addr + j, tx_size, SPI_DIR_RX);
+ if (r < 0)
+ return r;
+ j += tx_size;
+ }
+
+ if (extra > 0)
+ return spi_xmit(spi_dev, addr + j, extra, SPI_DIR_RX);
+
+ return 0;
+}
+
+static int s5c73m3_spi_probe(struct spi_device *spi)
+{
+ int r;
+ struct s5c73m3 *state = container_of(spi->dev.driver, struct s5c73m3,
+ spidrv.driver);
+ spi->bits_per_word = 32;
+
+ r = spi_setup(spi);
+ if (r < 0) {
+ dev_err(&spi->dev, "spi_setup() failed\n");
+ return r;
+ }
+
+ mutex_lock(&state->lock);
+ state->spi_dev = spi;
+ mutex_unlock(&state->lock);
+
+ v4l2_info(&state->sensor_sd, "S5C73M3 SPI probed successfully\n");
+ return 0;
+}
+
+static int s5c73m3_spi_remove(struct spi_device *spi)
+{
+ return 0;
+}
+
+int s5c73m3_register_spi_driver(struct s5c73m3 *state)
+{
+ struct spi_driver *spidrv = &state->spidrv;
+
+ spidrv->remove = s5c73m3_spi_remove;
+ spidrv->probe = s5c73m3_spi_probe;
+ spidrv->driver.name = S5C73M3_SPI_DRV_NAME;
+ spidrv->driver.bus = &spi_bus_type;
+ spidrv->driver.owner = THIS_MODULE;
+
+ return spi_register_driver(spidrv);
+}
+
+void s5c73m3_unregister_spi_driver(struct s5c73m3 *state)
+{
+ spi_unregister_driver(&state->spidrv);
+}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
new file mode 100644
index 000000000000..9d2c08652246
--- /dev/null
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -0,0 +1,459 @@
+/*
+ * Samsung LSI S5C73M3 8M pixel camera driver
+ *
+ * Copyright (C) 2012, Samsung Electronics, Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef S5C73M3_H_
+#define S5C73M3_H_
+
+#include <linux/kernel.h>
+#include <linux/regulator/consumer.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+#include <media/s5c73m3.h>
+
+#define DRIVER_NAME "S5C73M3"
+
+#define S5C73M3_ISP_FMT V4L2_MBUS_FMT_VYUY8_2X8
+#define S5C73M3_JPEG_FMT V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8
+
+/* Subdevs pad index definitions */
+enum s5c73m3_pads {
+ S5C73M3_ISP_PAD,
+ S5C73M3_JPEG_PAD,
+ S5C73M3_NUM_PADS
+};
+
+enum s5c73m3_oif_pads {
+ OIF_ISP_PAD,
+ OIF_JPEG_PAD,
+ OIF_SOURCE_PAD,
+ OIF_NUM_PADS
+};
+
+#define S5C73M3_SENSOR_FW_LEN 6
+#define S5C73M3_SENSOR_TYPE_LEN 12
+
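+/* Compose a 32-bit ISP register address from its 16-bit high and low parts. */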
+#define S5C73M3_REG(_addrh, _addrl) (((_addrh) << 16) | _addrl)
+
+#define AHB_MSB_ADDR_PTR 0xfcfc
+#define REG_CMDWR_ADDRH 0x0050
+#define REG_CMDWR_ADDRL 0x0054
+#define REG_CMDRD_ADDRH 0x0058
+#define REG_CMDRD_ADDRL 0x005c
+#define REG_CMDBUF_ADDR 0x0f14
+
+#define REG_I2C_SEQ_STATUS S5C73M3_REG(0x0009, 0x59A6)
+#define SEQ_END_PLL (1<<0x0)
+#define SEQ_END_SENSOR (1<<0x1)
+#define SEQ_END_GPIO (1<<0x2)
+#define SEQ_END_FROM (1<<0x3)
+#define SEQ_END_STABLE_AE_AWB (1<<0x4)
+#define SEQ_END_READY_I2C_CMD (1<<0x5)
+
+#define REG_I2C_STATUS S5C73M3_REG(0x0009, 0x599E)
+#define I2C_STATUS_CIS_I2C (1<<0x0)
+#define I2C_STATUS_AF_INIT (1<<0x1)
+#define I2C_STATUS_CAL_DATA (1<<0x2)
+#define I2C_STATUS_FRAME_COUNT (1<<0x3)
+#define I2C_STATUS_FROM_INIT (1<<0x4)
+#define I2C_STATUS_I2C_CIS_STREAM_OFF (1<<0x5)
+#define I2C_STATUS_I2C_N_CMD_OVER (1<<0x6)
+#define I2C_STATUS_I2C_N_CMD_MISMATCH (1<<0x7)
+#define I2C_STATUS_CHECK_BIN_CRC (1<<0x8)
+#define I2C_STATUS_EXCEPTION (1<<0x9)
+#define I2C_STATUS_INIF_INIT_STATE (0x8)
+
+#define REG_STATUS S5C73M3_REG(0x0009, 0x5080)
+#define REG_STATUS_BOOT_SUB_MAIN_ENTER 0xff01
+#define REG_STATUS_BOOT_SRAM_TIMING_OK 0xff02
+#define REG_STATUS_BOOT_INTERRUPTS_EN 0xff03
+#define REG_STATUS_BOOT_R_PLL_DONE 0xff04
+#define REG_STATUS_BOOT_R_PLL_LOCKTIME_DONE 0xff05
+#define REG_STATUS_BOOT_DELAY_COUNT_DONE 0xff06
+#define REG_STATUS_BOOT_I_PLL_DONE 0xff07
+#define REG_STATUS_BOOT_I_PLL_LOCKTIME_DONE 0xff08
+#define REG_STATUS_BOOT_PLL_INIT_OK 0xff09
+#define REG_STATUS_BOOT_SENSOR_INIT_OK 0xff0a
+#define REG_STATUS_BOOT_GPIO_SETTING_OK 0xff0b
+#define REG_STATUS_BOOT_READ_CAL_DATA_OK 0xff0c
+#define REG_STATUS_BOOT_STABLE_AE_AWB_OK 0xff0d
+#define REG_STATUS_ISP_COMMAND_COMPLETED 0xffff
+#define REG_STATUS_EXCEPTION_OCCURED 0xdead
+
+#define COMM_RESULT_OFFSET S5C73M3_REG(0x0009, 0x5000)
+
+#define COMM_IMG_OUTPUT 0x0902
+#define COMM_IMG_OUTPUT_HDR 0x0008
+#define COMM_IMG_OUTPUT_YUV 0x0009
+#define COMM_IMG_OUTPUT_INTERLEAVED 0x000d
+
+#define COMM_STILL_PRE_FLASH 0x0a00
+#define COMM_STILL_PRE_FLASH_FIRE 0x0000
+#define COMM_STILL_PRE_FLASH_NON_FIRED 0x0000
+#define COMM_STILL_PRE_FLASH_FIRED 0x0001
+
+#define COMM_STILL_MAIN_FLASH 0x0a02
+#define COMM_STILL_MAIN_FLASH_CANCEL 0x0001
+#define COMM_STILL_MAIN_FLASH_FIRE 0x0002
+
+#define COMM_ZOOM_STEP 0x0b00
+
+#define COMM_IMAGE_EFFECT 0x0b0a
+#define COMM_IMAGE_EFFECT_NONE 0x0001
+#define COMM_IMAGE_EFFECT_NEGATIVE 0x0002
+#define COMM_IMAGE_EFFECT_AQUA 0x0003
+#define COMM_IMAGE_EFFECT_SEPIA 0x0004
+#define COMM_IMAGE_EFFECT_MONO 0x0005
+
+#define COMM_IMAGE_QUALITY 0x0b0c
+#define COMM_IMAGE_QUALITY_SUPERFINE 0x0000
+#define COMM_IMAGE_QUALITY_FINE 0x0001
+#define COMM_IMAGE_QUALITY_NORMAL 0x0002
+
+#define COMM_FLASH_MODE 0x0b0e
+#define COMM_FLASH_MODE_OFF 0x0000
+#define COMM_FLASH_MODE_ON 0x0001
+#define COMM_FLASH_MODE_AUTO 0x0002
+
+#define COMM_FLASH_STATUS 0x0b80
+#define COMM_FLASH_STATUS_OFF 0x0001
+#define COMM_FLASH_STATUS_ON 0x0002
+#define COMM_FLASH_STATUS_AUTO 0x0003
+
+#define COMM_FLASH_TORCH 0x0b12
+#define COMM_FLASH_TORCH_OFF 0x0000
+#define COMM_FLASH_TORCH_ON 0x0001
+
+#define COMM_AE_NEEDS_FLASH 0x0cba
+#define COMM_AE_NEEDS_FLASH_OFF 0x0000
+#define COMM_AE_NEEDS_FLASH_ON 0x0001
+
+#define COMM_CHG_MODE 0x0b10
+#define COMM_CHG_MODE_NEW 0x8000
+#define COMM_CHG_MODE_SUBSAMPLING_HALF 0x2000
+#define COMM_CHG_MODE_SUBSAMPLING_QUARTER 0x4000
+
+#define COMM_CHG_MODE_YUV_320_240 0x0001
+#define COMM_CHG_MODE_YUV_640_480 0x0002
+#define COMM_CHG_MODE_YUV_880_720 0x0003
+#define COMM_CHG_MODE_YUV_960_720 0x0004
+#define COMM_CHG_MODE_YUV_1184_666 0x0005
+#define COMM_CHG_MODE_YUV_1280_720 0x0006
+#define COMM_CHG_MODE_YUV_1536_864 0x0007
+#define COMM_CHG_MODE_YUV_1600_1200 0x0008
+#define COMM_CHG_MODE_YUV_1632_1224 0x0009
+#define COMM_CHG_MODE_YUV_1920_1080 0x000a
+#define COMM_CHG_MODE_YUV_1920_1440 0x000b
+#define COMM_CHG_MODE_YUV_2304_1296 0x000c
+#define COMM_CHG_MODE_YUV_3264_2448 0x000d
+#define COMM_CHG_MODE_YUV_352_288 0x000e
+#define COMM_CHG_MODE_YUV_1008_672 0x000f
+
+#define COMM_CHG_MODE_JPEG_640_480 0x0010
+#define COMM_CHG_MODE_JPEG_800_450 0x0020
+#define COMM_CHG_MODE_JPEG_800_600 0x0030
+#define COMM_CHG_MODE_JPEG_1280_720 0x0040
+#define COMM_CHG_MODE_JPEG_1280_960 0x0050
+#define COMM_CHG_MODE_JPEG_1600_900 0x0060
+#define COMM_CHG_MODE_JPEG_1600_1200 0x0070
+#define COMM_CHG_MODE_JPEG_2048_1152 0x0080
+#define COMM_CHG_MODE_JPEG_2048_1536 0x0090
+#define COMM_CHG_MODE_JPEG_2560_1440 0x00a0
+#define COMM_CHG_MODE_JPEG_2560_1920 0x00b0
+#define COMM_CHG_MODE_JPEG_3264_2176 0x00c0
+#define COMM_CHG_MODE_JPEG_1024_768 0x00d0
+#define COMM_CHG_MODE_JPEG_3264_1836 0x00e0
+#define COMM_CHG_MODE_JPEG_3264_2448 0x00f0
+
+#define COMM_AF_CON 0x0e00
+#define COMM_AF_CON_STOP 0x0000
+#define COMM_AF_CON_SCAN 0x0001 /* Full Search */
+#define COMM_AF_CON_START 0x0002 /* Fast Search */
+
+#define COMM_AF_CAL 0x0e06
+#define COMM_AF_TOUCH_AF 0x0e0a
+
+#define REG_AF_STATUS S5C73M3_REG(0x0009, 0x5e80)
+#define REG_CAF_STATUS_FIND_SEARCH_DIR 0x0001
+#define REG_CAF_STATUS_FOCUSING 0x0002
+#define REG_CAF_STATUS_FOCUSED 0x0003
+#define REG_CAF_STATUS_UNFOCUSED 0x0004
+#define REG_AF_STATUS_INVALID 0x0010
+#define REG_AF_STATUS_FOCUSING 0x0020
+#define REG_AF_STATUS_FOCUSED 0x0030
+#define REG_AF_STATUS_UNFOCUSED 0x0040
+
+#define REG_AF_TOUCH_POSITION S5C73M3_REG(0x0009, 0x5e8e)
+#define COMM_AF_FACE_ZOOM 0x0e10
+
+#define COMM_AF_MODE 0x0e02
+#define COMM_AF_MODE_NORMAL 0x0000
+#define COMM_AF_MODE_MACRO 0x0001
+#define COMM_AF_MODE_MOVIE_CAF_START 0x0002
+#define COMM_AF_MODE_MOVIE_CAF_STOP 0x0003
+#define COMM_AF_MODE_PREVIEW_CAF_START 0x0004
+#define COMM_AF_MODE_PREVIEW_CAF_STOP 0x0005
+
+#define COMM_AF_SOFTLANDING 0x0e16
+#define COMM_AF_SOFTLANDING_ON 0x0000
+#define COMM_AF_SOFTLANDING_RES_COMPLETE 0x0001
+
+#define COMM_FACE_DET 0x0e0c
+#define COMM_FACE_DET_OFF 0x0000
+#define COMM_FACE_DET_ON 0x0001
+
+#define COMM_FACE_DET_OSD 0x0e0e
+#define COMM_FACE_DET_OSD_OFF 0x0000
+#define COMM_FACE_DET_OSD_ON 0x0001
+
+#define COMM_AE_CON 0x0c00
+#define COMM_AE_STOP 0x0000 /* lock */
+#define COMM_AE_START 0x0001 /* unlock */
+
+#define COMM_ISO 0x0c02
+#define COMM_ISO_AUTO 0x0000
+#define COMM_ISO_100 0x0001
+#define COMM_ISO_200 0x0002
+#define COMM_ISO_400 0x0003
+#define COMM_ISO_800 0x0004
+#define COMM_ISO_SPORTS 0x0005
+#define COMM_ISO_NIGHT 0x0006
+#define COMM_ISO_INDOOR 0x0007
+
+/* 0x0000 (-2.0 EV)...0x0008 (+2.0 EV), 0.5 EV step */
+#define COMM_EV 0x0c04
+
+#define COMM_METERING 0x0c06
+#define COMM_METERING_CENTER 0x0000
+#define COMM_METERING_SPOT 0x0001
+#define COMM_METERING_AVERAGE 0x0002
+#define COMM_METERING_SMART 0x0003
+
+#define COMM_WDR 0x0c08
+#define COMM_WDR_OFF 0x0000
+#define COMM_WDR_ON 0x0001
+
+#define COMM_FLICKER_MODE 0x0c12
+#define COMM_FLICKER_NONE 0x0000
+#define COMM_FLICKER_MANUAL_50HZ 0x0001
+#define COMM_FLICKER_MANUAL_60HZ 0x0002
+#define COMM_FLICKER_AUTO 0x0003
+#define COMM_FLICKER_AUTO_50HZ 0x0004
+#define COMM_FLICKER_AUTO_60HZ 0x0005
+
+#define COMM_FRAME_RATE 0x0c1e
+#define COMM_FRAME_RATE_AUTO_SET 0x0000
+#define COMM_FRAME_RATE_FIXED_30FPS 0x0002
+#define COMM_FRAME_RATE_FIXED_20FPS 0x0003
+#define COMM_FRAME_RATE_FIXED_15FPS 0x0004
+#define COMM_FRAME_RATE_FIXED_60FPS 0x0007
+#define COMM_FRAME_RATE_FIXED_120FPS 0x0008
+#define COMM_FRAME_RATE_FIXED_7FPS 0x0009
+#define COMM_FRAME_RATE_FIXED_10FPS 0x000a
+#define COMM_FRAME_RATE_FIXED_90FPS 0x000b
+#define COMM_FRAME_RATE_ANTI_SHAKE 0x0013
+
+/* 0x0000...0x0004 -> sharpness: 0, 1, 2, -1, -2 */
+#define COMM_SHARPNESS 0x0c14
+
+/* 0x0000...0x0004 -> saturation: 0, 1, 2, -1, -2 */
+#define COMM_SATURATION 0x0c16
+
+/* 0x0000...0x0004 -> contrast: 0, 1, 2, -1, -2 */
+#define COMM_CONTRAST 0x0c18
+
+#define COMM_SCENE_MODE 0x0c1a
+#define COMM_SCENE_MODE_NONE 0x0000
+#define COMM_SCENE_MODE_PORTRAIT 0x0001
+#define COMM_SCENE_MODE_LANDSCAPE 0x0002
+#define COMM_SCENE_MODE_SPORTS 0x0003
+#define COMM_SCENE_MODE_INDOOR 0x0004
+#define COMM_SCENE_MODE_BEACH 0x0005
+#define COMM_SCENE_MODE_SUNSET 0x0006
+#define COMM_SCENE_MODE_DAWN 0x0007
+#define COMM_SCENE_MODE_FALL 0x0008
+#define COMM_SCENE_MODE_NIGHT 0x0009
+#define COMM_SCENE_MODE_AGAINST_LIGHT 0x000a
+#define COMM_SCENE_MODE_FIRE 0x000b
+#define COMM_SCENE_MODE_TEXT 0x000c
+#define COMM_SCENE_MODE_CANDLE 0x000d
+
+#define COMM_AE_AUTO_BRACKET 0x0b14
+#define COMM_AE_AUTO_BRAKET_EV05 0x0080
+#define COMM_AE_AUTO_BRAKET_EV10 0x0100
+#define COMM_AE_AUTO_BRAKET_EV15 0x0180
+#define COMM_AE_AUTO_BRAKET_EV20 0x0200
+
+#define COMM_SENSOR_STREAMING 0x090a
+#define COMM_SENSOR_STREAMING_OFF 0x0000
+#define COMM_SENSOR_STREAMING_ON 0x0001
+
+#define COMM_AWB_MODE 0x0d02
+#define COMM_AWB_MODE_INCANDESCENT 0x0000
+#define COMM_AWB_MODE_FLUORESCENT1 0x0001
+#define COMM_AWB_MODE_FLUORESCENT2 0x0002
+#define COMM_AWB_MODE_DAYLIGHT 0x0003
+#define COMM_AWB_MODE_CLOUDY 0x0004
+#define COMM_AWB_MODE_AUTO 0x0005
+
+#define COMM_AWB_CON 0x0d00
+#define COMM_AWB_STOP 0x0000 /* lock */
+#define COMM_AWB_START 0x0001 /* unlock */
+
+#define COMM_FW_UPDATE 0x0906
+#define COMM_FW_UPDATE_NOT_READY 0x0000
+#define COMM_FW_UPDATE_SUCCESS 0x0005
+#define COMM_FW_UPDATE_FAIL 0x0007
+#define COMM_FW_UPDATE_BUSY 0xffff
+
+
+#define S5C73M3_MAX_SUPPLIES 6
+
+struct s5c73m3_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct {
+ /* exposure/exposure bias cluster */
+ struct v4l2_ctrl *auto_exposure;
+ struct v4l2_ctrl *exposure_bias;
+ struct v4l2_ctrl *exposure_metering;
+ };
+ struct {
+ /* iso/auto iso cluster */
+ struct v4l2_ctrl *auto_iso;
+ struct v4l2_ctrl *iso;
+ };
+ struct v4l2_ctrl *auto_wb;
+ struct {
+ /* continuous auto focus/auto focus cluster */
+ struct v4l2_ctrl *focus_auto;
+ struct v4l2_ctrl *af_start;
+ struct v4l2_ctrl *af_stop;
+ struct v4l2_ctrl *af_status;
+ struct v4l2_ctrl *af_distance;
+ };
+
+ struct v4l2_ctrl *aaa_lock;
+ struct v4l2_ctrl *colorfx;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *sharpness;
+ struct v4l2_ctrl *zoom;
+ struct v4l2_ctrl *wdr;
+ struct v4l2_ctrl *stabilization;
+ struct v4l2_ctrl *jpeg_quality;
+ struct v4l2_ctrl *scene_mode;
+};
+
+enum s5c73m3_gpio_id {
+ STBY,
+ RST,
+ GPIO_NUM,
+};
+
+enum s5c73m3_resolution_types {
+ RES_ISP,
+ RES_JPEG,
+};
+
+struct s5c73m3_interval {
+ u16 fps_reg;
+ struct v4l2_fract interval;
+ /* Maximum rectangle for the interval */
+ struct v4l2_frmsize_discrete size;
+};
+
+struct s5c73m3 {
+ struct v4l2_subdev sensor_sd;
+ struct media_pad sensor_pads[S5C73M3_NUM_PADS];
+
+ struct v4l2_subdev oif_sd;
+ struct media_pad oif_pads[OIF_NUM_PADS];
+
+ struct spi_driver spidrv;
+ struct spi_device *spi_dev;
+ struct i2c_client *i2c_client;
+ u32 i2c_write_address;
+ u32 i2c_read_address;
+
+ struct regulator_bulk_data supplies[S5C73M3_MAX_SUPPLIES];
+ struct s5c73m3_gpio gpio[GPIO_NUM];
+
+ /* External master clock frequency */
+ u32 mclk_frequency;
+ /* Video bus type - MIPI-CSI2/parallel */
+ enum v4l2_mbus_type bus_type;
+
+ const struct s5c73m3_frame_size *sensor_pix_size[2];
+ const struct s5c73m3_frame_size *oif_pix_size[2];
+ enum v4l2_mbus_pixelcode mbus_code;
+
+ const struct s5c73m3_interval *fiv;
+
+ struct v4l2_mbus_frame_desc frame_desc;
+ /* protects the struct members below */
+ struct mutex lock;
+
+ struct s5c73m3_ctrls ctrls;
+
+ u8 streaming:1;
+ u8 apply_fmt:1;
+ u8 apply_fiv:1;
+ u8 isp_ready:1;
+
+ short power;
+
+ char sensor_fw[S5C73M3_SENSOR_FW_LEN + 2];
+ char sensor_type[S5C73M3_SENSOR_TYPE_LEN + 2];
+ char fw_file_version[2];
+ unsigned int fw_size;
+};
+
+struct s5c73m3_frame_size {
+ u32 width;
+ u32 height;
+ u8 reg_val;
+};
+
+extern int s5c73m3_dbg;
+
+int s5c73m3_register_spi_driver(struct s5c73m3 *state);
+void s5c73m3_unregister_spi_driver(struct s5c73m3 *state);
+int s5c73m3_spi_write(struct s5c73m3 *state, const void *addr,
+ const unsigned int len, const unsigned int tx_size);
+int s5c73m3_spi_read(struct s5c73m3 *state, void *addr,
+ const unsigned int len, const unsigned int tx_size);
+
+int s5c73m3_read(struct s5c73m3 *state, u32 addr, u16 *data);
+int s5c73m3_write(struct s5c73m3 *state, u32 addr, u16 data);
+int s5c73m3_isp_command(struct s5c73m3 *state, u16 command, u16 data);
+int s5c73m3_init_controls(struct s5c73m3 *state);
+
+static inline struct v4l2_subdev *ctrl_to_sensor_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct s5c73m3,
+ ctrls.handler)->sensor_sd;
+}
+
+static inline struct s5c73m3 *sensor_sd_to_s5c73m3(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct s5c73m3, sensor_sd);
+}
+
+static inline struct s5c73m3 *oif_sd_to_s5c73m3(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct s5c73m3, oif_sd);
+}
+#endif /* S5C73M3_H_ */
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index 57cd4fa0193d..bdf5e3db31d1 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -1598,7 +1598,7 @@ static int s5k6aa_probe(struct i2c_client *client,
for (i = 0; i < S5K6AA_NUM_SUPPLIES; i++)
s5k6aa->supplies[i].supply = s5k6aa_supply_names[i];
- ret = regulator_bulk_get(&client->dev, S5K6AA_NUM_SUPPLIES,
+ ret = devm_regulator_bulk_get(&client->dev, S5K6AA_NUM_SUPPLIES,
s5k6aa->supplies);
if (ret) {
dev_err(&client->dev, "Failed to get regulators\n");
@@ -1607,7 +1607,7 @@ static int s5k6aa_probe(struct i2c_client *client,
ret = s5k6aa_initialize_ctrls(s5k6aa);
if (ret)
- goto out_err4;
+ goto out_err3;
s5k6aa_presets_data_init(s5k6aa);
@@ -1618,8 +1618,6 @@ static int s5k6aa_probe(struct i2c_client *client,
return 0;
-out_err4:
- regulator_bulk_free(S5K6AA_NUM_SUPPLIES, s5k6aa->supplies);
out_err3:
s5k6aa_free_gpios(s5k6aa);
out_err2:
@@ -1635,7 +1633,6 @@ static int s5k6aa_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
- regulator_bulk_free(S5K6AA_NUM_SUPPLIES, s5k6aa->supplies);
s5k6aa_free_gpios(s5k6aa);
return 0;
diff --git a/drivers/media/i2c/soc_camera/imx074.c b/drivers/media/i2c/soc_camera/imx074.c
index f8534eec9de9..a2a5cbbdbe28 100644
--- a/drivers/media/i2c/soc_camera/imx074.c
+++ b/drivers/media/i2c/soc_camera/imx074.c
@@ -271,9 +271,9 @@ static int imx074_g_chip_ident(struct v4l2_subdev *sd,
static int imx074_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static int imx074_g_mbus_config(struct v4l2_subdev *sd,
@@ -430,10 +430,9 @@ static int imx074_probe(struct i2c_client *client,
{
struct imx074 *priv;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- int ret;
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "IMX074: missing platform data!\n");
return -EINVAL;
}
@@ -444,7 +443,7 @@ static int imx074_probe(struct i2c_client *client,
return -EIO;
}
- priv = kzalloc(sizeof(struct imx074), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(struct imx074), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -452,23 +451,15 @@ static int imx074_probe(struct i2c_client *client,
priv->fmt = &imx074_colour_fmts[0];
- ret = imx074_video_probe(client);
- if (ret < 0) {
- kfree(priv);
- return ret;
- }
-
- return ret;
+ return imx074_video_probe(client);
}
static int imx074_remove(struct i2c_client *client)
{
- struct imx074 *priv = to_imx074(client);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- if (icl->free_bus)
- icl->free_bus(icl);
- kfree(priv);
+ if (ssdd->free_bus)
+ ssdd->free_bus(ssdd);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/mt9m001.c b/drivers/media/i2c/soc_camera/mt9m001.c
index 19f8a07764f9..bcdc86175549 100644
--- a/drivers/media/i2c/soc_camera/mt9m001.c
+++ b/drivers/media/i2c/soc_camera/mt9m001.c
@@ -23,7 +23,7 @@
/*
* mt9m001 i2c address 0x5d
* The platform has to define struct i2c_board_info objects and link to them
- * from struct soc_camera_link
+ * from struct soc_camera_host_desc
*/
/* mt9m001 selected register addresses */
@@ -380,9 +380,9 @@ static int mt9m001_s_register(struct v4l2_subdev *sd,
static int mt9m001_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static int mt9m001_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
@@ -482,7 +482,7 @@ static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl)
* Interface active, can use i2c. If it fails, it can indeed mean, that
* this wasn't our capture interface, so, we wait for the right one
*/
-static int mt9m001_video_probe(struct soc_camera_link *icl,
+static int mt9m001_video_probe(struct soc_camera_subdev_desc *ssdd,
struct i2c_client *client)
{
struct mt9m001 *mt9m001 = to_mt9m001(client);
@@ -526,8 +526,8 @@ static int mt9m001_video_probe(struct soc_camera_link *icl,
* The platform may support different bus widths due to
* different routing of the data lines.
*/
- if (icl->query_bus_param)
- flags = icl->query_bus_param(icl);
+ if (ssdd->query_bus_param)
+ flags = ssdd->query_bus_param(ssdd);
else
flags = SOCAM_DATAWIDTH_10;
@@ -558,10 +558,10 @@ done:
return ret;
}
-static void mt9m001_video_remove(struct soc_camera_link *icl)
+static void mt9m001_video_remove(struct soc_camera_subdev_desc *ssdd)
{
- if (icl->free_bus)
- icl->free_bus(icl);
+ if (ssdd->free_bus)
+ ssdd->free_bus(ssdd);
}
static int mt9m001_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
@@ -605,14 +605,14 @@ static int mt9m001_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
/* MT9M001 has all capture_format parameters fixed */
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_MASTER;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -621,12 +621,12 @@ static int mt9m001_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
const struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct mt9m001 *mt9m001 = to_mt9m001(client);
unsigned int bps = soc_mbus_get_fmtdesc(mt9m001->fmt->code)->bits_per_sample;
- if (icl->set_bus_param)
- return icl->set_bus_param(icl, 1 << (bps - 1));
+ if (ssdd->set_bus_param)
+ return ssdd->set_bus_param(ssdd, 1 << (bps - 1));
/*
* Without board specific bus width settings we only support the
@@ -663,10 +663,10 @@ static int mt9m001_probe(struct i2c_client *client,
{
struct mt9m001 *mt9m001;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "MT9M001 driver needs platform data\n");
return -EINVAL;
}
@@ -677,7 +677,7 @@ static int mt9m001_probe(struct i2c_client *client,
return -EIO;
}
- mt9m001 = kzalloc(sizeof(struct mt9m001), GFP_KERNEL);
+ mt9m001 = devm_kzalloc(&client->dev, sizeof(struct mt9m001), GFP_KERNEL);
if (!mt9m001)
return -ENOMEM;
@@ -697,12 +697,9 @@ static int mt9m001_probe(struct i2c_client *client,
&mt9m001_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
V4L2_EXPOSURE_AUTO);
mt9m001->subdev.ctrl_handler = &mt9m001->hdl;
- if (mt9m001->hdl.error) {
- int err = mt9m001->hdl.error;
+ if (mt9m001->hdl.error)
+ return mt9m001->hdl.error;
- kfree(mt9m001);
- return err;
- }
v4l2_ctrl_auto_cluster(2, &mt9m001->autoexposure,
V4L2_EXPOSURE_MANUAL, true);
@@ -713,11 +710,9 @@ static int mt9m001_probe(struct i2c_client *client,
mt9m001->rect.width = MT9M001_MAX_WIDTH;
mt9m001->rect.height = MT9M001_MAX_HEIGHT;
- ret = mt9m001_video_probe(icl, client);
- if (ret) {
+ ret = mt9m001_video_probe(ssdd, client);
+ if (ret)
v4l2_ctrl_handler_free(&mt9m001->hdl);
- kfree(mt9m001);
- }
return ret;
}
@@ -725,12 +720,11 @@ static int mt9m001_probe(struct i2c_client *client,
static int mt9m001_remove(struct i2c_client *client)
{
struct mt9m001 *mt9m001 = to_mt9m001(client);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
v4l2_device_unregister_subdev(&mt9m001->subdev);
v4l2_ctrl_handler_free(&mt9m001->hdl);
- mt9m001_video_remove(icl);
- kfree(mt9m001);
+ mt9m001_video_remove(ssdd);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/mt9m111.c b/drivers/media/i2c/soc_camera/mt9m111.c
index 62fd94af599b..bbc4ff99603c 100644
--- a/drivers/media/i2c/soc_camera/mt9m111.c
+++ b/drivers/media/i2c/soc_camera/mt9m111.c
@@ -24,7 +24,8 @@
/*
* MT9M111, MT9M112 and MT9M131:
* i2c address is 0x48 or 0x5d (depending on SADDR pin)
- * The platform has to define i2c_board_info and call i2c_register_board_info()
+ * The platform has to define struct i2c_board_info objects and link to them
+ * from struct soc_camera_host_desc
*/
/*
@@ -799,17 +800,17 @@ static int mt9m111_init(struct mt9m111 *mt9m111)
static int mt9m111_power_on(struct mt9m111 *mt9m111)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- ret = soc_camera_power_on(&client->dev, icl);
+ ret = soc_camera_power_on(&client->dev, ssdd);
if (ret < 0)
return ret;
ret = mt9m111_resume(mt9m111);
if (ret < 0) {
dev_err(&client->dev, "Failed to resume the sensor: %d\n", ret);
- soc_camera_power_off(&client->dev, icl);
+ soc_camera_power_off(&client->dev, ssdd);
}
return ret;
@@ -818,10 +819,10 @@ static int mt9m111_power_on(struct mt9m111 *mt9m111)
static void mt9m111_power_off(struct mt9m111 *mt9m111)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
mt9m111_suspend(mt9m111);
- soc_camera_power_off(&client->dev, icl);
+ soc_camera_power_off(&client->dev, ssdd);
}
static int mt9m111_s_power(struct v4l2_subdev *sd, int on)
@@ -879,13 +880,13 @@ static int mt9m111_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -956,10 +957,10 @@ static int mt9m111_probe(struct i2c_client *client,
{
struct mt9m111 *mt9m111;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "mt9m111: driver needs platform data\n");
return -EINVAL;
}
@@ -970,7 +971,7 @@ static int mt9m111_probe(struct i2c_client *client,
return -EIO;
}
- mt9m111 = kzalloc(sizeof(struct mt9m111), GFP_KERNEL);
+ mt9m111 = devm_kzalloc(&client->dev, sizeof(struct mt9m111), GFP_KERNEL);
if (!mt9m111)
return -ENOMEM;
@@ -988,12 +989,8 @@ static int mt9m111_probe(struct i2c_client *client,
&mt9m111_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
V4L2_EXPOSURE_AUTO);
mt9m111->subdev.ctrl_handler = &mt9m111->hdl;
- if (mt9m111->hdl.error) {
- int err = mt9m111->hdl.error;
-
- kfree(mt9m111);
- return err;
- }
+ if (mt9m111->hdl.error)
+ return mt9m111->hdl.error;
/* Second stage probe - when a capture adapter is there */
mt9m111->rect.left = MT9M111_MIN_DARK_COLS;
@@ -1005,10 +1002,8 @@ static int mt9m111_probe(struct i2c_client *client,
mutex_init(&mt9m111->power_lock);
ret = mt9m111_video_probe(client);
- if (ret) {
+ if (ret)
v4l2_ctrl_handler_free(&mt9m111->hdl);
- kfree(mt9m111);
- }
return ret;
}
@@ -1019,7 +1014,6 @@ static int mt9m111_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&mt9m111->subdev);
v4l2_ctrl_handler_free(&mt9m111->hdl);
- kfree(mt9m111);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/mt9t031.c b/drivers/media/i2c/soc_camera/mt9t031.c
index 40800b10a080..d80d044ebf15 100644
--- a/drivers/media/i2c/soc_camera/mt9t031.c
+++ b/drivers/media/i2c/soc_camera/mt9t031.c
@@ -31,8 +31,8 @@
/*
* mt9t031 i2c address 0x5d
- * The platform has to define i2c_board_info and link to it from
- * struct soc_camera_link
+ * The platform has to define struct i2c_board_info objects and link to them
+ * from struct soc_camera_host_desc
*/
/* mt9t031 selected register addresses */
@@ -608,18 +608,18 @@ static struct device_type mt9t031_dev_type = {
static int mt9t031_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct video_device *vdev = soc_camera_i2c_to_vdev(client);
int ret;
if (on) {
- ret = soc_camera_power_on(&client->dev, icl);
+ ret = soc_camera_power_on(&client->dev, ssdd);
if (ret < 0)
return ret;
vdev->dev.type = &mt9t031_dev_type;
} else {
vdev->dev.type = NULL;
- soc_camera_power_off(&client->dev, icl);
+ soc_camera_power_off(&client->dev, ssdd);
}
return 0;
@@ -707,13 +707,13 @@ static int mt9t031_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -722,9 +722,9 @@ static int mt9t031_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- if (soc_camera_apply_board_flags(icl, cfg) &
+ if (soc_camera_apply_board_flags(ssdd, cfg) &
V4L2_MBUS_PCLK_SAMPLE_FALLING)
return reg_clear(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
else
@@ -758,11 +758,11 @@ static int mt9t031_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9t031 *mt9t031;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "MT9T031 driver needs platform data\n");
return -EINVAL;
}
@@ -773,7 +773,7 @@ static int mt9t031_probe(struct i2c_client *client,
return -EIO;
}
- mt9t031 = kzalloc(sizeof(struct mt9t031), GFP_KERNEL);
+ mt9t031 = devm_kzalloc(&client->dev, sizeof(struct mt9t031), GFP_KERNEL);
if (!mt9t031)
return -ENOMEM;
@@ -797,12 +797,9 @@ static int mt9t031_probe(struct i2c_client *client,
V4L2_CID_EXPOSURE, 1, 255, 1, 255);
mt9t031->subdev.ctrl_handler = &mt9t031->hdl;
- if (mt9t031->hdl.error) {
- int err = mt9t031->hdl.error;
+ if (mt9t031->hdl.error)
+ return mt9t031->hdl.error;
- kfree(mt9t031);
- return err;
- }
v4l2_ctrl_auto_cluster(2, &mt9t031->autoexposure,
V4L2_EXPOSURE_MANUAL, true);
@@ -816,10 +813,8 @@ static int mt9t031_probe(struct i2c_client *client,
mt9t031->yskip = 1;
ret = mt9t031_video_probe(client);
- if (ret) {
+ if (ret)
v4l2_ctrl_handler_free(&mt9t031->hdl);
- kfree(mt9t031);
- }
return ret;
}
@@ -830,7 +825,6 @@ static int mt9t031_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&mt9t031->subdev);
v4l2_ctrl_handler_free(&mt9t031->hdl);
- kfree(mt9t031);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/mt9t112.c b/drivers/media/i2c/soc_camera/mt9t112.c
index de7cd836b0a2..188e29b03273 100644
--- a/drivers/media/i2c/soc_camera/mt9t112.c
+++ b/drivers/media/i2c/soc_camera/mt9t112.c
@@ -92,6 +92,7 @@ struct mt9t112_priv {
struct v4l2_rect frame;
const struct mt9t112_format *format;
int model;
+ int num_formats;
u32 flags;
/* for flags */
#define INIT_DONE (1 << 0)
@@ -779,9 +780,9 @@ static int mt9t112_s_register(struct v4l2_subdev *sd,
static int mt9t112_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static struct v4l2_subdev_core_ops mt9t112_subdev_core_ops = {
@@ -859,11 +860,11 @@ static int mt9t112_set_params(struct mt9t112_priv *priv,
/*
* get color format
*/
- for (i = 0; i < ARRAY_SIZE(mt9t112_cfmts); i++)
+ for (i = 0; i < priv->num_formats; i++)
if (mt9t112_cfmts[i].code == code)
break;
- if (i == ARRAY_SIZE(mt9t112_cfmts))
+ if (i == priv->num_formats)
return -EINVAL;
priv->frame = *rect;
@@ -955,14 +956,16 @@ static int mt9t112_s_fmt(struct v4l2_subdev *sd,
static int mt9t112_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t112_priv *priv = to_mt9t112(client);
unsigned int top, left;
int i;
- for (i = 0; i < ARRAY_SIZE(mt9t112_cfmts); i++)
+ for (i = 0; i < priv->num_formats; i++)
if (mt9t112_cfmts[i].code == mf->code)
break;
- if (i == ARRAY_SIZE(mt9t112_cfmts)) {
+ if (i == priv->num_formats) {
mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
mf->colorspace = V4L2_COLORSPACE_JPEG;
} else {
@@ -979,7 +982,10 @@ static int mt9t112_try_fmt(struct v4l2_subdev *sd,
static int mt9t112_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
enum v4l2_mbus_pixelcode *code)
{
- if (index >= ARRAY_SIZE(mt9t112_cfmts))
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t112_priv *priv = to_mt9t112(client);
+
+ if (index >= priv->num_formats)
return -EINVAL;
*code = mt9t112_cfmts[index].code;
@@ -991,13 +997,13 @@ static int mt9t112_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH |
V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -1006,10 +1012,10 @@ static int mt9t112_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct mt9t112_priv *priv = to_mt9t112(client);
- if (soc_camera_apply_board_flags(icl, cfg) & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ if (soc_camera_apply_board_flags(ssdd, cfg) & V4L2_MBUS_PCLK_SAMPLE_RISING)
priv->flags |= PCLK_RISING;
return 0;
@@ -1056,10 +1062,12 @@ static int mt9t112_camera_probe(struct i2c_client *client)
case 0x2680:
devname = "mt9t111";
priv->model = V4L2_IDENT_MT9T111;
+ priv->num_formats = 1;
break;
case 0x2682:
devname = "mt9t112";
priv->model = V4L2_IDENT_MT9T112;
+ priv->num_formats = ARRAY_SIZE(mt9t112_cfmts);
break;
default:
dev_err(&client->dev, "Product ID error %04x\n", chipid);
@@ -1078,7 +1086,7 @@ static int mt9t112_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9t112_priv *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct v4l2_rect rect = {
.width = VGA_WIDTH,
.height = VGA_HEIGHT,
@@ -1087,24 +1095,22 @@ static int mt9t112_probe(struct i2c_client *client,
};
int ret;
- if (!icl || !icl->priv) {
+ if (!ssdd || !ssdd->drv_priv) {
dev_err(&client->dev, "mt9t112: missing platform data!\n");
return -EINVAL;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->info = icl->priv;
+ priv->info = ssdd->drv_priv;
v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
ret = mt9t112_camera_probe(client);
- if (ret) {
- kfree(priv);
+ if (ret)
return ret;
- }
/* Cannot fail: using the default supported pixel code */
mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
@@ -1114,9 +1120,6 @@ static int mt9t112_probe(struct i2c_client *client,
static int mt9t112_remove(struct i2c_client *client)
{
- struct mt9t112_priv *priv = to_mt9t112(client);
-
- kfree(priv);
return 0;
}
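A hedged sketch of the s_power conversion repeated in the drivers above and below, using an illustrative example_s_power() name: the subdevice descriptor obtained with soc_camera_i2c_to_desc() replaces the old struct soc_camera_link, and soc_camera_set_power() covers both power directions.

#include <linux/i2c.h>
#include <media/soc_camera.h>
#include <media/v4l2-subdev.h>

static int example_s_power(struct v4l2_subdev *sd, int on)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	/* The descriptor replaces the old struct soc_camera_link. */
	struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);

	/* Power the sensor up or down through the platform callbacks. */
	return soc_camera_set_power(&client->dev, ssdd, on);
}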
diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c
index d40a8858be01..a5e65d6a0781 100644
--- a/drivers/media/i2c/soc_camera/mt9v022.c
+++ b/drivers/media/i2c/soc_camera/mt9v022.c
@@ -25,7 +25,7 @@
/*
* mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c
* The platform has to define struct i2c_board_info objects and link to them
- * from struct soc_camera_link
+ * from struct soc_camera_host_desc
*/
static char *sensor_type;
@@ -508,9 +508,9 @@ static int mt9v022_s_register(struct v4l2_subdev *sd,
static int mt9v022_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static int mt9v022_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
@@ -655,7 +655,7 @@ static int mt9v022_s_ctrl(struct v4l2_ctrl *ctrl)
static int mt9v022_video_probe(struct i2c_client *client)
{
struct mt9v022 *mt9v022 = to_mt9v022(client);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
s32 data;
int ret;
unsigned long flags;
@@ -715,8 +715,8 @@ static int mt9v022_video_probe(struct i2c_client *client)
* The platform may support different bus widths due to
* different routing of the data lines.
*/
- if (icl->query_bus_param)
- flags = icl->query_bus_param(icl);
+ if (ssdd->query_bus_param)
+ flags = ssdd->query_bus_param(ssdd);
else
flags = SOCAM_DATAWIDTH_10;
@@ -784,7 +784,7 @@ static int mt9v022_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE |
V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
@@ -792,7 +792,7 @@ static int mt9v022_g_mbus_config(struct v4l2_subdev *sd,
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -801,15 +801,15 @@ static int mt9v022_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct mt9v022 *mt9v022 = to_mt9v022(client);
- unsigned long flags = soc_camera_apply_board_flags(icl, cfg);
+ unsigned long flags = soc_camera_apply_board_flags(ssdd, cfg);
unsigned int bps = soc_mbus_get_fmtdesc(mt9v022->fmt->code)->bits_per_sample;
int ret;
u16 pixclk = 0;
- if (icl->set_bus_param) {
- ret = icl->set_bus_param(icl, 1 << (bps - 1));
+ if (ssdd->set_bus_param) {
+ ret = ssdd->set_bus_param(ssdd, 1 << (bps - 1));
if (ret)
return ret;
} else if (bps != 10) {
@@ -873,12 +873,12 @@ static int mt9v022_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9v022 *mt9v022;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct mt9v022_platform_data *pdata = icl->priv;
+ struct mt9v022_platform_data *pdata;
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "MT9V022 driver needs platform data\n");
return -EINVAL;
}
@@ -889,10 +889,11 @@ static int mt9v022_probe(struct i2c_client *client,
return -EIO;
}
- mt9v022 = kzalloc(sizeof(struct mt9v022), GFP_KERNEL);
+ mt9v022 = devm_kzalloc(&client->dev, sizeof(struct mt9v022), GFP_KERNEL);
if (!mt9v022)
return -ENOMEM;
+ pdata = ssdd->drv_priv;
v4l2_i2c_subdev_init(&mt9v022->subdev, client, &mt9v022_subdev_ops);
v4l2_ctrl_handler_init(&mt9v022->hdl, 6);
v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
@@ -929,7 +930,6 @@ static int mt9v022_probe(struct i2c_client *client,
int err = mt9v022->hdl.error;
dev_err(&client->dev, "control initialisation err %d\n", err);
- kfree(mt9v022);
return err;
}
v4l2_ctrl_auto_cluster(2, &mt9v022->autoexposure,
@@ -949,10 +949,8 @@ static int mt9v022_probe(struct i2c_client *client,
mt9v022->rect.height = MT9V022_MAX_HEIGHT;
ret = mt9v022_video_probe(client);
- if (ret) {
+ if (ret)
v4l2_ctrl_handler_free(&mt9v022->hdl);
- kfree(mt9v022);
- }
return ret;
}
@@ -960,13 +958,12 @@ static int mt9v022_probe(struct i2c_client *client,
static int mt9v022_remove(struct i2c_client *client)
{
struct mt9v022 *mt9v022 = to_mt9v022(client);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
v4l2_device_unregister_subdev(&mt9v022->subdev);
- if (icl->free_bus)
- icl->free_bus(icl);
+ if (ssdd->free_bus)
+ ssdd->free_bus(ssdd);
v4l2_ctrl_handler_free(&mt9v022->hdl);
- kfree(mt9v022);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
index 66698a83bda2..0f520f693b6e 100644
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ b/drivers/media/i2c/soc_camera/ov2640.c
@@ -771,9 +771,9 @@ static int ov2640_s_register(struct v4l2_subdev *sd,
static int ov2640_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
/* Select the nearest higher resolution for capture */
@@ -1046,13 +1046,13 @@ static int ov2640_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -1080,11 +1080,11 @@ static int ov2640_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov2640_priv *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&adapter->dev,
"OV2640: Missing platform_data for driver\n");
return -EINVAL;
@@ -1096,7 +1096,7 @@ static int ov2640_probe(struct i2c_client *client,
return -EIO;
}
- priv = kzalloc(sizeof(struct ov2640_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(struct ov2640_priv), GFP_KERNEL);
if (!priv) {
dev_err(&adapter->dev,
"Failed to allocate memory for private data!\n");
@@ -1110,20 +1110,14 @@ static int ov2640_probe(struct i2c_client *client,
v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- int err = priv->hdl.error;
-
- kfree(priv);
- return err;
- }
+ if (priv->hdl.error)
+ return priv->hdl.error;
ret = ov2640_video_probe(client);
- if (ret) {
+ if (ret)
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
- } else {
+ else
dev_info(&adapter->dev, "OV2640 Probed\n");
- }
return ret;
}
@@ -1134,7 +1128,6 @@ static int ov2640_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c
index 8577e0cfb7fe..9d53309619d2 100644
--- a/drivers/media/i2c/soc_camera/ov5642.c
+++ b/drivers/media/i2c/soc_camera/ov5642.c
@@ -934,13 +934,13 @@ static int ov5642_g_mbus_config(struct v4l2_subdev *sd,
static int ov5642_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
if (!on)
- return soc_camera_power_off(&client->dev, icl);
+ return soc_camera_power_off(&client->dev, ssdd);
- ret = soc_camera_power_on(&client->dev, icl);
+ ret = soc_camera_power_on(&client->dev, ssdd);
if (ret < 0)
return ret;
@@ -1020,15 +1020,14 @@ static int ov5642_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov5642 *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- int ret;
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "OV5642: missing platform data!\n");
return -EINVAL;
}
- priv = kzalloc(sizeof(struct ov5642), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(struct ov5642), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -1043,25 +1042,15 @@ static int ov5642_probe(struct i2c_client *client,
priv->total_width = OV5642_DEFAULT_WIDTH + BLANKING_EXTRA_WIDTH;
priv->total_height = BLANKING_MIN_HEIGHT;
- ret = ov5642_video_probe(client);
- if (ret < 0)
- goto error;
-
- return 0;
-
-error:
- kfree(priv);
- return ret;
+ return ov5642_video_probe(client);
}
static int ov5642_remove(struct i2c_client *client)
{
- struct ov5642 *priv = to_ov5642(client);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- if (icl->free_bus)
- icl->free_bus(icl);
- kfree(priv);
+ if (ssdd->free_bus)
+ ssdd->free_bus(ssdd);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index e87feb0881e3..dbe4f564e6b2 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -435,9 +435,9 @@ static int ov6650_set_register(struct v4l2_subdev *sd,
static int ov6650_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static int ov6650_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
@@ -892,7 +892,7 @@ static int ov6650_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_MASTER |
V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
@@ -900,7 +900,7 @@ static int ov6650_g_mbus_config(struct v4l2_subdev *sd,
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -910,8 +910,8 @@ static int ov6650_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- unsigned long flags = soc_camera_apply_board_flags(icl, cfg);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
+ unsigned long flags = soc_camera_apply_board_flags(ssdd, cfg);
int ret;
if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
@@ -963,15 +963,15 @@ static int ov6650_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov6650 *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "Missing platform_data for driver\n");
return -EINVAL;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(&client->dev,
"Failed to allocate memory for private data!\n");
@@ -1009,12 +1009,9 @@ static int ov6650_probe(struct i2c_client *client,
V4L2_CID_GAMMA, 0, 0xff, 1, 0x12);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- int err = priv->hdl.error;
+ if (priv->hdl.error)
+ return priv->hdl.error;
- kfree(priv);
- return err;
- }
v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true);
v4l2_ctrl_auto_cluster(2, &priv->autoexposure,
@@ -1029,10 +1026,8 @@ static int ov6650_probe(struct i2c_client *client,
priv->colorspace = V4L2_COLORSPACE_JPEG;
ret = ov6650_video_probe(client);
- if (ret) {
+ if (ret)
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
- }
return ret;
}
@@ -1043,7 +1038,6 @@ static int ov6650_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index e4a10751894d..fbeb5b2f3ae5 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -667,9 +667,9 @@ static int ov772x_s_register(struct v4l2_subdev *sd,
static int ov772x_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static const struct ov772x_win_size *ov772x_select_win(u32 width, u32 height)
@@ -1019,13 +1019,13 @@ static int ov772x_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -1054,11 +1054,11 @@ static int ov772x_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov772x_priv *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int ret;
- if (!icl || !icl->priv) {
+ if (!ssdd || !ssdd->drv_priv) {
dev_err(&client->dev, "OV772X: missing platform data!\n");
return -EINVAL;
}
@@ -1070,11 +1070,11 @@ static int ov772x_probe(struct i2c_client *client,
return -EIO;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->info = icl->priv;
+ priv->info = ssdd->drv_priv;
v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops);
v4l2_ctrl_handler_init(&priv->hdl, 3);
@@ -1085,22 +1085,15 @@ static int ov772x_probe(struct i2c_client *client,
v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
V4L2_CID_BAND_STOP_FILTER, 0, 256, 1, 0);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- ret = priv->hdl.error;
- goto done;
- }
+ if (priv->hdl.error)
+ return priv->hdl.error;
ret = ov772x_video_probe(priv);
- if (ret < 0)
- goto done;
-
- priv->cfmt = &ov772x_cfmts[0];
- priv->win = &ov772x_win_sizes[0];
-
-done:
- if (ret) {
+ if (ret < 0) {
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
+ } else {
+ priv->cfmt = &ov772x_cfmts[0];
+ priv->win = &ov772x_win_sizes[0];
}
return ret;
}
@@ -1111,7 +1104,6 @@ static int ov772x_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
return 0;
}
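A short sketch of the simplified late-probe error handling these conversions leave behind, assuming hypothetical example_priv and example_video_probe() names: with devm-managed memory, the control handler is the only resource still freed explicitly when probing fails after the handler has been set up.

#include <linux/i2c.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>

struct example_priv {				/* hypothetical */
	struct v4l2_subdev subdev;
	struct v4l2_ctrl_handler hdl;
};

static int example_video_probe(struct i2c_client *client);	/* hypothetical */

static int example_probe_tail(struct i2c_client *client,
			      struct example_priv *priv)
{
	int ret;

	priv->subdev.ctrl_handler = &priv->hdl;
	if (priv->hdl.error)
		return priv->hdl.error;		/* memory is devm-managed */

	ret = example_video_probe(client);
	if (ret)
		v4l2_ctrl_handler_free(&priv->hdl);
	return ret;
}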
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index b323684eaf77..05993041be31 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -336,9 +336,9 @@ static int ov9640_set_register(struct v4l2_subdev *sd,
static int ov9640_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
/* select nearest higher resolution for capture */
@@ -657,13 +657,13 @@ static int ov9640_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -690,15 +690,15 @@ static int ov9640_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov9640_priv *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "Missing platform_data for driver\n");
return -EINVAL;
}
- priv = kzalloc(sizeof(struct ov9640_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(struct ov9640_priv), GFP_KERNEL);
if (!priv) {
dev_err(&client->dev,
"Failed to allocate memory for private data!\n");
@@ -713,19 +713,13 @@ static int ov9640_probe(struct i2c_client *client,
v4l2_ctrl_new_std(&priv->hdl, &ov9640_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- int err = priv->hdl.error;
-
- kfree(priv);
- return err;
- }
+ if (priv->hdl.error)
+ return priv->hdl.error;
ret = ov9640_video_probe(client);
- if (ret) {
+ if (ret)
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
- }
return ret;
}
@@ -737,7 +731,6 @@ static int ov9640_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index 7a55889e397b..2f236da80165 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -787,12 +787,12 @@ static int ov9740_g_chip_ident(struct v4l2_subdev *sd,
static int ov9740_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct ov9740_priv *priv = to_ov9740(sd);
int ret;
if (on) {
- ret = soc_camera_power_on(&client->dev, icl);
+ ret = soc_camera_power_on(&client->dev, ssdd);
if (ret < 0)
return ret;
@@ -806,7 +806,7 @@ static int ov9740_s_power(struct v4l2_subdev *sd, int on)
priv->current_enable = true;
}
- soc_camera_power_off(&client->dev, icl);
+ soc_camera_power_off(&client->dev, ssdd);
}
return 0;
@@ -905,13 +905,13 @@ static int ov9740_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -951,15 +951,15 @@ static int ov9740_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov9740_priv *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "Missing platform_data for driver\n");
return -EINVAL;
}
- priv = kzalloc(sizeof(struct ov9740_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(struct ov9740_priv), GFP_KERNEL);
if (!priv) {
dev_err(&client->dev, "Failed to allocate private data!\n");
return -ENOMEM;
@@ -972,18 +972,12 @@ static int ov9740_probe(struct i2c_client *client,
v4l2_ctrl_new_std(&priv->hdl, &ov9740_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- int err = priv->hdl.error;
-
- kfree(priv);
- return err;
- }
+ if (priv->hdl.error)
+ return priv->hdl.error;
ret = ov9740_video_probe(client);
- if (ret < 0) {
+ if (ret < 0)
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
- }
return ret;
}
@@ -994,7 +988,6 @@ static int ov9740_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/rj54n1cb0c.c b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
index 02f0400051d9..5c92679bfefb 100644
--- a/drivers/media/i2c/soc_camera/rj54n1cb0c.c
+++ b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
@@ -1183,9 +1183,9 @@ static int rj54n1_s_register(struct v4l2_subdev *sd,
static int rj54n1_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static int rj54n1_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -1245,14 +1245,14 @@ static int rj54n1_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags =
V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -1261,10 +1261,10 @@ static int rj54n1_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
/* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */
- if (soc_camera_apply_board_flags(icl, cfg) &
+ if (soc_camera_apply_board_flags(ssdd, cfg) &
V4L2_MBUS_PCLK_SAMPLE_RISING)
return reg_write(client, RJ54N1_OUT_SIGPO, 1 << 4);
else
@@ -1334,17 +1334,17 @@ static int rj54n1_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct rj54n1 *rj54n1;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct rj54n1_pdata *rj54n1_priv;
int ret;
- if (!icl || !icl->priv) {
+ if (!ssdd || !ssdd->drv_priv) {
dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n");
return -EINVAL;
}
- rj54n1_priv = icl->priv;
+ rj54n1_priv = ssdd->drv_priv;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_warn(&adapter->dev,
@@ -1352,7 +1352,7 @@ static int rj54n1_probe(struct i2c_client *client,
return -EIO;
}
- rj54n1 = kzalloc(sizeof(struct rj54n1), GFP_KERNEL);
+ rj54n1 = devm_kzalloc(&client->dev, sizeof(struct rj54n1), GFP_KERNEL);
if (!rj54n1)
return -ENOMEM;
@@ -1367,12 +1367,8 @@ static int rj54n1_probe(struct i2c_client *client,
v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
- if (rj54n1->hdl.error) {
- int err = rj54n1->hdl.error;
-
- kfree(rj54n1);
- return err;
- }
+ if (rj54n1->hdl.error)
+ return rj54n1->hdl.error;
rj54n1->clk_div = clk_div;
rj54n1->rect.left = RJ54N1_COLUMN_SKIP;
@@ -1387,10 +1383,8 @@ static int rj54n1_probe(struct i2c_client *client,
(clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1);
ret = rj54n1_video_probe(client, rj54n1_priv);
- if (ret < 0) {
+ if (ret < 0)
v4l2_ctrl_handler_free(&rj54n1->hdl);
- kfree(rj54n1);
- }
return ret;
}
@@ -1398,13 +1392,12 @@ static int rj54n1_probe(struct i2c_client *client,
static int rj54n1_remove(struct i2c_client *client)
{
struct rj54n1 *rj54n1 = to_rj54n1(client);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
v4l2_device_unregister_subdev(&rj54n1->subdev);
- if (icl->free_bus)
- icl->free_bus(icl);
+ if (ssdd->free_bus)
+ ssdd->free_bus(ssdd);
v4l2_ctrl_handler_free(&rj54n1->hdl);
- kfree(rj54n1);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c
index 140716e71a15..7d2074601881 100644
--- a/drivers/media/i2c/soc_camera/tw9910.c
+++ b/drivers/media/i2c/soc_camera/tw9910.c
@@ -569,9 +569,9 @@ static int tw9910_s_register(struct v4l2_subdev *sd,
static int tw9910_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static int tw9910_set_frame(struct v4l2_subdev *sd, u32 *width, u32 *height)
@@ -847,14 +847,14 @@ static int tw9910_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -863,9 +863,9 @@ static int tw9910_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
u8 val = VSSL_VVALID | HSSL_DVALID;
- unsigned long flags = soc_camera_apply_board_flags(icl, cfg);
+ unsigned long flags = soc_camera_apply_board_flags(ssdd, cfg);
/*
* set OUTCTR1
@@ -911,15 +911,14 @@ static int tw9910_probe(struct i2c_client *client,
struct tw9910_video_info *info;
struct i2c_adapter *adapter =
to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- int ret;
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- if (!icl || !icl->priv) {
+ if (!ssdd || !ssdd->drv_priv) {
dev_err(&client->dev, "TW9910: missing platform data!\n");
return -EINVAL;
}
- info = icl->priv;
+ info = ssdd->drv_priv;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&client->dev,
@@ -928,7 +927,7 @@ static int tw9910_probe(struct i2c_client *client,
return -EIO;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -936,18 +935,11 @@ static int tw9910_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &tw9910_subdev_ops);
- ret = tw9910_video_probe(client);
- if (ret)
- kfree(priv);
-
- return ret;
+ return tw9910_video_probe(client);
}
static int tw9910_remove(struct i2c_client *client)
{
- struct tw9910_priv *priv = to_tw9910(client);
-
- kfree(priv);
return 0;
}
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index c31cc04fffd2..e747524ba6ed 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -175,7 +175,7 @@ static int ths7303_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
- sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
+ sd = devm_kzalloc(&client->dev, sizeof(struct v4l2_subdev), GFP_KERNEL);
if (sd == NULL)
return -ENOMEM;
@@ -189,7 +189,6 @@ static int ths7303_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- kfree(sd);
return 0;
}
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index 3b24d3fc1866..e3b33b78dd21 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -39,6 +39,7 @@
#include <media/tvaudio.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <media/i2c-addr.h>
@@ -91,13 +92,13 @@ struct CHIPDESC {
audiocmd init;
/* which register has which value */
- int leftreg,rightreg,treblereg,bassreg;
+ int leftreg, rightreg, treblereg, bassreg;
- /* initialize with (defaults to 65535/65535/32768/32768 */
- int leftinit,rightinit,trebleinit,bassinit;
+ /* initial values (defaults: 65535/32768/32768) */
+ int volinit, trebleinit, bassinit;
/* functions to convert the values (v4l -> chip) */
- getvalue volfunc,treblefunc,bassfunc;
+ getvalue volfunc, treblefunc, bassfunc;
/* get/set mode */
getrxsubchans getrxsubchans;
@@ -113,6 +114,12 @@ struct CHIPDESC {
/* current state of the chip */
struct CHIPSTATE {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+ struct {
+ /* volume/balance cluster */
+ struct v4l2_ctrl *volume;
+ struct v4l2_ctrl *balance;
+ };
/* chip-specific description - should point to
an entry at CHIPDESC table */
@@ -122,7 +129,7 @@ struct CHIPSTATE {
audiocmd shadow;
/* current settings */
- __u16 left, right, treble, bass, muted;
+ u16 muted;
int prevmode;
int radio;
int input;
@@ -138,6 +145,11 @@ static inline struct CHIPSTATE *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct CHIPSTATE, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct CHIPSTATE, hdl)->sd;
+}
+
/* ---------------------------------------------------------------------- */
/* i2c I/O functions */
@@ -1523,8 +1535,7 @@ static struct CHIPDESC chiplist[] = {
.rightreg = TDA9875_MVR,
.bassreg = TDA9875_MBA,
.treblereg = TDA9875_MTR,
- .leftinit = 58880,
- .rightinit = 58880,
+ .volinit = 58880,
},
{
.name = "tda9850",
@@ -1618,7 +1629,8 @@ static struct CHIPDESC chiplist[] = {
.inputreg = -1,
.inputmap = { TEA6420_S_SA, TEA6420_S_SB, TEA6420_S_SC },
- .inputmute = TEA6300_S_GMU,
+ .inputmute = TEA6420_S_GMU,
+ .inputmask = 0x07,
},
{
.name = "tda8425",
@@ -1679,121 +1691,39 @@ static struct CHIPDESC chiplist[] = {
/* ---------------------------------------------------------------------- */
-static int tvaudio_g_ctrl(struct v4l2_subdev *sd,
- struct v4l2_control *ctrl)
+static int tvaudio_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
struct CHIPSTATE *chip = to_state(sd);
struct CHIPDESC *desc = chip->desc;
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- if (!(desc->flags & CHIP_HAS_INPUTSEL))
- break;
- ctrl->value=chip->muted;
- return 0;
- case V4L2_CID_AUDIO_VOLUME:
- if (!(desc->flags & CHIP_HAS_VOLUME))
- break;
- ctrl->value = max(chip->left,chip->right);
- return 0;
- case V4L2_CID_AUDIO_BALANCE:
- {
- int volume;
- if (!(desc->flags & CHIP_HAS_VOLUME))
- break;
- volume = max(chip->left,chip->right);
- if (volume)
- ctrl->value=(32768*min(chip->left,chip->right))/volume;
- else
- ctrl->value=32768;
- return 0;
- }
- case V4L2_CID_AUDIO_BASS:
- if (!(desc->flags & CHIP_HAS_BASSTREBLE))
- break;
- ctrl->value = chip->bass;
- return 0;
- case V4L2_CID_AUDIO_TREBLE:
- if (!(desc->flags & CHIP_HAS_BASSTREBLE))
- break;
- ctrl->value = chip->treble;
- return 0;
- }
- return -EINVAL;
-}
-
-static int tvaudio_s_ctrl(struct v4l2_subdev *sd,
- struct v4l2_control *ctrl)
-{
- struct CHIPSTATE *chip = to_state(sd);
- struct CHIPDESC *desc = chip->desc;
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- if (!(desc->flags & CHIP_HAS_INPUTSEL))
- break;
-
- if (ctrl->value < 0 || ctrl->value >= 2)
- return -ERANGE;
- chip->muted = ctrl->value;
+ chip->muted = ctrl->val;
if (chip->muted)
chip_write_masked(chip,desc->inputreg,desc->inputmute,desc->inputmask);
else
chip_write_masked(chip,desc->inputreg,
desc->inputmap[chip->input],desc->inputmask);
return 0;
- case V4L2_CID_AUDIO_VOLUME:
- {
- int volume,balance;
-
- if (!(desc->flags & CHIP_HAS_VOLUME))
- break;
-
- volume = max(chip->left,chip->right);
- if (volume)
- balance=(32768*min(chip->left,chip->right))/volume;
- else
- balance=32768;
-
- volume=ctrl->value;
- chip->left = (min(65536 - balance,32768) * volume) / 32768;
- chip->right = (min(balance,volume *(__u16)32768)) / 32768;
-
- chip_write(chip,desc->leftreg,desc->volfunc(chip->left));
- chip_write(chip,desc->rightreg,desc->volfunc(chip->right));
-
- return 0;
- }
- case V4L2_CID_AUDIO_BALANCE:
- {
- int volume, balance;
-
- if (!(desc->flags & CHIP_HAS_VOLUME))
- break;
-
- volume = max(chip->left, chip->right);
- balance = ctrl->value;
- chip->left = (min(65536 - balance, 32768) * volume) / 32768;
- chip->right = (min(balance, volume * (__u16)32768)) / 32768;
+ case V4L2_CID_AUDIO_VOLUME: {
+ u32 volume, balance;
+ u32 left, right;
- chip_write(chip, desc->leftreg, desc->volfunc(chip->left));
- chip_write(chip, desc->rightreg, desc->volfunc(chip->right));
+ volume = chip->volume->val;
+ balance = chip->balance->val;
+ left = (min(65536U - balance, 32768U) * volume) / 32768U;
+ right = (min(balance, 32768U) * volume) / 32768U;
+ chip_write(chip, desc->leftreg, desc->volfunc(left));
+ chip_write(chip, desc->rightreg, desc->volfunc(right));
return 0;
}
case V4L2_CID_AUDIO_BASS:
- if (!(desc->flags & CHIP_HAS_BASSTREBLE))
- break;
- chip->bass = ctrl->value;
- chip_write(chip,desc->bassreg,desc->bassfunc(chip->bass));
-
+ chip_write(chip, desc->bassreg, desc->bassfunc(ctrl->val));
return 0;
case V4L2_CID_AUDIO_TREBLE:
- if (!(desc->flags & CHIP_HAS_BASSTREBLE))
- break;
- chip->treble = ctrl->value;
- chip_write(chip,desc->treblereg,desc->treblefunc(chip->treble));
-
+ chip_write(chip, desc->treblereg, desc->treblefunc(ctrl->val));
return 0;
}
return -EINVAL;
@@ -1812,35 +1742,6 @@ static int tvaudio_s_radio(struct v4l2_subdev *sd)
return 0;
}
-static int tvaudio_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- struct CHIPSTATE *chip = to_state(sd);
- struct CHIPDESC *desc = chip->desc;
-
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- if (desc->flags & CHIP_HAS_INPUTSEL)
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
- break;
- case V4L2_CID_AUDIO_VOLUME:
- if (desc->flags & CHIP_HAS_VOLUME)
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 58880);
- break;
- case V4L2_CID_AUDIO_BALANCE:
- if (desc->flags & CHIP_HAS_VOLUME)
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
- break;
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- if (desc->flags & CHIP_HAS_BASSTREBLE)
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
- break;
- default:
- break;
- }
- return -EINVAL;
-}
-
static int tvaudio_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
@@ -1944,13 +1845,32 @@ static int tvaudio_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ide
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TVAUDIO, 0);
}
+static int tvaudio_log_status(struct v4l2_subdev *sd)
+{
+ struct CHIPSTATE *chip = to_state(sd);
+ struct CHIPDESC *desc = chip->desc;
+
+ v4l2_info(sd, "Chip: %s\n", desc->name);
+ v4l2_ctrl_handler_log_status(&chip->hdl, sd->name);
+ return 0;
+}
+
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops tvaudio_ctrl_ops = {
+ .s_ctrl = tvaudio_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops tvaudio_core_ops = {
+ .log_status = tvaudio_log_status,
.g_chip_ident = tvaudio_g_chip_ident,
- .queryctrl = tvaudio_queryctrl,
- .g_ctrl = tvaudio_g_ctrl,
- .s_ctrl = tvaudio_s_ctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
.s_std = tvaudio_s_std,
};
@@ -2035,6 +1955,10 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
else
chip_cmd(chip, "init", &desc->init);
+ v4l2_ctrl_handler_init(&chip->hdl, 5);
+ if (desc->flags & CHIP_HAS_INPUTSEL)
+ v4l2_ctrl_new_std(&chip->hdl, &tvaudio_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
if (desc->flags & CHIP_HAS_VOLUME) {
if (!desc->volfunc) {
/* This shouldn't happen. Warn user, but keep working
@@ -2043,12 +1967,14 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
v4l2_info(sd, "volume callback undefined!\n");
desc->flags &= ~CHIP_HAS_VOLUME;
} else {
- chip->left = desc->leftinit ? desc->leftinit : 65535;
- chip->right = desc->rightinit ? desc->rightinit : 65535;
- chip_write(chip, desc->leftreg,
- desc->volfunc(chip->left));
- chip_write(chip, desc->rightreg,
- desc->volfunc(chip->right));
+ chip->volume = v4l2_ctrl_new_std(&chip->hdl,
+ &tvaudio_ctrl_ops, V4L2_CID_AUDIO_VOLUME,
+ 0, 65535, 65535 / 100,
+ desc->volinit ? desc->volinit : 65535);
+ chip->balance = v4l2_ctrl_new_std(&chip->hdl,
+ &tvaudio_ctrl_ops, V4L2_CID_AUDIO_BALANCE,
+ 0, 65535, 65535 / 100, 32768);
+ v4l2_ctrl_cluster(2, &chip->volume);
}
}
if (desc->flags & CHIP_HAS_BASSTREBLE) {
@@ -2059,17 +1985,28 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
v4l2_info(sd, "bass/treble callbacks undefined!\n");
desc->flags &= ~CHIP_HAS_BASSTREBLE;
} else {
- chip->treble = desc->trebleinit ?
- desc->trebleinit : 32768;
- chip->bass = desc->bassinit ?
- desc->bassinit : 32768;
- chip_write(chip, desc->bassreg,
- desc->bassfunc(chip->bass));
- chip_write(chip, desc->treblereg,
- desc->treblefunc(chip->treble));
+ v4l2_ctrl_new_std(&chip->hdl,
+ &tvaudio_ctrl_ops, V4L2_CID_AUDIO_BASS,
+ 0, 65535, 65535 / 100,
+ desc->bassinit ? desc->bassinit : 32768);
+ v4l2_ctrl_new_std(&chip->hdl,
+ &tvaudio_ctrl_ops, V4L2_CID_AUDIO_TREBLE,
+ 0, 65535, 65535 / 100,
+ desc->trebleinit ? desc->trebleinit : 32768);
}
}
+ sd->ctrl_handler = &chip->hdl;
+ if (chip->hdl.error) {
+ int err = chip->hdl.error;
+
+ v4l2_ctrl_handler_free(&chip->hdl);
+ kfree(chip);
+ return err;
+ }
+ /* set controls to the default values */
+ v4l2_ctrl_handler_setup(&chip->hdl);
+
chip->thread = NULL;
init_timer(&chip->wt);
if (desc->flags & CHIP_NEED_CHECKMODE) {
@@ -2105,6 +2042,7 @@ static int tvaudio_remove(struct i2c_client *client)
}
v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&chip->hdl);
kfree(chip);
return 0;
}
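A hedged sketch of the control-framework sequence tvaudio now uses, wrapped here in an illustrative example_register_ctrls() helper that reuses the driver's CHIPSTATE/CHIPDESC types and tvaudio_ctrl_ops: standard controls are registered on the handler, volume and balance are clustered so one s_ctrl call sees both, and v4l2_ctrl_handler_setup() pushes the defaults to the chip.

static int example_register_ctrls(struct CHIPSTATE *chip,
				  struct CHIPDESC *desc,
				  struct v4l2_subdev *sd)
{
	v4l2_ctrl_handler_init(&chip->hdl, 5);
	if (desc->flags & CHIP_HAS_INPUTSEL)
		v4l2_ctrl_new_std(&chip->hdl, &tvaudio_ctrl_ops,
				  V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
	if (desc->flags & CHIP_HAS_VOLUME) {
		chip->volume = v4l2_ctrl_new_std(&chip->hdl, &tvaudio_ctrl_ops,
				V4L2_CID_AUDIO_VOLUME, 0, 65535, 65535 / 100,
				desc->volinit ? desc->volinit : 65535);
		chip->balance = v4l2_ctrl_new_std(&chip->hdl, &tvaudio_ctrl_ops,
				V4L2_CID_AUDIO_BALANCE, 0, 65535, 65535 / 100,
				32768);
		/* volume and balance are evaluated together in s_ctrl */
		v4l2_ctrl_cluster(2, &chip->volume);
	}
	sd->ctrl_handler = &chip->hdl;
	if (chip->hdl.error)
		return chip->hdl.error;
	/* push the default control values out to the chip */
	return v4l2_ctrl_handler_setup(&chip->hdl);
}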
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index d5e10215a28f..aa94ebc2d755 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -951,7 +951,7 @@ tvp514x_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -ENODEV;
}
- decoder = kzalloc(sizeof(*decoder), GFP_KERNEL);
+ decoder = devm_kzalloc(&client->dev, sizeof(*decoder), GFP_KERNEL);
if (!decoder)
return -ENOMEM;
@@ -998,7 +998,6 @@ tvp514x_probe(struct i2c_client *client, const struct i2c_device_id *id)
int err = decoder->hdl.error;
v4l2_ctrl_handler_free(&decoder->hdl);
- kfree(decoder);
return err;
}
v4l2_ctrl_handler_setup(&decoder->hdl);
@@ -1023,7 +1022,6 @@ static int tvp514x_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&decoder->hdl);
- kfree(decoder);
return 0;
}
/* TVP5146 Init/Power on Sequence */
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 31104a960652..5967e1a0c809 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1096,13 +1096,6 @@ static const struct v4l2_ctrl_ops tvp5150_ctrl_ops = {
static const struct v4l2_subdev_core_ops tvp5150_core_ops = {
.log_status = tvp5150_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
.s_std = tvp5150_s_std,
.reset = tvp5150_reset,
.g_chip_ident = tvp5150_g_chip_ident,
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index fb6a5b57eb83..537f6b4d4918 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -1036,7 +1036,7 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
return -ENODEV;
}
- device = kzalloc(sizeof(struct tvp7002), GFP_KERNEL);
+ device = devm_kzalloc(&c->dev, sizeof(struct tvp7002), GFP_KERNEL);
if (!device)
return -ENOMEM;
@@ -1052,7 +1052,7 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
error = tvp7002_read(sd, TVP7002_CHIP_REV, &revision);
if (error < 0)
- goto found_error;
+ return error;
/* Get revision number */
v4l2_info(sd, "Rev. %02x detected.\n", revision);
@@ -1063,21 +1063,21 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
error = tvp7002_write_inittab(sd, tvp7002_init_default);
if (error < 0)
- goto found_error;
+ return error;
/* Set polarity information after registers have been set */
polarity_a = 0x20 | device->pdata->hs_polarity << 5
| device->pdata->vs_polarity << 2;
error = tvp7002_write(sd, TVP7002_SYNC_CTL_1, polarity_a);
if (error < 0)
- goto found_error;
+ return error;
polarity_b = 0x01 | device->pdata->fid_polarity << 2
| device->pdata->sog_polarity << 1
| device->pdata->clk_polarity;
error = tvp7002_write(sd, TVP7002_MISC_CTL_3, polarity_b);
if (error < 0)
- goto found_error;
+ return error;
/* Set registers according to default video mode */
preset.preset = device->current_preset->preset;
@@ -1091,16 +1091,11 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
int err = device->hdl.error;
v4l2_ctrl_handler_free(&device->hdl);
- kfree(device);
return err;
}
v4l2_ctrl_handler_setup(&device->hdl);
-found_error:
- if (error < 0)
- kfree(device);
-
- return error;
+ return 0;
}
/*
@@ -1120,7 +1115,6 @@ static int tvp7002_remove(struct i2c_client *c)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&device->hdl);
- kfree(device);
return 0;
}
diff --git a/drivers/media/parport/Kconfig b/drivers/media/parport/Kconfig
index ece13dcff07d..948c981d9f05 100644
--- a/drivers/media/parport/Kconfig
+++ b/drivers/media/parport/Kconfig
@@ -9,6 +9,7 @@ if MEDIA_PARPORT_SUPPORT
config VIDEO_BWQCAM
tristate "Quickcam BW Video For Linux"
depends on PARPORT && VIDEO_V4L2
+ select VIDEOBUF2_VMALLOC
help
Say Y if you have the black and white version of the QuickCam
camera. See the next option for the color version.
diff --git a/drivers/media/parport/bw-qcam.c b/drivers/media/parport/bw-qcam.c
index 5b75a64b199b..06231b85e1a9 100644
--- a/drivers/media/parport/bw-qcam.c
+++ b/drivers/media/parport/bw-qcam.c
@@ -80,6 +80,7 @@ OTHER DEALINGS IN THE SOFTWARE.
#include <media/v4l2-fh.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
+#include <media/videobuf2-vmalloc.h>
/* One from column A... */
#define QC_NOTSET 0
@@ -107,9 +108,11 @@ struct qcam {
struct v4l2_device v4l2_dev;
struct video_device vdev;
struct v4l2_ctrl_handler hdl;
+ struct vb2_queue vb_vidq;
struct pardevice *pdev;
struct parport *pport;
struct mutex lock;
+ struct mutex queue_lock;
int width, height;
int bpp;
int mode;
@@ -418,8 +421,6 @@ static void qc_set(struct qcam *q)
int val;
int val2;
- qc_reset(q);
-
/* Set the brightness. Yes, this is repetitive, but it works.
* Shorter versions seem to fail subtly. Feel free to try :-). */
/* I think the problem was in qc_command, not here -- bls */
@@ -558,7 +559,7 @@ static inline int qc_readbytes(struct qcam *q, char buffer[])
* n=2^(bit depth)-1. Ask me for more details if you don't understand
* this. */
-static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
+static long qc_capture(struct qcam *q, u8 *buf, unsigned long len)
{
int i, j, k, yield;
int bytes;
@@ -609,7 +610,7 @@ static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
if (o < len) {
u8 ch = invert - buffer[k];
got++;
- put_user(ch << shift, buf + o);
+ buf[o] = ch << shift;
}
}
pixels_read += bytes;
@@ -639,6 +640,67 @@ static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
return len;
}
+/* ------------------------------------------------------------------
+ Videobuf operations
+ ------------------------------------------------------------------*/
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct qcam *dev = vb2_get_drv_priv(vq);
+
+ if (0 == *nbuffers)
+ *nbuffers = 3;
+ *nplanes = 1;
+ mutex_lock(&dev->lock);
+ if (fmt)
+ sizes[0] = fmt->fmt.pix.width * fmt->fmt.pix.height;
+ else
+ sizes[0] = (dev->width / dev->transfer_scale) *
+ (dev->height / dev->transfer_scale);
+ mutex_unlock(&dev->lock);
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+}
+
+static int buffer_finish(struct vb2_buffer *vb)
+{
+ struct qcam *qcam = vb2_get_drv_priv(vb->vb2_queue);
+ void *vbuf = vb2_plane_vaddr(vb, 0);
+ int size = vb->vb2_queue->plane_sizes[0];
+ int len;
+
+ mutex_lock(&qcam->lock);
+ parport_claim_or_block(qcam->pdev);
+
+ qc_reset(qcam);
+
+ /* Update the camera parameters if we need to */
+ if (qcam->status & QC_PARAM_CHANGE)
+ qc_set(qcam);
+
+ len = qc_capture(qcam, vbuf, size);
+
+ parport_release(qcam->pdev);
+ mutex_unlock(&qcam->lock);
+ if (len != size)
+ vb->state = VB2_BUF_STATE_ERROR;
+ vb2_set_plane_payload(vb, 0, len);
+ return 0;
+}
+
+static struct vb2_ops qcam_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buffer_queue,
+ .buf_finish = buffer_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
/*
* Video4linux interfacing
*/
@@ -651,7 +713,8 @@ static int qcam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Connectix B&W Quickcam", sizeof(vcap->card));
strlcpy(vcap->bus_info, qcam->pport->name, sizeof(vcap->bus_info));
- vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -693,6 +756,7 @@ static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
pix->sizeimage = pix->width * pix->height;
/* Just a guess */
pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->priv = 0;
return 0;
}
@@ -718,6 +782,7 @@ static int qcam_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format
pix->sizeimage = pix->width * pix->height;
/* Just a guess */
pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->priv = 0;
return 0;
}
@@ -729,6 +794,8 @@ static int qcam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
if (ret)
return ret;
+ if (vb2_is_busy(&qcam->vb_vidq))
+ return -EBUSY;
qcam->width = 320;
qcam->height = 240;
if (pix->height == 60)
@@ -742,12 +809,10 @@ static int qcam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
else
qcam->bpp = 4;
- mutex_lock(&qcam->lock);
qc_setscanmode(qcam);
/* We must update the camera before we grab. We could
just have changed the grab size */
qcam->status |= QC_PARAM_CHANGE;
- mutex_unlock(&qcam->lock);
return 0;
}
@@ -792,41 +857,12 @@ static int qcam_enum_framesizes(struct file *file, void *fh,
return 0;
}
-static ssize_t qcam_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qcam *qcam = video_drvdata(file);
- int len;
- parport_claim_or_block(qcam->pdev);
-
- mutex_lock(&qcam->lock);
-
- qc_reset(qcam);
-
- /* Update the camera parameters if we need to */
- if (qcam->status & QC_PARAM_CHANGE)
- qc_set(qcam);
-
- len = qc_capture(qcam, buf, count);
-
- mutex_unlock(&qcam->lock);
-
- parport_release(qcam->pdev);
- return len;
-}
-
-static unsigned int qcam_poll(struct file *filp, poll_table *wait)
-{
- return v4l2_ctrl_poll(filp, wait) | POLLIN | POLLRDNORM;
-}
-
static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct qcam *qcam =
container_of(ctrl->handler, struct qcam, hdl);
int ret = 0;
- mutex_lock(&qcam->lock);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
qcam->brightness = ctrl->val;
@@ -841,21 +877,19 @@ static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
ret = -EINVAL;
break;
}
- if (ret == 0) {
- qc_setscanmode(qcam);
+ if (ret == 0)
qcam->status |= QC_PARAM_CHANGE;
- }
- mutex_unlock(&qcam->lock);
return ret;
}
static const struct v4l2_file_operations qcam_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
- .release = v4l2_fh_release,
- .poll = qcam_poll,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .read = qcam_read,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
};
static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
@@ -868,6 +902,14 @@ static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
.vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_log_status = v4l2_ctrl_log_status,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
@@ -884,6 +926,8 @@ static struct qcam *qcam_init(struct parport *port)
{
struct qcam *qcam;
struct v4l2_device *v4l2_dev;
+ struct vb2_queue *q;
+ int err;
qcam = kzalloc(sizeof(struct qcam), GFP_KERNEL);
if (qcam == NULL)
@@ -907,31 +951,45 @@ static struct qcam *qcam_init(struct parport *port)
V4L2_CID_GAMMA, 0, 255, 1, 105);
if (qcam->hdl.error) {
v4l2_err(v4l2_dev, "couldn't register controls\n");
- v4l2_ctrl_handler_free(&qcam->hdl);
- kfree(qcam);
- return NULL;
+ goto exit;
+ }
+
+ mutex_init(&qcam->lock);
+ mutex_init(&qcam->queue_lock);
+
+ /* initialize queue */
+ q = &qcam->vb_vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ q->drv_priv = qcam;
+ q->ops = &qcam_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ err = vb2_queue_init(q);
+ if (err < 0) {
+ v4l2_err(v4l2_dev, "couldn't init vb2_queue for %s.\n", port->name);
+ goto exit;
}
+ qcam->vdev.queue = q;
+ qcam->vdev.queue->lock = &qcam->queue_lock;
+
qcam->pport = port;
qcam->pdev = parport_register_device(port, v4l2_dev->name, NULL, NULL,
NULL, 0, NULL);
if (qcam->pdev == NULL) {
v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
- v4l2_ctrl_handler_free(&qcam->hdl);
- kfree(qcam);
- return NULL;
+ goto exit;
}
strlcpy(qcam->vdev.name, "Connectix QuickCam", sizeof(qcam->vdev.name));
qcam->vdev.v4l2_dev = v4l2_dev;
qcam->vdev.ctrl_handler = &qcam->hdl;
qcam->vdev.fops = &qcam_fops;
+ qcam->vdev.lock = &qcam->lock;
qcam->vdev.ioctl_ops = &qcam_ioctl_ops;
set_bit(V4L2_FL_USE_FH_PRIO, &qcam->vdev.flags);
qcam->vdev.release = video_device_release_empty;
video_set_drvdata(&qcam->vdev, qcam);
- mutex_init(&qcam->lock);
-
qcam->port_mode = (QC_ANY | QC_NOTSET);
qcam->width = 320;
qcam->height = 240;
@@ -945,6 +1003,11 @@ static struct qcam *qcam_init(struct parport *port)
qcam->mode = -1;
qcam->status = QC_PARAM_CHANGE;
return qcam;
+
+exit:
+ v4l2_ctrl_handler_free(&qcam->hdl);
+ kfree(qcam);
+ return NULL;
}
static int qc_calibrate(struct qcam *q)
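
[Editor's note: illustration only, not part of the patch] The bw-qcam hunks above convert the driver from a private read()/poll() implementation to videobuf2 with vmalloc'ed buffers. The sketch below shows the minimal shape of that wiring under the same assumptions the patch makes; the mycam* names are hypothetical, while the vb2 calls themselves (vb2_queue_init, vb2_vmalloc_memops, vb2_buffer_done and the vb2_ops_wait_* helpers) are the ones the patch uses.

#include <linux/mutex.h>
#include <media/videobuf2-vmalloc.h>

struct mycam {
	struct vb2_queue vb_vidq;
	struct mutex lock;		/* serializes hardware access and ioctls */
	struct mutex queue_lock;	/* serializes vb2 queue operations */
};

static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
		       unsigned int *nbuffers, unsigned int *nplanes,
		       unsigned int sizes[], void *alloc_ctxs[])
{
	*nplanes = 1;
	sizes[0] = 320 * 240;	/* worst-case frame size for this sketch */
	return 0;
}

static void buffer_queue(struct vb2_buffer *vb)
{
	/* a real driver fills the buffer here (or in buf_finish) and uses
	 * VB2_BUF_STATE_ERROR when the capture came up short */
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}

static struct vb2_ops mycam_qops = {
	.queue_setup	= queue_setup,
	.buf_queue	= buffer_queue,
	.wait_prepare	= vb2_ops_wait_prepare,
	.wait_finish	= vb2_ops_wait_finish,
};

static int mycam_init_queue(struct mycam *cam)
{
	struct vb2_queue *q = &cam->vb_vidq;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	q->drv_priv = cam;
	q->ops = &mycam_qops;
	q->mem_ops = &vb2_vmalloc_memops;	/* plain vmalloc'ed frames */
	q->lock = &cam->queue_lock;		/* used by the wait_* helpers */
	return vb2_queue_init(q);
}

With vdev.queue pointing at this queue, the read/poll/mmap paths and the REQBUFS/QBUF/DQBUF/STREAMON ioctls can all be delegated to the stock vb2_fop_* and vb2_ioctl_* helpers, which is exactly what the new qcam_fops and qcam_ioctl_ops tables above do.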
diff --git a/drivers/media/pci/bt8xx/Makefile b/drivers/media/pci/bt8xx/Makefile
index 5f06597c6a6e..f9fe7c4e7d53 100644
--- a/drivers/media/pci/bt8xx/Makefile
+++ b/drivers/media/pci/bt8xx/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_DVB_BT8XX) += bt878.o dvb-bt8xx.o dst.o dst_ca.o
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += -Idrivers/media/dvb-frontends
ccflags-y += -Idrivers/media/i2c
+ccflags-y += -Idrivers/media/common
ccflags-y += -Idrivers/media/tuners
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 45e5d0661b60..ccd18e4ee789 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3835,7 +3835,7 @@ bttv_irq_wakeup_video(struct bttv *btv, struct bttv_buffer_set *wakeup,
{
struct timeval ts;
- do_gettimeofday(&ts);
+ v4l2_get_timestamp(&ts);
if (wakeup->top == wakeup->bottom) {
if (NULL != wakeup->top && curr->top != wakeup->top) {
@@ -3878,7 +3878,7 @@ bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup,
if (NULL == wakeup)
return;
- do_gettimeofday(&ts);
+ v4l2_get_timestamp(&ts);
wakeup->vb.ts = ts;
wakeup->vb.field_count = btv->field_count;
wakeup->vb.state = state;
@@ -3949,7 +3949,7 @@ bttv_irq_wakeup_top(struct bttv *btv)
btv->curr.top = NULL;
bttv_risc_hook(btv, RISC_SLOT_O_FIELD, NULL, 0);
- do_gettimeofday(&wakeup->vb.ts);
+ v4l2_get_timestamp(&wakeup->vb.ts);
wakeup->vb.field_count = btv->field_count;
wakeup->vb.state = VIDEOBUF_DONE;
wake_up(&wakeup->vb.done);
diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c
index 5039b8826e0a..c63c643ed1f8 100644
--- a/drivers/media/pci/bt8xx/bttv-i2c.c
+++ b/drivers/media/pci/bt8xx/bttv-i2c.c
@@ -173,7 +173,7 @@ bttv_i2c_sendbytes(struct bttv *btv, const struct i2c_msg *msg, int last)
if (i2c_debug)
pr_cont(" %02x", msg->buf[cnt]);
}
- if (!(xmit & BT878_I2C_NOSTOP))
+ if (i2c_debug && !(xmit & BT878_I2C_NOSTOP))
pr_cont(">\n");
return msg->len;
@@ -366,8 +366,7 @@ int init_bttv_i2c(struct bttv *btv)
strlcpy(btv->c.i2c_adap.name, "bttv",
sizeof(btv->c.i2c_adap.name));
- memcpy(&btv->i2c_algo, &bttv_i2c_algo_bit_template,
- sizeof(bttv_i2c_algo_bit_template));
+ btv->i2c_algo = bttv_i2c_algo_bit_template;
btv->i2c_algo.udelay = i2c_udelay;
btv->i2c_algo.data = btv;
btv->c.i2c_adap.algo_data = &btv->i2c_algo;
diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
index 7d96fab7d246..0e788fca992c 100644
--- a/drivers/media/pci/bt8xx/dst_ca.c
+++ b/drivers/media/pci/bt8xx/dst_ca.c
@@ -180,11 +180,11 @@ static int ca_get_app_info(struct dst_state *state)
put_command_and_length(&state->messages[0], CA_APP_INFO, length);
// Copy application_type, application_manufacturer and manufacturer_code
- memcpy(&state->messages[4], &state->messages[7], 5);
+ memmove(&state->messages[4], &state->messages[7], 5);
// Set string length and copy string
state->messages[9] = str_length;
- memcpy(&state->messages[10], &state->messages[12], str_length);
+ memmove(&state->messages[10], &state->messages[12], str_length);
return 0;
}
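
[Editor's note: illustration only, not part of the patch] This dst_ca.c change, like the cx18 and ivtv VBI changes further down, replaces memcpy with memmove because the source and destination live in the same buffer and can overlap; memcpy's behaviour is undefined for overlapping regions, memmove's is not. A stand-alone illustration in plain C:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "ABCDEFGH";

	/* shift six bytes two places to the left inside the same buffer;
	 * the regions overlap, so memmove() is required (memcpy() would
	 * be undefined behaviour here) */
	memmove(buf, buf + 2, 6);
	buf[6] = '\0';
	printf("%s\n", buf);	/* prints CDEFGH */
	return 0;
}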
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index 8e971ff60588..b2c8c3439fea 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -197,7 +197,7 @@ err_exit:
return ret;
}
-static int __init cx18_alsa_load(struct cx18 *cx)
+static int cx18_alsa_load(struct cx18 *cx)
{
struct v4l2_device *v4l2_dev = &cx->v4l2_dev;
struct cx18_stream *s;
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.h b/drivers/media/pci/cx18/cx18-alsa-pcm.h
index d26e51f94577..e2b2c5b01215 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.h
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.h
@@ -20,7 +20,7 @@
* 02111-1307 USA
*/
-int __init snd_cx18_pcm_create(struct snd_cx18_card *cxsc);
+int snd_cx18_pcm_create(struct snd_cx18_card *cxsc);
/* Used by cx18-mailbox to announce the PCM data to the module */
void cx18_alsa_announce_pcm_data(struct snd_cx18_card *card, u8 *pcm_data,
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index 4908eb7bcf6c..4af8cd6df95d 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -116,9 +116,6 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx)
const char *type = hw_devicenames[idx];
u32 hw = 1 << idx;
- if (idx >= ARRAY_SIZE(hw_addrs))
- return -1;
-
if (hw == CX18_HW_TUNER) {
/* special tuner group handling */
sd = v4l2_i2c_new_subdev(&cx->v4l2_dev,
@@ -240,15 +237,13 @@ int init_cx18_i2c(struct cx18 *cx)
for (i = 0; i < 2; i++) {
/* Setup algorithm for adapter */
- memcpy(&cx->i2c_algo[i], &cx18_i2c_algo_template,
- sizeof(struct i2c_algo_bit_data));
+ cx->i2c_algo[i] = cx18_i2c_algo_template;
cx->i2c_algo_cb_data[i].cx = cx;
cx->i2c_algo_cb_data[i].bus_index = i;
cx->i2c_algo[i].data = &cx->i2c_algo_cb_data[i];
/* Setup adapter */
- memcpy(&cx->i2c_adap[i], &cx18_i2c_adap_template,
- sizeof(struct i2c_adapter));
+ cx->i2c_adap[i] = cx18_i2c_adap_template;
cx->i2c_adap[i].algo_data = &cx->i2c_algo[i];
sprintf(cx->i2c_adap[i].name + strlen(cx->i2c_adap[i].name),
" #%d-%d", cx->instance, i);
diff --git a/drivers/media/pci/cx18/cx18-vbi.c b/drivers/media/pci/cx18/cx18-vbi.c
index 6d3121ff45a2..add99642f1e2 100644
--- a/drivers/media/pci/cx18/cx18-vbi.c
+++ b/drivers/media/pci/cx18/cx18-vbi.c
@@ -84,7 +84,7 @@ static void copy_vbi_data(struct cx18 *cx, int lines, u32 pts_stamp)
(the max size of the VBI data is 36 * 43 + 4 bytes).
So in this case we use the magic number 'ITV0'. */
memcpy(dst + sd, "ITV0", 4);
- memcpy(dst + sd + 4, dst + sd + 12, line * 43);
+ memmove(dst + sd + 4, dst + sd + 12, line * 43);
size = 4 + ((43 * line + 3) & ~3);
} else {
memcpy(dst + sd, "itv0", 4);
diff --git a/drivers/media/pci/cx23885/Kconfig b/drivers/media/pci/cx23885/Kconfig
index eafa1144b17d..b3688aa8acc3 100644
--- a/drivers/media/pci/cx23885/Kconfig
+++ b/drivers/media/pci/cx23885/Kconfig
@@ -25,7 +25,10 @@ config VIDEO_CX23885
select DVB_CX24116 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0367 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA10071 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_A8293 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2063 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2131 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_XC2028 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/cx23885/Makefile b/drivers/media/pci/cx23885/Makefile
index a2cbdcf15a8c..2a2cafb8cf5b 100644
--- a/drivers/media/pci/cx23885/Makefile
+++ b/drivers/media/pci/cx23885/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_VIDEO_CX23885) += cx23885.o
obj-$(CONFIG_MEDIA_ALTERA_CI) += altera-ci.o
ccflags-y += -Idrivers/media/i2c
+ccflags-y += -Idrivers/media/common
ccflags-y += -Idrivers/media/tuners
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 6277e145f0b8..7e923f8dd2f5 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -572,6 +572,39 @@ struct cx23885_board cx23885_boards[] = {
[CX23885_BOARD_PROF_8000] = {
.name = "Prof Revolution DVB-S2 8000",
.portb = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_HAUPPAUGE_HVR4400] = {
+ .name = "Hauppauge WinTV-HVR4400",
+ .portb = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_AVERMEDIA_HC81R] = {
+ .name = "AVerTV Hybrid Express Slim HC81R",
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0x61, /* 0xc2 >> 1 */
+ .tuner_bus = 1,
+ .porta = CX23885_ANALOG_VIDEO,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN2_CH1 |
+ CX25840_VIN5_CH2 |
+ CX25840_NONE0_CH3 |
+ CX25840_NONE1_CH3,
+ .amux = CX25840_AUDIO8,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN8_CH1 |
+ CX25840_NONE_CH2 |
+ CX25840_VIN7_CH3 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO6,
+ }, {
+ .type = CX23885_VMUX_COMPONENT,
+ .vmux = CX25840_VIN1_CH1 |
+ CX25840_NONE_CH2 |
+ CX25840_NONE0_CH3 |
+ CX25840_NONE1_CH3,
+ .amux = CX25840_AUDIO6,
+ } },
}
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -788,6 +821,26 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x8000,
.subdevice = 0x3034,
.card = CX23885_BOARD_PROF_8000,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0xc108,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0xc138,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0xc12a,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0xc1f8,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ }, {
+ .subvendor = 0x1461,
+ .subdevice = 0xd939,
+ .card = CX23885_BOARD_AVERMEDIA_HC81R,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -1012,6 +1065,10 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
altera_ci_tuner_reset(dev, port->nr);
break;
+ case CX23885_BOARD_AVERMEDIA_HC81R:
+ /* XC3028L Reset Command */
+ bitmask = 1 << 2;
+ break;
}
if (bitmask) {
@@ -1301,6 +1358,42 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* enable irq */
cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ /* GPIO-8 tda10071 demod reset */
+
+ /* Put the parts into reset and back */
+ cx23885_gpio_enable(dev, GPIO_8, 1);
+ cx23885_gpio_clear(dev, GPIO_8);
+ mdelay(100);
+ cx23885_gpio_set(dev, GPIO_8);
+ mdelay(100);
+ break;
+ case CX23885_BOARD_AVERMEDIA_HC81R:
+ cx_clear(MC417_CTL, 1);
+ /* GPIO-0,1,2 setup direction as output */
+ cx_set(GP0_IO, 0x00070000);
+ mdelay(10);
+ /* AF9013 demod reset */
+ cx_set(GP0_IO, 0x00010001);
+ mdelay(10);
+ cx_clear(GP0_IO, 0x00010001);
+ mdelay(10);
+ cx_set(GP0_IO, 0x00010001);
+ mdelay(10);
+ /* demod tune? */
+ cx_clear(GP0_IO, 0x00030003);
+ mdelay(10);
+ cx_set(GP0_IO, 0x00020002);
+ mdelay(10);
+ cx_set(GP0_IO, 0x00010001);
+ mdelay(10);
+ cx_clear(GP0_IO, 0x00020002);
+ /* XC3028L tuner reset */
+ cx_set(GP0_IO, 0x00040004);
+ cx_clear(GP0_IO, 0x00040004);
+ cx_set(GP0_IO, 0x00040004);
+ mdelay(60);
+ break;
}
}
@@ -1378,6 +1471,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
break;
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_MYGICA_X8507:
if (!enable_885_ir)
break;
dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
@@ -1420,6 +1514,7 @@ void cx23885_ir_fini(struct cx23885_dev *dev)
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ case CX23885_BOARD_MYGICA_X8507:
cx23885_irq_remove(dev, PCI_MSK_AV_CORE);
/* sd_ir is a duplicate pointer to the AV Core, just clear it */
dev->sd_ir = NULL;
@@ -1464,6 +1559,7 @@ void cx23885_ir_pci_int_enable(struct cx23885_dev *dev)
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ case CX23885_BOARD_MYGICA_X8507:
if (dev->sd_ir)
cx23885_irq_add_enable(dev, PCI_MSK_AV_CORE);
break;
@@ -1509,12 +1605,24 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1210:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
+ case CX23885_BOARD_HAUPPAUGE_HVR4400:
if (dev->i2c_bus[0].i2c_rc == 0)
hauppauge_eeprom(dev, eeprom+0xc0);
break;
}
switch (dev->board) {
+ case CX23885_BOARD_AVERMEDIA_HC81R:
+ /* Defaults for VID B */
+ ts1->gen_ctrl_val = 0x4; /* Parallel */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ /* Defaults for VID C */
+ /* DREQ_POL, SMODE, PUNC_CLK, MCLK_POL Serial bus + punc clk */
+ ts2->gen_ctrl_val = 0x10e;
+ ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP:
case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
@@ -1581,6 +1689,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
@@ -1636,6 +1749,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_MPX885:
case CX23885_BOARD_MYGICA_X8507:
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
+ case CX23885_BOARD_AVERMEDIA_HC81R:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", 0x88 >> 1, NULL);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index f0416a668b4c..268654ac9a9f 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -439,7 +439,7 @@ void cx23885_wakeup(struct cx23885_tsport *port,
if ((s16) (count - buf->count) < 0)
break;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
count, buf->count);
buf->vb.state = VIDEOBUF_DONE;
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 2f5b902e63ae..9c5ed10b2c5e 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -57,6 +57,7 @@
#include "netup-init.h"
#include "lgdt3305.h"
#include "atbm8830.h"
+#include "ts2020.h"
#include "ds3000.h"
#include "cx23885-f300.h"
#include "altera-ci.h"
@@ -66,6 +67,8 @@
#include "stv090x.h"
#include "stb6100.h"
#include "stb6100_cfg.h"
+#include "tda10071.h"
+#include "a8293.h"
static unsigned int debug;
@@ -469,6 +472,11 @@ static struct ds3000_config tevii_ds3000_config = {
.demod_address = 0x68,
};
+static struct ts2020_config tevii_ts2020_config = {
+ .tuner_address = 0x60,
+ .clk_out_div = 1,
+};
+
static struct cx24116_config dvbworld_cx24116_config = {
.demod_address = 0x05,
};
@@ -493,20 +501,20 @@ static struct xc5000_config mygica_x8506_xc5000_config = {
};
static struct stv090x_config prof_8000_stv090x_config = {
- .device = STV0903,
- .demod_mode = STV090x_SINGLE,
- .clk_mode = STV090x_CLK_EXT,
- .xtal = 27000000,
- .address = 0x6A,
- .ts1_mode = STV090x_TSMODE_PARALLEL_PUNCTURED,
- .repeater_level = STV090x_RPTLEVEL_64,
- .adc1_range = STV090x_ADC_2Vpp,
- .diseqc_envelope_mode = false,
-
- .tuner_get_frequency = stb6100_get_frequency,
- .tuner_set_frequency = stb6100_set_frequency,
- .tuner_set_bandwidth = stb6100_set_bandwidth,
- .tuner_get_bandwidth = stb6100_get_bandwidth,
+ .device = STV0903,
+ .demod_mode = STV090x_SINGLE,
+ .clk_mode = STV090x_CLK_EXT,
+ .xtal = 27000000,
+ .address = 0x6A,
+ .ts1_mode = STV090x_TSMODE_PARALLEL_PUNCTURED,
+ .repeater_level = STV090x_RPTLEVEL_64,
+ .adc1_range = STV090x_ADC_2Vpp,
+ .diseqc_envelope_mode = false,
+
+ .tuner_get_frequency = stb6100_get_frequency,
+ .tuner_set_frequency = stb6100_set_frequency,
+ .tuner_set_bandwidth = stb6100_set_bandwidth,
+ .tuner_get_bandwidth = stb6100_get_bandwidth,
};
static struct stb6100_config prof_8000_stb6100_config = {
@@ -659,6 +667,20 @@ static struct mt2063_config terratec_mt2063_config[] = {
},
};
+static const struct tda10071_config hauppauge_tda10071_config = {
+ .demod_i2c_addr = 0x05,
+ .tuner_i2c_addr = 0x54,
+ .i2c_wr_max = 64,
+ .ts_mode = TDA10071_TS_SERIAL,
+ .spec_inv = 0,
+ .xtal = 40444000, /* 40.444 MHz */
+ .pll_multiplier = 20,
+};
+
+static const struct a8293_config hauppauge_a8293_config = {
+ .i2c_addr = 0x0b,
+};
+
static int netup_altera_fpga_rw(void *device, int flag, int data, int read)
{
struct cx23885_dev *dev = (struct cx23885_dev *)device;
@@ -1011,8 +1033,11 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(ds3000_attach,
&tevii_ds3000_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL)
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(ts2020_attach, fe0->dvb.frontend,
+ &tevii_ts2020_config, &i2c_bus->i2c_adap);
fe0->dvb.frontend->ops.set_voltage = f300_set_voltage;
+ }
break;
case CX23885_BOARD_DVBWORLD_2005:
@@ -1242,6 +1267,17 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend->ops.set_voltage = p8000_set_voltage;
}
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ i2c_bus = &dev->i2c_bus[0];
+ fe0->dvb.frontend = dvb_attach(tda10071_attach,
+ &hauppauge_tda10071_config,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(a8293_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &hauppauge_a8293_config);
+ }
+ break;
default:
printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
" isn't supported yet\n",
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 4f1055a194b5..7875dfbe09ff 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -89,6 +89,7 @@ void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events)
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ case CX23885_BOARD_MYGICA_X8507:
/*
* The only boards we handle right now. However other boards
* using the CX2388x integrated IR controller should be similar
@@ -140,6 +141,7 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ case CX23885_BOARD_MYGICA_X8507:
/*
* The IR controller on this board only returns pulse widths.
* Any other mode setting will fail to set up the device.
@@ -289,6 +291,13 @@ int cx23885_input_init(struct cx23885_dev *dev)
/* A guess at the remote */
rc_map = RC_MAP_TEVII_NEC;
break;
+ case CX23885_BOARD_MYGICA_X8507:
+ /* Integrated CX23885 IR controller */
+ driver_type = RC_DRIVER_IR_RAW;
+ allowed_protos = RC_BIT_ALL;
+ /* A guess at the remote */
+ rc_map = RC_MAP_TOTAL_MEDIA_IN_HAND_02;
+ break;
default:
return -ENODEV;
}
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 1a21926ca412..5991bc8dc158 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -300,7 +300,7 @@ void cx23885_video_wakeup(struct cx23885_dev *dev,
if ((s16) (count - buf->count) < 0)
break;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
count, buf->count);
buf->vb.state = VIDEOBUF_DONE;
@@ -509,7 +509,8 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) ||
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) ||
- (dev->board == CX23885_BOARD_MYGICA_X8507)) {
+ (dev->board == CX23885_BOARD_MYGICA_X8507) ||
+ (dev->board == CX23885_BOARD_AVERMEDIA_HC81R)) {
/* Configure audio routing */
v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
INPUT(input)->amux, 0, 0);
@@ -1818,8 +1819,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
spin_lock_init(&dev->slock);
/* Initialize VBI template */
- memcpy(&cx23885_vbi_template, &cx23885_video_template,
- sizeof(cx23885_vbi_template));
+ cx23885_vbi_template = cx23885_video_template;
strcpy(cx23885_vbi_template.name, "cx23885-vbi");
dev->tvnorm = cx23885_video_template.current_norm;
@@ -1878,6 +1878,18 @@ int cx23885_video_register(struct cx23885_dev *dev)
};
v4l2_subdev_call(sd, tuner, s_config, &cfg);
}
+
+ if (dev->board == CX23885_BOARD_AVERMEDIA_HC81R) {
+ struct xc2028_ctrl ctrl = {
+ .fname = "xc3028L-v36.fw",
+ .max_len = 64
+ };
+ struct v4l2_priv_tun_config cfg = {
+ .tuner = dev->tuner_type,
+ .priv = &ctrl
+ };
+ v4l2_subdev_call(sd, tuner, s_config, &cfg);
+ }
}
}
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 67f40d31450b..59c322d870f2 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -91,6 +91,8 @@
#define CX23885_BOARD_TEVII_S471 35
#define CX23885_BOARD_HAUPPAUGE_HVR1255_22111 36
#define CX23885_BOARD_PROF_8000 37
+#define CX23885_BOARD_HAUPPAUGE_HVR4400 38
+#define CX23885_BOARD_AVERMEDIA_HC81R 39
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index c4bd1e95d33f..d51eed051d59 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -1237,13 +1237,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
cx23888_ir_write4(dev, CX23888_IR_IRQEN_REG, 0);
mutex_init(&state->rx_params_lock);
- memcpy(&default_params, &default_rx_params,
- sizeof(struct v4l2_subdev_ir_parameters));
+ default_params = default_rx_params;
v4l2_subdev_call(sd, ir, rx_s_parameters, &default_params);
mutex_init(&state->tx_params_lock);
- memcpy(&default_params, &default_tx_params,
- sizeof(struct v4l2_subdev_ir_parameters));
+ default_params = default_tx_params;
v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
} else {
kfifo_free(&state->rx_kfifo);
diff --git a/drivers/media/pci/cx25821/Makefile b/drivers/media/pci/cx25821/Makefile
index 5bf3ea4c1556..caa32b7b51f8 100644
--- a/drivers/media/pci/cx25821/Makefile
+++ b/drivers/media/pci/cx25821/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_VIDEO_CX25821) += cx25821.o
obj-$(CONFIG_VIDEO_CX25821_ALSA) += cx25821-alsa.o
ccflags-y += -Idrivers/media/i2c
+ccflags-y += -Idrivers/media/common
ccflags-y += -Idrivers/media/tuners
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index 53b16dd70320..d4de021dc844 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -130,7 +130,7 @@ void cx25821_video_wakeup(struct cx25821_dev *dev, struct cx25821_dmaqueue *q,
if ((s16) (count - buf->count) < 0)
break;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
buf->vb.state = VIDEOBUF_DONE;
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
diff --git a/drivers/media/pci/cx88/Kconfig b/drivers/media/pci/cx88/Kconfig
index d27fccbf03c4..bb05eca2da29 100644
--- a/drivers/media/pci/cx88/Kconfig
+++ b/drivers/media/pci/cx88/Kconfig
@@ -62,6 +62,8 @@ config VIDEO_CX88_DVB
select DVB_STB6000 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
---help---
This adds support for DVB/ATSC cards based on the
diff --git a/drivers/media/pci/cx88/Makefile b/drivers/media/pci/cx88/Makefile
index d3679c3ee248..8619c1becee2 100644
--- a/drivers/media/pci/cx88/Makefile
+++ b/drivers/media/pci/cx88/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o
ccflags-y += -Idrivers/media/i2c
+ccflags-y += -Idrivers/media/common
ccflags-y += -Idrivers/media/tuners
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 0c255248cbcd..e2e0b8faf7a4 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -3743,7 +3743,7 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
cx88_card_list(core, pci);
}
- memcpy(&core->board, &cx88_boards[core->boardnr], sizeof(core->board));
+ core->board = cx88_boards[core->boardnr];
if (!core->board.num_frontends && (core->board.mpeg & CX88_MPEG_DVB))
core->board.num_frontends = 1;
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index 19a58754c6e1..39f095c37ffd 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -549,7 +549,7 @@ void cx88_wakeup(struct cx88_core *core,
* up to 32767 buffers in flight... */
if ((s16) (count - buf->count) < 0)
break;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
dprintk(2,"[%p/%d] wakeup reg=%d buf=%d\n",buf,buf->vb.i,
count, buf->count);
buf->vb.state = VIDEOBUF_DONE;
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 666f83b2f3c0..672b267a2d3e 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -58,6 +58,7 @@
#include "stb6100.h"
#include "stb6100_proc.h"
#include "mb86a16.h"
+#include "ts2020.h"
#include "ds3000.h"
MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
@@ -264,7 +265,7 @@ static struct mb86a16_config twinhan_vp1027 = {
.demod_address = 0x08,
};
-#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
{
static const u8 clock_config [] = { 0x89, 0x38, 0x38 };
@@ -700,6 +701,11 @@ static struct ds3000_config tevii_ds3000_config = {
.set_ts_params = ds3000_set_ts_param,
};
+static struct ts2020_config tevii_ts2020_config = {
+ .tuner_address = 0x60,
+ .clk_out_div = 1,
+};
+
static const struct stv0900_config prof_7301_stv0900_config = {
.demod_address = 0x6a,
/* demod_mode = 0,*/
@@ -1121,7 +1127,7 @@ static int dvb_register(struct cx8802_dev *dev)
}
break;
case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
-#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
/* MT352 is on a secondary I2C bus made from some GPIO lines */
fe0->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
&dev->vp3054->adap);
@@ -1466,9 +1472,12 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(ds3000_attach,
&tevii_ds3000_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL)
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(ts2020_attach, fe0->dvb.frontend,
+ &tevii_ts2020_config, &core->i2c_adap);
fe0->dvb.frontend->ops.set_voltage =
tevii_dvbs_set_voltage;
+ }
break;
case CX88_BOARD_OMICOM_SS4_PCI:
case CX88_BOARD_TBS_8920:
diff --git a/drivers/media/pci/cx88/cx88-i2c.c b/drivers/media/pci/cx88/cx88-i2c.c
index de0f1af74e41..cf2d69615838 100644
--- a/drivers/media/pci/cx88/cx88-i2c.c
+++ b/drivers/media/pci/cx88/cx88-i2c.c
@@ -139,8 +139,7 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
if (i2c_udelay<5)
i2c_udelay=5;
- memcpy(&core->i2c_algo, &cx8800_i2c_algo_template,
- sizeof(core->i2c_algo));
+ core->i2c_algo = cx8800_i2c_algo_template;
core->i2c_adap.dev.parent = &pci->dev;
diff --git a/drivers/media/pci/cx88/cx88-vp3054-i2c.c b/drivers/media/pci/cx88/cx88-vp3054-i2c.c
index d77f8ecab9d7..deede6e25d94 100644
--- a/drivers/media/pci/cx88/cx88-vp3054-i2c.c
+++ b/drivers/media/pci/cx88/cx88-vp3054-i2c.c
@@ -118,8 +118,7 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
return -ENOMEM;
dev->vp3054 = vp3054_i2c;
- memcpy(&vp3054_i2c->algo, &vp3054_i2c_algo_template,
- sizeof(vp3054_i2c->algo));
+ vp3054_i2c->algo = vp3054_i2c_algo_template;
vp3054_i2c->adap.dev.parent = &dev->pci->dev;
strlcpy(vp3054_i2c->adap.name, core->name,
diff --git a/drivers/media/pci/cx88/cx88-vp3054-i2c.h b/drivers/media/pci/cx88/cx88-vp3054-i2c.h
index be99c931dc3e..95d0c60a35e1 100644
--- a/drivers/media/pci/cx88/cx88-vp3054-i2c.h
+++ b/drivers/media/pci/cx88/cx88-vp3054-i2c.h
@@ -30,7 +30,7 @@ struct vp3054_i2c_state {
};
/* ----------------------------------------------------------------------- */
-#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
int vp3054_i2c_probe(struct cx8802_dev *dev);
void vp3054_i2c_remove(struct cx8802_dev *dev);
#else
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index ba0dba4a4d22..feff53c0a251 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -363,7 +363,7 @@ struct cx88_core {
unsigned int tuner_formats;
/* config info -- dvb */
-#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_CX88_DVB)
int (*prev_set_voltage)(struct dvb_frontend *fe, fe_sec_voltage_t voltage);
#endif
void (*gate_ctrl)(struct cx88_core *core, int open);
@@ -562,8 +562,7 @@ struct cx8802_dev {
/* for blackbird only */
struct list_head devlist;
-#if defined(CONFIG_VIDEO_CX88_BLACKBIRD) || \
- defined(CONFIG_VIDEO_CX88_BLACKBIRD_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_CX88_BLACKBIRD)
struct video_device *mpeg_dev;
u32 mailbox;
int width;
@@ -574,13 +573,12 @@ struct cx8802_dev {
struct cx2341x_handler cxhdl;
#endif
-#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_CX88_DVB)
/* for dvb only */
struct videobuf_dvb_frontends frontends;
#endif
-#if defined(CONFIG_VIDEO_CX88_VP3054) || \
- defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
/* For VP3045 secondary I2C bus support */
struct vp3054_i2c_state *vp3054;
#endif
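
[Editor's note: illustration only, not part of the patch] Several cx88 hunks above replace the open-coded "#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)" tests with IS_ENABLED(). IS_ENABLED(CONFIG_FOO), from <linux/kconfig.h>, evaluates to 1 when the option is built in (=y) or built as a module (=m), so one macro covers both cases:

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
	/* compiled whenever the VP3054 helper is =y or =m */
#endif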
diff --git a/drivers/media/pci/dm1105/Kconfig b/drivers/media/pci/dm1105/Kconfig
index 013df4e015cd..173daf0c0847 100644
--- a/drivers/media/pci/dm1105/Kconfig
+++ b/drivers/media/pci/dm1105/Kconfig
@@ -8,6 +8,7 @@ config DVB_DM1105
select DVB_CX24116 if MEDIA_SUBDRV_AUTOSELECT
select DVB_SI21XX if MEDIA_SUBDRV_AUTOSELECT
select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
depends on RC_CORE
help
Support for cards based on the SDMC DM1105 PCI chip like
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index 904c3ea350f5..026767bed5cd 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -45,6 +45,7 @@
#include "si21xx.h"
#include "cx24116.h"
#include "z0194a.h"
+#include "ts2020.h"
#include "ds3000.h"
#define MODULE_NAME "dm1105"
@@ -849,6 +850,11 @@ static struct ds3000_config dvbworld_ds3000_config = {
.demod_address = 0x68,
};
+static struct ts2020_config dvbworld_ts2020_config = {
+ .tuner_address = 0x60,
+ .clk_out_div = 1,
+};
+
static int frontend_init(struct dm1105_dev *dev)
{
int ret;
@@ -898,8 +904,11 @@ static int frontend_init(struct dm1105_dev *dev)
dev->fe = dvb_attach(
ds3000_attach, &dvbworld_ds3000_config,
&dev->i2c_adap);
- if (dev->fe)
+ if (dev->fe) {
+ dvb_attach(ts2020_attach, dev->fe,
+ &dvbworld_ts2020_config, &dev->i2c_adap);
dev->fe->ops.set_voltage = dm1105_set_voltage;
+ }
break;
case DM1105_BOARD_DVBWORLD_2002:
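
[Editor's note: illustration only, not part of the patch] The Kconfig and frontend hunks for cx23885, cx88 and dm1105 all follow the same pattern: after attaching the DS3000 demodulator, each board now also attaches the TS2020 silicon tuner through its own, separately selected driver. A sketch of that attach chain, reusing the I2C addresses from the hunks above but with hypothetical config names:

#include "dvb_frontend.h"
#include "ds3000.h"
#include "ts2020.h"

static struct ds3000_config my_ds3000_config = {
	.demod_address	= 0x68,
};

static struct ts2020_config my_ts2020_config = {
	.tuner_address	= 0x60,
	.clk_out_div	= 1,
};

static struct dvb_frontend *attach_ds3000_ts2020(struct i2c_adapter *i2c)
{
	struct dvb_frontend *fe;

	fe = dvb_attach(ds3000_attach, &my_ds3000_config, i2c);
	if (fe)		/* the tuner driver is layered on top of the demod */
		dvb_attach(ts2020_attach, fe, &my_ts2020_config, i2c);
	return fe;
}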
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index 4a221c693995..e970cface70e 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -205,7 +205,7 @@ err_exit:
return ret;
}
-static int __init ivtv_alsa_load(struct ivtv *itv)
+static int ivtv_alsa_load(struct ivtv *itv)
{
struct v4l2_device *v4l2_dev = &itv->v4l2_dev;
struct ivtv_stream *s;
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
index 23dfe0d12400..186814e0b2d4 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
@@ -20,4 +20,4 @@
* 02111-1307 USA
*/
-int __init snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
+int snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index df88dc4ab555..2928e7287da8 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -304,7 +304,7 @@ static void request_modules(struct ivtv *dev)
static void flush_request_modules(struct ivtv *dev)
{
- flush_work_sync(&dev->request_module_wk);
+ flush_work(&dev->request_module_wk);
}
#else
#define request_modules(dev)
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index 46e262becb67..ceed2d87abfd 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -267,8 +267,6 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
const char *type = hw_devicenames[idx];
u32 hw = 1 << idx;
- if (idx >= ARRAY_SIZE(hw_addrs))
- return -1;
if (hw == IVTV_HW_TUNER) {
/* special tuner handling */
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0,
@@ -719,13 +717,10 @@ int init_ivtv_i2c(struct ivtv *itv)
return -ENODEV;
}
if (itv->options.newi2c > 0) {
- memcpy(&itv->i2c_adap, &ivtv_i2c_adap_hw_template,
- sizeof(struct i2c_adapter));
+ itv->i2c_adap = ivtv_i2c_adap_hw_template;
} else {
- memcpy(&itv->i2c_adap, &ivtv_i2c_adap_template,
- sizeof(struct i2c_adapter));
- memcpy(&itv->i2c_algo, &ivtv_i2c_algo_template,
- sizeof(struct i2c_algo_bit_data));
+ itv->i2c_adap = ivtv_i2c_adap_template;
+ itv->i2c_algo = ivtv_i2c_algo_template;
}
itv->i2c_algo.udelay = itv->options.i2c_clock_period / 2;
itv->i2c_algo.data = itv;
@@ -735,8 +730,7 @@ int init_ivtv_i2c(struct ivtv *itv)
itv->instance);
i2c_set_adapdata(&itv->i2c_adap, &itv->v4l2_dev);
- memcpy(&itv->i2c_client, &ivtv_i2c_client_template,
- sizeof(struct i2c_client));
+ itv->i2c_client = ivtv_i2c_client_template;
itv->i2c_client.adapter = &itv->i2c_adap;
itv->i2c_adap.dev.parent = &itv->pdev->dev;
diff --git a/drivers/media/pci/ivtv/ivtv-vbi.c b/drivers/media/pci/ivtv/ivtv-vbi.c
index 293db806d936..3c156bc70fb4 100644
--- a/drivers/media/pci/ivtv/ivtv-vbi.c
+++ b/drivers/media/pci/ivtv/ivtv-vbi.c
@@ -224,7 +224,7 @@ static void copy_vbi_data(struct ivtv *itv, int lines, u32 pts_stamp)
(the max size of the VBI data is 36 * 43 + 4 bytes).
So in this case we use the magic number 'ITV0'. */
memcpy(dst + sd, "ITV0", 4);
- memcpy(dst + sd + 4, dst + sd + 12, line * 43);
+ memmove(dst + sd + 4, dst + sd + 12, line * 43);
size = 4 + ((43 * line + 3) & ~3);
} else {
memcpy(dst + sd, "itv0", 4);
@@ -532,7 +532,7 @@ void ivtv_vbi_work_handler(struct ivtv *itv)
while (vi->cc_payload_idx) {
cc = vi->cc_payload[0];
- memcpy(vi->cc_payload, vi->cc_payload + 1,
+ memmove(vi->cc_payload, vi->cc_payload + 1,
sizeof(vi->cc_payload) - sizeof(vi->cc_payload[0]));
vi->cc_payload_idx--;
if (vi->cc_payload_idx && cc.odd[0] == 0x80 && cc.odd[1] == 0x80)
diff --git a/drivers/media/pci/mantis/mantis_ca.c b/drivers/media/pci/mantis/mantis_ca.c
index 3d7046909009..60c6c2f24066 100644
--- a/drivers/media/pci/mantis/mantis_ca.c
+++ b/drivers/media/pci/mantis/mantis_ca.c
@@ -198,11 +198,12 @@ void mantis_ca_exit(struct mantis_pci *mantis)
struct mantis_ca *ca = mantis->mantis_ca;
dprintk(MANTIS_DEBUG, 1, "Mantis CA exit");
+ if (!ca)
+ return;
mantis_evmgr_exit(ca);
dprintk(MANTIS_ERROR, 1, "Unregistering EN50221 device");
- if (ca)
- dvb_ca_en50221_release(&ca->en50221);
+ dvb_ca_en50221_release(&ca->en50221);
kfree(ca);
}
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 049e18667cd0..7859c43479d7 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -35,6 +35,8 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/delay.h>
@@ -811,7 +813,7 @@ again:
mchip_hsize() * mchip_vsize() * 2);
meye.grab_buffer[reqnr].size = mchip_hsize() * mchip_vsize() * 2;
meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
- do_gettimeofday(&meye.grab_buffer[reqnr].timestamp);
+ v4l2_get_timestamp(&meye.grab_buffer[reqnr].timestamp);
meye.grab_buffer[reqnr].sequence = sequence++;
kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr,
sizeof(int), &meye.doneq_lock);
@@ -832,7 +834,7 @@ again:
size);
meye.grab_buffer[reqnr].size = size;
meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
- do_gettimeofday(&meye.grab_buffer[reqnr].timestamp);
+ v4l2_get_timestamp(&meye.grab_buffer[reqnr].timestamp);
meye.grab_buffer[reqnr].sequence = sequence++;
kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr,
sizeof(int), &meye.doneq_lock);
@@ -865,7 +867,7 @@ static int meye_open(struct file *file)
meye.grab_buffer[i].state = MEYE_BUF_UNUSED;
kfifo_reset(&meye.grabq);
kfifo_reset(&meye.doneq);
- return 0;
+ return v4l2_fh_open(file);
}
static int meye_release(struct file *file)
@@ -873,7 +875,7 @@ static int meye_release(struct file *file)
mchip_hic_stop();
mchip_dma_free();
clear_bit(0, &meye.in_use);
- return 0;
+ return v4l2_fh_release(file);
}
static int meyeioc_g_params(struct meye_params *p)
@@ -1032,8 +1034,9 @@ static int vidioc_querycap(struct file *file, void *fh,
cap->version = (MEYE_DRIVER_MAJORVERSION << 8) +
MEYE_DRIVER_MINORVERSION;
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1063,191 +1066,50 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
return 0;
}
-static int vidioc_queryctrl(struct file *file, void *fh,
- struct v4l2_queryctrl *c)
-{
- switch (c->id) {
-
- case V4L2_CID_BRIGHTNESS:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Brightness");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 32;
- c->flags = 0;
- break;
- case V4L2_CID_HUE:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Hue");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 32;
- c->flags = 0;
- break;
- case V4L2_CID_CONTRAST:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Contrast");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 32;
- c->flags = 0;
- break;
- case V4L2_CID_SATURATION:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Saturation");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 32;
- c->flags = 0;
- break;
- case V4L2_CID_AGC:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Agc");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 48;
- c->flags = 0;
- break;
- case V4L2_CID_MEYE_SHARPNESS:
- case V4L2_CID_SHARPNESS:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Sharpness");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 32;
-
- /* Continue to report legacy private SHARPNESS ctrl but
- * say it is disabled in preference to ctrl in the spec
- */
- c->flags = (c->id == V4L2_CID_SHARPNESS) ? 0 :
- V4L2_CTRL_FLAG_DISABLED;
- break;
- case V4L2_CID_PICTURE:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Picture");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 0;
- c->flags = 0;
- break;
- case V4L2_CID_JPEGQUAL:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "JPEG quality");
- c->minimum = 0;
- c->maximum = 10;
- c->step = 1;
- c->default_value = 8;
- c->flags = 0;
- break;
- case V4L2_CID_FRAMERATE:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Framerate");
- c->minimum = 0;
- c->maximum = 31;
- c->step = 1;
- c->default_value = 0;
- c->flags = 0;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
+static int meye_s_ctrl(struct v4l2_ctrl *ctrl)
{
mutex_lock(&meye.lock);
- switch (c->id) {
+ switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERABRIGHTNESS, c->value);
- meye.brightness = c->value << 10;
+ SONY_PIC_COMMAND_SETCAMERABRIGHTNESS, ctrl->val);
+ meye.brightness = ctrl->val << 10;
break;
case V4L2_CID_HUE:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERAHUE, c->value);
- meye.hue = c->value << 10;
+ SONY_PIC_COMMAND_SETCAMERAHUE, ctrl->val);
+ meye.hue = ctrl->val << 10;
break;
case V4L2_CID_CONTRAST:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERACONTRAST, c->value);
- meye.contrast = c->value << 10;
+ SONY_PIC_COMMAND_SETCAMERACONTRAST, ctrl->val);
+ meye.contrast = ctrl->val << 10;
break;
case V4L2_CID_SATURATION:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERACOLOR, c->value);
- meye.colour = c->value << 10;
+ SONY_PIC_COMMAND_SETCAMERACOLOR, ctrl->val);
+ meye.colour = ctrl->val << 10;
break;
- case V4L2_CID_AGC:
+ case V4L2_CID_MEYE_AGC:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERAAGC, c->value);
- meye.params.agc = c->value;
+ SONY_PIC_COMMAND_SETCAMERAAGC, ctrl->val);
+ meye.params.agc = ctrl->val;
break;
case V4L2_CID_SHARPNESS:
- case V4L2_CID_MEYE_SHARPNESS:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERASHARPNESS, c->value);
- meye.params.sharpness = c->value;
+ SONY_PIC_COMMAND_SETCAMERASHARPNESS, ctrl->val);
+ meye.params.sharpness = ctrl->val;
break;
- case V4L2_CID_PICTURE:
+ case V4L2_CID_MEYE_PICTURE:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERAPICTURE, c->value);
- meye.params.picture = c->value;
+ SONY_PIC_COMMAND_SETCAMERAPICTURE, ctrl->val);
+ meye.params.picture = ctrl->val;
break;
- case V4L2_CID_JPEGQUAL:
- meye.params.quality = c->value;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ meye.params.quality = ctrl->val;
break;
- case V4L2_CID_FRAMERATE:
- meye.params.framerate = c->value;
- break;
- default:
- mutex_unlock(&meye.lock);
- return -EINVAL;
- }
- mutex_unlock(&meye.lock);
-
- return 0;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
-{
- mutex_lock(&meye.lock);
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- c->value = meye.brightness >> 10;
- break;
- case V4L2_CID_HUE:
- c->value = meye.hue >> 10;
- break;
- case V4L2_CID_CONTRAST:
- c->value = meye.contrast >> 10;
- break;
- case V4L2_CID_SATURATION:
- c->value = meye.colour >> 10;
- break;
- case V4L2_CID_AGC:
- c->value = meye.params.agc;
- break;
- case V4L2_CID_SHARPNESS:
- case V4L2_CID_MEYE_SHARPNESS:
- c->value = meye.params.sharpness;
- break;
- case V4L2_CID_PICTURE:
- c->value = meye.params.picture;
- break;
- case V4L2_CID_JPEGQUAL:
- c->value = meye.params.quality;
- break;
- case V4L2_CID_FRAMERATE:
- c->value = meye.params.framerate;
+ case V4L2_CID_MEYE_FRAMERATE:
+ meye.params.framerate = ctrl->val;
break;
default:
mutex_unlock(&meye.lock);
@@ -1426,7 +1288,7 @@ static int vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
return -EINVAL;
buf->bytesused = meye.grab_buffer[index].size;
- buf->flags = V4L2_BUF_FLAG_MAPPED;
+ buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
if (meye.grab_buffer[index].state == MEYE_BUF_USING)
buf->flags |= V4L2_BUF_FLAG_QUEUED;
@@ -1499,7 +1361,7 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->index = reqnr;
buf->bytesused = meye.grab_buffer[reqnr].size;
- buf->flags = V4L2_BUF_FLAG_MAPPED;
+ buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buf->field = V4L2_FIELD_NONE;
buf->timestamp = meye.grab_buffer[reqnr].timestamp;
buf->sequence = meye.grab_buffer[reqnr].sequence;
@@ -1577,12 +1439,12 @@ static long vidioc_default(struct file *file, void *fh, bool valid_prio,
static unsigned int meye_poll(struct file *file, poll_table *wait)
{
- unsigned int res = 0;
+ unsigned int res = v4l2_ctrl_poll(file, wait);
mutex_lock(&meye.lock);
poll_wait(file, &meye.proc_list, wait);
if (kfifo_len(&meye.doneq))
- res = POLLIN | POLLRDNORM;
+ res |= POLLIN | POLLRDNORM;
mutex_unlock(&meye.lock);
return res;
}
@@ -1669,9 +1531,6 @@ static const struct v4l2_ioctl_ops meye_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
@@ -1682,6 +1541,9 @@ static const struct v4l2_ioctl_ops meye_ioctl_ops = {
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
.vidioc_default = vidioc_default,
};
@@ -1692,6 +1554,10 @@ static struct video_device meye_template = {
.release = video_device_release,
};
+static const struct v4l2_ctrl_ops meye_ctrl_ops = {
+ .s_ctrl = meye_s_ctrl,
+};
+
#ifdef CONFIG_PM
static int meye_suspend(struct pci_dev *pdev, pm_message_t state)
{
@@ -1730,6 +1596,32 @@ static int meye_resume(struct pci_dev *pdev)
static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
+ static const struct v4l2_ctrl_config ctrl_agc = {
+ .id = V4L2_CID_MEYE_AGC,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .ops = &meye_ctrl_ops,
+ .name = "AGC",
+ .max = 63,
+ .step = 1,
+ .def = 48,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ };
+ static const struct v4l2_ctrl_config ctrl_picture = {
+ .id = V4L2_CID_MEYE_PICTURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .ops = &meye_ctrl_ops,
+ .name = "Picture",
+ .max = 63,
+ .step = 1,
+ };
+ static const struct v4l2_ctrl_config ctrl_framerate = {
+ .id = V4L2_CID_MEYE_FRAMERATE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .ops = &meye_ctrl_ops,
+ .name = "Framerate",
+ .max = 31,
+ .step = 1,
+ };
struct v4l2_device *v4l2_dev = &meye.v4l2_dev;
int ret = -EBUSY;
unsigned long mchip_adr;
@@ -1833,24 +1725,31 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
mutex_init(&meye.lock);
init_waitqueue_head(&meye.proc_list);
- meye.brightness = 32 << 10;
- meye.hue = 32 << 10;
- meye.colour = 32 << 10;
- meye.contrast = 32 << 10;
- meye.params.subsample = 0;
- meye.params.quality = 8;
- meye.params.sharpness = 32;
- meye.params.agc = 48;
- meye.params.picture = 0;
- meye.params.framerate = 0;
-
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERABRIGHTNESS, 32);
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAHUE, 32);
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERACOLOR, 32);
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERACONTRAST, 32);
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERASHARPNESS, 32);
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAPICTURE, 0);
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAAGC, 48);
+
+ v4l2_ctrl_handler_init(&meye.hdl, 3);
+ v4l2_ctrl_new_std(&meye.hdl, &meye_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 63, 1, 32);
+ v4l2_ctrl_new_std(&meye.hdl, &meye_ctrl_ops,
+ V4L2_CID_HUE, 0, 63, 1, 32);
+ v4l2_ctrl_new_std(&meye.hdl, &meye_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 63, 1, 32);
+ v4l2_ctrl_new_std(&meye.hdl, &meye_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 63, 1, 32);
+ v4l2_ctrl_new_custom(&meye.hdl, &ctrl_agc, NULL);
+ v4l2_ctrl_new_std(&meye.hdl, &meye_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 63, 1, 32);
+ v4l2_ctrl_new_custom(&meye.hdl, &ctrl_picture, NULL);
+ v4l2_ctrl_new_std(&meye.hdl, &meye_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 0, 10, 1, 8);
+ v4l2_ctrl_new_custom(&meye.hdl, &ctrl_framerate, NULL);
+ if (meye.hdl.error) {
+ v4l2_err(v4l2_dev, "couldn't register controls\n");
+ goto outvideoreg;
+ }
+
+ v4l2_ctrl_handler_setup(&meye.hdl);
+ meye.vdev->ctrl_handler = &meye.hdl;
+ set_bit(V4L2_FL_USE_FH_PRIO, &meye.vdev->flags);
if (video_register_device(meye.vdev, VFL_TYPE_GRABBER,
video_nr) < 0) {
@@ -1866,6 +1765,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
return 0;
outvideoreg:
+ v4l2_ctrl_handler_free(&meye.hdl);
free_irq(meye.mchip_irq, meye_irq);
outreqirq:
iounmap(meye.mchip_mmregs);
diff --git a/drivers/media/pci/meye/meye.h b/drivers/media/pci/meye/meye.h
index 4bdeb03f1644..6fed9274cfa5 100644
--- a/drivers/media/pci/meye/meye.h
+++ b/drivers/media/pci/meye/meye.h
@@ -39,6 +39,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kfifo.h>
+#include <media/v4l2-ctrls.h>
/****************************************************************************/
/* Motion JPEG chip registers */
@@ -290,6 +291,7 @@ struct meye_grab_buffer {
/* Motion Eye device structure */
struct meye {
struct v4l2_device v4l2_dev; /* Main v4l2_device struct */
+ struct v4l2_ctrl_handler hdl;
struct pci_dev *mchip_dev; /* pci device */
u8 mchip_irq; /* irq */
u8 mchip_mode; /* actual mchip mode: HIC_MODE... */
diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
index fad214113669..9e82d2105d53 100644
--- a/drivers/media/pci/ngene/ngene-cards.c
+++ b/drivers/media/pci/ngene/ngene-cards.c
@@ -327,6 +327,14 @@ static int demod_attach_drxd(struct ngene_channel *chan)
pr_err("No DRXD found!\n");
return -ENODEV;
}
+ return 0;
+}
+
+static int tuner_attach_dtt7520x(struct ngene_channel *chan)
+{
+ struct drxd_config *feconf;
+
+ feconf = chan->dev->card_info->fe_config[chan->number];
if (!dvb_attach(dvb_pll_attach, chan->fe, feconf->pll_address,
&chan->i2c_adapter,
@@ -724,6 +732,7 @@ static struct ngene_info ngene_info_terratec = {
.name = "Terratec Integra/Cinergy2400i Dual DVB-T",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
.demod_attach = {demod_attach_drxd, demod_attach_drxd},
+ .tuner_attach = {tuner_attach_dtt7520x, tuner_attach_dtt7520x},
.fe_config = {&fe_terratec_dvbt_0, &fe_terratec_dvbt_1},
.i2c_access = 1,
};
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index bc08f1dbc293..dc68cf1070f7 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -5773,6 +5773,23 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x0000000,
},
},
+ [SAA7134_BOARD_HAWELL_HW_9004V1] = {
+ /* Hawell HW-9004V1 */
+ /* Vadim Frolov <fralik@gmail.com> */
+ .name = "Hawell HW-9004V1",
+ .audio_clock = 0x00200000,
+ .tuner_type = UNSET,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .gpiomask = 0x618E700,
+ .inputs = {{
+ .name = name_comp1,
+ .vmux = 3,
+ .amux = LINE1,
+ .gpio = 0x6010000,
+ } },
+ },
};
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index e359d200d698..8fd24e7c9403 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -308,7 +308,7 @@ void saa7134_buffer_finish(struct saa7134_dev *dev,
/* finish current buffer */
q->curr->vb.state = state;
- do_gettimeofday(&q->curr->vb.ts);
+ v4l2_get_timestamp(&q->curr->vb.ts);
wake_up(&q->curr->vb.done);
q->curr = NULL;
}
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index b209de40a4f8..27915e501db9 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -607,6 +607,9 @@ static int configure_tda827x_fe(struct saa7134_dev *dev,
/* Get the first frontend */
fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
+ if (!fe0)
+ return -EINVAL;
+
fe0->dvb.frontend = dvb_attach(tda10046_attach, cdec_conf, &dev->i2c_adap);
if (fe0->dvb.frontend) {
if (cdec_conf->i2c_gate)
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 3abf52711e13..7c503fb68526 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2248,6 +2248,17 @@ static int saa7134_streamon(struct file *file, void *priv,
if (!res_get(dev, fh, res))
return -EBUSY;
+ /* The SAA7134 has a 1K FIFO; the datasheet suggests that when
+ * configured conservatively, there's 22 usec of buffering for video.
+ * We therefore request a DMA latency of 20 usec, giving us 2 usec of
+ * margin in case the FIFO is configured differently to the datasheet.
+ * Unfortunately, I lack register-level documentation to check the
+ * Linux FIFO setup and confirm the perfect value.
+ */
+ pm_qos_add_request(&fh->qos_request,
+ PM_QOS_CPU_DMA_LATENCY,
+ 20);
+
return videobuf_streamon(saa7134_queue(fh));
}
@@ -2259,6 +2270,8 @@ static int saa7134_streamoff(struct file *file, void *priv,
struct saa7134_dev *dev = fh->dev;
int res = saa7134_resource(fh);
+ pm_qos_remove_request(&fh->qos_request);
+
err = videobuf_streamoff(saa7134_queue(fh));
if (err < 0)
return err;
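
[Editor's note: illustration only, not part of the patch] The streamon/streamoff hunks above bound CPU wake-up latency while capture DMA is running; the in-code comment gives the reasoning (a 1 KiB FIFO holds roughly 22 usec of video, so a 20 usec limit leaves about 2 usec of margin). A minimal sketch of the same pm_qos usage, with hypothetical names (in the patch the request lives in the per-open saa7134_fh):

#include <linux/pm_qos.h>

static struct pm_qos_request vid_qos_request;

static void vid_start_streaming(void)
{
	/* ask cpuidle/PM not to enter states with more than 20 usec of
	 * exit latency while the capture FIFO must be drained promptly */
	pm_qos_add_request(&vid_qos_request, PM_QOS_CPU_DMA_LATENCY, 20);
}

static void vid_stop_streaming(void)
{
	/* drop the constraint once streaming ends */
	pm_qos_remove_request(&vid_qos_request);
}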
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 075908fae4d9..71eefef5e324 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -29,6 +29,7 @@
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/pm_qos.h>
#include <asm/io.h>
@@ -41,7 +42,7 @@
#include <media/videobuf-dma-sg.h>
#include <sound/core.h>
#include <sound/pcm.h>
-#if defined(CONFIG_VIDEO_SAA7134_DVB) || defined(CONFIG_VIDEO_SAA7134_DVB_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_SAA7134_DVB)
#include <media/videobuf-dvb.h>
#endif
@@ -332,6 +333,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_SENSORAY811_911 188
#define SAA7134_BOARD_KWORLD_PC150U 189
#define SAA7134_BOARD_ASUSTeK_PS3_100 190
+#define SAA7134_BOARD_HAWELL_HW_9004V1 191
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
@@ -469,6 +471,7 @@ struct saa7134_fh {
enum v4l2_buf_type type;
unsigned int resources;
enum v4l2_priority prio;
+ struct pm_qos_request qos_request;
/* video overlay */
struct v4l2_window win;
@@ -642,7 +645,7 @@ struct saa7134_dev {
struct work_struct empress_workqueue;
int empress_started;
-#if defined(CONFIG_VIDEO_SAA7134_DVB) || defined(CONFIG_VIDEO_SAA7134_DVB_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_SAA7134_DVB)
/* SAA7134_MPEG_DVB only */
struct videobuf_dvb_frontends frontends;
int (*original_demod_sleep)(struct dvb_frontend *fe);
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index 994018e2d0d6..9bb0903ee5f1 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -1298,6 +1298,7 @@ static int saa7164_g_chip_ident(struct file *file, void *fh,
return 0;
}
+#ifdef CONFIG_VIDEO_ADV_DEBUG
static int saa7164_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
@@ -1323,6 +1324,7 @@ static int saa7164_s_register(struct file *file, void *fh,
return 0;
}
+#endif
static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_s_std = vidioc_s_std,
diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
index 6749f67cab8a..a94ccad02066 100644
--- a/drivers/media/pci/sta2x11/Kconfig
+++ b/drivers/media/pci/sta2x11/Kconfig
@@ -2,7 +2,7 @@ config STA2X11_VIP
tristate "STA2X11 VIP Video For Linux"
depends on STA2X11
select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEOBUF_DMA_CONTIG
+ select VIDEOBUF2_DMA_CONTIG
depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS
help
Say Y for support for STA2X11 VIP (Video Input Port) capture
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 27ae48842656..4b703fe8c953 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -1,7 +1,11 @@
/*
* This is the driver for the STA2x11 Video Input Port.
*
+ * Copyright (C) 2012 ST Microelectronics
+ * author: Federico Vaga <federico.vaga@gmail.com>
* Copyright (C) 2010 WindRiver Systems, Inc.
+ * authors: Andreas Kies <andreas.kies@windriver.com>
+ * Vlad Lungu <vlad.lungu@windriver.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -19,36 +23,30 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Author: Andreas Kies <andreas.kies@windriver.com>
- * Vlad Lungu <vlad.lungu@windriver.com>
- *
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/vmalloc.h>
-
#include <linux/videodev2.h>
-
#include <linux/kmod.h>
-
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
#include "sta2x11_vip.h"
-#define DRV_NAME "sta2x11_vip"
#define DRV_VERSION "1.3"
#ifndef PCI_DEVICE_ID_STMICRO_VIP
@@ -63,8 +61,8 @@
#define DVP_TFS 0x08
#define DVP_BFO 0x0C
#define DVP_BFS 0x10
-#define DVP_VTP 0x14
-#define DVP_VBP 0x18
+#define DVP_VTP 0x14
+#define DVP_VBP 0x18
#define DVP_VMP 0x1C
#define DVP_ITM 0x98
#define DVP_ITS 0x9C
@@ -84,13 +82,21 @@
#define DVP_HLFLN_SD 0x00000001
-#define REG_WRITE(vip, reg, value) iowrite32((value), (vip->iomem)+(reg))
-#define REG_READ(vip, reg) ioread32((vip->iomem)+(reg))
-
#define SAVE_COUNT 8
#define AUX_COUNT 3
#define IRQ_COUNT 1
+
+struct vip_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+ dma_addr_t dma;
+};
+static inline struct vip_buffer *to_vip_buffer(struct vb2_buffer *vb2)
+{
+ return container_of(vb2, struct vip_buffer, vb);
+}
+
/**
* struct sta2x11_vip - All internal data for one instance of device
* @v4l2_dev: device registered in v4l layer
@@ -99,29 +105,26 @@
* @adapter: contains I2C adapter information
* @register_save_area: All relevant register are saved here during suspend
* @decoder: contains information about video DAC
+ * @ctrl_hdl: handler for control framework
* @format: pixel format, fixed UYVY
* @std: video standard (e.g. PAL/NTSC)
* @input: input line for video signal ( 0 or 1 )
- * @users: Number of open of device ( max. 1 )
* @disabled: Device is in power down state
- * @mutex: ensures exclusive opening of device
* @slock: for excluse acces of registers
- * @vb_vidq: queue maintained by videobuf layer
- * @capture: linked list of capture buffer
- * @active: struct videobuf_buffer currently beingg filled
- * @started: device is ready to capture frame
- * @closing: device will be shut down
+ * @alloc_ctx: context for videobuf2
+ * @vb_vidq: queue maintained by videobuf2 layer
+ * @buffer_list: list of buffers in use
+ * @sequence: sequence number of acquired buffer
+ * @active: current active buffer
+ * @lock: used in videobuf2 callback
* @tcount: Number of top frames
* @bcount: Number of bottom frames
* @overflow: Number of FIFO overflows
- * @mem_spare: small buffer of unused frame
- * @dma_spare: dma addres of mem_spare
* @iomem: hardware base address
* @config: I2C and gpio config from platform
*
* All non-local data is accessed via this structure.
*/
-
struct sta2x11_vip {
struct v4l2_device v4l2_dev;
struct video_device *video_dev;
@@ -129,21 +132,27 @@ struct sta2x11_vip {
struct i2c_adapter *adapter;
unsigned int register_save_area[IRQ_COUNT + SAVE_COUNT + AUX_COUNT];
struct v4l2_subdev *decoder;
+ struct v4l2_ctrl_handler ctrl_hdl;
+
+
struct v4l2_pix_format format;
v4l2_std_id std;
unsigned int input;
- int users;
int disabled;
- struct mutex mutex; /* exclusive access during open */
- spinlock_t slock; /* spin lock for hardware and queue access */
- struct videobuf_queue vb_vidq;
- struct list_head capture;
- struct videobuf_buffer *active;
- int started, closing, tcount, bcount;
+ spinlock_t slock;
+
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct vb2_queue vb_vidq;
+ struct list_head buffer_list;
+ unsigned int sequence;
+ struct vip_buffer *active; /* current active buffer */
+ spinlock_t lock; /* Used in videobuf2 callback */
+
+ /* Interrupt counters */
+ int tcount, bcount;
int overflow;
- void *mem_spare;
- dma_addr_t dma_spare;
- void *iomem;
+
+ void *iomem; /* I/O Memory */
struct vip_config *config;
};
@@ -206,318 +215,195 @@ static struct v4l2_pix_format formats_60[] = {
.colorspace = V4L2_COLORSPACE_SMPTE170M},
};
-/**
- * buf_setup - Get size and number of video buffer
- * @vq: queue in videobuf
- * @count: Number of buffers (1..MAX_FRAMES).
- * 0 use default value.
- * @size: size of buffer in bytes
- *
- * returns size and number of buffers
- * a preset value of 0 returns the default number.
- * return value: 0, always succesfull.
- */
-static int buf_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
+/* Write VIP register */
+static inline void reg_write(struct sta2x11_vip *vip, unsigned int reg, u32 val)
{
- struct sta2x11_vip *vip = vq->priv_data;
-
- *size = vip->format.width * vip->format.height * 2;
- if (0 == *count || MAX_FRAMES < *count)
- *count = MAX_FRAMES;
- return 0;
-};
-
-/**
- * buf_prepare - prepare buffer for usage
- * @vq: queue in videobuf layer
- * @vb: buffer to be prepared
- * @field: type of video data (interlaced/non-interlaced)
- *
- * Allocate or realloc buffer
- * return value: 0, successful.
- *
- * -EINVAL, supplied buffer is too small.
- *
- * other, buffer could not be locked.
- */
-static int buf_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+ iowrite32((val), (vip->iomem)+(reg));
+}
+/* Read VIP register */
+static inline u32 reg_read(struct sta2x11_vip *vip, unsigned int reg)
{
- struct sta2x11_vip *vip = vq->priv_data;
- int ret;
-
- vb->size = vip->format.width * vip->format.height * 2;
- if ((0 != vb->baddr) && (vb->bsize < vb->size))
- return -EINVAL;
- vb->width = vip->format.width;
- vb->height = vip->format.height;
- vb->field = field;
-
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- ret = videobuf_iolock(vq, vb, NULL);
- if (ret)
- goto fail;
- }
- vb->state = VIDEOBUF_PREPARED;
- return 0;
-fail:
- videobuf_dma_contig_free(vq, vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
- return ret;
+ return ioread32((vip->iomem)+(reg));
}
-
-/**
- * buf_queu - queue buffer for filling
- * @vq: queue in videobuf layer
- * @vb: buffer to be queued
- *
- * if capturing is already running, the buffer will be queued. Otherwise
- * capture is started and the buffer is used directly.
- */
-static void buf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+/* Start DMA acquisition */
+static void start_dma(struct sta2x11_vip *vip, struct vip_buffer *vip_buf)
{
- struct sta2x11_vip *vip = vq->priv_data;
- u32 dma;
+ unsigned long offset = 0;
+
+ if (vip->format.field == V4L2_FIELD_INTERLACED)
+ offset = vip->format.width * 2;
- vb->state = VIDEOBUF_QUEUED;
+ spin_lock_irq(&vip->slock);
+ /* Enable acquisition */
+ reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) | DVP_CTL_ENA);
+ /* Set Top and Bottom Field memory address */
+ reg_write(vip, DVP_VTP, (u32)vip_buf->dma);
+ reg_write(vip, DVP_VBP, (u32)vip_buf->dma + offset);
+ spin_unlock_irq(&vip->slock);
+}
- if (vip->active) {
- list_add_tail(&vb->queue, &vip->capture);
+/* Fetch the next buffer to activate */
+static void vip_active_buf_next(struct sta2x11_vip *vip)
+{
+ /* Get the next buffer */
+ spin_lock(&vip->lock);
+ if (list_empty(&vip->buffer_list)) { /* No available buffer */
+ spin_unlock(&vip->lock);
return;
}
-
- vip->started = 1;
+ vip->active = list_first_entry(&vip->buffer_list,
+ struct vip_buffer,
+ list);
+ /* Reset Top and Bottom counter */
vip->tcount = 0;
vip->bcount = 0;
- vip->active = vb;
- vb->state = VIDEOBUF_ACTIVE;
+ spin_unlock(&vip->lock);
+ if (vb2_is_streaming(&vip->vb_vidq)) { /* streaming is on */
+ start_dma(vip, vip->active); /* start dma capture */
+ }
+}
- dma = videobuf_to_dma_contig(vb);
- REG_WRITE(vip, DVP_TFO, (0 << 16) | (0));
- /* despite of interlace mode, upper and lower frames start at zero */
- REG_WRITE(vip, DVP_BFO, (0 << 16) | (0));
+/* Videobuf2 Operations */
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
- switch (vip->format.field) {
- case V4L2_FIELD_INTERLACED:
- REG_WRITE(vip, DVP_TFS,
- ((vip->format.height / 2 - 1) << 16) |
- (2 * vip->format.width - 1));
- REG_WRITE(vip, DVP_BFS, ((vip->format.height / 2 - 1) << 16) |
- (2 * vip->format.width - 1));
- REG_WRITE(vip, DVP_VTP, dma);
- REG_WRITE(vip, DVP_VBP, dma + vip->format.width * 2);
- REG_WRITE(vip, DVP_VMP, 4 * vip->format.width);
- break;
- case V4L2_FIELD_TOP:
- REG_WRITE(vip, DVP_TFS,
- ((vip->format.height - 1) << 16) |
- (2 * vip->format.width - 1));
- REG_WRITE(vip, DVP_BFS, ((0) << 16) |
- (2 * vip->format.width - 1));
- REG_WRITE(vip, DVP_VTP, dma);
- REG_WRITE(vip, DVP_VBP, dma);
- REG_WRITE(vip, DVP_VMP, 2 * vip->format.width);
- break;
- case V4L2_FIELD_BOTTOM:
- REG_WRITE(vip, DVP_TFS, ((0) << 16) |
- (2 * vip->format.width - 1));
- REG_WRITE(vip, DVP_BFS,
- ((vip->format.height) << 16) |
- (2 * vip->format.width - 1));
- REG_WRITE(vip, DVP_VTP, dma);
- REG_WRITE(vip, DVP_VBP, dma);
- REG_WRITE(vip, DVP_VMP, 2 * vip->format.width);
- break;
+ if (!(*nbuffers) || *nbuffers < MAX_FRAMES)
+ *nbuffers = MAX_FRAMES;
- default:
- pr_warning("VIP: unknown field format\n");
- return;
- }
+ *nplanes = 1;
+ sizes[0] = vip->format.sizeimage;
+ alloc_ctxs[0] = vip->alloc_ctx;
- REG_WRITE(vip, DVP_CTL, DVP_CTL_ENA);
-}
+ vip->sequence = 0;
+ vip->active = NULL;
+ vip->tcount = 0;
+ vip->bcount = 0;
-/**
- * buff_release - release buffer
- * @vq: queue in videobuf layer
- * @vb: buffer to be released
- *
- * release buffer in videobuf layer
- */
-static void buf_release(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+ return 0;
+};
+static int buffer_init(struct vb2_buffer *vb)
{
+ struct vip_buffer *vip_buf = to_vip_buffer(vb);
- videobuf_dma_contig_free(vq, vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
+ vip_buf->dma = vb2_dma_contig_plane_dma_addr(vb, 0);
+ INIT_LIST_HEAD(&vip_buf->list);
+ return 0;
}
-static struct videobuf_queue_ops vip_qops = {
- .buf_setup = buf_setup,
- .buf_prepare = buf_prepare,
- .buf_queue = buf_queue,
- .buf_release = buf_release,
-};
-
-/**
- * vip_open - open video device
- * @file: descriptor of device
- *
- * open device, make sure it is only opened once.
- * return value: 0, no error.
- *
- * -EBUSY, device is already opened
- *
- * -ENOMEM, no memory for auxiliary DMA buffer
- */
-static int vip_open(struct file *file)
+static int buffer_prepare(struct vb2_buffer *vb)
{
- struct video_device *dev = video_devdata(file);
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
+ struct vip_buffer *vip_buf = to_vip_buffer(vb);
+ unsigned long size;
+
+ size = vip->format.sizeimage;
+ if (vb2_plane_size(vb, 0) < size) {
+ v4l2_err(&vip->v4l2_dev, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
- mutex_lock(&vip->mutex);
- vip->users++;
+ vb2_set_plane_payload(&vip_buf->vb, 0, size);
- if (vip->users > 1) {
- vip->users--;
- mutex_unlock(&vip->mutex);
- return -EBUSY;
+ return 0;
+}
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
+ struct vip_buffer *vip_buf = to_vip_buffer(vb);
+
+ spin_lock(&vip->lock);
+ list_add_tail(&vip_buf->list, &vip->buffer_list);
+ if (!vip->active) { /* No active buffer, activate the first one */
+ vip->active = list_first_entry(&vip->buffer_list,
+ struct vip_buffer,
+ list);
+ if (vb2_is_streaming(&vip->vb_vidq)) /* streaming is on */
+ start_dma(vip, vip_buf); /* start dma capture */
}
+ spin_unlock(&vip->lock);
+}
+static int buffer_finish(struct vb2_buffer *vb)
+{
+ struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
+ struct vip_buffer *vip_buf = to_vip_buffer(vb);
- file->private_data = dev;
- vip->overflow = 0;
- vip->started = 0;
- vip->closing = 0;
- vip->active = NULL;
+ /* Buffer handled, remove it from the list */
+ spin_lock(&vip->lock);
+ list_del_init(&vip_buf->list);
+ spin_unlock(&vip->lock);
- INIT_LIST_HEAD(&vip->capture);
- vip->mem_spare = dma_alloc_coherent(&vip->pdev->dev, 64,
- &vip->dma_spare, GFP_KERNEL);
- if (!vip->mem_spare) {
- vip->users--;
- mutex_unlock(&vip->mutex);
- return -ENOMEM;
- }
+ vip_active_buf_next(vip);
- mutex_unlock(&vip->mutex);
- videobuf_queue_dma_contig_init_cached(&vip->vb_vidq,
- &vip_qops,
- &vip->pdev->dev,
- &vip->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct videobuf_buffer),
- vip, NULL);
- REG_READ(vip, DVP_ITS);
- REG_WRITE(vip, DVP_HLFLN, DVP_HLFLN_SD);
- REG_WRITE(vip, DVP_ITM, DVP_IT_VSB | DVP_IT_VST);
- REG_WRITE(vip, DVP_CTL, DVP_CTL_RST);
- REG_WRITE(vip, DVP_CTL, 0);
- REG_READ(vip, DVP_ITS);
return 0;
}
-/**
- * vip_close - close video device
- * @file: descriptor of device
- *
- * close video device, wait until all pending operations are finished
- * ( maximum FRAME_MAX buffers pending )
- * Turn off interrupts.
- *
- * return value: 0, always succesful.
- */
-static int vip_close(struct file *file)
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct video_device *dev = video_devdata(file);
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
- vip->closing = 1;
- if (vip->active)
- videobuf_waiton(&vip->vb_vidq, vip->active, 0, 0);
spin_lock_irq(&vip->slock);
-
- REG_WRITE(vip, DVP_ITM, 0);
- REG_WRITE(vip, DVP_CTL, DVP_CTL_RST);
- REG_WRITE(vip, DVP_CTL, 0);
- REG_READ(vip, DVP_ITS);
-
- vip->started = 0;
- vip->active = NULL;
-
+ /* Enable VSYNC Top and Bottom interrupts */
+ reg_write(vip, DVP_ITM, DVP_IT_VSB | DVP_IT_VST);
spin_unlock_irq(&vip->slock);
- videobuf_stop(&vip->vb_vidq);
- videobuf_mmap_free(&vip->vb_vidq);
+ if (count)
+ start_dma(vip, vip->active);
- dma_free_coherent(&vip->pdev->dev, 64, vip->mem_spare, vip->dma_spare);
- file->private_data = NULL;
- mutex_lock(&vip->mutex);
- vip->users--;
- mutex_unlock(&vip->mutex);
return 0;
}
-/**
- * vip_read - read from video input
- * @file: descriptor of device
- * @data: user buffer
- * @count: number of bytes to be read
- * @ppos: position within stream
- *
- * read video data from video device.
- * handling is done in generic videobuf layer
- * return value: provided by videobuf layer
- */
-static ssize_t vip_read(struct file *file, char __user *data,
- size_t count, loff_t *ppos)
+/* abort streaming and wait for last buffer */
+static int stop_streaming(struct vb2_queue *vq)
{
- struct video_device *dev = file->private_data;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return videobuf_read_stream(&vip->vb_vidq, data, count, ppos, 0,
- file->f_flags & O_NONBLOCK);
+ struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
+ struct vip_buffer *vip_buf, *node;
+
+ /* Disable acquisition */
+ reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA);
+ /* Disable all interrupts */
+ reg_write(vip, DVP_ITM, 0);
+
+ /* Release all active buffers */
+ spin_lock(&vip->lock);
+ list_for_each_entry_safe(vip_buf, node, &vip->buffer_list, list) {
+ vb2_buffer_done(&vip_buf->vb, VB2_BUF_STATE_ERROR);
+ list_del(&vip_buf->list);
+ }
+ spin_unlock(&vip->lock);
+ return 0;
}
-/**
- * vip_mmap - map user buffer
- * @file: descriptor of device
- * @vma: user buffer
- *
- * map user space buffer into kernel mode, including DMA address.
- * handling is done in generic videobuf layer.
- * return value: provided by videobuf layer
- */
-static int vip_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct video_device *dev = file->private_data;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+static struct vb2_ops vip_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_init = buffer_init,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
- return videobuf_mmap_mapper(&vip->vb_vidq, vma);
-}
-/**
- * vip_poll - poll for event
- * @file: descriptor of device
- * @wait: contains events to be waited for
- *
- * wait for event related to video device.
- * handling is done in generic videobuf layer.
- * return value: provided by videobuf layer
- */
-static unsigned int vip_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct video_device *dev = file->private_data;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+/* File Operations */
+static const struct v4l2_file_operations vip_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll
+};
- return videobuf_poll_stream(file, &vip->vb_vidq, wait);
-}
/**
* vidioc_querycap - return capabilities of device
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @cap: contains return values
*
* the capabilities of the device are returned
@@ -527,25 +413,22 @@ static unsigned int vip_poll(struct file *file, struct poll_table_struct *wait)
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
- memset(cap, 0, sizeof(struct v4l2_capability));
- strcpy(cap->driver, DRV_NAME);
- strcpy(cap->card, DRV_NAME);
- cap->version = 0;
+ strcpy(cap->driver, KBUILD_MODNAME);
+ strcpy(cap->card, KBUILD_MODNAME);
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
pci_name(vip->pdev));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
/**
* vidioc_s_std - set video standard
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @std: contains standard to be set
*
* the video standard is set
@@ -558,8 +441,7 @@ static int vidioc_querycap(struct file *file, void *priv,
*/
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *std)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
v4l2_std_id oldstd = vip->std, newstd;
int status;
@@ -592,8 +474,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *std)
/**
* vidioc_g_std - get video standard
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @std: contains return values
*
* the current video standard is returned
@@ -602,8 +483,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *std)
*/
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
*std = vip->std;
return 0;
@@ -611,8 +491,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
/**
* vidioc_querystd - get possible video standards
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @std: contains return values
*
* all possible video standards are returned
@@ -621,79 +500,11 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
*/
static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
return v4l2_subdev_call(vip->decoder, video, querystd, std);
-
}
-/**
- * vidioc_queryctl - get possible control settings
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
- * @ctrl: contains return values
- *
- * return possible values for a control
- * return value: delivered by video DAC routine.
- */
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *ctrl)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return v4l2_subdev_call(vip->decoder, core, queryctrl, ctrl);
-}
-
-/**
- * vidioc_g_ctl - get control value
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
- * @ctrl: contains return values
- *
- * return setting for a control value
- * return value: delivered by video DAC routine.
- */
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return v4l2_subdev_call(vip->decoder, core, g_ctrl, ctrl);
-}
-
-/**
- * vidioc_s_ctl - set control value
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
- * @ctrl: contains value to be set
- *
- * set value for a specific control
- * return value: delivered by video DAC routine.
- */
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return v4l2_subdev_call(vip->decoder, core, s_ctrl, ctrl);
-}
-
-/**
- * vidioc_enum_input - return name of input line
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
- * @inp: contains return values
- *
- * the user friendly name of the input line is returned
- *
- * return value: 0, no error.
- *
- * -EINVAL, input line number out of range
- */
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
@@ -709,8 +520,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
/**
* vidioc_s_input - set input line
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @i: new input line number
*
* the current active input line is set
@@ -721,8 +531,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
*/
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
int ret;
if (i > 1)
@@ -737,8 +546,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
/**
* vidioc_g_input - return input line
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @i: returned input line number
*
* the current active input line is returned
@@ -747,8 +555,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
*/
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
*i = vip->input;
return 0;
@@ -756,8 +563,6 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
/**
* vidioc_enum_fmt_vid_cap - return video capture format
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
* @f: returned format information
*
* returns name and format of video capture
@@ -780,8 +585,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
/**
* vidioc_try_fmt_vid_cap - set video capture format
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @f: new format
*
* new video format is set which includes width and
@@ -797,12 +601,13 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
int interlace_lim;
- if (V4L2_PIX_FMT_UYVY != f->fmt.pix.pixelformat)
+ if (V4L2_PIX_FMT_UYVY != f->fmt.pix.pixelformat) {
+ v4l2_warn(&vip->v4l2_dev, "Invalid format, only UYVY supported\n");
return -EINVAL;
+ }
if (V4L2_STD_525_60 & vip->std)
interlace_lim = 240;
@@ -810,6 +615,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
interlace_lim = 288;
switch (f->fmt.pix.field) {
+ default:
case V4L2_FIELD_ANY:
if (interlace_lim < f->fmt.pix.height)
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
@@ -823,10 +629,10 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
break;
case V4L2_FIELD_INTERLACED:
break;
- default:
- return -EINVAL;
}
+ /* It is the only supported format */
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
f->fmt.pix.height &= ~1;
if (2 * interlace_lim < f->fmt.pix.height)
f->fmt.pix.height = 2 * interlace_lim;
@@ -842,8 +648,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
/**
* vidioc_s_fmt_vid_cap - set current video format parameters
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @f: returned format information
*
* set new capture format
@@ -854,22 +659,63 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
+ unsigned int t_stop, b_stop, pitch;
int ret;
ret = vidioc_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
- memcpy(&vip->format, &f->fmt.pix, sizeof(struct v4l2_pix_format));
+ if (vb2_is_busy(&vip->vb_vidq)) {
+ /* Can't change format during acquisition */
+ v4l2_err(&vip->v4l2_dev, "device busy\n");
+ return -EBUSY;
+ }
+ vip->format = f->fmt.pix;
+ switch (vip->format.field) {
+ case V4L2_FIELD_INTERLACED:
+ t_stop = ((vip->format.height / 2 - 1) << 16) |
+ (2 * vip->format.width - 1);
+ b_stop = t_stop;
+ pitch = 4 * vip->format.width;
+ break;
+ case V4L2_FIELD_TOP:
+ t_stop = ((vip->format.height - 1) << 16) |
+ (2 * vip->format.width - 1);
+ b_stop = (0 << 16) | (2 * vip->format.width - 1);
+ pitch = 2 * vip->format.width;
+ break;
+ case V4L2_FIELD_BOTTOM:
+ t_stop = (0 << 16) | (2 * vip->format.width - 1);
+ b_stop = (vip->format.height << 16) |
+ (2 * vip->format.width - 1);
+ pitch = 2 * vip->format.width;
+ break;
+ default:
+ v4l2_err(&vip->v4l2_dev, "unknown field format\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irq(&vip->slock);
+ /* Y-X Top Field Offset */
+ reg_write(vip, DVP_TFO, 0);
+ /* Y-X Bottom Field Offset */
+ reg_write(vip, DVP_BFO, 0);
+ /* Y-X Top Field Stop*/
+ reg_write(vip, DVP_TFS, t_stop);
+ /* Y-X Bottom Field Stop */
+ reg_write(vip, DVP_BFS, b_stop);
+ /* Video Memory Pitch */
+ reg_write(vip, DVP_VMP, pitch);
+ spin_unlock_irq(&vip->slock);
+
return 0;
}
/**
* vidioc_g_fmt_vid_cap - get current video format parameters
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @f: contains format information
*
* returns current video format parameters
@@ -879,150 +725,47 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- memcpy(&f->fmt.pix, &vip->format, sizeof(struct v4l2_pix_format));
- return 0;
-}
-
-/**
- * vidioc_reqfs - request buffer
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
- * @p: video buffer
- *
- * Handling is done in generic videobuf layer.
- */
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return videobuf_reqbufs(&vip->vb_vidq, p);
-}
-
-/**
- * vidioc_querybuf - query buffer
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
- * @p: video buffer
- *
- * query buffer state.
- * Handling is done in generic videobuf layer.
- */
-static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
- return videobuf_querybuf(&vip->vb_vidq, p);
-}
+ f->fmt.pix = vip->format;
-/**
- * vidioc_qbuf - queue a buffer
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
- * @p: video buffer
- *
- * Handling is done in generic videobuf layer.
- */
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return videobuf_qbuf(&vip->vb_vidq, p);
-}
-
-/**
- * vidioc_dqbuf - dequeue a buffer
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
- * @p: video buffer
- *
- * Handling is done in generic videobuf layer.
- */
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return videobuf_dqbuf(&vip->vb_vidq, p, file->f_flags & O_NONBLOCK);
-}
-
-/**
- * vidioc_streamon - turn on streaming
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
- * @type: type of capture
- *
- * turn on streaming.
- * Handling is done in generic videobuf layer.
- */
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return videobuf_streamon(&vip->vb_vidq);
-}
-
-/**
- * vidioc_streamoff - turn off streaming
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
- * @type: type of capture
- *
- * turn off streaming.
- * Handling is done in generic videobuf layer.
- */
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return videobuf_streamoff(&vip->vb_vidq);
+ return 0;
}
-static const struct v4l2_file_operations vip_fops = {
- .owner = THIS_MODULE,
- .open = vip_open,
- .release = vip_close,
- .ioctl = video_ioctl2,
- .read = vip_read,
- .mmap = vip_mmap,
- .poll = vip_poll
-};
-
static const struct v4l2_ioctl_ops vip_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
- .vidioc_s_std = vidioc_s_std,
+ /* FMT handling */
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ /* Buffer handlers */
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ /* Stream on/off */
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ /* Standard handling */
.vidioc_g_std = vidioc_g_std,
+ .vidioc_s_std = vidioc_s_std,
.vidioc_querystd = vidioc_querystd,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
+ /* Input handling */
.vidioc_enum_input = vidioc_enum_input,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_s_input = vidioc_s_input,
.vidioc_g_input = vidioc_g_input,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_s_input = vidioc_s_input,
+ /* Log status ioctl */
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ /* Event handling */
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device video_dev_template = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.release = video_device_release,
.fops = &vip_fops,
.ioctl_ops = &vip_ioctl_ops,
@@ -1036,9 +779,7 @@ static struct video_device video_dev_template = {
*
* check for both frame interrupts set ( top and bottom ).
* check FIFO overflow, but limit number of log messages after open.
- * signal a complete buffer if done.
- * dequeue a new buffer if available.
- * disable VIP if no buffer available.
+ * signal a complete buffer if done
*
* return value: IRQ_NONE, interrupt was not generated by VIP
*
@@ -1046,88 +787,122 @@ static struct video_device video_dev_template = {
*/
static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
{
- u32 status, dma;
- unsigned long flags;
- struct videobuf_buffer *vb;
+ unsigned int status;
- status = REG_READ(vip, DVP_ITS);
+ status = reg_read(vip, DVP_ITS);
- if (!status) {
- pr_debug("VIP: irq ignored\n");
+ if (!status) /* No interrupt to handle */
return IRQ_NONE;
- }
-
- if (!vip->started)
- return IRQ_HANDLED;
- if (status & DVP_IT_VSB)
- vip->bcount++;
-
- if (status & DVP_IT_VST)
- vip->tcount++;
+ if (status & DVP_IT_FIFO)
+ if (vip->overflow++ > 5)
+ pr_info("VIP: fifo overflow\n");
- if ((DVP_IT_VSB | DVP_IT_VST) == (status & (DVP_IT_VST | DVP_IT_VSB))) {
+ if ((status & DVP_IT_VST) && (status & DVP_IT_VSB)) {
/* this is bad, we are too slow, hope the condition is gone
* on the next frame */
- pr_info("VIP: both irqs\n");
return IRQ_HANDLED;
}
- if (status & DVP_IT_FIFO) {
- if (5 > vip->overflow++)
- pr_info("VIP: fifo overflow\n");
+ if (status & DVP_IT_VST)
+ if ((++vip->tcount) < 2)
+ return IRQ_HANDLED;
+ if (status & DVP_IT_VSB) {
+ vip->bcount++;
+ return IRQ_HANDLED;
}
- if (2 > vip->tcount)
- return IRQ_HANDLED;
+ if (vip->active) { /* Acquisition is over on this buffer */
+ /* Disable acquisition */
+ reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA);
+ /* Remove the active buffer from the list */
+ do_gettimeofday(&vip->active->vb.v4l2_buf.timestamp);
+ vip->active->vb.v4l2_buf.sequence = vip->sequence++;
+ vb2_buffer_done(&vip->active->vb, VB2_BUF_STATE_DONE);
+ }
- if (status & DVP_IT_VSB)
- return IRQ_HANDLED;
+ return IRQ_HANDLED;
+}
- spin_lock_irqsave(&vip->slock, flags);
+static void sta2x11_vip_init_register(struct sta2x11_vip *vip)
+{
+ /* Register initialization */
+ spin_lock_irq(&vip->slock);
+ /* Clean interrupt */
+ reg_read(vip, DVP_ITS);
+ /* Enable Half Line per vertical */
+ reg_write(vip, DVP_HLFLN, DVP_HLFLN_SD);
+ /* Reset VIP control */
+ reg_write(vip, DVP_CTL, DVP_CTL_RST);
+ /* Clear VIP control */
+ reg_write(vip, DVP_CTL, 0);
+ spin_unlock_irq(&vip->slock);
+}
+static void sta2x11_vip_clear_register(struct sta2x11_vip *vip)
+{
+ spin_lock_irq(&vip->slock);
+ /* Disable interrupt */
+ reg_write(vip, DVP_ITM, 0);
+ /* Reset VIP Control */
+ reg_write(vip, DVP_CTL, DVP_CTL_RST);
+ /* Clear VIP Control */
+ reg_write(vip, DVP_CTL, 0);
+ /* Clean VIP Interrupt */
+ reg_read(vip, DVP_ITS);
+ spin_unlock_irq(&vip->slock);
+}
+static int sta2x11_vip_init_buffer(struct sta2x11_vip *vip)
+{
+ int err;
- REG_WRITE(vip, DVP_CTL, REG_READ(vip, DVP_CTL) & ~DVP_CTL_ENA);
- if (vip->active) {
- do_gettimeofday(&vip->active->ts);
- vip->active->field_count++;
- vip->active->state = VIDEOBUF_DONE;
- wake_up(&vip->active->done);
- vip->active = NULL;
+ err = dma_set_coherent_mask(&vip->pdev->dev, DMA_BIT_MASK(29));
+ if (err) {
+ v4l2_err(&vip->v4l2_dev, "Cannot configure coherent mask");
+ return err;
}
- if (!vip->closing) {
- if (list_empty(&vip->capture))
- goto done;
-
- vb = list_first_entry(&vip->capture, struct videobuf_buffer,
- queue);
- if (NULL == vb) {
- pr_info("VIP: no buffer\n");
- goto done;
- }
- vb->state = VIDEOBUF_ACTIVE;
- list_del(&vb->queue);
- vip->active = vb;
- dma = videobuf_to_dma_contig(vb);
- switch (vip->format.field) {
- case V4L2_FIELD_INTERLACED:
- REG_WRITE(vip, DVP_VTP, dma);
- REG_WRITE(vip, DVP_VBP, dma + vip->format.width * 2);
- break;
- case V4L2_FIELD_TOP:
- case V4L2_FIELD_BOTTOM:
- REG_WRITE(vip, DVP_VTP, dma);
- REG_WRITE(vip, DVP_VBP, dma);
- break;
- default:
- pr_warning("VIP: unknown field format\n");
- goto done;
- break;
- }
- REG_WRITE(vip, DVP_CTL, REG_READ(vip, DVP_CTL) | DVP_CTL_ENA);
+ memset(&vip->vb_vidq, 0, sizeof(struct vb2_queue));
+ vip->vb_vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vip->vb_vidq.io_modes = VB2_MMAP | VB2_READ;
+ vip->vb_vidq.drv_priv = vip;
+ vip->vb_vidq.buf_struct_size = sizeof(struct vip_buffer);
+ vip->vb_vidq.ops = &vip_video_qops;
+ vip->vb_vidq.mem_ops = &vb2_dma_contig_memops;
+ err = vb2_queue_init(&vip->vb_vidq);
+ if (err)
+ return err;
+ INIT_LIST_HEAD(&vip->buffer_list);
+ spin_lock_init(&vip->lock);
+
+
+ vip->alloc_ctx = vb2_dma_contig_init_ctx(&vip->pdev->dev);
+ if (IS_ERR(vip->alloc_ctx)) {
+ v4l2_err(&vip->v4l2_dev, "Can't allocate buffer context");
+ return PTR_ERR(vip->alloc_ctx);
}
-done:
- spin_unlock_irqrestore(&vip->slock, flags);
- return IRQ_HANDLED;
+
+ return 0;
+}
+static void sta2x11_vip_release_buffer(struct sta2x11_vip *vip)
+{
+ vb2_dma_contig_cleanup_ctx(vip->alloc_ctx);
+}
+static int sta2x11_vip_init_controls(struct sta2x11_vip *vip)
+{
+ /*
+ * Initialize an empty control handler so VIP can inherit controls
+ * from the ADV7180
+ */
+ v4l2_ctrl_handler_init(&vip->ctrl_hdl, 0);
+
+ vip->v4l2_dev.ctrl_handler = &vip->ctrl_hdl;
+ if (vip->ctrl_hdl.error) {
+ int err = vip->ctrl_hdl.error;
+
+ v4l2_ctrl_handler_free(&vip->ctrl_hdl);
+ return err;
+ }
+
+ return 0;
}
/**
@@ -1212,10 +987,17 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
struct sta2x11_vip *vip;
struct vip_config *config;
+ /* Check if the hardware supports 26-bit DMA addressing */
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(26))) {
+ dev_err(&pdev->dev, "26-bit DMA addressing not available\n");
+ return -EINVAL;
+ }
+ /* Enable PCI */
ret = pci_enable_device(pdev);
if (ret)
return ret;
+ /* Get VIP platform data */
config = dev_get_platdata(&pdev->dev);
if (!config) {
dev_info(&pdev->dev, "VIP slot disabled\n");
@@ -1223,6 +1005,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
goto disable;
}
+ /* Power configuration */
ret = vip_gpio_reserve(&pdev->dev, config->pwr_pin, 0,
config->pwr_name);
if (ret)
@@ -1237,7 +1020,6 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
goto disable;
}
}
-
if (config->pwr_pin != -1) {
/* Datasheet says 5ms between PWR and RST */
usleep_range(5000, 25000);
@@ -1251,17 +1033,20 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
}
usleep_range(5000, 25000);
+ /* Allocate a new VIP instance */
vip = kzalloc(sizeof(struct sta2x11_vip), GFP_KERNEL);
if (!vip) {
ret = -ENOMEM;
goto release_gpios;
}
-
vip->pdev = pdev;
vip->std = V4L2_STD_PAL;
vip->format = formats_50[0];
vip->config = config;
+ ret = sta2x11_vip_init_controls(vip);
+ if (ret)
+ goto free_mem;
if (v4l2_device_register(&pdev->dev, &vip->v4l2_dev))
goto free_mem;
@@ -1271,46 +1056,52 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
pci_set_master(pdev);
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
if (ret)
goto unreg;
vip->iomem = pci_iomap(pdev, 0, 0x100);
if (!vip->iomem) {
- ret = -ENOMEM; /* FIXME */
+ ret = -ENOMEM;
goto release;
}
pci_enable_msi(pdev);
- INIT_LIST_HEAD(&vip->capture);
+ /* Initialize buffer */
+ ret = sta2x11_vip_init_buffer(vip);
+ if (ret)
+ goto unmap;
+
spin_lock_init(&vip->slock);
- mutex_init(&vip->mutex);
- vip->started = 0;
- vip->disabled = 0;
ret = request_irq(pdev->irq,
(irq_handler_t) vip_irq,
- IRQF_SHARED, DRV_NAME, vip);
+ IRQF_SHARED, KBUILD_MODNAME, vip);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
ret = -ENODEV;
- goto unmap;
+ goto release_buf;
}
+ /* Alloc, initialize and register video device */
vip->video_dev = video_device_alloc();
if (!vip->video_dev) {
ret = -ENOMEM;
goto release_irq;
}
- *(vip->video_dev) = video_dev_template;
+ vip->video_dev = &video_dev_template;
+ vip->video_dev->v4l2_dev = &vip->v4l2_dev;
+ vip->video_dev->queue = &vip->vb_vidq;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vip->video_dev->flags);
video_set_drvdata(vip->video_dev, vip);
ret = video_register_device(vip->video_dev, VFL_TYPE_GRABBER, -1);
if (ret)
goto vrelease;
+ /* Get ADV7180 subdevice */
vip->adapter = i2c_get_adapter(vip->config->i2c_id);
if (!vip->adapter) {
ret = -ENODEV;
@@ -1328,10 +1119,11 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
}
i2c_put_adapter(vip->adapter);
-
v4l2_subdev_call(vip->decoder, core, init, 0);
- pr_info("STA2X11 Video Input Port (VIP) loaded\n");
+ sta2x11_vip_init_register(vip);
+
+ dev_info(&pdev->dev, "STA2X11 Video Input Port (VIP) loaded\n");
return 0;
vunreg:
@@ -1343,10 +1135,12 @@ vrelease:
video_device_release(vip->video_dev);
release_irq:
free_irq(pdev->irq, vip);
+release_buf:
+ sta2x11_vip_release_buffer(vip);
pci_disable_msi(pdev);
unmap:
+ vb2_queue_release(&vip->vb_vidq);
pci_iounmap(pdev, vip->iomem);
- mutex_destroy(&vip->mutex);
release:
pci_release_regions(pdev);
unreg:
@@ -1382,16 +1176,18 @@ static void sta2x11_vip_remove_one(struct pci_dev *pdev)
struct sta2x11_vip *vip =
container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
+ sta2x11_vip_clear_register(vip);
+
video_set_drvdata(vip->video_dev, NULL);
video_unregister_device(vip->video_dev);
/*do not call video_device_release() here, is already done */
free_irq(pdev->irq, vip);
pci_disable_msi(pdev);
+ vb2_queue_release(&vip->vb_vidq);
pci_iounmap(pdev, vip->iomem);
pci_release_regions(pdev);
v4l2_device_unregister(&vip->v4l2_dev);
- mutex_destroy(&vip->mutex);
vip_gpio_release(&pdev->dev, vip->config->pwr_pin,
vip->config->pwr_name);
@@ -1416,9 +1212,6 @@ static void sta2x11_vip_remove_one(struct pci_dev *pdev)
*
* return value: 0 always indicate success,
* even if device could not be disabled. (workaround for hardware problem)
- *
- * reurn value : 0, always succesful, even if hardware does not not support
- * power down mode.
*/
static int sta2x11_vip_suspend(struct pci_dev *pdev, pm_message_t state)
{
@@ -1429,15 +1222,15 @@ static int sta2x11_vip_suspend(struct pci_dev *pdev, pm_message_t state)
int i;
spin_lock_irqsave(&vip->slock, flags);
- vip->register_save_area[0] = REG_READ(vip, DVP_CTL);
- REG_WRITE(vip, DVP_CTL, vip->register_save_area[0] & DVP_CTL_DIS);
- vip->register_save_area[SAVE_COUNT] = REG_READ(vip, DVP_ITM);
- REG_WRITE(vip, DVP_ITM, 0);
+ vip->register_save_area[0] = reg_read(vip, DVP_CTL);
+ reg_write(vip, DVP_CTL, vip->register_save_area[0] & DVP_CTL_DIS);
+ vip->register_save_area[SAVE_COUNT] = reg_read(vip, DVP_ITM);
+ reg_write(vip, DVP_ITM, 0);
for (i = 1; i < SAVE_COUNT; i++)
- vip->register_save_area[i] = REG_READ(vip, 4 * i);
+ vip->register_save_area[i] = reg_read(vip, 4 * i);
for (i = 0; i < AUX_COUNT; i++)
vip->register_save_area[SAVE_COUNT + IRQ_COUNT + i] =
- REG_READ(vip, registers_to_save[i]);
+ reg_read(vip, registers_to_save[i]);
spin_unlock_irqrestore(&vip->slock, flags);
/* save pci state */
pci_save_state(pdev);
@@ -1477,7 +1270,7 @@ static int sta2x11_vip_resume(struct pci_dev *pdev)
if (vip->disabled) {
ret = pci_enable_device(pdev);
if (ret) {
- pr_warning("VIP: Can't enable device.\n");
+ pr_warn("VIP: Can't enable device.\n");
return ret;
}
vip->disabled = 0;
@@ -1488,7 +1281,7 @@ static int sta2x11_vip_resume(struct pci_dev *pdev)
* do not call pci_disable_device on sta2x11 because it
* break all other Bus masters on this EP
*/
- pr_warning("VIP: Can't enable device.\n");
+ pr_warn("VIP: Can't enable device.\n");
vip->disabled = 1;
return ret;
}
@@ -1497,12 +1290,12 @@ static int sta2x11_vip_resume(struct pci_dev *pdev)
spin_lock_irqsave(&vip->slock, flags);
for (i = 1; i < SAVE_COUNT; i++)
- REG_WRITE(vip, 4 * i, vip->register_save_area[i]);
+ reg_write(vip, 4 * i, vip->register_save_area[i]);
for (i = 0; i < AUX_COUNT; i++)
- REG_WRITE(vip, registers_to_save[i],
+ reg_write(vip, registers_to_save[i],
vip->register_save_area[SAVE_COUNT + IRQ_COUNT + i]);
- REG_WRITE(vip, DVP_CTL, vip->register_save_area[0]);
- REG_WRITE(vip, DVP_ITM, vip->register_save_area[SAVE_COUNT]);
+ reg_write(vip, DVP_CTL, vip->register_save_area[0]);
+ reg_write(vip, DVP_ITM, vip->register_save_area[SAVE_COUNT]);
spin_unlock_irqrestore(&vip->slock, flags);
return 0;
}
@@ -1515,7 +1308,7 @@ static DEFINE_PCI_DEVICE_TABLE(sta2x11_vip_pci_tbl) = {
};
static struct pci_driver sta2x11_vip_driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.probe = sta2x11_vip_init_one,
.remove = sta2x11_vip_remove_one,
.id_table = sta2x11_vip_pci_tbl,
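
The sta2x11 conversion above hands finished frames back to the videobuf2 core from the interrupt handler. A stripped-down sketch of that completion step, with "my_dev" and "my_buf" as illustrative stand-ins for the driver's own structures:

#include <linux/list.h>
#include <linux/time.h>
#include <media/videobuf2-core.h>

struct my_buf {
	struct vb2_buffer vb;
	struct list_head list;
};

struct my_dev {
	unsigned int sequence;
};

static void my_complete_buffer(struct my_dev *dev, struct my_buf *buf)
{
	/* stamp the frame and return it to the vb2 core */
	do_gettimeofday(&buf->vb.v4l2_buf.timestamp);
	buf->vb.v4l2_buf.sequence = dev->sequence++;
	vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
}
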
diff --git a/drivers/media/pci/ttpci/Kconfig b/drivers/media/pci/ttpci/Kconfig
index 314e417addae..0dcb8cd77676 100644
--- a/drivers/media/pci/ttpci/Kconfig
+++ b/drivers/media/pci/ttpci/Kconfig
@@ -1,8 +1,3 @@
-config TTPCI_EEPROM
- tristate
- depends on I2C
- default n
-
config DVB_AV7110
tristate "AV7110 cards"
depends on DVB_CORE && PCI && I2C
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index 4656d4a10af0..3dc7aa9b6f40 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -235,7 +235,7 @@ static void recover_arm(struct av7110 *av7110)
restart_feeds(av7110);
-#if defined(CONFIG_INPUT_EVDEV) || defined(CONFIG_INPUT_EVDEV_MODULE)
+#if IS_ENABLED(CONFIG_INPUT_EVDEV)
av7110_check_ir_config(av7110, true);
#endif
}
@@ -268,7 +268,7 @@ static int arm_thread(void *data)
if (!av7110->arm_ready)
continue;
-#if defined(CONFIG_INPUT_EVDEV) || defined(CONFIG_INPUT_EVDEV_MODULE)
+#if IS_ENABLED(CONFIG_INPUT_EVDEV)
av7110_check_ir_config(av7110, false);
#endif
@@ -1730,7 +1730,7 @@ static int alps_tdlb7_tuner_set_params(struct dvb_frontend *fe)
static int alps_tdlb7_request_firmware(struct dvb_frontend* fe, const struct firmware **fw, char* name)
{
-#if defined(CONFIG_DVB_SP8870) || defined(CONFIG_DVB_SP8870_MODULE)
+#if IS_ENABLED(CONFIG_DVB_SP8870)
struct av7110* av7110 = fe->dvb->priv;
return request_firmware(fw, name, &av7110->dev->pci->dev);
@@ -2723,7 +2723,9 @@ static int av7110_attach(struct saa7146_dev* dev,
if (ret < 0)
goto err_av7110_exit_v4l_12;
-#if defined(CONFIG_INPUT_EVDEV) || defined(CONFIG_INPUT_EVDEV_MODULE)
+ mutex_init(&av7110->ioctl_mutex);
+
+#if IS_ENABLED(CONFIG_INPUT_EVDEV)
av7110_ir_init(av7110);
#endif
printk(KERN_INFO "dvb-ttpci: found av7110-%d.\n", av7110_num);
@@ -2766,7 +2768,7 @@ static int av7110_detach(struct saa7146_dev* saa)
struct av7110 *av7110 = saa->ext_priv;
dprintk(4, "%p\n", av7110);
-#if defined(CONFIG_INPUT_EVDEV) || defined(CONFIG_INPUT_EVDEV_MODULE)
+#if IS_ENABLED(CONFIG_INPUT_EVDEV)
av7110_ir_exit(av7110);
#endif
if (budgetpatch || av7110->full_ts) {
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index a378662b1dcf..ef3d9606b269 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -271,6 +271,8 @@ struct av7110 {
struct dvb_frontend* fe;
fe_status_t fe_status;
+ struct mutex ioctl_mutex;
+
/* crash recovery */
void (*recover)(struct av7110* av7110);
fe_sec_voltage_t saved_voltage;
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c
index 952b33dbac4f..301029ca4535 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/media/pci/ttpci/av7110_av.c
@@ -1109,6 +1109,9 @@ static int dvb_video_ioctl(struct file *file,
}
}
+ if (mutex_lock_interruptible(&av7110->ioctl_mutex))
+ return -ERESTARTSYS;
+
switch (cmd) {
case VIDEO_STOP:
av7110->videostate.play_state = VIDEO_STOPPED;
@@ -1297,6 +1300,7 @@ static int dvb_video_ioctl(struct file *file,
break;
}
+ mutex_unlock(&av7110->ioctl_mutex);
return ret;
}
@@ -1314,6 +1318,9 @@ static int dvb_audio_ioctl(struct file *file,
(cmd != AUDIO_GET_STATUS))
return -EPERM;
+ if (mutex_lock_interruptible(&av7110->ioctl_mutex))
+ return -ERESTARTSYS;
+
switch (cmd) {
case AUDIO_STOP:
if (av7110->audiostate.stream_source == AUDIO_SOURCE_MEMORY)
@@ -1442,6 +1449,7 @@ static int dvb_audio_ioctl(struct file *file,
ret = -ENOIOCTLCMD;
}
+ mutex_unlock(&av7110->ioctl_mutex);
return ret;
}
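
The av7110 ioctl paths above are now serialized by a per-device mutex taken interruptibly, with every exit funnelled through a single unlock. A minimal sketch of the same shape; "my_dev", "my_ioctl" and the empty switch are placeholders:

#include <linux/mutex.h>
#include <linux/errno.h>

struct my_dev {
	struct mutex ioctl_mutex;
};

static int my_ioctl(struct my_dev *dev, unsigned int cmd)
{
	int ret = 0;

	if (mutex_lock_interruptible(&dev->ioctl_mutex))
		return -ERESTARTSYS;

	switch (cmd) {
	default:	/* real commands would be handled here */
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&dev->ioctl_mutex);
	return ret;
}
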
diff --git a/drivers/media/pci/ttpci/av7110_ca.c b/drivers/media/pci/ttpci/av7110_ca.c
index 9fc1dd0ba4c3..a6079b90252a 100644
--- a/drivers/media/pci/ttpci/av7110_ca.c
+++ b/drivers/media/pci/ttpci/av7110_ca.c
@@ -253,12 +253,17 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
unsigned long arg = (unsigned long) parg;
+ int ret = 0;
dprintk(8, "av7110:%p\n",av7110);
+ if (mutex_lock_interruptible(&av7110->ioctl_mutex))
+ return -ERESTARTSYS;
+
switch (cmd) {
case CA_RESET:
- return ci_ll_reset(&av7110->ci_wbuffer, file, arg, &av7110->ci_slot[0]);
+ ret = ci_ll_reset(&av7110->ci_wbuffer, file, arg,
+ &av7110->ci_slot[0]);
break;
case CA_GET_CAP:
{
@@ -277,8 +282,10 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
{
ca_slot_info_t *info=(ca_slot_info_t *)parg;
- if (info->num < 0 || info->num > 1)
+ if (info->num < 0 || info->num > 1) {
+ mutex_unlock(&av7110->ioctl_mutex);
return -EINVAL;
+ }
av7110->ci_slot[info->num].num = info->num;
av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
CA_CI_LINK : CA_CI;
@@ -306,10 +313,10 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
{
ca_descr_t *descr = (ca_descr_t*) parg;
- if (descr->index >= 16)
- return -EINVAL;
- if (descr->parity > 1)
+ if (descr->index >= 16 || descr->parity > 1) {
+ mutex_unlock(&av7110->ioctl_mutex);
return -EINVAL;
+ }
av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetDescr, 5,
(descr->index<<8)|descr->parity,
(descr->cw[0]<<8)|descr->cw[1],
@@ -320,9 +327,12 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
}
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
- return 0;
+
+ mutex_unlock(&av7110->ioctl_mutex);
+ return ret;
}
static ssize_t dvb_ca_write(struct file *file, const char __user *buf,
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index a90a3b9b09bf..bb53d2488ad0 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -708,8 +708,7 @@ static const struct i2c_algo_bit_data zoran_i2c_bit_data_template = {
static int
zoran_register_i2c (struct zoran *zr)
{
- memcpy(&zr->i2c_algo, &zoran_i2c_bit_data_template,
- sizeof(struct i2c_algo_bit_data));
+ zr->i2c_algo = zoran_i2c_bit_data_template;
zr->i2c_algo.data = zr;
strlcpy(zr->i2c_adapter.name, ZR_DEVNAME(zr),
sizeof(zr->i2c_adapter.name));
diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c
index a4cd504b8eee..519164c572c8 100644
--- a/drivers/media/pci/zoran/zoran_device.c
+++ b/drivers/media/pci/zoran/zoran_device.c
@@ -1169,7 +1169,7 @@ zoran_reap_stat_com (struct zoran *zr)
}
frame = zr->jpg_pend[zr->jpg_dma_tail & BUZ_MASK_FRAME];
buffer = &zr->jpg_buffers.buffer[frame];
- do_gettimeofday(&buffer->bs.timestamp);
+ v4l2_get_timestamp(&buffer->bs.timestamp);
if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) {
buffer->bs.length = (stat_com & 0x7fffff) >> 1;
@@ -1407,7 +1407,7 @@ zoran_irq (int irq,
zr->v4l_buffers.buffer[zr->v4l_grab_frame].state = BUZ_STATE_DONE;
zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.seq = zr->v4l_grab_seq;
- do_gettimeofday(&zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.timestamp);
+ v4l2_get_timestamp(&zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.timestamp);
zr->v4l_grab_frame = NO_GRAB_ACTIVE;
zr->v4l_pend_tail++;
}
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index e60ae41e2319..2e8f518f298f 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -1334,7 +1334,7 @@ static int zoran_v4l2_buffer_status(struct zoran_fh *fh,
struct zoran *zr = fh->zr;
unsigned long flags;
- buf->flags = V4L2_BUF_FLAG_MAPPED;
+ buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW:
diff --git a/drivers/media/pci/zoran/zoran_procfs.c b/drivers/media/pci/zoran/zoran_procfs.c
index f1423b777db1..e084b0a21b1b 100644
--- a/drivers/media/pci/zoran/zoran_procfs.c
+++ b/drivers/media/pci/zoran/zoran_procfs.c
@@ -137,7 +137,7 @@ static int zoran_open(struct inode *inode, struct file *file)
static ssize_t zoran_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- struct zoran *zr = PDE(file->f_path.dentry->d_inode)->data;
+ struct zoran *zr = PDE(file_inode(file))->data;
char *string, *sp;
char *line, *ldelim, *varname, *svar, *tdelim;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 33241120a58c..05d7b6333461 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -92,7 +92,7 @@ config VIDEO_M32R_AR_M64278
config VIDEO_OMAP2
tristate "OMAP2 Camera Capture Interface driver"
- depends on VIDEO_DEV && ARCH_OMAP2
+ depends on VIDEO_DEV && ARCH_OMAP2 && VIDEO_V4L2_INT_DEVICE
select VIDEOBUF_DMA_SG
---help---
This is a v4l2 driver for the TI OMAP2 camera capture interface
@@ -202,6 +202,15 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
help
This is a v4l2 driver for Samsung EXYNOS5 SoC G-Scaler.
+config VIDEO_SH_VEU
+ tristate "SuperH VEU mem2mem video processing driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Video Engine Unit (VEU) on SuperH and
+ SH-Mobile SoCs.
+
endif # V4L_MEM2MEM_DRIVERS
menuconfig V4L_TEST_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 4817d2802171..42089ba3600f 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -25,6 +25,8 @@ obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
obj-$(CONFIG_VIDEO_CODA) += coda.o
+obj-$(CONFIG_VIDEO_SH_VEU) += sh_veu.o
+
obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif/
diff --git a/drivers/media/platform/blackfin/Kconfig b/drivers/media/platform/blackfin/Kconfig
index ecd5323768b7..cc239972fa2c 100644
--- a/drivers/media/platform/blackfin/Kconfig
+++ b/drivers/media/platform/blackfin/Kconfig
@@ -7,4 +7,9 @@ config VIDEO_BLACKFIN_CAPTURE
Choose PPI or EPPI as its interface.
To compile this driver as a module, choose M here: the
- module will be called bfin_video_capture.
+ module will be called bfin_capture.
+
+config VIDEO_BLACKFIN_PPI
+ tristate
+ depends on VIDEO_BLACKFIN_CAPTURE
+ default VIDEO_BLACKFIN_CAPTURE
diff --git a/drivers/media/platform/blackfin/Makefile b/drivers/media/platform/blackfin/Makefile
index aa3a0a216387..30421bc23080 100644
--- a/drivers/media/platform/blackfin/Makefile
+++ b/drivers/media/platform/blackfin/Makefile
@@ -1,2 +1,2 @@
-bfin_video_capture-objs := bfin_capture.o ppi.o
-obj-$(CONFIG_VIDEO_BLACKFIN_CAPTURE) += bfin_video_capture.o
+obj-$(CONFIG_VIDEO_BLACKFIN_CAPTURE) += bfin_capture.o
+obj-$(CONFIG_VIDEO_BLACKFIN_PPI) += ppi.o
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 1aad2a65d2f3..5f209d5810dc 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -52,6 +52,7 @@ struct bcap_format {
u32 pixelformat;
enum v4l2_mbus_pixelcode mbus_code;
int bpp; /* bits per pixel */
+ int dlen; /* data length for ppi in bits */
};
struct bcap_buffer {
@@ -76,18 +77,20 @@ struct bcap_device {
unsigned int cur_input;
/* current selected standard */
v4l2_std_id std;
+ /* current selected dv_timings */
+ struct v4l2_dv_timings dv_timings;
/* used to store pixel format */
struct v4l2_pix_format fmt;
/* bits per pixel*/
int bpp;
+ /* data length for ppi in bits */
+ int dlen;
/* used to store sensor supported format */
struct bcap_format *sensor_formats;
/* number of sensor formats array */
int num_sensor_formats;
/* pointing to current video buffer */
struct bcap_buffer *cur_frm;
- /* pointing to next video buffer */
- struct bcap_buffer *next_frm;
/* buffer queue used in videobuf2 */
struct vb2_queue buffer_queue;
/* allocator-specific contexts for each plane */
@@ -116,24 +119,35 @@ static const struct bcap_format bcap_formats[] = {
.pixelformat = V4L2_PIX_FMT_UYVY,
.mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
.bpp = 16,
+ .dlen = 8,
},
{
.desc = "YCbCr 4:2:2 Interleaved YUYV",
.pixelformat = V4L2_PIX_FMT_YUYV,
.mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
.bpp = 16,
+ .dlen = 8,
+ },
+ {
+ .desc = "YCbCr 4:2:2 Interleaved UYVY",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_1X16,
+ .bpp = 16,
+ .dlen = 16,
},
{
.desc = "RGB 565",
.pixelformat = V4L2_PIX_FMT_RGB565,
.mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
.bpp = 16,
+ .dlen = 8,
},
{
.desc = "RGB 444",
.pixelformat = V4L2_PIX_FMT_RGB444,
.mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
.bpp = 16,
+ .dlen = 8,
},
};
@@ -366,9 +380,39 @@ static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
params.width = bcap_dev->fmt.width;
params.height = bcap_dev->fmt.height;
params.bpp = bcap_dev->bpp;
+ params.dlen = bcap_dev->dlen;
params.ppi_control = bcap_dev->cfg->ppi_control;
params.int_mask = bcap_dev->cfg->int_mask;
- params.blank_clocks = bcap_dev->cfg->blank_clocks;
+ if (bcap_dev->cfg->inputs[bcap_dev->cur_input].capabilities
+ & V4L2_IN_CAP_CUSTOM_TIMINGS) {
+ struct v4l2_bt_timings *bt = &bcap_dev->dv_timings.bt;
+
+ params.hdelay = bt->hsync + bt->hbackporch;
+ params.vdelay = bt->vsync + bt->vbackporch;
+ params.line = bt->hfrontporch + bt->hsync
+ + bt->hbackporch + bt->width;
+ params.frame = bt->vfrontporch + bt->vsync
+ + bt->vbackporch + bt->height;
+ if (bt->interlaced)
+ params.frame += bt->il_vfrontporch + bt->il_vsync
+ + bt->il_vbackporch;
+ } else if (bcap_dev->cfg->inputs[bcap_dev->cur_input].capabilities
+ & V4L2_IN_CAP_STD) {
+ params.hdelay = 0;
+ params.vdelay = 0;
+ if (bcap_dev->std & V4L2_STD_525_60) {
+ params.line = 858;
+ params.frame = 525;
+ } else {
+ params.line = 864;
+ params.frame = 625;
+ }
+ } else {
+ params.hdelay = 0;
+ params.vdelay = 0;
+ params.line = params.width + bcap_dev->cfg->blank_pixels;
+ params.frame = params.height;
+ }
ret = ppi->ops->set_params(ppi, &params);
if (ret < 0) {
v4l2_err(&bcap_dev->v4l2_dev,
@@ -409,10 +453,10 @@ static int bcap_stop_streaming(struct vb2_queue *vq)
/* release all active buffers */
while (!list_empty(&bcap_dev->dma_queue)) {
- bcap_dev->next_frm = list_entry(bcap_dev->dma_queue.next,
+ bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
struct bcap_buffer, list);
- list_del(&bcap_dev->next_frm->list);
- vb2_buffer_done(&bcap_dev->next_frm->vb, VB2_BUF_STATE_ERROR);
+ list_del(&bcap_dev->cur_frm->list);
+ vb2_buffer_done(&bcap_dev->cur_frm->vb, VB2_BUF_STATE_ERROR);
}
return 0;
}
@@ -484,17 +528,26 @@ static irqreturn_t bcap_isr(int irq, void *dev_id)
{
struct ppi_if *ppi = dev_id;
struct bcap_device *bcap_dev = ppi->priv;
- struct timeval timevalue;
struct vb2_buffer *vb = &bcap_dev->cur_frm->vb;
dma_addr_t addr;
spin_lock(&bcap_dev->lock);
- if (bcap_dev->cur_frm != bcap_dev->next_frm) {
- do_gettimeofday(&timevalue);
- vb->v4l2_buf.timestamp = timevalue;
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
- bcap_dev->cur_frm = bcap_dev->next_frm;
+ if (!list_empty(&bcap_dev->dma_queue)) {
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+ if (ppi->err) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ ppi->err = false;
+ } else {
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+ bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
+ struct bcap_buffer, list);
+ list_del(&bcap_dev->cur_frm->list);
+ } else {
+ /* clear error flag, we will get a new frame */
+ if (ppi->err)
+ ppi->err = false;
}
ppi->ops->stop(ppi);
@@ -502,13 +555,8 @@ static irqreturn_t bcap_isr(int irq, void *dev_id)
if (bcap_dev->stop) {
complete(&bcap_dev->comp);
} else {
- if (!list_empty(&bcap_dev->dma_queue)) {
- bcap_dev->next_frm = list_entry(bcap_dev->dma_queue.next,
- struct bcap_buffer, list);
- list_del(&bcap_dev->next_frm->list);
- addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->next_frm->vb, 0);
- ppi->ops->update_addr(ppi, (unsigned long)addr);
- }
+ addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
+ ppi->ops->update_addr(ppi, (unsigned long)addr);
ppi->ops->start(ppi);
}
@@ -542,9 +590,8 @@ static int bcap_streamon(struct file *file, void *priv,
}
/* get the next frame from the dma queue */
- bcap_dev->next_frm = list_entry(bcap_dev->dma_queue.next,
+ bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
struct bcap_buffer, list);
- bcap_dev->cur_frm = bcap_dev->next_frm;
/* remove buffer from the dma queue */
list_del(&bcap_dev->cur_frm->list);
addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
@@ -602,6 +649,37 @@ static int bcap_s_std(struct file *file, void *priv, v4l2_std_id *std)
return 0;
}
+static int bcap_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ int ret;
+
+ ret = v4l2_subdev_call(bcap_dev->sd, video,
+ g_dv_timings, timings);
+ if (ret < 0)
+ return ret;
+
+ bcap_dev->dv_timings = *timings;
+ return 0;
+}
+
+static int bcap_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ int ret;
+ if (vb2_is_busy(&bcap_dev->buffer_queue))
+ return -EBUSY;
+
+ ret = v4l2_subdev_call(bcap_dev->sd, video, s_dv_timings, timings);
+ if (ret < 0)
+ return ret;
+
+ bcap_dev->dv_timings = *timings;
+ return 0;
+}
+
static int bcap_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
@@ -650,13 +728,15 @@ static int bcap_s_input(struct file *file, void *priv, unsigned int index)
return ret;
}
bcap_dev->cur_input = index;
+ /* if this route has specific config, update ppi control */
+ if (route->ppi_control)
+ config->ppi_control = route->ppi_control;
return 0;
}
static int bcap_try_format(struct bcap_device *bcap,
struct v4l2_pix_format *pixfmt,
- enum v4l2_mbus_pixelcode *mbus_code,
- int *bpp)
+ struct bcap_format *bcap_fmt)
{
struct bcap_format *sf = bcap->sensor_formats;
struct bcap_format *fmt = NULL;
@@ -671,16 +751,20 @@ static int bcap_try_format(struct bcap_device *bcap,
if (i == bcap->num_sensor_formats)
fmt = &sf[0];
- if (mbus_code)
- *mbus_code = fmt->mbus_code;
- if (bpp)
- *bpp = fmt->bpp;
v4l2_fill_mbus_format(&mbus_fmt, pixfmt, fmt->mbus_code);
ret = v4l2_subdev_call(bcap->sd, video,
try_mbus_fmt, &mbus_fmt);
if (ret < 0)
return ret;
v4l2_fill_pix_format(pixfmt, &mbus_fmt);
+ if (bcap_fmt) {
+ for (i = 0; i < bcap->num_sensor_formats; i++) {
+ fmt = &sf[i];
+ if (mbus_fmt.code == fmt->mbus_code)
+ break;
+ }
+ *bcap_fmt = *fmt;
+ }
pixfmt->bytesperline = pixfmt->width * fmt->bpp / 8;
pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
return 0;
@@ -709,7 +793,7 @@ static int bcap_try_fmt_vid_cap(struct file *file, void *priv,
struct bcap_device *bcap_dev = video_drvdata(file);
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
- return bcap_try_format(bcap_dev, pixfmt, NULL, NULL);
+ return bcap_try_format(bcap_dev, pixfmt, NULL);
}
static int bcap_g_fmt_vid_cap(struct file *file, void *priv,
@@ -726,24 +810,25 @@ static int bcap_s_fmt_vid_cap(struct file *file, void *priv,
{
struct bcap_device *bcap_dev = video_drvdata(file);
struct v4l2_mbus_framefmt mbus_fmt;
- enum v4l2_mbus_pixelcode mbus_code;
+ struct bcap_format bcap_fmt;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
- int ret, bpp;
+ int ret;
if (vb2_is_busy(&bcap_dev->buffer_queue))
return -EBUSY;
/* see if format works */
- ret = bcap_try_format(bcap_dev, pixfmt, &mbus_code, &bpp);
+ ret = bcap_try_format(bcap_dev, pixfmt, &bcap_fmt);
if (ret < 0)
return ret;
- v4l2_fill_mbus_format(&mbus_fmt, pixfmt, mbus_code);
+ v4l2_fill_mbus_format(&mbus_fmt, pixfmt, bcap_fmt.mbus_code);
ret = v4l2_subdev_call(bcap_dev->sd, video, s_mbus_fmt, &mbus_fmt);
if (ret < 0)
return ret;
bcap_dev->fmt = *pixfmt;
- bcap_dev->bpp = bpp;
+ bcap_dev->bpp = bcap_fmt.bpp;
+ bcap_dev->dlen = bcap_fmt.dlen;
return 0;
}
@@ -834,6 +919,8 @@ static const struct v4l2_ioctl_ops bcap_ioctl_ops = {
.vidioc_querystd = bcap_querystd,
.vidioc_s_std = bcap_s_std,
.vidioc_g_std = bcap_g_std,
+ .vidioc_s_dv_timings = bcap_s_dv_timings,
+ .vidioc_g_dv_timings = bcap_g_dv_timings,
.vidioc_reqbufs = bcap_reqbufs,
.vidioc_querybuf = bcap_querybuf,
.vidioc_qbuf = bcap_qbuf,
@@ -869,6 +956,7 @@ static int bcap_probe(struct platform_device *pdev)
struct i2c_adapter *i2c_adap;
struct bfin_capture_config *config;
struct vb2_queue *q;
+ struct bcap_route *route;
int ret;
config = pdev->dev.platform_data;
@@ -978,6 +1066,12 @@ static int bcap_probe(struct platform_device *pdev)
NULL);
if (bcap_dev->sd) {
int i;
+ if (!config->num_inputs) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Unable to work without input\n");
+ goto err_unreg_vdev;
+ }
+
/* update tvnorms from the sub devices */
for (i = 0; i < config->num_inputs; i++)
vfd->tvnorms |= config->inputs[i].std;
@@ -989,8 +1083,24 @@ static int bcap_probe(struct platform_device *pdev)
v4l2_info(&bcap_dev->v4l2_dev, "v4l2 sub device registered\n");
+ /*
+ * explicitly set input, otherwise some boards
+ * may not come up in the state we expect
+ */
+ route = &config->routes[0];
+ ret = v4l2_subdev_call(bcap_dev->sd, video, s_routing,
+ route->input, route->output, 0);
+ if ((ret < 0) && (ret != -ENOIOCTLCMD)) {
+ v4l2_err(&bcap_dev->v4l2_dev, "Failed to set input\n");
+ goto err_unreg_vdev;
+ }
+ bcap_dev->cur_input = 0;
+ /* if this route has specific config, update ppi control */
+ if (route->ppi_control)
+ config->ppi_control = route->ppi_control;
+
/* now we can probe the default state */
- if (vfd->tvnorms) {
+ if (config->inputs[0].capabilities & V4L2_IN_CAP_STD) {
v4l2_std_id std;
ret = v4l2_subdev_call(bcap_dev->sd, core, g_std, &std);
if (ret) {
@@ -1000,6 +1110,17 @@ static int bcap_probe(struct platform_device *pdev)
}
bcap_dev->std = std;
}
+ if (config->inputs[0].capabilities & V4L2_IN_CAP_CUSTOM_TIMINGS) {
+ struct v4l2_dv_timings dv_timings;
+ ret = v4l2_subdev_call(bcap_dev->sd, video,
+ g_dv_timings, &dv_timings);
+ if (ret) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Unable to get dv timings\n");
+ goto err_unreg_vdev;
+ }
+ bcap_dev->dv_timings = dv_timings;
+ }
ret = bcap_init_sensor_formats(bcap_dev);
if (ret) {
v4l2_err(&bcap_dev->v4l2_dev,
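In the DV-timings branch added to bcap_start_streaming() above, the active region is delayed by sync plus back porch, and the total line/frame counts are the active size plus all blanking intervals. The standalone sketch below walks that arithmetic with the customary CEA-861 720p60 timing values; those numbers are sample input only, since the driver obtains the real timings from the subdevice.

/*
 * Worked example of the PPI geometry derived from v4l2_bt_timings in
 * bcap_start_streaming().  720p60 values (1280x720, hfp/hs/hbp =
 * 110/40/220, vfp/vs/vbp = 5/5/20) are used purely for illustration.
 */
#include <stdio.h>

struct bt {
	int width, hfrontporch, hsync, hbackporch;
	int height, vfrontporch, vsync, vbackporch;
};

int main(void)
{
	struct bt t = { 1280, 110, 40, 220, 720, 5, 5, 20 };
	int hdelay = t.hsync + t.hbackporch;
	int vdelay = t.vsync + t.vbackporch;
	int line = t.hfrontporch + t.hsync + t.hbackporch + t.width;
	int frame = t.vfrontporch + t.vsync + t.vbackporch + t.height;

	/* Expected: hdelay=260 vdelay=25 line=1650 frame=750 */
	printf("hdelay=%d vdelay=%d line=%d frame=%d\n",
	       hdelay, vdelay, line, frame);
	return 0;
}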
diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
index d29592186b02..01b5b501347e 100644
--- a/drivers/media/platform/blackfin/ppi.c
+++ b/drivers/media/platform/blackfin/ppi.c
@@ -17,6 +17,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/module.h>
#include <linux/slab.h>
#include <asm/bfin_ppi.h>
@@ -58,15 +59,33 @@ static irqreturn_t ppi_irq_err(int irq, void *dev_id)
* others are W1C
*/
status = bfin_read16(&reg->status);
+ if (status & 0x3000)
+ ppi->err = true;
bfin_write16(&reg->status, 0xff00);
break;
}
case PPI_TYPE_EPPI:
{
struct bfin_eppi_regs *reg = info->base;
+ unsigned short status;
+
+ status = bfin_read16(&reg->status);
+ if (status & 0x2)
+ ppi->err = true;
bfin_write16(&reg->status, 0xffff);
break;
}
+ case PPI_TYPE_EPPI3:
+ {
+ struct bfin_eppi3_regs *reg = info->base;
+ unsigned long stat;
+
+ stat = bfin_read32(&reg->stat);
+ if (stat & 0x2)
+ ppi->err = true;
+ bfin_write32(&reg->stat, 0xc0ff);
+ break;
+ }
default:
break;
}
@@ -128,6 +147,12 @@ static int ppi_start(struct ppi_if *ppi)
bfin_write32(&reg->control, ppi->ppi_control);
break;
}
+ case PPI_TYPE_EPPI3:
+ {
+ struct bfin_eppi3_regs *reg = info->base;
+ bfin_write32(&reg->ctl, ppi->ppi_control);
+ break;
+ }
default:
return -EINVAL;
}
@@ -155,6 +180,12 @@ static int ppi_stop(struct ppi_if *ppi)
bfin_write32(&reg->control, ppi->ppi_control);
break;
}
+ case PPI_TYPE_EPPI3:
+ {
+ struct bfin_eppi3_regs *reg = info->base;
+ bfin_write32(&reg->ctl, ppi->ppi_control);
+ break;
+ }
default:
return -EINVAL;
}
@@ -171,17 +202,23 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
{
const struct ppi_info *info = ppi->info;
int dma32 = 0;
- int dma_config, bytes_per_line, lines_per_frame;
+ int dma_config, bytes_per_line;
+ int hcount, hdelay, samples_per_line;
bytes_per_line = params->width * params->bpp / 8;
- lines_per_frame = params->height;
+ /* convert parameter units from pixels to samples */
+ hcount = params->width * params->bpp / params->dlen;
+ hdelay = params->hdelay * params->bpp / params->dlen;
+ samples_per_line = params->line * params->bpp / params->dlen;
if (params->int_mask == 0xFFFFFFFF)
ppi->err_int = false;
else
ppi->err_int = true;
- dma_config = (DMA_FLOW_STOP | WNR | RESTART | DMA2D | DI_EN);
+ dma_config = (DMA_FLOW_STOP | RESTART | DMA2D | DI_EN_Y);
ppi->ppi_control = params->ppi_control & ~PORT_EN;
+ if (!(ppi->ppi_control & PORT_DIR))
+ dma_config |= WNR;
switch (info->type) {
case PPI_TYPE_PPI:
{
@@ -191,8 +228,8 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
dma32 = 1;
bfin_write16(&reg->control, ppi->ppi_control);
- bfin_write16(&reg->count, bytes_per_line - 1);
- bfin_write16(&reg->frame, lines_per_frame);
+ bfin_write16(&reg->count, samples_per_line - 1);
+ bfin_write16(&reg->frame, params->frame);
break;
}
case PPI_TYPE_EPPI:
@@ -204,12 +241,31 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
dma32 = 1;
bfin_write32(&reg->control, ppi->ppi_control);
- bfin_write16(&reg->line, bytes_per_line + params->blank_clocks);
- bfin_write16(&reg->frame, lines_per_frame);
- bfin_write16(&reg->hdelay, 0);
- bfin_write16(&reg->vdelay, 0);
- bfin_write16(&reg->hcount, bytes_per_line);
- bfin_write16(&reg->vcount, lines_per_frame);
+ bfin_write16(&reg->line, samples_per_line);
+ bfin_write16(&reg->frame, params->frame);
+ bfin_write16(&reg->hdelay, hdelay);
+ bfin_write16(&reg->vdelay, params->vdelay);
+ bfin_write16(&reg->hcount, hcount);
+ bfin_write16(&reg->vcount, params->height);
+ break;
+ }
+ case PPI_TYPE_EPPI3:
+ {
+ struct bfin_eppi3_regs *reg = info->base;
+
+ if ((params->ppi_control & PACK_EN)
+ || (params->ppi_control & 0x70000) > DLEN_16)
+ dma32 = 1;
+
+ bfin_write32(&reg->ctl, ppi->ppi_control);
+ bfin_write32(&reg->line, samples_per_line);
+ bfin_write32(&reg->frame, params->frame);
+ bfin_write32(&reg->hdly, hdelay);
+ bfin_write32(&reg->vdly, params->vdelay);
+ bfin_write32(&reg->hcnt, hcount);
+ bfin_write32(&reg->vcnt, params->height);
+ if (params->int_mask)
+ bfin_write32(&reg->imsk, params->int_mask & 0xFF);
break;
}
default:
@@ -217,17 +273,17 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
}
if (dma32) {
- dma_config |= WDSIZE_32;
+ dma_config |= WDSIZE_32 | PSIZE_32;
set_dma_x_count(info->dma_ch, bytes_per_line >> 2);
set_dma_x_modify(info->dma_ch, 4);
set_dma_y_modify(info->dma_ch, 4);
} else {
- dma_config |= WDSIZE_16;
+ dma_config |= WDSIZE_16 | PSIZE_16;
set_dma_x_count(info->dma_ch, bytes_per_line >> 1);
set_dma_x_modify(info->dma_ch, 2);
set_dma_y_modify(info->dma_ch, 2);
}
- set_dma_y_count(info->dma_ch, lines_per_frame);
+ set_dma_y_count(info->dma_ch, params->height);
set_dma_config(info->dma_ch, dma_config);
SSYNC();
@@ -263,9 +319,15 @@ struct ppi_if *ppi_create_instance(const struct ppi_info *info)
pr_info("ppi probe success\n");
return ppi;
}
+EXPORT_SYMBOL(ppi_create_instance);
void ppi_delete_instance(struct ppi_if *ppi)
{
peripheral_free_list(ppi->info->pin_req);
kfree(ppi);
}
+EXPORT_SYMBOL(ppi_delete_instance);
+
+MODULE_DESCRIPTION("Analog Devices PPI driver");
+MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index 4a980e029ca7..20827ba168fc 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -178,6 +178,10 @@ struct coda_ctx {
int idx;
};
+static const u8 coda_filler_nal[14] = { 0x00, 0x00, 0x00, 0x01, 0x0c, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80 };
+static const u8 coda_filler_size[8] = { 0, 7, 14, 13, 12, 11, 10, 9 };
+
static inline void coda_write(struct coda_dev *dev, u32 data, u32 reg)
{
v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
@@ -944,6 +948,24 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx, struct coda_q_data *q_d
return 0;
}
+static int coda_h264_padding(int size, char *p)
+{
+ int nal_size;
+ int diff;
+
+ diff = size - (size & ~0x7);
+ if (diff == 0)
+ return 0;
+
+ nal_size = coda_filler_size[diff];
+ memcpy(p, coda_filler_nal, nal_size);
+
+ /* Add rbsp stop bit and trailing at the end */
+ *(p + nal_size - 1) = 0x80;
+
+ return nal_size;
+}
+
static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct coda_ctx *ctx = vb2_get_drv_priv(q);
@@ -1171,7 +1193,15 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
memcpy(&ctx->vpu_header[1][0], vb2_plane_vaddr(buf, 0),
ctx->vpu_header_size[1]);
- ctx->vpu_header_size[2] = 0;
+ /*
+ * Length of H.264 headers is variable and thus it might not be
+ * aligned for the coda to append the encoded frame. If that is
+ * the case, a filler NAL must be added to header 2.
+ */
+ ctx->vpu_header_size[2] = coda_h264_padding(
+ (ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1]),
+ ctx->vpu_header[2]);
break;
case V4L2_PIX_FMT_MPEG4:
/*
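The coda_h264_padding() helper added above rounds the combined SPS/PPS header length up to a multiple of 8 bytes by appending a filler NAL whose length is looked up in coda_filler_size[]; remainders of 2..7 pad to the boundary 16 bytes out, presumably because a filler NAL cannot be shorter than 7 bytes. The standalone check below reproduces only that table logic:

/*
 * For each header length, diff = length % 8 selects the filler size;
 * length + filler is always a multiple of 8.
 */
#include <stdio.h>

static const unsigned char filler_size[8] = { 0, 7, 14, 13, 12, 11, 10, 9 };

int main(void)
{
	int size;

	for (size = 24; size <= 31; size++) {
		int diff = size & 0x7;
		int pad = diff ? filler_size[diff] : 0;

		printf("header %2d bytes -> %2d bytes of filler -> total %2d\n",
		       size, pad, size + pad);
	}
	return 0;
}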
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig
index 3c56037c82fc..ccfde4eb626a 100644
--- a/drivers/media/platform/davinci/Kconfig
+++ b/drivers/media/platform/davinci/Kconfig
@@ -97,25 +97,15 @@ config VIDEO_ISIF
To compile this driver as a module, choose M here: the
module will be called vpfe.
-config VIDEO_DM644X_VPBE
- tristate "DM644X VPBE HW module"
- depends on ARCH_DAVINCI_DM644x
+config VIDEO_DAVINCI_VPBE_DISPLAY
+ tristate "DM644X/DM365/DM355 VPBE HW module"
+ depends on ARCH_DAVINCI_DM644x || ARCH_DAVINCI_DM355 || ARCH_DAVINCI_DM365
select VIDEO_VPSS_SYSTEM
select VIDEOBUF2_DMA_CONTIG
help
- Enables VPBE modules used for display on a DM644x
- SoC.
+ Enables Davinci VPBE module used for display devices.
+ This module is common to the following DM644x/DM365/DM355
+ based display devices.
To compile this driver as a module, choose M here: the
module will be called vpbe.
-
-
-config VIDEO_VPBE_DISPLAY
- tristate "VPBE V4L2 Display driver"
- depends on ARCH_DAVINCI_DM644x
- select VIDEO_DM644X_VPBE
- help
- Enables VPBE V4L2 Display driver on a DM644x device
-
- To compile this driver as a module, choose M here: the
- module will be called vpbe_display.
diff --git a/drivers/media/platform/davinci/Makefile b/drivers/media/platform/davinci/Makefile
index 74ed92d09257..f40f5219ca50 100644
--- a/drivers/media/platform/davinci/Makefile
+++ b/drivers/media/platform/davinci/Makefile
@@ -16,5 +16,5 @@ obj-$(CONFIG_VIDEO_VPFE_CAPTURE) += vpfe_capture.o
obj-$(CONFIG_VIDEO_DM6446_CCDC) += dm644x_ccdc.o
obj-$(CONFIG_VIDEO_DM355_CCDC) += dm355_ccdc.o
obj-$(CONFIG_VIDEO_ISIF) += isif.o
-obj-$(CONFIG_VIDEO_DM644X_VPBE) += vpbe.o vpbe_osd.o vpbe_venc.o
-obj-$(CONFIG_VIDEO_VPBE_DISPLAY) += vpbe_display.o
+obj-$(CONFIG_VIDEO_DAVINCI_VPBE_DISPLAY) += vpbe.o vpbe_osd.o \
+ vpbe_venc.o vpbe_display.o
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index f263cabade7a..4277e4ad810c 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -557,7 +557,7 @@ static int ccdc_config_vdfc(struct ccdc_vertical_dft *dfc)
*/
static void ccdc_config_csc(struct ccdc_csc *csc)
{
- u32 val1, val2;
+ u32 val1 = 0, val2;
int i;
if (!csc->enable)
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 841b91a3d255..4ca0f9a2ad8a 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -558,9 +558,9 @@ static int platform_device_get(struct device *dev, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct vpbe_device *vpbe_dev = data;
- if (strcmp("vpbe-osd", pdev->name) == 0)
+ if (strstr(pdev->name, "vpbe-osd") != NULL)
vpbe_dev->osd_device = platform_get_drvdata(pdev);
- if (strcmp("vpbe-venc", pdev->name) == 0)
+ if (strstr(pdev->name, "vpbe-venc") != NULL)
vpbe_dev->venc_device = dev_get_platdata(&pdev->dev);
return 0;
@@ -584,7 +584,6 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
struct v4l2_subdev **enc_subdev;
struct osd_state *osd_device;
struct i2c_adapter *i2c_adap;
- int output_index;
int num_encoders;
int ret = 0;
int err;
@@ -632,8 +631,10 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
err = bus_for_each_dev(&platform_bus_type, NULL, vpbe_dev,
platform_device_get);
- if (err < 0)
- return err;
+ if (err < 0) {
+ ret = err;
+ goto fail_dev_unregister;
+ }
vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
vpbe_dev->cfg->venc.module_name);
@@ -731,7 +732,6 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
/* set the current encoder and output to that of venc by default */
vpbe_dev->current_sd_index = 0;
vpbe_dev->current_out_index = 0;
- output_index = 0;
mutex_unlock(&vpbe_dev->lock);
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index e707a6f2325b..5e6b0cab514b 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -791,7 +791,6 @@ static int vpbe_display_g_crop(struct file *file, void *priv,
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
struct osd_state *osd_device = fh->disp_dev->osd_device;
struct v4l2_rect *rect = &crop->c;
- int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
"VIDIOC_G_CROP, layer id = %d\n",
@@ -799,7 +798,7 @@ static int vpbe_display_g_crop(struct file *file, void *priv,
if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
- ret = -EINVAL;
+ return -EINVAL;
}
osd_device->ops.get_layer_config(osd_device,
layer->layer_info.id, cfg);
@@ -1393,9 +1392,9 @@ static int vpbe_display_reqbufs(struct file *file, void *priv,
}
/* Initialize videobuf queue as per the buffer type */
layer->alloc_ctx = vb2_dma_contig_init_ctx(vpbe_dev->pdev);
- if (!layer->alloc_ctx) {
+ if (IS_ERR(layer->alloc_ctx)) {
v4l2_err(&vpbe_dev->v4l2_dev, "Failed to get the context\n");
- return -EINVAL;
+ return PTR_ERR(layer->alloc_ctx);
}
q = &layer->buffer_queue;
memset(q, 0, sizeof(*q));
@@ -1656,7 +1655,7 @@ static int vpbe_device_get(struct device *dev, void *data)
if (strcmp("vpbe_controller", pdev->name) == 0)
vpbe_disp->vpbe_dev = platform_get_drvdata(pdev);
- if (strcmp("vpbe-osd", pdev->name) == 0)
+ if (strstr(pdev->name, "vpbe-osd") != NULL)
vpbe_disp->osd_device = platform_get_drvdata(pdev);
return 0;
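The reqbufs hunk above replaces the NULL test on vb2_dma_contig_init_ctx() with IS_ERR()/PTR_ERR(), because that allocator reports failure with an error pointer rather than NULL, so the old check never fired. The snippet below uses simplified userspace copies of the err.h macros (the real kernel versions add annotations and bounds helpers) to make the difference visible:

/*
 * ERR_PTR() encodes a negative errno into a pointer near the top of the
 * address space; IS_ERR() detects that range, while !ptr does not.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *ctx = ERR_PTR(-ENOMEM);	/* what a failing init returns */

	printf("NULL check sees failure:   %s\n", !ctx ? "yes" : "no");
	printf("IS_ERR check sees failure: %s (err=%ld)\n",
	       IS_ERR(ctx) ? "yes" : "no",
	       IS_ERR(ctx) ? PTR_ERR(ctx) : 0L);
	return 0;
}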
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index 707f243f810d..12ad17c52ef3 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -39,7 +39,22 @@
#include <linux/io.h>
#include "vpbe_osd_regs.h"
-#define MODULE_NAME VPBE_OSD_SUBDEV_NAME
+#define MODULE_NAME "davinci-vpbe-osd"
+
+static struct platform_device_id vpbe_osd_devtype[] = {
+ {
+ .name = DM644X_VPBE_OSD_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_1,
+ }, {
+ .name = DM365_VPBE_OSD_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_2,
+ }, {
+ .name = DM355_VPBE_OSD_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_3,
+ },
+};
+
+MODULE_DEVICE_TABLE(platform, vpbe_osd_devtype);
/* register access routines */
static inline u32 osd_read(struct osd_state *sd, u32 offset)
@@ -129,7 +144,7 @@ static int _osd_dm6446_vid0_pingpong(struct osd_state *sd,
struct osd_platform_data *pdata;
pdata = (struct osd_platform_data *)sd->dev->platform_data;
- if (pdata->field_inv_wa_enable) {
+ if (pdata != NULL && pdata->field_inv_wa_enable) {
if (!field_inversion || !lconfig->interlaced) {
osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
@@ -1526,7 +1541,7 @@ static const struct vpbe_osd_ops osd_ops = {
static int osd_probe(struct platform_device *pdev)
{
- struct osd_platform_data *pdata;
+ const struct platform_device_id *pdev_id;
struct osd_state *osd;
struct resource *res;
int ret = 0;
@@ -1535,16 +1550,15 @@ static int osd_probe(struct platform_device *pdev)
if (osd == NULL)
return -ENOMEM;
- osd->dev = &pdev->dev;
- pdata = (struct osd_platform_data *)pdev->dev.platform_data;
- osd->vpbe_type = (enum vpbe_version)pdata->vpbe_type;
- if (NULL == pdev->dev.platform_data) {
- dev_err(osd->dev, "No platform data defined for OSD"
- " sub device\n");
- ret = -ENOENT;
+ pdev_id = platform_get_device_id(pdev);
+ if (!pdev_id) {
+ ret = -EINVAL;
goto free_mem;
}
+ osd->dev = &pdev->dev;
+ osd->vpbe_type = pdev_id->driver_data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(osd->dev, "Unable to get OSD register address map\n");
@@ -1595,6 +1609,7 @@ static struct platform_driver osd_driver = {
.name = MODULE_NAME,
.owner = THIS_MODULE,
},
+ .id_table = vpbe_osd_devtype
};
module_platform_driver(osd_driver);
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index aed7369b962a..bdbebd59df98 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -38,7 +38,22 @@
#include "vpbe_venc_regs.h"
-#define MODULE_NAME VPBE_VENC_SUBDEV_NAME
+#define MODULE_NAME "davinci-vpbe-venc"
+
+static struct platform_device_id vpbe_venc_devtype[] = {
+ {
+ .name = DM644X_VPBE_VENC_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_1,
+ }, {
+ .name = DM365_VPBE_VENC_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_2,
+ }, {
+ .name = DM355_VPBE_VENC_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_3,
+ },
+};
+
+MODULE_DEVICE_TABLE(platform, vpbe_venc_devtype);
static int debug = 2;
module_param(debug, int, 0644);
@@ -54,6 +69,7 @@ struct venc_state {
spinlock_t lock;
void __iomem *venc_base;
void __iomem *vdaccfg_reg;
+ enum vpbe_version venc_type;
};
static inline struct venc_state *to_state(struct v4l2_subdev *sd)
@@ -127,7 +143,7 @@ static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index)
static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
{
struct venc_state *venc = to_state(sd);
- struct venc_platform_data *pdata = venc->pdata;
+
v4l2_dbg(debug, 2, sd, "venc_enabledigitaloutput\n");
if (benable) {
@@ -159,7 +175,7 @@ static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
/* Disable LCD output control (accepting default polarity) */
venc_write(sd, VENC_LCDOUT, 0);
- if (pdata->venc_type != VPBE_VERSION_3)
+ if (venc->venc_type != VPBE_VERSION_3)
venc_write(sd, VENC_CMPNT, 0x100);
venc_write(sd, VENC_HSPLS, 0);
venc_write(sd, VENC_HINT, 0);
@@ -203,11 +219,11 @@ static int venc_set_ntsc(struct v4l2_subdev *sd)
venc_enabledigitaloutput(sd, 0);
- if (pdata->venc_type == VPBE_VERSION_3) {
+ if (venc->venc_type == VPBE_VERSION_3) {
venc_write(sd, VENC_CLKCTL, 0x01);
venc_write(sd, VENC_VIDCTL, 0);
val = vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
- } else if (pdata->venc_type == VPBE_VERSION_2) {
+ } else if (venc->venc_type == VPBE_VERSION_2) {
venc_write(sd, VENC_CLKCTL, 0x01);
venc_write(sd, VENC_VIDCTL, 0);
vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
@@ -238,7 +254,6 @@ static int venc_set_ntsc(struct v4l2_subdev *sd)
static int venc_set_pal(struct v4l2_subdev *sd)
{
struct venc_state *venc = to_state(sd);
- struct venc_platform_data *pdata = venc->pdata;
v4l2_dbg(debug, 2, sd, "venc_set_pal\n");
@@ -249,11 +264,11 @@ static int venc_set_pal(struct v4l2_subdev *sd)
venc_enabledigitaloutput(sd, 0);
- if (pdata->venc_type == VPBE_VERSION_3) {
+ if (venc->venc_type == VPBE_VERSION_3) {
venc_write(sd, VENC_CLKCTL, 0x1);
venc_write(sd, VENC_VIDCTL, 0);
vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
- } else if (pdata->venc_type == VPBE_VERSION_2) {
+ } else if (venc->venc_type == VPBE_VERSION_2) {
venc_write(sd, VENC_CLKCTL, 0x1);
venc_write(sd, VENC_VIDCTL, 0);
vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
@@ -293,8 +308,8 @@ static int venc_set_480p59_94(struct v4l2_subdev *sd)
struct venc_platform_data *pdata = venc->pdata;
v4l2_dbg(debug, 2, sd, "venc_set_480p59_94\n");
- if ((pdata->venc_type != VPBE_VERSION_1) &&
- (pdata->venc_type != VPBE_VERSION_2))
+ if (venc->venc_type != VPBE_VERSION_1 &&
+ venc->venc_type != VPBE_VERSION_2)
return -EINVAL;
/* Setup clock at VPSS & VENC for SD */
@@ -303,12 +318,12 @@ static int venc_set_480p59_94(struct v4l2_subdev *sd)
venc_enabledigitaloutput(sd, 0);
- if (pdata->venc_type == VPBE_VERSION_2)
+ if (venc->venc_type == VPBE_VERSION_2)
vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
venc_write(sd, VENC_OSDCLK0, 0);
venc_write(sd, VENC_OSDCLK1, 1);
- if (pdata->venc_type == VPBE_VERSION_1) {
+ if (venc->venc_type == VPBE_VERSION_1) {
venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
VENC_VDPRO_DAFRQ);
venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
@@ -341,8 +356,8 @@ static int venc_set_576p50(struct v4l2_subdev *sd)
v4l2_dbg(debug, 2, sd, "venc_set_576p50\n");
- if ((pdata->venc_type != VPBE_VERSION_1) &&
- (pdata->venc_type != VPBE_VERSION_2))
+ if (venc->venc_type != VPBE_VERSION_1 &&
+ venc->venc_type != VPBE_VERSION_2)
return -EINVAL;
/* Setup clock at VPSS & VENC for SD */
if (pdata->setup_clock(VPBE_ENC_CUSTOM_TIMINGS, 27000000) < 0)
@@ -350,13 +365,13 @@ static int venc_set_576p50(struct v4l2_subdev *sd)
venc_enabledigitaloutput(sd, 0);
- if (pdata->venc_type == VPBE_VERSION_2)
+ if (venc->venc_type == VPBE_VERSION_2)
vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
venc_write(sd, VENC_OSDCLK0, 0);
venc_write(sd, VENC_OSDCLK1, 1);
- if (pdata->venc_type == VPBE_VERSION_1) {
+ if (venc->venc_type == VPBE_VERSION_1) {
venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
VENC_VDPRO_DAFRQ);
venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
@@ -460,14 +475,14 @@ static int venc_s_dv_timings(struct v4l2_subdev *sd,
else if (height == 480)
return venc_set_480p59_94(sd);
else if ((height == 720) &&
- (venc->pdata->venc_type == VPBE_VERSION_2)) {
+ (venc->venc_type == VPBE_VERSION_2)) {
/* TBD setup internal 720p mode here */
ret = venc_set_720p60_internal(sd);
/* for DM365 VPBE, there is DAC inside */
vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
return ret;
} else if ((height == 1080) &&
- (venc->pdata->venc_type == VPBE_VERSION_2)) {
+ (venc->venc_type == VPBE_VERSION_2)) {
/* TBD setup internal 1080i mode here */
ret = venc_set_1080i30_internal(sd);
/* for DM365 VPBE, there is DAC inside */
@@ -556,7 +571,7 @@ static int venc_device_get(struct device *dev, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct venc_state **venc = data;
- if (strcmp(MODULE_NAME, pdev->name) == 0)
+ if (strstr(pdev->name, "vpbe-venc") != NULL)
*venc = platform_get_drvdata(pdev);
return 0;
@@ -593,6 +608,7 @@ EXPORT_SYMBOL(venc_sub_dev_init);
static int venc_probe(struct platform_device *pdev)
{
+ const struct platform_device_id *pdev_id;
struct venc_state *venc;
struct resource *res;
int ret;
@@ -601,6 +617,12 @@ static int venc_probe(struct platform_device *pdev)
if (venc == NULL)
return -ENOMEM;
+ pdev_id = platform_get_device_id(pdev);
+ if (!pdev_id) {
+ ret = -EINVAL;
+ goto free_mem;
+ }
+ venc->venc_type = pdev_id->driver_data;
venc->pdev = &pdev->dev;
venc->pdata = pdev->dev.platform_data;
if (NULL == venc->pdata) {
@@ -630,7 +652,7 @@ static int venc_probe(struct platform_device *pdev)
goto release_venc_mem_region;
}
- if (venc->pdata->venc_type != VPBE_VERSION_1) {
+ if (venc->venc_type != VPBE_VERSION_1) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res) {
dev_err(venc->pdev,
@@ -681,7 +703,7 @@ static int venc_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap((void *)venc->venc_base);
release_mem_region(res->start, resource_size(res));
- if (venc->pdata->venc_type != VPBE_VERSION_1) {
+ if (venc->venc_type != VPBE_VERSION_1) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
iounmap((void *)venc->vdaccfg_reg);
release_mem_region(res->start, resource_size(res));
@@ -698,6 +720,7 @@ static struct platform_driver venc_driver = {
.name = MODULE_NAME,
.owner = THIS_MODULE,
},
+ .id_table = vpbe_venc_devtype
};
module_platform_driver(venc_driver);
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index be9d3e1b4868..28d019da4c01 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -560,10 +560,7 @@ static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev)
static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev)
{
- struct timeval timevalue;
-
- do_gettimeofday(&timevalue);
- vpfe_dev->cur_frm->ts = timevalue;
+ v4l2_get_timestamp(&vpfe_dev->cur_frm->ts);
vpfe_dev->cur_frm->state = VIDEOBUF_DONE;
vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage;
wake_up_interruptible(&vpfe_dev->cur_frm->done);
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a409ccefb380..5892d2bc8eee 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -411,7 +411,7 @@ static struct vb2_ops video_qops = {
*/
static void vpif_process_buffer_complete(struct common_obj *common)
{
- do_gettimeofday(&common->cur_frm->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp);
vb2_buffer_done(&common->cur_frm->vb,
VB2_BUF_STATE_DONE);
/* Make curFrm pointing to nextFrm */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 9f2b603be9c9..dd249c96126d 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -402,7 +402,7 @@ static void process_interlaced_mode(int fid, struct common_obj *common)
/* one frame is displayed If next frame is
* available, release cur_frm and move on */
/* Copy frame display time */
- do_gettimeofday(&common->cur_frm->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp);
/* Change status of the cur_frm */
vb2_buffer_done(&common->cur_frm->vb,
VB2_BUF_STATE_DONE);
@@ -462,8 +462,8 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
if (!channel_first_int[i][channel_id]) {
/* Mark status of the cur_frm to
* done and unlock semaphore on it */
- do_gettimeofday(&common->cur_frm->vb.
- v4l2_buf.timestamp);
+ v4l2_get_timestamp(&common->cur_frm->vb.
+ v4l2_buf.timestamp);
vb2_buffer_done(&common->cur_frm->vb,
VB2_BUF_STATE_DONE);
/* Make cur_frm pointing to next_frm */
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index cdbff88e0f1e..a19c552232d1 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -25,7 +25,6 @@
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/io.h>
-#include <mach/hardware.h>
#include <media/davinci/vpss.h>
MODULE_LICENSE("GPL");
@@ -51,13 +50,29 @@ MODULE_AUTHOR("Texas Instruments");
/* VENCINT - vpss_int8 */
#define DM355_VPSSBL_EVTSEL_DEFAULT 0x4
-#define DM365_ISP5_PCCR 0x04
+#define DM365_ISP5_PCCR 0x04
+#define DM365_ISP5_PCCR_BL_CLK_ENABLE BIT(0)
+#define DM365_ISP5_PCCR_ISIF_CLK_ENABLE BIT(1)
+#define DM365_ISP5_PCCR_H3A_CLK_ENABLE BIT(2)
+#define DM365_ISP5_PCCR_RSZ_CLK_ENABLE BIT(3)
+#define DM365_ISP5_PCCR_IPIPE_CLK_ENABLE BIT(4)
+#define DM365_ISP5_PCCR_IPIPEIF_CLK_ENABLE BIT(5)
+#define DM365_ISP5_PCCR_RSV BIT(6)
+
+#define DM365_ISP5_BCR 0x08
+#define DM365_ISP5_BCR_ISIF_OUT_ENABLE BIT(1)
+
#define DM365_ISP5_INTSEL1 0x10
#define DM365_ISP5_INTSEL2 0x14
#define DM365_ISP5_INTSEL3 0x18
#define DM365_ISP5_CCDCMUX 0x20
#define DM365_ISP5_PG_FRAME_SIZE 0x28
#define DM365_VPBE_CLK_CTRL 0x00
+
+#define VPSS_CLK_CTRL 0x01c40044
+#define VPSS_CLK_CTRL_VENCCLKEN BIT(3)
+#define VPSS_CLK_CTRL_DACCLKEN BIT(4)
+
/*
* vpss interrupts. VDINT0 - vpss_int0, VDINT1 - vpss_int1,
* AF - vpss_int3
@@ -95,12 +110,19 @@ struct vpss_hw_ops {
void (*select_ccdc_source)(enum vpss_ccdc_source_sel src_sel);
/* clear wbl overflow bit */
int (*clear_wbl_overflow)(enum vpss_wbl_sel wbl_sel);
+ /* set sync polarity */
+ void (*set_sync_pol)(struct vpss_sync_pol);
+ /* set the PG_FRAME_SIZE register */
+ void (*set_pg_frame_size)(struct vpss_pg_frame_size);
+ /* check and clear interrupt if it occurred */
+ int (*dma_complete_interrupt)(void);
};
/* vpss configuration */
struct vpss_oper_config {
__iomem void *vpss_regs_base0;
__iomem void *vpss_regs_base1;
+ resource_size_t *vpss_regs_base2;
enum vpss_platform_type platform;
spinlock_t vpss_lock;
struct vpss_hw_ops hw_ops;
@@ -158,6 +180,14 @@ static void dm355_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
bl_regw(src_sel << VPSS_HSSISEL_SHIFT, DM355_VPSSBL_CCDCMUX);
}
+int vpss_dma_complete_interrupt(void)
+{
+ if (!oper_cfg.hw_ops.dma_complete_interrupt)
+ return 2;
+ return oper_cfg.hw_ops.dma_complete_interrupt();
+}
+EXPORT_SYMBOL(vpss_dma_complete_interrupt);
+
int vpss_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
{
if (!oper_cfg.hw_ops.select_ccdc_source)
@@ -183,6 +213,15 @@ static int dm644x_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel)
return 0;
}
+void vpss_set_sync_pol(struct vpss_sync_pol sync)
+{
+ if (!oper_cfg.hw_ops.set_sync_pol)
+ return;
+
+ oper_cfg.hw_ops.set_sync_pol(sync);
+}
+EXPORT_SYMBOL(vpss_set_sync_pol);
+
int vpss_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel)
{
if (!oper_cfg.hw_ops.clear_wbl_overflow)
@@ -348,6 +387,15 @@ void dm365_vpss_set_sync_pol(struct vpss_sync_pol sync)
}
EXPORT_SYMBOL(dm365_vpss_set_sync_pol);
+void vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
+{
+ if (!oper_cfg.hw_ops.set_pg_frame_size)
+ return;
+
+ oper_cfg.hw_ops.set_pg_frame_size(frame_size);
+}
+EXPORT_SYMBOL(vpss_set_pg_frame_size);
+
void dm365_vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
{
int current_reg = ((frame_size.hlpfr >> 1) - 1) << 16;
@@ -426,6 +474,16 @@ static int vpss_probe(struct platform_device *pdev)
oper_cfg.hw_ops.enable_clock = dm365_enable_clock;
oper_cfg.hw_ops.select_ccdc_source = dm365_select_ccdc_source;
/* Setup vpss interrupts */
+ isp5_write((isp5_read(DM365_ISP5_PCCR) |
+ DM365_ISP5_PCCR_BL_CLK_ENABLE |
+ DM365_ISP5_PCCR_ISIF_CLK_ENABLE |
+ DM365_ISP5_PCCR_H3A_CLK_ENABLE |
+ DM365_ISP5_PCCR_RSZ_CLK_ENABLE |
+ DM365_ISP5_PCCR_IPIPE_CLK_ENABLE |
+ DM365_ISP5_PCCR_IPIPEIF_CLK_ENABLE |
+ DM365_ISP5_PCCR_RSV), DM365_ISP5_PCCR);
+ isp5_write((isp5_read(DM365_ISP5_BCR) |
+ DM365_ISP5_BCR_ISIF_OUT_ENABLE), DM365_ISP5_BCR);
isp5_write(DM365_ISP5_INTSEL1_DEFAULT, DM365_ISP5_INTSEL1);
isp5_write(DM365_ISP5_INTSEL2_DEFAULT, DM365_ISP5_INTSEL2);
isp5_write(DM365_ISP5_INTSEL3_DEFAULT, DM365_ISP5_INTSEL3);
@@ -471,11 +529,20 @@ static struct platform_driver vpss_driver = {
static void vpss_exit(void)
{
+ iounmap(oper_cfg.vpss_regs_base2);
+ release_mem_region(VPSS_CLK_CTRL, 4);
platform_driver_unregister(&vpss_driver);
}
static int __init vpss_init(void)
{
+ if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
+ return -EBUSY;
+
+ oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+ writel(VPSS_CLK_CTRL_VENCCLKEN |
+ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
+
return platform_driver_register(&vpss_driver);
}
subsys_initcall(vpss_init);
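The vpss hunks above OR all of the ISP5 PCCR clock-enable bits into the register at probe time and write the VENC/DAC enable bits to the new VPSS_CLK_CTRL mapping at module init. The standalone snippet below just composes those masks from the BIT() positions defined in the hunks, so the constants can be sanity-checked; BIT() is reproduced locally.

#include <stdio.h>

#define BIT(n)	(1U << (n))

int main(void)
{
	/* BL, ISIF, H3A, RSZ, IPIPE, IPIPEIF and the reserved bit */
	unsigned int pccr = BIT(0) | BIT(1) | BIT(2) | BIT(3) |
			    BIT(4) | BIT(5) | BIT(6);
	/* VPSS_CLK_CTRL_VENCCLKEN | VPSS_CLK_CTRL_DACCLKEN */
	unsigned int clk_ctrl = BIT(3) | BIT(4);

	printf("ISP5_PCCR enable mask = 0x%02x\n", pccr);	/* 0x7f */
	printf("VPSS_CLK_CTRL value   = 0x%02x\n", clk_ctrl);	/* 0x18 */
	return 0;
}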
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index c1a07133cc56..82d9f6ac12f3 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -185,6 +185,15 @@ static const struct gsc_fmt gsc_formats[] = {
.corder = GSC_CRCB,
.num_planes = 3,
.num_comp = 3,
+ }, {
+ .name = "YUV 4:2:0 n.c. 2p, Y/CbCr tiled",
+ .pixelformat = V4L2_PIX_FMT_NV12MT_16X16,
+ .depth = { 8, 4 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 2,
+ .num_comp = 2,
}
};
@@ -935,8 +944,8 @@ static struct gsc_variant gsc_v_100_variant = {
.pix_max = &gsc_v_100_max,
.pix_min = &gsc_v_100_min,
.pix_align = &gsc_v_100_align,
- .in_buf_cnt = 8,
- .out_buf_cnt = 16,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
.sc_up_max = 8,
.sc_down_max = 16,
.poly_sc_down_max = 4,
@@ -993,12 +1002,8 @@ static void *gsc_get_drv_data(struct platform_device *pdev)
static void gsc_clk_put(struct gsc_dev *gsc)
{
- if (IS_ERR_OR_NULL(gsc->clock))
- return;
-
- clk_unprepare(gsc->clock);
- clk_put(gsc->clock);
- gsc->clock = NULL;
+ if (!IS_ERR(gsc->clock))
+ clk_unprepare(gsc->clock);
}
static int gsc_clk_get(struct gsc_dev *gsc)
@@ -1007,27 +1012,22 @@ static int gsc_clk_get(struct gsc_dev *gsc)
dev_dbg(&gsc->pdev->dev, "gsc_clk_get Called\n");
- gsc->clock = clk_get(&gsc->pdev->dev, GSC_CLOCK_GATE_NAME);
- if (IS_ERR(gsc->clock))
- goto err_print;
+ gsc->clock = devm_clk_get(&gsc->pdev->dev, GSC_CLOCK_GATE_NAME);
+ if (IS_ERR(gsc->clock)) {
+ dev_err(&gsc->pdev->dev, "failed to get clock~~~: %s\n",
+ GSC_CLOCK_GATE_NAME);
+ return PTR_ERR(gsc->clock);
+ }
ret = clk_prepare(gsc->clock);
if (ret < 0) {
- clk_put(gsc->clock);
- gsc->clock = NULL;
- goto err;
+ dev_err(&gsc->pdev->dev, "clock prepare failed for clock: %s\n",
+ GSC_CLOCK_GATE_NAME);
+ gsc->clock = ERR_PTR(-EINVAL);
+ return ret;
}
return 0;
-
-err:
- dev_err(&gsc->pdev->dev, "clock prepare failed for clock: %s\n",
- GSC_CLOCK_GATE_NAME);
- gsc_clk_put(gsc);
-err_print:
- dev_err(&gsc->pdev->dev, "failed to get clock~~~: %s\n",
- GSC_CLOCK_GATE_NAME);
- return -ENXIO;
}
static int gsc_m2m_suspend(struct gsc_dev *gsc)
@@ -1096,6 +1096,7 @@ static int gsc_probe(struct platform_device *pdev)
init_waitqueue_head(&gsc->irq_queue);
spin_lock_init(&gsc->slock);
mutex_init(&gsc->lock);
+ gsc->clock = ERR_PTR(-EINVAL);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
gsc->regs = devm_ioremap_resource(dev, res);
@@ -1157,6 +1158,7 @@ static int gsc_remove(struct platform_device *pdev)
vb2_dma_contig_cleanup_ctx(gsc->alloc_ctx);
pm_runtime_disable(&pdev->dev);
+ gsc_clk_put(gsc);
dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
return 0;
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 5f157efd24f0..cc19bba09bd1 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -427,6 +427,11 @@ static inline void gsc_ctx_state_lock_clear(u32 state, struct gsc_ctx *ctx)
spin_unlock_irqrestore(&ctx->gsc_dev->slock, flags);
}
+static inline int is_tiled(const struct gsc_fmt *fmt)
+{
+ return fmt->pixelformat == V4L2_PIX_FMT_NV12MT_16X16;
+}
+
static inline void gsc_hw_enable_control(struct gsc_dev *dev, bool on)
{
u32 cfg = readl(dev->regs + GSC_ENABLE);
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index c267c57c76fd..386c0a7a3a52 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -99,22 +99,28 @@ static void gsc_m2m_job_abort(void *priv)
gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
}
-static int gsc_fill_addr(struct gsc_ctx *ctx)
+static int gsc_get_bufs(struct gsc_ctx *ctx)
{
struct gsc_frame *s_frame, *d_frame;
- struct vb2_buffer *vb = NULL;
+ struct vb2_buffer *src_vb, *dst_vb;
int ret;
s_frame = &ctx->s_frame;
d_frame = &ctx->d_frame;
- vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- ret = gsc_prepare_addr(ctx, vb, s_frame, &s_frame->addr);
+ src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ ret = gsc_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
if (ret)
return ret;
- vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- return gsc_prepare_addr(ctx, vb, d_frame, &d_frame->addr);
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ ret = gsc_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);
+ if (ret)
+ return ret;
+
+ dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+
+ return 0;
}
static void gsc_m2m_device_run(void *priv)
@@ -148,7 +154,7 @@ static void gsc_m2m_device_run(void *priv)
goto put_device;
}
- ret = gsc_fill_addr(ctx);
+ ret = gsc_get_bufs(ctx);
if (ret) {
pr_err("Wrong address");
goto put_device;
@@ -367,6 +373,13 @@ static int gsc_m2m_reqbufs(struct file *file, void *fh,
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
+static int gsc_m2m_expbuf(struct file *file, void *fh,
+ struct v4l2_exportbuffer *eb)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
+}
+
static int gsc_m2m_querybuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
@@ -548,6 +561,7 @@ static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = {
.vidioc_s_fmt_vid_cap_mplane = gsc_m2m_s_fmt_mplane,
.vidioc_s_fmt_vid_out_mplane = gsc_m2m_s_fmt_mplane,
.vidioc_reqbufs = gsc_m2m_reqbufs,
+ .vidioc_expbuf = gsc_m2m_expbuf,
.vidioc_querybuf = gsc_m2m_querybuf,
.vidioc_qbuf = gsc_m2m_qbuf,
.vidioc_dqbuf = gsc_m2m_dqbuf,
@@ -565,7 +579,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
memset(src_vq, 0, sizeof(*src_vq));
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->ops = &gsc_m2m_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
@@ -577,7 +591,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
memset(dst_vq, 0, sizeof(*dst_vq));
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->ops = &gsc_m2m_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
@@ -597,7 +611,7 @@ static int gsc_m2m_open(struct file *file)
if (mutex_lock_interruptible(&gsc->lock))
return -ERESTARTSYS;
- ctx = kzalloc(sizeof (*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
ret = -ENOMEM;
goto unlock;
diff --git a/drivers/media/platform/exynos-gsc/gsc-regs.c b/drivers/media/platform/exynos-gsc/gsc-regs.c
index 0146b354dc22..6f5b5a486cf3 100644
--- a/drivers/media/platform/exynos-gsc/gsc-regs.c
+++ b/drivers/media/platform/exynos-gsc/gsc-regs.c
@@ -214,6 +214,9 @@ void gsc_hw_set_in_image_format(struct gsc_ctx *ctx)
break;
}
+ if (is_tiled(frame->fmt))
+ cfg |= GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE;
+
writel(cfg, dev->regs + GSC_IN_CON);
}
@@ -334,6 +337,9 @@ void gsc_hw_set_out_image_format(struct gsc_ctx *ctx)
break;
}
+ if (is_tiled(frame->fmt))
+ cfg |= GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE;
+
end_set:
writel(cfg, dev->regs + GSC_OUT_CON);
}
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 9115a2c8d075..5f7db3f1f6f5 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1181,7 +1181,7 @@ static void viu_capture_intr(struct viu_dev *dev, u32 status)
if (waitqueue_active(&buf->vb.done)) {
list_del(&buf->vb.queue);
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
wake_up(&buf->vb.done);
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 05c560f2ef06..6c4db9b98989 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -28,7 +28,7 @@ MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.0.1");
-static bool debug = true;
+static bool debug;
module_param(debug, bool, 0644);
/* Flags that indicate a format can be used for capture/output */
@@ -917,10 +917,8 @@ static int deinterlace_open(struct file *file)
ctx->xt = kzalloc(sizeof(struct dma_async_tx_descriptor) +
sizeof(struct data_chunk), GFP_KERNEL);
if (!ctx->xt) {
- int ret = PTR_ERR(ctx->xt);
-
kfree(ctx);
- return ret;
+ return -ENOMEM;
}
ctx->colorspace = V4L2_COLORSPACE_REC709;
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index ce2b7b4788d6..92a33f081852 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -22,6 +22,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-chip-ident.h>
#include <media/ov7670.h>
#include <media/videobuf2-vmalloc.h>
@@ -30,13 +31,6 @@
#include "mcam-core.h"
-/*
- * Basic frame stats - to be deleted shortly
- */
-static int frames;
-static int singles;
-static int delivered;
-
#ifdef MCAM_MODE_VMALLOC
/*
* Internal DMA buffer management. Since the controller cannot do S/G I/O,
@@ -367,10 +361,10 @@ static void mcam_frame_tasklet(unsigned long data)
if (!test_bit(bufno, &cam->flags))
continue;
if (list_empty(&cam->buffers)) {
- singles++;
+ cam->frame_state.singles++;
break; /* Leave it valid, hope for better later */
}
- delivered++;
+ cam->frame_state.delivered++;
clear_bit(bufno, &cam->flags);
buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
queue);
@@ -452,7 +446,7 @@ static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
vb2_dma_contig_plane_dma_addr(&buf->vb_buf, 0));
set_bit(CF_SINGLE_BUFFER, &cam->flags);
- singles++;
+ cam->frame_state.singles++;
return;
}
/*
@@ -485,7 +479,7 @@ static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
struct mcam_vb_buffer *buf = cam->vb_bufs[frame];
if (!test_bit(CF_SINGLE_BUFFER, &cam->flags)) {
- delivered++;
+ cam->frame_state.delivered++;
mcam_buffer_done(cam, frame, &buf->vb_buf);
}
mcam_set_contig_buffer(cam, frame);
@@ -578,13 +572,13 @@ static void mcam_dma_sg_done(struct mcam_camera *cam, int frame)
*/
} else {
set_bit(CF_SG_RESTART, &cam->flags);
- singles++;
+ cam->frame_state.singles++;
cam->vb_bufs[0] = NULL;
}
/*
* Now we can give the completed frame back to user space.
*/
- delivered++;
+ cam->frame_state.delivered++;
mcam_buffer_done(cam, frame, &buf->vb_buf);
}
@@ -1232,47 +1226,6 @@ static int mcam_vidioc_dqbuf(struct file *filp, void *priv,
return ret;
}
-
-
-static int mcam_vidioc_queryctrl(struct file *filp, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct mcam_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, core, queryctrl, qc);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-static int mcam_vidioc_g_ctrl(struct file *filp, void *priv,
- struct v4l2_control *ctrl)
-{
- struct mcam_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, core, g_ctrl, ctrl);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-static int mcam_vidioc_s_ctrl(struct file *filp, void *priv,
- struct v4l2_control *ctrl)
-{
- struct mcam_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, core, s_ctrl, ctrl);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
static int mcam_vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
@@ -1520,9 +1473,6 @@ static const struct v4l2_ioctl_ops mcam_v4l_ioctl_ops = {
.vidioc_dqbuf = mcam_vidioc_dqbuf,
.vidioc_streamon = mcam_vidioc_streamon,
.vidioc_streamoff = mcam_vidioc_streamoff,
- .vidioc_queryctrl = mcam_vidioc_queryctrl,
- .vidioc_g_ctrl = mcam_vidioc_g_ctrl,
- .vidioc_s_ctrl = mcam_vidioc_s_ctrl,
.vidioc_g_parm = mcam_vidioc_g_parm,
.vidioc_s_parm = mcam_vidioc_s_parm,
.vidioc_enum_framesizes = mcam_vidioc_enum_framesizes,
@@ -1545,7 +1495,9 @@ static int mcam_v4l_open(struct file *filp)
filp->private_data = cam;
- frames = singles = delivered = 0;
+ cam->frame_state.frames = 0;
+ cam->frame_state.singles = 0;
+ cam->frame_state.delivered = 0;
mutex_lock(&cam->s_mutex);
if (cam->users == 0) {
ret = mcam_setup_vb2(cam);
@@ -1566,8 +1518,9 @@ static int mcam_v4l_release(struct file *filp)
{
struct mcam_camera *cam = filp->private_data;
- cam_dbg(cam, "Release, %d frames, %d singles, %d delivered\n", frames,
- singles, delivered);
+ cam_dbg(cam, "Release, %d frames, %d singles, %d delivered\n",
+ cam->frame_state.frames, cam->frame_state.singles,
+ cam->frame_state.delivered);
mutex_lock(&cam->s_mutex);
(cam->users)--;
if (cam->users == 0) {
@@ -1660,7 +1613,7 @@ static void mcam_frame_complete(struct mcam_camera *cam, int frame)
clear_bit(CF_DMA_ACTIVE, &cam->flags);
cam->next_buf = frame;
cam->buf_seq[frame] = ++(cam->sequence);
- frames++;
+ cam->frame_state.frames++;
/*
* "This should never happen"
*/
@@ -1786,14 +1739,19 @@ int mccic_register(struct mcam_camera *cam)
/*
* Get the v4l2 setup done.
*/
+ ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10);
+ if (ret)
+ goto out_unregister;
+ cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler;
+
mutex_lock(&cam->s_mutex);
cam->vdev = mcam_v4l_template;
cam->vdev.debug = 0;
cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ video_set_drvdata(&cam->vdev, cam);
ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
if (ret)
goto out;
- video_set_drvdata(&cam->vdev, cam);
/*
* If so requested, try to get our DMA buffers now.
@@ -1805,6 +1763,7 @@ int mccic_register(struct mcam_camera *cam)
}
out:
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
mutex_unlock(&cam->s_mutex);
return ret;
out_unregister:
@@ -1829,6 +1788,7 @@ void mccic_shutdown(struct mcam_camera *cam)
if (cam->buffer_mode == B_vmalloc)
mcam_free_dma_bufs(cam);
video_unregister_device(&cam->vdev);
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
v4l2_device_unregister(&cam->v4l2_dev);
}
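The marvell-ccic hunks above retire the file-scope frames/singles/delivered counters in favour of a per-device struct mcam_frame_state, so statistics printed at release time describe only that camera. The toy program below, with a hypothetical two-camera setup, illustrates the point; the struct and field names mirror the header change but the scenario is invented.

#include <stdio.h>

struct frame_state {
	unsigned int frames;
	unsigned int singles;
	unsigned int delivered;
};

struct camera {
	const char *name;
	struct frame_state st;
};

static void complete_frame(struct camera *cam, int delivered)
{
	cam->st.frames++;
	if (delivered)
		cam->st.delivered++;
	else
		cam->st.singles++;
}

int main(void)
{
	struct camera a = { "cam0", { 0 } }, b = { "cam1", { 0 } };
	int i;

	for (i = 0; i < 100; i++)
		complete_frame(&a, 1);
	complete_frame(&b, 0);

	/* Each device reports its own counters, not a global total. */
	printf("%s: %u frames, %u singles, %u delivered\n",
	       a.name, a.st.frames, a.st.singles, a.st.delivered);
	printf("%s: %u frames, %u singles, %u delivered\n",
	       b.name, b.st.frames, b.st.singles, b.st.delivered);
	return 0;
}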
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index bd6acba9fb37..01dec9e5fc2b 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-core.h>
@@ -15,15 +16,15 @@
* Create our own symbols for the supported buffer modes, but, for now,
* base them entirely on which videobuf2 options have been selected.
*/
-#if defined(CONFIG_VIDEOBUF2_VMALLOC) || defined(CONFIG_VIDEOBUF2_VMALLOC_MODULE)
+#if IS_ENABLED(CONFIG_VIDEOBUF2_VMALLOC)
#define MCAM_MODE_VMALLOC 1
#endif
-#if defined(CONFIG_VIDEOBUF2_DMA_CONTIG) || defined(CONFIG_VIDEOBUF2_DMA_CONTIG_MODULE)
+#if IS_ENABLED(CONFIG_VIDEOBUF2_DMA_CONTIG)
#define MCAM_MODE_DMA_CONTIG 1
#endif
-#if defined(CONFIG_VIDEOBUF2_DMA_SG) || defined(CONFIG_VIDEOBUF2_DMA_SG_MODULE)
+#if IS_ENABLED(CONFIG_VIDEOBUF2_DMA_SG)
#define MCAM_MODE_DMA_SG 1
#endif
@@ -73,6 +74,14 @@ static inline int mcam_buffer_mode_supported(enum mcam_buffer_mode mode)
}
}
+/*
+ * Basic frame states
+ */
+struct mcam_frame_state {
+ unsigned int frames;
+ unsigned int singles;
+ unsigned int delivered;
+};
/*
* A description of one of our devices.
@@ -104,10 +113,12 @@ struct mcam_camera {
* should not be touched by the platform code.
*/
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
enum mcam_state state;
unsigned long flags; /* Buffer status, mainly (dev_lock) */
int users; /* How many open FDs */
+ struct mcam_frame_state frame_state; /* Frame state counter */
/*
* Subsystem structures.
*/
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index 390ab094f9f2..37ad446b35b3 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -6,7 +6,7 @@ config VIDEO_OMAP2_VOUT
depends on ARCH_OMAP2 || ARCH_OMAP3
select VIDEOBUF_GEN
select VIDEOBUF_DMA_CONTIG
- select OMAP2_DSS
+ select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
default n
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 35cc526e6c93..96c4a17e4280 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -205,19 +205,21 @@ static u32 omap_vout_uservirt_to_phys(u32 virtp)
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
- vma = find_vma(mm, virtp);
/* For kernel direct-mapped memory, take the easy way */
- if (virtp >= PAGE_OFFSET) {
- physp = virt_to_phys((void *) virtp);
- } else if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
+ if (virtp >= PAGE_OFFSET)
+ return virt_to_phys((void *) virtp);
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(mm, virtp);
+ if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
/* this will catch, kernel-allocated, mmaped-to-usermode
addresses */
physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
+ up_read(&current->mm->mmap_sem);
} else {
/* otherwise, use get_user_pages() for general userland pages */
int res, nr_pages = 1;
struct page *pages;
- down_read(&current->mm->mmap_sem);
res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
0, &pages, NULL);
@@ -595,7 +597,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
return;
spin_lock(&vout->vbq_lock);
- do_gettimeofday(&timevalue);
+ v4l2_get_timestamp(&timevalue);
switch (cur_display->type) {
case OMAP_DISPLAY_TYPE_DSI:
@@ -1230,21 +1232,6 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
return ret;
}
-static int vidioc_enum_fmt_vid_overlay(struct file *file, void *fh,
- struct v4l2_fmtdesc *fmt)
-{
- int index = fmt->index;
-
- if (index >= NUM_OUTPUT_FORMATS)
- return -EINVAL;
-
- fmt->flags = omap_formats[index].flags;
- strlcpy(fmt->description, omap_formats[index].description,
- sizeof(fmt->description));
- fmt->pixelformat = omap_formats[index].pixelformat;
- return 0;
-}
-
static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
struct v4l2_format *f)
{
@@ -1858,10 +1845,9 @@ static const struct v4l2_ioctl_ops vout_ioctl_ops = {
.vidioc_s_fbuf = vidioc_s_fbuf,
.vidioc_g_fbuf = vidioc_g_fbuf,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
- .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
- .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt_vid_overlay,
- .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
+ .vidioc_try_fmt_vid_out_overlay = vidioc_try_fmt_vid_overlay,
+ .vidioc_s_fmt_vid_out_overlay = vidioc_s_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_out_overlay = vidioc_g_fmt_vid_overlay,
.vidioc_cropcap = vidioc_cropcap,
.vidioc_g_crop = vidioc_g_crop,
.vidioc_s_crop = vidioc_s_crop,
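
Besides the switch to v4l2_get_timestamp() and the *_vid_out_overlay ioctl rename, the omap_vout hunk moves the find_vma() call under mmap_sem: walking the VMA tree without the semaphore races against concurrent mmap()/munmap() in the same process. A stripped-down sketch of that lookup pattern (hypothetical helper name, kernels of this era where mmap_sem is an rwsem):

#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative only: translate a user VA backed by a VM_IO mapping. */
static unsigned long demo_user_io_to_phys(unsigned long virtp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long physp = 0;

	down_read(&mm->mmap_sem);		/* protects the VMA tree */
	vma = find_vma(mm, virtp);
	if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff)
		physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
	up_read(&mm->mmap_sem);

	return physp;
}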
diff --git a/drivers/media/platform/omap24xxcam.c b/drivers/media/platform/omap24xxcam.c
index 8b7ccea982e7..debb44ceb185 100644
--- a/drivers/media/platform/omap24xxcam.c
+++ b/drivers/media/platform/omap24xxcam.c
@@ -402,7 +402,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
omap24xxcam_core_disable(cam);
spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
vb->field_count = atomic_add_return(2, &fh->field_count);
if (csr & csr_error) {
vb->state = VIDEOBUF_ERROR;
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index e4aaee91201d..6e5ad8ec0a22 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1338,28 +1338,15 @@ static int isp_enable_clocks(struct isp_device *isp)
{
int r;
unsigned long rate;
- int divisor;
-
- /*
- * cam_mclk clock chain:
- * dpll4 -> dpll4_m5 -> dpll4_m5x2 -> cam_mclk
- *
- * In OMAP3630 dpll4_m5x2 != 2 x dpll4_m5 but both are
- * set to the same value. Hence the rate set for dpll4_m5
- * has to be twice of what is set on OMAP3430 to get
- * the required value for cam_mclk
- */
- divisor = isp->revision == ISP_REVISION_15_0 ? 1 : 2;
r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);
if (r) {
dev_err(isp->dev, "failed to enable cam_ick clock\n");
goto out_clk_enable_ick;
}
- r = clk_set_rate(isp->clock[ISP_CLK_DPLL4_M5_CK],
- CM_CAM_MCLK_HZ/divisor);
+ r = clk_set_rate(isp->clock[ISP_CLK_CAM_MCLK], CM_CAM_MCLK_HZ);
if (r) {
- dev_err(isp->dev, "clk_set_rate for dpll4_m5_ck failed\n");
+ dev_err(isp->dev, "clk_set_rate for cam_mclk failed\n");
goto out_clk_enable_mclk;
}
r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]);
@@ -1401,33 +1388,19 @@ static void isp_disable_clocks(struct isp_device *isp)
static const char *isp_clocks[] = {
"cam_ick",
"cam_mclk",
- "dpll4_m5_ck",
"csi2_96m_fck",
"l3_ick",
};
-static void isp_put_clocks(struct isp_device *isp)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
- if (isp->clock[i]) {
- clk_put(isp->clock[i]);
- isp->clock[i] = NULL;
- }
- }
-}
-
static int isp_get_clocks(struct isp_device *isp)
{
struct clk *clk;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
- clk = clk_get(isp->dev, isp_clocks[i]);
+ clk = devm_clk_get(isp->dev, isp_clocks[i]);
if (IS_ERR(clk)) {
dev_err(isp->dev, "clk_get %s failed\n", isp_clocks[i]);
- isp_put_clocks(isp);
return PTR_ERR(clk);
}
@@ -1993,7 +1966,6 @@ error_csiphy:
static int isp_remove(struct platform_device *pdev)
{
struct isp_device *isp = platform_get_drvdata(pdev);
- int i;
isp_unregister_entities(isp);
isp_cleanup_modules(isp);
@@ -2004,26 +1976,6 @@ static int isp_remove(struct platform_device *pdev)
isp->domain = NULL;
omap3isp_put(isp);
- free_irq(isp->irq_num, isp);
- isp_put_clocks(isp);
-
- for (i = 0; i < OMAP3_ISP_IOMEM_LAST; i++) {
- if (isp->mmio_base[i]) {
- iounmap(isp->mmio_base[i]);
- isp->mmio_base[i] = NULL;
- }
-
- if (isp->mmio_base_phys[i]) {
- release_mem_region(isp->mmio_base_phys[i],
- isp->mmio_size[i]);
- isp->mmio_base_phys[i] = 0;
- }
- }
-
- regulator_put(isp->isp_csiphy1.vdd);
- regulator_put(isp->isp_csiphy2.vdd);
- kfree(isp);
-
return 0;
}
@@ -2041,7 +1993,8 @@ static int isp_map_mem_resource(struct platform_device *pdev,
return -ENODEV;
}
- if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
+ if (!devm_request_mem_region(isp->dev, mem->start, resource_size(mem),
+ pdev->name)) {
dev_err(isp->dev,
"cannot reserve camera register I/O region\n");
return -ENODEV;
@@ -2050,8 +2003,9 @@ static int isp_map_mem_resource(struct platform_device *pdev,
isp->mmio_size[res] = resource_size(mem);
/* map the region */
- isp->mmio_base[res] = ioremap_nocache(isp->mmio_base_phys[res],
- isp->mmio_size[res]);
+ isp->mmio_base[res] = devm_ioremap_nocache(isp->dev,
+ isp->mmio_base_phys[res],
+ isp->mmio_size[res]);
if (!isp->mmio_base[res]) {
dev_err(isp->dev, "cannot map camera register I/O region\n");
return -ENODEV;
@@ -2081,7 +2035,7 @@ static int isp_probe(struct platform_device *pdev)
if (pdata == NULL)
return -EINVAL;
- isp = kzalloc(sizeof(*isp), GFP_KERNEL);
+ isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
if (!isp) {
dev_err(&pdev->dev, "could not allocate memory\n");
return -ENOMEM;
@@ -2104,8 +2058,8 @@ static int isp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, isp);
/* Regulators */
- isp->isp_csiphy1.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY1");
- isp->isp_csiphy2.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY2");
+ isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "VDD_CSIPHY1");
+ isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "VDD_CSIPHY2");
/* Clocks
*
@@ -2180,7 +2134,8 @@ static int isp_probe(struct platform_device *pdev)
goto detach_dev;
}
- if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, "OMAP3 ISP", isp)) {
+ if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
+ "OMAP3 ISP", isp)) {
dev_err(isp->dev, "Unable to request IRQ\n");
ret = -EINVAL;
goto detach_dev;
@@ -2189,7 +2144,7 @@ static int isp_probe(struct platform_device *pdev)
/* Entities */
ret = isp_initialize_modules(isp);
if (ret < 0)
- goto error_irq;
+ goto detach_dev;
ret = isp_register_entities(isp);
if (ret < 0)
@@ -2202,8 +2157,6 @@ static int isp_probe(struct platform_device *pdev)
error_modules:
isp_cleanup_modules(isp);
-error_irq:
- free_irq(isp->irq_num, isp);
detach_dev:
iommu_detach_device(isp->domain, &pdev->dev);
free_domain:
@@ -2211,26 +2164,9 @@ free_domain:
error_isp:
omap3isp_put(isp);
error:
- isp_put_clocks(isp);
-
- for (i = 0; i < OMAP3_ISP_IOMEM_LAST; i++) {
- if (isp->mmio_base[i]) {
- iounmap(isp->mmio_base[i]);
- isp->mmio_base[i] = NULL;
- }
-
- if (isp->mmio_base_phys[i]) {
- release_mem_region(isp->mmio_base_phys[i],
- isp->mmio_size[i]);
- isp->mmio_base_phys[i] = 0;
- }
- }
- regulator_put(isp->isp_csiphy2.vdd);
- regulator_put(isp->isp_csiphy1.vdd);
platform_set_drvdata(pdev, NULL);
mutex_destroy(&isp->isp_mutex);
- kfree(isp);
return ret;
}
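
Most of the isp.c hunk is a conversion to managed (devm_*) resources: devm_kzalloc(), devm_clk_get(), devm_request_mem_region(), devm_ioremap_nocache(), devm_regulator_get() and devm_request_irq() are released by the driver core when probe fails or the device goes away, which is why isp_put_clocks() and the long unwind sequences in isp_probe()/isp_remove() disappear. A minimal probe() using the same pattern, with generic names rather than this driver's:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;
	struct clk *ick;
	int irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;

	/* Everything below is released automatically on failure or removal. */
	if (!devm_request_mem_region(&pdev->dev, mem->start,
				     resource_size(mem), pdev->name))
		return -EBUSY;

	base = devm_ioremap_nocache(&pdev->dev, mem->start, resource_size(mem));
	if (!base)
		return -ENOMEM;

	ick = devm_clk_get(&pdev->dev, "cam_ick");
	if (IS_ERR(ick))
		return PTR_ERR(ick);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* No error labels and no remove-time teardown are needed. */
	return devm_request_irq(&pdev->dev, irq, demo_isr, IRQF_SHARED,
				dev_name(&pdev->dev), pdev);
}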
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 517d348ce32b..c77e1f2ae5ca 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -147,7 +147,6 @@ struct isp_platform_callback {
* @ref_count: Reference count for handling multiple ISP requests.
* @cam_ick: Pointer to camera interface clock structure.
* @cam_mclk: Pointer to camera functional clock structure.
- * @dpll4_m5_ck: Pointer to DPLL4 M5 clock structure.
* @csi2_fck: Pointer to camera CSI2 complexIO clock structure.
* @l3_ick: Pointer to OMAP3 L3 bus interface clock.
* @irq: Currently attached ISP ISR callbacks information structure.
@@ -189,10 +188,9 @@ struct isp_device {
u32 xclk_divisor[2]; /* Two clocks, a and b. */
#define ISP_CLK_CAM_ICK 0
#define ISP_CLK_CAM_MCLK 1
-#define ISP_CLK_DPLL4_M5_CK 2
-#define ISP_CLK_CSI2_FCK 3
-#define ISP_CLK_L3_ICK 4
- struct clk *clock[5];
+#define ISP_CLK_CSI2_FCK 2
+#define ISP_CLK_L3_ICK 3
+ struct clk *clock[4];
/* ISP modules */
struct ispstat isp_af;
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index 85f0de85f37c..c5d84c977e29 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -1136,7 +1136,7 @@ int omap3isp_ccp2_init(struct isp_device *isp)
* TODO: Don't hardcode the usage of PHY1 (shared with CSI2c).
*/
if (isp->revision == ISP_REVISION_2_0) {
- ccp2->vdds_csib = regulator_get(isp->dev, "vdds_csib");
+ ccp2->vdds_csib = devm_regulator_get(isp->dev, "vdds_csib");
if (IS_ERR(ccp2->vdds_csib)) {
dev_dbg(isp->dev,
"Could not get regulator vdds_csib\n");
@@ -1147,10 +1147,8 @@ int omap3isp_ccp2_init(struct isp_device *isp)
}
ret = ccp2_init_entities(ccp2);
- if (ret < 0) {
- regulator_put(ccp2->vdds_csib);
+ if (ret < 0)
return ret;
- }
ccp2_reset(ccp2);
return 0;
@@ -1166,6 +1164,4 @@ void omap3isp_ccp2_cleanup(struct isp_device *isp)
omap3isp_video_cleanup(&ccp2->video_in);
media_entity_cleanup(&ccp2->subdev.entity);
-
- regulator_put(ccp2->vdds_csib);
}
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
index 3d56b33f85e8..c09de32f986a 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -32,7 +32,8 @@
#include "ispreg.h"
#include "ispcsiphy.h"
-static void csiphy_routing_cfg_3630(struct isp_csiphy *phy, u32 iface,
+static void csiphy_routing_cfg_3630(struct isp_csiphy *phy,
+ enum isp_interface_type iface,
bool ccp2_strobe)
{
u32 reg = isp_reg_readl(
@@ -40,6 +41,8 @@ static void csiphy_routing_cfg_3630(struct isp_csiphy *phy, u32 iface,
u32 shift, mode;
switch (iface) {
+ default:
+ /* Should not happen in practice, but let's keep the compiler happy. */
case ISP_INTERFACE_CCP2B_PHY1:
reg &= ~OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
@@ -59,9 +62,8 @@ static void csiphy_routing_cfg_3630(struct isp_csiphy *phy, u32 iface,
}
/* Select data/clock or data/strobe mode for CCP2 */
- switch (iface) {
- case ISP_INTERFACE_CCP2B_PHY1:
- case ISP_INTERFACE_CCP2B_PHY2:
+ if (iface == ISP_INTERFACE_CCP2B_PHY1 ||
+ iface == ISP_INTERFACE_CCP2B_PHY2) {
if (ccp2_strobe)
mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_STROBE;
else
@@ -110,7 +112,8 @@ static void csiphy_routing_cfg_3430(struct isp_csiphy *phy, u32 iface, bool on,
* and 3630, so they will not hold their contents in off-mode. This isn't an
* issue since the MPU power domain is forced on whilst the ISP is in use.
*/
-static void csiphy_routing_cfg(struct isp_csiphy *phy, u32 iface, bool on,
+static void csiphy_routing_cfg(struct isp_csiphy *phy,
+ enum isp_interface_type iface, bool on,
bool ccp2_strobe)
{
if (phy->isp->mmio_base[OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL]
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
index 036e9961d027..75fd82b152ba 100644
--- a/drivers/media/platform/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -300,13 +300,11 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
struct ispstat *aewb = &isp->isp_aewb;
struct omap3isp_h3a_aewb_config *aewb_cfg;
struct omap3isp_h3a_aewb_config *aewb_recover_cfg;
- int ret;
- aewb_cfg = kzalloc(sizeof(*aewb_cfg), GFP_KERNEL);
+ aewb_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_cfg), GFP_KERNEL);
if (!aewb_cfg)
return -ENOMEM;
- memset(aewb, 0, sizeof(*aewb));
aewb->ops = &h3a_aewb_ops;
aewb->priv = aewb_cfg;
aewb->dma_ch = -1;
@@ -314,12 +312,12 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
aewb->isp = isp;
/* Set recover state configuration */
- aewb_recover_cfg = kzalloc(sizeof(*aewb_recover_cfg), GFP_KERNEL);
+ aewb_recover_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_recover_cfg),
+ GFP_KERNEL);
if (!aewb_recover_cfg) {
dev_err(aewb->isp->dev, "AEWB: cannot allocate memory for "
"recover configuration.\n");
- ret = -ENOMEM;
- goto err_recover_alloc;
+ return -ENOMEM;
}
aewb_recover_cfg->saturation_limit = OMAP3ISP_AEWB_MAX_SATURATION_LIM;
@@ -336,25 +334,13 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
if (h3a_aewb_validate_params(aewb, aewb_recover_cfg)) {
dev_err(aewb->isp->dev, "AEWB: recover configuration is "
"invalid.\n");
- ret = -EINVAL;
- goto err_conf;
+ return -EINVAL;
}
aewb_recover_cfg->buf_size = h3a_aewb_get_buf_size(aewb_recover_cfg);
aewb->recover_priv = aewb_recover_cfg;
- ret = omap3isp_stat_init(aewb, "AEWB", &h3a_aewb_subdev_ops);
- if (ret)
- goto err_conf;
-
- return 0;
-
-err_conf:
- kfree(aewb_recover_cfg);
-err_recover_alloc:
- kfree(aewb_cfg);
-
- return ret;
+ return omap3isp_stat_init(aewb, "AEWB", &h3a_aewb_subdev_ops);
}
/*
@@ -362,7 +348,5 @@ err_recover_alloc:
*/
void omap3isp_h3a_aewb_cleanup(struct isp_device *isp)
{
- kfree(isp->isp_aewb.priv);
- kfree(isp->isp_aewb.recover_priv);
omap3isp_stat_cleanup(&isp->isp_aewb);
}
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
index 42ccce318d5d..a0bf5af32438 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -363,13 +363,11 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
struct ispstat *af = &isp->isp_af;
struct omap3isp_h3a_af_config *af_cfg;
struct omap3isp_h3a_af_config *af_recover_cfg;
- int ret;
- af_cfg = kzalloc(sizeof(*af_cfg), GFP_KERNEL);
+ af_cfg = devm_kzalloc(isp->dev, sizeof(*af_cfg), GFP_KERNEL);
if (af_cfg == NULL)
return -ENOMEM;
- memset(af, 0, sizeof(*af));
af->ops = &h3a_af_ops;
af->priv = af_cfg;
af->dma_ch = -1;
@@ -377,12 +375,12 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
af->isp = isp;
/* Set recover state configuration */
- af_recover_cfg = kzalloc(sizeof(*af_recover_cfg), GFP_KERNEL);
+ af_recover_cfg = devm_kzalloc(isp->dev, sizeof(*af_recover_cfg),
+ GFP_KERNEL);
if (!af_recover_cfg) {
dev_err(af->isp->dev, "AF: cannot allocate memory for recover "
"configuration.\n");
- ret = -ENOMEM;
- goto err_recover_alloc;
+ return -ENOMEM;
}
af_recover_cfg->paxel.h_start = OMAP3ISP_AF_PAXEL_HZSTART_MIN;
@@ -394,30 +392,16 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
if (h3a_af_validate_params(af, af_recover_cfg)) {
dev_err(af->isp->dev, "AF: recover configuration is "
"invalid.\n");
- ret = -EINVAL;
- goto err_conf;
+ return -EINVAL;
}
af_recover_cfg->buf_size = h3a_af_get_buf_size(af_recover_cfg);
af->recover_priv = af_recover_cfg;
- ret = omap3isp_stat_init(af, "AF", &h3a_af_subdev_ops);
- if (ret)
- goto err_conf;
-
- return 0;
-
-err_conf:
- kfree(af_recover_cfg);
-err_recover_alloc:
- kfree(af_cfg);
-
- return ret;
+ return omap3isp_stat_init(af, "AF", &h3a_af_subdev_ops);
}
void omap3isp_h3a_af_cleanup(struct isp_device *isp)
{
- kfree(isp->isp_af.priv);
- kfree(isp->isp_af.recover_priv);
omap3isp_stat_cleanup(&isp->isp_af);
}
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
index 2d759c56f37c..e070c24048ef 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -114,14 +114,14 @@ static void hist_setup_regs(struct ispstat *hist, void *priv)
/* Regions size and position */
for (c = 0; c < OMAP3ISP_HIST_MAX_REGIONS; c++) {
if (c < conf->num_regions) {
- reg_hor[c] = conf->region[c].h_start <<
- ISPHIST_REG_START_SHIFT;
- reg_hor[c] = conf->region[c].h_end <<
- ISPHIST_REG_END_SHIFT;
- reg_ver[c] = conf->region[c].v_start <<
- ISPHIST_REG_START_SHIFT;
- reg_ver[c] = conf->region[c].v_end <<
- ISPHIST_REG_END_SHIFT;
+ reg_hor[c] = (conf->region[c].h_start <<
+ ISPHIST_REG_START_SHIFT)
+ | (conf->region[c].h_end <<
+ ISPHIST_REG_END_SHIFT);
+ reg_ver[c] = (conf->region[c].v_start <<
+ ISPHIST_REG_START_SHIFT)
+ | (conf->region[c].v_end <<
+ ISPHIST_REG_END_SHIFT);
} else {
reg_hor[c] = 0;
reg_ver[c] = 0;
@@ -477,11 +477,10 @@ int omap3isp_hist_init(struct isp_device *isp)
struct omap3isp_hist_config *hist_cfg;
int ret = -1;
- hist_cfg = kzalloc(sizeof(*hist_cfg), GFP_KERNEL);
+ hist_cfg = devm_kzalloc(isp->dev, sizeof(*hist_cfg), GFP_KERNEL);
if (hist_cfg == NULL)
return -ENOMEM;
- memset(hist, 0, sizeof(*hist));
hist->isp = isp;
if (HIST_CONFIG_DMA)
@@ -504,7 +503,6 @@ int omap3isp_hist_init(struct isp_device *isp)
ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
if (ret) {
- kfree(hist_cfg);
if (HIST_USING_DMA(hist))
omap_free_dma(hist->dma_ch);
}
@@ -519,6 +517,5 @@ void omap3isp_hist_cleanup(struct isp_device *isp)
{
if (HIST_USING_DMA(&isp->isp_hist))
omap_free_dma(isp->isp_hist.dma_ch);
- kfree(isp->isp_hist.priv);
omap3isp_stat_cleanup(&isp->isp_hist);
}
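
The isphist.c change fixes an overwrite: h_start/h_end (and v_start/v_end) were shifted into the same variable by two successive assignments, so the start field was lost; the patch ORs the two shifted fields together instead. In isolation, the intended packing is simply (illustrative shift values, not the real ISPHIST ones):

#include <linux/types.h>

#define DEMO_REG_START_SHIFT	16
#define DEMO_REG_END_SHIFT	0

static inline u32 demo_pack_window(u32 start, u32 end)
{
	/*
	 * Buggy form:   reg = start << DEMO_REG_START_SHIFT;
	 *               reg = end << DEMO_REG_END_SHIFT;   <- start is lost
	 */
	return (start << DEMO_REG_START_SHIFT) | (end << DEMO_REG_END_SHIFT);
}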
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 691b92a3c3e7..cd8831aebdeb 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -82,8 +82,9 @@ static struct omap3isp_prev_csc flr_prev_csc = {
* The preview engine crops several rows and columns internally depending on
* which filters are enabled. To avoid format changes when the filters are
* enabled or disabled (which would prevent them from being turned on or off
- * during streaming), the driver assumes all the filters are enabled when
- * computing sink crop and source format limits.
+ * during streaming), the driver assumes all filters that can be configured
+ * during streaming are enabled when computing sink crop and source format
+ * limits.
*
* If a filter is disabled, additional cropping is automatically added at the
* preview engine input by the driver to avoid overflow at line and frame end.
@@ -92,25 +93,23 @@ static struct omap3isp_prev_csc flr_prev_csc = {
* Median filter 4 pixels
* Noise filter,
* Faulty pixels correction 4 pixels, 4 lines
- * CFA filter 4 pixels, 4 lines in Bayer mode
- * 2 lines in other modes
* Color suppression 2 pixels
* or luma enhancement
* -------------------------------------------------------------
- * Maximum total 14 pixels, 8 lines
+ * Maximum total 10 pixels, 4 lines
*
* The color suppression and luma enhancement filters are applied after bayer to
* YUV conversion. They thus can crop one pixel on the left and one pixel on the
* right side of the image without changing the color pattern. When both those
* filters are disabled, the driver must crop the two pixels on the same side of
* the image to avoid changing the bayer pattern. The left margin is thus set to
- * 8 pixels and the right margin to 6 pixels.
+ * 6 pixels and the right margin to 4 pixels.
*/
-#define PREV_MARGIN_LEFT 8
-#define PREV_MARGIN_RIGHT 6
-#define PREV_MARGIN_TOP 4
-#define PREV_MARGIN_BOTTOM 4
+#define PREV_MARGIN_LEFT 6
+#define PREV_MARGIN_RIGHT 4
+#define PREV_MARGIN_TOP 2
+#define PREV_MARGIN_BOTTOM 2
#define PREV_MIN_IN_WIDTH 64
#define PREV_MIN_IN_HEIGHT 8
@@ -1080,7 +1079,6 @@ static void preview_config_input_format(struct isp_prev_device *prev,
*/
static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
{
- const struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK];
struct isp_device *isp = to_isp_device(prev);
unsigned int sph = prev->crop.left;
unsigned int eph = prev->crop.left + prev->crop.width - 1;
@@ -1088,14 +1086,6 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
unsigned int elv = prev->crop.top + prev->crop.height - 1;
u32 features;
- if (format->code != V4L2_MBUS_FMT_Y8_1X8 &&
- format->code != V4L2_MBUS_FMT_Y10_1X10) {
- sph -= 2;
- eph += 2;
- slv -= 2;
- elv += 2;
- }
-
features = (prev->params.params[0].features & active)
| (prev->params.params[1].features & ~active);
@@ -1849,6 +1839,18 @@ static void preview_try_crop(struct isp_prev_device *prev,
right -= 2;
}
+ /* The CFA filter crops 4 lines and 4 columns in Bayer mode, and 2 lines
+ * and no columns in other modes. Increase the margins based on the sink
+ * format.
+ */
+ if (sink->code != V4L2_MBUS_FMT_Y8_1X8 &&
+ sink->code != V4L2_MBUS_FMT_Y10_1X10) {
+ left += 2;
+ right -= 2;
+ top += 2;
+ bottom -= 2;
+ }
+
/* Restrict left/top to even values to keep the Bayer pattern. */
crop->left &= ~1;
crop->top &= ~1;
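
With the CFA filter taken out of the static margins and handled per-format in preview_try_crop(), the worst-case numbers in the updated comment add up as follows (just a recap of the arithmetic already stated above, not new behaviour):

	horizontal: 4 (median) + 4 (noise/faulty pixel) + 2 (color suppression or
	            luma enhancement) = 10 pixels, split 6 left / 4 right so the
	            two color-suppression/luma pixels stay on one side and the
	            Bayer pattern is preserved when those filters are disabled
	vertical:   4 lines (noise filter/faulty pixel correction), split
	            2 top / 2 bottom

The CFA cost (4 columns and 4 lines in Bayer mode) is then added dynamically in preview_try_crop() based on the sink format.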
diff --git a/drivers/media/platform/omap3isp/ispqueue.c b/drivers/media/platform/omap3isp/ispqueue.c
index 15bf3eab2224..e15f01342058 100644
--- a/drivers/media/platform/omap3isp/ispqueue.c
+++ b/drivers/media/platform/omap3isp/ispqueue.c
@@ -366,7 +366,7 @@ static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
unsigned long this_pfn;
unsigned long start;
unsigned long end;
- dma_addr_t pa;
+ dma_addr_t pa = 0;
int ret = -EFAULT;
start = buf->vbuf.m.userptr;
@@ -419,7 +419,7 @@ done:
static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
{
struct vm_area_struct *vma;
- pgprot_t vm_page_prot;
+ pgprot_t uninitialized_var(vm_page_prot);
unsigned long start;
unsigned long end;
int ret = -EFAULT;
@@ -674,6 +674,7 @@ static int isp_video_queue_alloc(struct isp_video_queue *queue,
buf->vbuf.index = i;
buf->vbuf.length = size;
buf->vbuf.type = queue->type;
+ buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buf->vbuf.field = V4L2_FIELD_NONE;
buf->vbuf.memory = memory;
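
This hunk, together with the do_gettimeofday() -> v4l2_get_timestamp() conversions earlier in the series, switches the buffers to monotonic timestamps: wall-clock time can jump when the system time is set, while the monotonic clock cannot, and V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC advertises the choice to userspace. Roughly what v4l2_get_timestamp() does in kernels of this vintage (shown only as a sketch, not this patch's code):

#include <linux/ktime.h>
#include <linux/time.h>

static void demo_get_monotonic_timeval(struct timeval *tv)
{
	struct timespec ts;

	ktime_get_ts(&ts);			/* CLOCK_MONOTONIC */
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}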
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index 09a8c9cac5c9..0d0fab1a7b5e 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -27,6 +27,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/version.h>
#include <media/media-device.h>
#include <media/v4l2-ctrls.h>
diff --git a/drivers/media/platform/s5p-fimc/fimc-capture.c b/drivers/media/platform/s5p-fimc/fimc-capture.c
index fdb6740248a7..f553cc2a8ee8 100644
--- a/drivers/media/platform/s5p-fimc/fimc-capture.c
+++ b/drivers/media/platform/s5p-fimc/fimc-capture.c
@@ -486,6 +486,7 @@ static struct vb2_ops fimc_capture_qops = {
int fimc_capture_ctrls_create(struct fimc_dev *fimc)
{
struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct v4l2_subdev *sensor = fimc->pipeline.subdevs[IDX_SENSOR];
int ret;
if (WARN_ON(vid_cap->ctx == NULL))
@@ -494,11 +495,13 @@ int fimc_capture_ctrls_create(struct fimc_dev *fimc)
return 0;
ret = fimc_ctrls_create(vid_cap->ctx);
- if (ret || vid_cap->user_subdev_api || !vid_cap->ctx->ctrls.ready)
+
+ if (ret || vid_cap->user_subdev_api || !sensor ||
+ !vid_cap->ctx->ctrls.ready)
return ret;
return v4l2_ctrl_add_handler(&vid_cap->ctx->ctrls.handler,
- fimc->pipeline.subdevs[IDX_SENSOR]->ctrl_handler, NULL);
+ sensor->ctrl_handler, NULL);
}
static int fimc_capture_set_default_format(struct fimc_dev *fimc);
@@ -510,8 +513,8 @@ static int fimc_capture_open(struct file *file)
dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
+ fimc_md_graph_lock(fimc);
+ mutex_lock(&fimc->lock);
if (fimc_m2m_active(fimc))
goto unlock;
@@ -546,6 +549,7 @@ static int fimc_capture_open(struct file *file)
}
unlock:
mutex_unlock(&fimc->lock);
+ fimc_md_graph_unlock(fimc);
return ret;
}
@@ -626,8 +630,8 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
{
bool rotation = ctx->rotation == 90 || ctx->rotation == 270;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_variant *var = fimc->variant;
- struct fimc_pix_limit *pl = var->pix_limit;
+ const struct fimc_variant *var = fimc->variant;
+ const struct fimc_pix_limit *pl = var->pix_limit;
struct fimc_frame *dst = &ctx->d_frame;
u32 depth, min_w, max_w, min_h, align_h = 3;
u32 mask = FMT_FLAGS_CAM;
@@ -699,8 +703,8 @@ static void fimc_capture_try_selection(struct fimc_ctx *ctx,
{
bool rotate = ctx->rotation == 90 || ctx->rotation == 270;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_variant *var = fimc->variant;
- struct fimc_pix_limit *pl = var->pix_limit;
+ const struct fimc_variant *var = fimc->variant;
+ const struct fimc_pix_limit *pl = var->pix_limit;
struct fimc_frame *sink = &ctx->s_frame;
u32 max_w, max_h, min_w = 0, min_h = 0, min_sz;
u32 align_sz = 0, align_h = 4;
@@ -793,6 +797,21 @@ static int fimc_cap_enum_fmt_mplane(struct file *file, void *priv,
return 0;
}
+static struct media_entity *fimc_pipeline_get_head(struct media_entity *me)
+{
+ struct media_pad *pad = &me->pads[0];
+
+ while (!(pad->flags & MEDIA_PAD_FL_SOURCE)) {
+ pad = media_entity_remote_source(pad);
+ if (!pad)
+ break;
+ me = pad->entity;
+ pad = &me->pads[0];
+ }
+
+ return me;
+}
+
/**
* fimc_pipeline_try_format - negotiate and/or set formats at pipeline
* elements
@@ -808,19 +827,23 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
{
struct fimc_dev *fimc = ctx->fimc_dev;
struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
- struct v4l2_subdev *csis = fimc->pipeline.subdevs[IDX_CSIS];
struct v4l2_subdev_format sfmt;
struct v4l2_mbus_framefmt *mf = &sfmt.format;
- struct fimc_fmt *ffmt = NULL;
- int ret, i = 0;
+ struct media_entity *me;
+ struct fimc_fmt *ffmt;
+ struct media_pad *pad;
+ int ret, i = 1;
+ u32 fcc;
if (WARN_ON(!sd || !tfmt))
return -EINVAL;
memset(&sfmt, 0, sizeof(sfmt));
sfmt.format = *tfmt;
-
sfmt.which = set ? V4L2_SUBDEV_FORMAT_ACTIVE : V4L2_SUBDEV_FORMAT_TRY;
+
+ me = fimc_pipeline_get_head(&sd->entity);
+
while (1) {
ffmt = fimc_find_format(NULL, mf->code != 0 ? &mf->code : NULL,
FMT_FLAGS_CAM, i++);
@@ -833,40 +856,52 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
}
mf->code = tfmt->code = ffmt->mbus_code;
- ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sfmt);
- if (ret)
- return ret;
- if (mf->code != tfmt->code) {
- mf->code = 0;
- continue;
+ /* set format on all pipeline subdevs */
+ while (me != &fimc->vid_cap.subdev.entity) {
+ sd = media_entity_to_v4l2_subdev(me);
+
+ sfmt.pad = 0;
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sfmt);
+ if (ret)
+ return ret;
+
+ if (me->pads[0].flags & MEDIA_PAD_FL_SINK) {
+ sfmt.pad = me->num_pads - 1;
+ mf->code = tfmt->code;
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL,
+ &sfmt);
+ if (ret)
+ return ret;
+ }
+
+ pad = media_entity_remote_source(&me->pads[sfmt.pad]);
+ if (!pad)
+ return -EINVAL;
+ me = pad->entity;
}
- if (mf->width != tfmt->width || mf->height != tfmt->height) {
- u32 fcc = ffmt->fourcc;
- tfmt->width = mf->width;
- tfmt->height = mf->height;
- ffmt = fimc_capture_try_format(ctx,
- &tfmt->width, &tfmt->height,
- NULL, &fcc, FIMC_SD_PAD_SOURCE);
- if (ffmt && ffmt->mbus_code)
- mf->code = ffmt->mbus_code;
- if (mf->width != tfmt->width ||
- mf->height != tfmt->height)
- continue;
- tfmt->code = mf->code;
- }
- if (csis)
- ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt);
- if (mf->code == tfmt->code &&
- mf->width == tfmt->width && mf->height == tfmt->height)
- break;
+ if (mf->code != tfmt->code)
+ continue;
+
+ fcc = ffmt->fourcc;
+ tfmt->width = mf->width;
+ tfmt->height = mf->height;
+ ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height,
+ NULL, &fcc, FIMC_SD_PAD_SINK);
+ ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height,
+ NULL, &fcc, FIMC_SD_PAD_SOURCE);
+ if (ffmt && ffmt->mbus_code)
+ mf->code = ffmt->mbus_code;
+ if (mf->width != tfmt->width || mf->height != tfmt->height)
+ continue;
+ tfmt->code = mf->code;
+ break;
}
if (fmt_id && ffmt)
*fmt_id = ffmt;
*tfmt = *mf;
- dbg("code: 0x%x, %dx%d, %p", mf->code, mf->width, mf->height, ffmt);
return 0;
}
@@ -884,14 +919,16 @@ static int fimc_get_sensor_frame_desc(struct v4l2_subdev *sensor,
{
struct v4l2_mbus_frame_desc fd;
int i, ret;
+ int pad;
for (i = 0; i < num_planes; i++)
fd.entry[i].length = plane_fmt[i].sizeimage;
+ pad = sensor->entity.num_pads - 1;
if (try)
- ret = v4l2_subdev_call(sensor, pad, set_frame_desc, 0, &fd);
+ ret = v4l2_subdev_call(sensor, pad, set_frame_desc, pad, &fd);
else
- ret = v4l2_subdev_call(sensor, pad, get_frame_desc, 0, &fd);
+ ret = v4l2_subdev_call(sensor, pad, get_frame_desc, pad, &fd);
if (ret < 0)
return ret;
@@ -916,9 +953,9 @@ static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_dev *fimc = video_drvdata(file);
- struct fimc_ctx *ctx = fimc->vid_cap.ctx;
- return fimc_fill_format(&ctx->d_frame, f);
+ __fimc_get_format(&fimc->vid_cap.ctx->d_frame, f);
+ return 0;
}
static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
@@ -929,6 +966,10 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
struct v4l2_mbus_framefmt mf;
struct fimc_fmt *ffmt = NULL;
+ int ret = 0;
+
+ fimc_md_graph_lock(fimc);
+ mutex_lock(&fimc->lock);
if (fimc_jpeg_fourcc(pix->pixelformat)) {
fimc_capture_try_format(ctx, &pix->width, &pix->height,
@@ -940,16 +981,16 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
ffmt = fimc_capture_try_format(ctx, &pix->width, &pix->height,
NULL, &pix->pixelformat,
FIMC_SD_PAD_SOURCE);
- if (!ffmt)
- return -EINVAL;
+ if (!ffmt) {
+ ret = -EINVAL;
+ goto unlock;
+ }
if (!fimc->vid_cap.user_subdev_api) {
mf.width = pix->width;
mf.height = pix->height;
mf.code = ffmt->mbus_code;
- fimc_md_graph_lock(fimc);
fimc_pipeline_try_format(ctx, &mf, &ffmt, false);
- fimc_md_graph_unlock(fimc);
pix->width = mf.width;
pix->height = mf.height;
if (ffmt)
@@ -961,8 +1002,11 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
if (ffmt->flags & FMT_FLAGS_COMPRESSED)
fimc_get_sensor_frame_desc(fimc->pipeline.subdevs[IDX_SENSOR],
pix->plane_fmt, ffmt->memplanes, true);
+unlock:
+ mutex_unlock(&fimc->lock);
+ fimc_md_graph_unlock(fimc);
- return 0;
+ return ret;
}
static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx,
@@ -979,7 +1023,8 @@ static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx,
clear_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state);
}
-static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
+static int __fimc_capture_set_format(struct fimc_dev *fimc,
+ struct v4l2_format *f)
{
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
@@ -1014,12 +1059,10 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
mf->code = ff->fmt->mbus_code;
mf->width = pix->width;
mf->height = pix->height;
-
- fimc_md_graph_lock(fimc);
ret = fimc_pipeline_try_format(ctx, mf, &s_fmt, true);
- fimc_md_graph_unlock(fimc);
if (ret)
return ret;
+
pix->width = mf->width;
pix->height = mf->height;
}
@@ -1034,8 +1077,10 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
return ret;
}
- for (i = 0; i < ff->fmt->memplanes; i++)
+ for (i = 0; i < ff->fmt->memplanes; i++) {
+ ff->bytesperline[i] = pix->plane_fmt[i].bytesperline;
ff->payload[i] = pix->plane_fmt[i].sizeimage;
+ }
set_frame_bounds(ff, pix->width, pix->height);
/* Reset the composition rectangle if not yet configured */
@@ -1058,8 +1103,23 @@ static int fimc_cap_s_fmt_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
struct fimc_dev *fimc = video_drvdata(file);
+ int ret;
+
+ fimc_md_graph_lock(fimc);
+ mutex_lock(&fimc->lock);
+ /*
+ * The graph is walked within __fimc_capture_set_format() to set
+ * the format at the subdevs, so the graph mutex needs to be held at
+ * this point and acquired before the video mutex, to avoid an AB-BA
+ * deadlock when fimc_md_link_notify() is called by another thread.
+ * Ideally the graph walking and format setting for the whole pipeline
+ * should be removed from this driver and handled in userspace only.
+ */
+ ret = __fimc_capture_set_format(fimc, f);
- return fimc_capture_set_format(fimc, f);
+ mutex_unlock(&fimc->lock);
+ fimc_md_graph_unlock(fimc);
+ return ret;
}
static int fimc_cap_enum_input(struct file *file, void *priv,
@@ -1528,6 +1588,10 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
*mf = fmt->format;
return 0;
}
+ /* There must be a bug in the driver if this happens */
+ if (WARN_ON(ffmt == NULL))
+ return -EINVAL;
+
/* Update RGB Alpha control state and value range */
fimc_alpha_ctrl_update(ctx);
@@ -1624,16 +1688,6 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
fimc_capture_try_selection(ctx, r, V4L2_SEL_TGT_CROP);
switch (sel->target) {
- case V4L2_SEL_TGT_COMPOSE_BOUNDS:
- f = &ctx->d_frame;
- case V4L2_SEL_TGT_CROP_BOUNDS:
- r->width = f->o_width;
- r->height = f->o_height;
- r->left = 0;
- r->top = 0;
- mutex_unlock(&fimc->lock);
- return 0;
-
case V4L2_SEL_TGT_CROP:
try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
break;
@@ -1652,9 +1706,9 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
spin_lock_irqsave(&fimc->slock, flags);
set_frame_crop(f, r->left, r->top, r->width, r->height);
set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
- spin_unlock_irqrestore(&fimc->slock, flags);
if (sel->target == V4L2_SEL_TGT_COMPOSE)
ctx->state |= FIMC_COMPOSE;
+ spin_unlock_irqrestore(&fimc->slock, flags);
}
dbg("target %#x: (%d,%d)/%dx%d", sel->target, r->left, r->top,
@@ -1690,7 +1744,7 @@ static int fimc_capture_set_default_format(struct fimc_dev *fimc)
},
};
- return fimc_capture_set_format(fimc, &fmt);
+ return __fimc_capture_set_format(fimc, &fmt);
}
/* fimc->lock must be already initialized */
@@ -1752,6 +1806,12 @@ static int fimc_register_capture_device(struct fimc_dev *fimc,
ret = media_entity_init(&vfd->entity, 1, &vid_cap->vd_pad, 0);
if (ret)
goto err_ent;
+ /*
+ * Lock these ioctls in the driver itself, to keep the proper order
+ * of acquiring/releasing the video and the graph mutexes.
+ */
+ v4l2_disable_ioctl_locking(vfd, VIDIOC_TRY_FMT);
+ v4l2_disable_ioctl_locking(vfd, VIDIOC_S_FMT);
ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret)
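
The fimc-capture.c changes make every path that walks the media graph take the graph mutex before the per-device video mutex (open, try_fmt, s_fmt) and turn off the V4L2 core's automatic locking for VIDIOC_TRY_FMT/VIDIOC_S_FMT, so the core cannot take the video mutex first behind the driver's back. Consistent ordering is what rules out the AB-BA deadlock mentioned in the new comment; schematically, with hypothetical locks standing in for the graph and video mutexes:

#include <linux/mutex.h>

static DEFINE_MUTEX(graph_lock);	/* "A": shared across the media graph */
static DEFINE_MUTEX(video_lock);	/* "B": per video device              */

/* Every path takes A then B ... */
static void demo_set_format_path(void)
{
	mutex_lock(&graph_lock);
	mutex_lock(&video_lock);
	/* ... walk the graph and set formats ... */
	mutex_unlock(&video_lock);
	mutex_unlock(&graph_lock);
}

/*
 * ... so another path using the same A-then-B order can never deadlock
 * against it. If one path took B then A instead, each thread could end up
 * holding one lock while waiting forever for the other (AB-BA deadlock).
 */
static void demo_link_notify_path(void)
{
	mutex_lock(&graph_lock);
	mutex_lock(&video_lock);
	/* ... link_notify style reconfiguration ... */
	mutex_unlock(&video_lock);
	mutex_unlock(&graph_lock);
}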
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.c b/drivers/media/platform/s5p-fimc/fimc-core.c
index acc0f84ffa56..e3916bde45cf 100644
--- a/drivers/media/platform/s5p-fimc/fimc-core.c
+++ b/drivers/media/platform/s5p-fimc/fimc-core.c
@@ -241,7 +241,7 @@ static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
int fimc_set_scaler_info(struct fimc_ctx *ctx)
{
- struct fimc_variant *variant = ctx->fimc_dev->variant;
+ const struct fimc_variant *variant = ctx->fimc_dev->variant;
struct device *dev = &ctx->fimc_dev->pdev->dev;
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *s_frame = &ctx->s_frame;
@@ -257,14 +257,14 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
ty = d_frame->height;
}
if (tx <= 0 || ty <= 0) {
- dev_err(dev, "Invalid target size: %dx%d", tx, ty);
+ dev_err(dev, "Invalid target size: %dx%d\n", tx, ty);
return -EINVAL;
}
sx = s_frame->width;
sy = s_frame->height;
if (sx <= 0 || sy <= 0) {
- dev_err(dev, "Invalid source size: %dx%d", sx, sy);
+ dev_err(dev, "Invalid source size: %dx%d\n", sx, sy);
return -EINVAL;
}
sc->real_width = sx;
@@ -440,7 +440,7 @@ void fimc_set_yuv_order(struct fimc_ctx *ctx)
void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
{
- struct fimc_variant *variant = ctx->fimc_dev->variant;
+ const struct fimc_variant *variant = ctx->fimc_dev->variant;
u32 i, depth = 0;
for (i = 0; i < f->fmt->colplanes; i++)
@@ -524,8 +524,7 @@ static int fimc_set_color_effect(struct fimc_ctx *ctx, enum v4l2_colorfx colorfx
static int __fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_ctrl *ctrl)
{
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_variant *variant = fimc->variant;
- unsigned int flags = FIMC_DST_FMT | FIMC_SRC_FMT;
+ const struct fimc_variant *variant = fimc->variant;
int ret = 0;
if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
@@ -541,8 +540,7 @@ static int __fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_ROTATE:
- if (fimc_capture_pending(fimc) ||
- (ctx->state & flags) == flags) {
+ if (fimc_capture_pending(fimc)) {
ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
ctx->s_frame.height, ctx->d_frame.width,
ctx->d_frame.height, ctrl->val);
@@ -591,7 +589,7 @@ static const struct v4l2_ctrl_ops fimc_ctrl_ops = {
int fimc_ctrls_create(struct fimc_ctx *ctx)
{
- struct fimc_variant *variant = ctx->fimc_dev->variant;
+ const struct fimc_variant *variant = ctx->fimc_dev->variant;
unsigned int max_alpha = fimc_get_alpha_mask(ctx->d_frame.fmt);
struct fimc_ctrls *ctrls = &ctx->ctrls;
struct v4l2_ctrl_handler *handler = &ctrls->handler;
@@ -691,7 +689,7 @@ void fimc_alpha_ctrl_update(struct fimc_ctx *ctx)
v4l2_ctrl_unlock(ctrl);
}
-int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f)
+void __fimc_get_format(struct fimc_frame *frame, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
int i;
@@ -704,35 +702,9 @@ int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f)
pixm->num_planes = frame->fmt->memplanes;
for (i = 0; i < pixm->num_planes; ++i) {
- int bpl = frame->f_width;
- if (frame->fmt->colplanes == 1) /* packed formats */
- bpl = (bpl * frame->fmt->depth[0]) / 8;
- pixm->plane_fmt[i].bytesperline = bpl;
-
- if (frame->fmt->flags & FMT_FLAGS_COMPRESSED) {
- pixm->plane_fmt[i].sizeimage = frame->payload[i];
- continue;
- }
- pixm->plane_fmt[i].sizeimage = (frame->o_width *
- frame->o_height * frame->fmt->depth[i]) / 8;
+ pixm->plane_fmt[i].bytesperline = frame->bytesperline[i];
+ pixm->plane_fmt[i].sizeimage = frame->payload[i];
}
- return 0;
-}
-
-void fimc_fill_frame(struct fimc_frame *frame, struct v4l2_format *f)
-{
- struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
-
- frame->f_width = pixm->plane_fmt[0].bytesperline;
- if (frame->fmt->colplanes == 1)
- frame->f_width = (frame->f_width * 8) / frame->fmt->depth[0];
- frame->f_height = pixm->height;
- frame->width = pixm->width;
- frame->height = pixm->height;
- frame->o_width = pixm->width;
- frame->o_height = pixm->height;
- frame->offs_h = 0;
- frame->offs_v = 0;
}
/**
@@ -765,9 +737,16 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
if (fmt->colplanes == 1 && /* Packed */
(bpl == 0 || ((bpl * 8) / fmt->depth[i]) < pix->width))
bpl = (pix->width * fmt->depth[0]) / 8;
-
- if (i == 0) /* Same bytesperline for each plane. */
+ /*
+ * Currently bytesperline is the same for each plane, except for
+ * the V4L2_PIX_FMT_YUV420M format. This calculation may need
+ * to be changed when other multi-planar formats are added
+ * to the fimc_formats[] array.
+ */
+ if (i == 0)
bytesperline = bpl;
+ else if (i == 1 && fmt->memplanes == 3)
+ bytesperline /= 2;
plane_fmt->bytesperline = bytesperline;
plane_fmt->sizeimage = max((pix->width * pix->height *
@@ -811,11 +790,11 @@ static void fimc_clk_put(struct fimc_dev *fimc)
{
int i;
for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
- if (IS_ERR_OR_NULL(fimc->clock[i]))
+ if (IS_ERR(fimc->clock[i]))
continue;
clk_unprepare(fimc->clock[i]);
clk_put(fimc->clock[i]);
- fimc->clock[i] = NULL;
+ fimc->clock[i] = ERR_PTR(-EINVAL);
}
}
@@ -823,14 +802,19 @@ static int fimc_clk_get(struct fimc_dev *fimc)
{
int i, ret;
+ for (i = 0; i < MAX_FIMC_CLOCKS; i++)
+ fimc->clock[i] = ERR_PTR(-EINVAL);
+
for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
fimc->clock[i] = clk_get(&fimc->pdev->dev, fimc_clocks[i]);
- if (IS_ERR(fimc->clock[i]))
+ if (IS_ERR(fimc->clock[i])) {
+ ret = PTR_ERR(fimc->clock[i]);
goto err;
+ }
ret = clk_prepare(fimc->clock[i]);
if (ret < 0) {
clk_put(fimc->clock[i]);
- fimc->clock[i] = NULL;
+ fimc->clock[i] = ERR_PTR(-EINVAL);
goto err;
}
}
@@ -881,7 +865,7 @@ static int fimc_m2m_resume(struct fimc_dev *fimc)
static int fimc_probe(struct platform_device *pdev)
{
- struct fimc_drvdata *drv_data = fimc_get_drvdata(pdev);
+ const struct fimc_drvdata *drv_data = fimc_get_drvdata(pdev);
struct s5p_platform_fimc *pdata;
struct fimc_dev *fimc;
struct resource *res;
@@ -922,8 +906,14 @@ static int fimc_probe(struct platform_device *pdev)
ret = fimc_clk_get(fimc);
if (ret)
return ret;
- clk_set_rate(fimc->clock[CLK_BUS], drv_data->lclk_frequency);
- clk_enable(fimc->clock[CLK_BUS]);
+
+ ret = clk_set_rate(fimc->clock[CLK_BUS], drv_data->lclk_frequency);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(fimc->clock[CLK_BUS]);
+ if (ret < 0)
+ return ret;
ret = devm_request_irq(&pdev->dev, res->start, fimc_irq_handler,
0, dev_name(&pdev->dev), fimc);
@@ -957,6 +947,7 @@ err_pm:
err_sd:
fimc_unregister_capture_subdev(fimc);
err_clk:
+ clk_disable(fimc->clock[CLK_BUS]);
fimc_clk_put(fimc);
return ret;
}
@@ -1051,7 +1042,7 @@ static int fimc_remove(struct platform_device *pdev)
}
/* Image pixel limits, similar across several FIMC HW revisions. */
-static struct fimc_pix_limit s5p_pix_limit[4] = {
+static const struct fimc_pix_limit s5p_pix_limit[4] = {
[0] = {
.scaler_en_w = 3264,
.scaler_dis_w = 8192,
@@ -1086,7 +1077,7 @@ static struct fimc_pix_limit s5p_pix_limit[4] = {
},
};
-static struct fimc_variant fimc0_variant_s5p = {
+static const struct fimc_variant fimc0_variant_s5p = {
.has_inp_rot = 1,
.has_out_rot = 1,
.has_cam_if = 1,
@@ -1098,7 +1089,7 @@ static struct fimc_variant fimc0_variant_s5p = {
.pix_limit = &s5p_pix_limit[0],
};
-static struct fimc_variant fimc2_variant_s5p = {
+static const struct fimc_variant fimc2_variant_s5p = {
.has_cam_if = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
@@ -1108,7 +1099,7 @@ static struct fimc_variant fimc2_variant_s5p = {
.pix_limit = &s5p_pix_limit[1],
};
-static struct fimc_variant fimc0_variant_s5pv210 = {
+static const struct fimc_variant fimc0_variant_s5pv210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1121,7 +1112,7 @@ static struct fimc_variant fimc0_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[1],
};
-static struct fimc_variant fimc1_variant_s5pv210 = {
+static const struct fimc_variant fimc1_variant_s5pv210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1135,7 +1126,7 @@ static struct fimc_variant fimc1_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[2],
};
-static struct fimc_variant fimc2_variant_s5pv210 = {
+static const struct fimc_variant fimc2_variant_s5pv210 = {
.has_cam_if = 1,
.pix_hoff = 1,
.min_inp_pixsize = 16,
@@ -1146,7 +1137,7 @@ static struct fimc_variant fimc2_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[2],
};
-static struct fimc_variant fimc0_variant_exynos4 = {
+static const struct fimc_variant fimc0_variant_exynos4210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1162,9 +1153,8 @@ static struct fimc_variant fimc0_variant_exynos4 = {
.pix_limit = &s5p_pix_limit[1],
};
-static struct fimc_variant fimc3_variant_exynos4 = {
+static const struct fimc_variant fimc3_variant_exynos4210 = {
.pix_hoff = 1,
- .has_cam_if = 1,
.has_cistatus2 = 1,
.has_mainscaler_ext = 1,
.has_alpha = 1,
@@ -1176,8 +1166,38 @@ static struct fimc_variant fimc3_variant_exynos4 = {
.pix_limit = &s5p_pix_limit[3],
};
+static const struct fimc_variant fimc0_variant_exynos4x12 = {
+ .pix_hoff = 1,
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .has_cam_if = 1,
+ .has_isp_wb = 1,
+ .has_cistatus2 = 1,
+ .has_mainscaler_ext = 1,
+ .has_alpha = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 2,
+ .min_vsize_align = 1,
+ .out_buf_count = 32,
+ .pix_limit = &s5p_pix_limit[1],
+};
+
+static const struct fimc_variant fimc3_variant_exynos4x12 = {
+ .pix_hoff = 1,
+ .has_cistatus2 = 1,
+ .has_mainscaler_ext = 1,
+ .has_alpha = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 2,
+ .min_vsize_align = 1,
+ .out_buf_count = 32,
+ .pix_limit = &s5p_pix_limit[3],
+};
+
/* S5PC100 */
-static struct fimc_drvdata fimc_drvdata_s5p = {
+static const struct fimc_drvdata fimc_drvdata_s5p = {
.variant = {
[0] = &fimc0_variant_s5p,
[1] = &fimc0_variant_s5p,
@@ -1188,7 +1208,7 @@ static struct fimc_drvdata fimc_drvdata_s5p = {
};
/* S5PV210, S5PC110 */
-static struct fimc_drvdata fimc_drvdata_s5pv210 = {
+static const struct fimc_drvdata fimc_drvdata_s5pv210 = {
.variant = {
[0] = &fimc0_variant_s5pv210,
[1] = &fimc1_variant_s5pv210,
@@ -1199,18 +1219,30 @@ static struct fimc_drvdata fimc_drvdata_s5pv210 = {
};
/* EXYNOS4210, S5PV310, S5PC210 */
-static struct fimc_drvdata fimc_drvdata_exynos4 = {
+static const struct fimc_drvdata fimc_drvdata_exynos4210 = {
+ .variant = {
+ [0] = &fimc0_variant_exynos4210,
+ [1] = &fimc0_variant_exynos4210,
+ [2] = &fimc0_variant_exynos4210,
+ [3] = &fimc3_variant_exynos4210,
+ },
+ .num_entities = 4,
+ .lclk_frequency = 166000000UL,
+};
+
+/* EXYNOS4212, EXYNOS4412 */
+static const struct fimc_drvdata fimc_drvdata_exynos4x12 = {
.variant = {
- [0] = &fimc0_variant_exynos4,
- [1] = &fimc0_variant_exynos4,
- [2] = &fimc0_variant_exynos4,
- [3] = &fimc3_variant_exynos4,
+ [0] = &fimc0_variant_exynos4x12,
+ [1] = &fimc0_variant_exynos4x12,
+ [2] = &fimc0_variant_exynos4x12,
+ [3] = &fimc3_variant_exynos4x12,
},
.num_entities = 4,
.lclk_frequency = 166000000UL,
};
-static struct platform_device_id fimc_driver_ids[] = {
+static const struct platform_device_id fimc_driver_ids[] = {
{
.name = "s5p-fimc",
.driver_data = (unsigned long)&fimc_drvdata_s5p,
@@ -1219,7 +1251,10 @@ static struct platform_device_id fimc_driver_ids[] = {
.driver_data = (unsigned long)&fimc_drvdata_s5pv210,
}, {
.name = "exynos4-fimc",
- .driver_data = (unsigned long)&fimc_drvdata_exynos4,
+ .driver_data = (unsigned long)&fimc_drvdata_exynos4210,
+ }, {
+ .name = "exynos4x12-fimc",
+ .driver_data = (unsigned long)&fimc_drvdata_exynos4x12,
},
{},
};
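
Among the const-ification and the new exynos4x12 variants, fimc-core.c also tightens clock handling: the clock table is pre-filled with ERR_PTR(-EINVAL) so fimc_clk_put() can use a plain IS_ERR() test instead of IS_ERR_OR_NULL(), and the clk_set_rate()/clk_enable() return values are now checked. A generic sketch of the same acquire/release pattern (assumed "demo_" names):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

#define DEMO_NUM_CLKS	2
static const char *const demo_clk_names[DEMO_NUM_CLKS] = { "bus", "cam" };

static void demo_clk_put(struct clk *clks[DEMO_NUM_CLKS])
{
	int i;

	for (i = 0; i < DEMO_NUM_CLKS; i++) {
		if (IS_ERR(clks[i]))	/* never obtained, nothing to undo */
			continue;
		clk_unprepare(clks[i]);
		clk_put(clks[i]);
		clks[i] = ERR_PTR(-EINVAL);
	}
}

static int demo_clk_get(struct device *dev, struct clk *clks[DEMO_NUM_CLKS])
{
	int i, ret;

	for (i = 0; i < DEMO_NUM_CLKS; i++)
		clks[i] = ERR_PTR(-EINVAL);	/* sentinel: not acquired yet */

	for (i = 0; i < DEMO_NUM_CLKS; i++) {
		clks[i] = clk_get(dev, demo_clk_names[i]);
		if (IS_ERR(clks[i])) {
			ret = PTR_ERR(clks[i]);
			goto err;
		}
		ret = clk_prepare(clks[i]);
		if (ret < 0) {
			clk_put(clks[i]);
			clks[i] = ERR_PTR(-EINVAL);
			goto err;
		}
	}
	return 0;
err:
	demo_clk_put(clks);
	return ret;
}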
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.h b/drivers/media/platform/s5p-fimc/fimc-core.h
index c0040d792499..412d50708f75 100644
--- a/drivers/media/platform/s5p-fimc/fimc-core.h
+++ b/drivers/media/platform/s5p-fimc/fimc-core.h
@@ -112,9 +112,7 @@ enum fimc_color_fmt {
/* The hardware context state. */
#define FIMC_PARAMS (1 << 0)
-#define FIMC_SRC_FMT (1 << 3)
-#define FIMC_DST_FMT (1 << 4)
-#define FIMC_COMPOSE (1 << 5)
+#define FIMC_COMPOSE (1 << 1)
#define FIMC_CTX_M2M (1 << 16)
#define FIMC_CTX_CAP (1 << 17)
#define FIMC_CTX_SHUT (1 << 18)
@@ -265,6 +263,7 @@ struct fimc_vid_buffer {
* @width: image pixel width
* @height: image pixel weight
* @payload: image size in bytes (w x h x bpp)
+ * @bytesperline: bytesperline value for each plane
* @paddr: image frame buffer physical addresses
* @dma_offset: DMA offset in bytes
* @fmt: fimc color format pointer
@@ -279,6 +278,7 @@ struct fimc_frame {
u32 width;
u32 height;
unsigned int payload[VIDEO_MAX_PLANES];
+ unsigned int bytesperline[VIDEO_MAX_PLANES];
struct fimc_addr paddr;
struct fimc_dma_offset dma_offset;
struct fimc_fmt *fmt;
@@ -372,6 +372,7 @@ struct fimc_pix_limit {
* @has_mainscaler_ext: 1 if extended mainscaler ratios in CIEXTEN register
* are present in this IP revision
* @has_cam_if: set if this instance has a camera input interface
+ * @has_isp_wb: set if this instance has ISP writeback input
* @pix_limit: pixel size constraints for the scaler
* @min_inp_pixsize: minimum input pixel size
* @min_out_pixsize: minimum output pixel size
@@ -386,8 +387,9 @@ struct fimc_variant {
unsigned int has_cistatus2:1;
unsigned int has_mainscaler_ext:1;
unsigned int has_cam_if:1;
+ unsigned int has_isp_wb:1;
unsigned int has_alpha:1;
- struct fimc_pix_limit *pix_limit;
+ const struct fimc_pix_limit *pix_limit;
u16 min_inp_pixsize;
u16 min_out_pixsize;
u16 hor_offs_align;
@@ -402,7 +404,7 @@ struct fimc_variant {
* @lclk_frequency: local bus clock frequency
*/
struct fimc_drvdata {
- struct fimc_variant *variant[FIMC_MAX_DEVS];
+ const struct fimc_variant *variant[FIMC_MAX_DEVS];
int num_entities;
unsigned long lclk_frequency;
};
@@ -435,7 +437,7 @@ struct fimc_dev {
struct mutex lock;
struct platform_device *pdev;
struct s5p_platform_fimc *pdata;
- struct fimc_variant *variant;
+ const struct fimc_variant *variant;
u16 id;
struct clk *clock[MAX_FIMC_CLOCKS];
void __iomem *regs;
@@ -635,7 +637,7 @@ int fimc_ctrls_create(struct fimc_ctx *ctx);
void fimc_ctrls_delete(struct fimc_ctx *ctx);
void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active);
void fimc_alpha_ctrl_update(struct fimc_ctx *ctx);
-int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f);
+void __fimc_get_format(struct fimc_frame *frame, struct v4l2_format *f);
void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
struct v4l2_pix_format_mplane *pix);
struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
@@ -650,7 +652,6 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
struct fimc_frame *frame, struct fimc_addr *paddr);
void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f);
void fimc_set_yuv_order(struct fimc_ctx *ctx);
-void fimc_fill_frame(struct fimc_frame *frame, struct v4l2_format *f);
void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf);
int fimc_register_m2m_device(struct fimc_dev *fimc,
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
index a22d7eb05c82..f0af0754a7b4 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
+++ b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
@@ -65,7 +65,7 @@ void flite_hw_set_interrupt_mask(struct fimc_lite *dev)
u32 cfg, intsrc;
/* Select interrupts to be enabled for each output mode */
- if (dev->out_path == FIMC_IO_DMA) {
+ if (atomic_read(&dev->out_path) == FIMC_IO_DMA) {
intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN |
FLITE_REG_CIGCTRL_IRQ_LASTEN |
FLITE_REG_CIGCTRL_IRQ_STARTEN;
@@ -187,12 +187,12 @@ static void flite_hw_set_camera_port(struct fimc_lite *dev, int id)
/* Select serial or parallel bus, camera port (A,B) and set signals polarity */
void flite_hw_set_camera_bus(struct fimc_lite *dev,
- struct s5p_fimc_isp_info *s_info)
+ struct fimc_source_info *si)
{
u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
- unsigned int flags = s_info->flags;
+ unsigned int flags = si->flags;
- if (s_info->bus_type != FIMC_MIPI_CSI2) {
+ if (si->sensor_bus_type != FIMC_BUS_TYPE_MIPI_CSI2) {
cfg &= ~(FLITE_REG_CIGCTRL_SELCAM_MIPI |
FLITE_REG_CIGCTRL_INVPOLPCLK |
FLITE_REG_CIGCTRL_INVPOLVSYNC |
@@ -212,7 +212,7 @@ void flite_hw_set_camera_bus(struct fimc_lite *dev,
writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
- flite_hw_set_camera_port(dev, s_info->mux_id);
+ flite_hw_set_camera_port(dev, si->mux_id);
}
static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
@@ -292,9 +292,11 @@ void flite_hw_dump_regs(struct fimc_lite *dev, const char *label)
};
u32 i;
- pr_info("--- %s ---\n", label);
+ v4l2_info(&dev->subdev, "--- %s ---\n", label);
+
for (i = 0; i < ARRAY_SIZE(registers); i++) {
u32 cfg = readl(dev->regs + registers[i].offset);
- pr_info("%s: %s:\t0x%08x\n", __func__, registers[i].name, cfg);
+ v4l2_info(&dev->subdev, "%9s: 0x%08x\n",
+ registers[i].name, cfg);
}
}
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite-reg.h b/drivers/media/platform/s5p-fimc/fimc-lite-reg.h
index adb9e9e6f3c2..0e345844c13a 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite-reg.h
+++ b/drivers/media/platform/s5p-fimc/fimc-lite-reg.h
@@ -131,9 +131,9 @@ void flite_hw_set_interrupt_mask(struct fimc_lite *dev);
void flite_hw_capture_start(struct fimc_lite *dev);
void flite_hw_capture_stop(struct fimc_lite *dev);
void flite_hw_set_camera_bus(struct fimc_lite *dev,
- struct s5p_fimc_isp_info *s_info);
+ struct fimc_source_info *s_info);
void flite_hw_set_camera_polarity(struct fimc_lite *dev,
- struct s5p_fimc_isp_info *cam);
+ struct fimc_source_info *cam);
void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f);
void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f);
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite.c b/drivers/media/platform/s5p-fimc/fimc-lite.c
index 67db9f8102e4..bfc4206935c8 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite.c
+++ b/drivers/media/platform/s5p-fimc/fimc-lite.c
@@ -120,25 +120,29 @@ static const struct fimc_fmt *fimc_lite_find_format(const u32 *pixelformat,
return def_fmt;
}
-static int fimc_lite_hw_init(struct fimc_lite *fimc)
+static int fimc_lite_hw_init(struct fimc_lite *fimc, bool isp_output)
{
struct fimc_pipeline *pipeline = &fimc->pipeline;
- struct fimc_sensor_info *sensor;
+ struct v4l2_subdev *sensor;
+ struct fimc_sensor_info *si;
unsigned long flags;
- if (pipeline->subdevs[IDX_SENSOR] == NULL)
+ sensor = isp_output ? fimc->sensor : pipeline->subdevs[IDX_SENSOR];
+
+ if (sensor == NULL)
return -ENXIO;
if (fimc->fmt == NULL)
return -EINVAL;
- sensor = v4l2_get_subdev_hostdata(pipeline->subdevs[IDX_SENSOR]);
+ /* Get sensor configuration data from the sensor subdev */
+ si = v4l2_get_subdev_hostdata(sensor);
spin_lock_irqsave(&fimc->slock, flags);
- flite_hw_set_camera_bus(fimc, &sensor->pdata);
+ flite_hw_set_camera_bus(fimc, &si->pdata);
flite_hw_set_source_format(fimc, &fimc->inp_frame);
flite_hw_set_window_offset(fimc, &fimc->inp_frame);
- flite_hw_set_output_dma(fimc, &fimc->out_frame, true);
+ flite_hw_set_output_dma(fimc, &fimc->out_frame, !isp_output);
flite_hw_set_interrupt_mask(fimc);
flite_hw_set_test_pattern(fimc, fimc->test_pattern->val);
@@ -256,7 +260,7 @@ static irqreturn_t flite_irq_handler(int irq, void *priv)
wake_up(&fimc->irq_queue);
}
- if (fimc->out_path != FIMC_IO_DMA)
+ if (atomic_read(&fimc->out_path) != FIMC_IO_DMA)
goto done;
if ((intsrc & FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART) &&
@@ -296,7 +300,7 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
fimc->frame_count = 0;
- ret = fimc_lite_hw_init(fimc);
+ ret = fimc_lite_hw_init(fimc, false);
if (ret) {
fimc_lite_reinit(fimc, false);
return ret;
@@ -455,10 +459,16 @@ static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
static int fimc_lite_open(struct file *file)
{
struct fimc_lite *fimc = video_drvdata(file);
+ struct media_entity *me = &fimc->vfd.entity;
int ret;
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
+ mutex_lock(&me->parent->graph_mutex);
+
+ mutex_lock(&fimc->lock);
+ if (atomic_read(&fimc->out_path) != FIMC_IO_DMA) {
+ ret = -EBUSY;
+ goto done;
+ }
set_bit(ST_FLITE_IN_USE, &fimc->state);
ret = pm_runtime_get_sync(&fimc->pdev->dev);
@@ -469,7 +479,8 @@ static int fimc_lite_open(struct file *file)
if (ret < 0)
goto done;
- if (++fimc->ref_count == 1 && fimc->out_path == FIMC_IO_DMA) {
+ if (++fimc->ref_count == 1 &&
+ atomic_read(&fimc->out_path) == FIMC_IO_DMA) {
ret = fimc_pipeline_call(fimc, open, &fimc->pipeline,
&fimc->vfd.entity, true);
if (ret < 0) {
@@ -483,6 +494,7 @@ static int fimc_lite_open(struct file *file)
}
done:
mutex_unlock(&fimc->lock);
+ mutex_unlock(&me->parent->graph_mutex);
return ret;
}
@@ -493,7 +505,8 @@ static int fimc_lite_close(struct file *file)
mutex_lock(&fimc->lock);
- if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
+ if (--fimc->ref_count == 0 &&
+ atomic_read(&fimc->out_path) == FIMC_IO_DMA) {
clear_bit(ST_FLITE_IN_USE, &fimc->state);
fimc_lite_stop_capture(fimc, false);
fimc_pipeline_call(fimc, close, &fimc->pipeline);
@@ -598,7 +611,7 @@ static void fimc_lite_try_crop(struct fimc_lite *fimc, struct v4l2_rect *r)
r->left = round_down(r->left, fimc->variant->win_hor_offs_align);
r->top = clamp_t(u32, r->top, 0, frame->f_height - r->height);
- v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, sink fmt: %dx%d",
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, sink fmt: %dx%d\n",
r->left, r->top, r->width, r->height,
frame->f_width, frame->f_height);
}
@@ -618,7 +631,7 @@ static void fimc_lite_try_compose(struct fimc_lite *fimc, struct v4l2_rect *r)
r->left = round_down(r->left, fimc->variant->out_hor_offs_align);
r->top = clamp_t(u32, r->top, 0, fimc->out_frame.f_height - r->height);
- v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, source fmt: %dx%d",
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, source fmt: %dx%d\n",
r->left, r->top, r->width, r->height,
frame->f_width, frame->f_height);
}
@@ -962,6 +975,29 @@ static const struct v4l2_ioctl_ops fimc_lite_ioctl_ops = {
.vidioc_streamoff = fimc_lite_streamoff,
};
+/* Called with the media graph mutex held */
+static struct v4l2_subdev *__find_remote_sensor(struct media_entity *me)
+{
+ struct media_pad *pad = &me->pads[0];
+ struct v4l2_subdev *sd;
+
+ while (pad->flags & MEDIA_PAD_FL_SINK) {
+ /* source pad */
+ pad = media_entity_remote_source(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ if (sd->grp_id == GRP_ID_FIMC_IS_SENSOR)
+ return sd;
+ /* sink pad */
+ pad = &sd->entity.pads[0];
+ }
+ return NULL;
+}
+
/* Capture subdev media entity operations */
static int fimc_lite_link_setup(struct media_entity *entity,
const struct media_pad *local,
@@ -970,46 +1006,60 @@ static int fimc_lite_link_setup(struct media_entity *entity,
struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
unsigned int remote_ent_type = media_entity_type(remote->entity);
+ int ret = 0;
if (WARN_ON(fimc == NULL))
return 0;
- v4l2_dbg(1, debug, sd, "%s: %s --> %s, flags: 0x%x. source_id: 0x%x",
- __func__, local->entity->name, remote->entity->name,
+ v4l2_dbg(1, debug, sd, "%s: %s --> %s, flags: 0x%x. source_id: 0x%x\n",
+ __func__, remote->entity->name, local->entity->name,
flags, fimc->source_subdev_grp_id);
- switch (local->index) {
- case FIMC_SD_PAD_SINK:
- if (remote_ent_type != MEDIA_ENT_T_V4L2_SUBDEV)
- return -EINVAL;
+ mutex_lock(&fimc->lock);
+ switch (local->index) {
+ case FLITE_SD_PAD_SINK:
+ if (remote_ent_type != MEDIA_ENT_T_V4L2_SUBDEV) {
+ ret = -EINVAL;
+ break;
+ }
if (flags & MEDIA_LNK_FL_ENABLED) {
- if (fimc->source_subdev_grp_id != 0)
- return -EBUSY;
- fimc->source_subdev_grp_id = sd->grp_id;
- return 0;
+ if (fimc->source_subdev_grp_id == 0)
+ fimc->source_subdev_grp_id = sd->grp_id;
+ else
+ ret = -EBUSY;
+ } else {
+ fimc->source_subdev_grp_id = 0;
+ fimc->sensor = NULL;
}
+ break;
- fimc->source_subdev_grp_id = 0;
+ case FLITE_SD_PAD_SOURCE_DMA:
+ if (!(flags & MEDIA_LNK_FL_ENABLED))
+ atomic_set(&fimc->out_path, FIMC_IO_NONE);
+ else if (remote_ent_type == MEDIA_ENT_T_DEVNODE)
+ atomic_set(&fimc->out_path, FIMC_IO_DMA);
+ else
+ ret = -EINVAL;
break;
- case FIMC_SD_PAD_SOURCE:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- fimc->out_path = FIMC_IO_NONE;
- return 0;
- }
- if (remote_ent_type == MEDIA_ENT_T_V4L2_SUBDEV)
- fimc->out_path = FIMC_IO_ISP;
+ case FLITE_SD_PAD_SOURCE_ISP:
+ if (!(flags & MEDIA_LNK_FL_ENABLED))
+ atomic_set(&fimc->out_path, FIMC_IO_NONE);
+ else if (remote_ent_type == MEDIA_ENT_T_V4L2_SUBDEV)
+ atomic_set(&fimc->out_path, FIMC_IO_ISP);
else
- fimc->out_path = FIMC_IO_DMA;
+ ret = -EINVAL;
break;
default:
v4l2_err(sd, "Invalid pad index\n");
- return -EINVAL;
+ ret = -EINVAL;
}
+ mb();
- return 0;
+ mutex_unlock(&fimc->lock);
+ return ret;
}
static const struct media_entity_operations fimc_lite_subdev_media_ops = {
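Editor's note on the hunk above: out_path becomes an atomic_t because link_setup() writes it in process context under fimc->lock while the interrupt handler reads it with no lock held; the writer additionally issues mb() before releasing the mutex. A minimal sketch of that writer/reader split, assuming the fimc_lite structure and FIMC_IO_* values introduced by this patch (illustrative only, not the driver's exact code):

        /* Writer: process context, serialized by fimc->lock. */
        static void flite_set_out_path(struct fimc_lite *fimc, int path)
        {
                mutex_lock(&fimc->lock);
                atomic_set(&fimc->out_path, path);
                mb();                   /* publish the new value before unlocking */
                mutex_unlock(&fimc->lock);
        }

        /* Reader: may run in hard-IRQ context, so no mutex can be taken. */
        static bool flite_out_path_is_dma(struct fimc_lite *fimc)
        {
                return atomic_read(&fimc->out_path) == FIMC_IO_DMA;
        }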
@@ -1070,14 +1120,16 @@ static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
struct flite_frame *source = &fimc->out_frame;
const struct fimc_fmt *ffmt;
- v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %dx%d",
+ v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %dx%d\n",
fmt->pad, mf->code, mf->width, mf->height);
mf->colorspace = V4L2_COLORSPACE_JPEG;
mutex_lock(&fimc->lock);
- if ((fimc->out_path == FIMC_IO_ISP && sd->entity.stream_count > 0) ||
- (fimc->out_path == FIMC_IO_DMA && vb2_is_busy(&fimc->vb_queue))) {
+ if ((atomic_read(&fimc->out_path) == FIMC_IO_ISP &&
+ sd->entity.stream_count > 0) ||
+ (atomic_read(&fimc->out_path) == FIMC_IO_DMA &&
+ vb2_is_busy(&fimc->vb_queue))) {
mutex_unlock(&fimc->lock);
return -EBUSY;
}
@@ -1144,7 +1196,7 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
}
mutex_unlock(&fimc->lock);
- v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d",
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n",
__func__, f->rect.left, f->rect.top, f->rect.width,
f->rect.height, f->f_width, f->f_height);
@@ -1178,7 +1230,7 @@ static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
}
mutex_unlock(&fimc->lock);
- v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d",
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n",
__func__, f->rect.left, f->rect.top, f->rect.width,
f->rect.height, f->f_width, f->f_height);
@@ -1188,25 +1240,47 @@ static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
static int fimc_lite_subdev_s_stream(struct v4l2_subdev *sd, int on)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ unsigned long flags;
+ int ret;
- if (fimc->out_path == FIMC_IO_DMA)
- return -ENOIOCTLCMD;
-
- /* TODO: */
+ /*
+ * Find sensor subdev linked to FIMC-LITE directly or through
+ * MIPI-CSIS. This is required for configurations where FIMC-LITE
+ * is used as a subdev only and feeds data internally to FIMC-IS.
+ * The pipeline links are protected through entity.stream_count
+ * so there is no need to take the media graph mutex here.
+ */
+ fimc->sensor = __find_remote_sensor(&sd->entity);
- return 0;
-}
+ if (atomic_read(&fimc->out_path) != FIMC_IO_ISP)
+ return -ENOIOCTLCMD;
-static int fimc_lite_subdev_s_power(struct v4l2_subdev *sd, int on)
-{
- struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ mutex_lock(&fimc->lock);
+ if (on) {
+ flite_hw_reset(fimc);
+ ret = fimc_lite_hw_init(fimc, true);
+ if (!ret) {
+ spin_lock_irqsave(&fimc->slock, flags);
+ flite_hw_capture_start(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ }
+ } else {
+ set_bit(ST_FLITE_OFF, &fimc->state);
- if (fimc->out_path == FIMC_IO_DMA)
- return -ENOIOCTLCMD;
+ spin_lock_irqsave(&fimc->slock, flags);
+ flite_hw_capture_stop(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
- /* TODO: */
+ ret = wait_event_timeout(fimc->irq_queue,
+ !test_bit(ST_FLITE_OFF, &fimc->state),
+ msecs_to_jiffies(200));
+ if (ret == 0)
+ v4l2_err(sd, "s_stream(0) timeout\n");
+ clear_bit(ST_FLITE_RUN, &fimc->state);
+ }
- return 0;
+ mutex_unlock(&fimc->lock);
+ return ret;
}
static int fimc_lite_log_status(struct v4l2_subdev *sd)
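Editor's note: the s_stream(0) path above is a handshake with the interrupt handler — it sets ST_FLITE_OFF, disables capture, then sleeps on irq_queue for up to 200 ms. The ISR side is only partially visible in this diff; assuming it mirrors the existing DMA capture path, its counterpart is roughly (sketch, not the exact handler code):

        /* In the FIMC-LITE ISR, once the hardware has actually stopped: */
        if (test_and_clear_bit(ST_FLITE_OFF, &fimc->state))
                wake_up(&fimc->irq_queue);      /* releases wait_event_timeout() */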
@@ -1227,7 +1301,7 @@ static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
memset(vfd, 0, sizeof(*vfd));
fimc->fmt = &fimc_lite_formats[0];
- fimc->out_path = FIMC_IO_DMA;
+ atomic_set(&fimc->out_path, FIMC_IO_DMA);
snprintf(vfd->name, sizeof(vfd->name), "fimc-lite.%d.capture",
fimc->index);
@@ -1308,7 +1382,6 @@ static const struct v4l2_subdev_video_ops fimc_lite_subdev_video_ops = {
};
static const struct v4l2_subdev_core_ops fimc_lite_core_ops = {
- .s_power = fimc_lite_subdev_s_power,
.log_status = fimc_lite_log_status,
};
@@ -1347,9 +1420,10 @@ static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)
sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
snprintf(sd->name, sizeof(sd->name), "FIMC-LITE.%d", fimc->index);
- fimc->subdev_pads[FIMC_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- fimc->subdev_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM,
+ fimc->subdev_pads[FLITE_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ fimc->subdev_pads[FLITE_SD_PAD_SOURCE_DMA].flags = MEDIA_PAD_FL_SOURCE;
+ fimc->subdev_pads[FLITE_SD_PAD_SOURCE_ISP].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, FLITE_SD_PADS_NUM,
fimc->subdev_pads, 0);
if (ret)
return ret;
@@ -1516,7 +1590,7 @@ static int fimc_lite_resume(struct device *dev)
INIT_LIST_HEAD(&fimc->active_buf_q);
fimc_pipeline_call(fimc, open, &fimc->pipeline,
&fimc->vfd.entity, false);
- fimc_lite_hw_init(fimc);
+ fimc_lite_hw_init(fimc, atomic_read(&fimc->out_path) == FIMC_IO_ISP);
clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
for (i = 0; i < fimc->reqbufs_count; i++) {
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite.h b/drivers/media/platform/s5p-fimc/fimc-lite.h
index 3081db35c5b0..7085761f8c4b 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite.h
+++ b/drivers/media/platform/s5p-fimc/fimc-lite.h
@@ -45,8 +45,9 @@ enum {
};
#define FLITE_SD_PAD_SINK 0
-#define FLITE_SD_PAD_SOURCE 1
-#define FLITE_SD_PADS_NUM 2
+#define FLITE_SD_PAD_SOURCE_DMA 1
+#define FLITE_SD_PAD_SOURCE_ISP 2
+#define FLITE_SD_PADS_NUM 3
struct flite_variant {
unsigned short max_width;
@@ -104,6 +105,7 @@ struct flite_buffer {
* @subdev: FIMC-LITE subdev
* @vd_pad: media (sink) pad for the capture video node
* @subdev_pads: the subdev media pads
+ * @sensor: sensor subdev attached to FIMC-LITE directly or through MIPI-CSIS
* @ctrl_handler: v4l2 control handler
* @test_pattern: test pattern controls
* @index: FIMC-LITE platform device index
@@ -139,6 +141,7 @@ struct fimc_lite {
struct v4l2_subdev subdev;
struct media_pad vd_pad;
struct media_pad subdev_pads[FLITE_SD_PADS_NUM];
+ struct v4l2_subdev *sensor;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *test_pattern;
u32 index;
@@ -156,7 +159,7 @@ struct fimc_lite {
unsigned long payload[FLITE_MAX_PLANES];
struct flite_frame inp_frame;
struct flite_frame out_frame;
- enum fimc_datapath out_path;
+ atomic_t out_path;
unsigned int source_subdev_grp_id;
unsigned long state;
diff --git a/drivers/media/platform/s5p-fimc/fimc-m2m.c b/drivers/media/platform/s5p-fimc/fimc-m2m.c
index 1d21da4bd24b..f3d535cdd87f 100644
--- a/drivers/media/platform/s5p-fimc/fimc-m2m.c
+++ b/drivers/media/platform/s5p-fimc/fimc-m2m.c
@@ -1,8 +1,8 @@
/*
* Samsung S5P/EXYNOS4 SoC series FIMC (video postprocessor) driver
*
- * Copyright (C) 2012 Samsung Electronics Co., Ltd.
- * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
@@ -160,8 +160,7 @@ static void fimc_device_run(void *priv)
fimc_hw_set_output_addr(fimc, &df->paddr, -1);
fimc_activate_capture(ctx);
- ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP |
- FIMC_SRC_FMT | FIMC_DST_FMT);
+ ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP);
fimc_hw_activate_input_dma(fimc, true);
dma_unlock:
@@ -294,13 +293,14 @@ static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
if (IS_ERR(frame))
return PTR_ERR(frame);
- return fimc_fill_format(frame, f);
+ __fimc_get_format(frame, f);
+ return 0;
}
static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
{
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_variant *variant = fimc->variant;
+ const struct fimc_variant *variant = fimc->variant;
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
struct fimc_fmt *fmt;
u32 max_w, mod_x, mod_y;
@@ -308,8 +308,6 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
if (!IS_M2M(f->type))
return -EINVAL;
- dbg("w: %d, h: %d", pix->width, pix->height);
-
fmt = fimc_find_format(&pix->pixelformat, NULL,
get_m2m_fmt_flags(f->type), 0);
if (WARN(fmt == NULL, "Pixel format lookup failed"))
@@ -349,19 +347,39 @@ static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return fimc_try_fmt_mplane(ctx, f);
}
+static void __set_frame_format(struct fimc_frame *frame, struct fimc_fmt *fmt,
+ struct v4l2_pix_format_mplane *pixm)
+{
+ int i;
+
+ for (i = 0; i < fmt->colplanes; i++) {
+ frame->bytesperline[i] = pixm->plane_fmt[i].bytesperline;
+ frame->payload[i] = pixm->plane_fmt[i].sizeimage;
+ }
+
+ frame->f_width = pixm->width;
+ frame->f_height = pixm->height;
+ frame->o_width = pixm->width;
+ frame->o_height = pixm->height;
+ frame->width = pixm->width;
+ frame->height = pixm->height;
+ frame->offs_h = 0;
+ frame->offs_v = 0;
+ frame->fmt = fmt;
+}
+
static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_fmt *fmt;
struct vb2_queue *vq;
struct fimc_frame *frame;
- struct v4l2_pix_format_mplane *pix;
- int i, ret = 0;
+ int ret;
ret = fimc_try_fmt_mplane(ctx, f);
if (ret)
@@ -379,31 +397,16 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
else
frame = &ctx->d_frame;
- pix = &f->fmt.pix_mp;
- frame->fmt = fimc_find_format(&pix->pixelformat, NULL,
- get_m2m_fmt_flags(f->type), 0);
- if (!frame->fmt)
+ fmt = fimc_find_format(&f->fmt.pix_mp.pixelformat, NULL,
+ get_m2m_fmt_flags(f->type), 0);
+ if (!fmt)
return -EINVAL;
+ __set_frame_format(frame, fmt, &f->fmt.pix_mp);
+
/* Update RGB Alpha control state and value range */
fimc_alpha_ctrl_update(ctx);
- for (i = 0; i < frame->fmt->colplanes; i++) {
- frame->payload[i] =
- (pix->width * pix->height * frame->fmt->depth[i]) / 8;
- }
-
- fimc_fill_frame(frame, f);
-
- ctx->scaler.enabled = 1;
-
- if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- fimc_ctx_state_set(FIMC_PARAMS | FIMC_DST_FMT, ctx);
- else
- fimc_ctx_state_set(FIMC_PARAMS | FIMC_SRC_FMT, ctx);
-
- dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
-
return 0;
}
@@ -411,7 +414,6 @@ static int fimc_m2m_reqbufs(struct file *file, void *fh,
struct v4l2_requestbuffers *reqbufs)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
@@ -419,7 +421,6 @@ static int fimc_m2m_querybuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
@@ -427,7 +428,6 @@ static int fimc_m2m_qbuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
@@ -435,7 +435,6 @@ static int fimc_m2m_dqbuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
@@ -443,7 +442,6 @@ static int fimc_m2m_expbuf(struct file *file, void *fh,
struct v4l2_exportbuffer *eb)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}
@@ -452,15 +450,6 @@ static int fimc_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- /* The source and target color format need to be set */
- if (V4L2_TYPE_IS_OUTPUT(type)) {
- if (!fimc_ctx_state_is_set(FIMC_SRC_FMT, ctx))
- return -EINVAL;
- } else if (!fimc_ctx_state_is_set(FIMC_DST_FMT, ctx)) {
- return -EINVAL;
- }
-
return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
@@ -468,7 +457,6 @@ static int fimc_m2m_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
@@ -576,20 +564,18 @@ static int fimc_m2m_s_crop(struct file *file, void *fh, const struct v4l2_crop *
&ctx->s_frame : &ctx->d_frame;
/* Check to see if scaling ratio is within supported range */
- if (fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) {
- if (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- ret = fimc_check_scaler_ratio(ctx, cr.c.width,
- cr.c.height, ctx->d_frame.width,
- ctx->d_frame.height, ctx->rotation);
- } else {
- ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
- ctx->s_frame.height, cr.c.width,
- cr.c.height, ctx->rotation);
- }
- if (ret) {
- v4l2_err(&fimc->m2m.vfd, "Out of scaler range\n");
- return -EINVAL;
- }
+ if (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = fimc_check_scaler_ratio(ctx, cr.c.width,
+ cr.c.height, ctx->d_frame.width,
+ ctx->d_frame.height, ctx->rotation);
+ } else {
+ ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
+ ctx->s_frame.height, cr.c.width,
+ cr.c.height, ctx->rotation);
+ }
+ if (ret) {
+ v4l2_err(&fimc->m2m.vfd, "Out of scaler range\n");
+ return -EINVAL;
}
f->offs_h = cr.c.left;
@@ -652,6 +638,29 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
return vb2_queue_init(dst_vq);
}
+static int fimc_m2m_set_default_format(struct fimc_ctx *ctx)
+{
+ struct v4l2_pix_format_mplane pixm = {
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ .width = 800,
+ .height = 600,
+ .plane_fmt[0] = {
+ .bytesperline = 800 * 4,
+ .sizeimage = 800 * 4 * 600,
+ },
+ };
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(&pixm.pixelformat, NULL, FMT_FLAGS_M2M, 0);
+ if (!fmt)
+ return -EINVAL;
+
+ __set_frame_format(&ctx->s_frame, fmt, &pixm);
+ __set_frame_format(&ctx->d_frame, fmt, &pixm);
+
+ return 0;
+}
+
static int fimc_m2m_open(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
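Editor's note: the literals in fimc_m2m_set_default_format() are self-consistent for V4L2_PIX_FMT_RGB32 — at 32 bits per pixel, bytesperline = 800 * 4 = 3200 and sizeimage = 3200 * 600 = 1920000, which is exactly what the initializer spells out as 800 * 4 and 800 * 4 * 600. The same numbers can be derived from the format's depth table instead of hard-coding them; a sketch sufficient for packed single-plane formats such as this RGB32 default, assuming the fourcc/colplanes/depth[] fields of struct fimc_fmt used elsewhere in this driver:

        static void fill_default_pixm(struct v4l2_pix_format_mplane *pixm,
                                      const struct fimc_fmt *fmt,
                                      u32 width, u32 height)
        {
                int i;

                pixm->pixelformat = fmt->fourcc;
                pixm->width = width;
                pixm->height = height;
                for (i = 0; i < fmt->colplanes; i++) {
                        u32 bpl = (width * fmt->depth[i]) / 8;

                        pixm->plane_fmt[i].bytesperline = bpl;
                        pixm->plane_fmt[i].sizeimage = bpl * height;
                }
        }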
@@ -696,6 +705,7 @@ static int fimc_m2m_open(struct file *file)
ctx->flags = 0;
ctx->in_path = FIMC_IO_DMA;
ctx->out_path = FIMC_IO_DMA;
+ ctx->scaler.enabled = 1;
ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
if (IS_ERR(ctx->m2m_ctx)) {
@@ -706,9 +716,15 @@ static int fimc_m2m_open(struct file *file)
if (fimc->m2m.refcnt++ == 0)
set_bit(ST_M2M_RUN, &fimc->state);
+ ret = fimc_m2m_set_default_format(ctx);
+ if (ret < 0)
+ goto error_m2m_ctx;
+
mutex_unlock(&fimc->lock);
return 0;
+error_m2m_ctx:
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
error_c:
fimc_ctrls_delete(ctx);
error_fh:
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
index b4a68ecf0ca7..a17fcb2d5d41 100644
--- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
@@ -1,8 +1,8 @@
/*
* S5P/EXYNOS4 SoC series camera host interface media device driver
*
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
@@ -62,16 +62,17 @@ static void fimc_pipeline_prepare(struct fimc_pipeline *p,
sd = media_entity_to_v4l2_subdev(pad->entity);
switch (sd->grp_id) {
- case SENSOR_GROUP_ID:
+ case GRP_ID_FIMC_IS_SENSOR:
+ case GRP_ID_SENSOR:
p->subdevs[IDX_SENSOR] = sd;
break;
- case CSIS_GROUP_ID:
+ case GRP_ID_CSIS:
p->subdevs[IDX_CSIS] = sd;
break;
- case FLITE_GROUP_ID:
+ case GRP_ID_FLITE:
p->subdevs[IDX_FLITE] = sd;
break;
- case FIMC_GROUP_ID:
+ case GRP_ID_FIMC:
/* No need to control FIMC subdev through subdev ops */
break;
default:
@@ -141,7 +142,7 @@ static int fimc_pipeline_s_power(struct fimc_pipeline *p, bool state)
* @me: media entity to start graph walk with
* @prep: true to acquire sensor (and csis) subdevs
*
- * This function must be called with the graph mutex held.
+ * Called with the graph mutex held.
*/
static int __fimc_pipeline_open(struct fimc_pipeline *p,
struct media_entity *me, bool prep)
@@ -161,30 +162,19 @@ static int __fimc_pipeline_open(struct fimc_pipeline *p,
return fimc_pipeline_s_power(p, 1);
}
-static int fimc_pipeline_open(struct fimc_pipeline *p,
- struct media_entity *me, bool prep)
-{
- int ret;
-
- mutex_lock(&me->parent->graph_mutex);
- ret = __fimc_pipeline_open(p, me, prep);
- mutex_unlock(&me->parent->graph_mutex);
-
- return ret;
-}
-
/**
* __fimc_pipeline_close - disable the sensor clock and pipeline power
* @fimc: fimc device terminating the pipeline
*
- * Disable power of all subdevs in the pipeline and turn off the external
- * sensor clock.
- * Called with the graph mutex held.
+ * Disable power of all subdevs and turn the external sensor clock off.
*/
static int __fimc_pipeline_close(struct fimc_pipeline *p)
{
int ret = 0;
+ if (!p || !p->subdevs[IDX_SENSOR])
+ return -EINVAL;
+
if (p->subdevs[IDX_SENSOR]) {
ret = fimc_pipeline_s_power(p, 0);
fimc_md_set_camclk(p->subdevs[IDX_SENSOR], false);
@@ -192,28 +182,12 @@ static int __fimc_pipeline_close(struct fimc_pipeline *p)
return ret == -ENXIO ? 0 : ret;
}
-static int fimc_pipeline_close(struct fimc_pipeline *p)
-{
- struct media_entity *me;
- int ret;
-
- if (!p || !p->subdevs[IDX_SENSOR])
- return -EINVAL;
-
- me = &p->subdevs[IDX_SENSOR]->entity;
- mutex_lock(&me->parent->graph_mutex);
- ret = __fimc_pipeline_close(p);
- mutex_unlock(&me->parent->graph_mutex);
-
- return ret;
-}
-
/**
- * fimc_pipeline_s_stream - invoke s_stream on pipeline subdevs
+ * __fimc_pipeline_s_stream - invoke s_stream on pipeline subdevs
* @pipeline: video pipeline structure
* @on: passed as the s_stream call argument
*/
-static int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
+static int __fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
{
int i, ret;
@@ -235,9 +209,9 @@ static int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
/* Media pipeline operations for the FIMC/FIMC-LITE video device driver */
static const struct fimc_pipeline_ops fimc_pipeline_ops = {
- .open = fimc_pipeline_open,
- .close = fimc_pipeline_close,
- .set_stream = fimc_pipeline_s_stream,
+ .open = __fimc_pipeline_open,
+ .close = __fimc_pipeline_close,
+ .set_stream = __fimc_pipeline_s_stream,
};
/*
@@ -269,7 +243,7 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
return ERR_PTR(-EPROBE_DEFER);
}
v4l2_set_subdev_hostdata(sd, s_info);
- sd->grp_id = SENSOR_GROUP_ID;
+ sd->grp_id = GRP_ID_SENSOR;
v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice %s\n",
s_info->pdata.board_info->type);
@@ -316,7 +290,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
for (i = 0; i < num_clients; i++) {
struct v4l2_subdev *sd;
- fmd->sensor[i].pdata = pdata->isp_info[i];
+ fmd->sensor[i].pdata = pdata->source_info[i];
ret = __fimc_md_set_camclk(fmd, &fmd->sensor[i], true);
if (ret)
break;
@@ -338,138 +312,149 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
}
/*
- * MIPI CSIS and FIMC platform devices registration.
+ * MIPI-CSIS, FIMC and FIMC-LITE platform devices registration.
*/
-static int fimc_register_callback(struct device *dev, void *p)
+
+static int register_fimc_lite_entity(struct fimc_md *fmd,
+ struct fimc_lite *fimc_lite)
{
- struct fimc_dev *fimc = dev_get_drvdata(dev);
struct v4l2_subdev *sd;
- struct fimc_md *fmd = p;
int ret;
- if (fimc == NULL || fimc->id >= FIMC_MAX_DEVS)
- return 0;
+ if (WARN_ON(fimc_lite->index >= FIMC_LITE_MAX_DEVS ||
+ fmd->fimc_lite[fimc_lite->index]))
+ return -EBUSY;
- sd = &fimc->vid_cap.subdev;
- sd->grp_id = FIMC_GROUP_ID;
+ sd = &fimc_lite->subdev;
+ sd->grp_id = GRP_ID_FLITE;
v4l2_set_subdev_hostdata(sd, (void *)&fimc_pipeline_ops);
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
- if (ret) {
- v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n",
- fimc->id, ret);
- return ret;
- }
-
- fmd->fimc[fimc->id] = fimc;
- return 0;
+ if (!ret)
+ fmd->fimc_lite[fimc_lite->index] = fimc_lite;
+ else
+ v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.LITE%d\n",
+ fimc_lite->index);
+ return ret;
}
-static int fimc_lite_register_callback(struct device *dev, void *p)
+static int register_fimc_entity(struct fimc_md *fmd, struct fimc_dev *fimc)
{
- struct fimc_lite *fimc = dev_get_drvdata(dev);
- struct fimc_md *fmd = p;
+ struct v4l2_subdev *sd;
int ret;
- if (fimc == NULL || fimc->index >= FIMC_LITE_MAX_DEVS)
- return 0;
+ if (WARN_ON(fimc->id >= FIMC_MAX_DEVS || fmd->fimc[fimc->id]))
+ return -EBUSY;
- fimc->subdev.grp_id = FLITE_GROUP_ID;
- v4l2_set_subdev_hostdata(&fimc->subdev, (void *)&fimc_pipeline_ops);
+ sd = &fimc->vid_cap.subdev;
+ sd->grp_id = GRP_ID_FIMC;
+ v4l2_set_subdev_hostdata(sd, (void *)&fimc_pipeline_ops);
- ret = v4l2_device_register_subdev(&fmd->v4l2_dev, &fimc->subdev);
- if (ret) {
- v4l2_err(&fmd->v4l2_dev,
- "Failed to register FIMC-LITE.%d (%d)\n",
- fimc->index, ret);
- return ret;
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (!ret) {
+ fmd->fimc[fimc->id] = fimc;
+ fimc->vid_cap.user_subdev_api = fmd->user_subdev_api;
+ } else {
+ v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n",
+ fimc->id, ret);
}
-
- fmd->fimc_lite[fimc->index] = fimc;
- return 0;
+ return ret;
}
-static int csis_register_callback(struct device *dev, void *p)
+static int register_csis_entity(struct fimc_md *fmd,
+ struct platform_device *pdev,
+ struct v4l2_subdev *sd)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct platform_device *pdev;
- struct fimc_md *fmd = p;
+ struct device_node *node = pdev->dev.of_node;
int id, ret;
- if (!sd)
- return 0;
- pdev = v4l2_get_subdevdata(sd);
- if (!pdev || pdev->id < 0 || pdev->id >= CSIS_MAX_ENTITIES)
- return 0;
- v4l2_info(sd, "csis%d sd: %s\n", pdev->id, sd->name);
+ id = node ? of_alias_get_id(node, "csis") : max(0, pdev->id);
+
+ if (WARN_ON(id >= CSIS_MAX_ENTITIES || fmd->csis[id].sd))
+ return -EBUSY;
- id = pdev->id < 0 ? 0 : pdev->id;
- sd->grp_id = CSIS_GROUP_ID;
+ if (WARN_ON(id >= CSIS_MAX_ENTITIES))
+ return 0;
+ sd->grp_id = GRP_ID_CSIS;
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
if (!ret)
fmd->csis[id].sd = sd;
else
v4l2_err(&fmd->v4l2_dev,
- "Failed to register CSIS subdevice: %d\n", ret);
+ "Failed to register MIPI-CSIS.%d (%d)\n", id, ret);
return ret;
}
-/**
- * fimc_md_register_platform_entities - register FIMC and CSIS media entities
- */
-static int fimc_md_register_platform_entities(struct fimc_md *fmd)
+static int fimc_md_register_platform_entity(struct fimc_md *fmd,
+ struct platform_device *pdev,
+ int plat_entity)
{
- struct s5p_platform_fimc *pdata = fmd->pdev->dev.platform_data;
- struct device_driver *driver;
- int ret, i;
-
- driver = driver_find(FIMC_MODULE_NAME, &platform_bus_type);
- if (!driver) {
- v4l2_warn(&fmd->v4l2_dev,
- "%s driver not found, deffering probe\n",
- FIMC_MODULE_NAME);
- return -EPROBE_DEFER;
- }
-
- ret = driver_for_each_device(driver, NULL, fmd,
- fimc_register_callback);
- if (ret)
- return ret;
-
- driver = driver_find(FIMC_LITE_DRV_NAME, &platform_bus_type);
- if (driver && try_module_get(driver->owner)) {
- ret = driver_for_each_device(driver, NULL, fmd,
- fimc_lite_register_callback);
- if (ret)
- return ret;
- module_put(driver->owner);
- }
- /*
- * Check if there is any sensor on the MIPI-CSI2 bus and
- * if not skip the s5p-csis module loading.
- */
- if (pdata == NULL)
- return 0;
- for (i = 0; i < pdata->num_clients; i++) {
- if (pdata->isp_info[i].bus_type == FIMC_MIPI_CSI2) {
- ret = 1;
+ struct device *dev = &pdev->dev;
+ int ret = -EPROBE_DEFER;
+ void *drvdata;
+
+ /* Lock to ensure dev->driver won't change. */
+ device_lock(dev);
+
+ if (!dev->driver || !try_module_get(dev->driver->owner))
+ goto dev_unlock;
+
+ drvdata = dev_get_drvdata(dev);
+ /* Some subdev didn't probe successfully if drvdata is NULL */
+ if (drvdata) {
+ switch (plat_entity) {
+ case IDX_FIMC:
+ ret = register_fimc_entity(fmd, drvdata);
break;
+ case IDX_FLITE:
+ ret = register_fimc_lite_entity(fmd, drvdata);
+ break;
+ case IDX_CSIS:
+ ret = register_csis_entity(fmd, pdev, drvdata);
+ break;
+ default:
+ ret = -ENODEV;
}
}
- if (!ret)
- return 0;
- driver = driver_find(CSIS_DRIVER_NAME, &platform_bus_type);
- if (!driver || !try_module_get(driver->owner)) {
- v4l2_warn(&fmd->v4l2_dev,
- "%s driver not found, deffering probe\n",
- CSIS_DRIVER_NAME);
- return -EPROBE_DEFER;
+ module_put(dev->driver->owner);
+dev_unlock:
+ device_unlock(dev);
+ if (ret == -EPROBE_DEFER)
+ dev_info(&fmd->pdev->dev, "deferring %s device registration\n",
+ dev_name(dev));
+ else if (ret < 0)
+ dev_err(&fmd->pdev->dev, "%s device registration failed (%d)\n",
+ dev_name(dev), ret);
+ return ret;
+}
+
+static int fimc_md_pdev_match(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int plat_entity = -1;
+ int ret;
+ char *p;
+
+ if (!get_device(dev))
+ return -ENODEV;
+
+ if (!strcmp(pdev->name, CSIS_DRIVER_NAME)) {
+ plat_entity = IDX_CSIS;
+ } else if (!strcmp(pdev->name, FIMC_LITE_DRV_NAME)) {
+ plat_entity = IDX_FLITE;
+ } else {
+ p = strstr(pdev->name, "fimc");
+ if (p && *(p + 4) == 0)
+ plat_entity = IDX_FIMC;
}
- return driver_for_each_device(driver, NULL, fmd,
- csis_register_callback);
+ if (plat_entity >= 0)
+ ret = fimc_md_register_platform_entity(data, pdev,
+ plat_entity);
+ put_device(dev);
+ return 0;
}
static void fimc_md_unregister_entities(struct fimc_md *fmd)
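Editor's note: fimc_md_pdev_match() classifies platform devices purely by name — exact strcmp() matches for the MIPI-CSIS and FIMC-LITE driver names, plus a suffix test for plain FIMC devices: a name is treated as IDX_FIMC only when its first "fimc" substring is also the end of the string. The suffix rule in isolation (hypothetical helper and sample names, for illustration only):

        /* True when the first "fimc" in name sits at the very end of it. */
        static bool name_ends_with_fimc(const char *name)
        {
                const char *p = strstr(name, "fimc");

                return p && p[4] == '\0';
        }

        /* e.g. "exynos4-fimc"     -> true, registered as IDX_FIMC         */
        /*      "exynos-fimc-lite" -> false here; such names are caught    */
        /*      earlier by the FIMC_LITE_DRV_NAME comparison               */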
@@ -487,7 +472,7 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
if (fmd->fimc_lite[i] == NULL)
continue;
v4l2_device_unregister_subdev(&fmd->fimc_lite[i]->subdev);
- fmd->fimc[i]->pipeline_ops = NULL;
+ fmd->fimc_lite[i]->pipeline_ops = NULL;
fmd->fimc_lite[i] = NULL;
}
for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
@@ -503,6 +488,7 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
fimc_md_unregister_sensor(fmd->sensor[i].subdev);
fmd->sensor[i].subdev = NULL;
}
+ v4l2_info(&fmd->v4l2_dev, "Unregistered all entities\n");
}
/**
@@ -518,7 +504,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
struct v4l2_subdev *sensor,
int pad, int link_mask)
{
- struct fimc_sensor_info *s_info;
+ struct fimc_sensor_info *s_info = NULL;
struct media_entity *sink;
unsigned int flags = 0;
int ret, i;
@@ -582,7 +568,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
if (ret)
break;
- v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]",
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]\n",
source->name, flags ? '=' : '-', sink->name);
}
return 0;
@@ -602,7 +588,7 @@ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
source = &fimc->subdev.entity;
sink = &fimc->vfd.entity;
/* FIMC-LITE's subdev and video node */
- ret = media_entity_create_link(source, FIMC_SD_PAD_SOURCE,
+ ret = media_entity_create_link(source, FLITE_SD_PAD_SOURCE_DMA,
sink, 0, flags);
if (ret)
break;
@@ -626,9 +612,9 @@ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
*/
static int fimc_md_create_links(struct fimc_md *fmd)
{
- struct v4l2_subdev *csi_sensors[2] = { NULL };
+ struct v4l2_subdev *csi_sensors[CSIS_MAX_ENTITIES] = { NULL };
struct v4l2_subdev *sensor, *csis;
- struct s5p_fimc_isp_info *pdata;
+ struct fimc_source_info *pdata;
struct fimc_sensor_info *s_info;
struct media_entity *source, *sink;
int i, pad, fimc_id = 0, ret = 0;
@@ -646,8 +632,8 @@ static int fimc_md_create_links(struct fimc_md *fmd)
source = NULL;
pdata = &s_info->pdata;
- switch (pdata->bus_type) {
- case FIMC_MIPI_CSI2:
+ switch (pdata->sensor_bus_type) {
+ case FIMC_BUS_TYPE_MIPI_CSI2:
if (WARN(pdata->mux_id >= CSIS_MAX_ENTITIES,
"Wrong CSI channel id: %d\n", pdata->mux_id))
return -EINVAL;
@@ -658,28 +644,29 @@ static int fimc_md_create_links(struct fimc_md *fmd)
"but s5p-csis module is not loaded!\n"))
return -EINVAL;
- ret = media_entity_create_link(&sensor->entity, 0,
+ pad = sensor->entity.num_pads - 1;
+ ret = media_entity_create_link(&sensor->entity, pad,
&csis->entity, CSIS_PAD_SINK,
MEDIA_LNK_FL_IMMUTABLE |
MEDIA_LNK_FL_ENABLED);
if (ret)
return ret;
- v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]",
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]\n",
sensor->entity.name, csis->entity.name);
source = NULL;
csi_sensors[pdata->mux_id] = sensor;
break;
- case FIMC_ITU_601...FIMC_ITU_656:
+ case FIMC_BUS_TYPE_ITU_601...FIMC_BUS_TYPE_ITU_656:
source = &sensor->entity;
pad = 0;
break;
default:
v4l2_err(&fmd->v4l2_dev, "Wrong bus_type: %x\n",
- pdata->bus_type);
+ pdata->sensor_bus_type);
return -EINVAL;
}
if (source == NULL)
@@ -690,7 +677,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
pad, link_mask);
}
- for (i = 0; i < ARRAY_SIZE(fmd->csis); i++) {
+ for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
if (fmd->csis[i].sd == NULL)
continue;
source = &fmd->csis[i].sd->entity;
@@ -721,42 +708,61 @@ static int fimc_md_create_links(struct fimc_md *fmd)
/*
* The peripheral sensor clock management.
*/
+static void fimc_md_put_clocks(struct fimc_md *fmd)
+{
+ int i = FIMC_MAX_CAMCLKS;
+
+ while (--i >= 0) {
+ if (IS_ERR(fmd->camclk[i].clock))
+ continue;
+ clk_unprepare(fmd->camclk[i].clock);
+ clk_put(fmd->camclk[i].clock);
+ fmd->camclk[i].clock = ERR_PTR(-EINVAL);
+ }
+}
+
static int fimc_md_get_clocks(struct fimc_md *fmd)
{
+ struct device *dev = NULL;
char clk_name[32];
struct clk *clock;
- int i;
+ int ret, i;
+
+ for (i = 0; i < FIMC_MAX_CAMCLKS; i++)
+ fmd->camclk[i].clock = ERR_PTR(-EINVAL);
+
+ if (fmd->pdev->dev.of_node)
+ dev = &fmd->pdev->dev;
for (i = 0; i < FIMC_MAX_CAMCLKS; i++) {
snprintf(clk_name, sizeof(clk_name), "sclk_cam%u", i);
- clock = clk_get(NULL, clk_name);
- if (IS_ERR_OR_NULL(clock)) {
- v4l2_err(&fmd->v4l2_dev, "Failed to get clock: %s",
- clk_name);
- return -ENXIO;
+ clock = clk_get(dev, clk_name);
+
+ if (IS_ERR(clock)) {
+ dev_err(&fmd->pdev->dev, "Failed to get clock: %s\n",
+ clk_name);
+ ret = PTR_ERR(clock);
+ break;
+ }
+ ret = clk_prepare(clock);
+ if (ret < 0) {
+ clk_put(clock);
+ fmd->camclk[i].clock = ERR_PTR(-EINVAL);
+ break;
}
fmd->camclk[i].clock = clock;
}
- return 0;
-}
-
-static void fimc_md_put_clocks(struct fimc_md *fmd)
-{
- int i = FIMC_MAX_CAMCLKS;
+ if (ret)
+ fimc_md_put_clocks(fmd);
- while (--i >= 0) {
- if (IS_ERR_OR_NULL(fmd->camclk[i].clock))
- continue;
- clk_put(fmd->camclk[i].clock);
- fmd->camclk[i].clock = NULL;
- }
+ return ret;
}
static int __fimc_md_set_camclk(struct fimc_md *fmd,
struct fimc_sensor_info *s_info,
bool on)
{
- struct s5p_fimc_isp_info *pdata = &s_info->pdata;
+ struct fimc_source_info *pdata = &s_info->pdata;
struct fimc_camclk_info *camclk;
int ret = 0;
@@ -820,7 +826,9 @@ static int fimc_md_link_notify(struct media_pad *source,
struct fimc_dev *fimc = NULL;
struct fimc_pipeline *pipeline;
struct v4l2_subdev *sd;
+ struct mutex *lock;
int ret = 0;
+ int ref_count;
if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
return 0;
@@ -828,28 +836,33 @@ static int fimc_md_link_notify(struct media_pad *source,
sd = media_entity_to_v4l2_subdev(sink->entity);
switch (sd->grp_id) {
- case FLITE_GROUP_ID:
+ case GRP_ID_FLITE:
fimc_lite = v4l2_get_subdevdata(sd);
+ if (WARN_ON(fimc_lite == NULL))
+ return 0;
pipeline = &fimc_lite->pipeline;
+ lock = &fimc_lite->lock;
break;
- case FIMC_GROUP_ID:
+ case GRP_ID_FIMC:
fimc = v4l2_get_subdevdata(sd);
+ if (WARN_ON(fimc == NULL))
+ return 0;
pipeline = &fimc->pipeline;
+ lock = &fimc->lock;
break;
default:
return 0;
}
if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ int i;
+ mutex_lock(lock);
ret = __fimc_pipeline_close(pipeline);
- pipeline->subdevs[IDX_SENSOR] = NULL;
- pipeline->subdevs[IDX_CSIS] = NULL;
-
- if (fimc) {
- mutex_lock(&fimc->lock);
+ for (i = 0; i < IDX_MAX; i++)
+ pipeline->subdevs[i] = NULL;
+ if (fimc)
fimc_ctrls_delete(fimc->vid_cap.ctx);
- mutex_unlock(&fimc->lock);
- }
+ mutex_unlock(lock);
return ret;
}
/*
@@ -857,23 +870,15 @@ static int fimc_md_link_notify(struct media_pad *source,
* pipeline is already in use, i.e. its video node is opened.
* Recreate the controls destroyed during the link deactivation.
*/
- if (fimc) {
- mutex_lock(&fimc->lock);
- if (fimc->vid_cap.refcnt > 0) {
- ret = __fimc_pipeline_open(pipeline,
- source->entity, true);
- if (!ret)
- ret = fimc_capture_ctrls_create(fimc);
- }
- mutex_unlock(&fimc->lock);
- } else {
- mutex_lock(&fimc_lite->lock);
- if (fimc_lite->ref_count > 0) {
- ret = __fimc_pipeline_open(pipeline,
- source->entity, true);
- }
- mutex_unlock(&fimc_lite->lock);
- }
+ mutex_lock(lock);
+
+ ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
+ if (ref_count > 0)
+ ret = __fimc_pipeline_open(pipeline, source->entity, true);
+ if (!ret && fimc)
+ ret = fimc_capture_ctrls_create(fimc);
+
+ mutex_unlock(lock);
return ret ? -EPIPE : ret;
}
@@ -965,7 +970,8 @@ static int fimc_md_probe(struct platform_device *pdev)
/* Protect the media graph while we're registering entities */
mutex_lock(&fmd->media_dev.graph_mutex);
- ret = fimc_md_register_platform_entities(fmd);
+ ret = bus_for_each_dev(&platform_bus_type, NULL, fmd,
+ fimc_md_pdev_match);
if (ret)
goto err_unlock;
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.h b/drivers/media/platform/s5p-fimc/fimc-mdevice.h
index 2d8d41d82620..06b0d8276fd2 100644
--- a/drivers/media/platform/s5p-fimc/fimc-mdevice.h
+++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.h
@@ -22,11 +22,13 @@
#include "mipi-csis.h"
/* Group IDs of sensor, MIPI-CSIS, FIMC-LITE and the writeback subdevs. */
-#define SENSOR_GROUP_ID (1 << 8)
-#define CSIS_GROUP_ID (1 << 9)
-#define WRITEBACK_GROUP_ID (1 << 10)
-#define FIMC_GROUP_ID (1 << 11)
-#define FLITE_GROUP_ID (1 << 12)
+#define GRP_ID_SENSOR (1 << 8)
+#define GRP_ID_FIMC_IS_SENSOR (1 << 9)
+#define GRP_ID_WRITEBACK (1 << 10)
+#define GRP_ID_CSIS (1 << 11)
+#define GRP_ID_FIMC (1 << 12)
+#define GRP_ID_FLITE (1 << 13)
+#define GRP_ID_FIMC_IS (1 << 14)
#define FIMC_MAX_SENSORS 8
#define FIMC_MAX_CAMCLKS 2
@@ -51,7 +53,7 @@ struct fimc_camclk_info {
* This data structure applies to image sensor and the writeback subdevs.
*/
struct fimc_sensor_info {
- struct s5p_fimc_isp_info pdata;
+ struct fimc_source_info pdata;
struct v4l2_subdev *subdev;
struct fimc_dev *host;
};
diff --git a/drivers/media/platform/s5p-fimc/fimc-reg.c b/drivers/media/platform/s5p-fimc/fimc-reg.c
index 2c9d0c06c9e8..50b97c75b956 100644
--- a/drivers/media/platform/s5p-fimc/fimc-reg.c
+++ b/drivers/media/platform/s5p-fimc/fimc-reg.c
@@ -44,9 +44,9 @@ static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
u32 flip = FIMC_REG_MSCTRL_FLIP_NORMAL;
if (ctx->hflip)
- flip = FIMC_REG_MSCTRL_FLIP_X_MIRROR;
- if (ctx->vflip)
flip = FIMC_REG_MSCTRL_FLIP_Y_MIRROR;
+ if (ctx->vflip)
+ flip = FIMC_REG_MSCTRL_FLIP_X_MIRROR;
if (ctx->rotation <= 90)
return flip;
@@ -59,9 +59,9 @@ static u32 fimc_hw_get_target_flip(struct fimc_ctx *ctx)
u32 flip = FIMC_REG_CITRGFMT_FLIP_NORMAL;
if (ctx->hflip)
- flip |= FIMC_REG_CITRGFMT_FLIP_X_MIRROR;
- if (ctx->vflip)
flip |= FIMC_REG_CITRGFMT_FLIP_Y_MIRROR;
+ if (ctx->vflip)
+ flip |= FIMC_REG_CITRGFMT_FLIP_X_MIRROR;
if (ctx->rotation <= 90)
return flip;
@@ -312,7 +312,7 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- struct fimc_variant *variant = dev->variant;
+ const struct fimc_variant *variant = dev->variant;
struct fimc_scaler *sc = &ctx->scaler;
u32 cfg;
@@ -344,30 +344,31 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
}
}
-void fimc_hw_en_capture(struct fimc_ctx *ctx)
+void fimc_hw_enable_capture(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
+ u32 cfg;
- u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
-
- if (ctx->out_path == FIMC_IO_DMA) {
- /* one shot mode */
- cfg |= FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE |
- FIMC_REG_CIIMGCPT_IMGCPTEN;
- } else {
- /* Continuous frame capture mode (freerun). */
- cfg &= ~(FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE |
- FIMC_REG_CIIMGCPT_CPT_FRMOD_CNT);
- cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN;
- }
+ cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
+ cfg |= FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE;
if (ctx->scaler.enabled)
cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN_SC;
+ else
+ cfg &= ~FIMC_REG_CIIMGCPT_IMGCPTEN_SC;
cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN;
writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
}
+void fimc_hw_disable_capture(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
+ cfg &= ~(FIMC_REG_CIIMGCPT_IMGCPTEN |
+ FIMC_REG_CIIMGCPT_IMGCPTEN_SC);
+ writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
+}
+
void fimc_hw_set_effect(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
@@ -553,7 +554,7 @@ void fimc_hw_set_output_addr(struct fimc_dev *dev,
}
int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam)
+ struct fimc_source_info *cam)
{
u32 cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
@@ -595,14 +596,15 @@ static const struct mbus_pixfmt_desc pix_desc[] = {
};
int fimc_hw_set_camera_source(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam)
+ struct fimc_source_info *source)
{
struct fimc_frame *f = &fimc->vid_cap.ctx->s_frame;
- u32 cfg = 0;
- u32 bus_width;
+ u32 bus_width, cfg = 0;
int i;
- if (cam->bus_type == FIMC_ITU_601 || cam->bus_type == FIMC_ITU_656) {
+ switch (source->fimc_bus_type) {
+ case FIMC_BUS_TYPE_ITU_601:
+ case FIMC_BUS_TYPE_ITU_656:
for (i = 0; i < ARRAY_SIZE(pix_desc); i++) {
if (fimc->vid_cap.mf.code == pix_desc[i].pixelcode) {
cfg = pix_desc[i].cisrcfmt;
@@ -618,15 +620,17 @@ int fimc_hw_set_camera_source(struct fimc_dev *fimc,
return -EINVAL;
}
- if (cam->bus_type == FIMC_ITU_601) {
+ if (source->fimc_bus_type == FIMC_BUS_TYPE_ITU_601) {
if (bus_width == 8)
cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
else if (bus_width == 16)
cfg |= FIMC_REG_CISRCFMT_ITU601_16BIT;
} /* else defaults to ITU-R BT.656 8-bit */
- } else if (cam->bus_type == FIMC_MIPI_CSI2) {
+ break;
+ case FIMC_BUS_TYPE_MIPI_CSI2:
if (fimc_fmt_is_user_defined(f->fmt->color))
cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+ break;
}
cfg |= (f->o_width << 16) | f->o_height;
@@ -654,7 +658,7 @@ void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
}
int fimc_hw_set_camera_type(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam)
+ struct fimc_source_info *source)
{
u32 cfg, tmp;
struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
@@ -667,11 +671,11 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
FIMC_REG_CIGCTRL_SELCAM_MIPI | FIMC_REG_CIGCTRL_CAMIF_SELWB |
FIMC_REG_CIGCTRL_SELCAM_MIPI_A | FIMC_REG_CIGCTRL_CAM_JPEG);
- switch (cam->bus_type) {
- case FIMC_MIPI_CSI2:
+ switch (source->fimc_bus_type) {
+ case FIMC_BUS_TYPE_MIPI_CSI2:
cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI;
- if (cam->mux_id == 0)
+ if (source->mux_id == 0)
cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI_A;
/* TODO: add remaining supported formats. */
@@ -694,15 +698,16 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
writel(tmp, fimc->regs + FIMC_REG_CSIIMGFMT);
break;
- case FIMC_ITU_601...FIMC_ITU_656:
- if (cam->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
+ case FIMC_BUS_TYPE_ITU_601...FIMC_BUS_TYPE_ITU_656:
+ if (source->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
cfg |= FIMC_REG_CIGCTRL_SELCAM_ITU_A;
break;
- case FIMC_LCD_WB:
+ case FIMC_BUS_TYPE_LCD_WRITEBACK_A:
cfg |= FIMC_REG_CIGCTRL_CAMIF_SELWB;
break;
default:
- v4l2_err(&vid_cap->vfd, "Invalid camera bus type selected\n");
+ v4l2_err(&vid_cap->vfd, "Invalid FIMC bus type selected: %d\n",
+ source->fimc_bus_type);
return -EINVAL;
}
writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
@@ -737,13 +742,6 @@ void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on)
writel(cfg, dev->regs + FIMC_REG_MSCTRL);
}
-void fimc_hw_dis_capture(struct fimc_dev *dev)
-{
- u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
- cfg &= ~(FIMC_REG_CIIMGCPT_IMGCPTEN | FIMC_REG_CIIMGCPT_IMGCPTEN_SC);
- writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
-}
-
/* Return an index to the buffer actually being written. */
s32 fimc_hw_get_frame_index(struct fimc_dev *dev)
{
@@ -776,13 +774,13 @@ s32 fimc_hw_get_prev_frame_index(struct fimc_dev *dev)
void fimc_activate_capture(struct fimc_ctx *ctx)
{
fimc_hw_enable_scaler(ctx->fimc_dev, ctx->scaler.enabled);
- fimc_hw_en_capture(ctx);
+ fimc_hw_enable_capture(ctx);
}
void fimc_deactivate_capture(struct fimc_dev *fimc)
{
fimc_hw_en_lastirq(fimc, true);
- fimc_hw_dis_capture(fimc);
+ fimc_hw_disable_capture(fimc);
fimc_hw_enable_scaler(fimc, false);
fimc_hw_en_lastirq(fimc, false);
}
diff --git a/drivers/media/platform/s5p-fimc/fimc-reg.h b/drivers/media/platform/s5p-fimc/fimc-reg.h
index b6abfc7b72ac..1a40df6d1a80 100644
--- a/drivers/media/platform/s5p-fimc/fimc-reg.h
+++ b/drivers/media/platform/s5p-fimc/fimc-reg.h
@@ -287,7 +287,7 @@ void fimc_hw_en_lastirq(struct fimc_dev *fimc, int enable);
void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
void fimc_hw_set_mainscaler(struct fimc_ctx *ctx);
-void fimc_hw_en_capture(struct fimc_ctx *ctx);
+void fimc_hw_enable_capture(struct fimc_ctx *ctx);
void fimc_hw_set_effect(struct fimc_ctx *ctx);
void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx);
void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
@@ -297,16 +297,16 @@ void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *paddr);
void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *paddr,
int index);
int fimc_hw_set_camera_source(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam);
+ struct fimc_source_info *cam);
void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam);
+ struct fimc_source_info *cam);
int fimc_hw_set_camera_type(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam);
+ struct fimc_source_info *cam);
void fimc_hw_clear_irq(struct fimc_dev *dev);
void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on);
void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on);
-void fimc_hw_dis_capture(struct fimc_dev *dev);
+void fimc_hw_disable_capture(struct fimc_dev *dev);
s32 fimc_hw_get_frame_index(struct fimc_dev *dev);
s32 fimc_hw_get_prev_frame_index(struct fimc_dev *dev);
void fimc_activate_capture(struct fimc_ctx *ctx);
diff --git a/drivers/media/platform/s5p-fimc/mipi-csis.c b/drivers/media/platform/s5p-fimc/mipi-csis.c
index 7abae012f55e..981863d05aaa 100644
--- a/drivers/media/platform/s5p-fimc/mipi-csis.c
+++ b/drivers/media/platform/s5p-fimc/mipi-csis.c
@@ -187,7 +187,7 @@ struct csis_state {
const struct csis_pix_format *csis_fmt;
struct v4l2_mbus_framefmt format;
- struct spinlock slock;
+ spinlock_t slock;
struct csis_pktbuf pkt_buf;
struct s5pcsis_event events[S5PCSIS_NUM_EVENTS];
};
@@ -220,6 +220,18 @@ static const struct csis_pix_format s5pcsis_formats[] = {
.code = V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8,
.fmt_reg = S5PCSIS_CFG_FMT_USER(1),
.data_alignment = 32,
+ }, {
+ .code = V4L2_MBUS_FMT_SGRBG8_1X8,
+ .fmt_reg = S5PCSIS_CFG_FMT_RAW8,
+ .data_alignment = 24,
+ }, {
+ .code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .fmt_reg = S5PCSIS_CFG_FMT_RAW10,
+ .data_alignment = 24,
+ }, {
+ .code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ .fmt_reg = S5PCSIS_CFG_FMT_RAW12,
+ .data_alignment = 24,
}
};
@@ -261,7 +273,8 @@ static void s5pcsis_reset(struct csis_state *state)
static void s5pcsis_system_enable(struct csis_state *state, int on)
{
- u32 val;
+ struct s5p_platform_mipi_csis *pdata = state->pdev->dev.platform_data;
+ u32 val, mask;
val = s5pcsis_read(state, S5PCSIS_CTRL);
if (on)
@@ -271,10 +284,11 @@ static void s5pcsis_system_enable(struct csis_state *state, int on)
s5pcsis_write(state, S5PCSIS_CTRL, val);
val = s5pcsis_read(state, S5PCSIS_DPHYCTRL);
- if (on)
- val |= S5PCSIS_DPHYCTRL_ENABLE;
- else
- val &= ~S5PCSIS_DPHYCTRL_ENABLE;
+ val &= ~S5PCSIS_DPHYCTRL_ENABLE;
+ if (on) {
+ mask = (1 << (pdata->lanes + 1)) - 1;
+ val |= (mask & S5PCSIS_DPHYCTRL_ENABLE);
+ }
s5pcsis_write(state, S5PCSIS_DPHYCTRL, val);
}
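Editor's note: the D-PHY enable value written above is derived from the configured lane count — (1 << (pdata->lanes + 1)) - 1 sets one enable bit for the clock lane plus one per data lane, and the mask against S5PCSIS_DPHYCTRL_ENABLE keeps it within what the register supports. A quick check of the arithmetic (comment-only sketch):

        /*
         * pdata->lanes = 1: (1 << 2) - 1 = 0x3  -> clock lane + 1 data lane
         * pdata->lanes = 2: (1 << 3) - 1 = 0x7  -> clock lane + 2 data lanes
         * pdata->lanes = 4: (1 << 5) - 1 = 0x1f -> clock lane + 4 data lanes
         */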
@@ -338,11 +352,11 @@ static void s5pcsis_clk_put(struct csis_state *state)
int i;
for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
- if (IS_ERR_OR_NULL(state->clock[i]))
+ if (IS_ERR(state->clock[i]))
continue;
clk_unprepare(state->clock[i]);
clk_put(state->clock[i]);
- state->clock[i] = NULL;
+ state->clock[i] = ERR_PTR(-EINVAL);
}
}
@@ -351,14 +365,19 @@ static int s5pcsis_clk_get(struct csis_state *state)
struct device *dev = &state->pdev->dev;
int i, ret;
+ for (i = 0; i < NUM_CSIS_CLOCKS; i++)
+ state->clock[i] = ERR_PTR(-EINVAL);
+
for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
state->clock[i] = clk_get(dev, csi_clock_name[i]);
- if (IS_ERR(state->clock[i]))
+ if (IS_ERR(state->clock[i])) {
+ ret = PTR_ERR(state->clock[i]);
goto err;
+ }
ret = clk_prepare(state->clock[i]);
if (ret < 0) {
clk_put(state->clock[i]);
- state->clock[i] = NULL;
+ state->clock[i] = ERR_PTR(-EINVAL);
goto err;
}
}
@@ -366,7 +385,31 @@ static int s5pcsis_clk_get(struct csis_state *state)
err:
s5pcsis_clk_put(state);
dev_err(dev, "failed to get clock: %s\n", csi_clock_name[i]);
- return -ENXIO;
+ return ret;
+}
+
+static void dump_regs(struct csis_state *state, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { 0x00, "CTRL" },
+ { 0x04, "DPHYCTRL" },
+ { 0x08, "CONFIG" },
+ { 0x0c, "DPHYSTS" },
+ { 0x10, "INTMSK" },
+ { 0x2c, "RESOL" },
+ { 0x38, "SDW_CONFIG" },
+ };
+ u32 i;
+
+ v4l2_info(&state->sd, "--- %s ---\n", label);
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = s5pcsis_read(state, registers[i].offset);
+ v4l2_info(&state->sd, "%10s: 0x%08x\n", registers[i].name, cfg);
+ }
}
static void s5pcsis_start_stream(struct csis_state *state)
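Editor's note: dump_regs() is a debug aid reachable only through the subdev's .log_status handler, and only when the debug module parameter is set and the device is powered. From user space this typically corresponds to issuing VIDIOC_LOG_STATUS on a video or subdev node of the pipeline; a minimal user-space sketch (the device path is just an example):

        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <linux/videodev2.h>

        int main(void)
        {
                int fd = open("/dev/video0", O_RDWR);   /* example node */

                if (fd >= 0)
                        ioctl(fd, VIDIOC_LOG_STATUS);   /* argument-less ioctl */
                return 0;
        }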
@@ -401,12 +444,12 @@ static void s5pcsis_log_counters(struct csis_state *state, bool non_errors)
spin_lock_irqsave(&state->slock, flags);
- for (i--; i >= 0; i--)
- if (state->events[i].counter >= 0)
+ for (i--; i >= 0; i--) {
+ if (state->events[i].counter > 0 || debug)
v4l2_info(&state->sd, "%s events: %d\n",
state->events[i].name,
state->events[i].counter);
-
+ }
spin_unlock_irqrestore(&state->slock, flags);
}
@@ -569,7 +612,11 @@ static int s5pcsis_log_status(struct v4l2_subdev *sd)
{
struct csis_state *state = sd_to_csis_state(sd);
+ mutex_lock(&state->lock);
s5pcsis_log_counters(state, true);
+ if (debug && (state->flags & ST_POWERED))
+ dump_regs(state, __func__);
+ mutex_unlock(&state->lock);
return 0;
}
@@ -699,26 +746,32 @@ static int s5pcsis_probe(struct platform_device *pdev)
for (i = 0; i < CSIS_NUM_SUPPLIES; i++)
state->supplies[i].supply = csis_supply_name[i];
- ret = regulator_bulk_get(&pdev->dev, CSIS_NUM_SUPPLIES,
+ ret = devm_regulator_bulk_get(&pdev->dev, CSIS_NUM_SUPPLIES,
state->supplies);
if (ret)
return ret;
ret = s5pcsis_clk_get(state);
- if (ret)
- goto e_clkput;
+ if (ret < 0)
+ return ret;
- clk_enable(state->clock[CSIS_CLK_MUX]);
if (pdata->clk_rate)
- clk_set_rate(state->clock[CSIS_CLK_MUX], pdata->clk_rate);
+ ret = clk_set_rate(state->clock[CSIS_CLK_MUX],
+ pdata->clk_rate);
else
dev_WARN(&pdev->dev, "No clock frequency specified!\n");
+ if (ret < 0)
+ goto e_clkput;
+
+ ret = clk_enable(state->clock[CSIS_CLK_MUX]);
+ if (ret < 0)
+ goto e_clkput;
ret = devm_request_irq(&pdev->dev, state->irq, s5pcsis_irq_handler,
0, dev_name(&pdev->dev), state);
if (ret) {
dev_err(&pdev->dev, "Interrupt request failed\n");
- goto e_regput;
+ goto e_clkdis;
}
v4l2_subdev_init(&state->sd, &s5pcsis_subdev_ops);
@@ -736,7 +789,7 @@ static int s5pcsis_probe(struct platform_device *pdev)
ret = media_entity_init(&state->sd.entity,
CSIS_PADS_NUM, state->pads, 0);
if (ret < 0)
- goto e_clkput;
+ goto e_clkdis;
/* This allows to retrieve the platform device id by the host driver */
v4l2_set_subdevdata(&state->sd, pdev);
@@ -749,10 +802,9 @@ static int s5pcsis_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
return 0;
-e_regput:
- regulator_bulk_free(CSIS_NUM_SUPPLIES, state->supplies);
-e_clkput:
+e_clkdis:
clk_disable(state->clock[CSIS_CLK_MUX]);
+e_clkput:
s5pcsis_clk_put(state);
return ret;
}
@@ -859,7 +911,6 @@ static int s5pcsis_remove(struct platform_device *pdev)
clk_disable(state->clock[CSIS_CLK_MUX]);
pm_runtime_set_suspended(&pdev->dev);
s5pcsis_clk_put(state);
- regulator_bulk_free(CSIS_NUM_SUPPLIES, state->supplies);
media_entity_cleanup(&state->sd.entity);
diff --git a/drivers/media/platform/s5p-g2d/g2d-hw.c b/drivers/media/platform/s5p-g2d/g2d-hw.c
index 5b86cbe408e2..e87bd93811d4 100644
--- a/drivers/media/platform/s5p-g2d/g2d-hw.c
+++ b/drivers/media/platform/s5p-g2d/g2d-hw.c
@@ -28,6 +28,7 @@ void g2d_set_src_size(struct g2d_dev *d, struct g2d_frame *f)
{
u32 n;
+ w(0, SRC_SELECT_REG);
w(f->stride & 0xFFFF, SRC_STRIDE_REG);
n = f->o_height & 0xFFF;
@@ -52,6 +53,7 @@ void g2d_set_dst_size(struct g2d_dev *d, struct g2d_frame *f)
{
u32 n;
+ w(0, DST_SELECT_REG);
w(f->stride & 0xFFFF, DST_STRIDE_REG);
n = f->o_height & 0xFFF;
@@ -82,10 +84,14 @@ void g2d_set_flip(struct g2d_dev *d, u32 r)
w(r, SRC_MSK_DIRECT_REG);
}
-u32 g2d_cmd_stretch(u32 e)
+void g2d_set_v41_stretch(struct g2d_dev *d, struct g2d_frame *src,
+ struct g2d_frame *dst)
{
- e &= 1;
- return e << 4;
+ w(DEFAULT_SCALE_MODE, SRC_SCALE_CTRL_REG);
+
+ /* inverse scaling factor: src is the numerator */
+ w((src->c_width << 16) / dst->c_width, SRC_XSCALE_REG);
+ w((src->c_height << 16) / dst->c_height, SRC_YSCALE_REG);
}
void g2d_set_cmd(struct g2d_dev *d, u32 c)
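Editor's note: g2d_set_v41_stretch() programs the G2D 4.x scaler with 16.16 fixed-point inverse ratios, i.e. source size over destination size. A couple of worked values using the crop widths the function reads (comment-only sketch):

        /*
         * src->c_width = 640, dst->c_width = 320:
         *      (640 << 16) / 320 = 0x00020000  -> 2.0 (downscale by half)
         * src->c_width = 320, dst->c_width = 640:
         *      (320 << 16) / 640 = 0x00008000  -> 0.5 (upscale by two)
         */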
@@ -96,7 +102,9 @@ void g2d_set_cmd(struct g2d_dev *d, u32 c)
void g2d_start(struct g2d_dev *d)
{
/* Clear cache */
- w(0x7, CACHECTL_REG);
+ if (d->variant->hw_rev == TYPE_G2D_3X)
+ w(0x7, CACHECTL_REG);
+
/* Enable interrupt */
w(1, INTEN_REG);
/* Start G2D engine */
diff --git a/drivers/media/platform/s5p-g2d/g2d-regs.h b/drivers/media/platform/s5p-g2d/g2d-regs.h
index 02e1cf50da4e..9bf31ad35d47 100644
--- a/drivers/media/platform/s5p-g2d/g2d-regs.h
+++ b/drivers/media/platform/s5p-g2d/g2d-regs.h
@@ -35,6 +35,9 @@
#define SRC_COLOR_MODE_REG 0x030C /* Src Image Color Mode reg */
#define SRC_LEFT_TOP_REG 0x0310 /* Src Left Top Coordinate reg */
#define SRC_RIGHT_BOTTOM_REG 0x0314 /* Src Right Bottom Coordinate reg */
+#define SRC_SCALE_CTRL_REG 0x0328 /* Src Scaling type select */
+#define SRC_XSCALE_REG 0x032c /* Src X Scaling ratio */
+#define SRC_YSCALE_REG 0x0330 /* Src Y Scaling ratio */
/* Parameter Setting Registers (Dest) */
#define DST_SELECT_REG 0x0400 /* Dest Image Selection reg */
@@ -113,3 +116,7 @@
#define DEFAULT_WIDTH 100
#define DEFAULT_HEIGHT 100
+#define DEFAULT_SCALE_MODE (2 << 0)
+
+/* Command mode register values */
+#define CMD_V3_ENABLE_STRETCH (1 << 4)
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 6ed259fb1046..aaaf276a5a6c 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -604,8 +604,13 @@ static void device_run(void *prv)
g2d_set_flip(dev, ctx->flip);
if (ctx->in.c_width != ctx->out.c_width ||
- ctx->in.c_height != ctx->out.c_height)
- cmd |= g2d_cmd_stretch(1);
+ ctx->in.c_height != ctx->out.c_height) {
+ if (dev->variant->hw_rev == TYPE_G2D_3X)
+ cmd |= CMD_V3_ENABLE_STRETCH;
+ else
+ g2d_set_v41_stretch(dev, &ctx->in, &ctx->out);
+ }
+
g2d_set_cmd(dev, cmd);
g2d_start(dev);
@@ -713,7 +718,7 @@ static int g2d_probe(struct platform_device *pdev)
return PTR_ERR(dev->regs);
dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
- if (IS_ERR_OR_NULL(dev->clk)) {
+ if (IS_ERR(dev->clk)) {
dev_err(&pdev->dev, "failed to get g2d clock\n");
return -ENXIO;
}
@@ -725,7 +730,7 @@ static int g2d_probe(struct platform_device *pdev)
}
dev->gate = clk_get(&pdev->dev, "fimg2d");
- if (IS_ERR_OR_NULL(dev->gate)) {
+ if (IS_ERR(dev->gate)) {
dev_err(&pdev->dev, "failed to get g2d clock gate\n");
ret = -ENXIO;
goto unprep_clk;
@@ -789,6 +794,7 @@ static int g2d_probe(struct platform_device *pdev)
}
def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
+ dev->variant = g2d_get_drv_data(pdev);
return 0;
@@ -828,9 +834,30 @@ static int g2d_remove(struct platform_device *pdev)
return 0;
}
+static struct g2d_variant g2d_drvdata_v3x = {
+ .hw_rev = TYPE_G2D_3X,
+};
+
+static struct g2d_variant g2d_drvdata_v4x = {
+ .hw_rev = TYPE_G2D_4X, /* Revision 4.1 for Exynos4X12 and Exynos5 */
+};
+
+static struct platform_device_id g2d_driver_ids[] = {
+ {
+ .name = "s5p-g2d",
+ .driver_data = (unsigned long)&g2d_drvdata_v3x,
+ }, {
+ .name = "s5p-g2d-v4x",
+ .driver_data = (unsigned long)&g2d_drvdata_v4x,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, g2d_driver_ids);
+
static struct platform_driver g2d_pdrv = {
.probe = g2d_probe,
.remove = g2d_remove,
+ .id_table = g2d_driver_ids,
.driver = {
.name = G2D_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
index 6b765b0216c5..300ca05ba404 100644
--- a/drivers/media/platform/s5p-g2d/g2d.h
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -10,10 +10,13 @@
* License, or (at your option) any later version
*/
+#include <linux/platform_device.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#define G2D_NAME "s5p-g2d"
+#define TYPE_G2D_3X 3
+#define TYPE_G2D_4X 4
struct g2d_dev {
struct v4l2_device v4l2_dev;
@@ -27,6 +30,7 @@ struct g2d_dev {
struct clk *clk;
struct clk *gate;
struct g2d_ctx *curr;
+ struct g2d_variant *variant;
int irq;
wait_queue_head_t irq_queue;
};
@@ -53,7 +57,7 @@ struct g2d_frame {
struct g2d_ctx {
struct v4l2_fh fh;
struct g2d_dev *dev;
- struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_m2m_ctx *m2m_ctx;
struct g2d_frame in;
struct g2d_frame out;
struct v4l2_ctrl *ctrl_hflip;
@@ -70,6 +74,9 @@ struct g2d_fmt {
u32 hw;
};
+struct g2d_variant {
+ unsigned short hw_rev;
+};
void g2d_reset(struct g2d_dev *d);
void g2d_set_src_size(struct g2d_dev *d, struct g2d_frame *f);
@@ -80,7 +87,11 @@ void g2d_start(struct g2d_dev *d);
void g2d_clear_int(struct g2d_dev *d);
void g2d_set_rop4(struct g2d_dev *d, u32 r);
void g2d_set_flip(struct g2d_dev *d, u32 r);
-u32 g2d_cmd_stretch(u32 e);
+void g2d_set_v41_stretch(struct g2d_dev *d,
+ struct g2d_frame *src, struct g2d_frame *dst);
void g2d_set_cmd(struct g2d_dev *d, u32 c);
-
+static inline struct g2d_variant *g2d_get_drv_data(struct platform_device *pdev)
+{
+ return (struct g2d_variant *)platform_get_device_id(pdev)->driver_data;
+}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 022b9b9baff9..8a4013e3aee7 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -62,7 +62,7 @@
*/
struct s5p_jpeg {
struct mutex lock;
- struct spinlock slock;
+ spinlock_t slock;
struct v4l2_device v4l2_dev;
struct video_device *vfd_encoder;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 8b7fbc7cc04d..e84703c314ce 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -21,6 +21,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-event.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
#include <media/videobuf2-core.h>
#include "s5p_mfc_common.h"
#include "s5p_mfc_ctrl.h"
@@ -273,7 +274,6 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
struct s5p_mfc_buf *dst_buf;
size_t dspl_y_addr;
unsigned int frame_type;
- unsigned int index;
dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
@@ -310,7 +310,6 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
vb2_buffer_done(dst_buf->b,
err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
- index = dst_buf->b->v4l2_buf.index;
break;
}
}
@@ -326,8 +325,6 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
unsigned long flags;
unsigned int res_change;
- unsigned int index;
-
dst_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
& S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
res_change = (s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
@@ -387,7 +384,6 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
mfc_debug(2, "Running again the same buffer\n");
ctx->after_packed_pb = 1;
} else {
- index = src_buf->b->v4l2_buf.index;
mfc_debug(2, "MFC needs next buffer\n");
ctx->consumed_stream = 0;
list_del(&src_buf->list);
@@ -586,8 +582,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
clear_work_bit(ctx);
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- WARN_ON(1);
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
s5p_mfc_clock_off();
wake_up(&ctx->queue);
@@ -676,6 +671,12 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
s5p_mfc_handle_stream_complete(ctx, reason, err);
break;
+ case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
+ clear_work_bit(ctx);
+ ctx->state = MFCINST_RUNNING;
+ wake_up(&ctx->queue);
+ goto irq_cleanup_hw;
+
default:
mfc_debug(2, "Unknown int reason\n");
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
@@ -777,14 +778,16 @@ static int s5p_mfc_open(struct file *file)
goto err_pwr_enable;
}
s5p_mfc_clock_on();
- ret = s5p_mfc_alloc_and_load_firmware(dev);
- if (ret)
- goto err_alloc_fw;
+ ret = s5p_mfc_load_firmware(dev);
+ if (ret) {
+ s5p_mfc_clock_off();
+ goto err_load_fw;
+ }
/* Init the FW */
ret = s5p_mfc_init_hw(dev);
+ s5p_mfc_clock_off();
if (ret)
goto err_init_hw;
- s5p_mfc_clock_off();
}
/* Init videobuf2 queue for CAPTURE */
q = &ctx->vq_dst;
@@ -833,21 +836,20 @@ static int s5p_mfc_open(struct file *file)
return ret;
/* Deinit when failure occurred */
err_queue_init:
+ if (dev->num_inst == 1)
+ s5p_mfc_deinit_hw(dev);
err_init_hw:
- s5p_mfc_release_firmware(dev);
-err_alloc_fw:
- dev->ctx[ctx->num] = NULL;
- del_timer_sync(&dev->watchdog_timer);
- s5p_mfc_clock_off();
+err_load_fw:
err_pwr_enable:
if (dev->num_inst == 1) {
if (s5p_mfc_power_off() < 0)
mfc_err("power off failed\n");
- s5p_mfc_release_firmware(dev);
+ del_timer_sync(&dev->watchdog_timer);
}
err_ctrls_setup:
s5p_mfc_dec_ctrls_delete(ctx);
err_bad_node:
+ dev->ctx[ctx->num] = NULL;
err_no_ctx:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
@@ -901,11 +903,8 @@ static int s5p_mfc_release(struct file *file)
clear_bit(0, &dev->hw_lock);
dev->num_inst--;
if (dev->num_inst == 0) {
- mfc_debug(2, "Last instance - release firmware\n");
- /* reset <-> F/W release */
- s5p_mfc_reset(dev);
+ mfc_debug(2, "Last instance\n");
s5p_mfc_deinit_hw(dev);
- s5p_mfc_release_firmware(dev);
del_timer_sync(&dev->watchdog_timer);
if (s5p_mfc_power_off() < 0)
mfc_err("Power off failed\n");
@@ -1013,6 +1012,48 @@ static int match_child(struct device *dev, void *data)
return !strcmp(dev_name(dev), (char *)data);
}
+static void *mfc_get_drv_data(struct platform_device *pdev);
+
+static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
+{
+ unsigned int mem_info[2];
+
+ dev->mem_dev_l = devm_kzalloc(&dev->plat_dev->dev,
+ sizeof(struct device), GFP_KERNEL);
+ if (!dev->mem_dev_l) {
+ mfc_err("Not enough memory\n");
+ return -ENOMEM;
+ }
+ device_initialize(dev->mem_dev_l);
+ of_property_read_u32_array(dev->plat_dev->dev.of_node,
+ "samsung,mfc-l", mem_info, 2);
+ if (dma_declare_coherent_memory(dev->mem_dev_l, mem_info[0],
+ mem_info[0], mem_info[1],
+ DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
+ mfc_err("Failed to declare coherent memory for\n"
+ "MFC device\n");
+ return -ENOMEM;
+ }
+
+ dev->mem_dev_r = devm_kzalloc(&dev->plat_dev->dev,
+ sizeof(struct device), GFP_KERNEL);
+ if (!dev->mem_dev_r) {
+ mfc_err("Not enough memory\n");
+ return -ENOMEM;
+ }
+ device_initialize(dev->mem_dev_r);
+ of_property_read_u32_array(dev->plat_dev->dev.of_node,
+ "samsung,mfc-r", mem_info, 2);
+ if (dma_declare_coherent_memory(dev->mem_dev_r, mem_info[0],
+ mem_info[0], mem_info[1],
+ DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
+ pr_err("Failed to declare coherent memory for\n"
+ "MFC device\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
/* MFC probe function */
static int s5p_mfc_probe(struct platform_device *pdev)
{
@@ -1036,8 +1077,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
return -ENODEV;
}
- dev->variant = (struct s5p_mfc_variant *)
- platform_get_device_id(pdev)->driver_data;
+ dev->variant = mfc_get_drv_data(pdev);
ret = s5p_mfc_init_pm(dev);
if (ret < 0) {
@@ -1065,35 +1105,43 @@ static int s5p_mfc_probe(struct platform_device *pdev)
goto err_res;
}
- dev->mem_dev_l = device_find_child(&dev->plat_dev->dev, "s5p-mfc-l",
- match_child);
- if (!dev->mem_dev_l) {
- mfc_err("Mem child (L) device get failed\n");
- ret = -ENODEV;
- goto err_res;
- }
-
- dev->mem_dev_r = device_find_child(&dev->plat_dev->dev, "s5p-mfc-r",
- match_child);
- if (!dev->mem_dev_r) {
- mfc_err("Mem child (R) device get failed\n");
- ret = -ENODEV;
- goto err_res;
+ if (pdev->dev.of_node) {
+ if (s5p_mfc_alloc_memdevs(dev) < 0)
+ goto err_res;
+ } else {
+ dev->mem_dev_l = device_find_child(&dev->plat_dev->dev,
+ "s5p-mfc-l", match_child);
+ if (!dev->mem_dev_l) {
+ mfc_err("Mem child (L) device get failed\n");
+ ret = -ENODEV;
+ goto err_res;
+ }
+ dev->mem_dev_r = device_find_child(&dev->plat_dev->dev,
+ "s5p-mfc-r", match_child);
+ if (!dev->mem_dev_r) {
+ mfc_err("Mem child (R) device get failed\n");
+ ret = -ENODEV;
+ goto err_res;
+ }
}
dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
- if (IS_ERR_OR_NULL(dev->alloc_ctx[0])) {
+ if (IS_ERR(dev->alloc_ctx[0])) {
ret = PTR_ERR(dev->alloc_ctx[0]);
goto err_res;
}
dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
- if (IS_ERR_OR_NULL(dev->alloc_ctx[1])) {
+ if (IS_ERR(dev->alloc_ctx[1])) {
ret = PTR_ERR(dev->alloc_ctx[1]);
goto err_mem_init_ctx_1;
}
mutex_init(&dev->mfc_mutex);
+ ret = s5p_mfc_alloc_firmware(dev);
+ if (ret)
+ goto err_alloc_fw;
+
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
goto err_v4l2_dev_reg;
@@ -1175,6 +1223,8 @@ err_dec_reg:
err_dec_alloc:
v4l2_device_unregister(&dev->v4l2_dev);
err_v4l2_dev_reg:
+ s5p_mfc_release_firmware(dev);
+err_alloc_fw:
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
err_mem_init_ctx_1:
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
@@ -1200,8 +1250,13 @@ static int s5p_mfc_remove(struct platform_device *pdev)
video_unregister_device(dev->vfd_enc);
video_unregister_device(dev->vfd_dec);
v4l2_device_unregister(&dev->v4l2_dev);
+ s5p_mfc_release_firmware(dev);
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
+ if (pdev->dev.of_node) {
+ put_device(dev->mem_dev_l);
+ put_device(dev->mem_dev_r);
+ }
s5p_mfc_final_pm(dev);
return 0;
@@ -1350,6 +1405,35 @@ static struct platform_device_id mfc_driver_ids[] = {
};
MODULE_DEVICE_TABLE(platform, mfc_driver_ids);
+static const struct of_device_id exynos_mfc_match[] = {
+ {
+ .compatible = "samsung,mfc-v5",
+ .data = &mfc_drvdata_v5,
+ }, {
+ .compatible = "samsung,mfc-v6",
+ .data = &mfc_drvdata_v6,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_mfc_match);
+
+static void *mfc_get_drv_data(struct platform_device *pdev)
+{
+ struct s5p_mfc_variant *driver_data = NULL;
+
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(of_match_ptr(exynos_mfc_match),
+ pdev->dev.of_node);
+ if (match)
+ driver_data = (struct s5p_mfc_variant *)match->data;
+ } else {
+ driver_data = (struct s5p_mfc_variant *)
+ platform_get_device_id(pdev)->driver_data;
+ }
+ return driver_data;
+}
+
static struct platform_driver s5p_mfc_driver = {
.probe = s5p_mfc_probe,
.remove = s5p_mfc_remove,
@@ -1357,7 +1441,8 @@ static struct platform_driver s5p_mfc_driver = {
.driver = {
.name = S5P_MFC_NAME,
.owner = THIS_MODULE,
- .pm = &s5p_mfc_pm_ops
+ .pm = &s5p_mfc_pm_ops,
+ .of_match_table = exynos_mfc_match,
},
};
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index f02e0497ca98..202d1d7a37a8 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -145,6 +145,7 @@ enum s5p_mfc_inst_state {
MFCINST_RETURN_INST,
MFCINST_ERROR,
MFCINST_ABORT,
+ MFCINST_FLUSH,
MFCINST_RES_CHANGE_INIT,
MFCINST_RES_CHANGE_FLUSH,
MFCINST_RES_CHANGE_END,
@@ -277,8 +278,9 @@ struct s5p_mfc_priv_buf {
* @int_err: error number for last interrupt
* @queue: waitqueue for waiting for completion of device commands
* @fw_size: size of firmware
- * @bank1: address of the beggining of bank 1 memory
- * @bank2: address of the beggining of bank 2 memory
+ * @fw_virt_addr: virtual firmware address
+ * @bank1: address of the beginning of bank 1 memory
+ * @bank2: address of the beginning of bank 2 memory
* @hw_lock: used for hardware locking
* @ctx: array of driver contexts
* @curr_ctx: number of the currently running context
@@ -317,8 +319,9 @@ struct s5p_mfc_dev {
unsigned int int_err;
wait_queue_head_t queue;
size_t fw_size;
- size_t bank1;
- size_t bank2;
+ void *fw_virt_addr;
+ dma_addr_t bank1;
+ dma_addr_t bank2;
unsigned long hw_lock;
struct s5p_mfc_ctx *ctx[MFC_NUM_CONTEXTS];
int curr_ctx;
@@ -493,15 +496,9 @@ struct s5p_mfc_codec_ops {
* flushed
* @head_processed: flag mentioning whether the header data is processed
* completely or not
- * @bank1_buf: handle to memory allocated for temporary buffers from
+ * @bank1: handle to memory allocated for temporary buffers from
* memory bank 1
- * @bank1_phys: address of the temporary buffers from memory bank 1
- * @bank1_size: size of the memory allocated for temporary buffers from
- * memory bank 1
- * @bank2_buf: handle to memory allocated for temporary buffers from
- * memory bank 2
- * @bank2_phys: address of the temporary buffers from memory bank 2
- * @bank2_size: size of the memory allocated for temporary buffers from
+ * @bank2: handle to memory allocated for temporary buffers from
* memory bank 2
* @capture_state: state of the capture buffers queue
* @output_state: state of the output buffers queue
@@ -581,14 +578,8 @@ struct s5p_mfc_ctx {
unsigned int dpb_flush_flag;
unsigned int head_processed;
- /* Buffers */
- void *bank1_buf;
- size_t bank1_phys;
- size_t bank1_size;
-
- void *bank2_buf;
- size_t bank2_phys;
- size_t bank2_size;
+ struct s5p_mfc_priv_buf bank1;
+ struct s5p_mfc_priv_buf bank2;
enum s5p_mfc_queue_state capture_state;
enum s5p_mfc_queue_state output_state;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 585b7b0ed8ec..2e5f30b40dea 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -22,16 +22,64 @@
#include "s5p_mfc_opr.h"
#include "s5p_mfc_pm.h"
-static void *s5p_mfc_bitproc_buf;
-static size_t s5p_mfc_bitproc_phys;
-static unsigned char *s5p_mfc_bitproc_virt;
+/* Allocate memory for firmware */
+int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
+{
+ void *bank2_virt;
+ dma_addr_t bank2_dma_addr;
+
+ dev->fw_size = dev->variant->buf_size->fw;
+
+ if (dev->fw_virt_addr) {
+ mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n");
+ return -ENOMEM;
+ }
+
+ dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size,
+ &dev->bank1, GFP_KERNEL);
+
+ if (IS_ERR(dev->fw_virt_addr)) {
+ dev->fw_virt_addr = NULL;
+ mfc_err("Allocating bitprocessor buffer failed\n");
+ return -ENOMEM;
+ }
+
+ dev->bank1 = dev->bank1;
+
+ if (HAS_PORTNUM(dev) && IS_TWOPORT(dev)) {
+ bank2_virt = dma_alloc_coherent(dev->mem_dev_r, 1 << MFC_BASE_ALIGN_ORDER,
+ &bank2_dma_addr, GFP_KERNEL);
+
+ if (IS_ERR(dev->fw_virt_addr)) {
+ mfc_err("Allocating bank2 base failed\n");
+ dma_free_coherent(dev->mem_dev_l, dev->fw_size,
+ dev->fw_virt_addr, dev->bank1);
+ dev->fw_virt_addr = NULL;
+ return -ENOMEM;
+ }
+
+ /* Valid buffers passed to MFC encoder with LAST_FRAME command
+ * should not have address of bank2 - MFC will treat it as a null frame.
+ * To avoid such situation we set bank2 address below the pool address.
+ */
+ dev->bank2 = bank2_dma_addr - (1 << MFC_BASE_ALIGN_ORDER);
+
+ dma_free_coherent(dev->mem_dev_r, 1 << MFC_BASE_ALIGN_ORDER,
+ bank2_virt, bank2_dma_addr);
+
+ } else {
+ /* In this case bank2 can point to the same address as bank1.
+	 * Firmware will always occupy the beginning of this area so it is
+	 * impossible to have a video frame buffer with a zero address. */
+ dev->bank2 = dev->bank1;
+ }
+ return 0;
+}
-/* Allocate and load firmware */
-int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
+/* Load firmware */
+int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev)
{
struct firmware *fw_blob;
- size_t bank2_base_phys;
- void *b_base;
int err;
/* Firmware has to be present as a separate file or compiled
@@ -44,77 +92,17 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
return -EINVAL;
}
- dev->fw_size = dev->variant->buf_size->fw;
if (fw_blob->size > dev->fw_size) {
mfc_err("MFC firmware is too big to be loaded\n");
release_firmware(fw_blob);
return -ENOMEM;
}
- if (s5p_mfc_bitproc_buf) {
- mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n");
- release_firmware(fw_blob);
- return -ENOMEM;
- }
- s5p_mfc_bitproc_buf = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], dev->fw_size);
- if (IS_ERR(s5p_mfc_bitproc_buf)) {
- s5p_mfc_bitproc_buf = NULL;
- mfc_err("Allocating bitprocessor buffer failed\n");
+ if (!dev->fw_virt_addr) {
+ mfc_err("MFC firmware is not allocated\n");
release_firmware(fw_blob);
- return -ENOMEM;
- }
- s5p_mfc_bitproc_phys = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], s5p_mfc_bitproc_buf);
- if (s5p_mfc_bitproc_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
- mfc_err("The base memory for bank 1 is not aligned to 128KB\n");
- vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
- s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = NULL;
- release_firmware(fw_blob);
- return -EIO;
- }
- s5p_mfc_bitproc_virt = vb2_dma_contig_memops.vaddr(s5p_mfc_bitproc_buf);
- if (!s5p_mfc_bitproc_virt) {
- mfc_err("Bitprocessor memory remap failed\n");
- vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
- s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = NULL;
- release_firmware(fw_blob);
- return -EIO;
- }
- dev->bank1 = s5p_mfc_bitproc_phys;
- if (HAS_PORTNUM(dev) && IS_TWOPORT(dev)) {
- b_base = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK2_ALLOC_CTX],
- 1 << MFC_BASE_ALIGN_ORDER);
- if (IS_ERR(b_base)) {
- vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
- s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = NULL;
- mfc_err("Allocating bank2 base failed\n");
- release_firmware(fw_blob);
- return -ENOMEM;
- }
- bank2_base_phys = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], b_base);
- vb2_dma_contig_memops.put(b_base);
- if (bank2_base_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
- mfc_err("The base memory for bank 2 is not aligned to 128KB\n");
- vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
- s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = NULL;
- release_firmware(fw_blob);
- return -EIO;
- }
- /* Valid buffers passed to MFC encoder with LAST_FRAME command
- * should not have address of bank2 - MFC will treat it as a null frame.
- * To avoid such situation we set bank2 address below the pool address.
- */
- dev->bank2 = bank2_base_phys - (1 << MFC_BASE_ALIGN_ORDER);
- } else {
- dev->bank2 = dev->bank1;
+ return -EINVAL;
}
- memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
+ memcpy(dev->fw_virt_addr, fw_blob->data, fw_blob->size);
wmb();
release_firmware(fw_blob);
mfc_debug_leave();
@@ -142,12 +130,12 @@ int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev)
release_firmware(fw_blob);
return -ENOMEM;
}
- if (s5p_mfc_bitproc_buf == NULL || s5p_mfc_bitproc_phys == 0) {
- mfc_err("MFC firmware is not allocated or was not mapped correctly\n");
+ if (!dev->fw_virt_addr) {
+ mfc_err("MFC firmware is not allocated\n");
release_firmware(fw_blob);
return -EINVAL;
}
- memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
+ memcpy(dev->fw_virt_addr, fw_blob->data, fw_blob->size);
wmb();
release_firmware(fw_blob);
mfc_debug_leave();
@@ -159,12 +147,11 @@ int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
{
/* Before calling this function one has to make sure
* that MFC is no longer processing */
- if (!s5p_mfc_bitproc_buf)
+ if (!dev->fw_virt_addr)
return -EINVAL;
- vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
- s5p_mfc_bitproc_virt = NULL;
- s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = NULL;
+ dma_free_coherent(dev->mem_dev_l, dev->fw_size, dev->fw_virt_addr,
+ dev->bank1);
+ dev->fw_virt_addr = NULL;
return 0;
}
@@ -257,8 +244,10 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
int ret;
mfc_debug_enter();
- if (!s5p_mfc_bitproc_buf)
+ if (!dev->fw_virt_addr) {
+ mfc_err("Firmware memory is not allocated.\n");
return -EINVAL;
+ }
/* 0. MFC reset */
mfc_debug(2, "MFC reset..\n");
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
index 90aa9b9886d5..6a9b6f8606bb 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
@@ -16,7 +16,8 @@
#include "s5p_mfc_common.h"
int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev);
-int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev);
+int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev);
+int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev);
int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev);
int s5p_mfc_init_hw(struct s5p_mfc_dev *dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 6dad9a74f61c..4582473978ca 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -991,24 +991,35 @@ static int s5p_mfc_stop_streaming(struct vb2_queue *q)
S5P_MFC_R2H_CMD_FRAME_DONE_RET, 0);
aborted = 1;
}
- spin_lock_irqsave(&dev->irqlock, flags);
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ spin_lock_irqsave(&dev->irqlock, flags);
s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
&ctx->vq_dst);
INIT_LIST_HEAD(&ctx->dst_queue);
ctx->dst_queue_cnt = 0;
ctx->dpb_flush_flag = 1;
ctx->dec_dst_flag = 0;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ if (IS_MFCV6(dev) && (ctx->state == MFCINST_RUNNING)) {
+ ctx->state = MFCINST_FLUSH;
+ set_work_bit_irqsave(ctx);
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ if (s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_MFC_R2H_CMD_DPB_FLUSH_RET, 0))
+ mfc_err("Err flushing buffers\n");
+ }
}
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ spin_lock_irqsave(&dev->irqlock, flags);
s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
&ctx->vq_src);
INIT_LIST_HEAD(&ctx->src_queue);
ctx->src_queue_cnt = 0;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
}
if (aborted)
ctx->state = MFCINST_RUNNING;
- spin_unlock_irqrestore(&dev->irqlock, flags);
return 0;
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index f92f6ddd739f..2356fd52a169 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1534,6 +1534,8 @@ int vidioc_encoder_cmd(struct file *file, void *priv,
if (list_empty(&ctx->src_queue)) {
mfc_debug(2, "EOS: empty src queue, entering finishing state");
ctx->state = MFCINST_FINISHING;
+ if (s5p_mfc_ctx_ready(ctx))
+ set_work_bit_irqsave(ctx);
spin_unlock_irqrestore(&dev->irqlock, flags);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
} else {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
index 6932e90d4065..10f8ac37cecd 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -12,6 +12,7 @@
* published by the Free Software Foundation.
*/
+#include "s5p_mfc_debug.h"
#include "s5p_mfc_opr.h"
#include "s5p_mfc_opr_v5.h"
#include "s5p_mfc_opr_v6.h"
@@ -29,3 +30,32 @@ void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev)
}
dev->mfc_ops = s5p_mfc_ops;
}
+
+int s5p_mfc_alloc_priv_buf(struct device *dev,
+ struct s5p_mfc_priv_buf *b)
+{
+
+ mfc_debug(3, "Allocating priv: %d\n", b->size);
+
+ b->virt = dma_alloc_coherent(dev, b->size, &b->dma, GFP_KERNEL);
+
+ if (!b->virt) {
+ mfc_err("Allocating private buffer failed\n");
+ return -ENOMEM;
+ }
+
+ mfc_debug(3, "Allocated addr %p %08x\n", b->virt, b->dma);
+ return 0;
+}
+
+void s5p_mfc_release_priv_buf(struct device *dev,
+ struct s5p_mfc_priv_buf *b)
+{
+ if (b->virt) {
+ dma_free_coherent(dev, b->size, b->virt, b->dma);
+ b->virt = NULL;
+ b->dma = 0;
+ b->size = 0;
+ }
+}
+
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
index 420abecafec0..754c540e7a7e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
@@ -80,5 +80,10 @@ struct s5p_mfc_hw_ops {
};
void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev);
+int s5p_mfc_alloc_priv_buf(struct device *dev,
+ struct s5p_mfc_priv_buf *b);
+void s5p_mfc_release_priv_buf(struct device *dev,
+ struct s5p_mfc_priv_buf *b);
+
#endif /* S5P_MFC_OPR_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index bf7d010a4107..f61dba837899 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -38,39 +38,26 @@ int s5p_mfc_alloc_dec_temp_buffers_v5(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
+ int ret;
- ctx->dsc.alloc = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX],
- buf_size->dsc);
- if (IS_ERR_VALUE((int)ctx->dsc.alloc)) {
- ctx->dsc.alloc = NULL;
- mfc_err("Allocating DESC buffer failed\n");
- return -ENOMEM;
+ ctx->dsc.size = buf_size->dsc;
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->dsc);
+ if (ret) {
+ mfc_err("Failed to allocate temporary buffer\n");
+ return ret;
}
- ctx->dsc.dma = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->dsc.alloc);
+
BUG_ON(ctx->dsc.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
- ctx->dsc.virt = vb2_dma_contig_memops.vaddr(ctx->dsc.alloc);
- if (ctx->dsc.virt == NULL) {
- vb2_dma_contig_memops.put(ctx->dsc.alloc);
- ctx->dsc.dma = 0;
- ctx->dsc.alloc = NULL;
- mfc_err("Remapping DESC buffer failed\n");
- return -ENOMEM;
- }
- memset(ctx->dsc.virt, 0, buf_size->dsc);
+ memset(ctx->dsc.virt, 0, ctx->dsc.size);
wmb();
return 0;
}
+
/* Release temporary buffers for decoding */
void s5p_mfc_release_dec_desc_buffer_v5(struct s5p_mfc_ctx *ctx)
{
- if (ctx->dsc.dma) {
- vb2_dma_contig_memops.put(ctx->dsc.alloc);
- ctx->dsc.alloc = NULL;
- ctx->dsc.dma = 0;
- }
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->dsc);
}
/* Allocate codec buffers */
@@ -80,6 +67,7 @@ int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
unsigned int enc_ref_y_size = 0;
unsigned int enc_ref_c_size = 0;
unsigned int guard_width, guard_height;
+ int ret;
if (ctx->type == MFCINST_DECODER) {
mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
@@ -113,100 +101,93 @@ int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
/* Codecs have different memory requirements */
switch (ctx->codec_mode) {
case S5P_MFC_CODEC_H264_DEC:
- ctx->bank1_size =
+ ctx->bank1.size =
ALIGN(S5P_FIMV_DEC_NB_IP_SIZE +
S5P_FIMV_DEC_VERT_NB_MV_SIZE,
S5P_FIMV_DEC_BUF_ALIGN);
- ctx->bank2_size = ctx->total_dpb_count * ctx->mv_size;
+ ctx->bank2.size = ctx->total_dpb_count * ctx->mv_size;
break;
case S5P_MFC_CODEC_MPEG4_DEC:
- ctx->bank1_size =
+ ctx->bank1.size =
ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE +
S5P_FIMV_DEC_UPNB_MV_SIZE +
S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
S5P_FIMV_DEC_STX_PARSER_SIZE +
S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE,
S5P_FIMV_DEC_BUF_ALIGN);
- ctx->bank2_size = 0;
+ ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_VC1RCV_DEC:
case S5P_MFC_CODEC_VC1_DEC:
- ctx->bank1_size =
+ ctx->bank1.size =
ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
S5P_FIMV_DEC_UPNB_MV_SIZE +
S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
S5P_FIMV_DEC_NB_DCAC_SIZE +
3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE,
S5P_FIMV_DEC_BUF_ALIGN);
- ctx->bank2_size = 0;
+ ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_MPEG2_DEC:
- ctx->bank1_size = 0;
- ctx->bank2_size = 0;
+ ctx->bank1.size = 0;
+ ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_H263_DEC:
- ctx->bank1_size =
+ ctx->bank1.size =
ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
S5P_FIMV_DEC_UPNB_MV_SIZE +
S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
S5P_FIMV_DEC_NB_DCAC_SIZE,
S5P_FIMV_DEC_BUF_ALIGN);
- ctx->bank2_size = 0;
+ ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_H264_ENC:
- ctx->bank1_size = (enc_ref_y_size * 2) +
+ ctx->bank1.size = (enc_ref_y_size * 2) +
S5P_FIMV_ENC_UPMV_SIZE +
S5P_FIMV_ENC_COLFLG_SIZE +
S5P_FIMV_ENC_INTRAMD_SIZE +
S5P_FIMV_ENC_NBORINFO_SIZE;
- ctx->bank2_size = (enc_ref_y_size * 2) +
+ ctx->bank2.size = (enc_ref_y_size * 2) +
(enc_ref_c_size * 4) +
S5P_FIMV_ENC_INTRAPRED_SIZE;
break;
case S5P_MFC_CODEC_MPEG4_ENC:
- ctx->bank1_size = (enc_ref_y_size * 2) +
+ ctx->bank1.size = (enc_ref_y_size * 2) +
S5P_FIMV_ENC_UPMV_SIZE +
S5P_FIMV_ENC_COLFLG_SIZE +
S5P_FIMV_ENC_ACDCCOEF_SIZE;
- ctx->bank2_size = (enc_ref_y_size * 2) +
+ ctx->bank2.size = (enc_ref_y_size * 2) +
(enc_ref_c_size * 4);
break;
case S5P_MFC_CODEC_H263_ENC:
- ctx->bank1_size = (enc_ref_y_size * 2) +
+ ctx->bank1.size = (enc_ref_y_size * 2) +
S5P_FIMV_ENC_UPMV_SIZE +
S5P_FIMV_ENC_ACDCCOEF_SIZE;
- ctx->bank2_size = (enc_ref_y_size * 2) +
+ ctx->bank2.size = (enc_ref_y_size * 2) +
(enc_ref_c_size * 4);
break;
default:
break;
}
/* Allocate only if memory from bank 1 is necessary */
- if (ctx->bank1_size > 0) {
- ctx->bank1_buf = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
- if (IS_ERR(ctx->bank1_buf)) {
- ctx->bank1_buf = NULL;
- printk(KERN_ERR
- "Buf alloc for decoding failed (port A)\n");
- return -ENOMEM;
+ if (ctx->bank1.size > 0) {
+
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->bank1);
+ if (ret) {
+ mfc_err("Failed to allocate Bank1 temporary buffer\n");
+ return ret;
}
- ctx->bank1_phys = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
- BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
}
/* Allocate only if memory from bank 2 is necessary */
- if (ctx->bank2_size > 0) {
- ctx->bank2_buf = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
- if (IS_ERR(ctx->bank2_buf)) {
- ctx->bank2_buf = NULL;
- mfc_err("Buf alloc for decoding failed (port B)\n");
- return -ENOMEM;
+ if (ctx->bank2.size > 0) {
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_r, &ctx->bank2);
+ if (ret) {
+ mfc_err("Failed to allocate Bank2 temporary buffer\n");
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1);
+ return ret;
}
- ctx->bank2_phys = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_buf);
- BUG_ON(ctx->bank2_phys & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
+ BUG_ON(ctx->bank2.dma & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
}
return 0;
}
@@ -214,18 +195,8 @@ int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
/* Release buffers allocated for codec */
void s5p_mfc_release_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
{
- if (ctx->bank1_buf) {
- vb2_dma_contig_memops.put(ctx->bank1_buf);
- ctx->bank1_buf = NULL;
- ctx->bank1_phys = 0;
- ctx->bank1_size = 0;
- }
- if (ctx->bank2_buf) {
- vb2_dma_contig_memops.put(ctx->bank2_buf);
- ctx->bank2_buf = NULL;
- ctx->bank2_phys = 0;
- ctx->bank2_size = 0;
- }
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1);
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_r, &ctx->bank2);
}
/* Allocate memory for instance data buffer */
@@ -233,58 +204,38 @@ int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
+ int ret;
if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
ctx->ctx.size = buf_size->h264_ctx;
else
ctx->ctx.size = buf_size->non_h264_ctx;
- ctx->ctx.alloc = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.size);
- if (IS_ERR(ctx->ctx.alloc)) {
- mfc_err("Allocating context buffer failed\n");
- ctx->ctx.alloc = NULL;
- return -ENOMEM;
+
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx);
+ if (ret) {
+ mfc_err("Failed to allocate instance buffer\n");
+ return ret;
}
- ctx->ctx.dma = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.alloc);
- BUG_ON(ctx->ctx.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
ctx->ctx.ofs = OFFSETA(ctx->ctx.dma);
- ctx->ctx.virt = vb2_dma_contig_memops.vaddr(ctx->ctx.alloc);
- if (!ctx->ctx.virt) {
- mfc_err("Remapping instance buffer failed\n");
- vb2_dma_contig_memops.put(ctx->ctx.alloc);
- ctx->ctx.alloc = NULL;
- ctx->ctx.ofs = 0;
- ctx->ctx.dma = 0;
- return -ENOMEM;
- }
+
/* Zero content of the allocated memory */
memset(ctx->ctx.virt, 0, ctx->ctx.size);
wmb();
/* Initialize shared memory */
- ctx->shm.alloc = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], buf_size->shm);
- if (IS_ERR(ctx->shm.alloc)) {
- mfc_err("failed to allocate shared memory\n");
- return PTR_ERR(ctx->shm.alloc);
+ ctx->shm.size = buf_size->shm;
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->shm);
+ if (ret) {
+ mfc_err("Failed to allocate shared memory buffer\n");
+ return ret;
}
+
/* shared memory offset only keeps the offset from base (port a) */
- ctx->shm.ofs = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->shm.alloc)
- - dev->bank1;
+ ctx->shm.ofs = ctx->shm.dma - dev->bank1;
BUG_ON(ctx->shm.ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
- ctx->shm.virt = vb2_dma_contig_memops.vaddr(ctx->shm.alloc);
- if (!ctx->shm.virt) {
- vb2_dma_contig_memops.put(ctx->shm.alloc);
- ctx->shm.alloc = NULL;
- ctx->shm.ofs = 0;
- mfc_err("failed to virt addr of shared memory\n");
- return -ENOMEM;
- }
- memset((void *)ctx->shm.virt, 0, buf_size->shm);
+ memset(ctx->shm.virt, 0, buf_size->shm);
wmb();
return 0;
}
@@ -292,19 +243,8 @@ int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
/* Release instance buffer */
void s5p_mfc_release_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
{
- if (ctx->ctx.alloc) {
- vb2_dma_contig_memops.put(ctx->ctx.alloc);
- ctx->ctx.alloc = NULL;
- ctx->ctx.ofs = 0;
- ctx->ctx.virt = NULL;
- ctx->ctx.dma = 0;
- }
- if (ctx->shm.alloc) {
- vb2_dma_contig_memops.put(ctx->shm.alloc);
- ctx->shm.alloc = NULL;
- ctx->shm.ofs = 0;
- ctx->shm.virt = NULL;
- }
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->ctx);
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->shm);
}
int s5p_mfc_alloc_dev_context_buffer_v5(struct s5p_mfc_dev *dev)
@@ -443,10 +383,10 @@ int s5p_mfc_set_dec_frame_buffer_v5(struct s5p_mfc_ctx *ctx)
size_t buf_addr1, buf_addr2;
int buf_size1, buf_size2;
- buf_addr1 = ctx->bank1_phys;
- buf_size1 = ctx->bank1_size;
- buf_addr2 = ctx->bank2_phys;
- buf_size2 = ctx->bank2_size;
+ buf_addr1 = ctx->bank1.dma;
+ buf_size1 = ctx->bank1.size;
+ buf_addr2 = ctx->bank2.dma;
+ buf_size2 = ctx->bank2.size;
dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
~S5P_FIMV_DPB_COUNT_MASK;
mfc_write(dev, ctx->total_dpb_count | dpb,
@@ -523,7 +463,6 @@ int s5p_mfc_set_dec_frame_buffer_v5(struct s5p_mfc_ctx *ctx)
mfc_err("Unknown codec for decoding (%x)\n",
ctx->codec_mode);
return -EINVAL;
- break;
}
frame_size = ctx->luma_size;
frame_size_ch = ctx->chroma_size;
@@ -607,10 +546,10 @@ int s5p_mfc_set_enc_ref_buffer_v5(struct s5p_mfc_ctx *ctx)
unsigned int guard_width, guard_height;
int i;
- buf_addr1 = ctx->bank1_phys;
- buf_size1 = ctx->bank1_size;
- buf_addr2 = ctx->bank2_phys;
- buf_size2 = ctx->bank2_size;
+ buf_addr1 = ctx->bank1.dma;
+ buf_size1 = ctx->bank1.size;
+ buf_addr2 = ctx->bank2.dma;
+ buf_size2 = ctx->bank2.size;
enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
* ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 3a8cfd9fc1bd..beb6dbacebd9 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -73,6 +73,7 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
unsigned int mb_width, mb_height;
+ int ret;
mb_width = MB_WIDTH(ctx->img_width);
mb_height = MB_HEIGHT(ctx->img_height);
@@ -112,7 +113,7 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size =
+ ctx->bank1.size =
ctx->scratch_buf_size +
(ctx->mv_count * ctx->mv_size);
break;
@@ -123,7 +124,7 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size = ctx->scratch_buf_size;
+ ctx->bank1.size = ctx->scratch_buf_size;
break;
case S5P_MFC_CODEC_VC1RCV_DEC:
case S5P_MFC_CODEC_VC1_DEC:
@@ -133,11 +134,11 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size = ctx->scratch_buf_size;
+ ctx->bank1.size = ctx->scratch_buf_size;
break;
case S5P_MFC_CODEC_MPEG2_DEC:
- ctx->bank1_size = 0;
- ctx->bank2_size = 0;
+ ctx->bank1.size = 0;
+ ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_H263_DEC:
ctx->scratch_buf_size =
@@ -146,7 +147,7 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size = ctx->scratch_buf_size;
+ ctx->bank1.size = ctx->scratch_buf_size;
break;
case S5P_MFC_CODEC_VP8_DEC:
ctx->scratch_buf_size =
@@ -155,7 +156,7 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size = ctx->scratch_buf_size;
+ ctx->bank1.size = ctx->scratch_buf_size;
break;
case S5P_MFC_CODEC_H264_ENC:
ctx->scratch_buf_size =
@@ -164,11 +165,11 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size =
+ ctx->bank1.size =
ctx->scratch_buf_size + ctx->tmv_buffer_size +
(ctx->dpb_count * (ctx->luma_dpb_size +
ctx->chroma_dpb_size + ctx->me_buffer_size));
- ctx->bank2_size = 0;
+ ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_MPEG4_ENC:
case S5P_MFC_CODEC_H263_ENC:
@@ -178,28 +179,24 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size =
+ ctx->bank1.size =
ctx->scratch_buf_size + ctx->tmv_buffer_size +
(ctx->dpb_count * (ctx->luma_dpb_size +
ctx->chroma_dpb_size + ctx->me_buffer_size));
- ctx->bank2_size = 0;
+ ctx->bank2.size = 0;
break;
default:
break;
}
/* Allocate only if memory from bank 1 is necessary */
- if (ctx->bank1_size > 0) {
- ctx->bank1_buf = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
- if (IS_ERR(ctx->bank1_buf)) {
- ctx->bank1_buf = 0;
- pr_err("Buf alloc for decoding failed (port A)\n");
- return -ENOMEM;
+ if (ctx->bank1.size > 0) {
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->bank1);
+ if (ret) {
+ mfc_err("Failed to allocate Bank1 memory\n");
+ return ret;
}
- ctx->bank1_phys = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
- BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
}
return 0;
@@ -208,12 +205,7 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
/* Release buffers allocated for codec */
void s5p_mfc_release_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
{
- if (ctx->bank1_buf) {
- vb2_dma_contig_memops.put(ctx->bank1_buf);
- ctx->bank1_buf = 0;
- ctx->bank1_phys = 0;
- ctx->bank1_size = 0;
- }
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1);
}
/* Allocate memory for instance data buffer */
@@ -221,6 +213,7 @@ int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+ int ret;
mfc_debug_enter();
@@ -250,25 +243,10 @@ int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
break;
}
- ctx->ctx.alloc = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.size);
- if (IS_ERR(ctx->ctx.alloc)) {
- mfc_err("Allocating context buffer failed.\n");
- return PTR_ERR(ctx->ctx.alloc);
- }
-
- ctx->ctx.dma = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.alloc);
-
- ctx->ctx.virt = vb2_dma_contig_memops.vaddr(ctx->ctx.alloc);
- if (!ctx->ctx.virt) {
- vb2_dma_contig_memops.put(ctx->ctx.alloc);
- ctx->ctx.alloc = NULL;
- ctx->ctx.dma = 0;
- ctx->ctx.virt = NULL;
-
- mfc_err("Remapping context buffer failed.\n");
- return -ENOMEM;
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx);
+ if (ret) {
+ mfc_err("Failed to allocate instance buffer\n");
+ return ret;
}
memset(ctx->ctx.virt, 0, ctx->ctx.size);
@@ -282,44 +260,22 @@ int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
/* Release instance buffer */
void s5p_mfc_release_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
{
- mfc_debug_enter();
-
- if (ctx->ctx.alloc) {
- vb2_dma_contig_memops.put(ctx->ctx.alloc);
- ctx->ctx.alloc = NULL;
- ctx->ctx.dma = 0;
- ctx->ctx.virt = NULL;
- }
-
- mfc_debug_leave();
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->ctx);
}
/* Allocate context buffers for SYS_INIT */
int s5p_mfc_alloc_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
{
struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+ int ret;
mfc_debug_enter();
- dev->ctx_buf.alloc = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], buf_size->dev_ctx);
- if (IS_ERR(dev->ctx_buf.alloc)) {
- mfc_err("Allocating DESC buffer failed.\n");
- return PTR_ERR(dev->ctx_buf.alloc);
- }
-
- dev->ctx_buf.dma = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX],
- dev->ctx_buf.alloc);
-
- dev->ctx_buf.virt = vb2_dma_contig_memops.vaddr(dev->ctx_buf.alloc);
- if (!dev->ctx_buf.virt) {
- vb2_dma_contig_memops.put(dev->ctx_buf.alloc);
- dev->ctx_buf.alloc = NULL;
- dev->ctx_buf.dma = 0;
-
- mfc_err("Remapping DESC buffer failed.\n");
- return -ENOMEM;
+ dev->ctx_buf.size = buf_size->dev_ctx;
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &dev->ctx_buf);
+ if (ret) {
+ mfc_err("Failed to allocate device context buffer\n");
+ return ret;
}
memset(dev->ctx_buf.virt, 0, buf_size->dev_ctx);
@@ -333,12 +289,7 @@ int s5p_mfc_alloc_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
/* Release context buffers for SYS_INIT */
void s5p_mfc_release_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
{
- if (dev->ctx_buf.alloc) {
- vb2_dma_contig_memops.put(dev->ctx_buf.alloc);
- dev->ctx_buf.alloc = NULL;
- dev->ctx_buf.dma = 0;
- dev->ctx_buf.virt = NULL;
- }
+ s5p_mfc_release_priv_buf(dev->mem_dev_l, &dev->ctx_buf);
}
static int calc_plane(int width, int height)
@@ -417,8 +368,8 @@ int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx)
int buf_size1;
int align_gap;
- buf_addr1 = ctx->bank1_phys;
- buf_size1 = ctx->bank1_size;
+ buf_addr1 = ctx->bank1.dma;
+ buf_size1 = ctx->bank1.size;
mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
mfc_debug(2, "Total DPB COUNT: %d\n", ctx->total_dpb_count);
@@ -535,13 +486,13 @@ void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- size_t buf_addr1, buf_size1;
- int i;
+ size_t buf_addr1;
+ int i, buf_size1;
mfc_debug_enter();
- buf_addr1 = ctx->bank1_phys;
- buf_size1 = ctx->bank1_size;
+ buf_addr1 = ctx->bank1.dma;
+ buf_size1 = ctx->bank1.size;
mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
@@ -1253,12 +1204,14 @@ int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
static inline void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
{
struct s5p_mfc_dev *dev = ctx->dev;
- unsigned int dpb;
- if (flush)
- dpb = READL(S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (1 << 14);
- else
- dpb = READL(S5P_FIMV_SI_CH0_DPB_CONF_CTRL) & ~(1 << 14);
- WRITEL(dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+
+ if (flush) {
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
+ s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ S5P_FIMV_H2R_CMD_FLUSH_V6, NULL);
+ }
}
/* Decode a single frame */
@@ -1408,7 +1361,6 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx)
struct s5p_mfc_buf *temp_vb;
unsigned long flags;
int last_frame = 0;
- unsigned int index;
spin_lock_irqsave(&dev->irqlock, flags);
@@ -1427,8 +1379,6 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx)
temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
- index = temp_vb->b->v4l2_buf.index;
-
dev->curr_ctx = ctx->num;
s5p_mfc_clean_ctx_int_flags(ctx);
if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
@@ -1452,7 +1402,6 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
unsigned int src_y_size, src_c_size;
*/
unsigned int dst_size;
- unsigned int index;
spin_lock_irqsave(&dev->irqlock, flags);
@@ -1487,8 +1436,6 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
spin_unlock_irqrestore(&dev->irqlock, flags);
- index = src_mb->b->v4l2_buf.index;
-
dev->curr_ctx = ctx->num;
s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_encode_one_frame_v6(ctx);
@@ -1656,6 +1603,9 @@ void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
case MFCINST_HEAD_PARSED:
ret = s5p_mfc_run_init_dec_buffers(ctx);
break;
+ case MFCINST_FLUSH:
+ s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
+ break;
case MFCINST_RES_CHANGE_INIT:
s5p_mfc_run_dec_last_frames(ctx);
break;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 2895333866fc..6aa38a56aaf2 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -46,7 +46,7 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
ret = clk_prepare(pm->clock_gate);
if (ret) {
- mfc_err("Failed to preapre clock-gating control\n");
+ mfc_err("Failed to prepare clock-gating control\n");
goto err_p_ip_clk;
}
diff --git a/drivers/media/platform/s5p-tv/hdmi_drv.c b/drivers/media/platform/s5p-tv/hdmi_drv.c
index 7c1116c73bf3..8de1b3dce459 100644
--- a/drivers/media/platform/s5p-tv/hdmi_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmi_drv.c
@@ -656,7 +656,7 @@ static int hdmi_g_mbus_fmt(struct v4l2_subdev *sd,
dev_dbg(hdev->dev, "%s\n", __func__);
if (!hdev->cur_conf)
return -EINVAL;
- memset(fmt, 0, sizeof *fmt);
+ memset(fmt, 0, sizeof(*fmt));
fmt->width = t->hact.end - t->hact.beg;
fmt->height = t->vact[0].end - t->vact[0].beg;
fmt->code = V4L2_MBUS_FMT_FIXED; /* means RGB888 */
@@ -760,7 +760,7 @@ static void hdmi_resources_cleanup(struct hdmi_device *hdev)
clk_put(res->sclk_hdmi);
if (!IS_ERR_OR_NULL(res->hdmi))
clk_put(res->hdmi);
- memset(res, 0, sizeof *res);
+ memset(res, 0, sizeof(*res));
}
static int hdmi_resources_init(struct hdmi_device *hdev)
@@ -777,31 +777,31 @@ static int hdmi_resources_init(struct hdmi_device *hdev)
dev_dbg(dev, "HDMI resource init\n");
- memset(res, 0, sizeof *res);
+ memset(res, 0, sizeof(*res));
/* get clocks, power */
res->hdmi = clk_get(dev, "hdmi");
- if (IS_ERR_OR_NULL(res->hdmi)) {
+ if (IS_ERR(res->hdmi)) {
dev_err(dev, "failed to get clock 'hdmi'\n");
goto fail;
}
res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
- if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+ if (IS_ERR(res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
goto fail;
}
res->sclk_pixel = clk_get(dev, "sclk_pixel");
- if (IS_ERR_OR_NULL(res->sclk_pixel)) {
+ if (IS_ERR(res->sclk_pixel)) {
dev_err(dev, "failed to get clock 'sclk_pixel'\n");
goto fail;
}
res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
- if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
+ if (IS_ERR(res->sclk_hdmiphy)) {
dev_err(dev, "failed to get clock 'sclk_hdmiphy'\n");
goto fail;
}
res->hdmiphy = clk_get(dev, "hdmiphy");
- if (IS_ERR_OR_NULL(res->hdmiphy)) {
+ if (IS_ERR(res->hdmiphy)) {
dev_err(dev, "failed to get clock 'hdmiphy'\n");
goto fail;
}
@@ -955,7 +955,7 @@ static int hdmi_probe(struct platform_device *pdev)
v4l2_subdev_init(sd, &hdmi_sd_ops);
sd->owner = THIS_MODULE;
- strlcpy(sd->name, "s5p-hdmi", sizeof sd->name);
+ strlcpy(sd->name, "s5p-hdmi", sizeof(sd->name));
hdmi_dev->cur_preset = HDMI_DEFAULT_PRESET;
/* FIXME: missing fail preset is not supported */
hdmi_dev->cur_conf = hdmi_preset2timings(hdmi_dev->cur_preset);
diff --git a/drivers/media/platform/s5p-tv/hdmiphy_drv.c b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
index 06b5d2dbb2d9..80717cec76ae 100644
--- a/drivers/media/platform/s5p-tv/hdmiphy_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
@@ -284,7 +284,7 @@ static int hdmiphy_probe(struct i2c_client *client,
{
struct hdmiphy_ctx *ctx;
- ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index b671e20e9318..04e6490a45be 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -19,6 +19,7 @@
#endif
#include <linux/fb.h>
+#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index 02faea03aa7d..5733033a6ead 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -240,27 +240,27 @@ static int mxr_acquire_clocks(struct mxr_device *mdev)
struct device *dev = mdev->dev;
res->mixer = clk_get(dev, "mixer");
- if (IS_ERR_OR_NULL(res->mixer)) {
+ if (IS_ERR(res->mixer)) {
mxr_err(mdev, "failed to get clock 'mixer'\n");
goto fail;
}
res->vp = clk_get(dev, "vp");
- if (IS_ERR_OR_NULL(res->vp)) {
+ if (IS_ERR(res->vp)) {
mxr_err(mdev, "failed to get clock 'vp'\n");
goto fail;
}
res->sclk_mixer = clk_get(dev, "sclk_mixer");
- if (IS_ERR_OR_NULL(res->sclk_mixer)) {
+ if (IS_ERR(res->sclk_mixer)) {
mxr_err(mdev, "failed to get clock 'sclk_mixer'\n");
goto fail;
}
res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
- if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+ if (IS_ERR(res->sclk_hdmi)) {
mxr_err(mdev, "failed to get clock 'sclk_hdmi'\n");
goto fail;
}
res->sclk_dac = clk_get(dev, "sclk_dac");
- if (IS_ERR_OR_NULL(res->sclk_dac)) {
+ if (IS_ERR(res->sclk_dac)) {
mxr_err(mdev, "failed to get clock 'sclk_dac'\n");
goto fail;
}
@@ -298,7 +298,7 @@ static void mxr_release_resources(struct mxr_device *mdev)
{
mxr_release_clocks(mdev);
mxr_release_plat_resources(mdev);
- memset(&mdev->res, 0, sizeof mdev->res);
+ memset(&mdev->res, 0, sizeof(mdev->res));
}
static void mxr_release_layers(struct mxr_device *mdev)
@@ -382,7 +382,7 @@ static int mxr_probe(struct platform_device *pdev)
/* mdev does not exist yet so no mxr_dbg is used */
dev_info(dev, "probe start\n");
- mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev) {
dev_err(dev, "not enough memory.\n");
ret = -ENOMEM;
diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
index 3b1670a045f4..b713403024ef 100644
--- a/drivers/media/platform/s5p-tv/mixer_reg.c
+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
@@ -470,11 +470,11 @@ static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
{
mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
- filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+ filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
- filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+ filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
- filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+ filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}
static void mxr_reg_mxr_dump(struct mxr_device *mdev)
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 1f3b7436511c..82142a2d6d93 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -19,6 +19,7 @@
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>
@@ -95,7 +96,7 @@ int mxr_acquire_video(struct mxr_device *mdev,
/* trying to register next output */
if (sd == NULL)
continue;
- out = kzalloc(sizeof *out, GFP_KERNEL);
+ out = kzalloc(sizeof(*out), GFP_KERNEL);
if (out == NULL) {
mxr_err(mdev, "no memory for '%s'\n",
conf->output_name);
@@ -127,7 +128,7 @@ fail_output:
/* kfree is NULL-safe */
for (i = 0; i < mdev->output_cnt; ++i)
kfree(mdev->output[i]);
- memset(mdev->output, 0, sizeof mdev->output);
+ memset(mdev->output, 0, sizeof(mdev->output));
fail_vb2_allocator:
/* freeing allocator context */
@@ -160,8 +161,8 @@ static int mxr_querycap(struct file *file, void *priv,
mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
- strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
- strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
+ strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, layer->vfd.name, sizeof(cap->card));
sprintf(cap->bus_info, "%d", layer->idx);
cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
@@ -192,7 +193,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
struct mxr_device *mdev = layer->mdev;
struct v4l2_mbus_framefmt mbus_fmt;
- memset(&layer->geo, 0, sizeof layer->geo);
+ memset(&layer->geo, 0, sizeof(layer->geo));
mxr_get_mbus_fmt(mdev, &mbus_fmt);
@@ -425,7 +426,7 @@ static int mxr_s_selection(struct file *file, void *fh,
struct mxr_geometry tmp;
struct v4l2_rect res;
- memset(&res, 0, sizeof res);
+ memset(&res, 0, sizeof(res));
mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
s->r.width, s->r.height, s->r.left, s->r.top);
@@ -464,7 +465,7 @@ static int mxr_s_selection(struct file *file, void *fh,
/* apply change and update geometry if needed */
if (target) {
/* backup current geometry if setup fails */
- memcpy(&tmp, geo, sizeof tmp);
+ memcpy(&tmp, geo, sizeof(tmp));
/* apply requested selection */
target->x_offset = s->r.left;
@@ -496,7 +497,7 @@ static int mxr_s_selection(struct file *file, void *fh,
fail:
/* restore old geometry, which is not touched if target is NULL */
if (target)
- memcpy(geo, &tmp, sizeof tmp);
+ memcpy(geo, &tmp, sizeof(tmp));
return -ERANGE;
}
@@ -1071,7 +1072,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
{
struct mxr_layer *layer;
- layer = kzalloc(sizeof *layer, GFP_KERNEL);
+ layer = kzalloc(sizeof(*layer), GFP_KERNEL);
if (layer == NULL) {
mxr_err(mdev, "not enough memory for layer.\n");
goto fail;
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
index 91a6939a270a..ab6f9ef89423 100644
--- a/drivers/media/platform/s5p-tv/sdo_drv.c
+++ b/drivers/media/platform/s5p-tv/sdo_drv.c
@@ -301,7 +301,7 @@ static int sdo_probe(struct platform_device *pdev)
struct clk *sclk_vpll;
dev_info(dev, "probe start\n");
- sdev = devm_kzalloc(&pdev->dev, sizeof *sdev, GFP_KERNEL);
+ sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
if (!sdev) {
dev_err(dev, "not enough memory.\n");
ret = -ENOMEM;
@@ -341,47 +341,50 @@ static int sdo_probe(struct platform_device *pdev)
/* acquire clocks */
sdev->sclk_dac = clk_get(dev, "sclk_dac");
- if (IS_ERR_OR_NULL(sdev->sclk_dac)) {
+ if (IS_ERR(sdev->sclk_dac)) {
dev_err(dev, "failed to get clock 'sclk_dac'\n");
- ret = -ENXIO;
+ ret = PTR_ERR(sdev->sclk_dac);
goto fail;
}
sdev->dac = clk_get(dev, "dac");
- if (IS_ERR_OR_NULL(sdev->dac)) {
+ if (IS_ERR(sdev->dac)) {
dev_err(dev, "failed to get clock 'dac'\n");
- ret = -ENXIO;
+ ret = PTR_ERR(sdev->dac);
goto fail_sclk_dac;
}
sdev->dacphy = clk_get(dev, "dacphy");
- if (IS_ERR_OR_NULL(sdev->dacphy)) {
+ if (IS_ERR(sdev->dacphy)) {
dev_err(dev, "failed to get clock 'dacphy'\n");
- ret = -ENXIO;
+ ret = PTR_ERR(sdev->dacphy);
goto fail_dac;
}
sclk_vpll = clk_get(dev, "sclk_vpll");
- if (IS_ERR_OR_NULL(sclk_vpll)) {
+ if (IS_ERR(sclk_vpll)) {
dev_err(dev, "failed to get clock 'sclk_vpll'\n");
- ret = -ENXIO;
+ ret = PTR_ERR(sclk_vpll);
goto fail_dacphy;
}
clk_set_parent(sdev->sclk_dac, sclk_vpll);
clk_put(sclk_vpll);
sdev->fout_vpll = clk_get(dev, "fout_vpll");
- if (IS_ERR_OR_NULL(sdev->fout_vpll)) {
+ if (IS_ERR(sdev->fout_vpll)) {
dev_err(dev, "failed to get clock 'fout_vpll'\n");
+ ret = PTR_ERR(sdev->fout_vpll);
goto fail_dacphy;
}
dev_info(dev, "fout_vpll.rate = %lu\n", clk_get_rate(sclk_vpll));
/* acquire regulator */
sdev->vdac = devm_regulator_get(dev, "vdd33a_dac");
- if (IS_ERR_OR_NULL(sdev->vdac)) {
+ if (IS_ERR(sdev->vdac)) {
dev_err(dev, "failed to get regulator 'vdac'\n");
+ ret = PTR_ERR(sdev->vdac);
goto fail_fout_vpll;
}
sdev->vdet = devm_regulator_get(dev, "vdet");
- if (IS_ERR_OR_NULL(sdev->vdet)) {
+ if (IS_ERR(sdev->vdet)) {
dev_err(dev, "failed to get regulator 'vdet'\n");
+ ret = PTR_ERR(sdev->vdet);
goto fail_fout_vpll;
}
@@ -394,7 +397,7 @@ static int sdo_probe(struct platform_device *pdev)
/* configuration of interface subdevice */
v4l2_subdev_init(&sdev->sd, &sdo_sd_ops);
sdev->sd.owner = THIS_MODULE;
- strlcpy(sdev->sd.name, "s5p-sdo", sizeof sdev->sd.name);
+ strlcpy(sdev->sd.name, "s5p-sdo", sizeof(sdev->sd.name));
/* set default format */
sdev->fmt = sdo_find_format(SDO_DEFAULT_STD);
diff --git a/drivers/media/platform/s5p-tv/sii9234_drv.c b/drivers/media/platform/s5p-tv/sii9234_drv.c
index 49191aac9634..d90d2286090b 100644
--- a/drivers/media/platform/s5p-tv/sii9234_drv.c
+++ b/drivers/media/platform/s5p-tv/sii9234_drv.c
@@ -338,7 +338,7 @@ static int sii9234_probe(struct i2c_client *client,
}
ctx->gpio_n_reset = pdata->gpio_n_reset;
- ret = gpio_request(ctx->gpio_n_reset, "MHL_RST");
+ ret = devm_gpio_request(dev, ctx->gpio_n_reset, "MHL_RST");
if (ret) {
dev_err(dev, "failed to acquire MHL_RST gpio\n");
return ret;
@@ -370,7 +370,6 @@ fail_pm_get:
fail_pm:
pm_runtime_disable(dev);
- gpio_free(ctx->gpio_n_reset);
fail:
dev_err(dev, "probe failed\n");
@@ -381,11 +380,8 @@ fail:
static int sii9234_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct sii9234_context *ctx = sd_to_context(sd);
pm_runtime_disable(dev);
- gpio_free(ctx->gpio_n_reset);
dev_info(dev, "remove successful\n");
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
new file mode 100644
index 000000000000..cb54c69d5748
--- /dev/null
+++ b/drivers/media/platform/sh_veu.c
@@ -0,0 +1,1266 @@
+/*
+ * sh-mobile VEU mem2mem driver
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License as
+ * published by the Free Software Foundation
+ */
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define VEU_STR 0x00 /* start register */
+#define VEU_SWR 0x10 /* src: line length */
+#define VEU_SSR 0x14 /* src: image size */
+#define VEU_SAYR 0x18 /* src: y/rgb plane address */
+#define VEU_SACR 0x1c /* src: c plane address */
+#define VEU_BSSR 0x20 /* bundle mode register */
+#define VEU_EDWR 0x30 /* dst: line length */
+#define VEU_DAYR 0x34 /* dst: y/rgb plane address */
+#define VEU_DACR 0x38 /* dst: c plane address */
+#define VEU_TRCR 0x50 /* transform control */
+#define VEU_RFCR 0x54 /* resize scale */
+#define VEU_RFSR 0x58 /* resize clip */
+#define VEU_ENHR 0x5c /* enhance */
+#define VEU_FMCR 0x70 /* filter mode */
+#define VEU_VTCR 0x74 /* lowpass vertical */
+#define VEU_HTCR 0x78 /* lowpass horizontal */
+#define VEU_APCR 0x80 /* color match */
+#define VEU_ECCR 0x84 /* color replace */
+#define VEU_AFXR 0x90 /* fixed mode */
+#define VEU_SWPR 0x94 /* swap */
+#define VEU_EIER 0xa0 /* interrupt mask */
+#define VEU_EVTR 0xa4 /* interrupt event */
+#define VEU_STAR 0xb0 /* status */
+#define VEU_BSRR 0xb4 /* reset */
+
+#define VEU_MCR00 0x200 /* color conversion matrix coefficient 00 */
+#define VEU_MCR01 0x204 /* color conversion matrix coefficient 01 */
+#define VEU_MCR02 0x208 /* color conversion matrix coefficient 02 */
+#define VEU_MCR10 0x20c /* color conversion matrix coefficient 10 */
+#define VEU_MCR11 0x210 /* color conversion matrix coefficient 11 */
+#define VEU_MCR12 0x214 /* color conversion matrix coefficient 12 */
+#define VEU_MCR20 0x218 /* color conversion matrix coefficient 20 */
+#define VEU_MCR21 0x21c /* color conversion matrix coefficient 21 */
+#define VEU_MCR22 0x220 /* color conversion matrix coefficient 22 */
+#define VEU_COFFR 0x224 /* color conversion offset */
+#define VEU_CBR 0x228 /* color conversion clip */
+
+/*
+ * 4092x4092 max size is the normal case. In some cases it can be reduced to
+ * 2048x2048, in other cases it can be 4092x8188 or even 8188x8188.
+ */
+#define MAX_W 4092
+#define MAX_H 4092
+#define MIN_W 8
+#define MIN_H 8
+#define ALIGN_W 4
+
+/* 3 buffers of 2048 x 1536 - 3 megapixels @ 16bpp */
+#define VIDEO_MEM_LIMIT ALIGN(2048 * 1536 * 2 * 3, 1024 * 1024)
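+/*
+ * A worked example, assuming the limit above stays as defined: one
+ * 2048 x 1536 buffer at 16 bits per pixel is 6291456 bytes, so three of
+ * them come to 18874368 bytes, which is already an exact multiple of
+ * 1 MiB (18 MiB) and is therefore left unchanged by the ALIGN().
+ */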
+
+#define MEM2MEM_DEF_TRANSLEN 1
+
+struct sh_veu_dev;
+
+struct sh_veu_file {
+ struct sh_veu_dev *veu_dev;
+ bool cfg_needed;
+};
+
+struct sh_veu_format {
+ char *name;
+ u32 fourcc;
+ unsigned int depth;
+ unsigned int ydepth;
+};
+
+/* video data format */
+struct sh_veu_vfmt {
+ /* Replace with v4l2_rect */
+ struct v4l2_rect frame;
+ unsigned int bytesperline;
+ unsigned int offset_y;
+ unsigned int offset_c;
+ const struct sh_veu_format *fmt;
+};
+
+struct sh_veu_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct device *dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct sh_veu_vfmt vfmt_out;
+ struct sh_veu_vfmt vfmt_in;
+ /* Only single user per direction so far */
+ struct sh_veu_file *capture;
+ struct sh_veu_file *output;
+ struct mutex fop_lock;
+ void __iomem *base;
+ struct vb2_alloc_ctx *alloc_ctx;
+ spinlock_t lock;
+ bool is_2h;
+ unsigned int xaction;
+ bool aborting;
+};
+
+enum sh_veu_fmt_idx {
+ SH_VEU_FMT_NV12,
+ SH_VEU_FMT_NV16,
+ SH_VEU_FMT_NV24,
+ SH_VEU_FMT_RGB332,
+ SH_VEU_FMT_RGB444,
+ SH_VEU_FMT_RGB565,
+ SH_VEU_FMT_RGB666,
+ SH_VEU_FMT_RGB24,
+};
+
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+
+#define DEFAULT_IN_WIDTH VGA_WIDTH
+#define DEFAULT_IN_HEIGHT VGA_HEIGHT
+#define DEFAULT_IN_FMTIDX SH_VEU_FMT_NV12
+#define DEFAULT_OUT_WIDTH VGA_WIDTH
+#define DEFAULT_OUT_HEIGHT VGA_HEIGHT
+#define DEFAULT_OUT_FMTIDX SH_VEU_FMT_RGB565
+
+/*
+ * Alignment: Y-plane should be 4-byte aligned for NV12 and NV16, and 8-byte
+ * aligned for NV24.
+ */
+static const struct sh_veu_format sh_veu_fmt[] = {
+ [SH_VEU_FMT_NV12] = { .ydepth = 8, .depth = 12, .name = "NV12", .fourcc = V4L2_PIX_FMT_NV12 },
+ [SH_VEU_FMT_NV16] = { .ydepth = 8, .depth = 16, .name = "NV16", .fourcc = V4L2_PIX_FMT_NV16 },
+ [SH_VEU_FMT_NV24] = { .ydepth = 8, .depth = 24, .name = "NV24", .fourcc = V4L2_PIX_FMT_NV24 },
+ [SH_VEU_FMT_RGB332] = { .ydepth = 8, .depth = 8, .name = "RGB332", .fourcc = V4L2_PIX_FMT_RGB332 },
+ [SH_VEU_FMT_RGB444] = { .ydepth = 16, .depth = 16, .name = "RGB444", .fourcc = V4L2_PIX_FMT_RGB444 },
+ [SH_VEU_FMT_RGB565] = { .ydepth = 16, .depth = 16, .name = "RGB565", .fourcc = V4L2_PIX_FMT_RGB565 },
+ [SH_VEU_FMT_RGB666] = { .ydepth = 32, .depth = 32, .name = "BGR666", .fourcc = V4L2_PIX_FMT_BGR666 },
+ [SH_VEU_FMT_RGB24] = { .ydepth = 24, .depth = 24, .name = "RGB24", .fourcc = V4L2_PIX_FMT_RGB24 },
+};
+
+#define DEFAULT_IN_VFMT (struct sh_veu_vfmt){ \
+ .frame = { \
+ .width = VGA_WIDTH, \
+ .height = VGA_HEIGHT, \
+ }, \
+ .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_IN_FMTIDX].ydepth) >> 3, \
+ .fmt = &sh_veu_fmt[DEFAULT_IN_FMTIDX], \
+}
+
+#define DEFAULT_OUT_VFMT (struct sh_veu_vfmt){ \
+ .frame = { \
+ .width = VGA_WIDTH, \
+ .height = VGA_HEIGHT, \
+ }, \
+ .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_OUT_FMTIDX].ydepth) >> 3, \
+ .fmt = &sh_veu_fmt[DEFAULT_OUT_FMTIDX], \
+}
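+
+/*
+ * With the VGA defaults above, assuming nothing else is changed:
+ * NV12 input: bytesperline = (640 * 8) >> 3 = 640 bytes per line,
+ * RGB565 output: bytesperline = (640 * 16) >> 3 = 1280 bytes per line.
+ */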
+
+/*
+ * TODO: add support for further output formats:
+ * SH_VEU_FMT_NV12,
+ * SH_VEU_FMT_NV16,
+ * SH_VEU_FMT_NV24,
+ * SH_VEU_FMT_RGB332,
+ * SH_VEU_FMT_RGB444,
+ * SH_VEU_FMT_RGB666,
+ * SH_VEU_FMT_RGB24,
+ */
+
+static const int sh_veu_fmt_out[] = {
+ SH_VEU_FMT_RGB565,
+};
+
+/*
+ * TODO: add support for further input formats:
+ * SH_VEU_FMT_NV16,
+ * SH_VEU_FMT_NV24,
+ * SH_VEU_FMT_RGB565,
+ * SH_VEU_FMT_RGB666,
+ * SH_VEU_FMT_RGB24,
+ */
+static const int sh_veu_fmt_in[] = {
+ SH_VEU_FMT_NV12,
+};
+
+static enum v4l2_colorspace sh_veu_4cc2cspace(u32 fourcc)
+{
+ switch (fourcc) {
+ default:
+ BUG();
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
+ return V4L2_COLORSPACE_JPEG;
+ case V4L2_PIX_FMT_RGB332:
+ case V4L2_PIX_FMT_RGB444:
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_BGR666:
+ case V4L2_PIX_FMT_RGB24:
+ return V4L2_COLORSPACE_SRGB;
+ }
+}
+
+static u32 sh_veu_reg_read(struct sh_veu_dev *veu, unsigned int reg)
+{
+ return ioread32(veu->base + reg);
+}
+
+static void sh_veu_reg_write(struct sh_veu_dev *veu, unsigned int reg,
+ u32 value)
+{
+ iowrite32(value, veu->base + reg);
+}
+
+ /* ========== mem2mem callbacks ========== */
+
+static void sh_veu_job_abort(void *priv)
+{
+ struct sh_veu_dev *veu = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ veu->aborting = true;
+}
+
+static void sh_veu_lock(void *priv)
+{
+ struct sh_veu_dev *veu = priv;
+
+ mutex_lock(&veu->fop_lock);
+}
+
+static void sh_veu_unlock(void *priv)
+{
+ struct sh_veu_dev *veu = priv;
+
+ mutex_unlock(&veu->fop_lock);
+}
+
+static void sh_veu_process(struct sh_veu_dev *veu,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf)
+{
+ dma_addr_t addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+
+ sh_veu_reg_write(veu, VEU_DAYR, addr + veu->vfmt_out.offset_y);
+ sh_veu_reg_write(veu, VEU_DACR, veu->vfmt_out.offset_c ?
+ addr + veu->vfmt_out.offset_c : 0);
+ dev_dbg(veu->dev, "%s(): dst base %lx, y: %x, c: %x\n", __func__,
+ (unsigned long)addr,
+ veu->vfmt_out.offset_y, veu->vfmt_out.offset_c);
+
+ addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ sh_veu_reg_write(veu, VEU_SAYR, addr + veu->vfmt_in.offset_y);
+ sh_veu_reg_write(veu, VEU_SACR, veu->vfmt_in.offset_c ?
+ addr + veu->vfmt_in.offset_c : 0);
+ dev_dbg(veu->dev, "%s(): src base %lx, y: %x, c: %x\n", __func__,
+ (unsigned long)addr,
+ veu->vfmt_in.offset_y, veu->vfmt_in.offset_c);
+
+ sh_veu_reg_write(veu, VEU_STR, 1);
+
+ sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */
+}
+
+/**
+ * sh_veu_device_run() - prepares and starts the device
+ *
+ * This will be called by the framework when it decides to schedule a particular
+ * instance.
+ */
+static void sh_veu_device_run(void *priv)
+{
+ struct sh_veu_dev *veu = priv;
+ struct vb2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
+
+ if (src_buf && dst_buf)
+ sh_veu_process(veu, src_buf, dst_buf);
+}
+
+ /* ========== video ioctls ========== */
+
+static bool sh_veu_is_streamer(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
+ enum v4l2_buf_type type)
+{
+ return (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ veu_file == veu->capture) ||
+ (type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ veu_file == veu->output);
+}
+
+static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+
+/*
+ * It is not unusual to have video nodes open()ed multiple times. While some
+ * V4L2 operations are non-intrusive, like querying formats and various
+ * parameters, others, like setting formats, starting and stopping streaming,
+ * queuing and dequeuing buffers, directly affect hardware configuration and /
+ * or execution. This function verifies availability of the requested interface
+ * and, if available, reserves it for the requesting user.
+ */
+static int sh_veu_stream_init(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
+ enum v4l2_buf_type type)
+{
+ struct sh_veu_file **stream;
+
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ stream = &veu->capture;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ stream = &veu->output;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (*stream == veu_file)
+ return 0;
+
+ if (*stream)
+ return -EBUSY;
+
+ *stream = veu_file;
+
+ return 0;
+}
+
+static int sh_veu_context_init(struct sh_veu_dev *veu)
+{
+ if (veu->m2m_ctx)
+ return 0;
+
+ veu->m2m_ctx = v4l2_m2m_ctx_init(veu->m2m_dev, veu,
+ sh_veu_queue_init);
+
+ if (IS_ERR(veu->m2m_ctx))
+ return PTR_ERR(veu->m2m_ctx);
+
+ return 0;
+}
+
+static int sh_veu_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, "sh-veu", sizeof(cap->driver));
+ strlcpy(cap->card, "sh-mobile VEU", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:sh-veu", sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int sh_veu_enum_fmt(struct v4l2_fmtdesc *f, const int *fmt, int fmt_num)
+{
+ if (f->index >= fmt_num)
+ return -EINVAL;
+
+ strlcpy(f->description, sh_veu_fmt[fmt[f->index]].name, sizeof(f->description));
+ f->pixelformat = sh_veu_fmt[fmt[f->index]].fourcc;
+ return 0;
+}
+
+static int sh_veu_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return sh_veu_enum_fmt(f, sh_veu_fmt_out, ARRAY_SIZE(sh_veu_fmt_out));
+}
+
+static int sh_veu_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return sh_veu_enum_fmt(f, sh_veu_fmt_in, ARRAY_SIZE(sh_veu_fmt_in));
+}
+
+static struct sh_veu_vfmt *sh_veu_get_vfmt(struct sh_veu_dev *veu,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &veu->vfmt_out;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &veu->vfmt_in;
+ default:
+ return NULL;
+ }
+}
+
+static int sh_veu_g_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct sh_veu_dev *veu = veu_file->veu_dev;
+ struct sh_veu_vfmt *vfmt;
+
+ vfmt = sh_veu_get_vfmt(veu, f->type);
+
+ pix->width = vfmt->frame.width;
+ pix->height = vfmt->frame.height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = vfmt->fmt->fourcc;
+ pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
+ pix->bytesperline = vfmt->bytesperline;
+ pix->sizeimage = vfmt->bytesperline * pix->height *
+ vfmt->fmt->depth / vfmt->fmt->ydepth;
+ pix->priv = 0;
+ dev_dbg(veu->dev, "%s(): type: %d, size %u @ %ux%u, fmt %x\n", __func__,
+ f->type, pix->sizeimage, pix->width, pix->height, pix->pixelformat);
+
+ return 0;
+}
+
+static int sh_veu_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return sh_veu_g_fmt(priv, f);
+}
+
+static int sh_veu_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return sh_veu_g_fmt(priv, f);
+}
+
+static int sh_veu_try_fmt(struct v4l2_format *f, const struct sh_veu_format *fmt)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ unsigned int y_bytes_used;
+
+ /*
+ * The V4L2 specification suggests that the driver should correct the
+ * format struct if any of the dimensions is unsupported
+ */
+ switch (pix->field) {
+ default:
+ case V4L2_FIELD_ANY:
+ pix->field = V4L2_FIELD_NONE;
+ /* fall through: continue handling V4L2_FIELD_NONE */
+ case V4L2_FIELD_NONE:
+ break;
+ }
+
+ v4l_bound_align_image(&pix->width, MIN_W, MAX_W, ALIGN_W,
+ &pix->height, MIN_H, MAX_H, 0, 0);
+
+ y_bytes_used = (pix->width * fmt->ydepth) >> 3;
+
+ if (pix->bytesperline < y_bytes_used)
+ pix->bytesperline = y_bytes_used;
+ pix->sizeimage = pix->height * pix->bytesperline * fmt->depth / fmt->ydepth;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
+ pix->priv = 0;
+
+ pr_debug("%s(): type: %d, size %u\n", __func__, f->type, pix->sizeimage);
+
+ return 0;
+}
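+
+/*
+ * A worked example, assuming the NV12 default at 640x480: y_bytes_used is
+ * (640 * 8) >> 3 = 640, so bytesperline is at least 640 and sizeimage
+ * becomes 480 * 640 * 12 / 8 = 460800 bytes (Y plane plus half-size
+ * chroma plane).
+ */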
+
+static const struct sh_veu_format *sh_veu_find_fmt(const struct v4l2_format *f)
+{
+ const int *fmt;
+ int i, n, dflt;
+
+ pr_debug("%s(%d;%d)\n", __func__, f->type, f->fmt.pix.field);
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ fmt = sh_veu_fmt_out;
+ n = ARRAY_SIZE(sh_veu_fmt_out);
+ dflt = DEFAULT_OUT_FMTIDX;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ default:
+ fmt = sh_veu_fmt_in;
+ n = ARRAY_SIZE(sh_veu_fmt_in);
+ dflt = DEFAULT_IN_FMTIDX;
+ break;
+ }
+
+ for (i = 0; i < n; i++)
+ if (sh_veu_fmt[fmt[i]].fourcc == f->fmt.pix.pixelformat)
+ return &sh_veu_fmt[fmt[i]];
+
+ return &sh_veu_fmt[dflt];
+}
+
+static int sh_veu_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ const struct sh_veu_format *fmt;
+
+ fmt = sh_veu_find_fmt(f);
+ if (!fmt)
+ /* wrong buffer type */
+ return -EINVAL;
+
+ return sh_veu_try_fmt(f, fmt);
+}
+
+static int sh_veu_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ const struct sh_veu_format *fmt;
+
+ fmt = sh_veu_find_fmt(f);
+ if (!fmt)
+ /* wrong buffer type */
+ return -EINVAL;
+
+ return sh_veu_try_fmt(f, fmt);
+}
+
+static void sh_veu_colour_offset(struct sh_veu_dev *veu, struct sh_veu_vfmt *vfmt)
+{
+ /* dst_left and dst_top validity will be verified in CROP / COMPOSE */
+ unsigned int left = vfmt->frame.left & ~0x03;
+ unsigned int top = vfmt->frame.top;
+ dma_addr_t offset = ((left * veu->vfmt_out.fmt->depth) >> 3) +
+ top * veu->vfmt_out.bytesperline;
+ unsigned int y_line;
+
+ vfmt->offset_y = offset;
+
+ switch (vfmt->fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
+ y_line = ALIGN(vfmt->frame.width, 16);
+ vfmt->offset_c = offset + y_line * vfmt->frame.height;
+ break;
+ case V4L2_PIX_FMT_RGB332:
+ case V4L2_PIX_FMT_RGB444:
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_BGR666:
+ case V4L2_PIX_FMT_RGB24:
+ vfmt->offset_c = 0;
+ break;
+ default:
+ BUG();
+ }
+}
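+
+/*
+ * A worked example of the offsets above, assuming the NV12 VGA default
+ * with frame.left = 0 and frame.top = 0: offset_y is 0 and the chroma
+ * plane starts at offset_c = ALIGN(640, 16) * 480 = 307200 bytes after
+ * the Y plane base. For the packed RGB formats offset_c stays 0 because
+ * there is no second plane.
+ */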
+
+static int sh_veu_s_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct sh_veu_dev *veu = veu_file->veu_dev;
+ struct sh_veu_vfmt *vfmt;
+ struct vb2_queue *vq;
+ int ret = sh_veu_context_init(veu);
+ if (ret < 0)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(veu->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&veu_file->veu_dev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ vfmt = sh_veu_get_vfmt(veu, f->type);
+ /* called after try_fmt(), hence vfmt != NULL. Implicit BUG_ON() below */
+
+ vfmt->fmt = sh_veu_find_fmt(f);
+ /* vfmt->fmt != NULL following the same argument as above */
+ vfmt->frame.width = pix->width;
+ vfmt->frame.height = pix->height;
+ vfmt->bytesperline = pix->bytesperline;
+
+ sh_veu_colour_offset(veu, vfmt);
+
+ /*
+ * We could also verify and require configuration only if any parameters
+ * actually have changed, but it is unlikely that the user requests the
+ * same configuration several times without closing the device.
+ */
+ veu_file->cfg_needed = true;
+
+ dev_dbg(veu->dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %x\n",
+ f->type, pix->width, pix->height, vfmt->fmt->fourcc);
+
+ return 0;
+}
+
+static int sh_veu_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret = sh_veu_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return sh_veu_s_fmt(priv, f);
+}
+
+static int sh_veu_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret = sh_veu_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ return sh_veu_s_fmt(priv, f);
+}
+
+static int sh_veu_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct sh_veu_file *veu_file = priv;
+ struct sh_veu_dev *veu = veu_file->veu_dev;
+ int ret = sh_veu_context_init(veu);
+ if (ret < 0)
+ return ret;
+
+ ret = sh_veu_stream_init(veu, veu_file, reqbufs->type);
+ if (ret < 0)
+ return ret;
+
+ return v4l2_m2m_reqbufs(file, veu->m2m_ctx, reqbufs);
+}
+
+static int sh_veu_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
+ return -EBUSY;
+
+ return v4l2_m2m_querybuf(file, veu_file->veu_dev->m2m_ctx, buf);
+}
+
+static int sh_veu_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
+ return -EBUSY;
+
+ return v4l2_m2m_qbuf(file, veu_file->veu_dev->m2m_ctx, buf);
+}
+
+static int sh_veu_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
+ return -EBUSY;
+
+ return v4l2_m2m_dqbuf(file, veu_file->veu_dev->m2m_ctx, buf);
+}
+
+static void sh_veu_calc_scale(struct sh_veu_dev *veu,
+ int size_in, int size_out, int crop_out,
+ u32 *mant, u32 *frac, u32 *rep)
+{
+ u32 fixpoint;
+
+ /* calculate FRAC and MANT */
+ *rep = *mant = *frac = 0;
+
+ if (size_in == size_out) {
+ if (crop_out != size_out)
+ *mant = 1; /* needed for cropping */
+ return;
+ }
+
+ /* VEU2H special upscale */
+ if (veu->is_2h && size_out > size_in) {
+ u32 fixpoint = (4096 * size_in) / size_out;
+ *mant = fixpoint / 4096;
+ *frac = (fixpoint - (*mant * 4096)) & ~0x07;
+
+ switch (*frac) {
+ case 0x800:
+ *rep = 1;
+ break;
+ case 0x400:
+ *rep = 3;
+ break;
+ case 0x200:
+ *rep = 7;
+ break;
+ }
+ if (*rep)
+ return;
+ }
+
+ fixpoint = (4096 * (size_in - 1)) / (size_out + 1);
+ *mant = fixpoint / 4096;
+ *frac = fixpoint - (*mant * 4096);
+
+ if (*frac & 0x07) {
+ /*
+ * FIXME: do we really have to round down twice in the
+ * up-scaling case?
+ */
+ *frac &= ~0x07;
+ if (size_out > size_in)
+ *frac -= 8; /* round down if scaling up */
+ else
+ *frac += 8; /* round up if scaling down */
+ }
+}
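+
+/*
+ * A worked example of the fixed-point maths above, assuming a plain 2:1
+ * downscale with size_in = 640, size_out = crop_out = 320 on non-VEU2H
+ * hardware: fixpoint = (4096 * 639) / 321 = 8153, giving *mant = 1 and
+ * *frac = 4057; the low bits are then masked and, because this is a
+ * downscale, rounded up to *frac = 4064, i.e. a programmed ratio of
+ * 1 + 4064/4096, roughly 1.99.
+ */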
+
+static unsigned long sh_veu_scale_v(struct sh_veu_dev *veu,
+ int size_in, int size_out, int crop_out)
+{
+ u32 mant, frac, value, rep;
+
+ sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);
+
+ /* set scale */
+ value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff0000) |
+ (((mant << 12) | frac) << 16);
+
+ sh_veu_reg_write(veu, VEU_RFCR, value);
+
+ /* set clip */
+ value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff0000) |
+ (((rep << 12) | crop_out) << 16);
+
+ sh_veu_reg_write(veu, VEU_RFSR, value);
+
+ return ALIGN((size_in * crop_out) / size_out, 4);
+}
+
+static unsigned long sh_veu_scale_h(struct sh_veu_dev *veu,
+ int size_in, int size_out, int crop_out)
+{
+ u32 mant, frac, value, rep;
+
+ sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);
+
+ /* set scale */
+ value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff) |
+ (mant << 12) | frac;
+
+ sh_veu_reg_write(veu, VEU_RFCR, value);
+
+ /* set clip */
+ value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff) |
+ (rep << 12) | crop_out;
+
+ sh_veu_reg_write(veu, VEU_RFSR, value);
+
+ return ALIGN((size_in * crop_out) / size_out, 4);
+}
+
+static void sh_veu_configure(struct sh_veu_dev *veu)
+{
+ u32 src_width, src_stride, src_height;
+ u32 dst_width, dst_stride, dst_height;
+ u32 real_w, real_h;
+
+ /* reset VEU */
+ sh_veu_reg_write(veu, VEU_BSRR, 0x100);
+
+ src_width = veu->vfmt_in.frame.width;
+ src_height = veu->vfmt_in.frame.height;
+ src_stride = ALIGN(veu->vfmt_in.frame.width, 16);
+
+ dst_width = real_w = veu->vfmt_out.frame.width;
+ dst_height = real_h = veu->vfmt_out.frame.height;
+ /* The datasheet is unclear on whether it's always a number of bytes or not */
+ dst_stride = veu->vfmt_out.bytesperline;
+
+ /*
+ * So far real_w == dst_width && real_h == dst_height, but it wasn't
+ * necessarily the case in the original vidix driver, so, it may change
+ * here in the future too.
+ */
+ src_width = sh_veu_scale_h(veu, src_width, real_w, dst_width);
+ src_height = sh_veu_scale_v(veu, src_height, real_h, dst_height);
+
+ sh_veu_reg_write(veu, VEU_SWR, src_stride);
+ sh_veu_reg_write(veu, VEU_SSR, src_width | (src_height << 16));
+ sh_veu_reg_write(veu, VEU_BSSR, 0); /* not using bundle mode */
+
+ sh_veu_reg_write(veu, VEU_EDWR, dst_stride);
+ sh_veu_reg_write(veu, VEU_DACR, 0); /* unused for RGB */
+
+ sh_veu_reg_write(veu, VEU_SWPR, 0x67);
+ sh_veu_reg_write(veu, VEU_TRCR, (6 << 16) | (0 << 14) | 2 | 4);
+
+ if (veu->is_2h) {
+ sh_veu_reg_write(veu, VEU_MCR00, 0x0cc5);
+ sh_veu_reg_write(veu, VEU_MCR01, 0x0950);
+ sh_veu_reg_write(veu, VEU_MCR02, 0x0000);
+
+ sh_veu_reg_write(veu, VEU_MCR10, 0x397f);
+ sh_veu_reg_write(veu, VEU_MCR11, 0x0950);
+ sh_veu_reg_write(veu, VEU_MCR12, 0x3ccd);
+
+ sh_veu_reg_write(veu, VEU_MCR20, 0x0000);
+ sh_veu_reg_write(veu, VEU_MCR21, 0x0950);
+ sh_veu_reg_write(veu, VEU_MCR22, 0x1023);
+
+ sh_veu_reg_write(veu, VEU_COFFR, 0x00800010);
+ }
+}
+
+static int sh_veu_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
+ return -EBUSY;
+
+ if (veu_file->cfg_needed) {
+ struct sh_veu_dev *veu = veu_file->veu_dev;
+ veu_file->cfg_needed = false;
+ sh_veu_configure(veu_file->veu_dev);
+ veu->xaction = 0;
+ veu->aborting = false;
+ }
+
+ return v4l2_m2m_streamon(file, veu_file->veu_dev->m2m_ctx, type);
+}
+
+static int sh_veu_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
+ return -EBUSY;
+
+ return v4l2_m2m_streamoff(file, veu_file->veu_dev->m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = {
+ .vidioc_querycap = sh_veu_querycap,
+
+ .vidioc_enum_fmt_vid_cap = sh_veu_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = sh_veu_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = sh_veu_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = sh_veu_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = sh_veu_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = sh_veu_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = sh_veu_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = sh_veu_s_fmt_vid_out,
+
+ .vidioc_reqbufs = sh_veu_reqbufs,
+ .vidioc_querybuf = sh_veu_querybuf,
+
+ .vidioc_qbuf = sh_veu_qbuf,
+ .vidioc_dqbuf = sh_veu_dqbuf,
+
+ .vidioc_streamon = sh_veu_streamon,
+ .vidioc_streamoff = sh_veu_streamoff,
+};
+
+ /* ========== Queue operations ========== */
+
+static int sh_veu_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *f,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct sh_veu_dev *veu = vb2_get_drv_priv(vq);
+ struct sh_veu_vfmt *vfmt;
+ unsigned int size, count = *nbuffers;
+
+ if (f) {
+ const struct v4l2_pix_format *pix = &f->fmt.pix;
+ const struct sh_veu_format *fmt = sh_veu_find_fmt(f);
+ struct v4l2_format ftmp = *f;
+
+ if (fmt->fourcc != pix->pixelformat)
+ return -EINVAL;
+ sh_veu_try_fmt(&ftmp, fmt);
+ if (ftmp.fmt.pix.width != pix->width ||
+ ftmp.fmt.pix.height != pix->height)
+ return -EINVAL;
+ size = pix->bytesperline ? pix->bytesperline * pix->height :
+ pix->width * pix->height * fmt->depth >> 3;
+ } else {
+ vfmt = sh_veu_get_vfmt(veu, vq->type);
+ size = vfmt->bytesperline * vfmt->frame.height;
+ }
+
+ if (count < 2)
+ *nbuffers = count = 2;
+
+ if (size * count > VIDEO_MEM_LIMIT) {
+ count = VIDEO_MEM_LIMIT / size;
+ *nbuffers = count;
+ }
+
+ *nplanes = 1;
+ sizes[0] = size;
+ alloc_ctxs[0] = veu->alloc_ctx;
+
+ dev_dbg(veu->dev, "get %d buffer(s) of size %d each.\n", count, size);
+
+ return 0;
+}
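+
+/*
+ * A worked example of the limit above, assuming the RGB565 VGA default on
+ * the capture queue: each buffer is 1280 * 480 = 614400 bytes, so
+ * VIDEO_MEM_LIMIT (18 MiB) allows at most 30 buffers, while at least 2
+ * are always requested.
+ */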
+
+static int sh_veu_buf_prepare(struct vb2_buffer *vb)
+{
+ struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
+ struct sh_veu_vfmt *vfmt;
+ unsigned int sizeimage;
+
+ vfmt = sh_veu_get_vfmt(veu, vb->vb2_queue->type);
+ sizeimage = vfmt->bytesperline * vfmt->frame.height *
+ vfmt->fmt->depth / vfmt->fmt->ydepth;
+
+ if (vb2_plane_size(vb, 0) < sizeimage) {
+ dev_dbg(veu->dev, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, sizeimage);
+
+ return 0;
+}
+
+static void sh_veu_buf_queue(struct vb2_buffer *vb)
+{
+ struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
+ dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->v4l2_buf.type);
+ v4l2_m2m_buf_queue(veu->m2m_ctx, vb);
+}
+
+static void sh_veu_wait_prepare(struct vb2_queue *q)
+{
+ sh_veu_unlock(vb2_get_drv_priv(q));
+}
+
+static void sh_veu_wait_finish(struct vb2_queue *q)
+{
+ sh_veu_lock(vb2_get_drv_priv(q));
+}
+
+static const struct vb2_ops sh_veu_qops = {
+ .queue_setup = sh_veu_queue_setup,
+ .buf_prepare = sh_veu_buf_prepare,
+ .buf_queue = sh_veu_buf_queue,
+ .wait_prepare = sh_veu_wait_prepare,
+ .wait_finish = sh_veu_wait_finish,
+};
+
+static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = priv;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &sh_veu_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret < 0)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = priv;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &sh_veu_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+
+ return vb2_queue_init(dst_vq);
+}
+
+ /* ========== File operations ========== */
+
+static int sh_veu_open(struct file *file)
+{
+ struct sh_veu_dev *veu = video_drvdata(file);
+ struct sh_veu_file *veu_file;
+
+ veu_file = kzalloc(sizeof(*veu_file), GFP_KERNEL);
+ if (!veu_file)
+ return -ENOMEM;
+
+ veu_file->veu_dev = veu;
+ veu_file->cfg_needed = true;
+
+ file->private_data = veu_file;
+
+ pm_runtime_get_sync(veu->dev);
+
+ dev_dbg(veu->dev, "Created instance %p\n", veu_file);
+
+ return 0;
+}
+
+static int sh_veu_release(struct file *file)
+{
+ struct sh_veu_dev *veu = video_drvdata(file);
+ struct sh_veu_file *veu_file = file->private_data;
+
+ dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);
+
+ pm_runtime_put(veu->dev);
+
+ if (veu_file == veu->capture) {
+ veu->capture = NULL;
+ vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
+ }
+
+ if (veu_file == veu->output) {
+ veu->output = NULL;
+ vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT));
+ }
+
+ if (!veu->output && !veu->capture && veu->m2m_ctx) {
+ v4l2_m2m_ctx_release(veu->m2m_ctx);
+ veu->m2m_ctx = NULL;
+ }
+
+ kfree(veu_file);
+
+ return 0;
+}
+
+static unsigned int sh_veu_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct sh_veu_file *veu_file = file->private_data;
+
+ return v4l2_m2m_poll(file, veu_file->veu_dev->m2m_ctx, wait);
+}
+
+static int sh_veu_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct sh_veu_file *veu_file = file->private_data;
+
+ return v4l2_m2m_mmap(file, veu_file->veu_dev->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations sh_veu_fops = {
+ .owner = THIS_MODULE,
+ .open = sh_veu_open,
+ .release = sh_veu_release,
+ .poll = sh_veu_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = sh_veu_mmap,
+};
+
+static const struct video_device sh_veu_videodev = {
+ .name = "sh-veu",
+ .fops = &sh_veu_fops,
+ .ioctl_ops = &sh_veu_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static const struct v4l2_m2m_ops sh_veu_m2m_ops = {
+ .device_run = sh_veu_device_run,
+ .job_abort = sh_veu_job_abort,
+};
+
+static irqreturn_t sh_veu_bh(int irq, void *dev_id)
+{
+ struct sh_veu_dev *veu = dev_id;
+
+ if (veu->xaction == MEM2MEM_DEF_TRANSLEN || veu->aborting) {
+ v4l2_m2m_job_finish(veu->m2m_dev, veu->m2m_ctx);
+ veu->xaction = 0;
+ } else {
+ sh_veu_device_run(veu);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sh_veu_isr(int irq, void *dev_id)
+{
+ struct sh_veu_dev *veu = dev_id;
+ struct vb2_buffer *dst;
+ struct vb2_buffer *src;
+ u32 status = sh_veu_reg_read(veu, VEU_EVTR);
+
+ /* bundle read mode not used */
+ if (!(status & 1))
+ return IRQ_NONE;
+
+ /* disable interrupt in VEU */
+ sh_veu_reg_write(veu, VEU_EIER, 0);
+ /* halt operation */
+ sh_veu_reg_write(veu, VEU_STR, 0);
+ /* ack int, write 0 to clear bits */
+ sh_veu_reg_write(veu, VEU_EVTR, status & ~1);
+
+ /* conversion completed */
+ dst = v4l2_m2m_dst_buf_remove(veu->m2m_ctx);
+ src = v4l2_m2m_src_buf_remove(veu->m2m_ctx);
+ if (!src || !dst)
+ return IRQ_NONE;
+
+ spin_lock(&veu->lock);
+ v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ spin_unlock(&veu->lock);
+
+ veu->xaction++;
+
+ if (!veu->aborting)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_HANDLED;
+}
+
+static int sh_veu_probe(struct platform_device *pdev)
+{
+ struct sh_veu_dev *veu;
+ struct resource *reg_res;
+ struct video_device *vdev;
+ int irq, ret;
+
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+
+ if (!reg_res || irq <= 0) {
+ dev_err(&pdev->dev, "Insufficient VEU platform information.\n");
+ return -ENODEV;
+ }
+
+ veu = devm_kzalloc(&pdev->dev, sizeof(*veu), GFP_KERNEL);
+ if (!veu)
+ return -ENOMEM;
+
+ veu->is_2h = resource_size(reg_res) == 0x22c;
+
+ veu->base = devm_request_and_ioremap(&pdev->dev, reg_res);
+ if (!veu->base)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, sh_veu_isr, sh_veu_bh,
+ 0, "veu", veu);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_device_register(&pdev->dev, &veu->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error registering v4l2 device\n");
+ return ret;
+ }
+
+ vdev = &veu->vdev;
+
+ veu->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(veu->alloc_ctx)) {
+ ret = PTR_ERR(veu->alloc_ctx);
+ goto einitctx;
+ }
+
+ *vdev = sh_veu_videodev;
+ spin_lock_init(&veu->lock);
+ mutex_init(&veu->fop_lock);
+ vdev->lock = &veu->fop_lock;
+
+ video_set_drvdata(vdev, veu);
+
+ veu->dev = &pdev->dev;
+ veu->vfmt_out = DEFAULT_OUT_VFMT;
+ veu->vfmt_in = DEFAULT_IN_VFMT;
+
+ veu->m2m_dev = v4l2_m2m_init(&sh_veu_m2m_ops);
+ if (IS_ERR(veu->m2m_dev)) {
+ ret = PTR_ERR(veu->m2m_dev);
+ v4l2_err(&veu->v4l2_dev, "Failed to init mem2mem device: %d\n", ret);
+ goto em2minit;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ pm_runtime_suspend(&pdev->dev);
+ if (ret < 0)
+ goto evidreg;
+
+ return ret;
+
+evidreg:
+ pm_runtime_disable(&pdev->dev);
+ v4l2_m2m_release(veu->m2m_dev);
+em2minit:
+ vb2_dma_contig_cleanup_ctx(veu->alloc_ctx);
+einitctx:
+ v4l2_device_unregister(&veu->v4l2_dev);
+ return ret;
+}
+
+static int sh_veu_remove(struct platform_device *pdev)
+{
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct sh_veu_dev *veu = container_of(v4l2_dev,
+ struct sh_veu_dev, v4l2_dev);
+
+ video_unregister_device(&veu->vdev);
+ pm_runtime_disable(&pdev->dev);
+ v4l2_m2m_release(veu->m2m_dev);
+ vb2_dma_contig_cleanup_ctx(veu->alloc_ctx);
+ v4l2_device_unregister(&veu->v4l2_dev);
+
+ return 0;
+}
+
+static struct platform_driver __refdata sh_veu_pdrv = {
+ .remove = sh_veu_remove,
+ .driver = {
+ .name = "sh_veu",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sh_veu_init(void)
+{
+ return platform_driver_probe(&sh_veu_pdrv, sh_veu_probe);
+}
+
+static void __exit sh_veu_exit(void)
+{
+ platform_driver_unregister(&sh_veu_pdrv);
+}
+
+module_init(sh_veu_init);
+module_exit(sh_veu_exit);
+
+MODULE_DESCRIPTION("sh-mobile VEU mem2mem driver");
+MODULE_AUTHOR("Guennadi Liakhovetski, <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index f3c4571ac01e..66c8da18df84 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -207,6 +207,7 @@ static void sh_vou_stream_start(struct sh_vou_device *vou_dev,
#endif
switch (vou_dev->pix.pixelformat) {
+ default:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
row_coeff = 1;
@@ -253,7 +254,8 @@ static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count,
if (PAGE_ALIGN(*size) * *count > 4 * 1024 * 1024)
*count = 4 * 1024 * 1024 / PAGE_ALIGN(*size);
- dev_dbg(vq->dev, "%s(): count=%d, size=%d\n", __func__, *count, *size);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): count=%d, size=%d\n", __func__,
+ *count, *size);
return 0;
}
@@ -269,7 +271,7 @@ static int sh_vou_buf_prepare(struct videobuf_queue *vq,
int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
int ret;
- dev_dbg(vq->dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
if (vb->width != pix->width ||
vb->height != pix->height ||
@@ -299,7 +301,7 @@ static int sh_vou_buf_prepare(struct videobuf_queue *vq,
vb->state = VIDEOBUF_PREPARED;
}
- dev_dbg(vq->dev,
+ dev_dbg(vou_dev->v4l2_dev.dev,
"%s(): fmt #%d, %u bytes per line, phys 0x%x, type %d, state %d\n",
__func__, vou_dev->pix_idx, bytes_per_line,
videobuf_to_dma_contig(vb), vb->memory, vb->state);
@@ -314,7 +316,7 @@ static void sh_vou_buf_queue(struct videobuf_queue *vq,
struct video_device *vdev = vq->priv_data;
struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
- dev_dbg(vq->dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
vb->state = VIDEOBUF_QUEUED;
list_add_tail(&vb->queue, &vou_dev->queue);
@@ -325,8 +327,8 @@ static void sh_vou_buf_queue(struct videobuf_queue *vq,
vou_dev->active = vb;
/* Start from side A: we use mirror addresses, so, set B */
sh_vou_reg_a_write(vou_dev, VOURPR, 1);
- dev_dbg(vq->dev, "%s: first buffer status 0x%x\n", __func__,
- sh_vou_reg_a_read(vou_dev, VOUSTR));
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: first buffer status 0x%x\n",
+ __func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
sh_vou_schedule_next(vou_dev, vb);
/* Only activate VOU after the second buffer */
} else if (vou_dev->active->queue.next == &vb->queue) {
@@ -336,8 +338,8 @@ static void sh_vou_buf_queue(struct videobuf_queue *vq,
/* Register side switching with frame VSYNC */
sh_vou_reg_a_write(vou_dev, VOURCR, 5);
- dev_dbg(vq->dev, "%s: second buffer status 0x%x\n", __func__,
- sh_vou_reg_a_read(vou_dev, VOUSTR));
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: second buffer status 0x%x\n",
+ __func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
/* Enable End-of-Frame (VSYNC) interrupts */
sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);
@@ -355,7 +357,7 @@ static void sh_vou_buf_release(struct videobuf_queue *vq,
struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
unsigned long flags;
- dev_dbg(vq->dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
spin_lock_irqsave(&vou_dev->lock, flags);
@@ -388,9 +390,9 @@ static struct videobuf_queue_ops sh_vou_video_qops = {
static int sh_vou_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct sh_vou_file *vou_file = priv;
+ struct sh_vou_device *vou_dev = video_drvdata(file);
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
@@ -401,12 +403,12 @@ static int sh_vou_querycap(struct file *file, void *priv,
static int sh_vou_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *fmt)
{
- struct sh_vou_file *vou_file = priv;
+ struct sh_vou_device *vou_dev = video_drvdata(file);
if (fmt->index >= ARRAY_SIZE(vou_fmt))
return -EINVAL;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
strlcpy(fmt->description, vou_fmt[fmt->index].desc,
@@ -419,8 +421,7 @@ static int sh_vou_enum_fmt_vid_out(struct file *file, void *priv,
static int sh_vou_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
@@ -595,9 +596,9 @@ static void vou_adjust_input(struct sh_vou_geometry *geo, v4l2_std_id std)
*/
static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std)
{
- unsigned int best_err = UINT_MAX, best, width_max, height_max,
- img_height_max;
- int i, idx;
+ unsigned int best_err = UINT_MAX, best = geo->in_width,
+ width_max, height_max, img_height_max;
+ int i, idx = 0;
if (std & V4L2_STD_525_60) {
width_max = 858;
@@ -671,8 +672,7 @@ static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std)
static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct v4l2_pix_format *pix = &fmt->fmt.pix;
unsigned int img_height_max;
int pix_idx;
@@ -764,11 +764,11 @@ static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct sh_vou_file *vou_file = priv;
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct v4l2_pix_format *pix = &fmt->fmt.pix;
int i;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
pix->field = V4L2_FIELD_NONE;
@@ -788,9 +788,10 @@ static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
static int sh_vou_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *req)
{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = priv;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
@@ -801,27 +802,30 @@ static int sh_vou_reqbufs(struct file *file, void *priv,
static int sh_vou_querybuf(struct file *file, void *priv,
struct v4l2_buffer *b)
{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = priv;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
return videobuf_querybuf(&vou_file->vbq, b);
}
static int sh_vou_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = priv;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
return videobuf_qbuf(&vou_file->vbq, b);
}
static int sh_vou_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = priv;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
return videobuf_dqbuf(&vou_file->vbq, b, file->f_flags & O_NONBLOCK);
}
@@ -829,12 +833,11 @@ static int sh_vou_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
static int sh_vou_streamon(struct file *file, void *priv,
enum v4l2_buf_type buftype)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = priv;
int ret;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
video, s_stream, 1);
@@ -848,11 +851,10 @@ static int sh_vou_streamon(struct file *file, void *priv,
static int sh_vou_streamoff(struct file *file, void *priv,
enum v4l2_buf_type buftype)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = priv;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
/*
* This calls buf_release from host driver's videobuf_queue_ops for all
@@ -881,13 +883,12 @@ static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
int ret;
dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, *std_id);
- if (*std_id & ~vdev->tvnorms)
+ if (*std_id & ~vou_dev->vdev->tvnorms)
return -EINVAL;
ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
@@ -909,8 +910,7 @@ static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
@@ -921,8 +921,7 @@ static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
static int sh_vou_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
@@ -936,8 +935,7 @@ static int sh_vou_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
{
struct v4l2_crop a_writable = *a;
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct v4l2_rect *rect = &a_writable.c;
struct v4l2_crop sd_crop = {.type = V4L2_BUF_TYPE_VIDEO_OUTPUT};
struct v4l2_pix_format *pix = &vou_dev->pix;
@@ -1028,9 +1026,9 @@ static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
static int sh_vou_cropcap(struct file *file, void *priv,
struct v4l2_cropcap *a)
{
- struct sh_vou_file *vou_file = priv;
+ struct sh_vou_device *vou_dev = video_drvdata(file);
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
a->bounds.left = 0;
@@ -1091,7 +1089,7 @@ static irqreturn_t sh_vou_isr(int irq, void *dev_id)
list_del(&vb->queue);
vb->state = VIDEOBUF_DONE;
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
vb->field_count++;
wake_up(&vb->done);
@@ -1160,8 +1158,7 @@ static int sh_vou_hw_init(struct sh_vou_device *vou_dev)
/* File operations */
static int sh_vou_open(struct file *file)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = kzalloc(sizeof(struct sh_vou_file),
GFP_KERNEL);
@@ -1178,11 +1175,11 @@ static int sh_vou_open(struct file *file)
int ret;
/* First open */
vou_dev->status = SH_VOU_INITIALISING;
- pm_runtime_get_sync(vdev->v4l2_dev->dev);
+ pm_runtime_get_sync(vou_dev->v4l2_dev.dev);
ret = sh_vou_hw_init(vou_dev);
if (ret < 0) {
atomic_dec(&vou_dev->use_count);
- pm_runtime_put(vdev->v4l2_dev->dev);
+ pm_runtime_put(vou_dev->v4l2_dev.dev);
vou_dev->status = SH_VOU_IDLE;
mutex_unlock(&vou_dev->fop_lock);
return ret;
@@ -1193,8 +1190,8 @@ static int sh_vou_open(struct file *file)
vou_dev->v4l2_dev.dev, &vou_dev->lock,
V4L2_BUF_TYPE_VIDEO_OUTPUT,
V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), vdev,
- &vou_dev->fop_lock);
+ sizeof(struct videobuf_buffer),
+ vou_dev->vdev, &vou_dev->fop_lock);
mutex_unlock(&vou_dev->fop_lock);
return 0;
@@ -1202,18 +1199,17 @@ static int sh_vou_open(struct file *file)
static int sh_vou_release(struct file *file)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = file->private_data;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
if (!atomic_dec_return(&vou_dev->use_count)) {
mutex_lock(&vou_dev->fop_lock);
/* Last close */
vou_dev->status = SH_VOU_IDLE;
sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101);
- pm_runtime_put(vdev->v4l2_dev->dev);
+ pm_runtime_put(vou_dev->v4l2_dev.dev);
mutex_unlock(&vou_dev->fop_lock);
}
@@ -1225,12 +1221,11 @@ static int sh_vou_release(struct file *file)
static int sh_vou_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = file->private_data;
int ret;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
if (mutex_lock_interruptible(&vou_dev->fop_lock))
return -ERESTARTSYS;
@@ -1241,12 +1236,11 @@ static int sh_vou_mmap(struct file *file, struct vm_area_struct *vma)
static unsigned int sh_vou_poll(struct file *file, poll_table *wait)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = file->private_data;
unsigned int res;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
mutex_lock(&vou_dev->fop_lock);
res = videobuf_poll_stream(file, &vou_file->vbq, wait);
@@ -1257,8 +1251,7 @@ static unsigned int sh_vou_poll(struct file *file, poll_table *wait)
static int sh_vou_g_chip_ident(struct file *file, void *fh,
struct v4l2_dbg_chip_ident *id)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, g_chip_ident, id);
}
@@ -1267,8 +1260,7 @@ static int sh_vou_g_chip_ident(struct file *file, void *fh,
static int sh_vou_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, g_register, reg);
}
@@ -1276,8 +1268,7 @@ static int sh_vou_g_register(struct file *file, void *fh,
static int sh_vou_s_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, s_register, reg);
}
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index cb6791e62bd4..b139b525bb16 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -70,13 +70,12 @@ config VIDEO_MX2_HOSTSUPPORT
bool
config VIDEO_MX2
- tristate "i.MX27/i.MX25 Camera Sensor Interface driver"
- depends on VIDEO_DEV && SOC_CAMERA && (MACH_MX27 || (ARCH_MX25 && BROKEN))
+ tristate "i.MX27 Camera Sensor Interface driver"
+ depends on VIDEO_DEV && SOC_CAMERA && MACH_MX27
select VIDEOBUF2_DMA_CONTIG
select VIDEO_MX2_HOSTSUPPORT
---help---
- This is a v4l2 driver for the i.MX27 and the i.MX25 Camera Sensor
- Interface
+ This is a v4l2 driver for the i.MX27 Camera Sensor Interface
config VIDEO_ATMEL_ISI
tristate "ATMEL Image Sensor Interface (ISI) support"
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index d96c8c7e01d9..82dbf99d347c 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -166,7 +166,7 @@ static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
struct frame_buffer *buf = isi->active;
list_del_init(&buf->list);
- do_gettimeofday(&vb->v4l2_buf.timestamp);
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
vb->v4l2_buf.sequence = isi->sequence++;
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}
@@ -745,7 +745,7 @@ static int isi_camera_get_formats(struct soc_camera_device *icd,
return formats;
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static int isi_camera_add_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -770,7 +770,7 @@ static int isi_camera_add_device(struct soc_camera_device *icd)
icd->devnum);
return 0;
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static void isi_camera_remove_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
diff --git a/drivers/media/platform/soc_camera/mx1_camera.c b/drivers/media/platform/soc_camera/mx1_camera.c
index 032b8c9097f9..25b2a285dc86 100644
--- a/drivers/media/platform/soc_camera/mx1_camera.c
+++ b/drivers/media/platform/soc_camera/mx1_camera.c
@@ -26,7 +26,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -307,7 +306,7 @@ static void mx1_camera_wakeup(struct mx1_camera_dev *pcdev,
/* _init is used to debug races, see comment in mx1_camera_reqbufs() */
list_del_init(&vb->queue);
vb->state = VIDEOBUF_DONE;
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
vb->field_count++;
wake_up(&vb->done);
@@ -373,7 +372,7 @@ static void mx1_camera_init_videobuf(struct videobuf_queue *q,
videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, icd->parent,
&pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_NONE,
- sizeof(struct mx1_buffer), icd, &icd->video_lock);
+ sizeof(struct mx1_buffer), icd, &ici->host_lock);
}
static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index 1abdc7d9c744..ffba7d91f413 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -1,5 +1,5 @@
/*
- * V4L2 Driver for i.MX27/i.MX25 camera host
+ * V4L2 Driver for i.MX27 camera host
*
* Copyright (C) 2008, Sascha Hauer, Pengutronix
* Copyright (C) 2010, Baruch Siach, Orex Computed Radiography
@@ -28,7 +28,6 @@
#include <linux/time.h>
#include <linux/device.h>
#include <linux/platform_device.h>
-#include <linux/mutex.h>
#include <linux/clk.h>
#include <media/v4l2-common.h>
@@ -64,9 +63,7 @@
#define CSICR1_RF_OR_INTEN (1 << 24)
#define CSICR1_STATFF_LEVEL (3 << 22)
#define CSICR1_STATFF_INTEN (1 << 21)
-#define CSICR1_RXFF_LEVEL(l) (((l) & 3) << 19) /* MX27 */
-#define CSICR1_FB2_DMA_INTEN (1 << 20) /* MX25 */
-#define CSICR1_FB1_DMA_INTEN (1 << 19) /* MX25 */
+#define CSICR1_RXFF_LEVEL(l) (((l) & 3) << 19)
#define CSICR1_RXFF_INTEN (1 << 18)
#define CSICR1_SOF_POL (1 << 17)
#define CSICR1_SOF_INTEN (1 << 16)
@@ -88,45 +85,15 @@
#define SHIFT_RXFF_LEVEL 19
#define SHIFT_MCLKDIV 12
-/* control reg 3 */
-#define CSICR3_FRMCNT (0xFFFF << 16)
-#define CSICR3_FRMCNT_RST (1 << 15)
-#define CSICR3_DMA_REFLASH_RFF (1 << 14)
-#define CSICR3_DMA_REFLASH_SFF (1 << 13)
-#define CSICR3_DMA_REQ_EN_RFF (1 << 12)
-#define CSICR3_DMA_REQ_EN_SFF (1 << 11)
-#define CSICR3_RXFF_LEVEL(l) (((l) & 7) << 4) /* MX25 */
-#define CSICR3_CSI_SUP (1 << 3)
-#define CSICR3_ZERO_PACK_EN (1 << 2)
-#define CSICR3_ECC_INT_EN (1 << 1)
-#define CSICR3_ECC_AUTO_EN (1 << 0)
-
#define SHIFT_FRMCNT 16
-/* csi status reg */
-#define CSISR_SFF_OR_INT (1 << 25)
-#define CSISR_RFF_OR_INT (1 << 24)
-#define CSISR_STATFF_INT (1 << 21)
-#define CSISR_DMA_TSF_FB2_INT (1 << 20) /* MX25 */
-#define CSISR_DMA_TSF_FB1_INT (1 << 19) /* MX25 */
-#define CSISR_RXFF_INT (1 << 18)
-#define CSISR_EOF_INT (1 << 17)
-#define CSISR_SOF_INT (1 << 16)
-#define CSISR_F2_INT (1 << 15)
-#define CSISR_F1_INT (1 << 14)
-#define CSISR_COF_INT (1 << 13)
-#define CSISR_ECC_INT (1 << 1)
-#define CSISR_DRDY (1 << 0)
-
#define CSICR1 0x00
#define CSICR2 0x04
-#define CSISR_IMX25 0x18
-#define CSISR_IMX27 0x08
+#define CSISR 0x08
#define CSISTATFIFO 0x0c
#define CSIRFIFO 0x10
#define CSIRXCNT 0x14
-#define CSICR3_IMX25 0x08
-#define CSICR3_IMX27 0x1c
+#define CSICR3 0x1c
#define CSIDMASA_STATFIFO 0x20
#define CSIDMATA_STATFIFO 0x24
#define CSIDMASA_FB1 0x28
@@ -249,12 +216,6 @@ struct mx2_fmt_cfg {
struct mx2_prp_cfg cfg;
};
-enum mx2_buffer_state {
- MX2_STATE_QUEUED,
- MX2_STATE_ACTIVE,
- MX2_STATE_DONE,
-};
-
struct mx2_buf_internal {
struct list_head queue;
int bufnum;
@@ -265,12 +226,10 @@ struct mx2_buf_internal {
struct mx2_buffer {
/* common v4l buffer stuff -- must be first */
struct vb2_buffer vb;
- enum mx2_buffer_state state;
struct mx2_buf_internal internal;
};
enum mx2_camera_type {
- IMX25_CAMERA,
IMX27_CAMERA,
};
@@ -298,8 +257,6 @@ struct mx2_camera_dev {
struct mx2_buffer *fb2_active;
u32 csicr1;
- u32 reg_csisr;
- u32 reg_csicr3;
enum mx2_camera_type devtype;
struct mx2_buf_internal buf_discard[2];
@@ -315,9 +272,6 @@ struct mx2_camera_dev {
static struct platform_device_id mx2_camera_devtype[] = {
{
- .name = "imx25-camera",
- .driver_data = IMX25_CAMERA,
- }, {
.name = "imx27-camera",
.driver_data = IMX27_CAMERA,
}, {
@@ -326,16 +280,6 @@ static struct platform_device_id mx2_camera_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, mx2_camera_devtype);
-static inline int is_imx25_camera(struct mx2_camera_dev *pcdev)
-{
- return pcdev->devtype == IMX25_CAMERA;
-}
-
-static inline int is_imx27_camera(struct mx2_camera_dev *pcdev)
-{
- return pcdev->devtype == IMX27_CAMERA;
-}
-
static struct mx2_buffer *mx2_ibuf_to_buf(struct mx2_buf_internal *int_buf)
{
return container_of(int_buf, struct mx2_buffer, internal);
@@ -345,7 +289,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
/*
* This is a generic configuration which is valid for most
* prp input-output format combinations.
- * We set the incomming and outgoing pixelformat to a
+ * We set the incoming and outgoing pixelformat to a
* 16 Bit wide format and adjust the bytesperline
* accordingly. With this configuration the inputdata
* will not be changed by the emma and could be any type
@@ -463,21 +407,10 @@ static void mx27_update_emma_buf(struct mx2_camera_dev *pcdev,
static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev)
{
- unsigned long flags;
-
clk_disable_unprepare(pcdev->clk_csi_ahb);
clk_disable_unprepare(pcdev->clk_csi_per);
writel(0, pcdev->base_csi + CSICR1);
- if (is_imx27_camera(pcdev)) {
- writel(0, pcdev->base_emma + PRP_CNTL);
- } else if (is_imx25_camera(pcdev)) {
- spin_lock_irqsave(&pcdev->lock, flags);
- pcdev->fb1_active = NULL;
- pcdev->fb2_active = NULL;
- writel(0, pcdev->base_csi + CSIDMASA_FB1);
- writel(0, pcdev->base_csi + CSIDMASA_FB2);
- spin_unlock_irqrestore(&pcdev->lock, flags);
- }
+ writel(0, pcdev->base_emma + PRP_CNTL);
}
/*
@@ -502,11 +435,8 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
if (ret < 0)
goto exit_csi_ahb;
- csicr1 = CSICR1_MCLKEN;
-
- if (is_imx27_camera(pcdev))
- csicr1 |= CSICR1_PRP_IF_EN | CSICR1_FCC |
- CSICR1_RXFF_LEVEL(0);
+ csicr1 = CSICR1_MCLKEN | CSICR1_PRP_IF_EN | CSICR1_FCC |
+ CSICR1_RXFF_LEVEL(0);
pcdev->csicr1 = csicr1;
writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
@@ -540,65 +470,6 @@ static void mx2_camera_remove_device(struct soc_camera_device *icd)
pcdev->icd = NULL;
}
-static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb,
- int state)
-{
- struct vb2_buffer *vb;
- struct mx2_buffer *buf;
- struct mx2_buffer **fb_active = fb == 1 ? &pcdev->fb1_active :
- &pcdev->fb2_active;
- u32 fb_reg = fb == 1 ? CSIDMASA_FB1 : CSIDMASA_FB2;
- unsigned long flags;
-
- spin_lock_irqsave(&pcdev->lock, flags);
-
- if (*fb_active == NULL)
- goto out;
-
- vb = &(*fb_active)->vb;
- dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%p %lu\n", __func__,
- vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
-
- do_gettimeofday(&vb->v4l2_buf.timestamp);
- vb->v4l2_buf.sequence++;
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
-
- if (list_empty(&pcdev->capture)) {
- buf = NULL;
- writel(0, pcdev->base_csi + fb_reg);
- } else {
- buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
- internal.queue);
- vb = &buf->vb;
- list_del(&buf->internal.queue);
- buf->state = MX2_STATE_ACTIVE;
- writel(vb2_dma_contig_plane_dma_addr(vb, 0),
- pcdev->base_csi + fb_reg);
- }
-
- *fb_active = buf;
-
-out:
- spin_unlock_irqrestore(&pcdev->lock, flags);
-}
-
-static irqreturn_t mx25_camera_irq(int irq_csi, void *data)
-{
- struct mx2_camera_dev *pcdev = data;
- u32 status = readl(pcdev->base_csi + pcdev->reg_csisr);
-
- if (status & CSISR_DMA_TSF_FB1_INT)
- mx25_camera_frame_done(pcdev, 1, MX2_STATE_DONE);
- else if (status & CSISR_DMA_TSF_FB2_INT)
- mx25_camera_frame_done(pcdev, 2, MX2_STATE_DONE);
-
- /* FIXME: handle CSISR_RFF_OR_INT */
-
- writel(status, pcdev->base_csi + pcdev->reg_csisr);
-
- return IRQ_HANDLED;
-}
-
/*
* Videobuf operations
*/
@@ -676,97 +547,8 @@ static void mx2_videobuf_queue(struct vb2_buffer *vb)
spin_lock_irqsave(&pcdev->lock, flags);
- buf->state = MX2_STATE_QUEUED;
list_add_tail(&buf->internal.queue, &pcdev->capture);
- if (is_imx25_camera(pcdev)) {
- u32 csicr3, dma_inten = 0;
-
- if (pcdev->fb1_active == NULL) {
- writel(vb2_dma_contig_plane_dma_addr(vb, 0),
- pcdev->base_csi + CSIDMASA_FB1);
- pcdev->fb1_active = buf;
- dma_inten = CSICR1_FB1_DMA_INTEN;
- } else if (pcdev->fb2_active == NULL) {
- writel(vb2_dma_contig_plane_dma_addr(vb, 0),
- pcdev->base_csi + CSIDMASA_FB2);
- pcdev->fb2_active = buf;
- dma_inten = CSICR1_FB2_DMA_INTEN;
- }
-
- if (dma_inten) {
- list_del(&buf->internal.queue);
- buf->state = MX2_STATE_ACTIVE;
-
- csicr3 = readl(pcdev->base_csi + pcdev->reg_csicr3);
-
- /* Reflash DMA */
- writel(csicr3 | CSICR3_DMA_REFLASH_RFF,
- pcdev->base_csi + pcdev->reg_csicr3);
-
- /* clear & enable interrupts */
- writel(dma_inten, pcdev->base_csi + pcdev->reg_csisr);
- pcdev->csicr1 |= dma_inten;
- writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
-
- /* enable DMA */
- csicr3 |= CSICR3_DMA_REQ_EN_RFF | CSICR3_RXFF_LEVEL(1);
- writel(csicr3, pcdev->base_csi + pcdev->reg_csicr3);
- }
- }
-
- spin_unlock_irqrestore(&pcdev->lock, flags);
-}
-
-static void mx2_videobuf_release(struct vb2_buffer *vb)
-{
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct mx2_camera_dev *pcdev = ici->priv;
- struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
- unsigned long flags;
-
-#ifdef DEBUG
- dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
- vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
-
- switch (buf->state) {
- case MX2_STATE_ACTIVE:
- dev_info(icd->parent, "%s (active)\n", __func__);
- break;
- case MX2_STATE_QUEUED:
- dev_info(icd->parent, "%s (queued)\n", __func__);
- break;
- default:
- dev_info(icd->parent, "%s (unknown) %d\n", __func__,
- buf->state);
- break;
- }
-#endif
-
- /*
- * Terminate only queued but inactive buffers. Active buffers are
- * released when they become inactive after videobuf_waiton().
- *
- * FIXME: implement forced termination of active buffers for mx27 and
- * mx27 eMMA, so that the user won't get stuck in an uninterruptible
- * state. This requires a specific handling for each of the these DMA
- * types.
- */
-
- spin_lock_irqsave(&pcdev->lock, flags);
- if (is_imx25_camera(pcdev) && buf->state == MX2_STATE_ACTIVE) {
- if (pcdev->fb1_active == buf) {
- pcdev->csicr1 &= ~CSICR1_FB1_DMA_INTEN;
- writel(0, pcdev->base_csi + CSIDMASA_FB1);
- pcdev->fb1_active = NULL;
- } else if (pcdev->fb2_active == buf) {
- pcdev->csicr1 &= ~CSICR1_FB2_DMA_INTEN;
- writel(0, pcdev->base_csi + CSIDMASA_FB2);
- pcdev->fb2_active = NULL;
- }
- writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
- }
spin_unlock_irqrestore(&pcdev->lock, flags);
}
@@ -877,91 +659,87 @@ static int mx2_start_streaming(struct vb2_queue *q, unsigned int count)
struct mx2_buffer *buf;
unsigned long phys;
int bytesperline;
+ unsigned long flags;
- if (is_imx27_camera(pcdev)) {
- unsigned long flags;
- if (count < 2)
- return -EINVAL;
+ if (count < 2)
+ return -EINVAL;
- spin_lock_irqsave(&pcdev->lock, flags);
+ spin_lock_irqsave(&pcdev->lock, flags);
- buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
- internal.queue);
- buf->internal.bufnum = 0;
- vb = &buf->vb;
- buf->state = MX2_STATE_ACTIVE;
+ buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
+ internal.queue);
+ buf->internal.bufnum = 0;
+ vb = &buf->vb;
- phys = vb2_dma_contig_plane_dma_addr(vb, 0);
- mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
- list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
+ phys = vb2_dma_contig_plane_dma_addr(vb, 0);
+ mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
+ list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
- buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
- internal.queue);
- buf->internal.bufnum = 1;
- vb = &buf->vb;
- buf->state = MX2_STATE_ACTIVE;
+ buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
+ internal.queue);
+ buf->internal.bufnum = 1;
+ vb = &buf->vb;
- phys = vb2_dma_contig_plane_dma_addr(vb, 0);
- mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
- list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
-
- bytesperline = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
- if (bytesperline < 0) {
- spin_unlock_irqrestore(&pcdev->lock, flags);
- return bytesperline;
- }
+ phys = vb2_dma_contig_plane_dma_addr(vb, 0);
+ mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
+ list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
- /*
- * I didn't manage to properly enable/disable the prp
- * on a per frame basis during running transfers,
- * thus we allocate a buffer here and use it to
- * discard frames when no buffer is available.
- * Feel free to work on this ;)
- */
- pcdev->discard_size = icd->user_height * bytesperline;
- pcdev->discard_buffer = dma_alloc_coherent(ici->v4l2_dev.dev,
- pcdev->discard_size, &pcdev->discard_buffer_dma,
- GFP_KERNEL);
- if (!pcdev->discard_buffer) {
- spin_unlock_irqrestore(&pcdev->lock, flags);
- return -ENOMEM;
- }
+ bytesperline = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+ if (bytesperline < 0) {
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+ return bytesperline;
+ }
- pcdev->buf_discard[0].discard = true;
- list_add_tail(&pcdev->buf_discard[0].queue,
- &pcdev->discard);
+ /*
+ * I didn't manage to properly enable/disable the prp
+ * on a per frame basis during running transfers,
+ * thus we allocate a buffer here and use it to
+ * discard frames when no buffer is available.
+ * Feel free to work on this ;)
+ */
+ pcdev->discard_size = icd->user_height * bytesperline;
+ pcdev->discard_buffer = dma_alloc_coherent(ici->v4l2_dev.dev,
+ pcdev->discard_size,
+ &pcdev->discard_buffer_dma, GFP_ATOMIC);
+ if (!pcdev->discard_buffer) {
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+ return -ENOMEM;
+ }
- pcdev->buf_discard[1].discard = true;
- list_add_tail(&pcdev->buf_discard[1].queue,
- &pcdev->discard);
+ pcdev->buf_discard[0].discard = true;
+ list_add_tail(&pcdev->buf_discard[0].queue,
+ &pcdev->discard);
- mx2_prp_resize_commit(pcdev);
+ pcdev->buf_discard[1].discard = true;
+ list_add_tail(&pcdev->buf_discard[1].queue,
+ &pcdev->discard);
- mx27_camera_emma_buf_init(icd, bytesperline);
+ mx2_prp_resize_commit(pcdev);
- if (prp->cfg.channel == 1) {
- writel(PRP_CNTL_CH1EN |
- PRP_CNTL_CSIEN |
- prp->cfg.in_fmt |
- prp->cfg.out_fmt |
- PRP_CNTL_CH1_LEN |
- PRP_CNTL_CH1BYP |
- PRP_CNTL_CH1_TSKIP(0) |
- PRP_CNTL_IN_TSKIP(0),
- pcdev->base_emma + PRP_CNTL);
- } else {
- writel(PRP_CNTL_CH2EN |
- PRP_CNTL_CSIEN |
- prp->cfg.in_fmt |
- prp->cfg.out_fmt |
- PRP_CNTL_CH2_LEN |
- PRP_CNTL_CH2_TSKIP(0) |
- PRP_CNTL_IN_TSKIP(0),
- pcdev->base_emma + PRP_CNTL);
- }
- spin_unlock_irqrestore(&pcdev->lock, flags);
+ mx27_camera_emma_buf_init(icd, bytesperline);
+
+ if (prp->cfg.channel == 1) {
+ writel(PRP_CNTL_CH1EN |
+ PRP_CNTL_CSIEN |
+ prp->cfg.in_fmt |
+ prp->cfg.out_fmt |
+ PRP_CNTL_CH1_LEN |
+ PRP_CNTL_CH1BYP |
+ PRP_CNTL_CH1_TSKIP(0) |
+ PRP_CNTL_IN_TSKIP(0),
+ pcdev->base_emma + PRP_CNTL);
+ } else {
+ writel(PRP_CNTL_CH2EN |
+ PRP_CNTL_CSIEN |
+ prp->cfg.in_fmt |
+ prp->cfg.out_fmt |
+ PRP_CNTL_CH2_LEN |
+ PRP_CNTL_CH2_TSKIP(0) |
+ PRP_CNTL_IN_TSKIP(0),
+ pcdev->base_emma + PRP_CNTL);
}
+ spin_unlock_irqrestore(&pcdev->lock, flags);
return 0;
}
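
With the i.MX25 path removed, the eMMA setup in mx2_start_streaming() now runs entirely under pcdev->lock, which is why the discard-buffer allocation above switched from GFP_KERNEL to GFP_ATOMIC. A minimal sketch of the underlying rule, using a hypothetical helper that is not part of this patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

/* Hypothetical helper, for illustration only: an allocation made while a
 * spinlock is held (interrupts off) must use GFP_ATOMIC, because GFP_KERNEL
 * may sleep during memory reclaim. */
static int foo_alloc_discard_locked(struct device *dev, spinlock_t *lock,
				    size_t size, void **buf, dma_addr_t *dma)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	*buf = dma_alloc_coherent(dev, size, dma, GFP_ATOMIC);
	spin_unlock_irqrestore(lock, flags);

	return *buf ? 0 : -ENOMEM;
}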
@@ -977,29 +755,27 @@ static int mx2_stop_streaming(struct vb2_queue *q)
void *b;
u32 cntl;
- if (is_imx27_camera(pcdev)) {
- spin_lock_irqsave(&pcdev->lock, flags);
+ spin_lock_irqsave(&pcdev->lock, flags);
- cntl = readl(pcdev->base_emma + PRP_CNTL);
- if (prp->cfg.channel == 1) {
- writel(cntl & ~PRP_CNTL_CH1EN,
- pcdev->base_emma + PRP_CNTL);
- } else {
- writel(cntl & ~PRP_CNTL_CH2EN,
- pcdev->base_emma + PRP_CNTL);
- }
- INIT_LIST_HEAD(&pcdev->capture);
- INIT_LIST_HEAD(&pcdev->active_bufs);
- INIT_LIST_HEAD(&pcdev->discard);
+ cntl = readl(pcdev->base_emma + PRP_CNTL);
+ if (prp->cfg.channel == 1) {
+ writel(cntl & ~PRP_CNTL_CH1EN,
+ pcdev->base_emma + PRP_CNTL);
+ } else {
+ writel(cntl & ~PRP_CNTL_CH2EN,
+ pcdev->base_emma + PRP_CNTL);
+ }
+ INIT_LIST_HEAD(&pcdev->capture);
+ INIT_LIST_HEAD(&pcdev->active_bufs);
+ INIT_LIST_HEAD(&pcdev->discard);
- b = pcdev->discard_buffer;
- pcdev->discard_buffer = NULL;
+ b = pcdev->discard_buffer;
+ pcdev->discard_buffer = NULL;
- spin_unlock_irqrestore(&pcdev->lock, flags);
+ spin_unlock_irqrestore(&pcdev->lock, flags);
- dma_free_coherent(ici->v4l2_dev.dev,
- pcdev->discard_size, b, pcdev->discard_buffer_dma);
- }
+ dma_free_coherent(ici->v4l2_dev.dev,
+ pcdev->discard_size, b, pcdev->discard_buffer_dma);
return 0;
}
@@ -1008,7 +784,6 @@ static struct vb2_ops mx2_videobuf_ops = {
.queue_setup = mx2_videobuf_setup,
.buf_prepare = mx2_videobuf_prepare,
.buf_queue = mx2_videobuf_queue,
- .buf_cleanup = mx2_videobuf_release,
.start_streaming = mx2_start_streaming,
.stop_streaming = mx2_stop_streaming,
};
@@ -1129,16 +904,9 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
if (bytesperline < 0)
return bytesperline;
- if (is_imx27_camera(pcdev)) {
- ret = mx27_camera_emma_prp_reset(pcdev);
- if (ret)
- return ret;
- } else if (is_imx25_camera(pcdev)) {
- writel((bytesperline * icd->user_height) >> 2,
- pcdev->base_csi + CSIRXCNT);
- writel((bytesperline << 16) | icd->user_height,
- pcdev->base_csi + CSIIMAG_PARA);
- }
+ ret = mx27_camera_emma_prp_reset(pcdev);
+ if (ret)
+ return ret;
writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
@@ -1425,7 +1193,6 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
struct mx2_fmt_cfg *emma_prp;
- unsigned int width_limit;
int ret;
dev_dbg(icd->parent, "%s: requested params: width = %d, height = %d\n",
@@ -1437,40 +1204,11 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
return -EINVAL;
}
- /* FIXME: implement MX27 limits */
-
- /* limit to MX25 hardware capabilities */
- if (is_imx25_camera(pcdev)) {
- if (xlate->host_fmt->bits_per_sample <= 8)
- width_limit = 0xffff * 4;
- else
- width_limit = 0xffff * 2;
- /* CSIIMAG_PARA limit */
- if (pix->width > width_limit)
- pix->width = width_limit;
- if (pix->height > 0xffff)
- pix->height = 0xffff;
-
- pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
- xlate->host_fmt);
- if (pix->bytesperline < 0)
- return pix->bytesperline;
- pix->sizeimage = soc_mbus_image_size(xlate->host_fmt,
- pix->bytesperline, pix->height);
- /* Check against the CSIRXCNT limit */
- if (pix->sizeimage > 4 * 0x3ffff) {
- /* Adjust geometry, preserve aspect ratio */
- unsigned int new_height = int_sqrt(div_u64(0x3ffffULL *
- 4 * pix->height, pix->bytesperline));
- pix->width = new_height * pix->width / pix->height;
- pix->height = new_height;
- pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
- xlate->host_fmt);
- BUG_ON(pix->bytesperline < 0);
- pix->sizeimage = soc_mbus_image_size(xlate->host_fmt,
- pix->bytesperline, pix->height);
- }
- }
+ /*
+ * limit to MX27 hardware capabilities: width must be a multiple of 8 as
+ * requested by the CSI. (Table 39-2 in the i.MX27 Reference Manual).
+ */
+ pix->width &= ~0x7;
/* limit to sensor capabilities */
mf.width = pix->width;
@@ -1488,7 +1226,7 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
/* If the sensor does not support image size try PrP resizing */
emma_prp = mx27_emma_prp_get_format(xlate->code,
- xlate->host_fmt->fourcc);
+ xlate->host_fmt->fourcc);
if ((mf.width != pix->width || mf.height != pix->height) &&
emma_prp->cfg.in_fmt == PRP_CNTL_DATA_IN_YUV422) {
@@ -1600,7 +1338,7 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
vb2_get_plane_payload(vb, 0));
list_del_init(&buf->internal.queue);
- do_gettimeofday(&vb->v4l2_buf.timestamp);
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
vb->v4l2_buf.sequence = pcdev->frame_count;
if (err)
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
@@ -1634,7 +1372,6 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
vb = &buf->vb;
- buf->state = MX2_STATE_ACTIVE;
phys = vb2_dma_contig_plane_dma_addr(vb, 0);
mx27_update_emma_buf(pcdev, phys, bufnum);
@@ -1774,20 +1511,6 @@ static int mx2_camera_probe(struct platform_device *pdev)
goto exit;
}
- pcdev->devtype = pdev->id_entry->driver_data;
- switch (pcdev->devtype) {
- case IMX25_CAMERA:
- pcdev->reg_csisr = CSISR_IMX25;
- pcdev->reg_csicr3 = CSICR3_IMX25;
- break;
- case IMX27_CAMERA:
- pcdev->reg_csisr = CSISR_IMX27;
- pcdev->reg_csicr3 = CSICR3_IMX27;
- break;
- default:
- break;
- }
-
pcdev->clk_csi_ahb = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(pcdev->clk_csi_ahb)) {
dev_err(&pdev->dev, "Could not get csi ahb clock\n");
@@ -1833,20 +1556,9 @@ static int mx2_camera_probe(struct platform_device *pdev)
pcdev->dev = &pdev->dev;
platform_set_drvdata(pdev, pcdev);
- if (is_imx25_camera(pcdev)) {
- err = devm_request_irq(&pdev->dev, irq_csi, mx25_camera_irq, 0,
- MX2_CAM_DRV_NAME, pcdev);
- if (err) {
- dev_err(pcdev->dev, "Camera interrupt register failed \n");
- goto exit;
- }
- }
-
- if (is_imx27_camera(pcdev)) {
- err = mx27_camera_emma_init(pdev);
- if (err)
- goto exit;
- }
+ err = mx27_camera_emma_init(pdev);
+ if (err)
+ goto exit;
/*
* We're done with drvdata here. Clear the pointer so that
@@ -1859,8 +1571,6 @@ static int mx2_camera_probe(struct platform_device *pdev)
pcdev->soc_host.priv = pcdev;
pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
pcdev->soc_host.nr = pdev->id;
- if (is_imx25_camera(pcdev))
- pcdev->soc_host.capabilities = SOCAM_HOST_CAP_STRIDE;
pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
if (IS_ERR(pcdev->alloc_ctx)) {
@@ -1879,10 +1589,8 @@ static int mx2_camera_probe(struct platform_device *pdev)
exit_free_emma:
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
eallocctx:
- if (is_imx27_camera(pcdev)) {
- clk_disable_unprepare(pcdev->clk_emma_ipg);
- clk_disable_unprepare(pcdev->clk_emma_ahb);
- }
+ clk_disable_unprepare(pcdev->clk_emma_ipg);
+ clk_disable_unprepare(pcdev->clk_emma_ahb);
exit:
return err;
}
@@ -1897,10 +1605,8 @@ static int mx2_camera_remove(struct platform_device *pdev)
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
- if (is_imx27_camera(pcdev)) {
- clk_disable_unprepare(pcdev->clk_emma_ipg);
- clk_disable_unprepare(pcdev->clk_emma_ahb);
- }
+ clk_disable_unprepare(pcdev->clk_emma_ipg);
+ clk_disable_unprepare(pcdev->clk_emma_ahb);
dev_info(&pdev->dev, "MX2 Camera driver unloaded\n");
@@ -1913,23 +1619,12 @@ static struct platform_driver mx2_camera_driver = {
},
.id_table = mx2_camera_devtype,
.remove = mx2_camera_remove,
+ .probe = mx2_camera_probe,
};
+module_platform_driver(mx2_camera_driver);
-static int __init mx2_camera_init(void)
-{
- return platform_driver_probe(&mx2_camera_driver, &mx2_camera_probe);
-}
-
-static void __exit mx2_camera_exit(void)
-{
- return platform_driver_unregister(&mx2_camera_driver);
-}
-
-module_init(mx2_camera_init);
-module_exit(mx2_camera_exit);
-
-MODULE_DESCRIPTION("i.MX27/i.MX25 SoC Camera Host driver");
+MODULE_DESCRIPTION("i.MX27 SoC Camera Host driver");
MODULE_AUTHOR("Sascha Hauer <sha@pengutronix.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(MX2_CAM_VERSION);
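
For reference, a rough expansion of module_platform_driver(mx2_camera_driver), assuming the macro from <linux/platform_device.h> of this kernel: it generates the init/exit boilerplate removed above, but registers with platform_driver_register() instead of platform_driver_probe(), which is why .probe is now set in the driver structure.

/* Approximate expansion only; the real macro goes through module_driver()
 * but ends up equivalent to this pair. */
static int __init mx2_camera_driver_init(void)
{
	return platform_driver_register(&mx2_camera_driver);
}
module_init(mx2_camera_driver_init);

static void __exit mx2_camera_driver_exit(void)
{
	platform_driver_unregister(&mx2_camera_driver);
}
module_exit(mx2_camera_driver_exit);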
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index 45aef1053a49..f5cbb92db545 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -156,7 +156,7 @@ static void mx3_cam_dma_done(void *arg)
struct mx3_camera_buffer *buf = to_mx3_vb(vb);
list_del_init(&buf->queue);
- do_gettimeofday(&vb->v4l2_buf.timestamp);
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
vb->v4l2_buf.field = mx3_cam->field;
vb->v4l2_buf.sequence = mx3_cam->sequence++;
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
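
The do_gettimeofday() to v4l2_get_timestamp() conversions in this and the following hunks switch buffer timestamps from wall-clock to monotonic time. Roughly, the helper behaves like the sketch below (the _sketch name is ours; the real function lives in v4l2-common):

#include <linux/ktime.h>
#include <linux/time.h>

/* Sketch of what v4l2_get_timestamp() provides: the monotonic clock
 * expressed as a struct timeval, immune to settimeofday()/NTP jumps,
 * unlike do_gettimeofday(). */
static void v4l2_get_timestamp_sketch(struct timeval *tv)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}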
@@ -510,7 +510,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
clk_set_rate(mx3_cam->clk, rate);
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static int mx3_camera_add_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -530,7 +530,7 @@ static int mx3_camera_add_device(struct soc_camera_device *icd)
return 0;
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static void mx3_camera_remove_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index 39a77f0b8860..2547bf88f79f 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -592,7 +592,7 @@ static void videobuf_done(struct omap1_cam_dev *pcdev,
suspend_capture(pcdev);
}
vb->state = result;
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
if (result != VIDEOBUF_ERROR)
vb->field_count++;
wake_up(&vb->done);
@@ -1383,12 +1383,12 @@ static void omap1_cam_init_videobuf(struct videobuf_queue *q,
videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops,
icd->parent, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
+ sizeof(struct omap1_cam_buf), icd, &ici->host_lock);
else
videobuf_queue_sg_init(q, &omap1_videobuf_ops,
icd->parent, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
+ sizeof(struct omap1_cam_buf), icd, &ici->host_lock);
/* use videobuf mode (auto)selected with the module parameter */
pcdev->vb_mode = sg_mode ? OMAP1_CAM_DMA_SG : OMAP1_CAM_DMA_CONTIG;
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 523330d00dee..395e2e043615 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -681,7 +681,7 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
/* _init is used to debug races, see comment in pxa_camera_reqbufs() */
list_del_init(&vb->queue);
vb->state = VIDEOBUF_DONE;
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
vb->field_count++;
wake_up(&vb->done);
dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s dequeued buffer (vb=0x%p)\n",
@@ -842,7 +842,7 @@ static void pxa_camera_init_videobuf(struct videobuf_queue *q,
*/
videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct pxa_buffer), icd, &icd->video_lock);
+ sizeof(struct pxa_buffer), icd, &ici->host_lock);
}
static u32 mclk_get_divisor(struct platform_device *pdev,
@@ -958,7 +958,7 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
/*
* The following two functions absolutely depend on the fact, that
* there can be only one camera on PXA quick capture interface
- * Called with .video_lock held
+ * Called with .host_lock held
*/
static int pxa_camera_add_device(struct soc_camera_device *icd)
{
@@ -978,7 +978,7 @@ static int pxa_camera_add_device(struct soc_camera_device *icd)
return 0;
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static void pxa_camera_remove_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -1661,23 +1661,18 @@ static int pxa_camera_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!res || irq < 0) {
- err = -ENODEV;
- goto exit;
- }
+ if (!res || irq < 0)
+ return -ENODEV;
- pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
if (!pcdev) {
dev_err(&pdev->dev, "Could not allocate pcdev\n");
- err = -ENOMEM;
- goto exit;
+ return -ENOMEM;
}
- pcdev->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(pcdev->clk)) {
- err = PTR_ERR(pcdev->clk);
- goto exit_kfree;
- }
+ pcdev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pcdev->clk))
+ return PTR_ERR(pcdev->clk);
pcdev->res = res;
@@ -1715,17 +1710,9 @@ static int pxa_camera_probe(struct platform_device *pdev)
/*
* Request the regions.
*/
- if (!request_mem_region(res->start, resource_size(res),
- PXA_CAM_DRV_NAME)) {
- err = -EBUSY;
- goto exit_clk;
- }
-
- base = ioremap(res->start, resource_size(res));
- if (!base) {
- err = -ENOMEM;
- goto exit_release;
- }
+ base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!base)
+ return -ENOMEM;
pcdev->irq = irq;
pcdev->base = base;
@@ -1734,7 +1721,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
pxa_camera_dma_irq_y, pcdev);
if (err < 0) {
dev_err(&pdev->dev, "Can't request DMA for Y\n");
- goto exit_iounmap;
+ return err;
}
pcdev->dma_chans[0] = err;
dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chans[0]);
@@ -1762,10 +1749,10 @@ static int pxa_camera_probe(struct platform_device *pdev)
DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD;
/* request irq */
- err = request_irq(pcdev->irq, pxa_camera_irq, 0, PXA_CAM_DRV_NAME,
- pcdev);
+ err = devm_request_irq(&pdev->dev, pcdev->irq, pxa_camera_irq, 0,
+ PXA_CAM_DRV_NAME, pcdev);
if (err) {
- dev_err(&pdev->dev, "Camera interrupt register failed \n");
+ dev_err(&pdev->dev, "Camera interrupt register failed\n");
goto exit_free_dma;
}
@@ -1777,27 +1764,16 @@ static int pxa_camera_probe(struct platform_device *pdev)
err = soc_camera_host_register(&pcdev->soc_host);
if (err)
- goto exit_free_irq;
+ goto exit_free_dma;
return 0;
-exit_free_irq:
- free_irq(pcdev->irq, pcdev);
exit_free_dma:
pxa_free_dma(pcdev->dma_chans[2]);
exit_free_dma_u:
pxa_free_dma(pcdev->dma_chans[1]);
exit_free_dma_y:
pxa_free_dma(pcdev->dma_chans[0]);
-exit_iounmap:
- iounmap(base);
-exit_release:
- release_mem_region(res->start, resource_size(res));
-exit_clk:
- clk_put(pcdev->clk);
-exit_kfree:
- kfree(pcdev);
-exit:
return err;
}
@@ -1806,24 +1782,13 @@ static int pxa_camera_remove(struct platform_device *pdev)
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct pxa_camera_dev *pcdev = container_of(soc_host,
struct pxa_camera_dev, soc_host);
- struct resource *res;
-
- clk_put(pcdev->clk);
pxa_free_dma(pcdev->dma_chans[0]);
pxa_free_dma(pcdev->dma_chans[1]);
pxa_free_dma(pcdev->dma_chans[2]);
- free_irq(pcdev->irq, pcdev);
soc_camera_host_unregister(soc_host);
- iounmap(pcdev->base);
-
- res = pcdev->res;
- release_mem_region(res->start, resource_size(res));
-
- kfree(pcdev);
-
dev_info(&pdev->dev, "PXA Camera driver unloaded\n");
return 0;
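
The pxa_camera conversion above (and the sh_mobile_ceu and sh_csi2 ones below) replaces manual resource handling with managed devm_* allocations, so the error paths and remove() lose their kfree()/iounmap()/free_irq() calls. A minimal probe() following the same pattern, written for a hypothetical foo driver rather than taken from this patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	struct clk *clk;
	void __iomem *base;
};

static irqreturn_t foo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Everything acquired with devm_* is released automatically when probe()
 * fails or the device is unbound, so no unwind labels are needed. */
static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	struct resource *res;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0)
		return -ENODEV;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_request_and_ioremap(&pdev->dev, res);
	if (!priv->base)
		return -ENOMEM;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	platform_set_drvdata(pdev, priv);

	return devm_request_irq(&pdev->dev, irq, foo_irq, 0,
				dev_name(&pdev->dev), priv);
}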
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index ebbc126e71a6..bb08a46432f4 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -516,7 +516,7 @@ static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
pcdev->active = NULL;
ret = sh_mobile_ceu_capture(pcdev);
- do_gettimeofday(&vb->v4l2_buf.timestamp);
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
if (!ret) {
vb->v4l2_buf.field = pcdev->field;
vb->v4l2_buf.sequence = pcdev->sequence++;
@@ -543,7 +543,7 @@ static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev)
return NULL;
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -572,7 +572,7 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
- pm_runtime_put_sync(ici->v4l2_dev.dev);
+ pm_runtime_put(ici->v4l2_dev.dev);
return ret;
}
@@ -587,7 +587,7 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
return 0;
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -612,7 +612,7 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
}
spin_unlock_irq(&pcdev->lock);
- pm_runtime_put_sync(ici->v4l2_dev.dev);
+ pm_runtime_put(ici->v4l2_dev.dev);
dev_info(icd->parent,
"SuperH Mobile CEU driver detached from camera %d\n",
@@ -1064,7 +1064,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
/* Add our control */
v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops,
- V4L2_CID_SHARPNESS, 0, 1, 1, 0);
+ V4L2_CID_SHARPNESS, 0, 1, 1, 1);
if (icd->ctrl_handler.error)
return icd->ctrl_handler.error;
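
The hunk above only changes the default of the CEU's boolean sharpness control from 0 to 1 (the arguments are min, max, step, default). For context, a sketch of how such a driver-private control is typically declared; the foo_* names are placeholders, not code from this patch:

#include <media/v4l2-ctrls.h>

static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	switch (ctrl->id) {
	case V4L2_CID_SHARPNESS:
		/* ctrl->val is 0 or 1 here; program the hardware filter */
		return 0;
	}
	return -EINVAL;
}

static const struct v4l2_ctrl_ops foo_ctrl_ops = {
	.s_ctrl = foo_s_ctrl,
};

static int foo_init_controls(struct v4l2_ctrl_handler *hdl)
{
	v4l2_ctrl_handler_init(hdl, 1);
	/* min 0, max 1, step 1, default 1 */
	v4l2_ctrl_new_std(hdl, &foo_ctrl_ops, V4L2_CID_SHARPNESS, 0, 1, 1, 1);
	return hdl->error;
}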
@@ -2088,15 +2088,13 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (!res || (int)irq <= 0) {
dev_err(&pdev->dev, "Not enough CEU platform resources.\n");
- err = -ENODEV;
- goto exit;
+ return -ENODEV;
}
- pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
if (!pcdev) {
dev_err(&pdev->dev, "Could not allocate pcdev\n");
- err = -ENOMEM;
- goto exit;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&pcdev->capture);
@@ -2105,19 +2103,17 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
pcdev->pdata = pdev->dev.platform_data;
if (!pcdev->pdata) {
- err = -EINVAL;
dev_err(&pdev->dev, "CEU platform data not set.\n");
- goto exit_kfree;
+ return -EINVAL;
}
pcdev->max_width = pcdev->pdata->max_width ? : 2560;
pcdev->max_height = pcdev->pdata->max_height ? : 1920;
- base = ioremap_nocache(res->start, resource_size(res));
+ base = devm_request_and_ioremap(&pdev->dev, res);
if (!base) {
- err = -ENXIO;
dev_err(&pdev->dev, "Unable to ioremap CEU registers.\n");
- goto exit_kfree;
+ return -ENXIO;
}
pcdev->irq = irq;
@@ -2133,16 +2129,15 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
DMA_MEMORY_EXCLUSIVE);
if (!err) {
dev_err(&pdev->dev, "Unable to declare CEU memory.\n");
- err = -ENXIO;
- goto exit_iounmap;
+ return -ENXIO;
}
pcdev->video_limit = resource_size(res);
}
/* request irq */
- err = request_irq(pcdev->irq, sh_mobile_ceu_irq, IRQF_DISABLED,
- dev_name(&pdev->dev), pcdev);
+ err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq,
+ IRQF_DISABLED, dev_name(&pdev->dev), pcdev);
if (err) {
dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
goto exit_release_mem;
@@ -2246,15 +2241,9 @@ exit_free_ctx:
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
exit_free_clk:
pm_runtime_disable(&pdev->dev);
- free_irq(pcdev->irq, pcdev);
exit_release_mem:
if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
dma_release_declared_memory(&pdev->dev);
-exit_iounmap:
- iounmap(base);
-exit_kfree:
- kfree(pcdev);
-exit:
return err;
}
@@ -2267,10 +2256,8 @@ static int sh_mobile_ceu_remove(struct platform_device *pdev)
soc_camera_host_unregister(soc_host);
pm_runtime_disable(&pdev->dev);
- free_irq(pcdev->irq, pcdev);
if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
dma_release_declared_memory(&pdev->dev);
- iounmap(pcdev->base);
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
if (csi2_pdev && csi2_pdev->dev.driver) {
struct module *csi2_drv = csi2_pdev->dev.driver->owner;
@@ -2279,7 +2266,6 @@ static int sh_mobile_ceu_remove(struct platform_device *pdev)
platform_device_put(csi2_pdev);
module_put(csi2_drv);
}
- kfree(pcdev);
return 0;
}
diff --git a/drivers/media/platform/soc_camera/sh_mobile_csi2.c b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
index a17aba9a0104..42c559eb4937 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_csi2.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
@@ -318,23 +318,16 @@ static int sh_csi2_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv = kzalloc(sizeof(struct sh_csi2), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_csi2), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->irq = irq;
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "CSI2 register region already claimed\n");
- ret = -EBUSY;
- goto ereqreg;
- }
-
- priv->base = ioremap(res->start, resource_size(res));
+ priv->base = devm_request_and_ioremap(&pdev->dev, res);
if (!priv->base) {
- ret = -ENXIO;
dev_err(&pdev->dev, "Unable to ioremap CSI2 registers.\n");
- goto eremap;
+ return -ENXIO;
}
priv->pdev = pdev;
@@ -357,11 +350,7 @@ static int sh_csi2_probe(struct platform_device *pdev)
return 0;
esdreg:
- iounmap(priv->base);
-eremap:
- release_mem_region(res->start, resource_size(res));
-ereqreg:
- kfree(priv);
+ platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -369,14 +358,10 @@ ereqreg:
static int sh_csi2_remove(struct platform_device *pdev)
{
struct sh_csi2 *priv = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
v4l2_device_unregister_subdev(&priv->subdev);
pm_runtime_disable(&pdev->dev);
- iounmap(priv->base);
- release_mem_region(res->start, resource_size(res));
platform_set_drvdata(pdev, NULL);
- kfree(priv);
return 0;
}
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 2ec90eae6ba0..8ec98051ea73 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -50,22 +50,22 @@ static LIST_HEAD(hosts);
static LIST_HEAD(devices);
static DEFINE_MUTEX(list_lock); /* Protects the list of hosts */
-int soc_camera_power_on(struct device *dev, struct soc_camera_link *icl)
+int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd)
{
- int ret = regulator_bulk_enable(icl->num_regulators,
- icl->regulators);
+ int ret = regulator_bulk_enable(ssdd->num_regulators,
+ ssdd->regulators);
if (ret < 0) {
dev_err(dev, "Cannot enable regulators\n");
return ret;
}
- if (icl->power) {
- ret = icl->power(dev, 1);
+ if (ssdd->power) {
+ ret = ssdd->power(dev, 1);
if (ret < 0) {
dev_err(dev,
"Platform failed to power-on the camera.\n");
- regulator_bulk_disable(icl->num_regulators,
- icl->regulators);
+ regulator_bulk_disable(ssdd->num_regulators,
+ ssdd->regulators);
}
}
@@ -73,13 +73,13 @@ int soc_camera_power_on(struct device *dev, struct soc_camera_link *icl)
}
EXPORT_SYMBOL(soc_camera_power_on);
-int soc_camera_power_off(struct device *dev, struct soc_camera_link *icl)
+int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd)
{
int ret = 0;
int err;
- if (icl->power) {
- err = icl->power(dev, 0);
+ if (ssdd->power) {
+ err = ssdd->power(dev, 0);
if (err < 0) {
dev_err(dev,
"Platform failed to power-off the camera.\n");
@@ -87,8 +87,8 @@ int soc_camera_power_off(struct device *dev, struct soc_camera_link *icl)
}
}
- err = regulator_bulk_disable(icl->num_regulators,
- icl->regulators);
+ err = regulator_bulk_disable(ssdd->num_regulators,
+ ssdd->regulators);
if (err < 0) {
dev_err(dev, "Cannot disable regulators\n");
ret = ret ? : err;
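
soc_camera_power_on()/off() keep the same regulator handling through the icl to ssdd rename. The bulk API they rely on follows the pattern sketched here, with made-up supply names; devm_regulator_bulk_get() matches the probe-time change in soc_camera_pdrv_probe() further down.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

/* Hypothetical supplies, for illustration only */
static struct regulator_bulk_data foo_supplies[] = {
	{ .supply = "vddio" },
	{ .supply = "vddcore" },
};

static int foo_get_supplies(struct device *dev)
{
	/* acquired once at probe time, released automatically on unbind */
	return devm_regulator_bulk_get(dev, ARRAY_SIZE(foo_supplies),
				       foo_supplies);
}

static int foo_power(struct device *dev, int on)
{
	if (on)
		return regulator_bulk_enable(ARRAY_SIZE(foo_supplies),
					     foo_supplies);
	return regulator_bulk_disable(ARRAY_SIZE(foo_supplies), foo_supplies);
}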
@@ -136,29 +136,29 @@ EXPORT_SYMBOL(soc_camera_xlate_by_fourcc);
/**
* soc_camera_apply_board_flags() - apply platform SOCAM_SENSOR_INVERT_* flags
- * @icl: camera platform parameters
+ * @ssdd: camera platform parameters
* @cfg: media bus configuration
* @return: resulting flags
*/
-unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
+unsigned long soc_camera_apply_board_flags(struct soc_camera_subdev_desc *ssdd,
const struct v4l2_mbus_config *cfg)
{
unsigned long f, flags = cfg->flags;
/* If only one of the two polarities is supported, switch to the opposite */
- if (icl->flags & SOCAM_SENSOR_INVERT_HSYNC) {
+ if (ssdd->flags & SOCAM_SENSOR_INVERT_HSYNC) {
f = flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW);
if (f == V4L2_MBUS_HSYNC_ACTIVE_HIGH || f == V4L2_MBUS_HSYNC_ACTIVE_LOW)
flags ^= V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW;
}
- if (icl->flags & SOCAM_SENSOR_INVERT_VSYNC) {
+ if (ssdd->flags & SOCAM_SENSOR_INVERT_VSYNC) {
f = flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW);
if (f == V4L2_MBUS_VSYNC_ACTIVE_HIGH || f == V4L2_MBUS_VSYNC_ACTIVE_LOW)
flags ^= V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW;
}
- if (icl->flags & SOCAM_SENSOR_INVERT_PCLK) {
+ if (ssdd->flags & SOCAM_SENSOR_INVERT_PCLK) {
f = flags & (V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING);
if (f == V4L2_MBUS_PCLK_SAMPLE_RISING || f == V4L2_MBUS_PCLK_SAMPLE_FALLING)
flags ^= V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING;
@@ -383,7 +383,7 @@ static int soc_camera_prepare_buf(struct file *file, void *priv,
return vb2_prepare_buf(&icd->vb2_vidq, b);
}
-/* Always entered with .video_lock held */
+/* Always entered with .host_lock held */
static int soc_camera_init_user_formats(struct soc_camera_device *icd)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
@@ -450,7 +450,7 @@ egfmt:
return ret;
}
-/* Always entered with .video_lock held */
+/* Always entered with .host_lock held */
static void soc_camera_free_user_formats(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -509,7 +509,7 @@ static int soc_camera_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct soc_camera_device *icd = dev_get_drvdata(vdev->parent);
- struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct soc_camera_desc *sdesc = to_soc_camera_desc(icd);
struct soc_camera_host *ici;
int ret;
@@ -517,9 +517,16 @@ static int soc_camera_open(struct file *file)
/* No device driver attached */
return -ENODEV;
+ /*
+ * Don't mess with the host during probe: wait until the loop in
+ * scan_add_host() completes
+ */
+ if (mutex_lock_interruptible(&list_lock))
+ return -ERESTARTSYS;
ici = to_soc_camera_host(icd->parent);
+ mutex_unlock(&list_lock);
- if (mutex_lock_interruptible(&icd->video_lock))
+ if (mutex_lock_interruptible(&ici->host_lock))
return -ERESTARTSYS;
if (!try_module_get(ici->ops->owner)) {
dev_err(icd->pdev, "Couldn't lock capture bus driver.\n");
@@ -545,13 +552,10 @@ static int soc_camera_open(struct file *file)
};
/* The camera could have been already on, try to reset */
- if (icl->reset)
- icl->reset(icd->pdev);
+ if (sdesc->subdev_desc.reset)
+ sdesc->subdev_desc.reset(icd->pdev);
- /* Don't mess with the host during probe */
- mutex_lock(&ici->host_lock);
ret = ici->ops->add(icd);
- mutex_unlock(&ici->host_lock);
if (ret < 0) {
dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret);
goto eiciadd;
@@ -570,7 +574,7 @@ static int soc_camera_open(struct file *file)
* Try to configure with default parameters. Notice: this is the
* very first open, so, we cannot race against other calls,
* apart from someone else calling open() simultaneously, but
- * .video_lock is protecting us against it.
+ * .host_lock is protecting us against it.
*/
ret = soc_camera_set_fmt(icd, &f);
if (ret < 0)
@@ -585,7 +589,7 @@ static int soc_camera_open(struct file *file)
}
v4l2_ctrl_handler_setup(&icd->ctrl_handler);
}
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
file->private_data = icd;
dev_dbg(icd->pdev, "camera device open\n");
@@ -593,7 +597,7 @@ static int soc_camera_open(struct file *file)
return 0;
/*
- * First four errors are entered with the .video_lock held
+ * First four errors are entered with the .host_lock held
* and use_count == 1
*/
einitvb:
@@ -607,7 +611,7 @@ eiciadd:
icd->use_count--;
module_put(ici->ops->owner);
emodule:
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
return ret;
}
@@ -617,7 +621,7 @@ static int soc_camera_close(struct file *file)
struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- mutex_lock(&icd->video_lock);
+ mutex_lock(&ici->host_lock);
icd->use_count--;
if (!icd->use_count) {
pm_runtime_suspend(&icd->vdev->dev);
@@ -632,7 +636,7 @@ static int soc_camera_close(struct file *file)
if (icd->streamer == file)
icd->streamer = NULL;
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
module_put(ici->ops->owner);
@@ -669,13 +673,13 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
if (icd->streamer != file)
return -EBUSY;
- if (mutex_lock_interruptible(&icd->video_lock))
+ if (mutex_lock_interruptible(&ici->host_lock))
return -ERESTARTSYS;
if (ici->ops->init_videobuf)
err = videobuf_mmap_mapper(&icd->vb_vidq, vma);
else
err = vb2_mmap(&icd->vb2_vidq, vma);
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
dev_dbg(icd->pdev, "vma start=0x%08lx, size=%ld, ret=%d\n",
(unsigned long)vma->vm_start,
@@ -694,26 +698,28 @@ static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
if (icd->streamer != file)
return POLLERR;
- mutex_lock(&icd->video_lock);
+ mutex_lock(&ici->host_lock);
if (ici->ops->init_videobuf && list_empty(&icd->vb_vidq.stream))
dev_err(icd->pdev, "Trying to poll with no queued buffers!\n");
else
res = ici->ops->poll(file, pt);
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
return res;
}
void soc_camera_lock(struct vb2_queue *vq)
{
struct soc_camera_device *icd = vb2_get_drv_priv(vq);
- mutex_lock(&icd->video_lock);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ mutex_lock(&ici->host_lock);
}
EXPORT_SYMBOL(soc_camera_lock);
void soc_camera_unlock(struct vb2_queue *vq)
{
struct soc_camera_device *icd = vb2_get_drv_priv(vq);
- mutex_unlock(&icd->video_lock);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ mutex_unlock(&ici->host_lock);
}
EXPORT_SYMBOL(soc_camera_unlock);
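
With the per-device video_lock gone, soc_camera_lock()/unlock() serialize on the host's lock instead. Host drivers typically plug them into their vb2_ops as the wait_finish/wait_prepare callbacks, so the lock is dropped while VIDIOC_DQBUF sleeps; a sketch with the driver-specific callbacks omitted:

#include <media/soc_camera.h>
#include <media/videobuf2-core.h>

static struct vb2_ops foo_videobuf_ops = {
	/* .queue_setup, .buf_queue, .start/.stop_streaming omitted here */
	.wait_prepare	= soc_camera_unlock,	/* drop ici->host_lock */
	.wait_finish	= soc_camera_lock,	/* re-take ici->host_lock */
};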
@@ -908,6 +914,8 @@ static int soc_camera_s_crop(struct file *file, void *fh,
dev_dbg(icd->pdev, "S_CROP(%ux%u@%u:%u)\n",
rect->width, rect->height, rect->left, rect->top);
+ current_crop.type = a->type;
+
/* If get_crop fails, we'll let host and / or client drivers decide */
ret = ici->ops->get_crop(icd, &current_crop);
@@ -1050,7 +1058,7 @@ static void scan_add_host(struct soc_camera_host *ici)
{
struct soc_camera_device *icd;
- mutex_lock(&ici->host_lock);
+ mutex_lock(&list_lock);
list_for_each_entry(icd, &devices, list) {
if (icd->iface == ici->nr) {
@@ -1059,28 +1067,29 @@ static void scan_add_host(struct soc_camera_host *ici)
}
}
- mutex_unlock(&ici->host_lock);
+ mutex_unlock(&list_lock);
}
#ifdef CONFIG_I2C_BOARDINFO
static int soc_camera_init_i2c(struct soc_camera_device *icd,
- struct soc_camera_link *icl)
+ struct soc_camera_desc *sdesc)
{
struct i2c_client *client;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct i2c_adapter *adap = i2c_get_adapter(icl->i2c_adapter_id);
+ struct soc_camera_host_desc *shd = &sdesc->host_desc;
+ struct i2c_adapter *adap = i2c_get_adapter(shd->i2c_adapter_id);
struct v4l2_subdev *subdev;
if (!adap) {
dev_err(icd->pdev, "Cannot get I2C adapter #%d. No driver?\n",
- icl->i2c_adapter_id);
+ shd->i2c_adapter_id);
goto ei2cga;
}
- icl->board_info->platform_data = icl;
+ shd->board_info->platform_data = &sdesc->subdev_desc;
subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap,
- icl->board_info, NULL);
+ shd->board_info, NULL);
if (!subdev)
goto ei2cnd;
@@ -1108,7 +1117,7 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd)
i2c_put_adapter(adap);
}
#else
-#define soc_camera_init_i2c(icd, icl) (-ENODEV)
+#define soc_camera_init_i2c(icd, sdesc) (-ENODEV)
#define soc_camera_free_i2c(icd) do {} while (0)
#endif
@@ -1118,7 +1127,9 @@ static int video_dev_create(struct soc_camera_device *icd);
static int soc_camera_probe(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct soc_camera_desc *sdesc = to_soc_camera_desc(icd);
+ struct soc_camera_host_desc *shd = &sdesc->host_desc;
+ struct soc_camera_subdev_desc *ssdd = &sdesc->subdev_desc;
struct device *control = NULL;
struct v4l2_subdev *sd;
struct v4l2_mbus_framefmt mf;
@@ -1137,16 +1148,13 @@ static int soc_camera_probe(struct soc_camera_device *icd)
if (ret < 0)
return ret;
- ret = regulator_bulk_get(icd->pdev, icl->num_regulators,
- icl->regulators);
- if (ret < 0)
- goto ereg;
-
/* The camera could have been already on, try to reset */
- if (icl->reset)
- icl->reset(icd->pdev);
+ if (ssdd->reset)
+ ssdd->reset(icd->pdev);
+ mutex_lock(&ici->host_lock);
ret = ici->ops->add(icd);
+ mutex_unlock(&ici->host_lock);
if (ret < 0)
goto eadd;
@@ -1156,18 +1164,18 @@ static int soc_camera_probe(struct soc_camera_device *icd)
goto evdc;
/* Non-i2c cameras, e.g., soc_camera_platform, have no board_info */
- if (icl->board_info) {
- ret = soc_camera_init_i2c(icd, icl);
+ if (shd->board_info) {
+ ret = soc_camera_init_i2c(icd, sdesc);
if (ret < 0)
goto eadddev;
- } else if (!icl->add_device || !icl->del_device) {
+ } else if (!shd->add_device || !shd->del_device) {
ret = -EINVAL;
goto eadddev;
} else {
- if (icl->module_name)
- ret = request_module(icl->module_name);
+ if (shd->module_name)
+ ret = request_module(shd->module_name);
- ret = icl->add_device(icd);
+ ret = shd->add_device(icd);
if (ret < 0)
goto eadddev;
@@ -1178,7 +1186,7 @@ static int soc_camera_probe(struct soc_camera_device *icd)
control = to_soc_camera_control(icd);
if (!control || !control->driver || !dev_get_drvdata(control) ||
!try_module_get(control->driver->owner)) {
- icl->del_device(icd);
+ shd->del_device(icd);
ret = -ENODEV;
goto enodrv;
}
@@ -1204,7 +1212,7 @@ static int soc_camera_probe(struct soc_camera_device *icd)
* itself is protected against concurrent open() calls, but we also have
* to protect our data.
*/
- mutex_lock(&icd->video_lock);
+ mutex_lock(&ici->host_lock);
ret = soc_camera_video_start(icd);
if (ret < 0)
@@ -1220,19 +1228,19 @@ static int soc_camera_probe(struct soc_camera_device *icd)
ici->ops->remove(icd);
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
return 0;
evidstart:
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
soc_camera_free_user_formats(icd);
eiufmt:
ectrl:
- if (icl->board_info) {
+ if (shd->board_info) {
soc_camera_free_i2c(icd);
} else {
- icl->del_device(icd);
+ shd->del_device(icd);
module_put(control->driver->owner);
}
enodrv:
@@ -1240,10 +1248,10 @@ eadddev:
video_device_release(icd->vdev);
icd->vdev = NULL;
evdc:
+ mutex_lock(&ici->host_lock);
ici->ops->remove(icd);
+ mutex_unlock(&ici->host_lock);
eadd:
- regulator_bulk_free(icl->num_regulators, icl->regulators);
-ereg:
v4l2_ctrl_handler_free(&icd->ctrl_handler);
return ret;
}
@@ -1254,7 +1262,7 @@ ereg:
*/
static int soc_camera_remove(struct soc_camera_device *icd)
{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct soc_camera_desc *sdesc = to_soc_camera_desc(icd);
struct video_device *vdev = icd->vdev;
BUG_ON(!icd->parent);
@@ -1265,19 +1273,17 @@ static int soc_camera_remove(struct soc_camera_device *icd)
icd->vdev = NULL;
}
- if (icl->board_info) {
+ if (sdesc->host_desc.board_info) {
soc_camera_free_i2c(icd);
} else {
struct device_driver *drv = to_soc_camera_control(icd)->driver;
if (drv) {
- icl->del_device(icd);
+ sdesc->host_desc.del_device(icd);
module_put(drv->owner);
}
}
soc_camera_free_user_formats(icd);
- regulator_bulk_free(icl->num_regulators, icl->regulators);
-
return 0;
}
@@ -1442,7 +1448,6 @@ static int soc_camera_device_register(struct soc_camera_device *icd)
icd->devnum = num;
icd->use_count = 0;
icd->host_priv = NULL;
- mutex_init(&icd->video_lock);
list_add_tail(&icd->list, &devices);
@@ -1500,7 +1505,7 @@ static int video_dev_create(struct soc_camera_device *icd)
vdev->release = video_device_release;
vdev->tvnorms = V4L2_STD_UNKNOWN;
vdev->ctrl_handler = &icd->ctrl_handler;
- vdev->lock = &icd->video_lock;
+ vdev->lock = &ici->host_lock;
icd->vdev = vdev;
@@ -1508,7 +1513,7 @@ static int video_dev_create(struct soc_camera_device *icd)
}
/*
- * Called from soc_camera_probe() above (with .video_lock held???)
+ * Called from soc_camera_probe() above with .host_lock held
*/
static int soc_camera_video_start(struct soc_camera_device *icd)
{
@@ -1532,18 +1537,25 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
static int soc_camera_pdrv_probe(struct platform_device *pdev)
{
- struct soc_camera_link *icl = pdev->dev.platform_data;
+ struct soc_camera_desc *sdesc = pdev->dev.platform_data;
+ struct soc_camera_subdev_desc *ssdd = &sdesc->subdev_desc;
struct soc_camera_device *icd;
+ int ret;
- if (!icl)
+ if (!sdesc)
return -EINVAL;
icd = devm_kzalloc(&pdev->dev, sizeof(*icd), GFP_KERNEL);
if (!icd)
return -ENOMEM;
- icd->iface = icl->bus_id;
- icd->link = icl;
+ ret = devm_regulator_bulk_get(&pdev->dev, ssdd->num_regulators,
+ ssdd->regulators);
+ if (ret < 0)
+ return ret;
+
+ icd->iface = sdesc->host_desc.bus_id;
+ icd->sdesc = sdesc;
icd->pdev = &pdev->dev;
platform_set_drvdata(pdev, icd);
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
index 7cf7fd16481f..ce3b1d6a4734 100644
--- a/drivers/media/platform/soc_camera/soc_camera_platform.c
+++ b/drivers/media/platform/soc_camera/soc_camera_platform.c
@@ -54,7 +54,7 @@ static int soc_camera_platform_s_power(struct v4l2_subdev *sd, int on)
{
struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
- return soc_camera_set_power(p->icd->control, p->icd->link, on);
+ return soc_camera_set_power(p->icd->control, &p->icd->sdesc->subdev_desc, on);
}
static struct v4l2_subdev_core_ops platform_subdev_core_ops = {
@@ -148,7 +148,7 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -173,7 +173,6 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
evdrs:
platform_set_drvdata(pdev, NULL);
- kfree(priv);
return ret;
}
@@ -185,7 +184,6 @@ static int soc_camera_platform_remove(struct platform_device *pdev)
p->icd->control = NULL;
v4l2_device_unregister_subdev(&priv->subdev);
platform_set_drvdata(pdev, NULL);
- kfree(priv);
return 0;
}
diff --git a/drivers/media/platform/soc_camera/soc_mediabus.c b/drivers/media/platform/soc_camera/soc_mediabus.c
index a397812635d6..89dce097a827 100644
--- a/drivers/media/platform/soc_camera/soc_mediabus.c
+++ b/drivers/media/platform/soc_camera/soc_mediabus.c
@@ -378,9 +378,6 @@ EXPORT_SYMBOL(soc_mbus_samples_per_pixel);
s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
{
- if (mf->fourcc == V4L2_PIX_FMT_JPEG)
- return 0;
-
if (mf->layout != SOC_MBUS_LAYOUT_PACKED)
return width * mf->bits_per_sample / 8;
@@ -403,9 +400,6 @@ EXPORT_SYMBOL(soc_mbus_bytes_per_line);
s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
u32 bytes_per_line, u32 height)
{
- if (mf->fourcc == V4L2_PIX_FMT_JPEG)
- return 0;
-
if (mf->layout == SOC_MBUS_LAYOUT_PACKED)
return bytes_per_line * height;
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
index d854d08a6c7f..c3a2a4484401 100644
--- a/drivers/media/platform/timblogiw.c
+++ b/drivers/media/platform/timblogiw.c
@@ -130,7 +130,7 @@ static void timblogiw_dma_cb(void *data)
if (vb->state != VIDEOBUF_ERROR) {
list_del(&vb->queue);
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
vb->field_count = fh->frame_count * 2;
vb->state = VIDEOBUF_DONE;
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 63e8c3461239..b051c4a28554 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -18,6 +18,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <media/ov7670.h>
#include <media/videobuf-dma-sg.h>
#include <linux/delay.h>
@@ -63,6 +64,7 @@ enum viacam_opstate { S_IDLE = 0, S_RUNNING = 1 };
struct via_camera {
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
struct video_device vdev;
struct v4l2_subdev *sensor;
struct platform_device *platdev;
@@ -818,47 +820,6 @@ static int viacam_g_chip_ident(struct file *file, void *priv,
}
/*
- * Control ops are passed through to the sensor.
- */
-static int viacam_queryctrl(struct file *filp, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct via_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->lock);
- ret = sensor_call(cam, core, queryctrl, qc);
- mutex_unlock(&cam->lock);
- return ret;
-}
-
-
-static int viacam_g_ctrl(struct file *filp, void *priv,
- struct v4l2_control *ctrl)
-{
- struct via_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->lock);
- ret = sensor_call(cam, core, g_ctrl, ctrl);
- mutex_unlock(&cam->lock);
- return ret;
-}
-
-
-static int viacam_s_ctrl(struct file *filp, void *priv,
- struct v4l2_control *ctrl)
-{
- struct via_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->lock);
- ret = sensor_call(cam, core, s_ctrl, ctrl);
- mutex_unlock(&cam->lock);
- return ret;
-}
-
-/*
* Only one input.
*/
static int viacam_enum_input(struct file *filp, void *priv,
@@ -1214,9 +1175,6 @@ static int viacam_enum_frameintervals(struct file *filp, void *priv,
static const struct v4l2_ioctl_ops viacam_ioctl_ops = {
.vidioc_g_chip_ident = viacam_g_chip_ident,
- .vidioc_queryctrl = viacam_queryctrl,
- .vidioc_g_ctrl = viacam_g_ctrl,
- .vidioc_s_ctrl = viacam_s_ctrl,
.vidioc_enum_input = viacam_enum_input,
.vidioc_g_input = viacam_g_input,
.vidioc_s_input = viacam_s_input,
@@ -1418,8 +1376,12 @@ static int viacam_probe(struct platform_device *pdev)
ret = v4l2_device_register(&pdev->dev, &cam->v4l2_dev);
if (ret) {
dev_err(&pdev->dev, "Unable to register v4l2 device\n");
- return ret;
+ goto out_free;
}
+ ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10);
+ if (ret)
+ goto out_unregister;
+ cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler;
/*
* Convince the system that we can do DMA.
*/
@@ -1436,7 +1398,7 @@ static int viacam_probe(struct platform_device *pdev)
*/
ret = via_sensor_power_setup(cam);
if (ret)
- goto out_unregister;
+ goto out_ctrl_hdl_free;
via_sensor_power_up(cam);
/*
@@ -1485,8 +1447,12 @@ out_irq:
free_irq(viadev->pdev->irq, cam);
out_power_down:
via_sensor_power_release(cam);
+out_ctrl_hdl_free:
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
out_unregister:
v4l2_device_unregister(&cam->v4l2_dev);
+out_free:
+ kfree(cam);
return ret;
}
@@ -1499,6 +1465,8 @@ static int viacam_remove(struct platform_device *pdev)
v4l2_device_unregister(&cam->v4l2_dev);
free_irq(viadev->pdev->irq, cam);
via_sensor_power_release(cam);
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
+ kfree(cam);
via_cam_info = NULL;
return 0;
}
diff --git a/drivers/media/platform/vino.c b/drivers/media/platform/vino.c
index 70b0bf4b2900..eb5d6f955709 100644
--- a/drivers/media/platform/vino.c
+++ b/drivers/media/platform/vino.c
@@ -2474,8 +2474,8 @@ static irqreturn_t vino_interrupt(int irq, void *dev_id)
if ((!handled_a) && (done_a || skip_a)) {
if (!skip_a) {
- do_gettimeofday(&vino_drvdata->
- a.int_data.timestamp);
+ v4l2_get_timestamp(
+ &vino_drvdata->a.int_data.timestamp);
vino_drvdata->a.int_data.frame_counter = fc_a;
}
vino_drvdata->a.int_data.skip = skip_a;
@@ -2489,8 +2489,8 @@ static irqreturn_t vino_interrupt(int irq, void *dev_id)
if ((!handled_b) && (done_b || skip_b)) {
if (!skip_b) {
- do_gettimeofday(&vino_drvdata->
- b.int_data.timestamp);
+ v4l2_get_timestamp(
+ &vino_drvdata->b.int_data.timestamp);
vino_drvdata->b.int_data.frame_counter = fc_b;
}
vino_drvdata->b.int_data.skip = skip_b;
@@ -3410,6 +3410,9 @@ static void vino_v4l2_get_buffer_status(struct vino_channel_settings *vcs,
if (fb->map_count > 0)
b->flags |= V4L2_BUF_FLAG_MAPPED;
+ b->flags &= ~V4L2_BUF_FLAG_TIMESTAMP_MASK;
+ b->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
b->index = fb->id;
b->memory = (vcs->fb_queue.type == VINO_MEMORY_MMAP) ?
V4L2_MEMORY_MMAP : V4L2_MEMORY_USERPTR;
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
index 0d59b9db83cb..8a33a712f480 100644
--- a/drivers/media/platform/vivi.c
+++ b/drivers/media/platform/vivi.c
@@ -36,9 +36,17 @@
#define VIVI_MODULE_NAME "vivi"
-/* Wake up at about 30 fps */
-#define WAKE_NUMERATOR 30
-#define WAKE_DENOMINATOR 1001
+/* Maximum allowed frame rate
+ *
+ * Vivi will allow setting timeperframe in [1/FPS_MAX - FPS_MAX/1] range.
+ *
+ * Ideally FPS_MAX should be infinity, i.e. practically UINT_MAX, but that
+ * might hit application errors when they manipulate these values.
+ *
+ * Besides, for tpf < 1ms image-generation logic should be changed, to avoid
+ * producing frames with equal content.
+ */
+#define FPS_MAX 1000
#define MAX_WIDTH 1920
#define MAX_HEIGHT 1200
@@ -69,6 +77,12 @@ MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
/* Global font descriptor */
static const u8 *font8x16;
+/* timeperframe: min/max and default */
+static const struct v4l2_fract
+ tpf_min = {.numerator = 1, .denominator = FPS_MAX},
+ tpf_max = {.numerator = FPS_MAX, .denominator = 1},
+ tpf_default = {.numerator = 1001, .denominator = 30000}; /* NTSC */
+
#define dprintk(dev, level, fmt, arg...) \
v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg)
@@ -77,13 +91,13 @@ static const u8 *font8x16;
------------------------------------------------------------------*/
struct vivi_fmt {
- char *name;
+ const char *name;
u32 fourcc; /* v4l2 format id */
u8 depth;
bool is_yuv;
};
-static struct vivi_fmt formats[] = {
+static const struct vivi_fmt formats[] = {
{
.name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
@@ -150,14 +164,14 @@ static struct vivi_fmt formats[] = {
},
};
-static struct vivi_fmt *get_format(struct v4l2_format *f)
+static const struct vivi_fmt *__get_format(u32 pixelformat)
{
- struct vivi_fmt *fmt;
+ const struct vivi_fmt *fmt;
unsigned int k;
for (k = 0; k < ARRAY_SIZE(formats); k++) {
fmt = &formats[k];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
+ if (fmt->fourcc == pixelformat)
break;
}
@@ -167,12 +181,17 @@ static struct vivi_fmt *get_format(struct v4l2_format *f)
return &formats[k];
}
+static const struct vivi_fmt *get_format(struct v4l2_format *f)
+{
+ return __get_format(f->fmt.pix.pixelformat);
+}
+
/* buffer for one video frame */
struct vivi_buffer {
/* common v4l buffer stuff -- must be first */
struct vb2_buffer vb;
struct list_head list;
- struct vivi_fmt *fmt;
+ const struct vivi_fmt *fmt;
};
struct vivi_dmaqueue {
@@ -231,15 +250,17 @@ struct vivi_dev {
int input;
/* video capture */
- struct vivi_fmt *fmt;
+ const struct vivi_fmt *fmt;
+ struct v4l2_fract timeperframe;
unsigned int width, height;
struct vb2_queue vb_vidq;
unsigned int field_count;
u8 bars[9][3];
- u8 line[MAX_WIDTH * 8];
+ u8 line[MAX_WIDTH * 8] __attribute__((__aligned__(4)));
unsigned int pixelsize;
u8 alpha_component;
+ u32 textfg, textbg;
};
/* ------------------------------------------------------------------
@@ -276,7 +297,7 @@ struct bar_std {
/* Maximum number of bars are 10 - otherwise, the input print code
should be modified */
-static struct bar_std bars[] = {
+static const struct bar_std bars[] = {
{ /* Standard ITU-R color bar sequence */
{ COLOR_WHITE, COLOR_AMBER, COLOR_CYAN, COLOR_GREEN,
COLOR_MAGENTA, COLOR_RED, COLOR_BLUE, COLOR_BLACK, COLOR_BLACK }
@@ -511,66 +532,100 @@ static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos, bool odd)
static void precalculate_line(struct vivi_dev *dev)
{
- int w;
-
- for (w = 0; w < dev->width * 2; w++) {
- int colorpos = w / (dev->width / 8) % 8;
-
- gen_twopix(dev, dev->line + w * dev->pixelsize, colorpos, w & 1);
+ unsigned pixsize = dev->pixelsize;
+ unsigned pixsize2 = 2*pixsize;
+ int colorpos;
+ u8 *pos;
+
+ for (colorpos = 0; colorpos < 16; ++colorpos) {
+ u8 pix[8];
+ int wstart = colorpos * dev->width / 8;
+ int wend = (colorpos+1) * dev->width / 8;
+ int w;
+
+ gen_twopix(dev, &pix[0], colorpos % 8, 0);
+ gen_twopix(dev, &pix[pixsize], colorpos % 8, 1);
+
+ for (w = wstart/2*2, pos = dev->line + w*pixsize; w < wend; w += 2, pos += pixsize2)
+ memcpy(pos, pix, pixsize2);
}
}
+/* need this to do rgb24 rendering */
+typedef struct { u16 __; u8 _; } __attribute__((packed)) x24;
+
static void gen_text(struct vivi_dev *dev, char *basep,
int y, int x, char *text)
{
int line;
+ unsigned int width = dev->width;
/* Checks if it is possible to show string */
- if (y + 16 >= dev->height || x + strlen(text) * 8 >= dev->width)
+ if (y + 16 >= dev->height || x + strlen(text) * 8 >= width)
return;
/* Print stream time */
- for (line = y; line < y + 16; line++) {
- int j = 0;
- char *pos = basep + line * dev->width * dev->pixelsize + x * dev->pixelsize;
- char *s;
-
- for (s = text; *s; s++) {
- u8 chr = font8x16[*s * 16 + line - y];
- int i;
-
- for (i = 0; i < 7; i++, j++) {
- /* Draw white font on black background */
- if (chr & (1 << (7 - i)))
- gen_twopix(dev, pos + j * dev->pixelsize, WHITE, (x+y) & 1);
- else
- gen_twopix(dev, pos + j * dev->pixelsize, TEXT_BLACK, (x+y) & 1);
- }
- }
+#define PRINTSTR(PIXTYPE) do { \
+ PIXTYPE fg; \
+ PIXTYPE bg; \
+ memcpy(&fg, &dev->textfg, sizeof(PIXTYPE)); \
+ memcpy(&bg, &dev->textbg, sizeof(PIXTYPE)); \
+ \
+ for (line = 0; line < 16; line++) { \
+ PIXTYPE *pos = (PIXTYPE *)( basep + ((y + line) * width + x) * sizeof(PIXTYPE) ); \
+ u8 *s; \
+ \
+ for (s = text; *s; s++) { \
+ u8 chr = font8x16[*s * 16 + line]; \
+ \
+ pos[0] = (chr & (0x01 << 7) ? fg : bg); \
+ pos[1] = (chr & (0x01 << 6) ? fg : bg); \
+ pos[2] = (chr & (0x01 << 5) ? fg : bg); \
+ pos[3] = (chr & (0x01 << 4) ? fg : bg); \
+ pos[4] = (chr & (0x01 << 3) ? fg : bg); \
+ pos[5] = (chr & (0x01 << 2) ? fg : bg); \
+ pos[6] = (chr & (0x01 << 1) ? fg : bg); \
+ pos[7] = (chr & (0x01 << 0) ? fg : bg); \
+ \
+ pos += 8; \
+ } \
+ } \
+} while (0)
+
+ switch (dev->pixelsize) {
+ case 2:
+ PRINTSTR(u16); break;
+ case 4:
+ PRINTSTR(u32); break;
+ case 3:
+ PRINTSTR(x24); break;
}
}
static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
{
- int wmax = dev->width;
+ int stride = dev->width * dev->pixelsize;
int hmax = dev->height;
- struct timeval ts;
void *vbuf = vb2_plane_vaddr(&buf->vb, 0);
unsigned ms;
char str[100];
int h, line = 1;
+ u8 *linestart;
s32 gain;
if (!vbuf)
return;
+ linestart = dev->line + (dev->mv_count % dev->width) * dev->pixelsize;
+
for (h = 0; h < hmax; h++)
- memcpy(vbuf + h * wmax * dev->pixelsize,
- dev->line + (dev->mv_count % wmax) * dev->pixelsize,
- wmax * dev->pixelsize);
+ memcpy(vbuf + h * stride, linestart, stride);
/* Updates stream time */
+ gen_twopix(dev, (u8 *)&dev->textbg, TEXT_BLACK, /*odd=*/ 0);
+ gen_twopix(dev, (u8 *)&dev->textfg, WHITE, /*odd=*/ 0);
+
dev->ms += jiffies_to_msecs(jiffies - dev->jiffies);
dev->jiffies = jiffies;
ms = dev->ms;
@@ -622,8 +677,7 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
dev->field_count++;
buf->vb.v4l2_buf.sequence = dev->field_count >> 1;
- do_gettimeofday(&ts);
- buf->vb.v4l2_buf.timestamp = ts;
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
}
static void vivi_thread_tick(struct vivi_dev *dev)
@@ -645,7 +699,7 @@ static void vivi_thread_tick(struct vivi_dev *dev)
list_del(&buf->list);
spin_unlock_irqrestore(&dev->slock, flags);
- do_gettimeofday(&buf->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
/* Fill buffer */
vivi_fillbuff(dev, buf);
@@ -655,8 +709,8 @@ static void vivi_thread_tick(struct vivi_dev *dev)
dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
}
-#define frames_to_ms(frames) \
- ((frames * WAKE_NUMERATOR * 1000) / WAKE_DENOMINATOR)
+#define frames_to_ms(dev, frames) \
+ ((frames * dev->timeperframe.numerator * 1000) / dev->timeperframe.denominator)
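As a rough worked example (assuming the NTSC default timeperframe of 1001/30000 that this patch installs at probe time), the reworked macro yields roughly one frame period per tick:

    /* illustrative only: dev->timeperframe == tpf_default (1001/30000) */
    unsigned ms = (1 * 1001 * 1000) / 30000;        /* = 33 ms */
    unsigned long timeout = msecs_to_jiffies(ms);   /* sleep about one frame */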
static void vivi_sleep(struct vivi_dev *dev)
{
@@ -672,7 +726,7 @@ static void vivi_sleep(struct vivi_dev *dev)
goto stop_task;
/* Calculate time to wake up */
- timeout = msecs_to_jiffies(frames_to_ms(1));
+ timeout = msecs_to_jiffies(frames_to_ms(dev, 1));
vivi_thread_tick(dev);
@@ -872,7 +926,7 @@ static void vivi_unlock(struct vb2_queue *vq)
}
-static struct vb2_ops vivi_video_qops = {
+static const struct vb2_ops vivi_video_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
@@ -903,7 +957,7 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct vivi_fmt *fmt;
+ const struct vivi_fmt *fmt;
if (f->index >= ARRAY_SIZE(formats))
return -EINVAL;
@@ -939,7 +993,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct vivi_dev *dev = video_drvdata(file);
- struct vivi_fmt *fmt;
+ const struct vivi_fmt *fmt;
fmt = get_format(f);
if (!fmt) {
@@ -1044,6 +1098,70 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
return 0;
}
+/* timeperframe is arbitrary and continuous */
+static int vidioc_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fival)
+{
+ const struct vivi_fmt *fmt;
+
+ if (fival->index)
+ return -EINVAL;
+
+ fmt = __get_format(fival->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ /* regarding width & height - we support any */
+
+ fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
+
+ /* fill in stepwise (step=1.0 is required by V4L2 spec) */
+ fival->stepwise.min = tpf_min;
+ fival->stepwise.max = tpf_max;
+ fival->stepwise.step = (struct v4l2_fract) {1, 1};
+
+ return 0;
+}
+
+static int vidioc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vivi_dev *dev = video_drvdata(file);
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe = dev->timeperframe;
+ parm->parm.capture.readbuffers = 1;
+ return 0;
+}
+
+#define FRACT_CMP(a, OP, b) \
+ ((u64)(a).numerator * (b).denominator OP (u64)(b).numerator * (a).denominator)
+
+static int vidioc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vivi_dev *dev = video_drvdata(file);
+ struct v4l2_fract tpf;
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ tpf = parm->parm.capture.timeperframe;
+
+ /* tpf: {*, 0} resets timing; clip to [min, max] */
+ tpf = tpf.denominator ? tpf : tpf_default;
+ tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
+ tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;
+
+ dev->timeperframe = tpf;
+ parm->parm.capture.timeperframe = tpf;
+ parm->parm.capture.readbuffers = 1;
+ return 0;
+}
+
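FRACT_CMP() compares two fractions by cross-multiplying in 64 bits, so the clamping in vidioc_s_parm() never touches floating point. A minimal standalone sketch of the same clamping logic (the struct and helper names below are illustrative, not taken from the driver):

    struct frac { unsigned num, den; };

    /* a < b, i.e. a.num/a.den < b.num/b.den, compared by cross-multiplication */
    static int frac_lt(struct frac a, struct frac b)
    {
            return (unsigned long long)a.num * b.den <
                   (unsigned long long)b.num * a.den;
    }

    static struct frac clamp_tpf(struct frac tpf)
    {
            const struct frac min = { 1, 1000 };      /* tpf_min     */
            const struct frac max = { 1000, 1 };      /* tpf_max     */
            const struct frac def = { 1001, 30000 };  /* tpf_default */

            if (!tpf.den)              /* {*, 0} resets to the default */
                    tpf = def;
            if (frac_lt(tpf, min))
                    tpf = min;
            if (frac_lt(max, tpf))
                    tpf = max;
            return tpf;
    }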
/* --- controls ---------------------------------------------- */
static int vivi_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
@@ -1202,6 +1320,9 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
+ .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_s_parm = vidioc_s_parm,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_log_status = v4l2_ctrl_log_status,
@@ -1209,7 +1330,7 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-static struct video_device vivi_template = {
+static const struct video_device vivi_template = {
.name = "vivi",
.fops = &vivi_fops,
.ioctl_ops = &vivi_ioctl_ops,
@@ -1260,6 +1381,7 @@ static int __init vivi_create_instance(int inst)
goto free_dev;
dev->fmt = &formats[0];
+ dev->timeperframe = tpf_default;
dev->width = 640;
dev->height = 480;
dev->pixelsize = dev->fmt->depth / 8;
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 8090b87b3066..24e64a09884c 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -124,6 +124,18 @@ config USB_KEENE
To compile this driver as a module, choose M here: the
module will be called radio-keene.
+config USB_MA901
+ tristate "Masterkit MA901 USB FM radio support"
+ depends on USB && VIDEO_V4L2
+ ---help---
+ Say Y here if you want to connect this type of radio to your
+ computer's USB port. Note that the audio is not digital, and
+ you must connect the line out connector to a sound card or a
+ set of speakers or headphones.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-ma901.
+
config RADIO_TEA5764
tristate "TEA5764 I2C FM radio support"
depends on I2C && VIDEO_V4L2
@@ -180,7 +192,7 @@ config RADIO_TIMBERDALE
config RADIO_WL1273
tristate "Texas Instruments WL1273 I2C FM Radio"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_V4L2 && GENERIC_HARDIRQS
select MFD_CORE
select MFD_WL1273_CORE
select FW_LOADER
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index c03ce4fe74e9..303eaebdb85a 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_USB_DSBR) += dsbr100.o
obj-$(CONFIG_RADIO_SI470X) += si470x/
obj-$(CONFIG_USB_MR800) += radio-mr800.o
obj-$(CONFIG_USB_KEENE) += radio-keene.o
+obj-$(CONFIG_USB_MA901) += radio-ma901.o
obj-$(CONFIG_RADIO_TEA5764) += radio-tea5764.o
obj-$(CONFIG_RADIO_SAA7706H) += saa7706h.o
obj-$(CONFIG_RADIO_TEF6862) += tef6862.o
diff --git a/drivers/media/radio/radio-ma901.c b/drivers/media/radio/radio-ma901.c
new file mode 100644
index 000000000000..c61f590029ad
--- /dev/null
+++ b/drivers/media/radio/radio-ma901.c
@@ -0,0 +1,460 @@
+/*
+ * Driver for the MasterKit MA901 USB FM radio. This device plugs
+ * into the USB port and into an analog audio input or headphones, so this
+ * driver only deals with initialization, frequency setting and volume.
+ *
+ * Copyright (c) 2012 Alexey Klimov <klimov.linux@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <linux/usb.h>
+#include <linux/mutex.h>
+
+#define DRIVER_AUTHOR "Alexey Klimov <klimov.linux@gmail.com>"
+#define DRIVER_DESC "Masterkit MA901 USB FM radio driver"
+#define DRIVER_VERSION "0.0.1"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+#define USB_MA901_VENDOR 0x16c0
+#define USB_MA901_PRODUCT 0x05df
+
+/* dev_warn macro with driver name */
+#define MA901_DRIVER_NAME "radio-ma901"
+#define ma901radio_dev_warn(dev, fmt, arg...) \
+ dev_warn(dev, MA901_DRIVER_NAME " - " fmt, ##arg)
+
+#define ma901radio_dev_err(dev, fmt, arg...) \
+ dev_err(dev, MA901_DRIVER_NAME " - " fmt, ##arg)
+
+/* USB_TIMEOUT should probably be configurable via a module parameter */
+#define BUFFER_LENGTH 8
+#define USB_TIMEOUT 500
+
+#define FREQ_MIN 87.5
+#define FREQ_MAX 108.0
+#define FREQ_MUL 16000
+
+#define MA901_VOLUME_MAX 16
+#define MA901_VOLUME_MIN 0
+
+/* Commands that the device understands.
+ * The list is not complete and will be extended as new functions are implemented.
+ */
+#define MA901_RADIO_SET_FREQ 0x03
+#define MA901_RADIO_SET_VOLUME 0x04
+#define MA901_RADIO_SET_MONO_STEREO 0x05
+
+/* Convenience defines for ma901_set_stereo */
+#define MA901_WANT_STEREO 0x50
+#define MA901_WANT_MONO 0xd0
+
+/* module parameter */
+static int radio_nr = -1;
+module_param(radio_nr, int, 0);
+MODULE_PARM_DESC(radio_nr, "Radio file number");
+
+/* Data for one (physical) device */
+struct ma901radio_device {
+ /* reference to USB and video device */
+ struct usb_device *usbdev;
+ struct usb_interface *intf;
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
+
+ u8 *buffer;
+ struct mutex lock; /* buffer locking */
+ int curfreq;
+ u16 volume;
+ int stereo;
+ bool muted;
+};
+
+static inline struct ma901radio_device *to_ma901radio_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct ma901radio_device, v4l2_dev);
+}
+
+/* set a frequency; freq is given in V4L2 TUNER_LOW units, i.e. 1/16 kHz */
+static int ma901radio_set_freq(struct ma901radio_device *radio, int freq)
+{
+ unsigned int freq_send = 0x300 + (freq >> 5) / 25;
+ int retval;
+
+ radio->buffer[0] = 0x0a;
+ radio->buffer[1] = MA901_RADIO_SET_FREQ;
+ radio->buffer[2] = ((freq_send >> 8) & 0xff) + 0x80;
+ radio->buffer[3] = freq_send & 0xff;
+ radio->buffer[4] = 0x00;
+ radio->buffer[5] = 0x00;
+ radio->buffer[6] = 0x00;
+ radio->buffer[7] = 0x00;
+
+ retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+ 9, 0x21, 0x0300, 0,
+ radio->buffer, BUFFER_LENGTH, USB_TIMEOUT);
+ if (retval < 0)
+ return retval;
+
+ radio->curfreq = freq;
+ return 0;
+}
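A hand trace of the conversion above, assuming a tuning request of 100.0 MHz (the meaning of the resulting register value is device specific and not documented in this patch):

    unsigned int freq      = 100 * 16000;               /* 100 MHz in 1/16 kHz units */
    unsigned int freq_send = 0x300 + (freq >> 5) / 25;  /* 0x300 + 2000 = 0x0ad0 */
    u8 hi = ((freq_send >> 8) & 0xff) + 0x80;            /* 0x8a -> buffer[2] */
    u8 lo = freq_send & 0xff;                            /* 0xd0 -> buffer[3] */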
+
+static int ma901radio_set_volume(struct ma901radio_device *radio, u16 vol_to_set)
+{
+ int retval;
+
+ radio->buffer[0] = 0x0a;
+ radio->buffer[1] = MA901_RADIO_SET_VOLUME;
+ radio->buffer[2] = 0xc2;
+ radio->buffer[3] = vol_to_set + 0x20;
+ radio->buffer[4] = 0x00;
+ radio->buffer[5] = 0x00;
+ radio->buffer[6] = 0x00;
+ radio->buffer[7] = 0x00;
+
+ retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+ 9, 0x21, 0x0300, 0,
+ radio->buffer, BUFFER_LENGTH, USB_TIMEOUT);
+ if (retval < 0)
+ return retval;
+
+ radio->volume = vol_to_set;
+ return retval;
+}
+
+static int ma901_set_stereo(struct ma901radio_device *radio, u8 stereo)
+{
+ int retval;
+
+ radio->buffer[0] = 0x0a;
+ radio->buffer[1] = MA901_RADIO_SET_MONO_STEREO;
+ radio->buffer[2] = stereo;
+ radio->buffer[3] = 0x00;
+ radio->buffer[4] = 0x00;
+ radio->buffer[5] = 0x00;
+ radio->buffer[6] = 0x00;
+ radio->buffer[7] = 0x00;
+
+ retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+ 9, 0x21, 0x0300, 0,
+ radio->buffer, BUFFER_LENGTH, USB_TIMEOUT);
+
+ if (retval < 0)
+ return retval;
+
+ if (stereo == MA901_WANT_STEREO)
+ radio->stereo = V4L2_TUNER_MODE_STEREO;
+ else
+ radio->stereo = V4L2_TUNER_MODE_MONO;
+
+ return retval;
+}
+
+/* Handle unplugging of the device.
+ * We call video_unregister_device in any case.
+ * The last function called in this procedure is
+ * usb_ma901radio_release.
+ */
+static void usb_ma901radio_disconnect(struct usb_interface *intf)
+{
+ struct ma901radio_device *radio = to_ma901radio_dev(usb_get_intfdata(intf));
+
+ mutex_lock(&radio->lock);
+ video_unregister_device(&radio->vdev);
+ usb_set_intfdata(intf, NULL);
+ v4l2_device_disconnect(&radio->v4l2_dev);
+ mutex_unlock(&radio->lock);
+ v4l2_device_put(&radio->v4l2_dev);
+}
+
+/* vidioc_querycap - query device capabilities */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *v)
+{
+ struct ma901radio_device *radio = video_drvdata(file);
+
+ strlcpy(v->driver, "radio-ma901", sizeof(v->driver));
+ strlcpy(v->card, "Masterkit MA901 USB FM Radio", sizeof(v->card));
+ usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
+ v->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+/* vidioc_g_tuner - get tuner attributes */
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *v)
+{
+ struct ma901radio_device *radio = video_drvdata(file);
+
+ if (v->index > 0)
+ return -EINVAL;
+
+ v->signal = 0;
+
+ /* TODO: the same note as in _probe() applies here.
+ * Once reading of stats is implemented we can call
+ * ma901radio_get_stat().
+ * retval = ma901radio_get_stat(radio, &is_stereo, &v->signal);
+ */
+
+ strcpy(v->name, "FM");
+ v->type = V4L2_TUNER_RADIO;
+ v->rangelow = FREQ_MIN * FREQ_MUL;
+ v->rangehigh = FREQ_MAX * FREQ_MUL;
+ v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+ /* v->rxsubchans = is_stereo ? V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO; */
+ v->audmode = radio->stereo ?
+ V4L2_TUNER_MODE_STEREO : V4L2_TUNER_MODE_MONO;
+ return 0;
+}
+
+/* vidioc_s_tuner - set tuner attributes */
+static int vidioc_s_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *v)
+{
+ struct ma901radio_device *radio = video_drvdata(file);
+
+ if (v->index > 0)
+ return -EINVAL;
+
+ /* mono/stereo selector */
+ switch (v->audmode) {
+ case V4L2_TUNER_MODE_MONO:
+ return ma901_set_stereo(radio, MA901_WANT_MONO);
+ default:
+ return ma901_set_stereo(radio, MA901_WANT_STEREO);
+ }
+}
+
+/* vidioc_s_frequency - set tuner radio frequency */
+static int vidioc_s_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct ma901radio_device *radio = video_drvdata(file);
+
+ if (f->tuner != 0)
+ return -EINVAL;
+
+ return ma901radio_set_freq(radio, clamp_t(unsigned, f->frequency,
+ FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL));
+}
+
+/* vidioc_g_frequency - get tuner radio frequency */
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct ma901radio_device *radio = video_drvdata(file);
+
+ if (f->tuner != 0)
+ return -EINVAL;
+ f->frequency = radio->curfreq;
+
+ return 0;
+}
+
+static int usb_ma901radio_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ma901radio_device *radio =
+ container_of(ctrl->handler, struct ma901radio_device, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_VOLUME: /* set volume */
+ return ma901radio_set_volume(radio, (u16)ctrl->val);
+ }
+
+ return -EINVAL;
+}
+
+/* TODO: Do we really need to implement suspend and resume functions?
+ * The radio has its own memory and will continue playing as long as power is
+ * present on the USB port; on resume it starts playing again based on the
+ * freq and volume values stored in device memory.
+ */
+static int usb_ma901radio_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ return 0;
+}
+
+static int usb_ma901radio_resume(struct usb_interface *intf)
+{
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops usb_ma901radio_ctrl_ops = {
+ .s_ctrl = usb_ma901radio_s_ctrl,
+};
+
+/* File system interface */
+static const struct v4l2_file_operations usb_ma901radio_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops usb_ma901radio_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static void usb_ma901radio_release(struct v4l2_device *v4l2_dev)
+{
+ struct ma901radio_device *radio = to_ma901radio_dev(v4l2_dev);
+
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(&radio->v4l2_dev);
+ kfree(radio->buffer);
+ kfree(radio);
+}
+
+/* check if the device is present and register with v4l and usb if it is */
+static int usb_ma901radio_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct ma901radio_device *radio;
+ int retval = 0;
+
+ radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL);
+ if (!radio) {
+ dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n");
+ retval = -ENOMEM;
+ goto err;
+ }
+
+ radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
+ if (!radio->buffer) {
+ dev_err(&intf->dev, "kmalloc for radio->buffer failed\n");
+ retval = -ENOMEM;
+ goto err_nobuf;
+ }
+
+ retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
+ if (retval < 0) {
+ dev_err(&intf->dev, "couldn't register v4l2_device\n");
+ goto err_v4l2;
+ }
+
+ v4l2_ctrl_handler_init(&radio->hdl, 1);
+
+ /* TODO: It looks like this radio doesn't have a mute/unmute control
+ * and the Windows program just emulates it using the volume control.
+ * Let's plan to do the same in this driver.
+ *
+ * v4l2_ctrl_new_std(&radio->hdl, &usb_ma901radio_ctrl_ops,
+ * V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ */
+
+ v4l2_ctrl_new_std(&radio->hdl, &usb_ma901radio_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, MA901_VOLUME_MIN,
+ MA901_VOLUME_MAX, 1, MA901_VOLUME_MAX);
+
+ if (radio->hdl.error) {
+ retval = radio->hdl.error;
+ dev_err(&intf->dev, "couldn't register control\n");
+ goto err_ctrl;
+ }
+ mutex_init(&radio->lock);
+
+ radio->v4l2_dev.ctrl_handler = &radio->hdl;
+ radio->v4l2_dev.release = usb_ma901radio_release;
+ strlcpy(radio->vdev.name, radio->v4l2_dev.name,
+ sizeof(radio->vdev.name));
+ radio->vdev.v4l2_dev = &radio->v4l2_dev;
+ radio->vdev.fops = &usb_ma901radio_fops;
+ radio->vdev.ioctl_ops = &usb_ma901radio_ioctl_ops;
+ radio->vdev.release = video_device_release_empty;
+ radio->vdev.lock = &radio->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &radio->vdev.flags);
+
+ radio->usbdev = interface_to_usbdev(intf);
+ radio->intf = intf;
+ usb_set_intfdata(intf, &radio->v4l2_dev);
+ radio->curfreq = 95.21 * FREQ_MUL;
+
+ video_set_drvdata(&radio->vdev, radio);
+
+ /* TODO: we could read some statistics (freq, volume) from the device,
+ * but that is not implemented yet. After being plugged into the USB port
+ * the radio sets its frequency and starts playing without any initialization,
+ * so we don't call usb_ma901radio_init/get_stat() here.
+ * retval = usb_ma901radio_init(radio);
+ */
+
+ retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO,
+ radio_nr);
+ if (retval < 0) {
+ dev_err(&intf->dev, "could not register video device\n");
+ goto err_vdev;
+ }
+
+ return 0;
+
+err_vdev:
+ v4l2_ctrl_handler_free(&radio->hdl);
+err_ctrl:
+ v4l2_device_unregister(&radio->v4l2_dev);
+err_v4l2:
+ kfree(radio->buffer);
+err_nobuf:
+ kfree(radio);
+err:
+ return retval;
+}
+
+/* USB Device ID List */
+static struct usb_device_id usb_ma901radio_device_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(USB_MA901_VENDOR, USB_MA901_PRODUCT,
+ USB_CLASS_HID, 0, 0) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usb_ma901radio_device_table);
+
+/* USB subsystem interface */
+static struct usb_driver usb_ma901radio_driver = {
+ .name = MA901_DRIVER_NAME,
+ .probe = usb_ma901radio_probe,
+ .disconnect = usb_ma901radio_disconnect,
+ .suspend = usb_ma901radio_suspend,
+ .resume = usb_ma901radio_resume,
+ .reset_resume = usb_ma901radio_resume,
+ .id_table = usb_ma901radio_device_table,
+};
+
+module_usb_driver(usb_ma901radio_driver);
diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c
index 11f76ed4c6fb..3d0ff4404d12 100644
--- a/drivers/media/radio/radio-miropcm20.c
+++ b/drivers/media/radio/radio-miropcm20.c
@@ -17,49 +17,36 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <sound/aci.h>
static int radio_nr = -1;
module_param(radio_nr, int, 0);
MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)");
-static bool mono;
-module_param(mono, bool, 0);
-MODULE_PARM_DESC(mono, "Force tuner into mono mode.");
-
struct pcm20 {
struct v4l2_device v4l2_dev;
struct video_device vdev;
+ struct v4l2_ctrl_handler ctrl_handler;
unsigned long freq;
- int muted;
+ u32 audmode;
struct snd_miro_aci *aci;
struct mutex lock;
};
static struct pcm20 pcm20_card = {
- .freq = 87*16000,
- .muted = 1,
+ .freq = 87 * 16000,
+ .audmode = V4L2_TUNER_MODE_STEREO,
};
-static int pcm20_mute(struct pcm20 *dev, unsigned char mute)
-{
- dev->muted = mute;
- return snd_aci_cmd(dev->aci, ACI_SET_TUNERMUTE, mute, -1);
-}
-
-static int pcm20_stereo(struct pcm20 *dev, unsigned char stereo)
-{
- return snd_aci_cmd(dev->aci, ACI_SET_TUNERMONO, !stereo, -1);
-}
-
static int pcm20_setfreq(struct pcm20 *dev, unsigned long freq)
{
unsigned char freql;
unsigned char freqh;
struct snd_miro_aci *aci = dev->aci;
- dev->freq = freq;
-
freq /= 160;
if (!(aci->aci_version == 0x07 || aci->aci_version >= 0xb0))
freq /= 10; /* I don't know exactly which version
@@ -67,46 +54,66 @@ static int pcm20_setfreq(struct pcm20 *dev, unsigned long freq)
freql = freq & 0xff;
freqh = freq >> 8;
- pcm20_stereo(dev, !mono);
return snd_aci_cmd(aci, ACI_WRITE_TUNE, freql, freqh);
}
static const struct v4l2_file_operations pcm20_fops = {
.owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .poll = v4l2_ctrl_poll,
+ .release = v4l2_fh_release,
.unlocked_ioctl = video_ioctl2,
};
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *v)
{
+ struct pcm20 *dev = video_drvdata(file);
+
strlcpy(v->driver, "Miro PCM20", sizeof(v->driver));
strlcpy(v->card, "Miro PCM20", sizeof(v->card));
- strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = 0x1;
- v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+ snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", dev->v4l2_dev.name);
+ v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
- if (v->index) /* Only 1 tuner */
+ struct pcm20 *dev = video_drvdata(file);
+ int res;
+
+ if (v->index)
return -EINVAL;
strlcpy(v->name, "FM", sizeof(v->name));
v->type = V4L2_TUNER_RADIO;
v->rangelow = 87*16000;
v->rangehigh = 108*16000;
- v->signal = 0xffff;
- v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
- v->capability = V4L2_TUNER_CAP_LOW;
- v->audmode = V4L2_TUNER_MODE_MONO;
+ res = snd_aci_cmd(dev->aci, ACI_READ_TUNERSTATION, -1, -1);
+ v->signal = (res & 0x80) ? 0 : 0xffff;
+ /* Note: stereo detection does not work if the audio is muted,
+ it will default to mono in that case. */
+ res = snd_aci_cmd(dev->aci, ACI_READ_TUNERSTEREO, -1, -1);
+ v->rxsubchans = (res & 0x40) ? V4L2_TUNER_SUB_MONO :
+ V4L2_TUNER_SUB_STEREO;
+ v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+ v->audmode = dev->audmode;
return 0;
}
static int vidioc_s_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
- return v->index ? -EINVAL : 0;
+ struct pcm20 *dev = video_drvdata(file);
+
+ if (v->index)
+ return -EINVAL;
+ if (v->audmode > V4L2_TUNER_MODE_STEREO)
+ v->audmode = V4L2_TUNER_MODE_STEREO;
+ snd_aci_cmd(dev->aci, ACI_SET_TUNERMONO,
+ v->audmode == V4L2_TUNER_MODE_MONO, -1);
+ return 0;
}
static int vidioc_g_frequency(struct file *file, void *priv,
@@ -131,75 +138,21 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
return -EINVAL;
- dev->freq = f->frequency;
- pcm20_setfreq(dev, f->frequency);
- return 0;
-}
-
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- }
- return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct pcm20 *dev = video_drvdata(file);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = dev->muted;
- break;
- default:
- return -EINVAL;
- }
+ dev->freq = clamp(f->frequency, 87 * 16000U, 108 * 16000U);
+ pcm20_setfreq(dev, dev->freq);
return 0;
}
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int pcm20_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct pcm20 *dev = video_drvdata(file);
+ struct pcm20 *dev = container_of(ctrl->handler, struct pcm20, ctrl_handler);
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- pcm20_mute(dev, ctrl->value);
- break;
- default:
- return -EINVAL;
+ snd_aci_cmd(dev->aci, ACI_SET_TUNERMUTE, ctrl->val, -1);
+ return 0;
}
- return 0;
-}
-
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- return i ? -EINVAL : 0;
-}
-
-static int vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- a->index = 0;
- strlcpy(a->name, "Radio", sizeof(a->name));
- a->capability = V4L2_AUDCAP_STEREO;
- return 0;
-}
-
-static int vidioc_s_audio(struct file *file, void *priv,
- const struct v4l2_audio *a)
-{
- return a->index ? -EINVAL : 0;
+ return -EINVAL;
}
static const struct v4l2_ioctl_ops pcm20_ioctl_ops = {
@@ -208,19 +161,20 @@ static const struct v4l2_ioctl_ops pcm20_ioctl_ops = {
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_ctrl_ops pcm20_ctrl_ops = {
+ .s_ctrl = pcm20_s_ctrl,
};
static int __init pcm20_init(void)
{
struct pcm20 *dev = &pcm20_card;
struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
+ struct v4l2_ctrl_handler *hdl;
int res;
dev->aci = snd_aci_get_aci();
@@ -229,7 +183,7 @@ static int __init pcm20_init(void)
"you must load the snd-miro driver first!\n");
return -ENODEV;
}
- strlcpy(v4l2_dev->name, "miropcm20", sizeof(v4l2_dev->name));
+ strlcpy(v4l2_dev->name, "radio-miropcm20", sizeof(v4l2_dev->name));
mutex_init(&dev->lock);
res = v4l2_device_register(NULL, v4l2_dev);
@@ -238,20 +192,35 @@ static int __init pcm20_init(void)
return -EINVAL;
}
+ hdl = &dev->ctrl_handler;
+ v4l2_ctrl_handler_init(hdl, 1);
+ v4l2_ctrl_new_std(hdl, &pcm20_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ v4l2_dev->ctrl_handler = hdl;
+ if (hdl->error) {
+ res = hdl->error;
+ v4l2_err(v4l2_dev, "Could not register control\n");
+ goto err_hdl;
+ }
strlcpy(dev->vdev.name, v4l2_dev->name, sizeof(dev->vdev.name));
dev->vdev.v4l2_dev = v4l2_dev;
dev->vdev.fops = &pcm20_fops;
dev->vdev.ioctl_ops = &pcm20_ioctl_ops;
dev->vdev.release = video_device_release_empty;
dev->vdev.lock = &dev->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &dev->vdev.flags);
video_set_drvdata(&dev->vdev, dev);
+ snd_aci_cmd(dev->aci, ACI_SET_TUNERMONO,
+ dev->audmode == V4L2_TUNER_MODE_MONO, -1);
+ pcm20_setfreq(dev, dev->freq);
if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0)
- goto fail;
+ goto err_hdl;
v4l2_info(v4l2_dev, "Mirosound PCM20 Radio tuner\n");
return 0;
-fail:
+err_hdl:
+ v4l2_ctrl_handler_free(hdl);
v4l2_device_unregister(v4l2_dev);
return -EINVAL;
}
@@ -265,6 +234,8 @@ static void __exit pcm20_cleanup(void)
struct pcm20 *dev = &pcm20_card;
video_unregister_device(&dev->vdev);
+ snd_aci_cmd(dev->aci, ACI_SET_TUNERMUTE, 1, -1);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
v4l2_device_unregister(&dev->v4l2_dev);
}
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index cabbe3adf435..02151e0e6e63 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -2085,8 +2085,7 @@ static int wl1273_fm_radio_probe(struct platform_device *pdev)
}
/* V4L2 configuration */
- memcpy(&radio->videodev, &wl1273_viddev_template,
- sizeof(wl1273_viddev_template));
+ radio->videodev = wl1273_viddev_template;
radio->videodev.v4l2_dev = &radio->v4l2dev;
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 2f089b4252df..467e95575488 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -163,7 +163,7 @@ struct si470x_device {
struct completion completion;
bool status_rssi_auto_update; /* Does RSSI get updated automatic? */
-#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
+#if IS_ENABLED(CONFIG_USB_SI470X)
/* reference to USB and video device */
struct usb_device *usbdev;
struct usb_interface *intf;
@@ -179,7 +179,7 @@ struct si470x_device {
unsigned char hardware_version;
#endif
-#if defined(CONFIG_I2C_SI470X) || defined(CONFIG_I2C_SI470X_MODULE)
+#if IS_ENABLED(CONFIG_I2C_SI470X)
struct i2c_client *client;
#endif
};
diff --git a/drivers/media/radio/wl128x/Kconfig b/drivers/media/radio/wl128x/Kconfig
index ea1e6545df36..f359be7e9dd9 100644
--- a/drivers/media/radio/wl128x/Kconfig
+++ b/drivers/media/radio/wl128x/Kconfig
@@ -4,7 +4,7 @@
menu "Texas Instruments WL128x FM driver (ST based)"
config RADIO_WL128X
tristate "Texas Instruments WL128x FM Radio"
- depends on VIDEO_V4L2 && RFKILL && GPIOLIB
+ depends on VIDEO_V4L2 && RFKILL && GPIOLIB && TTY
select TI_ST if NET
help
Choose Y here if you have this FM radio chip.
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 602ef7ac8c24..a002234ed5de 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -1563,8 +1563,7 @@ int fmc_prepare(struct fmdev *fmdev)
fmdev->irq_info.mask = FM_MAL_EVENT;
/* Region info */
- memcpy(&fmdev->rx.region, &region_configs[default_radio_region],
- sizeof(struct region_info));
+ fmdev->rx.region = region_configs[default_radio_region];
fmdev->rx.mute_mode = FM_MUTE_OFF;
fmdev->rx.rf_depend_mute = FM_RX_RF_DEPENDENT_MUTE_OFF;
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 79ba242fe263..19f3563c61da 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -291,7 +291,7 @@ config IR_TTUSBIR
config IR_RX51
tristate "Nokia N900 IR transmitter diode"
- depends on OMAP_DM_TIMER && LIRC
+ depends on OMAP_DM_TIMER && LIRC && !ARCH_MULTIPLATFORM
---help---
Say Y or M here if you want to enable support for the IR
transmitter diode built in the Nokia N900 (RX51) device.
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 2d6fb26a0170..4d6a63fe6c5e 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -872,11 +872,11 @@ static int ati_remote_probe(struct usb_interface *interface,
ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL);
rc_dev = rc_allocate_device();
if (!ati_remote || !rc_dev)
- goto fail1;
+ goto exit_free_dev_rdev;
/* Allocate URB buffers, URBs */
if (ati_remote_alloc_buffers(udev, ati_remote))
- goto fail2;
+ goto exit_free_buffers;
ati_remote->endpoint_in = endpoint_in;
ati_remote->endpoint_out = endpoint_out;
@@ -924,12 +924,12 @@ static int ati_remote_probe(struct usb_interface *interface,
/* Device Hardware Initialization - fills in ati_remote->idev from udev. */
err = ati_remote_initialize(ati_remote);
if (err)
- goto fail3;
+ goto exit_kill_urbs;
/* Set up and register rc device */
err = rc_register_device(ati_remote->rdev);
if (err)
- goto fail3;
+ goto exit_kill_urbs;
/* use our delay for rc_dev */
ati_remote->rdev->input_dev->rep[REP_DELAY] = repeat_delay;
@@ -939,7 +939,7 @@ static int ati_remote_probe(struct usb_interface *interface,
input_dev = input_allocate_device();
if (!input_dev) {
err = -ENOMEM;
- goto fail4;
+ goto exit_unregister_device;
}
ati_remote->idev = input_dev;
@@ -947,19 +947,24 @@ static int ati_remote_probe(struct usb_interface *interface,
err = input_register_device(input_dev);
if (err)
- goto fail5;
+ goto exit_free_input_device;
}
usb_set_intfdata(interface, ati_remote);
return 0;
- fail5: input_free_device(input_dev);
- fail4: rc_unregister_device(rc_dev);
+ exit_free_input_device:
+ input_free_device(input_dev);
+ exit_unregister_device:
+ rc_unregister_device(rc_dev);
rc_dev = NULL;
- fail3: usb_kill_urb(ati_remote->irq_urb);
+ exit_kill_urbs:
+ usb_kill_urb(ati_remote->irq_urb);
usb_kill_urb(ati_remote->out_urb);
- fail2: ati_remote_free_buffers(ati_remote);
- fail1: rc_free_device(rc_dev);
+ exit_free_buffers:
+ ati_remote_free_buffers(ati_remote);
+ exit_free_dev_rdev:
+ rc_free_device(rc_dev);
kfree(ati_remote);
return err;
}
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index cef04786b52f..ee6c984cade2 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -1003,7 +1003,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
rdev = rc_allocate_device();
if (!dev || !rdev)
- goto failure;
+ goto exit_free_dev_rdev;
/* validate resources */
error = -ENODEV;
@@ -1014,10 +1014,10 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
if (!pnp_port_valid(pnp_dev, 0) ||
pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
- goto failure;
+ goto exit_free_dev_rdev;
if (!pnp_irq_valid(pnp_dev, 0))
- goto failure;
+ goto exit_free_dev_rdev;
spin_lock_init(&dev->hw_lock);
@@ -1033,7 +1033,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
/* detect hardware version and features */
error = ene_hw_detect(dev);
if (error)
- goto failure;
+ goto exit_free_dev_rdev;
if (!dev->hw_learning_and_tx_capable && txsim) {
dev->hw_learning_and_tx_capable = true;
@@ -1075,30 +1075,30 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
device_set_wakeup_capable(&pnp_dev->dev, true);
device_set_wakeup_enable(&pnp_dev->dev, true);
+ error = rc_register_device(rdev);
+ if (error < 0)
+ goto exit_free_dev_rdev;
+
/* claim the resources */
error = -EBUSY;
if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
- goto failure;
+ goto exit_unregister_device;
}
dev->irq = pnp_irq(pnp_dev, 0);
if (request_irq(dev->irq, ene_isr,
IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
- goto failure2;
+ goto exit_release_hw_io;
}
- error = rc_register_device(rdev);
- if (error < 0)
- goto failure3;
-
pr_notice("driver has been successfully loaded\n");
return 0;
-failure3:
- free_irq(dev->irq, dev);
-failure2:
+exit_release_hw_io:
release_region(dev->hw_io, ENE_IO_SIZE);
-failure:
+exit_unregister_device:
+ rc_unregister_device(rdev);
+exit_free_dev_rdev:
rc_free_device(rdev);
kfree(dev);
return error;
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 1df410e13688..d6fa441655d2 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -500,18 +500,18 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
/* input device for IR remote (and tx) */
rdev = rc_allocate_device();
if (!rdev)
- goto failure;
+ goto exit_free_dev_rdev;
ret = -ENODEV;
/* validate pnp resources */
if (!pnp_port_valid(pdev, 0)) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
if (!pnp_irq_valid(pdev, 0)) {
dev_err(&pdev->dev, "IR PNP IRQ not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
fintek->cir_addr = pnp_port_start(pdev, 0);
@@ -528,7 +528,7 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
ret = fintek_hw_detect(fintek);
if (ret)
- goto failure;
+ goto exit_free_dev_rdev;
/* Initialize CIR & CIR Wake Logical Devices */
fintek_config_mode_enable(fintek);
@@ -557,33 +557,35 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
+ fintek->rdev = rdev;
+
ret = -EBUSY;
/* now claim resources */
if (!request_region(fintek->cir_addr,
fintek->cir_port_len, FINTEK_DRIVER_NAME))
- goto failure;
+ goto exit_free_dev_rdev;
if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
FINTEK_DRIVER_NAME, (void *)fintek))
- goto failure2;
+ goto exit_free_cir_addr;
ret = rc_register_device(rdev);
if (ret)
- goto failure3;
+ goto exit_free_irq;
device_init_wakeup(&pdev->dev, true);
- fintek->rdev = rdev;
+
fit_pr(KERN_NOTICE, "driver has been successfully loaded\n");
if (debug)
cir_dump_regs(fintek);
return 0;
-failure3:
+exit_free_irq:
free_irq(fintek->cir_irq, fintek);
-failure2:
+exit_free_cir_addr:
release_region(fintek->cir_addr, fintek->cir_port_len);
-failure:
+exit_free_dev_rdev:
rc_free_device(rdev);
kfree(fintek);
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 4f71a7d1f019..8b82ae9bd686 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <media/rc-core.h>
@@ -30,6 +31,45 @@ struct gpio_rc_dev {
bool active_low;
};
+#ifdef CONFIG_OF
+/*
+ * Translate OpenFirmware node properties into platform_data
+ */
+static int gpio_ir_recv_get_devtree_pdata(struct device *dev,
+ struct gpio_ir_recv_platform_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+ enum of_gpio_flags flags;
+ int gpio;
+
+ gpio = of_get_gpio_flags(np, 0, &flags);
+ if (gpio < 0) {
+ if (gpio != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get gpio flags (%d)\n", gpio);
+ return gpio;
+ }
+
+ pdata->gpio_nr = gpio;
+ pdata->active_low = (flags & OF_GPIO_ACTIVE_LOW);
+ /* probe() takes care of map_name == NULL or allowed_protos == 0 */
+ pdata->map_name = of_get_property(np, "linux,rc-map-name", NULL);
+ pdata->allowed_protos = 0;
+
+ return 0;
+}
+
+static struct of_device_id gpio_ir_recv_of_match[] = {
+ { .compatible = "gpio-ir-receiver", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
+
+#else /* !CONFIG_OF */
+
+#define gpio_ir_recv_get_devtree_pdata(dev, pdata) (-ENOSYS)
+
+#endif
+
static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id)
{
struct gpio_rc_dev *gpio_dev = dev_id;
@@ -66,6 +106,17 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
pdev->dev.platform_data;
int rc;
+ if (pdev->dev.of_node) {
+ struct gpio_ir_recv_platform_data *dtpdata =
+ devm_kzalloc(&pdev->dev, sizeof(*dtpdata), GFP_KERNEL);
+ if (!dtpdata)
+ return -ENOMEM;
+ rc = gpio_ir_recv_get_devtree_pdata(&pdev->dev, dtpdata);
+ if (rc)
+ return rc;
+ pdata = dtpdata;
+ }
+
if (!pdata)
return -EINVAL;
@@ -129,12 +180,12 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
err_request_irq:
platform_set_drvdata(pdev, NULL);
rc_unregister_device(rcdev);
+ rcdev = NULL;
err_register_rc_device:
err_gpio_direction_input:
gpio_free(pdata->gpio_nr);
err_gpio_request:
rc_free_device(rcdev);
- rcdev = NULL;
err_allocate_device:
kfree(gpio_dev);
return rc;
@@ -148,7 +199,6 @@ static int gpio_ir_recv_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
rc_unregister_device(gpio_dev->rcdev);
gpio_free(gpio_dev->gpio_nr);
- rc_free_device(gpio_dev->rcdev);
kfree(gpio_dev);
return 0;
}
@@ -192,6 +242,7 @@ static struct platform_driver gpio_ir_recv_driver = {
.driver = {
.name = GPIO_IR_DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(gpio_ir_recv_of_match),
#ifdef CONFIG_PM
.pm = &gpio_ir_recv_pm_ops,
#endif
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index b99b096d8a8f..a4ab2e6b3f82 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -58,6 +58,7 @@ struct iguanair {
char phys[64];
};
+#define CMD_NOP 0x00
#define CMD_GET_VERSION 0x01
#define CMD_GET_BUFSIZE 0x11
#define CMD_GET_FEATURES 0x10
@@ -196,6 +197,10 @@ static void iguanair_irq_out(struct urb *urb)
if (urb->status)
dev_dbg(ir->dev, "Error: out urb status = %d\n", urb->status);
+
+ /* if we sent a NOP packet, do not expect a response */
+ if (urb->status == 0 && ir->packet->header.cmd == CMD_NOP)
+ complete(&ir->completion);
}
static int iguanair_send(struct iguanair *ir, unsigned size)
@@ -219,10 +224,17 @@ static int iguanair_get_features(struct iguanair *ir)
{
int rc;
+ /*
+ * On cold boot, the iguanair initializes on the first packet
+ * received but does not process that packet. Send an empty
+ * packet.
+ */
ir->packet->header.start = 0;
ir->packet->header.direction = DIR_OUT;
- ir->packet->header.cmd = CMD_GET_VERSION;
+ ir->packet->header.cmd = CMD_NOP;
+ iguanair_send(ir, sizeof(ir->packet->header));
+ ir->packet->header.cmd = CMD_GET_VERSION;
rc = iguanair_send(ir, sizeof(ir->packet->header));
if (rc) {
dev_info(ir->dev, "failed to get version\n");
@@ -255,19 +267,14 @@ static int iguanair_get_features(struct iguanair *ir)
ir->packet->header.cmd = CMD_GET_FEATURES;
rc = iguanair_send(ir, sizeof(ir->packet->header));
- if (rc) {
+ if (rc)
dev_info(ir->dev, "failed to get features\n");
- goto out;
- }
-
out:
return rc;
}
static int iguanair_receiver(struct iguanair *ir, bool enable)
{
- int rc;
-
ir->packet->header.start = 0;
ir->packet->header.direction = DIR_OUT;
ir->packet->header.cmd = enable ? CMD_RECEIVER_ON : CMD_RECEIVER_OFF;
@@ -275,9 +282,7 @@ static int iguanair_receiver(struct iguanair *ir, bool enable)
if (enable)
ir_raw_event_reset(ir->rc);
- rc = iguanair_send(ir, sizeof(ir->packet->header));
-
- return rc;
+ return iguanair_send(ir, sizeof(ir->packet->header));
}
/*
@@ -512,6 +517,7 @@ static int iguanair_probe(struct usb_interface *intf,
rc->rx_resolution = RX_RESOLUTION;
iguanair_set_tx_carrier(rc, 38000);
+ iguanair_set_tx_mask(rc, 0);
ret = rc_register_device(rc);
if (ret < 0) {
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 78d109b978dd..dec203bb06f6 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1221,7 +1221,7 @@ static u32 imon_panel_key_lookup(u64 code)
static bool imon_mouse_event(struct imon_context *ictx,
unsigned char *buf, int len)
{
- char rel_x = 0x00, rel_y = 0x00;
+ signed char rel_x = 0x00, rel_y = 0x00;
u8 right_shift = 1;
bool mouse_input = true;
int dir = 0;
@@ -1297,7 +1297,7 @@ static void imon_touch_event(struct imon_context *ictx, unsigned char *buf)
static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
{
int dir = 0;
- char rel_x = 0x00, rel_y = 0x00;
+ signed char rel_x = 0x00, rel_y = 0x00;
u16 timeout, threshold;
u32 scancode = KEY_RESERVED;
unsigned long flags;
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 97dc8d13b06b..17c94be9f24c 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -31,11 +31,6 @@ static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;
-#ifdef MODULE
-/* Used to load the decoders */
-static struct work_struct wq_load;
-#endif
-
static int ir_raw_event_thread(void *data)
{
struct ir_raw_event ev;
@@ -347,8 +342,7 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
-#ifdef MODULE
-static void init_decoders(struct work_struct *work)
+void ir_raw_init(void)
{
/* Load the decoder modules */
@@ -365,12 +359,3 @@ static void init_decoders(struct work_struct *work)
it is needed to change the CONFIG_MODULE test at rc-core.h
*/
}
-#endif
-
-void ir_raw_init(void)
-{
-#ifdef MODULE
- INIT_WORK(&wq_load, init_decoders);
- schedule_work(&wq_load);
-#endif
-}
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 1b8669b6d042..dd8237324c09 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1472,7 +1472,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
/* input device for IR remote (and tx) */
rdev = rc_allocate_device();
if (!rdev)
- goto failure;
+ goto exit_free_dev_rdev;
itdev->rdev = rdev;
ret = -ENODEV;
@@ -1498,12 +1498,12 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
if (!pnp_port_valid(pdev, io_rsrc_no) ||
pnp_port_len(pdev, io_rsrc_no) != dev_desc->io_region_size) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
if (!pnp_irq_valid(pdev, 0)) {
dev_err(&pdev->dev, "PNP IRQ not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
/* store resource values */
@@ -1591,29 +1591,29 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
rdev->driver_name = ITE_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
+ ret = rc_register_device(rdev);
+ if (ret)
+ goto exit_free_dev_rdev;
+
ret = -EBUSY;
/* now claim resources */
if (!request_region(itdev->cir_addr,
dev_desc->io_region_size, ITE_DRIVER_NAME))
- goto failure;
+ goto exit_unregister_device;
if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
ITE_DRIVER_NAME, (void *)itdev))
- goto failure2;
-
- ret = rc_register_device(rdev);
- if (ret)
- goto failure3;
+ goto exit_release_cir_addr;
ite_pr(KERN_NOTICE, "driver has been successfully loaded\n");
return 0;
-failure3:
- free_irq(itdev->cir_irq, itdev);
-failure2:
+exit_release_cir_addr:
release_region(itdev->cir_addr, itdev->params.io_region_size);
-failure:
+exit_unregister_device:
+ rc_unregister_device(rdev);
+exit_free_dev_rdev:
rc_free_device(rdev);
kfree(itdev);
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index ab84d66c67c1..778661971aed 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-tevii-nec.o \
rc-tivo.o \
rc-total-media-in-hand.o \
+ rc-total-media-in-hand-02.o \
rc-trekstor.o \
rc-tt-1500.o \
rc-twinhan1027.o \
diff --git a/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c b/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
new file mode 100644
index 000000000000..47270f72ebf0
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
@@ -0,0 +1,86 @@
+/*
+ * Total Media In Hand_02 remote controller keytable for Mygica X8507
+ *
+ * Copyright (C) 2012 Alfredo J. Delaiti <alfredodelaiti@netscape.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+
+static struct rc_map_table total_media_in_hand_02[] = {
+ { 0x0000, KEY_0 },
+ { 0x0001, KEY_1 },
+ { 0x0002, KEY_2 },
+ { 0x0003, KEY_3 },
+ { 0x0004, KEY_4 },
+ { 0x0005, KEY_5 },
+ { 0x0006, KEY_6 },
+ { 0x0007, KEY_7 },
+ { 0x0008, KEY_8 },
+ { 0x0009, KEY_9 },
+ { 0x000a, KEY_MUTE },
+ { 0x000b, KEY_STOP }, /* Stop */
+ { 0x000c, KEY_POWER2 }, /* Turn on/off application */
+ { 0x000d, KEY_OK }, /* OK */
+ { 0x000e, KEY_CAMERA }, /* Snapshot */
+ { 0x000f, KEY_ZOOM }, /* Full Screen/Restore */
+ { 0x0010, KEY_RIGHT }, /* Right arrow */
+ { 0x0011, KEY_LEFT }, /* Left arrow */
+ { 0x0012, KEY_CHANNELUP },
+ { 0x0013, KEY_CHANNELDOWN },
+ { 0x0014, KEY_SHUFFLE },
+ { 0x0016, KEY_PAUSE },
+ { 0x0017, KEY_PLAY }, /* Play */
+ { 0x001e, KEY_TIME }, /* Time Shift */
+ { 0x001f, KEY_RECORD },
+ { 0x0020, KEY_UP },
+ { 0x0021, KEY_DOWN },
+ { 0x0025, KEY_POWER }, /* Turn off computer */
+ { 0x0026, KEY_REWIND }, /* FR << */
+ { 0x0027, KEY_FASTFORWARD }, /* FF >> */
+ { 0x0029, KEY_ESC },
+ { 0x002b, KEY_VOLUMEUP },
+ { 0x002c, KEY_VOLUMEDOWN },
+ { 0x002d, KEY_CHANNEL }, /* CH Surfing */
+ { 0x0038, KEY_VIDEO }, /* TV/AV/S-Video/YPbPr */
+};
+
+static struct rc_map_list total_media_in_hand_02_map = {
+ .map = {
+ .scan = total_media_in_hand_02,
+ .size = ARRAY_SIZE(total_media_in_hand_02),
+ .rc_type = RC_TYPE_RC5,
+ .name = RC_MAP_TOTAL_MEDIA_IN_HAND_02,
+ }
+};
+
+static int __init init_rc_map_total_media_in_hand_02(void)
+{
+ return rc_map_register(&total_media_in_hand_02_map);
+}
+
+static void __exit exit_rc_map_total_media_in_hand_02(void)
+{
+ rc_map_unregister(&total_media_in_hand_02_map);
+}
+
+module_init(init_rc_map_total_media_in_hand_02)
+module_exit(exit_rc_map_total_media_in_hand_02)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(" Alfredo J. Delaiti <alfredodelaiti@netscape.net>");
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index ca12d3289bfe..5247d94fea29 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -531,7 +531,7 @@ EXPORT_SYMBOL(lirc_dev_fop_close);
unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
{
- struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+ struct irctl *ir = irctls[iminor(file_inode(file))];
unsigned int ret;
if (!ir) {
@@ -565,7 +565,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
__u32 mode;
int result = 0;
- struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+ struct irctl *ir = irctls[iminor(file_inode(file))];
if (!ir) {
printk(KERN_ERR "lirc_dev: %s: no irctl found!\n", __func__);
@@ -650,7 +650,7 @@ ssize_t lirc_dev_fop_read(struct file *file,
size_t length,
loff_t *ppos)
{
- struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+ struct irctl *ir = irctls[iminor(file_inode(file))];
unsigned char *buf;
int ret = 0, written = 0;
DECLARE_WAITQUEUE(wait, current);
@@ -752,16 +752,7 @@ EXPORT_SYMBOL(lirc_dev_fop_read);
void *lirc_get_pdata(struct file *file)
{
- void *data = NULL;
-
- if (file && file->f_dentry && file->f_dentry->d_inode &&
- file->f_dentry->d_inode->i_rdev) {
- struct irctl *ir;
- ir = irctls[iminor(file->f_dentry->d_inode)];
- data = ir->d.data;
- }
-
- return data;
+ return irctls[iminor(file_inode(file))]->d.data;
}
EXPORT_SYMBOL(lirc_get_pdata);
@@ -769,7 +760,7 @@ EXPORT_SYMBOL(lirc_get_pdata);
ssize_t lirc_dev_fop_write(struct file *file, const char __user *buffer,
size_t length, loff_t *ppos)
{
- struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+ struct irctl *ir = irctls[iminor(file_inode(file))];
if (!ir) {
printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 9afb9331217d..5b5b6e6f79e8 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -62,7 +62,6 @@
#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */
#define MCE_IRDATA_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */
#define MCE_IRDATA_TRAILER 0x80 /* End of IR data */
-#define MCE_TX_HEADER_LENGTH 3 /* # of bytes in the initializing tx header */
#define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
#define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
#define MCE_PULSE_BIT 0x80 /* Pulse bit, MSB set == PULSE else SPACE */
@@ -291,7 +290,8 @@ static struct usb_device_id mceusb_dev_table[] = {
/* Philips/Spinel plus IR transceiver for ASUS */
{ USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
/* Philips IR transceiver (Dell branded) */
- { USB_DEVICE(VENDOR_PHILIPS, 0x2093) },
+ { USB_DEVICE(VENDOR_PHILIPS, 0x2093),
+ .driver_info = MCE_GEN2_TX_INV },
/* Realtek MCE IR Receiver and card reader */
{ USB_DEVICE(VENDOR_REALTEK, 0x0161),
.driver_info = MULTIFUNCTION },
@@ -365,7 +365,8 @@ static struct usb_device_id mceusb_dev_table[] = {
/* Formosa Industrial Computing */
{ USB_DEVICE(VENDOR_FORMOSA, 0xe042) },
/* Fintek eHome Infrared Transceiver (HP branded) */
- { USB_DEVICE(VENDOR_FINTEK, 0x5168) },
+ { USB_DEVICE(VENDOR_FINTEK, 0x5168),
+ .driver_info = MCE_GEN2_TX_INV },
/* Fintek eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_FINTEK, 0x0602) },
/* Fintek eHome Infrared Transceiver (in the AOpen MP45) */
@@ -788,19 +789,19 @@ static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
struct mceusb_dev *ir = dev->priv;
- int i, ret = 0;
+ int i, length, ret = 0;
int cmdcount = 0;
- unsigned char *cmdbuf; /* MCE command buffer */
-
- cmdbuf = kzalloc(sizeof(unsigned) * MCE_CMDBUF_SIZE, GFP_KERNEL);
- if (!cmdbuf)
- return -ENOMEM;
+ unsigned char cmdbuf[MCE_CMDBUF_SIZE];
/* MCE tx init header */
cmdbuf[cmdcount++] = MCE_CMD_PORT_IR;
cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS;
cmdbuf[cmdcount++] = ir->tx_mask;
+ /* Send the set TX ports command */
+ mce_async_out(ir, cmdbuf, cmdcount);
+ cmdcount = 0;
+
/* Generate mce packet data */
for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) {
txbuf[i] = txbuf[i] / MCE_TIME_UNIT;
@@ -809,8 +810,7 @@ static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
/* Insert mce packet header every 4th entry */
if ((cmdcount < MCE_CMDBUF_SIZE) &&
- (cmdcount - MCE_TX_HEADER_LENGTH) %
- MCE_CODE_LENGTH == 0)
+ (cmdcount % MCE_CODE_LENGTH) == 0)
cmdbuf[cmdcount++] = MCE_IRDATA_HEADER;
/* Insert mce packet data */
@@ -828,17 +828,16 @@ static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
(txbuf[i] -= MCE_MAX_PULSE_LENGTH));
}
- /* Fix packet length in last header */
- cmdbuf[cmdcount - (cmdcount - MCE_TX_HEADER_LENGTH) % MCE_CODE_LENGTH] =
- MCE_COMMAND_IRDATA + (cmdcount - MCE_TX_HEADER_LENGTH) %
- MCE_CODE_LENGTH - 1;
-
/* Check if we have room for the empty packet at the end */
if (cmdcount >= MCE_CMDBUF_SIZE) {
ret = -EINVAL;
goto out;
}
+ /* Fix packet length in last header */
+ length = cmdcount % MCE_CODE_LENGTH;
+ cmdbuf[cmdcount - length] -= MCE_CODE_LENGTH - length;
+
/* All mce commands end with an empty packet (0x80) */
cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER;
@@ -846,7 +845,6 @@ static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
mce_async_out(ir, cmdbuf, cmdcount);
out:
- kfree(cmdbuf);
return ret ? ret : count;
}
@@ -1121,16 +1119,13 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
kfree(data);
-};
+}
static void mceusb_gen2_init(struct mceusb_dev *ir)
{
/* device resume */
mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
- /* get hw/sw revision? */
- mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
-
/* get wake version (protocol, key, address) */
mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
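
The mceusb_tx_ir() rework above sends the three-byte "set TX ports" command on its own and then lets the IR data start at offset 0, so every packet boundary is a plain multiple of MCE_CODE_LENGTH and the last header can be fixed up with one modulo. A stand-alone sketch of that framing arithmetic, assuming for the demo a packet length of 5 (one header byte plus four data bytes) and dummy sample values — the real constants come from the driver:

    #include <stdio.h>

    #define CODE_LEN 5      /* assumed: 1 header byte + 4 data bytes */
    #define HDR_BASE 0x80   /* header byte = 0x80 + number of data bytes */

    int main(void)
    {
        unsigned char buf[64];
        int n = 0, i, length;

        /* Emit 7 data bytes, writing a full-size header at each boundary. */
        for (i = 0; i < 7; i++) {
            if (n % CODE_LEN == 0)
                buf[n++] = HDR_BASE + (CODE_LEN - 1); /* claim 4 bytes follow */
            buf[n++] = 0xA0 + i;                      /* dummy IR sample */
        }

        /* The last packet may be short: shrink its header to the real count. */
        length = n % CODE_LEN;
        if (length)
            buf[n - length] -= CODE_LEN - length;

        buf[n++] = HDR_BASE;  /* trailing empty packet ends the transmission */

        for (i = 0; i < n; i++)
            printf("%02x ", buf[i]);
        printf("\n");
        return 0;
    }

With the values above this prints 84 a0 a1 a2 a3 83 a4 a5 a6 80, i.e. a full packet followed by a three-byte packet and the 0x80 trailer.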
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index b8aa9abb31ff..40125d779049 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -986,25 +986,25 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* input device for IR remote (and tx) */
rdev = rc_allocate_device();
if (!rdev)
- goto failure;
+ goto exit_free_dev_rdev;
ret = -ENODEV;
/* validate pnp resources */
if (!pnp_port_valid(pdev, 0) ||
pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
if (!pnp_irq_valid(pdev, 0)) {
dev_err(&pdev->dev, "PNP IRQ not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
if (!pnp_port_valid(pdev, 1) ||
pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
nvt->cir_addr = pnp_port_start(pdev, 0);
@@ -1027,7 +1027,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
ret = nvt_hw_detect(nvt);
if (ret)
- goto failure;
+ goto exit_free_dev_rdev;
/* Initialize CIR & CIR Wake Logical Devices */
nvt_efm_enable(nvt);
@@ -1065,31 +1065,32 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* tx bits */
rdev->tx_resolution = XYZ;
#endif
+ nvt->rdev = rdev;
+
+ ret = rc_register_device(rdev);
+ if (ret)
+ goto exit_free_dev_rdev;
ret = -EBUSY;
/* now claim resources */
if (!request_region(nvt->cir_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
- goto failure;
+ goto exit_unregister_device;
if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
NVT_DRIVER_NAME, (void *)nvt))
- goto failure2;
+ goto exit_release_cir_addr;
if (!request_region(nvt->cir_wake_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
- goto failure3;
+ goto exit_free_irq;
if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
NVT_DRIVER_NAME, (void *)nvt))
- goto failure4;
-
- ret = rc_register_device(rdev);
- if (ret)
- goto failure5;
+ goto exit_release_cir_wake_addr;
device_init_wakeup(&pdev->dev, true);
- nvt->rdev = rdev;
+
nvt_pr(KERN_NOTICE, "driver has been successfully loaded\n");
if (debug) {
cir_dump_regs(nvt);
@@ -1098,15 +1099,15 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
return 0;
-failure5:
- free_irq(nvt->cir_wake_irq, nvt);
-failure4:
+exit_release_cir_wake_addr:
release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
-failure3:
+exit_free_irq:
free_irq(nvt->cir_irq, nvt);
-failure2:
+exit_release_cir_addr:
release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
-failure:
+exit_unregister_device:
+ rc_unregister_device(rdev);
+exit_free_dev_rdev:
rc_free_device(rdev);
kfree(nvt);
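
The nvt_probe() changes above rename the numbered failureN labels to names that describe what still has to be undone and register the rc device earlier, so each error path unwinds exactly the steps that succeeded, in reverse order. A small user-space sketch of the same unwind idiom (resources and names invented for the example; on success the demo simply tears everything down again since nothing persists):

    #include <stdio.h>
    #include <stdlib.h>

    static int setup_demo(const char *path)
    {
        char *a, *b;
        FILE *log;
        int ret = -1;

        a = malloc(32);
        if (!a)
            goto exit_none;

        b = malloc(64);
        if (!b)
            goto exit_free_a;

        log = fopen(path, "w");
        if (!log)
            goto exit_free_b;

        fprintf(log, "all resources acquired\n");
        ret = 0;
        fclose(log);

        /* Labels unwind in reverse order of acquisition. */
    exit_free_b:
        free(b);
    exit_free_a:
        free(a);
    exit_none:
        return ret;
    }

    int main(void)
    {
        return setup_demo("/tmp/unwind-demo.log") ? EXIT_FAILURE : EXIT_SUCCESS;
    }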
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 96f0a8bb39ea..5d87287ed372 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -165,56 +165,56 @@ void ir_raw_init(void);
/* from ir-nec-decoder.c */
#ifdef CONFIG_IR_NEC_DECODER_MODULE
-#define load_nec_decode() request_module("ir-nec-decoder")
+#define load_nec_decode() request_module_nowait("ir-nec-decoder")
#else
static inline void load_nec_decode(void) { }
#endif
/* from ir-rc5-decoder.c */
#ifdef CONFIG_IR_RC5_DECODER_MODULE
-#define load_rc5_decode() request_module("ir-rc5-decoder")
+#define load_rc5_decode() request_module_nowait("ir-rc5-decoder")
#else
static inline void load_rc5_decode(void) { }
#endif
/* from ir-rc6-decoder.c */
#ifdef CONFIG_IR_RC6_DECODER_MODULE
-#define load_rc6_decode() request_module("ir-rc6-decoder")
+#define load_rc6_decode() request_module_nowait("ir-rc6-decoder")
#else
static inline void load_rc6_decode(void) { }
#endif
/* from ir-jvc-decoder.c */
#ifdef CONFIG_IR_JVC_DECODER_MODULE
-#define load_jvc_decode() request_module("ir-jvc-decoder")
+#define load_jvc_decode() request_module_nowait("ir-jvc-decoder")
#else
static inline void load_jvc_decode(void) { }
#endif
/* from ir-sony-decoder.c */
#ifdef CONFIG_IR_SONY_DECODER_MODULE
-#define load_sony_decode() request_module("ir-sony-decoder")
+#define load_sony_decode() request_module_nowait("ir-sony-decoder")
#else
static inline void load_sony_decode(void) { }
#endif
/* from ir-sanyo-decoder.c */
#ifdef CONFIG_IR_SANYO_DECODER_MODULE
-#define load_sanyo_decode() request_module("ir-sanyo-decoder")
+#define load_sanyo_decode() request_module_nowait("ir-sanyo-decoder")
#else
static inline void load_sanyo_decode(void) { }
#endif
/* from ir-mce_kbd-decoder.c */
#ifdef CONFIG_IR_MCE_KBD_DECODER_MODULE
-#define load_mce_kbd_decode() request_module("ir-mce_kbd-decoder")
+#define load_mce_kbd_decode() request_module_nowait("ir-mce_kbd-decoder")
#else
static inline void load_mce_kbd_decode(void) { }
#endif
/* from ir-lirc-codec.c */
#ifdef CONFIG_IR_LIRC_CODEC_MODULE
-#define load_lirc_codec() request_module("ir-lirc-codec")
+#define load_lirc_codec() request_module_nowait("ir-lirc-codec")
#else
static inline void load_lirc_codec(void) { }
#endif
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 601d1ac1c688..759a40a42eaa 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -789,8 +789,10 @@ static ssize_t show_protocols(struct device *device,
} else if (dev->raw) {
enabled = dev->raw->enabled_protocols;
allowed = ir_raw_get_allowed_protocols();
- } else
+ } else {
+ mutex_unlock(&dev->lock);
return -ENODEV;
+ }
IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
(long long)allowed,
@@ -890,7 +892,8 @@ static ssize_t store_protocols(struct device *device,
if (i == ARRAY_SIZE(proto_names)) {
IR_dprintk(1, "Unknown protocol: '%s'\n", tmp);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
count++;
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 1800326f93e6..1b37fe2779f8 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -195,9 +195,6 @@ struct redrat3_dev {
dma_addr_t dma_in;
dma_addr_t dma_out;
- /* locks this structure */
- struct mutex lock;
-
/* rx signal timeout timer */
struct timer_list rx_timeout;
u32 hw_timeout;
@@ -922,8 +919,7 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
return -EAGAIN;
}
- if (count > (RR3_DRIVER_MAXLENS * 2))
- return -EINVAL;
+ count = min_t(unsigned, count, RR3_MAX_SIG_SIZE - RR3_TX_TRAILER_LEN);
/* rr3 will disable rc detector on transmit */
rr3->det_enabled = false;
@@ -936,24 +932,22 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
}
for (i = 0; i < count; i++) {
+ cur_sample_len = redrat3_us_to_len(txbuf[i]);
for (lencheck = 0; lencheck < curlencheck; lencheck++) {
- cur_sample_len = redrat3_us_to_len(txbuf[i]);
if (sample_lens[lencheck] == cur_sample_len)
break;
}
if (lencheck == curlencheck) {
- cur_sample_len = redrat3_us_to_len(txbuf[i]);
rr3_dbg(dev, "txbuf[%d]=%u, pos %d, enc %u\n",
i, txbuf[i], curlencheck, cur_sample_len);
- if (curlencheck < 255) {
+ if (curlencheck < RR3_DRIVER_MAXLENS) {
/* now convert the value to a proper
* rr3 value.. */
sample_lens[curlencheck] = cur_sample_len;
curlencheck++;
} else {
- dev_err(dev, "signal too long\n");
- ret = -EINVAL;
- goto out;
+ count = i - 1;
+ break;
}
}
}
@@ -1087,6 +1081,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
rc->driver_name = DRIVER_NAME;
+ rc->rx_resolution = US_TO_NS(2);
rc->map_name = RC_MAP_HAUPPAUGE;
ret = rc_register_device(rc);
@@ -1202,7 +1197,6 @@ static int redrat3_dev_probe(struct usb_interface *intf,
rr3->bulk_out_buf, ep_out->wMaxPacketSize,
(usb_complete_t)redrat3_write_bulk_callback, rr3);
- mutex_init(&rr3->lock);
rr3->udev = udev;
redrat3_reset(rr3);
diff --git a/drivers/media/rc/ttusbir.c b/drivers/media/rc/ttusbir.c
index 78be8a914225..cf0d47f57fb2 100644
--- a/drivers/media/rc/ttusbir.c
+++ b/drivers/media/rc/ttusbir.c
@@ -213,19 +213,20 @@ static int ttusbir_probe(struct usb_interface *intf,
/* find the correct alt setting */
for (i = 0; i < intf->num_altsetting && altsetting == -1; i++) {
- int bulk_out_endp = -1, iso_in_endp = -1;
+ int max_packet, bulk_out_endp = -1, iso_in_endp = -1;
idesc = &intf->altsetting[i].desc;
for (j = 0; j < idesc->bNumEndpoints; j++) {
desc = &intf->altsetting[i].endpoint[j].desc;
+ max_packet = le16_to_cpu(desc->wMaxPacketSize);
if (usb_endpoint_dir_in(desc) &&
usb_endpoint_xfer_isoc(desc) &&
- desc->wMaxPacketSize == 0x10)
+ max_packet == 0x10)
iso_in_endp = j;
else if (usb_endpoint_dir_out(desc) &&
usb_endpoint_xfer_bulk(desc) &&
- desc->wMaxPacketSize == 0x20)
+ max_packet == 0x20)
bulk_out_endp = j;
if (bulk_out_endp != -1 && iso_in_endp != -1) {
@@ -408,9 +409,8 @@ static int ttusbir_resume(struct usb_interface *intf)
struct ttusbir *tt = usb_get_intfdata(intf);
int i, rc;
- led_classdev_resume(&tt->led);
tt->is_led_on = true;
- ttusbir_set_led(tt);
+ led_classdev_resume(&tt->led);
for (i = 0; i < NUM_URBS; i++) {
rc = usb_submit_urb(tt->urb[i], GFP_KERNEL);
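
The ttusbir change above compares wMaxPacketSize only after converting it with le16_to_cpu(), since USB descriptor fields are little-endian on the wire and the raw comparison would fail on big-endian hosts. A stand-alone illustration of the conversion (le16_to_host and the sample bytes are made up for this sketch; in kernel code le16_to_cpu() does the job):

    #include <stdint.h>
    #include <stdio.h>

    /* Assemble a 16-bit little-endian field byte by byte so the result
     * is correct regardless of the host CPU's endianness. */
    static uint16_t le16_to_host(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        /* wMaxPacketSize = 0x0010 as stored in a USB endpoint descriptor */
        const uint8_t desc_field[2] = { 0x10, 0x00 };

        if (le16_to_host(desc_field) == 0x10)
            printf("looks like the isochronous IN endpoint\n");
        return 0;
    }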
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 930c61499037..535a18dccbd0 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -154,6 +154,8 @@
#define WBCIR_CNTR_R 0x02
/* Invert TX */
#define WBCIR_IRTX_INV 0x04
+/* Receiver oversampling */
+#define WBCIR_RX_T_OV 0x40
/* Valid banks for the SP3 UART */
enum wbcir_bank {
@@ -394,7 +396,8 @@ wbcir_irq_rx(struct wbcir_data *data, struct pnp_dev *device)
if (data->rxstate == WBCIR_RXSTATE_ERROR)
continue;
- duration = ((irdata & 0x7F) + 1) * 2;
+ duration = ((irdata & 0x7F) + 1) *
+ (data->carrier_report_enabled ? 2 : 10);
rawir.pulse = irdata & 0x80 ? false : true;
rawir.duration = US_TO_NS(duration);
@@ -550,6 +553,17 @@ wbcir_set_carrier_report(struct rc_dev *dev, int enable)
wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL,
WBCIR_CNTR_EN, WBCIR_CNTR_EN | WBCIR_CNTR_R);
+ /* Set a higher sampling resolution if carrier reports are enabled */
+ wbcir_select_bank(data, WBCIR_BANK_2);
+ data->dev->rx_resolution = US_TO_NS(enable ? 2 : 10);
+ outb(enable ? 0x03 : 0x0f, data->sbase + WBCIR_REG_SP3_BGDL);
+ outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
+
+ /* Enable oversampling if carrier reports are enabled */
+ wbcir_select_bank(data, WBCIR_BANK_7);
+ wbcir_set_bits(data->sbase + WBCIR_REG_SP3_RCCFG,
+ enable ? WBCIR_RX_T_OV : 0, WBCIR_RX_T_OV);
+
data->carrier_report_enabled = enable;
spin_unlock_irqrestore(&data->spinlock, flags);
@@ -931,8 +945,8 @@ wbcir_init_hw(struct wbcir_data *data)
/* prescaler 1.0, tx/rx fifo lvl 16 */
outb(0x30, data->sbase + WBCIR_REG_SP3_EXCR2);
- /* Set baud divisor to sample every 2 ns */
- outb(0x03, data->sbase + WBCIR_REG_SP3_BGDL);
+ /* Set baud divisor to sample every 10 us */
+ outb(0x0f, data->sbase + WBCIR_REG_SP3_BGDL);
outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
/* Set CEIR mode */
@@ -941,12 +955,9 @@ wbcir_init_hw(struct wbcir_data *data)
inb(data->sbase + WBCIR_REG_SP3_LSR); /* Clear LSR */
inb(data->sbase + WBCIR_REG_SP3_MSR); /* Clear MSR */
- /*
- * Disable RX demod, enable run-length enc/dec, set freq span and
- * enable over-sampling
- */
+ /* Disable RX demod, enable run-length enc/dec, set freq span */
wbcir_select_bank(data, WBCIR_BANK_7);
- outb(0xd0, data->sbase + WBCIR_REG_SP3_RCCFG);
+ outb(0x90, data->sbase + WBCIR_REG_SP3_RCCFG);
/* Disable timer */
wbcir_select_bank(data, WBCIR_BANK_4);
@@ -1093,11 +1104,15 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->rx_resolution = US_TO_NS(2);
data->dev->allowed_protos = RC_BIT_ALL;
+ err = rc_register_device(data->dev);
+ if (err)
+ goto exit_free_rc;
+
if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
err = -EBUSY;
- goto exit_free_rc;
+ goto exit_unregister_device;
}
if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
@@ -1122,24 +1137,20 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
goto exit_release_sbase;
}
- err = rc_register_device(data->dev);
- if (err)
- goto exit_free_irq;
-
device_init_wakeup(&device->dev, 1);
wbcir_init_hw(data);
return 0;
-exit_free_irq:
- free_irq(data->irq, device);
exit_release_sbase:
release_region(data->sbase, SP_IOMEM_LEN);
exit_release_ebase:
release_region(data->ebase, EHFUNC_IOMEM_LEN);
exit_release_wbase:
release_region(data->wbase, WAKEUP_IOMEM_LEN);
+exit_unregister_device:
+ rc_unregister_device(data->dev);
exit_free_rc:
rc_free_device(data->dev);
exit_unregister_led:
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index e4882546c283..3932aa81e18c 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -183,8 +183,7 @@ static int fc0011_set_params(struct dvb_frontend *fe)
unsigned int i, vco_retries;
u32 freq = p->frequency / 1000;
u32 bandwidth = p->bandwidth_hz / 1000;
- u32 fvco, xin, xdiv, xdivr;
- u16 frac;
+ u32 fvco, xin, frac, xdiv, xdivr;
u8 fa, fp, vco_sel, vco_cal;
u8 regs[FC11_NR_REGS] = { };
@@ -221,18 +220,15 @@ static int fc0011_set_params(struct dvb_frontend *fe)
/* Calc XIN. The PLL reference frequency is 18 MHz. */
xdiv = fvco / 18000;
+ WARN_ON(xdiv > 0xFF);
frac = fvco - xdiv * 18000;
frac = (frac << 15) / 18000;
if (frac >= 16384)
frac += 32786;
if (!frac)
xin = 0;
- else if (frac < 511)
- xin = 512;
- else if (frac < 65026)
- xin = frac;
else
- xin = 65024;
+ xin = clamp_t(u32, frac, 512, 65024);
regs[FC11_REG_XINHI] = xin >> 8;
regs[FC11_REG_XINLO] = xin;
@@ -247,8 +243,8 @@ static int fc0011_set_params(struct dvb_frontend *fe)
fa += 8;
}
if (fp > 0x1F) {
- fp &= 0x1F;
- fa &= 0xF;
+ fp = 0x1F;
+ fa = 0xF;
}
if (fa >= fp) {
dev_warn(&priv->i2c->dev,
@@ -351,6 +347,8 @@ static int fc0011_set_params(struct dvb_frontend *fe)
vco_cal &= FC11_VCOCAL_VALUEMASK;
switch (vco_sel) {
+ default:
+ WARN_ON(1);
case 0:
if (vco_cal < 8) {
regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
@@ -432,7 +430,8 @@ static int fc0011_set_params(struct dvb_frontend *fe)
err = fc0011_writereg(priv, FC11_REG_RCCAL, regs[FC11_REG_RCCAL]);
if (err)
return err;
- err = fc0011_writereg(priv, FC11_REG_16, 0xB);
+ regs[FC11_REG_16] = 0xB;
+ err = fc0011_writereg(priv, FC11_REG_16, regs[FC11_REG_16]);
if (err)
return err;
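
The fc0011 hunk above folds the 512/65024 if/else ladder into clamp_t(), the kernel helper that bounds a value to a closed range. A user-space equivalent of what the new line computes (clamp_u32 and the sample inputs are written for this sketch only):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t clamp_u32(uint32_t val, uint32_t lo, uint32_t hi)
    {
        if (val < lo)
            return lo;
        if (val > hi)
            return hi;
        return val;
    }

    int main(void)
    {
        uint32_t samples[] = { 0, 100, 512, 30000, 70000 };
        size_t i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            uint32_t frac = samples[i];
            /* xin = 0 for a zero fraction, otherwise clamp into [512, 65024] */
            uint32_t xin = frac ? clamp_u32(frac, 512, 65024) : 0;

            printf("frac=%5u -> xin=%5u\n", (unsigned)frac, (unsigned)xin);
        }
        return 0;
    }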
diff --git a/drivers/media/tuners/fc0012-priv.h b/drivers/media/tuners/fc0012-priv.h
index 4577c917e616..1a86ce1d3fcf 100644
--- a/drivers/media/tuners/fc0012-priv.h
+++ b/drivers/media/tuners/fc0012-priv.h
@@ -21,20 +21,9 @@
#ifndef _FC0012_PRIV_H_
#define _FC0012_PRIV_H_
-#define LOG_PREFIX "fc0012"
-
-#undef err
-#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
-#undef info
-#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
-#undef warn
-#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
-
struct fc0012_priv {
struct i2c_adapter *i2c;
- u8 addr;
- u8 dual_master;
- u8 xtal_freq;
+ const struct fc0012_config *cfg;
u32 frequency;
u32 bandwidth;
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index 308135abd54c..f4d0e797a6cc 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -25,11 +25,13 @@ static int fc0012_writereg(struct fc0012_priv *priv, u8 reg, u8 val)
{
u8 buf[2] = {reg, val};
struct i2c_msg msg = {
- .addr = priv->addr, .flags = 0, .buf = buf, .len = 2
+ .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2
};
if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
- err("I2C write reg failed, reg: %02x, val: %02x", reg, val);
+ dev_err(&priv->i2c->dev,
+ "%s: I2C write reg failed, reg: %02x, val: %02x\n",
+ KBUILD_MODNAME, reg, val);
return -EREMOTEIO;
}
return 0;
@@ -38,12 +40,16 @@ static int fc0012_writereg(struct fc0012_priv *priv, u8 reg, u8 val)
static int fc0012_readreg(struct fc0012_priv *priv, u8 reg, u8 *val)
{
struct i2c_msg msg[2] = {
- { .addr = priv->addr, .flags = 0, .buf = &reg, .len = 1 },
- { .addr = priv->addr, .flags = I2C_M_RD, .buf = val, .len = 1 },
+ { .addr = priv->cfg->i2c_address, .flags = 0,
+ .buf = &reg, .len = 1 },
+ { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD,
+ .buf = val, .len = 1 },
};
if (i2c_transfer(priv->i2c, msg, 2) != 2) {
- err("I2C read reg failed, reg: %02x", reg);
+ dev_err(&priv->i2c->dev,
+ "%s: I2C read reg failed, reg: %02x\n",
+ KBUILD_MODNAME, reg);
return -EREMOTEIO;
}
return 0;
@@ -88,7 +94,7 @@ static int fc0012_init(struct dvb_frontend *fe)
0x04, /* reg. 0x15: Enable LNA COMPS */
};
- switch (priv->xtal_freq) {
+ switch (priv->cfg->xtal_freq) {
case FC_XTAL_27_MHZ:
case FC_XTAL_28_8_MHZ:
reg[0x07] |= 0x20;
@@ -98,9 +104,12 @@ static int fc0012_init(struct dvb_frontend *fe)
break;
}
- if (priv->dual_master)
+ if (priv->cfg->dual_master)
reg[0x0c] |= 0x02;
+ if (priv->cfg->loop_through)
+ reg[0x09] |= 0x01;
+
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
@@ -114,17 +123,12 @@ static int fc0012_init(struct dvb_frontend *fe)
fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
if (ret)
- err("fc0012_writereg failed: %d", ret);
+ dev_err(&priv->i2c->dev, "%s: fc0012_writereg failed: %d\n",
+ KBUILD_MODNAME, ret);
return ret;
}
-static int fc0012_sleep(struct dvb_frontend *fe)
-{
- /* nothing to do here */
- return 0;
-}
-
static int fc0012_set_params(struct dvb_frontend *fe)
{
struct fc0012_priv *priv = fe->tuner_priv;
@@ -144,7 +148,7 @@ static int fc0012_set_params(struct dvb_frontend *fe)
goto exit;
}
- switch (priv->xtal_freq) {
+ switch (priv->cfg->xtal_freq) {
case FC_XTAL_27_MHZ:
xtal_freq_khz_2 = 27000 / 2;
break;
@@ -256,7 +260,8 @@ static int fc0012_set_params(struct dvb_frontend *fe)
break;
}
} else {
- err("%s: modulation type not supported!", __func__);
+ dev_err(&priv->i2c->dev, "%s: modulation type not supported!\n",
+ KBUILD_MODNAME);
return -EINVAL;
}
@@ -318,7 +323,8 @@ exit:
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
if (ret)
- warn("%s: failed: %d", __func__, ret);
+ dev_warn(&priv->i2c->dev, "%s: %s failed: %d\n",
+ KBUILD_MODNAME, __func__, ret);
return ret;
}
@@ -331,8 +337,7 @@ static int fc0012_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static int fc0012_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
{
- /* CHECK: always ? */
- *frequency = 0;
+ *frequency = 0; /* Zero-IF */
return 0;
}
@@ -408,7 +413,8 @@ err:
fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
exit:
if (ret)
- warn("%s: failed: %d", __func__, ret);
+ dev_warn(&priv->i2c->dev, "%s: %s failed: %d\n",
+ KBUILD_MODNAME, __func__, ret);
return ret;
}
@@ -424,7 +430,6 @@ static const struct dvb_tuner_ops fc0012_tuner_ops = {
.release = fc0012_release,
.init = fc0012_init,
- .sleep = fc0012_sleep,
.set_params = fc0012_set_params,
@@ -436,27 +441,73 @@ static const struct dvb_tuner_ops fc0012_tuner_ops = {
};
struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, u8 i2c_address, int dual_master,
- enum fc001x_xtal_freq xtal_freq)
+ struct i2c_adapter *i2c, const struct fc0012_config *cfg)
{
- struct fc0012_priv *priv = NULL;
+ struct fc0012_priv *priv;
+ int ret;
+ u8 chip_id;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
priv = kzalloc(sizeof(struct fc0012_priv), GFP_KERNEL);
- if (priv == NULL)
- return NULL;
+ if (!priv) {
+ ret = -ENOMEM;
+ dev_err(&i2c->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+ goto err;
+ }
+ priv->cfg = cfg;
priv->i2c = i2c;
- priv->dual_master = dual_master;
- priv->addr = i2c_address;
- priv->xtal_freq = xtal_freq;
- info("Fitipower FC0012 successfully attached.");
+ /* check if the tuner is there */
+ ret = fc0012_readreg(priv, 0x00, &chip_id);
+ if (ret < 0)
+ goto err;
- fe->tuner_priv = priv;
+ dev_dbg(&i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id);
+
+ switch (chip_id) {
+ case 0xa1:
+ break;
+ default:
+ ret = -ENODEV;
+ goto err;
+ }
+
+ dev_info(&i2c->dev, "%s: Fitipower FC0012 successfully identified\n",
+ KBUILD_MODNAME);
+
+ if (priv->cfg->loop_through) {
+ ret = fc0012_writereg(priv, 0x09, 0x6f);
+ if (ret < 0)
+ goto err;
+ }
+
+ /*
+ * TODO: Clock out en or div?
+ * For dual tuner configuration clearing bit [0] is required.
+ */
+ if (priv->cfg->clock_out) {
+ ret = fc0012_writereg(priv, 0x0b, 0x82);
+ if (ret < 0)
+ goto err;
+ }
+ fe->tuner_priv = priv;
memcpy(&fe->ops.tuner_ops, &fc0012_tuner_ops,
sizeof(struct dvb_tuner_ops));
+err:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ if (ret) {
+ dev_dbg(&i2c->dev, "%s: failed: %d\n", __func__, ret);
+ kfree(priv);
+ return NULL;
+ }
+
return fe;
}
EXPORT_SYMBOL(fc0012_attach);
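
The reworked fc0012_attach() above now probes for the chip before claiming success: it opens the demodulator's I2C gate, reads the ID register, applies the optional loop-through and clock-out settings, and closes the gate on every path, freeing the private state if anything failed. Condensed into a kernel-context sketch (demo_attach, demo_priv, demo_read_id and DEMO_CHIP_ID are invented names standing in for the driver's own):

    /* Kernel-context sketch of the gate-open / identify / gate-close shape. */
    struct demo_priv {
        struct i2c_adapter *i2c;
    };

    static struct dvb_frontend *demo_attach(struct dvb_frontend *fe,
                                            struct i2c_adapter *i2c)
    {
        struct demo_priv *priv;
        int ret;
        u8 id;

        if (fe->ops.i2c_gate_ctrl)
            fe->ops.i2c_gate_ctrl(fe, 1);       /* open the I2C gate */

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
            ret = -ENOMEM;
            goto err;
        }
        priv->i2c = i2c;

        ret = demo_read_id(priv, &id);          /* assumed register-read helper */
        if (!ret && id != DEMO_CHIP_ID)
            ret = -ENODEV;
    err:
        if (fe->ops.i2c_gate_ctrl)
            fe->ops.i2c_gate_ctrl(fe, 0);       /* always close the gate again */

        if (ret) {
            kfree(priv);
            return NULL;
        }
        fe->tuner_priv = priv;
        return fe;
    }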
diff --git a/drivers/media/tuners/fc0012.h b/drivers/media/tuners/fc0012.h
index 4dbd5efe8845..54508fcc3469 100644
--- a/drivers/media/tuners/fc0012.h
+++ b/drivers/media/tuners/fc0012.h
@@ -24,19 +24,41 @@
#include "dvb_frontend.h"
#include "fc001x-common.h"
+struct fc0012_config {
+ /*
+ * I2C address
+ */
+ u8 i2c_address;
+
+ /*
+ * clock
+ */
+ enum fc001x_xtal_freq xtal_freq;
+
+ bool dual_master;
+
+ /*
+ * RF loop-through
+ */
+ bool loop_through;
+
+ /*
+ * clock output
+ */
+ bool clock_out;
+};
+
#if defined(CONFIG_MEDIA_TUNER_FC0012) || \
(defined(CONFIG_MEDIA_TUNER_FC0012_MODULE) && defined(MODULE))
extern struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
- u8 i2c_address, int dual_master,
- enum fc001x_xtal_freq xtal_freq);
+ const struct fc0012_config *cfg);
#else
static inline struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
- u8 i2c_address, int dual_master,
- enum fc001x_xtal_freq xtal_freq)
+ const struct fc0012_config *cfg)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
#endif
diff --git a/drivers/media/tuners/mt2060.h b/drivers/media/tuners/mt2060.h
index cb60caffb6b6..c64fc19cb278 100644
--- a/drivers/media/tuners/mt2060.h
+++ b/drivers/media/tuners/mt2060.h
@@ -30,7 +30,7 @@ struct mt2060_config {
u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */
};
-#if defined(CONFIG_MEDIA_TUNER_MT2060) || (defined(CONFIG_MEDIA_TUNER_MT2060_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT2060)
extern struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1);
#else
static inline struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1)
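
This and the following tuner headers switch their attach() guards to IS_ENABLED(CONFIG_FOO), which is true whether the option is built in or built as a module, in place of the longer defined(CONFIG_FOO) || (defined(CONFIG_FOO_MODULE) && defined(MODULE)) test. The general shape of such a header, sketched with an invented CONFIG_MEDIA_TUNER_FOO symbol and foo_attach() prototype:

    /* Kernel-context sketch; CONFIG_MEDIA_TUNER_FOO and foo_attach()
     * are placeholders, not symbols from this patch set. */
    #if IS_ENABLED(CONFIG_MEDIA_TUNER_FOO)
    extern struct dvb_frontend *foo_attach(struct dvb_frontend *fe,
                                           struct i2c_adapter *i2c, u8 addr);
    #else
    static inline struct dvb_frontend *foo_attach(struct dvb_frontend *fe,
                                                  struct i2c_adapter *i2c, u8 addr)
    {
        pr_warn("%s: driver disabled by Kconfig\n", __func__);
        return NULL;
    }
    #endif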
diff --git a/drivers/media/tuners/mt2063.h b/drivers/media/tuners/mt2063.h
index ab24170c1571..e1acfc8e7ae3 100644
--- a/drivers/media/tuners/mt2063.h
+++ b/drivers/media/tuners/mt2063.h
@@ -8,7 +8,7 @@ struct mt2063_config {
u32 refclock;
};
-#if defined(CONFIG_MEDIA_TUNER_MT2063) || (defined(CONFIG_MEDIA_TUNER_MT2063_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT2063)
struct dvb_frontend *mt2063_attach(struct dvb_frontend *fe,
struct mt2063_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/tuners/mt20xx.h b/drivers/media/tuners/mt20xx.h
index 259553a24903..f56241ccaa00 100644
--- a/drivers/media/tuners/mt20xx.h
+++ b/drivers/media/tuners/mt20xx.h
@@ -20,7 +20,7 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-#if defined(CONFIG_MEDIA_TUNER_MT20XX) || (defined(CONFIG_MEDIA_TUNER_MT20XX_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT20XX)
extern struct dvb_frontend *microtune_attach(struct dvb_frontend *fe,
struct i2c_adapter* i2c_adap,
u8 i2c_addr);
diff --git a/drivers/media/tuners/mt2131.h b/drivers/media/tuners/mt2131.h
index 6632de640df0..09ceaf68e47c 100644
--- a/drivers/media/tuners/mt2131.h
+++ b/drivers/media/tuners/mt2131.h
@@ -30,7 +30,7 @@ struct mt2131_config {
u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */
};
-#if defined(CONFIG_MEDIA_TUNER_MT2131) || (defined(CONFIG_MEDIA_TUNER_MT2131_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT2131)
extern struct dvb_frontend* mt2131_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct mt2131_config *cfg,
diff --git a/drivers/media/tuners/mt2266.h b/drivers/media/tuners/mt2266.h
index 4d083882d044..fad6dd657d77 100644
--- a/drivers/media/tuners/mt2266.h
+++ b/drivers/media/tuners/mt2266.h
@@ -24,7 +24,7 @@ struct mt2266_config {
u8 i2c_address;
};
-#if defined(CONFIG_MEDIA_TUNER_MT2266) || (defined(CONFIG_MEDIA_TUNER_MT2266_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT2266)
extern struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2266_config *cfg);
#else
static inline struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2266_config *cfg)
diff --git a/drivers/media/tuners/mxl5007t.h b/drivers/media/tuners/mxl5007t.h
index aa3eea0b5262..37b0942e2385 100644
--- a/drivers/media/tuners/mxl5007t.h
+++ b/drivers/media/tuners/mxl5007t.h
@@ -77,7 +77,7 @@ struct mxl5007t_config {
unsigned int clk_out_enable:1;
};
-#if defined(CONFIG_MEDIA_TUNER_MXL5007T) || (defined(CONFIG_MEDIA_TUNER_MXL5007T_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_MXL5007T)
extern struct dvb_frontend *mxl5007t_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, u8 addr,
struct mxl5007t_config *cfg);
diff --git a/drivers/media/tuners/qt1010.h b/drivers/media/tuners/qt1010.h
index 807fb7b6146b..8ab5d479749f 100644
--- a/drivers/media/tuners/qt1010.h
+++ b/drivers/media/tuners/qt1010.h
@@ -36,7 +36,7 @@ struct qt1010_config {
* @param cfg tuner hw based configuration
* @return fe pointer on success, NULL on failure
*/
-#if defined(CONFIG_MEDIA_TUNER_QT1010) || (defined(CONFIG_MEDIA_TUNER_QT1010_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_QT1010)
extern struct dvb_frontend *qt1010_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct qt1010_config *cfg);
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index 5d9f02842501..e4a84ee231cf 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -277,7 +277,7 @@ struct dvb_frontend *tda18212_attach(struct dvb_frontend *fe,
{
struct tda18212_priv *priv = NULL;
int ret;
- u8 uninitialized_var(val);
+ u8 val;
priv = kzalloc(sizeof(struct tda18212_priv), GFP_KERNEL);
if (priv == NULL)
@@ -296,8 +296,8 @@ struct dvb_frontend *tda18212_attach(struct dvb_frontend *fe,
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
- dev_dbg(&priv->i2c->dev, "%s: ret=%d chip id=%02x\n", __func__, ret,
- val);
+ if (!ret)
+ dev_dbg(&priv->i2c->dev, "%s: chip id=%02x\n", __func__, val);
if (ret || val != 0xc7) {
kfree(priv);
return NULL;
diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
index 18198537be9f..2d31aeb6b088 100644
--- a/drivers/media/tuners/tda18218.c
+++ b/drivers/media/tuners/tda18218.c
@@ -277,7 +277,7 @@ struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, struct tda18218_config *cfg)
{
struct tda18218_priv *priv = NULL;
- u8 uninitialized_var(val);
+ u8 val;
int ret;
/* chip default registers values */
static u8 def_regs[] = {
@@ -302,8 +302,8 @@ struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
/* check if the tuner is there */
ret = tda18218_rd_reg(priv, R00_ID, &val);
- dev_dbg(&priv->i2c->dev, "%s: ret=%d chip id=%02x\n", __func__, ret,
- val);
+ if (!ret)
+ dev_dbg(&priv->i2c->dev, "%s: chip id=%02x\n", __func__, val);
if (ret || val != def_regs[R00_ID]) {
kfree(priv);
return NULL;
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index 72c26fd77922..e7786862dab2 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -1122,6 +1122,7 @@ static int tda18271_dump_std_map(struct dvb_frontend *fe)
tda18271_dump_std_item(dvbt_7, "dvbt 7");
tda18271_dump_std_item(dvbt_8, "dvbt 8");
tda18271_dump_std_item(qam_6, "qam 6 ");
+ tda18271_dump_std_item(qam_7, "qam 7 ");
tda18271_dump_std_item(qam_8, "qam 8 ");
return 0;
@@ -1149,6 +1150,7 @@ static int tda18271_update_std_map(struct dvb_frontend *fe,
tda18271_update_std(dvbt_7, "dvbt 7");
tda18271_update_std(dvbt_8, "dvbt 8");
tda18271_update_std(qam_6, "qam 6");
+ tda18271_update_std(qam_7, "qam 7");
tda18271_update_std(qam_8, "qam 8");
return 0;
diff --git a/drivers/media/tuners/tda18271-maps.c b/drivers/media/tuners/tda18271-maps.c
index fb881c667c94..b62e925f643f 100644
--- a/drivers/media/tuners/tda18271-maps.c
+++ b/drivers/media/tuners/tda18271-maps.c
@@ -1290,13 +1290,11 @@ int tda18271_assign_map_layout(struct dvb_frontend *fe)
switch (priv->id) {
case TDA18271HDC1:
priv->maps = &tda18271c1_map_layout;
- memcpy(&priv->std, &tda18271c1_std_map,
- sizeof(struct tda18271_std_map));
+ priv->std = tda18271c1_std_map;
break;
case TDA18271HDC2:
priv->maps = &tda18271c2_map_layout;
- memcpy(&priv->std, &tda18271c2_std_map,
- sizeof(struct tda18271_std_map));
+ priv->std = tda18271c2_std_map;
break;
default:
ret = -EINVAL;
diff --git a/drivers/media/tuners/tda18271.h b/drivers/media/tuners/tda18271.h
index 89b6c6d93fec..4c418d63f540 100644
--- a/drivers/media/tuners/tda18271.h
+++ b/drivers/media/tuners/tda18271.h
@@ -121,7 +121,7 @@ enum tda18271_mode {
TDA18271_DIGITAL,
};
-#if defined(CONFIG_MEDIA_TUNER_TDA18271) || (defined(CONFIG_MEDIA_TUNER_TDA18271_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA18271)
extern struct dvb_frontend *tda18271_attach(struct dvb_frontend *fe, u8 addr,
struct i2c_adapter *i2c,
struct tda18271_config *cfg);
diff --git a/drivers/media/tuners/tda827x.h b/drivers/media/tuners/tda827x.h
index 7d72ce0a0c2d..9432b5b6121b 100644
--- a/drivers/media/tuners/tda827x.h
+++ b/drivers/media/tuners/tda827x.h
@@ -50,7 +50,7 @@ struct tda827x_config
* @param cfg optional callback function pointers.
* @return FE pointer on success, NULL on failure.
*/
-#if defined(CONFIG_MEDIA_TUNER_TDA827X) || (defined(CONFIG_MEDIA_TUNER_TDA827X_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA827X)
extern struct dvb_frontend* tda827x_attach(struct dvb_frontend *fe, int addr,
struct i2c_adapter *i2c,
struct tda827x_config *cfg);
diff --git a/drivers/media/tuners/tda8290.h b/drivers/media/tuners/tda8290.h
index 7e288b26fcc3..e12ecbaa35a4 100644
--- a/drivers/media/tuners/tda8290.h
+++ b/drivers/media/tuners/tda8290.h
@@ -28,7 +28,7 @@ struct tda829x_config {
#define TDA829X_DONT_PROBE 1
};
-#if defined(CONFIG_MEDIA_TUNER_TDA8290) || (defined(CONFIG_MEDIA_TUNER_TDA8290_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA8290)
extern int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr);
extern struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/tuners/tda9887.h b/drivers/media/tuners/tda9887.h
index acc419e8c4fc..37a4a1123e0c 100644
--- a/drivers/media/tuners/tda9887.h
+++ b/drivers/media/tuners/tda9887.h
@@ -21,7 +21,7 @@
#include "dvb_frontend.h"
/* ------------------------------------------------------------------------ */
-#if defined(CONFIG_MEDIA_TUNER_TDA9887) || (defined(CONFIG_MEDIA_TUNER_TDA9887_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA9887)
extern struct dvb_frontend *tda9887_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c_adap,
u8 i2c_addr);
diff --git a/drivers/media/tuners/tea5761.h b/drivers/media/tuners/tea5761.h
index 2e2ff82c95a4..933228ffb509 100644
--- a/drivers/media/tuners/tea5761.h
+++ b/drivers/media/tuners/tea5761.h
@@ -20,7 +20,7 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-#if defined(CONFIG_MEDIA_TUNER_TEA5761) || (defined(CONFIG_MEDIA_TUNER_TEA5761_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
extern int tea5761_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr);
extern struct dvb_frontend *tea5761_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/tuners/tea5767.h b/drivers/media/tuners/tea5767.h
index d30ab1b483de..c39101199383 100644
--- a/drivers/media/tuners/tea5767.h
+++ b/drivers/media/tuners/tea5767.h
@@ -39,7 +39,7 @@ struct tea5767_ctrl {
enum tea5767_xtal xtal_freq;
};
-#if defined(CONFIG_MEDIA_TUNER_TEA5767) || (defined(CONFIG_MEDIA_TUNER_TEA5767_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5767)
extern int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr);
extern struct dvb_frontend *tea5767_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/tuners/tuner-simple.h b/drivers/media/tuners/tuner-simple.h
index 381fa5d35a9b..ffd12cfe650b 100644
--- a/drivers/media/tuners/tuner-simple.h
+++ b/drivers/media/tuners/tuner-simple.h
@@ -20,7 +20,7 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-#if defined(CONFIG_MEDIA_TUNER_SIMPLE) || (defined(CONFIG_MEDIA_TUNER_SIMPLE_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_SIMPLE)
extern struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c_adap,
u8 i2c_addr,
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 7bcb6b0ff1df..09451737c77e 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -870,7 +870,7 @@ check_device:
}
read_not_reliable:
- memcpy(&priv->cur_fw, &new_fw, sizeof(priv->cur_fw));
+ priv->cur_fw = new_fw;
/*
* By setting BASE in cur_fw.type only after successfully loading all
diff --git a/drivers/media/tuners/tuner-xc2028.h b/drivers/media/tuners/tuner-xc2028.h
index 9ebfb2d0ff14..181d087faec4 100644
--- a/drivers/media/tuners/tuner-xc2028.h
+++ b/drivers/media/tuners/tuner-xc2028.h
@@ -56,7 +56,7 @@ struct xc2028_config {
#define XC2028_RESET_CLK 1
#define XC2028_I2C_FLUSH 2
-#if defined(CONFIG_MEDIA_TUNER_XC2028) || (defined(CONFIG_MEDIA_TUNER_XC2028_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_XC2028)
extern struct dvb_frontend *xc2028_attach(struct dvb_frontend *fe,
struct xc2028_config *cfg);
#else
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 5c0fd787cc8f..2018befabb5a 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -1066,7 +1066,7 @@ check_device:
goto fail;
}
- memcpy(&priv->cur_fw, &new_fw, sizeof(priv->cur_fw));
+ priv->cur_fw = new_fw;
/*
* By setting BASE in cur_fw.type only after successfully loading all
diff --git a/drivers/media/tuners/xc4000.h b/drivers/media/tuners/xc4000.h
index e6a44d151cbd..97c23de5296c 100644
--- a/drivers/media/tuners/xc4000.h
+++ b/drivers/media/tuners/xc4000.h
@@ -50,7 +50,7 @@ struct xc4000_config {
* it's passed back to a bridge during tuner_callback().
*/
-#if defined(CONFIG_MEDIA_TUNER_XC4000) || (defined(CONFIG_MEDIA_TUNER_XC4000_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_XC4000)
extern struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct xc4000_config *cfg);
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index dc93cf338f36..d6be1b613c52 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -785,6 +785,7 @@ static int xc5000_set_params(struct dvb_frontend *fe)
return -EINVAL;
}
priv->rf_mode = XC_RF_MODE_AIR;
+ break;
case SYS_DVBC_ANNEX_A:
case SYS_DVBC_ANNEX_C:
dprintk(1, "%s() QAM modulation\n", __func__);
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index 6746994d03fe..0a7d520636a9 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -21,7 +21,6 @@ endif
if MEDIA_ANALOG_TV_SUPPORT
comment "Analog TV USB devices"
-source "drivers/media/usb/au0828/Kconfig"
source "drivers/media/usb/pvrusb2/Kconfig"
source "drivers/media/usb/hdpvr/Kconfig"
source "drivers/media/usb/tlg2300/Kconfig"
@@ -31,6 +30,7 @@ endif
if (MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT)
comment "Analog/digital TV USB devices"
+source "drivers/media/usb/au0828/Kconfig"
source "drivers/media/usb/cx231xx/Kconfig"
source "drivers/media/usb/tm6000/Kconfig"
endif
diff --git a/drivers/media/usb/au0828/Kconfig b/drivers/media/usb/au0828/Kconfig
index 1766c0ce93be..953a37c613b1 100644
--- a/drivers/media/usb/au0828/Kconfig
+++ b/drivers/media/usb/au0828/Kconfig
@@ -1,17 +1,28 @@
config VIDEO_AU0828
tristate "Auvitek AU0828 support"
- depends on I2C && INPUT && DVB_CORE && USB && VIDEO_V4L2
+ depends on I2C && INPUT && DVB_CORE && USB
select I2C_ALGOBIT
select VIDEO_TVEEPROM
select VIDEOBUF_VMALLOC
select DVB_AU8522_DTV if MEDIA_SUBDRV_AUTOSELECT
- select DVB_AU8522_V4L if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MXL5007T if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
---help---
- This is a video4linux driver for Auvitek's USB device.
+ This is a hybrid analog/digital tv capture driver for
+ Auvitek's AU0828 USB device.
To compile this driver as a module, choose M here: the
module will be called au0828
+
+config VIDEO_AU0828_V4L2
+ bool "Auvitek AU0828 v4l2 analog video support"
+ depends on VIDEO_AU0828 && VIDEO_V4L2
+ select DVB_AU8522_V4L if MEDIA_SUBDRV_AUTOSELECT
+ default y
+ ---help---
+ This is a video4linux driver for Auvitek's USB device.
+
+ Choose Y here to include support for v4l2 analog video
+ capture within the au0828 driver.
diff --git a/drivers/media/usb/au0828/Makefile b/drivers/media/usb/au0828/Makefile
index 98cc20cc0ffb..be3bdf698022 100644
--- a/drivers/media/usb/au0828/Makefile
+++ b/drivers/media/usb/au0828/Makefile
@@ -1,4 +1,8 @@
-au0828-objs := au0828-core.o au0828-i2c.o au0828-cards.o au0828-dvb.o au0828-video.o au0828-vbi.o
+au0828-objs := au0828-core.o au0828-i2c.o au0828-cards.o au0828-dvb.o
+
+ifeq ($(CONFIG_VIDEO_AU0828_V4L2),y)
+ au0828-objs += au0828-video.o au0828-vbi.o
+endif
obj-$(CONFIG_VIDEO_AU0828) += au0828.o
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index 0cb7c28dcb17..dd32decb237d 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -169,7 +169,9 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
- case 72261: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
+ case 72261: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
+ case 72271: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
+ case 72281: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
break;
@@ -183,16 +185,15 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
__func__, tv.model);
}
+void au0828_card_analog_fe_setup(struct au0828_dev *dev);
+
void au0828_card_setup(struct au0828_dev *dev)
{
static u8 eeprom[256];
- struct tuner_setup tun_setup;
- struct v4l2_subdev *sd;
- unsigned int mode_mask = T_ANALOG_TV;
dprintk(1, "%s()\n", __func__);
- memcpy(&dev->board, &au0828_boards[dev->boardnr], sizeof(dev->board));
+ dev->board = au0828_boards[dev->boardnr];
if (dev->i2c_rc == 0) {
dev->i2c_client.addr = 0xa0 >> 1;
@@ -209,6 +210,16 @@ void au0828_card_setup(struct au0828_dev *dev)
break;
}
+ au0828_card_analog_fe_setup(dev);
+}
+
+void au0828_card_analog_fe_setup(struct au0828_dev *dev)
+{
+#ifdef CONFIG_VIDEO_AU0828_V4L2
+ struct tuner_setup tun_setup;
+ struct v4l2_subdev *sd;
+ unsigned int mode_mask = T_ANALOG_TV;
+
if (AUVI_INPUT(0).type != AU0828_VMUX_UNDEFINED) {
/* Load the analog demodulator driver (note this would need to
be abstracted out if we ever need to support a different
@@ -234,6 +245,7 @@ void au0828_card_setup(struct au0828_dev *dev)
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr,
&tun_setup);
}
+#endif
}
/*
@@ -333,6 +345,8 @@ struct usb_device_id au0828_usb_id_table[] = {
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
{ USB_DEVICE(0x2040, 0x7213),
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+ { USB_DEVICE(0x2040, 0x7270),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
{ },
};
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 745a80a798c8..1e6f40ef1c6b 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -134,13 +134,17 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
/* Digital TV */
au0828_dvb_unregister(dev);
+#ifdef CONFIG_VIDEO_AU0828_V4L2
if (AUVI_INPUT(0).type != AU0828_VMUX_UNDEFINED)
au0828_analog_unregister(dev);
+#endif
/* I2C */
au0828_i2c_unregister(dev);
+#ifdef CONFIG_VIDEO_AU0828_V4L2
v4l2_device_unregister(&dev->v4l2_dev);
+#endif
usb_set_intfdata(interface, NULL);
@@ -155,7 +159,10 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
static int au0828_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
- int ifnum, retval;
+ int ifnum;
+#ifdef CONFIG_VIDEO_AU0828_V4L2
+ int retval;
+#endif
struct au0828_dev *dev;
struct usb_device *usbdev = interface_to_usbdev(interface);
@@ -194,6 +201,7 @@ static int au0828_usb_probe(struct usb_interface *interface,
dev->usbdev = usbdev;
dev->boardnr = id->driver_info;
+#ifdef CONFIG_VIDEO_AU0828_V4L2
/* Create the v4l2_device */
retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
if (retval) {
@@ -203,6 +211,7 @@ static int au0828_usb_probe(struct usb_interface *interface,
kfree(dev);
return -EIO;
}
+#endif
/* Power Up the bridge */
au0828_write(dev, REG_600, 1 << 4);
@@ -216,9 +225,11 @@ static int au0828_usb_probe(struct usb_interface *interface,
/* Setup */
au0828_card_setup(dev);
+#ifdef CONFIG_VIDEO_AU0828_V4L2
/* Analog TV */
if (AUVI_INPUT(0).type != AU0828_VMUX_UNDEFINED)
au0828_analog_register(dev, interface);
+#endif
/* Digital TV */
au0828_dvb_register(dev);
diff --git a/drivers/media/usb/au0828/au0828-i2c.c b/drivers/media/usb/au0828/au0828-i2c.c
index 4ded17fe1957..17ec3651b10e 100644
--- a/drivers/media/usb/au0828/au0828-i2c.c
+++ b/drivers/media/usb/au0828/au0828-i2c.c
@@ -364,12 +364,9 @@ int au0828_i2c_register(struct au0828_dev *dev)
{
dprintk(1, "%s()\n", __func__);
- memcpy(&dev->i2c_adap, &au0828_i2c_adap_template,
- sizeof(dev->i2c_adap));
- memcpy(&dev->i2c_algo, &au0828_i2c_algo_template,
- sizeof(dev->i2c_algo));
- memcpy(&dev->i2c_client, &au0828_i2c_client_template,
- sizeof(dev->i2c_client));
+ dev->i2c_adap = au0828_i2c_adap_template;
+ dev->i2c_algo = au0828_i2c_algo_template;
+ dev->i2c_client = au0828_i2c_client_template;
dev->i2c_adap.dev.parent = &dev->usbdev->dev;
@@ -378,7 +375,11 @@ int au0828_i2c_register(struct au0828_dev *dev)
dev->i2c_adap.algo = &dev->i2c_algo;
dev->i2c_adap.algo_data = dev;
+#ifdef CONFIG_VIDEO_AU0828_V4L2
i2c_set_adapdata(&dev->i2c_adap, &dev->v4l2_dev);
+#else
+ i2c_set_adapdata(&dev->i2c_adap, dev);
+#endif
i2c_add_adapter(&dev->i2c_adap);
dev->i2c_client.adapter = &dev->i2c_adap;
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 45387aab10c7..8b9e8268e911 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -304,7 +304,7 @@ static inline void buffer_filled(struct au0828_dev *dev,
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
dev->isoc_ctl.buf = NULL;
@@ -321,7 +321,7 @@ static inline void vbi_buffer_filled(struct au0828_dev *dev,
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
dev->isoc_ctl.vbi_buf = NULL;
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 66a56ef7bbe4..e579ff69ca4a 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -199,8 +199,10 @@ struct au0828_dev {
struct au0828_dvb dvb;
struct work_struct restart_streaming;
+#ifdef CONFIG_VIDEO_AU0828_V4L2
/* Analog */
struct v4l2_device v4l2_dev;
+#endif
int users;
unsigned int resources; /* resources in use */
struct video_device *vdev;
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 95b5d6e7cdc4..be1719283609 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -328,7 +328,7 @@ static void cpia2_usb_complete(struct urb *urb)
continue;
}
DBG("Start of frame pattern found\n");
- do_gettimeofday(&cam->workbuff->timestamp);
+ v4l2_get_timestamp(&cam->workbuff->timestamp);
cam->workbuff->seq = cam->frame_count++;
cam->workbuff->data[0] = 0xFF;
cam->workbuff->data[1] = 0xD8;
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index aeb9d2275725..d5d42b6e94be 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -825,6 +825,8 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
else
buf->flags = 0;
+ buf->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
switch (cam->buffers[buf->index].status) {
case FRAME_EMPTY:
case FRAME_ERROR:
@@ -943,7 +945,8 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->index = frame;
buf->bytesused = cam->buffers[buf->index].length;
- buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE;
+ buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE
+ | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buf->field = V4L2_FIELD_NONE;
buf->timestamp = cam->buffers[buf->index].timestamp;
buf->sequence = cam->buffers[buf->index].seq;
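
The cpia2 and cx231xx hunks in this area replace do_gettimeofday() with v4l2_get_timestamp() and advertise V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, so buffer timestamps come from the monotonic clock rather than wall time. A rough kernel-context sketch of the buffer-completion step these patches touch (demo_buffer_done is an invented wrapper; the fields are those used in the hunks):

    /* Mark a videobuf buffer as complete with a monotonic timestamp. */
    static void demo_buffer_done(struct videobuf_buffer *vb)
    {
        vb->state = VIDEOBUF_DONE;
        vb->field_count++;
        v4l2_get_timestamp(&vb->ts);    /* monotonic clock, was do_gettimeofday() */
        wake_up(&vb->done);
    }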
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index b024e5197a75..28688dbcb609 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1291,7 +1291,7 @@ static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *ur
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
dma_q->mpeg_buffer_completed = 0;
@@ -1327,7 +1327,7 @@ static void buffer_filled(char *data, int len, struct urb *urb,
memcpy(vbuf, data, len);
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index bbed1e40eeda..8d529565f163 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -603,6 +603,33 @@ struct cx231xx_board cx231xx_boards[] = {
.gpio = NULL,
} },
},
+ [CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2] = {
+ .name = "Elgato Video Capture V2",
+ .tuner_type = TUNER_ABSENT,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .norm = V4L2_STD_NTSC,
+ .no_alt_vanc = 1,
+ .external_av = 1,
+ .dont_use_port_3 = 1,
+ .input = {{
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ } },
+ },
};
const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
@@ -642,6 +669,8 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_KWORLD_UB430_USB_HYBRID},
{USB_DEVICE(0x1f4d, 0x0237),
.driver_info = CX231XX_BOARD_ICONBIT_U100},
+ {USB_DEVICE(0x0fd9, 0x0037),
+ .driver_info = CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2},
{},
};
@@ -707,7 +736,7 @@ static void cx231xx_sleep_s5h1432(struct cx231xx *dev)
static inline void cx231xx_set_model(struct cx231xx *dev)
{
- memcpy(&dev->board, &cx231xx_boards[dev->model], sizeof(dev->board));
+ dev->board = cx231xx_boards[dev->model];
}
/* Since cx231xx_pre_card_setup() requires a proper dev->model,
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index ac7db52f404f..46e3892557c2 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -530,7 +530,7 @@ static inline void vbi_buffer_filled(struct cx231xx *dev,
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
dev->vbi_mode.bulk_ctl.buf = NULL;
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index fedf7852a355..06376d904c9f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -235,7 +235,7 @@ static inline void buffer_filled(struct cx231xx *dev,
cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
if (dev->USE_ISO)
dev->video_mode.isoc_ctl.buf = NULL;
@@ -1751,6 +1751,7 @@ static int vidioc_s_register(struct file *file, void *priv,
0x02,
(u16)reg->reg, 1,
value, 1, 2);
+ break;
case 0x322:
ret =
cx231xx_write_i2c_master(dev,
@@ -2627,8 +2628,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev->name, video_device_node_name(dev->vdev));
/* Initialize VBI template */
- memcpy(&cx231xx_vbi_template, &cx231xx_video_template,
- sizeof(cx231xx_vbi_template));
+ cx231xx_vbi_template = cx231xx_video_template;
strcpy(cx231xx_vbi_template.name, "cx231xx-vbi");
/* Allocate and fill vbi video_device struct */
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index a89d020de948..3e11462be0d0 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -68,6 +68,7 @@
#define CX231XX_BOARD_ICONBIT_U100 13
#define CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL 14
#define CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC 15
+#define CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2 16
/* Limits minimum and default number of buffers */
#define CX231XX_MIN_BUF 4
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 7a622dbe9b6d..692224d97d06 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -1,6 +1,6 @@
config DVB_USB_V2
tristate "Support for various USB DVB devices v2"
- depends on DVB_CORE && USB && I2C && RC_CORE
+ depends on DVB_CORE && USB && I2C
help
By enabling this you will be able to choose the various supported
USB1.1 and USB2.0 DVB devices.
@@ -113,6 +113,7 @@ config DVB_USB_IT913X
config DVB_USB_LME2510
tristate "LME DM04/QQBOX DVB-S USB2.0 support"
depends on DVB_USB_V2
+ depends on RC_CORE
select DVB_TDA10086 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA826X if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0288 if MEDIA_SUBDRV_AUTOSELECT
@@ -120,6 +121,7 @@ config DVB_USB_LME2510
select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
select DVB_M88RS2000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the LME DM04/QQBOX DVB-S USB2.0
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 943d93423705..b86d0f27a398 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1156,6 +1156,7 @@ error:
return ret;
}
+#if IS_ENABLED(CONFIG_RC_CORE)
struct af9015_rc_setup {
unsigned int id;
char *rc_codes;
@@ -1312,6 +1313,9 @@ static int af9015_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
return 0;
}
+#else
+ #define af9015_get_rc_config NULL
+#endif
/* interface 0 is used by DVB-T receiver and
interface 1 is for remote controller (HID) */
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 61ae7f9d0b27..f11cc42454f0 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -209,10 +209,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
if (msg[0].len > 40 || msg[1].len > 40) {
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
- } else if (msg[0].addr == state->af9033_config[0].i2c_addr) {
- /* integrated demod */
+ } else if ((msg[0].addr == state->af9033_config[0].i2c_addr) ||
+ (msg[0].addr == state->af9033_config[1].i2c_addr)) {
+ /* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
+
+ if (msg[0].addr == state->af9033_config[1].i2c_addr)
+ reg |= 0x100000;
+
ret = af9035_rd_regs(d, reg, &msg[1].buf[0],
msg[1].len);
} else {
@@ -220,6 +225,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
u8 buf[5 + msg[0].len];
struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
buf, msg[1].len, msg[1].buf };
+ req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[1].len;
buf[1] = msg[0].addr << 1;
buf[2] = 0x00; /* reg addr len */
@@ -232,10 +238,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
if (msg[0].len > 40) {
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
- } else if (msg[0].addr == state->af9033_config[0].i2c_addr) {
- /* integrated demod */
+ } else if ((msg[0].addr == state->af9033_config[0].i2c_addr) ||
+ (msg[0].addr == state->af9033_config[1].i2c_addr)) {
+ /* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
+
+ if (msg[0].addr == state->af9033_config[1].i2c_addr)
+ reg |= 0x100000;
+
ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
msg[0].len - 3);
} else {
@@ -243,6 +254,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
u8 buf[5 + msg[0].len];
struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf,
0, NULL };
+ req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[0].len;
buf[1] = msg[0].addr << 1;
buf[2] = 0x00; /* reg addr len */
@@ -313,12 +325,57 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
struct usb_req req_fw_dl = { CMD_FW_DL, 0, 0, wbuf, 0, NULL };
struct usb_req req_fw_ver = { CMD_FW_QUERYINFO, 0, 1, wbuf, 4, rbuf } ;
- u8 hdr_core;
+ u8 hdr_core, tmp;
u16 hdr_addr, hdr_data_len, hdr_checksum;
#define MAX_DATA 58
#define HDR_SIZE 7
/*
+ * In the dual-tuner configuration we need to do some extra
+ * initialization in order to download firmware to the slave demod too,
+ * which is done by the master demod.
+ * The master also feeds the clock and controls power via GPIO.
+ */
+ ret = af9035_rd_reg(d, EEPROM_DUAL_MODE, &tmp);
+ if (ret < 0)
+ goto err;
+
+ if (tmp) {
+ /* configure gpioh1, reset & power slave demod */
+ ret = af9035_wr_reg_mask(d, 0x00d8b0, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0x00d8b1, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0x00d8af, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(10000, 50000);
+
+ ret = af9035_wr_reg_mask(d, 0x00d8af, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ /* tell the slave I2C address */
+ ret = af9035_rd_reg(d, EEPROM_2ND_DEMOD_ADDR, &tmp);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0x00417f, tmp);
+ if (ret < 0)
+ goto err;
+
+ /* enable clock out */
+ ret = af9035_wr_reg_mask(d, 0x00d81a, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+ }
+
+ /*
* Thanks to Daniel Glöckner <daniel-gl@gmx.net> about that info!
*
* byte 0: MCS 51 core
@@ -380,6 +437,10 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
__func__, fw->size - i);
}
+ /* warn if the firmware looks bad, then continue and see what happens */
+ if (i)
+ dev_warn(&d->udev->dev, "%s: bad firmware\n", KBUILD_MODNAME);
+
/* firmware loaded, request boot */
req.cmd = CMD_FW_BOOT;
ret = af9035_ctrl_msg(d, &req);
@@ -489,14 +550,28 @@ static int af9035_read_config(struct dvb_usb_device *d)
u8 tmp;
u16 tmp16;
+ /* demod I2C "address" */
+ state->af9033_config[0].i2c_addr = 0x38;
+
/* check if there are dual tuners */
ret = af9035_rd_reg(d, EEPROM_DUAL_MODE, &tmp);
if (ret < 0)
goto err;
state->dual_mode = tmp;
- dev_dbg(&d->udev->dev, "%s: dual mode=%d\n",
- __func__, state->dual_mode);
+ dev_dbg(&d->udev->dev, "%s: dual mode=%d\n", __func__,
+ state->dual_mode);
+
+ if (state->dual_mode) {
+ /* read 2nd demodulator I2C address */
+ ret = af9035_rd_reg(d, EEPROM_2ND_DEMOD_ADDR, &tmp);
+ if (ret < 0)
+ goto err;
+
+ state->af9033_config[1].i2c_addr = tmp;
+ dev_dbg(&d->udev->dev, "%s: 2nd demod I2C addr=%02x\n",
+ __func__, tmp);
+ }
for (i = 0; i < state->dual_mode + 1; i++) {
/* tuner */
@@ -514,6 +589,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
case AF9033_TUNER_MXL5007T:
case AF9033_TUNER_TDA18218:
case AF9033_TUNER_FC2580:
+ case AF9033_TUNER_FC0012:
state->af9033_config[i].spec_inv = 1;
break;
default:
@@ -522,6 +598,18 @@ static int af9035_read_config(struct dvb_usb_device *d)
KBUILD_MODNAME, tmp);
}
+ /* disable dual mode if driver does not support it */
+ if (i == 1)
+ switch (tmp) {
+ case AF9033_TUNER_FC0012:
+ break;
+ default:
+ state->dual_mode = false;
+ dev_info(&d->udev->dev, "%s: driver does not " \
+ "support 2nd tuner and will " \
+ "disable it", KBUILD_MODNAME);
+ }
+
/* tuner IF frequency */
ret = af9035_rd_reg(d, EEPROM_1_IFFREQ_L + eeprom_shift, &tmp);
if (ret < 0)
@@ -730,6 +818,12 @@ static int af9035_frontend_callback(void *adapter_priv, int component,
return 0;
}
+static int af9035_get_adapter_count(struct dvb_usb_device *d)
+{
+ struct state *state = d_to_priv(d);
+ return state->dual_mode + 1;
+}
+
static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
@@ -751,15 +845,14 @@ static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
if (ret < 0)
goto err;
- ret = af9035_wr_reg(d, 0x00d81a,
- state->dual_mode);
+ ret = af9035_wr_reg(d, 0x00d81a, state->dual_mode);
if (ret < 0)
goto err;
}
/* attach demodulator */
- adap->fe[0] = dvb_attach(af9033_attach,
- &state->af9033_config[adap->id], &d->i2c_adap);
+ adap->fe[0] = dvb_attach(af9033_attach, &state->af9033_config[adap->id],
+ &d->i2c_adap);
if (adap->fe[0] == NULL) {
ret = -ENODEV;
goto err;
@@ -785,13 +878,22 @@ static const struct fc0011_config af9035_fc0011_config = {
.i2c_address = 0x60,
};
-static struct mxl5007t_config af9035_mxl5007t_config = {
- .xtal_freq_hz = MxL_XTAL_24_MHZ,
- .if_freq_hz = MxL_IF_4_57_MHZ,
- .invert_if = 0,
- .loop_thru_enable = 0,
- .clk_out_enable = 0,
- .clk_out_amp = MxL_CLKOUT_AMP_0_94V,
+static struct mxl5007t_config af9035_mxl5007t_config[] = {
+ {
+ .xtal_freq_hz = MxL_XTAL_24_MHZ,
+ .if_freq_hz = MxL_IF_4_57_MHZ,
+ .invert_if = 0,
+ .loop_thru_enable = 0,
+ .clk_out_enable = 0,
+ .clk_out_amp = MxL_CLKOUT_AMP_0_94V,
+ }, {
+ .xtal_freq_hz = MxL_XTAL_24_MHZ,
+ .if_freq_hz = MxL_IF_4_57_MHZ,
+ .invert_if = 0,
+ .loop_thru_enable = 1,
+ .clk_out_enable = 1,
+ .clk_out_amp = MxL_CLKOUT_AMP_0_94V,
+ }
};
static struct tda18218_config af9035_tda18218_config = {
@@ -804,12 +906,32 @@ static const struct fc2580_config af9035_fc2580_config = {
.clock = 16384000,
};
+static const struct fc0012_config af9035_fc0012_config[] = {
+ {
+ .i2c_address = 0x63,
+ .xtal_freq = FC_XTAL_36_MHZ,
+ .dual_master = true,
+ .loop_through = true,
+ .clock_out = true,
+ }, {
+ .i2c_address = 0x63 | 0x80, /* I2C bus select hack */
+ .xtal_freq = FC_XTAL_36_MHZ,
+ .dual_master = true,
+ }
+};
+
static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
int ret;
struct dvb_frontend *fe;
+ struct i2c_msg msg[1];
+ u8 tuner_addr;
+ /*
+ * XXX: Hack used in this function: we abuse the unused I2C address bit [7]
+ * to carry info about which I2C bus is used in the dual tuner configuration.
+ */
switch (state->af9033_config[adap->id].tuner) {
case AF9033_TUNER_TUA9001:
@@ -842,46 +964,59 @@ static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
&d->i2c_adap, &af9035_fc0011_config);
break;
case AF9033_TUNER_MXL5007T:
- ret = af9035_wr_reg(d, 0x00d8e0, 1);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8e1, 1);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8df, 0);
- if (ret < 0)
- goto err;
+ if (adap->id == 0) {
+ ret = af9035_wr_reg(d, 0x00d8e0, 1);
+ if (ret < 0)
+ goto err;
- msleep(30);
+ ret = af9035_wr_reg(d, 0x00d8e1, 1);
+ if (ret < 0)
+ goto err;
- ret = af9035_wr_reg(d, 0x00d8df, 1);
- if (ret < 0)
- goto err;
+ ret = af9035_wr_reg(d, 0x00d8df, 0);
+ if (ret < 0)
+ goto err;
- msleep(300);
+ msleep(30);
- ret = af9035_wr_reg(d, 0x00d8c0, 1);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8c1, 1);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8bf, 0);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8b4, 1);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8b5, 1);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8b3, 1);
- if (ret < 0)
- goto err;
+ ret = af9035_wr_reg(d, 0x00d8df, 1);
+ if (ret < 0)
+ goto err;
+
+ msleep(300);
+
+ ret = af9035_wr_reg(d, 0x00d8c0, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0x00d8c1, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0x00d8bf, 0);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0x00d8b4, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0x00d8b5, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0x00d8b3, 1);
+ if (ret < 0)
+ goto err;
+
+ tuner_addr = 0x60;
+ } else {
+ tuner_addr = 0x60 | 0x80; /* I2C bus hack */
+ }
/* attach tuner */
- fe = dvb_attach(mxl5007t_attach, adap->fe[0],
- &d->i2c_adap, 0x60, &af9035_mxl5007t_config);
+ fe = dvb_attach(mxl5007t_attach, adap->fe[0], &d->i2c_adap,
+ tuner_addr, &af9035_mxl5007t_config[adap->id]);
break;
case AF9033_TUNER_TDA18218:
/* attach tuner */
@@ -907,6 +1042,46 @@ static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
fe = dvb_attach(fc2580_attach, adap->fe[0],
&d->i2c_adap, &af9035_fc2580_config);
break;
+ case AF9033_TUNER_FC0012:
+ /*
+ * AF9035 gpiot2 = FC0012 enable
+ * XXX: there seems to be something on gpioh8 too, but in my
+ * test I didn't find any difference.
+ */
+
+ if (adap->id == 0) {
+ /* configure gpiot2 as output and high */
+ ret = af9035_wr_reg_mask(d, 0xd8eb, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8ec, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8ed, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+ } else {
+ /*
+ * FIXME: This belongs in the FC0012 driver.
+ * Write 02 to FC0012 master tuner register 0d directly
+ * in order to make the slave tuner work.
+ */
+ msg[0].addr = 0x63;
+ msg[0].flags = 0;
+ msg[0].len = 2;
+ msg[0].buf = "\x0d\x02";
+ ret = i2c_transfer(&d->i2c_adap, msg, 1);
+ if (ret < 0)
+ goto err;
+ }
+
+ usleep_range(10000, 50000);
+
+ fe = dvb_attach(fc0012_attach, adap->fe[0], &d->i2c_adap,
+ &af9035_fc0012_config[adap->id]);
+ break;
default:
fe = NULL;
}
@@ -945,8 +1120,8 @@ static int af9035_init(struct dvb_usb_device *d)
{ 0x00dd8a, (frame_size >> 0) & 0xff, 0xff},
{ 0x00dd8b, (frame_size >> 8) & 0xff, 0xff},
{ 0x00dd0d, packet_size, 0xff },
- { 0x80f9a3, 0x00, 0x01 },
- { 0x80f9cd, 0x00, 0x01 },
+ { 0x80f9a3, state->dual_mode, 0x01 },
+ { 0x80f9cd, state->dual_mode, 0x01 },
{ 0x80f99d, 0x00, 0x01 },
{ 0x80f9a4, 0x00, 0x01 },
};
@@ -971,6 +1146,7 @@ err:
return ret;
}
+#if IS_ENABLED(CONFIG_RC_CORE)
static int af9035_rc_query(struct dvb_usb_device *d)
{
unsigned int key;
@@ -1045,6 +1221,9 @@ err:
return ret;
}
+#else
+ #define af9035_get_rc_config NULL
+#endif
/* interface 0 is used by DVB-T receiver and
interface 1 is for remote controller (HID) */
@@ -1068,7 +1247,7 @@ static const struct dvb_usb_device_properties af9035_props = {
.init = af9035_init,
.get_rc_config = af9035_get_rc_config,
- .num_adapters = 1,
+ .get_adapter_count = af9035_get_adapter_count,
.adapter = {
{
.stream = DVB_USB_STREAM_BULK(0x84, 6, 87 * 188),
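Dual-tuner support in af9035 leans on one trick throughout this file: a 7-bit I2C address never uses bit 7, so that bit is borrowed to mean "talk to the second demod/bus" and is stripped back out where the firmware request is built. A condensed sketch of the encode/decode as it appears in the hunks above; the constants come from this patch, not from separate documentation:
/* the second adapter's tuner config marks the bus in bit 7 */
.i2c_address = 0x63 | 0x80,	/* FC0012 behind the slave demod */

/* af9035_i2c_master_xfer(): move the bus bit into the firmware mbox */
req.mbox |= (msg[0].addr & 0x80) >> 3;
buf[1]    = msg[0].addr << 1;	/* 8-bit form; bit 7 falls out of the u8 store */

/* register accesses to the slave demod get the same bus offset */
if (msg[0].addr == state->af9033_config[1].i2c_addr)
	reg |= 0x100000;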
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.h b/drivers/media/usb/dvb-usb-v2/af9035.h
index 75ef1ec13fbf..29f3eec22c2c 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.h
+++ b/drivers/media/usb/dvb-usb-v2/af9035.h
@@ -26,6 +26,7 @@
#include "af9033.h"
#include "tua9001.h"
#include "fc0011.h"
+#include "fc0012.h"
#include "mxl5007t.h"
#include "tda18218.h"
#include "fc2580.h"
@@ -53,7 +54,6 @@ struct usb_req {
struct state {
u8 seq; /* packet sequence number */
bool dual_mode;
-
struct af9033_config af9033_config[2];
};
@@ -91,6 +91,7 @@ u32 clock_lut_it9135[] = {
/* EEPROM locations */
#define EEPROM_IR_MODE 0x430d
#define EEPROM_DUAL_MODE 0x4326
+#define EEPROM_2ND_DEMOD_ADDR 0x4327
#define EEPROM_IR_TYPE 0x4329
#define EEPROM_1_IFFREQ_L 0x432d
#define EEPROM_1_IFFREQ_H 0x432e
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index d05c5b563dac..a20d691d0b63 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -1019,6 +1019,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
return ret;
}
+#if IS_ENABLED(CONFIG_RC_CORE)
static int anysee_rc_query(struct dvb_usb_device *d)
{
u8 buf[] = {CMD_GET_IR_CODE};
@@ -1054,6 +1055,9 @@ static int anysee_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
return 0;
}
+#else
+ #define anysee_get_rc_config NULL
+#endif
static int anysee_ci_read_attribute_mem(struct dvb_ca_en50221 *ci, int slot,
int addr)
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index d75dbf27e99e..70ec80d8be71 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -189,6 +189,7 @@ static int az6007_streaming_ctrl(struct dvb_frontend *fe, int onoff)
return az6007_write(d, 0xbc, onoff, 0, NULL, 0);
}
+#if IS_ENABLED(CONFIG_RC_CORE)
/* remote control stuff (does not work with my box) */
static int az6007_rc_query(struct dvb_usb_device *d)
{
@@ -215,6 +216,20 @@ static int az6007_rc_query(struct dvb_usb_device *d)
return 0;
}
+static int az6007_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
+{
+ pr_debug("Getting az6007 Remote Control properties\n");
+
+ rc->allowed_protos = RC_BIT_NEC;
+ rc->query = az6007_rc_query;
+ rc->interval = 400;
+
+ return 0;
+}
+#else
+ #define az6007_get_rc_config NULL
+#endif
+
static int az6007_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
int slot,
int address)
@@ -822,17 +837,6 @@ static void az6007_usb_disconnect(struct usb_interface *intf)
dvb_usbv2_disconnect(intf);
}
-static int az6007_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
-{
- pr_debug("Getting az6007 Remote Control properties\n");
-
- rc->allowed_protos = RC_BIT_NEC;
- rc->query = az6007_rc_query;
- rc->interval = 400;
-
- return 0;
-}
-
static int az6007_download_firmware(struct dvb_usb_device *d,
const struct firmware *fw)
{
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index 059291b892b8..3cac8bd0b116 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -347,6 +347,7 @@ struct dvb_usb_adapter {
* @props: device properties
* @name: device name
* @rc_map: name of rc codes table
+ * @rc_polling_active: set when RC polling is active
* @udev: pointer to the device's struct usb_device
* @intf: pointer to the device's usb interface
* @rc: remote controller configuration
@@ -364,7 +365,7 @@ struct dvb_usb_device {
const struct dvb_usb_device_properties *props;
const char *name;
const char *rc_map;
-
+ bool rc_polling_active;
struct usb_device *udev;
struct usb_interface *intf;
struct dvb_usb_rc rc;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 671b4fa232b4..086792055912 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -102,6 +102,7 @@ static int dvb_usbv2_i2c_exit(struct dvb_usb_device *d)
return 0;
}
+#if IS_ENABLED(CONFIG_RC_CORE)
static void dvb_usb_read_remote_control(struct work_struct *work)
{
struct dvb_usb_device *d = container_of(work,
@@ -112,13 +113,16 @@ static void dvb_usb_read_remote_control(struct work_struct *work)
* When the parameter has been set to 1 via sysfs while the
* driver was running, or when bulk mode is enabled after IR init.
*/
- if (dvb_usbv2_disable_rc_polling || d->rc.bulk_mode)
+ if (dvb_usbv2_disable_rc_polling || d->rc.bulk_mode) {
+ d->rc_polling_active = false;
return;
+ }
ret = d->rc.query(d);
if (ret < 0) {
dev_err(&d->udev->dev, "%s: rc.query() failed=%d\n",
KBUILD_MODNAME, ret);
+ d->rc_polling_active = false;
return; /* stop polling */
}
@@ -182,6 +186,7 @@ static int dvb_usbv2_remote_init(struct dvb_usb_device *d)
d->rc.interval);
schedule_delayed_work(&d->rc_query_work,
msecs_to_jiffies(d->rc.interval));
+ d->rc_polling_active = true;
}
return 0;
@@ -202,6 +207,10 @@ static int dvb_usbv2_remote_exit(struct dvb_usb_device *d)
return 0;
}
+#else
+ #define dvb_usbv2_remote_init(args...) 0
+ #define dvb_usbv2_remote_exit(args...)
+#endif
static void dvb_usb_data_complete(struct usb_data_stream *stream, u8 *buf,
size_t len)
@@ -959,7 +968,7 @@ int dvb_usbv2_suspend(struct usb_interface *intf, pm_message_t msg)
dev_dbg(&d->udev->dev, "%s:\n", __func__);
/* stop remote controller poll */
- if (d->rc.query && !d->rc.bulk_mode)
+ if (d->rc_polling_active)
cancel_delayed_work_sync(&d->rc_query_work);
for (i = MAX_NO_OF_ADAPTER_PER_DEVICE - 1; i >= 0; i--) {
@@ -1006,7 +1015,7 @@ static int dvb_usbv2_resume_common(struct dvb_usb_device *d)
}
/* start remote controller poll */
- if (d->rc.query && !d->rc.bulk_mode)
+ if (d->rc_polling_active)
schedule_delayed_work(&d->rc_query_work,
msecs_to_jiffies(d->rc.interval));
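The new rc_polling_active flag closes a suspend/resume hole: the old test d->rc.query && !d->rc.bulk_mode could still be true after polling had stopped itself (module parameter flipped at runtime, or rc.query() failing), so suspend would cancel and resume would reschedule work that was never pending. Condensed from the hunks above, the flag tracks the actual state:
/* remote init: work scheduled, so mark polling as running */
schedule_delayed_work(&d->rc_query_work, msecs_to_jiffies(d->rc.interval));
d->rc_polling_active = true;

/* the poll work clears the flag on every early-out before returning */
d->rc_polling_active = false;

/* suspend/resume only touch the work when polling is really active */
if (d->rc_polling_active)
	cancel_delayed_work_sync(&d->rc_query_work);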
diff --git a/drivers/media/usb/dvb-usb-v2/it913x.c b/drivers/media/usb/dvb-usb-v2/it913x.c
index 47204280b8b3..833847995c65 100644
--- a/drivers/media/usb/dvb-usb-v2/it913x.c
+++ b/drivers/media/usb/dvb-usb-v2/it913x.c
@@ -308,7 +308,7 @@ static struct i2c_algorithm it913x_i2c_algo = {
};
/* Callbacks for DVB USB */
-#define IT913X_POLL 250
+#if IS_ENABLED(CONFIG_RC_CORE)
static int it913x_rc_query(struct dvb_usb_device *d)
{
u8 ibuf[4];
@@ -334,6 +334,25 @@ static int it913x_rc_query(struct dvb_usb_device *d)
return ret;
}
+static int it913x_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
+{
+ struct it913x_state *st = d->priv;
+
+ if (st->proprietary_ir == false) {
+ rc->map_name = NULL;
+ return 0;
+ }
+
+ rc->allowed_protos = RC_BIT_NEC;
+ rc->query = it913x_rc_query;
+ rc->interval = 250;
+
+ return 0;
+}
+#else
+ #define it913x_get_rc_config NULL
+#endif
+
/* Firmware sets raw */
static const char fw_it9135_v1[] = FW_IT9135_V1;
static const char fw_it9135_v2[] = FW_IT9135_V2;
@@ -643,7 +662,8 @@ static int it913x_frontend_attach(struct dvb_usb_adapter *adap)
struct it913x_state *st = d->priv;
int ret = 0;
u8 adap_addr = I2C_BASE_ADDR + (adap->id << 5);
- u16 ep_size = adap->stream.buf_size / 4;
+ u16 ep_size = (adap->pid_filtering) ? TS_BUFFER_SIZE_PID / 4 :
+ TS_BUFFER_SIZE_MAX / 4;
u8 pkt_size = 0x80;
if (d->udev->speed != USB_SPEED_HIGH)
@@ -695,22 +715,6 @@ static int it913x_frontend_attach(struct dvb_usb_adapter *adap)
}
/* DVB USB Driver */
-static int it913x_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
-{
- struct it913x_state *st = d->priv;
-
- if (st->proprietary_ir == false) {
- rc->map_name = NULL;
- return 0;
- }
-
- rc->allowed_protos = RC_BIT_NEC;
- rc->query = it913x_rc_query;
- rc->interval = 250;
-
- return 0;
-}
-
static int it913x_get_adapter_count(struct dvb_usb_device *d)
{
struct it913x_state *st = d->priv;
@@ -779,6 +783,18 @@ static const struct usb_device_id it913x_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135_9006,
&it913x_properties, "ITE 9135(9006) Generic",
RC_MAP_IT913X_V1) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_1835,
+ &it913x_properties, "Avermedia A835B(1835)",
+ RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_2835,
+ &it913x_properties, "Avermedia A835B(2835)",
+ RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_3835,
+ &it913x_properties, "Avermedia A835B(3835)",
+ RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_4835,
+ &it913x_properties, "Avermedia A835B(4835)",
+ RC_MAP_IT913X_V2) },
{} /* Terminating entry */
};
@@ -797,7 +813,7 @@ module_usb_driver(it913x_driver);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("it913x USB 2 Driver");
-MODULE_VERSION("1.32");
+MODULE_VERSION("1.33");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_IT9135_V1);
MODULE_FIRMWARE(FW_IT9135_V2);
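Moving it913x_get_rc_config() inside the RC_CORE guard keeps the existing behaviour for devices whose firmware does not use the proprietary IR path: leaving rc->map_name set to NULL is what the dvb-usb-v2 core takes as "no keymap, skip RC setup", so only the guard changes, not the logic:
if (st->proprietary_ir == false) {	/* IR handled elsewhere (HID) */
	rc->map_name = NULL;		/* no keymap: core skips RC registration */
	return 0;
}

rc->allowed_protos = RC_BIT_NEC;
rc->query = it913x_rc_query;
rc->interval = 250;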
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 6427ac359f21..f30c58cecbba 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -81,6 +81,7 @@
#include "dvb-pll.h"
#include "z0194a.h"
#include "m88rs2000.h"
+#include "ts2020.h"
#define LME2510_C_S7395 "dvb-usb-lme2510c-s7395.fw";
@@ -626,8 +627,8 @@ static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
gate = 5;
for (i = 0; i < num; i++) {
- read_o = 1 & (msg[i].flags & I2C_M_RD);
- read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
+ read_o = msg[i].flags & I2C_M_RD;
+ read = i + 1 < num && msg[i + 1].flags & I2C_M_RD;
read |= read_o;
gate = (msg[i].addr == st->i2c_tuner_addr)
? (read) ? st->i2c_tuner_gate_r
@@ -640,7 +641,8 @@ static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
else
obuf[1] = msg[i].len + read + 1;
- obuf[2] = msg[i].addr;
+ obuf[2] = msg[i].addr << 1;
+
if (read) {
if (read_o)
len = 3;
@@ -894,27 +896,27 @@ static int lme2510_kill_urb(struct usb_data_stream *stream)
}
static struct tda10086_config tda10086_config = {
- .demod_address = 0x1c,
+ .demod_address = 0x0e,
.invert = 0,
.diseqc_tone = 1,
.xtal_freq = TDA10086_XTAL_16M,
};
static struct stv0288_config lme_config = {
- .demod_address = 0xd0,
+ .demod_address = 0x68,
.min_delay_ms = 15,
.inittab = s7395_inittab,
};
static struct ix2505v_config lme_tuner = {
- .tuner_address = 0xc0,
+ .tuner_address = 0x60,
.min_delay_ms = 100,
.tuner_gain = 0x0,
.tuner_chargepump = 0x3,
};
static struct stv0299_config sharp_z0194_config = {
- .demod_address = 0xd0,
+ .demod_address = 0x68,
.inittab = sharp_z0194a_inittab,
.mclk = 88000000UL,
.invert = 0,
@@ -943,11 +945,15 @@ static int dm04_rs2000_set_ts_param(struct dvb_frontend *fe,
}
static struct m88rs2000_config m88rs2000_config = {
- .demod_addr = 0xd0,
- .tuner_addr = 0xc0,
+ .demod_addr = 0x68,
.set_ts_params = dm04_rs2000_set_ts_param,
};
+static struct ts2020_config ts2020_config = {
+ .tuner_address = 0x60,
+ .clk_out_div = 7,
+};
+
static int dm04_lme2510_set_voltage(struct dvb_frontend *fe,
fe_sec_voltage_t voltage)
{
@@ -1049,7 +1055,7 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
info("TUN Found Frontend TDA10086");
st->i2c_tuner_gate_w = 4;
st->i2c_tuner_gate_r = 4;
- st->i2c_tuner_addr = 0xc0;
+ st->i2c_tuner_addr = 0x60;
st->tuner_config = TUNER_LG;
if (st->dvb_usb_lme2510_firmware != TUNER_LG) {
st->dvb_usb_lme2510_firmware = TUNER_LG;
@@ -1065,7 +1071,7 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
info("FE Found Stv0299");
st->i2c_tuner_gate_w = 4;
st->i2c_tuner_gate_r = 5;
- st->i2c_tuner_addr = 0xc0;
+ st->i2c_tuner_addr = 0x60;
st->tuner_config = TUNER_S0194;
if (st->dvb_usb_lme2510_firmware != TUNER_S0194) {
st->dvb_usb_lme2510_firmware = TUNER_S0194;
@@ -1082,7 +1088,7 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
info("FE Found Stv0288");
st->i2c_tuner_gate_w = 4;
st->i2c_tuner_gate_r = 5;
- st->i2c_tuner_addr = 0xc0;
+ st->i2c_tuner_addr = 0x60;
st->tuner_config = TUNER_S7395;
if (st->dvb_usb_lme2510_firmware != TUNER_S7395) {
st->dvb_usb_lme2510_firmware = TUNER_S7395;
@@ -1097,9 +1103,11 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
if (adap->fe[0]) {
info("FE Found M88RS2000");
+ dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config,
+ &d->i2c_adap);
st->i2c_tuner_gate_w = 5;
st->i2c_tuner_gate_r = 5;
- st->i2c_tuner_addr = 0xc0;
+ st->i2c_tuner_addr = 0x60;
st->tuner_config = TUNER_RS2000;
st->fe_set_voltage =
adap->fe[0]->ops.set_voltage;
@@ -1144,7 +1152,7 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
switch (st->tuner_config) {
case TUNER_LG:
- if (dvb_attach(tda826x_attach, adap->fe[0], 0xc0,
+ if (dvb_attach(tda826x_attach, adap->fe[0], 0x60,
&d->i2c_adap, 1))
ret = st->tuner_config;
break;
@@ -1154,7 +1162,7 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
ret = st->tuner_config;
break;
case TUNER_S0194:
- if (dvb_attach(dvb_pll_attach , adap->fe[0], 0xc0,
+ if (dvb_attach(dvb_pll_attach , adap->fe[0], 0x60,
&d->i2c_adap, DVB_PLL_OPERA1))
ret = st->tuner_config;
break;
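All of the 0xd0/0xc0/0x1c to 0x68/0x60/0x0e changes in this file are the same devices renumbered from 8-bit "write address" form to the 7-bit addresses the I2C subsystem (and the new ts2020 driver) expect; the one place the hardware still wants the 8-bit form gets the shift back explicitly. For reference:
/* 8-bit (old)      7-bit (new)                          */
/*   0xd0      ==     0x68 << 1     demod (stv0299 etc.) */
/*   0xc0      ==     0x60 << 1     tuner (ix2505v etc.) */
/*   0x1c      ==     0x0e << 1     tda10086 demod       */

obuf[2] = msg[i].addr << 1;	/* LME2510 firmware still wants the 8-bit form */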
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index a4c302d0aa37..d98387a3c95e 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -835,6 +835,11 @@ static struct tua9001_config rtl2832u_tua9001_config = {
.i2c_addr = 0x60,
};
+static const struct fc0012_config rtl2832u_fc0012_config = {
+ .i2c_address = 0x63, /* 0xc6 >> 1 */
+ .xtal_freq = FC_XTAL_28_8_MHZ,
+};
+
static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
{
int ret;
@@ -847,7 +852,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
switch (priv->tuner) {
case TUNER_RTL2832_FC0012:
fe = dvb_attach(fc0012_attach, adap->fe[0],
- &d->i2c_adap, 0xc6>>1, 0, FC_XTAL_28_8_MHZ);
+ &d->i2c_adap, &rtl2832u_fc0012_config);
/* since fc0012 includes reading the signal strength delegate
* that to the tuner driver */
@@ -1120,7 +1125,7 @@ err:
return ret;
}
-
+#if IS_ENABLED(CONFIG_RC_CORE)
static int rtl2831u_rc_query(struct dvb_usb_device *d)
{
int ret, i;
@@ -1203,7 +1208,11 @@ static int rtl2831u_get_rc_config(struct dvb_usb_device *d,
return 0;
}
+#else
+ #define rtl2831u_get_rc_config NULL
+#endif
+#if IS_ENABLED(CONFIG_RC_CORE)
static int rtl2832u_rc_query(struct dvb_usb_device *d)
{
int ret, i;
@@ -1275,6 +1284,9 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
return 0;
}
+#else
+ #define rtl2832u_get_rc_config NULL
+#endif
static const struct dvb_usb_device_properties rtl2831u_props = {
.driver_name = KBUILD_MODNAME,
@@ -1333,13 +1345,13 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_REALTEK, 0x2838,
&rtl2832u_props, "Realtek RTL2832U reference design", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1,
- &rtl2832u_props, "Terratec Cinergy T Stick Black", NULL) },
+ &rtl2832u_props, "TerraTec Cinergy T Stick Black", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_DELOCK_USB2_DVBT,
&rtl2832u_props, "G-Tek Electronics Group Lifeview LV5TDLX DVB-T", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK,
- &rtl2832u_props, "NOXON DAB/DAB+ USB dongle", NULL) },
+ &rtl2832u_props, "TerraTec NOXON DAB Stick", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK_REV2,
- &rtl2832u_props, "NOXON DAB/DAB+ USB dongle (rev 2)", NULL) },
+ &rtl2832u_props, "TerraTec NOXON DAB Stick (rev 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_TREKSTOR_TERRES_2_0,
&rtl2832u_props, "Trekstor DVB-T Stick Terres 2.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1101,
@@ -1352,6 +1364,14 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl2832u_props, "Dexatek DK mini DVB-T Dongle", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00d7,
&rtl2832u_props, "TerraTec Cinergy T Stick+", NULL) },
+ { DVB_USB_DEVICE(USB_VID_KWORLD_2, 0xd3a8,
+ &rtl2832u_props, "ASUS My Cinema-U3100Mini Plus V2", NULL) },
+ { DVB_USB_DEVICE(USB_VID_KWORLD_2, 0xd393,
+ &rtl2832u_props, "GIGABYTE U7300", NULL) },
+ { DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1104,
+ &rtl2832u_props, "Digivox Micro Hd", NULL) },
+ { DVB_USB_DEVICE(USB_VID_COMPRO, 0x0620,
+ &rtl2832u_props, "Compro VideoMate U620F", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table);
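fc0012_attach() now takes a struct fc0012_config instead of the old positional arguments (address, flag, crystal frequency), which is what lets af9035, earlier in this series, pass per-adapter options such as dual_master, loop_through and clock_out without changing the call again. The rtl28xxu call site shows the minimal form:
static const struct fc0012_config rtl2832u_fc0012_config = {
	.i2c_address = 0x63,		/* 0xc6 >> 1 */
	.xtal_freq   = FC_XTAL_28_8_MHZ,
};

fe = dvb_attach(fc0012_attach, adap->fe[0],
		&d->i2c_adap, &rtl2832u_fc0012_config);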
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index fa0b2931d305..c5d95662e2e1 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -202,8 +202,12 @@ config DVB_USB_TTUSB2
select DVB_TDA10086 if MEDIA_SUBDRV_AUTOSELECT
select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA826X if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA10023 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA10048 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TDA827X if MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to support the Pinnacle 400e DVB-S USB2.0 receiver. The
+ Say Y here to support the Pinnacle 400e DVB-S USB2.0 receiver and
+ the TechnoTrend CT-3650 CI DVB-C/T USB2.0 receiver. The
firmware protocol used by this module is similar to the one used by the
old ttusb-driver - that's why the module is called dvb-usb-ttusb2.
@@ -267,9 +271,11 @@ config DVB_USB_DW2102
select DVB_MT312 if MEDIA_SUBDRV_AUTOSELECT
select DVB_ZL10039 if MEDIA_SUBDRV_AUTOSELECT
select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV6110 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_M88RS2000 if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the DvbWorld, TeVii, Prof DVB-S/S2 USB2.0
receivers.
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 19b5ed2825d7..bf2a908d74cf 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -561,10 +561,7 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
}
}
- if (mutex_lock_interruptible(&adap->dev->usb_mutex) < 0) {
- err("could not acquire lock");
- return -EINTR;
- }
+ mutex_lock(&adap->dev->usb_mutex);
st->buf[0] = REQUEST_ENABLE_VIDEO;
/* this bit gives a kind of command,
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 169196ec2d4e..1adf325012f7 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -38,41 +38,41 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
memcpy(&adap->props, &d->props.adapter[n], sizeof(struct dvb_usb_adapter_properties));
- for (o = 0; o < adap->props.num_frontends; o++) {
- struct dvb_usb_adapter_fe_properties *props = &adap->props.fe[o];
- /* speed - when running at FULL speed we need a HW PID filter */
- if (d->udev->speed == USB_SPEED_FULL && !(props->caps & DVB_USB_ADAP_HAS_PID_FILTER)) {
- err("This USB2.0 device cannot be run on a USB1.1 port. (it lacks a hardware PID filter)");
- return -ENODEV;
- }
+ for (o = 0; o < adap->props.num_frontends; o++) {
+ struct dvb_usb_adapter_fe_properties *props = &adap->props.fe[o];
+ /* speed - when running at FULL speed we need a HW PID filter */
+ if (d->udev->speed == USB_SPEED_FULL && !(props->caps & DVB_USB_ADAP_HAS_PID_FILTER)) {
+ err("This USB2.0 device cannot be run on a USB1.1 port. (it lacks a hardware PID filter)");
+ return -ENODEV;
+ }
- if ((d->udev->speed == USB_SPEED_FULL && props->caps & DVB_USB_ADAP_HAS_PID_FILTER) ||
- (props->caps & DVB_USB_ADAP_NEED_PID_FILTERING)) {
- info("will use the device's hardware PID filter (table count: %d).", props->pid_filter_count);
- adap->fe_adap[o].pid_filtering = 1;
- adap->fe_adap[o].max_feed_count = props->pid_filter_count;
- } else {
- info("will pass the complete MPEG2 transport stream to the software demuxer.");
- adap->fe_adap[o].pid_filtering = 0;
- adap->fe_adap[o].max_feed_count = 255;
- }
+ if ((d->udev->speed == USB_SPEED_FULL && props->caps & DVB_USB_ADAP_HAS_PID_FILTER) ||
+ (props->caps & DVB_USB_ADAP_NEED_PID_FILTERING)) {
+ info("will use the device's hardware PID filter (table count: %d).", props->pid_filter_count);
+ adap->fe_adap[o].pid_filtering = 1;
+ adap->fe_adap[o].max_feed_count = props->pid_filter_count;
+ } else {
+ info("will pass the complete MPEG2 transport stream to the software demuxer.");
+ adap->fe_adap[o].pid_filtering = 0;
+ adap->fe_adap[o].max_feed_count = 255;
+ }
- if (!adap->fe_adap[o].pid_filtering &&
- dvb_usb_force_pid_filter_usage &&
- props->caps & DVB_USB_ADAP_HAS_PID_FILTER) {
- info("pid filter enabled by module option.");
- adap->fe_adap[o].pid_filtering = 1;
- adap->fe_adap[o].max_feed_count = props->pid_filter_count;
- }
+ if (!adap->fe_adap[o].pid_filtering &&
+ dvb_usb_force_pid_filter_usage &&
+ props->caps & DVB_USB_ADAP_HAS_PID_FILTER) {
+ info("pid filter enabled by module option.");
+ adap->fe_adap[o].pid_filtering = 1;
+ adap->fe_adap[o].max_feed_count = props->pid_filter_count;
+ }
- if (props->size_of_priv > 0) {
- adap->fe_adap[o].priv = kzalloc(props->size_of_priv, GFP_KERNEL);
- if (adap->fe_adap[o].priv == NULL) {
- err("no memory for priv for adapter %d fe %d.", n, o);
- return -ENOMEM;
+ if (props->size_of_priv > 0) {
+ adap->fe_adap[o].priv = kzalloc(props->size_of_priv, GFP_KERNEL);
+ if (adap->fe_adap[o].priv == NULL) {
+ err("no memory for priv for adapter %d fe %d.", n, o);
+ return -ENOMEM;
+ }
}
}
- }
if (adap->props.size_of_priv > 0) {
adap->priv = kzalloc(adap->props.size_of_priv, GFP_KERNEL);
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 9382895b1b88..9578a6761f1b 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -1,9 +1,9 @@
/* DVB USB framework compliant Linux driver for the
* DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101,
- * TeVii S600, S630, S650, S660, S480,
+ * TeVii S600, S630, S650, S660, S480, S421, S632,
* Prof 1100, 7500,
* Geniatech SU3000 Cards
- * Copyright (C) 2008-2011 Igor M. Liplianin (liplianin@me.by)
+ * Copyright (C) 2008-2012 Igor M. Liplianin (liplianin@me.by)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -22,11 +22,14 @@
#include "tda1002x.h"
#include "mt312.h"
#include "zl10039.h"
+#include "ts2020.h"
#include "ds3000.h"
#include "stv0900.h"
#include "stv6110.h"
#include "stb6100.h"
#include "stb6100_proc.h"
+#include "m88rs2000.h"
+#include "ts2020.h"
#ifndef USB_PID_DW2102
#define USB_PID_DW2102 0x2102
@@ -68,6 +71,14 @@
#define USB_PID_PROF_1100 0xb012
#endif
+#ifndef USB_PID_TEVII_S421
+#define USB_PID_TEVII_S421 0xd421
+#endif
+
+#ifndef USB_PID_TEVII_S632
+#define USB_PID_TEVII_S632 0xd632
+#endif
+
#define DW210X_READ_MSG 0
#define DW210X_WRITE_MSG 1
@@ -80,6 +91,15 @@
#define DW2102_RC_QUERY (0x1a00)
#define DW2102_LED_CTRL (0x1b00)
+#define DW2101_FIRMWARE "dvb-usb-dw2101.fw"
+#define DW2102_FIRMWARE "dvb-usb-dw2102.fw"
+#define DW2104_FIRMWARE "dvb-usb-dw2104.fw"
+#define DW3101_FIRMWARE "dvb-usb-dw3101.fw"
+#define S630_FIRMWARE "dvb-usb-s630.fw"
+#define S660_FIRMWARE "dvb-usb-s660.fw"
+#define P1100_FIRMWARE "dvb-usb-p1100.fw"
+#define P7500_FIRMWARE "dvb-usb-p7500.fw"
+
#define err_str "did not find the firmware file. (%s) " \
"Please see linux/Documentation/dvb/ for more details " \
"on firmware-problems."
@@ -534,7 +554,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
/*case 0x55: cx24116
case 0x6a: stv0903
- case 0x68: ds3000, stv0903
+ case 0x68: ds3000, stv0903, rs2000
case 0x60: ts2020, stv6110, stb6100
case 0xa0: eeprom */
default: {
@@ -932,6 +952,17 @@ static struct ds3000_config dw2104_ds3000_config = {
.demod_address = 0x68,
};
+static struct ts2020_config dw2104_ts2020_config = {
+ .tuner_address = 0x60,
+ .clk_out_div = 1,
+};
+
+static struct ds3000_config s660_ds3000_config = {
+ .demod_address = 0x68,
+ .ci_mode = 1,
+ .set_lock_led = dw210x_led_ctrl,
+};
+
static struct stv0900_config dw2104a_stv0900_config = {
.demod_address = 0x6a,
.demod_mode = 0,
@@ -981,6 +1012,30 @@ static struct stv0900_config prof_7500_stv0900_config = {
static struct ds3000_config su3000_ds3000_config = {
.demod_address = 0x68,
.ci_mode = 1,
+ .set_lock_led = dw210x_led_ctrl,
+};
+
+static u8 m88rs2000_inittab[] = {
+ DEMOD_WRITE, 0x9a, 0x30,
+ DEMOD_WRITE, 0x00, 0x01,
+ WRITE_DELAY, 0x19, 0x00,
+ DEMOD_WRITE, 0x00, 0x00,
+ DEMOD_WRITE, 0x9a, 0xb0,
+ DEMOD_WRITE, 0x81, 0xc1,
+ DEMOD_WRITE, 0x81, 0x81,
+ DEMOD_WRITE, 0x86, 0xc6,
+ DEMOD_WRITE, 0x9a, 0x30,
+ DEMOD_WRITE, 0xf0, 0x80,
+ DEMOD_WRITE, 0xf1, 0xbf,
+ DEMOD_WRITE, 0xb0, 0x45,
+ DEMOD_WRITE, 0xb2, 0x01,
+ DEMOD_WRITE, 0x9a, 0xb0,
+ 0xff, 0xaa, 0xff
+};
+
+static struct m88rs2000_config s421_m88rs2000_config = {
+ .demod_addr = 0x68,
+ .inittab = m88rs2000_inittab,
};
static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
@@ -1033,6 +1088,8 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
d->fe_adap[0].fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
&d->dev->i2c_adap);
if (d->fe_adap[0].fe != NULL) {
+ dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+ &dw2104_ts2020_config, &d->dev->i2c_adap);
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached DS3000!\n");
return 0;
@@ -1139,12 +1196,15 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d)
struct s6x0_state *st = (struct s6x0_state *)d->dev->priv;
u8 obuf[] = {7, 1};
- d->fe_adap[0].fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
+ d->fe_adap[0].fe = dvb_attach(ds3000_attach, &s660_ds3000_config,
&d->dev->i2c_adap);
if (d->fe_adap[0].fe == NULL)
return -EIO;
+ dvb_attach(ts2020_attach, d->fe_adap[0].fe, &dw2104_ts2020_config,
+ &d->dev->i2c_adap);
+
st->old_set_voltage = d->fe_adap[0].fe->ops.set_voltage;
d->fe_adap[0].fe->ops.set_voltage = s660_set_voltage;
@@ -1182,6 +1242,14 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
err("command 0x0e transfer failed.");
obuf[0] = 0xe;
+ obuf[1] = 0x02;
+ obuf[2] = 1;
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+ msleep(300);
+
+ obuf[0] = 0xe;
obuf[1] = 0x83;
obuf[2] = 0;
@@ -1205,9 +1273,40 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
if (d->fe_adap[0].fe == NULL)
return -EIO;
- info("Attached DS3000!\n");
+ if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+ &dw2104_ts2020_config,
+ &d->dev->i2c_adap)) {
+ info("Attached DS3000/TS2020!\n");
+ return 0;
+ }
- return 0;
+ info("Failed to attach DS3000/TS2020!\n");
+ return -EIO;
+}
+
+static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
+{
+ u8 obuf[] = { 0x51 };
+ u8 ibuf[] = { 0 };
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+ err("command 0x51 transfer failed.");
+
+ d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config,
+ &d->dev->i2c_adap);
+
+ if (d->fe_adap[0].fe == NULL)
+ return -EIO;
+
+ if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+ &dw2104_ts2020_config,
+ &d->dev->i2c_adap)) {
+ info("Attached RS2000/TS2020!\n");
+ return 0;
+ }
+
+ info("Failed to attach RS2000/TS2020!\n");
+ return -EIO;
}
static int dw2102_tuner_attach(struct dvb_usb_adapter *adap)
@@ -1447,6 +1546,8 @@ enum dw2102_table_entry {
TEVII_S480_1,
TEVII_S480_2,
X3M_SPC1400HD,
+ TEVII_S421,
+ TEVII_S632,
};
static struct usb_device_id dw2102_table[] = {
@@ -1465,6 +1566,8 @@ static struct usb_device_id dw2102_table[] = {
[TEVII_S480_1] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_1)},
[TEVII_S480_2] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_2)},
[X3M_SPC1400HD] = {USB_DEVICE(0x1f4d, 0x3100)},
+ [TEVII_S421] = {USB_DEVICE(0x9022, USB_PID_TEVII_S421)},
+ [TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
{ }
};
@@ -1478,13 +1581,12 @@ static int dw2102_load_firmware(struct usb_device *dev,
u8 reset;
u8 reset16[] = {0, 0, 0, 0, 0, 0, 0};
const struct firmware *fw;
- const char *fw_2101 = "dvb-usb-dw2101.fw";
switch (dev->descriptor.idProduct) {
case 0x2101:
- ret = request_firmware(&fw, fw_2101, &dev->dev);
+ ret = request_firmware(&fw, DW2101_FIRMWARE, &dev->dev);
if (ret != 0) {
- err(err_str, fw_2101);
+ err(err_str, DW2101_FIRMWARE);
return ret;
}
break;
@@ -1586,7 +1688,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
static struct dvb_usb_device_properties dw2102_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .firmware = "dvb-usb-dw2102.fw",
+ .firmware = DW2102_FIRMWARE,
.no_reconnect = 1,
.i2c_algo = &dw2102_serit_i2c_algo,
@@ -1641,7 +1743,7 @@ static struct dvb_usb_device_properties dw2102_properties = {
static struct dvb_usb_device_properties dw2104_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .firmware = "dvb-usb-dw2104.fw",
+ .firmware = DW2104_FIRMWARE,
.no_reconnect = 1,
.i2c_algo = &dw2104_i2c_algo,
@@ -1691,7 +1793,7 @@ static struct dvb_usb_device_properties dw2104_properties = {
static struct dvb_usb_device_properties dw3101_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .firmware = "dvb-usb-dw3101.fw",
+ .firmware = DW3101_FIRMWARE,
.no_reconnect = 1,
.i2c_algo = &dw3101_i2c_algo,
@@ -1739,7 +1841,7 @@ static struct dvb_usb_device_properties s6x0_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
.size_of_priv = sizeof(struct s6x0_state),
- .firmware = "dvb-usb-s630.fw",
+ .firmware = S630_FIRMWARE,
.no_reconnect = 1,
.i2c_algo = &s6x0_i2c_algo,
@@ -1814,6 +1916,19 @@ static struct dvb_usb_device_description d7500 = {
{NULL},
};
+struct dvb_usb_device_properties *s421;
+static struct dvb_usb_device_description d421 = {
+ "TeVii S421 PCI",
+ {&dw2102_table[TEVII_S421], NULL},
+ {NULL},
+};
+
+static struct dvb_usb_device_description d632 = {
+ "TeVii S632 USB",
+ {&dw2102_table[TEVII_S632], NULL},
+ {NULL},
+};
+
static struct dvb_usb_device_properties su3000_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
@@ -1879,7 +1994,7 @@ static int dw2102_probe(struct usb_interface *intf,
return -ENOMEM;
/* copy default structure */
/* fill only different fields */
- p1100->firmware = "dvb-usb-p1100.fw";
+ p1100->firmware = P1100_FIRMWARE;
p1100->devices[0] = d1100;
p1100->rc.legacy.rc_map_table = rc_map_tbs_table;
p1100->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
@@ -1891,7 +2006,7 @@ static int dw2102_probe(struct usb_interface *intf,
kfree(p1100);
return -ENOMEM;
}
- s660->firmware = "dvb-usb-s660.fw";
+ s660->firmware = S660_FIRMWARE;
s660->num_device_descs = 3;
s660->devices[0] = d660;
s660->devices[1] = d480_1;
@@ -1905,12 +2020,26 @@ static int dw2102_probe(struct usb_interface *intf,
kfree(s660);
return -ENOMEM;
}
- p7500->firmware = "dvb-usb-p7500.fw";
+ p7500->firmware = P7500_FIRMWARE;
p7500->devices[0] = d7500;
p7500->rc.legacy.rc_map_table = rc_map_tbs_table;
p7500->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
p7500->adapter->fe[0].frontend_attach = prof_7500_frontend_attach;
+
+ s421 = kmemdup(&su3000_properties,
+ sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
+ if (!s421) {
+ kfree(p1100);
+ kfree(s660);
+ kfree(p7500);
+ return -ENOMEM;
+ }
+ s421->num_device_descs = 2;
+ s421->devices[0] = d421;
+ s421->devices[1] = d632;
+ s421->adapter->fe[0].frontend_attach = m88rs2000_frontend_attach;
+
if (0 == dvb_usb_device_init(intf, &dw2102_properties,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &dw2104_properties,
@@ -1925,6 +2054,8 @@ static int dw2102_probe(struct usb_interface *intf,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, p7500,
THIS_MODULE, NULL, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf, s421,
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &su3000_properties,
THIS_MODULE, NULL, adapter_nr))
return 0;
@@ -1943,9 +2074,17 @@ module_usb_driver(dw2102_driver);
MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
- " DVB-C 3101 USB2.0,"
- " TeVii S600, S630, S650, S660, S480,"
- " Prof 1100, 7500 USB2.0,"
- " Geniatech SU3000 devices");
+ " DVB-C 3101 USB2.0,"
+ " TeVii S600, S630, S650, S660, S480, S421, S632"
+ " Prof 1100, 7500 USB2.0,"
+ " Geniatech SU3000 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(DW2101_FIRMWARE);
+MODULE_FIRMWARE(DW2102_FIRMWARE);
+MODULE_FIRMWARE(DW2104_FIRMWARE);
+MODULE_FIRMWARE(DW3101_FIRMWARE);
+MODULE_FIRMWARE(S630_FIRMWARE);
+MODULE_FIRMWARE(S660_FIRMWARE);
+MODULE_FIRMWARE(P1100_FIRMWARE);
+MODULE_FIRMWARE(P7500_FIRMWARE);
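Hoisting the firmware file names into macros does two things here: the probe path and the per-device property structs stop repeating string literals, and the names become available to MODULE_FIRMWARE(), which records them in the module info so packaging and initramfs tools can see which blobs the driver may request. The pairing looks like:
#define DW2102_FIRMWARE "dvb-usb-dw2102.fw"

static struct dvb_usb_device_properties dw2102_properties = {
	/* ... */
	.firmware = DW2102_FIRMWARE,
};

MODULE_FIRMWARE(DW2102_FIRMWARE);	/* listed by modinfo as a firmware: line */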
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
index 90a70c66a96e..d56f927fc31a 100644
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ b/drivers/media/usb/dvb-usb/friio-fe.c
@@ -421,11 +421,10 @@ struct dvb_frontend *jdvbt90502_attach(struct dvb_usb_device *d)
/* setup the state */
state->i2c = &d->i2c_adap;
- memcpy(&state->config, &friio_fe_config, sizeof(friio_fe_config));
+ state->config = friio_fe_config;
/* create dvb_frontend */
- memcpy(&state->frontend.ops, &jdvbt90502_ops,
- sizeof(jdvbt90502_ops));
+ state->frontend.ops = jdvbt90502_ops;
state->frontend.demodulator_priv = state;
if (jdvbt90502_init(&state->frontend) < 0)
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index 661bb75be955..92afeb20650f 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -16,6 +16,7 @@
#include "qt1010.h"
#include "tda1004x.h"
#include "tda827x.h"
+#include "mt2060.h"
#include <media/tuner.h>
#include "tuner-simple.h"
@@ -63,23 +64,33 @@ static inline int m920x_write(struct usb_device *udev, u8 request,
return ret;
}
+static inline int m920x_write_seq(struct usb_device *udev, u8 request,
+ struct m920x_inits *seq)
+{
+ int ret;
+ while (seq->address) {
+ ret = m920x_write(udev, request, seq->data, seq->address);
+ if (ret != 0)
+ return ret;
+
+ seq++;
+ }
+
+ return ret;
+}
+
static int m920x_init(struct dvb_usb_device *d, struct m920x_inits *rc_seq)
{
int ret = 0, i, epi, flags = 0;
int adap_enabled[M9206_MAX_ADAPTERS] = { 0 };
/* Remote controller init. */
- if (d->props.rc.legacy.rc_query) {
+ if (d->props.rc.legacy.rc_query || d->props.rc.core.rc_query) {
deb("Initialising remote control\n");
- while (rc_seq->address) {
- if ((ret = m920x_write(d->udev, M9206_CORE,
- rc_seq->data,
- rc_seq->address)) != 0) {
- deb("Initialising remote control failed\n");
- return ret;
- }
-
- rc_seq++;
+ ret = m920x_write_seq(d->udev, M9206_CORE, rc_seq);
+ if (ret != 0) {
+ deb("Initialising remote control failed\n");
+ return ret;
}
deb("Initialising remote control success\n");
@@ -130,9 +141,50 @@ static int m920x_init_ep(struct usb_interface *intf)
alt->desc.bAlternateSetting);
}
-static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static inline void m920x_parse_rc_state(struct dvb_usb_device *d, u8 rc_state,
+ int *state)
{
struct m920x_state *m = d->priv;
+
+ switch (rc_state) {
+ case 0x80:
+ *state = REMOTE_NO_KEY_PRESSED;
+ break;
+
+ case 0x88: /* framing error or "invalid code" */
+ case 0x99:
+ case 0xc0:
+ case 0xd8:
+ *state = REMOTE_NO_KEY_PRESSED;
+ m->rep_count = 0;
+ break;
+
+ case 0x93:
+ case 0x92:
+ case 0x83: /* pinnacle PCTV310e */
+ case 0x82:
+ m->rep_count = 0;
+ *state = REMOTE_KEY_PRESSED;
+ break;
+
+ case 0x91:
+ case 0x81: /* pinnacle PCTV310e */
+ /* prevent immediate auto-repeat */
+ if (++m->rep_count > 2)
+ *state = REMOTE_KEY_REPEAT;
+ else
+ *state = REMOTE_NO_KEY_PRESSED;
+ break;
+
+ default:
+ deb("Unexpected rc state %02x\n", rc_state);
+ *state = REMOTE_NO_KEY_PRESSED;
+ break;
+ }
+}
+
+static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+{
int i, ret = 0;
u8 *rc_state;
@@ -140,51 +192,22 @@ static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
if (!rc_state)
return -ENOMEM;
- if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_STATE, rc_state, 1)) != 0)
+ ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_STATE,
+ rc_state, 1);
+ if (ret != 0)
goto out;
- if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_KEY, rc_state + 1, 1)) != 0)
+ ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_KEY,
+ rc_state + 1, 1);
+ if (ret != 0)
goto out;
+ m920x_parse_rc_state(d, rc_state[0], state);
+
for (i = 0; i < d->props.rc.legacy.rc_map_size; i++)
if (rc5_data(&d->props.rc.legacy.rc_map_table[i]) == rc_state[1]) {
*event = d->props.rc.legacy.rc_map_table[i].keycode;
-
- switch(rc_state[0]) {
- case 0x80:
- *state = REMOTE_NO_KEY_PRESSED;
- goto out;
-
- case 0x88: /* framing error or "invalid code" */
- case 0x99:
- case 0xc0:
- case 0xd8:
- *state = REMOTE_NO_KEY_PRESSED;
- m->rep_count = 0;
- goto out;
-
- case 0x93:
- case 0x92:
- case 0x83: /* pinnacle PCTV310e */
- case 0x82:
- m->rep_count = 0;
- *state = REMOTE_KEY_PRESSED;
- goto out;
-
- case 0x91:
- case 0x81: /* pinnacle PCTV310e */
- /* prevent immediate auto-repeat */
- if (++m->rep_count > 2)
- *state = REMOTE_KEY_REPEAT;
- else
- *state = REMOTE_NO_KEY_PRESSED;
- goto out;
-
- default:
- deb("Unexpected rc state %02x\n", rc_state[0]);
- *state = REMOTE_NO_KEY_PRESSED;
- goto out;
- }
+ goto out;
}
if (rc_state[1] != 0)
@@ -197,6 +220,38 @@ static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return ret;
}
+static int m920x_rc_core_query(struct dvb_usb_device *d)
+{
+ int ret = 0;
+ u8 *rc_state;
+ int state;
+
+ rc_state = kmalloc(2, GFP_KERNEL);
+ if (!rc_state)
+ return -ENOMEM;
+
+ if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_STATE, &rc_state[0], 1)) != 0)
+ goto out;
+
+ if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_KEY, &rc_state[1], 1)) != 0)
+ goto out;
+
+ deb("state=0x%02x keycode=0x%02x\n", rc_state[0], rc_state[1]);
+
+ m920x_parse_rc_state(d, rc_state[0], &state);
+
+ if (state == REMOTE_NO_KEY_PRESSED)
+ rc_keyup(d->rc_dev);
+ else if (state == REMOTE_KEY_REPEAT)
+ rc_repeat(d->rc_dev);
+ else
+ rc_keydown(d->rc_dev, rc_state[1], 0);
+
+out:
+ kfree(rc_state);
+ return ret;
+}
+
/* I2C */
static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
@@ -496,6 +551,12 @@ static struct qt1010_config m920x_qt1010_config = {
.i2c_address = 0x62
};
+static struct mt2060_config m920x_mt2060_config = {
+ .i2c_address = 0x60, /* 0xc0 */
+ .clock_out = 0,
+};
+
+
/* Callbacks for DVB USB */
static int m920x_mt352_frontend_attach(struct dvb_usb_adapter *adap)
{
@@ -510,6 +571,37 @@ static int m920x_mt352_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
}
+static int m920x_mt352_frontend_attach_vp7049(struct dvb_usb_adapter *adap)
+{
+ struct m920x_inits vp7049_fe_init_seq[] = {
+ /* XXX without these commands the frontend cannot be detected,
+ * they must be sent BEFORE the frontend is attached */
+ { 0xff28, 0x00 },
+ { 0xff23, 0x00 },
+ { 0xff28, 0x00 },
+ { 0xff23, 0x00 },
+ { 0xff21, 0x20 },
+ { 0xff21, 0x60 },
+ { 0xff28, 0x00 },
+ { 0xff22, 0x00 },
+ { 0xff20, 0x30 },
+ { 0xff20, 0x20 },
+ { 0xff20, 0x30 },
+ { } /* terminating entry */
+ };
+ int ret;
+
+ deb("%s\n", __func__);
+
+ ret = m920x_write_seq(adap->dev->udev, M9206_CORE, vp7049_fe_init_seq);
+ if (ret != 0) {
+ deb("Initialization of vp7049 frontend failed.");
+ return ret;
+ }
+
+ return m920x_mt352_frontend_attach(adap);
+}
+
static int m920x_tda10046_08_frontend_attach(struct dvb_usb_adapter *adap)
{
deb("%s\n",__func__);
@@ -574,6 +666,18 @@ static int m920x_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
+static int m920x_mt2060_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ deb("%s\n", __func__);
+
+ if (dvb_attach(mt2060_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap,
+ &m920x_mt2060_config, 1220) == NULL)
+ return -ENODEV;
+
+ return 0;
+}
+
+
/* device-specific initialization */
static struct m920x_inits megasky_rc_init [] = {
{ M9206_RC_INIT2, 0xa8 },
@@ -591,7 +695,7 @@ static struct m920x_inits tvwalkertwin_rc_init [] = {
};
static struct m920x_inits pinnacle310e_init[] = {
- /* without these the tuner don't work */
+ /* without these the tuner doesn't work */
{ 0xff20, 0x9b },
{ 0xff22, 0x70 },
@@ -602,6 +706,15 @@ static struct m920x_inits pinnacle310e_init[] = {
{ } /* terminating entry */
};
+static struct m920x_inits vp7049_rc_init[] = {
+ { 0xff28, 0x00 },
+ { 0xff23, 0x00 },
+ { 0xff21, 0x70 },
+ { M9206_RC_INIT2, 0x00 },
+ { M9206_RC_INIT1, 0xff },
+ { } /* terminating entry */
+};
+
/* ir keymaps */
static struct rc_map_table rc_map_megasky_table[] = {
{ 0x0012, KEY_POWER },
@@ -704,6 +817,7 @@ static struct dvb_usb_device_properties digivox_mini_ii_properties;
static struct dvb_usb_device_properties tvwalkertwin_properties;
static struct dvb_usb_device_properties dposh_properties;
static struct dvb_usb_device_properties pinnacle_pctv310e_properties;
+static struct dvb_usb_device_properties vp7049_properties;
static int m920x_probe(struct usb_interface *intf,
const struct usb_device_id *id)
@@ -756,6 +870,13 @@ static int m920x_probe(struct usb_interface *intf,
goto found;
}
+ ret = dvb_usb_device_init(intf, &vp7049_properties,
+ THIS_MODULE, &d, adapter_nr);
+ if (ret == 0) {
+ rc_init_seq = vp7049_rc_init;
+ goto found;
+ }
+
return ret;
} else {
/* Another interface on a multi-tuner device */
@@ -787,6 +908,7 @@ static struct usb_device_id m920x_table [] = {
{ USB_DEVICE(USB_VID_DPOSH, USB_PID_DPOSH_M9206_COLD) },
{ USB_DEVICE(USB_VID_DPOSH, USB_PID_DPOSH_M9206_WARM) },
{ USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_PINNACLE_PCTV310E) },
+ { USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_TWINHAN_VP7049) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, m920x_table);
@@ -1079,6 +1201,61 @@ static struct dvb_usb_device_properties pinnacle_pctv310e_properties = {
}
};
+static struct dvb_usb_device_properties vp7049_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .firmware = "dvb-usb-vp7049-0.95.fw",
+ .download_firmware = m920x_firmware_download,
+
+ .rc.core = {
+ .rc_interval = 150,
+ .rc_codes = RC_MAP_TWINHAN_VP1027_DVBS,
+ .rc_query = m920x_rc_core_query,
+ .allowed_protos = RC_TYPE_UNKNOWN,
+ },
+
+ .size_of_priv = sizeof(struct m920x_state),
+
+ .identify_state = m920x_identify_state,
+ .num_adapters = 1,
+ .adapter = {{
+ .num_frontends = 1,
+ .fe = {{
+
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+
+ .pid_filter_count = 8,
+ .pid_filter = m920x_pid_filter,
+ .pid_filter_ctrl = m920x_pid_filter_ctrl,
+
+ .frontend_attach = m920x_mt352_frontend_attach_vp7049,
+ .tuner_attach = m920x_mt2060_tuner_attach,
+
+ .stream = {
+ .type = USB_BULK,
+ .count = 8,
+ .endpoint = 0x81,
+ .u = {
+ .bulk = {
+ .buffersize = 512,
+ }
+ }
+ },
+ } },
+ } },
+ .i2c_algo = &m920x_i2c_algo,
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DTV-DVB UDTT7049",
+ { &m920x_table[7], NULL },
+ { NULL },
+ }
+ }
+};
+
static struct usb_driver m920x_driver = {
.name = "dvb_usb_m920x",
.probe = m920x_probe,
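Factoring the keycode state machine out into m920x_parse_rc_state() is what lets the same decoding back both the legacy rc_query path and the new rc-core query used by the vp7049 board; the rc-core side then only maps the three outcomes onto the rc framework calls:
m920x_parse_rc_state(d, rc_state[0], &state);

if (state == REMOTE_NO_KEY_PRESSED)
	rc_keyup(d->rc_dev);			/* nothing pressed / released */
else if (state == REMOTE_KEY_REPEAT)
	rc_repeat(d->rc_dev);			/* auto-repeat of the last key */
else
	rc_keydown(d->rc_dev, rc_state[1], 0);	/* scancode, toggle = 0 */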
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index bcdac225ebe1..2ce3d19c58ef 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -620,6 +620,8 @@ static struct usb_device_id ttusb2_table [] = {
USB_PID_TECHNOTREND_CONNECT_S2400) },
{ USB_DEVICE(USB_VID_TECHNOTREND,
USB_PID_TECHNOTREND_CONNECT_CT3650) },
+ { USB_DEVICE(USB_VID_TECHNOTREND,
+ USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM) },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, ttusb2_table);
@@ -721,12 +723,16 @@ static struct dvb_usb_device_properties ttusb2_properties_s2400 = {
.generic_bulk_ctrl_endpoint = 0x01,
- .num_device_descs = 1,
+ .num_device_descs = 2,
.devices = {
{ "Technotrend TT-connect S-2400",
{ &ttusb2_table[2], NULL },
{ NULL },
},
+ { "Technotrend TT-connect S-2400 (8kB EEPROM)",
+ { &ttusb2_table[4], NULL },
+ { NULL },
+ },
}
};
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index 7a5bd61bd3bb..c754a80a8d8b 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_EM28XX
depends on VIDEO_DEV && I2C
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEOBUF_VMALLOC
+ select VIDEOBUF2_VMALLOC
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
@@ -34,6 +34,7 @@ config VIDEO_EM28XX_DVB
tristate "DVB/ATSC Support for em28xx based TV cards"
depends on VIDEO_EM28XX && DVB_CORE
select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LGDT3305 if MEDIA_SUBDRV_AUTOSELECT
select DVB_ZL10353 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA10023 if MEDIA_SUBDRV_AUTOSELECT
select DVB_S921 if MEDIA_SUBDRV_AUTOSELECT
@@ -43,7 +44,10 @@ config VIDEO_EM28XX_DVB
select DVB_TDA18271C2DD if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA10071 if MEDIA_SUBDRV_AUTOSELECT
select DVB_A8293 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEOBUF_DVB
+ select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
---help---
This adds support for DVB cards based on the
Empiatech em28xx chips.
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 619bffbab3bc..54a03b20de6e 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -6,6 +6,7 @@
Markus Rechberger <mrechberger@gmail.com>
Mauro Carvalho Chehab <mchehab@infradead.org>
Sascha Sommer <saschasommer@freenet.de>
+ Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -56,10 +57,16 @@ module_param(disable_usb_speed_check, int, 0444);
MODULE_PARM_DESC(disable_usb_speed_check,
"override min bandwidth requirement of 480M bps");
-static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
+static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
+static int usb_xfer_mode = -1;
+module_param(usb_xfer_mode, int, 0444);
+MODULE_PARM_DESC(usb_xfer_mode,
+ "USB transfer mode for frame data (-1 = auto, 0 = prefer isoc, 1 = prefer bulk)");
+
+
/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS - 1 */
static unsigned long em28xx_devused;
@@ -486,7 +493,7 @@ struct em28xx_board em28xx_boards[] = {
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = SAA7115_COMPOSITE2,
- .amux = EM28XX_AMUX_LINE_IN,
+ .amux = EM28XX_AMUX_VIDEO,
}, {
.type = EM28XX_VMUX_COMPOSITE1,
.vmux = SAA7115_COMPOSITE0,
@@ -2073,6 +2080,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2884_BOARD_TERRATEC_H5 },
{ USB_DEVICE(0x0ccd, 0x10ad), /* H5 Rev. 2 */
.driver_info = EM2884_BOARD_TERRATEC_H5 },
+ { USB_DEVICE(0x0ccd, 0x10b6), /* H5 Rev. 3 */
+ .driver_info = EM2884_BOARD_TERRATEC_H5 },
{ USB_DEVICE(0x0ccd, 0x0084),
.driver_info = EM2860_BOARD_TERRATEC_AV350 },
{ USB_DEVICE(0x0ccd, 0x0096),
@@ -2905,7 +2914,7 @@ static void request_module_async(struct work_struct *work)
if (dev->board.has_dvb)
request_module("em28xx-dvb");
- if (dev->board.ir_codes && !disable_ir)
+ if ((dev->board.ir_codes || dev->board.has_ir_i2c) && !disable_ir)
request_module("em28xx-rc");
#endif /* CONFIG_MODULES */
}
@@ -2934,6 +2943,8 @@ void em28xx_release_resources(struct em28xx *dev)
em28xx_i2c_unregister(dev);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+
v4l2_device_unregister(&dev->v4l2_dev);
usb_put_dev(dev->udev);
@@ -2950,9 +2961,14 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
struct usb_interface *interface,
int minor)
{
+ struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
int retval;
+ static const char *default_chip_name = "em28xx";
+ const char *chip_name = default_chip_name;
dev->udev = udev;
+ mutex_init(&dev->vb_queue_lock);
+ mutex_init(&dev->vb_vbi_queue_lock);
mutex_init(&dev->ctrl_urb_lock);
spin_lock_init(&dev->slock);
@@ -2978,51 +2994,62 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
switch (dev->chip_id) {
case CHIP_ID_EM2800:
- em28xx_info("chip ID is em2800\n");
+ chip_name = "em2800";
break;
case CHIP_ID_EM2710:
- em28xx_info("chip ID is em2710\n");
+ chip_name = "em2710";
break;
case CHIP_ID_EM2750:
- em28xx_info("chip ID is em2750\n");
+ chip_name = "em2750";
break;
case CHIP_ID_EM2820:
- em28xx_info("chip ID is em2820 (or em2710)\n");
+ chip_name = "em2710/2820";
break;
case CHIP_ID_EM2840:
- em28xx_info("chip ID is em2840\n");
+ chip_name = "em2840";
break;
case CHIP_ID_EM2860:
- em28xx_info("chip ID is em2860\n");
+ chip_name = "em2860";
break;
case CHIP_ID_EM2870:
- em28xx_info("chip ID is em2870\n");
+ chip_name = "em2870";
dev->wait_after_write = 0;
break;
case CHIP_ID_EM2874:
- em28xx_info("chip ID is em2874\n");
+ chip_name = "em2874";
dev->reg_gpio_num = EM2874_R80_GPIO;
dev->wait_after_write = 0;
break;
case CHIP_ID_EM28174:
- em28xx_info("chip ID is em28174\n");
+ chip_name = "em28174";
dev->reg_gpio_num = EM2874_R80_GPIO;
dev->wait_after_write = 0;
break;
case CHIP_ID_EM2883:
- em28xx_info("chip ID is em2882/em2883\n");
+ chip_name = "em2882/3";
dev->wait_after_write = 0;
break;
case CHIP_ID_EM2884:
- em28xx_info("chip ID is em2884\n");
+ chip_name = "em2884";
dev->reg_gpio_num = EM2874_R80_GPIO;
dev->wait_after_write = 0;
break;
default:
- em28xx_info("em28xx chip ID = %d\n", dev->chip_id);
+ printk(KERN_INFO DRIVER_NAME
+ ": unknown em28xx chip ID (%d)\n", dev->chip_id);
}
}
+ if (chip_name != default_chip_name)
+ printk(KERN_INFO DRIVER_NAME
+ ": chip ID is %s\n", chip_name);
+
+ /*
+ * For em2820/em2710, the name may change later, after checking
+ * if the device has a sensor (so, it is em2710) or not.
+ */
+ snprintf(dev->name, sizeof(dev->name), "%s #%d", chip_name, dev->devno);
+
if (dev->is_audio_only) {
retval = em28xx_audio_setup(dev);
if (retval)
@@ -3039,6 +3066,14 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
em28xx_pre_card_setup(dev);
+ if (dev->chip_id == CHIP_ID_EM2820) {
+ if (dev->board.is_webcam)
+ chip_name = "em2710";
+ else
+ chip_name = "em2820";
+ snprintf(dev->name, sizeof(dev->name), "%s #%d", chip_name, dev->devno);
+ }
+
if (!dev->board.is_em2800) {
/* Resets I2C speed */
retval = em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, dev->board.i2c_speed);
@@ -3056,6 +3091,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
return retval;
}
+ v4l2_ctrl_handler_init(hdl, 4);
+ dev->v4l2_dev.ctrl_handler = hdl;
+
/* register i2c bus */
retval = em28xx_i2c_register(dev);
if (retval < 0) {
@@ -3081,6 +3119,18 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
__func__, retval);
goto fail;
}
+ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
+ v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, 0, 0x1f, 1, 0x1f);
+ } else {
+ /* install the em28xx notify callback */
+ v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_MUTE),
+ em28xx_ctrl_notify, dev);
+ v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_VOLUME),
+ em28xx_ctrl_notify, dev);
+ }
/* wake i2c devices */
em28xx_wake_i2c(dev);
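
Note: the control-handler hunks in em28xx_init_dev() follow the standard v4l2_ctrl lifecycle. A condensed sketch (error paths trimmed; in the driver the new_std and notify paths are alternatives selected by the AC97 check, not run back to back):

    struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;

    v4l2_ctrl_handler_init(hdl, 4);		/* hint: expected number of controls */
    dev->v4l2_dev.ctrl_handler = hdl;
    /* AC97 boards: the bridge owns mute/volume itself ... */
    v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
    /* ... non-AC97 boards instead hook a notify callback onto the controls
     * that the audio subdevice has already created: */
    v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_MUTE),
    		 em28xx_ctrl_notify, dev);
    v4l2_ctrl_handler_setup(hdl);		/* push initial values to hardware */
    if (hdl->error)
    	v4l2_ctrl_handler_free(hdl);	/* also freed on release and on error paths */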
@@ -3110,6 +3160,11 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
msleep(3);
}
+ v4l2_ctrl_handler_setup(&dev->ctrl_handler);
+ retval = dev->ctrl_handler.error;
+ if (retval)
+ goto fail;
+
retval = em28xx_register_analog_devices(dev);
if (retval < 0) {
goto fail;
@@ -3122,6 +3177,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
fail:
em28xx_i2c_unregister(dev);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
unregister_dev:
v4l2_device_unregister(&dev->v4l2_dev);
@@ -3143,7 +3199,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
struct em28xx *dev = NULL;
int retval;
bool has_audio = false, has_video = false, has_dvb = false;
- int i, nr;
+ int i, nr, try_bulk;
const int ifnum = interface->altsetting[0].desc.bInterfaceNumber;
char *speed;
@@ -3183,9 +3239,10 @@ static int em28xx_usb_probe(struct usb_interface *interface,
}
/* compute alternate max packet sizes */
- dev->alt_max_pkt_size = kmalloc(sizeof(dev->alt_max_pkt_size[0]) *
+ dev->alt_max_pkt_size_isoc =
+ kmalloc(sizeof(dev->alt_max_pkt_size_isoc[0]) *
interface->num_altsetting, GFP_KERNEL);
- if (dev->alt_max_pkt_size == NULL) {
+ if (dev->alt_max_pkt_size_isoc == NULL) {
em28xx_errdev("out of memory!\n");
kfree(dev);
retval = -ENOMEM;
@@ -3208,25 +3265,67 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (udev->speed == USB_SPEED_HIGH)
size = size * hb_mult(sizedescr);
- if (usb_endpoint_xfer_isoc(e) &&
- usb_endpoint_dir_in(e)) {
+ if (usb_endpoint_dir_in(e)) {
switch (e->bEndpointAddress) {
- case EM28XX_EP_AUDIO:
- has_audio = true;
- break;
- case EM28XX_EP_ANALOG:
+ case 0x82:
has_video = true;
- dev->alt_max_pkt_size[i] = size;
+ if (usb_endpoint_xfer_isoc(e)) {
+ dev->analog_ep_isoc =
+ e->bEndpointAddress;
+ dev->alt_max_pkt_size_isoc[i] = size;
+ } else if (usb_endpoint_xfer_bulk(e)) {
+ dev->analog_ep_bulk =
+ e->bEndpointAddress;
+ }
break;
- case EM28XX_EP_DIGITAL:
- has_dvb = true;
- if (size > dev->dvb_max_pkt_size) {
- dev->dvb_max_pkt_size = size;
- dev->dvb_alt = i;
+ case 0x83:
+ if (usb_endpoint_xfer_isoc(e)) {
+ has_audio = true;
+ } else {
+ printk(KERN_INFO DRIVER_NAME
+ ": error: skipping audio endpoint 0x83, because it uses bulk transfers !\n");
+ }
+ break;
+ case 0x84:
+ if (has_video &&
+ (usb_endpoint_xfer_bulk(e))) {
+ dev->analog_ep_bulk =
+ e->bEndpointAddress;
+ } else {
+ has_dvb = true;
+ if (usb_endpoint_xfer_isoc(e)) {
+ dev->dvb_ep_isoc = e->bEndpointAddress;
+ if (size > dev->dvb_max_pkt_size_isoc) {
+ dev->dvb_max_pkt_size_isoc = size;
+ dev->dvb_alt_isoc = i;
+ }
+ } else {
+ dev->dvb_ep_bulk = e->bEndpointAddress;
+ }
}
break;
}
}
+ /* NOTE:
+ * Old logic with support for isoc transfers only was:
+ * 0x82 isoc => analog
+ * 0x83 isoc => audio
+ * 0x84 isoc => digital
+ *
+ * New logic with support for bulk transfers
+ * 0x82 isoc => analog
+ * 0x82 bulk => analog
+ * 0x83 isoc* => audio
+ * 0x84 isoc => digital
+ * 0x84 bulk => analog or digital**
+ * (*: audio should always be isoc)
+ * (**: analog, if ep 0x82 is isoc, otherwise digital)
+ *
+ * The new logic preserves backwards compatibility and
+ * reflects the endpoint configurations we have seen
+ * so far. But there might be devices for which this
+ * logic is not sufficient...
+ */
}
}
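
Note: the NOTE block above summarises the new endpoint scan. The following is a condensed, self-contained restatement of that classification; the struct and helper names are illustrative, the real probe code fills fields of struct em28xx directly:

    #include <linux/usb.h>

    struct em28xx_ep_map {
    	u8 analog_ep_isoc, analog_ep_bulk;
    	u8 dvb_ep_isoc, dvb_ep_bulk;
    	bool has_video, has_audio, has_dvb;
    };

    static void classify_in_ep(const struct usb_endpoint_descriptor *e,
    			   struct em28xx_ep_map *m)
    {
    	if (!usb_endpoint_dir_in(e))
    		return;

    	switch (e->bEndpointAddress) {
    	case 0x82:			/* analog video: isoc or bulk */
    		m->has_video = true;
    		if (usb_endpoint_xfer_isoc(e))
    			m->analog_ep_isoc = e->bEndpointAddress;
    		else if (usb_endpoint_xfer_bulk(e))
    			m->analog_ep_bulk = e->bEndpointAddress;
    		break;
    	case 0x83:			/* audio, isoc only */
    		if (usb_endpoint_xfer_isoc(e))
    			m->has_audio = true;
    		break;
    	case 0x84:			/* analog bulk or digital TS */
    		if (m->has_video && usb_endpoint_xfer_bulk(e)) {
    			m->analog_ep_bulk = e->bEndpointAddress;
    		} else {
    			m->has_dvb = true;
    			if (usb_endpoint_xfer_isoc(e))
    				m->dvb_ep_isoc = e->bEndpointAddress;
    			else
    				m->dvb_ep_bulk = e->bEndpointAddress;
    		}
    		break;
    	}
    }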
@@ -3261,19 +3360,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
ifnum,
interface->altsetting->desc.bInterfaceNumber);
- if (has_audio)
- printk(KERN_INFO DRIVER_NAME
- ": Audio Vendor Class interface %i found\n",
- ifnum);
- if (has_video)
- printk(KERN_INFO DRIVER_NAME
- ": Video interface %i found\n",
- ifnum);
- if (has_dvb)
- printk(KERN_INFO DRIVER_NAME
- ": DVB interface %i found\n",
- ifnum);
-
/*
* Make sure we have 480 Mbps of bandwidth, otherwise things like
* video stream wouldn't likely work, since 12 Mbps is generally
@@ -3287,7 +3373,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
goto err_free;
}
- snprintf(dev->name, sizeof(dev->name), "em28xx #%d", nr);
dev->devno = nr;
dev->model = id->driver_info;
dev->alt = -1;
@@ -3304,6 +3389,24 @@ static int em28xx_usb_probe(struct usb_interface *interface,
}
}
+ if (has_audio)
+ printk(KERN_INFO DRIVER_NAME
+ ": Audio interface %i found %s\n",
+ ifnum,
+ dev->has_audio_class ? "(USB Audio Class)" : "(Vendor Class)");
+ if (has_video)
+ printk(KERN_INFO DRIVER_NAME
+ ": Video interface %i found:%s%s\n",
+ ifnum,
+ dev->analog_ep_bulk ? " bulk" : "",
+ dev->analog_ep_isoc ? " isoc" : "");
+ if (has_dvb)
+ printk(KERN_INFO DRIVER_NAME
+ ": DVB interface %i found:%s%s\n",
+ ifnum,
+ dev->dvb_ep_bulk ? " bulk" : "",
+ dev->dvb_ep_isoc ? " isoc" : "");
+
dev->num_alt = interface->num_altsetting;
if ((unsigned)card[nr] < em28xx_bcount)
@@ -3312,6 +3415,9 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
+ /* initialize videobuf2 stuff */
+ em28xx_vb2_setup(dev);
+
/* allocate device struct */
mutex_init(&dev->lock);
mutex_lock(&dev->lock);
@@ -3320,13 +3426,46 @@ static int em28xx_usb_probe(struct usb_interface *interface,
goto unlock_and_free;
}
+ if (usb_xfer_mode < 0) {
+ if (dev->board.is_webcam)
+ try_bulk = 1;
+ else
+ try_bulk = 0;
+ } else {
+ try_bulk = usb_xfer_mode > 0;
+ }
+
+ /* Select USB transfer types to use */
+ if (has_video) {
+ if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk))
+ dev->analog_xfer_bulk = 1;
+ em28xx_info("analog set to %s mode.\n",
+ dev->analog_xfer_bulk ? "bulk" : "isoc");
+ }
if (has_dvb) {
- /* pre-allocate DVB isoc transfer buffers */
- retval = em28xx_alloc_isoc(dev, EM28XX_DIGITAL_MODE,
- EM28XX_DVB_MAX_PACKETS,
- EM28XX_DVB_NUM_BUFS,
- dev->dvb_max_pkt_size);
+ if (!dev->dvb_ep_isoc || (try_bulk && dev->dvb_ep_bulk))
+ dev->dvb_xfer_bulk = 1;
+
+ em28xx_info("dvb set to %s mode.\n",
+ dev->dvb_xfer_bulk ? "bulk" : "isoc");
+
+ /* pre-allocate DVB usb transfer buffers */
+ if (dev->dvb_xfer_bulk) {
+ retval = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
+ dev->dvb_xfer_bulk,
+ EM28XX_DVB_NUM_BUFS,
+ 512,
+ EM28XX_DVB_BULK_PACKET_MULTIPLIER);
+ } else {
+ retval = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
+ dev->dvb_xfer_bulk,
+ EM28XX_DVB_NUM_BUFS,
+ dev->dvb_max_pkt_size_isoc,
+ EM28XX_DVB_NUM_ISOC_PACKETS);
+ }
if (retval) {
+ printk(DRIVER_NAME
+ ": Failed to pre-allocate USB transfer buffers for DVB.\n");
goto unlock_and_free;
}
}
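
Note: the transfer-type selection above reduces to a small predicate, applied once for the analog path and once for the DVB path. A sketch (the helper name is made up, the driver does this inline):

    static bool em28xx_choose_bulk(bool is_webcam, int usb_xfer_mode,
    			       bool has_isoc_ep, bool has_bulk_ep)
    {
    	/* usb_xfer_mode: -1 = auto (bulk only for webcams),
    	 * 0 = prefer isoc, >0 = prefer bulk */
    	bool try_bulk = (usb_xfer_mode < 0) ? is_webcam : (usb_xfer_mode > 0);

    	/* bulk is also forced when no isoc endpoint exists at all */
    	return !has_isoc_ep || (try_bulk && has_bulk_ep);
    }

With usb_xfer_mode left at its default, TV sticks keep using isoc while webcam boards switch to bulk, which is what the "set to ... mode" log messages just above report.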
@@ -3344,7 +3483,7 @@ unlock_and_free:
mutex_unlock(&dev->lock);
err_free:
- kfree(dev->alt_max_pkt_size);
+ kfree(dev->alt_max_pkt_size_isoc);
kfree(dev);
err:
@@ -3370,6 +3509,8 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
if (!dev)
return;
+ dev->disconnected = 1;
+
if (dev->is_audio_only) {
mutex_lock(&dev->lock);
em28xx_close_extension(dev);
@@ -3381,35 +3522,28 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
flush_request_modules(dev);
- /* wait until all current v4l2 io is finished then deallocate
- resources */
mutex_lock(&dev->lock);
v4l2_device_disconnect(&dev->v4l2_dev);
if (dev->users) {
- em28xx_warn
- ("device %s is open! Deregistration and memory "
- "deallocation are deferred on close.\n",
- video_device_node_name(dev->vdev));
-
- dev->state |= DEV_MISCONFIGURED;
- em28xx_uninit_isoc(dev, dev->mode);
- dev->state |= DEV_DISCONNECTED;
- } else {
- dev->state |= DEV_DISCONNECTED;
- em28xx_release_resources(dev);
+ em28xx_warn("device %s is open! Deregistration and memory deallocation are deferred on close.\n",
+ video_device_node_name(dev->vdev));
+
+ em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
+ em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
}
- /* free DVB isoc buffers */
- em28xx_uninit_isoc(dev, EM28XX_DIGITAL_MODE);
+ em28xx_close_extension(dev);
+ /* NOTE: must be called BEFORE the resources are released */
- mutex_unlock(&dev->lock);
+ if (!dev->users)
+ em28xx_release_resources(dev);
- em28xx_close_extension(dev);
+ mutex_unlock(&dev->lock);
if (!dev->users) {
- kfree(dev->alt_max_pkt_size);
+ kfree(dev->alt_max_pkt_size_isoc);
kfree(dev);
}
}
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index bed07a6c33f8..aaedd11791f2 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -5,6 +5,7 @@
Markus Rechberger <mrechberger@gmail.com>
Mauro Carvalho Chehab <mchehab@infradead.org>
Sascha Sommer <saschasommer@freenet.de>
+ Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -76,7 +77,7 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
int ret;
int pipe = usb_rcvctrlpipe(dev->udev, 0);
- if (dev->state & DEV_DISCONNECTED)
+ if (dev->disconnected)
return -ENODEV;
if (len > URB_MAX_CTRL_SIZE)
@@ -100,7 +101,7 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
if (reg_debug)
printk(" failed!\n");
mutex_unlock(&dev->ctrl_urb_lock);
- return ret;
+ return usb_translate_errors(ret);
}
if (len)
@@ -152,7 +153,7 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
int ret;
int pipe = usb_sndctrlpipe(dev->udev, 0);
- if (dev->state & DEV_DISCONNECTED)
+ if (dev->disconnected)
return -ENODEV;
if ((len < 1) || (len > URB_MAX_CTRL_SIZE))
@@ -181,6 +182,9 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
0x0000, reg, dev->urb_buf, len, HZ);
mutex_unlock(&dev->ctrl_urb_lock);
+ if (ret < 0)
+ return usb_translate_errors(ret);
+
if (dev->wait_after_write)
msleep(dev->wait_after_write);
@@ -729,22 +733,24 @@ static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
return em28xx_write_regs(dev, EM28XX_R2B_YMAX, &ymax, 1);
}
-static int em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
+static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
u16 width, u16 height)
{
- u8 cwidth = width;
- u8 cheight = height;
- u8 overflow = (height >> 7 & 0x02) | (width >> 8 & 0x01);
+ u8 cwidth = width >> 2;
+ u8 cheight = height >> 2;
+ u8 overflow = (height >> 9 & 0x02) | (width >> 10 & 0x01);
+ /* NOTE: size limit: 2047x1023 = 2MPix */
- em28xx_coredbg("em28xx Area Set: (%d,%d)\n",
- (width | (overflow & 2) << 7),
- (height | (overflow & 1) << 8));
+ em28xx_coredbg("capture area set to (%d,%d): %dx%d\n",
+ hstart, vstart,
+ ((overflow & 2) << 9 | cwidth << 2),
+ ((overflow & 1) << 10 | cheight << 2));
em28xx_write_regs(dev, EM28XX_R1C_HSTART, &hstart, 1);
em28xx_write_regs(dev, EM28XX_R1D_VSTART, &vstart, 1);
em28xx_write_regs(dev, EM28XX_R1E_CWIDTH, &cwidth, 1);
em28xx_write_regs(dev, EM28XX_R1F_CHEIGHT, &cheight, 1);
- return em28xx_write_regs(dev, EM28XX_R1B_OFLOW, &overflow, 1);
+ em28xx_write_regs(dev, EM28XX_R1B_OFLOW, &overflow, 1);
}
static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v)
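
Note: em28xx_capture_area_set() now takes the full width/height and does the packing itself. A worked example for a common PAL frame size:

    /* width = 720, height = 576 (illustrative values):
     *   cwidth   = 720 >> 2 = 180                         -> EM28XX_R1E_CWIDTH
     *   cheight  = 576 >> 2 = 144                         -> EM28XX_R1F_CHEIGHT
     *   overflow = (576 >> 9 & 0x02) | (720 >> 10 & 0x01) = 0  -> EM28XX_R1B_OFLOW
     * The overflow register carries the bits that do not fit into the 8-bit
     * cwidth/cheight registers; for 720x576 nothing overflows.
     */

The matching change in em28xx_resolution_set() drops the ">> 2" at the call sites, so the shift now lives in one place.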
@@ -797,28 +803,30 @@ int em28xx_resolution_set(struct em28xx *dev)
it out, we end up with the same format as the rest of the VBI
region */
if (em28xx_vbi_supported(dev) == 1)
- em28xx_capture_area_set(dev, 0, 2, width >> 2, height >> 2);
+ em28xx_capture_area_set(dev, 0, 2, width, height);
else
- em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2);
+ em28xx_capture_area_set(dev, 0, 0, width, height);
return em28xx_scaler_set(dev, dev->hscale, dev->vscale);
}
+/* Set USB alternate setting for analog video */
int em28xx_set_alternate(struct em28xx *dev)
{
- int errCode, prev_alt = dev->alt;
+ int errCode;
int i;
unsigned int min_pkt_size = dev->width * 2 + 4;
- /*
- * alt = 0 is used only for control messages, so, only values
- * greater than 0 can be used for streaming.
- */
- if (alt && alt < dev->num_alt) {
+ /* NOTE: for isoc transfers, only alt settings > 0 are allowed;
+ bulk transfers seem to work only with alt=0! */
+ dev->alt = 0;
+ if ((alt > 0) && (alt < dev->num_alt)) {
em28xx_coredbg("alternate forced to %d\n", dev->alt);
dev->alt = alt;
goto set_alt;
}
+ if (dev->analog_xfer_bulk)
+ goto set_alt;
/* When image size is bigger than a certain value,
the frame size should be increased, otherwise, only
@@ -829,30 +837,38 @@ int em28xx_set_alternate(struct em28xx *dev)
for (i = 0; i < dev->num_alt; i++) {
/* stop when the selected alt setting offers enough bandwidth */
- if (dev->alt_max_pkt_size[i] >= min_pkt_size) {
+ if (dev->alt_max_pkt_size_isoc[i] >= min_pkt_size) {
dev->alt = i;
break;
/* otherwise make sure that we end up with the maximum bandwidth
because the min_pkt_size equation might be wrong...
*/
- } else if (dev->alt_max_pkt_size[i] >
- dev->alt_max_pkt_size[dev->alt])
+ } else if (dev->alt_max_pkt_size_isoc[i] >
+ dev->alt_max_pkt_size_isoc[dev->alt])
dev->alt = i;
}
set_alt:
- if (dev->alt != prev_alt) {
+ /* NOTE: for bulk transfers, we need to call usb_set_interface()
+ * even if the previous settings were the same. Otherwise streaming
+ * fails with all urbs having status = -EOVERFLOW ! */
+ if (dev->analog_xfer_bulk) {
+ dev->max_pkt_size = 512; /* USB 2.0 spec */
+ dev->packet_multiplier = EM28XX_BULK_PACKET_MULTIPLIER;
+ } else { /* isoc */
em28xx_coredbg("minimum isoc packet size: %u (alt=%d)\n",
- min_pkt_size, dev->alt);
- dev->max_pkt_size = dev->alt_max_pkt_size[dev->alt];
- em28xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n",
- dev->alt, dev->max_pkt_size);
- errCode = usb_set_interface(dev->udev, 0, dev->alt);
- if (errCode < 0) {
- em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
- dev->alt, errCode);
- return errCode;
- }
+ min_pkt_size, dev->alt);
+ dev->max_pkt_size =
+ dev->alt_max_pkt_size_isoc[dev->alt];
+ dev->packet_multiplier = EM28XX_NUM_ISOC_PACKETS;
+ }
+ em28xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n",
+ dev->alt, dev->max_pkt_size);
+ errCode = usb_set_interface(dev->udev, 0, dev->alt);
+ if (errCode < 0) {
+ em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
+ dev->alt, errCode);
+ return errCode;
}
return 0;
}
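
Note: a quick sanity check of the bandwidth rule used in em28xx_set_alternate() above:

    /* Example (numbers illustrative): for a 720 pixel wide capture,
     *   min_pkt_size = 720 * 2 + 4 = 1444 bytes per isoc packet,
     * so the loop picks the first altsetting whose stored max packet size
     * (already scaled by the high-bandwidth multiplier during probe) is
     * >= 1444, or falls back to the largest one.
     * In bulk mode the loop is skipped entirely: alt 0 is selected and
     * max_pkt_size is fixed at 512 bytes, the USB 2.0 bulk maximum.
     */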
@@ -919,7 +935,7 @@ EXPORT_SYMBOL_GPL(em28xx_set_mode);
------------------------------------------------------------------*/
/*
- * IRQ callback, called by URB callback
+ * URB completion handler for isoc/bulk transfers
*/
static void em28xx_irq_callback(struct urb *urb)
{
@@ -941,11 +957,12 @@ static void em28xx_irq_callback(struct urb *urb)
/* Copy data from URB */
spin_lock(&dev->slock);
- dev->isoc_ctl.isoc_copy(dev, urb);
+ dev->usb_ctl.urb_data_copy(dev, urb);
spin_unlock(&dev->slock);
/* Reset urb buffers */
for (i = 0; i < urb->number_of_packets; i++) {
+ /* isoc only (bulk: number_of_packets = 0) */
urb->iso_frame_desc[i].status = 0;
urb->iso_frame_desc[i].actual_length = 0;
}
@@ -961,49 +978,50 @@ static void em28xx_irq_callback(struct urb *urb)
/*
* Stop and Deallocate URBs
*/
-void em28xx_uninit_isoc(struct em28xx *dev, enum em28xx_mode mode)
+void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode)
{
struct urb *urb;
- struct em28xx_usb_isoc_bufs *isoc_bufs;
+ struct em28xx_usb_bufs *usb_bufs;
int i;
- em28xx_isocdbg("em28xx: called em28xx_uninit_isoc in mode %d\n", mode);
+ em28xx_isocdbg("em28xx: called em28xx_uninit_usb_xfer in mode %d\n",
+ mode);
if (mode == EM28XX_DIGITAL_MODE)
- isoc_bufs = &dev->isoc_ctl.digital_bufs;
+ usb_bufs = &dev->usb_ctl.digital_bufs;
else
- isoc_bufs = &dev->isoc_ctl.analog_bufs;
+ usb_bufs = &dev->usb_ctl.analog_bufs;
- for (i = 0; i < isoc_bufs->num_bufs; i++) {
- urb = isoc_bufs->urb[i];
+ for (i = 0; i < usb_bufs->num_bufs; i++) {
+ urb = usb_bufs->urb[i];
if (urb) {
if (!irqs_disabled())
usb_kill_urb(urb);
else
usb_unlink_urb(urb);
- if (isoc_bufs->transfer_buffer[i]) {
+ if (usb_bufs->transfer_buffer[i]) {
usb_free_coherent(dev->udev,
urb->transfer_buffer_length,
- isoc_bufs->transfer_buffer[i],
+ usb_bufs->transfer_buffer[i],
urb->transfer_dma);
}
usb_free_urb(urb);
- isoc_bufs->urb[i] = NULL;
+ usb_bufs->urb[i] = NULL;
}
- isoc_bufs->transfer_buffer[i] = NULL;
+ usb_bufs->transfer_buffer[i] = NULL;
}
- kfree(isoc_bufs->urb);
- kfree(isoc_bufs->transfer_buffer);
+ kfree(usb_bufs->urb);
+ kfree(usb_bufs->transfer_buffer);
- isoc_bufs->urb = NULL;
- isoc_bufs->transfer_buffer = NULL;
- isoc_bufs->num_bufs = 0;
+ usb_bufs->urb = NULL;
+ usb_bufs->transfer_buffer = NULL;
+ usb_bufs->num_bufs = 0;
em28xx_capture_start(dev, 0);
}
-EXPORT_SYMBOL_GPL(em28xx_uninit_isoc);
+EXPORT_SYMBOL_GPL(em28xx_uninit_usb_xfer);
/*
* Stop URBs
@@ -1012,7 +1030,7 @@ void em28xx_stop_urbs(struct em28xx *dev)
{
int i;
struct urb *urb;
- struct em28xx_usb_isoc_bufs *isoc_bufs = &dev->isoc_ctl.digital_bufs;
+ struct em28xx_usb_bufs *isoc_bufs = &dev->usb_ctl.digital_bufs;
em28xx_isocdbg("em28xx: called em28xx_stop_urbs\n");
@@ -1033,10 +1051,10 @@ EXPORT_SYMBOL_GPL(em28xx_stop_urbs);
/*
* Allocate URBs
*/
-int em28xx_alloc_isoc(struct em28xx *dev, enum em28xx_mode mode,
- int max_packets, int num_bufs, int max_pkt_size)
+int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
+ int num_bufs, int max_pkt_size, int packet_multiplier)
{
- struct em28xx_usb_isoc_bufs *isoc_bufs;
+ struct em28xx_usb_bufs *usb_bufs;
int i;
int sb_size, pipe;
struct urb *urb;
@@ -1044,140 +1062,180 @@ int em28xx_alloc_isoc(struct em28xx *dev, enum em28xx_mode mode,
em28xx_isocdbg("em28xx: called em28xx_alloc_isoc in mode %d\n", mode);
- if (mode == EM28XX_DIGITAL_MODE)
- isoc_bufs = &dev->isoc_ctl.digital_bufs;
- else
- isoc_bufs = &dev->isoc_ctl.analog_bufs;
+ /* Check mode and if we have an endpoint for the selected
+ transfer type, select buffer */
+ if (mode == EM28XX_DIGITAL_MODE) {
+ if ((xfer_bulk && !dev->dvb_ep_bulk) ||
+ (!xfer_bulk && !dev->dvb_ep_isoc)) {
+ em28xx_errdev("no endpoint for DVB mode and transfer type %d\n",
+ xfer_bulk > 0);
+ return -EINVAL;
+ }
+ usb_bufs = &dev->usb_ctl.digital_bufs;
+ } else if (mode == EM28XX_ANALOG_MODE) {
+ if ((xfer_bulk && !dev->analog_ep_bulk) ||
+ (!xfer_bulk && !dev->analog_ep_isoc)) {
+ em28xx_errdev("no endpoint for analog mode and transfer type %d\n",
+ xfer_bulk > 0);
+ return -EINVAL;
+ }
+ usb_bufs = &dev->usb_ctl.analog_bufs;
+ } else {
+ em28xx_errdev("invalid mode selected\n");
+ return -EINVAL;
+ }
/* De-allocates all pending stuff */
- em28xx_uninit_isoc(dev, mode);
+ em28xx_uninit_usb_xfer(dev, mode);
- isoc_bufs->num_bufs = num_bufs;
+ usb_bufs->num_bufs = num_bufs;
- isoc_bufs->urb = kzalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
- if (!isoc_bufs->urb) {
+ usb_bufs->urb = kzalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
+ if (!usb_bufs->urb) {
em28xx_errdev("cannot alloc memory for usb buffers\n");
return -ENOMEM;
}
- isoc_bufs->transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
+ usb_bufs->transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
GFP_KERNEL);
- if (!isoc_bufs->transfer_buffer) {
+ if (!usb_bufs->transfer_buffer) {
em28xx_errdev("cannot allocate memory for usb transfer\n");
- kfree(isoc_bufs->urb);
+ kfree(usb_bufs->urb);
return -ENOMEM;
}
- isoc_bufs->max_pkt_size = max_pkt_size;
- isoc_bufs->num_packets = max_packets;
- dev->isoc_ctl.vid_buf = NULL;
- dev->isoc_ctl.vbi_buf = NULL;
+ usb_bufs->max_pkt_size = max_pkt_size;
+ if (xfer_bulk)
+ usb_bufs->num_packets = 0;
+ else
+ usb_bufs->num_packets = packet_multiplier;
+ dev->usb_ctl.vid_buf = NULL;
+ dev->usb_ctl.vbi_buf = NULL;
- sb_size = isoc_bufs->num_packets * isoc_bufs->max_pkt_size;
+ sb_size = packet_multiplier * usb_bufs->max_pkt_size;
/* allocate urbs and transfer buffers */
- for (i = 0; i < isoc_bufs->num_bufs; i++) {
- urb = usb_alloc_urb(isoc_bufs->num_packets, GFP_KERNEL);
+ for (i = 0; i < usb_bufs->num_bufs; i++) {
+ urb = usb_alloc_urb(usb_bufs->num_packets, GFP_KERNEL);
if (!urb) {
- em28xx_err("cannot alloc isoc_ctl.urb %i\n", i);
- em28xx_uninit_isoc(dev, mode);
+ em28xx_err("cannot alloc usb_ctl.urb %i\n", i);
+ em28xx_uninit_usb_xfer(dev, mode);
return -ENOMEM;
}
- isoc_bufs->urb[i] = urb;
+ usb_bufs->urb[i] = urb;
- isoc_bufs->transfer_buffer[i] = usb_alloc_coherent(dev->udev,
+ usb_bufs->transfer_buffer[i] = usb_alloc_coherent(dev->udev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
- if (!isoc_bufs->transfer_buffer[i]) {
+ if (!usb_bufs->transfer_buffer[i]) {
em28xx_err("unable to allocate %i bytes for transfer"
" buffer %i%s\n",
sb_size, i,
in_interrupt() ? " while in int" : "");
- em28xx_uninit_isoc(dev, mode);
+ em28xx_uninit_usb_xfer(dev, mode);
return -ENOMEM;
}
- memset(isoc_bufs->transfer_buffer[i], 0, sb_size);
-
- /* FIXME: this is a hack - should be
- 'desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK'
- should also be using 'desc.bInterval'
- */
- pipe = usb_rcvisocpipe(dev->udev,
- mode == EM28XX_ANALOG_MODE ?
- EM28XX_EP_ANALOG : EM28XX_EP_DIGITAL);
-
- usb_fill_int_urb(urb, dev->udev, pipe,
- isoc_bufs->transfer_buffer[i], sb_size,
- em28xx_irq_callback, dev, 1);
-
- urb->number_of_packets = isoc_bufs->num_packets;
- urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
-
- k = 0;
- for (j = 0; j < isoc_bufs->num_packets; j++) {
- urb->iso_frame_desc[j].offset = k;
- urb->iso_frame_desc[j].length =
- isoc_bufs->max_pkt_size;
- k += isoc_bufs->max_pkt_size;
+ memset(usb_bufs->transfer_buffer[i], 0, sb_size);
+
+ if (xfer_bulk) { /* bulk */
+ pipe = usb_rcvbulkpipe(dev->udev,
+ mode == EM28XX_ANALOG_MODE ?
+ dev->analog_ep_bulk :
+ dev->dvb_ep_bulk);
+ usb_fill_bulk_urb(urb, dev->udev, pipe,
+ usb_bufs->transfer_buffer[i], sb_size,
+ em28xx_irq_callback, dev);
+ urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ } else { /* isoc */
+ pipe = usb_rcvisocpipe(dev->udev,
+ mode == EM28XX_ANALOG_MODE ?
+ dev->analog_ep_isoc :
+ dev->dvb_ep_isoc);
+ usb_fill_int_urb(urb, dev->udev, pipe,
+ usb_bufs->transfer_buffer[i], sb_size,
+ em28xx_irq_callback, dev, 1);
+ urb->transfer_flags = URB_ISO_ASAP |
+ URB_NO_TRANSFER_DMA_MAP;
+ k = 0;
+ for (j = 0; j < usb_bufs->num_packets; j++) {
+ urb->iso_frame_desc[j].offset = k;
+ urb->iso_frame_desc[j].length =
+ usb_bufs->max_pkt_size;
+ k += usb_bufs->max_pkt_size;
+ }
}
+
+ urb->number_of_packets = usb_bufs->num_packets;
}
return 0;
}
-EXPORT_SYMBOL_GPL(em28xx_alloc_isoc);
+EXPORT_SYMBOL_GPL(em28xx_alloc_urbs);
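
Note: em28xx_alloc_urbs() now prepares either bulk or isoc URBs from the same buffer pool. A stripped-down sketch of the difference; coherent-buffer allocation and URB_NO_TRANSFER_DMA_MAP handling are omitted, and the endpoint/sizes are parameters here whereas the driver reads them from struct em28xx:

    #include <linux/usb.h>

    static void em28xx_fill_example_urb(struct usb_device *udev, struct urb *urb,
    				    void *buf, int sb_size, int max_pkt_size,
    				    bool bulk, unsigned int ep,
    				    usb_complete_t complete_fn, void *ctx)
    {
    	if (bulk) {
    		/* one contiguous transfer, no frame descriptors */
    		usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep),
    				  buf, sb_size, complete_fn, ctx);
    		urb->number_of_packets = 0;
    	} else {
    		int j, off = 0;
    		int packets = sb_size / max_pkt_size;

    		/* interval 1 as in the driver; the buffer is carved into
    		 * fixed-size isoc frame slots */
    		usb_fill_int_urb(urb, udev, usb_rcvisocpipe(udev, ep),
    				 buf, sb_size, complete_fn, ctx, 1);
    		urb->transfer_flags |= URB_ISO_ASAP;
    		urb->number_of_packets = packets;
    		for (j = 0; j < packets; j++) {
    			urb->iso_frame_desc[j].offset = off;
    			urb->iso_frame_desc[j].length = max_pkt_size;
    			off += max_pkt_size;
    		}
    	}
    }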
/*
* Allocate URBs and start IRQ
*/
-int em28xx_init_isoc(struct em28xx *dev, enum em28xx_mode mode,
- int max_packets, int num_bufs, int max_pkt_size,
- int (*isoc_copy) (struct em28xx *dev, struct urb *urb))
+int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
+ int xfer_bulk, int num_bufs, int max_pkt_size,
+ int packet_multiplier,
+ int (*urb_data_copy) (struct em28xx *dev, struct urb *urb))
{
struct em28xx_dmaqueue *dma_q = &dev->vidq;
struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq;
- struct em28xx_usb_isoc_bufs *isoc_bufs;
+ struct em28xx_usb_bufs *usb_bufs;
int i;
int rc;
int alloc;
- em28xx_isocdbg("em28xx: called em28xx_init_isoc in mode %d\n", mode);
+ em28xx_isocdbg("em28xx: called em28xx_init_usb_xfer in mode %d\n",
+ mode);
- dev->isoc_ctl.isoc_copy = isoc_copy;
+ dev->usb_ctl.urb_data_copy = urb_data_copy;
if (mode == EM28XX_DIGITAL_MODE) {
- isoc_bufs = &dev->isoc_ctl.digital_bufs;
- /* no need to free/alloc isoc buffers in digital mode */
+ usb_bufs = &dev->usb_ctl.digital_bufs;
+ /* no need to free/alloc usb buffers in digital mode */
alloc = 0;
} else {
- isoc_bufs = &dev->isoc_ctl.analog_bufs;
+ usb_bufs = &dev->usb_ctl.analog_bufs;
alloc = 1;
}
if (alloc) {
- rc = em28xx_alloc_isoc(dev, mode, max_packets,
- num_bufs, max_pkt_size);
+ rc = em28xx_alloc_urbs(dev, mode, xfer_bulk, num_bufs,
+ max_pkt_size, packet_multiplier);
if (rc)
return rc;
}
+ if (xfer_bulk) {
+ rc = usb_clear_halt(dev->udev, usb_bufs->urb[0]->pipe);
+ if (rc < 0) {
+ em28xx_err("failed to clear USB bulk endpoint stall/halt condition (error=%i)\n",
+ rc);
+ em28xx_uninit_usb_xfer(dev, mode);
+ return rc;
+ }
+ }
+
init_waitqueue_head(&dma_q->wq);
init_waitqueue_head(&vbi_dma_q->wq);
em28xx_capture_start(dev, 1);
/* submit urbs and enables IRQ */
- for (i = 0; i < isoc_bufs->num_bufs; i++) {
- rc = usb_submit_urb(isoc_bufs->urb[i], GFP_ATOMIC);
+ for (i = 0; i < usb_bufs->num_bufs; i++) {
+ rc = usb_submit_urb(usb_bufs->urb[i], GFP_ATOMIC);
if (rc) {
em28xx_err("submit of urb %i failed (error=%i)\n", i,
rc);
- em28xx_uninit_isoc(dev, mode);
+ em28xx_uninit_usb_xfer(dev, mode);
return rc;
}
}
return 0;
}
-EXPORT_SYMBOL_GPL(em28xx_init_isoc);
+EXPORT_SYMBOL_GPL(em28xx_init_usb_xfer);
/*
* em28xx_wake_i2c()
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 63f2e7070c00..a81ec2e8cc9b 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -10,6 +10,8 @@
(c) 2008 Aidan Thornton <makosoft@googlemail.com>
+ (c) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
+
Based on cx88-dvb, saa7134-dvb and videobuf-dvb originally written by:
(c) 2004, 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
@@ -25,7 +27,9 @@
#include "em28xx.h"
#include <media/v4l2-common.h>
-#include <media/videobuf-vmalloc.h>
+#include <dvb_demux.h>
+#include <dvb_net.h>
+#include <dmxdev.h>
#include <media/tuner.h>
#include "tuner-simple.h"
#include <linux/gpio.h>
@@ -124,34 +128,47 @@ static inline void print_err_status(struct em28xx *dev,
}
}
-static inline int em28xx_dvb_isoc_copy(struct em28xx *dev, struct urb *urb)
+static inline int em28xx_dvb_urb_data_copy(struct em28xx *dev, struct urb *urb)
{
- int i;
+ int xfer_bulk, num_packets, i;
if (!dev)
return 0;
- if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ if (dev->disconnected)
return 0;
- if (urb->status < 0) {
+ if (urb->status < 0)
print_err_status(dev, -1, urb->status);
- if (urb->status == -ENOENT)
- return 0;
- }
- for (i = 0; i < urb->number_of_packets; i++) {
- int status = urb->iso_frame_desc[i].status;
+ xfer_bulk = usb_pipebulk(urb->pipe);
- if (status < 0) {
- print_err_status(dev, i, status);
- if (urb->iso_frame_desc[i].status != -EPROTO)
- continue;
- }
+ if (xfer_bulk) /* bulk */
+ num_packets = 1;
+ else /* isoc */
+ num_packets = urb->number_of_packets;
- dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer +
- urb->iso_frame_desc[i].offset,
- urb->iso_frame_desc[i].actual_length);
+ for (i = 0; i < num_packets; i++) {
+ if (xfer_bulk) {
+ if (urb->status < 0) {
+ print_err_status(dev, i, urb->status);
+ if (urb->status != -EPROTO)
+ continue;
+ }
+ dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer,
+ urb->actual_length);
+ } else {
+ if (urb->iso_frame_desc[i].status < 0) {
+ print_err_status(dev, i,
+ urb->iso_frame_desc[i].status);
+ if (urb->iso_frame_desc[i].status != -EPROTO)
+ continue;
+ }
+ dvb_dmx_swfilter(&dev->dvb->demux,
+ urb->transfer_buffer +
+ urb->iso_frame_desc[i].offset,
+ urb->iso_frame_desc[i].actual_length);
+ }
}
return 0;
@@ -161,24 +178,40 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
{
int rc;
struct em28xx *dev = dvb->adapter.priv;
- int max_dvb_packet_size;
+ int dvb_max_packet_size, packet_multiplier, dvb_alt;
+
+ if (dev->dvb_xfer_bulk) {
+ if (!dev->dvb_ep_bulk)
+ return -ENODEV;
+ dvb_max_packet_size = 512; /* USB 2.0 spec */
+ packet_multiplier = EM28XX_DVB_BULK_PACKET_MULTIPLIER;
+ dvb_alt = 0;
+ } else { /* isoc */
+ if (!dev->dvb_ep_isoc)
+ return -ENODEV;
+ dvb_max_packet_size = dev->dvb_max_pkt_size_isoc;
+ if (dvb_max_packet_size < 0)
+ return dvb_max_packet_size;
+ packet_multiplier = EM28XX_DVB_NUM_ISOC_PACKETS;
+ dvb_alt = dev->dvb_alt_isoc;
+ }
- usb_set_interface(dev->udev, 0, dev->dvb_alt);
+ usb_set_interface(dev->udev, 0, dvb_alt);
rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
if (rc < 0)
return rc;
- max_dvb_packet_size = dev->dvb_max_pkt_size;
- if (max_dvb_packet_size < 0)
- return max_dvb_packet_size;
dprintk(1, "Using %d buffers each with %d x %d bytes\n",
EM28XX_DVB_NUM_BUFS,
- EM28XX_DVB_MAX_PACKETS,
- max_dvb_packet_size);
-
- return em28xx_init_isoc(dev, EM28XX_DIGITAL_MODE,
- EM28XX_DVB_MAX_PACKETS, EM28XX_DVB_NUM_BUFS,
- max_dvb_packet_size, em28xx_dvb_isoc_copy);
+ packet_multiplier,
+ dvb_max_packet_size);
+
+ return em28xx_init_usb_xfer(dev, EM28XX_DIGITAL_MODE,
+ dev->dvb_xfer_bulk,
+ EM28XX_DVB_NUM_BUFS,
+ dvb_max_packet_size,
+ packet_multiplier,
+ em28xx_dvb_urb_data_copy);
}
static int em28xx_stop_streaming(struct em28xx_dvb *dvb)
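
Note: the streaming parameters chosen in em28xx_start_streaming() translate into per-URB buffer sizes inside em28xx_alloc_urbs(); the multipliers are constants from em28xx.h whose numeric values are not visible in this patch:

    /* sb_size = packet_multiplier * max_pkt_size, per URB:
     *   bulk:  512 * EM28XX_DVB_BULK_PACKET_MULTIPLIER, on altsetting 0
     *   isoc:  dvb_max_pkt_size_isoc * EM28XX_DVB_NUM_ISOC_PACKETS,
     *          on the altsetting (dvb_alt_isoc) recorded during probe
     * with EM28XX_DVB_NUM_BUFS such URBs submitted and kept in flight.
     */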
@@ -714,7 +747,8 @@ static struct tda18271_config em28xx_cxd2820r_tda18271_config = {
};
static const struct tda10071_config em28xx_tda10071_config = {
- .i2c_address = 0x55, /* (0xaa >> 1) */
+ .demod_i2c_addr = 0x55, /* (0xaa >> 1) */
+ .tuner_i2c_addr = 0x14,
.i2c_wr_max = 64,
.ts_mode = TDA10071_TS_SERIAL,
.spec_inv = 0,
@@ -1288,7 +1322,7 @@ static int em28xx_dvb_fini(struct em28xx *dev)
if (dev->dvb) {
struct em28xx_dvb *dvb = dev->dvb;
- if (dev->state & DEV_DISCONNECTED) {
+ if (dev->disconnected) {
/* We cannot tell the device to sleep
* once it has been unplugged. */
if (dvb->fe[0])
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 1683bd9d51ee..8532c1d4fd46 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -50,15 +50,18 @@ do { \
} while (0)
/*
- * em2800_i2c_send_max4()
- * send up to 4 bytes to the i2c device
+ * em2800_i2c_send_bytes()
+ * send up to 4 bytes to the em2800 i2c device
*/
-static int em2800_i2c_send_max4(struct em28xx *dev, unsigned char addr,
- char *buf, int len)
+static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
{
int ret;
int write_timeout;
- unsigned char b2[6];
+ u8 b2[6];
+
+ if (len < 1 || len > 4)
+ return -EOPNOTSUPP;
+
BUG_ON(len < 1 || len > 4);
b2[5] = 0x80 + len - 1;
b2[4] = addr;
@@ -70,165 +73,212 @@ static int em2800_i2c_send_max4(struct em28xx *dev, unsigned char addr,
if (len > 3)
b2[0] = buf[3];
+ /* trigger write */
ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len);
if (ret != 2 + len) {
- em28xx_warn("writing to i2c device failed (error=%i)\n", ret);
- return -EIO;
+ em28xx_warn("failed to trigger write to i2c address 0x%x "
+ "(error=%i)\n", addr, ret);
+ return (ret < 0) ? ret : -EIO;
}
- for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
+ /* wait for completion */
+ for (write_timeout = EM2800_I2C_XFER_TIMEOUT; write_timeout > 0;
write_timeout -= 5) {
ret = dev->em28xx_read_reg(dev, 0x05);
- if (ret == 0x80 + len - 1)
+ if (ret == 0x80 + len - 1) {
return len;
+ } else if (ret == 0x94 + len - 1) {
+ return -ENODEV;
+ } else if (ret < 0) {
+ em28xx_warn("failed to get i2c transfer status from "
+ "bridge register (error=%i)\n", ret);
+ return ret;
+ }
msleep(5);
}
- em28xx_warn("i2c write timed out\n");
+ em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
return -EIO;
}
/*
- * em2800_i2c_send_bytes()
- */
-static int em2800_i2c_send_bytes(void *data, unsigned char addr, char *buf,
- short len)
-{
- char *bufPtr = buf;
- int ret;
- int wrcount = 0;
- int count;
- int maxLen = 4;
- struct em28xx *dev = (struct em28xx *)data;
- while (len > 0) {
- count = (len > maxLen) ? maxLen : len;
- ret = em2800_i2c_send_max4(dev, addr, bufPtr, count);
- if (ret > 0) {
- len -= count;
- bufPtr += count;
- wrcount += count;
- } else
- return (ret < 0) ? ret : -EFAULT;
- }
- return wrcount;
-}
-
-/*
- * em2800_i2c_check_for_device()
- * check if there is a i2c_device at the supplied address
+ * em2800_i2c_recv_bytes()
+ * read up to 4 bytes from the em2800 i2c device
*/
-static int em2800_i2c_check_for_device(struct em28xx *dev, unsigned char addr)
+static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
{
- char msg;
+ u8 buf2[4];
int ret;
- int write_timeout;
- msg = addr;
- ret = dev->em28xx_write_regs(dev, 0x04, &msg, 1);
- if (ret < 0) {
- em28xx_warn("setting i2c device address failed (error=%i)\n",
- ret);
- return ret;
- }
- msg = 0x84;
- ret = dev->em28xx_write_regs(dev, 0x05, &msg, 1);
- if (ret < 0) {
- em28xx_warn("preparing i2c read failed (error=%i)\n", ret);
- return ret;
+ int read_timeout;
+ int i;
+
+ if (len < 1 || len > 4)
+ return -EOPNOTSUPP;
+
+ /* trigger read */
+ buf2[1] = 0x84 + len - 1;
+ buf2[0] = addr;
+ ret = dev->em28xx_write_regs(dev, 0x04, buf2, 2);
+ if (ret != 2) {
+ em28xx_warn("failed to trigger read from i2c address 0x%x "
+ "(error=%i)\n", addr, ret);
+ return (ret < 0) ? ret : -EIO;
}
- for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
- write_timeout -= 5) {
- unsigned reg = dev->em28xx_read_reg(dev, 0x5);
- if (reg == 0x94)
+ /* wait for completion */
+ for (read_timeout = EM2800_I2C_XFER_TIMEOUT; read_timeout > 0;
+ read_timeout -= 5) {
+ ret = dev->em28xx_read_reg(dev, 0x05);
+ if (ret == 0x84 + len - 1) {
+ break;
+ } else if (ret == 0x94 + len - 1) {
return -ENODEV;
- else if (reg == 0x84)
- return 0;
+ } else if (ret < 0) {
+ em28xx_warn("failed to get i2c transfer status from "
+ "bridge register (error=%i)\n", ret);
+ return ret;
+ }
msleep(5);
}
- return -ENODEV;
+ if (ret != 0x84 + len - 1)
+ em28xx_warn("read from i2c device at 0x%x timed out\n", addr);
+
+ /* get the received message */
+ ret = dev->em28xx_read_reg_req_len(dev, 0x00, 4-len, buf2, len);
+ if (ret != len) {
+ em28xx_warn("reading from i2c device at 0x%x failed: "
+ "couldn't get the received message from the bridge "
+ "(error=%i)\n", addr, ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+ for (i = 0; i < len; i++)
+ buf[i] = buf2[len - 1 - i];
+
+ return ret;
}
/*
- * em2800_i2c_recv_bytes()
- * read from the i2c device
+ * em2800_i2c_check_for_device()
+ * check if there is an i2c device at the supplied address
*/
-static int em2800_i2c_recv_bytes(struct em28xx *dev, unsigned char addr,
- char *buf, int len)
+static int em2800_i2c_check_for_device(struct em28xx *dev, u8 addr)
{
+ u8 buf;
int ret;
- /* check for the device and set i2c read address */
- ret = em2800_i2c_check_for_device(dev, addr);
- if (ret) {
- em28xx_warn
- ("preparing read at i2c address 0x%x failed (error=%i)\n",
- addr, ret);
- return ret;
- }
- ret = dev->em28xx_read_reg_req_len(dev, 0x0, 0x3, buf, len);
- if (ret < 0) {
- em28xx_warn("reading from i2c device at 0x%x failed (error=%i)",
- addr, ret);
- return ret;
- }
- return ret;
+
+ ret = em2800_i2c_recv_bytes(dev, addr, &buf, 1);
+ if (ret == 1)
+ return 0;
+ return (ret < 0) ? ret : -EIO;
}
/*
* em28xx_i2c_send_bytes()
*/
-static int em28xx_i2c_send_bytes(void *data, unsigned char addr, char *buf,
- short len, int stop)
+static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
+ u16 len, int stop)
{
- int wrcount = 0;
- struct em28xx *dev = (struct em28xx *)data;
int write_timeout, ret;
- wrcount = dev->em28xx_write_regs_req(dev, stop ? 2 : 3, addr, buf, len);
+ if (len < 1 || len > 64)
+ return -EOPNOTSUPP;
+ /* NOTE: limited by the USB ctrl message constraints
+ * Zero length reads always succeed, even if no device is connected */
+
+ /* Write to i2c device */
+ ret = dev->em28xx_write_regs_req(dev, stop ? 2 : 3, addr, buf, len);
+ if (ret != len) {
+ if (ret < 0) {
+ em28xx_warn("writing to i2c device at 0x%x failed "
+ "(error=%i)\n", addr, ret);
+ return ret;
+ } else {
+ em28xx_warn("%i bytes write to i2c device at 0x%x "
+ "requested, but %i bytes written\n",
+ len, addr, ret);
+ return -EIO;
+ }
+ }
- /* Seems to be required after a write */
- for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
+ /* Check success of the i2c operation */
+ for (write_timeout = EM2800_I2C_XFER_TIMEOUT; write_timeout > 0;
write_timeout -= 5) {
ret = dev->em28xx_read_reg(dev, 0x05);
- if (!ret)
- break;
+ if (ret == 0) { /* success */
+ return len;
+ } else if (ret == 0x10) {
+ return -ENODEV;
+ } else if (ret < 0) {
+ em28xx_warn("failed to read i2c transfer status from "
+ "bridge (error=%i)\n", ret);
+ return ret;
+ }
msleep(5);
+ /* NOTE: do we really have to wait for success ?
+ Never seen anything else than 0x00 or 0x10
+ (even with high payload) ... */
}
-
- return wrcount;
+ em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
+ return -EIO;
}
/*
* em28xx_i2c_recv_bytes()
* read a byte from the i2c device
*/
-static int em28xx_i2c_recv_bytes(struct em28xx *dev, unsigned char addr,
- char *buf, int len)
+static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
{
int ret;
+
+ if (len < 1 || len > 64)
+ return -EOPNOTSUPP;
+ /* NOTE: limited by the USB ctrl message constraints
+ * Zero length reads always succeed, even if no device is connected */
+
+ /* Read data from i2c device */
ret = dev->em28xx_read_reg_req_len(dev, 2, addr, buf, len);
+ if (ret != len) {
+ if (ret < 0) {
+ em28xx_warn("reading from i2c device at 0x%x failed "
+ "(error=%i)\n", addr, ret);
+ return ret;
+ } else {
+ em28xx_warn("%i bytes requested from i2c device at "
+ "0x%x, but %i bytes received\n",
+ len, addr, ret);
+ return -EIO;
+ }
+ }
+
+ /* Check success of the i2c operation */
+ ret = dev->em28xx_read_reg(dev, 0x05);
if (ret < 0) {
- em28xx_warn("reading i2c device failed (error=%i)\n", ret);
+ em28xx_warn("failed to read i2c transfer status from "
+ "bridge (error=%i)\n", ret);
return ret;
}
- if (dev->em28xx_read_reg(dev, 0x5) != 0)
- return -ENODEV;
- return ret;
+ if (ret > 0) {
+ if (ret == 0x10) {
+ return -ENODEV;
+ } else {
+ em28xx_warn("unknown i2c error (status=%i)\n", ret);
+ return -EIO;
+ }
+ }
+ return len;
}
/*
* em28xx_i2c_check_for_device()
* check if there is a i2c_device at the supplied address
*/
-static int em28xx_i2c_check_for_device(struct em28xx *dev, unsigned char addr)
+static int em28xx_i2c_check_for_device(struct em28xx *dev, u16 addr)
{
int ret;
+ u8 buf;
- ret = dev->em28xx_read_reg_req(dev, 2, addr);
- if (ret < 0) {
- em28xx_warn("reading from i2c device failed (error=%i)\n", ret);
- return ret;
- }
- if (dev->em28xx_read_reg(dev, 0x5) != 0)
- return -ENODEV;
- return 0;
+ ret = em28xx_i2c_recv_bytes(dev, addr, &buf, 1);
+ if (ret == 1)
+ return 0;
+ return (ret < 0) ? ret : -EIO;
}
/*
@@ -253,11 +303,11 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
rc = em2800_i2c_check_for_device(dev, addr);
else
rc = em28xx_i2c_check_for_device(dev, addr);
- if (rc < 0) {
- dprintk2(2, " no device\n");
+ if (rc == -ENODEV) {
+ if (i2c_debug >= 2)
+ printk(" no device\n");
return rc;
}
-
} else if (msgs[i].flags & I2C_M_RD) {
/* read bytes */
if (dev->board.is_em2800)
@@ -288,16 +338,16 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
msgs[i].len,
i == num - 1);
}
- if (rc < 0)
- goto err;
+ if (rc < 0) {
+ if (i2c_debug >= 2)
+ printk(" ERROR: %i\n", rc);
+ return rc;
+ }
if (i2c_debug >= 2)
printk("\n");
}
return num;
-err:
- dprintk2(2, " ERROR: %i\n", rc);
- return rc;
}
/* based on linux/sunrpc/svcauth.h and linux/hash.h
@@ -329,7 +379,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
{
unsigned char buf, *p = eedata;
struct em28xx_eeprom *em_eeprom = (void *)eedata;
- int i, err, size = len, block;
+ int i, err, size = len, block, block_max;
if (dev->chip_id == CHIP_ID_EM2874 ||
dev->chip_id == CHIP_ID_EM28174 ||
@@ -362,9 +412,15 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
dev->name, err);
return err;
}
+
+ if (dev->board.is_em2800)
+ block_max = 4;
+ else
+ block_max = 64;
+
while (size > 0) {
- if (size > 16)
- block = 16;
+ if (size > block_max)
+ block = block_max;
else
block = size;
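
Note: with the new per-chip transfer limits, the EEPROM dump loop above chunks accordingly. The arithmetic for a typical read (a 256 byte image is used here only as an example):

    /* Reading a 256 byte EEPROM image:
     *   em2800:        block_max = 4  -> 256 / 4  = 64 i2c read transfers
     *   other bridges: block_max = 64 -> 256 / 64 =  4 transfers
     * The 4 byte limit matches em2800_i2c_recv_bytes(), the 64 byte limit
     * matches the USB control message constraint in em28xx_i2c_recv_bytes().
     */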
@@ -449,7 +505,11 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
*/
static u32 functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_SMBUS_EMUL;
+ struct em28xx *dev = adap->algo_data;
+ u32 func_flags = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ if (dev->board.is_em2800)
+ func_flags &= ~I2C_FUNC_SMBUS_WRITE_BLOCK_DATA;
+ return func_flags;
}
static struct i2c_algorithm em28xx_algo = {
@@ -474,6 +534,7 @@ static struct i2c_client em28xx_client_template = {
* incomplete list of known devices
*/
static char *i2c_devs[128] = {
+ [0x3e >> 1] = "remote IR sensor",
[0x4a >> 1] = "saa7113h",
[0x52 >> 1] = "drxk",
[0x60 >> 1] = "remote IR sensor",
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 660bf803c9e4..1bef990b3f18 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -40,11 +40,6 @@ MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
#define MODULE_NAME "em28xx"
-#define i2cdprintk(fmt, arg...) \
- if (ir_debug) { \
- printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
- }
-
#define dprintk(fmt, arg...) \
if (ir_debug) { \
printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
@@ -57,8 +52,8 @@ MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
struct em28xx_ir_poll_result {
unsigned int toggle_bit:1;
unsigned int read_count:7;
- u8 rc_address;
- u8 rc_data[4]; /* 1 byte on em2860/2880, 4 on em2874 */
+
+ u32 scancode;
};
struct em28xx_IR {
@@ -67,12 +62,17 @@ struct em28xx_IR {
char name[32];
char phys[32];
- /* poll external decoder */
+ /* poll decoder */
int polling;
struct delayed_work work;
unsigned int full_code:1;
unsigned int last_readcount;
+ u64 rc_type;
+ /* i2c slave address of external device (if used) */
+ u16 i2c_dev_addr;
+
+ int (*get_key_i2c)(struct i2c_client *, u32 *);
int (*get_key)(struct em28xx_IR *, struct em28xx_ir_poll_result *);
};
@@ -80,21 +80,16 @@ struct em28xx_IR {
I2C IR based get keycodes - should be used with ir-kbd-i2c
**********************************************************/
-static int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+static int em28xx_get_key_terratec(struct i2c_client *i2c_dev, u32 *ir_key)
{
unsigned char b;
/* poll IR chip */
- if (1 != i2c_master_recv(ir->c, &b, 1)) {
- i2cdprintk("read error\n");
+ if (1 != i2c_master_recv(i2c_dev, &b, 1))
return -EIO;
- }
/* it seems that 0xFE indicates that a button is still hold
- down, while 0xff indicates that no button is hold
- down. 0xfe sequences are sometimes interrupted by 0xFF */
-
- i2cdprintk("key %02x\n", b);
+ down, while 0xff indicates that no button is hold down. */
if (b == 0xff)
return 0;
@@ -104,18 +99,17 @@ static int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
return 1;
*ir_key = b;
- *ir_raw = b;
return 1;
}
-static int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+static int em28xx_get_key_em_haup(struct i2c_client *i2c_dev, u32 *ir_key)
{
unsigned char buf[2];
u16 code;
int size;
/* poll IR chip */
- size = i2c_master_recv(ir->c, buf, sizeof(buf));
+ size = i2c_master_recv(i2c_dev, buf, sizeof(buf));
if (size != 2)
return -EIO;
@@ -124,8 +118,6 @@ static int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
if (buf[1] == 0xff)
return 0;
- ir->old = buf[1];
-
/*
* Rearranges bits to the right order.
* The bit order were determined experimentally by using
@@ -148,65 +140,51 @@ static int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
((buf[1] & 0x40) ? 0x0200 : 0) | /* 0000 0010 */
((buf[1] & 0x80) ? 0x0100 : 0); /* 0000 0001 */
- i2cdprintk("ir hauppauge (em2840): code=0x%02x (rcv=0x%02x%02x)\n",
- code, buf[1], buf[0]);
-
/* return key */
*ir_key = code;
- *ir_raw = code;
return 1;
}
-static int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
- u32 *ir_raw)
+static int em28xx_get_key_pinnacle_usb_grey(struct i2c_client *i2c_dev,
+ u32 *ir_key)
{
unsigned char buf[3];
/* poll IR chip */
- if (3 != i2c_master_recv(ir->c, buf, 3)) {
- i2cdprintk("read error\n");
+ if (3 != i2c_master_recv(i2c_dev, buf, 3))
return -EIO;
- }
- i2cdprintk("key %02x\n", buf[2]&0x3f);
if (buf[0] != 0x00)
return 0;
*ir_key = buf[2]&0x3f;
- *ir_raw = buf[2]&0x3f;
return 1;
}
-static int em28xx_get_key_winfast_usbii_deluxe(struct IR_i2c *ir, u32 *ir_key,
- u32 *ir_raw)
+static int em28xx_get_key_winfast_usbii_deluxe(struct i2c_client *i2c_dev,
+ u32 *ir_key)
{
unsigned char subaddr, keydetect, key;
- struct i2c_msg msg[] = { { .addr = ir->c->addr, .flags = 0, .buf = &subaddr, .len = 1},
-
- { .addr = ir->c->addr, .flags = I2C_M_RD, .buf = &keydetect, .len = 1} };
+ struct i2c_msg msg[] = { { .addr = i2c_dev->addr, .flags = 0, .buf = &subaddr, .len = 1},
+ { .addr = i2c_dev->addr, .flags = I2C_M_RD, .buf = &keydetect, .len = 1} };
subaddr = 0x10;
- if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
- i2cdprintk("read error\n");
+ if (2 != i2c_transfer(i2c_dev->adapter, msg, 2))
return -EIO;
- }
if (keydetect == 0x00)
return 0;
subaddr = 0x00;
msg[1].buf = &key;
- if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
- i2cdprintk("read error\n");
- return -EIO;
- }
+ if (2 != i2c_transfer(i2c_dev->adapter, msg, 2))
+ return -EIO;
if (key == 0x00)
return 0;
*ir_key = key;
- *ir_raw = key;
return 1;
}
@@ -236,11 +214,8 @@ static int default_polling_getkey(struct em28xx_IR *ir,
/* Infrared read count (Reg 0x45[6:0] */
poll_result->read_count = (msg[0] & 0x7f);
- /* Remote Control Address (Reg 0x46) */
- poll_result->rc_address = msg[1];
-
- /* Remote Control Data (Reg 0x47) */
- poll_result->rc_data[0] = msg[2];
+ /* Remote Control Address/Data (Regs 0x46/0x47) */
+ poll_result->scancode = msg[1] << 8 | msg[2];
return 0;
}
@@ -266,13 +241,35 @@ static int em2874_polling_getkey(struct em28xx_IR *ir,
/* Infrared read count (Reg 0x51[6:0] */
poll_result->read_count = (msg[0] & 0x7f);
- /* Remote Control Address (Reg 0x52) */
- poll_result->rc_address = msg[1];
-
- /* Remote Control Data (Reg 0x53-55) */
- poll_result->rc_data[0] = msg[2];
- poll_result->rc_data[1] = msg[3];
- poll_result->rc_data[2] = msg[4];
+ /*
+ * Remote Control Address (Reg 0x52)
+ * Remote Control Data (Reg 0x53-0x55)
+ */
+ switch (ir->rc_type) {
+ case RC_BIT_RC5:
+ poll_result->scancode = msg[1] << 8 | msg[2];
+ break;
+ case RC_BIT_NEC:
+ if ((msg[3] ^ msg[4]) != 0xff) /* 32 bits NEC */
+ poll_result->scancode = (msg[1] << 24) |
+ (msg[2] << 16) |
+ (msg[3] << 8) |
+ msg[4];
+ else if ((msg[1] ^ msg[2]) != 0xff) /* 24 bits NEC */
+ poll_result->scancode = (msg[1] << 16) |
+ (msg[2] << 8) |
+ msg[3];
+ else /* Normal NEC */
+ poll_result->scancode = msg[1] << 8 | msg[3];
+ break;
+ case RC_BIT_RC6_0:
+ poll_result->scancode = msg[1] << 8 | msg[2];
+ break;
+ default:
+ poll_result->scancode = (msg[1] << 24) | (msg[2] << 16) |
+ (msg[3] << 8) | msg[4];
+ break;
+ }
return 0;
}
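
Note: the em2874 scancode assembly above infers the NEC variant from the checksum bytes. A few worked examples (byte values made up for illustration):

    /* msg[1..4] = 04 fb 08 f7:  msg[3]^msg[4] == 0xff, msg[1]^msg[2] == 0xff
     *   -> plain NEC,   scancode = 0x0408          (msg[1] << 8 | msg[3])
     * msg[1..4] = 04 3b 08 f7:  msg[3]^msg[4] == 0xff, msg[1]^msg[2] != 0xff
     *   -> 24-bit NEC,  scancode = 0x043b08
     * msg[1..4] = 04 fb 08 09:  msg[3]^msg[4] != 0xff
     *   -> 32-bit NEC,  scancode = 0x04fb0809
     */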
@@ -281,6 +278,28 @@ static int em2874_polling_getkey(struct em28xx_IR *ir,
Polling code for em28xx
**********************************************************/
+static int em28xx_i2c_ir_handle_key(struct em28xx_IR *ir)
+{
+ static u32 ir_key;
+ int rc;
+ struct i2c_client client;
+
+ client.adapter = &ir->dev->i2c_adap;
+ client.addr = ir->i2c_dev_addr;
+
+ rc = ir->get_key_i2c(&client, &ir_key);
+ if (rc < 0) {
+ dprintk("ir->get_key_i2c() failed: %d\n", rc);
+ return rc;
+ }
+
+ if (rc) {
+ dprintk("%s: keycode = 0x%04x\n", __func__, ir_key);
+ rc_keydown(ir->rc, ir_key, 0);
+ }
+ return 0;
+}
+
static void em28xx_ir_handle_key(struct em28xx_IR *ir)
{
int result;
@@ -289,22 +308,21 @@ static void em28xx_ir_handle_key(struct em28xx_IR *ir)
/* read the registers containing the IR status */
result = ir->get_key(ir, &poll_result);
if (unlikely(result < 0)) {
- dprintk("ir->get_key() failed %d\n", result);
+ dprintk("ir->get_key() failed: %d\n", result);
return;
}
if (unlikely(poll_result.read_count != ir->last_readcount)) {
- dprintk("%s: toggle: %d, count: %d, key 0x%02x%02x\n", __func__,
+ dprintk("%s: toggle: %d, count: %d, key 0x%04x\n", __func__,
poll_result.toggle_bit, poll_result.read_count,
- poll_result.rc_address, poll_result.rc_data[0]);
+ poll_result.scancode);
if (ir->full_code)
rc_keydown(ir->rc,
- poll_result.rc_address << 8 |
- poll_result.rc_data[0],
+ poll_result.scancode,
poll_result.toggle_bit);
else
rc_keydown(ir->rc,
- poll_result.rc_data[0],
+ poll_result.scancode & 0xff,
poll_result.toggle_bit);
if (ir->dev->chip_id == CHIP_ID_EM2874 ||
@@ -324,7 +342,10 @@ static void em28xx_ir_work(struct work_struct *work)
{
struct em28xx_IR *ir = container_of(work, struct em28xx_IR, work.work);
- em28xx_ir_handle_key(ir);
+ if (ir->i2c_dev_addr) /* external i2c device */
+ em28xx_i2c_ir_handle_key(ir);
+ else /* internal device */
+ em28xx_ir_handle_key(ir);
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
}
@@ -345,93 +366,107 @@ static void em28xx_ir_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
}
-static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
+static int em2860_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
{
- int rc = 0;
struct em28xx_IR *ir = rc_dev->priv;
struct em28xx *dev = ir->dev;
- u8 ir_config = EM2874_IR_RC5;
-
- /* Adjust xclk based o IR table for RC5/NEC tables */
+ /* Adjust xclk based on IR table for RC5/NEC tables */
if (*rc_type & RC_BIT_RC5) {
dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
ir->full_code = 1;
*rc_type = RC_BIT_RC5;
} else if (*rc_type & RC_BIT_NEC) {
dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
- ir_config = EM2874_IR_NEC;
ir->full_code = 1;
*rc_type = RC_BIT_NEC;
- } else if (*rc_type != RC_BIT_UNKNOWN)
- rc = -EINVAL;
+ } else if (*rc_type & RC_BIT_UNKNOWN) {
+ *rc_type = RC_BIT_UNKNOWN;
+ } else {
+ *rc_type = ir->rc_type;
+ return -EINVAL;
+ }
+ em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
+ EM28XX_XCLK_IR_RC5_MODE);
+
+ ir->rc_type = *rc_type;
+ return 0;
+}
+
+static int em2874_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
+{
+ struct em28xx_IR *ir = rc_dev->priv;
+ struct em28xx *dev = ir->dev;
+ u8 ir_config = EM2874_IR_RC5;
+
+ /* Adjust xclk and set type based on IR table for RC5/NEC/RC6 tables */
+ if (*rc_type & RC_BIT_RC5) {
+ dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
+ ir->full_code = 1;
+ *rc_type = RC_BIT_RC5;
+ } else if (*rc_type & RC_BIT_NEC) {
+ dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
+ ir_config = EM2874_IR_NEC | EM2874_IR_NEC_NO_PARITY;
+ ir->full_code = 1;
+ *rc_type = RC_BIT_NEC;
+ } else if (*rc_type & RC_BIT_RC6_0) {
+ dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
+ ir_config = EM2874_IR_RC6_MODE_0;
+ ir->full_code = 1;
+ *rc_type = RC_BIT_RC6_0;
+ } else if (*rc_type & RC_BIT_UNKNOWN) {
+ *rc_type = RC_BIT_UNKNOWN;
+ } else {
+ *rc_type = ir->rc_type;
+ return -EINVAL;
+ }
+ em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1);
em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
EM28XX_XCLK_IR_RC5_MODE);
+ ir->rc_type = *rc_type;
+
+ return 0;
+}
+static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
+{
+ struct em28xx_IR *ir = rc_dev->priv;
+ struct em28xx *dev = ir->dev;
+
/* Setup the proper handler based on the chip */
switch (dev->chip_id) {
case CHIP_ID_EM2860:
case CHIP_ID_EM2883:
- ir->get_key = default_polling_getkey;
- break;
+ return em2860_ir_change_protocol(rc_dev, rc_type);
case CHIP_ID_EM2884:
case CHIP_ID_EM2874:
case CHIP_ID_EM28174:
- ir->get_key = em2874_polling_getkey;
- em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1);
- break;
+ return em2874_ir_change_protocol(rc_dev, rc_type);
default:
printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n",
dev->chip_id);
- rc = -EINVAL;
+ return -EINVAL;
}
-
- return rc;
}
-static void em28xx_register_i2c_ir(struct em28xx *dev)
+static int em28xx_probe_i2c_ir(struct em28xx *dev)
{
+ int i = 0;
/* Leadtek winfast tv USBII deluxe can find a non working IR-device */
/* at address 0x18, so if that address is needed for another board in */
/* the future, please put it after 0x1f. */
- struct i2c_board_info info;
const unsigned short addr_list[] = {
0x1f, 0x30, 0x47, I2C_CLIENT_END
};
- memset(&info, 0, sizeof(struct i2c_board_info));
- memset(&dev->init_data, 0, sizeof(dev->init_data));
- strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
-
- /* detect & configure */
- switch (dev->model) {
- case EM2800_BOARD_TERRATEC_CINERGY_200:
- case EM2820_BOARD_TERRATEC_CINERGY_250:
- dev->init_data.ir_codes = RC_MAP_EM_TERRATEC;
- dev->init_data.get_key = em28xx_get_key_terratec;
- dev->init_data.name = "i2c IR (EM28XX Terratec)";
- break;
- case EM2820_BOARD_PINNACLE_USB_2:
- dev->init_data.ir_codes = RC_MAP_PINNACLE_GREY;
- dev->init_data.get_key = em28xx_get_key_pinnacle_usb_grey;
- dev->init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
- break;
- case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
- dev->init_data.ir_codes = RC_MAP_HAUPPAUGE;
- dev->init_data.get_key = em28xx_get_key_em_haup;
- dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
- break;
- case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
- dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
- dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
- dev->init_data.name = "i2c IR (EM2820 Winfast TV USBII Deluxe)";
- break;
+ while (addr_list[i] != I2C_CLIENT_END) {
+ if (i2c_probe_func_quick_read(&dev->i2c_adap, addr_list[i]) == 1)
+ return addr_list[i];
+ i++;
}
- if (dev->init_data.name)
- info.platform_data = &dev->init_data;
- i2c_new_probed_device(&dev->i2c_adap, &info, addr_list, NULL);
+ return -ENODEV;
}
/**********************************************************
@@ -527,8 +562,21 @@ static int em28xx_ir_init(struct em28xx *dev)
struct rc_dev *rc;
int err = -ENOMEM;
u64 rc_type;
+ u16 i2c_rc_dev_addr = 0;
+
+ if (dev->board.has_snapshot_button)
+ em28xx_register_snapshot_button(dev);
+
+ if (dev->board.has_ir_i2c) {
+ i2c_rc_dev_addr = em28xx_probe_i2c_ir(dev);
+ if (!i2c_rc_dev_addr) {
+ dev->board.has_ir_i2c = 0;
+ em28xx_warn("No i2c IR remote control device found.\n");
+ return -ENODEV;
+ }
+ }
- if (dev->board.ir_codes == NULL) {
+ if (dev->board.ir_codes == NULL && !dev->board.has_ir_i2c) {
/* No remote control support */
em28xx_warn("Remote control support is not available for "
"this card.\n");
@@ -538,35 +586,77 @@ static int em28xx_ir_init(struct em28xx *dev)
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
rc = rc_allocate_device();
if (!ir || !rc)
- goto err_out_free;
+ goto error;
/* record handles to ourself */
ir->dev = dev;
dev->ir = ir;
ir->rc = rc;
- /*
- * em2874 supports more protocols. For now, let's just announce
- * the two protocols that were already tested
- */
- rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC;
rc->priv = ir;
- rc->change_protocol = em28xx_ir_change_protocol;
rc->open = em28xx_ir_start;
rc->close = em28xx_ir_stop;
- /* By default, keep protocol field untouched */
- rc_type = RC_BIT_UNKNOWN;
- err = em28xx_ir_change_protocol(rc, &rc_type);
- if (err)
- goto err_out_free;
+ if (dev->board.has_ir_i2c) { /* external i2c device */
+ switch (dev->model) {
+ case EM2800_BOARD_TERRATEC_CINERGY_200:
+ case EM2820_BOARD_TERRATEC_CINERGY_250:
+ rc->map_name = RC_MAP_EM_TERRATEC;
+ ir->get_key_i2c = em28xx_get_key_terratec;
+ break;
+ case EM2820_BOARD_PINNACLE_USB_2:
+ rc->map_name = RC_MAP_PINNACLE_GREY;
+ ir->get_key_i2c = em28xx_get_key_pinnacle_usb_grey;
+ break;
+ case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
+ rc->map_name = RC_MAP_HAUPPAUGE;
+ ir->get_key_i2c = em28xx_get_key_em_haup;
+ rc->allowed_protos = RC_BIT_RC5;
+ break;
+ case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
+ rc->map_name = RC_MAP_WINFAST_USBII_DELUXE;
+ ir->get_key_i2c = em28xx_get_key_winfast_usbii_deluxe;
+ break;
+ default:
+ err = -ENODEV;
+ goto error;
+ }
+
+ ir->i2c_dev_addr = i2c_rc_dev_addr;
+ } else { /* internal device */
+ switch (dev->chip_id) {
+ case CHIP_ID_EM2860:
+ case CHIP_ID_EM2883:
+ rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC;
+ ir->get_key = default_polling_getkey;
+ break;
+ case CHIP_ID_EM2884:
+ case CHIP_ID_EM2874:
+ case CHIP_ID_EM28174:
+ ir->get_key = em2874_polling_getkey;
+ rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC |
+ RC_BIT_RC6_0;
+ break;
+ default:
+ err = -ENODEV;
+ goto error;
+ }
+
+ rc->change_protocol = em28xx_ir_change_protocol;
+ rc->map_name = dev->board.ir_codes;
+
+ /* By default, keep protocol field untouched */
+ rc_type = RC_BIT_UNKNOWN;
+ err = em28xx_ir_change_protocol(rc, &rc_type);
+ if (err)
+ goto error;
+ }
/* This is how often we ask the chip for IR information */
ir->polling = 100; /* ms */
/* init input device */
- snprintf(ir->name, sizeof(ir->name), "em28xx IR (%s)",
- dev->name);
+ snprintf(ir->name, sizeof(ir->name), "em28xx IR (%s)", dev->name);
usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
@@ -578,28 +668,17 @@ static int em28xx_ir_init(struct em28xx *dev)
rc->input_id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
rc->input_id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
rc->dev.parent = &dev->udev->dev;
- rc->map_name = dev->board.ir_codes;
rc->driver_name = MODULE_NAME;
/* all done */
err = rc_register_device(rc);
if (err)
- goto err_out_stop;
-
- em28xx_register_i2c_ir(dev);
-
-#if defined(CONFIG_MODULES) && defined(MODULE)
- if (dev->board.has_ir_i2c)
- request_module("ir-kbd-i2c");
-#endif
- if (dev->board.has_snapshot_button)
- em28xx_register_snapshot_button(dev);
+ goto error;
return 0;
- err_out_stop:
+error:
dev->ir = NULL;
- err_out_free:
rc_free_device(rc);
kfree(ir);
return err;
diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h
index 6ff368297f6e..885089e22bcd 100644
--- a/drivers/media/usb/em28xx/em28xx-reg.h
+++ b/drivers/media/usb/em28xx/em28xx-reg.h
@@ -13,9 +13,9 @@
#define EM_GPO_3 (1 << 3)
/* em28xx endpoints */
-#define EM28XX_EP_ANALOG 0x82
+/* 0x82: (always ?) analog */
#define EM28XX_EP_AUDIO 0x83
-#define EM28XX_EP_DIGITAL 0x84
+/* 0x84: digital or analog */
/* em2800 registers */
#define EM2800_R08_AUDIOSRC 0x08
@@ -177,6 +177,7 @@
/* em2874 IR config register (0x50) */
#define EM2874_IR_NEC 0x00
+#define EM2874_IR_NEC_NO_PARITY 0x01
#define EM2874_IR_RC5 0x04
#define EM2874_IR_RC6_MODE_0 0x08
#define EM2874_IR_RC6_MODE_6A 0x0b
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 2b4c9cba2d67..39f39c527c13 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -41,105 +41,72 @@ MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
/* ------------------------------------------------------------------ */
-static void
-free_buffer(struct videobuf_queue *vq, struct em28xx_buffer *buf)
+static int vbi_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx *dev = fh->dev;
- unsigned long flags = 0;
- if (in_interrupt())
- BUG();
-
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
-
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
- spin_lock_irqsave(&dev->slock, flags);
- if (dev->isoc_ctl.vbi_buf == buf)
- dev->isoc_ctl.vbi_buf = NULL;
- spin_unlock_irqrestore(&dev->slock, flags);
+ struct em28xx *dev = vb2_get_drv_priv(vq);
+ unsigned long size;
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
-}
+ if (fmt)
+ size = fmt->fmt.pix.sizeimage;
+ else
+ size = dev->vbi_width * dev->vbi_height * 2;
-static int
-vbi_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
-{
- struct em28xx_fh *fh = q->priv_data;
- struct em28xx *dev = fh->dev;
+ if (0 == *nbuffers)
+ *nbuffers = 32;
+ if (*nbuffers < 2)
+ *nbuffers = 2;
+ if (*nbuffers > 32)
+ *nbuffers = 32;
- *size = dev->vbi_width * dev->vbi_height * 2;
+ *nplanes = 1;
+ sizes[0] = size;
- if (0 == *count)
- *count = vbibufs;
- if (*count < 2)
- *count = 2;
- if (*count > 32)
- *count = 32;
return 0;
}
-static int
-vbi_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int vbi_buffer_prepare(struct vb2_buffer *vb)
{
- struct em28xx_fh *fh = q->priv_data;
- struct em28xx *dev = fh->dev;
+ struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
- int rc = 0;
+ unsigned long size;
- buf->vb.size = dev->vbi_width * dev->vbi_height * 2;
+ size = dev->vbi_width * dev->vbi_height * 2;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ if (vb2_plane_size(vb, 0) < size) {
+ printk(KERN_INFO "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
return -EINVAL;
-
- buf->vb.width = dev->vbi_width;
- buf->vb.height = dev->vbi_height;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(q, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
}
+ vb2_set_plane_payload(&buf->vb, 0, size);
- buf->vb.state = VIDEOBUF_PREPARED;
return 0;
-
-fail:
- free_buffer(q, buf);
- return rc;
}
static void
-vbi_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
-{
- struct em28xx_buffer *buf = container_of(vb,
- struct em28xx_buffer,
- vb);
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx *dev = fh->dev;
- struct em28xx_dmaqueue *vbiq = &dev->vbiq;
-
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vbiq->active);
-}
-
-static void vbi_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+vbi_buffer_queue(struct vb2_buffer *vb)
{
+ struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
- free_buffer(q, buf);
+ struct em28xx_dmaqueue *vbiq = &dev->vbiq;
+ unsigned long flags = 0;
+
+ buf->mem = vb2_plane_vaddr(vb, 0);
+ buf->length = vb2_plane_size(vb, 0);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ list_add_tail(&buf->list, &vbiq->active);
+ spin_unlock_irqrestore(&dev->slock, flags);
}
-struct videobuf_queue_ops em28xx_vbi_qops = {
- .buf_setup = vbi_setup,
- .buf_prepare = vbi_prepare,
- .buf_queue = vbi_queue,
- .buf_release = vbi_release,
+
+struct vb2_ops em28xx_vbi_qops = {
+ .queue_setup = vbi_queue_setup,
+ .buf_prepare = vbi_buffer_prepare,
+ .buf_queue = vbi_buffer_queue,
+ .start_streaming = em28xx_start_analog_streaming,
+ .stop_streaming = em28xx_stop_vbi_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 1e553d357380..32bd7de5dec1 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -6,6 +6,7 @@
Markus Rechberger <mrechberger@gmail.com>
Mauro Carvalho Chehab <mchehab@infradead.org>
Sascha Sommer <saschasommer@freenet.de>
+ Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
Some parts based on SN9C10x PC Camera Controllers GPL driver made
by Luca Risolia <luca.risolia@studio.unibo.it>
@@ -39,6 +40,7 @@
#include "em28xx.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-chip-ident.h>
#include <media/msp3400.h>
#include <media/tuner.h>
@@ -74,9 +76,9 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(EM28XX_VERSION);
-static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int radio_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
+static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
+static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
+static unsigned int radio_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
module_param_array(video_nr, int, NULL, 0444);
module_param_array(vbi_nr, int, NULL, 0444);
@@ -124,101 +126,50 @@ static struct em28xx_fmt format[] = {
},
};
-/* supported controls */
-/* Common to all boards */
-static struct v4l2_queryctrl ac97_qctrl[] = {
- {
- .id = V4L2_CID_AUDIO_VOLUME,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Volume",
- .minimum = 0x0,
- .maximum = 0x1f,
- .step = 0x1,
- .default_value = 0x1f,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_AUDIO_MUTE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- .flags = 0,
- }
-};
-
/* ------------------------------------------------------------------
DMA and thread functions
------------------------------------------------------------------*/
/*
- * Announces that a buffer were filled and request the next
+ * Finish the current buffer
*/
-static inline void buffer_filled(struct em28xx *dev,
- struct em28xx_dmaqueue *dma_q,
- struct em28xx_buffer *buf)
+static inline void finish_buffer(struct em28xx *dev,
+ struct em28xx_buffer *buf)
{
- /* Advice that buffer was filled */
- em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
- dev->isoc_ctl.vid_buf = NULL;
+ buf->vb.v4l2_buf.sequence = dev->field_count++;
+ buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
-}
-
-static inline void vbi_buffer_filled(struct em28xx *dev,
- struct em28xx_dmaqueue *dma_q,
- struct em28xx_buffer *buf)
-{
- /* Advice that buffer was filled */
- em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
-
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
-
- dev->isoc_ctl.vbi_buf = NULL;
-
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
}
/*
- * Identify the buffer header type and properly handles
+ * Copy picture data from USB buffer to videobuf buffer
*/
static void em28xx_copy_video(struct em28xx *dev,
- struct em28xx_dmaqueue *dma_q,
struct em28xx_buffer *buf,
- unsigned char *p,
- unsigned char *outp, unsigned long len)
+ unsigned char *usb_buf,
+ unsigned long len)
{
void *fieldstart, *startwrite, *startread;
int linesdone, currlinedone, offset, lencopy, remain;
int bytesperline = dev->width << 1;
- if (dma_q->pos + len > buf->vb.size)
- len = buf->vb.size - dma_q->pos;
+ if (buf->pos + len > buf->length)
+ len = buf->length - buf->pos;
- startread = p;
+ startread = usb_buf;
remain = len;
- if (dev->progressive)
- fieldstart = outp;
- else {
- /* Interlaces two half frames */
- if (buf->top_field)
- fieldstart = outp;
- else
- fieldstart = outp + bytesperline;
- }
+ if (dev->progressive || buf->top_field)
+ fieldstart = buf->vb_buf;
+ else /* interlaced mode, even nr. of lines */
+ fieldstart = buf->vb_buf + bytesperline;
- linesdone = dma_q->pos / bytesperline;
- currlinedone = dma_q->pos % bytesperline;
+ linesdone = buf->pos / bytesperline;
+ currlinedone = buf->pos % bytesperline;
if (dev->progressive)
offset = linesdone * bytesperline + currlinedone;
@@ -229,11 +180,12 @@ static void em28xx_copy_video(struct em28xx *dev,
lencopy = bytesperline - currlinedone;
lencopy = lencopy > remain ? remain : lencopy;
- if ((char *)startwrite + lencopy > (char *)outp + buf->vb.size) {
+ if ((char *)startwrite + lencopy > (char *)buf->vb_buf + buf->length) {
em28xx_isocdbg("Overflow of %zi bytes past buffer end (1)\n",
- ((char *)startwrite + lencopy) -
- ((char *)outp + buf->vb.size));
- remain = (char *)outp + buf->vb.size - (char *)startwrite;
+ ((char *)startwrite + lencopy) -
+ ((char *)buf->vb_buf + buf->length));
+ remain = (char *)buf->vb_buf + buf->length -
+ (char *)startwrite;
lencopy = remain;
}
if (lencopy <= 0)
@@ -243,21 +195,24 @@ static void em28xx_copy_video(struct em28xx *dev,
remain -= lencopy;
while (remain > 0) {
- startwrite += lencopy + bytesperline;
+ if (dev->progressive)
+ startwrite += lencopy;
+ else
+ startwrite += lencopy + bytesperline;
startread += lencopy;
if (bytesperline > remain)
lencopy = remain;
else
lencopy = bytesperline;
- if ((char *)startwrite + lencopy > (char *)outp +
- buf->vb.size) {
+ if ((char *)startwrite + lencopy > (char *)buf->vb_buf +
+ buf->length) {
em28xx_isocdbg("Overflow of %zi bytes past buffer end"
"(2)\n",
((char *)startwrite + lencopy) -
- ((char *)outp + buf->vb.size));
- lencopy = remain = (char *)outp + buf->vb.size -
- (char *)startwrite;
+ ((char *)buf->vb_buf + buf->length));
+ lencopy = remain = (char *)buf->vb_buf + buf->length -
+ (char *)startwrite;
}
if (lencopy <= 0)
break;
@@ -267,57 +222,29 @@ static void em28xx_copy_video(struct em28xx *dev,
remain -= lencopy;
}
- dma_q->pos += len;
+ buf->pos += len;
}
+/*
+ * Copy VBI data from USB buffer to videobuf buffer
+ */
static void em28xx_copy_vbi(struct em28xx *dev,
- struct em28xx_dmaqueue *dma_q,
- struct em28xx_buffer *buf,
- unsigned char *p,
- unsigned char *outp, unsigned long len)
+ struct em28xx_buffer *buf,
+ unsigned char *usb_buf,
+ unsigned long len)
{
- void *startwrite, *startread;
- int offset;
- int bytesperline;
-
- if (dev == NULL) {
- em28xx_isocdbg("dev is null\n");
- return;
- }
- bytesperline = dev->vbi_width;
+ unsigned int offset;
- if (dma_q == NULL) {
- em28xx_isocdbg("dma_q is null\n");
- return;
- }
- if (buf == NULL) {
- return;
- }
- if (p == NULL) {
- em28xx_isocdbg("p is null\n");
- return;
- }
- if (outp == NULL) {
- em28xx_isocdbg("outp is null\n");
- return;
- }
-
- if (dma_q->pos + len > buf->vb.size)
- len = buf->vb.size - dma_q->pos;
-
- startread = p;
-
- startwrite = outp + dma_q->pos;
- offset = dma_q->pos;
+ if (buf->pos + len > buf->length)
+ len = buf->length - buf->pos;
+ offset = buf->pos;
/* Make sure the bottom field populates the second half of the frame */
- if (buf->top_field == 0) {
- startwrite += bytesperline * dev->vbi_height;
- offset += bytesperline * dev->vbi_height;
- }
+ if (buf->top_field == 0)
+ offset += dev->vbi_width * dev->vbi_height;
- memcpy(startwrite, startread, len);
- dma_q->pos += len;
+ memcpy(buf->vb_buf + offset, usb_buf, len);
+ buf->pos += len;
}
static inline void print_err_status(struct em28xx *dev,
@@ -360,470 +287,444 @@ static inline void print_err_status(struct em28xx *dev,
}
/*
- * video-buf generic routine to get the next available buffer
+ * get the next available buffer from dma queue
*/
-static inline void get_next_buf(struct em28xx_dmaqueue *dma_q,
- struct em28xx_buffer **buf)
+static inline struct em28xx_buffer *get_next_buf(struct em28xx *dev,
+ struct em28xx_dmaqueue *dma_q)
{
- struct em28xx *dev = container_of(dma_q, struct em28xx, vidq);
- char *outp;
+ struct em28xx_buffer *buf;
if (list_empty(&dma_q->active)) {
em28xx_isocdbg("No active queue to serve\n");
- dev->isoc_ctl.vid_buf = NULL;
- *buf = NULL;
- return;
+ return NULL;
}
/* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct em28xx_buffer, vb.queue);
-
+ buf = list_entry(dma_q->active.next, struct em28xx_buffer, list);
/* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0, (*buf)->vb.size);
+ list_del(&buf->list);
+ buf->pos = 0;
+ buf->vb_buf = buf->mem;
- dev->isoc_ctl.vid_buf = *buf;
-
- return;
+ return buf;
}
/*
- * video-buf generic routine to get the next available VBI buffer
+ * Finish the current buffer if completed and prepare for the next field
*/
-static inline void vbi_get_next_buf(struct em28xx_dmaqueue *dma_q,
- struct em28xx_buffer **buf)
-{
- struct em28xx *dev = container_of(dma_q, struct em28xx, vbiq);
- char *outp;
-
- if (list_empty(&dma_q->active)) {
- em28xx_isocdbg("No active queue to serve\n");
- dev->isoc_ctl.vbi_buf = NULL;
- *buf = NULL;
- return;
+static struct em28xx_buffer *
+finish_field_prepare_next(struct em28xx *dev,
+ struct em28xx_buffer *buf,
+ struct em28xx_dmaqueue *dma_q)
+{
+ if (dev->progressive || dev->top_field) { /* Brand new frame */
+ if (buf != NULL)
+ finish_buffer(dev, buf);
+ buf = get_next_buf(dev, dma_q);
+ }
+ if (buf != NULL) {
+ buf->top_field = dev->top_field;
+ buf->pos = 0;
}
- /* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct em28xx_buffer, vb.queue);
- /* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0x00, (*buf)->vb.size);
-
- dev->isoc_ctl.vbi_buf = *buf;
-
- return;
+ return buf;
}
/*
- * Controls the isoc copy of each urb packet
+ * Process data packet according to the em2710/em2750/em28xx frame data format
*/
-static inline int em28xx_isoc_copy(struct em28xx *dev, struct urb *urb)
+static inline void process_frame_data_em28xx(struct em28xx *dev,
+ unsigned char *data_pkt,
+ unsigned int data_len)
{
- struct em28xx_buffer *buf;
+ struct em28xx_buffer *buf = dev->usb_ctl.vid_buf;
+ struct em28xx_buffer *vbi_buf = dev->usb_ctl.vbi_buf;
struct em28xx_dmaqueue *dma_q = &dev->vidq;
- unsigned char *outp = NULL;
- int i, len = 0, rc = 1;
- unsigned char *p;
-
- if (!dev)
- return 0;
-
- if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
- return 0;
-
- if (urb->status < 0) {
- print_err_status(dev, -1, urb->status);
- if (urb->status == -ENOENT)
- return 0;
- }
-
- buf = dev->isoc_ctl.vid_buf;
- if (buf != NULL)
- outp = videobuf_to_vmalloc(&buf->vb);
-
- for (i = 0; i < urb->number_of_packets; i++) {
- int status = urb->iso_frame_desc[i].status;
+ struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq;
- if (status < 0) {
- print_err_status(dev, i, status);
- if (urb->iso_frame_desc[i].status != -EPROTO)
- continue;
+ /* capture type 0 = vbi start
+ capture type 1 = vbi in progress
+ capture type 2 = video start
+ capture type 3 = video in progress */
+ if (data_len >= 4) {
+ /* NOTE: Headers are always 4 bytes and
+ * never split across packets */
+ if (data_pkt[0] == 0x88 && data_pkt[1] == 0x88 &&
+ data_pkt[2] == 0x88 && data_pkt[3] == 0x88) {
+ /* Continuation */
+ data_pkt += 4;
+ data_len -= 4;
+ } else if (data_pkt[0] == 0x33 && data_pkt[1] == 0x95) {
+ /* Field start (VBI mode) */
+ dev->capture_type = 0;
+ dev->vbi_read = 0;
+ em28xx_isocdbg("VBI START HEADER !!!\n");
+ dev->top_field = !(data_pkt[2] & 1);
+ data_pkt += 4;
+ data_len -= 4;
+ } else if (data_pkt[0] == 0x22 && data_pkt[1] == 0x5a) {
+ /* Field start (VBI disabled) */
+ dev->capture_type = 2;
+ em28xx_isocdbg("VIDEO START HEADER !!!\n");
+ dev->top_field = !(data_pkt[2] & 1);
+ data_pkt += 4;
+ data_len -= 4;
}
+ }
+ /* NOTE: With bulk transfers, intermediate data packets
+ * have no continuation header */
- len = urb->iso_frame_desc[i].actual_length - 4;
+ if (dev->capture_type == 0) {
+ vbi_buf = finish_field_prepare_next(dev, vbi_buf, vbi_dma_q);
+ dev->usb_ctl.vbi_buf = vbi_buf;
+ dev->capture_type = 1;
+ }
- if (urb->iso_frame_desc[i].actual_length <= 0) {
- /* em28xx_isocdbg("packet %d is empty",i); - spammy */
- continue;
- }
- if (urb->iso_frame_desc[i].actual_length >
- dev->max_pkt_size) {
- em28xx_isocdbg("packet bigger than packet size");
- continue;
- }
+ if (dev->capture_type == 1) {
+ int vbi_size = dev->vbi_width * dev->vbi_height;
+ int vbi_data_len = ((dev->vbi_read + data_len) > vbi_size) ?
+ (vbi_size - dev->vbi_read) : data_len;
- p = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+ /* Copy VBI data */
+ if (vbi_buf != NULL)
+ em28xx_copy_vbi(dev, vbi_buf, data_pkt, vbi_data_len);
+ dev->vbi_read += vbi_data_len;
- /* FIXME: incomplete buffer checks where removed to make
- logic simpler. Impacts of those changes should be evaluated
- */
- if (p[0] == 0x33 && p[1] == 0x95 && p[2] == 0x00) {
- em28xx_isocdbg("VBI HEADER!!!\n");
- /* FIXME: Should add vbi copy */
- continue;
+ if (vbi_data_len < data_len) {
+ /* Continue with copying video data */
+ dev->capture_type = 2;
+ data_pkt += vbi_data_len;
+ data_len -= vbi_data_len;
}
- if (p[0] == 0x22 && p[1] == 0x5a) {
- em28xx_isocdbg("Video frame %d, length=%i, %s\n", p[2],
- len, (p[2] & 1) ? "odd" : "even");
-
- if (dev->progressive || !(p[2] & 1)) {
- if (buf != NULL)
- buffer_filled(dev, dma_q, buf);
- get_next_buf(dma_q, &buf);
- if (buf == NULL)
- outp = NULL;
- else
- outp = videobuf_to_vmalloc(&buf->vb);
- }
-
- if (buf != NULL) {
- if (p[2] & 1)
- buf->top_field = 0;
- else
- buf->top_field = 1;
- }
+ }
- dma_q->pos = 0;
- }
- if (buf != NULL) {
- if (p[0] != 0x88 && p[0] != 0x22) {
- em28xx_isocdbg("frame is not complete\n");
- len += 4;
- } else {
- p += 4;
- }
- em28xx_copy_video(dev, dma_q, buf, p, outp, len);
- }
+ if (dev->capture_type == 2) {
+ buf = finish_field_prepare_next(dev, buf, dma_q);
+ dev->usb_ctl.vid_buf = buf;
+ dev->capture_type = 3;
}
- return rc;
+
+ if (dev->capture_type == 3 && buf != NULL && data_len > 0)
+ em28xx_copy_video(dev, buf, data_pkt, data_len);
}
-/* Version of isoc handler that takes into account a mixture of video and
- VBI data */
-static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
+/* Processes and copies the URB data content (video and VBI data) */
+static inline int em28xx_urb_data_copy(struct em28xx *dev, struct urb *urb)
{
- struct em28xx_buffer *buf, *vbi_buf;
- struct em28xx_dmaqueue *dma_q = &dev->vidq;
- struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq;
- unsigned char *outp = NULL;
- unsigned char *vbioutp = NULL;
- int i, len = 0, rc = 1;
- unsigned char *p;
- int vbi_size;
+ int xfer_bulk, num_packets, i;
+ unsigned char *usb_data_pkt;
+ unsigned int usb_data_len;
if (!dev)
return 0;
- if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ if (dev->disconnected)
return 0;
- if (urb->status < 0) {
+ if (urb->status < 0)
print_err_status(dev, -1, urb->status);
- if (urb->status == -ENOENT)
- return 0;
- }
- buf = dev->isoc_ctl.vid_buf;
- if (buf != NULL)
- outp = videobuf_to_vmalloc(&buf->vb);
+ xfer_bulk = usb_pipebulk(urb->pipe);
- vbi_buf = dev->isoc_ctl.vbi_buf;
- if (vbi_buf != NULL)
- vbioutp = videobuf_to_vmalloc(&vbi_buf->vb);
+ if (xfer_bulk) /* bulk */
+ num_packets = 1;
+ else /* isoc */
+ num_packets = urb->number_of_packets;
- for (i = 0; i < urb->number_of_packets; i++) {
- int status = urb->iso_frame_desc[i].status;
+ for (i = 0; i < num_packets; i++) {
+ if (xfer_bulk) { /* bulk */
+ usb_data_len = urb->actual_length;
- if (status < 0) {
- print_err_status(dev, i, status);
- if (urb->iso_frame_desc[i].status != -EPROTO)
+ usb_data_pkt = urb->transfer_buffer;
+ } else { /* isoc */
+ if (urb->iso_frame_desc[i].status < 0) {
+ print_err_status(dev, i,
+ urb->iso_frame_desc[i].status);
+ if (urb->iso_frame_desc[i].status != -EPROTO)
+ continue;
+ }
+
+ usb_data_len = urb->iso_frame_desc[i].actual_length;
+ if (usb_data_len > dev->max_pkt_size) {
+ em28xx_isocdbg("packet bigger than packet size");
continue;
- }
+ }
- len = urb->iso_frame_desc[i].actual_length;
- if (urb->iso_frame_desc[i].actual_length <= 0) {
- /* em28xx_isocdbg("packet %d is empty",i); - spammy */
- continue;
- }
- if (urb->iso_frame_desc[i].actual_length >
- dev->max_pkt_size) {
- em28xx_isocdbg("packet bigger than packet size");
- continue;
+ usb_data_pkt = urb->transfer_buffer +
+ urb->iso_frame_desc[i].offset;
}
- p = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
-
- /* capture type 0 = vbi start
- capture type 1 = video start
- capture type 2 = video in progress */
- if (p[0] == 0x33 && p[1] == 0x95) {
- dev->capture_type = 0;
- dev->vbi_read = 0;
- em28xx_isocdbg("VBI START HEADER!!!\n");
- dev->cur_field = p[2];
- p += 4;
- len -= 4;
- } else if (p[0] == 0x88 && p[1] == 0x88 &&
- p[2] == 0x88 && p[3] == 0x88) {
- /* continuation */
- p += 4;
- len -= 4;
- } else if (p[0] == 0x22 && p[1] == 0x5a) {
- /* start video */
- p += 4;
- len -= 4;
+ if (usb_data_len == 0) {
+ /* NOTE: happens very often with isoc transfers */
+ /* em28xx_usbdbg("packet %d is empty",i); - spammy */
+ continue;
}
- vbi_size = dev->vbi_width * dev->vbi_height;
-
- if (dev->capture_type == 0) {
- if (dev->vbi_read >= vbi_size) {
- /* We've already read all the VBI data, so
- treat the rest as video */
- em28xx_isocdbg("dev->vbi_read > vbi_size\n");
- } else if ((dev->vbi_read + len) < vbi_size) {
- /* This entire frame is VBI data */
- if (dev->vbi_read == 0 &&
- (!(dev->cur_field & 1))) {
- /* Brand new frame */
- if (vbi_buf != NULL)
- vbi_buffer_filled(dev,
- vbi_dma_q,
- vbi_buf);
- vbi_get_next_buf(vbi_dma_q, &vbi_buf);
- if (vbi_buf == NULL)
- vbioutp = NULL;
- else
- vbioutp = videobuf_to_vmalloc(
- &vbi_buf->vb);
- }
-
- if (dev->vbi_read == 0) {
- vbi_dma_q->pos = 0;
- if (vbi_buf != NULL) {
- if (dev->cur_field & 1)
- vbi_buf->top_field = 0;
- else
- vbi_buf->top_field = 1;
- }
- }
-
- dev->vbi_read += len;
- em28xx_copy_vbi(dev, vbi_dma_q, vbi_buf, p,
- vbioutp, len);
- } else {
- /* Some of this frame is VBI data and some is
- video data */
- int vbi_data_len = vbi_size - dev->vbi_read;
- dev->vbi_read += vbi_data_len;
- em28xx_copy_vbi(dev, vbi_dma_q, vbi_buf, p,
- vbioutp, vbi_data_len);
- dev->capture_type = 1;
- p += vbi_data_len;
- len -= vbi_data_len;
- }
- }
+ process_frame_data_em28xx(dev, usb_data_pkt, usb_data_len);
+ }
+ return 1;
+}
- if (dev->capture_type == 1) {
- dev->capture_type = 2;
- if (dev->progressive || !(dev->cur_field & 1)) {
- if (buf != NULL)
- buffer_filled(dev, dma_q, buf);
- get_next_buf(dma_q, &buf);
- if (buf == NULL)
- outp = NULL;
- else
- outp = videobuf_to_vmalloc(&buf->vb);
- }
- if (buf != NULL) {
- if (dev->cur_field & 1)
- buf->top_field = 0;
- else
- buf->top_field = 1;
- }
- dma_q->pos = 0;
- }
+static int get_ressource(enum v4l2_buf_type f_type)
+{
+ switch (f_type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return EM28XX_RESOURCE_VIDEO;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ return EM28XX_RESOURCE_VBI;
+ default:
+ BUG();
+ return 0;
+ }
+}
- if (buf != NULL && dev->capture_type == 2) {
- if (len >= 4 && p[0] == 0x88 && p[1] == 0x88 &&
- p[2] == 0x88 && p[3] == 0x88) {
- p += 4;
- len -= 4;
- }
- if (len >= 4 && p[0] == 0x22 && p[1] == 0x5a) {
- em28xx_isocdbg("Video frame %d, len=%i, %s\n",
- p[2], len, (p[2] & 1) ?
- "odd" : "even");
- p += 4;
- len -= 4;
- }
+/* Usage lock check functions */
+static int res_get(struct em28xx *dev, enum v4l2_buf_type f_type)
+{
+ int res_type = get_ressource(f_type);
- if (len > 0)
- em28xx_copy_video(dev, dma_q, buf, p, outp,
- len);
- }
+ /* is it free? */
+ if (dev->resources & res_type) {
+ /* no, someone else uses it */
+ return -EBUSY;
}
- return rc;
+
+ /* it's free, grab it */
+ dev->resources |= res_type;
+ em28xx_videodbg("res: get %d\n", res_type);
+ return 0;
}
+static void res_free(struct em28xx *dev, enum v4l2_buf_type f_type)
+{
+ int res_type = get_ressource(f_type);
+
+ dev->resources &= ~res_type;
+ em28xx_videodbg("res: put %d\n", res_type);
+}
/* ------------------------------------------------------------------
- Videobuf operations
+ Videobuf2 operations
------------------------------------------------------------------*/
-static int
-buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx *dev = fh->dev;
- struct v4l2_frequency f;
+ struct em28xx *dev = vb2_get_drv_priv(vq);
+ unsigned long size;
- *size = (fh->dev->width * fh->dev->height * dev->format->depth + 7)
- >> 3;
-
- if (0 == *count)
- *count = EM28XX_DEF_BUF;
+ if (fmt)
+ size = fmt->fmt.pix.sizeimage;
+ else
+ size = (dev->width * dev->height * dev->format->depth + 7) >> 3;
- if (*count < EM28XX_MIN_BUF)
- *count = EM28XX_MIN_BUF;
+ if (size == 0)
+ return -EINVAL;
- /* Ask tuner to go to analog or radio mode */
- memset(&f, 0, sizeof(f));
- f.frequency = dev->ctl_freq;
- f.type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (0 == *nbuffers)
+ *nbuffers = 32;
- v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
+ *nplanes = 1;
+ sizes[0] = size;
return 0;
}
-/* This is called *without* dev->slock held; please keep it that way */
-static void free_buffer(struct videobuf_queue *vq, struct em28xx_buffer *buf)
+static int
+buffer_prepare(struct vb2_buffer *vb)
{
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx *dev = fh->dev;
- unsigned long flags = 0;
- if (in_interrupt())
- BUG();
+ struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
+ unsigned long size;
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
+ em28xx_videodbg("%s, field=%d\n", __func__, vb->v4l2_buf.field);
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
- spin_lock_irqsave(&dev->slock, flags);
- if (dev->isoc_ctl.vid_buf == buf)
- dev->isoc_ctl.vid_buf = NULL;
- spin_unlock_irqrestore(&dev->slock, flags);
+ size = (dev->width * dev->height * dev->format->depth + 7) >> 3;
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ if (vb2_plane_size(vb, 0) < size) {
+ em28xx_videodbg("%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(&buf->vb, 0, size);
+
+ return 0;
}
-static int
-buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
- struct em28xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
+ struct em28xx *dev = vb2_get_drv_priv(vq);
+ struct v4l2_frequency f;
+ int rc = 0;
- buf->vb.size = (fh->dev->width * fh->dev->height * dev->format->depth
- + 7) >> 3;
+ em28xx_videodbg("%s\n", __func__);
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
- return -EINVAL;
+ /* Make sure streaming is not already in progress for this type
+ of filehandle (e.g. video, vbi) */
+ rc = res_get(dev, vq->type);
+ if (rc)
+ return rc;
+
+ if (dev->streaming_users++ == 0) {
+ /* First active streaming user, so allocate all the URBs */
- buf->vb.width = dev->width;
- buf->vb.height = dev->height;
- buf->vb.field = field;
+ /* Allocate the USB bandwidth */
+ em28xx_set_alternate(dev);
+
+ /* Needed, since GPIO might have disabled power of
+ some i2c device
+ */
+ em28xx_wake_i2c(dev);
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
+ dev->capture_type = -1;
+ rc = em28xx_init_usb_xfer(dev, EM28XX_ANALOG_MODE,
+ dev->analog_xfer_bulk,
+ EM28XX_NUM_BUFS,
+ dev->max_pkt_size,
+ dev->packet_multiplier,
+ em28xx_urb_data_copy);
if (rc < 0)
goto fail;
- }
- if (!dev->isoc_ctl.analog_bufs.num_bufs)
- urb_init = 1;
+ /*
+ * djh: it's not clear whether this code is still needed. I'm
+ * leaving it in here for now entirely out of concern for
+ * backward compatibility (the old code did it)
+ */
- if (urb_init) {
- if (em28xx_vbi_supported(dev) == 1)
- rc = em28xx_init_isoc(dev, EM28XX_ANALOG_MODE,
- EM28XX_NUM_PACKETS,
- EM28XX_NUM_BUFS,
- dev->max_pkt_size,
- em28xx_isoc_copy_vbi);
+ /* Ask tuner to go to analog or radio mode */
+ memset(&f, 0, sizeof(f));
+ f.frequency = dev->ctl_freq;
+ if (vq->owner && vq->owner->vdev->vfl_type == VFL_TYPE_RADIO)
+ f.type = V4L2_TUNER_RADIO;
else
- rc = em28xx_init_isoc(dev, EM28XX_ANALOG_MODE,
- EM28XX_NUM_PACKETS,
- EM28XX_NUM_BUFS,
- dev->max_pkt_size,
- em28xx_isoc_copy);
- if (rc < 0)
- goto fail;
+ f.type = V4L2_TUNER_ANALOG_TV;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
}
- buf->vb.state = VIDEOBUF_PREPARED;
- return 0;
-
fail:
- free_buffer(vq, buf);
return rc;
}
-static void
-buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static int em28xx_stop_streaming(struct vb2_queue *vq)
{
- struct em28xx_buffer *buf = container_of(vb,
- struct em28xx_buffer,
- vb);
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx *dev = fh->dev;
- struct em28xx_dmaqueue *vidq = &dev->vidq;
+ struct em28xx *dev = vb2_get_drv_priv(vq);
+ struct em28xx_dmaqueue *vidq = &dev->vidq;
+ unsigned long flags = 0;
+
+ em28xx_videodbg("%s\n", __func__);
+
+ res_free(dev, vq->type);
+
+ if (dev->streaming_users-- == 1) {
+		/* Last active user, so shut down all the URBs */
+ em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
+ }
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&vidq->active)) {
+ struct em28xx_buffer *buf;
+ buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ dev->usb_ctl.vid_buf = NULL;
+ spin_unlock_irqrestore(&dev->slock, flags);
+ return 0;
}
-static void buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+int em28xx_stop_vbi_streaming(struct vb2_queue *vq)
{
- struct em28xx_buffer *buf = container_of(vb,
- struct em28xx_buffer,
- vb);
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx *dev = (struct em28xx *)fh->dev;
+ struct em28xx *dev = vb2_get_drv_priv(vq);
+ struct em28xx_dmaqueue *vbiq = &dev->vbiq;
+ unsigned long flags = 0;
+
+ em28xx_videodbg("%s\n", __func__);
- em28xx_isocdbg("em28xx: called buffer_release\n");
+ res_free(dev, vq->type);
- free_buffer(vq, buf);
+ if (dev->streaming_users-- == 1) {
+		/* Last active user, so shut down all the URBs */
+ em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
+ }
+
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&vbiq->active)) {
+ struct em28xx_buffer *buf;
+ buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ dev->usb_ctl.vbi_buf = NULL;
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ return 0;
}
-static struct videobuf_queue_ops em28xx_video_qops = {
- .buf_setup = buffer_setup,
+static void
+buffer_queue(struct vb2_buffer *vb)
+{
+ struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
+ struct em28xx_dmaqueue *vidq = &dev->vidq;
+ unsigned long flags = 0;
+
+ em28xx_videodbg("%s\n", __func__);
+ buf->mem = vb2_plane_vaddr(vb, 0);
+ buf->length = vb2_plane_size(vb, 0);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+static struct vb2_ops em28xx_video_qops = {
+ .queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .start_streaming = em28xx_start_analog_streaming,
+ .stop_streaming = em28xx_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
+int em28xx_vb2_setup(struct em28xx *dev)
+{
+ int rc;
+ struct vb2_queue *q;
+
+ /* Setup Videobuf2 for Video capture */
+ q = &dev->vb_vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct em28xx_buffer);
+ q->ops = &em28xx_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+
+ rc = vb2_queue_init(q);
+ if (rc < 0)
+ return rc;
+
+ /* Setup Videobuf2 for VBI capture */
+ q = &dev->vb_vbiq;
+ q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct em28xx_buffer);
+ q->ops = &em28xx_vbi_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+
+ rc = vb2_queue_init(q);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
/********************* v4l2 interface **************************************/
static void video_mux(struct em28xx *dev, int index)
@@ -856,143 +757,54 @@ static void video_mux(struct em28xx *dev, int index)
em28xx_audio_analog_set(dev);
}
-/* Usage lock check functions */
-static int res_get(struct em28xx_fh *fh, unsigned int bit)
-{
- struct em28xx *dev = fh->dev;
-
- if (fh->resources & bit)
- /* have it already allocated */
- return 1;
-
- /* is it free? */
- if (dev->resources & bit) {
- /* no, someone else uses it */
- return 0;
- }
- /* it's free, grab it */
- fh->resources |= bit;
- dev->resources |= bit;
- em28xx_videodbg("res: get %d\n", bit);
- return 1;
-}
-
-static int res_check(struct em28xx_fh *fh, unsigned int bit)
-{
- return fh->resources & bit;
-}
-
-static int res_locked(struct em28xx *dev, unsigned int bit)
-{
- return dev->resources & bit;
-}
-
-static void res_free(struct em28xx_fh *fh, unsigned int bits)
+void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
{
- struct em28xx *dev = fh->dev;
-
- BUG_ON((fh->resources & bits) != bits);
-
- fh->resources &= ~bits;
- dev->resources &= ~bits;
- em28xx_videodbg("res: put %d\n", bits);
-}
-
-static int get_ressource(struct em28xx_fh *fh)
-{
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return EM28XX_RESOURCE_VIDEO;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- return EM28XX_RESOURCE_VBI;
- default:
- BUG();
- return 0;
- }
-}
-
-/*
- * ac97_queryctrl()
- * return the ac97 supported controls
- */
-static int ac97_queryctrl(struct v4l2_queryctrl *qc)
-{
- int i;
+ struct em28xx *dev = priv;
- for (i = 0; i < ARRAY_SIZE(ac97_qctrl); i++) {
- if (qc->id && qc->id == ac97_qctrl[i].id) {
- memcpy(qc, &(ac97_qctrl[i]), sizeof(*qc));
- return 0;
- }
- }
-
- /* Control is not ac97 related */
- return 1;
-}
-
-/*
- * ac97_get_ctrl()
- * return the current values for ac97 mute and volume
- */
-static int ac97_get_ctrl(struct em28xx *dev, struct v4l2_control *ctrl)
-{
+ /*
+ * In the case of non-AC97 volume controls, we still need
+ * to do some setups at em28xx, in order to mute/unmute
+ * and to adjust audio volume. However, the value ranges
+ * should be checked by the corresponding V4L subdriver.
+ */
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- ctrl->value = dev->mute;
- return 0;
+ dev->mute = ctrl->val;
+ em28xx_audio_analog_set(dev);
+ break;
case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = dev->volume;
- return 0;
- default:
- /* Control is not ac97 related */
- return 1;
+ dev->volume = ctrl->val;
+ em28xx_audio_analog_set(dev);
+ break;
}
}
-/*
- * ac97_set_ctrl()
- * set values for ac97 mute and volume
- */
-static int ac97_set_ctrl(struct em28xx *dev, const struct v4l2_control *ctrl)
+static int em28xx_s_ctrl(struct v4l2_ctrl *ctrl)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ac97_qctrl); i++)
- if (ctrl->id == ac97_qctrl[i].id)
- goto handle;
-
- /* Announce that hasn't handle it */
- return 1;
-
-handle:
- if (ctrl->value < ac97_qctrl[i].minimum ||
- ctrl->value > ac97_qctrl[i].maximum)
- return -ERANGE;
+ struct em28xx *dev = container_of(ctrl->handler, struct em28xx, ctrl_handler);
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- dev->mute = ctrl->value;
+ dev->mute = ctrl->val;
break;
case V4L2_CID_AUDIO_VOLUME:
- dev->volume = ctrl->value;
+ dev->volume = ctrl->val;
break;
}
return em28xx_audio_analog_set(dev);
}
+const struct v4l2_ctrl_ops em28xx_ctrl_ops = {
+ .s_ctrl = em28xx_s_ctrl,
+};
+
static int check_dev(struct em28xx *dev)
{
- if (dev->state & DEV_DISCONNECTED) {
+ if (dev->disconnected) {
em28xx_errdev("v4l2 ioctl: device not present\n");
return -ENODEV;
}
-
- if (dev->state & DEV_MISCONFIGURED) {
- em28xx_errdev("v4l2 ioctl: device is misconfigured; "
- "close and open it again\n");
- return -EIO;
- }
return 0;
}
@@ -1072,8 +884,11 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
/* the em2800 can only scale down to 50% */
height = height > (3 * maxh / 4) ? maxh : maxh / 2;
width = width > (3 * maxw / 4) ? maxw : maxw / 2;
- /* MaxPacketSize for em2800 is too small to capture at full resolution
- * use half of maxw as the scaler can only scale to 50% */
+ /*
+ * MaxPacketSize for em2800 is too small to capture at full
+ * resolution use half of maxw as the scaler can only scale
+ * to 50%
+ */
if (width == maxw && height == maxh)
width /= 2;
} else {
@@ -1091,7 +906,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.width = width;
f->fmt.pix.height = height;
f->fmt.pix.pixelformat = fmt->fourcc;
- f->fmt.pix.bytesperline = (dev->width * fmt->depth + 7) >> 3;
+ f->fmt.pix.bytesperline = (width * fmt->depth + 7) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * height;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
if (dev->progressive)
@@ -1119,7 +934,6 @@ static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc,
/* set new image size */
get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
- em28xx_set_alternate(dev);
em28xx_resolution_set(dev);
return 0;
@@ -1128,21 +942,13 @@ static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
+ struct em28xx *dev = video_drvdata(file);
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
+ if (dev->streaming_users > 0)
+ return -EBUSY;
vidioc_try_fmt_vid_cap(file, priv, f);
- if (videobuf_queue_is_busy(&fh->vb_vidq)) {
- em28xx_errdev("%s queue busy\n", __func__);
- return -EBUSY;
- }
-
return em28xx_set_video_format(dev, f->fmt.pix.pixelformat,
f->fmt.pix.width, f->fmt.pix.height);
}
@@ -1153,6 +959,8 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
struct em28xx *dev = fh->dev;
int rc;
+ if (dev->board.is_webcam)
+ return -ENOTTY;
rc = check_dev(dev);
if (rc < 0)
return rc;
@@ -1168,6 +976,8 @@ static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *norm)
struct em28xx *dev = fh->dev;
int rc;
+ if (dev->board.is_webcam)
+ return -ENOTTY;
rc = check_dev(dev);
if (rc < 0)
return rc;
@@ -1184,15 +994,22 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
struct v4l2_format f;
int rc;
+ if (dev->board.is_webcam)
+ return -ENOTTY;
+ if (*norm == dev->norm)
+ return 0;
rc = check_dev(dev);
if (rc < 0)
return rc;
+ if (dev->streaming_users > 0)
+ return -EBUSY;
+
dev->norm = *norm;
/* Adjusts width/height, if needed */
- f.fmt.pix.width = dev->width;
- f.fmt.pix.height = dev->height;
+ f.fmt.pix.width = 720;
+ f.fmt.pix.height = (*norm & V4L2_STD_525_60) ? 480 : 576;
vidioc_try_fmt_vid_cap(file, priv, &f);
/* set new image size */
@@ -1216,6 +1033,7 @@ static int vidioc_g_parm(struct file *file, void *priv,
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ p->parm.capture.readbuffers = EM28XX_MIN_BUF;
if (dev->board.is_webcam)
rc = v4l2_device_call_until_err(&dev->v4l2_dev, 0,
video, g_parm, p);
@@ -1233,11 +1051,12 @@ static int vidioc_s_parm(struct file *file, void *priv,
struct em28xx *dev = fh->dev;
if (!dev->board.is_webcam)
- return -EINVAL;
+ return -ENOTTY;
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ p->parm.capture.readbuffers = EM28XX_MIN_BUF;
return v4l2_device_call_until_err(&dev->v4l2_dev, 0, video, s_parm, p);
}
@@ -1276,6 +1095,9 @@ static int vidioc_enum_input(struct file *file, void *priv,
i->type = V4L2_INPUT_TYPE_TUNER;
i->std = dev->vdev->tvnorms;
+ /* webcams do not have the STD API */
+ if (dev->board.is_webcam)
+ i->capabilities = 0;
return 0;
}
@@ -1375,131 +1197,6 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
return 0;
}
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int id = qc->id;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- memset(qc, 0, sizeof(*qc));
-
- qc->id = id;
-
- /* enumerate AC97 controls */
- if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
- rc = ac97_queryctrl(qc);
- if (!rc)
- return 0;
- }
-
- /* enumerate V4L2 device controls */
- v4l2_device_call_all(&dev->v4l2_dev, 0, core, queryctrl, qc);
-
- if (qc->type)
- return 0;
- else
- return -EINVAL;
-}
-
-/*
- * FIXME: This is an indirect way to check if a control exists at a
- * subdev. Instead of that hack, maybe the better would be to change all
- * subdevs to return -ENOIOCTLCMD, if an ioctl is not supported.
- */
-static int check_subdev_ctrl(struct em28xx *dev, int id)
-{
- struct v4l2_queryctrl qc;
-
- memset(&qc, 0, sizeof(qc));
- qc.id = id;
-
- /* enumerate V4L2 device controls */
- v4l2_device_call_all(&dev->v4l2_dev, 0, core, queryctrl, &qc);
-
- if (qc.type)
- return 0;
- else
- return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
- rc = 0;
-
- /* Set an AC97 control */
- if (dev->audio_mode.ac97 != EM28XX_NO_AC97)
- rc = ac97_get_ctrl(dev, ctrl);
- else
- rc = 1;
-
- /* It were not an AC97 control. Sends it to the v4l2 dev interface */
- if (rc == 1) {
- if (check_subdev_ctrl(dev, ctrl->id))
- return -EINVAL;
-
- v4l2_device_call_all(&dev->v4l2_dev, 0, core, g_ctrl, ctrl);
- rc = 0;
- }
-
- return rc;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- /* Set an AC97 control */
- if (dev->audio_mode.ac97 != EM28XX_NO_AC97)
- rc = ac97_set_ctrl(dev, ctrl);
- else
- rc = 1;
-
- /* It isn't an AC97 control. Sends it to the v4l2 dev interface */
- if (rc == 1) {
- rc = check_subdev_ctrl(dev, ctrl->id);
- if (!rc)
- v4l2_device_call_all(&dev->v4l2_dev, 0,
- core, s_ctrl, ctrl);
- /*
- * In the case of non-AC97 volume controls, we still need
- * to do some setups at em28xx, in order to mute/unmute
- * and to adjust audio volume. However, the value ranges
- * should be checked by the corresponding V4L subdriver.
- */
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- dev->mute = ctrl->value;
- rc = em28xx_audio_analog_set(dev);
- break;
- case V4L2_CID_AUDIO_VOLUME:
- dev->volume = ctrl->value;
- rc = em28xx_audio_analog_set(dev);
- }
- }
- return (rc < 0) ? rc : 0;
-}
-
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
@@ -1515,7 +1212,6 @@ static int vidioc_g_tuner(struct file *file, void *priv,
return -EINVAL;
strcpy(t->name, "Tuner");
- t->type = V4L2_TUNER_ANALOG_TV;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
return 0;
@@ -1545,7 +1241,9 @@ static int vidioc_g_frequency(struct file *file, void *priv,
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
- f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (0 != f->tuner)
+ return -EINVAL;
+
f->frequency = dev->ctl_freq;
return 0;
}
@@ -1564,13 +1262,9 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (0 != f->tuner)
return -EINVAL;
- if (unlikely(0 == fh->radio && f->type != V4L2_TUNER_ANALOG_TV))
- return -EINVAL;
- if (unlikely(1 == fh->radio && f->type != V4L2_TUNER_RADIO))
- return -EINVAL;
-
- dev->ctl_freq = f->frequency;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, f);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_frequency, f);
+ dev->ctl_freq = f->frequency;
return 0;
}
@@ -1596,6 +1290,14 @@ static int vidioc_g_chip_ident(struct file *file, void *priv,
chip->ident = V4L2_IDENT_NONE;
chip->revision = 0;
+ if (chip->match.type == V4L2_CHIP_MATCH_HOST) {
+ if (v4l2_chip_match_host(&chip->match))
+ chip->ident = V4L2_IDENT_NONE;
+ return 0;
+ }
+ if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+ chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+ return -EINVAL;
v4l2_device_call_all(&dev->v4l2_dev, 0, core, g_chip_ident, chip);
@@ -1704,72 +1406,10 @@ static int vidioc_cropcap(struct file *file, void *priv,
return 0;
}
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc = -EINVAL;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (unlikely(type != fh->type))
- return -EINVAL;
-
- em28xx_videodbg("vidioc_streamon fh=%p t=%d fh->res=%d dev->res=%d\n",
- fh, type, fh->resources, dev->resources);
-
- if (unlikely(!res_get(fh, get_ressource(fh))))
- return -EBUSY;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- rc = videobuf_streamon(&fh->vb_vidq);
- else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- rc = videobuf_streamon(&fh->vb_vbiq);
-
- return rc;
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- fh->type != V4L2_BUF_TYPE_VBI_CAPTURE)
- return -EINVAL;
- if (type != fh->type)
- return -EINVAL;
-
- em28xx_videodbg("vidioc_streamoff fh=%p t=%d fh->res=%d dev->res=%d\n",
- fh, type, fh->resources, dev->resources);
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (res_check(fh, EM28XX_RESOURCE_VIDEO)) {
- videobuf_streamoff(&fh->vb_vidq);
- res_free(fh, EM28XX_RESOURCE_VIDEO);
- }
- } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- if (res_check(fh, EM28XX_RESOURCE_VBI)) {
- videobuf_streamoff(&fh->vb_vbiq);
- res_free(fh, EM28XX_RESOURCE_VBI);
- }
- }
-
- return 0;
-}
-
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
+ struct video_device *vdev = video_devdata(file);
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
@@ -1777,20 +1417,26 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->capabilities =
- V4L2_CAP_SLICED_VBI_CAPTURE |
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
-
- if (dev->vbi_dev)
- cap->capabilities |= V4L2_CAP_VBI_CAPTURE;
+ if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ cap->device_caps = V4L2_CAP_READWRITE |
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ else if (vdev->vfl_type == VFL_TYPE_RADIO)
+ cap->device_caps = V4L2_CAP_RADIO;
+ else
+ cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_VBI_CAPTURE;
if (dev->audio_mode.has_audio)
- cap->capabilities |= V4L2_CAP_AUDIO;
+ cap->device_caps |= V4L2_CAP_AUDIO;
if (dev->tuner_type != TUNER_ABSENT)
- cap->capabilities |= V4L2_CAP_TUNER;
+ cap->device_caps |= V4L2_CAP_TUNER;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS |
+ V4L2_CAP_READWRITE | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ if (dev->vbi_dev)
+ cap->capabilities |= V4L2_CAP_VBI_CAPTURE;
+ if (dev->radio_dev)
+ cap->capabilities |= V4L2_CAP_RADIO;
return 0;
}
@@ -1845,46 +1491,6 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
return 0;
}
-/* Sliced VBI ioctls */
-static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- f->fmt.sliced.service_set = 0;
- v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
-
- if (f->fmt.sliced.service_set == 0)
- rc = -EINVAL;
-
- return rc;
-}
-
-static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
-
- if (f->fmt.sliced.service_set == 0)
- return -EINVAL;
-
- return 0;
-}
-
/* RAW VBI ioctls */
static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
@@ -1900,6 +1506,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
format->fmt.vbi.sampling_rate = 6750000 * 4 / 2;
format->fmt.vbi.count[0] = dev->vbi_height;
format->fmt.vbi.count[1] = dev->vbi_height;
+ memset(format->fmt.vbi.reserved, 0, sizeof(format->fmt.vbi.reserved));
/* Varies by video standard (NTSC, PAL, etc.) */
if (dev->norm & V4L2_STD_525_60) {
@@ -1928,6 +1535,7 @@ static int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
format->fmt.vbi.sampling_rate = 6750000 * 4 / 2;
format->fmt.vbi.count[0] = dev->vbi_height;
format->fmt.vbi.count[1] = dev->vbi_height;
+ memset(format->fmt.vbi.reserved, 0, sizeof(format->fmt.vbi.reserved));
/* Varies by video standard (NTSC, PAL, etc.) */
if (dev->norm & V4L2_STD_525_60) {
@@ -1943,100 +1551,10 @@ static int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *rb)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return videobuf_reqbufs(&fh->vb_vidq, rb);
- else
- return videobuf_reqbufs(&fh->vb_vbiq, rb);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *b)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return videobuf_querybuf(&fh->vb_vidq, b);
- else {
- /* FIXME: I'm not sure yet whether this is a bug in zvbi or
- the videobuf framework, but we probably shouldn't be
- returning a buffer larger than that which was asked for.
- At a minimum, it causes a crash in zvbi since it does
- a memcpy based on the source buffer length */
- int result = videobuf_querybuf(&fh->vb_vbiq, b);
- b->length = dev->vbi_width * dev->vbi_height * 2;
-
- return result;
- }
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return videobuf_qbuf(&fh->vb_vidq, b);
- else
- return videobuf_qbuf(&fh->vb_vbiq, b);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags &
- O_NONBLOCK);
- else
- return videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags &
- O_NONBLOCK);
-}
-
/* ----------------------------------------------------------- */
/* RADIO ESPECIFIC IOCTLS */
/* ----------------------------------------------------------- */
-static int radio_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct em28xx *dev = ((struct em28xx_fh *)priv)->dev;
-
- strlcpy(cap->driver, "em28xx", sizeof(cap->driver));
- strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
- usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
-
- cap->capabilities = V4L2_CAP_TUNER;
- return 0;
-}
-
static int radio_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
@@ -2053,26 +1571,6 @@ static int radio_g_tuner(struct file *file, void *priv,
return 0;
}
-static int radio_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
-{
- if (i->index != 0)
- return -EINVAL;
- strcpy(i->name, "Radio");
- i->type = V4L2_INPUT_TYPE_TUNER;
-
- return 0;
-}
-
-static int radio_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
-{
- if (unlikely(a->index))
- return -EINVAL;
-
- strcpy(a->name, "Radio");
- return 0;
-}
-
static int radio_s_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
@@ -2086,48 +1584,16 @@ static int radio_s_tuner(struct file *file, void *priv,
return 0;
}
-static int radio_s_audio(struct file *file, void *fh,
- const struct v4l2_audio *a)
-{
- return 0;
-}
-
-static int radio_s_input(struct file *file, void *fh, unsigned int i)
-{
- return 0;
-}
-
-static int radio_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- int i;
-
- if (qc->id < V4L2_CID_BASE ||
- qc->id >= V4L2_CID_LASTP1)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ac97_qctrl); i++) {
- if (qc->id && qc->id == ac97_qctrl[i].id) {
- memcpy(qc, &(ac97_qctrl[i]), sizeof(*qc));
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
/*
* em28xx_v4l2_open()
* inits the device and starts isoc transfer
*/
static int em28xx_v4l2_open(struct file *filp)
{
- int errCode = 0, radio = 0;
struct video_device *vdev = video_devdata(filp);
struct em28xx *dev = video_drvdata(filp);
enum v4l2_buf_type fh_type = 0;
struct em28xx_fh *fh;
- enum v4l2_field field;
switch (vdev->vfl_type) {
case VFL_TYPE_GRABBER:
@@ -2136,9 +1602,6 @@ static int em28xx_v4l2_open(struct file *filp)
case VFL_TYPE_VBI:
fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
break;
- case VFL_TYPE_RADIO:
- radio = 1;
- break;
}
em28xx_videodbg("open dev=%s type=%s users=%d\n",
@@ -2154,14 +1617,13 @@ static int em28xx_v4l2_open(struct file *filp)
mutex_unlock(&dev->lock);
return -ENOMEM;
}
+ v4l2_fh_init(&fh->fh, vdev);
fh->dev = dev;
- fh->radio = radio;
fh->type = fh_type;
filp->private_data = fh;
if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
- em28xx_set_alternate(dev);
em28xx_resolution_set(dev);
/* Needed, since GPIO might have disabled power of
@@ -2170,31 +1632,18 @@ static int em28xx_v4l2_open(struct file *filp)
em28xx_wake_i2c(dev);
}
- if (fh->radio) {
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO) {
em28xx_videodbg("video_open: setting radio device\n");
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_radio);
}
dev->users++;
- if (dev->progressive)
- field = V4L2_FIELD_NONE;
- else
- field = V4L2_FIELD_INTERLACED;
-
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &em28xx_video_qops,
- NULL, &dev->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE, field,
- sizeof(struct em28xx_buffer), fh, &dev->lock);
-
- videobuf_queue_vmalloc_init(&fh->vb_vbiq, &em28xx_vbi_qops,
- NULL, &dev->slock,
- V4L2_BUF_TYPE_VBI_CAPTURE,
- V4L2_FIELD_SEQ_TB,
- sizeof(struct em28xx_buffer), fh, &dev->lock);
mutex_unlock(&dev->lock);
+ v4l2_fh_add(&fh->fh);
- return errCode;
+ return 0;
}
/*
@@ -2248,25 +1697,16 @@ static int em28xx_v4l2_close(struct file *filp)
em28xx_videodbg("users=%d\n", dev->users);
mutex_lock(&dev->lock);
- if (res_check(fh, EM28XX_RESOURCE_VIDEO)) {
- videobuf_stop(&fh->vb_vidq);
- res_free(fh, EM28XX_RESOURCE_VIDEO);
- }
-
- if (res_check(fh, EM28XX_RESOURCE_VBI)) {
- videobuf_stop(&fh->vb_vbiq);
- res_free(fh, EM28XX_RESOURCE_VBI);
- }
+ vb2_fop_release(filp);
if (dev->users == 1) {
/* the device is already disconnect,
free the remaining resources */
- if (dev->state & DEV_DISCONNECTED) {
+ if (dev->disconnected) {
em28xx_release_resources(dev);
- kfree(dev->alt_max_pkt_size);
+ kfree(dev->alt_max_pkt_size_isoc);
mutex_unlock(&dev->lock);
kfree(dev);
- kfree(fh);
return 0;
}
@@ -2274,7 +1714,6 @@ static int em28xx_v4l2_close(struct file *filp)
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
/* do this before setting alternate! */
- em28xx_uninit_isoc(dev, EM28XX_ANALOG_MODE);
em28xx_set_mode(dev, EM28XX_SUSPEND);
/* set alternate 0 */
@@ -2287,129 +1726,18 @@ static int em28xx_v4l2_close(struct file *filp)
}
}
- videobuf_mmap_free(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vbiq);
- kfree(fh);
dev->users--;
mutex_unlock(&dev->lock);
return 0;
}
-/*
- * em28xx_v4l2_read()
- * will allocate buffers when called for the first time
- */
-static ssize_t
-em28xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
- loff_t *pos)
-{
- struct em28xx_fh *fh = filp->private_data;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- /* FIXME: read() is not prepared to allow changing the video
- resolution while streaming. Seems a bug at em28xx_set_fmt
- */
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (res_locked(dev, EM28XX_RESOURCE_VIDEO))
- rc = -EBUSY;
- else
- rc = videobuf_read_stream(&fh->vb_vidq, buf, count, pos, 0,
- filp->f_flags & O_NONBLOCK);
- } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- if (!res_get(fh, EM28XX_RESOURCE_VBI))
- rc = -EBUSY;
- else
- rc = videobuf_read_stream(&fh->vb_vbiq, buf, count, pos, 0,
- filp->f_flags & O_NONBLOCK);
- }
- mutex_unlock(&dev->lock);
-
- return rc;
-}
-
-/*
- * em28xx_poll()
- * will allocate buffers when called for the first time
- */
-static unsigned int em28xx_poll(struct file *filp, poll_table *wait)
-{
- struct em28xx_fh *fh = filp->private_data;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (!res_get(fh, EM28XX_RESOURCE_VIDEO))
- return POLLERR;
- return videobuf_poll_stream(filp, &fh->vb_vidq, wait);
- } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- if (!res_get(fh, EM28XX_RESOURCE_VBI))
- return POLLERR;
- return videobuf_poll_stream(filp, &fh->vb_vbiq, wait);
- } else {
- return POLLERR;
- }
-}
-
-static unsigned int em28xx_v4l2_poll(struct file *filp, poll_table *wait)
-{
- struct em28xx_fh *fh = filp->private_data;
- struct em28xx *dev = fh->dev;
- unsigned int res;
-
- mutex_lock(&dev->lock);
- res = em28xx_poll(filp, wait);
- mutex_unlock(&dev->lock);
- return res;
-}
-
-/*
- * em28xx_v4l2_mmap()
- */
-static int em28xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct em28xx_fh *fh = filp->private_data;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- rc = videobuf_mmap_mapper(&fh->vb_vidq, vma);
- else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- rc = videobuf_mmap_mapper(&fh->vb_vbiq, vma);
- mutex_unlock(&dev->lock);
-
- em28xx_videodbg("vma start=0x%08lx, size=%ld, ret=%d\n",
- (unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
- rc);
-
- return rc;
-}
-
static const struct v4l2_file_operations em28xx_v4l_fops = {
.owner = THIS_MODULE,
.open = em28xx_v4l2_open,
.release = em28xx_v4l2_close,
- .read = em28xx_v4l2_read,
- .poll = em28xx_v4l2_poll,
- .mmap = em28xx_v4l2_mmap,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -2420,19 +1748,20 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
.vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
+ .vidioc_try_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
.vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap,
.vidioc_enum_framesizes = vidioc_enum_framesizes,
.vidioc_g_audio = vidioc_g_audio,
.vidioc_s_audio = vidioc_s_audio,
.vidioc_cropcap = vidioc_cropcap,
- .vidioc_g_fmt_sliced_vbi_cap = vidioc_g_fmt_sliced_vbi_cap,
- .vidioc_try_fmt_sliced_vbi_cap = vidioc_try_set_sliced_vbi_cap,
- .vidioc_s_fmt_sliced_vbi_cap = vidioc_try_set_sliced_vbi_cap,
-
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+
.vidioc_g_std = vidioc_g_std,
.vidioc_querystd = vidioc_querystd,
.vidioc_s_std = vidioc_s_std,
@@ -2441,15 +1770,14 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
@@ -2459,11 +1787,10 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
static const struct video_device em28xx_video_template = {
.fops = &em28xx_v4l_fops,
- .release = video_device_release,
+ .release = video_device_release_empty,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = V4L2_STD_ALL,
- .current_norm = V4L2_STD_PAL,
};
static const struct v4l2_file_operations radio_fops = {
@@ -2474,18 +1801,13 @@ static const struct v4l2_file_operations radio_fops = {
};
static const struct v4l2_ioctl_ops radio_ioctl_ops = {
- .vidioc_querycap = radio_querycap,
+ .vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = radio_g_tuner,
- .vidioc_enum_input = radio_enum_input,
- .vidioc_g_audio = radio_g_audio,
.vidioc_s_tuner = radio_s_tuner,
- .vidioc_s_audio = radio_s_audio,
- .vidioc_s_input = radio_s_input,
- .vidioc_queryctrl = radio_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
@@ -2514,9 +1836,11 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2_dev;
- vfd->release = video_device_release;
vfd->debug = video_debug;
vfd->lock = &dev->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
+ if (dev->board.is_webcam)
+ vfd->tvnorms = 0;
snprintf(vfd->name, sizeof(vfd->name), "%s %s",
dev->name, type_name);
@@ -2527,7 +1851,7 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
int em28xx_register_analog_devices(struct em28xx *dev)
{
- u8 val;
+ u8 val;
int ret;
unsigned int maxw;
@@ -2535,7 +1859,7 @@ int em28xx_register_analog_devices(struct em28xx *dev)
dev->name, EM28XX_VERSION);
/* set default norm */
- dev->norm = em28xx_video_template.current_norm;
+ dev->norm = V4L2_STD_PAL;
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
dev->interlaced = EM28XX_INTERLACED_DEFAULT;
@@ -2543,10 +1867,10 @@ int em28xx_register_analog_devices(struct em28xx *dev)
dev->format = &format[0];
maxw = norm_maxw(dev);
- /* MaxPacketSize for em2800 is too small to capture at full resolution
- * use half of maxw as the scaler can only scale to 50% */
- if (dev->board.is_em2800)
- maxw /= 2;
+ /* MaxPacketSize for em2800 is too small to capture at full resolution
+ * use half of maxw as the scaler can only scale to 50% */
+ if (dev->board.is_em2800)
+ maxw /= 2;
em28xx_set_video_format(dev, format[0].fourcc,
maxw, norm_maxh(dev));
@@ -2572,6 +1896,8 @@ int em28xx_register_analog_devices(struct em28xx *dev)
em28xx_errdev("cannot allocate video_device.\n");
return -ENODEV;
}
+ dev->vdev->queue = &dev->vb_vidq;
+ dev->vdev->queue->lock = &dev->vb_queue_lock;
/* register v4l2 video video_device */
ret = video_register_device(dev->vdev, VFL_TYPE_GRABBER,
@@ -2587,6 +1913,9 @@ int em28xx_register_analog_devices(struct em28xx *dev)
dev->vbi_dev = em28xx_vdev_init(dev, &em28xx_video_template,
"vbi");
+ dev->vbi_dev->queue = &dev->vb_vbiq;
+ dev->vbi_dev->queue->lock = &dev->vb_vbi_queue_lock;
+
/* register v4l2 vbi video_device */
ret = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->devno]);
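
A rough sketch of how the new videobuf2 queues referenced above (dev->vb_vidq, dev->vb_queue_lock) are typically initialized before being attached to the video_device; the function name example_vb2_setup() and the use of em28xx_video_qops as a vb2_ops are assumptions for illustration, not code taken from this patch:

static int example_vb2_setup(struct em28xx *dev)
{
	struct vb2_queue *q = &dev->vb_vidq;
	int rc;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct em28xx_buffer);
	q->ops = &em28xx_video_qops;		/* driver-provided vb2_ops (assumed name) */
	q->mem_ops = &vb2_vmalloc_memops;	/* from <media/videobuf2-vmalloc.h> */

	rc = vb2_queue_init(q);
	if (rc < 0)
		return rc;

	/* video_register_device() then exposes this queue through the
	 * vb2_ioctl_* and vb2_fop_* helpers wired up above. */
	dev->vdev->queue = q;
	dev->vdev->queue->lock = &dev->vb_queue_lock;
	return 0;
}
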
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 86e90d86da6d..5f0b2c59e846 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -4,6 +4,7 @@
Copyright (C) 2005 Markus Rechberger <mrechberger@gmail.com>
Ludovico Cavedon <cavedon@sssup.it>
Mauro Carvalho Chehab <mchehab@infradead.org>
+ Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
Based on the em2800 driver from Sascha Sommer <saschasommer@freenet.de>
@@ -30,13 +31,12 @@
#include <linux/mutex.h>
#include <linux/videodev2.h>
-#include <media/videobuf-vmalloc.h>
+#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
#include <media/ir-kbd-i2c.h>
#include <media/rc-core.h>
-#if defined(CONFIG_VIDEO_EM28XX_DVB) || defined(CONFIG_VIDEO_EM28XX_DVB_MODULE)
-#include <media/videobuf-dvb.h>
-#endif
#include "tuner-xc2028.h"
#include "xc5000.h"
#include "em28xx-reg.h"
@@ -157,12 +157,18 @@
#define EM28XX_NUM_BUFS 5
#define EM28XX_DVB_NUM_BUFS 5
-/* number of packets for each buffer
+/* isoc transfers: number of packets for each buffer
windows requests only 64 packets .. so we better do the same
this is what I found out for all alternate numbers there!
*/
-#define EM28XX_NUM_PACKETS 64
-#define EM28XX_DVB_MAX_PACKETS 64
+#define EM28XX_NUM_ISOC_PACKETS 64
+#define EM28XX_DVB_NUM_ISOC_PACKETS 64
+
+/* bulk transfers: transfer buffer size = packet size * packet multiplier
+ USB 2.0 spec says bulk packet size is always 512 bytes
+ */
+#define EM28XX_BULK_PACKET_MULTIPLIER 384
+#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 384
#define EM28XX_INTERLACED_DEFAULT 1
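
For reference, a worked instance of the sizing rule stated in the new bulk-transfer comment above; the define name below is illustrative only and not part of the patch:

/* USB 2.0 high-speed bulk packets are 512 bytes, so with a multiplier
 * of 384 each bulk URB transfer buffer is
 *   512 * 384 = 196608 bytes (192 KiB).
 */
#define EXAMPLE_BULK_URB_BUF_SIZE	(512 * EM28XX_BULK_PACKET_MULTIPLIER)
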
@@ -187,12 +193,8 @@
Interval: 125us
*/
-/* time to wait when stopping the isoc transfer */
-#define EM28XX_URB_TIMEOUT \
- msecs_to_jiffies(EM28XX_NUM_BUFS * EM28XX_NUM_PACKETS)
-
/* time in msecs to wait for i2c writes to finish */
-#define EM2800_I2C_WRITE_TIMEOUT 20
+#define EM2800_I2C_XFER_TIMEOUT 20
enum em28xx_mode {
EM28XX_SUSPEND,
@@ -203,7 +205,7 @@ enum em28xx_mode {
struct em28xx;
-struct em28xx_usb_isoc_bufs {
+struct em28xx_usb_bufs {
/* max packet size of isoc transaction */
int max_pkt_size;
@@ -213,26 +215,26 @@ struct em28xx_usb_isoc_bufs {
/* number of allocated urbs */
int num_bufs;
- /* urb for isoc transfers */
+ /* urb for isoc/bulk transfers */
struct urb **urb;
- /* transfer buffers for isoc transfer */
+ /* transfer buffers for isoc/bulk transfer */
char **transfer_buffer;
};
-struct em28xx_usb_isoc_ctl {
- /* isoc transfer buffers for analog mode */
- struct em28xx_usb_isoc_bufs analog_bufs;
+struct em28xx_usb_ctl {
+ /* isoc/bulk transfer buffers for analog mode */
+ struct em28xx_usb_bufs analog_bufs;
- /* isoc transfer buffers for digital mode */
- struct em28xx_usb_isoc_bufs digital_bufs;
+ /* isoc/bulk transfer buffers for digital mode */
+ struct em28xx_usb_bufs digital_bufs;
/* Stores already requested buffers */
struct em28xx_buffer *vid_buf;
struct em28xx_buffer *vbi_buf;
- /* isoc urb callback */
- int (*isoc_copy) (struct em28xx *dev, struct urb *urb);
+ /* copy data from URB */
+ int (*urb_data_copy) (struct em28xx *dev, struct urb *urb);
};
@@ -247,19 +249,26 @@ struct em28xx_fmt {
/* buffer for one video frame */
struct em28xx_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
+ struct vb2_buffer vb;
+ struct list_head list;
- struct list_head frame;
+ void *mem;
+ unsigned int length;
int top_field;
+
+ /* counter to control buffer fill */
+ unsigned int pos;
+ /* NOTE: in interlaced mode, this value is reset to zero at
+ * the start of each new field (not frame!) */
+
+ /* pointer to vmalloc memory address in vb */
+ char *vb_buf;
};
struct em28xx_dmaqueue {
struct list_head active;
wait_queue_head_t wq;
-
- /* Counters to control buffer fill */
- int pos;
};
/* inputs */
@@ -430,13 +439,6 @@ struct em28xx_eeprom {
u8 string_idx_table;
};
-/* device states */
-enum em28xx_dev_state {
- DEV_INITIALIZED = 0x01,
- DEV_DISCONNECTED = 0x02,
- DEV_MISCONFIGURED = 0x04,
-};
-
#define EM28XX_AUDIO_BUFS 5
#define EM28XX_NUM_AUDIO_PACKETS 64
#define EM28XX_AUDIO_MAX_PACKET_SIZE 196 /* static value */
@@ -469,12 +471,8 @@ struct em28xx_audio {
struct em28xx;
struct em28xx_fh {
+ struct v4l2_fh fh;
struct em28xx *dev;
- int radio;
- unsigned int resources;
-
- struct videobuf_queue vb_vidq;
- struct videobuf_queue vb_vbiq;
enum v4l2_buf_type type;
};
@@ -487,9 +485,14 @@ struct em28xx {
int devno; /* marks the number of this device */
enum em28xx_chip_id chip_id;
+ unsigned char disconnected:1; /* device has been disconnected */
+
int audio_ifnum;
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* provides ac97 mute and volume overrides */
+ struct v4l2_ctrl_handler ac97_ctrl_handler;
struct em28xx_board board;
/* Webcam specific fields */
@@ -497,7 +500,7 @@ struct em28xx {
int sensor_xres, sensor_yres;
int sensor_xtal;
- /* Allows progressive (e. g. non-interlaced) mode */
+ /* Progressive (non-interlaced) mode */
int progressive;
/* Vinmode/Vinctl used at the driver */
@@ -532,6 +535,7 @@ struct em28xx {
struct i2c_client i2c_client;
/* video for linux */
int users; /* user count for exclusive use */
+ int streaming_users; /* Number of actively streaming users */
struct video_device *vdev; /* video for linux device struct */
v4l2_std_id norm; /* selected tv norm */
int ctl_freq; /* selected frequency */
@@ -554,13 +558,10 @@ struct em28xx {
struct em28xx_audio adev;
- /* states */
- enum em28xx_dev_state state;
-
- /* vbi related state tracking */
+ /* capture state tracking */
int capture_type;
+ unsigned char top_field:1;
int vbi_read;
- unsigned char cur_field;
unsigned int vbi_width;
unsigned int vbi_height; /* lines per field */
@@ -574,6 +575,12 @@ struct em28xx {
struct video_device *vbi_dev;
struct video_device *radio_dev;
+ /* Videobuf2 */
+ struct vb2_queue vb_vidq;
+ struct vb2_queue vb_vbiq;
+ struct mutex vb_queue_lock;
+ struct mutex vb_vbi_queue_lock;
+
/* resources in use */
unsigned int resources;
@@ -582,17 +589,31 @@ struct em28xx {
/* Isoc control struct */
struct em28xx_dmaqueue vidq;
struct em28xx_dmaqueue vbiq;
- struct em28xx_usb_isoc_ctl isoc_ctl;
+ struct em28xx_usb_ctl usb_ctl;
spinlock_t slock;
+ unsigned int field_count;
+ unsigned int vbi_field_count;
+
/* usb transfer */
struct usb_device *udev; /* the usb device */
- int alt; /* alternate */
- int max_pkt_size; /* max packet size of isoc transaction */
- int num_alt; /* Number of alternative settings */
- unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */
- int dvb_alt; /* alternate for DVB */
- unsigned int dvb_max_pkt_size; /* wMaxPacketSize for DVB */
+ u8 analog_ep_isoc; /* address of isoc endpoint for analog */
+ u8 analog_ep_bulk; /* address of bulk endpoint for analog */
+ u8 dvb_ep_isoc; /* address of isoc endpoint for DVB */
+ u8 dvb_ep_bulk; /* address of bulk endpoint for DVB */
+ int alt; /* alternate setting */
+ int max_pkt_size; /* max packet size of the selected ep at alt */
+ int packet_multiplier; /* multiplier for wMaxPacketSize, used for
+ URB buffer size definition */
+ int num_alt; /* number of alternative settings */
+ unsigned int *alt_max_pkt_size_isoc; /* array of isoc wMaxPacketSize */
+ unsigned int analog_xfer_bulk:1; /* use bulk instead of isoc
+ transfers for analog */
+ int dvb_alt_isoc; /* alternate setting for DVB isoc transfers */
+ unsigned int dvb_max_pkt_size_isoc; /* isoc max packet size of the
+ selected DVB ep at dvb_alt */
+ unsigned int dvb_xfer_bulk:1; /* use bulk instead of isoc
+ transfers for DVB */
char urb_buf[URB_MAX_CTRL_SIZE]; /* urb control msg buffer */
/* helper funcs that call usb_control_msg */
@@ -619,9 +640,6 @@ struct em28xx {
struct delayed_work sbutton_query_work;
struct em28xx_dvb *dvb;
-
- /* I2C keyboard data */
- struct IR_i2c_init_data init_data;
};
struct em28xx_ops {
@@ -666,12 +684,14 @@ int em28xx_vbi_supported(struct em28xx *dev);
int em28xx_set_outfmt(struct em28xx *dev);
int em28xx_resolution_set(struct em28xx *dev);
int em28xx_set_alternate(struct em28xx *dev);
-int em28xx_alloc_isoc(struct em28xx *dev, enum em28xx_mode mode,
- int max_packets, int num_bufs, int max_pkt_size);
-int em28xx_init_isoc(struct em28xx *dev, enum em28xx_mode mode,
- int max_packets, int num_bufs, int max_pkt_size,
- int (*isoc_copy) (struct em28xx *dev, struct urb *urb));
-void em28xx_uninit_isoc(struct em28xx *dev, enum em28xx_mode mode);
+int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
+ int num_bufs, int max_pkt_size, int packet_multiplier);
+int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
+ int xfer_bulk,
+ int num_bufs, int max_pkt_size, int packet_multiplier,
+ int (*urb_data_copy)
+ (struct em28xx *dev, struct urb *urb));
+void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode);
void em28xx_stop_urbs(struct em28xx *dev);
int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev);
int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode);
@@ -683,8 +703,13 @@ void em28xx_init_extension(struct em28xx *dev);
void em28xx_close_extension(struct em28xx *dev);
/* Provided by em28xx-video.c */
+int em28xx_vb2_setup(struct em28xx *dev);
int em28xx_register_analog_devices(struct em28xx *dev);
void em28xx_release_analog_resources(struct em28xx *dev);
+void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv);
+int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
+int em28xx_stop_vbi_streaming(struct vb2_queue *vq);
+extern const struct v4l2_ctrl_ops em28xx_ctrl_ops;
/* Provided by em28xx-cards.c */
extern int em2800_variant_detect(struct usb_device *udev, int model);
@@ -695,7 +720,7 @@ int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
void em28xx_release_resources(struct em28xx *dev);
/* Provided by em28xx-vbi.c */
-extern struct videobuf_queue_ops em28xx_vbi_qops;
+extern struct vb2_ops em28xx_vbi_qops;
/* printk macros */
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index b3ba47d4d6a2..1dcdd9f95f1c 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -541,7 +541,7 @@ static int do_command(struct gspca_dev *gspca_dev, u16 command,
/* test button press */
a = ((gspca_dev->usb_buf[1] & 0x02) == 0);
if (a != sd->params.qx3.button) {
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
input_report_key(gspca_dev->input_dev, KEY_CAMERA, a);
input_sync(gspca_dev->input_dev);
#endif
@@ -1640,7 +1640,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
/* Update the camera status */
do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0);
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
/* If the last button state is pressed, release it now! */
if (sd->params.qx3.button) {
/* The camera latch will hold the pressed state until we reset
@@ -1869,7 +1869,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.dq_callback = sd_dq_callback,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.other_input = 1,
#endif
};
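
A side note on the recurring CONFIG_INPUT conversion in these gspca hunks: IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h> evaluates to 1 when the option is built in (=y) or built as a module (=m), which is exactly what the old open-coded test checked. A minimal sketch, with the helper name invented here for illustration:

#include <linux/kconfig.h>
#include <linux/input.h>

#if IS_ENABLED(CONFIG_INPUT)
/* true for CONFIG_INPUT=y and CONFIG_INPUT=m alike */
static void example_report_button(struct input_dev *idev, int pressed)
{
	input_report_key(idev, KEY_CAMERA, pressed);
	input_sync(idev);
}
#else
static inline void example_report_button(struct input_dev *idev, int pressed) { }
#endif
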
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index e0a431bb0d42..3564bdbb2ea3 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -44,7 +44,7 @@
#include "gspca.h"
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
#include <linux/input.h>
#include <linux/usb/input.h>
#endif
@@ -118,7 +118,7 @@ static const struct vm_operations_struct gspca_vm_ops = {
/*
* Input and interrupt endpoint handling functions
*/
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static void int_irq(struct urb *urb)
{
struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
@@ -2303,7 +2303,7 @@ int gspca_dev_probe2(struct usb_interface *intf,
return 0;
out:
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
if (gspca_dev->input_dev)
input_unregister_device(gspca_dev->input_dev);
#endif
@@ -2348,7 +2348,7 @@ EXPORT_SYMBOL(gspca_dev_probe);
void gspca_disconnect(struct usb_interface *intf)
{
struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
struct input_dev *input_dev;
#endif
@@ -2360,7 +2360,7 @@ void gspca_disconnect(struct usb_interface *intf)
gspca_dev->present = 0;
destroy_urbs(gspca_dev);
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
gspca_input_destroy_urb(gspca_dev);
input_dev = gspca_dev->input_dev;
if (input_dev) {
diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
index 352317d7acdb..5559932bf2f5 100644
--- a/drivers/media/usb/gspca/gspca.h
+++ b/drivers/media/usb/gspca/gspca.h
@@ -138,7 +138,7 @@ struct sd_desc {
cam_reg_op get_register;
#endif
cam_ident_op get_chip_ident;
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
cam_int_pkt_op int_pkt_scan;
/* other_input makes the gspca core create gspca_dev->input even when
int_pkt_scan is NULL, for cams with non interrupt driven buttons */
@@ -167,7 +167,7 @@ struct gspca_dev {
struct usb_device *dev;
struct file *capt_file; /* file doing video capture */
/* protected by queue_lock */
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
struct input_dev *input_dev;
char phys[64]; /* physical device path */
#endif
@@ -190,7 +190,7 @@ struct gspca_dev {
#define USB_BUF_SZ 64
__u8 *usb_buf; /* buffer for USB exchanges */
struct urb *urb[MAX_NURBS];
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
struct urb *int_urb;
#endif
diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c
index 62ba80d9b998..fdaeeb14453f 100644
--- a/drivers/media/usb/gspca/jl2005bcd.c
+++ b/drivers/media/usb/gspca/jl2005bcd.c
@@ -536,20 +536,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- return 0;
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index bbf91e07e38b..61e25dbf2447 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -246,7 +246,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
konica_stream_off(gspca_dev);
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
/* Don't keep the button in the pressed state "forever" if it was
pressed when streaming is stopped */
if (sd->snapshot_pressed) {
@@ -345,7 +345,7 @@ static void sd_isoc_irq(struct urb *urb)
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
} else {
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
u8 button_state = st & 0x40 ? 1 : 0;
if (sd->snapshot_pressed != button_state) {
input_report_key(gspca_dev->input_dev,
@@ -452,7 +452,7 @@ static const struct sd_desc sd_desc = {
.init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.other_input = 1,
#endif
};
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index 9aa09f845ce4..9ad19a7ef81b 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -4238,7 +4238,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
if (sd->bridge == BRIDGE_W9968CF)
w9968cf_stop0(sd);
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
/* If the last button state is pressed, release it now! */
if (sd->snapshot_pressed) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
@@ -4255,7 +4255,7 @@ static void ov51x_handle_button(struct gspca_dev *gspca_dev, u8 state)
struct sd *sd = (struct sd *) gspca_dev;
if (sd->snapshot_pressed != state) {
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
input_report_key(gspca_dev->input_dev, KEY_CAMERA, state);
input_sync(gspca_dev->input_dev);
#endif
@@ -4924,7 +4924,7 @@ static const struct sd_desc sd_desc = {
.dq_callback = sd_reset_snapshot,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.other_input = 1,
#endif
};
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c
index d236d1791f78..3b75097dd34e 100644
--- a/drivers/media/usb/gspca/pac207.c
+++ b/drivers/media/usb/gspca/pac207.c
@@ -55,6 +55,11 @@ MODULE_LICENSE("GPL");
#define PAC207_AUTOGAIN_DEADZONE 30
+/* global parameters */
+static int led_invert;
+module_param(led_invert, int, 0644);
+MODULE_PARM_DESC(led_invert, "Invert led");
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
@@ -187,10 +192,14 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
- pac207_write_reg(gspca_dev, 0x41, 0x00);
- /* Bit_0=Image Format,
- * Bit_1=LED,
- * Bit_2=Compression test mode enable */
+ u8 mode;
+
+ /* mode: Image Format (Bit 0), LED (1), Compr. test mode (2) */
+ if (led_invert)
+ mode = 0x02;
+ else
+ mode = 0x00;
+ pac207_write_reg(gspca_dev, 0x41, mode);
pac207_write_reg(gspca_dev, 0x0f, 0x00); /* Power Control */
return gspca_dev->usb_err;
@@ -303,7 +312,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
pac207_write_reg(gspca_dev, 0x02,
v4l2_ctrl_g_ctrl(gspca_dev->exposure)); /* PXCK = 12MHz /n */
- mode = 0x02; /* Image Format (Bit 0), LED (1), Compr. test mode (2) */
+ /* mode: Image Format (Bit 0), LED (1), Compr. test mode (2) */
+ if (led_invert)
+ mode = 0x00;
+ else
+ mode = 0x02;
if (gspca_dev->width == 176) { /* 176x144 */
mode |= 0x01;
PDEBUG(D_STREAM, "pac207_start mode 176x144");
@@ -325,8 +338,15 @@ static int sd_start(struct gspca_dev *gspca_dev)
static void sd_stopN(struct gspca_dev *gspca_dev)
{
+ u8 mode;
+
+ /* mode: Image Format (Bit 0), LED (1), Compr. test mode (2) */
+ if (led_invert)
+ mode = 0x02;
+ else
+ mode = 0x00;
pac207_write_reg(gspca_dev, 0x40, 0x00); /* Stop ISO pipe */
- pac207_write_reg(gspca_dev, 0x41, 0x00); /* Turn of LED */
+ pac207_write_reg(gspca_dev, 0x41, mode); /* Turn off LED */
pac207_write_reg(gspca_dev, 0x0f, 0x00); /* Power Control */
}
@@ -393,7 +413,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrput packet length */
@@ -422,7 +442,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.dq_callback = pac207_do_auto_gain,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 4f5869a98082..add6f725ba50 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -890,7 +890,7 @@ static int sd_chip_ident(struct gspca_dev *gspca_dev,
}
#endif
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrput packet length */
@@ -936,7 +936,7 @@ static const struct sd_desc sd_desc = {
.set_register = sd_dbg_s_register,
.get_chip_ident = sd_chip_ident,
#endif
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/gspca/pac7311.c b/drivers/media/usb/gspca/pac7311.c
index ba3558d3f017..a12dfbf6e051 100644
--- a/drivers/media/usb/gspca/pac7311.c
+++ b/drivers/media/usb/gspca/pac7311.c
@@ -621,7 +621,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -661,7 +661,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c
index a33cb78a839c..5f729b8aa2bd 100644
--- a/drivers/media/usb/gspca/se401.c
+++ b/drivers/media/usb/gspca/se401.c
@@ -594,7 +594,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
sd_pkt_scan_janggu(gspca_dev, data, len);
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
{
struct sd *sd = (struct sd *)gspca_dev;
@@ -688,7 +688,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.dq_callback = sd_dq_callback,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index 41f769fe340c..4ec544f4a845 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -2205,7 +2205,7 @@ static void qual_upd(struct work_struct *work)
mutex_unlock(&gspca_dev->usb_lock);
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet */
int len) /* interrupt packet length */
@@ -2349,7 +2349,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
.dq_callback = sd_dqcallback,
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 1220340e7602..104ae25275b4 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -1400,7 +1400,7 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
return -EINVAL;
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -1430,7 +1430,7 @@ static const struct sd_desc sd_desc = {
.pkt_scan = sd_pkt_scan,
.querymenu = sd_querymenu,
.dq_callback = do_autogain,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -1448,7 +1448,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
{USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
{USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
+#if !IS_ENABLED(CONFIG_USB_SN9C102)
{USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
{USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
#endif
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index 36307a9028a9..671d0c6dece3 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -3077,7 +3077,7 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
return -EINVAL;
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -3109,7 +3109,7 @@ static const struct sd_desc sd_desc = {
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
.querymenu = sd_querymenu,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/gspca/spca561.c b/drivers/media/usb/gspca/spca561.c
index cfe71dd6747d..d1db3d8f6522 100644
--- a/drivers/media/usb/gspca/spca561.c
+++ b/drivers/media/usb/gspca/spca561.c
@@ -741,7 +741,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
return;
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
if (data[0] & 0x20) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
input_sync(gspca_dev->input_dev);
@@ -866,7 +866,7 @@ static const struct sd_desc sd_desc_12a = {
.start = sd_start_12a,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.other_input = 1,
#endif
};
@@ -879,7 +879,7 @@ static const struct sd_desc sd_desc_72a = {
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.other_input = 1,
#endif
};
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
index 999ec7764449..657160b4a1f7 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
@@ -492,7 +492,7 @@ frame_data:
}
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -529,7 +529,7 @@ static const struct sd_desc sd_desc = {
.pkt_scan = stv06xx_pkt_scan,
.isoc_init = stv06xx_isoc_init,
.isoc_nego = stv06xx_isoc_nego,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
index 748e1421d6d8..e95fa8997d22 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
@@ -52,9 +52,13 @@ static int vv6410_s_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_HFLIP:
+ if (!gspca_dev->streaming)
+ return 0;
err = vv6410_set_hflip(gspca_dev, ctrl->val);
break;
case V4L2_CID_VFLIP:
+ if (!gspca_dev->streaming)
+ return 0;
err = vv6410_set_vflip(gspca_dev, ctrl->val);
break;
case V4L2_CID_GAIN:
@@ -94,11 +98,14 @@ static int vv6410_init_controls(struct sd *sd)
{
struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;
- v4l2_ctrl_handler_init(hdl, 4);
- v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_handler_init(hdl, 2);
+ /* Disable the hardware VFLIP and HFLIP as we currently lack a
+ mechanism to adjust the image offset in such a way that
+ we don't need to renegotiate the announced format */
+ /* v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops, */
+ /* V4L2_CID_HFLIP, 0, 1, 1, 0); */
+ /* v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops, */
+ /* V4L2_CID_VFLIP, 0, 1, 1, 0); */
v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
V4L2_CID_EXPOSURE, 0, 32768, 1, 20000);
v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
diff --git a/drivers/media/usb/gspca/t613.c b/drivers/media/usb/gspca/t613.c
index 8bc6c3ceec2c..e2cc4e5a0ccb 100644
--- a/drivers/media/usb/gspca/t613.c
+++ b/drivers/media/usb/gspca/t613.c
@@ -494,7 +494,7 @@ static void setcolors(struct gspca_dev *gspca_dev, s32 val)
static void setgamma(struct gspca_dev *gspca_dev, s32 val)
{
- PDEBUG(D_CONF, "Gamma: %d", sd->gamma);
+ PDEBUG(D_CONF, "Gamma: %d", val);
reg_w_ixbuf(gspca_dev, 0x90,
gamma_table[val], sizeof gamma_table[0]);
}
@@ -823,7 +823,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
msleep(20);
reg_w(gspca_dev, 0x0309);
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
/* If the last button state is pressed, release it now! */
if (sd->button_pressed) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
@@ -841,7 +841,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
int pkt_type;
if (data[0] == 0x5a) {
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
if (len > 20) {
u8 state = (data[20] & 0x80) ? 1 : 0;
if (sd->button_pressed != state) {
@@ -1019,7 +1019,7 @@ static const struct sd_desc sd_desc = {
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.other_input = 1,
#endif
};
diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c
index d4b23c9bf90c..7eaf64eb867c 100644
--- a/drivers/media/usb/gspca/xirlink_cit.c
+++ b/drivers/media/usb/gspca/xirlink_cit.c
@@ -2759,7 +2759,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
break;
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
/* If the last button state is pressed, release it now! */
if (sd->button_state) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
@@ -2914,7 +2914,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static void cit_check_button(struct gspca_dev *gspca_dev)
{
int new_button_state;
@@ -3062,7 +3062,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.dq_callback = cit_check_button,
.other_input = 1,
#endif
@@ -3079,7 +3079,7 @@ static const struct sd_desc sd_desc_isoc_nego = {
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.dq_callback = cit_check_button,
.other_input = 1,
#endif
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index 77c57755e7b4..a8dc421f9f1f 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -6902,7 +6902,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
return 0;
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrput packet length */
@@ -6929,7 +6929,7 @@ static const struct sd_desc sd_desc = {
.pkt_scan = sd_pkt_scan,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 84dc26fe80ee..5c6193536399 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -391,7 +391,7 @@ static int hdpvr_probe(struct usb_interface *interface,
goto error;
}
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
retval = hdpvr_register_i2c_adapter(dev);
if (retval < 0) {
v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
@@ -419,7 +419,7 @@ static int hdpvr_probe(struct usb_interface *interface,
return 0;
reg_fail:
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
i2c_del_adapter(&dev->i2c_adapter);
#endif
error:
@@ -451,7 +451,7 @@ static void hdpvr_disconnect(struct usb_interface *interface)
mutex_lock(&dev->io_mutex);
hdpvr_cancel_queue(dev);
mutex_unlock(&dev->io_mutex);
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
i2c_del_adapter(&dev->i2c_adapter);
#endif
video_unregister_device(dev->video_dev);
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index 031cf024304c..a38f58c4c6bf 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -13,7 +13,7 @@
*
*/
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
#include <linux/i2c.h>
#include <linux/slab.h>
@@ -217,8 +217,7 @@ int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
hdpvr_activate_ir(dev);
- memcpy(&dev->i2c_adapter, &hdpvr_i2c_adapter_template,
- sizeof(struct i2c_adapter));
+ dev->i2c_adapter = hdpvr_i2c_adapter_template;
dev->i2c_adapter.dev.parent = &dev->udev->dev;
i2c_set_adapdata(&dev->i2c_adapter, dev);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
index e046fdaec5ae..f7702aeeda3f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
@@ -422,8 +422,7 @@ int pvr2_encoder_adjust(struct pvr2_hdw *hdw)
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"Error from cx2341x module code=%d",ret);
} else {
- memcpy(&hdw->enc_cur_state,&hdw->enc_ctl_state,
- sizeof(struct cx2341x_mpeg_params));
+ hdw->enc_cur_state = hdw->enc_ctl_state;
hdw->enc_cur_valid = !0;
}
return ret;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index 9ab596c78a4e..b5e929f1bf82 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -649,8 +649,8 @@ void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
}
// Configure the adapter and set up everything else related to it.
- memcpy(&hdw->i2c_adap,&pvr2_i2c_adap_template,sizeof(hdw->i2c_adap));
- memcpy(&hdw->i2c_algo,&pvr2_i2c_algo_template,sizeof(hdw->i2c_algo));
+ hdw->i2c_adap = pvr2_i2c_adap_template;
+ hdw->i2c_algo = pvr2_i2c_algo_template;
strlcpy(hdw->i2c_adap.name,hdw->name,sizeof(hdw->i2c_adap.name));
hdw->i2c_adap.dev.parent = &hdw->usb_dev->dev;
hdw->i2c_adap.algo = &hdw->i2c_algo;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 6930676051e7..34c3b6e80e86 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -1339,7 +1339,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
return;
}
- memcpy(&dip->devbase,&vdev_template,sizeof(vdev_template));
+ dip->devbase = vdev_template;
dip->devbase.release = pvr2_video_device_release;
dip->devbase.ioctl_ops = &pvr2_ioctl_ops;
{
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 5210239cbaee..5ec15cb1ed26 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -316,7 +316,8 @@ static void pwc_isoc_handler(struct urb *urb)
struct pwc_frame_buf *fbuf = pdev->fill_buf;
if (pdev->vsync == 1) {
- do_gettimeofday(&fbuf->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(
+ &fbuf->vb.v4l2_buf.timestamp);
pdev->vsync = 2;
}
@@ -1007,7 +1008,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
}
/* Init video_device structure */
- memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
+ pdev->vdev = pwc_template;
strcpy(pdev->vdev.name, name);
pdev->vdev.queue = &pdev->vb_queue;
pdev->vdev.queue->lock = &pdev->vb_queue_lock;
diff --git a/drivers/media/usb/pwc/pwc-v4l.c b/drivers/media/usb/pwc/pwc-v4l.c
index 545e9bbdeede..aa7449eaca08 100644
--- a/drivers/media/usb/pwc/pwc-v4l.c
+++ b/drivers/media/usb/pwc/pwc-v4l.c
@@ -434,19 +434,18 @@ static int pwc_vidioc_try_fmt(struct pwc_device *pdev, struct v4l2_format *f)
case V4L2_PIX_FMT_PWC1:
if (DEVICE_USE_CODEC23(pdev->type)) {
PWC_DEBUG_IOCTL("codec1 is only supported for old pwc webcam\n");
- return -EINVAL;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
}
break;
case V4L2_PIX_FMT_PWC2:
if (DEVICE_USE_CODEC1(pdev->type)) {
PWC_DEBUG_IOCTL("codec23 is only supported for new pwc webcam\n");
- return -EINVAL;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
}
break;
default:
PWC_DEBUG_IOCTL("Unsupported pixel format\n");
- return -EINVAL;
-
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
}
size = pwc_get_size(pdev, f->fmt.pix.width, f->fmt.pix.height);
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 8ebec0d7bf59..498c57ea5d32 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -593,7 +593,7 @@ static int s2255_got_frame(struct s2255_channel *channel, int jpgsize)
buf = list_entry(dma_q->active.next,
struct s2255_buffer, vb.queue);
list_del(&buf->vb.queue);
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
s2255_fillbuff(channel, buf, jpgsize);
wake_up(&buf->vb.done);
dprintk(2, "%s: [buf/i] [%p/%d]\n", __func__, buf, buf->vb.i);
@@ -629,7 +629,6 @@ static void s2255_fillbuff(struct s2255_channel *channel,
struct s2255_buffer *buf, int jpgsize)
{
int pos = 0;
- struct timeval ts;
const char *tmpbuf;
char *vbuf = videobuf_to_vmalloc(&buf->vb);
unsigned long last_frame;
@@ -674,8 +673,7 @@ static void s2255_fillbuff(struct s2255_channel *channel,
/* tell v4l buffer was filled */
buf->vb.field_count = channel->frame_count * 2;
- do_gettimeofday(&ts);
- buf->vb.ts = ts;
+ v4l2_get_timestamp(&buf->vb.ts);
buf->vb.state = VIDEOBUF_DONE;
}
diff --git a/drivers/media/usb/sn9c102/sn9c102_core.c b/drivers/media/usb/sn9c102/sn9c102_core.c
index 73605864fffa..c957e9aa6077 100644
--- a/drivers/media/usb/sn9c102/sn9c102_core.c
+++ b/drivers/media/usb/sn9c102/sn9c102_core.c
@@ -173,7 +173,7 @@ sn9c102_request_buffers(struct sn9c102_device* cam, u32 count,
cam->frame[i].buf.sequence = 0;
cam->frame[i].buf.field = V4L2_FIELD_NONE;
cam->frame[i].buf.memory = V4L2_MEMORY_MMAP;
- cam->frame[i].buf.flags = 0;
+ cam->frame[i].buf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
}
return cam->nbuffers;
@@ -773,7 +773,8 @@ end_of_frame:
img);
if ((*f)->buf.bytesused == 0)
- do_gettimeofday(&(*f)->buf.timestamp);
+ v4l2_get_timestamp(
+ &(*f)->buf.timestamp);
(*f)->buf.bytesused += img;
@@ -2826,7 +2827,7 @@ sn9c102_vidioc_querybuf(struct sn9c102_device* cam, void __user * arg)
b.index >= cam->nbuffers || cam->io != IO_MMAP)
return -EINVAL;
- memcpy(&b, &cam->frame[b.index].buf, sizeof(b));
+ b = cam->frame[b.index].buf;
if (cam->frame[b.index].vma_use_count)
b.flags |= V4L2_BUF_FLAG_MAPPED;
@@ -2929,7 +2930,7 @@ sn9c102_vidioc_dqbuf(struct sn9c102_device* cam, struct file* filp,
f->state = F_UNUSED;
- memcpy(&b, &f->buf, sizeof(b));
+ b = f->buf;
if (f->vma_use_count)
b.flags |= V4L2_BUF_FLAG_MAPPED;
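
The do_gettimeofday() -> v4l2_get_timestamp() substitutions in these hunks, together with the new V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC flag above, move buffer timestamps from wall-clock time to the monotonic clock. A hedged sketch of the expected behaviour, not the actual v4l2-common implementation:

#include <linux/ktime.h>
#include <linux/time.h>

static void example_monotonic_timestamp(struct timeval *tv)
{
	struct timespec ts;

	ktime_get_ts(&ts);	/* CLOCK_MONOTONIC, immune to settimeofday()/NTP steps */
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}
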
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index fa3671de02aa..39f1aae209bc 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -78,7 +78,7 @@ struct stk1160_buffer *stk1160_next_buffer(struct stk1160 *dev)
unsigned long flags = 0;
/* Current buffer must be NULL when this functions gets called */
- BUG_ON(dev->isoc_ctl.buf);
+ WARN_ON(dev->isoc_ctl.buf);
spin_lock_irqsave(&dev->buf_lock, flags);
if (!list_empty(&dev->avail_bufs)) {
@@ -101,7 +101,7 @@ void stk1160_buffer_done(struct stk1160 *dev)
buf->vb.v4l2_buf.sequence = dev->field_count >> 1;
buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
buf->vb.v4l2_buf.bytesused = buf->bytesused;
- do_gettimeofday(&buf->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
vb2_set_plane_payload(&buf->vb, 0, buf->bytesused);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 5d3c032d733c..4cbab085e348 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/dmi.h>
#include <linux/usb.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
@@ -38,12 +39,12 @@
#include "stk-webcam.h"
-static bool hflip;
-module_param(hflip, bool, 0444);
+static int hflip = -1;
+module_param(hflip, int, 0444);
MODULE_PARM_DESC(hflip, "Horizontal image flip (mirror). Defaults to 0");
-static bool vflip;
-module_param(vflip, bool, 0444);
+static int vflip = -1;
+module_param(vflip, int, 0444);
MODULE_PARM_DESC(vflip, "Vertical image flip. Defaults to 0");
static int debug;
@@ -62,6 +63,19 @@ static struct usb_device_id stkwebcam_table[] = {
};
MODULE_DEVICE_TABLE(usb, stkwebcam_table);
+/* The stk webcam laptop module is mounted upside down in some laptops :( */
+static const struct dmi_system_id stk_upside_down_dmi_table[] = {
+ {
+ .ident = "ASUS G1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1")
+ }
+ },
+ {}
+};
+
+
/*
* Basic stuff
*/
@@ -466,6 +480,7 @@ static int stk_setup_siobuf(struct stk_camera *dev, int index)
buf->dev = dev;
buf->v4lbuf.index = index;
buf->v4lbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf->v4lbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buf->v4lbuf.field = V4L2_FIELD_NONE;
buf->v4lbuf.memory = V4L2_MEMORY_MMAP;
buf->v4lbuf.m.offset = 2*index*buf->v4lbuf.length;
@@ -816,10 +831,16 @@ static int stk_vidioc_g_ctrl(struct file *filp,
c->value = dev->vsettings.brightness;
break;
case V4L2_CID_HFLIP:
- c->value = dev->vsettings.hflip;
+ if (dmi_check_system(stk_upside_down_dmi_table))
+ c->value = !dev->vsettings.hflip;
+ else
+ c->value = dev->vsettings.hflip;
break;
case V4L2_CID_VFLIP:
- c->value = dev->vsettings.vflip;
+ if (dmi_check_system(stk_upside_down_dmi_table))
+ c->value = !dev->vsettings.vflip;
+ else
+ c->value = dev->vsettings.vflip;
break;
default:
return -EINVAL;
@@ -836,10 +857,16 @@ static int stk_vidioc_s_ctrl(struct file *filp,
dev->vsettings.brightness = c->value;
return stk_sensor_set_brightness(dev, c->value >> 8);
case V4L2_CID_HFLIP:
- dev->vsettings.hflip = c->value;
+ if (dmi_check_system(stk_upside_down_dmi_table))
+ dev->vsettings.hflip = !c->value;
+ else
+ dev->vsettings.hflip = c->value;
return 0;
case V4L2_CID_VFLIP:
- dev->vsettings.vflip = c->value;
+ if (dmi_check_system(stk_upside_down_dmi_table))
+ dev->vsettings.vflip = !c->value;
+ else
+ dev->vsettings.vflip = c->value;
return 0;
default:
return -EINVAL;
@@ -1113,7 +1140,7 @@ static int stk_vidioc_dqbuf(struct file *filp,
sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;
sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE;
sbuf->v4lbuf.sequence = ++dev->sequence;
- do_gettimeofday(&sbuf->v4lbuf.timestamp);
+ v4l2_get_timestamp(&sbuf->v4lbuf.timestamp);
*buf = sbuf->v4lbuf;
return 0;
@@ -1275,8 +1302,18 @@ static int stk_camera_probe(struct usb_interface *interface,
dev->interface = interface;
usb_get_intf(interface);
- dev->vsettings.vflip = vflip;
- dev->vsettings.hflip = hflip;
+ if (hflip != -1)
+ dev->vsettings.hflip = hflip;
+ else if (dmi_check_system(stk_upside_down_dmi_table))
+ dev->vsettings.hflip = 1;
+ else
+ dev->vsettings.hflip = 0;
+ if (vflip != -1)
+ dev->vsettings.vflip = vflip;
+ else if (dmi_check_system(stk_upside_down_dmi_table))
+ dev->vsettings.vflip = 1;
+ else
+ dev->vsettings.vflip = 0;
dev->n_sbufs = 0;
set_present(dev);
diff --git a/drivers/media/usb/tlg2300/pd-video.c b/drivers/media/usb/tlg2300/pd-video.c
index 3082bfa9b2c5..21723378bb8f 100644
--- a/drivers/media/usb/tlg2300/pd-video.c
+++ b/drivers/media/usb/tlg2300/pd-video.c
@@ -212,7 +212,7 @@ static void submit_frame(struct front_face *front)
front->curr_frame = NULL;
vb->state = VIDEOBUF_DONE;
vb->field_count++;
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
wake_up(&vb->done);
}
diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/media/usb/tm6000/tm6000-core.c
index 22cc0116deb6..7c32353c59db 100644
--- a/drivers/media/usb/tm6000/tm6000-core.c
+++ b/drivers/media/usb/tm6000/tm6000-core.c
@@ -40,10 +40,13 @@ int tm6000_read_write_usb(struct tm6000_core *dev, u8 req_type, u8 req,
u8 *data = NULL;
int delay = 5000;
- mutex_lock(&dev->usb_lock);
-
- if (len)
+ if (len) {
data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ }
+
+ mutex_lock(&dev->usb_lock);
if (req_type & USB_DIR_IN)
pipe = usb_rcvctrlpipe(dev->udev, 0);
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index e1f3f66e1e63..9fc1e940a82b 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -360,8 +360,8 @@ dvb_dmx_err:
dvb_dmx_release(&dvb->demux);
frontend_err:
if (dvb->frontend) {
- dvb_frontend_detach(dvb->frontend);
dvb_unregister_frontend(dvb->frontend);
+ dvb_frontend_detach(dvb->frontend);
}
adapter_err:
dvb_unregister_adapter(&dvb->adapter);
@@ -384,8 +384,8 @@ static void unregister_dvb(struct tm6000_core *dev)
/* mutex_lock(&tm6000_driver.open_close_mutex); */
if (dvb->frontend) {
- dvb_frontend_detach(dvb->frontend);
dvb_unregister_frontend(dvb->frontend);
+ dvb_frontend_detach(dvb->frontend);
}
dvb_dmxdev_release(&dvb->dmxdev);
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index f656fd7a39a2..1a6857929c15 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -34,6 +34,7 @@
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include <media/tuner.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
@@ -49,82 +50,20 @@
#define TM6000_MIN_BUF 4
#define TM6000_DEF_BUF 8
+#define TM6000_NUM_URB_BUF 8
+
#define TM6000_MAX_ISO_PACKETS 46 /* Max number of ISO packets */
/* Declare static vars that will be used as parameters */
static unsigned int vid_limit = 16; /* Video memory limit, in Mb */
static int video_nr = -1; /* /dev/videoN, -1 for autodetect */
static int radio_nr = -1; /* /dev/radioN, -1 for autodetect */
+static bool keep_urb; /* keep urb buffers allocated */
/* Debug level */
int tm6000_debug;
EXPORT_SYMBOL_GPL(tm6000_debug);
-static const struct v4l2_queryctrl no_ctrl = {
- .name = "42",
- .flags = V4L2_CTRL_FLAG_DISABLED,
-};
-
-/* supported controls */
-static struct v4l2_queryctrl tm6000_qctrl[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 54,
- .flags = 0,
- }, {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 119,
- .flags = 0,
- }, {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 112,
- .flags = 0,
- }, {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = -128,
- .maximum = 127,
- .step = 0x1,
- .default_value = 0,
- .flags = 0,
- },
- /* --- audio --- */
- {
- .id = V4L2_CID_AUDIO_MUTE,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- }, {
- .id = V4L2_CID_AUDIO_VOLUME,
- .name = "Volume",
- .minimum = -15,
- .maximum = 15,
- .step = 1,
- .default_value = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- }
-};
-
-static const unsigned int CTRLS = ARRAY_SIZE(tm6000_qctrl);
-static int qctl_regs[ARRAY_SIZE(tm6000_qctrl)];
-
static struct tm6000_fmt format[] = {
{
.name = "4:2:2, packed, YVY2",
@@ -141,16 +80,6 @@ static struct tm6000_fmt format[] = {
}
};
-static const struct v4l2_queryctrl *ctrl_by_id(unsigned int id)
-{
- unsigned int i;
-
- for (i = 0; i < CTRLS; i++)
- if (tm6000_qctrl[i].id == id)
- return tm6000_qctrl+i;
- return NULL;
-}
-
/* ------------------------------------------------------------------
* DMA and thread functions
* ------------------------------------------------------------------
@@ -191,7 +120,7 @@ static inline void buffer_filled(struct tm6000_core *dev,
dprintk(dev, V4L2_DEBUG_ISOC, "[%p/%d] wakeup\n", buf, buf->vb.i);
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
@@ -538,6 +467,71 @@ static void tm6000_irq_callback(struct urb *urb)
}
/*
+ * Allocate URB buffers
+ */
+static int tm6000_alloc_urb_buffers(struct tm6000_core *dev)
+{
+ int num_bufs = TM6000_NUM_URB_BUF;
+ int i;
+
+ if (dev->urb_buffer != NULL)
+ return 0;
+
+ dev->urb_buffer = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
+ if (!dev->urb_buffer) {
+ tm6000_err("cannot allocate memory for urb buffers\n");
+ return -ENOMEM;
+ }
+
+ dev->urb_dma = kmalloc(sizeof(dma_addr_t)*num_bufs, GFP_KERNEL);
+ if (!dev->urb_dma) {
+ tm6000_err("cannot allocate memory for urb dma pointers\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_bufs; i++) {
+ dev->urb_buffer[i] = usb_alloc_coherent(
+ dev->udev, dev->urb_size,
+ GFP_KERNEL, &dev->urb_dma[i]);
+ if (!dev->urb_buffer[i]) {
+ tm6000_err("unable to allocate %i bytes for transfer buffer %i\n",
+ dev->urb_size, i);
+ return -ENOMEM;
+ }
+ memset(dev->urb_buffer[i], 0, dev->urb_size);
+ }
+
+ return 0;
+}
+
+/*
+ * Free URB buffers
+ */
+static int tm6000_free_urb_buffers(struct tm6000_core *dev)
+{
+ int i;
+
+ if (dev->urb_buffer == NULL)
+ return 0;
+
+ for (i = 0; i < TM6000_NUM_URB_BUF; i++) {
+ if (dev->urb_buffer[i]) {
+ usb_free_coherent(dev->udev,
+ dev->urb_size,
+ dev->urb_buffer[i],
+ dev->urb_dma[i]);
+ dev->urb_buffer[i] = NULL;
+ }
+ }
+ kfree(dev->urb_buffer);
+ kfree(dev->urb_dma);
+ dev->urb_buffer = NULL;
+ dev->urb_dma = NULL;
+
+ return 0;
+}
+
+/*
* Stop and Deallocate URBs
*/
static void tm6000_uninit_isoc(struct tm6000_core *dev)
@@ -551,18 +545,15 @@ static void tm6000_uninit_isoc(struct tm6000_core *dev)
if (urb) {
usb_kill_urb(urb);
usb_unlink_urb(urb);
- if (dev->isoc_ctl.transfer_buffer[i]) {
- usb_free_coherent(dev->udev,
- urb->transfer_buffer_length,
- dev->isoc_ctl.transfer_buffer[i],
- urb->transfer_dma);
- }
usb_free_urb(urb);
dev->isoc_ctl.urb[i] = NULL;
}
dev->isoc_ctl.transfer_buffer[i] = NULL;
}
+ if (!keep_urb)
+ tm6000_free_urb_buffers(dev);
+
kfree(dev->isoc_ctl.urb);
kfree(dev->isoc_ctl.transfer_buffer);
@@ -572,12 +563,13 @@ static void tm6000_uninit_isoc(struct tm6000_core *dev)
}
/*
- * Allocate URBs and start IRQ
+ * Assign URBs and start IRQ
*/
static int tm6000_prepare_isoc(struct tm6000_core *dev)
{
struct tm6000_dmaqueue *dma_q = &dev->vidq;
- int i, j, sb_size, pipe, size, max_packets, num_bufs = 8;
+ int i, j, sb_size, pipe, size, max_packets;
+ int num_bufs = TM6000_NUM_URB_BUF;
struct urb *urb;
/* De-allocates all pending stuff */
@@ -605,6 +597,7 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
max_packets = TM6000_MAX_ISO_PACKETS;
sb_size = max_packets * size;
+ dev->urb_size = sb_size;
dev->isoc_ctl.num_bufs = num_bufs;
@@ -627,6 +620,17 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
max_packets, num_bufs, sb_size,
dev->isoc_in.maxsize, size);
+
+ if (!dev->urb_buffer && tm6000_alloc_urb_buffers(dev) < 0) {
+ tm6000_err("cannot allocate memory for urb buffers\n");
+
+ /* call free, as some buffers might have been allocated */
+ tm6000_free_urb_buffers(dev);
+ kfree(dev->isoc_ctl.urb);
+ kfree(dev->isoc_ctl.transfer_buffer);
+ return -ENOMEM;
+ }
+
/* allocate urbs and transfer buffers */
for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
urb = usb_alloc_urb(max_packets, GFP_KERNEL);
@@ -638,17 +642,8 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
}
dev->isoc_ctl.urb[i] = urb;
- dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev,
- sb_size, GFP_KERNEL, &urb->transfer_dma);
- if (!dev->isoc_ctl.transfer_buffer[i]) {
- tm6000_err("unable to allocate %i bytes for transfer"
- " buffer %i%s\n",
- sb_size, i,
- in_interrupt() ? " while in int" : "");
- tm6000_uninit_isoc(dev);
- return -ENOMEM;
- }
- memset(dev->isoc_ctl.transfer_buffer[i], 0, sb_size);
+ urb->transfer_dma = dev->urb_dma[i];
+ dev->isoc_ctl.transfer_buffer[i] = dev->urb_buffer[i];
usb_fill_bulk_urb(urb, dev->udev, pipe,
dev->isoc_ctl.transfer_buffer[i], sb_size,
@@ -879,16 +874,21 @@ static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
+ struct video_device *vdev = video_devdata(file);
strlcpy(cap->driver, "tm6000", sizeof(cap->driver));
strlcpy(cap->card, "Trident TVMaster TM5600/6000/6010", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_AUDIO |
- V4L2_CAP_READWRITE;
-
+ usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
if (dev->tuner_type != TUNER_ABSENT)
- cap->capabilities |= V4L2_CAP_TUNER;
+ cap->device_caps |= V4L2_CAP_TUNER;
+ if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ else
+ cap->device_caps |= V4L2_CAP_RADIO;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS |
+ V4L2_CAP_RADIO | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
return 0;
}
@@ -896,7 +896,7 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (unlikely(f->index >= ARRAY_SIZE(format)))
+ if (f->index >= ARRAY_SIZE(format))
return -EINVAL;
strlcpy(f->description, format[f->index].name, sizeof(f->description));
@@ -913,10 +913,12 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.height = fh->height;
f->fmt.pix.field = fh->vb_vidq.field;
f->fmt.pix.pixelformat = fh->fmt->fourcc;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fh->fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.priv = 0;
return 0;
}
@@ -947,12 +949,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
field = f->fmt.pix.field;
- if (field == V4L2_FIELD_ANY)
- field = V4L2_FIELD_SEQ_TB;
- else if (V4L2_FIELD_INTERLACED != field) {
- dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Field type invalid.\n");
- return -EINVAL;
- }
+ field = V4L2_FIELD_INTERLACED;
tm6000_get_std_res(dev);
@@ -962,11 +959,13 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.width &= ~0x01;
f->fmt.pix.field = field;
+ f->fmt.pix.priv = 0;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
@@ -1141,79 +1140,40 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
}
/* --- controls ---------------------------------------------- */
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++)
- if (qc->id && qc->id == tm6000_qctrl[i].id) {
- memcpy(qc, &(tm6000_qctrl[i]),
- sizeof(*qc));
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int tm6000_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct tm6000_fh *fh = priv;
- struct tm6000_core *dev = fh->dev;
- int val;
+ struct tm6000_core *dev = container_of(ctrl->handler, struct tm6000_core, ctrl_handler);
+ u8 val = ctrl->val;
- /* FIXME: Probably, those won't work! Maybe we need shadow regs */
switch (ctrl->id) {
case V4L2_CID_CONTRAST:
- val = tm6000_get_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, 0);
- break;
+ tm6000_set_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, val);
+ return 0;
case V4L2_CID_BRIGHTNESS:
- val = tm6000_get_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, 0);
+ tm6000_set_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, val);
return 0;
case V4L2_CID_SATURATION:
- val = tm6000_get_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, 0);
+ tm6000_set_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, val);
return 0;
case V4L2_CID_HUE:
- val = tm6000_get_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, 0);
- return 0;
- case V4L2_CID_AUDIO_MUTE:
- val = dev->ctl_mute;
- return 0;
- case V4L2_CID_AUDIO_VOLUME:
- val = dev->ctl_volume;
+ tm6000_set_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, val);
return 0;
- default:
- return -EINVAL;
}
+ return -EINVAL;
+}
- if (val < 0)
- return val;
-
- ctrl->value = val;
+static const struct v4l2_ctrl_ops tm6000_ctrl_ops = {
+ .s_ctrl = tm6000_s_ctrl,
+};
- return 0;
-}
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int tm6000_radio_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct tm6000_fh *fh = priv;
- struct tm6000_core *dev = fh->dev;
- u8 val = ctrl->value;
+ struct tm6000_core *dev = container_of(ctrl->handler,
+ struct tm6000_core, radio_ctrl_handler);
+ u8 val = ctrl->val;
switch (ctrl->id) {
- case V4L2_CID_CONTRAST:
- tm6000_set_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, val);
- return 0;
- case V4L2_CID_BRIGHTNESS:
- tm6000_set_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, val);
- return 0;
- case V4L2_CID_SATURATION:
- tm6000_set_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, val);
- return 0;
- case V4L2_CID_HUE:
- tm6000_set_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, val);
- return 0;
case V4L2_CID_AUDIO_MUTE:
dev->ctl_mute = val;
tm6000_tvaudio_set_mute(dev, val);
@@ -1226,20 +1186,24 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
return -EINVAL;
}
+static const struct v4l2_ctrl_ops tm6000_radio_ctrl_ops = {
+ .s_ctrl = tm6000_radio_s_ctrl,
+};
+
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
- if (unlikely(UNSET == dev->tuner_type))
- return -EINVAL;
+ if (UNSET == dev->tuner_type)
+ return -ENOTTY;
if (0 != t->index)
return -EINVAL;
strcpy(t->name, "Television");
t->type = V4L2_TUNER_ANALOG_TV;
- t->capability = V4L2_TUNER_CAP_NORM;
+ t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO;
t->rangehigh = 0xffffffffUL;
t->rxsubchans = V4L2_TUNER_SUB_STEREO;
@@ -1257,11 +1221,14 @@ static int vidioc_s_tuner(struct file *file, void *priv,
struct tm6000_core *dev = fh->dev;
if (UNSET == dev->tuner_type)
- return -EINVAL;
+ return -ENOTTY;
if (0 != t->index)
return -EINVAL;
- dev->amode = t->audmode;
+ if (t->audmode > V4L2_TUNER_MODE_STEREO)
+ dev->amode = V4L2_TUNER_MODE_STEREO;
+ else
+ dev->amode = t->audmode;
dprintk(dev, 3, "audio mode: %x\n", t->audmode);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t);
@@ -1275,10 +1242,11 @@ static int vidioc_g_frequency(struct file *file, void *priv,
struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
- if (unlikely(UNSET == dev->tuner_type))
+ if (UNSET == dev->tuner_type)
+ return -ENOTTY;
+ if (f->tuner)
return -EINVAL;
- f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = dev->freq;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_frequency, f);
@@ -1292,13 +1260,9 @@ static int vidioc_s_frequency(struct file *file, void *priv,
struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
- if (unlikely(UNSET == dev->tuner_type))
- return -EINVAL;
- if (unlikely(f->tuner != 0))
- return -EINVAL;
- if (0 == fh->radio && V4L2_TUNER_ANALOG_TV != f->type)
- return -EINVAL;
- if (1 == fh->radio && V4L2_TUNER_RADIO != f->type)
+ if (UNSET == dev->tuner_type)
+ return -ENOTTY;
+ if (f->tuner != 0)
return -EINVAL;
dev->freq = f->frequency;
@@ -1307,27 +1271,6 @@ static int vidioc_s_frequency(struct file *file, void *priv,
return 0;
}
-static int radio_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct tm6000_fh *fh = file->private_data;
- struct tm6000_core *dev = fh->dev;
-
- strcpy(cap->driver, "tm6000");
- strlcpy(cap->card, dev->name, sizeof(dev->name));
- sprintf(cap->bus_info, "USB%04x:%04x",
- le16_to_cpu(dev->udev->descriptor.idVendor),
- le16_to_cpu(dev->udev->descriptor.idProduct));
- cap->version = dev->dev_type;
- cap->capabilities = V4L2_CAP_TUNER |
- V4L2_CAP_AUDIO |
- V4L2_CAP_RADIO |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
-
- return 0;
-}
-
static int radio_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
@@ -1340,7 +1283,9 @@ static int radio_g_tuner(struct file *file, void *priv,
memset(t, 0, sizeof(*t));
strcpy(t->name, "Radio");
t->type = V4L2_TUNER_RADIO;
+ t->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
t->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ t->audmode = V4L2_TUNER_MODE_STEREO;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
@@ -1355,95 +1300,14 @@ static int radio_s_tuner(struct file *file, void *priv,
if (0 != t->index)
return -EINVAL;
+ if (t->audmode > V4L2_TUNER_MODE_STEREO)
+ t->audmode = V4L2_TUNER_MODE_STEREO;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t);
return 0;
}
-static int radio_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
-{
- struct tm6000_fh *fh = priv;
- struct tm6000_core *dev = fh->dev;
-
- if (i->index != 0)
- return -EINVAL;
-
- if (!dev->rinput.type)
- return -EINVAL;
-
- strcpy(i->name, "Radio");
- i->type = V4L2_INPUT_TYPE_TUNER;
-
- return 0;
-}
-
-static int radio_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- struct tm6000_fh *fh = priv;
- struct tm6000_core *dev = fh->dev;
-
- if (dev->input != 5)
- return -EINVAL;
-
- *i = dev->input - 5;
-
- return 0;
-}
-
-static int radio_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- memset(a, 0, sizeof(*a));
- strcpy(a->name, "Radio");
- return 0;
-}
-
-static int radio_s_audio(struct file *file, void *priv,
- const struct v4l2_audio *a)
-{
- return 0;
-}
-
-static int radio_s_input(struct file *filp, void *priv, unsigned int i)
-{
- struct tm6000_fh *fh = priv;
- struct tm6000_core *dev = fh->dev;
-
- if (i)
- return -EINVAL;
-
- if (!dev->rinput.type)
- return -EINVAL;
-
- dev->input = i + 5;
-
- return 0;
-}
-
-static int radio_s_std(struct file *file, void *fh, v4l2_std_id *norm)
-{
- return 0;
-}
-
-static int radio_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *c)
-{
- const struct v4l2_queryctrl *ctrl;
-
- if (c->id < V4L2_CID_BASE ||
- c->id >= V4L2_CID_LASTP1)
- return -EINVAL;
- if (c->id == V4L2_CID_AUDIO_MUTE) {
- ctrl = ctrl_by_id(c->id);
- *c = *ctrl;
- } else
- *c = no_ctrl;
-
- return 0;
-}
-
/* ------------------------------------------------------------------
File operations for the device
------------------------------------------------------------------*/
@@ -1454,7 +1318,7 @@ static int __tm6000_open(struct file *file)
struct tm6000_core *dev = video_drvdata(file);
struct tm6000_fh *fh;
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- int i, rc;
+ int rc;
int radio = 0;
dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: open called (dev=%s)\n",
@@ -1486,6 +1350,7 @@ static int __tm6000_open(struct file *file)
return -ENOMEM;
}
+ v4l2_fh_init(&fh->fh, vdev);
file->private_data = fh;
fh->dev = dev;
fh->radio = radio;
@@ -1514,13 +1379,7 @@ static int __tm6000_open(struct file *file)
if (rc < 0)
return rc;
- if (dev->mode != TM6000_MODE_ANALOG) {
- /* Put all controls at a sane state */
- for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++)
- qctl_regs[i] = tm6000_qctrl[i].default_value;
-
- dev->mode = TM6000_MODE_ANALOG;
- }
+ dev->mode = TM6000_MODE_ANALOG;
if (!fh->radio) {
videobuf_queue_vmalloc_init(&fh->vb_vidq, &tm6000_video_qops,
@@ -1530,12 +1389,12 @@ static int __tm6000_open(struct file *file)
sizeof(struct tm6000_buffer), fh, &dev->lock);
} else {
dprintk(dev, V4L2_DEBUG_OPEN, "video_open: setting radio device\n");
- dev->input = 5;
tm6000_set_audio_rinput(dev);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_radio);
tm6000_prepare_isoc(dev);
tm6000_start_thread(dev);
}
+ v4l2_fh_add(&fh->fh);
return 0;
}
@@ -1576,29 +1435,35 @@ tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos)
static unsigned int
__tm6000_poll(struct file *file, struct poll_table_struct *wait)
{
+ unsigned long req_events = poll_requested_events(wait);
struct tm6000_fh *fh = file->private_data;
struct tm6000_buffer *buf;
+ int res = 0;
+ if (v4l2_event_pending(&fh->fh))
+ res = POLLPRI;
+ else if (req_events & POLLPRI)
+ poll_wait(file, &fh->fh.wait, wait);
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
- return POLLERR;
+ return res | POLLERR;
if (!!is_res_streaming(fh->dev, fh))
- return POLLERR;
+ return res | POLLERR;
if (!is_res_read(fh->dev, fh)) {
/* streaming capture */
if (list_empty(&fh->vb_vidq.stream))
- return POLLERR;
+ return res | POLLERR;
buf = list_entry(fh->vb_vidq.stream.next, struct tm6000_buffer, vb.stream);
- } else {
+ poll_wait(file, &buf->vb.done, wait);
+ if (buf->vb.state == VIDEOBUF_DONE ||
+ buf->vb.state == VIDEOBUF_ERROR)
+ return res | POLLIN | POLLRDNORM;
+ } else if (req_events & (POLLIN | POLLRDNORM)) {
/* read() capture */
- return videobuf_poll_stream(file, &fh->vb_vidq, wait);
+ return res | videobuf_poll_stream(file, &fh->vb_vidq, wait);
}
- poll_wait(file, &buf->vb.done, wait);
- if (buf->vb.state == VIDEOBUF_DONE ||
- buf->vb.state == VIDEOBUF_ERROR)
- return POLLIN | POLLRDNORM;
- return 0;
+ return res;
}
static unsigned int tm6000_poll(struct file *file, struct poll_table_struct *wait)
@@ -1648,7 +1513,8 @@ static int tm6000_release(struct file *file)
if (!fh->radio)
videobuf_mmap_free(&fh->vb_vidq);
}
-
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
kfree(fh);
mutex_unlock(&dev->lock);
@@ -1688,9 +1554,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
@@ -1701,6 +1564,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device tm6000_template = {
@@ -1715,25 +1580,19 @@ static struct video_device tm6000_template = {
static const struct v4l2_file_operations radio_fops = {
.owner = THIS_MODULE,
.open = tm6000_open,
+ .poll = v4l2_ctrl_poll,
.release = tm6000_release,
.unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops radio_ioctl_ops = {
- .vidioc_querycap = radio_querycap,
+ .vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = radio_g_tuner,
- .vidioc_enum_input = radio_enum_input,
- .vidioc_g_audio = radio_g_audio,
.vidioc_s_tuner = radio_s_tuner,
- .vidioc_s_audio = radio_s_audio,
- .vidioc_s_input = radio_s_input,
- .vidioc_s_std = radio_s_std,
- .vidioc_queryctrl = radio_queryctrl,
- .vidioc_g_input = radio_g_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device tm6000_radio_template = {
@@ -1762,6 +1621,7 @@ static struct video_device *vdev_init(struct tm6000_core *dev,
vfd->release = video_device_release;
vfd->debug = tm6000_debug;
vfd->lock = &dev->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
@@ -1771,15 +1631,41 @@ static struct video_device *vdev_init(struct tm6000_core *dev,
int tm6000_v4l2_register(struct tm6000_core *dev)
{
- int ret = -1;
+ int ret = 0;
+
+ v4l2_ctrl_handler_init(&dev->ctrl_handler, 6);
+ v4l2_ctrl_handler_init(&dev->radio_ctrl_handler, 2);
+ v4l2_ctrl_new_std(&dev->radio_ctrl_handler, &tm6000_radio_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&dev->radio_ctrl_handler, &tm6000_radio_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, -15, 15, 1, 0);
+ v4l2_ctrl_new_std(&dev->ctrl_handler, &tm6000_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 54);
+ v4l2_ctrl_new_std(&dev->ctrl_handler, &tm6000_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 119);
+ v4l2_ctrl_new_std(&dev->ctrl_handler, &tm6000_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 112);
+ v4l2_ctrl_new_std(&dev->ctrl_handler, &tm6000_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ v4l2_ctrl_add_handler(&dev->ctrl_handler,
+ &dev->radio_ctrl_handler, NULL);
+
+ if (dev->radio_ctrl_handler.error)
+ ret = dev->radio_ctrl_handler.error;
+ if (!ret && dev->ctrl_handler.error)
+ ret = dev->ctrl_handler.error;
+ if (ret)
+ goto free_ctrl;
dev->vfd = vdev_init(dev, &tm6000_template, "video");
if (!dev->vfd) {
printk(KERN_INFO "%s: can't register video device\n",
dev->name);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_ctrl;
}
+ dev->vfd->ctrl_handler = &dev->ctrl_handler;
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
@@ -1790,7 +1676,9 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
if (ret < 0) {
printk(KERN_INFO "%s: can't register video device\n",
dev->name);
- return ret;
+ video_device_release(dev->vfd);
+ dev->vfd = NULL;
+ goto free_ctrl;
}
printk(KERN_INFO "%s: registered device %s\n",
@@ -1803,15 +1691,17 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
printk(KERN_INFO "%s: can't register radio device\n",
dev->name);
ret = -ENXIO;
- return ret; /* FIXME release resource */
+ goto unreg_video;
}
+ dev->radio_dev->ctrl_handler = &dev->radio_ctrl_handler;
ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO,
radio_nr);
if (ret < 0) {
printk(KERN_INFO "%s: can't register radio device\n",
dev->name);
- return ret; /* FIXME release resource */
+ video_device_release(dev->radio_dev);
+ goto unreg_video;
}
printk(KERN_INFO "%s: registered device %s\n",
@@ -1820,12 +1710,22 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret);
return ret;
+
+unreg_video:
+ video_unregister_device(dev->vfd);
+free_ctrl:
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ v4l2_ctrl_handler_free(&dev->radio_ctrl_handler);
+ return ret;
}
int tm6000_v4l2_unregister(struct tm6000_core *dev)
{
video_unregister_device(dev->vfd);
+ /* if URB buffers are still allocated, free them now */
+ tm6000_free_urb_buffers(dev);
+
if (dev->radio_dev) {
if (video_is_registered(dev->radio_dev))
video_unregister_device(dev->radio_dev);
@@ -1851,3 +1751,5 @@ MODULE_PARM_DESC(debug, "activates debug info");
module_param(vid_limit, int, 0644);
MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
+module_param(keep_urb, bool, 0);
+MODULE_PARM_DESC(keep_urb, "Keep urb buffers allocated even when the device is closed by the user");
diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/media/usb/tm6000/tm6000.h
index 6df418658c9c..08bd0740dd23 100644
--- a/drivers/media/usb/tm6000/tm6000.h
+++ b/drivers/media/usb/tm6000/tm6000.h
@@ -27,6 +27,8 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
#include <linux/dvb/frontend.h>
#include "dvb_demux.h"
@@ -222,6 +224,8 @@ struct tm6000_core {
struct video_device *radio_dev;
struct tm6000_dmaqueue vidq;
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl_handler radio_ctrl_handler;
int input;
struct tm6000_input vinput[3]; /* video input */
@@ -264,6 +268,11 @@ struct tm6000_core {
spinlock_t slock;
+ /* urb dma buffers */
+ char **urb_buffer;
+ dma_addr_t *urb_dma;
+ unsigned int urb_size;
+
unsigned long quirks;
};
@@ -282,6 +291,7 @@ struct tm6000_ops {
};
struct tm6000_fh {
+ struct v4l2_fh fh;
struct tm6000_core *dev;
unsigned int radio;
diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
index 5b682cc4c814..e40718552850 100644
--- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
@@ -561,6 +561,13 @@ static void ttusb_process_muxpack(struct ttusb *ttusb, const u8 * muxpack,
{
u16 csum = 0, cc;
int i;
+
+ if (len < 4 || len & 0x1) {
+ pr_warn("%s: muxpack has invalid len %d\n", __func__, len);
+ numinvalid++;
+ return;
+ }
+
for (i = 0; i < len; i += 2)
csum ^= le16_to_cpup((__le16 *) (muxpack + i));
if (csum) {
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index c9b2042f8bdf..816b1cffab7d 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -1169,7 +1169,7 @@ static void usbvision_parse_data(struct usb_usbvision *usbvision)
if (newstate == parse_state_next_frame) {
frame->grabstate = frame_state_done;
- do_gettimeofday(&(frame->timestamp));
+ v4l2_get_timestamp(&(frame->timestamp));
frame->sequence = usbvision->frame_num;
spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
diff --git a/drivers/media/usb/usbvision/usbvision-i2c.c b/drivers/media/usb/usbvision/usbvision-i2c.c
index 89fec029e924..ba262a32bd3a 100644
--- a/drivers/media/usb/usbvision/usbvision-i2c.c
+++ b/drivers/media/usb/usbvision/usbvision-i2c.c
@@ -189,8 +189,7 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
if (usbvision->registered_i2c)
return 0;
- memcpy(&usbvision->i2c_adap, &i2c_adap_template,
- sizeof(struct i2c_adapter));
+ usbvision->i2c_adap = i2c_adap_template;
sprintf(usbvision->i2c_adap.name, "%s-%d-%s", i2c_adap_template.name,
usbvision->dev->bus->busnum, usbvision->dev->devpath);
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index ad7f7448072e..cd1fe78a5532 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -761,7 +761,7 @@ static int vidioc_querybuf(struct file *file,
if (vb->index >= usbvision->num_frames)
return -EINVAL;
/* Updating the corresponding frame state */
- vb->flags = 0;
+ vb->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
frame = &usbvision->frame[vb->index];
if (frame->grabstate >= frame_state_ready)
vb->flags |= V4L2_BUF_FLAG_QUEUED;
@@ -843,7 +843,8 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *vb)
vb->memory = V4L2_MEMORY_MMAP;
vb->flags = V4L2_BUF_FLAG_MAPPED |
V4L2_BUF_FLAG_QUEUED |
- V4L2_BUF_FLAG_DONE;
+ V4L2_BUF_FLAG_DONE |
+ V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vb->index = f->index;
vb->sequence = f->sequence;
vb->timestamp = f->timestamp;
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index d5baab17a5ef..61e28dec991d 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1838,7 +1838,7 @@ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
{
int ret = 0;
- memcpy(&ctrl->info, info, sizeof(*info));
+ ctrl->info = *info;
INIT_LIST_HEAD(&ctrl->info.mappings);
/* Allocate an array to save control values (cur, def, max, etc.) */
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 778addc5caff..6c233a54ce40 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -115,11 +115,27 @@ static int uvc_buffer_finish(struct vb2_buffer *vb)
return 0;
}
+static void uvc_wait_prepare(struct vb2_queue *vq)
+{
+ struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+
+ mutex_unlock(&queue->mutex);
+}
+
+static void uvc_wait_finish(struct vb2_queue *vq)
+{
+ struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+
+ mutex_lock(&queue->mutex);
+}
+
static struct vb2_ops uvc_queue_qops = {
.queue_setup = uvc_queue_setup,
.buf_prepare = uvc_buffer_prepare,
.buf_queue = uvc_buffer_queue,
.buf_finish = uvc_buffer_finish,
+ .wait_prepare = uvc_wait_prepare,
+ .wait_finish = uvc_wait_finish,
};
int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 68d59b527492..b2dc32623a71 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -315,7 +315,7 @@ static int uvc_v4l2_set_format(struct uvc_streaming *stream,
goto done;
}
- memcpy(&stream->ctrl, &probe, sizeof probe);
+ stream->ctrl = probe;
stream->cur_format = format;
stream->cur_frame = frame;
@@ -387,7 +387,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
return -EBUSY;
}
- memcpy(&probe, &stream->ctrl, sizeof probe);
+ probe = stream->ctrl;
probe.dwFrameInterval =
uvc_try_frame_interval(stream->cur_frame, interval);
@@ -398,7 +398,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
return ret;
}
- memcpy(&stream->ctrl, &probe, sizeof probe);
+ stream->ctrl = probe;
mutex_unlock(&stream->mutex);
/* Return the actual frame period. */
@@ -501,8 +501,8 @@ static int uvc_v4l2_open(struct file *file)
if (atomic_inc_return(&stream->dev->users) == 1) {
ret = uvc_status_start(stream->dev);
if (ret < 0) {
- usb_autopm_put_interface(stream->dev->intf);
atomic_dec(&stream->dev->users);
+ usb_autopm_put_interface(stream->dev->intf);
kfree(handle);
return ret;
}
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 39edd4442932..74d56df3347f 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -501,7 +501,6 @@ static void zr364xx_fillbuff(struct zr364xx_camera *cam,
int jpgsize)
{
int pos = 0;
- struct timeval ts;
const char *tmpbuf;
char *vbuf = videobuf_to_vmalloc(&buf->vb);
unsigned long last_frame;
@@ -530,8 +529,7 @@ static void zr364xx_fillbuff(struct zr364xx_camera *cam,
/* tell v4l buffer was filled */
buf->vb.field_count = cam->frame_count * 2;
- do_gettimeofday(&ts);
- buf->vb.ts = ts;
+ v4l2_get_timestamp(&buf->vb.ts);
buf->vb.state = VIDEOBUF_DONE;
}
@@ -559,7 +557,7 @@ static int zr364xx_got_frame(struct zr364xx_camera *cam, int jpgsize)
goto unlock;
}
list_del(&buf->vb.queue);
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
DBG("[%p/%d] wakeup\n", buf, buf->vb.i);
zr364xx_fillbuff(cam, buf, jpgsize);
wake_up(&buf->vb.done);
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 65875c3aba1b..976d029e9925 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -82,3 +82,14 @@ config VIDEOBUF2_DMA_SG
#depends on HAS_DMA
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
+
+config VIDEO_V4L2_INT_DEVICE
+ tristate "V4L2 int device (DEPRECATED)"
+ depends on VIDEO_V4L2
+ ---help---
+ An early framework for a hardware-independent interface for
+ image sensors, bridges, etc. Currently used by the omap24xxcam and
+ tcm825x drivers, which should be converted to V4L2 subdev.
+
+ Do not use for new developments.
+
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index c2d61d4f03d1..a9d355230e8e 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -10,7 +10,8 @@ ifeq ($(CONFIG_COMPAT),y)
videodev-objs += v4l2-compat-ioctl32.o
endif
-obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-int-device.o
+obj-$(CONFIG_VIDEO_DEV) += videodev.o
+obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
obj-$(CONFIG_VIDEO_TUNER) += tuner.o
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index b5a819af2b8c..b5a8aac2e126 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -1013,6 +1013,11 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
t->standby = false;
analog_ops->set_params(&t->fe, &params);
+ /*
+ * The tuner driver might decide to change the audmode if it only
+ * supports stereo, so update t->audmode.
+ */
+ t->audmode = params.audmode;
}
/*
@@ -1235,8 +1240,18 @@ static int tuner_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
if (set_mode(t, vt->type))
return 0;
- if (t->mode == V4L2_TUNER_RADIO)
+ if (t->mode == V4L2_TUNER_RADIO) {
t->audmode = vt->audmode;
+ /*
+ * For radio audmode can only be mono or stereo. Map any
+ * other values to stereo. The actual tuner driver that is
+ * called in set_radio_freq can decide to limit the audmode to
+ * mono if only mono is supported.
+ */
+ if (t->audmode != V4L2_TUNER_MODE_MONO &&
+ t->audmode != V4L2_TUNER_MODE_STEREO)
+ t->audmode = V4L2_TUNER_MODE_STEREO;
+ }
set_freq(t, 0);
return 0;
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 380ddd89fa4c..aa044f491666 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -238,7 +238,7 @@ int v4l2_chip_match_host(const struct v4l2_dbg_match *match)
}
EXPORT_SYMBOL(v4l2_chip_match_host);
-#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_I2C)
int v4l2_chip_match_i2c_client(struct i2c_client *c, const struct v4l2_dbg_match *match)
{
int len;
@@ -384,7 +384,7 @@ EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr);
const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
{
static const unsigned short radio_addrs[] = {
-#if defined(CONFIG_MEDIA_TUNER_TEA5761) || defined(CONFIG_MEDIA_TUNER_TEA5761_MODULE)
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
0x10,
#endif
0x60,
@@ -978,3 +978,13 @@ const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
return best;
}
EXPORT_SYMBOL_GPL(v4l2_find_nearest_format);
+
+void v4l2_get_timestamp(struct timeval *tv)
+{
+ struct timespec ts;
+
+ ktime_get_ts(&ts);
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+}
+EXPORT_SYMBOL_GPL(v4l2_get_timestamp);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index f6ee201d9347..6b28b5800500 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -577,8 +577,6 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_GAIN: return "Gain";
case V4L2_CID_HFLIP: return "Horizontal Flip";
case V4L2_CID_VFLIP: return "Vertical Flip";
- case V4L2_CID_HCENTER: return "Horizontal Center";
- case V4L2_CID_VCENTER: return "Vertical Center";
case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
@@ -1160,8 +1158,7 @@ static int new_to_user(struct v4l2_ext_control *c,
}
/* Copy the new value to the current value. */
-static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
- bool update_inactive)
+static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
{
bool changed = false;
@@ -1185,8 +1182,8 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
ctrl->cur.val = ctrl->val;
break;
}
- if (update_inactive) {
- /* Note: update_inactive can only be true for auto clusters. */
+ if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) {
+ /* Note: CH_FLAGS is only set for auto clusters. */
ctrl->flags &=
~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
if (!is_cur_manual(ctrl->cluster[0])) {
@@ -1196,14 +1193,15 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
}
fh = NULL;
}
- if (changed || update_inactive) {
+ if (changed || ch_flags) {
/* If a control was changed that was not one of the controls
modified by the application, then send the event to all. */
if (!ctrl->is_new)
fh = NULL;
send_event(fh, ctrl,
- (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) |
- (update_inactive ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
+ (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags);
+ if (ctrl->call_notify && changed && ctrl->handler->notify)
+ ctrl->handler->notify(ctrl, ctrl->handler->notify_priv);
}
}
@@ -1257,6 +1255,41 @@ static int cluster_changed(struct v4l2_ctrl *master)
return diff;
}
+/* Control range checking */
+static int check_range(enum v4l2_ctrl_type type,
+ s32 min, s32 max, u32 step, s32 def)
+{
+ switch (type) {
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ if (step != 1 || max > 1 || min < 0)
+ return -ERANGE;
+ /* fall through */
+ case V4L2_CTRL_TYPE_INTEGER:
+ if (step <= 0 || min > max || def < min || def > max)
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_BITMASK:
+ if (step || min || !max || (def & ~max))
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (min > max || def < min || def > max)
+ return -ERANGE;
+ /* Note: step == menu_skip_mask for menu controls.
+ So here we check if the default value is masked out. */
+ if (step && ((1 << def) & step))
+ return -EINVAL;
+ return 0;
+ case V4L2_CTRL_TYPE_STRING:
+ if (min > max || min < 0 || step < 1 || def)
+ return -ERANGE;
+ return 0;
+ default:
+ return 0;
+ }
+}
+
/* Validate a new control */
static int validate_new(const struct v4l2_ctrl *ctrl,
struct v4l2_ext_control *c)
@@ -1529,30 +1562,21 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
{
struct v4l2_ctrl *ctrl;
unsigned sz_extra = 0;
+ int err;
if (hdl->error)
return NULL;
/* Sanity checks */
if (id == 0 || name == NULL || id >= V4L2_CID_PRIVATE_BASE ||
- (type == V4L2_CTRL_TYPE_INTEGER && step == 0) ||
- (type == V4L2_CTRL_TYPE_BITMASK && max == 0) ||
(type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
- (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL) ||
- (type == V4L2_CTRL_TYPE_STRING && max == 0)) {
- handler_set_err(hdl, -ERANGE);
- return NULL;
- }
- if (type != V4L2_CTRL_TYPE_BITMASK && max < min) {
+ (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) {
handler_set_err(hdl, -ERANGE);
return NULL;
}
- if ((type == V4L2_CTRL_TYPE_INTEGER ||
- type == V4L2_CTRL_TYPE_MENU ||
- type == V4L2_CTRL_TYPE_INTEGER_MENU ||
- type == V4L2_CTRL_TYPE_BOOLEAN) &&
- (def < min || def > max)) {
- handler_set_err(hdl, -ERANGE);
+ err = check_range(type, min, max, step, def);
+ if (err) {
+ handler_set_err(hdl, err);
return NULL;
}
if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) {
@@ -1980,6 +2004,13 @@ void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
}
EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
+int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd)
+{
+ v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status);
+
/* Call s_ctrl for all controls owned by the handler */
int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
{
@@ -2426,8 +2457,8 @@ EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64);
/* Core function that calls try/s_ctrl and ensures that the new value is
copied to the current value on a set.
Must be called with ctrl->handler->lock held. */
-static int try_or_set_cluster(struct v4l2_fh *fh,
- struct v4l2_ctrl *master, bool set)
+static int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
+ bool set, u32 ch_flags)
{
bool update_flag;
int ret;
@@ -2465,7 +2496,8 @@ static int try_or_set_cluster(struct v4l2_fh *fh,
/* If OK, then make the new values permanent. */
update_flag = is_cur_manual(master) != is_new_manual(master);
for (i = 0; i < master->ncontrols; i++)
- new_to_cur(fh, master->cluster[i], update_flag && i > 0);
+ new_to_cur(fh, master->cluster[i], ch_flags |
+ ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
return 0;
}
@@ -2592,7 +2624,7 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
} while (!ret && idx);
if (!ret)
- ret = try_or_set_cluster(fh, master, set);
+ ret = try_or_set_cluster(fh, master, set, 0);
/* Copy the new values back to userspace. */
if (!ret) {
@@ -2638,10 +2670,9 @@ EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
/* Helper function for VIDIOC_S_CTRL compatibility */
static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
- struct v4l2_ext_control *c)
+ struct v4l2_ext_control *c, u32 ch_flags)
{
struct v4l2_ctrl *master = ctrl->cluster[0];
- int ret;
int i;
/* String controls are not supported. The user_to_new() and
@@ -2651,12 +2682,6 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
if (ctrl->type == V4L2_CTRL_TYPE_STRING)
return -EINVAL;
- ret = validate_new(ctrl, c);
- if (ret)
- return ret;
-
- v4l2_ctrl_lock(ctrl);
-
/* Reset the 'is_new' flags of the cluster */
for (i = 0; i < master->ncontrols; i++)
if (master->cluster[i])
@@ -2670,10 +2695,22 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
update_from_auto_cluster(master);
user_to_new(c, ctrl);
- ret = try_or_set_cluster(fh, master, true);
- cur_to_user(c, ctrl);
+ return try_or_set_cluster(fh, master, true, ch_flags);
+}
- v4l2_ctrl_unlock(ctrl);
+/* Helper function for VIDIOC_S_CTRL compatibility */
+static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
+ struct v4l2_ext_control *c)
+{
+ int ret = validate_new(ctrl, c);
+
+ if (!ret) {
+ v4l2_ctrl_lock(ctrl);
+ ret = set_ctrl(fh, ctrl, c, 0);
+ if (!ret)
+ cur_to_user(c, ctrl);
+ v4l2_ctrl_unlock(ctrl);
+ }
return ret;
}
@@ -2691,7 +2728,7 @@ int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
return -EACCES;
c.value = control->value;
- ret = set_ctrl(fh, ctrl, &c);
+ ret = set_ctrl_lock(fh, ctrl, &c);
control->value = c.value;
return ret;
}
@@ -2710,7 +2747,7 @@ int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
/* It's a driver bug if this happens. */
WARN_ON(!type_is_int(ctrl));
c.value = val;
- return set_ctrl(NULL, ctrl, &c);
+ return set_ctrl_lock(NULL, ctrl, &c);
}
EXPORT_SYMBOL(v4l2_ctrl_s_ctrl);
@@ -2721,10 +2758,61 @@ int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
/* It's a driver bug if this happens. */
WARN_ON(ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
c.value64 = val;
- return set_ctrl(NULL, ctrl, &c);
+ return set_ctrl_lock(NULL, ctrl, &c);
}
EXPORT_SYMBOL(v4l2_ctrl_s_ctrl_int64);
+void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
+{
+ if (ctrl == NULL)
+ return;
+ if (notify == NULL) {
+ ctrl->call_notify = 0;
+ return;
+ }
+ if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify))
+ return;
+ ctrl->handler->notify = notify;
+ ctrl->handler->notify_priv = priv;
+ ctrl->call_notify = 1;
+}
+EXPORT_SYMBOL(v4l2_ctrl_notify);
+
+int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
+ s32 min, s32 max, u32 step, s32 def)
+{
+ int ret = check_range(ctrl->type, min, max, step, def);
+ struct v4l2_ext_control c;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ v4l2_ctrl_lock(ctrl);
+ ctrl->minimum = min;
+ ctrl->maximum = max;
+ ctrl->step = step;
+ ctrl->default_value = def;
+ c.value = ctrl->cur.val;
+ if (validate_new(ctrl, &c))
+ c.value = def;
+ if (c.value != ctrl->cur.val)
+ ret = set_ctrl(NULL, ctrl, &c, V4L2_EVENT_CTRL_CH_RANGE);
+ else
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
+ v4l2_ctrl_unlock(ctrl);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_modify_range);
+
static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
@@ -2804,6 +2892,15 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
}
EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
+int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (!sd->ctrl_handler)
+ return -EINVAL;
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
+
unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
{
struct v4l2_fh *fh = file->private_data;
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 98dcad9c8a3b..de1e9ab7db99 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -222,7 +222,7 @@ static struct class video_class = {
struct video_device *video_devdata(struct file *file)
{
- return video_device[iminor(file->f_path.dentry->d_inode)];
+ return video_device[iminor(file_inode(file))];
}
EXPORT_SYMBOL(video_devdata);
@@ -568,11 +568,6 @@ static void determine_valid_ioctls(struct video_device *vdev)
if (ops->vidioc_s_priority ||
test_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags))
set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
- SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
- SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
- SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
- SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
- SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
/* Note: the control handler can also be passed through the filehandle,
@@ -605,8 +600,6 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event);
- SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
- SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
@@ -672,6 +665,13 @@ static void determine_valid_ioctls(struct video_device *vdev)
}
if (!is_radio) {
/* ioctls valid for video or vbi */
+ SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
+ SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
+ SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
if (ops->vidioc_s_std)
set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
if (ops->vidioc_g_std || vdev->current_norm)
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 513969fa695d..8ed5da2170bf 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -112,7 +112,7 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
/* Unregister subdevs */
list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) {
v4l2_device_unregister_subdev(sd);
-#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_I2C)
if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) {
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -159,31 +159,21 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
sd->v4l2_dev = v4l2_dev;
if (sd->internal_ops && sd->internal_ops->registered) {
err = sd->internal_ops->registered(sd);
- if (err) {
- module_put(sd->owner);
- return err;
- }
+ if (err)
+ goto error_module;
}
/* This just returns 0 if either of the two args is NULL */
err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL);
- if (err) {
- if (sd->internal_ops && sd->internal_ops->unregistered)
- sd->internal_ops->unregistered(sd);
- module_put(sd->owner);
- return err;
- }
+ if (err)
+ goto error_unregister;
#if defined(CONFIG_MEDIA_CONTROLLER)
/* Register the entity. */
if (v4l2_dev->mdev) {
err = media_device_register_entity(v4l2_dev->mdev, entity);
- if (err < 0) {
- if (sd->internal_ops && sd->internal_ops->unregistered)
- sd->internal_ops->unregistered(sd);
- module_put(sd->owner);
- return err;
- }
+ if (err < 0)
+ goto error_unregister;
}
#endif
@@ -192,6 +182,14 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
spin_unlock(&v4l2_dev->lock);
return 0;
+
+error_unregister:
+ if (sd->internal_ops && sd->internal_ops->unregistered)
+ sd->internal_ops->unregistered(sd);
+error_module:
+ module_put(sd->owner);
+ sd->v4l2_dev = NULL;
+ return err;
}
EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index c72009218152..86dcb5483c42 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -311,3 +311,10 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
+
+int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 438ea45d1074..da99cf727162 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -62,7 +62,7 @@ struct v4l2_m2m_dev {
struct list_head job_queue;
spinlock_t job_spinlock;
- struct v4l2_m2m_ops *m2m_ops;
+ const struct v4l2_m2m_ops *m2m_ops;
};
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
@@ -519,7 +519,7 @@ EXPORT_SYMBOL(v4l2_m2m_mmap);
*
* Usually called from driver's probe() function.
*/
-struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
+struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 5449e8aa984a..fb5ee5dd8fe9 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -340,7 +340,7 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
break;
}
- b->flags = 0;
+ b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
if (vb->map)
b->flags |= V4L2_BUF_FLAG_MAPPED;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index e02c4797b1c6..db1235dcb328 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -40,9 +40,10 @@ module_param(debug, int, 0644);
#define call_qop(q, op, args...) \
(((q)->ops->op) ? ((q)->ops->op(args)) : 0)
-#define V4L2_BUFFER_STATE_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
+#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
- V4L2_BUF_FLAG_PREPARED)
+ V4L2_BUF_FLAG_PREPARED | \
+ V4L2_BUF_FLAG_TIMESTAMP_MASK)
/**
* __vb2_buf_mem_alloc() - allocate video memory for the given buffer
@@ -401,7 +402,8 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
/*
* Clear any buffer state related flags.
*/
- b->flags &= ~V4L2_BUFFER_STATE_FLAGS;
+ b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
+ b->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
switch (vb->state) {
case VB2_BUF_STATE_QUEUED:
@@ -941,7 +943,7 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
vb->v4l2_buf.field = b->field;
vb->v4l2_buf.timestamp = b->timestamp;
- vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS;
+ vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
}
/**
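With __fill_v4l2_buffer() now reporting V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC and userspace-supplied flags filtered through V4L2_BUFFER_MASK_FLAGS, an application can tell which clock a dequeued buffer's timestamp came from. A minimal illustrative userspace check (not part of the patch):

	#include <linux/videodev2.h>

	/* 'buf' as returned by VIDIOC_DQBUF (illustrative only). */
	static int timestamp_is_monotonic(const struct v4l2_buffer *buf)
	{
		return (buf->flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		       V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	}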
@@ -1963,6 +1965,11 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
poll_wait(file, &fh->wait, wait);
}
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
+ return res;
+ if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
+ return res;
+
/*
* Start file I/O emulator only if streaming API has not been used yet.
*/
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 56ff19cdc2ad..ffcb10ac4341 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -512,18 +512,17 @@ int memstick_add_host(struct memstick_host *host)
{
int rc;
- while (1) {
- if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL))
- return -ENOMEM;
+ idr_preload(GFP_KERNEL);
+ spin_lock(&memstick_host_lock);
- spin_lock(&memstick_host_lock);
- rc = idr_get_new(&memstick_host_idr, host, &host->id);
- spin_unlock(&memstick_host_lock);
- if (!rc)
- break;
- else if (rc != -EAGAIN)
- return rc;
- }
+ rc = idr_alloc(&memstick_host_idr, host, 0, 0, GFP_NOWAIT);
+ if (rc >= 0)
+ host->id = rc;
+
+ spin_unlock(&memstick_host_lock);
+ idr_preload_end();
+ if (rc < 0)
+ return rc;
dev_set_name(&host->dev, "memstick%u", host->id);
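The memstick conversion above follows the idr_preload()/idr_alloc() pattern that replaced idr_pre_get()/idr_get_new(): preload (which may sleep) outside the lock, allocate with GFP_NOWAIT under it. A minimal sketch with hypothetical names:

	#include <linux/idr.h>
	#include <linux/spinlock.h>
	#include <linux/gfp.h>

	static DEFINE_SPINLOCK(example_lock);
	static DEFINE_IDR(example_idr);

	/* Returns the new id (>= 0) or a negative errno. */
	static int example_add(void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* may sleep; pre-allocates idr layers */
		spin_lock(&example_lock);
		id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);	/* 0, 0 = any id */
		spin_unlock(&example_lock);
		idr_preload_end();

		return id;
	}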
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 9729b92fbfdd..f12b78dbce04 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1213,21 +1213,10 @@ static int mspro_block_init_disk(struct memstick_dev *card)
msb->page_size = be16_to_cpu(sys_info->unit_size);
mutex_lock(&mspro_block_disk_lock);
- if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL)) {
- mutex_unlock(&mspro_block_disk_lock);
- return -ENOMEM;
- }
-
- rc = idr_get_new(&mspro_block_disk_idr, card, &disk_id);
+ disk_id = idr_alloc(&mspro_block_disk_idr, card, 0, 256, GFP_KERNEL);
mutex_unlock(&mspro_block_disk_lock);
-
- if (rc)
- return rc;
-
- if ((disk_id << MSPRO_BLOCK_PART_SHIFT) > 255) {
- rc = -ENOSPC;
- goto out_release_id;
- }
+ if (disk_id < 0)
+ return disk_id;
msb->disk = alloc_disk(1 << MSPRO_BLOCK_PART_SHIFT);
if (!msb->disk) {
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 29b2172ae18f..a7c5b31c0d50 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
/* Executes one TPC (data is read/written from small or large fifo) */
static void r592_execute_tpc(struct r592_device *dev)
{
- bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
+ bool is_write;
int len, error;
u32 status, reg;
@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
return;
}
+ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
len = dev->req->long_data ?
dev->req->sg.length : dev->req->data_len;
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index f5ddb82dadb7..64a779c58a74 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -426,6 +426,9 @@ static void rtsx_pci_ms_request(struct memstick_host *msh)
dev_dbg(ms_dev(host), "--> %s\n", __func__);
+ if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD))
+ return;
+
schedule_work(&host->handle_req);
}
@@ -441,6 +444,10 @@ static int rtsx_pci_ms_set_param(struct memstick_host *msh,
dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
__func__, param, value);
+ err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD);
+ if (err)
+ return err;
+
switch (param) {
case MEMSTICK_POWER:
if (value == MEMSTICK_POWER_ON)
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 391e23e6a647..582bda543520 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -531,7 +531,7 @@ static int pm800_probe(struct i2c_client *client,
ret = device_800_init(chip, pdata);
if (ret) {
dev_err(chip->dev, "%s id 0x%x failed!\n", __func__, chip->id);
- goto err_800_init;
+ goto err_subchip_alloc;
}
ret = pm800_pages_init(chip);
@@ -546,10 +546,8 @@ static int pm800_probe(struct i2c_client *client,
err_page_init:
mfd_remove_devices(chip->dev);
device_irq_exit_800(chip);
-err_800_init:
- devm_kfree(&client->dev, subchip);
err_subchip_alloc:
- pm80x_deinit(client);
+ pm80x_deinit();
out_init:
return ret;
}
@@ -562,9 +560,7 @@ static int pm800_remove(struct i2c_client *client)
device_irq_exit_800(chip);
pm800_pages_exit(chip);
- devm_kfree(&client->dev, chip->subchip);
-
- pm80x_deinit(client);
+ pm80x_deinit();
return 0;
}
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index e671230be2b1..65d7ac099b20 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -257,7 +257,7 @@ static int pm805_probe(struct i2c_client *client,
pdata->plat_config(chip, pdata);
err_805_init:
- pm80x_deinit(client);
+ pm80x_deinit();
out_init:
return ret;
}
@@ -269,7 +269,7 @@ static int pm805_remove(struct i2c_client *client)
mfd_remove_devices(chip->dev);
device_irq_exit_805(chip);
- pm80x_deinit(client);
+ pm80x_deinit();
return 0;
}
diff --git a/drivers/mfd/88pm80x.c b/drivers/mfd/88pm80x.c
index 1adb355d86d1..f736a46eb8c0 100644
--- a/drivers/mfd/88pm80x.c
+++ b/drivers/mfd/88pm80x.c
@@ -48,14 +48,12 @@ int pm80x_init(struct i2c_client *client,
ret = PTR_ERR(map);
dev_err(&client->dev, "Failed to allocate register map: %d\n",
ret);
- goto err_regmap_init;
+ return ret;
}
chip->id = id->driver_data;
- if (chip->id < CHIP_PM800 || chip->id > CHIP_PM805) {
- ret = -EINVAL;
- goto err_chip_id;
- }
+ if (chip->id < CHIP_PM800 || chip->id > CHIP_PM805)
+ return -EINVAL;
chip->client = client;
chip->regmap = map;
@@ -82,19 +80,11 @@ int pm80x_init(struct i2c_client *client,
}
return 0;
-
-err_chip_id:
- regmap_exit(map);
-err_regmap_init:
- devm_kfree(&client->dev, chip);
- return ret;
}
EXPORT_SYMBOL_GPL(pm80x_init);
-int pm80x_deinit(struct i2c_client *client)
+int pm80x_deinit(void)
{
- struct pm80x_chip *chip = i2c_get_clientdata(client);
-
/*
* workaround: clear the dependency between pm800 and pm805.
* would remove it after HW chip fixes the issue.
@@ -103,10 +93,6 @@ int pm80x_deinit(struct i2c_client *client)
g_pm80x_chip->companion = NULL;
else
g_pm80x_chip = NULL;
-
- regmap_exit(chip->regmap);
- devm_kfree(&client->dev, chip);
-
return 0;
}
EXPORT_SYMBOL_GPL(pm80x_deinit);
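The 88pm80x cleanups work because the chip structure and regmap are device-managed, so neither the probe error paths nor remove() need to free them by hand. A sketch of that convention, using a hypothetical driver and an assumed regmap config (not the 88pm80x code):

	#include <linux/i2c.h>
	#include <linux/regmap.h>
	#include <linux/slab.h>

	struct example_chip {			/* hypothetical example type */
		struct regmap *regmap;
	};

	static const struct regmap_config example_regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
	};

	static int example_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
	{
		struct example_chip *chip;

		/* Both allocations are released automatically when probe fails
		 * later or the device is unbound; no kfree()/regmap_exit(). */
		chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->regmap = devm_regmap_init_i2c(client, &example_regmap_config);
		if (IS_ERR(chip->regmap))
			return PTR_ERR(chip->regmap);

		i2c_set_clientdata(client, chip);
		return 0;
	}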
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ff553babf455..671f5b171c73 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -65,7 +65,7 @@ config MFD_SM501_GPIO
config MFD_RTSX_PCI
tristate "Support for Realtek PCI-E card reader"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
help
This supports for Realtek PCI-Express card reader including rts5209,
@@ -95,7 +95,7 @@ config MFD_DM355EVM_MSP
config MFD_TI_SSP
tristate "TI Sequencer Serial Port support"
- depends on ARCH_DAVINCI_TNETV107X
+ depends on ARCH_DAVINCI_TNETV107X && GENERIC_HARDIRQS
select MFD_CORE
---help---
Say Y here if you want support for the Sequencer Serial Port
@@ -109,6 +109,7 @@ config MFD_TI_AM335X_TSCADC
select MFD_CORE
select REGMAP
select REGMAP_MMIO
+ depends on GENERIC_HARDIRQS
help
If you say yes here you get support for Texas Instruments series
of Touch Screen /ADC chips.
@@ -126,6 +127,7 @@ config HTC_EGPIO
config HTC_PASIC3
tristate "HTC PASIC3 LED/DS1WM chip support"
select MFD_CORE
+ depends on GENERIC_HARDIRQS
help
This core driver provides register access for the LED/DS1WM
chips labeled "AIC2" and "AIC3", found on HTC Blueangel and
@@ -157,6 +159,7 @@ config MFD_LM3533
depends on I2C
select MFD_CORE
select REGMAP_I2C
+ depends on GENERIC_HARDIRQS
help
Say yes here to enable support for National Semiconductor / TI
LM3533 Lighting Power chips.
@@ -171,6 +174,7 @@ config TPS6105X
select REGULATOR
select MFD_CORE
select REGULATOR_FIXED_VOLTAGE
+ depends on GENERIC_HARDIRQS
help
This option enables a driver for the TP61050/TPS61052
high-power "white LED driver". This boost converter is
@@ -193,7 +197,7 @@ config TPS65010
config TPS6507X
tristate "TPS6507x Power Management / Touch Screen chips"
select MFD_CORE
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
help
If you say yes here you get support for the TPS6507x series of
Power Management / Touch Screen chips. These include voltage
@@ -204,7 +208,7 @@ config TPS6507X
config MFD_TPS65217
tristate "TPS65217 Power Management / White LED chips"
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
help
@@ -234,7 +238,7 @@ config MFD_TPS6586X
config MFD_TPS65910
bool "TPS65910 Power Management chip"
- depends on I2C=y && GPIOLIB
+ depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -251,7 +255,7 @@ config MFD_TPS65912_I2C
bool "TPS65912 Power Management chip with I2C"
select MFD_CORE
select MFD_TPS65912
- depends on I2C=y && GPIOLIB
+ depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
help
If you say yes here you get support for the TPS65912 series of
PM chips with I2C interface.
@@ -260,7 +264,7 @@ config MFD_TPS65912_SPI
bool "TPS65912 Power Management chip with SPI"
select MFD_CORE
select MFD_TPS65912
- depends on SPI_MASTER && GPIOLIB
+ depends on SPI_MASTER && GPIOLIB && GENERIC_HARDIRQS
help
If you say yes here you get support for the TPS65912 series of
PM chips with SPI interface.
@@ -330,13 +334,13 @@ config TWL4030_POWER
config MFD_TWL4030_AUDIO
bool
- depends on TWL4030_CORE
+ depends on TWL4030_CORE && GENERIC_HARDIRQS
select MFD_CORE
default n
config TWL6040_CORE
bool "Support for TWL6040 audio codec"
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -405,7 +409,7 @@ config MFD_TMIO
config MFD_T7L66XB
bool "Support Toshiba T7L66XB"
- depends on ARM && HAVE_CLK
+ depends on ARM && HAVE_CLK && GENERIC_HARDIRQS
select MFD_CORE
select MFD_TMIO
help
@@ -413,7 +417,7 @@ config MFD_T7L66XB
config MFD_SMSC
bool "Support for the SMSC ECE1099 series chips"
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
help
@@ -460,7 +464,7 @@ config MFD_DA9052_SPI
select REGMAP_SPI
select REGMAP_IRQ
select PMIC_DA9052
- depends on SPI_MASTER=y
+ depends on SPI_MASTER=y && GENERIC_HARDIRQS
help
Support for the Dialog Semiconductor DA9052 PMIC
when controlled using SPI. This driver provides common support
@@ -472,7 +476,7 @@ config MFD_DA9052_I2C
select REGMAP_I2C
select REGMAP_IRQ
select PMIC_DA9052
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
help
Support for the Dialog Semiconductor DA9052 PMIC
when controlled using I2C. This driver provides common support
@@ -485,7 +489,7 @@ config MFD_DA9055
select REGMAP_IRQ
select PMIC_DA9055
select MFD_CORE
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
help
Say yes here for support of Dialog Semiconductor DA9055. This is
a Power Management IC. This driver provides common support for
@@ -508,7 +512,7 @@ config PMIC_ADP5520
config MFD_LP8788
bool "Texas Instruments LP8788 Power Management Unit Driver"
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
select IRQ_DOMAIN
@@ -611,7 +615,7 @@ config MFD_ARIZONA_I2C
select MFD_ARIZONA
select MFD_CORE
select REGMAP_I2C
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
help
Support for the Wolfson Microelectronics Arizona platform audio SoC
core functionality controlled via I2C.
@@ -621,7 +625,7 @@ config MFD_ARIZONA_SPI
select MFD_ARIZONA
select MFD_CORE
select REGMAP_SPI
- depends on SPI_MASTER
+ depends on SPI_MASTER && GENERIC_HARDIRQS
help
Support for the Wolfson Microelectronics Arizona platform audio SoC
core functionality controlled via I2C.
@@ -641,7 +645,7 @@ config MFD_WM5110
config MFD_WM8400
bool "Support Wolfson Microelectronics WM8400"
select MFD_CORE
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
select REGMAP_I2C
help
Support for the Wolfson Microelecronics WM8400 PMIC and audio
@@ -785,7 +789,7 @@ config MFD_MC13783
config MFD_MC13XXX
tristate
- depends on SPI_MASTER || I2C
+ depends on (SPI_MASTER || I2C) && GENERIC_HARDIRQS
select MFD_CORE
select MFD_MC13783
help
@@ -796,7 +800,7 @@ config MFD_MC13XXX
config MFD_MC13XXX_SPI
tristate "Freescale MC13783 and MC13892 SPI interface"
- depends on SPI_MASTER
+ depends on SPI_MASTER && GENERIC_HARDIRQS
select REGMAP_SPI
select MFD_MC13XXX
help
@@ -804,7 +808,7 @@ config MFD_MC13XXX_SPI
config MFD_MC13XXX_I2C
tristate "Freescale MC13892 I2C interface"
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
select REGMAP_I2C
select MFD_MC13XXX
help
@@ -822,7 +826,7 @@ config ABX500_CORE
config AB3100_CORE
bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions"
- depends on I2C=y && ABX500_CORE
+ depends on I2C=y && ABX500_CORE && GENERIC_HARDIRQS
select MFD_CORE
default y if ARCH_U300
help
@@ -909,7 +913,7 @@ config MFD_TIMBERDALE
config LPC_SCH
tristate "Intel SCH LPC"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
help
LPC bridge function of the Intel SCH provides support for
@@ -917,7 +921,7 @@ config LPC_SCH
config LPC_ICH
tristate "Intel ICH LPC"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
help
The LPC bridge function of the Intel ICH provides support for
@@ -928,7 +932,7 @@ config LPC_ICH
config MFD_RDC321X
tristate "Support for RDC-R321x southbridge"
select MFD_CORE
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
help
Say yes here if you want to have support for the RDC R-321x SoC
southbridge which provides access to GPIOs and Watchdog using the
@@ -937,7 +941,7 @@ config MFD_RDC321X
config MFD_JANZ_CMODIO
tristate "Support for Janz CMOD-IO PCI MODULbus Carrier Board"
select MFD_CORE
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
help
This is the core driver for the Janz CMOD-IO PCI MODULbus
carrier board. This device is a PCI to MODULbus bridge which may
@@ -955,7 +959,7 @@ config MFD_JZ4740_ADC
config MFD_VX855
tristate "Support for VIA VX855/VX875 integrated south bridge"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
help
Say yes here to enable support for various functions of the
@@ -964,7 +968,7 @@ config MFD_VX855
config MFD_WL1273_CORE
tristate "Support for TI WL1273 FM radio."
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
select MFD_CORE
default n
help
@@ -1028,7 +1032,7 @@ config MFD_TPS65090
config MFD_AAT2870_CORE
bool "Support for the AnalogicTech AAT2870"
select MFD_CORE
- depends on I2C=y && GPIOLIB
+ depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
help
If you say yes here you get support for the AAT2870.
This driver provides common support for accessing the device,
@@ -1060,7 +1064,7 @@ config MFD_RC5T583
config MFD_STA2X11
bool "STA2X11 multi function device support"
- depends on STA2X11
+ depends on STA2X11 && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_MMIO
@@ -1077,7 +1081,7 @@ config MFD_PALMAS
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
help
If you say yes here you get support for the Palmas
series of PMIC chips from Texas Instruments.
@@ -1085,7 +1089,7 @@ config MFD_PALMAS
config MFD_VIPERBOARD
tristate "Support for Nano River Technologies Viperboard"
select MFD_CORE
- depends on USB
+ depends on USB && GENERIC_HARDIRQS
default n
help
Say yes here if you want support for Nano River Technologies
@@ -1099,7 +1103,7 @@ config MFD_VIPERBOARD
config MFD_RETU
tristate "Support for Retu multi-function device"
select MFD_CORE
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
select REGMAP_IRQ
help
Retu is a multi-function device found on Nokia Internet Tablets
@@ -1110,7 +1114,7 @@ config MFD_AS3711
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
help
Support for the AS3711 PMIC from AMS
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 8b977f8045ae..b90409c23664 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o
obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 8b5d685ab980..7c84ced2e01b 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -320,6 +320,7 @@ static struct abx500_ops ab8500_ops = {
.mask_and_set_register = ab8500_mask_and_set_register,
.event_registers_startup_state_get = NULL,
.startup_irq_enabled = NULL,
+ .dump_all_banks = ab8500_dump_all_banks,
};
static void ab8500_irq_lock(struct irq_data *data)
@@ -368,16 +369,48 @@ static void ab8500_irq_mask(struct irq_data *data)
int mask = 1 << (offset % 8);
ab8500->mask[index] |= mask;
+
+ /* The AB8500 GPIOs have two interrupts each (rising & falling). */
+ if (offset >= AB8500_INT_GPIO6R && offset <= AB8500_INT_GPIO41R)
+ ab8500->mask[index + 2] |= mask;
+ if (offset >= AB9540_INT_GPIO50R && offset <= AB9540_INT_GPIO54R)
+ ab8500->mask[index + 1] |= mask;
+ if (offset == AB8540_INT_GPIO43R || offset == AB8540_INT_GPIO44R)
+ /* Here the falling IRQ is one bit lower */
+ ab8500->mask[index] |= (mask << 1);
}
static void ab8500_irq_unmask(struct irq_data *data)
{
struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
+ unsigned int type = irqd_get_trigger_type(data);
int offset = data->hwirq;
int index = offset / 8;
int mask = 1 << (offset % 8);
- ab8500->mask[index] &= ~mask;
+ if (type & IRQ_TYPE_EDGE_RISING)
+ ab8500->mask[index] &= ~mask;
+
+ /* The AB8500 GPIOs have two interrupts each (rising & falling). */
+ if (type & IRQ_TYPE_EDGE_FALLING) {
+ if (offset >= AB8500_INT_GPIO6R && offset <= AB8500_INT_GPIO41R)
+ ab8500->mask[index + 2] &= ~mask;
+ else if (offset >= AB9540_INT_GPIO50R && offset <= AB9540_INT_GPIO54R)
+ ab8500->mask[index + 1] &= ~mask;
+ else if (offset == AB8540_INT_GPIO43R || offset == AB8540_INT_GPIO44R)
+ /* Here the falling IRQ is one bit lower */
+ ab8500->mask[index] &= ~(mask << 1);
+ else
+ ab8500->mask[index] &= ~mask;
+ } else {
+ /* Satisfies the case where type is not set. */
+ ab8500->mask[index] &= ~mask;
+ }
+}
+
+static int ab8500_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ return 0;
}
static struct irq_chip ab8500_irq_chip = {
@@ -387,6 +420,7 @@ static struct irq_chip ab8500_irq_chip = {
.irq_mask = ab8500_irq_mask,
.irq_disable = ab8500_irq_mask,
.irq_unmask = ab8500_irq_unmask,
+ .irq_set_type = ab8500_irq_set_type,
};
static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
@@ -411,6 +445,19 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
line = (i << 3) + int_bit;
latch_val &= ~(1 << int_bit);
+ /*
+ * This handles the falling edge hwirqs from the GPIO
+ * lines. Route them back to the line registered for the
+ * rising IRQ, as this is merely a flag for the same IRQ
+ * in linux terms.
+ */
+ if (line >= AB8500_INT_GPIO6F && line <= AB8500_INT_GPIO41F)
+ line -= 16;
+ if (line >= AB9540_INT_GPIO50F && line <= AB9540_INT_GPIO54F)
+ line -= 8;
+ if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F)
+ line += 1;
+
handle_nested_irq(ab8500->irq_base + line);
} while (latch_val);
@@ -521,6 +568,7 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
int virq = ab8500_irq_get_virq(ab8500, line);
handle_nested_irq(virq);
+ ab8500_debug_register_interrupt(line);
value &= ~(1 << bit);
} while (value);
@@ -929,7 +977,7 @@ static struct resource ab8505_iddet_resources[] = {
static struct resource ab8500_temp_resources[] = {
{
- .name = "AB8500_TEMP_WARM",
+ .name = "ABX500_TEMP_WARM",
.start = AB8500_INT_TEMP_WARM,
.end = AB8500_INT_TEMP_WARM,
.flags = IORESOURCE_IRQ,
@@ -1005,8 +1053,8 @@ static struct mfd_cell abx500_common_devs[] = {
.of_compatible = "stericsson,ab8500-denc",
},
{
- .name = "ab8500-temp",
- .of_compatible = "stericsson,ab8500-temp",
+ .name = "abx500-temp",
+ .of_compatible = "stericsson,abx500-temp",
.num_resources = ARRAY_SIZE(ab8500_temp_resources),
.resources = ab8500_temp_resources,
},
@@ -1049,7 +1097,7 @@ static struct mfd_cell ab8500_bm_devs[] = {
static struct mfd_cell ab8500_devs[] = {
{
- .name = "ab8500-gpio",
+ .name = "pinctrl-ab8500",
.of_compatible = "stericsson,ab8500-gpio",
},
{
@@ -1066,7 +1114,8 @@ static struct mfd_cell ab8500_devs[] = {
static struct mfd_cell ab9540_devs[] = {
{
- .name = "ab8500-gpio",
+ .name = "pinctrl-ab9540",
+ .of_compatible = "stericsson,ab9540-gpio",
},
{
.name = "ab9540-usb",
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 5a8e707bc038..45fe3c50eb03 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -4,6 +4,72 @@
* Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
* License Terms: GNU General Public License v2
*/
+/*
+ * AB8500 register access
+ * ======================
+ *
+ * read:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # echo ADDR > <debugfs>/ab8500/register-address
+ * # cat <debugfs>/ab8500/register-value
+ *
+ * write:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # echo ADDR > <debugfs>/ab8500/register-address
+ * # echo VALUE > <debugfs>/ab8500/register-value
+ *
+ * read all registers from a bank:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # cat <debugfs>/ab8500/all-bank-register
+ *
+ * BANK target AB8500 register bank
+ * ADDR target AB8500 register address
+ * VALUE decimal or 0x-prefixed hexadecimal
+ *
+ *
+ * User Space notification on AB8500 IRQ
+ * =====================================
+ *
+ * Allows a user space entity to be notified when a target AB8500 IRQ occurs.
+ * When subscribed, a sysfs entry is created in the ab8500.i2c platform device.
+ * One can poll this file to get target IRQ occurrence information.
+ *
+ * subscribe to an AB8500 IRQ:
+ * # echo IRQ > <debugfs>/ab8500/irq-subscribe
+ *
+ * unsubscribe from an AB8500 IRQ:
+ * # echo IRQ > <debugfs>/ab8500/irq-unsubscribe
+ *
+ *
+ * AB8500 register formatted read/write access
+ * ==========================================
+ *
+ * Read: read data, data>>SHIFT, data&=MASK, output data
+ * [0xABCDEF98] shift=12 mask=0xFFF => 0x00000CDE
+ * Write: read data, data &= ~(MASK<<SHIFT), data |= (VALUE<<SHIFT), write data
+ * [0xABCDEF98] shift=12 mask=0xFFF value=0x123 => [0xAB123F98]
+ *
+ * Usage:
+ * # echo "CMD [OPTIONS] BANK ADRESS [VALUE]" > $debugfs/ab8500/hwreg
+ *
+ * CMD read read access
+ * write write access
+ *
+ * BANK target reg bank
+ * ADDRESS target reg address
+ * VALUE (write) value to be updated
+ *
+ * OPTIONS
+ * -d|-dec (read) output in decimal
+ * -h|-hex (read) output in 0x-prefixed hexadecimal (default)
+ * -l|-w|-b 32bit (default), 16bit or 8bit reg access
+ * -m|-mask MASK 0x-hexa mask (default 0xFFFFFFFF)
+ * -s|-shift SHIFT bit shift value (read: right, write: left)
+ * -o|-offset OFFSET address offset to add to ADDRESS value
+ *
+ * Warning: bit shift operation is applied to bit-mask.
+ * Warning: bit shift direction depends on the read or write command.
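+ *
+ * Example (bank/address/values below are purely illustrative):
+ * # echo "read -m 0x0F -s 4 0x02 0x80" > <debugfs>/ab8500/hwreg
+ * # cat <debugfs>/ab8500/hwreg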
+ */
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -11,13 +77,30 @@
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/string.h>
+#include <linux/ctype.h>
+#endif
static u32 debug_bank;
static u32 debug_address;
+static int irq_first;
+static int irq_last;
+static u32 *irq_count;
+static int num_irqs;
+
+static struct device_attribute **dev_attr;
+static char **event_name;
+
/**
* struct ab8500_reg_range
* @first: the first address of the range
@@ -42,15 +125,35 @@ struct ab8500_prcmu_ranges {
const struct ab8500_reg_range *range;
};
+/* hwreg- "mask" and "shift" entries ressources */
+struct hwreg_cfg {
+ u32 bank; /* target bank */
+ u32 addr; /* target address */
+ uint fmt; /* format */
+ uint mask; /* read/write mask, applied before any bit shift */
+ int shift; /* bit shift (read: right shift, write: left shift) */
+};
+/* fmt bit #0: 0=hexa, 1=dec */
+#define REG_FMT_DEC(c) ((c)->fmt & 0x1)
+#define REG_FMT_HEX(c) (!REG_FMT_DEC(c))
+
+static struct hwreg_cfg hwreg_cfg = {
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: 32bit access, hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+};
+
#define AB8500_NAME_STRING "ab8500"
-#define AB8500_NUM_BANKS 22
+#define AB8500_ADC_NAME_STRING "gpadc"
+#define AB8500_NUM_BANKS 24
#define AB8500_REV_REG 0x80
static struct ab8500_prcmu_ranges debug_ranges[AB8500_NUM_BANKS] = {
[0x0] = {
.num_ranges = 0,
- .range = 0,
+ .range = NULL,
},
[AB8500_SYS_CTRL1_BLOCK] = {
.num_ranges = 3,
@@ -215,7 +318,7 @@ static struct ab8500_prcmu_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
},
[AB8500_CHARGER] = {
- .num_ranges = 8,
+ .num_ranges = 9,
.range = (struct ab8500_reg_range[]) {
{
.first = 0x00,
@@ -249,6 +352,10 @@ static struct ab8500_prcmu_ranges debug_ranges[AB8500_NUM_BANKS] = {
.first = 0xC0,
.last = 0xC2,
},
+ {
+ .first = 0xf5,
+ .last = 0xf6,
+ },
},
},
[AB8500_GAS_GAUGE] = {
@@ -268,6 +375,24 @@ static struct ab8500_prcmu_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
},
},
+ [AB8500_DEVELOPMENT] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x00,
+ },
+ },
+ },
+ [AB8500_DEBUG] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x05,
+ .last = 0x07,
+ },
+ },
+ },
[AB8500_AUDIO] = {
.num_ranges = 1,
.range = (struct ab8500_reg_range[]) {
@@ -354,15 +479,30 @@ static struct ab8500_prcmu_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
};
-static int ab8500_registers_print(struct seq_file *s, void *p)
+static irqreturn_t ab8500_debug_handler(int irq, void *data)
{
- struct device *dev = s->private;
- unsigned int i;
- u32 bank = debug_bank;
+ char buf[16];
+ struct kobject *kobj = (struct kobject *)data;
+ unsigned int irq_abb = irq - irq_first;
- seq_printf(s, AB8500_NAME_STRING " register values:\n");
+ if (irq_abb < num_irqs)
+ irq_count[irq_abb]++;
+ /*
+ * This makes it possible to use poll for events (POLLPRI | POLLERR)
+ * from userspace on sysfs file named <irq-nr>
+ */
+ sprintf(buf, "%d", irq);
+ sysfs_notify(kobj, NULL, buf);
+
+ return IRQ_HANDLED;
+}
+
+/* Prints to seq_file or log_buf */
+static int ab8500_registers_print(struct device *dev, u32 bank,
+ struct seq_file *s)
+{
+ unsigned int i;
- seq_printf(s, " bank %u:\n", bank);
for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
u32 reg;
@@ -379,22 +519,42 @@ static int ab8500_registers_print(struct seq_file *s, void *p)
return err;
}
- err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n", bank,
- reg, value);
- if (err < 0) {
- dev_err(dev, "seq_printf overflow\n");
- /* Error is not returned here since
- * the output is wanted in any case */
- return 0;
+ if (s) {
+ err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n",
+ bank, reg, value);
+ if (err < 0) {
+ dev_err(dev,
+ "seq_printf overflow bank=%d reg=%d\n",
+ bank, reg);
+ /* Error is not returned here since
+ * the output is wanted in any case */
+ return 0;
+ }
+ } else {
+ printk(KERN_INFO" [%u/0x%02X]: 0x%02X\n", bank,
+ reg, value);
}
}
}
return 0;
}
+static int ab8500_print_bank_registers(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ u32 bank = debug_bank;
+
+ seq_printf(s, AB8500_NAME_STRING " register values:\n");
+
+ seq_printf(s, " bank %u:\n", bank);
+
+ ab8500_registers_print(dev, bank, s);
+ return 0;
+}
+
static int ab8500_registers_open(struct inode *inode, struct file *file)
{
- return single_open(file, ab8500_registers_print, inode->i_private);
+ return single_open(file, ab8500_print_bank_registers, inode->i_private);
}
static const struct file_operations ab8500_registers_fops = {
@@ -405,6 +565,64 @@ static const struct file_operations ab8500_registers_fops = {
.owner = THIS_MODULE,
};
+static int ab8500_print_all_banks(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ unsigned int i;
+ int err;
+
+ seq_printf(s, AB8500_NAME_STRING " register values:\n");
+
+ for (i = 1; i < AB8500_NUM_BANKS; i++) {
+ err = seq_printf(s, " bank %u:\n", i);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow, bank=%d\n", i);
+
+ ab8500_registers_print(dev, i, s);
+ }
+ return 0;
+}
+
+/* Dump registers to kernel log */
+void ab8500_dump_all_banks(struct device *dev)
+{
+ unsigned int i;
+
+ printk(KERN_INFO"ab8500 register values:\n");
+
+ for (i = 1; i < AB8500_NUM_BANKS; i++) {
+ printk(KERN_INFO" bank %u:\n", i);
+ ab8500_registers_print(dev, i, NULL);
+ }
+}
+
+static int ab8500_all_banks_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *s;
+ int err;
+
+ err = single_open(file, ab8500_print_all_banks, inode->i_private);
+ if (!err) {
+ /* Default buf size in seq_read is not enough */
+ s = (struct seq_file *)file->private_data;
+ s->size = (PAGE_SIZE * 2);
+ s->buf = kmalloc(s->size, GFP_KERNEL);
+ if (!s->buf) {
+ single_release(inode, file);
+ err = -ENOMEM;
+ }
+ }
+ return err;
+}
+
+static const struct file_operations ab8500_all_banks_fops = {
+ .open = ab8500_all_banks_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static int ab8500_bank_print(struct seq_file *s, void *p)
{
return seq_printf(s, "%d\n", debug_bank);
@@ -519,6 +737,761 @@ static ssize_t ab8500_val_write(struct file *file,
return count;
}
+/*
+ * Interrupt status
+ */
+static u32 num_interrupts[AB8500_MAX_NR_IRQS];
+static int num_interrupt_lines;
+
+void ab8500_debug_register_interrupt(int line)
+{
+ if (line < num_interrupt_lines)
+ num_interrupts[line]++;
+}
+
+static int ab8500_interrupts_print(struct seq_file *s, void *p)
+{
+ int line;
+
+ seq_printf(s, "irq: number of\n");
+
+ for (line = 0; line < num_interrupt_lines; line++)
+ seq_printf(s, "%3i: %6i\n", line, num_interrupts[line]);
+
+ return 0;
+}
+
+static int ab8500_interrupts_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_interrupts_print, inode->i_private);
+}
+
+/*
+ * - HWREG DB8500 formatted routines
+ */
+static int ab8500_hwreg_print(struct seq_file *s, void *d)
+{
+ struct device *dev = s->private;
+ int ret;
+ u8 regvalue;
+
+ ret = abx500_get_register_interruptible(dev,
+ (u8)hwreg_cfg.bank, (u8)hwreg_cfg.addr, &regvalue);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
+ if (hwreg_cfg.shift >= 0)
+ regvalue >>= hwreg_cfg.shift;
+ else
+ regvalue <<= -hwreg_cfg.shift;
+ regvalue &= hwreg_cfg.mask;
+
+ if (REG_FMT_DEC(&hwreg_cfg))
+ seq_printf(s, "%d\n", regvalue);
+ else
+ seq_printf(s, "0x%02X\n", regvalue);
+ return 0;
+}
+
+static int ab8500_hwreg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_hwreg_print, inode->i_private);
+}
+
+static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
+{
+ int bat_ctrl_raw;
+ int bat_ctrl_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ bat_ctrl_raw = ab8500_gpadc_read_raw(gpadc, BAT_CTRL);
+ bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ BAT_CTRL, bat_ctrl_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ bat_ctrl_convert, bat_ctrl_raw);
+}
+
+static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_bat_ctrl_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_bat_ctrl_fops = {
+ .open = ab8500_gpadc_bat_ctrl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
+{
+ int btemp_ball_raw;
+ int btemp_ball_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ btemp_ball_raw = ab8500_gpadc_read_raw(gpadc, BTEMP_BALL);
+ btemp_ball_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
+ btemp_ball_raw);
+
+ return seq_printf(s,
+ "%d,0x%X\n", btemp_ball_convert, btemp_ball_raw);
+}
+
+static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_btemp_ball_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_btemp_ball_fops = {
+ .open = ab8500_gpadc_btemp_ball_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
+{
+ int main_charger_v_raw;
+ int main_charger_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ main_charger_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_V);
+ main_charger_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ MAIN_CHARGER_V, main_charger_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_charger_v_convert, main_charger_v_raw);
+}
+
+static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_charger_v_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_charger_v_fops = {
+ .open = ab8500_gpadc_main_charger_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
+{
+ int acc_detect1_raw;
+ int acc_detect1_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ acc_detect1_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT1);
+ acc_detect1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ACC_DETECT1,
+ acc_detect1_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ acc_detect1_convert, acc_detect1_raw);
+}
+
+static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_acc_detect1_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_acc_detect1_fops = {
+ .open = ab8500_gpadc_acc_detect1_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
+{
+ int acc_detect2_raw;
+ int acc_detect2_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ acc_detect2_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT2);
+ acc_detect2_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ ACC_DETECT2, acc_detect2_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ acc_detect2_convert, acc_detect2_raw);
+}
+
+static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_acc_detect2_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_acc_detect2_fops = {
+ .open = ab8500_gpadc_acc_detect2_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
+{
+ int aux1_raw;
+ int aux1_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ aux1_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX1);
+ aux1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX1,
+ aux1_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ aux1_convert, aux1_raw);
+}
+
+static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_aux1_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_aux1_fops = {
+ .open = ab8500_gpadc_aux1_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
+{
+ int aux2_raw;
+ int aux2_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ aux2_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX2);
+ aux2_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX2,
+ aux2_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ aux2_convert, aux2_raw);
+}
+
+static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_aux2_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_aux2_fops = {
+ .open = ab8500_gpadc_aux2_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
+{
+ int main_bat_v_raw;
+ int main_bat_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ main_bat_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_BAT_V);
+ main_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
+ main_bat_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_bat_v_convert, main_bat_v_raw);
+}
+
+static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_bat_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_bat_v_fops = {
+ .open = ab8500_gpadc_main_bat_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
+{
+ int vbus_v_raw;
+ int vbus_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ vbus_v_raw = ab8500_gpadc_read_raw(gpadc, VBUS_V);
+ vbus_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBUS_V,
+ vbus_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ vbus_v_convert, vbus_v_raw);
+}
+
+static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_vbus_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_vbus_v_fops = {
+ .open = ab8500_gpadc_vbus_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
+{
+ int main_charger_c_raw;
+ int main_charger_c_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ main_charger_c_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_C);
+ main_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ MAIN_CHARGER_C, main_charger_c_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_charger_c_convert, main_charger_c_raw);
+}
+
+static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_charger_c_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_charger_c_fops = {
+ .open = ab8500_gpadc_main_charger_c_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
+{
+ int usb_charger_c_raw;
+ int usb_charger_c_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ usb_charger_c_raw = ab8500_gpadc_read_raw(gpadc, USB_CHARGER_C);
+ usb_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ USB_CHARGER_C, usb_charger_c_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ usb_charger_c_convert, usb_charger_c_raw);
+}
+
+static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_usb_charger_c_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_usb_charger_c_fops = {
+ .open = ab8500_gpadc_usb_charger_c_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
+{
+ int bk_bat_v_raw;
+ int bk_bat_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ bk_bat_v_raw = ab8500_gpadc_read_raw(gpadc, BK_BAT_V);
+ bk_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ BK_BAT_V, bk_bat_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ bk_bat_v_convert, bk_bat_v_raw);
+}
+
+static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_bk_bat_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_bk_bat_v_fops = {
+ .open = ab8500_gpadc_bk_bat_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
+{
+ int die_temp_raw;
+ int die_temp_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ die_temp_raw = ab8500_gpadc_read_raw(gpadc, DIE_TEMP);
+ die_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, DIE_TEMP,
+ die_temp_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ die_temp_convert, die_temp_raw);
+}
+
+static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_die_temp_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_die_temp_fops = {
+ .open = ab8500_gpadc_die_temp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Return the length of an ASCII numerical value, or 0 if the string is not
+ * a numerical value.
+ * The string shall start at the value's first char.
+ * The string may only be terminated by \0, space or newline chars.
+ * value can be decimal or hexadecimal (prefixed 0x or 0X).
+ */
+static int strval_len(char *b)
+{
+ char *s = b;
+ if ((*s == '0') && ((*(s+1) == 'x') || (*(s+1) == 'X'))) {
+ s += 2;
+ for (; *s && (*s != ' ') && (*s != '\n'); s++) {
+ if (!isxdigit(*s))
+ return 0;
+ }
+ } else {
+ if (*s == '-')
+ s++;
+ for (; *s && (*s != ' ') && (*s != '\n'); s++) {
+ if (!isdigit(*s))
+ return 0;
+ }
+ }
+ return (int) (s-b);
+}
+
+/*
+ * parse hwreg input data.
+ * update global hwreg_cfg only if input data syntax is ok.
+ */
+static ssize_t hwreg_common_write(char *b, struct hwreg_cfg *cfg,
+ struct device *dev)
+{
+ uint write, val = 0;
+ u8 regvalue;
+ int ret;
+ struct hwreg_cfg loc = {
+ .bank = 0, /* default: invalid phys addr */
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: 32bit access, hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+ };
+
+ /* read or write ? */
+ if (!strncmp(b, "read ", 5)) {
+ write = 0;
+ b += 5;
+ } else if (!strncmp(b, "write ", 6)) {
+ write = 1;
+ b += 6;
+ } else
+ return -EINVAL;
+
+ /* OPTIONS -l|-w|-b -s -m -o */
+ while ((*b == ' ') || (*b == '-')) {
+ if (*(b-1) != ' ') {
+ b++;
+ continue;
+ }
+ if ((!strncmp(b, "-d ", 3)) ||
+ (!strncmp(b, "-dec ", 5))) {
+ b += (*(b+2) == ' ') ? 3 : 5;
+ loc.fmt |= (1<<0);
+ } else if ((!strncmp(b, "-h ", 3)) ||
+ (!strncmp(b, "-hex ", 5))) {
+ b += (*(b+2) == ' ') ? 3 : 5;
+ loc.fmt &= ~(1<<0);
+ } else if ((!strncmp(b, "-m ", 3)) ||
+ (!strncmp(b, "-mask ", 6))) {
+ b += (*(b+2) == ' ') ? 3 : 6;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.mask = simple_strtoul(b, &b, 0);
+ } else if ((!strncmp(b, "-s ", 3)) ||
+ (!strncmp(b, "-shift ", 7))) {
+ b += (*(b+2) == ' ') ? 3 : 7;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.shift = simple_strtol(b, &b, 0);
+ } else {
+ return -EINVAL;
+ }
+ }
+ /* get arg BANK and ADDRESS */
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.bank = simple_strtoul(b, &b, 0);
+ while (*b == ' ')
+ b++;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.addr = simple_strtoul(b, &b, 0);
+
+ if (write) {
+ while (*b == ' ')
+ b++;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ val = simple_strtoul(b, &b, 0);
+ }
+
+ /* args are ok, update target cfg (mainly for read) */
+ *cfg = loc;
+
+#ifdef ABB_HWREG_DEBUG
+ pr_warn("HWREG request: %s, %s, addr=0x%08X, mask=0x%X, shift=%d"
+ "value=0x%X\n", (write) ? "write" : "read",
+ REG_FMT_DEC(cfg) ? "decimal" : "hexa",
+ cfg->addr, cfg->mask, cfg->shift, val);
+#endif
+
+ if (!write)
+ return 0;
+
+ ret = abx500_get_register_interruptible(dev,
+ (u8)cfg->bank, (u8)cfg->addr, &regvalue);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
+ if (cfg->shift >= 0) {
+ regvalue &= ~(cfg->mask << (cfg->shift));
+ val = (val & cfg->mask) << (cfg->shift);
+ } else {
+ regvalue &= ~(cfg->mask >> (-cfg->shift));
+ val = (val & cfg->mask) >> (-cfg->shift);
+ }
+ val = val | regvalue;
+
+ ret = abx500_set_register_interruptible(dev,
+ (u8)cfg->bank, (u8)cfg->addr, (u8)val);
+ if (ret < 0) {
+ pr_err("abx500_set_reg failed %d, %d", ret, __LINE__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t ab8500_hwreg_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[128];
+ int buf_size, ret;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* get args and process */
+ ret = hwreg_common_write(buf, &hwreg_cfg, dev);
+ return (ret) ? ret : buf_size;
+}
+
+/*
+ * - irq subscribe/unsubscribe stuff
+ */
+static int ab8500_subscribe_unsubscribe_print(struct seq_file *s, void *p)
+{
+ seq_printf(s, "%d\n", irq_first);
+
+ return 0;
+}
+
+static int ab8500_subscribe_unsubscribe_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_subscribe_unsubscribe_print,
+ inode->i_private);
+}
+
+/*
+ * Userspace should use poll() on this file. When an event occurs
+ * the blocking poll will be released.
+ */
+static ssize_t show_irq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long name;
+ unsigned int irq_index;
+ int err;
+
+ err = strict_strtoul(attr->attr.name, 0, &name);
+ if (err)
+ return err;
+
+ irq_index = name - irq_first;
+ if (irq_index >= num_irqs)
+ return -EINVAL;
+ else
+ return sprintf(buf, "%u\n", irq_count[irq_index]);
+}
+
+static ssize_t ab8500_subscribe_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ unsigned int irq_index;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val < irq_first) {
+ dev_err(dev, "debugfs error input < %d\n", irq_first);
+ return -EINVAL;
+ }
+ if (user_val > irq_last) {
+ dev_err(dev, "debugfs error input > %d\n", irq_last);
+ return -EINVAL;
+ }
+
+ irq_index = user_val - irq_first;
+ if (irq_index >= num_irqs)
+ return -EINVAL;
+
+ /*
+ * This will create a sysfs file named <irq-nr> which userspace can
+ * use to select or poll and get the AB8500 events
+ */
+ dev_attr[irq_index] = kmalloc(sizeof(struct device_attribute),
+ GFP_KERNEL);
+ event_name[irq_index] = kmalloc(buf_size, GFP_KERNEL);
+ sprintf(event_name[irq_index], "%lu", user_val);
+ dev_attr[irq_index]->show = show_irq;
+ dev_attr[irq_index]->store = NULL;
+ dev_attr[irq_index]->attr.name = event_name[irq_index];
+ dev_attr[irq_index]->attr.mode = S_IRUGO;
+ err = sysfs_create_file(&dev->kobj, &dev_attr[irq_index]->attr);
+ if (err < 0) {
+ printk(KERN_ERR "sysfs_create_file failed %d\n", err);
+ return err;
+ }
+
+ err = request_threaded_irq(user_val, NULL, ab8500_debug_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ "ab8500-debug", &dev->kobj);
+ if (err < 0) {
+ printk(KERN_ERR "request_threaded_irq failed %d, %lu\n",
+ err, user_val);
+ sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr);
+ return err;
+ }
+
+ return buf_size;
+}
+
+static ssize_t ab8500_unsubscribe_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ unsigned int irq_index;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val < irq_first) {
+ dev_err(dev, "debugfs error input < %d\n", irq_first);
+ return -EINVAL;
+ }
+ if (user_val > irq_last) {
+ dev_err(dev, "debugfs error input > %d\n", irq_last);
+ return -EINVAL;
+ }
+
+ irq_index = user_val - irq_first;
+ if (irq_index >= num_irqs)
+ return -EINVAL;
+
+ /* Set irq count to 0 when unsubscribing */
+ irq_count[irq_index] = 0;
+
+ if (dev_attr[irq_index])
+ sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr);
+
+
+ free_irq(user_val, &dev->kobj);
+ kfree(event_name[irq_index]);
+ kfree(dev_attr[irq_index]);
+
+ return buf_size;
+}
+
+/*
+ * - several debugfs node fops
+ */
+
static const struct file_operations ab8500_bank_fops = {
.open = ab8500_bank_open,
.write = ab8500_bank_write,
@@ -546,64 +1519,231 @@ static const struct file_operations ab8500_val_fops = {
.owner = THIS_MODULE,
};
+static const struct file_operations ab8500_interrupts_fops = {
+ .open = ab8500_interrupts_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_subscribe_fops = {
+ .open = ab8500_subscribe_unsubscribe_open,
+ .write = ab8500_subscribe_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_unsubscribe_fops = {
+ .open = ab8500_subscribe_unsubscribe_open,
+ .write = ab8500_unsubscribe_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_hwreg_fops = {
+ .open = ab8500_hwreg_open,
+ .write = ab8500_hwreg_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static struct dentry *ab8500_dir;
-static struct dentry *ab8500_reg_file;
-static struct dentry *ab8500_bank_file;
-static struct dentry *ab8500_address_file;
-static struct dentry *ab8500_val_file;
+static struct dentry *ab8500_gpadc_dir;
static int ab8500_debug_probe(struct platform_device *plf)
{
+ struct dentry *file;
+ int ret = -ENOMEM;
+ struct ab8500 *ab8500;
debug_bank = AB8500_MISC;
debug_address = AB8500_REV_REG & 0x00FF;
+ ab8500 = dev_get_drvdata(plf->dev.parent);
+ num_irqs = ab8500->mask_size;
+
+ irq_count = kzalloc(sizeof(*irq_count)*num_irqs, GFP_KERNEL);
+ if (!irq_count)
+ return -ENOMEM;
+
+ dev_attr = kzalloc(sizeof(*dev_attr)*num_irqs, GFP_KERNEL);
+ if (!dev_attr)
+ goto out_freeirq_count;
+
+ event_name = kzalloc(sizeof(*event_name)*num_irqs, GFP_KERNEL);
+ if (!event_name)
+ goto out_freedev_attr;
+
+ irq_first = platform_get_irq_byname(plf, "IRQ_FIRST");
+ if (irq_first < 0) {
+ dev_err(&plf->dev, "First irq not found, err %d\n",
+ irq_first);
+ ret = irq_first;
+ goto out_freeevent_name;
+ }
+
+ irq_last = platform_get_irq_byname(plf, "IRQ_LAST");
+ if (irq_last < 0) {
+ dev_err(&plf->dev, "Last irq not found, err %d\n",
+ irq_last);
+ ret = irq_last;
+ goto out_freeevent_name;
+ }
+
ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
if (!ab8500_dir)
- goto exit_no_debugfs;
+ goto err;
+
+ ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING,
+ ab8500_dir);
+ if (!ab8500_gpadc_dir)
+ goto err;
+
+ file = debugfs_create_file("all-bank-registers", S_IRUGO,
+ ab8500_dir, &plf->dev, &ab8500_registers_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("all-banks", S_IRUGO,
+ ab8500_dir, &plf->dev, &ab8500_all_banks_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-bank", (S_IRUGO | S_IWUSR),
+ ab8500_dir, &plf->dev, &ab8500_bank_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-address", (S_IRUGO | S_IWUSR),
+ ab8500_dir, &plf->dev, &ab8500_address_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-value", (S_IRUGO | S_IWUSR),
+ ab8500_dir, &plf->dev, &ab8500_val_fops);
+ if (!file)
+ goto err;
- ab8500_reg_file = debugfs_create_file("all-bank-registers",
- S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
- if (!ab8500_reg_file)
- goto exit_destroy_dir;
+ file = debugfs_create_file("irq-subscribe", (S_IRUGO | S_IWUSR),
+ ab8500_dir, &plf->dev, &ab8500_subscribe_fops);
+ if (!file)
+ goto err;
- ab8500_bank_file = debugfs_create_file("register-bank",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_bank_fops);
- if (!ab8500_bank_file)
- goto exit_destroy_reg;
+ if (is_ab8500(ab8500))
+ num_interrupt_lines = AB8500_NR_IRQS;
+ else if (is_ab8505(ab8500))
+ num_interrupt_lines = AB8505_NR_IRQS;
+ else if (is_ab9540(ab8500))
+ num_interrupt_lines = AB9540_NR_IRQS;
- ab8500_address_file = debugfs_create_file("register-address",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev,
- &ab8500_address_fops);
- if (!ab8500_address_file)
- goto exit_destroy_bank;
+ file = debugfs_create_file("interrupts", (S_IRUGO),
+ ab8500_dir, &plf->dev, &ab8500_interrupts_fops);
+ if (!file)
+ goto err;
- ab8500_val_file = debugfs_create_file("register-value",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_val_fops);
- if (!ab8500_val_file)
- goto exit_destroy_address;
+ file = debugfs_create_file("irq-unsubscribe", (S_IRUGO | S_IWUSR),
+ ab8500_dir, &plf->dev, &ab8500_unsubscribe_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("hwreg", (S_IRUGO | S_IWUSR),
+ ab8500_dir, &plf->dev, &ab8500_hwreg_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bat_ctrl_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_btemp_ball_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect1_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect2_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux1_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux2_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_bat_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("vbus_v", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_vbus_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_c_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_charger_c_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bk_bat_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("die_temp", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_die_temp_fops);
+ if (!file)
+ goto err;
return 0;
-exit_destroy_address:
- debugfs_remove(ab8500_address_file);
-exit_destroy_bank:
- debugfs_remove(ab8500_bank_file);
-exit_destroy_reg:
- debugfs_remove(ab8500_reg_file);
-exit_destroy_dir:
- debugfs_remove(ab8500_dir);
-exit_no_debugfs:
+err:
+ if (ab8500_dir)
+ debugfs_remove_recursive(ab8500_dir);
dev_err(&plf->dev, "failed to create debugfs entries.\n");
- return -ENOMEM;
+out_freeevent_name:
+ kfree(event_name);
+out_freedev_attr:
+ kfree(dev_attr);
+out_freeirq_count:
+ kfree(irq_count);
+
+ return ret;
}
static int ab8500_debug_remove(struct platform_device *plf)
{
- debugfs_remove(ab8500_val_file);
- debugfs_remove(ab8500_address_file);
- debugfs_remove(ab8500_bank_file);
- debugfs_remove(ab8500_reg_file);
- debugfs_remove(ab8500_dir);
+ debugfs_remove_recursive(ab8500_dir);
+ kfree(event_name);
+ kfree(dev_attr);
+ kfree(irq_count);
return 0;
}
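The hunks above drop the per-file dentry bookkeeping and instead tear the whole tree down with a single debugfs_remove_recursive() call on the root directory, both on the error path in probe and in ab8500_debug_remove(). A minimal sketch of that pattern, with illustrative example_* names rather than the driver's own, might look like:

#include <linux/debugfs.h>

static struct dentry *example_dir;

static int example_debugfs_init(void *data, const struct file_operations *fops)
{
        struct dentry *file;

        example_dir = debugfs_create_dir("example", NULL);
        if (!example_dir)
                return -ENOMEM;

        file = debugfs_create_file("state", S_IRUGO, example_dir, data, fops);
        if (!file)
                goto err;

        return 0;
err:
        /* one call removes the directory and every file created under it */
        debugfs_remove_recursive(example_dir);
        return -ENOMEM;
}

The same single call in the remove path replaces the chain of debugfs_remove() calls that previously had to mirror the creation order.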
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index 3fb1f40d6389..b1f3561b023f 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
@@ -82,6 +83,11 @@
/* This is used to not lose precision when dividing to get gain and offset */
#define CALIB_SCALE 1000
+/* Time in ms before disabling regulator */
+#define GPADC_AUDOSUSPEND_DELAY 1
+
+#define CONVERSION_TIME 500 /* ms */
+
enum cal_channels {
ADC_INPUT_VMAIN = 0,
ADC_INPUT_BTEMP,
@@ -102,10 +108,10 @@ struct adc_cal_data {
/**
* struct ab8500_gpadc - AB8500 GPADC device information
- * @chip_id ABB chip id
* @dev: pointer to the struct device
* @node: a list of AB8500 GPADCs, hence prepared for
reentrance
+ * @parent: pointer to the struct ab8500
* @ab8500_gpadc_complete: pointer to the struct completion, to indicate
* the completion of gpadc conversion
* @ab8500_gpadc_lock: structure of type mutex
@@ -114,9 +120,9 @@ struct adc_cal_data {
* @cal_data array of ADC calibration data structs
*/
struct ab8500_gpadc {
- u8 chip_id;
struct device *dev;
struct list_head node;
+ struct ab8500 *parent;
struct completion ab8500_gpadc_complete;
struct mutex ab8500_gpadc_lock;
struct regulator *regu;
@@ -282,8 +288,9 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
return -ENODEV;
mutex_lock(&gpadc->ab8500_gpadc_lock);
+
/* Enable VTVout LDO this is required for GPADC */
- regulator_enable(gpadc->regu);
+ pm_runtime_get_sync(gpadc->dev);
/* Check if ADC is not busy, lock and proceed */
do {
@@ -332,7 +339,7 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
EN_BUF | EN_ICHAR);
break;
case BTEMP_BALL:
- if (gpadc->chip_id >= AB8500_CUT3P0) {
+ if (!is_ab8500_2p0_or_earlier(gpadc->parent)) {
/* Turn on btemp pull-up on ABB 3.0 */
ret = abx500_mask_and_set_register_interruptible(
gpadc->dev,
@@ -344,7 +351,7 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
* Delay might be needed for ABB8500 cut 3.0, if not, remove
* when hardware will be available
*/
- msleep(1);
+ usleep_range(1000, 1000);
break;
}
/* Intentional fallthrough */
@@ -367,7 +374,8 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
goto out;
}
/* wait for completion of conversion */
- if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete, 2*HZ)) {
+ if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete,
+ msecs_to_jiffies(CONVERSION_TIME))) {
dev_err(gpadc->dev,
"timeout: didn't receive GPADC conversion interrupt\n");
ret = -EINVAL;
@@ -397,8 +405,10 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
dev_err(gpadc->dev, "gpadc_conversion: disable gpadc failed\n");
goto out;
}
- /* Disable VTVout LDO this is required for GPADC */
- regulator_disable(gpadc->regu);
+
+ pm_runtime_mark_last_busy(gpadc->dev);
+ pm_runtime_put_autosuspend(gpadc->dev);
+
mutex_unlock(&gpadc->ab8500_gpadc_lock);
return (high_data << 8) | low_data;
@@ -412,7 +422,9 @@ out:
*/
(void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
AB8500_GPADC_CTRL1_REG, DIS_GPADC);
- regulator_disable(gpadc->regu);
+
+ pm_runtime_put(gpadc->dev);
+
mutex_unlock(&gpadc->ab8500_gpadc_lock);
dev_err(gpadc->dev,
"gpadc_conversion: Failed to AD convert channel %d\n", channel);
@@ -571,6 +583,28 @@ static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
gpadc->cal_data[ADC_INPUT_VBAT].offset);
}
+static int ab8500_gpadc_runtime_suspend(struct device *dev)
+{
+ struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
+
+ regulator_disable(gpadc->regu);
+ return 0;
+}
+
+static int ab8500_gpadc_runtime_resume(struct device *dev)
+{
+ struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
+
+ regulator_enable(gpadc->regu);
+ return 0;
+}
+
+static int ab8500_gpadc_runtime_idle(struct device *dev)
+{
+ pm_runtime_suspend(dev);
+ return 0;
+}
+
static int ab8500_gpadc_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -591,6 +625,7 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
}
gpadc->dev = &pdev->dev;
+ gpadc->parent = dev_get_drvdata(pdev->dev.parent);
mutex_init(&gpadc->ab8500_gpadc_lock);
/* Initialize completion used to notify completion of conversion */
@@ -607,14 +642,6 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
goto fail;
}
- /* Get Chip ID of the ABB ASIC */
- ret = abx500_get_chip_id(gpadc->dev);
- if (ret < 0) {
- dev_err(gpadc->dev, "failed to get chip ID\n");
- goto fail_irq;
- }
- gpadc->chip_id = (u8) ret;
-
/* VTVout LDO used to power up ab8500-GPADC */
gpadc->regu = regulator_get(&pdev->dev, "vddadc");
if (IS_ERR(gpadc->regu)) {
@@ -622,6 +649,16 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
dev_err(gpadc->dev, "failed to get vtvout LDO\n");
goto fail_irq;
}
+
+ platform_set_drvdata(pdev, gpadc);
+
+ regulator_enable(gpadc->regu);
+
+ pm_runtime_set_autosuspend_delay(gpadc->dev, GPADC_AUDOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(gpadc->dev);
+ pm_runtime_set_active(gpadc->dev);
+ pm_runtime_enable(gpadc->dev);
+
ab8500_gpadc_read_calibration_data(gpadc);
list_add_tail(&gpadc->node, &ab8500_gpadc_list);
dev_dbg(gpadc->dev, "probe success\n");
@@ -642,19 +679,34 @@ static int ab8500_gpadc_remove(struct platform_device *pdev)
list_del(&gpadc->node);
/* remove interrupt - completion of Sw ADC conversion */
free_irq(gpadc->irq, gpadc);
- /* disable VTVout LDO that is being used by GPADC */
- regulator_put(gpadc->regu);
+
+ pm_runtime_get_sync(gpadc->dev);
+ pm_runtime_disable(gpadc->dev);
+
+ regulator_disable(gpadc->regu);
+
+ pm_runtime_set_suspended(gpadc->dev);
+
+ pm_runtime_put_noidle(gpadc->dev);
+
kfree(gpadc);
gpadc = NULL;
return 0;
}
+static const struct dev_pm_ops ab8500_gpadc_pm_ops = {
+ SET_RUNTIME_PM_OPS(ab8500_gpadc_runtime_suspend,
+ ab8500_gpadc_runtime_resume,
+ ab8500_gpadc_runtime_idle)
+};
+
static struct platform_driver ab8500_gpadc_driver = {
.probe = ab8500_gpadc_probe,
.remove = ab8500_gpadc_remove,
.driver = {
.name = "ab8500-gpadc",
.owner = THIS_MODULE,
+ .pm = &ab8500_gpadc_pm_ops,
},
};
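The GPADC hunks above replace the regulator_enable()/regulator_disable() pair around every conversion with runtime PM: pm_runtime_get_sync() before touching the hardware, pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend() afterwards, and a runtime suspend/resume pair that toggles the VTVout supply. A compact sketch of that shape, using illustrative sample_* names rather than the driver's:

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>

struct sample_adc {
        struct device *dev;
        struct regulator *regu;
};

static int sample_adc_read(struct sample_adc *adc)
{
        int val;

        pm_runtime_get_sync(adc->dev);          /* supply comes up via runtime_resume */

        val = 0;                                /* ... do the conversion here ... */

        pm_runtime_mark_last_busy(adc->dev);
        pm_runtime_put_autosuspend(adc->dev);   /* supply drops after the autosuspend delay */

        return val;
}

static int sample_adc_runtime_suspend(struct device *dev)
{
        struct sample_adc *adc = dev_get_drvdata(dev);

        regulator_disable(adc->regu);
        return 0;
}

static int sample_adc_runtime_resume(struct device *dev)
{
        struct sample_adc *adc = dev_get_drvdata(dev);

        return regulator_enable(adc->regu);
}

At probe time the sketch would mirror the patch's ordering: enable the regulator once, then pm_runtime_set_autosuspend_delay(), pm_runtime_use_autosuspend(), pm_runtime_set_active() and pm_runtime_enable().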
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 8a33b2c7eead..108fd86552f0 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -7,12 +7,73 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reboot.h>
+#include <linux/signal.h>
+#include <linux/power_supply.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-sysctrl.h>
static struct device *sysctrl_dev;
+void ab8500_power_off(void)
+{
+ sigset_t old;
+ sigset_t all;
+ static char *pss[] = {"ab8500_ac", "ab8500_usb"};
+ int i;
+ bool charger_present = false;
+ union power_supply_propval val;
+ struct power_supply *psy;
+ int ret;
+
+ /*
+ * If we have a charger connected and we're powering off,
+ * reboot into charge-only mode.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(pss); i++) {
+ psy = power_supply_get_by_name(pss[i]);
+ if (!psy)
+ continue;
+
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
+
+ if (!ret && val.intval) {
+ charger_present = true;
+ break;
+ }
+ }
+
+ if (!charger_present)
+ goto shutdown;
+
+ /* Check if battery is known */
+ psy = power_supply_get_by_name("ab8500_btemp");
+ if (psy) {
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_TECHNOLOGY,
+ &val);
+ if (!ret && val.intval != POWER_SUPPLY_TECHNOLOGY_UNKNOWN) {
+ printk(KERN_INFO
+ "Charger \"%s\" is connected with known battery."
+ " Rebooting.\n",
+ pss[i]);
+ machine_restart("charging");
+ }
+ }
+
+shutdown:
+ sigfillset(&all);
+
+ if (!sigprocmask(SIG_BLOCK, &all, &old)) {
+ (void)ab8500_sysctrl_set(AB8500_STW4500CTRL1,
+ AB8500_STW4500CTRL1_SWOFF |
+ AB8500_STW4500CTRL1_SWRESET4500N);
+ (void)sigprocmask(SIG_SETMASK, &old, NULL);
+ }
+}
+
static inline bool valid_bank(u8 bank)
{
return ((bank == AB8500_SYS_CTRL1_BLOCK) ||
@@ -33,6 +94,7 @@ int ab8500_sysctrl_read(u16 reg, u8 *value)
return abx500_get_register_interruptible(sysctrl_dev, bank,
(u8)(reg & 0xFF), value);
}
+EXPORT_SYMBOL(ab8500_sysctrl_read);
int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
{
@@ -48,10 +110,40 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
(u8)(reg & 0xFF), mask, value);
}
+EXPORT_SYMBOL(ab8500_sysctrl_write);
static int ab8500_sysctrl_probe(struct platform_device *pdev)
{
+ struct ab8500_platform_data *plat;
+ struct ab8500_sysctrl_platform_data *pdata;
+
sysctrl_dev = &pdev->dev;
+ plat = dev_get_platdata(pdev->dev.parent);
+ if (plat->pm_power_off)
+ pm_power_off = ab8500_power_off;
+
+ pdata = plat->sysctrl;
+
+ if (pdata) {
+ int ret, i, j;
+
+ for (i = AB8500_SYSCLKREQ1RFCLKBUF;
+ i <= AB8500_SYSCLKREQ8RFCLKBUF; i++) {
+ j = i - AB8500_SYSCLKREQ1RFCLKBUF;
+ ret = ab8500_sysctrl_write(i, 0xff,
+ pdata->initial_req_buf_config[j]);
+ dev_dbg(&pdev->dev,
+ "Setting SysClkReq%dRfClkBuf 0x%X\n",
+ j + 1,
+ pdata->initial_req_buf_config[j]);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "unable to set sysClkReq%dRfClkBuf: "
+ "%d\n", j + 1, ret);
+ }
+ }
+ }
+
return 0;
}
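ab8500_power_off() above chooses between a plain power-off and machine_restart("charging") by asking the named charger supplies whether they are online. A reduced sketch of that query, using the same direct psy->get_property() call the patch itself makes (the supply name passed in is illustrative):

#include <linux/power_supply.h>

static bool sample_charger_online(const char *name)
{
        union power_supply_propval val;
        struct power_supply *psy;
        int ret;

        psy = power_supply_get_by_name(name);
        if (!psy)
                return false;

        ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);

        return !ret && val.intval;
}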
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
index 7ce65f49480f..9818afba2515 100644
--- a/drivers/mfd/abx500-core.c
+++ b/drivers/mfd/abx500-core.c
@@ -153,6 +153,22 @@ int abx500_startup_irq_enabled(struct device *dev, unsigned int irq)
}
EXPORT_SYMBOL(abx500_startup_irq_enabled);
+void abx500_dump_all_banks(void)
+{
+ struct abx500_ops *ops;
+ struct device dummy_child = {0};
+ struct abx500_device_entry *dev_entry;
+
+ list_for_each_entry(dev_entry, &abx500_list, list) {
+ dummy_child.parent = dev_entry->dev;
+ ops = &dev_entry->ops;
+
+ if ((ops != NULL) && (ops->dump_all_banks != NULL))
+ ops->dump_all_banks(&dummy_child);
+ }
+}
+EXPORT_SYMBOL(abx500_dump_all_banks);
+
MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
MODULE_DESCRIPTION("ABX500 core driver");
MODULE_LICENSE("GPL");
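abx500_dump_all_banks() walks the list of registered abx500 devices and calls the optional dump_all_banks op on each one, using a dummy child device whose parent points at the real device. Stripped down to illustrative sample_* types, the dispatch pattern is simply:

#include <linux/device.h>
#include <linux/list.h>

struct sample_entry {
        struct list_head list;
        struct device *dev;
        void (*dump)(struct device *dev);
};

static LIST_HEAD(sample_list);

static void sample_dump_all(void)
{
        struct sample_entry *e;

        list_for_each_entry(e, &sample_list, list)
                if (e->dump)
                        e->dump(e->dev);
}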
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 222c03a5ddc0..b562c7bf8a46 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -115,7 +115,7 @@ static irqreturn_t arizona_underclocked(int irq, void *data)
if (val & ARIZONA_ADC_UNDERCLOCKED_STS)
dev_err(arizona->dev, "ADC underclocked\n");
if (val & ARIZONA_MIXER_UNDERCLOCKED_STS)
- dev_err(arizona->dev, "Mixer underclocked\n");
+ dev_err(arizona->dev, "Mixer dropped sample\n");
return IRQ_HANDLED;
}
@@ -263,10 +263,36 @@ static int arizona_runtime_suspend(struct device *dev)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int arizona_resume_noirq(struct device *dev)
+{
+ struct arizona *arizona = dev_get_drvdata(dev);
+
+ dev_dbg(arizona->dev, "Early resume, disabling IRQ\n");
+ disable_irq(arizona->irq);
+
+ return 0;
+}
+
+static int arizona_resume(struct device *dev)
+{
+ struct arizona *arizona = dev_get_drvdata(dev);
+
+ dev_dbg(arizona->dev, "Late resume, reenabling IRQ\n");
+ enable_irq(arizona->irq);
+
+ return 0;
+}
+#endif
+
const struct dev_pm_ops arizona_pm_ops = {
SET_RUNTIME_PM_OPS(arizona_runtime_suspend,
arizona_runtime_resume,
NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, arizona_resume)
+#ifdef CONFIG_PM_SLEEP
+ .resume_noirq = arizona_resume_noirq,
+#endif
};
EXPORT_SYMBOL_GPL(arizona_pm_ops);
@@ -275,19 +301,19 @@ static struct mfd_cell early_devs[] = {
};
static struct mfd_cell wm5102_devs[] = {
+ { .name = "arizona-micsupp" },
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
- { .name = "arizona-micsupp" },
{ .name = "arizona-pwm" },
{ .name = "wm5102-codec" },
};
static struct mfd_cell wm5110_devs[] = {
+ { .name = "arizona-micsupp" },
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
- { .name = "arizona-micsupp" },
{ .name = "arizona-pwm" },
{ .name = "wm5110-codec" },
};
@@ -484,6 +510,29 @@ int arizona_dev_init(struct arizona *arizona)
goto err_reset;
}
+ for (i = 0; i < ARIZONA_MAX_MICBIAS; i++) {
+ if (!arizona->pdata.micbias[i].mV)
+ continue;
+
+ val = (arizona->pdata.micbias[i].mV - 1500) / 100;
+ val <<= ARIZONA_MICB1_LVL_SHIFT;
+
+ if (arizona->pdata.micbias[i].ext_cap)
+ val |= ARIZONA_MICB1_EXT_CAP;
+
+ if (arizona->pdata.micbias[i].discharge)
+ val |= ARIZONA_MICB1_DISCH;
+
+ if (arizona->pdata.micbias[i].fast_start)
+ val |= ARIZONA_MICB1_RATE;
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MIC_BIAS_CTRL_1 + i,
+ ARIZONA_MICB1_LVL_MASK |
+ ARIZONA_MICB1_DISCH |
+ ARIZONA_MICB1_RATE, val);
+ }
+
for (i = 0; i < ARIZONA_MAX_INPUT; i++) {
/* Default for both is 0 so noop with defaults */
val = arizona->pdata.dmic_ref[i]
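The MICBIAS loop added to arizona_dev_init() above converts a millivolt setting from platform data into a register field and merges it with regmap_update_bits(), so only the level/discharge/rate bits are rewritten. A sketch of the same conversion with placeholder SAMPLE_* register names (not Arizona defines):

#include <linux/regmap.h>

#define SAMPLE_LVL_SHIFT        1
#define SAMPLE_LVL_MASK         (0x7 << SAMPLE_LVL_SHIFT)

static int sample_set_bias(struct regmap *map, unsigned int reg,
                           unsigned int mV)
{
        unsigned int val = ((mV - 1500) / 100) << SAMPLE_LVL_SHIFT;

        /* only the masked field changes; other bits in reg are preserved */
        return regmap_update_bits(map, reg, SAMPLE_LVL_MASK, val);
}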
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 885e56780358..6a9fec40d018 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -60,7 +60,7 @@ static inline bool i2c_safe_reg(unsigned char reg)
* This fix is to follow any read or write with a dummy read to a safe
* register.
*/
-int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
+static int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
{
int val;
@@ -85,7 +85,6 @@ int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
return 0;
}
-EXPORT_SYMBOL(da9052_i2c_fix);
static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
{
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 268f45d42394..21f261bf9e95 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -26,22 +26,19 @@
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
+#include <linux/irqchip/arm-gic.h>
#include <linux/mfd/core.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/db8500-prcmu.h>
#include <linux/regulator/machine.h>
#include <linux/cpufreq.h>
-#include <asm/hardware/gic.h>
+#include <linux/platform_data/ux500_wdt.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/db8500-regs.h>
-#include <mach/id.h>
#include "dbx500-prcmu-regs.h"
-/* Offset for the firmware version within the TCPM */
-#define PRCMU_FW_VERSION_OFFSET 0xA4
-
/* Index of different voltages to be used when accessing AVSData */
#define PRCM_AVS_BASE 0x2FC
#define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0)
@@ -216,10 +213,8 @@
#define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1)
#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2)
#define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3)
-#define PRCMU_I2C_WRITE(slave) \
- (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
-#define PRCMU_I2C_READ(slave) \
- (((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0))
+#define PRCMU_I2C_WRITE(slave) (((slave) << 1) | BIT(6))
+#define PRCMU_I2C_READ(slave) (((slave) << 1) | BIT(0) | BIT(6))
#define PRCMU_I2C_STOP_EN BIT(3)
/* Mailbox 5 ACKs */
@@ -1049,12 +1044,13 @@ int db8500_prcmu_get_ddr_opp(void)
*
* This function sets the operating point of the DDR.
*/
+static bool enable_set_ddr_opp;
int db8500_prcmu_set_ddr_opp(u8 opp)
{
if (opp < DDR_100_OPP || opp > DDR_25_OPP)
return -EINVAL;
/* Changing the DDR OPP can hang the hardware pre-v21 */
- if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20())
+ if (enable_set_ddr_opp)
writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW);
return 0;
@@ -2212,21 +2208,25 @@ int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
sleep_auto_off ? A9WDOG_AUTO_OFF_EN :
A9WDOG_AUTO_OFF_DIS);
}
+EXPORT_SYMBOL(db8500_prcmu_config_a9wdog);
int db8500_prcmu_enable_a9wdog(u8 id)
{
return prcmu_a9wdog(MB4H_A9WDOG_EN, id, 0, 0, 0);
}
+EXPORT_SYMBOL(db8500_prcmu_enable_a9wdog);
int db8500_prcmu_disable_a9wdog(u8 id)
{
return prcmu_a9wdog(MB4H_A9WDOG_DIS, id, 0, 0, 0);
}
+EXPORT_SYMBOL(db8500_prcmu_disable_a9wdog);
int db8500_prcmu_kick_a9wdog(u8 id)
{
return prcmu_a9wdog(MB4H_A9WDOG_KICK, id, 0, 0, 0);
}
+EXPORT_SYMBOL(db8500_prcmu_kick_a9wdog);
/*
* timeout is 28 bit, in ms.
@@ -2244,6 +2244,7 @@ int db8500_prcmu_load_a9wdog(u8 id, u32 timeout)
(u8)((timeout >> 12) & 0xff),
(u8)((timeout >> 20) & 0xff));
}
+EXPORT_SYMBOL(db8500_prcmu_load_a9wdog);
/**
* prcmu_abb_read() - Read register value(s) from the ABB.
@@ -2706,21 +2707,43 @@ static struct irq_chip prcmu_irq_chip = {
.irq_unmask = prcmu_irq_unmask,
};
-static char *fw_project_name(u8 project)
+static __init char *fw_project_name(u32 project)
{
switch (project) {
case PRCMU_FW_PROJECT_U8500:
return "U8500";
- case PRCMU_FW_PROJECT_U8500_C2:
- return "U8500 C2";
+ case PRCMU_FW_PROJECT_U8400:
+ return "U8400";
case PRCMU_FW_PROJECT_U9500:
return "U9500";
- case PRCMU_FW_PROJECT_U9500_C2:
- return "U9500 C2";
+ case PRCMU_FW_PROJECT_U8500_MBB:
+ return "U8500 MBB";
+ case PRCMU_FW_PROJECT_U8500_C1:
+ return "U8500 C1";
+ case PRCMU_FW_PROJECT_U8500_C2:
+ return "U8500 C2";
+ case PRCMU_FW_PROJECT_U8500_C3:
+ return "U8500 C3";
+ case PRCMU_FW_PROJECT_U8500_C4:
+ return "U8500 C4";
+ case PRCMU_FW_PROJECT_U9500_MBL:
+ return "U9500 MBL";
+ case PRCMU_FW_PROJECT_U8500_MBL:
+ return "U8500 MBL";
+ case PRCMU_FW_PROJECT_U8500_MBL2:
+ return "U8500 MBL2";
case PRCMU_FW_PROJECT_U8520:
- return "U8520";
+ return "U8520 MBL";
case PRCMU_FW_PROJECT_U8420:
return "U8420";
+ case PRCMU_FW_PROJECT_U9540:
+ return "U9540";
+ case PRCMU_FW_PROJECT_A9420:
+ return "A9420";
+ case PRCMU_FW_PROJECT_L8540:
+ return "L8540";
+ case PRCMU_FW_PROJECT_L8580:
+ return "L8580";
default:
return "Unknown";
}
@@ -2766,36 +2789,44 @@ static int db8500_irq_init(struct device_node *np)
return 0;
}
-void __init db8500_prcmu_early_init(void)
+static void dbx500_fw_version_init(struct platform_device *pdev,
+ u32 version_offset)
{
- if (cpu_is_u8500v2() || cpu_is_u9540()) {
- void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
-
- if (tcpm_base != NULL) {
- u32 version;
- version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
- fw_info.version.project = version & 0xFF;
- fw_info.version.api_version = (version >> 8) & 0xFF;
- fw_info.version.func_version = (version >> 16) & 0xFF;
- fw_info.version.errata = (version >> 24) & 0xFF;
- fw_info.valid = true;
- pr_info("PRCMU firmware: %s, version %d.%d.%d\n",
- fw_project_name(fw_info.version.project),
- (version >> 8) & 0xFF, (version >> 16) & 0xFF,
- (version >> 24) & 0xFF);
- iounmap(tcpm_base);
- }
+ struct resource *res;
+ void __iomem *tcpm_base;
- if (cpu_is_u9540())
- tcdm_base = ioremap_nocache(U8500_PRCMU_TCDM_BASE,
- SZ_4K + SZ_8K) + SZ_8K;
- else
- tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
- } else {
- pr_err("prcmu: Unsupported chip version\n");
- BUG();
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "prcmu-tcpm");
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Error: no prcmu tcpm memory region provided\n");
+ return;
+ }
+ tcpm_base = ioremap(res->start, resource_size(res));
+ if (tcpm_base != NULL) {
+ u32 version;
+
+ version = readl(tcpm_base + version_offset);
+ fw_info.version.project = (version & 0xFF);
+ fw_info.version.api_version = (version >> 8) & 0xFF;
+ fw_info.version.func_version = (version >> 16) & 0xFF;
+ fw_info.version.errata = (version >> 24) & 0xFF;
+ strncpy(fw_info.version.project_name,
+ fw_project_name(fw_info.version.project),
+ PRCMU_FW_PROJECT_NAME_LEN);
+ fw_info.valid = true;
+ pr_info("PRCMU firmware: %s(%d), version %d.%d.%d\n",
+ fw_info.version.project_name,
+ fw_info.version.project,
+ fw_info.version.api_version,
+ fw_info.version.func_version,
+ fw_info.version.errata);
+ iounmap(tcpm_base);
}
+}
+void __init db8500_prcmu_early_init(void)
+{
spin_lock_init(&mb0_transfer.lock);
spin_lock_init(&mb0_transfer.dbb_irqs_lock);
mutex_init(&mb0_transfer.ac_wake_lock);
@@ -3069,6 +3100,11 @@ static struct resource ab8500_resources[] = {
}
};
+static struct ux500_wdt_data db8500_wdt_pdata = {
+ .timeout = 600, /* 10 minutes */
+ .has_28_bits_resolution = true,
+};
+
static struct mfd_cell db8500_prcmu_devs[] = {
{
.name = "db8500-prcmu-regulators",
@@ -3077,12 +3113,18 @@ static struct mfd_cell db8500_prcmu_devs[] = {
.pdata_size = sizeof(db8500_regulators),
},
{
- .name = "cpufreq-u8500",
- .of_compatible = "stericsson,cpufreq-u8500",
+ .name = "cpufreq-ux500",
+ .of_compatible = "stericsson,cpufreq-ux500",
.platform_data = &db8500_cpufreq_table,
.pdata_size = sizeof(db8500_cpufreq_table),
},
{
+ .name = "ux500_wdt",
+ .platform_data = &db8500_wdt_pdata,
+ .pdata_size = sizeof(db8500_wdt_pdata),
+ .id = -1,
+ },
+ {
.name = "ab8500-core",
.of_compatible = "stericsson,ab8500",
.num_resources = ARRAY_SIZE(ab8500_resources),
@@ -3105,23 +3147,30 @@ static void db8500_prcmu_update_cpufreq(void)
*/
static int db8500_prcmu_probe(struct platform_device *pdev)
{
- struct ab8500_platform_data *ab8500_platdata = pdev->dev.platform_data;
struct device_node *np = pdev->dev.of_node;
+ struct prcmu_pdata *pdata = dev_get_platdata(&pdev->dev);
int irq = 0, err = 0, i;
-
- if (ux500_is_svp())
- return -ENODEV;
+ struct resource *res;
init_prcm_registers();
+ dbx500_fw_version_init(pdev, pdata->version_offset);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm");
+ if (!res) {
+ dev_err(&pdev->dev, "no prcmu tcdm region provided\n");
+ return -ENOENT;
+ }
+ tcdm_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+
/* Clean up the mailbox interrupts after pre-kernel code. */
writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
- if (np)
- irq = platform_get_irq(pdev, 0);
-
- if (!np || irq <= 0)
- irq = IRQ_DB8500_PRCMU1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "no prcmu irq provided\n");
+ return -ENOENT;
+ }
err = request_threaded_irq(irq, prcmu_irq_handler,
prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
@@ -3135,13 +3184,12 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(db8500_prcmu_devs); i++) {
if (!strcmp(db8500_prcmu_devs[i].name, "ab8500-core")) {
- db8500_prcmu_devs[i].platform_data = ab8500_platdata;
+ db8500_prcmu_devs[i].platform_data = pdata->ab_platdata;
db8500_prcmu_devs[i].pdata_size = sizeof(struct ab8500_platform_data);
}
}
- if (cpu_is_u8500v20_or_later())
- prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
+ prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
db8500_prcmu_update_cpufreq();
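dbx500_fw_version_init() above stops hard-coding the TCPM physical address and instead maps the named "prcmu-tcpm" resource from the platform device, then unpacks the 32-bit version word byte by byte. A self-contained sketch of that read (the sample_* name is illustrative; the field layout follows the patch):

#include <linux/platform_device.h>
#include <linux/io.h>

static int sample_read_fw_version(struct platform_device *pdev,
                                  u32 offset, u8 out[4])
{
        struct resource *res;
        void __iomem *base;
        u32 version;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcpm");
        if (!res)
                return -ENOENT;

        base = ioremap(res->start, resource_size(res));
        if (!base)
                return -ENOMEM;

        version = readl(base + offset);
        out[0] = version & 0xFF;                /* project */
        out[1] = (version >> 8) & 0xFF;         /* API version */
        out[2] = (version >> 16) & 0xFF;        /* functional version */
        out[3] = (version >> 24) & 0xFF;        /* errata */

        iounmap(base);
        return 0;
}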
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index d9d930302e98..9f12f91d6296 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -50,6 +50,7 @@
* document number TBD : Panther Point
* document number TBD : Lynx Point
* document number TBD : Lynx Point-LP
+ * document number TBD : Wellsburg
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -75,8 +76,10 @@
#define ACPIBASE_GCS_OFF 0x3410
#define ACPIBASE_GCS_END 0x3414
-#define GPIOBASE 0x48
-#define GPIOCTRL 0x4C
+#define GPIOBASE_ICH0 0x58
+#define GPIOCTRL_ICH0 0x5C
+#define GPIOBASE_ICH6 0x48
+#define GPIOCTRL_ICH6 0x4C
#define RCBABASE 0xf0
@@ -84,8 +87,17 @@
#define wdt_mem_res(i) wdt_res(ICH_RES_MEM_OFF, i)
#define wdt_res(b, i) (&wdt_ich_res[(b) + (i)])
-static int lpc_ich_acpi_save = -1;
-static int lpc_ich_gpio_save = -1;
+struct lpc_ich_cfg {
+ int base;
+ int ctrl;
+ int save;
+};
+
+struct lpc_ich_priv {
+ int chipset;
+ struct lpc_ich_cfg acpi;
+ struct lpc_ich_cfg gpio;
+};
static struct resource wdt_ich_res[] = {
/* ACPI - TCO */
@@ -194,6 +206,7 @@ enum lpc_chipsets {
LPC_PPT, /* Panther Point */
LPC_LPT, /* Lynx Point */
LPC_LPT_LP, /* Lynx Point-LP */
+ LPC_WBG, /* Wellsburg */
};
struct lpc_ich_info lpc_chipset_info[] = {
@@ -474,6 +487,10 @@ struct lpc_ich_info lpc_chipset_info[] = {
.name = "Lynx Point_LP",
.iTCO_version = 2,
},
+ [LPC_WBG] = {
+ .name = "Wellsburg",
+ .iTCO_version = 2,
+ },
};
/*
@@ -655,45 +672,82 @@ static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
{ PCI_VDEVICE(INTEL, 0x9c45), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9c46), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9c47), LPC_LPT_LP},
+ { PCI_VDEVICE(INTEL, 0x8d40), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d41), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d42), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d43), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d44), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d45), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d46), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d47), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d48), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d49), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d4a), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d4b), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d4c), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d4d), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d4e), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d4f), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d50), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d51), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d52), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d53), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d54), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d55), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d56), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d57), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d58), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d59), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d5a), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d5b), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d5c), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d5d), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d5e), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d5f), LPC_WBG},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
static void lpc_ich_restore_config_space(struct pci_dev *dev)
{
- if (lpc_ich_acpi_save >= 0) {
- pci_write_config_byte(dev, ACPICTRL, lpc_ich_acpi_save);
- lpc_ich_acpi_save = -1;
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+
+ if (priv->acpi.save >= 0) {
+ pci_write_config_byte(dev, priv->acpi.ctrl, priv->acpi.save);
+ priv->acpi.save = -1;
}
- if (lpc_ich_gpio_save >= 0) {
- pci_write_config_byte(dev, GPIOCTRL, lpc_ich_gpio_save);
- lpc_ich_gpio_save = -1;
+ if (priv->gpio.save >= 0) {
+ pci_write_config_byte(dev, priv->gpio.ctrl, priv->gpio.save);
+ priv->gpio.save = -1;
}
}
static void lpc_ich_enable_acpi_space(struct pci_dev *dev)
{
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
u8 reg_save;
- pci_read_config_byte(dev, ACPICTRL, &reg_save);
- pci_write_config_byte(dev, ACPICTRL, reg_save | 0x10);
- lpc_ich_acpi_save = reg_save;
+ pci_read_config_byte(dev, priv->acpi.ctrl, &reg_save);
+ pci_write_config_byte(dev, priv->acpi.ctrl, reg_save | 0x10);
+ priv->acpi.save = reg_save;
}
static void lpc_ich_enable_gpio_space(struct pci_dev *dev)
{
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
u8 reg_save;
- pci_read_config_byte(dev, GPIOCTRL, &reg_save);
- pci_write_config_byte(dev, GPIOCTRL, reg_save | 0x10);
- lpc_ich_gpio_save = reg_save;
+ pci_read_config_byte(dev, priv->gpio.ctrl, &reg_save);
+ pci_write_config_byte(dev, priv->gpio.ctrl, reg_save | 0x10);
+ priv->gpio.save = reg_save;
}
-static void lpc_ich_finalize_cell(struct mfd_cell *cell,
- const struct pci_device_id *id)
+static void lpc_ich_finalize_cell(struct pci_dev *dev, struct mfd_cell *cell)
{
- cell->platform_data = &lpc_chipset_info[id->driver_data];
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+
+ cell->platform_data = &lpc_chipset_info[priv->chipset];
cell->pdata_size = sizeof(struct lpc_ich_info);
}
@@ -721,9 +775,9 @@ static int lpc_ich_check_conflict_gpio(struct resource *res)
return use_gpio ? use_gpio : ret;
}
-static int lpc_ich_init_gpio(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int lpc_ich_init_gpio(struct pci_dev *dev)
{
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
u32 base_addr_cfg;
u32 base_addr;
int ret;
@@ -731,7 +785,7 @@ static int lpc_ich_init_gpio(struct pci_dev *dev,
struct resource *res;
/* Setup power management base register */
- pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+ pci_read_config_dword(dev, priv->acpi.base, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
@@ -757,7 +811,7 @@ static int lpc_ich_init_gpio(struct pci_dev *dev,
gpe0_done:
/* Setup GPIO base register */
- pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
+ pci_read_config_dword(dev, priv->gpio.base, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
dev_notice(&dev->dev, "I/O space for GPIO uninitialized\n");
@@ -768,7 +822,7 @@ gpe0_done:
/* Older devices provide fewer GPIO and have a smaller resource size. */
res = &gpio_ich_res[ICH_RES_GPIO];
res->start = base_addr;
- switch (lpc_chipset_info[id->driver_data].gpio_version) {
+ switch (lpc_chipset_info[priv->chipset].gpio_version) {
case ICH_V5_GPIO:
case ICH_V10CORP_GPIO:
res->end = res->start + 128 - 1;
@@ -784,10 +838,10 @@ gpe0_done:
acpi_conflict = true;
goto gpio_done;
}
- lpc_chipset_info[id->driver_data].use_gpio = ret;
+ lpc_chipset_info[priv->chipset].use_gpio = ret;
lpc_ich_enable_gpio_space(dev);
- lpc_ich_finalize_cell(&lpc_ich_cells[LPC_GPIO], id);
+ lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_GPIO]);
ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
1, NULL, 0, NULL);
@@ -798,16 +852,16 @@ gpio_done:
return ret;
}
-static int lpc_ich_init_wdt(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int lpc_ich_init_wdt(struct pci_dev *dev)
{
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
u32 base_addr_cfg;
u32 base_addr;
int ret;
struct resource *res;
/* Setup power management base register */
- pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+ pci_read_config_dword(dev, priv->acpi.base, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
@@ -830,7 +884,7 @@ static int lpc_ich_init_wdt(struct pci_dev *dev,
* we have to read RCBA from PCI Config space 0xf0 and use
* it as base. GCS = RCBA + ICH6_GCS(0x3410).
*/
- if (lpc_chipset_info[id->driver_data].iTCO_version == 1) {
+ if (lpc_chipset_info[priv->chipset].iTCO_version == 1) {
/* Don't register iomem for TCO ver 1 */
lpc_ich_cells[LPC_WDT].num_resources--;
} else {
@@ -847,7 +901,7 @@ static int lpc_ich_init_wdt(struct pci_dev *dev,
res->end = base_addr + ACPIBASE_GCS_END;
}
- lpc_ich_finalize_cell(&lpc_ich_cells[LPC_WDT], id);
+ lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]);
ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
1, NULL, 0, NULL);
@@ -858,14 +912,36 @@ wdt_done:
static int lpc_ich_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
+ struct lpc_ich_priv *priv;
int ret;
bool cell_added = false;
- ret = lpc_ich_init_wdt(dev, id);
+ priv = devm_kzalloc(&dev->dev,
+ sizeof(struct lpc_ich_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->chipset = id->driver_data;
+ priv->acpi.save = -1;
+ priv->acpi.base = ACPIBASE;
+ priv->acpi.ctrl = ACPICTRL;
+
+ priv->gpio.save = -1;
+ if (priv->chipset <= LPC_ICH5) {
+ priv->gpio.base = GPIOBASE_ICH0;
+ priv->gpio.ctrl = GPIOCTRL_ICH0;
+ } else {
+ priv->gpio.base = GPIOBASE_ICH6;
+ priv->gpio.ctrl = GPIOCTRL_ICH6;
+ }
+
+ pci_set_drvdata(dev, priv);
+
+ ret = lpc_ich_init_wdt(dev);
if (!ret)
cell_added = true;
- ret = lpc_ich_init_gpio(dev, id);
+ ret = lpc_ich_init_gpio(dev);
if (!ret)
cell_added = true;
@@ -876,6 +952,7 @@ static int lpc_ich_probe(struct pci_dev *dev,
if (!cell_added) {
dev_warn(&dev->dev, "No MFD cells added\n");
lpc_ich_restore_config_space(dev);
+ pci_set_drvdata(dev, NULL);
return -ENODEV;
}
@@ -886,6 +963,7 @@ static void lpc_ich_remove(struct pci_dev *dev)
{
mfd_remove_devices(&dev->dev);
lpc_ich_restore_config_space(dev);
+ pci_set_drvdata(dev, NULL);
}
static struct pci_driver lpc_ich_driver = {
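The lpc_ich changes replace the file-scope ACPI/GPIO save variables with a per-device struct lpc_ich_priv stored as PCI drvdata, so the GPIOBASE/GPIOCTRL offsets can be chosen per chipset (pre-ICH6 vs. ICH6+) once at probe time and every helper just reads them back. A reduced sketch of a helper working from drvdata (sample_* names are illustrative):

#include <linux/pci.h>

struct sample_priv {
        int gpio_base;          /* config-space offset of GPIOBASE */
        int gpio_ctrl;          /* config-space offset of GPIOCTRL */
};

static int sample_enable_gpio_space(struct pci_dev *dev)
{
        struct sample_priv *priv = pci_get_drvdata(dev);
        u8 reg;

        pci_read_config_byte(dev, priv->gpio_ctrl, &reg);
        return pci_write_config_byte(dev, priv->gpio_ctrl, reg | 0x10);
}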
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index 5624fcbba69b..8cc6aac27cb2 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -45,34 +45,32 @@ static struct resource smbus_sch_resource = {
.flags = IORESOURCE_IO,
};
-
static struct resource gpio_sch_resource = {
.flags = IORESOURCE_IO,
};
-static struct mfd_cell lpc_sch_cells[] = {
- {
- .name = "isch_smbus",
- .num_resources = 1,
- .resources = &smbus_sch_resource,
- },
- {
- .name = "sch_gpio",
- .num_resources = 1,
- .resources = &gpio_sch_resource,
- },
-};
-
static struct resource wdt_sch_resource = {
.flags = IORESOURCE_IO,
};
-static struct mfd_cell tunnelcreek_cells[] = {
- {
- .name = "ie6xx_wdt",
- .num_resources = 1,
- .resources = &wdt_sch_resource,
- },
+static struct mfd_cell lpc_sch_cells[3];
+
+static struct mfd_cell isch_smbus_cell = {
+ .name = "isch_smbus",
+ .num_resources = 1,
+ .resources = &smbus_sch_resource,
+};
+
+static struct mfd_cell sch_gpio_cell = {
+ .name = "sch_gpio",
+ .num_resources = 1,
+ .resources = &gpio_sch_resource,
+};
+
+static struct mfd_cell wdt_sch_cell = {
+ .name = "ie6xx_wdt",
+ .num_resources = 1,
+ .resources = &wdt_sch_resource,
};
static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
@@ -88,79 +86,76 @@ static int lpc_sch_probe(struct pci_dev *dev,
{
unsigned int base_addr_cfg;
unsigned short base_addr;
- int i;
+ int i, cells = 0;
int ret;
pci_read_config_dword(dev, SMBASE, &base_addr_cfg);
- if (!(base_addr_cfg & (1 << 31))) {
- dev_err(&dev->dev, "Decode of the SMBus I/O range disabled\n");
- return -ENODEV;
- }
- base_addr = (unsigned short)base_addr_cfg;
- if (base_addr == 0) {
- dev_err(&dev->dev, "I/O space for SMBus uninitialized\n");
- return -ENODEV;
- }
-
- smbus_sch_resource.start = base_addr;
- smbus_sch_resource.end = base_addr + SMBUS_IO_SIZE - 1;
+ base_addr = 0;
+ if (!(base_addr_cfg & (1 << 31)))
+ dev_warn(&dev->dev, "Decode of the SMBus I/O range disabled\n");
+ else
+ base_addr = (unsigned short)base_addr_cfg;
- pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
- if (!(base_addr_cfg & (1 << 31))) {
- dev_err(&dev->dev, "Decode of the GPIO I/O range disabled\n");
- return -ENODEV;
- }
- base_addr = (unsigned short)base_addr_cfg;
if (base_addr == 0) {
- dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
- return -ENODEV;
+ dev_warn(&dev->dev, "I/O space for SMBus uninitialized\n");
+ } else {
+ lpc_sch_cells[cells++] = isch_smbus_cell;
+ smbus_sch_resource.start = base_addr;
+ smbus_sch_resource.end = base_addr + SMBUS_IO_SIZE - 1;
}
- gpio_sch_resource.start = base_addr;
-
- if (id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB)
- gpio_sch_resource.end = base_addr + GPIO_IO_SIZE_CENTERTON - 1;
+ pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
+ base_addr = 0;
+ if (!(base_addr_cfg & (1 << 31)))
+ dev_warn(&dev->dev, "Decode of the GPIO I/O range disabled\n");
else
- gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
-
- for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++)
- lpc_sch_cells[i].id = id->device;
+ base_addr = (unsigned short)base_addr_cfg;
- ret = mfd_add_devices(&dev->dev, 0,
- lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL,
- 0, NULL);
- if (ret)
- goto out_dev;
+ if (base_addr == 0) {
+ dev_warn(&dev->dev, "I/O space for GPIO uninitialized\n");
+ } else {
+ lpc_sch_cells[cells++] = sch_gpio_cell;
+ gpio_sch_resource.start = base_addr;
+ if (id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB)
+ gpio_sch_resource.end = base_addr + GPIO_IO_SIZE_CENTERTON - 1;
+ else
+ gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
+ }
if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC
- || id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB) {
+ || id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB) {
pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
- if (!(base_addr_cfg & (1 << 31))) {
- dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
- ret = -ENODEV;
- goto out_dev;
+ base_addr = 0;
+ if (!(base_addr_cfg & (1 << 31)))
+ dev_warn(&dev->dev, "Decode of the WDT I/O range disabled\n");
+ else
+ base_addr = (unsigned short)base_addr_cfg;
+ if (base_addr == 0)
+ dev_warn(&dev->dev, "I/O space for WDT uninitialized\n");
+ else {
+ lpc_sch_cells[cells++] = wdt_sch_cell;
+ wdt_sch_resource.start = base_addr;
+ wdt_sch_resource.end = base_addr + WDT_IO_SIZE - 1;
}
- base_addr = (unsigned short)base_addr_cfg;
- if (base_addr == 0) {
- dev_err(&dev->dev, "I/O space for WDT uninitialized\n");
- ret = -ENODEV;
- goto out_dev;
- }
-
- wdt_sch_resource.start = base_addr;
- wdt_sch_resource.end = base_addr + WDT_IO_SIZE - 1;
+ }
- for (i = 0; i < ARRAY_SIZE(tunnelcreek_cells); i++)
- tunnelcreek_cells[i].id = id->device;
+ if (WARN_ON(cells > ARRAY_SIZE(lpc_sch_cells))) {
+ dev_err(&dev->dev, "Cell count exceeds array size");
+ return -ENODEV;
+ }
- ret = mfd_add_devices(&dev->dev, 0, tunnelcreek_cells,
- ARRAY_SIZE(tunnelcreek_cells), NULL,
- 0, NULL);
+ if (cells == 0) {
+ dev_err(&dev->dev, "All decode registers disabled.\n");
+ return -ENODEV;
}
- return ret;
-out_dev:
- mfd_remove_devices(&dev->dev);
+ for (i = 0; i < cells; i++)
+ lpc_sch_cells[i].id = id->device;
+
+ ret = mfd_add_devices(&dev->dev, 0, lpc_sch_cells, cells, NULL, 0, NULL);
+ if (ret)
+ mfd_remove_devices(&dev->dev);
+
return ret;
}
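The rewritten lpc_sch_probe() above no longer aborts when a single decode range is disabled; it warns, appends an mfd_cell per usable range into a scratch array, and registers only what it collected, failing only if nothing is usable. A stripped-down sketch of that flow, with the per-range enable test abstracted into a bool array (sample names, not the driver's):

#include <linux/device.h>
#include <linux/mfd/core.h>

static struct mfd_cell sample_cells[3];

static int sample_add_enabled_cells(struct device *dev,
                                    const struct mfd_cell *avail,
                                    const bool *enabled, int n)
{
        int i, count = 0;

        for (i = 0; i < n; i++)
                if (enabled[i])
                        sample_cells[count++] = avail[i];

        if (!count)
                return -ENODEV;         /* every decode range disabled */

        return mfd_add_devices(dev, 0, sample_cells, count, NULL, 0, NULL);
}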
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index e32466e865b9..f0cc40296d8c 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -14,10 +14,13 @@
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max8925.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
static struct resource bk_resources[] = {
{ 0x84, 0x84, "mode control", IORESOURCE_REG, },
@@ -639,17 +642,33 @@ static struct irq_chip max8925_irq_chip = {
.irq_disable = max8925_irq_disable,
};
+static int max8925_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_data(virq, d->host_data);
+ irq_set_chip_and_handler(virq, &max8925_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+ return 0;
+}
+
+static struct irq_domain_ops max8925_irq_domain_ops = {
+ .map = max8925_irq_domain_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+
static int max8925_irq_init(struct max8925_chip *chip, int irq,
struct max8925_platform_data *pdata)
{
unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
- int i, ret;
- int __irq;
+ int ret;
+ struct device_node *node = chip->dev->of_node;
- if (!pdata || !pdata->irq_base) {
- dev_warn(chip->dev, "No interrupt support on IRQ base\n");
- return -EINVAL;
- }
/* clear all interrupts */
max8925_reg_read(chip->i2c, MAX8925_CHG_IRQ1);
max8925_reg_read(chip->i2c, MAX8925_CHG_IRQ2);
@@ -667,35 +686,30 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, 0xff);
mutex_init(&chip->irq_lock);
- chip->core_irq = irq;
- chip->irq_base = pdata->irq_base;
-
- /* register with genirq */
- for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) {
- __irq = i + chip->irq_base;
- irq_set_chip_data(__irq, chip);
- irq_set_chip_and_handler(__irq, &max8925_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(__irq, IRQF_VALID);
-#else
- irq_set_noprobe(__irq);
-#endif
- }
- if (!irq) {
- dev_warn(chip->dev, "No interrupt support on core IRQ\n");
- goto tsc_irq;
+ chip->irq_base = irq_alloc_descs(-1, 0, MAX8925_NR_IRQS, 0);
+ if (chip->irq_base < 0) {
+ dev_err(chip->dev, "Failed to allocate interrupts, ret:%d\n",
+ chip->irq_base);
+ return -EBUSY;
}
+ irq_domain_add_legacy(node, MAX8925_NR_IRQS, chip->irq_base, 0,
+ &max8925_irq_domain_ops, chip);
+
+ /* request irq handler for pmic main irq*/
+ chip->core_irq = irq;
+ if (!chip->core_irq)
+ return -EBUSY;
ret = request_threaded_irq(irq, NULL, max8925_irq, flags | IRQF_ONESHOT,
"max8925", chip);
if (ret) {
dev_err(chip->dev, "Failed to request core IRQ: %d\n", ret);
chip->core_irq = 0;
+ return -EBUSY;
}
-tsc_irq:
+ /* request irq handler for pmic tsc irq*/
+
/* mask TSC interrupt */
max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0x0f);
@@ -704,7 +718,6 @@ tsc_irq:
return 0;
}
chip->tsc_irq = pdata->tsc_irq;
-
ret = request_threaded_irq(chip->tsc_irq, NULL, max8925_tsc_irq,
flags | IRQF_ONESHOT, "max8925-tsc", chip);
if (ret) {
@@ -846,7 +859,7 @@ int max8925_device_init(struct max8925_chip *chip,
ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0],
ARRAY_SIZE(rtc_devs),
- &rtc_resources[0], chip->irq_base, NULL);
+ NULL, chip->irq_base, NULL);
if (ret < 0) {
dev_err(chip->dev, "Failed to add rtc subdev\n");
goto out;
@@ -854,7 +867,7 @@ int max8925_device_init(struct max8925_chip *chip,
ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0],
ARRAY_SIZE(onkey_devs),
- &onkey_resources[0], 0, NULL);
+ NULL, chip->irq_base, NULL);
if (ret < 0) {
dev_err(chip->dev, "Failed to add onkey subdev\n");
goto out_dev;
@@ -873,21 +886,19 @@ int max8925_device_init(struct max8925_chip *chip,
goto out_dev;
}
- if (pdata && pdata->power) {
- ret = mfd_add_devices(chip->dev, 0, &power_devs[0],
- ARRAY_SIZE(power_devs),
- &power_supply_resources[0], 0, NULL);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add power supply "
- "subdev\n");
- goto out_dev;
- }
+ ret = mfd_add_devices(chip->dev, 0, &power_devs[0],
+ ARRAY_SIZE(power_devs),
+ NULL, 0, NULL);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to add power supply subdev, err = %d\n", ret);
+ goto out_dev;
}
if (pdata && pdata->touch) {
ret = mfd_add_devices(chip->dev, 0, &touch_devs[0],
ARRAY_SIZE(touch_devs),
- &touch_resources[0], 0, NULL);
+ NULL, chip->tsc_irq, NULL);
if (ret < 0) {
dev_err(chip->dev, "Failed to add touch subdev\n");
goto out_dev;
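max8925_irq_init() above stops requiring pdata->irq_base and instead allocates a descriptor range with irq_alloc_descs() and registers a legacy irq_domain whose .map() callback wires each virq to the chip. A sketch of that setup with sample_* placeholders (the chip data passed in is whatever .map() expects):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_chip sample_irq_chip = {
        .name = "sample",
};

static int sample_irq_map(struct irq_domain *d, unsigned int virq,
                          irq_hw_number_t hw)
{
        irq_set_chip_data(virq, d->host_data);
        irq_set_chip_and_handler(virq, &sample_irq_chip, handle_edge_irq);
        irq_set_nested_thread(virq, 1);
        irq_set_noprobe(virq);
        return 0;
}

static const struct irq_domain_ops sample_domain_ops = {
        .map    = sample_irq_map,
        .xlate  = irq_domain_xlate_onetwocell,
};

static int sample_irq_init(struct device_node *np, void *chip, int nr_irqs)
{
        int base = irq_alloc_descs(-1, 0, nr_irqs, 0);

        if (base < 0)
                return base;

        if (!irq_domain_add_legacy(np, nr_irqs, base, 0,
                                   &sample_domain_ops, chip))
                return -ENOMEM;

        return base;    /* first virq of the mapped range */
}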
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 00b5b456063d..92bbebd31598 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -135,13 +135,37 @@ static const struct i2c_device_id max8925_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, max8925_id_table);
+static int max8925_dt_init(struct device_node *np, struct device *dev,
+ struct max8925_platform_data *pdata)
+{
+ int ret;
+
+ ret = of_property_read_u32(np, "maxim,tsc-irq", &pdata->tsc_irq);
+ if (ret) {
+ dev_err(dev, "Not found maxim,tsc-irq property\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int max8925_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max8925_platform_data *pdata = client->dev.platform_data;
static struct max8925_chip *chip;
-
- if (!pdata) {
+ struct device_node *node = client->dev.of_node;
+
+ if (node && !pdata) {
+ /* parse DT to get platform data */
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct max8925_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (max8925_dt_init(node, &client->dev, pdata))
+ return -EINVAL;
+ } else if (!pdata) {
pr_info("%s: platform data is missing\n", __func__);
return -EINVAL;
}
@@ -203,11 +227,18 @@ static int max8925_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(max8925_pm_ops, max8925_suspend, max8925_resume);
+static const struct of_device_id max8925_dt_ids[] = {
+ { .compatible = "maxim,max8925", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, max8925_dt_ids);
+
static struct i2c_driver max8925_driver = {
.driver = {
.name = "max8925",
.owner = THIS_MODULE,
.pm = &max8925_pm_ops,
+ .of_match_table = of_match_ptr(max8925_dt_ids),
},
.probe = max8925_probe,
.remove = max8925_remove,
@@ -217,7 +248,6 @@ static struct i2c_driver max8925_driver = {
static int __init max8925_i2c_init(void)
{
int ret;
-
ret = i2c_add_driver(&max8925_driver);
if (ret != 0)
pr_err("Failed to register MAX8925 I2C driver: %d\n", ret);
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 05164d7f054b..6b5edf64de2b 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
-#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb-omap.h>
@@ -91,21 +90,23 @@
struct usbhs_hcd_omap {
+ int nports;
+ struct clk **utmi_clk;
+ struct clk **hsic60m_clk;
+ struct clk **hsic480m_clk;
+
struct clk *xclk60mhsp1_ck;
struct clk *xclk60mhsp2_ck;
- struct clk *utmi_p1_fck;
- struct clk *usbhost_p1_fck;
- struct clk *utmi_p2_fck;
- struct clk *usbhost_p2_fck;
+ struct clk *utmi_p1_gfclk;
+ struct clk *utmi_p2_gfclk;
struct clk *init_60m_fclk;
struct clk *ehci_logic_fck;
void __iomem *uhh_base;
- struct usbhs_omap_platform_data platdata;
+ struct usbhs_omap_platform_data *pdata;
u32 usbhs_rev;
- spinlock_t lock;
};
/*-------------------------------------------------------------------------*/
@@ -184,19 +185,13 @@ err_end:
static int omap_usbhs_alloc_children(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct usbhs_hcd_omap *omap;
- struct ehci_hcd_omap_platform_data *ehci_data;
- struct ohci_hcd_omap_platform_data *ohci_data;
+ struct usbhs_omap_platform_data *pdata = dev->platform_data;
struct platform_device *ehci;
struct platform_device *ohci;
struct resource *res;
struct resource resources[2];
int ret;
- omap = platform_get_drvdata(pdev);
- ehci_data = omap->platdata.ehci_data;
- ohci_data = omap->platdata.ohci_data;
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ehci");
if (!res) {
dev_err(dev, "EHCI get resource IORESOURCE_MEM failed\n");
@@ -213,8 +208,8 @@ static int omap_usbhs_alloc_children(struct platform_device *pdev)
}
resources[1] = *res;
- ehci = omap_usbhs_alloc_child(OMAP_EHCI_DEVICE, resources, 2, ehci_data,
- sizeof(*ehci_data), dev);
+ ehci = omap_usbhs_alloc_child(OMAP_EHCI_DEVICE, resources, 2, pdata,
+ sizeof(*pdata), dev);
if (!ehci) {
dev_err(dev, "omap_usbhs_alloc_child failed\n");
@@ -238,8 +233,8 @@ static int omap_usbhs_alloc_children(struct platform_device *pdev)
}
resources[1] = *res;
- ohci = omap_usbhs_alloc_child(OMAP_OHCI_DEVICE, resources, 2, ohci_data,
- sizeof(*ohci_data), dev);
+ ohci = omap_usbhs_alloc_child(OMAP_OHCI_DEVICE, resources, 2, pdata,
+ sizeof(*pdata), dev);
if (!ohci) {
dev_err(dev, "omap_usbhs_alloc_child failed\n");
ret = -ENOMEM;
@@ -278,31 +273,52 @@ static bool is_ohci_port(enum usbhs_omap_port_mode pmode)
static int usbhs_runtime_resume(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags;
+ struct usbhs_omap_platform_data *pdata = omap->pdata;
+ int i, r;
dev_dbg(dev, "usbhs_runtime_resume\n");
- if (!pdata) {
- dev_dbg(dev, "missing platform_data\n");
- return -ENODEV;
- }
-
omap_tll_enable();
- spin_lock_irqsave(&omap->lock, flags);
- if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck))
+ if (!IS_ERR(omap->ehci_logic_fck))
clk_enable(omap->ehci_logic_fck);
- if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_enable(omap->usbhost_p1_fck);
- if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_enable(omap->usbhost_p2_fck);
-
- clk_enable(omap->utmi_p1_fck);
- clk_enable(omap->utmi_p2_fck);
+ for (i = 0; i < omap->nports; i++) {
+ switch (pdata->port_mode[i]) {
+ case OMAP_EHCI_PORT_MODE_HSIC:
+ if (!IS_ERR(omap->hsic60m_clk[i])) {
+ r = clk_enable(omap->hsic60m_clk[i]);
+ if (r) {
+ dev_err(dev,
+ "Can't enable port %d hsic60m clk:%d\n",
+ i, r);
+ }
+ }
- spin_unlock_irqrestore(&omap->lock, flags);
+ if (!IS_ERR(omap->hsic480m_clk[i])) {
+ r = clk_enable(omap->hsic480m_clk[i]);
+ if (r) {
+ dev_err(dev,
+ "Can't enable port %d hsic480m clk:%d\n",
+ i, r);
+ }
+ }
+ /* Fall through as HSIC mode needs utmi_clk */
+
+ case OMAP_EHCI_PORT_MODE_TLL:
+ if (!IS_ERR(omap->utmi_clk[i])) {
+ r = clk_enable(omap->utmi_clk[i]);
+ if (r) {
+ dev_err(dev,
+ "Can't enable port %d clk : %d\n",
+ i, r);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
return 0;
}
@@ -310,51 +326,122 @@ static int usbhs_runtime_resume(struct device *dev)
static int usbhs_runtime_suspend(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags;
+ struct usbhs_omap_platform_data *pdata = omap->pdata;
+ int i;
dev_dbg(dev, "usbhs_runtime_suspend\n");
- if (!pdata) {
- dev_dbg(dev, "missing platform_data\n");
- return -ENODEV;
- }
-
- spin_lock_irqsave(&omap->lock, flags);
+ for (i = 0; i < omap->nports; i++) {
+ switch (pdata->port_mode[i]) {
+ case OMAP_EHCI_PORT_MODE_HSIC:
+ if (!IS_ERR(omap->hsic60m_clk[i]))
+ clk_disable(omap->hsic60m_clk[i]);
- if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_disable(omap->usbhost_p1_fck);
- if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_disable(omap->usbhost_p2_fck);
+ if (!IS_ERR(omap->hsic480m_clk[i]))
+ clk_disable(omap->hsic480m_clk[i]);
+ /* Fall through as utmi_clks were used in HSIC mode */
- clk_disable(omap->utmi_p2_fck);
- clk_disable(omap->utmi_p1_fck);
+ case OMAP_EHCI_PORT_MODE_TLL:
+ if (!IS_ERR(omap->utmi_clk[i]))
+ clk_disable(omap->utmi_clk[i]);
+ break;
+ default:
+ break;
+ }
+ }
- if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck))
+ if (!IS_ERR(omap->ehci_logic_fck))
clk_disable(omap->ehci_logic_fck);
- spin_unlock_irqrestore(&omap->lock, flags);
omap_tll_disable();
return 0;
}
+static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap,
+ unsigned reg)
+{
+ struct usbhs_omap_platform_data *pdata = omap->pdata;
+ int i;
+
+ for (i = 0; i < omap->nports; i++) {
+ switch (pdata->port_mode[i]) {
+ case OMAP_USBHS_PORT_MODE_UNUSED:
+ reg &= ~(OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS << i);
+ break;
+ case OMAP_EHCI_PORT_MODE_PHY:
+ if (pdata->single_ulpi_bypass)
+ break;
+
+ if (i == 0)
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+ else
+ reg &= ~(OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS
+ << (i-1));
+ break;
+ default:
+ if (pdata->single_ulpi_bypass)
+ break;
+
+ if (i == 0)
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS
+ << (i-1);
+ break;
+ }
+ }
+
+ if (pdata->single_ulpi_bypass) {
+ /* bypass ULPI only if none of the ports use PHY mode */
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+
+ for (i = 0; i < omap->nports; i++) {
+ if (is_ehci_phy_mode(pdata->port_mode[i])) {
+ reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ break;
+ }
+ }
+ }
+
+ return reg;
+}
+
+static unsigned omap_usbhs_rev2_hostconfig(struct usbhs_hcd_omap *omap,
+ unsigned reg)
+{
+ struct usbhs_omap_platform_data *pdata = omap->pdata;
+ int i;
+
+ for (i = 0; i < omap->nports; i++) {
+ /* Clear port mode fields for PHY mode */
+ reg &= ~(OMAP4_P1_MODE_CLEAR << 2 * i);
+
+ if (is_ehci_tll_mode(pdata->port_mode[i]) ||
+ (is_ohci_port(pdata->port_mode[i])))
+ reg |= OMAP4_P1_MODE_TLL << 2 * i;
+ else if (is_ehci_hsic_mode(pdata->port_mode[i]))
+ reg |= OMAP4_P1_MODE_HSIC << 2 * i;
+ }
+
+ return reg;
+}
+
static void omap_usbhs_init(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags;
+ struct usbhs_omap_platform_data *pdata = omap->pdata;
unsigned reg;
dev_dbg(dev, "starting TI HSUSB Controller\n");
- if (pdata->ehci_data->phy_reset) {
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
- gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
+ if (pdata->phy_reset) {
+ if (gpio_is_valid(pdata->reset_gpio_port[0]))
+ gpio_request_one(pdata->reset_gpio_port[0],
GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
- gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
+ if (gpio_is_valid(pdata->reset_gpio_port[1]))
+ gpio_request_one(pdata->reset_gpio_port[1],
GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
/* Hold the PHY in RESET for enough time till DIR is high */
@@ -362,9 +449,6 @@ static void omap_usbhs_init(struct device *dev)
}
pm_runtime_get_sync(dev);
- spin_lock_irqsave(&omap->lock, flags);
- omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
- dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
/* setup ULPI bypass and burst configurations */
@@ -374,89 +458,51 @@ static void omap_usbhs_init(struct device *dev)
reg |= OMAP4_UHH_HOSTCONFIG_APP_START_CLK;
reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
- if (is_omap_usbhs_rev1(omap)) {
- if (pdata->port_mode[0] == OMAP_USBHS_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
- if (pdata->port_mode[1] == OMAP_USBHS_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
- if (pdata->port_mode[2] == OMAP_USBHS_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
-
- /* Bypass the TLL module for PHY mode operation */
- if (pdata->single_ulpi_bypass) {
- dev_dbg(dev, "OMAP3 ES version <= ES2.1\n");
- if (is_ehci_phy_mode(pdata->port_mode[0]) ||
- is_ehci_phy_mode(pdata->port_mode[1]) ||
- is_ehci_phy_mode(pdata->port_mode[2]))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
- } else {
- dev_dbg(dev, "OMAP3 ES version > ES2.1\n");
- if (is_ehci_phy_mode(pdata->port_mode[0]))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
- if (is_ehci_phy_mode(pdata->port_mode[1]))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
- if (is_ehci_phy_mode(pdata->port_mode[2]))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
- }
- } else if (is_omap_usbhs_rev2(omap)) {
- /* Clear port mode fields for PHY mode*/
- reg &= ~OMAP4_P1_MODE_CLEAR;
- reg &= ~OMAP4_P2_MODE_CLEAR;
+ switch (omap->usbhs_rev) {
+ case OMAP_USBHS_REV1:
+ omap_usbhs_rev1_hostconfig(omap, reg);
+ break;
- if (is_ehci_tll_mode(pdata->port_mode[0]) ||
- (is_ohci_port(pdata->port_mode[0])))
- reg |= OMAP4_P1_MODE_TLL;
- else if (is_ehci_hsic_mode(pdata->port_mode[0]))
- reg |= OMAP4_P1_MODE_HSIC;
+ case OMAP_USBHS_REV2:
+ omap_usbhs_rev2_hostconfig(omap, reg);
+ break;
- if (is_ehci_tll_mode(pdata->port_mode[1]) ||
- (is_ohci_port(pdata->port_mode[1])))
- reg |= OMAP4_P2_MODE_TLL;
- else if (is_ehci_hsic_mode(pdata->port_mode[1]))
- reg |= OMAP4_P2_MODE_HSIC;
+ default: /* newer revisions */
+ omap_usbhs_rev2_hostconfig(omap, reg);
+ break;
}
usbhs_write(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
dev_dbg(dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
- spin_unlock_irqrestore(&omap->lock, flags);
-
pm_runtime_put_sync(dev);
- if (pdata->ehci_data->phy_reset) {
+ if (pdata->phy_reset) {
/* Hold the PHY in RESET for enough time till
* PHY is settled and ready
*/
udelay(10);
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ if (gpio_is_valid(pdata->reset_gpio_port[0]))
gpio_set_value_cansleep
- (pdata->ehci_data->reset_gpio_port[0], 1);
+ (pdata->reset_gpio_port[0], 1);
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ if (gpio_is_valid(pdata->reset_gpio_port[1]))
gpio_set_value_cansleep
- (pdata->ehci_data->reset_gpio_port[1], 1);
+ (pdata->reset_gpio_port[1], 1);
}
}
static void omap_usbhs_deinit(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = &omap->platdata;
+ struct usbhs_omap_platform_data *pdata = omap->pdata;
- if (pdata->ehci_data->phy_reset) {
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
- gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+ if (pdata->phy_reset) {
+ if (gpio_is_valid(pdata->reset_gpio_port[0]))
+ gpio_free(pdata->reset_gpio_port[0]);
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
- gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+ if (gpio_is_valid(pdata->reset_gpio_port[1]))
+ gpio_free(pdata->reset_gpio_port[1]);
}
}
@@ -474,137 +520,185 @@ static int usbhs_omap_probe(struct platform_device *pdev)
struct resource *res;
int ret = 0;
int i;
+ bool need_logic_fck;
if (!pdata) {
dev_err(dev, "Missing platform data\n");
- ret = -ENOMEM;
- goto end_probe;
+ return -ENODEV;
}
- omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+ omap = devm_kzalloc(dev, sizeof(*omap), GFP_KERNEL);
if (!omap) {
dev_err(dev, "Memory allocation failed\n");
- ret = -ENOMEM;
- goto end_probe;
+ return -ENOMEM;
}
- spin_lock_init(&omap->lock);
-
- for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
- omap->platdata.port_mode[i] = pdata->port_mode[i];
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
+ omap->uhh_base = devm_request_and_ioremap(dev, res);
+ if (!omap->uhh_base) {
+ dev_err(dev, "Resource request/ioremap failed\n");
+ return -EADDRNOTAVAIL;
+ }
- omap->platdata.ehci_data = pdata->ehci_data;
- omap->platdata.ohci_data = pdata->ohci_data;
+ omap->pdata = pdata;
pm_runtime_enable(dev);
+ platform_set_drvdata(pdev, omap);
+ pm_runtime_get_sync(dev);
+
+ omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
- for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
- if (is_ehci_phy_mode(i) || is_ehci_tll_mode(i) ||
- is_ehci_hsic_mode(i)) {
- omap->ehci_logic_fck = clk_get(dev, "ehci_logic_fck");
- if (IS_ERR(omap->ehci_logic_fck)) {
- ret = PTR_ERR(omap->ehci_logic_fck);
- dev_warn(dev, "ehci_logic_fck failed:%d\n",
- ret);
- }
+ /* we need to call runtime suspend before we update omap->nports
+ * to prevent unbalanced clk_disable()
+ */
+ pm_runtime_put_sync(dev);
+
+ /*
+ * If platform data contains nports then use that
+ * else make out number of ports from USBHS revision
+ */
+ if (pdata->nports) {
+ omap->nports = pdata->nports;
+ } else {
+ switch (omap->usbhs_rev) {
+ case OMAP_USBHS_REV1:
+ omap->nports = 3;
+ break;
+ case OMAP_USBHS_REV2:
+ omap->nports = 2;
+ break;
+ default:
+ omap->nports = OMAP3_HS_USB_PORTS;
+ dev_dbg(dev,
+ "USB HOST Rev:0x%d not recognized, assuming %d ports\n",
+ omap->usbhs_rev, omap->nports);
break;
}
+ }
- omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
- if (IS_ERR(omap->utmi_p1_fck)) {
- ret = PTR_ERR(omap->utmi_p1_fck);
- dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
- goto err_end;
+ i = sizeof(struct clk *) * omap->nports;
+ omap->utmi_clk = devm_kzalloc(dev, i, GFP_KERNEL);
+ omap->hsic480m_clk = devm_kzalloc(dev, i, GFP_KERNEL);
+ omap->hsic60m_clk = devm_kzalloc(dev, i, GFP_KERNEL);
+
+ if (!omap->utmi_clk || !omap->hsic480m_clk || !omap->hsic60m_clk) {
+ dev_err(dev, "Memory allocation failed\n");
+ ret = -ENOMEM;
+ goto err_mem;
+ }
+
+ need_logic_fck = false;
+ for (i = 0; i < omap->nports; i++) {
+ if (is_ehci_phy_mode(i) || is_ehci_tll_mode(i) ||
+ is_ehci_hsic_mode(i))
+ need_logic_fck |= true;
+ }
+
+ omap->ehci_logic_fck = ERR_PTR(-EINVAL);
+ if (need_logic_fck) {
+ omap->ehci_logic_fck = clk_get(dev, "ehci_logic_fck");
+ if (IS_ERR(omap->ehci_logic_fck)) {
+ ret = PTR_ERR(omap->ehci_logic_fck);
+ dev_dbg(dev, "ehci_logic_fck failed:%d\n", ret);
+ }
+ }
+
+ omap->utmi_p1_gfclk = clk_get(dev, "utmi_p1_gfclk");
+ if (IS_ERR(omap->utmi_p1_gfclk)) {
+ ret = PTR_ERR(omap->utmi_p1_gfclk);
+ dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
+ goto err_p1_gfclk;
+ }
+
+ omap->utmi_p2_gfclk = clk_get(dev, "utmi_p2_gfclk");
+ if (IS_ERR(omap->utmi_p2_gfclk)) {
+ ret = PTR_ERR(omap->utmi_p2_gfclk);
+ dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+ goto err_p2_gfclk;
}
omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
if (IS_ERR(omap->xclk60mhsp1_ck)) {
ret = PTR_ERR(omap->xclk60mhsp1_ck);
dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
- goto err_utmi_p1_fck;
- }
-
- omap->utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
- if (IS_ERR(omap->utmi_p2_fck)) {
- ret = PTR_ERR(omap->utmi_p2_fck);
- dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
- goto err_xclk60mhsp1_ck;
+ goto err_xclk60mhsp1;
}
omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
if (IS_ERR(omap->xclk60mhsp2_ck)) {
ret = PTR_ERR(omap->xclk60mhsp2_ck);
dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
- goto err_utmi_p2_fck;
- }
-
- omap->usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
- if (IS_ERR(omap->usbhost_p1_fck)) {
- ret = PTR_ERR(omap->usbhost_p1_fck);
- dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
- goto err_xclk60mhsp2_ck;
- }
-
- omap->usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
- if (IS_ERR(omap->usbhost_p2_fck)) {
- ret = PTR_ERR(omap->usbhost_p2_fck);
- dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
- goto err_usbhost_p1_fck;
+ goto err_xclk60mhsp2;
}
omap->init_60m_fclk = clk_get(dev, "init_60m_fclk");
if (IS_ERR(omap->init_60m_fclk)) {
ret = PTR_ERR(omap->init_60m_fclk);
dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
- goto err_usbhost_p2_fck;
+ goto err_init60m;
+ }
+
+ for (i = 0; i < omap->nports; i++) {
+ char clkname[30];
+
+ /* clock names are indexed from 1 */
+ snprintf(clkname, sizeof(clkname),
+ "usb_host_hs_utmi_p%d_clk", i + 1);
+
+ /* If a clock is not found, we won't bail out, as not all
+ * platforms have all clocks and we can function without
+ * them
+ */
+ omap->utmi_clk[i] = clk_get(dev, clkname);
+ if (IS_ERR(omap->utmi_clk[i]))
+ dev_dbg(dev, "Failed to get clock : %s : %ld\n",
+ clkname, PTR_ERR(omap->utmi_clk[i]));
+
+ snprintf(clkname, sizeof(clkname),
+ "usb_host_hs_hsic480m_p%d_clk", i + 1);
+ omap->hsic480m_clk[i] = clk_get(dev, clkname);
+ if (IS_ERR(omap->hsic480m_clk[i]))
+ dev_dbg(dev, "Failed to get clock : %s : %ld\n",
+ clkname, PTR_ERR(omap->hsic480m_clk[i]));
+
+ snprintf(clkname, sizeof(clkname),
+ "usb_host_hs_hsic60m_p%d_clk", i + 1);
+ omap->hsic60m_clk[i] = clk_get(dev, clkname);
+ if (IS_ERR(omap->hsic60m_clk[i]))
+ dev_dbg(dev, "Failed to get clock : %s : %ld\n",
+ clkname, PTR_ERR(omap->hsic60m_clk[i]));
}
if (is_ehci_phy_mode(pdata->port_mode[0])) {
- /* for OMAP3 , the clk set paretn fails */
- ret = clk_set_parent(omap->utmi_p1_fck,
+ /* for OMAP3, clk_set_parent fails */
+ ret = clk_set_parent(omap->utmi_p1_gfclk,
omap->xclk60mhsp1_ck);
if (ret != 0)
- dev_err(dev, "xclk60mhsp1_ck set parent"
- "failed error:%d\n", ret);
+ dev_dbg(dev, "xclk60mhsp1_ck set parent failed: %d\n",
+ ret);
} else if (is_ehci_tll_mode(pdata->port_mode[0])) {
- ret = clk_set_parent(omap->utmi_p1_fck,
+ ret = clk_set_parent(omap->utmi_p1_gfclk,
omap->init_60m_fclk);
if (ret != 0)
- dev_err(dev, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
+ dev_dbg(dev, "P0 init_60m_fclk set parent failed: %d\n",
+ ret);
}
if (is_ehci_phy_mode(pdata->port_mode[1])) {
- ret = clk_set_parent(omap->utmi_p2_fck,
+ ret = clk_set_parent(omap->utmi_p2_gfclk,
omap->xclk60mhsp2_ck);
if (ret != 0)
- dev_err(dev, "xclk60mhsp2_ck set parent"
- "failed error:%d\n", ret);
+ dev_dbg(dev, "xclk60mhsp2_ck set parent failed: %d\n",
+ ret);
} else if (is_ehci_tll_mode(pdata->port_mode[1])) {
- ret = clk_set_parent(omap->utmi_p2_fck,
+ ret = clk_set_parent(omap->utmi_p2_gfclk,
omap->init_60m_fclk);
if (ret != 0)
- dev_err(dev, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
- if (!res) {
- dev_err(dev, "UHH EHCI get resource failed\n");
- ret = -ENODEV;
- goto err_init_60m_fclk;
- }
-
- omap->uhh_base = ioremap(res->start, resource_size(res));
- if (!omap->uhh_base) {
- dev_err(dev, "UHH ioremap failed\n");
- ret = -ENOMEM;
- goto err_init_60m_fclk;
+ dev_dbg(dev, "P1 init_60m_fclk set parent failed: %d\n",
+ ret);
}
- platform_set_drvdata(pdev, omap);
-
omap_usbhs_init(dev);
ret = omap_usbhs_alloc_children(pdev);
if (ret) {
@@ -612,39 +706,41 @@ static int usbhs_omap_probe(struct platform_device *pdev)
goto err_alloc;
}
- goto end_probe;
+ return 0;
err_alloc:
omap_usbhs_deinit(&pdev->dev);
- iounmap(omap->uhh_base);
-
-err_init_60m_fclk:
- clk_put(omap->init_60m_fclk);
-err_usbhost_p2_fck:
- clk_put(omap->usbhost_p2_fck);
+ for (i = 0; i < omap->nports; i++) {
+ if (!IS_ERR(omap->utmi_clk[i]))
+ clk_put(omap->utmi_clk[i]);
+ if (!IS_ERR(omap->hsic60m_clk[i]))
+ clk_put(omap->hsic60m_clk[i]);
+ if (!IS_ERR(omap->hsic480m_clk[i]))
+ clk_put(omap->hsic480m_clk[i]);
+ }
-err_usbhost_p1_fck:
- clk_put(omap->usbhost_p1_fck);
+ clk_put(omap->init_60m_fclk);
-err_xclk60mhsp2_ck:
+err_init60m:
clk_put(omap->xclk60mhsp2_ck);
-err_utmi_p2_fck:
- clk_put(omap->utmi_p2_fck);
-
-err_xclk60mhsp1_ck:
+err_xclk60mhsp2:
clk_put(omap->xclk60mhsp1_ck);
-err_utmi_p1_fck:
- clk_put(omap->utmi_p1_fck);
+err_xclk60mhsp1:
+ clk_put(omap->utmi_p2_gfclk);
-err_end:
- clk_put(omap->ehci_logic_fck);
+err_p2_gfclk:
+ clk_put(omap->utmi_p1_gfclk);
+
+err_p1_gfclk:
+ if (!IS_ERR(omap->ehci_logic_fck))
+ clk_put(omap->ehci_logic_fck);
+
+err_mem:
pm_runtime_disable(dev);
- kfree(omap);
-end_probe:
return ret;
}
@@ -657,19 +753,29 @@ end_probe:
static int usbhs_omap_remove(struct platform_device *pdev)
{
struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
+ int i;
omap_usbhs_deinit(&pdev->dev);
- iounmap(omap->uhh_base);
+
+ for (i = 0; i < omap->nports; i++) {
+ if (!IS_ERR(omap->utmi_clk[i]))
+ clk_put(omap->utmi_clk[i]);
+ if (!IS_ERR(omap->hsic60m_clk[i]))
+ clk_put(omap->hsic60m_clk[i]);
+ if (!IS_ERR(omap->hsic480m_clk[i]))
+ clk_put(omap->hsic480m_clk[i]);
+ }
+
clk_put(omap->init_60m_fclk);
- clk_put(omap->usbhost_p2_fck);
- clk_put(omap->usbhost_p1_fck);
+ clk_put(omap->utmi_p1_gfclk);
+ clk_put(omap->utmi_p2_gfclk);
clk_put(omap->xclk60mhsp2_ck);
- clk_put(omap->utmi_p2_fck);
clk_put(omap->xclk60mhsp1_ck);
- clk_put(omap->utmi_p1_fck);
- clk_put(omap->ehci_logic_fck);
+
+ if (!IS_ERR(omap->ehci_logic_fck))
+ clk_put(omap->ehci_logic_fck);
+
pm_runtime_disable(&pdev->dev);
- kfree(omap);
return 0;
}
@@ -685,7 +791,7 @@ static struct platform_driver usbhs_omap_driver = {
.owner = THIS_MODULE,
.pm = &usbhsomap_dev_pm_ops,
},
- .remove = __exit_p(usbhs_omap_remove),
+ .remove = usbhs_omap_remove,
};
MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
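/*
 * Illustrative sketch, not part of the patch above; the helper names are
 * hypothetical. It mirrors the per-port clock handling the reworked
 * usbhs_omap_probe() uses: clock names are generated per port (indexed
 * from 1), a missing clock is treated as optional rather than fatal, and
 * cleanup only calls clk_put() on clocks that were actually obtained.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

static void example_get_port_clocks(struct device *dev, struct clk **clks,
				    int nports)
{
	int i;

	for (i = 0; i < nports; i++) {
		char name[30];

		/* port clocks are named from 1, not 0 */
		snprintf(name, sizeof(name),
			 "usb_host_hs_utmi_p%d_clk", i + 1);

		clks[i] = clk_get(dev, name);
		if (IS_ERR(clks[i]))
			dev_dbg(dev, "optional clock %s not available\n", name);
	}
}

static void example_put_port_clocks(struct clk **clks, int nports)
{
	int i;

	for (i = 0; i < nports; i++)
		if (!IS_ERR(clks[i]))
			clk_put(clks[i]);
}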
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index eb869153206d..0aef1a768880 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -54,10 +54,13 @@
#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num)
#define OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT 24
+#define OMAP_TLL_CHANNEL_CONF_DRVVBUS (1 << 16)
+#define OMAP_TLL_CHANNEL_CONF_CHRGVBUS (1 << 15)
#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
+#define OMAP_TLL_CHANNEL_CONF_MODE_TRANSPARENT_UTMI (2 << 1)
#define OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS (1 << 1)
#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
@@ -92,21 +95,25 @@
#define OMAP_USBTLL_REV1 0x00000015 /* OMAP3 */
#define OMAP_USBTLL_REV2 0x00000018 /* OMAP 3630 */
#define OMAP_USBTLL_REV3 0x00000004 /* OMAP4 */
+#define OMAP_USBTLL_REV4 0x00000006 /* OMAP5 */
#define is_ehci_tll_mode(x) (x == OMAP_EHCI_PORT_MODE_TLL)
+/* only PHY and UNUSED modes don't need TLL */
+#define omap_usb_mode_needs_tll(x) ((x) != OMAP_USBHS_PORT_MODE_UNUSED &&\
+ (x) != OMAP_EHCI_PORT_MODE_PHY)
+
struct usbtll_omap {
- struct clk *usbtll_p1_fck;
- struct clk *usbtll_p2_fck;
- struct usbtll_omap_platform_data platdata;
- /* secure the register updates */
- spinlock_t lock;
+ int nch; /* num. of channels */
+ struct usbhs_omap_platform_data *pdata;
+ struct clk **ch_clk;
};
/*-------------------------------------------------------------------------*/
-const char usbtll_driver_name[] = USBTLL_DRIVER_NAME;
-struct platform_device *tll_pdev;
+static const char usbtll_driver_name[] = USBTLL_DRIVER_NAME;
+static struct device *tll_dev;
+static DEFINE_SPINLOCK(tll_lock); /* serialize access to tll_dev */
/*-------------------------------------------------------------------------*/
@@ -203,84 +210,84 @@ static unsigned ohci_omap3_fslsmode(enum usbhs_omap_port_mode mode)
static int usbtll_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct usbtll_omap_platform_data *pdata = dev->platform_data;
+ struct usbhs_omap_platform_data *pdata = dev->platform_data;
void __iomem *base;
struct resource *res;
struct usbtll_omap *tll;
unsigned reg;
- unsigned long flags;
int ret = 0;
- int i, ver, count;
+ int i, ver;
+ bool needs_tll;
dev_dbg(dev, "starting TI HSUSB TLL Controller\n");
- tll = kzalloc(sizeof(struct usbtll_omap), GFP_KERNEL);
+ tll = devm_kzalloc(dev, sizeof(struct usbtll_omap), GFP_KERNEL);
if (!tll) {
dev_err(dev, "Memory allocation failed\n");
- ret = -ENOMEM;
- goto end;
+ return -ENOMEM;
}
- spin_lock_init(&tll->lock);
-
- for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
- tll->platdata.port_mode[i] = pdata->port_mode[i];
-
- tll->usbtll_p1_fck = clk_get(dev, "usb_tll_hs_usb_ch0_clk");
- if (IS_ERR(tll->usbtll_p1_fck)) {
- ret = PTR_ERR(tll->usbtll_p1_fck);
- dev_err(dev, "usbtll_p1_fck failed error:%d\n", ret);
- goto err_tll;
+ if (!pdata) {
+ dev_err(dev, "Platform data missing\n");
+ return -ENODEV;
}
- tll->usbtll_p2_fck = clk_get(dev, "usb_tll_hs_usb_ch1_clk");
- if (IS_ERR(tll->usbtll_p2_fck)) {
- ret = PTR_ERR(tll->usbtll_p2_fck);
- dev_err(dev, "usbtll_p2_fck failed error:%d\n", ret);
- goto err_usbtll_p1_fck;
- }
+ tll->pdata = pdata;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "usb tll get resource failed\n");
- ret = -ENODEV;
- goto err_usbtll_p2_fck;
- }
-
- base = ioremap(res->start, resource_size(res));
+ base = devm_request_and_ioremap(dev, res);
if (!base) {
- dev_err(dev, "TLL ioremap failed\n");
- ret = -ENOMEM;
- goto err_usbtll_p2_fck;
+ ret = -EADDRNOTAVAIL;
+ dev_err(dev, "Resource request/ioremap failed:%d\n", ret);
+ return ret;
}
platform_set_drvdata(pdev, tll);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- spin_lock_irqsave(&tll->lock, flags);
-
ver = usbtll_read(base, OMAP_USBTLL_REVISION);
switch (ver) {
case OMAP_USBTLL_REV1:
- case OMAP_USBTLL_REV2:
- count = OMAP_TLL_CHANNEL_COUNT;
+ case OMAP_USBTLL_REV4:
+ tll->nch = OMAP_TLL_CHANNEL_COUNT;
break;
+ case OMAP_USBTLL_REV2:
case OMAP_USBTLL_REV3:
- count = OMAP_REV2_TLL_CHANNEL_COUNT;
+ tll->nch = OMAP_REV2_TLL_CHANNEL_COUNT;
break;
default:
- dev_err(dev, "TLL version failed\n");
- ret = -ENODEV;
- goto err_ioremap;
+ tll->nch = OMAP_TLL_CHANNEL_COUNT;
+ dev_dbg(dev,
+ "USB TLL Rev : 0x%x not recognized, assuming %d channels\n",
+ ver, tll->nch);
+ break;
}
- if (is_ehci_tll_mode(pdata->port_mode[0]) ||
- is_ehci_tll_mode(pdata->port_mode[1]) ||
- is_ehci_tll_mode(pdata->port_mode[2]) ||
- is_ohci_port(pdata->port_mode[0]) ||
- is_ohci_port(pdata->port_mode[1]) ||
- is_ohci_port(pdata->port_mode[2])) {
+ tll->ch_clk = devm_kzalloc(dev, sizeof(struct clk * [tll->nch]),
+ GFP_KERNEL);
+ if (!tll->ch_clk) {
+ ret = -ENOMEM;
+ dev_err(dev, "Couldn't allocate memory for channel clocks\n");
+ goto err_clk_alloc;
+ }
+
+ for (i = 0; i < tll->nch; i++) {
+ char clkname[] = "usb_tll_hs_usb_chx_clk";
+
+ snprintf(clkname, sizeof(clkname),
+ "usb_tll_hs_usb_ch%d_clk", i);
+ tll->ch_clk[i] = clk_get(dev, clkname);
+
+ if (IS_ERR(tll->ch_clk[i]))
+ dev_dbg(dev, "can't get clock : %s\n", clkname);
+ }
+
+ needs_tll = false;
+ for (i = 0; i < tll->nch; i++)
+ needs_tll |= omap_usb_mode_needs_tll(pdata->port_mode[i]);
+
+ if (needs_tll) {
/* Program Common TLL register */
reg = usbtll_read(base, OMAP_TLL_SHARED_CONF);
@@ -292,7 +299,7 @@ static int usbtll_omap_probe(struct platform_device *pdev)
usbtll_write(base, OMAP_TLL_SHARED_CONF, reg);
/* Enable channels now */
- for (i = 0; i < count; i++) {
+ for (i = 0; i < tll->nch; i++) {
reg = usbtll_read(base, OMAP_TLL_CHANNEL_CONF(i));
if (is_ohci_port(pdata->port_mode[i])) {
@@ -308,6 +315,15 @@ static int usbtll_omap_probe(struct platform_device *pdev)
reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
| OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
+ } else if (pdata->port_mode[i] ==
+ OMAP_EHCI_PORT_MODE_HSIC) {
+ /*
+ * HSIC Mode requires UTMI port configurations
+ */
+ reg |= OMAP_TLL_CHANNEL_CONF_DRVVBUS
+ | OMAP_TLL_CHANNEL_CONF_CHRGVBUS
+ | OMAP_TLL_CHANNEL_CONF_MODE_TRANSPARENT_UTMI
+ | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF;
} else {
continue;
}
@@ -320,25 +336,18 @@ static int usbtll_omap_probe(struct platform_device *pdev)
}
}
-err_ioremap:
- spin_unlock_irqrestore(&tll->lock, flags);
- iounmap(base);
pm_runtime_put_sync(dev);
- tll_pdev = pdev;
- if (!ret)
- goto end;
- pm_runtime_disable(dev);
+ /* only after this can omap_tll_enable/disable work */
+ spin_lock(&tll_lock);
+ tll_dev = dev;
+ spin_unlock(&tll_lock);
-err_usbtll_p2_fck:
- clk_put(tll->usbtll_p2_fck);
-
-err_usbtll_p1_fck:
- clk_put(tll->usbtll_p1_fck);
+ return 0;
-err_tll:
- kfree(tll);
+err_clk_alloc:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
-end:
return ret;
}
@@ -351,36 +360,42 @@ end:
static int usbtll_omap_remove(struct platform_device *pdev)
{
struct usbtll_omap *tll = platform_get_drvdata(pdev);
+ int i;
+
+ spin_lock(&tll_lock);
+ tll_dev = NULL;
+ spin_unlock(&tll_lock);
+
+ for (i = 0; i < tll->nch; i++)
+ if (!IS_ERR(tll->ch_clk[i]))
+ clk_put(tll->ch_clk[i]);
- clk_put(tll->usbtll_p2_fck);
- clk_put(tll->usbtll_p1_fck);
pm_runtime_disable(&pdev->dev);
- kfree(tll);
return 0;
}
static int usbtll_runtime_resume(struct device *dev)
{
struct usbtll_omap *tll = dev_get_drvdata(dev);
- struct usbtll_omap_platform_data *pdata = &tll->platdata;
- unsigned long flags;
+ struct usbhs_omap_platform_data *pdata = tll->pdata;
+ int i;
dev_dbg(dev, "usbtll_runtime_resume\n");
- if (!pdata) {
- dev_dbg(dev, "missing platform_data\n");
- return -ENODEV;
- }
-
- spin_lock_irqsave(&tll->lock, flags);
+ for (i = 0; i < tll->nch; i++) {
+ if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
+ int r;
- if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_enable(tll->usbtll_p1_fck);
-
- if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_enable(tll->usbtll_p2_fck);
+ if (IS_ERR(tll->ch_clk[i]))
+ continue;
- spin_unlock_irqrestore(&tll->lock, flags);
+ r = clk_enable(tll->ch_clk[i]);
+ if (r) {
+ dev_err(dev,
+ "Error enabling ch %d clock: %d\n", i, r);
+ }
+ }
+ }
return 0;
}
@@ -388,26 +403,18 @@ static int usbtll_runtime_resume(struct device *dev)
static int usbtll_runtime_suspend(struct device *dev)
{
struct usbtll_omap *tll = dev_get_drvdata(dev);
- struct usbtll_omap_platform_data *pdata = &tll->platdata;
- unsigned long flags;
+ struct usbhs_omap_platform_data *pdata = tll->pdata;
+ int i;
dev_dbg(dev, "usbtll_runtime_suspend\n");
- if (!pdata) {
- dev_dbg(dev, "missing platform_data\n");
- return -ENODEV;
+ for (i = 0; i < tll->nch; i++) {
+ if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
+ if (!IS_ERR(tll->ch_clk[i]))
+ clk_disable(tll->ch_clk[i]);
+ }
}
- spin_lock_irqsave(&tll->lock, flags);
-
- if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_disable(tll->usbtll_p1_fck);
-
- if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_disable(tll->usbtll_p2_fck);
-
- spin_unlock_irqrestore(&tll->lock, flags);
-
return 0;
}
@@ -429,21 +436,39 @@ static struct platform_driver usbtll_omap_driver = {
int omap_tll_enable(void)
{
- if (!tll_pdev) {
- pr_err("missing omap usbhs tll platform_data\n");
- return -ENODEV;
+ int ret;
+
+ spin_lock(&tll_lock);
+
+ if (!tll_dev) {
+ pr_err("%s: OMAP USB TLL not initialized\n", __func__);
+ ret = -ENODEV;
+ } else {
+ ret = pm_runtime_get_sync(tll_dev);
}
- return pm_runtime_get_sync(&tll_pdev->dev);
+
+ spin_unlock(&tll_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(omap_tll_enable);
int omap_tll_disable(void)
{
- if (!tll_pdev) {
- pr_err("missing omap usbhs tll platform_data\n");
- return -ENODEV;
+ int ret;
+
+ spin_lock(&tll_lock);
+
+ if (!tll_dev) {
+ pr_err("%s: OMAP USB TLL not initialized\n", __func__);
+ ret = -ENODEV;
+ } else {
+ ret = pm_runtime_put_sync(tll_dev);
}
- return pm_runtime_put_sync(&tll_pdev->dev);
+
+ spin_unlock(&tll_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(omap_tll_disable);
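/*
 * Standalone sketch of the pattern the TLL driver now uses behind its
 * exported omap_tll_enable()/omap_tll_disable() helpers (identifiers here
 * are hypothetical): a file-scope device pointer is published under a
 * spinlock only once probe has completed, so callers get -ENODEV until the
 * driver is ready, and the runtime-PM reference is taken against that
 * device.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

static struct device *example_tll_dev;
static DEFINE_SPINLOCK(example_tll_lock);	/* serializes example_tll_dev */

int example_tll_enable(void)
{
	int ret;

	spin_lock(&example_tll_lock);

	if (!example_tll_dev)
		ret = -ENODEV;			/* probe has not completed yet */
	else
		ret = pm_runtime_get_sync(example_tll_dev);

	spin_unlock(&example_tll_lock);

	return ret;
}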
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 6ffd7a2affdc..bbdbc50a3cca 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -39,6 +39,14 @@ enum palmas_ids {
PALMAS_USB_ID,
};
+static struct resource palmas_rtc_resources[] = {
+ {
+ .start = PALMAS_RTC_ALARM_IRQ,
+ .end = PALMAS_RTC_ALARM_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static const struct mfd_cell palmas_children[] = {
{
.name = "palmas-pmic",
@@ -59,6 +67,8 @@ static const struct mfd_cell palmas_children[] = {
{
.name = "palmas-rtc",
.id = PALMAS_RTC_ID,
+ .resources = &palmas_rtc_resources[0],
+ .num_resources = ARRAY_SIZE(palmas_rtc_resources),
},
{
.name = "palmas-pwrbutton",
@@ -456,8 +466,8 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
ret = mfd_add_devices(palmas->dev, -1,
children, ARRAY_SIZE(palmas_children),
- NULL, regmap_irq_chip_get_base(palmas->irq_data),
- NULL);
+ NULL, 0,
+ regmap_irq_get_domain(palmas->irq_data));
kfree(children);
if (ret < 0)
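/*
 * Minimal sketch of the idiom the palmas change adopts (cell and resource
 * names here are hypothetical): the MFD cell carries an IORESOURCE_IRQ
 * resource, and mfd_add_devices() is passed the regmap IRQ domain with an
 * irq_base of 0, so the cell's IRQ number is remapped through that domain.
 */
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>

#define EXAMPLE_ALARM_IRQ	8	/* hypothetical local IRQ number */

static struct resource example_rtc_resources[] = {
	{
		.start	= EXAMPLE_ALARM_IRQ,
		.end	= EXAMPLE_ALARM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct mfd_cell example_cells[] = {
	{
		.name		= "example-rtc",
		.resources	= example_rtc_resources,
		.num_resources	= ARRAY_SIZE(example_rtc_resources),
	},
};

static int example_add_cells(struct device *dev,
			     struct regmap_irq_chip_data *irq_data)
{
	return mfd_add_devices(dev, -1, example_cells,
			       ARRAY_SIZE(example_cells), NULL, 0,
			       regmap_irq_get_domain(irq_data));
}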
diff --git a/drivers/mfd/rtl8411.c b/drivers/mfd/rtl8411.c
index 3d3b4addf81a..2a2d31687b72 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/mfd/rtl8411.c
@@ -115,14 +115,24 @@ static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card)
static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
u8 mask, val;
+ int err;
mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK;
- if (voltage == OUTPUT_3V3)
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_D);
+ if (err < 0)
+ return err;
val = (BPP_ASIC_3V3 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3;
- else if (voltage == OUTPUT_1V8)
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+ if (err < 0)
+ return err;
val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8;
- else
+ } else {
return -EINVAL;
+ }
return rtsx_pci_write_register(pcr, LDO_CTL, mask, val);
}
diff --git a/drivers/mfd/rts5209.c b/drivers/mfd/rts5209.c
index 98fe0f39463e..ec78d9fb0879 100644
--- a/drivers/mfd/rts5209.c
+++ b/drivers/mfd/rts5209.c
@@ -149,10 +149,18 @@ static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
int err;
if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_D);
+ if (err < 0)
+ return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
if (err < 0)
return err;
} else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+ if (err < 0)
+ return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
if (err < 0)
return err;
diff --git a/drivers/mfd/rts5227.c b/drivers/mfd/rts5227.c
new file mode 100644
index 000000000000..fc831dcb1480
--- /dev/null
+++ b/drivers/mfd/rts5227.c
@@ -0,0 +1,234 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ *
+ * Roger Tseng <rogerable@realtek.com>
+ * No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mfd/rtsx_pci.h>
+
+#include "rtsx_pcr.h"
+
+static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ u16 cap;
+
+ rtsx_pci_init_cmd(pcr);
+
+ /* Configure GPIO as output */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+ /* Switch LDO3318 source from DV33 to card_3v3 */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
+ /* Configure LTR */
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cap);
+ if (cap & PCI_EXP_LTR_EN)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LTR_CTL, 0xFF, 0xA3);
+ /* Configure OBFF */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG, 0x03, 0x03);
+ /* Configure force_clock_req
+ * Maybe we should define a name for 0xFF03
+ */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, 0xFF03, 0x08, 0x08);
+ /* Correct driving */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD30_CLK_DRIVE_SEL, 0xFF, 0x96);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD30_CMD_DRIVE_SEL, 0xFF, 0x96);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD30_DAT_DRIVE_SEL, 0xFF, 0x96);
+
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5227_optimize_phy(struct rtsx_pcr *pcr)
+{
+ /* Optimize RX sensitivity */
+ return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42);
+}
+
+static int rts5227_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
+}
+
+static int rts5227_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
+}
+
+static int rts5227_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
+}
+
+static int rts5227_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
+}
+
+static int rts5227_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ int err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ SD_POWER_MASK, SD_PARTIAL_POWER_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x02);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ /* To avoid too large an in-rush current */
+ udelay(150);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ SD_POWER_MASK, SD_POWER_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x06);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ SD_POWER_MASK | PMOS_STRG_MASK,
+ SD_POWER_OFF | PMOS_STRG_400mA);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0X00);
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+ u8 drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+ if (err < 0)
+ return err;
+ drive_sel = 0x96;
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C80 | 0x24);
+ if (err < 0)
+ return err;
+ drive_sel = 0xB3;
+ } else {
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+ 0xFF, drive_sel);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+ 0xFF, drive_sel);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+ 0xFF, drive_sel);
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static const struct pcr_ops rts5227_pcr_ops = {
+ .extra_init_hw = rts5227_extra_init_hw,
+ .optimize_phy = rts5227_optimize_phy,
+ .turn_on_led = rts5227_turn_on_led,
+ .turn_off_led = rts5227_turn_off_led,
+ .enable_auto_blink = rts5227_enable_auto_blink,
+ .disable_auto_blink = rts5227_disable_auto_blink,
+ .card_power_on = rts5227_card_power_on,
+ .card_power_off = rts5227_card_power_off,
+ .switch_output_voltage = rts5227_switch_output_voltage,
+ .cd_deglitch = NULL,
+ .conv_clk_and_div_n = NULL,
+};
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5227_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5227_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+/* MS Pull Control Enable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5227_ms_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+/* MS Pull Control Disable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5227_ms_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+void rts5227_init_params(struct rtsx_pcr *pcr)
+{
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 2;
+ pcr->ops = &rts5227_pcr_ops;
+
+ pcr->sd_pull_ctl_enable_tbl = rts5227_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5227_sd_pull_ctl_disable_tbl;
+ pcr->ms_pull_ctl_enable_tbl = rts5227_ms_pull_ctl_enable_tbl;
+ pcr->ms_pull_ctl_disable_tbl = rts5227_ms_pull_ctl_disable_tbl;
+}
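/*
 * Illustrative sketch of the command-queue idiom rts5227.c relies on (the
 * helper itself is hypothetical; register names come from the file above):
 * masked register writes are queued with rtsx_pci_add_cmd() and then issued
 * to the card reader as a single batch by rtsx_pci_send_cmd().
 */
#include <linux/mfd/rtsx_pci.h>

static int example_set_sd30_drive_sel(struct rtsx_pcr *pcr, u8 drive_sel)
{
	rtsx_pci_init_cmd(pcr);

	/* queue three masked register writes */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
			 0xFF, drive_sel);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
			 0xFF, drive_sel);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
			 0xFF, drive_sel);

	/* send them as one transfer; 100 is the timeout in milliseconds */
	return rtsx_pci_send_cmd(pcr, 100);
}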
diff --git a/drivers/mfd/rts5229.c b/drivers/mfd/rts5229.c
index 29d889cbb9c5..58af4dbe3586 100644
--- a/drivers/mfd/rts5229.c
+++ b/drivers/mfd/rts5229.c
@@ -119,10 +119,18 @@ static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
int err;
if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_D);
+ if (err < 0)
+ return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
if (err < 0)
return err;
} else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+ if (err < 0)
+ return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
if (err < 0)
return err;
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 9fc57009e228..2f12cc13489a 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -55,6 +55,7 @@ static DEFINE_PCI_DEVICE_TABLE(rtsx_pci_ids) = {
{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -325,7 +326,6 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
val = ((u64)addr << 32) | ((u64)len << 12) | option;
put_unaligned_le64(val, ptr);
- ptr++;
pcr->sgi++;
}
@@ -591,8 +591,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
int err, clk;
- u8 N, min_N, max_N, clk_divider;
- u8 mcu_cnt, div, max_div;
+ u8 n, clk_divider, mcu_cnt, div;
u8 depth[] = {
[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
@@ -616,10 +615,6 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
card_clock /= 1000000;
dev_dbg(&(pcr->pci->dev), "Switch card clock to %dMHz\n", card_clock);
- min_N = 80;
- max_N = 208;
- max_div = CLK_DIV_8;
-
clk = card_clock;
if (!initial_mode && double_clk)
clk = card_clock * 2;
@@ -631,30 +626,30 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
return 0;
if (pcr->ops->conv_clk_and_div_n)
- N = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
else
- N = (u8)(clk - 2);
- if ((clk <= 2) || (N > max_N))
+ n = (u8)(clk - 2);
+ if ((clk <= 2) || (n > MAX_DIV_N_PCR))
return -EINVAL;
mcu_cnt = (u8)(125/clk + 3);
if (mcu_cnt > 15)
mcu_cnt = 15;
- /* Make sure that the SSC clock div_n is equal or greater than min_N */
+ /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
div = CLK_DIV_1;
- while ((N < min_N) && (div < max_div)) {
+ while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
if (pcr->ops->conv_clk_and_div_n) {
- int dbl_clk = pcr->ops->conv_clk_and_div_n(N,
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
DIV_N_TO_CLK) * 2;
- N = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+ n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
CLK_TO_DIV_N);
} else {
- N = (N + 2) * 2 - 2;
+ n = (n + 2) * 2 - 2;
}
div++;
}
- dev_dbg(&(pcr->pci->dev), "N = %d, div = %d\n", N, div);
+ dev_dbg(&(pcr->pci->dev), "n = %d, div = %d\n", n, div);
ssc_depth = depth[ssc_depth];
if (double_clk)
@@ -671,7 +666,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
SSC_DEPTH_MASK, ssc_depth);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, N);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
if (vpclk) {
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
@@ -713,6 +708,25 @@ int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
+int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
+{
+ unsigned int cd_mask[] = {
+ [RTSX_SD_CARD] = SD_EXIST,
+ [RTSX_MS_CARD] = MS_EXIST
+ };
+
+ if (!pcr->ms_pmos) {
+ /* When using a single PMOS, accessing the card is not permitted
+ * if the existing card is not the designated one.
+ */
+ if (pcr->card_exist & (~cd_mask[card]))
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
+
int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
if (pcr->ops->switch_output_voltage)
@@ -758,7 +772,7 @@ static void rtsx_pci_card_detect(struct work_struct *work)
struct delayed_work *dwork;
struct rtsx_pcr *pcr;
unsigned long flags;
- unsigned int card_detect = 0;
+ unsigned int card_detect = 0, card_inserted, card_removed;
u32 irq_status;
dwork = to_delayed_work(work);
@@ -766,25 +780,35 @@ static void rtsx_pci_card_detect(struct work_struct *work)
dev_dbg(&(pcr->pci->dev), "--> %s\n", __func__);
+ mutex_lock(&pcr->pcr_mutex);
spin_lock_irqsave(&pcr->lock, flags);
irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
dev_dbg(&(pcr->pci->dev), "irq_status: 0x%08x\n", irq_status);
- if (pcr->card_inserted || pcr->card_removed) {
+ irq_status &= CARD_EXIST;
+ card_inserted = pcr->card_inserted & irq_status;
+ card_removed = pcr->card_removed;
+ pcr->card_inserted = 0;
+ pcr->card_removed = 0;
+
+ spin_unlock_irqrestore(&pcr->lock, flags);
+
+ if (card_inserted || card_removed) {
dev_dbg(&(pcr->pci->dev),
"card_inserted: 0x%x, card_removed: 0x%x\n",
- pcr->card_inserted, pcr->card_removed);
+ card_inserted, card_removed);
if (pcr->ops->cd_deglitch)
- pcr->card_inserted = pcr->ops->cd_deglitch(pcr);
+ card_inserted = pcr->ops->cd_deglitch(pcr);
+
+ card_detect = card_inserted | card_removed;
- card_detect = pcr->card_inserted | pcr->card_removed;
- pcr->card_inserted = 0;
- pcr->card_removed = 0;
+ pcr->card_exist |= card_inserted;
+ pcr->card_exist &= ~card_removed;
}
- spin_unlock_irqrestore(&pcr->lock, flags);
+ mutex_unlock(&pcr->pcr_mutex);
if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
pcr->slots[RTSX_SD_CARD].card_event(
@@ -836,10 +860,6 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
}
}
- if (pcr->card_inserted || pcr->card_removed)
- schedule_delayed_work(&pcr->carddet_work,
- msecs_to_jiffies(200));
-
if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
pcr->trans_result = TRANS_RESULT_FAIL;
@@ -852,6 +872,10 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
}
}
+ if (pcr->card_inserted || pcr->card_removed)
+ schedule_delayed_work(&pcr->carddet_work,
+ msecs_to_jiffies(200));
+
spin_unlock(&pcr->lock);
return IRQ_HANDLED;
}
@@ -974,6 +998,14 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
return err;
}
+ /* No CD interrupt if probing driver with card inserted.
+ * So we need to initialize pcr->card_exist here.
+ */
+ if (pcr->ops->cd_deglitch)
+ pcr->card_exist = pcr->ops->cd_deglitch(pcr);
+ else
+ pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
+
return 0;
}
@@ -997,6 +1029,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5289:
rtl8411_init_params(pcr);
break;
+
+ case 0x5227:
+ rts5227_init_params(pcr);
+ break;
}
dev_dbg(&(pcr->pci->dev), "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1030,6 +1066,10 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
(int)pcidev->revision);
+ ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+ if (ret < 0)
+ return ret;
+
ret = pci_enable_device(pcidev);
if (ret)
return ret;
@@ -1051,15 +1091,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
}
handle->pcr = pcr;
- if (!idr_pre_get(&rtsx_pci_idr, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto free_handle;
- }
-
+ idr_preload(GFP_KERNEL);
spin_lock(&rtsx_pci_lock);
- ret = idr_get_new(&rtsx_pci_idr, pcr, &pcr->id);
+ ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ pcr->id = ret;
spin_unlock(&rtsx_pci_lock);
- if (ret)
+ idr_preload_end();
+ if (ret < 0)
goto free_handle;
pcr->pci = pcidev;
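/*
 * Minimal sketch (identifiers are hypothetical) of the new IDR API used in
 * rtsx_pci_probe() above: idr_preload()/idr_preload_end() replace
 * idr_pre_get(), and idr_alloc() returns the allocated id directly (or a
 * negative errno) instead of filling an output parameter.
 */
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_idr_lock);

static int example_alloc_id(void *object)
{
	int id;

	idr_preload(GFP_KERNEL);		/* preallocate outside the lock */

	spin_lock(&example_idr_lock);
	/* start at 0; an end of 0 means "no upper limit" */
	id = idr_alloc(&example_idr, object, 0, 0, GFP_NOWAIT);
	spin_unlock(&example_idr_lock);

	idr_preload_end();

	return id;	/* >= 0 on success, negative errno on failure */
}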
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/mfd/rtsx_pcr.h
index 12462c1df1a9..2b3ab8a04823 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/mfd/rtsx_pcr.h
@@ -25,8 +25,12 @@
#include <linux/mfd/rtsx_pci.h>
+#define MIN_DIV_N_PCR 80
+#define MAX_DIV_N_PCR 208
+
void rts5209_init_params(struct rtsx_pcr *pcr);
void rts5229_init_params(struct rtsx_pcr *pcr);
void rtl8411_init_params(struct rtsx_pcr *pcr);
+void rts5227_init_params(struct rtsx_pcr *pcr);
#endif
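/*
 * Standalone sketch of the divider search rtsx_pci_switch_clock() performs
 * with these constants (the function below is hypothetical and simplified,
 * following the path without a conv_clk_and_div_n callback): div_n starts
 * at clk - 2 and, while it is below the minimum, the clock is doubled and a
 * larger output divider is selected, keeping the SSC divider in range.
 */
#include <linux/errno.h>

#define EXAMPLE_MIN_DIV_N	80	/* mirrors MIN_DIV_N_PCR above */
#define EXAMPLE_MAX_DIV_N	208	/* mirrors MAX_DIV_N_PCR above */

static int example_pick_div_n(int clk_mhz, int *div_steps)
{
	int n = clk_mhz - 2;
	int div = 0;

	if (clk_mhz <= 2 || n > EXAMPLE_MAX_DIV_N)
		return -EINVAL;

	/* double the clock until div_n is no longer below the minimum */
	while (n < EXAMPLE_MIN_DIV_N && div < 3) {
		n = (n + 2) * 2 - 2;
		div++;
	}

	*div_steps = div;	/* how many times the output divider doubled */
	return n;		/* value to program into SSC_DIV_N_0 */
}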
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 3f10591ea94e..61aea6381cdf 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -20,6 +20,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
static struct platform_driver syscon_driver;
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 409afa23d5dc..5ad4b772b097 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/of_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6507x.h>
@@ -116,11 +117,19 @@ static const struct i2c_device_id tps6507x_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tps6507x_i2c_id);
+#ifdef CONFIG_OF
+static struct of_device_id tps6507x_of_match[] = {
+ {.compatible = "ti,tps6507x", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tps6507x_of_match);
+#endif
static struct i2c_driver tps6507x_i2c_driver = {
.driver = {
.name = "tps6507x",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tps6507x_of_match),
},
.probe = tps6507x_i2c_probe,
.remove = tps6507x_i2c_remove,
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 8d12a8e00d9c..98edb5be85c6 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -25,6 +25,8 @@
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps65090.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/err.h>
#define NUM_INT_REG 2
@@ -148,18 +150,31 @@ static const struct regmap_config tps65090_regmap_config = {
.volatile_reg = is_volatile_reg,
};
+#ifdef CONFIG_OF
+static const struct of_device_id tps65090_of_match[] = {
+ { .compatible = "ti,tps65090",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tps65090_of_match);
+#endif
+
static int tps65090_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tps65090_platform_data *pdata = client->dev.platform_data;
+ int irq_base = 0;
struct tps65090 *tps65090;
int ret;
- if (!pdata) {
- dev_err(&client->dev, "tps65090 requires platform data\n");
+ if (!pdata && !client->dev.of_node) {
+ dev_err(&client->dev,
+ "tps65090 requires platform data or of_node\n");
return -EINVAL;
}
+ if (pdata)
+ irq_base = pdata->irq_base;
+
tps65090 = devm_kzalloc(&client->dev, sizeof(*tps65090), GFP_KERNEL);
if (!tps65090) {
dev_err(&client->dev, "mem alloc for tps65090 failed\n");
@@ -178,7 +193,7 @@ static int tps65090_i2c_probe(struct i2c_client *client,
if (client->irq) {
ret = regmap_add_irq_chip(tps65090->rmap, client->irq,
- IRQF_ONESHOT | IRQF_TRIGGER_LOW, pdata->irq_base,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW, irq_base,
&tps65090_irq_chip, &tps65090->irq_data);
if (ret) {
dev_err(&client->dev,
@@ -189,7 +204,7 @@ static int tps65090_i2c_probe(struct i2c_client *client,
ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
ARRAY_SIZE(tps65090s), NULL,
- regmap_irq_chip_get_base(tps65090->irq_data), NULL);
+ 0, regmap_irq_get_domain(tps65090->irq_data));
if (ret) {
dev_err(&client->dev, "add mfd devices failed with err: %d\n",
ret);
@@ -215,28 +230,6 @@ static int tps65090_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tps65090_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- if (client->irq)
- disable_irq(client->irq);
- return 0;
-}
-
-static int tps65090_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- if (client->irq)
- enable_irq(client->irq);
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops tps65090_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tps65090_suspend, tps65090_resume)
-};
-
static const struct i2c_device_id tps65090_id_table[] = {
{ "tps65090", 0 },
{ },
@@ -247,7 +240,7 @@ static struct i2c_driver tps65090_driver = {
.driver = {
.name = "tps65090",
.owner = THIS_MODULE,
- .pm = &tps65090_pm_ops,
+ .of_match_table = of_match_ptr(tps65090_of_match),
},
.probe = tps65090_i2c_probe,
.remove = tps65090_i2c_remove,
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 4f3baadd0038..89ab4d970643 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -66,16 +66,6 @@
/* Triton Core internal information (BEGIN) */
-#define TWL_NUM_SLAVES 4
-
-#define SUB_CHIP_ID0 0
-#define SUB_CHIP_ID1 1
-#define SUB_CHIP_ID2 2
-#define SUB_CHIP_ID3 3
-#define SUB_CHIP_ID_INVAL 0xff
-
-#define TWL_MODULE_LAST TWL4030_MODULE_LAST
-
/* Base Address defns for twl4030_map[] */
/* subchip/slave 0 - USB ID */
@@ -94,10 +84,7 @@
#define TWL4030_BASEADD_MADC 0x0000
#define TWL4030_BASEADD_MAIN_CHARGE 0x0074
#define TWL4030_BASEADD_PRECHARGE 0x00AA
-#define TWL4030_BASEADD_PWM0 0x00F8
-#define TWL4030_BASEADD_PWM1 0x00FB
-#define TWL4030_BASEADD_PWMA 0x00EF
-#define TWL4030_BASEADD_PWMB 0x00F1
+#define TWL4030_BASEADD_PWM 0x00F8
#define TWL4030_BASEADD_KEYPAD 0x00D2
#define TWL5031_BASEADD_ACCESSORY 0x0074 /* Replaces Main Charge */
@@ -117,7 +104,7 @@
/* subchip/slave 0 0x48 - POWER */
#define TWL6030_BASEADD_RTC 0x0000
-#define TWL6030_BASEADD_MEM 0x0017
+#define TWL6030_BASEADD_SECURED_REG 0x0017
#define TWL6030_BASEADD_PM_MASTER 0x001F
#define TWL6030_BASEADD_PM_SLAVE_MISC 0x0030 /* PM_RECEIVER */
#define TWL6030_BASEADD_PM_MISC 0x00E2
@@ -132,6 +119,7 @@
#define TWL6030_BASEADD_PIH 0x00D0
#define TWL6030_BASEADD_CHARGER 0x00E0
#define TWL6025_BASEADD_CHARGER 0x00DA
+#define TWL6030_BASEADD_LED 0x00F4
/* subchip/slave 2 0x4A - DFT */
#define TWL6030_BASEADD_DIEID 0x00C0
@@ -153,33 +141,28 @@
/*----------------------------------------------------------------------*/
-/* is driver active, bound to a chip? */
-static bool inuse;
-
-/* TWL IDCODE Register value */
-static u32 twl_idcode;
-
-static unsigned int twl_id;
-unsigned int twl_rev(void)
-{
- return twl_id;
-}
-EXPORT_SYMBOL(twl_rev);
-
/* Structure for each TWL4030/TWL6030 Slave */
struct twl_client {
struct i2c_client *client;
struct regmap *regmap;
};
-static struct twl_client twl_modules[TWL_NUM_SLAVES];
-
/* mapping the module id to slave id and base address */
struct twl_mapping {
unsigned char sid; /* Slave ID */
unsigned char base; /* base address */
};
-static struct twl_mapping *twl_map;
+
+struct twl_private {
+ bool ready; /* The core driver is ready to be used */
+ u32 twl_idcode; /* TWL IDCODE Register value */
+ unsigned int twl_id;
+
+ struct twl_mapping *twl_map;
+ struct twl_client *twl_modules;
+};
+
+static struct twl_private *twl_priv;
static struct twl_mapping twl4030_map[] = {
/*
@@ -188,34 +171,33 @@ static struct twl_mapping twl4030_map[] = {
* so they continue to match the order in this table.
*/
+ /* Common IPs */
{ 0, TWL4030_BASEADD_USB },
+ { 1, TWL4030_BASEADD_PIH },
+ { 2, TWL4030_BASEADD_MAIN_CHARGE },
+ { 3, TWL4030_BASEADD_PM_MASTER },
+ { 3, TWL4030_BASEADD_PM_RECEIVER },
+
+ { 3, TWL4030_BASEADD_RTC },
+ { 2, TWL4030_BASEADD_PWM },
+ { 2, TWL4030_BASEADD_LED },
+ { 3, TWL4030_BASEADD_SECURED_REG },
+
+ /* TWL4030 specific IPs */
{ 1, TWL4030_BASEADD_AUDIO_VOICE },
{ 1, TWL4030_BASEADD_GPIO },
{ 1, TWL4030_BASEADD_INTBR },
- { 1, TWL4030_BASEADD_PIH },
-
{ 1, TWL4030_BASEADD_TEST },
{ 2, TWL4030_BASEADD_KEYPAD },
+
{ 2, TWL4030_BASEADD_MADC },
{ 2, TWL4030_BASEADD_INTERRUPTS },
- { 2, TWL4030_BASEADD_LED },
-
- { 2, TWL4030_BASEADD_MAIN_CHARGE },
{ 2, TWL4030_BASEADD_PRECHARGE },
- { 2, TWL4030_BASEADD_PWM0 },
- { 2, TWL4030_BASEADD_PWM1 },
- { 2, TWL4030_BASEADD_PWMA },
-
- { 2, TWL4030_BASEADD_PWMB },
- { 2, TWL5031_BASEADD_ACCESSORY },
- { 2, TWL5031_BASEADD_INTERRUPTS },
{ 3, TWL4030_BASEADD_BACKUP },
{ 3, TWL4030_BASEADD_INT },
- { 3, TWL4030_BASEADD_PM_MASTER },
- { 3, TWL4030_BASEADD_PM_RECEIVER },
- { 3, TWL4030_BASEADD_RTC },
- { 3, TWL4030_BASEADD_SECURED_REG },
+ { 2, TWL5031_BASEADD_ACCESSORY },
+ { 2, TWL5031_BASEADD_INTERRUPTS },
};
static struct regmap_config twl4030_regmap_config[4] = {
@@ -251,35 +233,25 @@ static struct twl_mapping twl6030_map[] = {
* <linux/i2c/twl.h> defines for TWL4030_MODULE_*
* so they continue to match the order in this table.
*/
- { SUB_CHIP_ID1, TWL6030_BASEADD_USB },
- { SUB_CHIP_ID_INVAL, TWL6030_BASEADD_AUDIO },
- { SUB_CHIP_ID2, TWL6030_BASEADD_DIEID },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
- { SUB_CHIP_ID1, TWL6030_BASEADD_PIH },
-
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
- { SUB_CHIP_ID1, TWL6030_BASEADD_GPADC_CTRL },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
-
- { SUB_CHIP_ID1, TWL6030_BASEADD_CHARGER },
- { SUB_CHIP_ID1, TWL6030_BASEADD_GASGAUGE },
- { SUB_CHIP_ID1, TWL6030_BASEADD_PWM },
- { SUB_CHIP_ID0, TWL6030_BASEADD_ZERO },
- { SUB_CHIP_ID1, TWL6030_BASEADD_ZERO },
-
- { SUB_CHIP_ID2, TWL6030_BASEADD_ZERO },
- { SUB_CHIP_ID2, TWL6030_BASEADD_ZERO },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
-
- { SUB_CHIP_ID0, TWL6030_BASEADD_PM_MASTER },
- { SUB_CHIP_ID0, TWL6030_BASEADD_PM_SLAVE_MISC },
- { SUB_CHIP_ID0, TWL6030_BASEADD_RTC },
- { SUB_CHIP_ID0, TWL6030_BASEADD_MEM },
- { SUB_CHIP_ID1, TWL6025_BASEADD_CHARGER },
+
+ /* Common IPs */
+ { 1, TWL6030_BASEADD_USB },
+ { 1, TWL6030_BASEADD_PIH },
+ { 1, TWL6030_BASEADD_CHARGER },
+ { 0, TWL6030_BASEADD_PM_MASTER },
+ { 0, TWL6030_BASEADD_PM_SLAVE_MISC },
+
+ { 0, TWL6030_BASEADD_RTC },
+ { 1, TWL6030_BASEADD_PWM },
+ { 1, TWL6030_BASEADD_LED },
+ { 0, TWL6030_BASEADD_SECURED_REG },
+
+ /* TWL6030 specific IPs */
+ { 0, TWL6030_BASEADD_ZERO },
+ { 1, TWL6030_BASEADD_ZERO },
+ { 2, TWL6030_BASEADD_ZERO },
+ { 1, TWL6030_BASEADD_GPADC_CTRL },
+ { 1, TWL6030_BASEADD_GASGAUGE },
};
static struct regmap_config twl6030_regmap_config[3] = {
@@ -305,8 +277,30 @@ static struct regmap_config twl6030_regmap_config[3] = {
/*----------------------------------------------------------------------*/
+static inline int twl_get_num_slaves(void)
+{
+ if (twl_class_is_4030())
+ return 4; /* TWL4030 class has four slave addresses */
+ else
+ return 3; /* TWL6030 class has three slave addresses */
+}
+
+static inline int twl_get_last_module(void)
+{
+ if (twl_class_is_4030())
+ return TWL4030_MODULE_LAST;
+ else
+ return TWL6030_MODULE_LAST;
+}
+
/* Exported Functions */
+unsigned int twl_rev(void)
+{
+ return twl_priv ? twl_priv->twl_id : 0;
+}
+EXPORT_SYMBOL(twl_rev);
+
/**
* twl_i2c_write - Writes a n bit register in TWL4030/TWL5030/TWL60X0
* @mod_no: module number
@@ -314,9 +308,6 @@ static struct regmap_config twl6030_regmap_config[3] = {
* @reg: register address (just offset will do)
* @num_bytes: number of bytes to transfer
*
- * IMPORTANT: for 'value' parameter: Allocate value num_bytes+1 and
- * valid data starts at Offset 1.
- *
* Returns the result of operation - 0 is success
*/
int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
@@ -325,24 +316,21 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
int sid;
struct twl_client *twl;
- if (unlikely(mod_no >= TWL_MODULE_LAST)) {
- pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
- return -EPERM;
- }
- if (unlikely(!inuse)) {
+ if (unlikely(!twl_priv || !twl_priv->ready)) {
pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- if (unlikely(sid == SUB_CHIP_ID_INVAL)) {
- pr_err("%s: module %d is not part of the pmic\n",
- DRIVER_NAME, mod_no);
- return -EINVAL;
+ if (unlikely(mod_no >= twl_get_last_module())) {
+ pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+ return -EPERM;
}
- twl = &twl_modules[sid];
- ret = regmap_bulk_write(twl->regmap, twl_map[mod_no].base + reg,
- value, num_bytes);
+ sid = twl_priv->twl_map[mod_no].sid;
+ twl = &twl_priv->twl_modules[sid];
+
+ ret = regmap_bulk_write(twl->regmap,
+ twl_priv->twl_map[mod_no].base + reg, value,
+ num_bytes);
if (ret)
pr_err("%s: Write failed (mod %d, reg 0x%02x count %d)\n",
@@ -367,24 +355,21 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
int sid;
struct twl_client *twl;
- if (unlikely(mod_no >= TWL_MODULE_LAST)) {
- pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
- return -EPERM;
- }
- if (unlikely(!inuse)) {
+ if (unlikely(!twl_priv || !twl_priv->ready)) {
pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- if (unlikely(sid == SUB_CHIP_ID_INVAL)) {
- pr_err("%s: module %d is not part of the pmic\n",
- DRIVER_NAME, mod_no);
- return -EINVAL;
+ if (unlikely(mod_no >= twl_get_last_module())) {
+ pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+ return -EPERM;
}
- twl = &twl_modules[sid];
- ret = regmap_bulk_read(twl->regmap, twl_map[mod_no].base + reg,
- value, num_bytes);
+ sid = twl_priv->twl_map[mod_no].sid;
+ twl = &twl_priv->twl_modules[sid];
+
+ ret = regmap_bulk_read(twl->regmap,
+ twl_priv->twl_map[mod_no].base + reg, value,
+ num_bytes);
if (ret)
pr_err("%s: Read failed (mod %d, reg 0x%02x count %d)\n",
@@ -394,34 +379,6 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
}
EXPORT_SYMBOL(twl_i2c_read);
-/**
- * twl_i2c_write_u8 - Writes a 8 bit register in TWL4030/TWL5030/TWL60X0
- * @mod_no: module number
- * @value: the value to be written 8 bit
- * @reg: register address (just offset will do)
- *
- * Returns result of operation - 0 is success
- */
-int twl_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
-{
- return twl_i2c_write(mod_no, &value, reg, 1);
-}
-EXPORT_SYMBOL(twl_i2c_write_u8);
-
-/**
- * twl_i2c_read_u8 - Reads a 8 bit register from TWL4030/TWL5030/TWL60X0
- * @mod_no: module number
- * @value: the value read 8 bit
- * @reg: register address (just offset will do)
- *
- * Returns result of operation - 0 is success
- */
-int twl_i2c_read_u8(u8 mod_no, u8 *value, u8 reg)
-{
- return twl_i2c_read(mod_no, value, reg, 1);
-}
-EXPORT_SYMBOL(twl_i2c_read_u8);
-
/*----------------------------------------------------------------------*/
/**
@@ -440,7 +397,7 @@ static int twl_read_idcode_register(void)
goto fail;
}
- err = twl_i2c_read(TWL4030_MODULE_INTBR, (u8 *)(&twl_idcode),
+ err = twl_i2c_read(TWL4030_MODULE_INTBR, (u8 *)(&twl_priv->twl_idcode),
REG_IDCODE_7_0, 4);
if (err) {
pr_err("TWL4030: unable to read IDCODE -%d\n", err);
@@ -461,7 +418,7 @@ fail:
*/
int twl_get_type(void)
{
- return TWL_SIL_TYPE(twl_idcode);
+ return TWL_SIL_TYPE(twl_priv->twl_idcode);
}
EXPORT_SYMBOL_GPL(twl_get_type);
@@ -472,7 +429,7 @@ EXPORT_SYMBOL_GPL(twl_get_type);
*/
int twl_get_version(void)
{
- return TWL_SIL_REV(twl_idcode);
+ return TWL_SIL_REV(twl_priv->twl_idcode);
}
EXPORT_SYMBOL_GPL(twl_get_version);
@@ -509,13 +466,20 @@ int twl_get_hfclk_rate(void)
EXPORT_SYMBOL_GPL(twl_get_hfclk_rate);
static struct device *
-add_numbered_child(unsigned chip, const char *name, int num,
+add_numbered_child(unsigned mod_no, const char *name, int num,
void *pdata, unsigned pdata_len,
bool can_wakeup, int irq0, int irq1)
{
struct platform_device *pdev;
- struct twl_client *twl = &twl_modules[chip];
- int status;
+ struct twl_client *twl;
+ int status, sid;
+
+ if (unlikely(mod_no >= twl_get_last_module())) {
+ pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+ return ERR_PTR(-EPERM);
+ }
+ sid = twl_priv->twl_map[mod_no].sid;
+ twl = &twl_priv->twl_modules[sid];
pdev = platform_device_alloc(name, num);
if (!pdev) {
@@ -560,11 +524,11 @@ err:
return &pdev->dev;
}
-static inline struct device *add_child(unsigned chip, const char *name,
+static inline struct device *add_child(unsigned mod_no, const char *name,
void *pdata, unsigned pdata_len,
bool can_wakeup, int irq0, int irq1)
{
- return add_numbered_child(chip, name, -1, pdata, pdata_len,
+ return add_numbered_child(mod_no, name, -1, pdata, pdata_len,
can_wakeup, irq0, irq1);
}
@@ -573,7 +537,6 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
struct regulator_consumer_supply *consumers,
unsigned num_consumers, unsigned long features)
{
- unsigned sub_chip_id;
struct twl_regulator_driver_data drv_data;
/* regulator framework demands init_data ... */
@@ -600,8 +563,7 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
}
/* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */
- sub_chip_id = twl_map[TWL_MODULE_PM_MASTER].sid;
- return add_numbered_child(sub_chip_id, "twl_reg", num,
+ return add_numbered_child(TWL_MODULE_PM_MASTER, "twl_reg", num,
pdata, sizeof(*pdata), false, 0, 0);
}
@@ -623,10 +585,9 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
unsigned long features)
{
struct device *child;
- unsigned sub_chip_id;
if (IS_ENABLED(CONFIG_GPIO_TWL4030) && pdata->gpio) {
- child = add_child(SUB_CHIP_ID1, "twl4030_gpio",
+ child = add_child(TWL4030_MODULE_GPIO, "twl4030_gpio",
pdata->gpio, sizeof(*pdata->gpio),
false, irq_base + GPIO_INTR_OFFSET, 0);
if (IS_ERR(child))
@@ -634,7 +595,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
}
if (IS_ENABLED(CONFIG_KEYBOARD_TWL4030) && pdata->keypad) {
- child = add_child(SUB_CHIP_ID2, "twl4030_keypad",
+ child = add_child(TWL4030_MODULE_KEYPAD, "twl4030_keypad",
pdata->keypad, sizeof(*pdata->keypad),
true, irq_base + KEYPAD_INTR_OFFSET, 0);
if (IS_ERR(child))
@@ -643,7 +604,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc &&
twl_class_is_4030()) {
- child = add_child(SUB_CHIP_ID2, "twl4030_madc",
+ child = add_child(TWL4030_MODULE_MADC, "twl4030_madc",
pdata->madc, sizeof(*pdata->madc),
true, irq_base + MADC_INTR_OFFSET, 0);
if (IS_ERR(child))
@@ -658,22 +619,21 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
* Eventually, Linux might become more aware of such
* HW security concerns, and "least privilege".
*/
- sub_chip_id = twl_map[TWL_MODULE_RTC].sid;
- child = add_child(sub_chip_id, "twl_rtc", NULL, 0,
+ child = add_child(TWL_MODULE_RTC, "twl_rtc", NULL, 0,
true, irq_base + RTC_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_PWM_TWL)) {
- child = add_child(SUB_CHIP_ID1, "twl-pwm", NULL, 0,
+ child = add_child(TWL_MODULE_PWM, "twl-pwm", NULL, 0,
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_PWM_TWL_LED)) {
- child = add_child(SUB_CHIP_ID1, "twl-pwmled", NULL, 0,
+ child = add_child(TWL_MODULE_LED, "twl-pwmled", NULL, 0,
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -725,7 +685,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
}
- child = add_child(SUB_CHIP_ID0, "twl4030_usb",
+ child = add_child(TWL_MODULE_USB, "twl4030_usb",
pdata->usb, sizeof(*pdata->usb), true,
/* irq0 = USB_PRES, irq1 = USB */
irq_base + USB_PRES_INTR_OFFSET,
@@ -774,7 +734,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
pdata->usb->features = features;
- child = add_child(SUB_CHIP_ID0, "twl6030_usb",
+ child = add_child(TWL_MODULE_USB, "twl6030_usb",
pdata->usb, sizeof(*pdata->usb), true,
/* irq1 = VBUS_PRES, irq0 = USB ID */
irq_base + USBOTG_INTR_OFFSET,
@@ -799,22 +759,22 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
}
if (IS_ENABLED(CONFIG_TWL4030_WATCHDOG) && twl_class_is_4030()) {
- child = add_child(SUB_CHIP_ID3, "twl4030_wdt", NULL, 0,
- false, 0, 0);
+ child = add_child(TWL_MODULE_PM_RECEIVER, "twl4030_wdt", NULL,
+ 0, false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_INPUT_TWL4030_PWRBUTTON) && twl_class_is_4030()) {
- child = add_child(SUB_CHIP_ID3, "twl4030_pwrbutton", NULL, 0,
- true, irq_base + 8 + 0, 0);
+ child = add_child(TWL_MODULE_PM_MASTER, "twl4030_pwrbutton",
+ NULL, 0, true, irq_base + 8 + 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_MFD_TWL4030_AUDIO) && pdata->audio &&
twl_class_is_4030()) {
- child = add_child(SUB_CHIP_ID1, "twl4030-audio",
+ child = add_child(TWL4030_MODULE_AUDIO_VOICE, "twl4030-audio",
pdata->audio, sizeof(*pdata->audio),
false, 0, 0);
if (IS_ERR(child))
@@ -1054,7 +1014,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
if (IS_ENABLED(CONFIG_CHARGER_TWL4030) && pdata->bci &&
!(features & (TPS_SUBSET | TWL5031))) {
- child = add_child(SUB_CHIP_ID3, "twl4030_bci",
+ child = add_child(TWL_MODULE_MAIN_CHARGE, "twl4030_bci",
pdata->bci, sizeof(*pdata->bci), false,
/* irq0 = CHG_PRES, irq1 = BCI */
irq_base + BCI_PRES_INTR_OFFSET,
@@ -1145,25 +1105,23 @@ static int twl_remove(struct i2c_client *client)
unsigned i, num_slaves;
int status;
- if (twl_class_is_4030()) {
+ if (twl_class_is_4030())
status = twl4030_exit_irq();
- num_slaves = TWL_NUM_SLAVES;
- } else {
+ else
status = twl6030_exit_irq();
- num_slaves = TWL_NUM_SLAVES - 1;
- }
if (status < 0)
return status;
+ num_slaves = twl_get_num_slaves();
for (i = 0; i < num_slaves; i++) {
- struct twl_client *twl = &twl_modules[i];
+ struct twl_client *twl = &twl_priv->twl_modules[i];
if (twl->client && twl->client != client)
i2c_unregister_device(twl->client);
- twl_modules[i].client = NULL;
+ twl->client = NULL;
}
- inuse = false;
+ twl_priv->ready = false;
return 0;
}
@@ -1179,6 +1137,17 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
int status;
unsigned i, num_slaves;
+ if (!node && !pdata) {
+ dev_err(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (twl_priv) {
+ dev_dbg(&client->dev, "only one instance of %s allowed\n",
+ DRIVER_NAME);
+ return -EBUSY;
+ }
+
pdev = platform_device_alloc(DRIVER_NAME, -1);
if (!pdev) {
dev_err(&client->dev, "can't alloc pdev\n");
@@ -1191,54 +1160,44 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
return status;
}
- if (node && !pdata) {
- /*
- * XXX: Temporary pdata until the information is correctly
- * retrieved by every TWL modules from DT.
- */
- pdata = devm_kzalloc(&client->dev,
- sizeof(struct twl4030_platform_data),
- GFP_KERNEL);
- if (!pdata) {
- status = -ENOMEM;
- goto free;
- }
- }
-
- if (!pdata) {
- dev_dbg(&client->dev, "no platform data?\n");
- status = -EINVAL;
- goto free;
- }
-
- platform_set_drvdata(pdev, pdata);
-
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
dev_dbg(&client->dev, "can't talk I2C?\n");
status = -EIO;
goto free;
}
- if (inuse) {
- dev_dbg(&client->dev, "driver is already in use\n");
- status = -EBUSY;
+ twl_priv = devm_kzalloc(&client->dev, sizeof(struct twl_private),
+ GFP_KERNEL);
+ if (!twl_priv) {
+ status = -ENOMEM;
goto free;
}
if ((id->driver_data) & TWL6030_CLASS) {
- twl_id = TWL6030_CLASS_ID;
- twl_map = &twl6030_map[0];
+ twl_priv->twl_id = TWL6030_CLASS_ID;
+ twl_priv->twl_map = &twl6030_map[0];
+ /* The charger base address is different in twl6025 */
+ if ((id->driver_data) & TWL6025_SUBCLASS)
+ twl_priv->twl_map[TWL_MODULE_MAIN_CHARGE].base =
+ TWL6025_BASEADD_CHARGER;
twl_regmap_config = twl6030_regmap_config;
- num_slaves = TWL_NUM_SLAVES - 1;
} else {
- twl_id = TWL4030_CLASS_ID;
- twl_map = &twl4030_map[0];
+ twl_priv->twl_id = TWL4030_CLASS_ID;
+ twl_priv->twl_map = &twl4030_map[0];
twl_regmap_config = twl4030_regmap_config;
- num_slaves = TWL_NUM_SLAVES;
+ }
+
+ num_slaves = twl_get_num_slaves();
+ twl_priv->twl_modules = devm_kzalloc(&client->dev,
+ sizeof(struct twl_client) * num_slaves,
+ GFP_KERNEL);
+ if (!twl_priv->twl_modules) {
+ status = -ENOMEM;
+ goto free;
}
for (i = 0; i < num_slaves; i++) {
- struct twl_client *twl = &twl_modules[i];
+ struct twl_client *twl = &twl_priv->twl_modules[i];
if (i == 0) {
twl->client = client;
@@ -1264,19 +1223,19 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
- inuse = true;
+ twl_priv->ready = true;
/* setup clock framework */
- clocks_init(&pdev->dev, pdata->clock);
+ clocks_init(&pdev->dev, pdata ? pdata->clock : NULL);
/* read TWL IDCODE Register */
- if (twl_id == TWL4030_CLASS_ID) {
+ if (twl_class_is_4030()) {
status = twl_read_idcode_register();
WARN(status < 0, "Error: reading twl_idcode register value\n");
}
/* load power event scripts */
- if (IS_ENABLED(CONFIG_TWL4030_POWER) && pdata->power)
+ if (IS_ENABLED(CONFIG_TWL4030_POWER) && pdata && pdata->power)
twl4030_power_init(pdata->power);
/* Maybe init the T2 Interrupt subsystem */
@@ -1308,10 +1267,9 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
twl_i2c_write_u8(TWL4030_MODULE_INTBR, temp, REG_GPPUPDCTR1);
}
- status = -ENODEV;
if (node)
status = of_platform_populate(node, NULL, NULL, &client->dev);
- if (status)
+ else
status = add_children(pdata, irq_base, id->driver_data);
fail:
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index 77048b18439e..bf75e967a1f3 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -49,6 +49,8 @@
#define SYS_ID_HBI_SHIFT 16
#define SYS_PROCIDx_HBI_SHIFT 0
+#define SYS_LED_LED(n) (1 << (n))
+
#define SYS_MCI_CARDIN (1 << 0)
#define SYS_MCI_WPROT (1 << 1)
@@ -313,7 +315,7 @@ static void vexpress_sysreg_config_complete(unsigned long data)
}
-void __init vexpress_sysreg_setup(struct device_node *node)
+void vexpress_sysreg_setup(struct device_node *node)
{
if (WARN_ON(!vexpress_sysreg_base))
return;
@@ -336,34 +338,40 @@ void __init vexpress_sysreg_early_init(void __iomem *base)
void __init vexpress_sysreg_of_early_init(void)
{
- struct device_node *node = of_find_compatible_node(NULL, NULL,
- "arm,vexpress-sysreg");
+ struct device_node *node;
+
+ if (vexpress_sysreg_base)
+ return;
+ node = of_find_compatible_node(NULL, NULL, "arm,vexpress-sysreg");
if (node) {
vexpress_sysreg_base = of_iomap(node, 0);
vexpress_sysreg_setup(node);
- } else {
- pr_info("vexpress-sysreg: No Device Tree node found.");
}
}
+#define VEXPRESS_SYSREG_GPIO(_name, _reg, _value) \
+ [VEXPRESS_GPIO_##_name] = { \
+ .reg = _reg, \
+ .value = _reg##_##_value, \
+ }
+
static struct vexpress_sysreg_gpio {
unsigned long reg;
u32 value;
} vexpress_sysreg_gpios[] = {
- [VEXPRESS_GPIO_MMC_CARDIN] = {
- .reg = SYS_MCI,
- .value = SYS_MCI_CARDIN,
- },
- [VEXPRESS_GPIO_MMC_WPROT] = {
- .reg = SYS_MCI,
- .value = SYS_MCI_WPROT,
- },
- [VEXPRESS_GPIO_FLASH_WPn] = {
- .reg = SYS_FLASH,
- .value = SYS_FLASH_WPn,
- },
+ VEXPRESS_SYSREG_GPIO(MMC_CARDIN, SYS_MCI, CARDIN),
+ VEXPRESS_SYSREG_GPIO(MMC_WPROT, SYS_MCI, WPROT),
+ VEXPRESS_SYSREG_GPIO(FLASH_WPn, SYS_FLASH, WPn),
+ VEXPRESS_SYSREG_GPIO(LED0, SYS_LED, LED(0)),
+ VEXPRESS_SYSREG_GPIO(LED1, SYS_LED, LED(1)),
+ VEXPRESS_SYSREG_GPIO(LED2, SYS_LED, LED(2)),
+ VEXPRESS_SYSREG_GPIO(LED3, SYS_LED, LED(3)),
+ VEXPRESS_SYSREG_GPIO(LED4, SYS_LED, LED(4)),
+ VEXPRESS_SYSREG_GPIO(LED5, SYS_LED, LED(5)),
+ VEXPRESS_SYSREG_GPIO(LED6, SYS_LED, LED(6)),
+ VEXPRESS_SYSREG_GPIO(LED7, SYS_LED, LED(7)),
};
static int vexpress_sysreg_gpio_direction_input(struct gpio_chip *chip,
@@ -372,12 +380,6 @@ static int vexpress_sysreg_gpio_direction_input(struct gpio_chip *chip,
return 0;
}
-static int vexpress_sysreg_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- return 0;
-}
-
static int vexpress_sysreg_gpio_get(struct gpio_chip *chip,
unsigned offset)
{
@@ -401,6 +403,14 @@ static void vexpress_sysreg_gpio_set(struct gpio_chip *chip,
writel(reg_value, vexpress_sysreg_base + gpio->reg);
}
+static int vexpress_sysreg_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ vexpress_sysreg_gpio_set(chip, offset, value);
+
+ return 0;
+}
+
static struct gpio_chip vexpress_sysreg_gpio_chip = {
.label = "vexpress-sysreg",
.direction_input = vexpress_sysreg_gpio_direction_input,
@@ -412,6 +422,30 @@ static struct gpio_chip vexpress_sysreg_gpio_chip = {
};
+#define VEXPRESS_SYSREG_GREEN_LED(_name, _default_trigger, _gpio) \
+ { \
+ .name = "v2m:green:"_name, \
+ .default_trigger = _default_trigger, \
+ .gpio = VEXPRESS_GPIO_##_gpio, \
+ }
+
+struct gpio_led vexpress_sysreg_leds[] = {
+ VEXPRESS_SYSREG_GREEN_LED("user1", "heartbeat", LED0),
+ VEXPRESS_SYSREG_GREEN_LED("user2", "mmc0", LED1),
+ VEXPRESS_SYSREG_GREEN_LED("user3", "cpu0", LED2),
+ VEXPRESS_SYSREG_GREEN_LED("user4", "cpu1", LED3),
+ VEXPRESS_SYSREG_GREEN_LED("user5", "cpu2", LED4),
+ VEXPRESS_SYSREG_GREEN_LED("user6", "cpu3", LED5),
+ VEXPRESS_SYSREG_GREEN_LED("user7", "cpu4", LED6),
+ VEXPRESS_SYSREG_GREEN_LED("user8", "cpu5", LED7),
+};
+
+struct gpio_led_platform_data vexpress_sysreg_leds_pdata = {
+ .num_leds = ARRAY_SIZE(vexpress_sysreg_leds),
+ .leds = vexpress_sysreg_leds,
+};
+
+
static ssize_t vexpress_sysreg_sys_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -456,6 +490,10 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
return err;
}
+ platform_device_register_data(vexpress_sysreg_dev, "leds-gpio",
+ PLATFORM_DEVID_AUTO, &vexpress_sysreg_leds_pdata,
+ sizeof(vexpress_sysreg_leds_pdata));
+
vexpress_sysreg_dev = &pdev->dev;
device_create_file(vexpress_sysreg_dev, &dev_attr_sys_id);
@@ -478,6 +516,7 @@ static struct platform_driver vexpress_sysreg_driver = {
static int __init vexpress_sysreg_init(void)
{
+ vexpress_sysreg_of_early_init();
return platform_driver_register(&vexpress_sysreg_driver);
}
core_initcall(vexpress_sysreg_init);
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index f6fcb87b3504..a433f580aa4c 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -59,12 +59,13 @@ static const struct reg_default wm5102_reva_patch[] = {
static const struct reg_default wm5102_revb_patch[] = {
{ 0x80, 0x0003 },
{ 0x081, 0xE022 },
- { 0x410, 0x6080 },
- { 0x418, 0x6080 },
- { 0x420, 0x6080 },
+ { 0x410, 0x4080 },
+ { 0x418, 0x4080 },
+ { 0x420, 0x4080 },
{ 0x428, 0xC000 },
- { 0x441, 0x8014 },
+ { 0x4B0, 0x0066 },
{ 0x458, 0x000b },
+ { 0x212, 0x0000 },
{ 0x80, 0x0000 },
};
@@ -84,6 +85,12 @@ int wm5102_patch(struct arizona *arizona)
}
static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = {
+ [ARIZONA_IRQ_MICD_CLAMP_FALL] = {
+ .mask = ARIZONA_MICD_CLAMP_FALL_EINT1
+ },
+ [ARIZONA_IRQ_MICD_CLAMP_RISE] = {
+ .mask = ARIZONA_MICD_CLAMP_RISE_EINT1
+ },
[ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
[ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
[ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
@@ -225,11 +232,9 @@ const struct regmap_irq_chip wm5102_irq = {
static const struct reg_default wm5102_reg_default[] = {
{ 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */
{ 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
- { 0x0000000D, 0x0000 }, /* R13 - Ctrl IF Status 1 */
{ 0x00000016, 0x0000 }, /* R22 - Write Sequencer Ctrl 0 */
{ 0x00000017, 0x0000 }, /* R23 - Write Sequencer Ctrl 1 */
{ 0x00000018, 0x0000 }, /* R24 - Write Sequencer Ctrl 2 */
- { 0x0000001A, 0x0000 }, /* R26 - Write Sequencer PROM */
{ 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */
{ 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */
{ 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */
@@ -244,12 +249,14 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */
{ 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */
{ 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */
- { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 1 */
- { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 2 */
- { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 3 */
- { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 4 */
- { 0x0000006C, 0x01FF }, /* R108 - Always On Triggers Sequence Select 5 */
- { 0x0000006D, 0x01FF }, /* R109 - Always On Triggers Sequence Select 6 */
+ { 0x00000066, 0x01FF }, /* R102 - Always On Triggers Sequence Select 1 */
+ { 0x00000067, 0x01FF }, /* R103 - Always On Triggers Sequence Select 2 */
+ { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 3 */
+ { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 4 */
+ { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 5 */
+ { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 6 */
+ { 0x0000006E, 0x01FF }, /* R110 - Trigger Sequence Select 32 */
+ { 0x0000006F, 0x01FF }, /* R111 - Trigger Sequence Select 33 */
{ 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */
{ 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */
{ 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */
@@ -259,13 +266,14 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000095, 0x0000 }, /* R149 - Haptics phase 2 duration */
{ 0x00000096, 0x0000 }, /* R150 - Haptics phase 3 intensity */
{ 0x00000097, 0x0000 }, /* R151 - Haptics phase 3 duration */
- { 0x00000100, 0x0001 }, /* R256 - Clock 32k 1 */
+ { 0x00000100, 0x0002 }, /* R256 - Clock 32k 1 */
{ 0x00000101, 0x0304 }, /* R257 - System Clock 1 */
{ 0x00000102, 0x0011 }, /* R258 - Sample rate 1 */
{ 0x00000103, 0x0011 }, /* R259 - Sample rate 2 */
{ 0x00000104, 0x0011 }, /* R260 - Sample rate 3 */
{ 0x00000112, 0x0305 }, /* R274 - Async clock 1 */
{ 0x00000113, 0x0011 }, /* R275 - Async sample rate 1 */
+ { 0x00000114, 0x0011 }, /* R276 - Async sample rate 2 */
{ 0x00000149, 0x0000 }, /* R329 - Output system clock */
{ 0x0000014A, 0x0000 }, /* R330 - Output async clock */
{ 0x00000152, 0x0000 }, /* R338 - Rate Estimator 1 */
@@ -274,13 +282,14 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
{ 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
{ 0x00000161, 0x0000 }, /* R353 - Dynamic Frequency Scaling 1 */
- { 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */
+ { 0x00000171, 0x0002 }, /* R369 - FLL1 Control 1 */
{ 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
{ 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
{ 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
{ 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
{ 0x00000177, 0x0181 }, /* R375 - FLL1 Loop Filter Test 1 */
+ { 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
{ 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */
@@ -296,6 +305,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
{ 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
+ { 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
{ 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */
@@ -311,8 +321,13 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
{ 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
{ 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
+ { 0x00000225, 0x0400 }, /* R549 - HP Ctrl 1L */
+ { 0x00000226, 0x0400 }, /* R550 - HP Ctrl 1R */
{ 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
{ 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
+ { 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */
+ { 0x0000029F, 0x0000 }, /* R671 - Headphone Detect Test */
+ { 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
{ 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
@@ -343,53 +358,44 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */
{ 0x00000408, 0x0000 }, /* R1032 - Output Rate 1 */
{ 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
- { 0x00000410, 0x0080 }, /* R1040 - Output Path Config 1L */
+ { 0x00000410, 0x4080 }, /* R1040 - Output Path Config 1L */
{ 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
- { 0x00000412, 0x0080 }, /* R1042 - DAC Volume Limit 1L */
+ { 0x00000412, 0x0081 }, /* R1042 - DAC Volume Limit 1L */
{ 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
{ 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */
{ 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
- { 0x00000416, 0x0080 }, /* R1046 - DAC Volume Limit 1R */
+ { 0x00000416, 0x0081 }, /* R1046 - DAC Volume Limit 1R */
{ 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
- { 0x00000418, 0x0080 }, /* R1048 - Output Path Config 2L */
+ { 0x00000418, 0x4080 }, /* R1048 - Output Path Config 2L */
{ 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
- { 0x0000041A, 0x0080 }, /* R1050 - DAC Volume Limit 2L */
+ { 0x0000041A, 0x0081 }, /* R1050 - DAC Volume Limit 2L */
{ 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
{ 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */
{ 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
- { 0x0000041E, 0x0080 }, /* R1054 - DAC Volume Limit 2R */
+ { 0x0000041E, 0x0081 }, /* R1054 - DAC Volume Limit 2R */
{ 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
- { 0x00000420, 0x0080 }, /* R1056 - Output Path Config 3L */
+ { 0x00000420, 0x4080 }, /* R1056 - Output Path Config 3L */
{ 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
- { 0x00000422, 0x0080 }, /* R1058 - DAC Volume Limit 3L */
+ { 0x00000422, 0x0081 }, /* R1058 - DAC Volume Limit 3L */
{ 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
- { 0x00000424, 0x0080 }, /* R1060 - Output Path Config 3R */
- { 0x00000425, 0x0180 }, /* R1061 - DAC Digital Volume 3R */
- { 0x00000426, 0x0080 }, /* R1062 - DAC Volume Limit 3R */
- { 0x00000428, 0x0000 }, /* R1064 - Output Path Config 4L */
+ { 0x00000428, 0xC000 }, /* R1064 - Output Path Config 4L */
{ 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
- { 0x0000042A, 0x0080 }, /* R1066 - Out Volume 4L */
+ { 0x0000042A, 0x0081 }, /* R1066 - Out Volume 4L */
{ 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
- { 0x0000042C, 0x0000 }, /* R1068 - Output Path Config 4R */
{ 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */
- { 0x0000042E, 0x0080 }, /* R1070 - Out Volume 4R */
+ { 0x0000042E, 0x0081 }, /* R1070 - Out Volume 4R */
{ 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */
{ 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */
{ 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */
- { 0x00000432, 0x0080 }, /* R1074 - DAC Volume Limit 5L */
+ { 0x00000432, 0x0081 }, /* R1074 - DAC Volume Limit 5L */
{ 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */
- { 0x00000434, 0x0000 }, /* R1076 - Output Path Config 5R */
{ 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
- { 0x00000436, 0x0080 }, /* R1078 - DAC Volume Limit 5R */
- { 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
+ { 0x00000436, 0x0081 }, /* R1078 - DAC Volume Limit 5R */
+ { 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
{ 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
{ 0x00000458, 0x0001 }, /* R1112 - Noise Gate Control */
{ 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
{ 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
- { 0x000004DC, 0x0000 }, /* R1244 - DAC comp 1 */
- { 0x000004DD, 0x0000 }, /* R1245 - DAC comp 2 */
- { 0x000004DE, 0x0000 }, /* R1246 - DAC comp 3 */
- { 0x000004DF, 0x0000 }, /* R1247 - DAC comp 4 */
{ 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
{ 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
{ 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
@@ -417,7 +423,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000518, 0x0007 }, /* R1304 - AIF1 Frame Ctrl 18 */
{ 0x00000519, 0x0000 }, /* R1305 - AIF1 Tx Enables */
{ 0x0000051A, 0x0000 }, /* R1306 - AIF1 Rx Enables */
- { 0x0000051B, 0x0000 }, /* R1307 - AIF1 Force Write */
{ 0x00000540, 0x000C }, /* R1344 - AIF2 BCLK Ctrl */
{ 0x00000541, 0x0008 }, /* R1345 - AIF2 Tx Pin Ctrl */
{ 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */
@@ -433,7 +438,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */
{ 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */
{ 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */
- { 0x0000055B, 0x0000 }, /* R1371 - AIF2 Force Write */
{ 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */
{ 0x00000581, 0x0008 }, /* R1409 - AIF3 Tx Pin Ctrl */
{ 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */
@@ -449,7 +453,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000592, 0x0001 }, /* R1426 - AIF3 Frame Ctrl 12 */
{ 0x00000599, 0x0000 }, /* R1433 - AIF3 Tx Enables */
{ 0x0000059A, 0x0000 }, /* R1434 - AIF3 Rx Enables */
- { 0x0000059B, 0x0000 }, /* R1435 - AIF3 Force Write */
{ 0x000005E3, 0x0004 }, /* R1507 - SLIMbus Framer Ref Gear */
{ 0x000005E5, 0x0000 }, /* R1509 - SLIMbus Rates 1 */
{ 0x000005E6, 0x0000 }, /* R1510 - SLIMbus Rates 2 */
@@ -773,22 +776,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x000008CD, 0x0080 }, /* R2253 - DRC1RMIX Input 3 Volume */
{ 0x000008CE, 0x0000 }, /* R2254 - DRC1RMIX Input 4 Source */
{ 0x000008CF, 0x0080 }, /* R2255 - DRC1RMIX Input 4 Volume */
- { 0x000008D0, 0x0000 }, /* R2256 - DRC2LMIX Input 1 Source */
- { 0x000008D1, 0x0080 }, /* R2257 - DRC2LMIX Input 1 Volume */
- { 0x000008D2, 0x0000 }, /* R2258 - DRC2LMIX Input 2 Source */
- { 0x000008D3, 0x0080 }, /* R2259 - DRC2LMIX Input 2 Volume */
- { 0x000008D4, 0x0000 }, /* R2260 - DRC2LMIX Input 3 Source */
- { 0x000008D5, 0x0080 }, /* R2261 - DRC2LMIX Input 3 Volume */
- { 0x000008D6, 0x0000 }, /* R2262 - DRC2LMIX Input 4 Source */
- { 0x000008D7, 0x0080 }, /* R2263 - DRC2LMIX Input 4 Volume */
- { 0x000008D8, 0x0000 }, /* R2264 - DRC2RMIX Input 1 Source */
- { 0x000008D9, 0x0080 }, /* R2265 - DRC2RMIX Input 1 Volume */
- { 0x000008DA, 0x0000 }, /* R2266 - DRC2RMIX Input 2 Source */
- { 0x000008DB, 0x0080 }, /* R2267 - DRC2RMIX Input 2 Volume */
- { 0x000008DC, 0x0000 }, /* R2268 - DRC2RMIX Input 3 Source */
- { 0x000008DD, 0x0080 }, /* R2269 - DRC2RMIX Input 3 Volume */
- { 0x000008DE, 0x0000 }, /* R2270 - DRC2RMIX Input 4 Source */
- { 0x000008DF, 0x0080 }, /* R2271 - DRC2RMIX Input 4 Volume */
{ 0x00000900, 0x0000 }, /* R2304 - HPLP1MIX Input 1 Source */
{ 0x00000901, 0x0080 }, /* R2305 - HPLP1MIX Input 1 Volume */
{ 0x00000902, 0x0000 }, /* R2306 - HPLP1MIX Input 2 Source */
@@ -880,7 +867,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
{ 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
{ 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
- { 0x00000D41, 0x0000 }, /* R3393 - ADSP2 IRQ0 */
+ { 0x00000D50, 0x0000 }, /* R3408 - AOD wkup and trig */
{ 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
{ 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
{ 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
@@ -975,11 +962,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000E82, 0x0018 }, /* R3714 - DRC1 ctrl3 */
{ 0x00000E83, 0x0000 }, /* R3715 - DRC1 ctrl4 */
{ 0x00000E84, 0x0000 }, /* R3716 - DRC1 ctrl5 */
- { 0x00000E89, 0x0018 }, /* R3721 - DRC2 ctrl1 */
- { 0x00000E8A, 0x0933 }, /* R3722 - DRC2 ctrl2 */
- { 0x00000E8B, 0x0018 }, /* R3723 - DRC2 ctrl3 */
- { 0x00000E8C, 0x0000 }, /* R3724 - DRC2 ctrl4 */
- { 0x00000E8D, 0x0000 }, /* R3725 - DRC2 ctrl5 */
{ 0x00000EC0, 0x0000 }, /* R3776 - HPLPF1_1 */
{ 0x00000EC1, 0x0000 }, /* R3777 - HPLPF1_2 */
{ 0x00000EC4, 0x0000 }, /* R3780 - HPLPF2_1 */
@@ -990,16 +972,12 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
- { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
{ 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
{ 0x00000EF4, 0x0000 }, /* R3828 - ISRC 2 CTRL 2 */
{ 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
- { 0x00000EF6, 0x0000 }, /* R3830 - ISRC 3 CTRL 1 */
- { 0x00000EF7, 0x0000 }, /* R3831 - ISRC 3 CTRL 2 */
- { 0x00000EF8, 0x0000 }, /* R3832 - ISRC 3 CTRL 3 */
{ 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
{ 0x00001101, 0x0000 }, /* R4353 - DSP1 Clocking 1 */
};
@@ -1107,6 +1085,8 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ACCESSORY_DETECT_MODE_1:
case ARIZONA_HEADPHONE_DETECT_1:
case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_HP_DACVAL:
+ case ARIZONA_MICD_CLAMP_CONTROL:
case ARIZONA_MIC_DETECT_1:
case ARIZONA_MIC_DETECT_2:
case ARIZONA_MIC_DETECT_3:
@@ -1824,17 +1804,24 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
+ case ARIZONA_DSP1_SCRATCH_0:
+ case ARIZONA_DSP1_SCRATCH_1:
+ case ARIZONA_DSP1_SCRATCH_2:
+ case ARIZONA_DSP1_SCRATCH_3:
return true;
default:
- return false;
+ if ((reg >= 0x100000 && reg < 0x106000) ||
+ (reg >= 0x180000 && reg < 0x180800) ||
+ (reg >= 0x190000 && reg < 0x194800) ||
+ (reg >= 0x1a8000 && reg < 0x1a9800))
+ return true;
+ else
+ return false;
}
}
static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
{
- if (reg > 0xffff)
- return true;
-
switch (reg) {
case ARIZONA_SOFTWARE_RESET:
case ARIZONA_DEVICE_REVISION:
@@ -1875,11 +1862,22 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
+ case ARIZONA_DSP1_SCRATCH_0:
+ case ARIZONA_DSP1_SCRATCH_1:
+ case ARIZONA_DSP1_SCRATCH_2:
+ case ARIZONA_DSP1_SCRATCH_3:
case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_HP_DACVAL:
case ARIZONA_MIC_DETECT_3:
return true;
default:
- return false;
+ if ((reg >= 0x100000 && reg < 0x106000) ||
+ (reg >= 0x180000 && reg < 0x180800) ||
+ (reg >= 0x190000 && reg < 0x194800) ||
+ (reg >= 0x1a8000 && reg < 0x1a9800))
+ return true;
+ else
+ return false;
}
}
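A note on the two hunks above: both wm5102_readable_register() and wm5102_volatile_register() now open-code the same four ADSP2 memory windows. A minimal sketch of a shared helper (the name is illustrative, not part of this patch):

static bool wm5102_is_adsp_memory(unsigned int reg)
{
	/* The ADSP2 program/data memory windows tested in both callbacks above */
	return (reg >= 0x100000 && reg < 0x106000) ||
	       (reg >= 0x180000 && reg < 0x180800) ||
	       (reg >= 0x190000 && reg < 0x194800) ||
	       (reg >= 0x1a8000 && reg < 0x1a9800);
}

Both default cases could then simply end with "return wm5102_is_adsp_memory(reg);".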
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 57c488d42d3e..803e93fae56a 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -467,7 +467,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
goto err;
}
- ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
+ ret = devm_regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
@@ -478,7 +478,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
wm8994->supplies);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
- goto err_get;
+ goto err;
}
ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET);
@@ -658,8 +658,6 @@ err_irq:
err_enable:
regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
-err_get:
- regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
err:
mfd_remove_devices(wm8994->dev);
return ret;
@@ -672,7 +670,6 @@ static void wm8994_device_exit(struct wm8994 *wm8994)
wm8994_irq_exit(wm8994);
regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
- regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
}
static const struct of_device_id wm8994_of_match[] = {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 8f59d88897e7..e83fdfe0c8ca 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -127,7 +127,7 @@ config PHANTOM
config INTEL_MID_PTI
tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
- depends on PCI
+ depends on PCI && TTY
default n
help
The PTI (Parallel Trace Interface) driver directs
@@ -499,6 +499,17 @@ config USB_SWITCH_FSA9480
stereo and mono audio, video, microphone and UART data to use
a common connector port.
+config LATTICE_ECP3_CONFIG
+ tristate "Lattice ECP3 FPGA bitstream configuration via SPI"
+ depends on SPI && SYSFS
+ select FW_LOADER
+ default n
+ help
+ This option enables support for bitstream configuration (programming
+ or loading) of the Lattice ECP3 FPGA family via SPI.
+
+ If unsure, say N.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -507,4 +518,5 @@ source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
+source "drivers/misc/vmw_vmci/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 2129377c0de6..35a1463c72d9 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -49,3 +49,6 @@ obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
+obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
+obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
+obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index f428d86bfc10..f32550a74bdd 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -885,7 +885,7 @@ struct c2port_device *c2port_device_register(char *name,
struct c2port_ops *ops, void *devdata)
{
struct c2port_device *c2dev;
- int id, ret;
+ int ret;
if (unlikely(!ops) || unlikely(!ops->access) || \
unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) || \
@@ -897,22 +897,18 @@ struct c2port_device *c2port_device_register(char *name,
if (unlikely(!c2dev))
return ERR_PTR(-ENOMEM);
- ret = idr_pre_get(&c2port_idr, GFP_KERNEL);
- if (!ret) {
- ret = -ENOMEM;
- goto error_idr_get_new;
- }
-
+ idr_preload(GFP_KERNEL);
spin_lock_irq(&c2port_idr_lock);
- ret = idr_get_new(&c2port_idr, c2dev, &id);
+ ret = idr_alloc(&c2port_idr, c2dev, 0, 0, GFP_NOWAIT);
spin_unlock_irq(&c2port_idr_lock);
+ idr_preload_end();
if (ret < 0)
- goto error_idr_get_new;
- c2dev->id = id;
+ goto error_idr_alloc;
+ c2dev->id = ret;
c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
- "c2port%d", id);
+ "c2port%d", c2dev->id);
if (unlikely(IS_ERR(c2dev->dev))) {
ret = PTR_ERR(c2dev->dev);
goto error_device_create;
@@ -946,10 +942,10 @@ error_device_create_bin_file:
error_device_create:
spin_lock_irq(&c2port_idr_lock);
- idr_remove(&c2port_idr, id);
+ idr_remove(&c2port_idr, c2dev->id);
spin_unlock_irq(&c2port_idr_lock);
-error_idr_get_new:
+error_idr_alloc:
kfree(c2dev);
return ERR_PTR(ret);
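For context, the c2port hunk above converts from the old idr_pre_get()/idr_get_new() pair to the idr_preload()/idr_alloc() API. A minimal sketch of that pattern under a spinlock (my_idr, my_lock and ptr are placeholders, not from the driver):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int my_alloc_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preallocate so the locked section never sleeps */
	spin_lock_irq(&my_lock);
	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);	/* 0, 0 = any available id */
	spin_unlock_irq(&my_lock);
	idr_preload_end();

	return id;			/* >= 0 on success, -ENOMEM/-ENOSPC on failure */
}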
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index eaddfe9db149..736c7714f565 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -546,7 +546,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
goto out_dma_unmap;
}
- dma_async_memcpy_issue_pending(chan);
+ dma_async_issue_pending(chan);
/* Set the total byte count */
fpga_set_byte_count(priv->regs, priv->bytes);
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 8835eabb3b87..7508cafff103 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -631,6 +631,8 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
dma_addr_t dst, src;
+ unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP |
+ DMA_COMPL_SKIP_SRC_UNMAP;
dst_sg = buf->vb.sglist;
dst_nents = buf->vb.sglen;
@@ -666,7 +668,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
src = SYS_FPGA_BLOCK;
tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
REG_BLOCK_SIZE,
- 0);
+ dma_flags);
if (!tx) {
dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n");
return -ENOMEM;
@@ -749,7 +751,7 @@ static irqreturn_t data_irq(int irq, void *dev_id)
submitted = true;
/* Start the DMA Engine */
- dma_async_memcpy_issue_pending(priv->chan);
+ dma_async_issue_pending(priv->chan);
out:
/* If no DMA was submitted, re-enable interrupts */
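The two carma-fpga hunks follow the generic dmaengine flow: prepare a memcpy descriptor with completion flags, submit it, then kick the channel with dma_async_issue_pending() (the old dma_async_memcpy_issue_pending() wrapper is gone). A minimal sketch, with chan, dst, src and len as placeholders rather than driver code:

struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
unsigned long flags = DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP;

tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
if (!tx)
	return -ENOMEM;			/* no descriptor available */
cookie = dmaengine_submit(tx);		/* queue the descriptor on the channel */
dma_async_issue_pending(chan);		/* start (or continue) processing the queue */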
diff --git a/drivers/misc/cb710/Kconfig b/drivers/misc/cb710/Kconfig
index 22429b8b1068..5acb9c5b49c4 100644
--- a/drivers/misc/cb710/Kconfig
+++ b/drivers/misc/cb710/Kconfig
@@ -1,6 +1,6 @@
config CB710_CORE
tristate "ENE CB710/720 Flash memory card reader support"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
help
This option enables support for PCI ENE CB710/720 Flash memory card
reader found in some laptops (i.e. some versions of HP Compaq nx9500).
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index c9e695ea7c9a..04f2e1fa9dd1 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -1,13 +1,14 @@
menu "EEPROM support"
config EEPROM_AT24
- tristate "I2C EEPROMs from most vendors"
+ tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
depends on I2C && SYSFS
help
- Enable this driver to get read/write support to most I2C EEPROMs,
- after you configure the driver to know about each EEPROM on
- your target board. Use these generic chip names, instead of
- vendor-specific ones like at24c64 or 24lc02:
+ Enable this driver to get read/write support to most I2C EEPROMs
+ and compatible devices like FRAMs, SRAMs, ROMs etc., after you
+ configure the driver to know about each chip on your target
+ board. Use these generic chip names instead of vendor-specific
+ ones like at24c64, 24lc02 or fm24c04:
24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 3aa9a969b373..36f5d52775a9 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -103,6 +103,7 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
+#include <asm/sections.h>
#define v1printk(a...) do { \
if (verbose) \
@@ -222,6 +223,7 @@ static unsigned long lookup_addr(char *arg)
addr = (unsigned long)do_fork;
else if (!strcmp(arg, "hw_break_val"))
addr = (unsigned long)&hw_break_val;
+ addr = (unsigned long) dereference_function_descriptor((void *)addr);
return addr;
}
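The kgdbts hunk adds dereference_function_descriptor() because, on architectures with function descriptors (ia64, 64-bit PowerPC ABIv1, parisc), the address of a C function points at a descriptor rather than at the code itself, so a breakpoint planted on that address would never fire. A minimal sketch of the idea (do_fork is just the example symbol the test suite already looks up):

#include <asm/sections.h>

unsigned long addr = (unsigned long)do_fork;
/* On descriptor ABIs this yields the real entry point; elsewhere it is a no-op. */
addr = (unsigned long)dereference_function_descriptor((void *)addr);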
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
new file mode 100644
index 000000000000..155700bfd2b6
--- /dev/null
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2012 Stefan Roese <sr@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define FIRMWARE_NAME "lattice-ecp3.bit"
+
+/*
+ * The JTAG IDs of the supported FPGAs. The ID is 32 bits wide,
+ * bit-reversed as noted in the manual.
+ */
+#define ID_ECP3_17 0xc2088080
+#define ID_ECP3_35 0xc2048080
+
+/* FPGA commands */
+#define FPGA_CMD_READ_ID 0x07 /* plus 24 bits */
+#define FPGA_CMD_READ_STATUS 0x09 /* plus 24 bits */
+#define FPGA_CMD_CLEAR 0x70
+#define FPGA_CMD_REFRESH 0x71
+#define FPGA_CMD_WRITE_EN 0x4a /* plus 2 bits */
+#define FPGA_CMD_WRITE_DIS 0x4f /* plus 8 bits */
+#define FPGA_CMD_WRITE_INC 0x41 /* plus 0 bits */
+
+/*
+ * The status register is 32 bits, bit-reversed; DONE is bit 17 per TN1222.pdf
+ * (LatticeECP3 Slave SPI Port User's Guide)
+ */
+#define FPGA_STATUS_DONE 0x00004000
+#define FPGA_STATUS_CLEARED 0x00010000
+
+#define FPGA_CLEAR_TIMEOUT 5000 /* max. 5000ms for FPGA clear */
+#define FPGA_CLEAR_MSLEEP 10
+#define FPGA_CLEAR_LOOP_COUNT (FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP)
+
+struct fpga_data {
+ struct completion fw_loaded;
+};
+
+struct ecp3_dev {
+ u32 jedec_id;
+ char *name;
+};
+
+static const struct ecp3_dev ecp3_dev[] = {
+ {
+ .jedec_id = ID_ECP3_17,
+ .name = "Lattice ECP3-17",
+ },
+ {
+ .jedec_id = ID_ECP3_35,
+ .name = "Lattice ECP3-35",
+ },
+};
+
+static void firmware_load(const struct firmware *fw, void *context)
+{
+ struct spi_device *spi = (struct spi_device *)context;
+ struct fpga_data *data = dev_get_drvdata(&spi->dev);
+ u8 *buffer;
+ int ret;
+ u8 txbuf[8];
+ u8 rxbuf[8];
+ int rx_len = 8;
+ int i;
+ u32 jedec_id;
+ u32 status;
+
+ if (fw->size == 0) {
+ dev_err(&spi->dev, "Error: Firmware size is 0!\n");
+ return;
+ }
+
+ /* Fill dummy data (24 stuffing bits for commands) */
+ txbuf[1] = 0x00;
+ txbuf[2] = 0x00;
+ txbuf[3] = 0x00;
+
+ /* Trying to speak with the FPGA via SPI... */
+ txbuf[0] = FPGA_CMD_READ_ID;
+ ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]);
+ jedec_id = *(u32 *)&rxbuf[4];
+
+ for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) {
+ if (jedec_id == ecp3_dev[i].jedec_id)
+ break;
+ }
+ if (i == ARRAY_SIZE(ecp3_dev)) {
+ dev_err(&spi->dev,
+ "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n",
+ jedec_id);
+ return;
+ }
+
+ dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
+
+ txbuf[0] = FPGA_CMD_READ_STATUS;
+ ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
+
+ buffer = kzalloc(fw->size + 8, GFP_KERNEL);
+ if (!buffer) {
+ dev_err(&spi->dev, "Error: Can't allocate memory!\n");
+ return;
+ }
+
+ /*
+ * Insert WRITE_INC command into stream (one SPI frame)
+ */
+ buffer[0] = FPGA_CMD_WRITE_INC;
+ buffer[1] = 0xff;
+ buffer[2] = 0xff;
+ buffer[3] = 0xff;
+ memcpy(buffer + 4, fw->data, fw->size);
+
+ txbuf[0] = FPGA_CMD_REFRESH;
+ ret = spi_write(spi, txbuf, 4);
+
+ txbuf[0] = FPGA_CMD_WRITE_EN;
+ ret = spi_write(spi, txbuf, 4);
+
+ txbuf[0] = FPGA_CMD_CLEAR;
+ ret = spi_write(spi, txbuf, 4);
+
+ /*
+ * Wait for FPGA memory to become cleared
+ */
+ for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
+ txbuf[0] = FPGA_CMD_READ_STATUS;
+ ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ status = *(u32 *)&rxbuf[4];
+ if (status == FPGA_STATUS_CLEARED)
+ break;
+
+ msleep(FPGA_CLEAR_MSLEEP);
+ }
+
+ if (i == FPGA_CLEAR_LOOP_COUNT) {
+ dev_err(&spi->dev,
+ "Error: Timeout waiting for FPGA to clear (status=%08x)!\n",
+ status);
+ kfree(buffer);
+ return;
+ }
+
+ dev_info(&spi->dev, "Configuring the FPGA...\n");
+ ret = spi_write(spi, buffer, fw->size + 8);
+
+ txbuf[0] = FPGA_CMD_WRITE_DIS;
+ ret = spi_write(spi, txbuf, 4);
+
+ txbuf[0] = FPGA_CMD_READ_STATUS;
+ ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
+ status = *(u32 *)&rxbuf[4];
+
+ /* Check result */
+ if (status & FPGA_STATUS_DONE)
+ dev_info(&spi->dev, "FPGA succesfully configured!\n");
+ else
+ dev_info(&spi->dev, "FPGA not configured (DONE not set)\n");
+
+ /*
+ * Don't forget to release the firmware again
+ */
+ release_firmware(fw);
+
+ kfree(buffer);
+
+ complete(&data->fw_loaded);
+}
+
+static int lattice_ecp3_probe(struct spi_device *spi)
+{
+ struct fpga_data *data;
+ int err;
+
+ data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&spi->dev, "Memory allocation for fpga_data failed\n");
+ return -ENOMEM;
+ }
+ spi_set_drvdata(spi, data);
+
+ init_completion(&data->fw_loaded);
+ err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
+ FIRMWARE_NAME, &spi->dev,
+ GFP_KERNEL, spi, firmware_load);
+ if (err) {
+ dev_err(&spi->dev, "Firmware loading failed with %d!\n", err);
+ return err;
+ }
+
+ dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n");
+
+ return 0;
+}
+
+static int lattice_ecp3_remove(struct spi_device *spi)
+{
+ struct fpga_data *data = spi_get_drvdata(spi);
+
+ wait_for_completion(&data->fw_loaded);
+
+ return 0;
+}
+
+static const struct spi_device_id lattice_ecp3_id[] = {
+ { "ecp3-17", 0 },
+ { "ecp3-35", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, lattice_ecp3_id);
+
+static struct spi_driver lattice_ecp3_driver = {
+ .driver = {
+ .name = "lattice-ecp3",
+ .owner = THIS_MODULE,
+ },
+ .probe = lattice_ecp3_probe,
+ .remove = lattice_ecp3_remove,
+ .id_table = lattice_ecp3_id,
+};
+
+module_spi_driver(lattice_ecp3_driver);
+
+MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
+MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI");
+MODULE_LICENSE("GPL");
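A note on binding the new lattice-ecp3-config driver: it matches via the SPI id table above and expects the bitstream to be available to the firmware loader as "lattice-ecp3.bit". A minimal board-code sketch (bus number, chip-select and clock rate are assumptions, not from the patch):

#include <linux/spi/spi.h>

static struct spi_board_info ecp3_board_info __initdata = {
	.modalias	= "ecp3-35",		/* must match lattice_ecp3_id[] */
	.max_speed_hz	= 10 * 1000 * 1000,	/* assumed SPI clock */
	.bus_num	= 0,
	.chip_select	= 0,
};

/* In the board init code: */
spi_register_board_info(&ecp3_board_info, 1);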
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 5a79ccde2fdf..d21b4d006a55 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -1,11 +1,22 @@
config INTEL_MEI
- tristate "Intel Management Engine Interface (Intel MEI)"
+ tristate "Intel Management Engine Interface"
depends on X86 && PCI && WATCHDOG_CORE
help
The Intel Management Engine (Intel ME) provides Manageability,
Security and Media services for systems containing Intel chipsets.
If selected, the /dev/mei misc device will be created.
+ For more information see
+ <http://software.intel.com/en-us/manageability/>
+
+config INTEL_MEI_ME
+ bool "ME Enabled Intel Chipsets"
+ depends on INTEL_MEI
+ depends on X86 && PCI && WATCHDOG_CORE
+ default y
+ help
+ MEI support for ME Enabled Intel chipsets.
+
Supported Chipsets are:
7 Series Chipset Family
6 Series Chipset Family
@@ -24,5 +35,3 @@ config INTEL_MEI
82Q33 Express
82X38/X48 Express
- For more information see
- <http://software.intel.com/en-us/manageability/>
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 0017842e166c..040af6c7b147 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -4,9 +4,11 @@
#
obj-$(CONFIG_INTEL_MEI) += mei.o
mei-objs := init.o
+mei-objs += hbm.o
mei-objs += interrupt.o
-mei-objs += interface.o
-mei-objs += iorw.o
+mei-objs += client.o
mei-objs += main.o
mei-objs += amthif.o
mei-objs += wd.o
+mei-$(CONFIG_INTEL_MEI_ME) += pci-me.o
+mei-$(CONFIG_INTEL_MEI_ME) += hw-me.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index e40ffd9502d1..c86d7e3839a4 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -31,15 +31,16 @@
#include <linux/jiffies.h>
#include <linux/uaccess.h>
+#include <linux/mei.h>
#include "mei_dev.h"
-#include "hw.h"
-#include <linux/mei.h>
-#include "interface.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
-const uuid_le mei_amthi_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, 0xac,
- 0xa8, 0x46, 0xe0, 0xff, 0x65,
- 0x81, 0x4c);
+const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
+ 0xac, 0xa8, 0x46, 0xe0,
+ 0xff, 0x65, 0x81, 0x4c);
/**
* mei_amthif_reset_params - initializes mei device iamthif
@@ -64,22 +65,24 @@ void mei_amthif_reset_params(struct mei_device *dev)
* @dev: the device structure
*
*/
-void mei_amthif_host_init(struct mei_device *dev)
+int mei_amthif_host_init(struct mei_device *dev)
{
- int i;
+ struct mei_cl *cl = &dev->iamthif_cl;
unsigned char *msg_buf;
+ int ret, i;
+
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
- mei_cl_init(&dev->iamthif_cl, dev);
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
+ mei_cl_init(cl, dev);
- /* find ME amthi client */
- i = mei_me_cl_link(dev, &dev->iamthif_cl,
- &mei_amthi_guid, MEI_IAMTHIF_HOST_CLIENT_ID);
+ i = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
if (i < 0) {
- dev_info(&dev->pdev->dev, "failed to find iamthif client.\n");
- return;
+ dev_info(&dev->pdev->dev, "amthif: failed to find the client\n");
+ return -ENOENT;
}
+ cl->me_client_id = dev->me_clients[i].client_id;
+
/* Assign iamthif_mtu to the value received from ME */
dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length;
@@ -93,19 +96,29 @@ void mei_amthif_host_init(struct mei_device *dev)
msg_buf = kcalloc(dev->iamthif_mtu,
sizeof(unsigned char), GFP_KERNEL);
if (!msg_buf) {
- dev_dbg(&dev->pdev->dev, "memory allocation for ME message buffer failed.\n");
- return;
+ dev_err(&dev->pdev->dev, "amthif: memory allocation for ME message buffer failed.\n");
+ return -ENOMEM;
}
dev->iamthif_msg_buf = msg_buf;
- if (mei_connect(dev, &dev->iamthif_cl)) {
- dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n");
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
- dev->iamthif_cl.host_client_id = 0;
+ ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
+
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev, "amthif: failed link client\n");
+ return -ENOENT;
+ }
+
+ cl->state = MEI_FILE_CONNECTING;
+
+ if (mei_hbm_cl_connect_req(dev, cl)) {
+ dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n");
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->host_client_id = 0;
} else {
- dev->iamthif_cl.timer_count = MEI_CONNECT_TIMEOUT;
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
}
+ return 0;
}
/**
@@ -168,10 +181,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
if (i < 0) {
- dev_dbg(&dev->pdev->dev, "amthi client not found.\n");
+ dev_dbg(&dev->pdev->dev, "amthif client not found.\n");
return -ENODEV;
}
- dev_dbg(&dev->pdev->dev, "checking amthi data\n");
+ dev_dbg(&dev->pdev->dev, "checking amthif data\n");
cb = mei_amthif_find_read_list_entry(dev, file);
/* Check for if we can block or not*/
@@ -179,7 +192,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
return -EAGAIN;
- dev_dbg(&dev->pdev->dev, "waiting for amthi data\n");
+ dev_dbg(&dev->pdev->dev, "waiting for amthif data\n");
while (cb == NULL) {
/* unlock the Mutex */
mutex_unlock(&dev->device_lock);
@@ -197,17 +210,17 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
}
- dev_dbg(&dev->pdev->dev, "Got amthi data\n");
+ dev_dbg(&dev->pdev->dev, "Got amthif data\n");
dev->iamthif_timer = 0;
if (cb) {
timeout = cb->read_time +
mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
- dev_dbg(&dev->pdev->dev, "amthi timeout = %lud\n",
+ dev_dbg(&dev->pdev->dev, "amthif timeout = %lud\n",
timeout);
if (time_after(jiffies, timeout)) {
- dev_dbg(&dev->pdev->dev, "amthi Time out\n");
+ dev_dbg(&dev->pdev->dev, "amthif Time out\n");
/* 15 sec for the message has expired */
list_del(&cb->list);
rets = -ETIMEDOUT;
@@ -227,9 +240,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
* remove message from deletion list
*/
- dev_dbg(&dev->pdev->dev, "amthi cb->response_buffer size - %d\n",
+ dev_dbg(&dev->pdev->dev, "amthif cb->response_buffer size - %d\n",
cb->response_buffer.size);
- dev_dbg(&dev->pdev->dev, "amthi cb->buf_idx - %lu\n", cb->buf_idx);
+ dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
/* length is being truncated to PAGE_SIZE, however,
* the buf_idx may point beyond */
@@ -245,7 +258,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
}
}
free:
- dev_dbg(&dev->pdev->dev, "free amthi cb memory.\n");
+ dev_dbg(&dev->pdev->dev, "free amthif cb memory.\n");
*offset = 0;
mei_io_cb_free(cb);
out:
@@ -269,7 +282,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
if (!dev || !cb)
return -ENODEV;
- dev_dbg(&dev->pdev->dev, "write data to amthi client.\n");
+ dev_dbg(&dev->pdev->dev, "write data to amthif client.\n");
dev->iamthif_state = MEI_IAMTHIF_WRITING;
dev->iamthif_current_cb = cb;
@@ -280,15 +293,15 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
cb->request_buffer.size);
- ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl);
+ ret = mei_cl_flow_ctrl_creds(&dev->iamthif_cl);
if (ret < 0)
return ret;
- if (ret && dev->mei_host_buffer_is_empty) {
+ if (ret && dev->hbuf_is_ready) {
ret = 0;
- dev->mei_host_buffer_is_empty = false;
- if (cb->request_buffer.size > mei_hbuf_max_data(dev)) {
- mei_hdr.length = mei_hbuf_max_data(dev);
+ dev->hbuf_is_ready = false;
+ if (cb->request_buffer.size > mei_hbuf_max_len(dev)) {
+ mei_hdr.length = mei_hbuf_max_len(dev);
mei_hdr.msg_complete = 0;
} else {
mei_hdr.length = cb->request_buffer.size;
@@ -300,25 +313,24 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
mei_hdr.reserved = 0;
dev->iamthif_msg_buf_index += mei_hdr.length;
if (mei_write_message(dev, &mei_hdr,
- (unsigned char *)(dev->iamthif_msg_buf),
- mei_hdr.length))
+ (unsigned char *)dev->iamthif_msg_buf))
return -ENODEV;
if (mei_hdr.msg_complete) {
- if (mei_flow_ctrl_reduce(dev, &dev->iamthif_cl))
+ if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl))
return -ENODEV;
dev->iamthif_flow_control_pending = true;
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
- dev_dbg(&dev->pdev->dev, "add amthi cb to write waiting list\n");
+ dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n");
dev->iamthif_current_cb = cb;
dev->iamthif_file_object = cb->file_object;
list_add_tail(&cb->list, &dev->write_waiting_list.list);
} else {
- dev_dbg(&dev->pdev->dev, "message does not complete, so add amthi cb to write list.\n");
+ dev_dbg(&dev->pdev->dev, "message does not complete, so add amthif cb to write list.\n");
list_add_tail(&cb->list, &dev->write_list.list);
}
} else {
- if (!(dev->mei_host_buffer_is_empty))
+ if (!dev->hbuf_is_ready)
dev_dbg(&dev->pdev->dev, "host buffer is not empty");
dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n");
@@ -383,7 +395,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
dev->iamthif_timer = 0;
dev->iamthif_file_object = NULL;
- dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
+ dev_dbg(&dev->pdev->dev, "complete amthif cmd_list cb.\n");
list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) {
list_del(&pos->list);
@@ -392,7 +404,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
status = mei_amthif_send_cmd(dev, pos);
if (status) {
dev_dbg(&dev->pdev->dev,
- "amthi write failed status = %d\n",
+ "amthif write failed status = %d\n",
status);
return;
}
@@ -412,7 +424,7 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
dev->iamthif_file_object == file) {
mask |= (POLLIN | POLLRDNORM);
- dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
+ dev_dbg(&dev->pdev->dev, "run next amthif cb\n");
mei_amthif_run_next_cmd(dev);
}
return mask;
@@ -434,54 +446,51 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
- struct mei_msg_hdr *mei_hdr;
+ struct mei_msg_hdr mei_hdr;
struct mei_cl *cl = cb->cl;
size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
size_t msg_slots = mei_data2slots(len);
- mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
- mei_hdr->host_addr = cl->host_client_id;
- mei_hdr->me_addr = cl->me_client_id;
- mei_hdr->reserved = 0;
+ mei_hdr.host_addr = cl->host_client_id;
+ mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.reserved = 0;
if (*slots >= msg_slots) {
- mei_hdr->length = len;
- mei_hdr->msg_complete = 1;
+ mei_hdr.length = len;
+ mei_hdr.msg_complete = 1;
/* Split the message only if we can write the whole host buffer */
} else if (*slots == dev->hbuf_depth) {
msg_slots = *slots;
len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
- mei_hdr->length = len;
- mei_hdr->msg_complete = 0;
+ mei_hdr.length = len;
+ mei_hdr.msg_complete = 0;
} else {
/* wait for next time the host buffer is empty */
return 0;
}
- dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
- mei_hdr->length, mei_hdr->msg_complete);
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
*slots -= msg_slots;
- if (mei_write_message(dev, mei_hdr,
- dev->iamthif_msg_buf + dev->iamthif_msg_buf_index,
- mei_hdr->length)) {
+ if (mei_write_message(dev, &mei_hdr,
+ dev->iamthif_msg_buf + dev->iamthif_msg_buf_index)) {
dev->iamthif_state = MEI_IAMTHIF_IDLE;
cl->status = -ENODEV;
list_del(&cb->list);
return -ENODEV;
}
- if (mei_flow_ctrl_reduce(dev, cl))
+ if (mei_cl_flow_ctrl_reduce(cl))
return -ENODEV;
- dev->iamthif_msg_buf_index += mei_hdr->length;
+ dev->iamthif_msg_buf_index += mei_hdr.length;
cl->status = 0;
- if (mei_hdr->msg_complete) {
+ if (mei_hdr.msg_complete) {
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
dev->iamthif_flow_control_pending = true;
- /* save iamthif cb sent to amthi client */
+ /* save iamthif cb sent to amthif client */
cb->buf_idx = dev->iamthif_msg_buf_index;
dev->iamthif_current_cb = cb;
@@ -494,11 +503,11 @@ int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
/**
* mei_amthif_irq_read_message - read routine after ISR to
- * handle the read amthi message
+ * handle the read amthif message
*
* @complete_list: An instance of our list structure
* @dev: the device structure
- * @mei_hdr: header of amthi message
+ * @mei_hdr: header of amthif message
*
* returns 0 on success, <0 on failure.
*/
@@ -522,10 +531,10 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
return 0;
dev_dbg(&dev->pdev->dev,
- "amthi_message_buffer_index =%d\n",
+ "amthif_message_buffer_index =%d\n",
mei_hdr->length);
- dev_dbg(&dev->pdev->dev, "completed amthi read.\n ");
+ dev_dbg(&dev->pdev->dev, "completed amthif read.\n ");
if (!dev->iamthif_current_cb)
return -ENODEV;
@@ -540,8 +549,8 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
cb->read_time = jiffies;
if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) {
/* found the iamthif cb */
- dev_dbg(&dev->pdev->dev, "complete the amthi read cb.\n ");
- dev_dbg(&dev->pdev->dev, "add the amthi read cb to complete.\n ");
+ dev_dbg(&dev->pdev->dev, "complete the amthif read cb.\n ");
+ dev_dbg(&dev->pdev->dev, "add the amthif read cb to complete.\n ");
list_add_tail(&cb->list, &complete_list->list);
}
return 0;
@@ -563,7 +572,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
return -EMSGSIZE;
}
*slots -= mei_data2slots(sizeof(struct hbm_flow_control));
- if (mei_send_flow_control(dev, &dev->iamthif_cl)) {
+ if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) {
dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
return -EIO;
}
@@ -574,7 +583,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
dev->iamthif_msg_buf_index = 0;
dev->iamthif_msg_buf_size = 0;
dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
- dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
return 0;
}
@@ -593,7 +602,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
dev->iamthif_msg_buf,
dev->iamthif_msg_buf_index);
list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
- dev_dbg(&dev->pdev->dev, "amthi read completed\n");
+ dev_dbg(&dev->pdev->dev, "amthif read completed\n");
dev->iamthif_timer = jiffies;
dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
dev->iamthif_timer);
@@ -601,7 +610,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
mei_amthif_run_next_cmd(dev);
}
- dev_dbg(&dev->pdev->dev, "completing amthi call back.\n");
+ dev_dbg(&dev->pdev->dev, "completing amthif call back.\n");
wake_up_interruptible(&dev->iamthif_cl.wait);
}
@@ -635,7 +644,8 @@ static bool mei_clear_list(struct mei_device *dev,
if (dev->iamthif_current_cb == cb_pos) {
dev->iamthif_current_cb = NULL;
/* send flow control to iamthif client */
- mei_send_flow_control(dev, &dev->iamthif_cl);
+ mei_hbm_cl_flow_control_req(dev,
+ &dev->iamthif_cl);
}
/* free all allocated buffers */
mei_io_cb_free(cb_pos);
@@ -706,11 +716,11 @@ int mei_amthif_release(struct mei_device *dev, struct file *file)
if (dev->iamthif_file_object == file &&
dev->iamthif_state != MEI_IAMTHIF_IDLE) {
- dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
+ dev_dbg(&dev->pdev->dev, "amthif canceled iamthif state %d\n",
dev->iamthif_state);
dev->iamthif_canceled = true;
if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
- dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
+ dev_dbg(&dev->pdev->dev, "run next amthif iamthif cb\n");
mei_amthif_run_next_cmd(dev);
}
}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
new file mode 100644
index 000000000000..1569afe935de
--- /dev/null
+++ b/drivers/misc/mei/client.c
@@ -0,0 +1,729 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * mei_me_cl_by_uuid - locate index of me client
+ *
+ * @dev: mei device
+ * @uuid: me client uuid
+ *
+ * returns me client index or -ENOENT if not found
+ */
+int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
+{
+ int i, res = -ENOENT;
+
+ for (i = 0; i < dev->me_clients_num; ++i)
+ if (uuid_le_cmp(*uuid,
+ dev->me_clients[i].props.protocol_name) == 0) {
+ res = i;
+ break;
+ }
+
+ return res;
+}
+
+
+/**
+ * mei_me_cl_by_id - returns index to me_clients for client_id
+ *
+ * @dev: the device structure
+ * @client_id: me client id
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns index on success, -ENOENT on failure.
+ */
+
+int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
+{
+ int i;
+ for (i = 0; i < dev->me_clients_num; i++)
+ if (dev->me_clients[i].client_id == client_id)
+ break;
+ if (i == dev->me_clients_num)
+ return -ENOENT;
+
+ if (WARN_ON(dev->me_clients[i].client_id != client_id))
+ return -ENOENT;
+
+ return i;
+}
+
+
+/**
+ * mei_io_list_flush - removes list entry belonging to cl.
+ *
+ * @list: An instance of our list structure
+ * @cl: host client
+ */
+void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+{
+ struct mei_cl_cb *cb;
+ struct mei_cl_cb *next;
+
+ list_for_each_entry_safe(cb, next, &list->list, list) {
+ if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
+ list_del(&cb->list);
+ }
+}
+
+/**
+ * mei_io_cb_free - free mei_cb_private related memory
+ *
+ * @cb: mei callback struct
+ */
+void mei_io_cb_free(struct mei_cl_cb *cb)
+{
+ if (cb == NULL)
+ return;
+
+ kfree(cb->request_buffer.data);
+ kfree(cb->response_buffer.data);
+ kfree(cb);
+}
+
+/**
+ * mei_io_cb_init - allocate and initialize io callback
+ *
+ * @cl - mei client
+ * @file: pointer to file structure
+ *
+ * returns mei_cl_cb pointer or NULL;
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
+{
+ struct mei_cl_cb *cb;
+
+ cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ if (!cb)
+ return NULL;
+
+ mei_io_list_init(cb);
+
+ cb->file_object = fp;
+ cb->cl = cl;
+ cb->buf_idx = 0;
+ return cb;
+}
+
+/**
+ * mei_io_cb_alloc_req_buf - allocate request buffer
+ *
+ * @cb: io callback structure
+ * @length: size of the buffer
+ *
+ * returns 0 on success
+ * -EINVAL if cb is NULL
+ * -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
+{
+ if (!cb)
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
+ if (!cb->request_buffer.data)
+ return -ENOMEM;
+ cb->request_buffer.size = length;
+ return 0;
+}
+/**
+ * mei_io_cb_alloc_resp_buf - allocate response buffer
+ *
+ * @cb: io callback structure
+ * @length: size of the buffer
+ *
+ * returns 0 on success
+ * -EINVAL if cb is NULL
+ * -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
+{
+ if (!cb)
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
+ if (!cb->response_buffer.data)
+ return -ENOMEM;
+ cb->response_buffer.size = length;
+ return 0;
+}
+
+
+
+/**
+ * mei_cl_flush_queues - flushes queue lists belonging to cl.
+ *
+ * @cl: host client
+ */
+int mei_cl_flush_queues(struct mei_cl *cl)
+{
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
+ mei_io_list_flush(&cl->dev->read_list, cl);
+ mei_io_list_flush(&cl->dev->write_list, cl);
+ mei_io_list_flush(&cl->dev->write_waiting_list, cl);
+ mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
+ mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
+ mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
+ return 0;
+}
+
+
+/**
+ * mei_cl_init - initializes cl.
+ *
+ * @cl: host client to be initialized
+ * @dev: mei device
+ */
+void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
+{
+ memset(cl, 0, sizeof(struct mei_cl));
+ init_waitqueue_head(&cl->wait);
+ init_waitqueue_head(&cl->rx_wait);
+ init_waitqueue_head(&cl->tx_wait);
+ INIT_LIST_HEAD(&cl->link);
+ cl->reading_state = MEI_IDLE;
+ cl->writing_state = MEI_IDLE;
+ cl->dev = dev;
+}
+
+/**
+ * mei_cl_allocate - allocates cl structure and sets it up.
+ *
+ * @dev: mei device
+ * returns the allocated host client structure or NULL on failure
+ */
+struct mei_cl *mei_cl_allocate(struct mei_device *dev)
+{
+ struct mei_cl *cl;
+
+ cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
+ if (!cl)
+ return NULL;
+
+ mei_cl_init(cl, dev);
+
+ return cl;
+}
+
+/**
+ * mei_cl_find_read_cb - find this cl's callback in the read list
+ *
+ * @cl: host client
+ * returns cb on success, NULL on error
+ */
+struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
+{
+ struct mei_device *dev = cl->dev;
+ struct mei_cl_cb *cb = NULL;
+ struct mei_cl_cb *next = NULL;
+
+ list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
+ if (mei_cl_cmp_id(cl, cb->cl))
+ return cb;
+ return NULL;
+}
+
+/**
+ * mei_cl_link - allocate host id in the host map
+ *
+ * @cl: host client
+ * @id: fixed host id or -1 for generating one
+ *
+ * returns 0 on success
+ * -EINVAL on incorrect values
+ * -ENOENT if client not found
+ */
+int mei_cl_link(struct mei_cl *cl, int id)
+{
+ struct mei_device *dev;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ /* If Id is not assigned get one */
+ if (id == MEI_HOST_CLIENT_ID_ANY)
+ id = find_first_zero_bit(dev->host_clients_map,
+ MEI_CLIENTS_MAX);
+
+ if (id >= MEI_CLIENTS_MAX) {
+ dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ;
+ return -ENOENT;
+ }
+
+ dev->open_handle_count++;
+
+ cl->host_client_id = id;
+ list_add_tail(&cl->link, &dev->file_list);
+
+ set_bit(id, dev->host_clients_map);
+
+ cl->state = MEI_FILE_INITIALIZING;
+
+ dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id);
+ return 0;
+}
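mei_cl_link() allocates the host client id from a bitmap: the first clear bit becomes the new id, and the bit is then marked used. A userspace model of that allocation, with a plain unsigned long standing in for dev->host_clients_map and the kernel's find_first_zero_bit()/set_bit() replaced by open-coded bit tests:

/* Toy model of host id allocation (illustrative only). */
#include <stdio.h>

#define MODEL_CLIENTS_MAX 32

static unsigned long host_map;	/* bit N set => host id N in use */

static int model_alloc_host_id(void)
{
	int id;

	for (id = 0; id < MODEL_CLIENTS_MAX; id++) {
		if (!(host_map & (1UL << id))) {
			host_map |= 1UL << id;
			return id;
		}
	}
	return -1;	/* map full; mei_cl_link returns -ENOENT */
}

int main(void)
{
	host_map = 0x7;	/* ids 0-2 reserved (bus, watchdog, AMTHI) */
	printf("first free id: %d\n", model_alloc_host_id());	/* 3 */
	printf("next free id:  %d\n", model_alloc_host_id());	/* 4 */
	return 0;
}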
+
+/**
+ * mei_cl_unlink - remove host client from the list
+ *
+ * @cl: host client
+ */
+int mei_cl_unlink(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ struct mei_cl *pos, *next;
+
+ /* don't shout on error exit path */
+ if (!cl)
+ return 0;
+
+ /* wd and amthif might not be initialized */
+ if (!cl->dev)
+ return 0;
+
+ dev = cl->dev;
+
+ list_for_each_entry_safe(pos, next, &dev->file_list, link) {
+ if (cl->host_client_id == pos->host_client_id) {
+ dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
+ pos->host_client_id, pos->me_client_id);
+ list_del_init(&pos->link);
+ break;
+ }
+ }
+ return 0;
+}
+
+
+void mei_host_client_init(struct work_struct *work)
+{
+ struct mei_device *dev = container_of(work,
+ struct mei_device, init_work);
+ struct mei_client_properties *client_props;
+ int i;
+
+ mutex_lock(&dev->device_lock);
+
+ bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
+ dev->open_handle_count = 0;
+
+ /*
+ * Reserving the first three client IDs
+ * 0: Reserved for MEI Bus Message communications
+ * 1: Reserved for Watchdog
+ * 2: Reserved for AMTHI
+ */
+ bitmap_set(dev->host_clients_map, 0, 3);
+
+ for (i = 0; i < dev->me_clients_num; i++) {
+ client_props = &dev->me_clients[i].props;
+
+ if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
+ mei_amthif_host_init(dev);
+ else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
+ mei_wd_host_init(dev);
+ }
+
+ dev->dev_state = MEI_DEV_ENABLED;
+
+ mutex_unlock(&dev->device_lock);
+}
+
+
+/**
+ * mei_cl_disconnect - disconnect host client from the me one
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_disconnect(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ int rets, err;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != MEI_FILE_DISCONNECTING)
+ return 0;
+
+ cb = mei_io_cb_init(cl, NULL);
+ if (!cb)
+ return -ENOMEM;
+
+ cb->fop_type = MEI_FOP_CLOSE;
+ if (dev->hbuf_is_ready) {
+ dev->hbuf_is_ready = false;
+ if (mei_hbm_cl_disconnect_req(dev, cl)) {
+ rets = -ENODEV;
+ dev_err(&dev->pdev->dev, "failed to disconnect.\n");
+ goto free;
+ }
+ mdelay(10); /* Wait for hardware disconnection ready */
+ list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+ } else {
+ dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+
+ }
+ mutex_unlock(&dev->device_lock);
+
+ err = wait_event_timeout(dev->wait_recvd_msg,
+ MEI_FILE_DISCONNECTED == cl->state,
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+
+ mutex_lock(&dev->device_lock);
+ if (MEI_FILE_DISCONNECTED == cl->state) {
+ rets = 0;
+ dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
+ } else {
+ rets = -ENODEV;
+ if (MEI_FILE_DISCONNECTED != cl->state)
+ dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
+
+ if (err)
+ dev_dbg(&dev->pdev->dev,
+ "wait failed disconnect err=%08x\n",
+ err);
+
+ dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
+ }
+
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&dev->ctrl_wr_list, cl);
+free:
+ mei_io_cb_free(cb);
+ return rets;
+}
+
+
+/**
+ * mei_cl_is_other_connecting - checks if other
+ * client with the same me client id is connecting
+ *
+ * @cl: private data of the file object
+ *
+ * returns true if another client is connecting, false otherwise.
+ */
+bool mei_cl_is_other_connecting(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ struct mei_cl *pos;
+ struct mei_cl *next;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return false;
+
+ dev = cl->dev;
+
+ list_for_each_entry_safe(pos, next, &dev->file_list, link) {
+ if ((pos->state == MEI_FILE_CONNECTING) &&
+ (pos != cl) && cl->me_client_id == pos->me_client_id)
+ return true;
+
+ }
+
+ return false;
+}
+
+/**
+ * mei_cl_connect - connect host client to the me one
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_connect(struct mei_cl *cl, struct file *file)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ cb = mei_io_cb_init(cl, file);
+ if (!cb) {
+ rets = -ENOMEM;
+ goto out;
+ }
+
+ cb->fop_type = MEI_FOP_IOCTL;
+
+ if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
+ dev->hbuf_is_ready = false;
+
+ if (mei_hbm_cl_connect_req(dev, cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
+ list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+ } else {
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ }
+
+ mutex_unlock(&dev->device_lock);
+ rets = wait_event_timeout(dev->wait_recvd_msg,
+ (cl->state == MEI_FILE_CONNECTED ||
+ cl->state == MEI_FILE_DISCONNECTED),
+ timeout * HZ);
+ mutex_lock(&dev->device_lock);
+
+ if (cl->state != MEI_FILE_CONNECTED) {
+ rets = -EFAULT;
+
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&dev->ctrl_wr_list, cl);
+ goto out;
+ }
+
+ rets = cl->status;
+
+out:
+ mei_io_cb_free(cb);
+ return rets;
+}
+
+/**
+ * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
+ *
+ * @cl: private data of the file object
+ *
+ * returns 1 if mei_flow_ctrl_creds > 0, 0 otherwise.
+ * -ENOENT if mei_cl is not present
+ * -EINVAL if single_recv_buf == 0
+ */
+int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ int i;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ if (!dev->me_clients_num)
+ return 0;
+
+ if (cl->mei_flow_ctrl_creds > 0)
+ return 1;
+
+ for (i = 0; i < dev->me_clients_num; i++) {
+ struct mei_me_client *me_cl = &dev->me_clients[i];
+ if (me_cl->client_id == cl->me_client_id) {
+ if (me_cl->mei_flow_ctrl_creds) {
+ if (WARN_ON(me_cl->props.single_recv_buf == 0))
+ return -EINVAL;
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+ }
+ return -ENOENT;
+}
+
+/**
+ * mei_cl_flow_ctrl_reduce - reduces flow_control.
+ *
+ * @cl: private data of the file object
+ *
+ * returns
+ * 0 on success
+ * -ENOENT when me client is not found
+ * -EINVAL when ctrl credits are <= 0
+ */
+int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ int i;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ if (!dev->me_clients_num)
+ return -ENOENT;
+
+ for (i = 0; i < dev->me_clients_num; i++) {
+ struct mei_me_client *me_cl = &dev->me_clients[i];
+ if (me_cl->client_id == cl->me_client_id) {
+ if (me_cl->props.single_recv_buf != 0) {
+ if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
+ dev->me_clients[i].mei_flow_ctrl_creds--;
+ } else {
+ if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
+ cl->mei_flow_ctrl_creds--;
+ }
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
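mei_cl_flow_ctrl_creds() and mei_cl_flow_ctrl_reduce() form a check/consume pair: a write is allowed only while a credit is available, and each completed write burns one credit, either from the shared single-receive-buffer pool kept on the ME client or from the per-connection counter. A toy standalone model of that pairing, using simplified stand-in structures rather than the driver's types:

/* Toy model of flow control credit handling (illustrative only). */
#include <stdio.h>

struct model_creds {
	int single_recv_buf;	/* 1: credits live on the ME client side */
	int me_creds;		/* shared credits (single receive buffer) */
	int cl_creds;		/* per-connection credits */
};

static int model_can_write(const struct model_creds *c)
{
	return c->single_recv_buf ? c->me_creds > 0 : c->cl_creds > 0;
}

static void model_consume(struct model_creds *c)
{
	if (c->single_recv_buf)
		c->me_creds--;
	else
		c->cl_creds--;
}

int main(void)
{
	struct model_creds c = { .single_recv_buf = 0, .cl_creds = 1 };

	while (model_can_write(&c)) {
		printf("write one message\n");
		model_consume(&c);
	}
	printf("out of credits, wait for MEI_FLOW_CONTROL_CMD\n");
	return 0;
}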
+
+/**
+ * mei_cl_read_start - the start read client message function.
+ *
+ * @cl: host client
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_read_start(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ int rets;
+ int i;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != MEI_FILE_CONNECTED)
+ return -ENODEV;
+
+ if (dev->dev_state != MEI_DEV_ENABLED)
+ return -ENODEV;
+
+ if (cl->read_cb) {
+ dev_dbg(&dev->pdev->dev, "read is pending.\n");
+ return -EBUSY;
+ }
+ i = mei_me_cl_by_id(dev, cl->me_client_id);
+ if (i < 0) {
+ dev_err(&dev->pdev->dev, "no such me client %d\n",
+ cl->me_client_id);
+ return -ENODEV;
+ }
+
+ cb = mei_io_cb_init(cl, NULL);
+ if (!cb)
+ return -ENOMEM;
+
+ rets = mei_io_cb_alloc_resp_buf(cb,
+ dev->me_clients[i].props.max_msg_length);
+ if (rets)
+ goto err;
+
+ cb->fop_type = MEI_FOP_READ;
+ cl->read_cb = cb;
+ if (dev->hbuf_is_ready) {
+ dev->hbuf_is_ready = false;
+ if (mei_hbm_cl_flow_control_req(dev, cl)) {
+ rets = -ENODEV;
+ goto err;
+ }
+ list_add_tail(&cb->list, &dev->read_list.list);
+ } else {
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ }
+ return rets;
+err:
+ mei_io_cb_free(cb);
+ return rets;
+}
+
+/**
+ * mei_cl_all_disconnect - forcefully disconnect all connected clients
+ *
+ * @dev - mei device
+ */
+
+void mei_cl_all_disconnect(struct mei_device *dev)
+{
+ struct mei_cl *cl, *next;
+
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->mei_flow_ctrl_creds = 0;
+ cl->read_cb = NULL;
+ cl->timer_count = 0;
+ }
+}
+
+
+/**
+ * mei_cl_all_read_wakeup - wake up all readers so they can be interrupted
+ *
+ * @dev - mei device
+ */
+void mei_cl_all_read_wakeup(struct mei_device *dev)
+{
+ struct mei_cl *cl, *next;
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ if (waitqueue_active(&cl->rx_wait)) {
+ dev_dbg(&dev->pdev->dev, "Waking up client!\n");
+ wake_up_interruptible(&cl->rx_wait);
+ }
+ }
+}
+
+/**
+ * mei_cl_all_write_clear - clear all pending writes
+ *
+ * @dev - mei device
+ */
+void mei_cl_all_write_clear(struct mei_device *dev)
+{
+ struct mei_cl_cb *cb, *next;
+
+ list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+ list_del(&cb->list);
+ mei_io_cb_free(cb);
+ }
+}
+
+
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
new file mode 100644
index 000000000000..214b2397ec3e
--- /dev/null
+++ b/drivers/misc/mei/client.h
@@ -0,0 +1,102 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _MEI_CLIENT_H_
+#define _MEI_CLIENT_H_
+
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <linux/poll.h>
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+
+int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
+int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
+
+/*
+ * MEI IO Functions
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
+void mei_io_cb_free(struct mei_cl_cb *priv_cb);
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
+
+
+/**
+ * mei_io_list_init - Sets up a queue list.
+ *
+ * @list: an instance of the cl callback structure
+ */
+static inline void mei_io_list_init(struct mei_cl_cb *list)
+{
+ INIT_LIST_HEAD(&list->list);
+}
+void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
+
+/*
+ * MEI Host Client Functions
+ */
+
+struct mei_cl *mei_cl_allocate(struct mei_device *dev);
+void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
+
+
+int mei_cl_link(struct mei_cl *cl, int id);
+int mei_cl_unlink(struct mei_cl *cl);
+
+int mei_cl_flush_queues(struct mei_cl *cl);
+struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
+
+/**
+ * mei_cl_cmp_id - tells if the host clients have the same id
+ *
+ * @cl1: host client of the first file object
+ * @cl2: host client of the second file object
+ *
+ * returns true - if ids are the same and not NULL
+ */
+static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
+ const struct mei_cl *cl2)
+{
+ return cl1 && cl2 &&
+ (cl1->host_client_id == cl2->host_client_id) &&
+ (cl1->me_client_id == cl2->me_client_id);
+}
+
+
+int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
+
+int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
+/*
+ * MEI input output function prototype
+ */
+bool mei_cl_is_other_connecting(struct mei_cl *cl);
+int mei_cl_disconnect(struct mei_cl *cl);
+
+int mei_cl_read_start(struct mei_cl *cl);
+
+int mei_cl_connect(struct mei_cl *cl, struct file *file);
+
+void mei_host_client_init(struct work_struct *work);
+
+
+void mei_cl_all_disconnect(struct mei_device *dev);
+void mei_cl_all_read_wakeup(struct mei_device *dev);
+void mei_cl_all_write_clear(struct mei_device *dev);
+
+
+#endif /* _MEI_CLIENT_H_ */
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
new file mode 100644
index 000000000000..fb9e63ba3bb1
--- /dev/null
+++ b/drivers/misc/mei/hbm.c
@@ -0,0 +1,669 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+
+/**
+ * mei_hbm_me_cl_allocate - allocates storage for me clients
+ *
+ * @dev: the device structure
+ *
+ * returns none.
+ */
+static void mei_hbm_me_cl_allocate(struct mei_device *dev)
+{
+ struct mei_me_client *clients;
+ int b;
+
+ /* count how many ME clients we have */
+ for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
+ dev->me_clients_num++;
+
+ if (dev->me_clients_num <= 0)
+ return;
+
+ kfree(dev->me_clients);
+ dev->me_clients = NULL;
+
+ dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
+ dev->me_clients_num * sizeof(struct mei_me_client));
+ /* allocate storage for ME clients representation */
+ clients = kcalloc(dev->me_clients_num,
+ sizeof(struct mei_me_client), GFP_KERNEL);
+ if (!clients) {
+ dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
+ dev->dev_state = MEI_DEV_RESETING;
+ mei_reset(dev, 1);
+ return;
+ }
+ dev->me_clients = clients;
+ return;
+}
+
+/**
+ * mei_hbm_cl_hdr - construct client hbm header
+ * @cl: client
+ * @hbm_cmd: host bus message command
+ * @buf: buffer for cl header
+ * @len: buffer length
+ */
+static inline
+void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
+{
+ struct mei_hbm_cl_cmd *cmd = buf;
+
+ memset(cmd, 0, len);
+
+ cmd->hbm_cmd = hbm_cmd;
+ cmd->host_addr = cl->host_client_id;
+ cmd->me_addr = cl->me_client_id;
+}
+
+/**
+ * mei_hbm_cl_addr_equal - tells if the message and the client have the same address
+ *
+ * @cl: host client
+ * @buf: message buffer (client command)
+ *
+ * returns true if the addresses are the same
+ */
+static inline
+bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
+{
+ struct mei_hbm_cl_cmd *cmd = buf;
+ return cl->host_client_id == cmd->host_addr &&
+ cl->me_client_id == cmd->me_addr;
+}
+
+
+/**
+ * is_treat_specially_client - checks if the message belongs
+ * to the file private data.
+ *
+ * @cl: private data of the file object
+ * @rs: connect response bus message
+ *
+ */
+static bool is_treat_specially_client(struct mei_cl *cl,
+ struct hbm_client_connect_response *rs)
+{
+ if (mei_hbm_cl_addr_equal(cl, rs)) {
+ if (!rs->status) {
+ cl->state = MEI_FILE_CONNECTED;
+ cl->status = 0;
+
+ } else {
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->status = -ENODEV;
+ }
+ cl->timer_count = 0;
+
+ return true;
+ }
+ return false;
+}
+
+/**
+ * mei_hbm_start_req - sends start request message.
+ *
+ * @dev: the device structure
+ */
+void mei_hbm_start_req(struct mei_device *dev)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_host_version_request *start_req;
+ const size_t len = sizeof(struct hbm_host_version_request);
+
+ mei_hbm_hdr(mei_hdr, len);
+
+ /* host start message */
+ start_req = (struct hbm_host_version_request *)dev->wr_msg.data;
+ memset(start_req, 0, len);
+ start_req->hbm_cmd = HOST_START_REQ_CMD;
+ start_req->host_version.major_version = HBM_MAJOR_VERSION;
+ start_req->host_version.minor_version = HBM_MINOR_VERSION;
+
+ dev->recvd_msg = false;
+ if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+ dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
+ dev->dev_state = MEI_DEV_RESETING;
+ mei_reset(dev, 1);
+ }
+ dev->init_clients_state = MEI_START_MESSAGE;
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ return ;
+}
+
+/**
+ * mei_hbm_enum_clients_req - sends enumeration client request message.
+ *
+ * @dev: the device structure
+ *
+ * returns none.
+ */
+static void mei_hbm_enum_clients_req(struct mei_device *dev)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_host_enum_request *enum_req;
+ const size_t len = sizeof(struct hbm_host_enum_request);
+ /* enumerate clients */
+ mei_hbm_hdr(mei_hdr, len);
+
+ enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
+ memset(enum_req, 0, len);
+ enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+
+ if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+ dev->dev_state = MEI_DEV_RESETING;
+ dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
+ mei_reset(dev, 1);
+ }
+ dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ return;
+}
+
+/**
+ * mei_hbm_prop_req - request property for a single client
+ *
+ * @dev: the device structure
+ *
+ * returns none.
+ */
+
+static int mei_hbm_prop_req(struct mei_device *dev)
+{
+
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_props_request *prop_req;
+ const size_t len = sizeof(struct hbm_props_request);
+ unsigned long next_client_index;
+ u8 client_num;
+
+
+ client_num = dev->me_client_presentation_num;
+
+ next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
+ dev->me_client_index);
+
+ /* We got all client properties */
+ if (next_client_index == MEI_CLIENTS_MAX) {
+ schedule_work(&dev->init_work);
+
+ return 0;
+ }
+
+ dev->me_clients[client_num].client_id = next_client_index;
+ dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
+
+ mei_hbm_hdr(mei_hdr, len);
+ prop_req = (struct hbm_props_request *)dev->wr_msg.data;
+
+ memset(prop_req, 0, sizeof(struct hbm_props_request));
+
+
+ prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ prop_req->address = next_client_index;
+
+ if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+ dev->dev_state = MEI_DEV_RESETING;
+ dev_err(&dev->pdev->dev, "Properties request command failed\n");
+ mei_reset(dev, 1);
+
+ return -EIO;
+ }
+
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->me_client_index = next_client_index;
+
+ return 0;
+}
+
+/**
+ * mei_hbm_stop_req_prepare - prepare stop request message
+ *
+ * @dev - mei device
+ * @mei_hdr - mei message header
+ * @data - hbm message body buffer
+ */
+static void mei_hbm_stop_req_prepare(struct mei_device *dev,
+ struct mei_msg_hdr *mei_hdr, unsigned char *data)
+{
+ struct hbm_host_stop_request *req =
+ (struct hbm_host_stop_request *)data;
+ const size_t len = sizeof(struct hbm_host_stop_request);
+
+ mei_hbm_hdr(mei_hdr, len);
+
+ memset(req, 0, len);
+ req->hbm_cmd = HOST_STOP_REQ_CMD;
+ req->reason = DRIVER_STOP_REQUEST;
+}
+
+/**
+ * mei_hbm_cl_flow_control_req - sends flow control request.
+ *
+ * @dev: the device structure
+ * @cl: client info
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ const size_t len = sizeof(struct hbm_flow_control);
+
+ mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len);
+
+ dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
+ cl->host_client_id, cl->me_client_id);
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
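A client-level HBM command such as the flow control request above is assembled in two steps: mei_hbm_hdr() fills the transport header (bus addresses zero, payload length, complete bit) and mei_hbm_cl_hdr() fills the command byte plus the host/ME client addresses. A standalone sketch of that layout; the bitfield struct, the command value and the field sizes here are simplified local stand-ins assumed for illustration, not the driver's authoritative definitions.

/* Standalone illustration of building a fixed-size client HBM command. */
#include <stdio.h>
#include <stdint.h>

struct model_msg_hdr {
	uint32_t me_addr      : 8;
	uint32_t host_addr    : 8;
	uint32_t length       : 9;
	uint32_t reserved     : 6;
	uint32_t msg_complete : 1;
};

struct model_cl_cmd {
	uint8_t hbm_cmd;
	uint8_t me_addr;
	uint8_t host_addr;
	uint8_t data;
};

int main(void)
{
	struct model_msg_hdr hdr = { 0 };
	struct model_cl_cmd cmd = { 0 };

	/* transport header: bus message (addresses 0), 4-byte payload */
	hdr.length = sizeof(cmd);
	hdr.msg_complete = 1;

	/* client header: flow control for host client 3 / me client 5 */
	cmd.hbm_cmd = 0x08;	/* hypothetical stand-in for MEI_FLOW_CONTROL_CMD */
	cmd.host_addr = 3;
	cmd.me_addr = 5;

	printf("payload %u bytes, cmd 0x%02x host %u me %u\n",
	       (unsigned)hdr.length, (unsigned)cmd.hbm_cmd,
	       (unsigned)cmd.host_addr, (unsigned)cmd.me_addr);
	return 0;
}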
+
+/**
+ * mei_hbm_add_single_flow_creds - adds single buffer credentials.
+ *
+ * @dev: the device structure
+ * @flow: flow control message
+ */
+static void mei_hbm_add_single_flow_creds(struct mei_device *dev,
+ struct hbm_flow_control *flow)
+{
+ struct mei_me_client *client;
+ int i;
+
+ for (i = 0; i < dev->me_clients_num; i++) {
+ client = &dev->me_clients[i];
+ if (client && flow->me_addr == client->client_id) {
+ if (client->props.single_recv_buf) {
+ client->mei_flow_ctrl_creds++;
+ dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
+ flow->me_addr);
+ dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
+ client->mei_flow_ctrl_creds);
+ } else {
+ BUG(); /* error in flow control */
+ }
+ }
+ }
+}
+
+/**
+ * mei_hbm_cl_flow_control_res - flow control response from me
+ *
+ * @dev: the device structure
+ * @flow_control: flow control response bus message
+ */
+static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
+ struct hbm_flow_control *flow_control)
+{
+ struct mei_cl *cl = NULL;
+ struct mei_cl *next = NULL;
+
+ if (!flow_control->host_addr) {
+ /* single receive buffer */
+ mei_hbm_add_single_flow_creds(dev, flow_control);
+ return;
+ }
+
+ /* normal connection */
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ if (mei_hbm_cl_addr_equal(cl, flow_control)) {
+ cl->mei_flow_ctrl_creds++;
+ dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
+ flow_control->host_addr, flow_control->me_addr);
+ dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
+ cl->mei_flow_ctrl_creds);
+ break;
+ }
+ }
+}
+
+
+/**
+ * mei_hbm_cl_disconnect_req - sends disconnect message to fw.
+ *
+ * @dev: the device structure
+ * @cl: a client to disconnect from
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ const size_t len = sizeof(struct hbm_client_connect_request);
+
+ mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, dev->wr_msg.data, len);
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_cl_disconnect_res - disconnect response from ME
+ *
+ * @dev: the device structure
+ * @rs: disconnect response bus message
+ */
+static void mei_hbm_cl_disconnect_res(struct mei_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+ struct mei_cl *cl;
+ struct mei_cl_cb *pos = NULL, *next = NULL;
+
+ dev_dbg(&dev->pdev->dev,
+ "disconnect_response:\n"
+ "ME Client = %d\n"
+ "Host Client = %d\n"
+ "Status = %d\n",
+ rs->me_addr,
+ rs->host_addr,
+ rs->status);
+
+ list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+ cl = pos->cl;
+
+ if (!cl) {
+ list_del(&pos->list);
+ return;
+ }
+
+ dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
+ if (mei_hbm_cl_addr_equal(cl, rs)) {
+ list_del(&pos->list);
+ if (!rs->status)
+ cl->state = MEI_FILE_DISCONNECTED;
+
+ cl->status = 0;
+ cl->timer_count = 0;
+ break;
+ }
+ }
+}
+
+/**
+ * mei_hbm_cl_connect_req - send connection request to specific me client
+ *
+ * @dev: the device structure
+ * @cl: a client to connect to
+ *
+ * returns -EIO on write failure
+ */
+int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ const size_t len = sizeof(struct hbm_client_connect_request);
+
+ mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, dev->wr_msg.data, len);
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_cl_connect_res - connect response from the ME
+ *
+ * @dev: the device structure
+ * @rs: connect response bus message
+ */
+static void mei_hbm_cl_connect_res(struct mei_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+
+ struct mei_cl *cl;
+ struct mei_cl_cb *pos = NULL, *next = NULL;
+
+ dev_dbg(&dev->pdev->dev,
+ "connect_response:\n"
+ "ME Client = %d\n"
+ "Host Client = %d\n"
+ "Status = %d\n",
+ rs->me_addr,
+ rs->host_addr,
+ rs->status);
+
+ /* if WD or iamthif client treat specially */
+
+ if (is_treat_specially_client(&dev->wd_cl, rs)) {
+ dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
+ mei_watchdog_register(dev);
+
+ return;
+ }
+
+ if (is_treat_specially_client(&dev->iamthif_cl, rs)) {
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+ return;
+ }
+ list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+
+ cl = pos->cl;
+ if (!cl) {
+ list_del(&pos->list);
+ return;
+ }
+ if (pos->fop_type == MEI_FOP_IOCTL) {
+ if (is_treat_specially_client(cl, rs)) {
+ list_del(&pos->list);
+ cl->status = 0;
+ cl->timer_count = 0;
+ break;
+ }
+ }
+ }
+}
+
+
+/**
+ * mei_hbm_fw_disconnect_req - disconnect request initiated by the ME;
+ * the host sends a disconnect response
+ *
+ * @dev: the device structure.
+ * @disconnect_req: disconnect request bus message from the me
+ */
+static void mei_hbm_fw_disconnect_req(struct mei_device *dev,
+ struct hbm_client_connect_request *disconnect_req)
+{
+ struct mei_cl *cl, *next;
+ const size_t len = sizeof(struct hbm_client_connect_response);
+
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ if (mei_hbm_cl_addr_equal(cl, disconnect_req)) {
+ dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
+ disconnect_req->host_addr,
+ disconnect_req->me_addr);
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->timer_count = 0;
+ if (cl == &dev->wd_cl)
+ dev->wd_pending = false;
+ else if (cl == &dev->iamthif_cl)
+ dev->iamthif_timer = 0;
+
+ /* prepare disconnect response */
+ mei_hbm_hdr(&dev->wr_ext_msg.hdr, len);
+ mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD,
+ dev->wr_ext_msg.data, len);
+ break;
+ }
+ }
+}
+
+
+/**
+ * mei_hbm_dispatch - bottom half read routine after ISR to
+ * handle the read bus message cmd processing.
+ *
+ * @dev: the device structure
+ * @hdr: header of bus message
+ */
+void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
+{
+ struct mei_bus_message *mei_msg;
+ struct mei_me_client *me_client;
+ struct hbm_host_version_response *version_res;
+ struct hbm_client_connect_response *connect_res;
+ struct hbm_client_connect_response *disconnect_res;
+ struct hbm_client_connect_request *disconnect_req;
+ struct hbm_flow_control *flow_control;
+ struct hbm_props_response *props_res;
+ struct hbm_host_enum_response *enum_res;
+
+ /* read the message to our buffer */
+ BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf));
+ mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
+ mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
+
+ switch (mei_msg->hbm_cmd) {
+ case HOST_START_RES_CMD:
+ version_res = (struct hbm_host_version_response *)mei_msg;
+ if (!version_res->host_version_supported) {
+ dev->version = version_res->me_max_version;
+ dev_dbg(&dev->pdev->dev, "version mismatch.\n");
+
+ mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr,
+ dev->wr_msg.data);
+ mei_write_message(dev, &dev->wr_msg.hdr,
+ dev->wr_msg.data);
+ return;
+ }
+
+ dev->version.major_version = HBM_MAJOR_VERSION;
+ dev->version.minor_version = HBM_MINOR_VERSION;
+ if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
+ dev->init_clients_state == MEI_START_MESSAGE) {
+ dev->init_clients_timer = 0;
+ mei_hbm_enum_clients_req(dev);
+ } else {
+ dev->recvd_msg = false;
+ dev_dbg(&dev->pdev->dev, "reset due to received hbm: host start\n");
+ mei_reset(dev, 1);
+ return;
+ }
+
+ dev->recvd_msg = true;
+ dev_dbg(&dev->pdev->dev, "host start response message received.\n");
+ break;
+
+ case CLIENT_CONNECT_RES_CMD:
+ connect_res = (struct hbm_client_connect_response *) mei_msg;
+ mei_hbm_cl_connect_res(dev, connect_res);
+ dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
+ wake_up(&dev->wait_recvd_msg);
+ break;
+
+ case CLIENT_DISCONNECT_RES_CMD:
+ disconnect_res = (struct hbm_client_connect_response *) mei_msg;
+ mei_hbm_cl_disconnect_res(dev, disconnect_res);
+ dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
+ wake_up(&dev->wait_recvd_msg);
+ break;
+
+ case MEI_FLOW_CONTROL_CMD:
+ flow_control = (struct hbm_flow_control *) mei_msg;
+ mei_hbm_cl_flow_control_res(dev, flow_control);
+ dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
+ break;
+
+ case HOST_CLIENT_PROPERTIES_RES_CMD:
+ props_res = (struct hbm_props_response *)mei_msg;
+ me_client = &dev->me_clients[dev->me_client_presentation_num];
+
+ if (props_res->status || !dev->me_clients) {
+ dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
+ mei_reset(dev, 1);
+ return;
+ }
+
+ if (me_client->client_id != props_res->address) {
+ dev_err(&dev->pdev->dev,
+ "Host client properties reply mismatch\n");
+ mei_reset(dev, 1);
+
+ return;
+ }
+
+ if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+ dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
+ dev_err(&dev->pdev->dev,
+ "Unexpected client properties reply\n");
+ mei_reset(dev, 1);
+
+ return;
+ }
+
+ me_client->props = props_res->client_properties;
+ dev->me_client_index++;
+ dev->me_client_presentation_num++;
+
+ /* request property for the next client */
+ mei_hbm_prop_req(dev);
+
+ break;
+
+ case HOST_ENUM_RES_CMD:
+ enum_res = (struct hbm_host_enum_response *) mei_msg;
+ memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
+ if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
+ dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
+ dev->init_clients_timer = 0;
+ dev->me_client_presentation_num = 0;
+ dev->me_client_index = 0;
+ mei_hbm_me_cl_allocate(dev);
+ dev->init_clients_state =
+ MEI_CLIENT_PROPERTIES_MESSAGE;
+
+ /* first property request */
+ mei_hbm_prop_req(dev);
+ } else {
+ dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
+ mei_reset(dev, 1);
+ return;
+ }
+ break;
+
+ case HOST_STOP_RES_CMD:
+ dev->dev_state = MEI_DEV_DISABLED;
+ dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
+ mei_reset(dev, 1);
+ break;
+
+ case CLIENT_DISCONNECT_REQ_CMD:
+ /* search for client */
+ disconnect_req = (struct hbm_client_connect_request *)mei_msg;
+ mei_hbm_fw_disconnect_req(dev, disconnect_req);
+ break;
+
+ case ME_STOP_REQ_CMD:
+
+ mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr,
+ dev->wr_ext_msg.data);
+ break;
+ default:
+ BUG();
+ break;
+
+ }
+}
+
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
new file mode 100644
index 000000000000..b552afbaf85c
--- /dev/null
+++ b/drivers/misc/mei/hbm.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _MEI_HBM_H_
+#define _MEI_HBM_H_
+
+void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);
+
+static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
+{
+ hdr->host_addr = 0;
+ hdr->me_addr = 0;
+ hdr->length = length;
+ hdr->msg_complete = 1;
+ hdr->reserved = 0;
+}
+
+void mei_hbm_start_req(struct mei_device *dev);
+
+int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
+
+
+#endif /* _MEI_HBM_H_ */
+
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
new file mode 100644
index 000000000000..6a203b6e8346
--- /dev/null
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -0,0 +1,167 @@
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _MEI_HW_MEI_REGS_H_
+#define _MEI_HW_MEI_REGS_H_
+
+/*
+ * MEI device IDs
+ */
+#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */
+#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */
+#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */
+#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */
+
+#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */
+#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */
+
+#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */
+#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */
+#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */
+#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */
+#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */
+
+#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */
+#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */
+#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */
+#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */
+#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */
+
+#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */
+#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */
+#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */
+#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */
+
+#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */
+#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */
+#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */
+#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */
+
+#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */
+#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */
+
+#define MEI_DEV_ID_CPT_1 0x1C3A /* Cougar Point */
+#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */
+
+#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */
+#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
+#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
+
+#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
+#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
+/*
+ * MEI HW Section
+ */
+
+/* MEI registers */
+/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
+#define H_CB_WW 0
+/* H_CSR - Host Control Status register */
+#define H_CSR 4
+/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
+#define ME_CB_RW 8
+/* ME_CSR_HA - ME Control Status Host Access register (read only) */
+#define ME_CSR_HA 0xC
+
+
+/* register bits of H_CSR (Host Control Status register) */
+/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
+#define H_CBD 0xFF000000
+/* Host Circular Buffer Write Pointer */
+#define H_CBWP 0x00FF0000
+/* Host Circular Buffer Read Pointer */
+#define H_CBRP 0x0000FF00
+/* Host Reset */
+#define H_RST 0x00000010
+/* Host Ready */
+#define H_RDY 0x00000008
+/* Host Interrupt Generate */
+#define H_IG 0x00000004
+/* Host Interrupt Status */
+#define H_IS 0x00000002
+/* Host Interrupt Enable */
+#define H_IE 0x00000001
+
+
+/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
+/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
+ * access to ME_CBD */
+#define ME_CBD_HRA 0xFF000000
+/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
+#define ME_CBWP_HRA 0x00FF0000
+/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
+#define ME_CBRP_HRA 0x0000FF00
+/* ME Reset HRA - host read only access to ME_RST */
+#define ME_RST_HRA 0x00000010
+/* ME Ready HRA - host read only access to ME_RDY */
+#define ME_RDY_HRA 0x00000008
+/* ME Interrupt Generate HRA - host read only access to ME_IG */
+#define ME_IG_HRA 0x00000004
+/* ME Interrupt Status HRA - host read only access to ME_IS */
+#define ME_IS_HRA 0x00000002
+/* ME Interrupt Enable HRA - host read only access to ME_IE */
+#define ME_IE_HRA 0x00000001
+
+#endif /* _MEI_HW_MEI_REGS_H_ */
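The H_CSR layout above packs the host circular-buffer depth and the read/write pointers into byte-wide fields of one register. A small standalone sketch of decoding them with the masks defined in this header; the sample register value is invented purely for illustration.

/* Decode the circular-buffer fields of a raw H_CSR snapshot. */
#include <stdio.h>

#define H_CBD	0xFF000000	/* buffer depth */
#define H_CBWP	0x00FF0000	/* write pointer */
#define H_CBRP	0x0000FF00	/* read pointer */
#define H_RDY	0x00000008	/* host ready */

int main(void)
{
	unsigned int hcsr = 0x20050308;	/* hypothetical register snapshot */

	printf("depth      = %u slots\n", (hcsr & H_CBD) >> 24);	/* 32 */
	printf("write ptr  = %u\n", (hcsr & H_CBWP) >> 16);		/* 5 */
	printf("read ptr   = %u\n", (hcsr & H_CBRP) >> 8);		/* 3 */
	printf("host ready = %s\n", (hcsr & H_RDY) ? "yes" : "no");
	return 0;
}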
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
new file mode 100644
index 000000000000..45ea7185c003
--- /dev/null
+++ b/drivers/misc/mei/hw-me.c
@@ -0,0 +1,576 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+
+#include "mei_dev.h"
+#include "hw-me.h"
+
+#include "hbm.h"
+
+
+/**
+ * mei_reg_read - Reads 32bit data from the mei device
+ *
+ * @dev: the device structure
+ * @offset: offset from which to read the data
+ *
+ * returns register value (u32)
+ */
+static inline u32 mei_reg_read(const struct mei_me_hw *hw,
+ unsigned long offset)
+{
+ return ioread32(hw->mem_addr + offset);
+}
+
+
+/**
+ * mei_reg_write - Writes 32bit data to the mei device
+ *
+ * @dev: the device structure
+ * @offset: offset from which to write the data
+ * @value: register value to write (u32)
+ */
+static inline void mei_reg_write(const struct mei_me_hw *hw,
+ unsigned long offset, u32 value)
+{
+ iowrite32(value, hw->mem_addr + offset);
+}
+
+/**
+ * mei_mecbrw_read - Reads 32bit data from ME circular buffer
+ * read window register
+ *
+ * @dev: the device structure
+ *
+ * returns ME_CB_RW register value (u32)
+ */
+static u32 mei_me_mecbrw_read(const struct mei_device *dev)
+{
+ return mei_reg_read(to_me_hw(dev), ME_CB_RW);
+}
+/**
+ * mei_mecsr_read - Reads 32bit data from the ME CSR
+ *
+ * @dev: the device structure
+ *
+ * returns ME_CSR_HA register value (u32)
+ */
+static inline u32 mei_mecsr_read(const struct mei_me_hw *hw)
+{
+ return mei_reg_read(hw, ME_CSR_HA);
+}
+
+/**
+ * mei_hcsr_read - Reads 32bit data from the host CSR
+ *
+ * @dev: the device structure
+ *
+ * returns H_CSR register value (u32)
+ */
+static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
+{
+ return mei_reg_read(hw, H_CSR);
+}
+
+/**
+ * mei_hcsr_set - writes H_CSR register to the mei device,
+ * and ignores the H_IS bit for it is write-one-to-zero.
+ *
+ * @dev: the device structure
+ */
+static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
+{
+ hcsr &= ~H_IS;
+ mei_reg_write(hw, H_CSR, hcsr);
+}
+
+
+/**
+ * mei_me_hw_config - configure hw dependent settings
+ *
+ * @dev: mei device
+ */
+static void mei_me_hw_config(struct mei_device *dev)
+{
+ u32 hcsr = mei_hcsr_read(to_me_hw(dev));
+ /* Doesn't change in runtime */
+ dev->hbuf_depth = (hcsr & H_CBD) >> 24;
+}
+/**
+ * mei_me_intr_clear - clear and stop interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_clear(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
+ if ((hcsr & H_IS) == H_IS)
+ mei_reg_write(hw, H_CSR, hcsr);
+}
+/**
+ * mei_me_intr_enable - enables mei device interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_enable(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
+ hcsr |= H_IE;
+ mei_hcsr_set(hw, hcsr);
+}
+
+/**
+ * mei_me_intr_disable - disables mei device interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_disable(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
+ hcsr &= ~H_IE;
+ mei_hcsr_set(hw, hcsr);
+}
+
+/**
+ * mei_me_hw_reset - resets fw via mei csr register.
+ *
+ * @dev: the device structure
+ * @interrupts_enabled: if interrupt should be enabled after reset.
+ */
+static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
+
+ dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr);
+
+ hcsr |= (H_RST | H_IG);
+
+ if (intr_enable)
+ hcsr |= H_IE;
+ else
+ hcsr &= ~H_IE;
+
+ mei_hcsr_set(hw, hcsr);
+
+ hcsr = mei_hcsr_read(hw) | H_IG;
+ hcsr &= ~H_RST;
+
+ mei_hcsr_set(hw, hcsr);
+
+ hcsr = mei_hcsr_read(hw);
+
+ dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr);
+}
+
+/**
+ * mei_me_host_set_ready - enable device
+ *
+ * @dev: mei device
+ */
+
+static void mei_me_host_set_ready(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ hw->host_hw_state |= H_IE | H_IG | H_RDY;
+ mei_hcsr_set(hw, hw->host_hw_state);
+}
+/**
+ * mei_me_host_is_ready - check whether the host has turned ready
+ *
+ * @dev - mei device
+ * returns bool
+ */
+static bool mei_me_host_is_ready(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ hw->host_hw_state = mei_hcsr_read(hw);
+ return (hw->host_hw_state & H_RDY) == H_RDY;
+}
+
+/**
+ * mei_me_hw_is_ready - check whether the me(hw) has turned ready
+ *
+ * @dev - mei device
+ * returns bool
+ */
+static bool mei_me_hw_is_ready(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ hw->me_hw_state = mei_mecsr_read(hw);
+ return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
+}
+
+/**
+ * mei_hbuf_filled_slots - gets number of device filled buffer slots
+ *
+ * @dev: the device structure
+ *
+ * returns number of filled slots
+ */
+static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ char read_ptr, write_ptr;
+
+ hw->host_hw_state = mei_hcsr_read(hw);
+
+ read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
+ write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);
+
+ return (unsigned char) (write_ptr - read_ptr);
+}
+
+/**
+ * mei_me_hbuf_is_empty - checks if host buffer is empty.
+ *
+ * @dev: the device structure
+ *
+ * returns true if empty, false - otherwise.
+ */
+static bool mei_me_hbuf_is_empty(struct mei_device *dev)
+{
+ return mei_hbuf_filled_slots(dev) == 0;
+}
+
+/**
+ * mei_me_hbuf_empty_slots - counts write empty slots.
+ *
+ * @dev: the device structure
+ *
+ * returns -EOVERFLOW if overflow, otherwise empty slots count
+ */
+static int mei_me_hbuf_empty_slots(struct mei_device *dev)
+{
+ unsigned char filled_slots, empty_slots;
+
+ filled_slots = mei_hbuf_filled_slots(dev);
+ empty_slots = dev->hbuf_depth - filled_slots;
+
+ /* check for overflow */
+ if (filled_slots > dev->hbuf_depth)
+ return -EOVERFLOW;
+
+ return empty_slots;
+}
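The slot accounting above relies on the filled count being the unsigned 8-bit difference of the write and read pointers, which also copes with pointer wraparound; any count larger than the advertised depth signals corruption. A standalone model of that arithmetic, kept outside the driver for illustration:

/* Model of the filled/empty slot arithmetic (illustrative only). */
#include <stdio.h>

static int model_empty_slots(unsigned char depth,
			     unsigned char read_ptr, unsigned char write_ptr)
{
	unsigned char filled = (unsigned char)(write_ptr - read_ptr);

	if (filled > depth)
		return -1;	/* stands in for -EOVERFLOW */
	return depth - filled;
}

int main(void)
{
	/* plain case: write pointer ahead of read pointer */
	printf("empty = %d\n", model_empty_slots(32, 3, 5));	/* 30 */
	/* wrapped case: write pointer already rolled over 255 -> 1 */
	printf("empty = %d\n", model_empty_slots(32, 250, 1));	/* 25 */
	return 0;
}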
+
+static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
+{
+ return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
+}
+
+
+/**
+ * mei_me_write_message - writes a message to the mei device.
+ *
+ * @dev: the device structure
+ * @header: mei HECI header of the message
+ * @buf: message payload to be written
+ *
+ * This function returns -EIO if write has failed
+ */
+static int mei_me_write_message(struct mei_device *dev,
+ struct mei_msg_hdr *header,
+ unsigned char *buf)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ unsigned long rem, dw_cnt;
+ unsigned long length = header->length;
+ u32 *reg_buf = (u32 *)buf;
+ u32 hcsr;
+ int i;
+ int empty_slots;
+
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+
+ empty_slots = mei_hbuf_empty_slots(dev);
+ dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
+
+ dw_cnt = mei_data2slots(length);
+ if (empty_slots < 0 || dw_cnt > empty_slots)
+ return -EIO;
+
+ mei_reg_write(hw, H_CB_WW, *((u32 *) header));
+
+ for (i = 0; i < length / 4; i++)
+ mei_reg_write(hw, H_CB_WW, reg_buf[i]);
+
+ rem = length & 0x3;
+ if (rem > 0) {
+ u32 reg = 0;
+ memcpy(&reg, &buf[length - rem], rem);
+ mei_reg_write(hw, H_CB_WW, reg);
+ }
+
+ hcsr = mei_hcsr_read(hw) | H_IG;
+ mei_hcsr_set(hw, hcsr);
+ if (!mei_me_hw_is_ready(dev))
+ return -EIO;
+
+ return 0;
+}
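The write path above pushes the payload 32 bits at a time into H_CB_WW and zero-pads a trailing 1-3 byte remainder into one last register write. A standalone sketch of just that split, using made-up payload bytes:

/* Dword split and remainder padding as done in mei_me_write_message(). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	unsigned char buf[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
	size_t length = sizeof(buf);
	size_t full = length / 4;	/* 2 full dword writes */
	size_t rem = length & 0x3;	/* 2 remainder bytes */
	uint32_t last = 0;

	memcpy(&last, &buf[length - rem], rem);	/* tail padded with zeroes */
	printf("%zu full dword writes + 1 padded write (rem=%zu, last=0x%08lx)\n",
	       full, rem, (unsigned long)last);
	return 0;
}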
+
+/**
+ * mei_me_count_full_read_slots - counts read full slots.
+ *
+ * @dev: the device structure
+ *
+ * returns -EOVERFLOW if overflow, otherwise filled slots count
+ */
+static int mei_me_count_full_read_slots(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ char read_ptr, write_ptr;
+ unsigned char buffer_depth, filled_slots;
+
+ hw->me_hw_state = mei_mecsr_read(hw);
+ buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
+ read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
+ write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
+ filled_slots = (unsigned char) (write_ptr - read_ptr);
+
+ /* check for overflow */
+ if (filled_slots > buffer_depth)
+ return -EOVERFLOW;
+
+ dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
+ return (int)filled_slots;
+}
+
+/**
+ * mei_me_read_slots - reads a message from mei device.
+ *
+ * @dev: the device structure
+ * @buffer: message buffer to be filled
+ * @buffer_length: length of the message to read
+ */
+static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
+ unsigned long buffer_length)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 *reg_buf = (u32 *)buffer;
+ u32 hcsr;
+
+ for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
+ *reg_buf++ = mei_me_mecbrw_read(dev);
+
+ if (buffer_length > 0) {
+ u32 reg = mei_me_mecbrw_read(dev);
+ memcpy(reg_buf, &reg, buffer_length);
+ }
+
+ hcsr = mei_hcsr_read(hw) | H_IG;
+ mei_hcsr_set(hw, hcsr);
+ return 0;
+}
+
+/**
+ * mei_me_irq_quick_handler - The ISR of the MEI device
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ */
+
+irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = (struct mei_device *) dev_id;
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 csr_reg = mei_hcsr_read(hw);
+
+ if ((csr_reg & H_IS) != H_IS)
+ return IRQ_NONE;
+
+ /* clear H_IS bit in H_CSR */
+ mei_reg_write(hw, H_CSR, csr_reg);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
+ * processing.
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ *
+ */
+irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = (struct mei_device *) dev_id;
+ struct mei_cl_cb complete_list;
+ struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
+ struct mei_cl *cl;
+ s32 slots;
+ int rets;
+ bool bus_message_received;
+
+
+ dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
+ /* initialize our complete list */
+ mutex_lock(&dev->device_lock);
+ mei_io_list_init(&complete_list);
+
+ /* Ack the interrupt here
+ * In case of MSI we don't go through the quick handler */
+ if (pci_dev_msi_enabled(dev->pdev))
+ mei_clear_interrupts(dev);
+
+ /* check if ME wants a reset */
+ if (!mei_hw_is_ready(dev) &&
+ dev->dev_state != MEI_DEV_RESETING &&
+ dev->dev_state != MEI_DEV_INITIALIZING) {
+ dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+ mei_reset(dev, 1);
+ mutex_unlock(&dev->device_lock);
+ return IRQ_HANDLED;
+ }
+
+ /* check if we need to start the dev */
+ if (!mei_host_is_ready(dev)) {
+ if (mei_hw_is_ready(dev)) {
+ dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
+
+ mei_host_set_ready(dev);
+
+ dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
+ /* link is established, start sending messages. */
+
+ dev->dev_state = MEI_DEV_INIT_CLIENTS;
+
+ mei_hbm_start_req(dev);
+ mutex_unlock(&dev->device_lock);
+ return IRQ_HANDLED;
+ } else {
+ dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+ mutex_unlock(&dev->device_lock);
+ return IRQ_HANDLED;
+ }
+ }
+ /* check slots available for reading */
+ slots = mei_count_full_read_slots(dev);
+ while (slots > 0) {
+ /* we have urgent data to send so break the read */
+ if (dev->wr_ext_msg.hdr.length)
+ break;
+ dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
+ dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n");
+ rets = mei_irq_read_handler(dev, &complete_list, &slots);
+ if (rets)
+ goto end;
+ }
+ rets = mei_irq_write_handler(dev, &complete_list);
+end:
+ dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+
+ bus_message_received = false;
+ if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
+ dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
+ bus_message_received = true;
+ }
+ mutex_unlock(&dev->device_lock);
+ if (bus_message_received) {
+ dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
+ wake_up_interruptible(&dev->wait_recvd_msg);
+ bus_message_received = false;
+ }
+ if (list_empty(&complete_list.list))
+ return IRQ_HANDLED;
+
+
+ list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
+ cl = cb_pos->cl;
+ list_del(&cb_pos->list);
+ if (cl) {
+ if (cl != &dev->iamthif_cl) {
+ dev_dbg(&dev->pdev->dev, "completing call back.\n");
+ mei_irq_complete_handler(cl, cb_pos);
+ cb_pos = NULL;
+ } else if (cl == &dev->iamthif_cl) {
+ mei_amthif_complete(dev, cb_pos);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static const struct mei_hw_ops mei_me_hw_ops = {
+ .host_set_ready = mei_me_host_set_ready,
+ .host_is_ready = mei_me_host_is_ready,
+
+ .hw_is_ready = mei_me_hw_is_ready,
+ .hw_reset = mei_me_hw_reset,
+ .hw_config = mei_me_hw_config,
+
+ .intr_clear = mei_me_intr_clear,
+ .intr_enable = mei_me_intr_enable,
+ .intr_disable = mei_me_intr_disable,
+
+ .hbuf_free_slots = mei_me_hbuf_empty_slots,
+ .hbuf_is_ready = mei_me_hbuf_is_empty,
+ .hbuf_max_len = mei_me_hbuf_max_len,
+
+ .write = mei_me_write_message,
+
+ .rdbuf_full_slots = mei_me_count_full_read_slots,
+ .read_hdr = mei_me_mecbrw_read,
+ .read = mei_me_read_slots
+};
+
+/**
+ * mei_me_dev_init - allocates and initializes the mei device structure
+ *
+ * @pdev: The pci device structure
+ *
+ * returns The mei_device pointer on success, NULL on failure.
+ */
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+
+ dev = kzalloc(sizeof(struct mei_device) +
+ sizeof(struct mei_me_hw), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ mei_device_init(dev);
+
+ INIT_LIST_HEAD(&dev->wd_cl.link);
+ INIT_LIST_HEAD(&dev->iamthif_cl.link);
+ mei_io_list_init(&dev->amthif_cmd_list);
+ mei_io_list_init(&dev->amthif_rd_complete_list);
+
+ INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
+ INIT_WORK(&dev->init_work, mei_host_client_init);
+
+ dev->ops = &mei_me_hw_ops;
+
+ dev->pdev = pdev;
+ return dev;
+}
+
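For orientation, a hedged sketch of how the generic MEI core is assumed to reach these ME-specific callbacks through the mei_me_hw_ops table defined above. The wrapper below mirrors the mei_hbuf_is_ready() call used later in interrupt.c, but its real definition belongs in mei_dev.h, outside this hunk, so treat it as illustrative only:

	/* illustrative only: the core dispatches through dev->ops, which
	 * mei_me_dev_init() points at mei_me_hw_ops */
	static inline bool mei_hbuf_is_ready(struct mei_device *dev)
	{
		return dev->ops->hbuf_is_ready(dev);
	}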
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
new file mode 100644
index 000000000000..8518d3eeb838
--- /dev/null
+++ b/drivers/misc/mei/hw-me.h
@@ -0,0 +1,48 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+
+
+#ifndef _MEI_INTERFACE_H_
+#define _MEI_INTERFACE_H_
+
+#include <linux/mei.h>
+#include "mei_dev.h"
+#include "client.h"
+
+struct mei_me_hw {
+ void __iomem *mem_addr;
+ /*
+ * hw states of host and fw(ME)
+ */
+ u32 host_hw_state;
+ u32 me_hw_state;
+};
+
+#define to_me_hw(dev) ((struct mei_me_hw *)((dev)->hw))
+
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev);
+
+/* get slots (dwords) from a message length + header (bytes) */
+static inline unsigned char mei_data2slots(size_t length)
+{
+ return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
+}
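+/*
+ * Worked example (illustrative, assuming the 4-byte struct mei_msg_hdr):
+ * a 6-byte payload occupies 4 + 6 = 10 bytes, so
+ * mei_data2slots(6) == DIV_ROUND_UP(10, 4) == 3 slots.
+ */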
+
+irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
+irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
+
+#endif /* _MEI_INTERFACE_H_ */
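As a usage illustration only, here is a minimal sketch of how a PCI probe path could wire up the API declared above; the helper name, IRQ flags, and error handling are assumptions made for the example and are not part of this patch:

	#include <linux/interrupt.h>
	#include <linux/pci.h>
	#include <linux/slab.h>
	#include "hw-me.h"

	/* hypothetical probe-time helper, for illustration only */
	static int example_mei_me_setup(struct pci_dev *pdev)
	{
		struct mei_device *dev;

		/* allocate mei_device plus the trailing mei_me_hw; sets dev->ops */
		dev = mei_me_dev_init(pdev);
		if (!dev)
			return -ENOMEM;

		/* the quick handler acks H_IS, the threaded handler does the work */
		if (request_threaded_irq(pdev->irq,
					 mei_me_irq_quick_handler,
					 mei_me_irq_thread_handler,
					 IRQF_SHARED, KBUILD_MODNAME, dev)) {
			kfree(dev);
			return -EBUSY;
		}

		return 0;
	}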
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index be8ca6b333ca..cb2f556b4252 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -31,109 +31,6 @@
#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
-/*
- * Internal Clients Number
- */
-#define MEI_WD_HOST_CLIENT_ID 1
-#define MEI_IAMTHIF_HOST_CLIENT_ID 2
-
-/*
- * MEI device IDs
- */
-#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */
-#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */
-#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */
-#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */
-
-#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */
-#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */
-
-#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */
-#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */
-#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */
-#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */
-#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */
-
-#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */
-#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */
-#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */
-#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */
-#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */
-
-#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */
-#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */
-#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */
-#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */
-
-#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */
-#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */
-#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */
-#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */
-
-#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */
-#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */
-
-#define MEI_DEV_ID_CPT_1 0x1C3A /* Couger Point */
-#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */
-
-#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */
-#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
-#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
-
-#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
-#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
-/*
- * MEI HW Section
- */
-
-/* MEI registers */
-/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
-#define H_CB_WW 0
-/* H_CSR - Host Control Status register */
-#define H_CSR 4
-/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
-#define ME_CB_RW 8
-/* ME_CSR_HA - ME Control Status Host Access register (read only) */
-#define ME_CSR_HA 0xC
-
-
-/* register bits of H_CSR (Host Control Status register) */
-/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
-#define H_CBD 0xFF000000
-/* Host Circular Buffer Write Pointer */
-#define H_CBWP 0x00FF0000
-/* Host Circular Buffer Read Pointer */
-#define H_CBRP 0x0000FF00
-/* Host Reset */
-#define H_RST 0x00000010
-/* Host Ready */
-#define H_RDY 0x00000008
-/* Host Interrupt Generate */
-#define H_IG 0x00000004
-/* Host Interrupt Status */
-#define H_IS 0x00000002
-/* Host Interrupt Enable */
-#define H_IE 0x00000001
-
-
-/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
-/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
-access to ME_CBD */
-#define ME_CBD_HRA 0xFF000000
-/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
-#define ME_CBWP_HRA 0x00FF0000
-/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
-#define ME_CBRP_HRA 0x0000FF00
-/* ME Reset HRA - host read only access to ME_RST */
-#define ME_RST_HRA 0x00000010
-/* ME Ready HRA - host read only access to ME_RDY */
-#define ME_RDY_HRA 0x00000008
-/* ME Interrupt Generate HRA - host read only access to ME_IG */
-#define ME_IG_HRA 0x00000004
-/* ME Interrupt Status HRA - host read only access to ME_IS */
-#define ME_IS_HRA 0x00000002
-/* ME Interrupt Enable HRA - host read only access to ME_IE */
-#define ME_IE_HRA 0x00000001
/*
* MEI Version
@@ -224,6 +121,22 @@ struct mei_bus_message {
u8 data[0];
} __packed;
+/**
+ * struct mei_hbm_cl_cmd - client specific host bus command
+ * CONNECT, DISCONNECT, and FLOW CONTROL
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @host_addr: address of the client in the driver
+ * @data: command specific data
+ */
+struct mei_hbm_cl_cmd {
+ u8 hbm_cmd;
+ u8 me_addr;
+ u8 host_addr;
+ u8 data;
+};
+
struct hbm_version {
u8 minor_version;
u8 major_version;
@@ -333,11 +246,5 @@ struct hbm_flow_control {
u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
} __packed;
-struct mei_me_client {
- struct mei_client_properties props;
- u8 client_id;
- u8 mei_flow_ctrl_creds;
-} __packed;
-
#endif
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index a54cd5567ca2..6ec530168afb 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -19,11 +19,11 @@
#include <linux/wait.h>
#include <linux/delay.h>
-#include "mei_dev.h"
-#include "hw.h"
-#include "interface.h"
#include <linux/mei.h>
+#include "mei_dev.h"
+#include "client.h"
+
const char *mei_dev_state_str(int state)
{
#define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state
@@ -42,84 +42,20 @@ const char *mei_dev_state_str(int state)
#undef MEI_DEV_STATE
}
-
-
-/**
- * mei_io_list_flush - removes list entry belonging to cl.
- *
- * @list: An instance of our list structure
- * @cl: private data of the file object
- */
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
-{
- struct mei_cl_cb *pos;
- struct mei_cl_cb *next;
-
- list_for_each_entry_safe(pos, next, &list->list, list) {
- if (pos->cl) {
- if (mei_cl_cmp_id(cl, pos->cl))
- list_del(&pos->list);
- }
- }
-}
-/**
- * mei_cl_flush_queues - flushes queue lists belonging to cl.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- */
-int mei_cl_flush_queues(struct mei_cl *cl)
+void mei_device_init(struct mei_device *dev)
{
- if (!cl || !cl->dev)
- return -EINVAL;
-
- dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
- mei_io_list_flush(&cl->dev->read_list, cl);
- mei_io_list_flush(&cl->dev->write_list, cl);
- mei_io_list_flush(&cl->dev->write_waiting_list, cl);
- mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
- mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
- mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
- mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
- return 0;
-}
-
-
-
-/**
- * init_mei_device - allocates and initializes the mei device structure
- *
- * @pdev: The pci device structure
- *
- * returns The mei_device_device pointer on success, NULL on failure.
- */
-struct mei_device *mei_device_init(struct pci_dev *pdev)
-{
- struct mei_device *dev;
-
- dev = kzalloc(sizeof(struct mei_device), GFP_KERNEL);
- if (!dev)
- return NULL;
-
/* setup our list array */
INIT_LIST_HEAD(&dev->file_list);
- INIT_LIST_HEAD(&dev->wd_cl.link);
- INIT_LIST_HEAD(&dev->iamthif_cl.link);
mutex_init(&dev->device_lock);
init_waitqueue_head(&dev->wait_recvd_msg);
init_waitqueue_head(&dev->wait_stop_wd);
dev->dev_state = MEI_DEV_INITIALIZING;
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
mei_io_list_init(&dev->read_list);
mei_io_list_init(&dev->write_list);
mei_io_list_init(&dev->write_waiting_list);
mei_io_list_init(&dev->ctrl_wr_list);
mei_io_list_init(&dev->ctrl_rd_list);
- mei_io_list_init(&dev->amthif_cmd_list);
- mei_io_list_init(&dev->amthif_rd_complete_list);
- dev->pdev = pdev;
- return dev;
}
/**
@@ -131,101 +67,64 @@ struct mei_device *mei_device_init(struct pci_dev *pdev)
*/
int mei_hw_init(struct mei_device *dev)
{
- int err = 0;
- int ret;
+ int ret = 0;
mutex_lock(&dev->device_lock);
- dev->host_hw_state = mei_hcsr_read(dev);
- dev->me_hw_state = mei_mecsr_read(dev);
- dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, mestate = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
-
 /* acknowledge interrupt and stop interrupts */
- if ((dev->host_hw_state & H_IS) == H_IS)
- mei_reg_write(dev, H_CSR, dev->host_hw_state);
+ mei_clear_interrupts(dev);
- /* Doesn't change in runtime */
- dev->hbuf_depth = (dev->host_hw_state & H_CBD) >> 24;
+ mei_hw_config(dev);
dev->recvd_msg = false;
dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
mei_reset(dev, 1);
- dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
-
/* wait for ME to turn on ME_RDY */
if (!dev->recvd_msg) {
mutex_unlock(&dev->device_lock);
- err = wait_event_interruptible_timeout(dev->wait_recvd_msg,
+ ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
dev->recvd_msg,
mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
mutex_lock(&dev->device_lock);
}
- if (err <= 0 && !dev->recvd_msg) {
+ if (ret <= 0 && !dev->recvd_msg) {
dev->dev_state = MEI_DEV_DISABLED;
dev_dbg(&dev->pdev->dev,
"wait_event_interruptible_timeout failed"
"on wait for ME to turn on ME_RDY.\n");
- ret = -ENODEV;
- goto out;
+ goto err;
}
- if (!(((dev->host_hw_state & H_RDY) == H_RDY) &&
- ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA))) {
- dev->dev_state = MEI_DEV_DISABLED;
- dev_dbg(&dev->pdev->dev,
- "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
-
- if (!(dev->host_hw_state & H_RDY))
- dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");
- if (!(dev->me_hw_state & ME_RDY_HRA))
- dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n");
+ if (!mei_host_is_ready(dev)) {
+ dev_err(&dev->pdev->dev, "host is not ready.\n");
+ goto err;
+ }
- dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
- ret = -ENODEV;
- goto out;
+ if (!mei_hw_is_ready(dev)) {
+ dev_err(&dev->pdev->dev, "ME is not ready.\n");
+ goto err;
}
if (dev->version.major_version != HBM_MAJOR_VERSION ||
dev->version.minor_version != HBM_MINOR_VERSION) {
dev_dbg(&dev->pdev->dev, "MEI start failed.\n");
- ret = -ENODEV;
- goto out;
+ goto err;
}
dev->recvd_msg = false;
- dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
- dev_dbg(&dev->pdev->dev, "ME turn on ME_RDY and host turn on H_RDY.\n");
dev_dbg(&dev->pdev->dev, "link layer has been established.\n");
- dev_dbg(&dev->pdev->dev, "MEI start success.\n");
- ret = 0;
-out:
mutex_unlock(&dev->device_lock);
- return ret;
-}
-
-/**
- * mei_hw_reset - resets fw via mei csr register.
- *
- * @dev: the device structure
- * @interrupts_enabled: if interrupt should be enabled after reset.
- */
-static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
-{
- dev->host_hw_state |= (H_RST | H_IG);
-
- if (interrupts_enabled)
- mei_enable_interrupts(dev);
- else
- mei_disable_interrupts(dev);
+ return 0;
+err:
+ dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
+ dev->dev_state = MEI_DEV_DISABLED;
+ mutex_unlock(&dev->device_lock);
+ return -ENODEV;
}
/**
@@ -236,56 +135,34 @@ static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
*/
void mei_reset(struct mei_device *dev, int interrupts_enabled)
{
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
bool unexpected;
- if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
- dev->need_reset = true;
+ if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET)
return;
- }
unexpected = (dev->dev_state != MEI_DEV_INITIALIZING &&
dev->dev_state != MEI_DEV_DISABLED &&
dev->dev_state != MEI_DEV_POWER_DOWN &&
dev->dev_state != MEI_DEV_POWER_UP);
- dev->host_hw_state = mei_hcsr_read(dev);
-
- dev_dbg(&dev->pdev->dev, "before reset host_hw_state = 0x%08x.\n",
- dev->host_hw_state);
-
mei_hw_reset(dev, interrupts_enabled);
- dev->host_hw_state &= ~H_RST;
- dev->host_hw_state |= H_IG;
-
- mei_hcsr_set(dev);
-
- dev_dbg(&dev->pdev->dev, "currently saved host_hw_state = 0x%08x.\n",
- dev->host_hw_state);
-
- dev->need_reset = false;
if (dev->dev_state != MEI_DEV_INITIALIZING) {
if (dev->dev_state != MEI_DEV_DISABLED &&
dev->dev_state != MEI_DEV_POWER_DOWN)
dev->dev_state = MEI_DEV_RESETING;
- list_for_each_entry_safe(cl_pos,
- cl_next, &dev->file_list, link) {
- cl_pos->state = MEI_FILE_DISCONNECTED;
- cl_pos->mei_flow_ctrl_creds = 0;
- cl_pos->read_cb = NULL;
- cl_pos->timer_count = 0;
- }
+ mei_cl_all_disconnect(dev);
+
/* remove entry if already in list */
dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
- mei_me_cl_unlink(dev, &dev->wd_cl);
-
- mei_me_cl_unlink(dev, &dev->iamthif_cl);
+ mei_cl_unlink(&dev->wd_cl);
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
+ mei_cl_unlink(&dev->iamthif_cl);
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
mei_amthif_reset_params(dev);
memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
@@ -295,392 +172,17 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
dev->rd_msg_hdr = 0;
dev->wd_pending = false;
- /* update the state of the registers after reset */
- dev->host_hw_state = mei_hcsr_read(dev);
- dev->me_hw_state = mei_mecsr_read(dev);
-
- dev_dbg(&dev->pdev->dev, "after reset host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
-
if (unexpected)
dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
mei_dev_state_str(dev->dev_state));
- /* Wake up all readings so they can be interrupted */
- list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
- if (waitqueue_active(&cl_pos->rx_wait)) {
- dev_dbg(&dev->pdev->dev, "Waking up client!\n");
- wake_up_interruptible(&cl_pos->rx_wait);
- }
- }
- /* remove all waiting requests */
- list_for_each_entry_safe(cb_pos, cb_next, &dev->write_list.list, list) {
- list_del(&cb_pos->list);
- mei_io_cb_free(cb_pos);
- }
-}
-
-
-
-/**
- * host_start_message - mei host sends start message.
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_host_start_message(struct mei_device *dev)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_host_version_request *start_req;
- const size_t len = sizeof(struct hbm_host_version_request);
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- /* host start message */
- start_req = (struct hbm_host_version_request *)&dev->wr_msg_buf[1];
- memset(start_req, 0, len);
- start_req->hbm_cmd = HOST_START_REQ_CMD;
- start_req->host_version.major_version = HBM_MAJOR_VERSION;
- start_req->host_version.minor_version = HBM_MINOR_VERSION;
-
- dev->recvd_msg = false;
- if (mei_write_message(dev, mei_hdr, (unsigned char *)start_req, len)) {
- dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
- dev->dev_state = MEI_DEV_RESETING;
- mei_reset(dev, 1);
- }
- dev->init_clients_state = MEI_START_MESSAGE;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
- return ;
-}
-
-/**
- * host_enum_clients_message - host sends enumeration client request message.
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_host_enum_clients_message(struct mei_device *dev)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_host_enum_request *enum_req;
- const size_t len = sizeof(struct hbm_host_enum_request);
- /* enumerate clients */
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
- memset(enum_req, 0, sizeof(struct hbm_host_enum_request));
- enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
-
- if (mei_write_message(dev, mei_hdr, (unsigned char *)enum_req, len)) {
- dev->dev_state = MEI_DEV_RESETING;
- dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
- mei_reset(dev, 1);
- }
- dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
- return;
-}
-
-
-/**
- * allocate_me_clients_storage - allocates storage for me clients
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_allocate_me_clients_storage(struct mei_device *dev)
-{
- struct mei_me_client *clients;
- int b;
-
- /* count how many ME clients we have */
- for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
- dev->me_clients_num++;
-
- if (dev->me_clients_num <= 0)
- return ;
-
-
- if (dev->me_clients != NULL) {
- kfree(dev->me_clients);
- dev->me_clients = NULL;
- }
- dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
- dev->me_clients_num * sizeof(struct mei_me_client));
- /* allocate storage for ME clients representation */
- clients = kcalloc(dev->me_clients_num,
- sizeof(struct mei_me_client), GFP_KERNEL);
- if (!clients) {
- dev_dbg(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
- dev->dev_state = MEI_DEV_RESETING;
- mei_reset(dev, 1);
- return ;
- }
- dev->me_clients = clients;
- return ;
-}
-
-void mei_host_client_init(struct work_struct *work)
-{
- struct mei_device *dev = container_of(work,
- struct mei_device, init_work);
- struct mei_client_properties *client_props;
- int i;
-
- mutex_lock(&dev->device_lock);
-
- bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
- dev->open_handle_count = 0;
-
- /*
- * Reserving the first three client IDs
- * 0: Reserved for MEI Bus Message communications
- * 1: Reserved for Watchdog
- * 2: Reserved for AMTHI
- */
- bitmap_set(dev->host_clients_map, 0, 3);
-
- for (i = 0; i < dev->me_clients_num; i++) {
- client_props = &dev->me_clients[i].props;
-
- if (!uuid_le_cmp(client_props->protocol_name, mei_amthi_guid))
- mei_amthif_host_init(dev);
- else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
- mei_wd_host_init(dev);
- }
-
- dev->dev_state = MEI_DEV_ENABLED;
-
- mutex_unlock(&dev->device_lock);
-}
-
-int mei_host_client_enumerate(struct mei_device *dev)
-{
-
- struct mei_msg_hdr *mei_hdr;
- struct hbm_props_request *prop_req;
- const size_t len = sizeof(struct hbm_props_request);
- unsigned long next_client_index;
- u8 client_num;
-
-
- client_num = dev->me_client_presentation_num;
-
- next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
- dev->me_client_index);
-
- /* We got all client properties */
- if (next_client_index == MEI_CLIENTS_MAX) {
- schedule_work(&dev->init_work);
-
- return 0;
- }
-
- dev->me_clients[client_num].client_id = next_client_index;
- dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
- prop_req = (struct hbm_props_request *)&dev->wr_msg_buf[1];
-
- memset(prop_req, 0, sizeof(struct hbm_props_request));
-
-
- prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
- prop_req->address = next_client_index;
-
- if (mei_write_message(dev, mei_hdr, (unsigned char *) prop_req,
- mei_hdr->length)) {
- dev->dev_state = MEI_DEV_RESETING;
- dev_err(&dev->pdev->dev, "Properties request command failed\n");
- mei_reset(dev, 1);
-
- return -EIO;
- }
-
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
- dev->me_client_index = next_client_index;
-
- return 0;
-}
-
-/**
- * mei_init_file_private - initializes private file structure.
- *
- * @priv: private file structure to be initialized
- * @file: the file structure
- */
-void mei_cl_init(struct mei_cl *priv, struct mei_device *dev)
-{
- memset(priv, 0, sizeof(struct mei_cl));
- init_waitqueue_head(&priv->wait);
- init_waitqueue_head(&priv->rx_wait);
- init_waitqueue_head(&priv->tx_wait);
- INIT_LIST_HEAD(&priv->link);
- priv->reading_state = MEI_IDLE;
- priv->writing_state = MEI_IDLE;
- priv->dev = dev;
-}
-
-int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid)
-{
- int i, res = -ENOENT;
-
- for (i = 0; i < dev->me_clients_num; ++i)
- if (uuid_le_cmp(*cuuid,
- dev->me_clients[i].props.protocol_name) == 0) {
- res = i;
- break;
- }
-
- return res;
-}
-
-
-/**
- * mei_me_cl_link - create link between host and me clinet and add
- * me_cl to the list
- *
- * @dev: the device structure
- * @cl: link between me and host client assocated with opened file descriptor
- * @cuuid: uuid of ME client
- * @client_id: id of the host client
- *
- * returns ME client index if ME client
- * -EINVAL on incorrect values
- * -ENONET if client not found
- */
-int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
- const uuid_le *cuuid, u8 host_cl_id)
-{
- int i;
-
- if (!dev || !cl || !cuuid)
- return -EINVAL;
-
- /* check for valid client id */
- i = mei_me_cl_by_uuid(dev, cuuid);
- if (i >= 0) {
- cl->me_client_id = dev->me_clients[i].client_id;
- cl->state = MEI_FILE_CONNECTING;
- cl->host_client_id = host_cl_id;
-
- list_add_tail(&cl->link, &dev->file_list);
- return (u8)i;
- }
-
- return -ENOENT;
-}
-/**
- * mei_me_cl_unlink - remove me_cl from the list
- *
- * @dev: the device structure
- * @host_client_id: host client id to be removed
- */
-void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_cl *pos, *next;
- list_for_each_entry_safe(pos, next, &dev->file_list, link) {
- if (cl->host_client_id == pos->host_client_id) {
- dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
- pos->host_client_id, pos->me_client_id);
- list_del_init(&pos->link);
- break;
- }
- }
-}
+ /* wake up all waiting readers so they can be interrupted */
+ mei_cl_all_read_wakeup(dev);
-/**
- * mei_alloc_file_private - allocates a private file structure and sets it up.
- * @file: the file structure
- *
- * returns The allocated file or NULL on failure
- */
-struct mei_cl *mei_cl_allocate(struct mei_device *dev)
-{
- struct mei_cl *cl;
-
- cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
- if (!cl)
- return NULL;
-
- mei_cl_init(cl, dev);
-
- return cl;
+ /* remove all waiting requests */
+ mei_cl_all_write_clear(dev);
}
-/**
- * mei_disconnect_host_client - sends disconnect message to fw from host client.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_cl_cb *cb;
- int rets, err;
-
- if (!dev || !cl)
- return -ENODEV;
-
- if (cl->state != MEI_FILE_DISCONNECTING)
- return 0;
-
- cb = mei_io_cb_init(cl, NULL);
- if (!cb)
- return -ENOMEM;
-
- cb->fop_type = MEI_FOP_CLOSE;
- if (dev->mei_host_buffer_is_empty) {
- dev->mei_host_buffer_is_empty = false;
- if (mei_disconnect(dev, cl)) {
- rets = -ENODEV;
- dev_dbg(&dev->pdev->dev, "failed to call mei_disconnect.\n");
- goto free;
- }
- mdelay(10); /* Wait for hardware disconnection ready */
- list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
- } else {
- dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
-
- }
- mutex_unlock(&dev->device_lock);
-
- err = wait_event_timeout(dev->wait_recvd_msg,
- MEI_FILE_DISCONNECTED == cl->state,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
-
- mutex_lock(&dev->device_lock);
- if (MEI_FILE_DISCONNECTED == cl->state) {
- rets = 0;
- dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
- } else {
- rets = -ENODEV;
- if (MEI_FILE_DISCONNECTED != cl->state)
- dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
-
- if (err)
- dev_dbg(&dev->pdev->dev,
- "wait failed disconnect err=%08x\n",
- err);
-
- dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
- }
-
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
-free:
- mei_io_cb_free(cb);
- return rets;
-}
diff --git a/drivers/misc/mei/interface.c b/drivers/misc/mei/interface.c
deleted file mode 100644
index 8de854785960..000000000000
--- a/drivers/misc/mei/interface.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/pci.h>
-#include "mei_dev.h"
-#include <linux/mei.h>
-#include "interface.h"
-
-
-
-/**
- * mei_set_csr_register - writes H_CSR register to the mei device,
- * and ignores the H_IS bit for it is write-one-to-zero.
- *
- * @dev: the device structure
- */
-void mei_hcsr_set(struct mei_device *dev)
-{
- if ((dev->host_hw_state & H_IS) == H_IS)
- dev->host_hw_state &= ~H_IS;
- mei_reg_write(dev, H_CSR, dev->host_hw_state);
- dev->host_hw_state = mei_hcsr_read(dev);
-}
-
-/**
- * mei_csr_enable_interrupts - enables mei device interrupts
- *
- * @dev: the device structure
- */
-void mei_enable_interrupts(struct mei_device *dev)
-{
- dev->host_hw_state |= H_IE;
- mei_hcsr_set(dev);
-}
-
-/**
- * mei_csr_disable_interrupts - disables mei device interrupts
- *
- * @dev: the device structure
- */
-void mei_disable_interrupts(struct mei_device *dev)
-{
- dev->host_hw_state &= ~H_IE;
- mei_hcsr_set(dev);
-}
-
-/**
- * mei_hbuf_filled_slots - gets number of device filled buffer slots
- *
- * @device: the device structure
- *
- * returns number of filled slots
- */
-static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
-{
- char read_ptr, write_ptr;
-
- dev->host_hw_state = mei_hcsr_read(dev);
-
- read_ptr = (char) ((dev->host_hw_state & H_CBRP) >> 8);
- write_ptr = (char) ((dev->host_hw_state & H_CBWP) >> 16);
-
- return (unsigned char) (write_ptr - read_ptr);
-}
-
-/**
- * mei_hbuf_is_empty - checks if host buffer is empty.
- *
- * @dev: the device structure
- *
- * returns true if empty, false - otherwise.
- */
-bool mei_hbuf_is_empty(struct mei_device *dev)
-{
- return mei_hbuf_filled_slots(dev) == 0;
-}
-
-/**
- * mei_hbuf_empty_slots - counts write empty slots.
- *
- * @dev: the device structure
- *
- * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count
- */
-int mei_hbuf_empty_slots(struct mei_device *dev)
-{
- unsigned char filled_slots, empty_slots;
-
- filled_slots = mei_hbuf_filled_slots(dev);
- empty_slots = dev->hbuf_depth - filled_slots;
-
- /* check for overflow */
- if (filled_slots > dev->hbuf_depth)
- return -EOVERFLOW;
-
- return empty_slots;
-}
-
-/**
- * mei_write_message - writes a message to mei device.
- *
- * @dev: the device structure
- * @header: header of message
- * @write_buffer: message buffer will be written
- * @write_length: message size will be written
- *
- * This function returns -EIO if write has failed
- */
-int mei_write_message(struct mei_device *dev, struct mei_msg_hdr *header,
- unsigned char *buf, unsigned long length)
-{
- unsigned long rem, dw_cnt;
- u32 *reg_buf = (u32 *)buf;
- int i;
- int empty_slots;
-
-
- dev_dbg(&dev->pdev->dev,
- "mei_write_message header=%08x.\n",
- *((u32 *) header));
-
- empty_slots = mei_hbuf_empty_slots(dev);
- dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
-
- dw_cnt = mei_data2slots(length);
- if (empty_slots < 0 || dw_cnt > empty_slots)
- return -EIO;
-
- mei_reg_write(dev, H_CB_WW, *((u32 *) header));
-
- for (i = 0; i < length / 4; i++)
- mei_reg_write(dev, H_CB_WW, reg_buf[i]);
-
- rem = length & 0x3;
- if (rem > 0) {
- u32 reg = 0;
- memcpy(&reg, &buf[length - rem], rem);
- mei_reg_write(dev, H_CB_WW, reg);
- }
-
- dev->host_hw_state = mei_hcsr_read(dev);
- dev->host_hw_state |= H_IG;
- mei_hcsr_set(dev);
- dev->me_hw_state = mei_mecsr_read(dev);
- if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
- return -EIO;
-
- return 0;
-}
-
-/**
- * mei_count_full_read_slots - counts read full slots.
- *
- * @dev: the device structure
- *
- * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count
- */
-int mei_count_full_read_slots(struct mei_device *dev)
-{
- char read_ptr, write_ptr;
- unsigned char buffer_depth, filled_slots;
-
- dev->me_hw_state = mei_mecsr_read(dev);
- buffer_depth = (unsigned char)((dev->me_hw_state & ME_CBD_HRA) >> 24);
- read_ptr = (char) ((dev->me_hw_state & ME_CBRP_HRA) >> 8);
- write_ptr = (char) ((dev->me_hw_state & ME_CBWP_HRA) >> 16);
- filled_slots = (unsigned char) (write_ptr - read_ptr);
-
- /* check for overflow */
- if (filled_slots > buffer_depth)
- return -EOVERFLOW;
-
- dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
- return (int)filled_slots;
-}
-
-/**
- * mei_read_slots - reads a message from mei device.
- *
- * @dev: the device structure
- * @buffer: message buffer will be written
- * @buffer_length: message size will be read
- */
-void mei_read_slots(struct mei_device *dev, unsigned char *buffer,
- unsigned long buffer_length)
-{
- u32 *reg_buf = (u32 *)buffer;
-
- for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
- *reg_buf++ = mei_mecbrw_read(dev);
-
- if (buffer_length > 0) {
- u32 reg = mei_mecbrw_read(dev);
- memcpy(reg_buf, &reg, buffer_length);
- }
-
- dev->host_hw_state |= H_IG;
- mei_hcsr_set(dev);
-}
-
-/**
- * mei_flow_ctrl_creds - checks flow_control credentials.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
- * -ENOENT if mei_cl is not present
- * -EINVAL if single_recv_buf == 0
- */
-int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl)
-{
- int i;
-
- if (!dev->me_clients_num)
- return 0;
-
- if (cl->mei_flow_ctrl_creds > 0)
- return 1;
-
- for (i = 0; i < dev->me_clients_num; i++) {
- struct mei_me_client *me_cl = &dev->me_clients[i];
- if (me_cl->client_id == cl->me_client_id) {
- if (me_cl->mei_flow_ctrl_creds) {
- if (WARN_ON(me_cl->props.single_recv_buf == 0))
- return -EINVAL;
- return 1;
- } else {
- return 0;
- }
- }
- }
- return -ENOENT;
-}
-
-/**
- * mei_flow_ctrl_reduce - reduces flow_control.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- * @returns
- * 0 on success
- * -ENOENT when me client is not found
- * -EINVAL when ctrl credits are <= 0
- */
-int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
-{
- int i;
-
- if (!dev->me_clients_num)
- return -ENOENT;
-
- for (i = 0; i < dev->me_clients_num; i++) {
- struct mei_me_client *me_cl = &dev->me_clients[i];
- if (me_cl->client_id == cl->me_client_id) {
- if (me_cl->props.single_recv_buf != 0) {
- if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
- return -EINVAL;
- dev->me_clients[i].mei_flow_ctrl_creds--;
- } else {
- if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
- return -EINVAL;
- cl->mei_flow_ctrl_creds--;
- }
- return 0;
- }
- }
- return -ENOENT;
-}
-
-/**
- * mei_send_flow_control - sends flow control to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_flow_control *flow_ctrl;
- const size_t len = sizeof(struct hbm_flow_control);
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- flow_ctrl = (struct hbm_flow_control *)&dev->wr_msg_buf[1];
- memset(flow_ctrl, 0, len);
- flow_ctrl->hbm_cmd = MEI_FLOW_CONTROL_CMD;
- flow_ctrl->host_addr = cl->host_client_id;
- flow_ctrl->me_addr = cl->me_client_id;
- /* FIXME: reserved !? */
- memset(flow_ctrl->reserved, 0, sizeof(flow_ctrl->reserved));
- dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
- cl->host_client_id, cl->me_client_id);
-
- return mei_write_message(dev, mei_hdr,
- (unsigned char *) flow_ctrl, len);
-}
-
-/**
- * mei_other_client_is_connecting - checks if other
- * client with the same client id is connected.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * returns 1 if other client is connected, 0 - otherwise.
- */
-int mei_other_client_is_connecting(struct mei_device *dev,
- struct mei_cl *cl)
-{
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
-
- list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
- if ((cl_pos->state == MEI_FILE_CONNECTING) &&
- (cl_pos != cl) &&
- cl->me_client_id == cl_pos->me_client_id)
- return 1;
-
- }
- return 0;
-}
-
-/**
- * mei_disconnect - sends disconnect message to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_client_connect_request *req;
- const size_t len = sizeof(struct hbm_client_connect_request);
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- req = (struct hbm_client_connect_request *)&dev->wr_msg_buf[1];
- memset(req, 0, len);
- req->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
- req->host_addr = cl->host_client_id;
- req->me_addr = cl->me_client_id;
- req->reserved = 0;
-
- return mei_write_message(dev, mei_hdr, (unsigned char *)req, len);
-}
-
-/**
- * mei_connect - sends connect message to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_connect(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_client_connect_request *req;
- const size_t len = sizeof(struct hbm_client_connect_request);
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- req = (struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
- req->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
- req->host_addr = cl->host_client_id;
- req->me_addr = cl->me_client_id;
- req->reserved = 0;
-
- return mei_write_message(dev, mei_hdr, (unsigned char *) req, len);
-}
diff --git a/drivers/misc/mei/interface.h b/drivers/misc/mei/interface.h
deleted file mode 100644
index ec6c785a3961..000000000000
--- a/drivers/misc/mei/interface.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-
-
-#ifndef _MEI_INTERFACE_H_
-#define _MEI_INTERFACE_H_
-
-#include <linux/mei.h>
-#include "mei_dev.h"
-
-
-
-void mei_read_slots(struct mei_device *dev,
- unsigned char *buffer,
- unsigned long buffer_length);
-
-int mei_write_message(struct mei_device *dev,
- struct mei_msg_hdr *header,
- unsigned char *write_buffer,
- unsigned long write_length);
-
-bool mei_hbuf_is_empty(struct mei_device *dev);
-
-int mei_hbuf_empty_slots(struct mei_device *dev);
-
-static inline size_t mei_hbuf_max_data(const struct mei_device *dev)
-{
- return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
-}
-
-/* get slots (dwords) from a message length + header (bytes) */
-static inline unsigned char mei_data2slots(size_t length)
-{
- return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
-}
-
-int mei_count_full_read_slots(struct mei_device *dev);
-
-
-int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl);
-
-
-
-int mei_wd_send(struct mei_device *dev);
-int mei_wd_stop(struct mei_device *dev);
-int mei_wd_host_init(struct mei_device *dev);
-/*
- * mei_watchdog_register - Registering watchdog interface
- * once we got connection to the WD Client
- * @dev - mei device
- */
-void mei_watchdog_register(struct mei_device *dev);
-/*
- * mei_watchdog_unregister - Unregistering watchdog interface
- * @dev - mei device
- */
-void mei_watchdog_unregister(struct mei_device *dev);
-
-int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl);
-
-int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl);
-
-int mei_disconnect(struct mei_device *dev, struct mei_cl *cl);
-int mei_other_client_is_connecting(struct mei_device *dev, struct mei_cl *cl);
-int mei_connect(struct mei_device *dev, struct mei_cl *cl);
-
-#endif /* _MEI_INTERFACE_H_ */
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 04fa2134615e..3535b2676c97 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -21,41 +21,21 @@
#include <linux/fs.h>
#include <linux/jiffies.h>
-#include "mei_dev.h"
#include <linux/mei.h>
-#include "hw.h"
-#include "interface.h"
-
-
-/**
- * mei_interrupt_quick_handler - The ISR of the MEI device
- *
- * @irq: The irq number
- * @dev_id: pointer to the device structure
- *
- * returns irqreturn_t
- */
-irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id)
-{
- struct mei_device *dev = (struct mei_device *) dev_id;
- u32 csr_reg = mei_hcsr_read(dev);
-
- if ((csr_reg & H_IS) != H_IS)
- return IRQ_NONE;
- /* clear H_IS bit in H_CSR */
- mei_reg_write(dev, H_CSR, csr_reg);
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
- return IRQ_WAKE_THREAD;
-}
/**
- * _mei_cmpl - processes completed operation.
+ * mei_complete_handler - processes completed operation.
*
* @cl: private data of the file object.
* @cb_pos: callback block.
*/
-static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
+void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
{
if (cb_pos->fop_type == MEI_FOP_WRITE) {
mei_io_cb_free(cb_pos);
@@ -150,8 +130,8 @@ quit:
dev_dbg(&dev->pdev->dev, "message read\n");
if (!buffer) {
mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
- dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n",
- *(u32 *) dev->rd_msg_buf);
+ dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
+ MEI_HDR_PRM(mei_hdr));
}
return 0;
@@ -179,7 +159,7 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
- if (mei_disconnect(dev, cl)) {
+ if (mei_hbm_cl_disconnect_req(dev, cl)) {
cl->status = 0;
cb_pos->buf_idx = 0;
list_move_tail(&cb_pos->list, &cmpl_list->list);
@@ -195,440 +175,6 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
return 0;
}
-/**
- * is_treat_specially_client - checks if the message belongs
- * to the file private data.
- *
- * @cl: private data of the file object
- * @rs: connect response bus message
- *
- */
-static bool is_treat_specially_client(struct mei_cl *cl,
- struct hbm_client_connect_response *rs)
-{
-
- if (cl->host_client_id == rs->host_addr &&
- cl->me_client_id == rs->me_addr) {
- if (!rs->status) {
- cl->state = MEI_FILE_CONNECTED;
- cl->status = 0;
-
- } else {
- cl->state = MEI_FILE_DISCONNECTED;
- cl->status = -ENODEV;
- }
- cl->timer_count = 0;
-
- return true;
- }
- return false;
-}
-
-/**
- * mei_client_connect_response - connects to response irq routine
- *
- * @dev: the device structure
- * @rs: connect response bus message
- */
-static void mei_client_connect_response(struct mei_device *dev,
- struct hbm_client_connect_response *rs)
-{
-
- struct mei_cl *cl;
- struct mei_cl_cb *pos = NULL, *next = NULL;
-
- dev_dbg(&dev->pdev->dev,
- "connect_response:\n"
- "ME Client = %d\n"
- "Host Client = %d\n"
- "Status = %d\n",
- rs->me_addr,
- rs->host_addr,
- rs->status);
-
- /* if WD or iamthif client treat specially */
-
- if (is_treat_specially_client(&(dev->wd_cl), rs)) {
- dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
- mei_watchdog_register(dev);
-
- return;
- }
-
- if (is_treat_specially_client(&(dev->iamthif_cl), rs)) {
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- return;
- }
- list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
-
- cl = pos->cl;
- if (!cl) {
- list_del(&pos->list);
- return;
- }
- if (pos->fop_type == MEI_FOP_IOCTL) {
- if (is_treat_specially_client(cl, rs)) {
- list_del(&pos->list);
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
- }
- }
-}
-
-/**
- * mei_client_disconnect_response - disconnects from response irq routine
- *
- * @dev: the device structure
- * @rs: disconnect response bus message
- */
-static void mei_client_disconnect_response(struct mei_device *dev,
- struct hbm_client_connect_response *rs)
-{
- struct mei_cl *cl;
- struct mei_cl_cb *pos = NULL, *next = NULL;
-
- dev_dbg(&dev->pdev->dev,
- "disconnect_response:\n"
- "ME Client = %d\n"
- "Host Client = %d\n"
- "Status = %d\n",
- rs->me_addr,
- rs->host_addr,
- rs->status);
-
- list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
- cl = pos->cl;
-
- if (!cl) {
- list_del(&pos->list);
- return;
- }
-
- dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
- if (cl->host_client_id == rs->host_addr &&
- cl->me_client_id == rs->me_addr) {
-
- list_del(&pos->list);
- if (!rs->status)
- cl->state = MEI_FILE_DISCONNECTED;
-
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
- }
-}
-
-/**
- * same_flow_addr - tells if they have the same address.
- *
- * @file: private data of the file object.
- * @flow: flow control.
- *
- * returns !=0, same; 0,not.
- */
-static int same_flow_addr(struct mei_cl *cl, struct hbm_flow_control *flow)
-{
- return (cl->host_client_id == flow->host_addr &&
- cl->me_client_id == flow->me_addr);
-}
-
-/**
- * add_single_flow_creds - adds single buffer credentials.
- *
- * @file: private data ot the file object.
- * @flow: flow control.
- */
-static void add_single_flow_creds(struct mei_device *dev,
- struct hbm_flow_control *flow)
-{
- struct mei_me_client *client;
- int i;
-
- for (i = 0; i < dev->me_clients_num; i++) {
- client = &dev->me_clients[i];
- if (client && flow->me_addr == client->client_id) {
- if (client->props.single_recv_buf) {
- client->mei_flow_ctrl_creds++;
- dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
- flow->me_addr);
- dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
- client->mei_flow_ctrl_creds);
- } else {
- BUG(); /* error in flow control */
- }
- }
- }
-}
-
-/**
- * mei_client_flow_control_response - flow control response irq routine
- *
- * @dev: the device structure
- * @flow_control: flow control response bus message
- */
-static void mei_client_flow_control_response(struct mei_device *dev,
- struct hbm_flow_control *flow_control)
-{
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
-
- if (!flow_control->host_addr) {
- /* single receive buffer */
- add_single_flow_creds(dev, flow_control);
- } else {
- /* normal connection */
- list_for_each_entry_safe(cl_pos, cl_next,
- &dev->file_list, link) {
- dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in file_list\n");
-
- dev_dbg(&dev->pdev->dev, "cl of host client %d ME client %d.\n",
- cl_pos->host_client_id,
- cl_pos->me_client_id);
- dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
- flow_control->host_addr,
- flow_control->me_addr);
- if (same_flow_addr(cl_pos, flow_control)) {
- dev_dbg(&dev->pdev->dev, "recv ctrl msg for host %d ME %d.\n",
- flow_control->host_addr,
- flow_control->me_addr);
- cl_pos->mei_flow_ctrl_creds++;
- dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
- cl_pos->mei_flow_ctrl_creds);
- break;
- }
- }
- }
-}
-
-/**
- * same_disconn_addr - tells if they have the same address
- *
- * @file: private data of the file object.
- * @disconn: disconnection request.
- *
- * returns !=0, same; 0,not.
- */
-static int same_disconn_addr(struct mei_cl *cl,
- struct hbm_client_connect_request *req)
-{
- return (cl->host_client_id == req->host_addr &&
- cl->me_client_id == req->me_addr);
-}
-
-/**
- * mei_client_disconnect_request - disconnects from request irq routine
- *
- * @dev: the device structure.
- * @disconnect_req: disconnect request bus message.
- */
-static void mei_client_disconnect_request(struct mei_device *dev,
- struct hbm_client_connect_request *disconnect_req)
-{
- struct hbm_client_connect_response *disconnect_res;
- struct mei_cl *pos, *next;
- const size_t len = sizeof(struct hbm_client_connect_response);
-
- list_for_each_entry_safe(pos, next, &dev->file_list, link) {
- if (same_disconn_addr(pos, disconnect_req)) {
- dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
- disconnect_req->host_addr,
- disconnect_req->me_addr);
- pos->state = MEI_FILE_DISCONNECTED;
- pos->timer_count = 0;
- if (pos == &dev->wd_cl)
- dev->wd_pending = false;
- else if (pos == &dev->iamthif_cl)
- dev->iamthif_timer = 0;
-
- /* prepare disconnect response */
- (void)mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
- disconnect_res =
- (struct hbm_client_connect_response *)
- &dev->wr_ext_msg.data;
- disconnect_res->hbm_cmd = CLIENT_DISCONNECT_RES_CMD;
- disconnect_res->host_addr = pos->host_client_id;
- disconnect_res->me_addr = pos->me_client_id;
- disconnect_res->status = 0;
- break;
- }
- }
-}
-
-/**
- * mei_irq_thread_read_bus_message - bottom half read routine after ISR to
- * handle the read bus message cmd processing.
- *
- * @dev: the device structure
- * @mei_hdr: header of bus message
- */
-static void mei_irq_thread_read_bus_message(struct mei_device *dev,
- struct mei_msg_hdr *mei_hdr)
-{
- struct mei_bus_message *mei_msg;
- struct mei_me_client *me_client;
- struct hbm_host_version_response *version_res;
- struct hbm_client_connect_response *connect_res;
- struct hbm_client_connect_response *disconnect_res;
- struct hbm_client_connect_request *disconnect_req;
- struct hbm_flow_control *flow_control;
- struct hbm_props_response *props_res;
- struct hbm_host_enum_response *enum_res;
- struct hbm_host_stop_request *stop_req;
-
- /* read the message to our buffer */
- BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
- mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
- mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
-
- switch (mei_msg->hbm_cmd) {
- case HOST_START_RES_CMD:
- version_res = (struct hbm_host_version_response *) mei_msg;
- if (version_res->host_version_supported) {
- dev->version.major_version = HBM_MAJOR_VERSION;
- dev->version.minor_version = HBM_MINOR_VERSION;
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
- dev->init_clients_state == MEI_START_MESSAGE) {
- dev->init_clients_timer = 0;
- mei_host_enum_clients_message(dev);
- } else {
- dev->recvd_msg = false;
- dev_dbg(&dev->pdev->dev, "IMEI reset due to received host start response bus message.\n");
- mei_reset(dev, 1);
- return;
- }
- } else {
- u32 *buf = dev->wr_msg_buf;
- const size_t len = sizeof(struct hbm_host_stop_request);
-
- dev->version = version_res->me_max_version;
-
- /* send stop message */
- mei_hdr = mei_hbm_hdr(&buf[0], len);
- stop_req = (struct hbm_host_stop_request *)&buf[1];
- memset(stop_req, 0, len);
- stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
- stop_req->reason = DRIVER_STOP_REQUEST;
-
- mei_write_message(dev, mei_hdr,
- (unsigned char *)stop_req, len);
- dev_dbg(&dev->pdev->dev, "version mismatch.\n");
- return;
- }
-
- dev->recvd_msg = true;
- dev_dbg(&dev->pdev->dev, "host start response message received.\n");
- break;
-
- case CLIENT_CONNECT_RES_CMD:
- connect_res = (struct hbm_client_connect_response *) mei_msg;
- mei_client_connect_response(dev, connect_res);
- dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
- wake_up(&dev->wait_recvd_msg);
- break;
-
- case CLIENT_DISCONNECT_RES_CMD:
- disconnect_res = (struct hbm_client_connect_response *) mei_msg;
- mei_client_disconnect_response(dev, disconnect_res);
- dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
- wake_up(&dev->wait_recvd_msg);
- break;
-
- case MEI_FLOW_CONTROL_CMD:
- flow_control = (struct hbm_flow_control *) mei_msg;
- mei_client_flow_control_response(dev, flow_control);
- dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
- break;
-
- case HOST_CLIENT_PROPERTIES_RES_CMD:
- props_res = (struct hbm_props_response *)mei_msg;
- me_client = &dev->me_clients[dev->me_client_presentation_num];
-
- if (props_res->status || !dev->me_clients) {
- dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
- mei_reset(dev, 1);
- return;
- }
-
- if (me_client->client_id != props_res->address) {
- dev_err(&dev->pdev->dev,
- "Host client properties reply mismatch\n");
- mei_reset(dev, 1);
-
- return;
- }
-
- if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
- dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
- dev_err(&dev->pdev->dev,
- "Unexpected client properties reply\n");
- mei_reset(dev, 1);
-
- return;
- }
-
- me_client->props = props_res->client_properties;
- dev->me_client_index++;
- dev->me_client_presentation_num++;
-
- mei_host_client_enumerate(dev);
-
- break;
-
- case HOST_ENUM_RES_CMD:
- enum_res = (struct hbm_host_enum_response *) mei_msg;
- memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
- dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
- dev->init_clients_timer = 0;
- dev->me_client_presentation_num = 0;
- dev->me_client_index = 0;
- mei_allocate_me_clients_storage(dev);
- dev->init_clients_state =
- MEI_CLIENT_PROPERTIES_MESSAGE;
-
- mei_host_client_enumerate(dev);
- } else {
- dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
- mei_reset(dev, 1);
- return;
- }
- break;
-
- case HOST_STOP_RES_CMD:
- dev->dev_state = MEI_DEV_DISABLED;
- dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
- mei_reset(dev, 1);
- break;
-
- case CLIENT_DISCONNECT_REQ_CMD:
- /* search for client */
- disconnect_req = (struct hbm_client_connect_request *)mei_msg;
- mei_client_disconnect_request(dev, disconnect_req);
- break;
-
- case ME_STOP_REQ_CMD:
- {
- /* prepare stop request: sent in next interrupt event */
-
- const size_t len = sizeof(struct hbm_host_stop_request);
-
- mei_hdr = mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
- stop_req = (struct hbm_host_stop_request *)&dev->wr_ext_msg.data;
- memset(stop_req, 0, len);
- stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
- stop_req->reason = DRIVER_STOP_REQUEST;
- break;
- }
- default:
- BUG();
- break;
-
- }
-}
-
/**
* _mei_hb_read - processes read related operation.
@@ -655,7 +201,7 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
*slots -= mei_data2slots(sizeof(struct hbm_flow_control));
- if (mei_send_flow_control(dev, cl)) {
+ if (mei_hbm_cl_flow_control_req(dev, cl)) {
cl->status = -ENODEV;
cb_pos->buf_idx = 0;
list_move_tail(&cb_pos->list, &cmpl_list->list);
@@ -691,8 +237,8 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
}
cl->state = MEI_FILE_CONNECTING;
- *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
- if (mei_connect(dev, cl)) {
+ *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
+ if (mei_hbm_cl_connect_req(dev, cl)) {
cl->status = -ENODEV;
cb_pos->buf_idx = 0;
list_del(&cb_pos->list);
@@ -717,25 +263,24 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
- struct mei_msg_hdr *mei_hdr;
+ struct mei_msg_hdr mei_hdr;
struct mei_cl *cl = cb->cl;
size_t len = cb->request_buffer.size - cb->buf_idx;
size_t msg_slots = mei_data2slots(len);
- mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
- mei_hdr->host_addr = cl->host_client_id;
- mei_hdr->me_addr = cl->me_client_id;
- mei_hdr->reserved = 0;
+ mei_hdr.host_addr = cl->host_client_id;
+ mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.reserved = 0;
if (*slots >= msg_slots) {
- mei_hdr->length = len;
- mei_hdr->msg_complete = 1;
+ mei_hdr.length = len;
+ mei_hdr.msg_complete = 1;
/* Split the message only if we can write the whole host buffer */
} else if (*slots == dev->hbuf_depth) {
msg_slots = *slots;
len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
- mei_hdr->length = len;
- mei_hdr->msg_complete = 0;
+ mei_hdr.length = len;
+ mei_hdr.msg_complete = 0;
} else {
/* wait for next time the host buffer is empty */
return 0;
@@ -743,23 +288,22 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
cb->request_buffer.size, cb->buf_idx);
- dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
- mei_hdr->length, mei_hdr->msg_complete);
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
*slots -= msg_slots;
- if (mei_write_message(dev, mei_hdr,
- cb->request_buffer.data + cb->buf_idx, len)) {
+ if (mei_write_message(dev, &mei_hdr,
+ cb->request_buffer.data + cb->buf_idx)) {
cl->status = -ENODEV;
list_move_tail(&cb->list, &cmpl_list->list);
return -ENODEV;
}
- if (mei_flow_ctrl_reduce(dev, cl))
+ if (mei_cl_flow_ctrl_reduce(cl))
return -ENODEV;
cl->status = 0;
- cb->buf_idx += mei_hdr->length;
- if (mei_hdr->msg_complete)
+ cb->buf_idx += mei_hdr.length;
+ if (mei_hdr.msg_complete)
list_move_tail(&cb->list, &dev->write_waiting_list.list);
return 0;
@@ -769,15 +313,14 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
* mei_irq_thread_read_handler - bottom half read routine after ISR to
* handle the read processing.
*
- * @cmpl_list: An instance of our list structure
* @dev: the device structure
+ * @cmpl_list: An instance of our list structure
* @slots: slots to read.
*
* returns 0 on success, <0 on failure.
*/
-static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
- struct mei_device *dev,
- s32 *slots)
+int mei_irq_read_handler(struct mei_device *dev,
+ struct mei_cl_cb *cmpl_list, s32 *slots)
{
struct mei_msg_hdr *mei_hdr;
struct mei_cl *cl_pos = NULL;
@@ -785,13 +328,13 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
int ret = 0;
if (!dev->rd_msg_hdr) {
- dev->rd_msg_hdr = mei_mecbrw_read(dev);
+ dev->rd_msg_hdr = mei_read_hdr(dev);
dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
(*slots)--;
dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
}
mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
- dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n", mei_hdr->length);
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
if (mei_hdr->reserved || !dev->rd_msg_hdr) {
dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
@@ -830,19 +373,18 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
/* decide where to read the message too */
if (!mei_hdr->host_addr) {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
- mei_irq_thread_read_bus_message(dev, mei_hdr);
+ mei_hbm_dispatch(dev, mei_hdr);
dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
(MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
(dev->iamthif_state == MEI_IAMTHIF_READING)) {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
- dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n",
- mei_hdr->length);
+
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr);
if (ret)
goto end;
-
} else {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n");
ret = mei_irq_thread_read_client_message(cmpl_list,
@@ -869,15 +411,15 @@ end:
/**
- * mei_irq_thread_write_handler - bottom half write routine after
- * ISR to handle the write processing.
+ * mei_irq_write_handler - dispatch write requests
+ * after irq received
*
* @dev: the device structure
* @cmpl_list: An instance of our list structure
*
* returns 0 on success, <0 on failure.
*/
-static int mei_irq_thread_write_handler(struct mei_device *dev,
+int mei_irq_write_handler(struct mei_device *dev,
struct mei_cl_cb *cmpl_list)
{
@@ -887,7 +429,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
s32 slots;
int ret;
- if (!mei_hbuf_is_empty(dev)) {
+ if (!mei_hbuf_is_ready(dev)) {
dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
return 0;
}
@@ -930,16 +472,16 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
if (dev->wr_ext_msg.hdr.length) {
mei_write_message(dev, &dev->wr_ext_msg.hdr,
- dev->wr_ext_msg.data, dev->wr_ext_msg.hdr.length);
+ dev->wr_ext_msg.data);
slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
dev->wr_ext_msg.hdr.length = 0;
}
if (dev->dev_state == MEI_DEV_ENABLED) {
if (dev->wd_pending &&
- mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
+ mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
if (mei_wd_send(dev))
dev_dbg(&dev->pdev->dev, "wd send failed.\n");
- else if (mei_flow_ctrl_reduce(dev, &dev->wd_cl))
+ else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
return -ENODEV;
dev->wd_pending = false;
@@ -978,7 +520,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
break;
case MEI_FOP_IOCTL:
/* connect message */
- if (mei_other_client_is_connecting(dev, cl))
+ if (mei_cl_is_other_connecting(cl))
continue;
ret = _mei_irq_thread_ioctl(dev, &slots, pos,
cl, cmpl_list);
@@ -998,7 +540,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
cl = pos->cl;
if (cl == NULL)
continue;
- if (mei_flow_ctrl_creds(dev, cl) <= 0) {
+ if (mei_cl_flow_ctrl_creds(cl) <= 0) {
dev_dbg(&dev->pdev->dev,
"No flow control credentials for client %d, not sending.\n",
cl->host_client_id);
@@ -1123,115 +665,3 @@ out:
mutex_unlock(&dev->device_lock);
}
-/**
- * mei_interrupt_thread_handler - function called after ISR to handle the interrupt
- * processing.
- *
- * @irq: The irq number
- * @dev_id: pointer to the device structure
- *
- * returns irqreturn_t
- *
- */
-irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
-{
- struct mei_device *dev = (struct mei_device *) dev_id;
- struct mei_cl_cb complete_list;
- struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
- struct mei_cl *cl;
- s32 slots;
- int rets;
- bool bus_message_received;
-
-
- dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
- /* initialize our complete list */
- mutex_lock(&dev->device_lock);
- mei_io_list_init(&complete_list);
- dev->host_hw_state = mei_hcsr_read(dev);
-
- /* Ack the interrupt here
- * In case of MSI we don't go through the quick handler */
- if (pci_dev_msi_enabled(dev->pdev))
- mei_reg_write(dev, H_CSR, dev->host_hw_state);
-
- dev->me_hw_state = mei_mecsr_read(dev);
-
- /* check if ME wants a reset */
- if ((dev->me_hw_state & ME_RDY_HRA) == 0 &&
- dev->dev_state != MEI_DEV_RESETING &&
- dev->dev_state != MEI_DEV_INITIALIZING) {
- dev_dbg(&dev->pdev->dev, "FW not ready.\n");
- mei_reset(dev, 1);
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
- }
-
- /* check if we need to start the dev */
- if ((dev->host_hw_state & H_RDY) == 0) {
- if ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA) {
- dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
- dev->host_hw_state |= (H_IE | H_IG | H_RDY);
- mei_hcsr_set(dev);
- dev->dev_state = MEI_DEV_INIT_CLIENTS;
- dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
- /* link is established
- * start sending messages.
- */
- mei_host_start_message(dev);
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
- } else {
- dev_dbg(&dev->pdev->dev, "FW not ready.\n");
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
- }
- }
- /* check slots available for reading */
- slots = mei_count_full_read_slots(dev);
- while (slots > 0) {
- /* we have urgent data to send so break the read */
- if (dev->wr_ext_msg.hdr.length)
- break;
- dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
- dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_handler.\n");
- rets = mei_irq_thread_read_handler(&complete_list, dev, &slots);
- if (rets)
- goto end;
- }
- rets = mei_irq_thread_write_handler(dev, &complete_list);
-end:
- dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
- dev->host_hw_state = mei_hcsr_read(dev);
- dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
-
- bus_message_received = false;
- if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
- dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
- bus_message_received = true;
- }
- mutex_unlock(&dev->device_lock);
- if (bus_message_received) {
- dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
- wake_up_interruptible(&dev->wait_recvd_msg);
- bus_message_received = false;
- }
- if (list_empty(&complete_list.list))
- return IRQ_HANDLED;
-
-
- list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
- cl = cb_pos->cl;
- list_del(&cb_pos->list);
- if (cl) {
- if (cl != &dev->iamthif_cl) {
- dev_dbg(&dev->pdev->dev, "completing call back.\n");
- _mei_cmpl(cl, cb_pos);
- cb_pos = NULL;
- } else if (cl == &dev->iamthif_cl) {
- mei_amthif_complete(dev, cb_pos);
- }
- }
- }
- return IRQ_HANDLED;
-}
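The bottom half removed above is folded into the hardware-specific layer; the generic pieces survive as the exported mei_irq_read_handler()/mei_irq_write_handler() declared in mei_dev.h below. A rough sketch of how a backend's threaded handler might drive them (names and locking mirror the deleted code; this is an illustration, not the hw-me.c implementation):

	static irqreturn_t example_irq_thread(int irq, void *dev_id)
	{
		struct mei_device *dev = dev_id;
		struct mei_cl_cb complete_list;
		s32 slots;
		int rets = 0;

		mutex_lock(&dev->device_lock);
		INIT_LIST_HEAD(&complete_list.list);

		/* drain the read buffer unless an urgent hbm response is queued */
		slots = mei_count_full_read_slots(dev);
		while (slots > 0 && !dev->wr_ext_msg.hdr.length) {
			rets = mei_irq_read_handler(dev, &complete_list, &slots);
			if (rets)
				break;
		}
		if (!rets)
			rets = mei_irq_write_handler(dev, &complete_list);

		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
		mutex_unlock(&dev->device_lock);
		/* completed callbacks would then go to mei_irq_complete_handler() */
		return IRQ_HANDLED;
	}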
diff --git a/drivers/misc/mei/iorw.c b/drivers/misc/mei/iorw.c
deleted file mode 100644
index eb93a1b53b9b..000000000000
--- a/drivers/misc/mei/iorw.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/aio.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ioctl.h>
-#include <linux/cdev.h>
-#include <linux/list.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/uuid.h>
-#include <linux/jiffies.h>
-#include <linux/uaccess.h>
-
-
-#include "mei_dev.h"
-#include "hw.h"
-#include <linux/mei.h>
-#include "interface.h"
-
-/**
- * mei_io_cb_free - free mei_cb_private related memory
- *
- * @cb: mei callback struct
- */
-void mei_io_cb_free(struct mei_cl_cb *cb)
-{
- if (cb == NULL)
- return;
-
- kfree(cb->request_buffer.data);
- kfree(cb->response_buffer.data);
- kfree(cb);
-}
-/**
- * mei_io_cb_init - allocate and initialize io callback
- *
- * @cl - mei client
- * @file: pointer to file structure
- *
- * returns mei_cl_cb pointer or NULL;
- */
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
-{
- struct mei_cl_cb *cb;
-
- cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
- if (!cb)
- return NULL;
-
- mei_io_list_init(cb);
-
- cb->file_object = fp;
- cb->cl = cl;
- cb->buf_idx = 0;
- return cb;
-}
-
-
-/**
- * mei_io_cb_alloc_req_buf - allocate request buffer
- *
- * @cb - io callback structure
- * @size: size of the buffer
- *
- * returns 0 on success
- * -EINVAL if cb is NULL
- * -ENOMEM if allocation failed
- */
-int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
-{
- if (!cb)
- return -EINVAL;
-
- if (length == 0)
- return 0;
-
- cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
- if (!cb->request_buffer.data)
- return -ENOMEM;
- cb->request_buffer.size = length;
- return 0;
-}
-/**
- * mei_io_cb_alloc_req_buf - allocate respose buffer
- *
- * @cb - io callback structure
- * @size: size of the buffer
- *
- * returns 0 on success
- * -EINVAL if cb is NULL
- * -ENOMEM if allocation failed
- */
-int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
-{
- if (!cb)
- return -EINVAL;
-
- if (length == 0)
- return 0;
-
- cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
- if (!cb->response_buffer.data)
- return -ENOMEM;
- cb->response_buffer.size = length;
- return 0;
-}
-
-
-/**
- * mei_me_cl_by_id return index to me_clients for client_id
- *
- * @dev: the device structure
- * @client_id: me client id
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns index on success, -ENOENT on failure.
- */
-
-int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
-{
- int i;
- for (i = 0; i < dev->me_clients_num; i++)
- if (dev->me_clients[i].client_id == client_id)
- break;
- if (WARN_ON(dev->me_clients[i].client_id != client_id))
- return -ENOENT;
-
- if (i == dev->me_clients_num)
- return -ENOENT;
-
- return i;
-}
-
-/**
- * mei_ioctl_connect_client - the connect to fw client IOCTL function
- *
- * @dev: the device structure
- * @data: IOCTL connect data, input and output parameters
- * @file: private data of the file object
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_ioctl_connect_client(struct file *file,
- struct mei_connect_client_data *data)
-{
- struct mei_device *dev;
- struct mei_cl_cb *cb;
- struct mei_client *client;
- struct mei_cl *cl;
- long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
- int i;
- int err;
- int rets;
-
- cl = file->private_data;
- if (WARN_ON(!cl || !cl->dev))
- return -ENODEV;
-
- dev = cl->dev;
-
- dev_dbg(&dev->pdev->dev, "mei_ioctl_connect_client() Entry\n");
-
- /* buffered ioctl cb */
- cb = mei_io_cb_init(cl, file);
- if (!cb) {
- rets = -ENOMEM;
- goto end;
- }
-
- cb->fop_type = MEI_FOP_IOCTL;
-
- if (dev->dev_state != MEI_DEV_ENABLED) {
- rets = -ENODEV;
- goto end;
- }
- if (cl->state != MEI_FILE_INITIALIZING &&
- cl->state != MEI_FILE_DISCONNECTED) {
- rets = -EBUSY;
- goto end;
- }
-
- /* find ME client we're trying to connect to */
- i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
- if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
- cl->me_client_id = dev->me_clients[i].client_id;
- cl->state = MEI_FILE_CONNECTING;
- }
-
- dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
- cl->me_client_id);
- dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
- dev->me_clients[i].props.protocol_version);
- dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
- dev->me_clients[i].props.max_msg_length);
-
- /* if we're connecting to amthi client then we will use the
- * existing connection
- */
- if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) {
- dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
- if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
- rets = -ENODEV;
- goto end;
- }
- clear_bit(cl->host_client_id, dev->host_clients_map);
- mei_me_cl_unlink(dev, cl);
-
- kfree(cl);
- cl = NULL;
- file->private_data = &dev->iamthif_cl;
-
- client = &data->out_client_properties;
- client->max_msg_length =
- dev->me_clients[i].props.max_msg_length;
- client->protocol_version =
- dev->me_clients[i].props.protocol_version;
- rets = dev->iamthif_cl.status;
-
- goto end;
- }
-
- if (cl->state != MEI_FILE_CONNECTING) {
- rets = -ENODEV;
- goto end;
- }
-
-
- /* prepare the output buffer */
- client = &data->out_client_properties;
- client->max_msg_length = dev->me_clients[i].props.max_msg_length;
- client->protocol_version = dev->me_clients[i].props.protocol_version;
- dev_dbg(&dev->pdev->dev, "Can connect?\n");
- if (dev->mei_host_buffer_is_empty
- && !mei_other_client_is_connecting(dev, cl)) {
- dev_dbg(&dev->pdev->dev, "Sending Connect Message\n");
- dev->mei_host_buffer_is_empty = false;
- if (mei_connect(dev, cl)) {
- dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n");
- rets = -ENODEV;
- goto end;
- } else {
- dev_dbg(&dev->pdev->dev, "Sending connect message - succeeded\n");
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
- }
-
-
- } else {
- dev_dbg(&dev->pdev->dev, "Queuing the connect request due to device busy\n");
- dev_dbg(&dev->pdev->dev, "add connect cb to control write list.\n");
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
- }
- mutex_unlock(&dev->device_lock);
- err = wait_event_timeout(dev->wait_recvd_msg,
- (MEI_FILE_CONNECTED == cl->state ||
- MEI_FILE_DISCONNECTED == cl->state), timeout);
-
- mutex_lock(&dev->device_lock);
- if (MEI_FILE_CONNECTED == cl->state) {
- dev_dbg(&dev->pdev->dev, "successfully connected to FW client.\n");
- rets = cl->status;
- goto end;
- } else {
- dev_dbg(&dev->pdev->dev, "failed to connect to FW client.cl->state = %d.\n",
- cl->state);
- if (!err) {
- dev_dbg(&dev->pdev->dev,
- "wait_event_interruptible_timeout failed on client"
- " connect message fw response message.\n");
- }
- rets = -EFAULT;
-
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
- goto end;
- }
- rets = 0;
-end:
- dev_dbg(&dev->pdev->dev, "free connect cb memory.");
- mei_io_cb_free(cb);
- return rets;
-}
-
-/**
- * mei_start_read - the start read client message function.
- *
- * @dev: the device structure
- * @if_num: minor number
- * @cl: private data of the file object
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_cl_cb *cb;
- int rets;
- int i;
-
- if (cl->state != MEI_FILE_CONNECTED)
- return -ENODEV;
-
- if (dev->dev_state != MEI_DEV_ENABLED)
- return -ENODEV;
-
- if (cl->read_pending || cl->read_cb) {
- dev_dbg(&dev->pdev->dev, "read is pending.\n");
- return -EBUSY;
- }
- i = mei_me_cl_by_id(dev, cl->me_client_id);
- if (i < 0) {
- dev_err(&dev->pdev->dev, "no such me client %d\n",
- cl->me_client_id);
- return -ENODEV;
- }
-
- cb = mei_io_cb_init(cl, NULL);
- if (!cb)
- return -ENOMEM;
-
- rets = mei_io_cb_alloc_resp_buf(cb,
- dev->me_clients[i].props.max_msg_length);
- if (rets)
- goto err;
-
- cb->fop_type = MEI_FOP_READ;
- cl->read_cb = cb;
- if (dev->mei_host_buffer_is_empty) {
- dev->mei_host_buffer_is_empty = false;
- if (mei_send_flow_control(dev, cl)) {
- rets = -ENODEV;
- goto err;
- }
- list_add_tail(&cb->list, &dev->read_list.list);
- } else {
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
- }
- return rets;
-err:
- mei_io_cb_free(cb);
- return rets;
-}
-
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 43fb52ff98ad..903f809b21f7 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -37,79 +37,11 @@
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
-#include "mei_dev.h"
#include <linux/mei.h>
-#include "interface.h"
-
-/* AMT device is a singleton on the platform */
-static struct pci_dev *mei_pdev;
-
-/* mei_pci_tbl - PCI Device ID Table */
-static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
-
- /* required last entry */
- {0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
-static DEFINE_MUTEX(mei_mutex);
-
-
-/**
- * find_read_list_entry - find read list entry
- *
- * @dev: device structure
- * @file: pointer to file structure
- *
- * returns cb on success, NULL on error
- */
-static struct mei_cl_cb *find_read_list_entry(
- struct mei_device *dev,
- struct mei_cl *cl)
-{
- struct mei_cl_cb *pos = NULL;
- struct mei_cl_cb *next = NULL;
-
- dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
- list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
- if (mei_cl_cmp_id(cl, pos->cl))
- return pos;
- return NULL;
-}
+#include "mei_dev.h"
+#include "hw-me.h"
+#include "client.h"
/**
* mei_open - the open function
@@ -121,16 +53,20 @@ static struct mei_cl_cb *find_read_list_entry(
*/
static int mei_open(struct inode *inode, struct file *file)
{
+ struct miscdevice *misc = file->private_data;
+ struct pci_dev *pdev;
struct mei_cl *cl;
struct mei_device *dev;
- unsigned long cl_id;
+
int err;
err = -ENODEV;
- if (!mei_pdev)
+ if (!misc->parent)
goto out;
- dev = pci_get_drvdata(mei_pdev);
+ pdev = container_of(misc->parent, struct pci_dev, dev);
+
+ dev = pci_get_drvdata(pdev);
if (!dev)
goto out;
@@ -153,24 +89,9 @@ static int mei_open(struct inode *inode, struct file *file)
goto out_unlock;
}
- cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
- if (cl_id >= MEI_CLIENTS_MAX) {
- dev_err(&dev->pdev->dev, "client_id exceded %d",
- MEI_CLIENTS_MAX) ;
+ err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
+ if (err)
goto out_unlock;
- }
-
- cl->host_client_id = cl_id;
-
- dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);
-
- dev->open_handle_count++;
-
- list_add_tail(&cl->link, &dev->file_list);
-
- set_bit(cl->host_client_id, dev->host_clients_map);
- cl->state = MEI_FILE_INITIALIZING;
- cl->sm_state = 0;
file->private_data = cl;
mutex_unlock(&dev->device_lock);
@@ -216,7 +137,7 @@ static int mei_release(struct inode *inode, struct file *file)
"ME client = %d\n",
cl->host_client_id,
cl->me_client_id);
- rets = mei_disconnect_host_client(dev, cl);
+ rets = mei_cl_disconnect(cl);
}
mei_cl_flush_queues(cl);
dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
@@ -227,12 +148,13 @@ static int mei_release(struct inode *inode, struct file *file)
clear_bit(cl->host_client_id, dev->host_clients_map);
dev->open_handle_count--;
}
- mei_me_cl_unlink(dev, cl);
+ mei_cl_unlink(cl);
+
/* free read cb */
cb = NULL;
if (cl->read_cb) {
- cb = find_read_list_entry(dev, cl);
+ cb = mei_cl_find_read_cb(cl);
/* Remove entry from read list */
if (cb)
list_del(&cb->list);
@@ -322,7 +244,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto out;
}
- err = mei_start_read(dev, cl);
+ err = mei_cl_read_start(cl);
if (err && err != -EBUSY) {
dev_dbg(&dev->pdev->dev,
"mei start read failure with status = %d\n", err);
@@ -393,14 +315,13 @@ copy_buffer:
goto out;
free:
- cb_pos = find_read_list_entry(dev, cl);
+ cb_pos = mei_cl_find_read_cb(cl);
/* Remove entry from read list */
if (cb_pos)
list_del(&cb_pos->list);
mei_io_cb_free(cb);
cl->reading_state = MEI_IDLE;
cl->read_cb = NULL;
- cl->read_pending = 0;
out:
dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
mutex_unlock(&dev->device_lock);
@@ -475,16 +396,15 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
/* free entry used in read */
if (cl->reading_state == MEI_READ_COMPLETE) {
*offset = 0;
- write_cb = find_read_list_entry(dev, cl);
+ write_cb = mei_cl_find_read_cb(cl);
if (write_cb) {
list_del(&write_cb->list);
mei_io_cb_free(write_cb);
write_cb = NULL;
cl->reading_state = MEI_IDLE;
cl->read_cb = NULL;
- cl->read_pending = 0;
}
- } else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
+ } else if (cl->reading_state == MEI_IDLE)
*offset = 0;
@@ -519,7 +439,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
if (rets) {
dev_err(&dev->pdev->dev,
- "amthi write failed with status = %d\n", rets);
+ "amthif write failed with status = %d\n", rets);
goto err;
}
mutex_unlock(&dev->device_lock);
@@ -530,20 +450,20 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
cl->host_client_id, cl->me_client_id);
- rets = mei_flow_ctrl_creds(dev, cl);
+ rets = mei_cl_flow_ctrl_creds(cl);
if (rets < 0)
goto err;
- if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
+ if (rets == 0 || !dev->hbuf_is_ready) {
write_cb->buf_idx = 0;
mei_hdr.msg_complete = 0;
cl->writing_state = MEI_WRITING;
goto out;
}
- dev->mei_host_buffer_is_empty = false;
- if (length > mei_hbuf_max_data(dev)) {
- mei_hdr.length = mei_hbuf_max_data(dev);
+ dev->hbuf_is_ready = false;
+ if (length > mei_hbuf_max_len(dev)) {
+ mei_hdr.length = mei_hbuf_max_len(dev);
mei_hdr.msg_complete = 0;
} else {
mei_hdr.length = length;
@@ -552,10 +472,10 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
- dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
- *((u32 *) &mei_hdr));
- if (mei_write_message(dev, &mei_hdr,
- write_cb->request_buffer.data, mei_hdr.length)) {
+
+ dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
+ MEI_HDR_PRM(&mei_hdr));
+ if (mei_write_message(dev, &mei_hdr, write_cb->request_buffer.data)) {
rets = -ENODEV;
goto err;
}
@@ -564,7 +484,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
out:
if (mei_hdr.msg_complete) {
- if (mei_flow_ctrl_reduce(dev, cl)) {
+ if (mei_cl_flow_ctrl_reduce(cl)) {
rets = -ENODEV;
goto err;
}
@@ -582,6 +502,103 @@ err:
return rets;
}
+/**
+ * mei_ioctl_connect_client - the connect to fw client IOCTL function
+ *
+ * @dev: the device structure
+ * @data: IOCTL connect data, input and output parameters
+ * @file: private data of the file object
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int mei_ioctl_connect_client(struct file *file,
+ struct mei_connect_client_data *data)
+{
+ struct mei_device *dev;
+ struct mei_client *client;
+ struct mei_cl *cl;
+ int i;
+ int rets;
+
+ cl = file->private_data;
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (dev->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto end;
+ }
+
+ if (cl->state != MEI_FILE_INITIALIZING &&
+ cl->state != MEI_FILE_DISCONNECTED) {
+ rets = -EBUSY;
+ goto end;
+ }
+
+ /* find ME client we're trying to connect to */
+ i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
+ if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
+ cl->me_client_id = dev->me_clients[i].client_id;
+ cl->state = MEI_FILE_CONNECTING;
+ }
+
+ dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
+ cl->me_client_id);
+ dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
+ dev->me_clients[i].props.protocol_version);
+ dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
+ dev->me_clients[i].props.max_msg_length);
+
+ /* if we're connecting to amthif client then we will use the
+ * existing connection
+ */
+ if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
+ dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
+ if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
+ rets = -ENODEV;
+ goto end;
+ }
+ clear_bit(cl->host_client_id, dev->host_clients_map);
+ mei_cl_unlink(cl);
+
+ kfree(cl);
+ cl = NULL;
+ file->private_data = &dev->iamthif_cl;
+
+ client = &data->out_client_properties;
+ client->max_msg_length =
+ dev->me_clients[i].props.max_msg_length;
+ client->protocol_version =
+ dev->me_clients[i].props.protocol_version;
+ rets = dev->iamthif_cl.status;
+
+ goto end;
+ }
+
+ if (cl->state != MEI_FILE_CONNECTING) {
+ rets = -ENODEV;
+ goto end;
+ }
+
+
+ /* prepare the output buffer */
+ client = &data->out_client_properties;
+ client->max_msg_length = dev->me_clients[i].props.max_msg_length;
+ client->protocol_version = dev->me_clients[i].props.protocol_version;
+ dev_dbg(&dev->pdev->dev, "Can connect?\n");
+
+
+ rets = mei_cl_connect(cl, file);
+
+end:
+ dev_dbg(&dev->pdev->dev, "free connect cb memory.");
+ return rets;
+}
+
/**
* mei_ioctl - the IOCTL function
@@ -630,6 +647,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
rets = -EFAULT;
goto out;
}
+
rets = mei_ioctl_connect_client(file, connect_data);
/* if all is ok, copying the data back to user. */
@@ -726,7 +744,6 @@ static const struct file_operations mei_fops = {
.llseek = no_llseek
};
-
/*
* Misc Device Struct
*/
@@ -736,300 +753,17 @@ static struct miscdevice mei_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
};
-/**
- * mei_quirk_probe - probe for devices that doesn't valid ME interface
- * @pdev: PCI device structure
- * @ent: entry into pci_device_table
- *
- * returns true if ME Interface is valid, false otherwise
- */
-static bool mei_quirk_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+int mei_register(struct device *dev)
{
- u32 reg;
- if (ent->device == MEI_DEV_ID_PBG_1) {
- pci_read_config_dword(pdev, 0x48, &reg);
- /* make sure that bit 9 is up and bit 10 is down */
- if ((reg & 0x600) == 0x200) {
- dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
- return false;
- }
- }
- return true;
-}
-/**
- * mei_probe - Device Initialization Routine
- *
- * @pdev: PCI device structure
- * @ent: entry in kcs_pci_tbl
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct mei_device *dev;
- int err;
-
- mutex_lock(&mei_mutex);
-
- if (!mei_quirk_probe(pdev, ent)) {
- err = -ENODEV;
- goto end;
- }
-
- if (mei_pdev) {
- err = -EEXIST;
- goto end;
- }
- /* enable pci dev */
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "failed to enable pci device.\n");
- goto end;
- }
- /* set PCI host mastering */
- pci_set_master(pdev);
- /* pci request regions for mei driver */
- err = pci_request_regions(pdev, KBUILD_MODNAME);
- if (err) {
- dev_err(&pdev->dev, "failed to get pci regions.\n");
- goto disable_device;
- }
- /* allocates and initializes the mei dev structure */
- dev = mei_device_init(pdev);
- if (!dev) {
- err = -ENOMEM;
- goto release_regions;
- }
- /* mapping IO device memory */
- dev->mem_addr = pci_iomap(pdev, 0, 0);
- if (!dev->mem_addr) {
- dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
- err = -ENOMEM;
- goto free_device;
- }
- pci_enable_msi(pdev);
-
- /* request and enable interrupt */
- if (pci_dev_msi_enabled(pdev))
- err = request_threaded_irq(pdev->irq,
- NULL,
- mei_interrupt_thread_handler,
- IRQF_ONESHOT, KBUILD_MODNAME, dev);
- else
- err = request_threaded_irq(pdev->irq,
- mei_interrupt_quick_handler,
- mei_interrupt_thread_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
-
- if (err) {
- dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
- pdev->irq);
- goto disable_msi;
- }
- INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
- INIT_WORK(&dev->init_work, mei_host_client_init);
-
- if (mei_hw_init(dev)) {
- dev_err(&pdev->dev, "init hw failure.\n");
- err = -ENODEV;
- goto release_irq;
- }
-
- err = misc_register(&mei_misc_device);
- if (err)
- goto release_irq;
-
- mei_pdev = pdev;
- pci_set_drvdata(pdev, dev);
-
-
- schedule_delayed_work(&dev->timer_work, HZ);
-
- mutex_unlock(&mei_mutex);
-
- pr_debug("initialization successful.\n");
-
- return 0;
-
-release_irq:
- /* disable interrupts */
- dev->host_hw_state = mei_hcsr_read(dev);
- mei_disable_interrupts(dev);
- flush_scheduled_work();
- free_irq(pdev->irq, dev);
-disable_msi:
- pci_disable_msi(pdev);
- pci_iounmap(pdev, dev->mem_addr);
-free_device:
- kfree(dev);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_disable_device(pdev);
-end:
- mutex_unlock(&mei_mutex);
- dev_err(&pdev->dev, "initialization failed.\n");
- return err;
+ mei_misc_device.parent = dev;
+ return misc_register(&mei_misc_device);
}
-/**
- * mei_remove - Device Removal Routine
- *
- * @pdev: PCI device structure
- *
- * mei_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
- */
-static void mei_remove(struct pci_dev *pdev)
+void mei_deregister(void)
{
- struct mei_device *dev;
-
- if (mei_pdev != pdev)
- return;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return;
-
- mutex_lock(&dev->device_lock);
-
- cancel_delayed_work(&dev->timer_work);
-
- mei_wd_stop(dev);
-
- mei_pdev = NULL;
-
- if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
- mei_disconnect_host_client(dev, &dev->iamthif_cl);
- }
- if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
- dev->wd_cl.state = MEI_FILE_DISCONNECTING;
- mei_disconnect_host_client(dev, &dev->wd_cl);
- }
-
- /* Unregistering watchdog device */
- mei_watchdog_unregister(dev);
-
- /* remove entry if already in list */
- dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
- mei_me_cl_unlink(dev, &dev->wd_cl);
- mei_me_cl_unlink(dev, &dev->iamthif_cl);
-
- dev->iamthif_current_cb = NULL;
- dev->me_clients_num = 0;
-
- mutex_unlock(&dev->device_lock);
-
- flush_scheduled_work();
-
- /* disable interrupts */
- mei_disable_interrupts(dev);
-
- free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
- pci_set_drvdata(pdev, NULL);
-
- if (dev->mem_addr)
- pci_iounmap(pdev, dev->mem_addr);
-
- kfree(dev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
misc_deregister(&mei_misc_device);
-}
-#ifdef CONFIG_PM
-static int mei_pci_suspend(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct mei_device *dev = pci_get_drvdata(pdev);
- int err;
-
- if (!dev)
- return -ENODEV;
- mutex_lock(&dev->device_lock);
-
- cancel_delayed_work(&dev->timer_work);
-
- /* Stop watchdog if exists */
- err = mei_wd_stop(dev);
- /* Set new mei state */
- if (dev->dev_state == MEI_DEV_ENABLED ||
- dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
- dev->dev_state = MEI_DEV_POWER_DOWN;
- mei_reset(dev, 0);
- }
- mutex_unlock(&dev->device_lock);
-
- free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
-
- return err;
+ mei_misc_device.parent = NULL;
}
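With char-device registration reduced to mei_register()/mei_deregister(), the transport driver owns the lifecycle; a usage sketch matching what the new pci-me.c does further down in this patch:

	/* in the transport probe, once mei_hw_init() has succeeded */
	err = mei_register(&pdev->dev);
	if (err)
		goto release_irq;

	/* and on removal, after the hardware has been torn down */
	mei_deregister();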
-static int mei_pci_resume(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct mei_device *dev;
- int err;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return -ENODEV;
-
- pci_enable_msi(pdev);
-
- /* request and enable interrupt */
- if (pci_dev_msi_enabled(pdev))
- err = request_threaded_irq(pdev->irq,
- NULL,
- mei_interrupt_thread_handler,
- IRQF_ONESHOT, KBUILD_MODNAME, dev);
- else
- err = request_threaded_irq(pdev->irq,
- mei_interrupt_quick_handler,
- mei_interrupt_thread_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
-
- if (err) {
- dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
- pdev->irq);
- return err;
- }
-
- mutex_lock(&dev->device_lock);
- dev->dev_state = MEI_DEV_POWER_UP;
- mei_reset(dev, 1);
- mutex_unlock(&dev->device_lock);
-
- /* Start timer if stopped in suspend */
- schedule_delayed_work(&dev->timer_work, HZ);
-
- return err;
-}
-static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
-#define MEI_PM_OPS (&mei_pm_ops)
-#else
-#define MEI_PM_OPS NULL
-#endif /* CONFIG_PM */
-/*
- * PCI driver structure
- */
-static struct pci_driver mei_driver = {
- .name = KBUILD_MODNAME,
- .id_table = mei_pci_tbl,
- .probe = mei_probe,
- .remove = mei_remove,
- .shutdown = mei_remove,
- .driver.pm = MEI_PM_OPS,
-};
-
-module_pci_driver(mei_driver);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 25da04549d04..cb80166161f0 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -21,7 +21,9 @@
#include <linux/watchdog.h>
#include <linux/poll.h>
#include <linux/mei.h>
+
#include "hw.h"
+#include "hw-me-regs.h"
/*
* watch dog definition
@@ -44,7 +46,7 @@
/*
* AMTHI Client UUID
*/
-extern const uuid_le mei_amthi_guid;
+extern const uuid_le mei_amthif_guid;
/*
* Watchdog Client UUID
@@ -65,12 +67,18 @@ extern const u8 mei_wd_state_independence_msg[3][4];
* Number of File descriptors/handles
* that can be opened to the driver.
*
- * Limit to 253: 256 Total Clients
+ * Limit to 255: 256 Total Clients
* minus internal client for MEI Bus Messags
- * minus internal client for AMTHI
- * minus internal client for Watchdog
*/
-#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 3)
+#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
+
+/*
+ * Internal Clients Number
+ */
+#define MEI_HOST_CLIENT_ID_ANY (-1)
+#define MEI_HBM_HOST_CLIENT_ID 0 /* not used, just for documentation */
+#define MEI_WD_HOST_CLIENT_ID 1
+#define MEI_IAMTHIF_HOST_CLIENT_ID 2
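The fixed slots above are claimed by the internal clients at init time, while user-space opens take the first free id; both link flavours appear later in this patch (wd.c and mei_open()):

	/* internal watchdog client binds to its reserved host id */
	ret = mei_cl_link(&dev->wd_cl, MEI_WD_HOST_CLIENT_ID);

	/* a freshly opened file gets any free id from host_clients_map */
	ret = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);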
/* File state */
@@ -150,6 +158,19 @@ struct mei_message_data {
unsigned char *data;
};
+/**
+ * struct mei_me_client - representation of me (fw) client
+ *
+ * @props - client properties
+ * @client_id - me client id
+ * @mei_flow_ctrl_creds - flow control credits
+ */
+struct mei_me_client {
+ struct mei_client_properties props;
+ u8 client_id;
+ u8 mei_flow_ctrl_creds;
+};
+
struct mei_cl;
@@ -178,7 +199,6 @@ struct mei_cl {
wait_queue_head_t tx_wait;
wait_queue_head_t rx_wait;
wait_queue_head_t wait;
- int read_pending;
int status;
/* ID of client connected */
u8 host_client_id;
@@ -191,10 +211,67 @@ struct mei_cl {
struct mei_cl_cb *read_cb;
};
+/** struct mei_hw_ops
+ *
+ * @host_set_ready - notify FW that host side is ready
+ * @host_is_ready - query for host readiness
+
+ * @hw_is_ready - query if hw is ready
+ * @hw_reset - reset hw
+ * @hw_config - configure hw
+
+ * @intr_clear - clear pending interrupts
+ * @intr_enable - enable interrupts
+ * @intr_disable - disable interrupts
+
+ * @hbuf_free_slots - query for write buffer empty slots
+ * @hbuf_is_ready - query if write buffer is empty
+ * @hbuf_max_len - query for write buffer max len
+
+ * @write - write a message to FW
+
+ * @rdbuf_full_slots - query how many slots are filled
+
+ * @read_hdr - get first 4 bytes (header)
+ * @read - read a buffer from the FW
+ */
+struct mei_hw_ops {
+
+ void (*host_set_ready) (struct mei_device *dev);
+ bool (*host_is_ready) (struct mei_device *dev);
+
+ bool (*hw_is_ready) (struct mei_device *dev);
+ void (*hw_reset) (struct mei_device *dev, bool enable);
+ void (*hw_config) (struct mei_device *dev);
+
+ void (*intr_clear) (struct mei_device *dev);
+ void (*intr_enable) (struct mei_device *dev);
+ void (*intr_disable) (struct mei_device *dev);
+
+ int (*hbuf_free_slots) (struct mei_device *dev);
+ bool (*hbuf_is_ready) (struct mei_device *dev);
+ size_t (*hbuf_max_len) (const struct mei_device *dev);
+
+ int (*write)(struct mei_device *dev,
+ struct mei_msg_hdr *hdr,
+ unsigned char *buf);
+
+ int (*rdbuf_full_slots)(struct mei_device *dev);
+
+ u32 (*read_hdr)(const struct mei_device *dev);
+ int (*read) (struct mei_device *dev,
+ unsigned char *buf, unsigned long len);
+};
+
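A backend fills the new vtable once and hands it to the core; the sketch below shows only the wiring, with every my_* callback a placeholder (the ME implementation this patch introduces lives in hw-me.c and is not reproduced here):

	/* hypothetical backend; all my_* symbols are placeholders */
	static const struct mei_hw_ops my_hw_ops = {
		.host_set_ready   = my_host_set_ready,
		.host_is_ready    = my_host_is_ready,
		.hw_is_ready      = my_hw_is_ready,
		.hw_reset         = my_hw_reset,
		.hw_config        = my_hw_config,
		.intr_clear       = my_intr_clear,
		.intr_enable      = my_intr_enable,
		.intr_disable     = my_intr_disable,
		.hbuf_free_slots  = my_hbuf_free_slots,
		.hbuf_is_ready    = my_hbuf_is_ready,
		.hbuf_max_len     = my_hbuf_max_len,
		.write            = my_write,
		.rdbuf_full_slots = my_rdbuf_full_slots,
		.read_hdr         = my_read_hdr,
		.read             = my_read,
	};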
/**
* struct mei_device - MEI private device struct
- * @hbuf_depth - depth of host(write) buffer
- * @wr_ext_msg - buffer for hbm control responses (set in read cycle)
+
+ * @mem_addr - mem mapped base register address
+
+ * @hbuf_depth - depth of hardware host/write buffer in slots
+ * @hbuf_is_ready - query if the host/write buffer is ready
+ * @wr_msg - the buffer for hbm control messages
+ * @wr_ext_msg - the buffer for hbm control responses (set in read cycle)
*/
struct mei_device {
struct pci_dev *pdev; /* pointer to pci device struct */
@@ -213,24 +290,14 @@ struct mei_device {
*/
struct list_head file_list;
long open_handle_count;
- /*
- * memory of device
- */
- unsigned int mem_base;
- unsigned int mem_length;
- void __iomem *mem_addr;
+
/*
* lock for the device
*/
struct mutex device_lock; /* device lock */
struct delayed_work timer_work; /* MEI timer delayed work (timeouts) */
bool recvd_msg;
- /*
- * hw states of host and fw(ME)
- */
- u32 host_hw_state;
- u32 me_hw_state;
- u8 hbuf_depth;
+
/*
* waiting queue for receive message from FW
*/
@@ -243,11 +310,20 @@ struct mei_device {
enum mei_dev_state dev_state;
enum mei_init_clients_states init_clients_state;
u16 init_clients_timer;
- bool need_reset;
unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */
u32 rd_msg_hdr;
- u32 wr_msg_buf[128]; /* used for control messages */
+
+ /* write buffer */
+ u8 hbuf_depth;
+ bool hbuf_is_ready;
+
+ /* used for control messages */
+ struct {
+ struct mei_msg_hdr hdr;
+ unsigned char data[128];
+ } wr_msg;
+
struct {
struct mei_msg_hdr hdr;
unsigned char data[4]; /* All HBM messages are 4 bytes */
@@ -261,7 +337,6 @@ struct mei_device {
u8 me_clients_num;
u8 me_client_presentation_num;
u8 me_client_index;
- bool mei_host_buffer_is_empty;
struct mei_cl wd_cl;
enum mei_wd_states wd_state;
@@ -289,6 +364,9 @@ struct mei_device {
bool iamthif_canceled;
struct work_struct init_work;
+
+ const struct mei_hw_ops *ops;
+ char hw[0] __aligned(sizeof(void *));
};
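The zero-length hw[] tail lets each transport append its private state to struct mei_device in a single allocation; a sketch of the accessor pattern, assuming a minimal ME-style private struct (the real struct mei_me_hw and to_me_hw() live in hw-me.h and may carry more fields):

	struct example_me_hw {
		void __iomem *mem_addr;	/* mapped registers, as used by pci-me.c below */
	};

	#define to_example_hw(dev) ((struct example_me_hw *)((dev)->hw))

	/* one allocation reserves room for both structures */
	dev = kzalloc(sizeof(struct mei_device) + sizeof(struct example_me_hw),
		      GFP_KERNEL);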
static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
@@ -300,96 +378,28 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
/*
* mei init function prototypes
*/
-struct mei_device *mei_device_init(struct pci_dev *pdev);
+void mei_device_init(struct mei_device *dev);
void mei_reset(struct mei_device *dev, int interrupts);
int mei_hw_init(struct mei_device *dev);
-int mei_task_initialize_clients(void *data);
-int mei_initialize_clients(struct mei_device *dev);
-int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl);
-void mei_allocate_me_clients_storage(struct mei_device *dev);
-
-
-int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
- const uuid_le *cguid, u8 host_client_id);
-void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl);
-int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
-int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
-
-/*
- * MEI IO Functions
- */
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
-void mei_io_cb_free(struct mei_cl_cb *priv_cb);
-int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
-int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
-
-
-/**
- * mei_io_list_init - Sets up a queue list.
- *
- * @list: An instance cl callback structure
- */
-static inline void mei_io_list_init(struct mei_cl_cb *list)
-{
- INIT_LIST_HEAD(&list->list);
-}
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
-
-/*
- * MEI ME Client Functions
- */
-
-struct mei_cl *mei_cl_allocate(struct mei_device *dev);
-void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
-int mei_cl_flush_queues(struct mei_cl *cl);
-/**
- * mei_cl_cmp_id - tells if file private data have same id
- *
- * @fe1: private data of 1. file object
- * @fe2: private data of 2. file object
- *
- * returns true - if ids are the same and not NULL
- */
-static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
- const struct mei_cl *cl2)
-{
- return cl1 && cl2 &&
- (cl1->host_client_id == cl2->host_client_id) &&
- (cl1->me_client_id == cl2->me_client_id);
-}
-
-
-
-/*
- * MEI Host Client Functions
- */
-void mei_host_start_message(struct mei_device *dev);
-void mei_host_enum_clients_message(struct mei_device *dev);
-int mei_host_client_enumerate(struct mei_device *dev);
-void mei_host_client_init(struct work_struct *work);
/*
* MEI interrupt functions prototype
*/
-irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id);
-irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id);
-void mei_timer(struct work_struct *work);
-/*
- * MEI input output function prototype
- */
-int mei_ioctl_connect_client(struct file *file,
- struct mei_connect_client_data *data);
+void mei_timer(struct work_struct *work);
+int mei_irq_read_handler(struct mei_device *dev,
+ struct mei_cl_cb *cmpl_list, s32 *slots);
-int mei_start_read(struct mei_device *dev, struct mei_cl *cl);
+int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
+void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos);
/*
* AMTHIF - AMT Host Interface Functions
*/
void mei_amthif_reset_params(struct mei_device *dev);
-void mei_amthif_host_init(struct mei_device *dev);
+int mei_amthif_host_init(struct mei_device *dev);
int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb);
@@ -407,9 +417,6 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
void mei_amthif_run_next_cmd(struct mei_device *dev);
-int mei_amthif_read_message(struct mei_cl_cb *complete_list,
- struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
-
int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list);
@@ -418,92 +425,107 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
+
+int mei_wd_send(struct mei_device *dev);
+int mei_wd_stop(struct mei_device *dev);
+int mei_wd_host_init(struct mei_device *dev);
/*
- * Register Access Function
+ * mei_watchdog_register - Registering watchdog interface
+ * once a connection to the WD client is established
+ * @dev - mei device
+ */
+void mei_watchdog_register(struct mei_device *dev);
+/*
+ * mei_watchdog_unregister - Unregistering watchdog interface
+ * @dev - mei device
*/
+void mei_watchdog_unregister(struct mei_device *dev);
-/**
- * mei_reg_read - Reads 32bit data from the mei device
- *
- * @dev: the device structure
- * @offset: offset from which to read the data
- *
- * returns register value (u32)
+/*
+ * Register Access Function
*/
-static inline u32 mei_reg_read(const struct mei_device *dev,
- unsigned long offset)
+
+static inline void mei_hw_config(struct mei_device *dev)
+{
+ dev->ops->hw_config(dev);
+}
+static inline void mei_hw_reset(struct mei_device *dev, bool enable)
{
- return ioread32(dev->mem_addr + offset);
+ dev->ops->hw_reset(dev, enable);
}
-/**
- * mei_reg_write - Writes 32bit data to the mei device
- *
- * @dev: the device structure
- * @offset: offset from which to write the data
- * @value: register value to write (u32)
- */
-static inline void mei_reg_write(const struct mei_device *dev,
- unsigned long offset, u32 value)
+static inline void mei_clear_interrupts(struct mei_device *dev)
{
- iowrite32(value, dev->mem_addr + offset);
+ dev->ops->intr_clear(dev);
}
-/**
- * mei_hcsr_read - Reads 32bit data from the host CSR
- *
- * @dev: the device structure
- *
- * returns the byte read.
- */
-static inline u32 mei_hcsr_read(const struct mei_device *dev)
+static inline void mei_enable_interrupts(struct mei_device *dev)
{
- return mei_reg_read(dev, H_CSR);
+ dev->ops->intr_enable(dev);
}
-/**
- * mei_mecsr_read - Reads 32bit data from the ME CSR
- *
- * @dev: the device structure
- *
- * returns ME_CSR_HA register value (u32)
- */
-static inline u32 mei_mecsr_read(const struct mei_device *dev)
+static inline void mei_disable_interrupts(struct mei_device *dev)
{
- return mei_reg_read(dev, ME_CSR_HA);
+ dev->ops->intr_disable(dev);
}
-/**
- * get_me_cb_rw - Reads 32bit data from the mei ME_CB_RW register
- *
- * @dev: the device structure
- *
- * returns ME_CB_RW register value (u32)
- */
-static inline u32 mei_mecbrw_read(const struct mei_device *dev)
+static inline void mei_host_set_ready(struct mei_device *dev)
{
- return mei_reg_read(dev, ME_CB_RW);
+ dev->ops->host_set_ready(dev);
+}
+static inline bool mei_host_is_ready(struct mei_device *dev)
+{
+ return dev->ops->host_is_ready(dev);
+}
+static inline bool mei_hw_is_ready(struct mei_device *dev)
+{
+ return dev->ops->hw_is_ready(dev);
}
+static inline bool mei_hbuf_is_ready(struct mei_device *dev)
+{
+ return dev->ops->hbuf_is_ready(dev);
+}
-/*
- * mei interface function prototypes
- */
-void mei_hcsr_set(struct mei_device *dev);
-void mei_csr_clear_his(struct mei_device *dev);
+static inline int mei_hbuf_empty_slots(struct mei_device *dev)
+{
+ return dev->ops->hbuf_free_slots(dev);
+}
+
+static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
+{
+ return dev->ops->hbuf_max_len(dev);
+}
-void mei_enable_interrupts(struct mei_device *dev);
-void mei_disable_interrupts(struct mei_device *dev);
+static inline int mei_write_message(struct mei_device *dev,
+ struct mei_msg_hdr *hdr,
+ unsigned char *buf)
+{
+ return dev->ops->write(dev, hdr, buf);
+}
-static inline struct mei_msg_hdr *mei_hbm_hdr(u32 *buf, size_t length)
+static inline u32 mei_read_hdr(const struct mei_device *dev)
{
- struct mei_msg_hdr *hdr = (struct mei_msg_hdr *)buf;
- hdr->host_addr = 0;
- hdr->me_addr = 0;
- hdr->length = length;
- hdr->msg_complete = 1;
- hdr->reserved = 0;
- return hdr;
+ return dev->ops->read_hdr(dev);
}
+static inline void mei_read_slots(struct mei_device *dev,
+ unsigned char *buf, unsigned long len)
+{
+ dev->ops->read(dev, buf, len);
+}
+
+static inline int mei_count_full_read_slots(struct mei_device *dev)
+{
+ return dev->ops->rdbuf_full_slots(dev);
+}
+
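Taken together, the read-side accessors reproduce the sequence used by the irq read handler; an illustrative fragment mirroring mei_irq_read_handler() earlier in this patch:

	if (mei_count_full_read_slots(dev) > 0) {
		u32 hdr = mei_read_hdr(dev);	/* first dword is the header */
		struct mei_msg_hdr *mei_hdr = (struct mei_msg_hdr *)&hdr;

		mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
	}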
+int mei_register(struct device *dev);
+void mei_deregister(void);
+
+#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d"
+#define MEI_HDR_PRM(hdr) \
+ (hdr)->host_addr, (hdr)->me_addr, \
+ (hdr)->length, (hdr)->msg_complete
+
#endif
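The header format macros keep debug output uniform across call sites; usage as seen in the converted dev_dbg() calls above:

	dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n", MEI_HDR_PRM(&mei_hdr));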
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
new file mode 100644
index 000000000000..b40ec0601ab0
--- /dev/null
+++ b/drivers/misc/mei/pci-me.c
@@ -0,0 +1,396 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/aio.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/sched.h>
+#include <linux/uuid.h>
+#include <linux/compat.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hw-me.h"
+#include "client.h"
+
+/* AMT device is a singleton on the platform */
+static struct pci_dev *mei_pdev;
+
+/* mei_pci_tbl - PCI Device ID Table */
+static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
+
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
+
+static DEFINE_MUTEX(mei_mutex);
+
+/**
+ * mei_quirk_probe - probe for devices that don't have a valid ME interface
+ * @pdev: PCI device structure
+ * @ent: entry into pci_device_table
+ *
+ * returns true if ME Interface is valid, false otherwise
+ */
+static bool mei_quirk_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ u32 reg;
+ if (ent->device == MEI_DEV_ID_PBG_1) {
+ pci_read_config_dword(pdev, 0x48, &reg);
+ /* make sure that bit 9 is up and bit 10 is down */
+ if ((reg & 0x600) == 0x200) {
+ dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
+ return false;
+ }
+ }
+ return true;
+}
+/**
+ * mei_probe - Device Initialization Routine
+ *
+ * @pdev: PCI device structure
+ * @ent: entry in kcs_pci_tbl
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int mei_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct mei_device *dev;
+ struct mei_me_hw *hw;
+ int err;
+
+ mutex_lock(&mei_mutex);
+
+ if (!mei_quirk_probe(pdev, ent)) {
+ err = -ENODEV;
+ goto end;
+ }
+
+ if (mei_pdev) {
+ err = -EEXIST;
+ goto end;
+ }
+ /* enable pci dev */
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pci device.\n");
+ goto end;
+ }
+ /* set PCI host mastering */
+ pci_set_master(pdev);
+ /* pci request regions for mei driver */
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get pci regions.\n");
+ goto disable_device;
+ }
+ /* allocates and initializes the mei dev structure */
+ dev = mei_me_dev_init(pdev);
+ if (!dev) {
+ err = -ENOMEM;
+ goto release_regions;
+ }
+ hw = to_me_hw(dev);
+ /* mapping IO device memory */
+ hw->mem_addr = pci_iomap(pdev, 0, 0);
+ if (!hw->mem_addr) {
+ dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+ pci_enable_msi(pdev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_me_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_me_irq_quick_handler,
+ mei_me_irq_thread_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+
+ if (err) {
+ dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
+ pdev->irq);
+ goto disable_msi;
+ }
+
+ if (mei_hw_init(dev)) {
+ dev_err(&pdev->dev, "init hw failure.\n");
+ err = -ENODEV;
+ goto release_irq;
+ }
+
+ err = mei_register(&pdev->dev);
+ if (err)
+ goto release_irq;
+
+ mei_pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+
+ schedule_delayed_work(&dev->timer_work, HZ);
+
+ mutex_unlock(&mei_mutex);
+
+ pr_debug("initialization successful.\n");
+
+ return 0;
+
+release_irq:
+ mei_disable_interrupts(dev);
+ flush_scheduled_work();
+ free_irq(pdev->irq, dev);
+disable_msi:
+ pci_disable_msi(pdev);
+ pci_iounmap(pdev, hw->mem_addr);
+free_device:
+ kfree(dev);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+end:
+ mutex_unlock(&mei_mutex);
+ dev_err(&pdev->dev, "initialization failed.\n");
+ return err;
+}
+
+/**
+ * mei_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * mei_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void mei_remove(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+ struct mei_me_hw *hw;
+
+ if (mei_pdev != pdev)
+ return;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return;
+
+ hw = to_me_hw(dev);
+
+ mutex_lock(&dev->device_lock);
+
+ cancel_delayed_work(&dev->timer_work);
+
+ mei_wd_stop(dev);
+
+ mei_pdev = NULL;
+
+ if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
+ dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
+ mei_cl_disconnect(&dev->iamthif_cl);
+ }
+ if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
+ dev->wd_cl.state = MEI_FILE_DISCONNECTING;
+ mei_cl_disconnect(&dev->wd_cl);
+ }
+
+ /* Unregistering watchdog device */
+ mei_watchdog_unregister(dev);
+
+ /* remove entry if already in list */
+ dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
+
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
+ mei_cl_unlink(&dev->wd_cl);
+
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
+ mei_cl_unlink(&dev->iamthif_cl);
+
+ dev->iamthif_current_cb = NULL;
+ dev->me_clients_num = 0;
+
+ mutex_unlock(&dev->device_lock);
+
+ flush_scheduled_work();
+
+ /* disable interrupts */
+ mei_disable_interrupts(dev);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ if (hw->mem_addr)
+ pci_iounmap(pdev, hw->mem_addr);
+
+ kfree(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ mei_deregister();
+
+}
+#ifdef CONFIG_PM
+static int mei_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev = pci_get_drvdata(pdev);
+ int err;
+
+ if (!dev)
+ return -ENODEV;
+ mutex_lock(&dev->device_lock);
+
+ cancel_delayed_work(&dev->timer_work);
+
+ /* Stop watchdog if exists */
+ err = mei_wd_stop(dev);
+ /* Set new mei state */
+ if (dev->dev_state == MEI_DEV_ENABLED ||
+ dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
+ dev->dev_state = MEI_DEV_POWER_DOWN;
+ mei_reset(dev, 0);
+ }
+ mutex_unlock(&dev->device_lock);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+
+ return err;
+}
+
+static int mei_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int err;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ pci_enable_msi(pdev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_me_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_me_irq_quick_handler,
+ mei_me_irq_thread_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+
+ if (err) {
+ dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
+ pdev->irq);
+ return err;
+ }
+
+ mutex_lock(&dev->device_lock);
+ dev->dev_state = MEI_DEV_POWER_UP;
+ mei_reset(dev, 1);
+ mutex_unlock(&dev->device_lock);
+
+ /* Start timer if stopped in suspend */
+ schedule_delayed_work(&dev->timer_work, HZ);
+
+ return err;
+}
+static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
+#define MEI_PM_OPS (&mei_pm_ops)
+#else
+#define MEI_PM_OPS NULL
+#endif /* CONFIG_PM */
+/*
+ * PCI driver structure
+ */
+static struct pci_driver mei_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mei_pci_tbl,
+ .probe = mei_probe,
+ .remove = mei_remove,
+ .shutdown = mei_remove,
+ .driver.pm = MEI_PM_OPS,
+};
+
+module_pci_driver(mei_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 9299a8c29a6f..2413247fc392 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -21,11 +21,13 @@
#include <linux/sched.h>
#include <linux/watchdog.h>
-#include "mei_dev.h"
-#include "hw.h"
-#include "interface.h"
#include <linux/mei.h>
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
+
static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 };
@@ -62,30 +64,41 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
*/
int mei_wd_host_init(struct mei_device *dev)
{
- int id;
- mei_cl_init(&dev->wd_cl, dev);
+ struct mei_cl *cl = &dev->wd_cl;
+ int i;
+ int ret;
+
+ mei_cl_init(cl, dev);
- /* look for WD client and connect to it */
- dev->wd_cl.state = MEI_FILE_DISCONNECTED;
dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
dev->wd_state = MEI_WD_IDLE;
- /* Connect WD ME client to the host client */
- id = mei_me_cl_link(dev, &dev->wd_cl,
- &mei_wd_guid, MEI_WD_HOST_CLIENT_ID);
- if (id < 0) {
+ /* check for valid client id */
+ i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
+ if (i < 0) {
dev_info(&dev->pdev->dev, "wd: failed to find the client\n");
return -ENOENT;
}
- if (mei_connect(dev, &dev->wd_cl)) {
+ cl->me_client_id = dev->me_clients[i].client_id;
+
+ ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
+
+ if (ret < 0) {
+ dev_info(&dev->pdev->dev, "wd: failed link client\n");
+ return -ENOENT;
+ }
+
+ cl->state = MEI_FILE_CONNECTING;
+
+ if (mei_hbm_cl_connect_req(dev, cl)) {
dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n");
- dev->wd_cl.state = MEI_FILE_DISCONNECTED;
- dev->wd_cl.host_client_id = 0;
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->host_client_id = 0;
return -EIO;
}
- dev->wd_cl.timer_count = MEI_CONNECT_TIMEOUT;
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
return 0;
}
@@ -101,22 +114,21 @@ int mei_wd_host_init(struct mei_device *dev)
*/
int mei_wd_send(struct mei_device *dev)
{
- struct mei_msg_hdr *mei_hdr;
+ struct mei_msg_hdr hdr;
- mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
- mei_hdr->host_addr = dev->wd_cl.host_client_id;
- mei_hdr->me_addr = dev->wd_cl.me_client_id;
- mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
+ hdr.host_addr = dev->wd_cl.host_client_id;
+ hdr.me_addr = dev->wd_cl.me_client_id;
+ hdr.msg_complete = 1;
+ hdr.reserved = 0;
if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
- mei_hdr->length = MEI_WD_START_MSG_SIZE;
+ hdr.length = MEI_WD_START_MSG_SIZE;
else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
- mei_hdr->length = MEI_WD_STOP_MSG_SIZE;
+ hdr.length = MEI_WD_STOP_MSG_SIZE;
else
return -EINVAL;
- return mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length);
+ return mei_write_message(dev, &hdr, dev->wd_data);
}
/**
@@ -141,16 +153,16 @@ int mei_wd_stop(struct mei_device *dev)
dev->wd_state = MEI_WD_STOPPING;
- ret = mei_flow_ctrl_creds(dev, &dev->wd_cl);
+ ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
if (ret < 0)
goto out;
- if (ret && dev->mei_host_buffer_is_empty) {
+ if (ret && dev->hbuf_is_ready) {
ret = 0;
- dev->mei_host_buffer_is_empty = false;
+ dev->hbuf_is_ready = false;
if (!mei_wd_send(dev)) {
- ret = mei_flow_ctrl_reduce(dev, &dev->wd_cl);
+ ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl);
if (ret)
goto out;
} else {
@@ -270,10 +282,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
dev->wd_state = MEI_WD_RUNNING;
/* Check if we can send the ping to HW*/
- if (dev->mei_host_buffer_is_empty &&
- mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
+ if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
- dev->mei_host_buffer_is_empty = false;
+ dev->hbuf_is_ready = false;
dev_dbg(&dev->pdev->dev, "wd: sending ping\n");
if (mei_wd_send(dev)) {
@@ -282,9 +293,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
goto end;
}
- if (mei_flow_ctrl_reduce(dev, &dev->wd_cl)) {
+ if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) {
dev_err(&dev->pdev->dev,
- "wd: mei_flow_ctrl_reduce() failed.\n");
+ "wd: mei_cl_flow_ctrl_reduce() failed.\n");
ret = -EIO;
goto end;
}
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 240a6d361665..2129274ef7ab 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -280,11 +280,10 @@ static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm,
const struct mmu_notifier_ops *ops)
{
struct mmu_notifier *mn, *gru_mn = NULL;
- struct hlist_node *n;
if (mm->mmu_notifier_mm) {
rcu_read_lock();
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list,
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list,
hlist)
if (mn->ops == ops) {
gru_mn = mn;
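
The grutlbpurge.c hunk above adapts to the three-argument hlist_for_each_entry_rcu() iterator, which no longer takes a separate struct hlist_node cursor. A minimal sketch of the new form, using illustrative names (my_item, my_find) that are not part of the patch:

struct my_item {
        struct hlist_node hlist;
        int key;
};

/* Look up an item under RCU; returns NULL when no key matches. */
static struct my_item *my_find(struct hlist_head *head, int key)
{
        struct my_item *item;

        rcu_read_lock();
        hlist_for_each_entry_rcu(item, head, hlist)     /* no extra node cursor */
                if (item->key == key)
                        break;
        rcu_read_unlock();

        return item;
}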
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig
index abb5de1afce3..f34dcc514730 100644
--- a/drivers/misc/ti-st/Kconfig
+++ b/drivers/misc/ti-st/Kconfig
@@ -5,7 +5,7 @@
menu "Texas Instruments shared transport line discipline"
config TI_ST
tristate "Shared transport core driver"
- depends on NET && GPIOLIB
+ depends on NET && GPIOLIB && TTY
select FW_LOADER
help
This enables the shared transport core driver for TI
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index b90a2241d79c..0a1428016350 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -240,7 +240,8 @@ void st_int_recv(void *disc_data,
char *ptr;
struct st_proto_s *proto;
unsigned short payload_len = 0;
- int len = 0, type = 0;
+ int len = 0;
+ unsigned char type = 0;
unsigned char *plen;
struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
unsigned long flags;
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 0bd5349b0422..0ab7c922212c 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -196,13 +196,14 @@ int tifm_add_adapter(struct tifm_adapter *fm)
{
int rc;
- if (!idr_pre_get(&tifm_adapter_idr, GFP_KERNEL))
- return -ENOMEM;
-
+ idr_preload(GFP_KERNEL);
spin_lock(&tifm_adapter_lock);
- rc = idr_get_new(&tifm_adapter_idr, fm, &fm->id);
+ rc = idr_alloc(&tifm_adapter_idr, fm, 0, 0, GFP_NOWAIT);
+ if (rc >= 0)
+ fm->id = rc;
spin_unlock(&tifm_adapter_lock);
- if (rc)
+ idr_preload_end();
+ if (rc < 0)
return rc;
dev_set_name(&fm->dev, "tifm%u", fm->id);
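
The tifm hunk above replaces the removed idr_pre_get()/idr_get_new() pair with the preload API. A minimal sketch of the resulting pattern, assuming hypothetical names (my_idr, my_lock, obj) that are not part of the patch:

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

/* Returns the newly allocated id (>= 0) or a negative errno. */
static int my_assign_id(void *obj)
{
        int id;

        idr_preload(GFP_KERNEL);        /* preallocate outside the spinlock */
        spin_lock(&my_lock);
        id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
        spin_unlock(&my_lock);
        idr_preload_end();

        return id;
}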
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig
new file mode 100644
index 000000000000..39c2ecadb273
--- /dev/null
+++ b/drivers/misc/vmw_vmci/Kconfig
@@ -0,0 +1,16 @@
+#
+# VMware VMCI device
+#
+
+config VMWARE_VMCI
+ tristate "VMware VMCI Driver"
+ depends on X86 && PCI
+ help
+ This is VMware's Virtual Machine Communication Interface. It enables
+ high-speed communication between host and guest in a virtual
+ environment via the VMCI virtual device.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vmw_vmci.
diff --git a/drivers/misc/vmw_vmci/Makefile b/drivers/misc/vmw_vmci/Makefile
new file mode 100644
index 000000000000..4da9893c3942
--- /dev/null
+++ b/drivers/misc/vmw_vmci/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci.o
+vmw_vmci-y += vmci_context.o vmci_datagram.o vmci_doorbell.o \
+ vmci_driver.o vmci_event.o vmci_guest.o vmci_handle_array.o \
+ vmci_host.o vmci_queue_pair.o vmci_resource.o vmci_route.o
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
new file mode 100644
index 000000000000..f866a4baecb5
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -0,0 +1,1214 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+/*
+ * List of current VMCI contexts. Contexts can be added by
+ * vmci_ctx_create() and removed via vmci_ctx_destroy().
+ * These, along with context lookup, are protected by the
+ * list structure's lock.
+ */
+static struct {
+ struct list_head head;
+ spinlock_t lock; /* Spinlock for context list operations */
+} ctx_list = {
+ .head = LIST_HEAD_INIT(ctx_list.head),
+ .lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock),
+};
+
+/* Used by contexts that did not set up notify flag pointers */
+static bool ctx_dummy_notify;
+
+static void ctx_signal_notify(struct vmci_ctx *context)
+{
+ *context->notify = true;
+}
+
+static void ctx_clear_notify(struct vmci_ctx *context)
+{
+ *context->notify = false;
+}
+
+/*
+ * If nothing requires the attention of the guest, clears both
+ * the notify flag and the call.
+ */
+static void ctx_clear_notify_call(struct vmci_ctx *context)
+{
+ if (context->pending_datagrams == 0 &&
+ vmci_handle_arr_get_size(context->pending_doorbell_array) == 0)
+ ctx_clear_notify(context);
+}
+
+/*
+ * Sets the context's notify flag iff datagrams are pending for this
+ * context. Called from vmci_setup_notify().
+ */
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context)
+{
+ spin_lock(&context->lock);
+ if (context->pending_datagrams)
+ ctx_signal_notify(context);
+ spin_unlock(&context->lock);
+}
+
+/*
+ * Allocates and initializes a VMCI context.
+ */
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
+ uintptr_t event_hnd,
+ int user_version,
+ const struct cred *cred)
+{
+ struct vmci_ctx *context;
+ int error;
+
+ if (cid == VMCI_INVALID_ID) {
+ pr_devel("Invalid context ID for VMCI context\n");
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) {
+ pr_devel("Invalid flag (flags=0x%x) for VMCI context\n",
+ priv_flags);
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ if (user_version == 0) {
+ pr_devel("Invalid suer_version %d\n", user_version);
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context) {
+ pr_warn("Failed to allocate memory for VMCI context\n");
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ kref_init(&context->kref);
+ spin_lock_init(&context->lock);
+ INIT_LIST_HEAD(&context->list_item);
+ INIT_LIST_HEAD(&context->datagram_queue);
+ INIT_LIST_HEAD(&context->notifier_list);
+
+ /* Initialize host-specific VMCI context. */
+ init_waitqueue_head(&context->host_context.wait_queue);
+
+ context->queue_pair_array = vmci_handle_arr_create(0);
+ if (!context->queue_pair_array) {
+ error = -ENOMEM;
+ goto err_free_ctx;
+ }
+
+ context->doorbell_array = vmci_handle_arr_create(0);
+ if (!context->doorbell_array) {
+ error = -ENOMEM;
+ goto err_free_qp_array;
+ }
+
+ context->pending_doorbell_array = vmci_handle_arr_create(0);
+ if (!context->pending_doorbell_array) {
+ error = -ENOMEM;
+ goto err_free_db_array;
+ }
+
+ context->user_version = user_version;
+
+ context->priv_flags = priv_flags;
+
+ if (cred)
+ context->cred = get_cred(cred);
+
+ context->notify = &ctx_dummy_notify;
+ context->notify_page = NULL;
+
+ /*
+ * If we collide with an existing context we generate a new one
+ * and use it instead. The VMX will determine if regeneration
+ * is okay. Since there aren't 4B - 16 VMs running on a given
+ * host, the loop below will terminate.
+ */
+ spin_lock(&ctx_list.lock);
+
+ while (vmci_ctx_exists(cid)) {
+ /* We reserve the lowest 16 ids for fixed contexts. */
+ cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1;
+ if (cid == VMCI_INVALID_ID)
+ cid = VMCI_RESERVED_CID_LIMIT;
+ }
+ context->cid = cid;
+
+ list_add_tail_rcu(&context->list_item, &ctx_list.head);
+ spin_unlock(&ctx_list.lock);
+
+ return context;
+
+ err_free_db_array:
+ vmci_handle_arr_destroy(context->doorbell_array);
+ err_free_qp_array:
+ vmci_handle_arr_destroy(context->queue_pair_array);
+ err_free_ctx:
+ kfree(context);
+ err_out:
+ return ERR_PTR(error);
+}
+
+/*
+ * Destroy VMCI context.
+ */
+void vmci_ctx_destroy(struct vmci_ctx *context)
+{
+ spin_lock(&ctx_list.lock);
+ list_del_rcu(&context->list_item);
+ spin_unlock(&ctx_list.lock);
+ synchronize_rcu();
+
+ vmci_ctx_put(context);
+}
+
+/*
+ * Fire notification for all contexts interested in given cid.
+ */
+static int ctx_fire_notification(u32 context_id, u32 priv_flags)
+{
+ u32 i, array_size;
+ struct vmci_ctx *sub_ctx;
+ struct vmci_handle_arr *subscriber_array;
+ struct vmci_handle context_handle =
+ vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
+
+ /*
+ * We create an array to hold the subscribers we find when
+ * scanning through all contexts.
+ */
+ subscriber_array = vmci_handle_arr_create(0);
+ if (subscriber_array == NULL)
+ return VMCI_ERROR_NO_MEM;
+
+ /*
+ * Scan all contexts to find who is interested in being
+ * notified about given contextID.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) {
+ struct vmci_handle_list *node;
+
+ /*
+ * We only deliver notifications of the removal of
+ * contexts if the two contexts are allowed to
+ * interact.
+ */
+ if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags))
+ continue;
+
+ list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) {
+ if (!vmci_handle_is_equal(node->handle, context_handle))
+ continue;
+
+ vmci_handle_arr_append_entry(&subscriber_array,
+ vmci_make_handle(sub_ctx->cid,
+ VMCI_EVENT_HANDLER));
+ }
+ }
+ rcu_read_unlock();
+
+ /* Fire event to all subscribers. */
+ array_size = vmci_handle_arr_get_size(subscriber_array);
+ for (i = 0; i < array_size; i++) {
+ int result;
+ struct vmci_event_ctx ev;
+
+ ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i);
+ ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_CONTEXT_RESOURCE_ID);
+ ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+ ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED;
+ ev.payload.context_id = context_id;
+
+ result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
+ &ev.msg.hdr, false);
+ if (result < VMCI_SUCCESS) {
+ pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n",
+ ev.msg.event_data.event,
+ ev.msg.hdr.dst.context);
+ /* We continue to enqueue on next subscriber. */
+ }
+ }
+ vmci_handle_arr_destroy(subscriber_array);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Returns the current number of pending datagrams. The call may
+ * also serve as a synchronization point for the datagram queue,
+ * as no enqueue operations can occur concurrently.
+ */
+int vmci_ctx_pending_datagrams(u32 cid, u32 *pending)
+{
+ struct vmci_ctx *context;
+
+ context = vmci_ctx_get(cid);
+ if (context == NULL)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ spin_lock(&context->lock);
+ if (pending)
+ *pending = context->pending_datagrams;
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Queues a VMCI datagram for the appropriate target VM context.
+ */
+int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg)
+{
+ struct vmci_datagram_queue_entry *dq_entry;
+ struct vmci_ctx *context;
+ struct vmci_handle dg_src;
+ size_t vmci_dg_size;
+
+ vmci_dg_size = VMCI_DG_SIZE(dg);
+ if (vmci_dg_size > VMCI_MAX_DG_SIZE) {
+ pr_devel("Datagram too large (bytes=%Zu)\n", vmci_dg_size);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ /* Get the target VM's VMCI context. */
+ context = vmci_ctx_get(cid);
+ if (!context) {
+ pr_devel("Invalid context (ID=0x%x)\n", cid);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ /* Allocate guest call entry and add it to the target VM's queue. */
+ dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL);
+ if (dq_entry == NULL) {
+ pr_warn("Failed to allocate memory for datagram\n");
+ vmci_ctx_put(context);
+ return VMCI_ERROR_NO_MEM;
+ }
+ dq_entry->dg = dg;
+ dq_entry->dg_size = vmci_dg_size;
+ dg_src = dg->src;
+ INIT_LIST_HEAD(&dq_entry->list_item);
+
+ spin_lock(&context->lock);
+
+ /*
+ * We put a higher limit on datagrams from the hypervisor. If
+ * the pending datagram is not from hypervisor, then we check
+ * if enqueueing it would exceed the
+ * VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination. If
+ * the pending datagram is from hypervisor, we allow it to be
+ * queued at the destination side provided we don't reach the
+ * VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit.
+ */
+ if (context->datagram_queue_size + vmci_dg_size >=
+ VMCI_MAX_DATAGRAM_QUEUE_SIZE &&
+ (!vmci_handle_is_equal(dg_src,
+ vmci_make_handle
+ (VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_CONTEXT_RESOURCE_ID)) ||
+ context->datagram_queue_size + vmci_dg_size >=
+ VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) {
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+ kfree(dq_entry);
+ pr_devel("Context (ID=0x%x) receive queue is full\n", cid);
+ return VMCI_ERROR_NO_RESOURCES;
+ }
+
+ list_add(&dq_entry->list_item, &context->datagram_queue);
+ context->pending_datagrams++;
+ context->datagram_queue_size += vmci_dg_size;
+ ctx_signal_notify(context);
+ wake_up(&context->host_context.wait_queue);
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return vmci_dg_size;
+}
+
+/*
+ * Verifies whether a context with the specified context ID exists.
+ * FIXME: utility is dubious as no decisions can be reliably made
+ * using this data as context can appear and disappear at any time.
+ */
+bool vmci_ctx_exists(u32 cid)
+{
+ struct vmci_ctx *context;
+ bool exists = false;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(context, &ctx_list.head, list_item) {
+ if (context->cid == cid) {
+ exists = true;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+ return exists;
+}
+
+/*
+ * Retrieves VMCI context corresponding to the given cid.
+ */
+struct vmci_ctx *vmci_ctx_get(u32 cid)
+{
+ struct vmci_ctx *c, *context = NULL;
+
+ if (cid == VMCI_INVALID_ID)
+ return NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &ctx_list.head, list_item) {
+ if (c->cid == cid) {
+ /*
+ * The context owner drops its own reference to the
+ * context only after removing it from the list and
+ * waiting for RCU grace period to expire. This
+ * means that we are not about to increase the
+ * reference count of something that is in the
+ * process of being destroyed.
+ */
+ context = c;
+ kref_get(&context->kref);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return context;
+}
+
+/*
+ * Deallocates all parts of a context data structure. This
+ * function doesn't lock the context, because it assumes that
+ * the caller was holding the last reference to the context.
+ */
+static void ctx_free_ctx(struct kref *kref)
+{
+ struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref);
+ struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp;
+ struct vmci_handle temp_handle;
+ struct vmci_handle_list *notifier, *tmp;
+
+ /*
+ * Fire event to all contexts interested in knowing this
+ * context is dying.
+ */
+ ctx_fire_notification(context->cid, context->priv_flags);
+
+ /*
+ * Cleanup all queue pair resources attached to context. If
+ * the VM dies without cleaning up, this code will make sure
+ * that no resources are leaked.
+ */
+ temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0);
+ while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) {
+ if (vmci_qp_broker_detach(temp_handle,
+ context) < VMCI_SUCCESS) {
+ /*
+ * When vmci_qp_broker_detach() succeeds it
+ * removes the handle from the array. If
+ * detach fails, we must remove the handle
+ * ourselves.
+ */
+ vmci_handle_arr_remove_entry(context->queue_pair_array,
+ temp_handle);
+ }
+ temp_handle =
+ vmci_handle_arr_get_entry(context->queue_pair_array, 0);
+ }
+
+ /*
+ * It is fine to destroy this without locking the callQueue, as
+ * this is the only thread having a reference to the context.
+ */
+ list_for_each_entry_safe(dq_entry, dq_entry_tmp,
+ &context->datagram_queue, list_item) {
+ WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg));
+ list_del(&dq_entry->list_item);
+ kfree(dq_entry->dg);
+ kfree(dq_entry);
+ }
+
+ list_for_each_entry_safe(notifier, tmp,
+ &context->notifier_list, node) {
+ list_del(&notifier->node);
+ kfree(notifier);
+ }
+
+ vmci_handle_arr_destroy(context->queue_pair_array);
+ vmci_handle_arr_destroy(context->doorbell_array);
+ vmci_handle_arr_destroy(context->pending_doorbell_array);
+ vmci_ctx_unset_notify(context);
+ if (context->cred)
+ put_cred(context->cred);
+ kfree(context);
+}
+
+/*
+ * Drops reference to VMCI context. If this is the last reference to
+ * the context it will be deallocated. A context is created with
+ * a reference count of one, and on destroy, it is removed from
+ * the context list before its reference count is decremented. Thus,
+ * if we reach zero, we are sure that nobody else is about to increment
+ * it (they need the entry in the context list for that), and so there
+ * is no need for locking.
+ */
+void vmci_ctx_put(struct vmci_ctx *context)
+{
+ kref_put(&context->kref, ctx_free_ctx);
+}
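
Taken together, vmci_ctx_get() and vmci_ctx_put() implement the kref lifetime described in the comment above. A short illustrative sketch of the lookup/use/put pattern; my_touch_context() and my_cid are placeholders, not part of the patch:

static int my_touch_context(u32 my_cid)
{
        struct vmci_ctx *context;

        context = vmci_ctx_get(my_cid); /* takes a reference or returns NULL */
        if (!context)
                return VMCI_ERROR_NOT_FOUND;

        spin_lock(&context->lock);
        /* ... inspect or update per-context state ... */
        spin_unlock(&context->lock);

        vmci_ctx_put(context);          /* the final put frees the context */
        return VMCI_SUCCESS;
}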
+
+/*
+ * Dequeues the next datagram and returns it to the caller.
+ * The caller passes in a pointer to the maximum size datagram
+ * it can handle, and the datagram is only dequeued if its
+ * size is less than max_size. If it is larger, max_size is
+ * set to the size of the datagram to give the caller a chance
+ * to set up a larger buffer for the guestcall.
+ */
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+ size_t *max_size,
+ struct vmci_datagram **dg)
+{
+ struct vmci_datagram_queue_entry *dq_entry;
+ struct list_head *list_item;
+ int rv;
+
+ /* Dequeue the next datagram entry. */
+ spin_lock(&context->lock);
+ if (context->pending_datagrams == 0) {
+ ctx_clear_notify_call(context);
+ spin_unlock(&context->lock);
+ pr_devel("No datagrams pending\n");
+ return VMCI_ERROR_NO_MORE_DATAGRAMS;
+ }
+
+ list_item = context->datagram_queue.next;
+
+ dq_entry =
+ list_entry(list_item, struct vmci_datagram_queue_entry, list_item);
+
+ /* Check size of caller's buffer. */
+ if (*max_size < dq_entry->dg_size) {
+ *max_size = dq_entry->dg_size;
+ spin_unlock(&context->lock);
+ pr_devel("Caller's buffer should be at least (size=%u bytes)\n",
+ (u32) *max_size);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ list_del(list_item);
+ context->pending_datagrams--;
+ context->datagram_queue_size -= dq_entry->dg_size;
+ if (context->pending_datagrams == 0) {
+ ctx_clear_notify_call(context);
+ rv = VMCI_SUCCESS;
+ } else {
+ /*
+ * Return the size of the next datagram.
+ */
+ struct vmci_datagram_queue_entry *next_entry;
+
+ list_item = context->datagram_queue.next;
+ next_entry =
+ list_entry(list_item, struct vmci_datagram_queue_entry,
+ list_item);
+
+ /*
+ * The following size_t -> int truncation is fine as
+ * the maximum size of a (routable) datagram is 68KB.
+ */
+ rv = (int)next_entry->dg_size;
+ }
+ spin_unlock(&context->lock);
+
+ /* Caller must free datagram. */
+ *dg = dq_entry->dg;
+ dq_entry->dg = NULL;
+ kfree(dq_entry);
+
+ return rv;
+}
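
A sketch of the max_size handshake described in the comment above vmci_ctx_dequeue_datagram(): when the caller's buffer is too small, *max_size is updated and VMCI_ERROR_NO_MEM is returned so the caller can retry. my_drain_one() and the initial size are illustrative only, not part of the patch:

static int my_drain_one(struct vmci_ctx *context)
{
        struct vmci_datagram *dg = NULL;
        size_t size = 256;                      /* caller's current buffer size */
        int rv;

        rv = vmci_ctx_dequeue_datagram(context, &size, &dg);
        if (rv == VMCI_ERROR_NO_MEM)            /* size now holds the real length */
                rv = vmci_ctx_dequeue_datagram(context, &size, &dg);

        if (rv >= VMCI_SUCCESS && dg) {
                /* ... hand the datagram off ... */
                kfree(dg);                      /* caller must free it */
        }
        return rv;
}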
+
+/*
+ * Reverts actions set up by vmci_setup_notify(). Unmaps and unlocks the
+ * page mapped/locked by vmci_setup_notify().
+ */
+void vmci_ctx_unset_notify(struct vmci_ctx *context)
+{
+ struct page *notify_page;
+
+ spin_lock(&context->lock);
+
+ notify_page = context->notify_page;
+ context->notify = &ctx_dummy_notify;
+ context->notify_page = NULL;
+
+ spin_unlock(&context->lock);
+
+ if (notify_page) {
+ kunmap(notify_page);
+ put_page(notify_page);
+ }
+}
+
+/*
+ * Add remote_cid to the list of contexts the current context wants
+ * notifications from/about.
+ */
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
+{
+ struct vmci_ctx *context;
+ struct vmci_handle_list *notifier, *n;
+ int result;
+ bool exists = false;
+
+ context = vmci_ctx_get(context_id);
+ if (!context)
+ return VMCI_ERROR_NOT_FOUND;
+
+ if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) {
+ pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n",
+ context_id, remote_cid);
+ result = VMCI_ERROR_DST_UNREACHABLE;
+ goto out;
+ }
+
+ if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+ result = VMCI_ERROR_NO_ACCESS;
+ goto out;
+ }
+
+ notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL);
+ if (!notifier) {
+ result = VMCI_ERROR_NO_MEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&notifier->node);
+ notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
+
+ spin_lock(&context->lock);
+
+ list_for_each_entry(n, &context->notifier_list, node) {
+ if (vmci_handle_is_equal(n->handle, notifier->handle)) {
+ exists = true;
+ break;
+ }
+ }
+
+ if (exists) {
+ kfree(notifier);
+ result = VMCI_ERROR_ALREADY_EXISTS;
+ } else {
+ list_add_tail_rcu(&notifier->node, &context->notifier_list);
+ context->n_notifiers++;
+ result = VMCI_SUCCESS;
+ }
+
+ spin_unlock(&context->lock);
+
+ out:
+ vmci_ctx_put(context);
+ return result;
+}
+
+/*
+ * Remove remote_cid from current context's list of contexts it is
+ * interested in getting notifications from/about.
+ */
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
+{
+ struct vmci_ctx *context;
+ struct vmci_handle_list *notifier, *tmp;
+ struct vmci_handle handle;
+ bool found = false;
+
+ context = vmci_ctx_get(context_id);
+ if (!context)
+ return VMCI_ERROR_NOT_FOUND;
+
+ handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
+
+ spin_lock(&context->lock);
+ list_for_each_entry_safe(notifier, tmp,
+ &context->notifier_list, node) {
+ if (vmci_handle_is_equal(notifier->handle, handle)) {
+ list_del_rcu(&notifier->node);
+ context->n_notifiers--;
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&context->lock);
+
+ if (found) {
+ synchronize_rcu();
+ kfree(notifier);
+ }
+
+ vmci_ctx_put(context);
+
+ return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
+}
+
+static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
+ u32 *buf_size, void **pbuf)
+{
+ u32 *notifiers;
+ size_t data_size;
+ struct vmci_handle_list *entry;
+ int i = 0;
+
+ if (context->n_notifiers == 0) {
+ *buf_size = 0;
+ *pbuf = NULL;
+ return VMCI_SUCCESS;
+ }
+
+ data_size = context->n_notifiers * sizeof(*notifiers);
+ if (*buf_size < data_size) {
+ *buf_size = data_size;
+ return VMCI_ERROR_MORE_DATA;
+ }
+
+ notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */
+ if (!notifiers)
+ return VMCI_ERROR_NO_MEM;
+
+ list_for_each_entry(entry, &context->notifier_list, node)
+ notifiers[i++] = entry->handle.context;
+
+ *buf_size = data_size;
+ *pbuf = notifiers;
+ return VMCI_SUCCESS;
+}
+
+static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
+ u32 *buf_size, void **pbuf)
+{
+ struct dbell_cpt_state *dbells;
+ size_t n_doorbells;
+ int i;
+
+ n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
+ if (n_doorbells > 0) {
+ size_t data_size = n_doorbells * sizeof(*dbells);
+ if (*buf_size < data_size) {
+ *buf_size = data_size;
+ return VMCI_ERROR_MORE_DATA;
+ }
+
+ dbells = kmalloc(data_size, GFP_ATOMIC);
+ if (!dbells)
+ return VMCI_ERROR_NO_MEM;
+
+ for (i = 0; i < n_doorbells; i++)
+ dbells[i].handle = vmci_handle_arr_get_entry(
+ context->doorbell_array, i);
+
+ *buf_size = data_size;
+ *pbuf = dbells;
+ } else {
+ *buf_size = 0;
+ *pbuf = NULL;
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Get current context's checkpoint state of given type.
+ */
+int vmci_ctx_get_chkpt_state(u32 context_id,
+ u32 cpt_type,
+ u32 *buf_size,
+ void **pbuf)
+{
+ struct vmci_ctx *context;
+ int result;
+
+ context = vmci_ctx_get(context_id);
+ if (!context)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+
+ switch (cpt_type) {
+ case VMCI_NOTIFICATION_CPT_STATE:
+ result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf);
+ break;
+
+ case VMCI_WELLKNOWN_CPT_STATE:
+ /*
+ * For compatibility with VMX'en with VM to VM communication, we
+ * always return zero wellknown handles.
+ */
+
+ *buf_size = 0;
+ *pbuf = NULL;
+ result = VMCI_SUCCESS;
+ break;
+
+ case VMCI_DOORBELL_CPT_STATE:
+ result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf);
+ break;
+
+ default:
+ pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
+ result = VMCI_ERROR_INVALID_ARGS;
+ break;
+ }
+
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return result;
+}
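
The checkpoint getters use a two-call size negotiation: a first call with a too-small buf_size returns VMCI_ERROR_MORE_DATA with *buf_size updated, and a second call retrieves the data. A hedged sketch; my_save_notifiers() is not part of the patch:

static int my_save_notifiers(u32 cid)
{
        void *buf = NULL;
        u32 size = 0;
        int rv;

        rv = vmci_ctx_get_chkpt_state(cid, VMCI_NOTIFICATION_CPT_STATE,
                                      &size, &buf);
        if (rv == VMCI_ERROR_MORE_DATA)         /* size now holds the needed length */
                rv = vmci_ctx_get_chkpt_state(cid, VMCI_NOTIFICATION_CPT_STATE,
                                              &size, &buf);

        if (rv == VMCI_SUCCESS && buf) {
                /* ... persist size bytes from buf ... */
                kfree(buf);                     /* buffer was kmalloc'ed above */
        }
        return rv;
}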
+
+/*
+ * Set current context's checkpoint state of given type.
+ */
+int vmci_ctx_set_chkpt_state(u32 context_id,
+ u32 cpt_type,
+ u32 buf_size,
+ void *cpt_buf)
+{
+ u32 i;
+ u32 current_id;
+ int result = VMCI_SUCCESS;
+ u32 num_ids = buf_size / sizeof(u32);
+
+ if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) {
+ /*
+ * We would end up here if VMX with VM to VM communication
+ * attempts to restore a checkpoint with wellknown handles.
+ */
+ pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n");
+ return VMCI_ERROR_OBSOLETE;
+ }
+
+ if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) {
+ pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) {
+ current_id = ((u32 *)cpt_buf)[i];
+ result = vmci_ctx_add_notification(context_id, current_id);
+ if (result != VMCI_SUCCESS)
+ break;
+ }
+ if (result != VMCI_SUCCESS)
+ pr_devel("Failed to set cpt state (type=%d) (error=%d)\n",
+ cpt_type, result);
+
+ return result;
+}
+
+/*
+ * Retrieves the specified context's pending notifications in the
+ * form of a handle array. The handle arrays returned are the
+ * actual data - not a copy and should not be modified by the
+ * caller. They must be released using
+ * vmci_ctx_rcv_notifications_release.
+ */
+int vmci_ctx_rcv_notifications_get(u32 context_id,
+ struct vmci_handle_arr **db_handle_array,
+ struct vmci_handle_arr **qp_handle_array)
+{
+ struct vmci_ctx *context;
+ int result = VMCI_SUCCESS;
+
+ context = vmci_ctx_get(context_id);
+ if (context == NULL)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+
+ *db_handle_array = context->pending_doorbell_array;
+ context->pending_doorbell_array = vmci_handle_arr_create(0);
+ if (!context->pending_doorbell_array) {
+ context->pending_doorbell_array = *db_handle_array;
+ *db_handle_array = NULL;
+ result = VMCI_ERROR_NO_MEM;
+ }
+ *qp_handle_array = NULL;
+
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return result;
+}
+
+/*
+ * Releases handle arrays with pending notifications previously
+ * retrieved using vmci_ctx_rcv_notifications_get. If the
+ * notifications were not successfully handed over to the guest,
+ * success must be false.
+ */
+void vmci_ctx_rcv_notifications_release(u32 context_id,
+ struct vmci_handle_arr *db_handle_array,
+ struct vmci_handle_arr *qp_handle_array,
+ bool success)
+{
+ struct vmci_ctx *context = vmci_ctx_get(context_id);
+
+ spin_lock(&context->lock);
+ if (!success) {
+ struct vmci_handle handle;
+
+ /*
+ * New notifications may have been added while we were not
+ * holding the context lock, so we transfer any new pending
+ * doorbell notifications to the old array, and reinstate the
+ * old array.
+ */
+
+ handle = vmci_handle_arr_remove_tail(
+ context->pending_doorbell_array);
+ while (!vmci_handle_is_invalid(handle)) {
+ if (!vmci_handle_arr_has_entry(db_handle_array,
+ handle)) {
+ vmci_handle_arr_append_entry(
+ &db_handle_array, handle);
+ }
+ handle = vmci_handle_arr_remove_tail(
+ context->pending_doorbell_array);
+ }
+ vmci_handle_arr_destroy(context->pending_doorbell_array);
+ context->pending_doorbell_array = db_handle_array;
+ db_handle_array = NULL;
+ } else {
+ ctx_clear_notify_call(context);
+ }
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ if (db_handle_array)
+ vmci_handle_arr_destroy(db_handle_array);
+
+ if (qp_handle_array)
+ vmci_handle_arr_destroy(qp_handle_array);
+}
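
vmci_ctx_rcv_notifications_get() and ..._release() form a hand-off pair: the getter detaches the pending doorbell array, and the release call either drops it (success) or merges any new notifications back (failure). A sketch of the pairing; my_handoff() and my_copy_to_guest() are hypothetical helpers, not part of the patch:

static bool my_copy_to_guest(struct vmci_handle_arr *db_arr);  /* hypothetical */

static int my_handoff(u32 cid)
{
        struct vmci_handle_arr *db_arr = NULL;
        struct vmci_handle_arr *qp_arr = NULL;
        bool success;
        int rv;

        rv = vmci_ctx_rcv_notifications_get(cid, &db_arr, &qp_arr);
        if (rv != VMCI_SUCCESS)
                return rv;

        success = my_copy_to_guest(db_arr);

        /* On failure the pending notifications are reinstated. */
        vmci_ctx_rcv_notifications_release(cid, db_arr, qp_arr, success);
        return success ? VMCI_SUCCESS : VMCI_ERROR_GENERIC;
}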
+
+/*
+ * Registers that a new doorbell handle has been allocated by the
+ * context. Only registered doorbell handles can be notified.
+ */
+int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
+{
+ struct vmci_ctx *context;
+ int result;
+
+ if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ context = vmci_ctx_get(context_id);
+ if (context == NULL)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+ if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) {
+ vmci_handle_arr_append_entry(&context->doorbell_array, handle);
+ result = VMCI_SUCCESS;
+ } else {
+ result = VMCI_ERROR_DUPLICATE_ENTRY;
+ }
+
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return result;
+}
+
+/*
+ * Unregisters a doorbell handle that was previously registered
+ * with vmci_ctx_dbell_create.
+ */
+int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle)
+{
+ struct vmci_ctx *context;
+ struct vmci_handle removed_handle;
+
+ if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ context = vmci_ctx_get(context_id);
+ if (context == NULL)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+ removed_handle =
+ vmci_handle_arr_remove_entry(context->doorbell_array, handle);
+ vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle);
+ spin_unlock(&context->lock);
+
+ vmci_ctx_put(context);
+
+ return vmci_handle_is_invalid(removed_handle) ?
+ VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
+}
+
+/*
+ * Unregisters all doorbell handles that were previously
+ * registered with vmci_ctx_dbell_create.
+ */
+int vmci_ctx_dbell_destroy_all(u32 context_id)
+{
+ struct vmci_ctx *context;
+ struct vmci_handle handle;
+
+ if (context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ context = vmci_ctx_get(context_id);
+ if (context == NULL)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+ do {
+ struct vmci_handle_arr *arr = context->doorbell_array;
+ handle = vmci_handle_arr_remove_tail(arr);
+ } while (!vmci_handle_is_invalid(handle));
+ do {
+ struct vmci_handle_arr *arr = context->pending_doorbell_array;
+ handle = vmci_handle_arr_remove_tail(arr);
+ } while (!vmci_handle_is_invalid(handle));
+ spin_unlock(&context->lock);
+
+ vmci_ctx_put(context);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Registers a notification of a doorbell handle initiated by the
+ * specified source context. The notification of doorbells is
+ * subject to the same isolation rules as datagram delivery. To
+ * allow host side senders of notifications a finer granularity
+ * of sender rights than those assigned to the sending context
+ * itself, the host context is required to specify a different
+ * set of privilege flags that will override the privileges of
+ * the source context.
+ */
+int vmci_ctx_notify_dbell(u32 src_cid,
+ struct vmci_handle handle,
+ u32 src_priv_flags)
+{
+ struct vmci_ctx *dst_context;
+ int result;
+
+ if (vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /* Get the target VM's VMCI context. */
+ dst_context = vmci_ctx_get(handle.context);
+ if (!dst_context) {
+ pr_devel("Invalid context (ID=0x%x)\n", handle.context);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ if (src_cid != handle.context) {
+ u32 dst_priv_flags;
+
+ if (VMCI_CONTEXT_IS_VM(src_cid) &&
+ VMCI_CONTEXT_IS_VM(handle.context)) {
+ pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n",
+ src_cid, handle.context);
+ result = VMCI_ERROR_DST_UNREACHABLE;
+ goto out;
+ }
+
+ result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags);
+ if (result < VMCI_SUCCESS) {
+ pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ goto out;
+ }
+
+ if (src_cid != VMCI_HOST_CONTEXT_ID ||
+ src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) {
+ src_priv_flags = vmci_context_get_priv_flags(src_cid);
+ }
+
+ if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) {
+ result = VMCI_ERROR_NO_ACCESS;
+ goto out;
+ }
+ }
+
+ if (handle.context == VMCI_HOST_CONTEXT_ID) {
+ result = vmci_dbell_host_context_notify(src_cid, handle);
+ } else {
+ spin_lock(&dst_context->lock);
+
+ if (!vmci_handle_arr_has_entry(dst_context->doorbell_array,
+ handle)) {
+ result = VMCI_ERROR_NOT_FOUND;
+ } else {
+ if (!vmci_handle_arr_has_entry(
+ dst_context->pending_doorbell_array,
+ handle)) {
+ vmci_handle_arr_append_entry(
+ &dst_context->pending_doorbell_array,
+ handle);
+
+ ctx_signal_notify(dst_context);
+ wake_up(&dst_context->host_context.wait_queue);
+
+ }
+ result = VMCI_SUCCESS;
+ }
+ spin_unlock(&dst_context->lock);
+ }
+
+ out:
+ vmci_ctx_put(dst_context);
+
+ return result;
+}
+
+bool vmci_ctx_supports_host_qp(struct vmci_ctx *context)
+{
+ return context && context->user_version >= VMCI_VERSION_HOSTQP;
+}
+
+/*
+ * Registers that a new queue pair handle has been allocated by
+ * the context.
+ */
+int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
+{
+ int result;
+
+ if (context == NULL || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) {
+ vmci_handle_arr_append_entry(&context->queue_pair_array,
+ handle);
+ result = VMCI_SUCCESS;
+ } else {
+ result = VMCI_ERROR_DUPLICATE_ENTRY;
+ }
+
+ return result;
+}
+
+/*
+ * Unregisters a queue pair handle that was previously registered
+ * with vmci_ctx_qp_create.
+ */
+int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle)
+{
+ struct vmci_handle hndl;
+
+ if (context == NULL || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle);
+
+ return vmci_handle_is_invalid(hndl) ?
+ VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
+}
+
+/*
+ * Determines whether a given queue pair handle is registered
+ * with the given context.
+ */
+bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle)
+{
+ if (context == NULL || vmci_handle_is_invalid(handle))
+ return false;
+
+ return vmci_handle_arr_has_entry(context->queue_pair_array, handle);
+}
+
+/*
+ * vmci_context_get_priv_flags() - Retrieve privilege flags.
+ * @context_id: The context ID of the VMCI context.
+ *
+ * Retrieves privilege flags of the given VMCI context ID.
+ */
+u32 vmci_context_get_priv_flags(u32 context_id)
+{
+ if (vmci_host_code_active()) {
+ u32 flags;
+ struct vmci_ctx *context;
+
+ context = vmci_ctx_get(context_id);
+ if (!context)
+ return VMCI_LEAST_PRIVILEGE_FLAGS;
+
+ flags = context->priv_flags;
+ vmci_ctx_put(context);
+ return flags;
+ }
+ return VMCI_NO_PRIVILEGE_FLAGS;
+}
+EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags);
+
+/*
+ * vmci_is_context_owner() - Determines if the user is the context owner
+ * @context_id: The context ID of the VMCI context.
+ * @uid: The host user id (real kernel value).
+ *
+ * Determines whether a given UID is the owner of given VMCI context.
+ */
+bool vmci_is_context_owner(u32 context_id, kuid_t uid)
+{
+ bool is_owner = false;
+
+ if (vmci_host_code_active()) {
+ struct vmci_ctx *context = vmci_ctx_get(context_id);
+ if (context) {
+ if (context->cred)
+ is_owner = uid_eq(context->cred->uid, uid);
+ vmci_ctx_put(context);
+ }
+ }
+
+ return is_owner;
+}
+EXPORT_SYMBOL_GPL(vmci_is_context_owner);
diff --git a/drivers/misc/vmw_vmci/vmci_context.h b/drivers/misc/vmw_vmci/vmci_context.h
new file mode 100644
index 000000000000..24a88e68a1e6
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_context.h
@@ -0,0 +1,182 @@
+/*
+ * VMware VMCI driver (vmciContext.h)
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_CONTEXT_H_
+#define _VMCI_CONTEXT_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_datagram.h"
+
+/* Used to determine what checkpoint state to get and set. */
+enum {
+ VMCI_NOTIFICATION_CPT_STATE = 1,
+ VMCI_WELLKNOWN_CPT_STATE = 2,
+ VMCI_DG_OUT_STATE = 3,
+ VMCI_DG_IN_STATE = 4,
+ VMCI_DG_IN_SIZE_STATE = 5,
+ VMCI_DOORBELL_CPT_STATE = 6,
+};
+
+/* Host specific struct used for signalling */
+struct vmci_host {
+ wait_queue_head_t wait_queue;
+};
+
+struct vmci_handle_list {
+ struct list_head node;
+ struct vmci_handle handle;
+};
+
+struct vmci_ctx {
+ struct list_head list_item; /* For global VMCI list. */
+ u32 cid;
+ struct kref kref;
+ struct list_head datagram_queue; /* Head of per VM queue. */
+ u32 pending_datagrams;
+ size_t datagram_queue_size; /* Size of datagram queue in bytes. */
+
+ /*
+ * Version of the code that created
+ * this context; e.g., VMX.
+ */
+ int user_version;
+ spinlock_t lock; /* Locks callQueue and handle_arrays. */
+
+ /*
+ * queue_pairs attached to. The array of
+ * handles for queue pairs is accessed
+ * from the code for QP API, and there
+ * it is protected by the QP lock. It
+ * is also accessed from the context
+ * clean up path, which does not
+ * require a lock. VMCILock is not
+ * used to protect the QP array field.
+ */
+ struct vmci_handle_arr *queue_pair_array;
+
+ /* Doorbells created by context. */
+ struct vmci_handle_arr *doorbell_array;
+
+ /* Doorbells pending for context. */
+ struct vmci_handle_arr *pending_doorbell_array;
+
+ /* Contexts current context is subscribing to. */
+ struct list_head notifier_list;
+ unsigned int n_notifiers;
+
+ struct vmci_host host_context;
+ u32 priv_flags;
+
+ const struct cred *cred;
+ bool *notify; /* Notify flag pointer - hosted only. */
+ struct page *notify_page; /* Page backing the notify UVA. */
+};
+
+/* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. */
+struct vmci_ctx_info {
+ u32 remote_cid;
+ int result;
+};
+
+/* VMCICptBufInfo: Used to set/get current context's checkpoint state. */
+struct vmci_ctx_chkpt_buf_info {
+ u64 cpt_buf;
+ u32 cpt_type;
+ u32 buf_size;
+ s32 result;
+ u32 _pad;
+};
+
+/*
+ * VMCINotificationReceiveInfo: Used to receive pending notifications
+ * for doorbells and queue pairs.
+ */
+struct vmci_ctx_notify_recv_info {
+ u64 db_handle_buf_uva;
+ u64 db_handle_buf_size;
+ u64 qp_handle_buf_uva;
+ u64 qp_handle_buf_size;
+ s32 result;
+ u32 _pad;
+};
+
+/*
+ * Utility function that checks whether two entities are allowed
+ * to interact. If one of them is restricted, the other one must
+ * be trusted.
+ */
+static inline bool vmci_deny_interaction(u32 part_one, u32 part_two)
+{
+ return ((part_one & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+ !(part_two & VMCI_PRIVILEGE_FLAG_TRUSTED)) ||
+ ((part_two & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+ !(part_one & VMCI_PRIVILEGE_FLAG_TRUSTED));
+}
+
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 flags,
+ uintptr_t event_hnd, int version,
+ const struct cred *cred);
+void vmci_ctx_destroy(struct vmci_ctx *context);
+
+bool vmci_ctx_supports_host_qp(struct vmci_ctx *context);
+int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg);
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+ size_t *max_size, struct vmci_datagram **dg);
+int vmci_ctx_pending_datagrams(u32 cid, u32 *pending);
+struct vmci_ctx *vmci_ctx_get(u32 cid);
+void vmci_ctx_put(struct vmci_ctx *context);
+bool vmci_ctx_exists(u32 cid);
+
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_get_chkpt_state(u32 context_id, u32 cpt_type,
+ u32 *num_cids, void **cpt_buf_ptr);
+int vmci_ctx_set_chkpt_state(u32 context_id, u32 cpt_type,
+ u32 num_cids, void *cpt_buf);
+
+int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle);
+int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle);
+bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle);
+
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context);
+void vmci_ctx_unset_notify(struct vmci_ctx *context);
+
+int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy_all(u32 context_id);
+int vmci_ctx_notify_dbell(u32 cid, struct vmci_handle handle,
+ u32 src_priv_flags);
+
+int vmci_ctx_rcv_notifications_get(u32 context_id, struct vmci_handle_arr
+ **db_handle_array, struct vmci_handle_arr
+ **qp_handle_array);
+void vmci_ctx_rcv_notifications_release(u32 context_id, struct vmci_handle_arr
+ *db_handle_array, struct vmci_handle_arr
+ *qp_handle_array, bool success);
+
+static inline u32 vmci_ctx_get_id(struct vmci_ctx *context)
+{
+ if (!context)
+ return VMCI_INVALID_ID;
+ return context->cid;
+}
+
+#endif /* _VMCI_CONTEXT_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
new file mode 100644
index 000000000000..ed5c433cd493
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -0,0 +1,500 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bug.h>
+
+#include "vmci_datagram.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+#include "vmci_route.h"
+
+/*
+ * struct datagram_entry describes the datagram entity. It is used for datagram
+ * entities created only on the host.
+ */
+struct datagram_entry {
+ struct vmci_resource resource;
+ u32 flags;
+ bool run_delayed;
+ vmci_datagram_recv_cb recv_cb;
+ void *client_data;
+ u32 priv_flags;
+};
+
+struct delayed_datagram_info {
+ struct datagram_entry *entry;
+ struct vmci_datagram msg;
+ struct work_struct work;
+ bool in_dg_host_queue;
+};
+
+/* Number of in-flight host->host datagrams */
+static atomic_t delayed_dg_host_queue_size = ATOMIC_INIT(0);
+
+/*
+ * Create a datagram entry given a handle pointer.
+ */
+static int dg_create_handle(u32 resource_id,
+ u32 flags,
+ u32 priv_flags,
+ vmci_datagram_recv_cb recv_cb,
+ void *client_data, struct vmci_handle *out_handle)
+{
+ int result;
+ u32 context_id;
+ struct vmci_handle handle;
+ struct datagram_entry *entry;
+
+ if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) {
+ context_id = VMCI_INVALID_ID;
+ } else {
+ context_id = vmci_get_context_id();
+ if (context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_NO_RESOURCES;
+ }
+
+ handle = vmci_make_handle(context_id, resource_id);
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_warn("Failed allocating memory for datagram entry\n");
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ entry->run_delayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? true : false;
+ entry->flags = flags;
+ entry->recv_cb = recv_cb;
+ entry->client_data = client_data;
+ entry->priv_flags = priv_flags;
+
+ /* Make datagram resource live. */
+ result = vmci_resource_add(&entry->resource,
+ VMCI_RESOURCE_TYPE_DATAGRAM,
+ handle);
+ if (result != VMCI_SUCCESS) {
+ pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
+ handle.context, handle.resource, result);
+ kfree(entry);
+ return result;
+ }
+
+ *out_handle = vmci_resource_handle(&entry->resource);
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Internal utility function with the same purpose as
+ * vmci_datagram_get_priv_flags that also takes a context_id.
+ */
+static int vmci_datagram_get_priv_flags(u32 context_id,
+ struct vmci_handle handle,
+ u32 *priv_flags)
+{
+ if (context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (context_id == VMCI_HOST_CONTEXT_ID) {
+ struct datagram_entry *src_entry;
+ struct vmci_resource *resource;
+
+ resource = vmci_resource_by_handle(handle,
+ VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src_entry = container_of(resource, struct datagram_entry,
+ resource);
+ *priv_flags = src_entry->priv_flags;
+ vmci_resource_put(resource);
+ } else if (context_id == VMCI_HYPERVISOR_CONTEXT_ID)
+ *priv_flags = VMCI_MAX_PRIVILEGE_FLAGS;
+ else
+ *priv_flags = vmci_context_get_priv_flags(context_id);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Calls the specified callback in a delayed context.
+ */
+static void dg_delayed_dispatch(struct work_struct *work)
+{
+ struct delayed_datagram_info *dg_info =
+ container_of(work, struct delayed_datagram_info, work);
+
+ dg_info->entry->recv_cb(dg_info->entry->client_data, &dg_info->msg);
+
+ vmci_resource_put(&dg_info->entry->resource);
+
+ if (dg_info->in_dg_host_queue)
+ atomic_dec(&delayed_dg_host_queue_size);
+
+ kfree(dg_info);
+}
+
+/*
+ * Dispatch datagram as a host, to the host, or other vm context. This
+ * function cannot dispatch to hypervisor context handlers. This should
+ * have been handled before we get here by vmci_datagram_dispatch.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
+{
+ int retval;
+ size_t dg_size;
+ u32 src_priv_flags;
+
+ dg_size = VMCI_DG_SIZE(dg);
+
+ /* Host cannot send to the hypervisor. */
+ if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID)
+ return VMCI_ERROR_DST_UNREACHABLE;
+
+ /* Check that source handle matches sending context. */
+ if (dg->src.context != context_id) {
+ pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n",
+ context_id, dg->src.context, dg->src.resource);
+ return VMCI_ERROR_NO_ACCESS;
+ }
+
+ /* Get hold of privileges of sending endpoint. */
+ retval = vmci_datagram_get_priv_flags(context_id, dg->src,
+ &src_priv_flags);
+ if (retval != VMCI_SUCCESS) {
+ pr_warn("Couldn't get privileges (handle=0x%x:0x%x)\n",
+ dg->src.context, dg->src.resource);
+ return retval;
+ }
+
+ /* Determine if we should route to host or guest destination. */
+ if (dg->dst.context == VMCI_HOST_CONTEXT_ID) {
+ /* Route to host datagram entry. */
+ struct datagram_entry *dst_entry;
+ struct vmci_resource *resource;
+
+ if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
+ dg->dst.resource == VMCI_EVENT_HANDLER) {
+ return vmci_event_dispatch(dg);
+ }
+
+ resource = vmci_resource_by_handle(dg->dst,
+ VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource) {
+ pr_devel("Sending to invalid destination (handle=0x%x:0x%x)\n",
+ dg->dst.context, dg->dst.resource);
+ return VMCI_ERROR_INVALID_RESOURCE;
+ }
+ dst_entry = container_of(resource, struct datagram_entry,
+ resource);
+ if (vmci_deny_interaction(src_priv_flags,
+ dst_entry->priv_flags)) {
+ vmci_resource_put(resource);
+ return VMCI_ERROR_NO_ACCESS;
+ }
+
+ /*
+ * If a VMCI datagram destined for the host is also sent by the
+ * host, we always run it delayed. This ensures that no locks
+ * are held when the datagram callback runs.
+ */
+ if (dst_entry->run_delayed ||
+ dg->src.context == VMCI_HOST_CONTEXT_ID) {
+ struct delayed_datagram_info *dg_info;
+
+ if (atomic_add_return(1, &delayed_dg_host_queue_size)
+ == VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) {
+ atomic_dec(&delayed_dg_host_queue_size);
+ vmci_resource_put(resource);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ dg_info = kmalloc(sizeof(*dg_info) +
+ (size_t) dg->payload_size, GFP_ATOMIC);
+ if (!dg_info) {
+ atomic_dec(&delayed_dg_host_queue_size);
+ vmci_resource_put(resource);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ dg_info->in_dg_host_queue = true;
+ dg_info->entry = dst_entry;
+ memcpy(&dg_info->msg, dg, dg_size);
+
+ INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+ schedule_work(&dg_info->work);
+ retval = VMCI_SUCCESS;
+
+ } else {
+ retval = dst_entry->recv_cb(dst_entry->client_data, dg);
+ vmci_resource_put(resource);
+ if (retval < VMCI_SUCCESS)
+ return retval;
+ }
+ } else {
+ /* Route to destination VM context. */
+ struct vmci_datagram *new_dg;
+
+ if (context_id != dg->dst.context) {
+ if (vmci_deny_interaction(src_priv_flags,
+ vmci_context_get_priv_flags
+ (dg->dst.context))) {
+ return VMCI_ERROR_NO_ACCESS;
+ } else if (VMCI_CONTEXT_IS_VM(context_id)) {
+ /*
+ * If the sending context is a VM, it
+ * cannot reach another VM.
+ */
+
+ pr_devel("Datagram communication between VMs not supported (src=0x%x, dst=0x%x)\n",
+ context_id, dg->dst.context);
+ return VMCI_ERROR_DST_UNREACHABLE;
+ }
+ }
+
+ /* We make a copy to enqueue. */
+ new_dg = kmalloc(dg_size, GFP_KERNEL);
+ if (new_dg == NULL)
+ return VMCI_ERROR_NO_MEM;
+
+ memcpy(new_dg, dg, dg_size);
+ retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg);
+ if (retval < VMCI_SUCCESS) {
+ kfree(new_dg);
+ return retval;
+ }
+ }
+
+ /*
+ * We currently truncate the size to signed 32 bits. This doesn't
+ * matter for this handler as it only supports 4Kb messages.
+ */
+ return (int)dg_size;
+}
+
+/*
+ * Dispatch datagram as a guest, down through the VMX and potentially to
+ * the host.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+static int dg_dispatch_as_guest(struct vmci_datagram *dg)
+{
+ int retval;
+ struct vmci_resource *resource;
+
+ resource = vmci_resource_by_handle(dg->src,
+ VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource)
+ return VMCI_ERROR_NO_HANDLE;
+
+ retval = vmci_send_datagram(dg);
+ vmci_resource_put(resource);
+ return retval;
+}
+
+/*
+ * Dispatch datagram. This will determine the routing for the datagram
+ * and dispatch it accordingly.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+int vmci_datagram_dispatch(u32 context_id,
+ struct vmci_datagram *dg, bool from_guest)
+{
+ int retval;
+ enum vmci_route route;
+
+ BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24);
+
+ if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) {
+ pr_devel("Payload (size=%llu bytes) too big to send\n",
+ (unsigned long long)dg->payload_size);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ retval = vmci_route(&dg->src, &dg->dst, from_guest, &route);
+ if (retval < VMCI_SUCCESS) {
+ pr_devel("Failed to route datagram (src=0x%x, dst=0x%x, err=%d)\n",
+ dg->src.context, dg->dst.context, retval);
+ return retval;
+ }
+
+ if (VMCI_ROUTE_AS_HOST == route) {
+ if (VMCI_INVALID_ID == context_id)
+ context_id = VMCI_HOST_CONTEXT_ID;
+ return dg_dispatch_as_host(context_id, dg);
+ }
+
+ if (VMCI_ROUTE_AS_GUEST == route)
+ return dg_dispatch_as_guest(dg);
+
+ pr_warn("Unknown route (%d) for datagram\n", route);
+ return VMCI_ERROR_DST_UNREACHABLE;
+}
+
+/*
+ * Invoke the handler for the given datagram. This is intended to be
+ * called only when acting as a guest and receiving a datagram from the
+ * virtual device.
+ */
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
+{
+ struct vmci_resource *resource;
+ struct datagram_entry *dst_entry;
+
+ resource = vmci_resource_by_handle(dg->dst,
+ VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource) {
+ pr_devel("destination (handle=0x%x:0x%x) doesn't exist\n",
+ dg->dst.context, dg->dst.resource);
+ return VMCI_ERROR_NO_HANDLE;
+ }
+
+ dst_entry = container_of(resource, struct datagram_entry, resource);
+ if (dst_entry->run_delayed) {
+ struct delayed_datagram_info *dg_info;
+
+ dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg->payload_size,
+ GFP_ATOMIC);
+ if (!dg_info) {
+ vmci_resource_put(resource);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ dg_info->in_dg_host_queue = false;
+ dg_info->entry = dst_entry;
+ memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
+
+ INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+ schedule_work(&dg_info->work);
+ } else {
+ dst_entry->recv_cb(dst_entry->client_data, dg);
+ vmci_resource_put(resource);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_datagram_create_handle_priv() - Create host context datagram endpoint
+ * @resource_id: The resource ID.
+ * @flags: Datagram Flags.
+ * @priv_flags: Privilege Flags.
+ * @recv_cb: Callback when receiving datagrams.
+ * @client_data: Opaque pointer passed to @recv_cb when datagrams arrive.
+ * @out_handle: vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to it.
+ */
+int vmci_datagram_create_handle_priv(u32 resource_id,
+ u32 flags,
+ u32 priv_flags,
+ vmci_datagram_recv_cb recv_cb,
+ void *client_data,
+ struct vmci_handle *out_handle)
+{
+ if (out_handle == NULL)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (recv_cb == NULL) {
+ pr_devel("Client callback needed when creating datagram\n");
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ return dg_create_handle(resource_id, flags, priv_flags, recv_cb,
+ client_data, out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle_priv);
+
+/*
+ * vmci_datagram_create_handle() - Create host context datagram endpoint
+ * @resource_id: Resource ID.
+ * @flags: Datagram Flags.
+ * @recv_cb: Callback when receiving datagrams.
+ * @client_data: Opaque pointer passed to @recv_cb when datagrams arrive.
+ * @out_handle: vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to
+ * it. Same as vmci_datagram_create_handle_priv() without the privilege
+ * flags argument.
+ */
+int vmci_datagram_create_handle(u32 resource_id,
+ u32 flags,
+ vmci_datagram_recv_cb recv_cb,
+ void *client_data,
+ struct vmci_handle *out_handle)
+{
+ return vmci_datagram_create_handle_priv(
+ resource_id, flags,
+ VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+ recv_cb, client_data,
+ out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle);
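+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a host-side
+ * client could create an endpoint and tear it down again roughly as
+ * below. The resource ID 42, my_recv_cb and the flags value 0 (no
+ * datagram flags) are made-up example values.
+ *
+ * static int my_recv_cb(void *client_data, struct vmci_datagram *msg)
+ * {
+ * pr_info("got %llu payload bytes\n",
+ * (unsigned long long)msg->payload_size);
+ * return VMCI_SUCCESS;
+ * }
+ *
+ * struct vmci_handle ep;
+ *
+ * if (vmci_datagram_create_handle(42, 0, my_recv_cb, NULL,
+ * &ep) == VMCI_SUCCESS)
+ * vmci_datagram_destroy_handle(ep);
+ */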
+
+/*
+ * vmci_datagram_destroy_handle() - Destroys datagram handle
+ * @handle: vmci_handle to be destroyed and reaped.
+ *
+ * Use this function to destroy any datagram handles created by
+ * the vmci_datagram_create_handle{,_priv}() functions.
+ */
+int vmci_datagram_destroy_handle(struct vmci_handle handle)
+{
+ struct datagram_entry *entry;
+ struct vmci_resource *resource;
+
+ resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource) {
+ pr_devel("Failed to destroy datagram (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ entry = container_of(resource, struct datagram_entry, resource);
+
+ vmci_resource_put(&entry->resource);
+ vmci_resource_remove(&entry->resource);
+ kfree(entry);
+
+ return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_destroy_handle);
+
+/*
+ * vmci_datagram_send() - Send a datagram
+ * @msg: The datagram to send.
+ *
+ * Sends the provided datagram on its merry way.
+ */
+int vmci_datagram_send(struct vmci_datagram *msg)
+{
+ if (msg == NULL)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ return vmci_datagram_dispatch(VMCI_INVALID_ID, msg, false);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_send);
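+
+/*
+ * Send-side sketch (illustrative only, not part of the driver): the
+ * caller builds a buffer that starts with struct vmci_datagram followed
+ * by the payload, fills in dst, src and payload_size, and hands it to
+ * vmci_datagram_send(). peer_cid, peer_rid and my_handle (the handle
+ * returned by vmci_datagram_create_handle()) are assumed to exist.
+ *
+ * size_t len = sizeof(struct vmci_datagram) + sizeof(u32);
+ * struct vmci_datagram *dg = kzalloc(len, GFP_KERNEL);
+ *
+ * if (dg) {
+ * dg->dst = vmci_make_handle(peer_cid, peer_rid);
+ * dg->src = my_handle;
+ * dg->payload_size = sizeof(u32);
+ * *(u32 *)VMCI_DG_PAYLOAD(dg) = 0xcafe;
+ * if (vmci_datagram_send(dg) < VMCI_SUCCESS)
+ * pr_devel("send failed\n");
+ * kfree(dg);
+ * }
+ */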
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.h b/drivers/misc/vmw_vmci/vmci_datagram.h
new file mode 100644
index 000000000000..eb4aab7f64ec
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.h
@@ -0,0 +1,52 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_DATAGRAM_H_
+#define _VMCI_DATAGRAM_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include "vmci_context.h"
+
+#define VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE 256
+
+/*
+ * The struct vmci_datagram_queue_entry is a queue header for the in-kernel VMCI
+ * datagram queues. It is allocated in non-paged memory, as the
+ * content is accessed while holding a spinlock. The pending datagram
+ * itself may be allocated from paged memory. We shadow the size of
+ * the datagram in the non-paged queue entry as this size is used
+ * while holding the same spinlock as above.
+ */
+struct vmci_datagram_queue_entry {
+ struct list_head list_item; /* For queuing. */
+ size_t dg_size; /* Size of datagram. */
+ struct vmci_datagram *dg; /* Pending datagram. */
+};
+
+/* VMCIDatagramSendRecvInfo */
+struct vmci_datagram_snd_rcv_info {
+ u64 addr;
+ u32 len;
+ s32 result;
+};
+
+/* Datagram API for non-public use. */
+int vmci_datagram_dispatch(u32 context_id, struct vmci_datagram *dg,
+ bool from_guest);
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg);
+
+#endif /* _VMCI_DATAGRAM_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
new file mode 100644
index 000000000000..a8cee33ae8d2
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -0,0 +1,601 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/completion.h>
+#include <linux/hash.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_resource.h"
+#include "vmci_driver.h"
+#include "vmci_route.h"
+
+
+#define VMCI_DOORBELL_INDEX_BITS 6
+#define VMCI_DOORBELL_INDEX_TABLE_SIZE (1 << VMCI_DOORBELL_INDEX_BITS)
+#define VMCI_DOORBELL_HASH(_idx) hash_32(_idx, VMCI_DOORBELL_INDEX_BITS)
+
+/*
+ * struct dbell_entry describes a doorbell notification handle allocated
+ * by the host.
+ */
+struct dbell_entry {
+ struct vmci_resource resource;
+ struct hlist_node node;
+ struct work_struct work;
+ vmci_callback notify_cb;
+ void *client_data;
+ u32 idx;
+ u32 priv_flags;
+ bool run_delayed;
+ atomic_t active; /* Only used by guest personality */
+};
+
+/* The VMCI index table keeps track of currently registered doorbells. */
+struct dbell_index_table {
+ spinlock_t lock; /* Index table lock */
+ struct hlist_head entries[VMCI_DOORBELL_INDEX_TABLE_SIZE];
+};
+
+static struct dbell_index_table vmci_doorbell_it = {
+ .lock = __SPIN_LOCK_UNLOCKED(vmci_doorbell_it.lock),
+};
+
+/*
+ * The max_notify_idx is one larger than the currently known bitmap index in
+ * use, and is used to determine how much of the bitmap needs to be scanned.
+ */
+static u32 max_notify_idx;
+
+/*
+ * The notify_idx_count is used for determining whether there are free entries
+ * within the bitmap (if notify_idx_count + 1 < max_notify_idx).
+ */
+static u32 notify_idx_count;
+
+/*
+ * The last_notify_idx_reserved is used to track the last index handed out - in
+ * the case where multiple handles share a notification index, we hand out
+ * indexes round robin based on last_notify_idx_reserved.
+ */
+static u32 last_notify_idx_reserved;
+
+/* This is a one-entry cache used by the index allocation. */
+static u32 last_notify_idx_released = PAGE_SIZE;
+
+
+/*
+ * Utility function that retrieves the privilege flags associated
+ * with a given doorbell handle. For guest endpoints, the
+ * privileges are determined by the context ID, but for host
+ * endpoints privileges are associated with the complete
+ * handle. Hypervisor endpoints are not yet supported.
+ */
+int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags)
+{
+ if (priv_flags == NULL || handle.context == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (handle.context == VMCI_HOST_CONTEXT_ID) {
+ struct dbell_entry *entry;
+ struct vmci_resource *resource;
+
+ resource = vmci_resource_by_handle(handle,
+ VMCI_RESOURCE_TYPE_DOORBELL);
+ if (!resource)
+ return VMCI_ERROR_NOT_FOUND;
+
+ entry = container_of(resource, struct dbell_entry, resource);
+ *priv_flags = entry->priv_flags;
+ vmci_resource_put(resource);
+ } else if (handle.context == VMCI_HYPERVISOR_CONTEXT_ID) {
+ /*
+ * Hypervisor endpoints for notifications are not
+ * supported (yet).
+ */
+ return VMCI_ERROR_INVALID_ARGS;
+ } else {
+ *priv_flags = vmci_context_get_priv_flags(handle.context);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Find doorbell entry by bitmap index.
+ */
+static struct dbell_entry *dbell_index_table_find(u32 idx)
+{
+ u32 bucket = VMCI_DOORBELL_HASH(idx);
+ struct dbell_entry *dbell;
+
+ hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],
+ node) {
+ if (idx == dbell->idx)
+ return dbell;
+ }
+
+ return NULL;
+}
+
+/*
+ * Add the given entry to the index table. This will take a reference to
+ * the entry's resource so that the entry is not deleted before it is
+ * removed from the table.
+ */
+static void dbell_index_table_add(struct dbell_entry *entry)
+{
+ u32 bucket;
+ u32 new_notify_idx;
+
+ vmci_resource_get(&entry->resource);
+
+ spin_lock_bh(&vmci_doorbell_it.lock);
+
+ /*
+ * Below we try to allocate an index in the notification
+ * bitmap with "not too much" sharing between resources. If we
+ * use less than the full bitmap, we either add to the end if
+ * there are no unused indices within the currently used area,
+ * or we search for unused ones. If we use the full bitmap, we
+ * allocate the index round robin.
+ */
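+ /*
+ * For example (illustrative numbers only): with max_notify_idx == 4,
+ * indices 0 and 3 in use, last_notify_idx_reserved == 3 and
+ * last_notify_idx_released at its PAGE_SIZE reset value, the scan
+ * below starts at 3, skips the used indices and hands out the free
+ * index 1 instead of growing the bitmap. When no reusable index is
+ * found, max_notify_idx is bumped instead, and once all PAGE_SIZE
+ * indices are in use allocation falls back to plain round robin.
+ */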
+ if (max_notify_idx < PAGE_SIZE || notify_idx_count < PAGE_SIZE) {
+ if (last_notify_idx_released < max_notify_idx &&
+ !dbell_index_table_find(last_notify_idx_released)) {
+ new_notify_idx = last_notify_idx_released;
+ last_notify_idx_released = PAGE_SIZE;
+ } else {
+ bool reused = false;
+ new_notify_idx = last_notify_idx_reserved;
+ if (notify_idx_count + 1 < max_notify_idx) {
+ do {
+ if (!dbell_index_table_find
+ (new_notify_idx)) {
+ reused = true;
+ break;
+ }
+ new_notify_idx = (new_notify_idx + 1) %
+ max_notify_idx;
+ } while (new_notify_idx !=
+ last_notify_idx_released);
+ }
+ if (!reused) {
+ new_notify_idx = max_notify_idx;
+ max_notify_idx++;
+ }
+ }
+ } else {
+ new_notify_idx = (last_notify_idx_reserved + 1) % PAGE_SIZE;
+ }
+
+ last_notify_idx_reserved = new_notify_idx;
+ notify_idx_count++;
+
+ entry->idx = new_notify_idx;
+ bucket = VMCI_DOORBELL_HASH(entry->idx);
+ hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);
+
+ spin_unlock_bh(&vmci_doorbell_it.lock);
+}
+
+/*
+ * Remove the given entry from the index table. This will release the
+ * reference taken on the entry's resource when it was added.
+ */
+static void dbell_index_table_remove(struct dbell_entry *entry)
+{
+ spin_lock_bh(&vmci_doorbell_it.lock);
+
+ hlist_del_init(&entry->node);
+
+ notify_idx_count--;
+ if (entry->idx == max_notify_idx - 1) {
+ /*
+ * If we delete an entry with the maximum known
+ * notification index, we take the opportunity to
+ * prune the current max. As there might be other
+ * unused indices immediately below, we lower the
+ * maximum until we hit an index in use.
+ */
+ while (max_notify_idx > 0 &&
+ !dbell_index_table_find(max_notify_idx - 1))
+ max_notify_idx--;
+ }
+
+ last_notify_idx_released = entry->idx;
+
+ spin_unlock_bh(&vmci_doorbell_it.lock);
+
+ vmci_resource_put(&entry->resource);
+}
+
+/*
+ * Creates a link between the given doorbell handle and the given
+ * index in the bitmap in the device backend. A notification state
+ * is created in the hypervisor.
+ */
+static int dbell_link(struct vmci_handle handle, u32 notify_idx)
+{
+ struct vmci_doorbell_link_msg link_msg;
+
+ link_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_DOORBELL_LINK);
+ link_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ link_msg.hdr.payload_size = sizeof(link_msg) - VMCI_DG_HEADERSIZE;
+ link_msg.handle = handle;
+ link_msg.notify_idx = notify_idx;
+
+ return vmci_send_datagram(&link_msg.hdr);
+}
+
+/*
+ * Unlinks the given doorbell handle from an index in the bitmap in
+ * the device backend. The notification state is destroyed in the hypervisor.
+ */
+static int dbell_unlink(struct vmci_handle handle)
+{
+ struct vmci_doorbell_unlink_msg unlink_msg;
+
+ unlink_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_DOORBELL_UNLINK);
+ unlink_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ unlink_msg.hdr.payload_size = sizeof(unlink_msg) - VMCI_DG_HEADERSIZE;
+ unlink_msg.handle = handle;
+
+ return vmci_send_datagram(&unlink_msg.hdr);
+}
+
+/*
+ * Notify another guest or the host. We send a datagram down to the
+ * host via the hypervisor with the notification info.
+ */
+static int dbell_notify_as_guest(struct vmci_handle handle, u32 priv_flags)
+{
+ struct vmci_doorbell_notify_msg notify_msg;
+
+ notify_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_DOORBELL_NOTIFY);
+ notify_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ notify_msg.hdr.payload_size = sizeof(notify_msg) - VMCI_DG_HEADERSIZE;
+ notify_msg.handle = handle;
+
+ return vmci_send_datagram(&notify_msg.hdr);
+}
+
+/*
+ * Calls the specified callback in a delayed context.
+ */
+static void dbell_delayed_dispatch(struct work_struct *work)
+{
+ struct dbell_entry *entry = container_of(work,
+ struct dbell_entry, work);
+
+ entry->notify_cb(entry->client_data);
+ vmci_resource_put(&entry->resource);
+}
+
+/*
+ * Dispatches a doorbell notification to the host context.
+ */
+int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
+{
+ struct dbell_entry *entry;
+ struct vmci_resource *resource;
+
+ if (vmci_handle_is_invalid(handle)) {
+ pr_devel("Notifying an invalid doorbell (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ resource = vmci_resource_by_handle(handle,
+ VMCI_RESOURCE_TYPE_DOORBELL);
+ if (!resource) {
+ pr_devel("Notifying an unknown doorbell (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ entry = container_of(resource, struct dbell_entry, resource);
+ if (entry->run_delayed) {
+ schedule_work(&entry->work);
+ } else {
+ entry->notify_cb(entry->client_data);
+ vmci_resource_put(resource);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Register the notification bitmap with the host.
+ */
+bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
+{
+ int result;
+ struct vmci_notify_bm_set_msg bitmap_set_msg;
+
+ bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_SET_NOTIFY_BITMAP);
+ bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
+ VMCI_DG_HEADERSIZE;
+ bitmap_set_msg.bitmap_ppn = bitmap_ppn;
+
+ result = vmci_send_datagram(&bitmap_set_msg.hdr);
+ if (result != VMCI_SUCCESS) {
+ pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n",
+ bitmap_ppn, result);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Executes or schedules the handlers for a given notify index.
+ */
+static void dbell_fire_entries(u32 notify_idx)
+{
+ u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
+ struct dbell_entry *dbell;
+
+ spin_lock_bh(&vmci_doorbell_it.lock);
+
+ hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {
+ if (dbell->idx == notify_idx &&
+ atomic_read(&dbell->active) == 1) {
+ if (dbell->run_delayed) {
+ vmci_resource_get(&dbell->resource);
+ schedule_work(&dbell->work);
+ } else {
+ dbell->notify_cb(dbell->client_data);
+ }
+ }
+ }
+
+ spin_unlock_bh(&vmci_doorbell_it.lock);
+}
+
+/*
+ * Scans the notification bitmap, collects pending notifications,
+ * resets the bitmap and invokes appropriate callbacks.
+ */
+void vmci_dbell_scan_notification_entries(u8 *bitmap)
+{
+ u32 idx;
+
+ for (idx = 0; idx < max_notify_idx; idx++) {
+ if (bitmap[idx] & 0x1) {
+ bitmap[idx] &= ~1;
+ dbell_fire_entries(idx);
+ }
+ }
+}
+
+/*
+ * vmci_doorbell_create() - Creates a doorbell
+ * @handle: A handle used to track the resource. Can be invalid.
+ * @flags: Flag that determines context of callback.
+ * @priv_flags: Privileges flags.
+ * @notify_cb: The callback to be invoked when the doorbell fires.
+ * @client_data: A parameter to be passed to the callback.
+ *
+ * Creates a doorbell with the given callback. If the handle is
+ * VMCI_INVALID_HANDLE, a free handle will be assigned, if
+ * possible. The callback can be run immediately (potentially with
+ * locks held - the default) or delayed (in a kernel thread) by
+ * specifying the flag VMCI_FLAG_DELAYED_CB. If delayed execution
+ * is selected, a given callback may not be run if the kernel is
+ * unable to allocate memory for the delayed execution (highly
+ * unlikely).
+ */
+int vmci_doorbell_create(struct vmci_handle *handle,
+ u32 flags,
+ u32 priv_flags,
+ vmci_callback notify_cb, void *client_data)
+{
+ struct dbell_entry *entry;
+ struct vmci_handle new_handle;
+ int result;
+
+ if (!handle || !notify_cb || flags & ~VMCI_FLAG_DELAYED_CB ||
+ priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry == NULL) {
+ pr_warn("Failed allocating memory for datagram entry\n");
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ if (vmci_handle_is_invalid(*handle)) {
+ u32 context_id = vmci_get_context_id();
+
+ /* Let resource code allocate a free ID for us */
+ new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
+ } else {
+ bool valid_context = false;
+
+ /*
+ * Validate the handle. We must do both of the checks below
+ * because we can be acting as both a host and a guest at the
+ * same time. We always allow the host context ID, since the
+ * host functionality is in practice always there with the
+ * unified driver.
+ */
+ if (handle->context == VMCI_HOST_CONTEXT_ID ||
+ (vmci_guest_code_active() &&
+ vmci_get_context_id() == handle->context)) {
+ valid_context = true;
+ }
+
+ if (!valid_context || handle->resource == VMCI_INVALID_ID) {
+ pr_devel("Invalid argument (handle=0x%x:0x%x)\n",
+ handle->context, handle->resource);
+ result = VMCI_ERROR_INVALID_ARGS;
+ goto free_mem;
+ }
+
+ new_handle = *handle;
+ }
+
+ entry->idx = 0;
+ INIT_HLIST_NODE(&entry->node);
+ entry->priv_flags = priv_flags;
+ INIT_WORK(&entry->work, dbell_delayed_dispatch);
+ entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB;
+ entry->notify_cb = notify_cb;
+ entry->client_data = client_data;
+ atomic_set(&entry->active, 0);
+
+ result = vmci_resource_add(&entry->resource,
+ VMCI_RESOURCE_TYPE_DOORBELL,
+ new_handle);
+ if (result != VMCI_SUCCESS) {
+ pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
+ new_handle.context, new_handle.resource, result);
+ goto free_mem;
+ }
+
+ new_handle = vmci_resource_handle(&entry->resource);
+ if (vmci_guest_code_active()) {
+ dbell_index_table_add(entry);
+ result = dbell_link(new_handle, entry->idx);
+ if (VMCI_SUCCESS != result)
+ goto destroy_resource;
+
+ atomic_set(&entry->active, 1);
+ }
+
+ *handle = new_handle;
+
+ return result;
+
+ destroy_resource:
+ dbell_index_table_remove(entry);
+ vmci_resource_remove(&entry->resource);
+ free_mem:
+ kfree(entry);
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_create);
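+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a client
+ * that wants its callback run from a kernel thread could create and
+ * later destroy a doorbell as below; my_dbell_cb is a made-up name.
+ *
+ * static void my_dbell_cb(void *client_data)
+ * {
+ * pr_info("doorbell rang\n");
+ * }
+ *
+ * struct vmci_handle db = VMCI_INVALID_HANDLE;
+ *
+ * if (vmci_doorbell_create(&db, VMCI_FLAG_DELAYED_CB,
+ * VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+ * my_dbell_cb, NULL) == VMCI_SUCCESS)
+ * vmci_doorbell_destroy(db);
+ */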
+
+/*
+ * vmci_doorbell_destroy() - Destroy a doorbell.
+ * @handle: The handle tracking the resource.
+ *
+ * Destroys a doorbell previously created with vmci_doorbell_create(). This
+ * operation may block waiting for a callback to finish.
+ */
+int vmci_doorbell_destroy(struct vmci_handle handle)
+{
+ struct dbell_entry *entry;
+ struct vmci_resource *resource;
+
+ if (vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ resource = vmci_resource_by_handle(handle,
+ VMCI_RESOURCE_TYPE_DOORBELL);
+ if (!resource) {
+ pr_devel("Failed to destroy doorbell (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ entry = container_of(resource, struct dbell_entry, resource);
+
+ if (vmci_guest_code_active()) {
+ int result;
+
+ dbell_index_table_remove(entry);
+
+ result = dbell_unlink(handle);
+ if (VMCI_SUCCESS != result) {
+
+ /*
+ * The only reason this should fail would be
+ * an inconsistency between guest and
+ * hypervisor state, where the guest believes
+ * it has an active registration whereas the
+ * hypervisor doesn't. One case where this may
+ * happen is if a doorbell is unregistered
+ * following a hibernation at a time where the
+ * doorbell state hasn't been restored on the
+ * hypervisor side yet. Since the handle has
+ * now been removed in the guest, we just
+ * log the mismatch and return success.
+ */
+ pr_devel("Unlink of doorbell (handle=0x%x:0x%x) unknown by hypervisor (error=%d)\n",
+ handle.context, handle.resource, result);
+ }
+ }
+
+ /*
+ * Now remove the resource from the table. It might still be in use
+ * after this, in a callback or still on the delayed work queue.
+ */
+ vmci_resource_put(&entry->resource);
+ vmci_resource_remove(&entry->resource);
+
+ kfree(entry);
+
+ return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_destroy);
+
+/*
+ * vmci_doorbell_notify() - Ring the doorbell (and hide in the bushes).
+ * @dst: The handle identifying the doorbell resource.
+ * @priv_flags: Privilege flags.
+ *
+ * Generates a notification on the doorbell identified by the
+ * handle. For host side generation of notifications, the caller
+ * can specify what the privilege of the calling side is.
+ */
+int vmci_doorbell_notify(struct vmci_handle dst, u32 priv_flags)
+{
+ int retval;
+ enum vmci_route route;
+ struct vmci_handle src;
+
+ if (vmci_handle_is_invalid(dst) ||
+ (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src = VMCI_INVALID_HANDLE;
+ retval = vmci_route(&src, &dst, false, &route);
+ if (retval < VMCI_SUCCESS)
+ return retval;
+
+ if (VMCI_ROUTE_AS_HOST == route)
+ return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID,
+ dst, priv_flags);
+
+ if (VMCI_ROUTE_AS_GUEST == route)
+ return dbell_notify_as_guest(dst, priv_flags);
+
+ pr_warn("Unknown route (%d) for doorbell\n", route);
+ return VMCI_ERROR_DST_UNREACHABLE;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_notify);
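+
+/*
+ * Notify sketch (illustrative only, not part of the driver): ringing a
+ * peer's doorbell only needs its handle; peer_db_handle is assumed to
+ * have been obtained out of band, e.g. via a datagram exchange.
+ *
+ * int err = vmci_doorbell_notify(peer_db_handle,
+ * VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS);
+ * if (err < VMCI_SUCCESS)
+ * pr_devel("doorbell notify failed (err=%d)\n", err);
+ */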
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h
new file mode 100644
index 000000000000..e4c0b17486a5
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.h
@@ -0,0 +1,51 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef VMCI_DOORBELL_H
+#define VMCI_DOORBELL_H
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_driver.h"
+
+/*
+ * VMCINotifyResourceInfo: Used to create and destroy doorbells, and
+ * generate a notification for a doorbell or queue pair.
+ */
+struct vmci_dbell_notify_resource_info {
+ struct vmci_handle handle;
+ u16 resource;
+ u16 action;
+ s32 result;
+};
+
+/*
+ * Structure used for checkpointing the doorbell mappings. It is
+ * written to the checkpoint as is, so changing this structure will
+ * break checkpoint compatibility.
+ */
+struct dbell_cpt_state {
+ struct vmci_handle handle;
+ u64 bitmap_idx;
+};
+
+int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle);
+int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags);
+
+bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn);
+void vmci_dbell_scan_notification_entries(u8 *bitmap);
+
+#endif /* VMCI_DOORBELL_H */
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
new file mode 100644
index 000000000000..7b3fce2da6c3
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -0,0 +1,117 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+static bool vmci_disable_host;
+module_param_named(disable_host, vmci_disable_host, bool, 0);
+MODULE_PARM_DESC(disable_host,
+ "Disable driver host personality (default=enabled)");
+
+static bool vmci_disable_guest;
+module_param_named(disable_guest, vmci_disable_guest, bool, 0);
+MODULE_PARM_DESC(disable_guest,
+ "Disable driver guest personality (default=enabled)");
+
+static bool vmci_guest_personality_initialized;
+static bool vmci_host_personality_initialized;
+
+/*
+ * vmci_get_context_id() - Gets the current context ID.
+ *
+ * Returns the current context ID: the VM context ID when the guest
+ * personality is active, the host context ID when only the host
+ * personality is active, and VMCI_INVALID_ID otherwise.
+ */
+u32 vmci_get_context_id(void)
+{
+ if (vmci_guest_code_active())
+ return vmci_get_vm_context_id();
+ else if (vmci_host_code_active())
+ return VMCI_HOST_CONTEXT_ID;
+
+ return VMCI_INVALID_ID;
+}
+EXPORT_SYMBOL_GPL(vmci_get_context_id);
+
+static int __init vmci_drv_init(void)
+{
+ int vmci_err;
+ int error;
+
+ vmci_err = vmci_event_init();
+ if (vmci_err < VMCI_SUCCESS) {
+ pr_err("Failed to initialize VMCIEvent (result=%d)\n",
+ vmci_err);
+ return -EINVAL;
+ }
+
+ if (!vmci_disable_guest) {
+ error = vmci_guest_init();
+ if (error) {
+ pr_warn("Failed to initialize guest personality (err=%d)\n",
+ error);
+ } else {
+ vmci_guest_personality_initialized = true;
+ pr_info("Guest personality initialized and is %s\n",
+ vmci_guest_code_active() ?
+ "active" : "inactive");
+ }
+ }
+
+ if (!vmci_disable_host) {
+ error = vmci_host_init();
+ if (error) {
+ pr_warn("Unable to initialize host personality (err=%d)\n",
+ error);
+ } else {
+ vmci_host_personality_initialized = true;
+ pr_info("Initialized host personality\n");
+ }
+ }
+
+ if (!vmci_guest_personality_initialized &&
+ !vmci_host_personality_initialized) {
+ vmci_event_exit();
+ return -ENODEV;
+ }
+
+ return 0;
+}
+module_init(vmci_drv_init);
+
+static void __exit vmci_drv_exit(void)
+{
+ if (vmci_guest_personality_initialized)
+ vmci_guest_exit();
+
+ if (vmci_host_personality_initialized)
+ vmci_host_exit();
+
+ vmci_event_exit();
+}
+module_exit(vmci_drv_exit);
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
+MODULE_VERSION("1.0.0.0-k");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
new file mode 100644
index 000000000000..f69156a1f30c
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -0,0 +1,50 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_DRIVER_H_
+#define _VMCI_DRIVER_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/wait.h>
+
+#include "vmci_queue_pair.h"
+#include "vmci_context.h"
+
+enum vmci_obj_type {
+ VMCIOBJ_VMX_VM = 10,
+ VMCIOBJ_CONTEXT,
+ VMCIOBJ_SOCKET,
+ VMCIOBJ_NOT_SET,
+};
+
+/* For storing VMCI structures in file handles. */
+struct vmci_obj {
+ void *ptr;
+ enum vmci_obj_type type;
+};
+
+u32 vmci_get_context_id(void);
+int vmci_send_datagram(struct vmci_datagram *dg);
+
+int vmci_host_init(void);
+void vmci_host_exit(void);
+bool vmci_host_code_active(void);
+
+int vmci_guest_init(void);
+void vmci_guest_exit(void);
+bool vmci_guest_code_active(void);
+u32 vmci_get_vm_context_id(void);
+
+#endif /* _VMCI_DRIVER_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c
new file mode 100644
index 000000000000..8449516d6ac6
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_event.c
@@ -0,0 +1,224 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define EVENT_MAGIC 0xEABE0000
+#define VMCI_EVENT_MAX_ATTEMPTS 10
+
+struct vmci_subscription {
+ u32 id;
+ u32 event;
+ vmci_event_cb callback;
+ void *callback_data;
+ struct list_head node; /* on one of subscriber lists */
+};
+
+static struct list_head subscriber_array[VMCI_EVENT_MAX];
+static DEFINE_MUTEX(subscriber_mutex);
+
+int __init vmci_event_init(void)
+{
+ int i;
+
+ for (i = 0; i < VMCI_EVENT_MAX; i++)
+ INIT_LIST_HEAD(&subscriber_array[i]);
+
+ return VMCI_SUCCESS;
+}
+
+void vmci_event_exit(void)
+{
+ int e;
+
+ /* We free all memory at exit. */
+ for (e = 0; e < VMCI_EVENT_MAX; e++) {
+ struct vmci_subscription *cur, *p2;
+ list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
+
+ /*
+ * We should never get here because all events
+ * should have been unregistered before we try
+ * to unload the driver module.
+ */
+ pr_warn("Unexpected free events occurring\n");
+ list_del(&cur->node);
+ kfree(cur);
+ }
+ }
+}
+
+/*
+ * Find entry. Assumes subscriber_mutex is held.
+ */
+static struct vmci_subscription *event_find(u32 sub_id)
+{
+ int e;
+
+ for (e = 0; e < VMCI_EVENT_MAX; e++) {
+ struct vmci_subscription *cur;
+ list_for_each_entry(cur, &subscriber_array[e], node) {
+ if (cur->id == sub_id)
+ return cur;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Actually delivers the events to the subscribers.
+ * The callback function for each subscriber is invoked.
+ */
+static void event_deliver(struct vmci_event_msg *event_msg)
+{
+ struct vmci_subscription *cur;
+ struct list_head *subscriber_list;
+
+ rcu_read_lock();
+ subscriber_list = &subscriber_array[event_msg->event_data.event];
+ list_for_each_entry_rcu(cur, subscriber_list, node) {
+ cur->callback(cur->id, &event_msg->event_data,
+ cur->callback_data);
+ }
+ rcu_read_unlock();
+}
+
+/*
+ * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
+ * subscribers for given event.
+ */
+int vmci_event_dispatch(struct vmci_datagram *msg)
+{
+ struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
+
+ if (msg->payload_size < sizeof(u32) ||
+ msg->payload_size > sizeof(struct vmci_event_data_max))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (!VMCI_EVENT_VALID(event_msg->event_data.event))
+ return VMCI_ERROR_EVENT_UNKNOWN;
+
+ event_deliver(event_msg);
+ return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_event_subscribe() - Subscribe to a given event.
+ * @event: The event to subscribe to.
+ * @callback: The callback to invoke upon the event.
+ * @callback_data: Data to pass to the callback.
+ * @new_subscription_id: ID used to track the subscription. Used with
+ * vmci_event_unsubscribe().
+ *
+ * Subscribes to the provided event. The callback specified is fired
+ * from an RCU read-side critical section and therefore must not sleep.
+ */
+int vmci_event_subscribe(u32 event,
+ vmci_event_cb callback,
+ void *callback_data,
+ u32 *new_subscription_id)
+{
+ struct vmci_subscription *sub;
+ int attempts;
+ int retval;
+ bool have_new_id = false;
+
+ if (!new_subscription_id) {
+ pr_devel("%s: Invalid subscription (NULL)\n", __func__);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ if (!VMCI_EVENT_VALID(event) || !callback) {
+ pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
+ __func__, event, callback, callback_data);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ sub = kzalloc(sizeof(*sub), GFP_KERNEL);
+ if (!sub)
+ return VMCI_ERROR_NO_MEM;
+
+ sub->id = VMCI_EVENT_MAX;
+ sub->event = event;
+ sub->callback = callback;
+ sub->callback_data = callback_data;
+ INIT_LIST_HEAD(&sub->node);
+
+ mutex_lock(&subscriber_mutex);
+
+ /* Creation of a new event is always allowed. */
+ for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
+ static u32 subscription_id;
+ /*
+ * We try to get an id a couple of times before
+ * claiming we are out of resources.
+ */
+
+ /* Test for duplicate id. */
+ if (!event_find(++subscription_id)) {
+ sub->id = subscription_id;
+ have_new_id = true;
+ break;
+ }
+ }
+
+ if (have_new_id) {
+ list_add_rcu(&sub->node, &subscriber_array[event]);
+ retval = VMCI_SUCCESS;
+ } else {
+ retval = VMCI_ERROR_NO_RESOURCES;
+ }
+
+ mutex_unlock(&subscriber_mutex);
+
+ *new_subscription_id = sub->id;
+
+ /* Avoid leaking the subscription when no free ID could be found. */
+ if (retval != VMCI_SUCCESS)
+ kfree(sub);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_event_subscribe);
+
+/*
+ * vmci_event_unsubscribe() - unsubscribe from an event.
+ * @sub_id: A subscription ID as provided by vmci_event_subscribe()
+ *
+ * Unsubscribes from the given event. Removes the subscription from the
+ * list and frees it once concurrent readers are done with it.
+ */
+int vmci_event_unsubscribe(u32 sub_id)
+{
+ struct vmci_subscription *s;
+
+ mutex_lock(&subscriber_mutex);
+ s = event_find(sub_id);
+ if (s)
+ list_del_rcu(&s->node);
+ mutex_unlock(&subscriber_mutex);
+
+ if (!s)
+ return VMCI_ERROR_NOT_FOUND;
+
+ synchronize_rcu();
+ kfree(s);
+
+ return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
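+
+/*
+ * Subscription sketch (illustrative only, not part of the driver):
+ * vmci_guest.c below uses this API to track context ID updates; any
+ * other client would follow the same pattern. The callback runs under
+ * rcu_read_lock() and therefore must not sleep.
+ *
+ * static void my_event_cb(u32 sub_id, const struct vmci_event_data *ed,
+ * void *client_data)
+ * {
+ * pr_info("event %u fired\n", ed->event);
+ * }
+ *
+ * u32 sub_id;
+ *
+ * if (vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE, my_event_cb,
+ * NULL, &sub_id) == VMCI_SUCCESS)
+ * vmci_event_unsubscribe(sub_id);
+ */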
diff --git a/drivers/misc/vmw_vmci/vmci_event.h b/drivers/misc/vmw_vmci/vmci_event.h
new file mode 100644
index 000000000000..7df9b1c0a96c
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_event.h
@@ -0,0 +1,25 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef __VMCI_EVENT_H__
+#define __VMCI_EVENT_H__
+
+#include <linux/vmw_vmci_api.h>
+
+int vmci_event_init(void);
+void vmci_event_exit(void);
+int vmci_event_dispatch(struct vmci_datagram *msg);
+
+#endif /*__VMCI_EVENT_H__ */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
new file mode 100644
index 000000000000..60c01999f489
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -0,0 +1,759 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define PCI_VENDOR_ID_VMWARE 0x15AD
+#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740
+
+#define VMCI_UTIL_NUM_RESOURCES 1
+
+static bool vmci_disable_msi;
+module_param_named(disable_msi, vmci_disable_msi, bool, 0);
+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
+
+static bool vmci_disable_msix;
+module_param_named(disable_msix, vmci_disable_msix, bool, 0);
+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
+
+static u32 ctx_update_sub_id = VMCI_INVALID_ID;
+static u32 vm_context_id = VMCI_INVALID_ID;
+
+struct vmci_guest_device {
+ struct device *dev; /* PCI device we are attached to */
+ void __iomem *iobase;
+
+ unsigned int irq;
+ unsigned int intr_type;
+ bool exclusive_vectors;
+ struct msix_entry msix_entries[VMCI_MAX_INTRS];
+
+ struct tasklet_struct datagram_tasklet;
+ struct tasklet_struct bm_tasklet;
+
+ void *data_buffer;
+ void *notification_bitmap;
+};
+
+/* vmci_dev singleton device and supporting data */
+static struct vmci_guest_device *vmci_dev_g;
+static DEFINE_SPINLOCK(vmci_dev_spinlock);
+
+static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);
+
+bool vmci_guest_code_active(void)
+{
+ return atomic_read(&vmci_num_guest_devices) != 0;
+}
+
+u32 vmci_get_vm_context_id(void)
+{
+ if (vm_context_id == VMCI_INVALID_ID) {
+ struct vmci_datagram get_cid_msg;
+ get_cid_msg.dst =
+ vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_GET_CONTEXT_ID);
+ get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
+ get_cid_msg.payload_size = 0;
+ vm_context_id = vmci_send_datagram(&get_cid_msg);
+ }
+ return vm_context_id;
+}
+
+/*
+ * VM to hypervisor call mechanism. We use the standard VMware naming
+ * convention since shared code is calling this function as well.
+ */
+int vmci_send_datagram(struct vmci_datagram *dg)
+{
+ unsigned long flags;
+ int result;
+
+ /* Check args. */
+ if (dg == NULL)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * Need to acquire spinlock on the device because the datagram
+ * data may be spread over multiple pages and the monitor may
+ * interleave device user rpc calls from multiple
+ * VCPUs. Acquiring the spinlock precludes that
+ * possibility. Interrupts are disabled to avoid taking incoming
+ * datagrams during a "rep out" and possibly ending up back in
+ * this function.
+ */
+ spin_lock_irqsave(&vmci_dev_spinlock, flags);
+
+ if (vmci_dev_g) {
+ iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
+ dg, VMCI_DG_SIZE(dg));
+ result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
+ } else {
+ result = VMCI_ERROR_UNAVAILABLE;
+ }
+
+ spin_unlock_irqrestore(&vmci_dev_spinlock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_send_datagram);
+
+/*
+ * Gets called with the new context ID when the ID is updated or the VM
+ * is resumed.
+ */
+static void vmci_guest_cid_update(u32 sub_id,
+ const struct vmci_event_data *event_data,
+ void *client_data)
+{
+ const struct vmci_event_payld_ctx *ev_payload =
+ vmci_event_data_const_payload(event_data);
+
+ if (sub_id != ctx_update_sub_id) {
+ pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
+ return;
+ }
+
+ if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
+ pr_devel("Invalid event data\n");
+ return;
+ }
+
+ pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
+ vm_context_id, ev_payload->context_id, event_data->event);
+
+ vm_context_id = ev_payload->context_id;
+}
+
+/*
+ * Verify that the host supports the hypercalls we need. If it does not,
+ * try to find fallback hypercalls and use those instead. Returns
+ * true if required hypercalls (or fallback hypercalls) are
+ * supported by the host, false otherwise.
+ */
+static bool vmci_check_host_caps(struct pci_dev *pdev)
+{
+ bool result;
+ struct vmci_resource_query_msg *msg;
+ u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
+ VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
+ struct vmci_datagram *check_msg;
+
+ check_msg = kmalloc(msg_size, GFP_KERNEL);
+ if (!check_msg) {
+ dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
+ return false;
+ }
+
+ check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_RESOURCES_QUERY);
+ check_msg->src = VMCI_ANON_SRC_HANDLE;
+ check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
+ msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);
+
+ msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
+ msg->resources[0] = VMCI_GET_CONTEXT_ID;
+
+ /* Checks that hypercalls are supported */
+ result = vmci_send_datagram(check_msg) == 0x01;
+ kfree(check_msg);
+
+ dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
+ __func__, result ? "PASSED" : "FAILED");
+
+ /* We need the vector. There are no fallbacks. */
+ return result;
+}
+
+/*
+ * Reads datagrams from the data in port and dispatches them. We
+ * always start reading datagrams into only the first page of the
+ * datagram buffer. If the datagrams don't fit into one page, we
+ * use the maximum datagram buffer size for the remainder of the
+ * invocation. This is a simple heuristic for not penalizing
+ * small datagrams.
+ *
+ * This function assumes that it has exclusive access to the data
+ * in port for the duration of the call.
+ */
+static void vmci_dispatch_dgs(unsigned long data)
+{
+ struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
+ u8 *dg_in_buffer = vmci_dev->data_buffer;
+ struct vmci_datagram *dg;
+ size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
+ size_t current_dg_in_buffer_size = PAGE_SIZE;
+ size_t remaining_bytes;
+
+ BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);
+
+ ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+ vmci_dev->data_buffer, current_dg_in_buffer_size);
+ dg = (struct vmci_datagram *)dg_in_buffer;
+ remaining_bytes = current_dg_in_buffer_size;
+
+ while (dg->dst.resource != VMCI_INVALID_ID ||
+ remaining_bytes > PAGE_SIZE) {
+ unsigned dg_in_size;
+
+ /*
+ * When the input buffer spans multiple pages, a datagram can
+ * start on any page boundary in the buffer.
+ */
+ if (dg->dst.resource == VMCI_INVALID_ID) {
+ dg = (struct vmci_datagram *)roundup(
+ (uintptr_t)dg + 1, PAGE_SIZE);
+ remaining_bytes =
+ (size_t)(dg_in_buffer +
+ current_dg_in_buffer_size -
+ (u8 *)dg);
+ continue;
+ }
+
+ dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);
+
+ if (dg_in_size <= dg_in_buffer_size) {
+ int result;
+
+ /*
+ * If the remaining bytes in the datagram
+ * buffer don't contain the complete
+ * datagram, we first make sure we have enough
+ * room for it and then we read the remainder
+ * of the datagram and possibly any following
+ * datagrams.
+ */
+ if (dg_in_size > remaining_bytes) {
+ if (remaining_bytes !=
+ current_dg_in_buffer_size) {
+
+ /*
+ * We move the partial
+ * datagram to the front and
+ * read the remainder of the
+ * datagram, and possibly any
+ * following datagrams, into
+ * the bytes after it.
+ */
+ memmove(dg_in_buffer, dg_in_buffer +
+ current_dg_in_buffer_size -
+ remaining_bytes,
+ remaining_bytes);
+ dg = (struct vmci_datagram *)
+ dg_in_buffer;
+ }
+
+ if (current_dg_in_buffer_size !=
+ dg_in_buffer_size)
+ current_dg_in_buffer_size =
+ dg_in_buffer_size;
+
+ ioread8_rep(vmci_dev->iobase +
+ VMCI_DATA_IN_ADDR,
+ vmci_dev->data_buffer +
+ remaining_bytes,
+ current_dg_in_buffer_size -
+ remaining_bytes);
+ }
+
+ /*
+ * We special case event datagrams from the
+ * hypervisor.
+ */
+ if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
+ dg->dst.resource == VMCI_EVENT_HANDLER) {
+ result = vmci_event_dispatch(dg);
+ } else {
+ result = vmci_datagram_invoke_guest_handler(dg);
+ }
+ if (result < VMCI_SUCCESS)
+ dev_dbg(vmci_dev->dev,
+ "Datagram with resource (ID=0x%x) failed (err=%d)\n",
+ dg->dst.resource, result);
+
+ /* On to the next datagram. */
+ dg = (struct vmci_datagram *)((u8 *)dg +
+ dg_in_size);
+ } else {
+ size_t bytes_to_skip;
+
+ /*
+ * Datagram doesn't fit in datagram buffer of maximal
+ * size. We drop it.
+ */
+ dev_dbg(vmci_dev->dev,
+ "Failed to receive datagram (size=%u bytes)\n",
+ dg_in_size);
+
+ bytes_to_skip = dg_in_size - remaining_bytes;
+ if (current_dg_in_buffer_size != dg_in_buffer_size)
+ current_dg_in_buffer_size = dg_in_buffer_size;
+
+ for (;;) {
+ ioread8_rep(vmci_dev->iobase +
+ VMCI_DATA_IN_ADDR,
+ vmci_dev->data_buffer,
+ current_dg_in_buffer_size);
+ if (bytes_to_skip <= current_dg_in_buffer_size)
+ break;
+
+ bytes_to_skip -= current_dg_in_buffer_size;
+ }
+ dg = (struct vmci_datagram *)(dg_in_buffer +
+ bytes_to_skip);
+ }
+
+ remaining_bytes =
+ (size_t) (dg_in_buffer + current_dg_in_buffer_size -
+ (u8 *)dg);
+
+ if (remaining_bytes < VMCI_DG_HEADERSIZE) {
+ /* Get the next batch of datagrams. */
+
+ ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+ vmci_dev->data_buffer,
+ current_dg_in_buffer_size);
+ dg = (struct vmci_datagram *)dg_in_buffer;
+ remaining_bytes = current_dg_in_buffer_size;
+ }
+ }
+}
+
+/*
+ * Scans the notification bitmap for raised flags, clears them
+ * and handles the notifications.
+ */
+static void vmci_process_bitmap(unsigned long data)
+{
+ struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
+
+ if (!dev->notification_bitmap) {
+ dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
+ return;
+ }
+
+ vmci_dbell_scan_notification_entries(dev->notification_bitmap);
+}
+
+/*
+ * Enable MSI-X. Try exclusive vectors first, then shared vectors.
+ */
+static int vmci_enable_msix(struct pci_dev *pdev,
+ struct vmci_guest_device *vmci_dev)
+{
+ int i;
+ int result;
+
+ for (i = 0; i < VMCI_MAX_INTRS; ++i) {
+ vmci_dev->msix_entries[i].entry = i;
+ vmci_dev->msix_entries[i].vector = i;
+ }
+
+ result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
+ if (result == 0)
+ vmci_dev->exclusive_vectors = true;
+ else if (result > 0)
+ result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);
+
+ return result;
+}
+
+/*
+ * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
+ * interrupt (vector VMCI_INTR_DATAGRAM).
+ */
+static irqreturn_t vmci_interrupt(int irq, void *_dev)
+{
+ struct vmci_guest_device *dev = _dev;
+
+ /*
+ * If we are using MSI-X with exclusive vectors then we simply schedule
+ * the datagram tasklet, since we know the interrupt was meant for us.
+ * Otherwise we must read the ICR to determine what to do.
+ */
+
+ if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
+ tasklet_schedule(&dev->datagram_tasklet);
+ } else {
+ unsigned int icr;
+
+ /* Acknowledge interrupt and determine what needs doing. */
+ icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
+ if (icr == 0 || icr == ~0)
+ return IRQ_NONE;
+
+ if (icr & VMCI_ICR_DATAGRAM) {
+ tasklet_schedule(&dev->datagram_tasklet);
+ icr &= ~VMCI_ICR_DATAGRAM;
+ }
+
+ if (icr & VMCI_ICR_NOTIFICATION) {
+ tasklet_schedule(&dev->bm_tasklet);
+ icr &= ~VMCI_ICR_NOTIFICATION;
+ }
+
+ if (icr != 0)
+ dev_warn(dev->dev,
+ "Ignoring unknown interrupt cause (%d)\n",
+ icr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
+ * which is for the notification bitmap. Will only get called if we are
+ * using MSI-X with exclusive vectors.
+ */
+static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
+{
+ struct vmci_guest_device *dev = _dev;
+
+ /* For MSI-X we can just assume it was meant for us. */
+ tasklet_schedule(&dev->bm_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Most of the initialization at module load time is done here.
+ */
+static int vmci_guest_probe_device(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct vmci_guest_device *vmci_dev;
+ void __iomem *iobase;
+ unsigned int capabilities;
+ unsigned long cmd;
+ int vmci_err;
+ int error;
+
+ dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");
+
+ error = pcim_enable_device(pdev);
+ if (error) {
+ dev_err(&pdev->dev,
+ "Failed to enable VMCI device: %d\n", error);
+ return error;
+ }
+
+ error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
+ return error;
+ }
+
+ iobase = pcim_iomap_table(pdev)[0];
+
+ dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
+ (unsigned long)iobase, pdev->irq);
+
+ vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
+ if (!vmci_dev) {
+ dev_err(&pdev->dev,
+ "Can't allocate memory for VMCI device\n");
+ return -ENOMEM;
+ }
+
+ vmci_dev->dev = &pdev->dev;
+ vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
+ vmci_dev->exclusive_vectors = false;
+ vmci_dev->iobase = iobase;
+
+ tasklet_init(&vmci_dev->datagram_tasklet,
+ vmci_dispatch_dgs, (unsigned long)vmci_dev);
+ tasklet_init(&vmci_dev->bm_tasklet,
+ vmci_process_bitmap, (unsigned long)vmci_dev);
+
+ vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
+ if (!vmci_dev->data_buffer) {
+ dev_err(&pdev->dev,
+ "Can't allocate memory for datagram buffer\n");
+ return -ENOMEM;
+ }
+
+ pci_set_master(pdev); /* To enable queue_pair functionality. */
+
+ /*
+ * Verify that the VMCI Device supports the capabilities that
+ * we need. If the device is missing capabilities that we would
+ * like to use, check for fallback capabilities and use those
+ * instead (so we can run a new VM on old hosts). Fail the load if
+ * a required capability is missing and there is no fallback.
+ *
+ * Right now, we need datagrams. There are no fallbacks.
+ */
+ capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
+ if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
+ dev_err(&pdev->dev, "Device does not support datagrams\n");
+ error = -ENXIO;
+ goto err_free_data_buffer;
+ }
+
+ /*
+ * If the hardware supports notifications, we will use that as
+ * well.
+ */
+ if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
+ vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
+ if (!vmci_dev->notification_bitmap) {
+ dev_warn(&pdev->dev,
+ "Unable to allocate notification bitmap\n");
+ } else {
+ memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
+ capabilities |= VMCI_CAPS_NOTIFICATIONS;
+ }
+ }
+
+ dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);
+
+ /* Let the host know which capabilities we intend to use. */
+ iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);
+
+ /* Set up global device so that we can start sending datagrams */
+ spin_lock_irq(&vmci_dev_spinlock);
+ vmci_dev_g = vmci_dev;
+ spin_unlock_irq(&vmci_dev_spinlock);
+
+ /*
+ * Register notification bitmap with device if that capability is
+ * used.
+ */
+ if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
+ struct page *page =
+ vmalloc_to_page(vmci_dev->notification_bitmap);
+ unsigned long bitmap_ppn = page_to_pfn(page);
+ if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
+ dev_warn(&pdev->dev,
+ "VMCI device unable to register notification bitmap with PPN 0x%x\n",
+ (u32) bitmap_ppn);
+ goto err_remove_vmci_dev_g;
+ }
+ }
+
+ /* Check host capabilities. */
+ if (!vmci_check_host_caps(pdev))
+ goto err_remove_bitmap;
+
+ /* Enable device. */
+
+ /*
+ * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
+ * update the internal context id when needed.
+ */
+ vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
+ vmci_guest_cid_update, NULL,
+ &ctx_update_sub_id);
+ if (vmci_err < VMCI_SUCCESS)
+ dev_warn(&pdev->dev,
+ "Failed to subscribe to event (type=%d): %d\n",
+ VMCI_EVENT_CTX_ID_UPDATE, vmci_err);
+
+ /*
+ * Enable interrupts. Try MSI-X first, then MSI, and then fallback on
+ * legacy interrupts.
+ */
+ if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
+ vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
+ vmci_dev->irq = vmci_dev->msix_entries[0].vector;
+ } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
+ vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
+ vmci_dev->irq = pdev->irq;
+ } else {
+ vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
+ vmci_dev->irq = pdev->irq;
+ }
+
+ /*
+ * Request IRQ for legacy or MSI interrupts, or for first
+ * MSI-X vector.
+ */
+ error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
+ KBUILD_MODNAME, vmci_dev);
+ if (error) {
+ dev_err(&pdev->dev, "Irq %u in use: %d\n",
+ vmci_dev->irq, error);
+ goto err_disable_msi;
+ }
+
+ /*
+ * For MSI-X with exclusive vectors we need to request an
+ * interrupt for each vector so that we get a separate
+ * interrupt handler routine. This allows us to distinguish
+ * between the vectors.
+ */
+ if (vmci_dev->exclusive_vectors) {
+ error = request_irq(vmci_dev->msix_entries[1].vector,
+ vmci_interrupt_bm, 0, KBUILD_MODNAME,
+ vmci_dev);
+ if (error) {
+ dev_err(&pdev->dev,
+ "Failed to allocate irq %u: %d\n",
+ vmci_dev->msix_entries[1].vector, error);
+ goto err_free_irq;
+ }
+ }
+
+ dev_dbg(&pdev->dev, "Registered device\n");
+
+ atomic_inc(&vmci_num_guest_devices);
+
+ /* Enable specific interrupt bits. */
+ cmd = VMCI_IMR_DATAGRAM;
+ if (capabilities & VMCI_CAPS_NOTIFICATIONS)
+ cmd |= VMCI_IMR_NOTIFICATION;
+ iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);
+
+ /* Enable interrupts. */
+ iowrite32(VMCI_CONTROL_INT_ENABLE,
+ vmci_dev->iobase + VMCI_CONTROL_ADDR);
+
+ pci_set_drvdata(pdev, vmci_dev);
+ return 0;
+
+err_free_irq:
+ free_irq(vmci_dev->irq, vmci_dev);
+ tasklet_kill(&vmci_dev->datagram_tasklet);
+ tasklet_kill(&vmci_dev->bm_tasklet);
+
+err_disable_msi:
+ if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
+ pci_disable_msix(pdev);
+ else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
+ pci_disable_msi(pdev);
+
+ vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
+ if (vmci_err < VMCI_SUCCESS)
+ dev_warn(&pdev->dev,
+ "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
+ VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
+
+err_remove_bitmap:
+ if (vmci_dev->notification_bitmap) {
+ iowrite32(VMCI_CONTROL_RESET,
+ vmci_dev->iobase + VMCI_CONTROL_ADDR);
+ vfree(vmci_dev->notification_bitmap);
+ }
+
+err_remove_vmci_dev_g:
+ spin_lock_irq(&vmci_dev_spinlock);
+ vmci_dev_g = NULL;
+ spin_unlock_irq(&vmci_dev_spinlock);
+
+err_free_data_buffer:
+ vfree(vmci_dev->data_buffer);
+
+ /* The rest are managed resources and will be freed by PCI core */
+ return error;
+}
+
+static void vmci_guest_remove_device(struct pci_dev *pdev)
+{
+ struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
+ int vmci_err;
+
+ dev_dbg(&pdev->dev, "Removing device\n");
+
+ atomic_dec(&vmci_num_guest_devices);
+
+ vmci_qp_guest_endpoints_exit();
+
+ vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
+ if (vmci_err < VMCI_SUCCESS)
+ dev_warn(&pdev->dev,
+ "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
+ VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
+
+ spin_lock_irq(&vmci_dev_spinlock);
+ vmci_dev_g = NULL;
+ spin_unlock_irq(&vmci_dev_spinlock);
+
+ dev_dbg(&pdev->dev, "Resetting vmci device\n");
+ iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);
+
+ /*
+ * Free IRQ and then disable MSI/MSI-X as appropriate. For
+ * MSI-X, we might have multiple vectors, each with their own
+ * IRQ, which we must free too.
+ */
+ free_irq(vmci_dev->irq, vmci_dev);
+ if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
+ if (vmci_dev->exclusive_vectors)
+ free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
+ pci_disable_msix(pdev);
+ } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
+ pci_disable_msi(pdev);
+ }
+
+ tasklet_kill(&vmci_dev->datagram_tasklet);
+ tasklet_kill(&vmci_dev->bm_tasklet);
+
+ if (vmci_dev->notification_bitmap) {
+ /*
+ * The device reset above cleared the bitmap state of the
+ * device, so we can safely free it here.
+ */
+
+ vfree(vmci_dev->notification_bitmap);
+ }
+
+ vfree(vmci_dev->data_buffer);
+
+ /* The rest are managed resources and will be freed by PCI core */
+}
+
+static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, vmci_ids);
+
+static struct pci_driver vmci_guest_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = vmci_ids,
+ .probe = vmci_guest_probe_device,
+ .remove = vmci_guest_remove_device,
+};
+
+int __init vmci_guest_init(void)
+{
+ return pci_register_driver(&vmci_guest_driver);
+}
+
+void __exit vmci_guest_exit(void)
+{
+ pci_unregister_driver(&vmci_guest_driver);
+}
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
new file mode 100644
index 000000000000..344973a0fb0a
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.c
@@ -0,0 +1,142 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/slab.h>
+#include "vmci_handle_array.h"
+
+static size_t handle_arr_calc_size(size_t capacity)
+{
+ return sizeof(struct vmci_handle_arr) +
+ capacity * sizeof(struct vmci_handle);
+}
+
+struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity)
+{
+ struct vmci_handle_arr *array;
+
+ if (capacity == 0)
+ capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
+
+ array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
+ if (!array)
+ return NULL;
+
+ array->capacity = capacity;
+ array->size = 0;
+
+ return array;
+}
+
+void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
+{
+ kfree(array);
+}
+
+void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+ struct vmci_handle handle)
+{
+ struct vmci_handle_arr *array = *array_ptr;
+
+ if (unlikely(array->size >= array->capacity)) {
+ /* reallocate. */
+ struct vmci_handle_arr *new_array;
+ size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT;
+ size_t new_size = handle_arr_calc_size(new_capacity);
+
+ new_array = krealloc(array, new_size, GFP_ATOMIC);
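+ /*
+ * If the reallocation fails, the new entry is silently dropped;
+ * there is no way to report the failure to the caller here.
+ */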
+ if (!new_array)
+ return;
+
+ new_array->capacity = new_capacity;
+ *array_ptr = array = new_array;
+ }
+
+ array->entries[array->size] = handle;
+ array->size++;
+}
+
+/*
+ * Handle that was removed, VMCI_INVALID_HANDLE if entry not found.
+ */
+struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle)
+{
+ struct vmci_handle handle = VMCI_INVALID_HANDLE;
+ size_t i;
+
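+ /*
+ * On a match, move the last entry into the freed slot so the
+ * array stays dense; entry order is not preserved.
+ */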
+ for (i = 0; i < array->size; i++) {
+ if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
+ handle = array->entries[i];
+ array->size--;
+ array->entries[i] = array->entries[array->size];
+ array->entries[array->size] = VMCI_INVALID_HANDLE;
+ break;
+ }
+ }
+
+ return handle;
+}
+
+/*
+ * Handle that was removed, VMCI_INVALID_HANDLE if array was empty.
+ */
+struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
+{
+ struct vmci_handle handle = VMCI_INVALID_HANDLE;
+
+ if (array->size) {
+ array->size--;
+ handle = array->entries[array->size];
+ array->entries[array->size] = VMCI_INVALID_HANDLE;
+ }
+
+ return handle;
+}
+
+/*
+ * Handle at given index, VMCI_INVALID_HANDLE if invalid index.
+ */
+struct vmci_handle
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
+{
+ if (unlikely(index >= array->size))
+ return VMCI_INVALID_HANDLE;
+
+ return array->entries[index];
+}
+
+bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle)
+{
+ size_t i;
+
+ for (i = 0; i < array->size; i++)
+ if (vmci_handle_is_equal(array->entries[i], entry_handle))
+ return true;
+
+ return false;
+}
+
+/*
+ * NULL if the array is empty. Otherwise, a pointer to the array
+ * of VMCI handles in the handle array.
+ */
+struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array)
+{
+ if (array->size)
+ return array->entries;
+
+ return NULL;
+}
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
new file mode 100644
index 000000000000..b5f3a7f98cf1
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.h
@@ -0,0 +1,52 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_HANDLE_ARRAY_H_
+#define _VMCI_HANDLE_ARRAY_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
+#define VMCI_ARR_CAP_MULT 2 /* Array capacity multiplier */
+
+struct vmci_handle_arr {
+ size_t capacity;
+ size_t size;
+ struct vmci_handle entries[];
+};
+
+struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity);
+void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
+void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+ struct vmci_handle handle);
+struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle);
+struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array);
+struct vmci_handle
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index);
+bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle);
+struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array);
+
+static inline size_t vmci_handle_arr_get_size(
+ const struct vmci_handle_arr *array)
+{
+ return array->size;
+}
+
+
+#endif /* _VMCI_HANDLE_ARRAY_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
new file mode 100644
index 000000000000..d4722b3dc8ec
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -0,0 +1,1043 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/moduleparam.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define VMCI_UTIL_NUM_RESOURCES 1
+
+enum {
+ VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
+ VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
+};
+
+enum {
+ VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
+ VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
+ VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
+};
+
+/*
+ * VMCI driver initialization. This block can also be used to
+ * pass initial group membership etc.
+ */
+struct vmci_init_blk {
+ u32 cid;
+ u32 flags;
+};
+
+/* VMCIqueue_pairAllocInfo_VMToVM */
+struct vmci_qp_alloc_info_vmvm {
+ struct vmci_handle handle;
+ u32 peer;
+ u32 flags;
+ u64 produce_size;
+ u64 consume_size;
+ u64 produce_page_file; /* User VA. */
+ u64 consume_page_file; /* User VA. */
+ u64 produce_page_file_size; /* Size of the file name array. */
+ u64 consume_page_file_size; /* Size of the file name array. */
+ s32 result;
+ u32 _pad;
+};
+
+/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
+struct vmci_set_notify_info {
+ u64 notify_uva;
+ s32 result;
+ u32 _pad;
+};
+
+/*
+ * Per-instance host state
+ */
+struct vmci_host_dev {
+ struct vmci_ctx *context;
+ int user_version;
+ enum vmci_obj_type ct_type;
+ struct mutex lock; /* Mutex lock for vmci context access */
+};
+
+static struct vmci_ctx *host_context;
+static bool vmci_host_device_initialized;
+static atomic_t vmci_host_active_users = ATOMIC_INIT(0);
+
+/*
+ * Determines whether the VMCI host personality is
+ * available. Since the core functionality of the host driver is
+ * always present, all guests could possibly use the host
+ * personality. However, to minimize the deviation from the
+ * pre-unified driver state of affairs, we only consider the host
+ * device active if there is no active guest device or if there
+ * are VMX'en with active VMCI contexts using the host device.
+ */
+bool vmci_host_code_active(void)
+{
+ return vmci_host_device_initialized &&
+ (!vmci_guest_code_active() ||
+ atomic_read(&vmci_host_active_users) > 0);
+}
+
+/*
+ * Called on open of /dev/vmci.
+ */
+static int vmci_host_open(struct inode *inode, struct file *filp)
+{
+ struct vmci_host_dev *vmci_host_dev;
+
+ vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
+ if (vmci_host_dev == NULL)
+ return -ENOMEM;
+
+ vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+ mutex_init(&vmci_host_dev->lock);
+ filp->private_data = vmci_host_dev;
+
+ return 0;
+}
+
+/*
+ * Called on close of /dev/vmci, most often when the process
+ * exits.
+ */
+static int vmci_host_close(struct inode *inode, struct file *filp)
+{
+ struct vmci_host_dev *vmci_host_dev = filp->private_data;
+
+ if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+ vmci_ctx_destroy(vmci_host_dev->context);
+ vmci_host_dev->context = NULL;
+
+ /*
+ * The number of active contexts is used to track whether any
+ * VMX'en are using the host personality. It is incremented when
+ * a context is created through the IOCTL_VMCI_INIT_CONTEXT
+ * ioctl.
+ */
+ atomic_dec(&vmci_host_active_users);
+ }
+ vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+
+ kfree(vmci_host_dev);
+ filp->private_data = NULL;
+ return 0;
+}
+
+/*
+ * This is used to wake up the VMX when a VMCI call arrives, or
+ * to wake up select() or poll() at the next clock tick.
+ */
+static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
+{
+ struct vmci_host_dev *vmci_host_dev = filp->private_data;
+ struct vmci_ctx *context = vmci_host_dev->context;
+ unsigned int mask = 0;
+
+ if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+ /* Check for VMCI calls to this VM context. */
+ if (wait)
+ poll_wait(filp, &context->host_context.wait_queue,
+ wait);
+
+ spin_lock(&context->lock);
+ if (context->pending_datagrams > 0 ||
+ vmci_handle_arr_get_size(
+ context->pending_doorbell_array) > 0) {
+ mask = POLLIN;
+ }
+ spin_unlock(&context->lock);
+ }
+ return mask;
+}
+
+/*
+ * Copies the handles of a handle array into a user buffer, and
+ * returns the new length in userBufferSize. If the copy to the
+ * user buffer fails, the functions still returns VMCI_SUCCESS,
+ * but retval != 0.
+ */
+static int drv_cp_harray_to_user(void __user *user_buf_uva,
+ u64 *user_buf_size,
+ struct vmci_handle_arr *handle_array,
+ int *retval)
+{
+ u32 array_size = 0;
+ struct vmci_handle *handles;
+
+ if (handle_array)
+ array_size = vmci_handle_arr_get_size(handle_array);
+
+ if (array_size * sizeof(*handles) > *user_buf_size)
+ return VMCI_ERROR_MORE_DATA;
+
+ *user_buf_size = array_size * sizeof(*handles);
+ if (*user_buf_size)
+ *retval = copy_to_user(user_buf_uva,
+ vmci_handle_arr_get_handles
+ (handle_array), *user_buf_size);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Sets up a given context for notify to work. Maps the notify
+ * boolean in user VA into kernel space.
+ */
+static int vmci_host_setup_notify(struct vmci_ctx *context,
+ unsigned long uva)
+{
+ struct page *page;
+ int retval;
+
+ if (context->notify_page) {
+ pr_devel("%s: Notify mechanism is already set up\n", __func__);
+ return VMCI_ERROR_DUPLICATE_ENTRY;
+ }
+
+ /*
+ * We are using 'bool' internally, but let's make sure we are
+ * explicit about the size.
+ */
+ BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
+ if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
+ return VMCI_ERROR_GENERIC;
+
+ /*
+ * Lock physical page backing a given user VA.
+ */
+ down_read(&current->mm->mmap_sem);
+ retval = get_user_pages(current, current->mm,
+ PAGE_ALIGN(uva),
+ 1, 1, 0, &page, NULL);
+ up_read(&current->mm->mmap_sem);
+ if (retval != 1)
+ return VMCI_ERROR_GENERIC;
+
+ /*
+ * Map the locked page and set up notify pointer.
+ */
+ context->notify = kmap(page) + (uva & (PAGE_SIZE - 1));
+ vmci_ctx_check_signal_notify(context);
+
+ return VMCI_SUCCESS;
+}
+
+static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
+ unsigned int cmd, void __user *uptr)
+{
+ if (cmd == IOCTL_VMCI_VERSION2) {
+ int __user *vptr = uptr;
+ if (get_user(vmci_host_dev->user_version, vptr))
+ return -EFAULT;
+ }
+
+ /*
+ * The basic logic here is:
+ *
+ * If the user sends in a version of 0 tell it our version.
+ * If the user didn't send in a version, tell it our version.
+ * If the user sent in an old version, tell it -its- version.
+ * If the user sent in a newer version, tell it our version.
+ *
+ * The rationale behind telling the caller its version is that
+ * Workstation 6.5 required that the VMX and the VMCI kernel module
+ * be version synced. All new VMX users will be programmed to
+ * handle the VMCI kernel module version.
+ */
+
+ if (vmci_host_dev->user_version > 0 &&
+ vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
+ return vmci_host_dev->user_version;
+ }
+
+ return VMCI_VERSION;
+}
+
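+/*
+ * Logging helper for the ioctl handlers below; it expects a local
+ * 'ioctl_name' string to be in scope at the call site.
+ */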
+#define vmci_ioctl_err(fmt, ...) \
+ pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)
+
+static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_init_blk init_block;
+ const struct cred *cred;
+ int retval;
+
+ if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
+ vmci_ioctl_err("error reading init block\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&vmci_host_dev->lock);
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
+ vmci_ioctl_err("received VMCI init on initialized handle\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+ vmci_ioctl_err("unsupported VMCI restriction flag\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ cred = get_current_cred();
+ vmci_host_dev->context = vmci_ctx_create(init_block.cid,
+ init_block.flags, 0,
+ vmci_host_dev->user_version,
+ cred);
+ put_cred(cred);
+ if (IS_ERR(vmci_host_dev->context)) {
+ retval = PTR_ERR(vmci_host_dev->context);
+ vmci_ioctl_err("error initializing context\n");
+ goto out;
+ }
+
+ /*
+ * Copy cid to userlevel, we do this to allow the VMX
+ * to enforce its policy on cid generation.
+ */
+ init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
+ if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
+ vmci_ctx_destroy(vmci_host_dev->context);
+ vmci_host_dev->context = NULL;
+ vmci_ioctl_err("error writing init block\n");
+ retval = -EFAULT;
+ goto out;
+ }
+
+ vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
+ atomic_inc(&vmci_host_active_users);
+
+ retval = 0;
+
+out:
+ mutex_unlock(&vmci_host_dev->lock);
+ return retval;
+}
+
+static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_datagram_snd_rcv_info send_info;
+ struct vmci_datagram *dg = NULL;
+ u32 cid;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&send_info, uptr, sizeof(send_info)))
+ return -EFAULT;
+
+ if (send_info.len > VMCI_MAX_DG_SIZE) {
+ vmci_ioctl_err("datagram is too big (size=%d)\n",
+ send_info.len);
+ return -EINVAL;
+ }
+
+ if (send_info.len < sizeof(*dg)) {
+ vmci_ioctl_err("datagram is too small (size=%d)\n",
+ send_info.len);
+ return -EINVAL;
+ }
+
+ dg = kmalloc(send_info.len, GFP_KERNEL);
+ if (!dg) {
+ vmci_ioctl_err(
+ "cannot allocate memory to dispatch datagram\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr,
+ send_info.len)) {
+ vmci_ioctl_err("error getting datagram\n");
+ kfree(dg);
+ return -EFAULT;
+ }
+
+ pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
+ dg->dst.context, dg->dst.resource,
+ dg->src.context, dg->src.resource,
+ (unsigned long long)dg->payload_size);
+
+ /* Get source context id. */
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ send_info.result = vmci_datagram_dispatch(cid, dg, true);
+ kfree(dg);
+
+ return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_datagram_snd_rcv_info recv_info;
+ struct vmci_datagram *dg = NULL;
+ int retval;
+ size_t size;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
+ return -EFAULT;
+
+ size = recv_info.len;
+ recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
+ &size, &dg);
+
+ if (recv_info.result >= VMCI_SUCCESS) {
+ void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
+ retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
+ kfree(dg);
+ if (retval != 0)
+ return -EFAULT;
+ }
+
+ return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_handle handle;
+ int vmci_status;
+ int __user *retptr;
+ u32 cid;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+ struct vmci_qp_alloc_info_vmvm alloc_info;
+ struct vmci_qp_alloc_info_vmvm __user *info = uptr;
+
+ if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
+ return -EFAULT;
+
+ handle = alloc_info.handle;
+ retptr = &info->result;
+
+ vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
+ alloc_info.peer,
+ alloc_info.flags,
+ VMCI_NO_PRIVILEGE_FLAGS,
+ alloc_info.produce_size,
+ alloc_info.consume_size,
+ NULL,
+ vmci_host_dev->context);
+
+ if (vmci_status == VMCI_SUCCESS)
+ vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
+ } else {
+ struct vmci_qp_alloc_info alloc_info;
+ struct vmci_qp_alloc_info __user *info = uptr;
+ struct vmci_qp_page_store page_store;
+
+ if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
+ return -EFAULT;
+
+ handle = alloc_info.handle;
+ retptr = &info->result;
+
+ page_store.pages = alloc_info.ppn_va;
+ page_store.len = alloc_info.num_ppns;
+
+ vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
+ alloc_info.peer,
+ alloc_info.flags,
+ VMCI_NO_PRIVILEGE_FLAGS,
+ alloc_info.produce_size,
+ alloc_info.consume_size,
+ &page_store,
+ vmci_host_dev->context);
+ }
+
+ if (put_user(vmci_status, retptr)) {
+ if (vmci_status >= VMCI_SUCCESS) {
+ vmci_status = vmci_qp_broker_detach(handle,
+ vmci_host_dev->context);
+ }
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_qp_set_va_info set_va_info;
+ struct vmci_qp_set_va_info __user *info = uptr;
+ s32 result;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+ vmci_ioctl_err("is not allowed\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
+ return -EFAULT;
+
+ if (set_va_info.va) {
+ /*
+ * VMX is passing down a new VA for the queue
+ * pair mapping.
+ */
+ result = vmci_qp_broker_map(set_va_info.handle,
+ vmci_host_dev->context,
+ set_va_info.va);
+ } else {
+ /*
+ * The queue pair is about to be unmapped by
+ * the VMX.
+ */
+ result = vmci_qp_broker_unmap(set_va_info.handle,
+ vmci_host_dev->context, 0);
+ }
+
+ return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_qp_page_file_info page_file_info;
+ struct vmci_qp_page_file_info __user *info = uptr;
+ s32 result;
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
+ vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
+ vmci_ioctl_err("not supported on this VMX (version=%d)\n",
+ vmci_host_dev->user_version);
+ return -EINVAL;
+ }
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
+ return -EFAULT;
+
+ /*
+ * Communicate success pre-emptively to the caller. Note that the
+ * basic premise is that it is incumbent upon the caller not to look at
+ * the info.result field until after the ioctl() returns. And then,
+ * only if the ioctl() result indicates no error. We send up the
+ * SUCCESS status before calling SetPageStore() because failing
+ * to copy up the result code means unwinding the SetPageStore().
+ *
+ * It turns out the logic to unwind a SetPageStore() opens a can of
+ * worms. For example, if a host had created the queue_pair and a
+ * guest attaches and SetPageStore() is successful but writing success
+ * fails, then ... the host has to be stopped from writing (anymore)
+ * data into the queue_pair. That means an additional test in the
+ * VMCI_Enqueue() code path. Ugh.
+ */
+
+ if (put_user(VMCI_SUCCESS, &info->result)) {
+ /*
+ * In this case, we can't write a result field of the
+ * caller's info block. So, we don't even try to
+ * SetPageStore().
+ */
+ return -EFAULT;
+ }
+
+ result = vmci_qp_broker_set_page_store(page_file_info.handle,
+ page_file_info.produce_va,
+ page_file_info.consume_va,
+ vmci_host_dev->context);
+ if (result < VMCI_SUCCESS) {
+ if (put_user(result, &info->result)) {
+ /*
+ * Note that in this case the SetPageStore()
+ * call failed but we were unable to
+ * communicate that to the caller (because the
+ * copy_to_user() call failed). So, if we
+ * simply return an error (in this case
+ * -EFAULT) then the caller will know that the
+ * SetPageStore failed even though we couldn't
+ * put the result code in the result field and
+ * indicate exactly why it failed.
+ *
+ * That says nothing about the issue where we
+ * were once able to write to the caller's info
+ * memory and now can't. Something more
+ * serious is probably going on than the fact
+ * that SetPageStore() didn't work.
+ */
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_qp_dtch_info detach_info;
+ struct vmci_qp_dtch_info __user *info = uptr;
+ s32 result;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
+ return -EFAULT;
+
+ result = vmci_qp_broker_detach(detach_info.handle,
+ vmci_host_dev->context);
+ if (result == VMCI_SUCCESS &&
+ vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+ result = VMCI_SUCCESS_LAST_DETACH;
+ }
+
+ return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_info ar_info;
+ struct vmci_ctx_info __user *info = uptr;
+ s32 result;
+ u32 cid;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
+ return -EFAULT;
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ result = vmci_ctx_add_notification(cid, ar_info.remote_cid);
+
+ return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_info ar_info;
+ struct vmci_ctx_info __user *info = uptr;
+ u32 cid;
+ int result;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
+ return -EFAULT;
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ result = vmci_ctx_remove_notification(cid,
+ ar_info.remote_cid);
+
+ return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_chkpt_buf_info get_info;
+ u32 cid;
+ void *cpt_buf;
+ int retval;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&get_info, uptr, sizeof(get_info)))
+ return -EFAULT;
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
+ &get_info.buf_size, &cpt_buf);
+ if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
+ void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
+ retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
+ kfree(cpt_buf);
+
+ if (retval)
+ return -EFAULT;
+ }
+
+ return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_chkpt_buf_info set_info;
+ u32 cid;
+ void *cpt_buf;
+ int retval;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&set_info, uptr, sizeof(set_info)))
+ return -EFAULT;
+
+ cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL);
+ if (!cpt_buf) {
+ vmci_ioctl_err(
+ "cannot allocate memory to set cpt state (type=%d)\n",
+ set_info.cpt_type);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
+ set_info.buf_size)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
+ set_info.buf_size, cpt_buf);
+
+ retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;
+
+out:
+ kfree(cpt_buf);
+ return retval;
+}
+
+static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ u32 __user *u32ptr = uptr;
+
+ return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_set_notify_info notify_info;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
+ return -EFAULT;
+
+ if (notify_info.notify_uva) {
+ notify_info.result =
+ vmci_host_setup_notify(vmci_host_dev->context,
+ notify_info.notify_uva);
+ } else {
+ vmci_ctx_unset_notify(vmci_host_dev->context);
+ notify_info.result = VMCI_SUCCESS;
+ }
+
+ return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
+ -EFAULT : 0;
+}
+
+static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_dbell_notify_resource_info info;
+ u32 cid;
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
+ vmci_ioctl_err("invalid for current VMX versions\n");
+ return -EINVAL;
+ }
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&info, uptr, sizeof(info)))
+ return -EFAULT;
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+ switch (info.action) {
+ case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
+ if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
+ u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
+ info.result = vmci_ctx_notify_dbell(cid, info.handle,
+ flags);
+ } else {
+ info.result = VMCI_ERROR_UNAVAILABLE;
+ }
+ break;
+
+ case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
+ info.result = vmci_ctx_dbell_create(cid, info.handle);
+ break;
+
+ case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
+ info.result = vmci_ctx_dbell_destroy(cid, info.handle);
+ break;
+
+ default:
+ vmci_ioctl_err("got unknown action (action=%d)\n",
+ info.action);
+ info.result = VMCI_ERROR_INVALID_ARGS;
+ }
+
+ return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_notify_recv_info info;
+ struct vmci_handle_arr *db_handle_array;
+ struct vmci_handle_arr *qp_handle_array;
+ void __user *ubuf;
+ u32 cid;
+ int retval = 0;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
+ vmci_ioctl_err("not supported for the current vmx version\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&info, uptr, sizeof(info)))
+ return -EFAULT;
+
+ if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
+ (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
+ return -EINVAL;
+ }
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+ info.result = vmci_ctx_rcv_notifications_get(cid,
+ &db_handle_array, &qp_handle_array);
+ if (info.result != VMCI_SUCCESS)
+ return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
+
+ ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
+ info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
+ db_handle_array, &retval);
+ if (info.result == VMCI_SUCCESS && !retval) {
+ ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
+ info.result = drv_cp_harray_to_user(ubuf,
+ &info.qp_handle_buf_size,
+ qp_handle_array, &retval);
+ }
+
+ if (!retval && copy_to_user(uptr, &info, sizeof(info)))
+ retval = -EFAULT;
+
+ vmci_ctx_rcv_notifications_release(cid,
+ db_handle_array, qp_handle_array,
+ info.result == VMCI_SUCCESS && !retval);
+
+ return retval;
+}
+
+static long vmci_host_unlocked_ioctl(struct file *filp,
+ unsigned int iocmd, unsigned long ioarg)
+{
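+/*
+ * Expands to a call of the matching vmci_host_do_<ioctl_fn>() handler,
+ * passing the stringified ioctl name along for use in log messages.
+ */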
+#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do { \
+ char *name = __stringify(IOCTL_VMCI_ ## ioctl_name); \
+ return vmci_host_do_ ## ioctl_fn( \
+ vmci_host_dev, name, uptr); \
+ } while (0)
+
+ struct vmci_host_dev *vmci_host_dev = filp->private_data;
+ void __user *uptr = (void __user *)ioarg;
+
+ switch (iocmd) {
+ case IOCTL_VMCI_INIT_CONTEXT:
+ VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
+ case IOCTL_VMCI_DATAGRAM_SEND:
+ VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
+ case IOCTL_VMCI_DATAGRAM_RECEIVE:
+ VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
+ case IOCTL_VMCI_QUEUEPAIR_ALLOC:
+ VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
+ case IOCTL_VMCI_QUEUEPAIR_SETVA:
+ VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
+ case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
+ VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
+ case IOCTL_VMCI_QUEUEPAIR_DETACH:
+ VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
+ case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
+ VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
+ case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
+ VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
+ case IOCTL_VMCI_CTX_GET_CPT_STATE:
+ VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
+ case IOCTL_VMCI_CTX_SET_CPT_STATE:
+ VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
+ case IOCTL_VMCI_GET_CONTEXT_ID:
+ VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
+ case IOCTL_VMCI_SET_NOTIFY:
+ VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
+ case IOCTL_VMCI_NOTIFY_RESOURCE:
+ VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
+ case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
+ VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);
+
+ case IOCTL_VMCI_VERSION:
+ case IOCTL_VMCI_VERSION2:
+ return vmci_host_get_version(vmci_host_dev, iocmd, uptr);
+
+ default:
+ pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
+ return -EINVAL;
+ }
+
+#undef VMCI_DO_IOCTL
+}
+
+static const struct file_operations vmuser_fops = {
+ .owner = THIS_MODULE,
+ .open = vmci_host_open,
+ .release = vmci_host_close,
+ .poll = vmci_host_poll,
+ .unlocked_ioctl = vmci_host_unlocked_ioctl,
+ .compat_ioctl = vmci_host_unlocked_ioctl,
+};
+
+static struct miscdevice vmci_host_miscdev = {
+ .name = "vmci",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &vmuser_fops,
+};
+
+int __init vmci_host_init(void)
+{
+ int error;
+
+ host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
+ VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+ -1, VMCI_VERSION, NULL);
+ if (IS_ERR(host_context)) {
+ error = PTR_ERR(host_context);
+ pr_warn("Failed to initialize VMCIContext (error=%d)\n",
+ error);
+ return error;
+ }
+
+ error = misc_register(&vmci_host_miscdev);
+ if (error) {
+ pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
+ vmci_host_miscdev.name,
+ MISC_MAJOR, vmci_host_miscdev.minor,
+ error);
+ pr_warn("Unable to initialize host personality\n");
+ vmci_ctx_destroy(host_context);
+ return error;
+ }
+
+ pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
+ vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);
+
+ vmci_host_device_initialized = true;
+ return 0;
+}
+
+void __exit vmci_host_exit(void)
+{
+ int error;
+
+ vmci_host_device_initialized = false;
+
+ error = misc_deregister(&vmci_host_miscdev);
+ if (error)
+ pr_warn("Error unregistering character device: %d\n", error);
+
+ vmci_ctx_destroy(host_context);
+ vmci_qp_broker_exit();
+
+ pr_debug("VMCI host driver module unloaded\n");
+}
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
new file mode 100644
index 000000000000..d94245dbd765
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -0,0 +1,3425 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/wait.h>
+#include <linux/vmalloc.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+#include "vmci_route.h"
+
+/*
+ * In the following, we will distinguish between two kinds of VMX processes -
+ * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
+ * VMCI page files in the VMX and supporting VM to VM communication and the
+ * newer ones that use the guest memory directly. We will in the following
+ * refer to the older VMX versions as old-style VMX'en, and the newer ones as
+ * new-style VMX'en.
+ *
+ * The state transition diagram is as follows (the VMCIQPB_ prefix has been
+ * removed for readability) - see below for more details on the transitions:
+ *
+ * -------------- NEW -------------
+ * | |
+ * \_/ \_/
+ * CREATED_NO_MEM <-----------------> CREATED_MEM
+ * | | |
+ * | o-----------------------o |
+ * | | |
+ * \_/ \_/ \_/
+ * ATTACHED_NO_MEM <----------------> ATTACHED_MEM
+ * | | |
+ * | o----------------------o |
+ * | | |
+ * \_/ \_/ \_/
+ * SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
+ * | |
+ * | |
+ * -------------> gone <-------------
+ *
+ * In more detail. When a VMCI queue pair is first created, it will be in the
+ * VMCIQPB_NEW state. It will then move into one of the following states:
+ *
+ * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
+ *
+ * - the create was performed by a host endpoint, in which case there is
+ * no backing memory yet.
+ *
+ * - the create was initiated by an old-style VMX, that uses
+ * vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
+ * a later point in time. This state can be distinguished from the one
+ * above by the context ID of the creator. A host side is not allowed to
+ * attach until the page store has been set.
+ *
+ * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
+ * is created by a VMX using the queue pair device backend that
+ * sets the UVAs of the queue pair immediately and stores the
+ * information for later attachers. At this point, it is ready for
+ * the host side to attach to it.
+ *
+ * Once the queue pair is in one of the created states (with the exception of
+ * the case mentioned for older VMX'en above), it is possible to attach to the
+ * queue pair. Again we have two new states possible:
+ *
+ * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
+ * paths:
+ *
+ * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
+ * pair, and attaches to a queue pair previously created by the host side.
+ *
+ * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
+ * already created by a guest.
+ *
+ * - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
+ * vmci_qp_broker_set_page_store (see below).
+ *
+ * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
+ * VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will
+ * bring the queue pair into this state. Once vmci_qp_broker_set_page_store
+ * is called to register the user memory, the VMCIQPB_ATTACHED_MEM state
+ * will be entered.
+ *
+ * From the attached queue pair, the queue pair can enter the shutdown states
+ * when either side of the queue pair detaches. If the guest side detaches
+ * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
+ * the content of the queue pair will no longer be available. If the host
+ * side detaches first, the queue pair will either enter the
+ * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
+ * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
+ * (e.g., the host detaches while a guest is stunned).
+ *
+ * New-style VMX'en will also unmap guest memory, if the guest is
+ * quiesced, e.g., during a snapshot operation. In that case, the guest
+ * memory will no longer be available, and the queue pair will transition from
+ * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
+ * in which case the queue pair will transition from the *_NO_MEM state at that
+ * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
+ * since the peer may have either attached or detached in the meantime. The
+ * values are laid out such that ++ on a state will move from a *_NO_MEM to a
+ * *_MEM state, and vice versa.
+ */
+
+/*
+ * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these
+ * types are passed around to enqueue and dequeue routines. Note that
+ * often the functions passed are simply wrappers around memcpy
+ * itself.
+ *
+ * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
+ * there's an unused last parameter for the hosted side. In
+ * ESX, that parameter holds a buffer type.
+ */
+typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
+ u64 queue_offset, const void *src,
+ size_t src_offset, size_t size);
+typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
+ const struct vmci_queue *queue,
+ u64 queue_offset, size_t size);
+
+/* The Kernel specific component of the struct vmci_queue structure. */
+struct vmci_queue_kern_if {
+ struct page **page;
+ struct page **header_page;
+ void *va;
+ struct mutex __mutex; /* Protects the queue. */
+ struct mutex *mutex; /* Shared by producer and consumer queues. */
+ bool host;
+ size_t num_pages;
+ bool mapped;
+};
+
+/*
+ * This structure is opaque to the clients.
+ */
+struct vmci_qp {
+ struct vmci_handle handle;
+ struct vmci_queue *produce_q;
+ struct vmci_queue *consume_q;
+ u64 produce_q_size;
+ u64 consume_q_size;
+ u32 peer;
+ u32 flags;
+ u32 priv_flags;
+ bool guest_endpoint;
+ unsigned int blocked;
+ unsigned int generation;
+ wait_queue_head_t event;
+};
+
+enum qp_broker_state {
+ VMCIQPB_NEW,
+ VMCIQPB_CREATED_NO_MEM,
+ VMCIQPB_CREATED_MEM,
+ VMCIQPB_ATTACHED_NO_MEM,
+ VMCIQPB_ATTACHED_MEM,
+ VMCIQPB_SHUTDOWN_NO_MEM,
+ VMCIQPB_SHUTDOWN_MEM,
+ VMCIQPB_GONE
+};
+
+#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
+ _qpb->state == VMCIQPB_ATTACHED_MEM || \
+ _qpb->state == VMCIQPB_SHUTDOWN_MEM)
+
+/*
+ * In the queue pair broker, we always use the guest point of view for
+ * the produce and consume queue values and references, e.g., the
+ * produce queue size stored is the guest's produce queue size. The
+ * host endpoint will need to swap these around. The only exception is
+ * the local queue pairs on the host, in which case the host endpoint
+ * that creates the queue pair will have the right orientation, and
+ * the attaching host endpoint will need to swap.
+ */
+struct qp_entry {
+ struct list_head list_item;
+ struct vmci_handle handle;
+ u32 peer;
+ u32 flags;
+ u64 produce_size;
+ u64 consume_size;
+ u32 ref_count;
+};
+
+struct qp_broker_entry {
+ struct vmci_resource resource;
+ struct qp_entry qp;
+ u32 create_id;
+ u32 attach_id;
+ enum qp_broker_state state;
+ bool require_trusted_attach;
+ bool created_by_trusted;
+ bool vmci_page_files; /* Created by VMX using VMCI page files */
+ struct vmci_queue *produce_q;
+ struct vmci_queue *consume_q;
+ struct vmci_queue_header saved_produce_q;
+ struct vmci_queue_header saved_consume_q;
+ vmci_event_release_cb wakeup_cb;
+ void *client_data;
+ void *local_mem; /* Kernel memory for local queue pair */
+};
+
+struct qp_guest_endpoint {
+ struct vmci_resource resource;
+ struct qp_entry qp;
+ u64 num_ppns;
+ void *produce_q;
+ void *consume_q;
+ struct ppn_set ppn_set;
+};
+
+struct qp_list {
+ struct list_head head;
+ struct mutex mutex; /* Protect queue list. */
+};
+
+static struct qp_list qp_broker_list = {
+ .head = LIST_HEAD_INIT(qp_broker_list.head),
+ .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
+};
+
+static struct qp_list qp_guest_endpoints = {
+ .head = LIST_HEAD_INIT(qp_guest_endpoints.head),
+ .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
+};
+
+#define INVALID_VMCI_GUEST_MEM_ID 0
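+/*
+ * Number of pages backing a queue pair entry: the data pages of both
+ * queues plus one header page for each queue (hence the '+ 2').
+ */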
+#define QPE_NUM_PAGES(_QPE) ((u32) \
+ (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
+ DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
+
+
+/*
+ * Frees kernel VA space for a given queue and its queue header, and
+ * frees physical data pages.
+ */
+static void qp_free_queue(void *q, u64 size)
+{
+ struct vmci_queue *queue = q;
+
+ if (queue) {
+ u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
+
+ if (queue->kernel_if->mapped) {
+ vunmap(queue->kernel_if->va);
+ queue->kernel_if->va = NULL;
+ }
+
+ while (i)
+ __free_page(queue->kernel_if->page[--i]);
+
+ vfree(queue->q_header);
+ }
+}
+
+/*
+ * Allocates kernel VA space of specified size, plus space for the
+ * queue structure/kernel interface and the queue header. Allocates
+ * physical pages for the queue data pages.
+ *
+ * PAGE m: struct vmci_queue_header (struct vmci_queue->q_header)
+ * PAGE m+1: struct vmci_queue
+ * PAGE m+1+q: struct vmci_queue_kern_if (struct vmci_queue->kernel_if)
+ * PAGE n-size: Data pages (struct vmci_queue->kernel_if->page[])
+ */
+static void *qp_alloc_queue(u64 size, u32 flags)
+{
+ u64 i;
+ struct vmci_queue *queue;
+ struct vmci_queue_header *q_header;
+ const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ const uint queue_size =
+ PAGE_SIZE +
+ sizeof(*queue) + sizeof(*(queue->kernel_if)) +
+ num_data_pages * sizeof(*(queue->kernel_if->page));
+
+ q_header = vmalloc(queue_size);
+ if (!q_header)
+ return NULL;
+
+ queue = (void *)q_header + PAGE_SIZE;
+ queue->q_header = q_header;
+ queue->saved_header = NULL;
+ queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
+ queue->kernel_if->header_page = NULL; /* Unused in guest. */
+ queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
+ queue->kernel_if->host = false;
+ queue->kernel_if->va = NULL;
+ queue->kernel_if->mapped = false;
+
+ for (i = 0; i < num_data_pages; i++) {
+ queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
+ if (!queue->kernel_if->page[i])
+ goto fail;
+ }
+
+ if (vmci_qp_pinned(flags)) {
+ queue->kernel_if->va =
+ vmap(queue->kernel_if->page, num_data_pages, VM_MAP,
+ PAGE_KERNEL);
+ if (!queue->kernel_if->va)
+ goto fail;
+
+ queue->kernel_if->mapped = true;
+ }
+
+ return (void *)queue;
+
+ fail:
+ qp_free_queue(queue, i * PAGE_SIZE);
+ return NULL;
+}
+
+/*
+ * Copies from a given buffer or iovector to a VMCI Queue. Uses
+ * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * by traversing the offset -> page translation structure for the queue.
+ * Assumes that offset + size does not wrap around in the queue.
+ */
+static int __qp_memcpy_to_queue(struct vmci_queue *queue,
+ u64 queue_offset,
+ const void *src,
+ size_t size,
+ bool is_iovec)
+{
+ struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
+ size_t bytes_copied = 0;
+
+ while (bytes_copied < size) {
+ u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
+ size_t page_offset =
+ (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
+ void *va;
+ size_t to_copy;
+
+ if (!kernel_if->mapped)
+ va = kmap(kernel_if->page[page_index]);
+ else
+ va = (void *)((u8 *)kernel_if->va +
+ (page_index * PAGE_SIZE));
+
+ if (size - bytes_copied > PAGE_SIZE - page_offset)
+ /* Enough payload to fill up from this page. */
+ to_copy = PAGE_SIZE - page_offset;
+ else
+ to_copy = size - bytes_copied;
+
+ if (is_iovec) {
+ struct iovec *iov = (struct iovec *)src;
+ int err;
+
+ /* The iovec will track bytes_copied internally. */
+ err = memcpy_fromiovec((u8 *)va + page_offset,
+ iov, to_copy);
+ if (err != 0) {
+ if (!kernel_if->mapped)
+ kunmap(kernel_if->page[page_index]);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+ } else {
+ memcpy((u8 *)va + page_offset,
+ (u8 *)src + bytes_copied, to_copy);
+ }
+
+ bytes_copied += to_copy;
+ if (!kernel_if->mapped)
+ kunmap(kernel_if->page[page_index]);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Copies to a given buffer or iovector from a VMCI Queue. Uses
+ * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * by traversing the offset -> page translation structure for the queue.
+ * Assumes that offset + size does not wrap around in the queue.
+ */
+static int __qp_memcpy_from_queue(void *dest,
+ const struct vmci_queue *queue,
+ u64 queue_offset,
+ size_t size,
+ bool is_iovec)
+{
+ struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
+ size_t bytes_copied = 0;
+
+ while (bytes_copied < size) {
+ u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
+ size_t page_offset =
+ (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
+ void *va;
+ size_t to_copy;
+
+ if (!kernel_if->mapped)
+ va = kmap(kernel_if->page[page_index]);
+ else
+ va = (void *)((u8 *)kernel_if->va +
+ (page_index * PAGE_SIZE));
+
+ if (size - bytes_copied > PAGE_SIZE - page_offset)
+ /* Enough payload to fill up this page. */
+ to_copy = PAGE_SIZE - page_offset;
+ else
+ to_copy = size - bytes_copied;
+
+ if (is_iovec) {
+ struct iovec *iov = (struct iovec *)dest;
+ int err;
+
+ /* The iovec will track bytes_copied internally. */
+ err = memcpy_toiovec(iov, (u8 *)va + page_offset,
+ to_copy);
+ if (err != 0) {
+ if (!kernel_if->mapped)
+ kunmap(kernel_if->page[page_index]);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+ } else {
+ memcpy((u8 *)dest + bytes_copied,
+ (u8 *)va + page_offset, to_copy);
+ }
+
+ bytes_copied += to_copy;
+ if (!kernel_if->mapped)
+ kunmap(kernel_if->page[page_index]);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Allocates two lists of PPNs --- one for the pages in the produce queue,
+ * and the other for the pages in the consume queue. Initializes the lists
+ * of PPNs with the page frame numbers of the KVA for the two queues (and
+ * the queue headers).
+ */
+static int qp_alloc_ppn_set(void *prod_q,
+ u64 num_produce_pages,
+ void *cons_q,
+ u64 num_consume_pages, struct ppn_set *ppn_set)
+{
+ u32 *produce_ppns;
+ u32 *consume_ppns;
+ struct vmci_queue *produce_q = prod_q;
+ struct vmci_queue *consume_q = cons_q;
+ u64 i;
+
+ if (!produce_q || !num_produce_pages || !consume_q ||
+ !num_consume_pages || !ppn_set)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (ppn_set->initialized)
+ return VMCI_ERROR_ALREADY_EXISTS;
+
+ produce_ppns =
+ kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
+ if (!produce_ppns)
+ return VMCI_ERROR_NO_MEM;
+
+ consume_ppns =
+ kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
+ if (!consume_ppns) {
+ kfree(produce_ppns);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ produce_ppns[0] = page_to_pfn(vmalloc_to_page(produce_q->q_header));
+ for (i = 1; i < num_produce_pages; i++) {
+ unsigned long pfn;
+
+ produce_ppns[i] =
+ page_to_pfn(produce_q->kernel_if->page[i - 1]);
+ pfn = produce_ppns[i];
+
+ /* Fail allocation if PFN isn't supported by hypervisor. */
+ if (sizeof(pfn) > sizeof(*produce_ppns)
+ && pfn != produce_ppns[i])
+ goto ppn_error;
+ }
+
+ consume_ppns[0] = page_to_pfn(vmalloc_to_page(consume_q->q_header));
+ for (i = 1; i < num_consume_pages; i++) {
+ unsigned long pfn;
+
+ consume_ppns[i] =
+ page_to_pfn(consume_q->kernel_if->page[i - 1]);
+ pfn = consume_ppns[i];
+
+ /* Fail allocation if PFN isn't supported by hypervisor. */
+ if (sizeof(pfn) > sizeof(*consume_ppns)
+ && pfn != consume_ppns[i])
+ goto ppn_error;
+ }
+
+ ppn_set->num_produce_pages = num_produce_pages;
+ ppn_set->num_consume_pages = num_consume_pages;
+ ppn_set->produce_ppns = produce_ppns;
+ ppn_set->consume_ppns = consume_ppns;
+ ppn_set->initialized = true;
+ return VMCI_SUCCESS;
+
+ ppn_error:
+ kfree(produce_ppns);
+ kfree(consume_ppns);
+ return VMCI_ERROR_INVALID_ARGS;
+}
+
+/*
+ * Frees the two lists of PPNs for a queue pair.
+ */
+static void qp_free_ppn_set(struct ppn_set *ppn_set)
+{
+ if (ppn_set->initialized) {
+ /* Do not call these functions on NULL inputs. */
+ kfree(ppn_set->produce_ppns);
+ kfree(ppn_set->consume_ppns);
+ }
+ memset(ppn_set, 0, sizeof(*ppn_set));
+}
+
+/*
+ * Populates the list of PPNs in the hypercall structure with the PPNs
+ * of the produce queue and the consume queue.
+ */
+static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
+{
+ memcpy(call_buf, ppn_set->produce_ppns,
+ ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
+ memcpy(call_buf +
+ ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
+ ppn_set->consume_ppns,
+ ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
+
+ return VMCI_SUCCESS;
+}
+
+static int qp_memcpy_to_queue(struct vmci_queue *queue,
+ u64 queue_offset,
+ const void *src, size_t src_offset, size_t size)
+{
+ return __qp_memcpy_to_queue(queue, queue_offset,
+ (u8 *)src + src_offset, size, false);
+}
+
+static int qp_memcpy_from_queue(void *dest,
+ size_t dest_offset,
+ const struct vmci_queue *queue,
+ u64 queue_offset, size_t size)
+{
+ return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
+ queue, queue_offset, size, false);
+}
+
+/*
+ * Copies from a given iovec to a VMCI Queue.
+ */
+static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
+ u64 queue_offset,
+ const void *src,
+ size_t src_offset, size_t size)
+{
+
+ /*
+ * We ignore src_offset because src is really a struct iovec * and will
+ * maintain offset internally.
+ */
+ return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
+}
+
+/*
+ * Copies to a given iovec from a VMCI Queue.
+ */
+static int qp_memcpy_from_queue_iov(void *dest,
+ size_t dest_offset,
+ const struct vmci_queue *queue,
+ u64 queue_offset, size_t size)
+{
+ /*
+ * We ignore dest_offset because dest is really a struct iovec * and
+ * will maintain offset internally.
+ */
+ return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
+}
+
+/*
+ * Allocates kernel VA space of specified size plus space for the queue
+ * and kernel interface. This is different from the guest queue allocator,
+ * because we do not allocate our own queue header/data pages here but
+ * share those of the guest.
+ */
+static struct vmci_queue *qp_host_alloc_queue(u64 size)
+{
+ struct vmci_queue *queue;
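+ /* num_pages includes one extra page for the queue header. */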
+ const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
+ const size_t queue_page_size =
+ num_pages * sizeof(*queue->kernel_if->page);
+
+ queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
+ if (queue) {
+ queue->q_header = NULL;
+ queue->saved_header = NULL;
+ queue->kernel_if =
+ (struct vmci_queue_kern_if *)((u8 *)queue +
+ sizeof(*queue));
+ queue->kernel_if->host = true;
+ queue->kernel_if->mutex = NULL;
+ queue->kernel_if->num_pages = num_pages;
+ queue->kernel_if->header_page =
+ (struct page **)((u8 *)queue + queue_size);
+ queue->kernel_if->page = &queue->kernel_if->header_page[1];
+ queue->kernel_if->va = NULL;
+ queue->kernel_if->mapped = false;
+ }
+
+ return queue;
+}
+
+/*
+ * Frees kernel memory for a given queue (header plus translation
+ * structure).
+ */
+static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
+{
+ kfree(queue);
+}
+
+/*
+ * Initialize the mutex for the pair of queues. This mutex is used to
+ * protect the q_header and the buffer from changing out from under any
+ * users of either queue. Of course, it is only effective if the mutex
+ * is actually acquired. The queue structure must lie in non-paged memory,
+ * or we cannot guarantee access to the mutex.
+ */
+static void qp_init_queue_mutex(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ /*
+ * Only the host queue has shared state - the guest queues do not
+ * need to synchronize access using a queue mutex.
+ */
+
+ if (produce_q->kernel_if->host) {
+ produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
+ consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
+ mutex_init(produce_q->kernel_if->mutex);
+ }
+}
+
+/*
+ * Cleans up the mutex for the pair of queues.
+ */
+static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ if (produce_q->kernel_if->host) {
+ produce_q->kernel_if->mutex = NULL;
+ consume_q->kernel_if->mutex = NULL;
+ }
+}
+
+/*
+ * Acquire the mutex for the queue. Note that the produce_q and
+ * the consume_q share a mutex, so only one of the two needs to
+ * be passed in to this routine. Either will work just fine.
+ */
+static void qp_acquire_queue_mutex(struct vmci_queue *queue)
+{
+ if (queue->kernel_if->host)
+ mutex_lock(queue->kernel_if->mutex);
+}
+
+/*
+ * Release the mutex for the queue. Note that the produce_q and
+ * the consume_q share a mutex, so only one of the two needs to
+ * be passed in to this routine. Either will work just fine.
+ */
+static void qp_release_queue_mutex(struct vmci_queue *queue)
+{
+ if (queue->kernel_if->host)
+ mutex_unlock(queue->kernel_if->mutex);
+}
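+
+/*
+ * Typical usage sketch for the mutex helpers above, assuming a host-side
+ * pair that has already been through qp_init_queue_mutex():
+ *
+ *	qp_acquire_queue_mutex(produce_q);
+ *	... access produce_q->q_header and consume_q->q_header ...
+ *	qp_release_queue_mutex(produce_q);
+ *
+ * Passing consume_q instead takes the very same mutex, since both
+ * kernel_if->mutex pointers alias produce_q->kernel_if->__mutex.
+ */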
+
+/*
+ * Helper function to release an array of pages previously obtained
+ * using get_user_pages().
+ */
+static void qp_release_pages(struct page **pages,
+ u64 num_pages, bool dirty)
+{
+ int i;
+
+ for (i = 0; i < num_pages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ page_cache_release(pages[i]);
+ pages[i] = NULL;
+ }
+}
+
+/*
+ * Lock the user pages referenced by the produce and consume buffer
+ * UVAs into memory and populate the page arrays of the produce and
+ * consume queues with them.
+ */
+static int qp_host_get_user_memory(u64 produce_uva,
+ u64 consume_uva,
+ struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ int retval;
+ int err = VMCI_SUCCESS;
+
+ down_write(&current->mm->mmap_sem);
+ retval = get_user_pages(current,
+ current->mm,
+ (uintptr_t) produce_uva,
+ produce_q->kernel_if->num_pages,
+ 1, 0, produce_q->kernel_if->header_page, NULL);
+ if (retval < produce_q->kernel_if->num_pages) {
+ pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
+ qp_release_pages(produce_q->kernel_if->header_page, retval,
+ false);
+ err = VMCI_ERROR_NO_MEM;
+ goto out;
+ }
+
+ retval = get_user_pages(current,
+ current->mm,
+ (uintptr_t) consume_uva,
+ consume_q->kernel_if->num_pages,
+ 1, 0, consume_q->kernel_if->header_page, NULL);
+ if (retval < consume_q->kernel_if->num_pages) {
+ pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
+ qp_release_pages(consume_q->kernel_if->header_page, retval,
+ false);
+ qp_release_pages(produce_q->kernel_if->header_page,
+ produce_q->kernel_if->num_pages, false);
+ err = VMCI_ERROR_NO_MEM;
+ }
+
+ out:
+ up_write(&current->mm->mmap_sem);
+
+ return err;
+}
+
+/*
+ * Registers the specification of the user pages used for backing a queue
+ * pair. Enough information to map in pages is stored in the OS specific
+ * part of the struct vmci_queue structure.
+ */
+static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
+ struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ u64 produce_uva;
+ u64 consume_uva;
+
+ /*
+ * The new style and the old style mapping only differs in
+ * that we either get a single or two UVAs, so we split the
+ * single UVA range at the appropriate spot.
+ */
+ produce_uva = page_store->pages;
+ consume_uva = page_store->pages +
+ produce_q->kernel_if->num_pages * PAGE_SIZE;
+ return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
+ consume_q);
+}
+
+/*
+ * Releases and removes the references to the user pages backing the
+ * given produce and consume queues. Pages are released from the page
+ * cache and may become swappable again.
+ */
+static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ qp_release_pages(produce_q->kernel_if->header_page,
+ produce_q->kernel_if->num_pages, true);
+ memset(produce_q->kernel_if->header_page, 0,
+ sizeof(*produce_q->kernel_if->header_page) *
+ produce_q->kernel_if->num_pages);
+ qp_release_pages(consume_q->kernel_if->header_page,
+ consume_q->kernel_if->num_pages, true);
+ memset(consume_q->kernel_if->header_page, 0,
+ sizeof(*consume_q->kernel_if->header_page) *
+ consume_q->kernel_if->num_pages);
+}
+
+/*
+ * Once qp_host_register_user_memory has been performed on a
+ * queue, the queue pair headers can be mapped into the
+ * kernel. Once mapped, they must be unmapped with
+ * qp_host_unmap_queues prior to calling
+ * qp_host_unregister_user_memory.
+ * Pages are pinned.
+ */
+static int qp_host_map_queues(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ int result;
+
+ if (!produce_q->q_header || !consume_q->q_header) {
+ struct page *headers[2];
+
+ if (produce_q->q_header != consume_q->q_header)
+ return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+
+ if (produce_q->kernel_if->header_page == NULL ||
+ *produce_q->kernel_if->header_page == NULL)
+ return VMCI_ERROR_UNAVAILABLE;
+
+ headers[0] = *produce_q->kernel_if->header_page;
+ headers[1] = *consume_q->kernel_if->header_page;
+
+ produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
+ if (produce_q->q_header != NULL) {
+ consume_q->q_header =
+ (struct vmci_queue_header *)((u8 *)
+ produce_q->q_header +
+ PAGE_SIZE);
+ result = VMCI_SUCCESS;
+ } else {
+ pr_warn("vmap failed\n");
+ result = VMCI_ERROR_NO_MEM;
+ }
+ } else {
+ result = VMCI_SUCCESS;
+ }
+
+ return result;
+}
+
+/*
+ * Unmaps previously mapped queue pair headers from the kernel.
+ * Pages are unpinned.
+ */
+static int qp_host_unmap_queues(u32 gid,
+ struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ if (produce_q->q_header) {
+ if (produce_q->q_header < consume_q->q_header)
+ vunmap(produce_q->q_header);
+ else
+ vunmap(consume_q->q_header);
+
+ produce_q->q_header = NULL;
+ consume_q->q_header = NULL;
+ }
+
+ return VMCI_SUCCESS;
+}
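+
+/*
+ * Lifecycle sketch for the host-side helpers above (error handling elided;
+ * page_store is assumed to have been supplied by the VMX):
+ *
+ *	qp_host_register_user_memory(page_store, produce_q, consume_q);
+ *	qp_host_map_queues(produce_q, consume_q);
+ *	... access produce_q->q_header and consume_q->q_header ...
+ *	qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID, produce_q, consume_q);
+ *	qp_host_unregister_user_memory(produce_q, consume_q);
+ */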
+
+/*
+ * Finds the entry in the list corresponding to a given handle. Assumes
+ * that the list is locked.
+ */
+static struct qp_entry *qp_list_find(struct qp_list *qp_list,
+ struct vmci_handle handle)
+{
+ struct qp_entry *entry;
+
+ if (vmci_handle_is_invalid(handle))
+ return NULL;
+
+ list_for_each_entry(entry, &qp_list->head, list_item) {
+ if (vmci_handle_is_equal(entry->handle, handle))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle.
+ */
+static struct qp_guest_endpoint *
+qp_guest_handle_to_entry(struct vmci_handle handle)
+{
+ struct qp_guest_endpoint *entry;
+ struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);
+
+ entry = qp ? container_of(
+ qp, struct qp_guest_endpoint, qp) : NULL;
+ return entry;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle.
+ */
+static struct qp_broker_entry *
+qp_broker_handle_to_entry(struct vmci_handle handle)
+{
+ struct qp_broker_entry *entry;
+ struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);
+
+ entry = qp ? container_of(
+ qp, struct qp_broker_entry, qp) : NULL;
+ return entry;
+}
+
+/*
+ * Dispatches a queue pair event message directly into the local event
+ * queue.
+ */
+static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
+{
+ u32 context_id = vmci_get_context_id();
+ struct vmci_event_qp ev;
+
+ ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
+ ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_CONTEXT_RESOURCE_ID);
+ ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+ ev.msg.event_data.event =
+ attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
+ ev.payload.peer_id = context_id;
+ ev.payload.handle = handle;
+
+ return vmci_event_dispatch(&ev.msg.hdr);
+}
+
+/*
+ * Allocates and initializes a qp_guest_endpoint structure.
+ * Allocates a queue_pair rid (and handle) iff the given entry has
+ * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
+ * are reserved handles. Assumes that the QP list mutex is held
+ * by the caller.
+ */
+static struct qp_guest_endpoint *
+qp_guest_endpoint_create(struct vmci_handle handle,
+ u32 peer,
+ u32 flags,
+ u64 produce_size,
+ u64 consume_size,
+ void *produce_q,
+ void *consume_q)
+{
+ int result;
+ struct qp_guest_endpoint *entry;
+ /* One page each for the queue headers. */
+ const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
+ DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
+
+ if (vmci_handle_is_invalid(handle)) {
+ u32 context_id = vmci_get_context_id();
+
+ handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry) {
+ entry->qp.peer = peer;
+ entry->qp.flags = flags;
+ entry->qp.produce_size = produce_size;
+ entry->qp.consume_size = consume_size;
+ entry->qp.ref_count = 0;
+ entry->num_ppns = num_ppns;
+ entry->produce_q = produce_q;
+ entry->consume_q = consume_q;
+ INIT_LIST_HEAD(&entry->qp.list_item);
+
+ /* Add resource obj */
+ result = vmci_resource_add(&entry->resource,
+ VMCI_RESOURCE_TYPE_QPAIR_GUEST,
+ handle);
+ entry->qp.handle = vmci_resource_handle(&entry->resource);
+ if ((result != VMCI_SUCCESS) ||
+ qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
+ pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
+ handle.context, handle.resource, result);
+ kfree(entry);
+ entry = NULL;
+ }
+ }
+ return entry;
+}
+
+/*
+ * Frees a qp_guest_endpoint structure.
+ */
+static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
+{
+ qp_free_ppn_set(&entry->ppn_set);
+ qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
+ qp_free_queue(entry->produce_q, entry->qp.produce_size);
+ qp_free_queue(entry->consume_q, entry->qp.consume_size);
+ /* Unlink from resource hash table and free callback */
+ vmci_resource_remove(&entry->resource);
+
+ kfree(entry);
+}
+
+/*
+ * Helper to make a queue_pairAlloc hypercall when the driver is
+ * supporting a guest device.
+ */
+static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
+{
+ struct vmci_qp_alloc_msg *alloc_msg;
+ size_t msg_size;
+ int result;
+
+ if (!entry || entry->num_ppns <= 2)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ msg_size = sizeof(*alloc_msg) +
+ (size_t) entry->num_ppns * sizeof(u32);
+ alloc_msg = kmalloc(msg_size, GFP_KERNEL);
+ if (!alloc_msg)
+ return VMCI_ERROR_NO_MEM;
+
+ alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_QUEUEPAIR_ALLOC);
+ alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
+ alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
+ alloc_msg->handle = entry->qp.handle;
+ alloc_msg->peer = entry->qp.peer;
+ alloc_msg->flags = entry->qp.flags;
+ alloc_msg->produce_size = entry->qp.produce_size;
+ alloc_msg->consume_size = entry->qp.consume_size;
+ alloc_msg->num_ppns = entry->num_ppns;
+
+ result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
+ &entry->ppn_set);
+ if (result == VMCI_SUCCESS)
+ result = vmci_send_datagram(&alloc_msg->hdr);
+
+ kfree(alloc_msg);
+
+ return result;
+}
+
+/*
+ * Helper to make a queue_pairDetach hypercall when the driver is
+ * supporting a guest device.
+ */
+static int qp_detatch_hypercall(struct vmci_handle handle)
+{
+ struct vmci_qp_detach_msg detach_msg;
+
+ detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_QUEUEPAIR_DETACH);
+ detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ detach_msg.hdr.payload_size = sizeof(handle);
+ detach_msg.handle = handle;
+
+ return vmci_send_datagram(&detach_msg.hdr);
+}
+
+/*
+ * Adds the given entry to the list. Assumes that the list is locked.
+ */
+static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
+{
+ if (entry)
+ list_add(&entry->list_item, &qp_list->head);
+}
+
+/*
+ * Removes the given entry from the list. Assumes that the list is locked.
+ */
+static void qp_list_remove_entry(struct qp_list *qp_list,
+ struct qp_entry *entry)
+{
+ if (entry)
+ list_del(&entry->list_item);
+}
+
+/*
+ * Helper for VMCI queue_pair detach interface. Frees the physical
+ * pages for the queue pair.
+ */
+static int qp_detatch_guest_work(struct vmci_handle handle)
+{
+ int result;
+ struct qp_guest_endpoint *entry;
+ u32 ref_count = ~0; /* To avoid compiler warning below */
+
+ mutex_lock(&qp_guest_endpoints.mutex);
+
+ entry = qp_guest_handle_to_entry(handle);
+ if (!entry) {
+ mutex_unlock(&qp_guest_endpoints.mutex);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+ result = VMCI_SUCCESS;
+
+ if (entry->qp.ref_count > 1) {
+ result = qp_notify_peer_local(false, handle);
+ /*
+ * We can fail to notify a local queuepair
+ * because we can't allocate. We still want
+ * to release the entry if that happens, so
+ * don't bail out yet.
+ */
+ }
+ } else {
+ result = qp_detatch_hypercall(handle);
+ if (result < VMCI_SUCCESS) {
+ /*
+ * We failed to notify a non-local queuepair.
+ * That other queuepair might still be
+ * accessing the shared memory, so don't
+ * release the entry yet. It will get cleaned
+ * up by vmci_qp_guest_endpoints_exit() if
+ * necessary (assuming we are going away,
+ * otherwise why did this fail?).
+ */
+
+ mutex_unlock(&qp_guest_endpoints.mutex);
+ return result;
+ }
+ }
+
+ /*
+ * If we get here then we either failed to notify a local queuepair, or
+ * we succeeded in all cases. Release the entry if required.
+ */
+
+ entry->qp.ref_count--;
+ if (entry->qp.ref_count == 0)
+ qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);
+
+ /* If we didn't remove the entry, this could change once we unlock. */
+ if (entry)
+ ref_count = entry->qp.ref_count;
+
+ mutex_unlock(&qp_guest_endpoints.mutex);
+
+ if (ref_count == 0)
+ qp_guest_endpoint_destroy(entry);
+
+ return result;
+}
+
+/*
+ * This function handles the actual allocation of a VMCI queue
+ * pair guest endpoint. Allocates physical pages for the queue
+ * pair. It makes OS-dependent calls through generic wrappers.
+ */
+static int qp_alloc_guest_work(struct vmci_handle *handle,
+ struct vmci_queue **produce_q,
+ u64 produce_size,
+ struct vmci_queue **consume_q,
+ u64 consume_size,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags)
+{
+ const u64 num_produce_pages =
+ DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
+ const u64 num_consume_pages =
+ DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
+ void *my_produce_q = NULL;
+ void *my_consume_q = NULL;
+ int result;
+ struct qp_guest_endpoint *queue_pair_entry = NULL;
+
+ if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
+ return VMCI_ERROR_NO_ACCESS;
+
+ mutex_lock(&qp_guest_endpoints.mutex);
+
+ queue_pair_entry = qp_guest_handle_to_entry(*handle);
+ if (queue_pair_entry) {
+ if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+ /* Local attach case. */
+ if (queue_pair_entry->qp.ref_count > 1) {
+ pr_devel("Error attempting to attach more than once\n");
+ result = VMCI_ERROR_UNAVAILABLE;
+ goto error_keep_entry;
+ }
+
+ if (queue_pair_entry->qp.produce_size != consume_size ||
+ queue_pair_entry->qp.consume_size !=
+ produce_size ||
+ queue_pair_entry->qp.flags !=
+ (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
+ pr_devel("Error mismatched queue pair in local attach\n");
+ result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
+ goto error_keep_entry;
+ }
+
+ /*
+ * Do a local attach. We swap the consume and
+ * produce queues for the attacher and deliver
+ * an attach event.
+ */
+ result = qp_notify_peer_local(true, *handle);
+ if (result < VMCI_SUCCESS)
+ goto error_keep_entry;
+
+ my_produce_q = queue_pair_entry->consume_q;
+ my_consume_q = queue_pair_entry->produce_q;
+ goto out;
+ }
+
+ result = VMCI_ERROR_ALREADY_EXISTS;
+ goto error_keep_entry;
+ }
+
+ my_produce_q = qp_alloc_queue(produce_size, flags);
+ if (!my_produce_q) {
+ pr_warn("Error allocating pages for produce queue\n");
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+
+ my_consume_q = qp_alloc_queue(consume_size, flags);
+ if (!my_consume_q) {
+ pr_warn("Error allocating pages for consume queue\n");
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+
+ queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
+ produce_size, consume_size,
+ my_produce_q, my_consume_q);
+ if (!queue_pair_entry) {
+ pr_warn("Error allocating memory in %s\n", __func__);
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+
+ result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
+ num_consume_pages,
+ &queue_pair_entry->ppn_set);
+ if (result < VMCI_SUCCESS) {
+ pr_warn("qp_alloc_ppn_set failed\n");
+ goto error;
+ }
+
+ /*
+ * It's only necessary to notify the host if this queue pair will be
+ * attached to from another context.
+ */
+ if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+ /* Local create case. */
+ u32 context_id = vmci_get_context_id();
+
+ /*
+ * Enforce similar checks on local queue pairs as we
+ * do for regular ones. The handle's context must
+ * match the creator or attacher context id (here they
+ * are both the current context id) and the
+ * attach-only flag cannot exist during create. We
+ * also ensure specified peer is this context or an
+ * invalid one.
+ */
+ if (queue_pair_entry->qp.handle.context != context_id ||
+ (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
+ queue_pair_entry->qp.peer != context_id)) {
+ result = VMCI_ERROR_NO_ACCESS;
+ goto error;
+ }
+
+ if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
+ result = VMCI_ERROR_NOT_FOUND;
+ goto error;
+ }
+ } else {
+ result = qp_alloc_hypercall(queue_pair_entry);
+ if (result < VMCI_SUCCESS) {
+ pr_warn("qp_alloc_hypercall result = %d\n", result);
+ goto error;
+ }
+ }
+
+ qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
+ (struct vmci_queue *)my_consume_q);
+
+ qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
+
+ out:
+ queue_pair_entry->qp.ref_count++;
+ *handle = queue_pair_entry->qp.handle;
+ *produce_q = (struct vmci_queue *)my_produce_q;
+ *consume_q = (struct vmci_queue *)my_consume_q;
+
+ /*
+ * We should initialize the queue pair header pages on a local
+ * queue pair create. For non-local queue pairs, the
+ * hypervisor initializes the header pages in the create step.
+ */
+ if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
+ queue_pair_entry->qp.ref_count == 1) {
+ vmci_q_header_init((*produce_q)->q_header, *handle);
+ vmci_q_header_init((*consume_q)->q_header, *handle);
+ }
+
+ mutex_unlock(&qp_guest_endpoints.mutex);
+
+ return VMCI_SUCCESS;
+
+ error:
+ mutex_unlock(&qp_guest_endpoints.mutex);
+ if (queue_pair_entry) {
+ /* The queues will be freed inside the destroy routine. */
+ qp_guest_endpoint_destroy(queue_pair_entry);
+ } else {
+ qp_free_queue(my_produce_q, produce_size);
+ qp_free_queue(my_consume_q, consume_size);
+ }
+ return result;
+
+ error_keep_entry:
+ /* This path should only be used when an existing entry was found. */
+ mutex_unlock(&qp_guest_endpoints.mutex);
+ return result;
+}
+
+/*
+ * The first endpoint issuing a queue pair allocation will create the state
+ * of the queue pair in the queue pair broker.
+ *
+ * If the creator is a guest, it will associate a VMX virtual address range
+ * with the queue pair as specified by the page_store. For compatibility with
+ * older VMX'en, which use a separate step to set the VMX virtual address
+ * range, the virtual address range can be registered later using
+ * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
+ * used.
+ *
+ * If the creator is the host, a page_store of NULL should be used as well,
+ * since the host is not able to supply a page store for the queue pair.
+ *
+ * For older VMX and host callers, the queue pair will be created in the
+ * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
+ * created in the VMCIQPB_CREATED_MEM state.
+ */
+static int qp_broker_create(struct vmci_handle handle,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ u64 produce_size,
+ u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data, struct qp_broker_entry **ent)
+{
+ struct qp_broker_entry *entry = NULL;
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool is_local = flags & VMCI_QPFLAG_LOCAL;
+ int result;
+ u64 guest_produce_size;
+ u64 guest_consume_size;
+
+ /* Do not create if the caller asked not to. */
+ if (flags & VMCI_QPFLAG_ATTACH_ONLY)
+ return VMCI_ERROR_NOT_FOUND;
+
+ /*
+ * Creator's context ID should match handle's context ID or the creator
+ * must allow the context in handle's context ID as the "peer".
+ */
+ if (handle.context != context_id && handle.context != peer)
+ return VMCI_ERROR_NO_ACCESS;
+
+ if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
+ return VMCI_ERROR_DST_UNREACHABLE;
+
+ /*
+ * Creator's context ID for local queue pairs should match the
+ * peer, if a peer is specified.
+ */
+ if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
+ return VMCI_ERROR_NO_ACCESS;
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return VMCI_ERROR_NO_MEM;
+
+ if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
+ /*
+ * The queue pair broker entry stores values from the guest
+ * point of view, so a creating host side endpoint should swap
+ * produce and consume values -- unless it is a local queue
+ * pair, in which case no swapping is necessary, since the local
+ * attacher will swap queues.
+ */
+
+ guest_produce_size = consume_size;
+ guest_consume_size = produce_size;
+ } else {
+ guest_produce_size = produce_size;
+ guest_consume_size = consume_size;
+ }
+
+ entry->qp.handle = handle;
+ entry->qp.peer = peer;
+ entry->qp.flags = flags;
+ entry->qp.produce_size = guest_produce_size;
+ entry->qp.consume_size = guest_consume_size;
+ entry->qp.ref_count = 1;
+ entry->create_id = context_id;
+ entry->attach_id = VMCI_INVALID_ID;
+ entry->state = VMCIQPB_NEW;
+ entry->require_trusted_attach =
+ !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
+ entry->created_by_trusted =
+ !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
+ entry->vmci_page_files = false;
+ entry->wakeup_cb = wakeup_cb;
+ entry->client_data = client_data;
+ entry->produce_q = qp_host_alloc_queue(guest_produce_size);
+ if (entry->produce_q == NULL) {
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+ entry->consume_q = qp_host_alloc_queue(guest_consume_size);
+ if (entry->consume_q == NULL) {
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+
+ qp_init_queue_mutex(entry->produce_q, entry->consume_q);
+
+ INIT_LIST_HEAD(&entry->qp.list_item);
+
+ if (is_local) {
+ u8 *tmp;
+
+ entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
+ PAGE_SIZE, GFP_KERNEL);
+ if (entry->local_mem == NULL) {
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+ entry->state = VMCIQPB_CREATED_MEM;
+ entry->produce_q->q_header = entry->local_mem;
+ tmp = (u8 *)entry->local_mem + PAGE_SIZE *
+ (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
+ entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
+ } else if (page_store) {
+ /*
+ * The VMX already initialized the queue pair headers, so no
+ * need for the kernel side to do that.
+ */
+ result = qp_host_register_user_memory(page_store,
+ entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ goto error;
+
+ entry->state = VMCIQPB_CREATED_MEM;
+ } else {
+ /*
+ * A create without a page_store may be either a host
+ * side create (in which case we are waiting for the
+ * guest side to supply the memory) or an old style
+ * queue pair create (in which case we will expect a
+ * set page store call as the next step).
+ */
+ entry->state = VMCIQPB_CREATED_NO_MEM;
+ }
+
+ qp_list_add_entry(&qp_broker_list, &entry->qp);
+ if (ent != NULL)
+ *ent = entry;
+
+ /* Add to resource obj */
+ result = vmci_resource_add(&entry->resource,
+ VMCI_RESOURCE_TYPE_QPAIR_HOST,
+ handle);
+ if (result != VMCI_SUCCESS) {
+ pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
+ handle.context, handle.resource, result);
+ goto error;
+ }
+
+ entry->qp.handle = vmci_resource_handle(&entry->resource);
+ if (is_local) {
+ vmci_q_header_init(entry->produce_q->q_header,
+ entry->qp.handle);
+ vmci_q_header_init(entry->consume_q->q_header,
+ entry->qp.handle);
+ }
+
+ vmci_ctx_qp_create(context, entry->qp.handle);
+
+ return VMCI_SUCCESS;
+
+ error:
+ if (entry != NULL) {
+ qp_host_free_queue(entry->produce_q, guest_produce_size);
+ qp_host_free_queue(entry->consume_q, guest_consume_size);
+ kfree(entry);
+ }
+
+ return result;
+}
+
+/*
+ * Enqueues an event datagram to notify the peer VM attached to
+ * the given queue pair handle about an attach/detach event by the
+ * given VM. Returns the payload size of the datagram enqueued on
+ * success, an error code otherwise.
+ */
+static int qp_notify_peer(bool attach,
+ struct vmci_handle handle,
+ u32 my_id,
+ u32 peer_id)
+{
+ int rv;
+ struct vmci_event_qp ev;
+
+ if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
+ peer_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
+ * number of pending events from the hypervisor to a given VM
+ * otherwise a rogue VM could do an arbitrary number of attach
+ * and detach operations causing memory pressure in the host
+ * kernel.
+ */
+
+ ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
+ ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_CONTEXT_RESOURCE_ID);
+ ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+ ev.msg.event_data.event = attach ?
+ VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
+ ev.payload.handle = handle;
+ ev.payload.peer_id = my_id;
+
+ rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
+ &ev.msg.hdr, false);
+ if (rv < VMCI_SUCCESS)
+ pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
+ attach ? "ATTACH" : "DETACH", peer_id);
+
+ return rv;
+}
+
+/*
+ * The second endpoint issuing a queue pair allocation will attach to
+ * the queue pair registered with the queue pair broker.
+ *
+ * If the attacher is a guest, it will associate a VMX virtual address
+ * range with the queue pair as specified by the page_store. At this
+ * point, the already attached host endpoint may start using the queue
+ * pair, and an attach event is sent to it. For compatibility with
+ * older VMX'en, which use a separate step to set the VMX virtual
+ * address range, the virtual address range can be registered later
+ * using vmci_qp_broker_set_page_store. In that case, a page_store of
+ * NULL should be used, and the attach event will be generated once
+ * the actual page store has been set.
+ *
+ * If the attacher is the host, a page_store of NULL should be used as
+ * well, since the page store information is already set by the guest.
+ *
+ * For new VMX and host callers, the queue pair will be moved to the
+ * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
+ * moved to the VMCIQPB_ATTACHED_NO_MEM state.
+ */
+static int qp_broker_attach(struct qp_broker_entry *entry,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ u64 produce_size,
+ u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data,
+ struct qp_broker_entry **ent)
+{
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool is_local = flags & VMCI_QPFLAG_LOCAL;
+ int result;
+
+ if (entry->state != VMCIQPB_CREATED_NO_MEM &&
+ entry->state != VMCIQPB_CREATED_MEM)
+ return VMCI_ERROR_UNAVAILABLE;
+
+ if (is_local) {
+ if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
+ context_id != entry->create_id) {
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+ } else if (context_id == entry->create_id ||
+ context_id == entry->attach_id) {
+ return VMCI_ERROR_ALREADY_EXISTS;
+ }
+
+ if (VMCI_CONTEXT_IS_VM(context_id) &&
+ VMCI_CONTEXT_IS_VM(entry->create_id))
+ return VMCI_ERROR_DST_UNREACHABLE;
+
+ /*
+ * If we are attaching from a restricted context then the queuepair
+ * must have been created by a trusted endpoint.
+ */
+ if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+ !entry->created_by_trusted)
+ return VMCI_ERROR_NO_ACCESS;
+
+ /*
+ * If we are attaching to a queuepair that was created by a restricted
+ * context then we must be trusted.
+ */
+ if (entry->require_trusted_attach &&
+ (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
+ return VMCI_ERROR_NO_ACCESS;
+
+ /*
+ * If the creator specifies VMCI_INVALID_ID in "peer" field, access
+ * control check is not performed.
+ */
+ if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
+ return VMCI_ERROR_NO_ACCESS;
+
+ if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
+ /*
+ * Do not attach if the caller doesn't support Host Queue Pairs
+ * and a host created this queue pair.
+ */
+
+ if (!vmci_ctx_supports_host_qp(context))
+ return VMCI_ERROR_INVALID_RESOURCE;
+
+ } else if (context_id == VMCI_HOST_CONTEXT_ID) {
+ struct vmci_ctx *create_context;
+ bool supports_host_qp;
+
+ /*
+ * Do not attach a host to a user created queue pair if that
+ * user doesn't support host queue pair end points.
+ */
+
+ create_context = vmci_ctx_get(entry->create_id);
+ supports_host_qp = vmci_ctx_supports_host_qp(create_context);
+ vmci_ctx_put(create_context);
+
+ if (!supports_host_qp)
+ return VMCI_ERROR_INVALID_RESOURCE;
+ }
+
+ if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
+ return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ /*
+ * The queue pair broker entry stores values from the guest
+ * point of view, so an attaching guest should match the values
+ * stored in the entry.
+ */
+
+ if (entry->qp.produce_size != produce_size ||
+ entry->qp.consume_size != consume_size) {
+ return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+ }
+ } else if (entry->qp.produce_size != consume_size ||
+ entry->qp.consume_size != produce_size) {
+ return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+ }
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ /*
+ * If a guest attaches to a queue pair, it will supply
+ * the backing memory. If this is a pre-NOVMVM vmx,
+ * the backing memory will be supplied by calling
+ * vmci_qp_broker_set_page_store() following the
+ * return of the vmci_qp_broker_alloc() call. If it is
+ * a vmx of version NOVMVM or later, the page store
+ * must be supplied as part of the
+ * vmci_qp_broker_alloc() call. In all cases, the
+ * initially created queue pair must not yet have any
+ * memory associated with it.
+ */
+
+ if (entry->state != VMCIQPB_CREATED_NO_MEM)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (page_store != NULL) {
+ /*
+ * Patch up host state to point to guest
+ * supplied memory. The VMX already
+ * initialized the queue pair headers, so no
+ * need for the kernel side to do that.
+ */
+
+ result = qp_host_register_user_memory(page_store,
+ entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ return result;
+
+ /*
+ * Preemptively load in the headers if non-blocking to
+ * prevent blocking later.
+ */
+ if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) {
+ result = qp_host_map_queues(entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS) {
+ qp_host_unregister_user_memory(
+ entry->produce_q,
+ entry->consume_q);
+ return result;
+ }
+ }
+
+ entry->state = VMCIQPB_ATTACHED_MEM;
+ } else {
+ entry->state = VMCIQPB_ATTACHED_NO_MEM;
+ }
+ } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
+ /*
+ * The host side is attempting to attach to a queue
+ * pair that doesn't have any memory associated with
+ * it. This must be a pre NOVMVM vmx that hasn't set
+ * the page store information yet, or a quiesced VM.
+ */
+
+ return VMCI_ERROR_UNAVAILABLE;
+ } else {
+ /*
+ * For non-blocking queue pairs, we cannot rely on
+ * enqueue/dequeue to map in the pages on the
+ * host-side, since it may block, so we make an
+ * attempt here.
+ */
+
+ if (flags & VMCI_QPFLAG_NONBLOCK) {
+ result =
+ qp_host_map_queues(entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ return result;
+
+ entry->qp.flags |= flags &
+ (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED);
+ }
+
+ /* The host side has successfully attached to a queue pair. */
+ entry->state = VMCIQPB_ATTACHED_MEM;
+ }
+
+ if (entry->state == VMCIQPB_ATTACHED_MEM) {
+ result =
+ qp_notify_peer(true, entry->qp.handle, context_id,
+ entry->create_id);
+ if (result < VMCI_SUCCESS)
+ pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
+ entry->create_id, entry->qp.handle.context,
+ entry->qp.handle.resource);
+ }
+
+ entry->attach_id = context_id;
+ entry->qp.ref_count++;
+ if (wakeup_cb) {
+ entry->wakeup_cb = wakeup_cb;
+ entry->client_data = client_data;
+ }
+
+ /*
+ * When attaching to local queue pairs, the context already has
+ * an entry tracking the queue pair, so don't add another one.
+ */
+ if (!is_local)
+ vmci_ctx_qp_create(context, entry->qp.handle);
+
+ if (ent != NULL)
+ *ent = entry;
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Queue pair alloc for use when setting up queue pair endpoints
+ * on the host.
+ */
+static int qp_broker_alloc(struct vmci_handle handle,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ u64 produce_size,
+ u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data,
+ struct qp_broker_entry **ent,
+ bool *swap)
+{
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool create;
+ struct qp_broker_entry *entry = NULL;
+ bool is_local = flags & VMCI_QPFLAG_LOCAL;
+ int result;
+
+ if (vmci_handle_is_invalid(handle) ||
+ (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
+ !(produce_size || consume_size) ||
+ !context || context_id == VMCI_INVALID_ID ||
+ handle.context == VMCI_INVALID_ID) {
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * In the initial argument check, we ensure that non-vmkernel hosts
+ * are not allowed to create local queue pairs.
+ */
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!is_local && vmci_ctx_qp_exists(context, handle)) {
+ pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ mutex_unlock(&qp_broker_list.mutex);
+ return VMCI_ERROR_ALREADY_EXISTS;
+ }
+
+ if (handle.resource != VMCI_INVALID_ID)
+ entry = qp_broker_handle_to_entry(handle);
+
+ if (!entry) {
+ create = true;
+ result =
+ qp_broker_create(handle, peer, flags, priv_flags,
+ produce_size, consume_size, page_store,
+ context, wakeup_cb, client_data, ent);
+ } else {
+ create = false;
+ result =
+ qp_broker_attach(entry, peer, flags, priv_flags,
+ produce_size, consume_size, page_store,
+ context, wakeup_cb, client_data, ent);
+ }
+
+ mutex_unlock(&qp_broker_list.mutex);
+
+ if (swap)
+ *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
+ !(create && is_local);
+
+ return result;
+}
+
+/*
+ * This function implements the kernel API for allocating a queue
+ * pair.
+ */
+static int qp_alloc_host_work(struct vmci_handle *handle,
+ struct vmci_queue **produce_q,
+ u64 produce_size,
+ struct vmci_queue **consume_q,
+ u64 consume_size,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data)
+{
+ struct vmci_handle new_handle;
+ struct vmci_ctx *context;
+ struct qp_broker_entry *entry;
+ int result;
+ bool swap;
+
+ if (vmci_handle_is_invalid(*handle)) {
+ new_handle = vmci_make_handle(
+ VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
+ } else
+ new_handle = *handle;
+
+ context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
+ entry = NULL;
+ result =
+ qp_broker_alloc(new_handle, peer, flags, priv_flags,
+ produce_size, consume_size, NULL, context,
+ wakeup_cb, client_data, &entry, &swap);
+ if (result == VMCI_SUCCESS) {
+ if (swap) {
+ /*
+ * If this is a local queue pair, the attacher
+ * will swap around produce and consume
+ * queues.
+ */
+
+ *produce_q = entry->consume_q;
+ *consume_q = entry->produce_q;
+ } else {
+ *produce_q = entry->produce_q;
+ *consume_q = entry->consume_q;
+ }
+
+ *handle = vmci_resource_handle(&entry->resource);
+ } else {
+ *handle = VMCI_INVALID_HANDLE;
+ pr_devel("queue pair broker failed to alloc (result=%d)\n",
+ result);
+ }
+ vmci_ctx_put(context);
+ return result;
+}
+
+/*
+ * Allocates a VMCI queue_pair. Only checks validity of input
+ * arguments. The real work is done in the host or guest
+ * specific function.
+ */
+int vmci_qp_alloc(struct vmci_handle *handle,
+ struct vmci_queue **produce_q,
+ u64 produce_size,
+ struct vmci_queue **consume_q,
+ u64 consume_size,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ bool guest_endpoint,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data)
+{
+ if (!handle || !produce_q || !consume_q ||
+ (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (guest_endpoint) {
+ return qp_alloc_guest_work(handle, produce_q,
+ produce_size, consume_q,
+ consume_size, peer,
+ flags, priv_flags);
+ } else {
+ return qp_alloc_host_work(handle, produce_q,
+ produce_size, consume_q,
+ consume_size, peer, flags,
+ priv_flags, wakeup_cb, client_data);
+ }
+}
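+
+/*
+ * Host-side usage sketch for vmci_qp_alloc(); the queue sizes, peer and
+ * flags are illustrative values only and error handling is abbreviated:
+ *
+ *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
+ *	struct vmci_queue *produce_q;
+ *	struct vmci_queue *consume_q;
+ *	int rv;
+ *
+ *	rv = vmci_qp_alloc(&handle, &produce_q, PAGE_SIZE,
+ *			   &consume_q, PAGE_SIZE, VMCI_INVALID_ID,
+ *			   0, VMCI_NO_PRIVILEGE_FLAGS, false, NULL, NULL);
+ *	if (rv < VMCI_SUCCESS)
+ *		return rv;
+ */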
+
+/*
+ * This function implements the host kernel API for detaching from
+ * a queue pair.
+ */
+static int qp_detatch_host_work(struct vmci_handle handle)
+{
+ int result;
+ struct vmci_ctx *context;
+
+ context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
+
+ result = vmci_qp_broker_detach(handle, context);
+
+ vmci_ctx_put(context);
+ return result;
+}
+
+/*
+ * Detaches from a VMCI queue_pair. Only checks validity of input argument.
+ * Real work is done in the host or guest specific function.
+ */
+static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
+{
+ if (vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (guest_endpoint)
+ return qp_detatch_guest_work(handle);
+ else
+ return qp_detatch_host_work(handle);
+}
+
+/*
+ * Returns the entry from the head of the list. Assumes that the list is
+ * locked.
+ */
+static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
+{
+ if (!list_empty(&qp_list->head)) {
+ struct qp_entry *entry =
+ list_first_entry(&qp_list->head, struct qp_entry,
+ list_item);
+ return entry;
+ }
+
+ return NULL;
+}
+
+void vmci_qp_broker_exit(void)
+{
+ struct qp_entry *entry;
+ struct qp_broker_entry *be;
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ while ((entry = qp_list_get_head(&qp_broker_list))) {
+ be = (struct qp_broker_entry *)entry;
+
+ qp_list_remove_entry(&qp_broker_list, entry);
+ kfree(be);
+ }
+
+ mutex_unlock(&qp_broker_list.mutex);
+}
+
+/*
+ * Requests that a queue pair be allocated with the VMCI queue
+ * pair broker. Allocates a queue pair entry if one does not
+ * exist. Attaches to one if it exists, and retrieves the page
+ * files backing that queue_pair. Assumes that the queue pair
+ * broker lock is held.
+ */
+int vmci_qp_broker_alloc(struct vmci_handle handle,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ u64 produce_size,
+ u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context)
+{
+ return qp_broker_alloc(handle, peer, flags, priv_flags,
+ produce_size, consume_size,
+ page_store, context, NULL, NULL, NULL, NULL);
+}
+
+/*
+ * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
+ * step to add the UVAs of the VMX mapping of the queue pair. This function
+ * provides backwards compatibility with such VMX'en, and takes care of
+ * registering the page store for a queue pair previously allocated by the
+ * VMX during create or attach. This function will move the queue pair state
+ * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
+ * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
+ * attached state with memory, the queue pair is ready to be used by the
+ * host peer, and an attach event will be generated.
+ *
+ * Assumes that the queue pair broker lock is held.
+ *
+ * This function is only used by the hosted platform, since there is no
+ * issue with backwards compatibility for vmkernel.
+ */
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+ u64 produce_uva,
+ u64 consume_uva,
+ struct vmci_ctx *context)
+{
+ struct qp_broker_entry *entry;
+ int result;
+ const u32 context_id = vmci_ctx_get_id(context);
+
+ if (vmci_handle_is_invalid(handle) || !context ||
+ context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * We only support guest to host queue pairs, so the VMX must
+ * supply UVAs for the mapped page files.
+ */
+
+ if (produce_uva == 0 || consume_uva == 0)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!vmci_ctx_qp_exists(context, handle)) {
+ pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ entry = qp_broker_handle_to_entry(handle);
+ if (!entry) {
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ /*
+ * If I'm the owner then I can set the page store.
+ *
+ * Or, if a host created the queue_pair and I'm the attached peer
+ * then I can set the page store.
+ */
+ if (entry->create_id != context_id &&
+ (entry->create_id != VMCI_HOST_CONTEXT_ID ||
+ entry->attach_id != context_id)) {
+ result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
+ goto out;
+ }
+
+ if (entry->state != VMCIQPB_CREATED_NO_MEM &&
+ entry->state != VMCIQPB_ATTACHED_NO_MEM) {
+ result = VMCI_ERROR_UNAVAILABLE;
+ goto out;
+ }
+
+ result = qp_host_get_user_memory(produce_uva, consume_uva,
+ entry->produce_q, entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ goto out;
+
+ result = qp_host_map_queues(entry->produce_q, entry->consume_q);
+ if (result < VMCI_SUCCESS) {
+ qp_host_unregister_user_memory(entry->produce_q,
+ entry->consume_q);
+ goto out;
+ }
+
+ if (entry->state == VMCIQPB_CREATED_NO_MEM)
+ entry->state = VMCIQPB_CREATED_MEM;
+ else
+ entry->state = VMCIQPB_ATTACHED_MEM;
+
+ entry->vmci_page_files = true;
+
+ if (entry->state == VMCIQPB_ATTACHED_MEM) {
+ result =
+ qp_notify_peer(true, handle, context_id, entry->create_id);
+ if (result < VMCI_SUCCESS) {
+ pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
+ entry->create_id, entry->qp.handle.context,
+ entry->qp.handle.resource);
+ }
+ }
+
+ result = VMCI_SUCCESS;
+ out:
+ mutex_unlock(&qp_broker_list.mutex);
+ return result;
+}
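+
+/*
+ * Flow sketch for the old-VMX compatibility path described above: a NULL
+ * page_store is passed at alloc time and the UVAs are registered later.
+ * The handle, size and UVA values are placeholders supplied by the VMX:
+ *
+ *	vmci_qp_broker_alloc(handle, peer, flags, priv_flags,
+ *			     produce_size, consume_size,
+ *			     NULL, context);
+ *	...
+ *	vmci_qp_broker_set_page_store(handle, produce_uva,
+ *				      consume_uva, context);
+ */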
+
+/*
+ * Resets saved queue headers for the given QP broker
+ * entry. Should be used when guest memory becomes available
+ * again, or the guest detaches.
+ */
+static void qp_reset_saved_headers(struct qp_broker_entry *entry)
+{
+ entry->produce_q->saved_header = NULL;
+ entry->consume_q->saved_header = NULL;
+}
+
+/*
+ * The main entry point for detaching from a queue pair registered with the
+ * queue pair broker. If more than one endpoint is attached to the queue
+ * pair, the first endpoint will mainly decrement a reference count and
+ * generate a notification to its peer. The last endpoint will clean up
+ * the queue pair state registered with the broker.
+ *
+ * When a guest endpoint detaches, it will unmap and unregister the guest
+ * memory backing the queue pair. If the host is still attached, it will
+ * no longer be able to access the queue pair content.
+ *
+ * If the queue pair is already in a state where there is no memory
+ * registered for the queue pair (any *_NO_MEM state), it will transition to
+ * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen if a guest
+ * endpoint is the first of two endpoints to detach. If the host endpoint is
+ * the first of the two to detach, the queue pair will move to the
+ * VMCIQPB_SHUTDOWN_MEM state.
+ */
+int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
+{
+ struct qp_broker_entry *entry;
+ const u32 context_id = vmci_ctx_get_id(context);
+ u32 peer_id;
+ bool is_local = false;
+ int result;
+
+ if (vmci_handle_is_invalid(handle) || !context ||
+ context_id == VMCI_INVALID_ID) {
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!vmci_ctx_qp_exists(context, handle)) {
+ pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ entry = qp_broker_handle_to_entry(handle);
+ if (!entry) {
+ pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ if (context_id != entry->create_id && context_id != entry->attach_id) {
+ result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+ goto out;
+ }
+
+ if (context_id == entry->create_id) {
+ peer_id = entry->attach_id;
+ entry->create_id = VMCI_INVALID_ID;
+ } else {
+ peer_id = entry->create_id;
+ entry->attach_id = VMCI_INVALID_ID;
+ }
+ entry->qp.ref_count--;
+
+ is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ bool headers_mapped;
+
+ /*
+ * Pre NOVMVM vmx'en may detach from a queue pair
+ * before setting the page store, and in that case
+ * there is no user memory to detach from. Also, more
+ * recent VMX'en may detach from a queue pair in the
+ * quiesced state.
+ */
+
+ qp_acquire_queue_mutex(entry->produce_q);
+ headers_mapped = entry->produce_q->q_header ||
+ entry->consume_q->q_header;
+ if (QPBROKERSTATE_HAS_MEM(entry)) {
+ result =
+ qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
+ entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
+ handle.context, handle.resource,
+ result);
+
+ qp_host_unregister_user_memory(entry->produce_q,
+ entry->consume_q);
+
+ }
+
+ if (!headers_mapped)
+ qp_reset_saved_headers(entry);
+
+ qp_release_queue_mutex(entry->produce_q);
+
+ if (!headers_mapped && entry->wakeup_cb)
+ entry->wakeup_cb(entry->client_data);
+
+ } else {
+ if (entry->wakeup_cb) {
+ entry->wakeup_cb = NULL;
+ entry->client_data = NULL;
+ }
+ }
+
+ if (entry->qp.ref_count == 0) {
+ qp_list_remove_entry(&qp_broker_list, &entry->qp);
+
+ if (is_local)
+ kfree(entry->local_mem);
+
+ qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
+ qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
+ qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
+ /* Unlink from resource hash table and free callback */
+ vmci_resource_remove(&entry->resource);
+
+ kfree(entry);
+
+ vmci_ctx_qp_destroy(context, handle);
+ } else {
+ qp_notify_peer(false, handle, context_id, peer_id);
+ if (context_id == VMCI_HOST_CONTEXT_ID &&
+ QPBROKERSTATE_HAS_MEM(entry)) {
+ entry->state = VMCIQPB_SHUTDOWN_MEM;
+ } else {
+ entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
+ }
+
+ if (!is_local)
+ vmci_ctx_qp_destroy(context, handle);
+
+ }
+ result = VMCI_SUCCESS;
+ out:
+ mutex_unlock(&qp_broker_list.mutex);
+ return result;
+}
+
+/*
+ * Establishes the necessary mappings for a queue pair given a
+ * reference to the queue pair guest memory. This is usually
+ * called when a guest is unquiesced and the VMX is allowed to
+ * map guest memory once again.
+ */
+int vmci_qp_broker_map(struct vmci_handle handle,
+ struct vmci_ctx *context,
+ u64 guest_mem)
+{
+ struct qp_broker_entry *entry;
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool is_local = false;
+ int result;
+
+ if (vmci_handle_is_invalid(handle) || !context ||
+ context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!vmci_ctx_qp_exists(context, handle)) {
+ pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ entry = qp_broker_handle_to_entry(handle);
+ if (!entry) {
+ pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ if (context_id != entry->create_id && context_id != entry->attach_id) {
+ result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+ goto out;
+ }
+
+ is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+ result = VMCI_SUCCESS;
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ struct vmci_qp_page_store page_store;
+
+ page_store.pages = guest_mem;
+ page_store.len = QPE_NUM_PAGES(entry->qp);
+
+ qp_acquire_queue_mutex(entry->produce_q);
+ qp_reset_saved_headers(entry);
+ result =
+ qp_host_register_user_memory(&page_store,
+ entry->produce_q,
+ entry->consume_q);
+ qp_release_queue_mutex(entry->produce_q);
+ if (result == VMCI_SUCCESS) {
+ /* Move state from *_NO_MEM to *_MEM */
+
+ entry->state++;
+
+ if (entry->wakeup_cb)
+ entry->wakeup_cb(entry->client_data);
+ }
+ }
+
+ out:
+ mutex_unlock(&qp_broker_list.mutex);
+ return result;
+}
+
+/*
+ * Saves a snapshot of the queue headers for the given QP broker
+ * entry. Should be used when guest memory is unmapped.
+ * Results:
+ * VMCI_SUCCESS on success, appropriate error code if guest memory
+ * can't be accessed.
+ */
+static int qp_save_headers(struct qp_broker_entry *entry)
+{
+ int result;
+
+ if (entry->produce_q->saved_header != NULL &&
+ entry->consume_q->saved_header != NULL) {
+ /*
+ * If the headers have already been saved, we don't need to do
+ * it again, and we don't want to map in the headers
+ * unnecessarily.
+ */
+
+ return VMCI_SUCCESS;
+ }
+
+ if (NULL == entry->produce_q->q_header ||
+ NULL == entry->consume_q->q_header) {
+ result = qp_host_map_queues(entry->produce_q, entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ return result;
+ }
+
+ memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
+ sizeof(entry->saved_produce_q));
+ entry->produce_q->saved_header = &entry->saved_produce_q;
+ memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
+ sizeof(entry->saved_consume_q));
+ entry->consume_q->saved_header = &entry->saved_consume_q;
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Removes all references to the guest memory of a given queue pair, and
+ * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
+ * called when a VM is being quiesced, where access to guest memory should
+ * be avoided.
+ */
+int vmci_qp_broker_unmap(struct vmci_handle handle,
+ struct vmci_ctx *context,
+ u32 gid)
+{
+ struct qp_broker_entry *entry;
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool is_local = false;
+ int result;
+
+ if (vmci_handle_is_invalid(handle) || !context ||
+ context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!vmci_ctx_qp_exists(context, handle)) {
+ pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ entry = qp_broker_handle_to_entry(handle);
+ if (!entry) {
+ pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ if (context_id != entry->create_id && context_id != entry->attach_id) {
+ result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+ goto out;
+ }
+
+ is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ qp_acquire_queue_mutex(entry->produce_q);
+ result = qp_save_headers(entry);
+ if (result < VMCI_SUCCESS)
+ pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
+ handle.context, handle.resource, result);
+
+ qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
+
+ /*
+ * On hosted, when we unmap queue pairs, the VMX will also
+ * unmap the guest memory, so we invalidate the previously
+ * registered memory. If the queue pair is mapped again at a
+ * later point in time, we will need to reregister the user
+ * memory with a possibly new user VA.
+ */
+ qp_host_unregister_user_memory(entry->produce_q,
+ entry->consume_q);
+
+ /*
+ * Move state from *_MEM to *_NO_MEM.
+ */
+ entry->state--;
+
+ qp_release_queue_mutex(entry->produce_q);
+ }
+
+ result = VMCI_SUCCESS;
+
+ out:
+ mutex_unlock(&qp_broker_list.mutex);
+ return result;
+}
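+
+/*
+ * Quiesce/resume pairing sketch for vmci_qp_broker_unmap() and
+ * vmci_qp_broker_map() above; gid and guest_mem are caller-supplied
+ * values and are placeholders here:
+ *
+ *	vmci_qp_broker_unmap(handle, context, gid);
+ *		(quiesce: moves state *_MEM to *_NO_MEM)
+ *	...
+ *	vmci_qp_broker_map(handle, context, guest_mem);
+ *		(resume: moves state *_NO_MEM back to *_MEM)
+ */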
+
+/*
+ * Destroys all guest queue pair endpoints. If active guest queue
+ * pairs still exist, hypercalls to attempt detach from these
+ * queue pairs will be made. Any failure to detach is silently
+ * ignored.
+ */
+void vmci_qp_guest_endpoints_exit(void)
+{
+ struct qp_entry *entry;
+ struct qp_guest_endpoint *ep;
+
+ mutex_lock(&qp_guest_endpoints.mutex);
+
+ while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
+ ep = (struct qp_guest_endpoint *)entry;
+
+ /* Don't make a hypercall for local queue_pairs. */
+ if (!(entry->flags & VMCI_QPFLAG_LOCAL))
+ qp_detatch_hypercall(entry->handle);
+
+ /* We cannot fail the exit, so let's reset ref_count. */
+ entry->ref_count = 0;
+ qp_list_remove_entry(&qp_guest_endpoints, entry);
+
+ qp_guest_endpoint_destroy(ep);
+ }
+
+ mutex_unlock(&qp_guest_endpoints.mutex);
+}
+
+/*
+ * Helper routine that will lock the queue pair before subsequent
+ * operations.
+ * Note: Non-blocking on the host side is currently only implemented in ESX.
+ * Since non-blocking isn't yet implemented on the host personality, we
+ * have no reason to acquire a spin lock, so to avoid an unnecessary
+ * lock we only acquire the mutex if we can block.
+ * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK. Therefore
+ * we can use the same locking function for access to both the queue
+ * and the queue headers as it is the same logic. Assert this behavior.
+ */
+static void qp_lock(const struct vmci_qp *qpair)
+{
+ if (vmci_can_block(qpair->flags))
+ qp_acquire_queue_mutex(qpair->produce_q);
+}
+
+/*
+ * Helper routine that unlocks the queue pair after calling
+ * qp_lock. Respects non-blocking and pinning flags.
+ */
+static void qp_unlock(const struct vmci_qp *qpair)
+{
+ if (vmci_can_block(qpair->flags))
+ qp_release_queue_mutex(qpair->produce_q);
+}
+
+/*
+ * The queue headers may not be mapped at all times. If a queue is
+ * currently not mapped, an attempt is made to map it.
+ */
+static int qp_map_queue_headers(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q,
+ bool can_block)
+{
+ int result;
+
+ if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
+ if (can_block)
+ result = qp_host_map_queues(produce_q, consume_q);
+ else
+ result = VMCI_ERROR_QUEUEPAIR_NOT_READY;
+
+ if (result < VMCI_SUCCESS)
+ return (produce_q->saved_header &&
+ consume_q->saved_header) ?
+ VMCI_ERROR_QUEUEPAIR_NOT_READY :
+ VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Helper routine that will retrieve the produce and consume
+ * headers of a given queue pair. If the guest memory of the
+ * queue pair is currently not available, the saved queue headers
+ * will be returned, if these are available.
+ */
+static int qp_get_queue_headers(const struct vmci_qp *qpair,
+ struct vmci_queue_header **produce_q_header,
+ struct vmci_queue_header **consume_q_header)
+{
+ int result;
+
+ result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q,
+ vmci_can_block(qpair->flags));
+ if (result == VMCI_SUCCESS) {
+ *produce_q_header = qpair->produce_q->q_header;
+ *consume_q_header = qpair->consume_q->q_header;
+ } else if (qpair->produce_q->saved_header &&
+ qpair->consume_q->saved_header) {
+ *produce_q_header = qpair->produce_q->saved_header;
+ *consume_q_header = qpair->consume_q->saved_header;
+ result = VMCI_SUCCESS;
+ }
+
+ return result;
+}
+
+/*
+ * Callback from VMCI queue pair broker indicating that a queue
+ * pair that was previously not ready, now either is ready or
+ * gone forever.
+ */
+static int qp_wakeup_cb(void *client_data)
+{
+ struct vmci_qp *qpair = (struct vmci_qp *)client_data;
+
+ qp_lock(qpair);
+ while (qpair->blocked > 0) {
+ qpair->blocked--;
+ qpair->generation++;
+ wake_up(&qpair->event);
+ }
+ qp_unlock(qpair);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Makes the calling thread wait for the queue pair to become
+ * ready for host side access. Returns true when the thread is
+ * woken up after a queue pair state change, false otherwise.
+ */
+static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
+{
+ unsigned int generation;
+
+ if (qpair->flags & VMCI_QPFLAG_NONBLOCK)
+ return false;
+
+ qpair->blocked++;
+ generation = qpair->generation;
+ qp_unlock(qpair);
+ wait_event(qpair->event, generation != qpair->generation);
+ qp_lock(qpair);
+
+ return true;
+}
+
+/*
+ * Enqueues a given buffer to the produce queue using the provided
+ * function. As many bytes as possible (space available in the queue)
+ * are enqueued. Assumes the queue->mutex has been acquired. Returns
+ * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
+ * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
+ * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
+ * an error occurred when accessing the buffer,
+ * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
+ * available. Otherwise, the number of bytes written to the queue is
+ * returned. Updates the tail pointer of the produce queue.
+ */
+static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q,
+ const u64 produce_q_size,
+ const void *buf,
+ size_t buf_size,
+ vmci_memcpy_to_queue_func memcpy_to_queue,
+ bool can_block)
+{
+ s64 free_space;
+ u64 tail;
+ size_t written;
+ ssize_t result;
+
+ result = qp_map_queue_headers(produce_q, consume_q, can_block);
+ if (unlikely(result != VMCI_SUCCESS))
+ return result;
+
+ free_space = vmci_q_header_free_space(produce_q->q_header,
+ consume_q->q_header,
+ produce_q_size);
+ if (free_space == 0)
+ return VMCI_ERROR_QUEUEPAIR_NOSPACE;
+
+ if (free_space < VMCI_SUCCESS)
+ return (ssize_t) free_space;
+
+ written = (size_t) (free_space > buf_size ? buf_size : free_space);
+ tail = vmci_q_header_producer_tail(produce_q->q_header);
+ if (likely(tail + written < produce_q_size)) {
+ result = memcpy_to_queue(produce_q, tail, buf, 0, written);
+ } else {
+ /* Tail pointer wraps around. */
+
+ const size_t tmp = (size_t) (produce_q_size - tail);
+
+ result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
+ if (result >= VMCI_SUCCESS)
+ result = memcpy_to_queue(produce_q, 0, buf, tmp,
+ written - tmp);
+ }
+
+ if (result < VMCI_SUCCESS)
+ return result;
+
+ vmci_q_header_add_producer_tail(produce_q->q_header, written,
+ produce_q_size);
+ return written;
+}
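The tail-wrap handling above is easiest to see on a plain circular byte buffer. The sketch below is illustration only and not part of the patch: ring_write() and its parameters are made-up names, and a bare memcpy() stands in for the driver's vmci_memcpy_to_queue_func, but the split of a write that crosses the end of the ring follows the same arithmetic as qp_enqueue_locked().

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Write len bytes at offset tail into a ring of ring_size bytes. */
static size_t ring_write(uint8_t *ring, uint64_t ring_size, uint64_t tail,
			 const uint8_t *buf, size_t len)
{
	if (tail + len < ring_size) {
		/* Fits without wrapping. */
		memcpy(ring + tail, buf, len);
	} else {
		/* Copy up to the end of the ring, then wrap to offset 0. */
		size_t first = (size_t)(ring_size - tail);

		memcpy(ring + tail, buf, first);
		memcpy(ring, buf + first, len - first);
	}
	return len;	/* the caller then advances the producer tail by len */
}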
+
+/*
+ * Dequeues data (if available) from the given consume queue. Writes data
+ * to the user provided buffer using the provided function.
+ * Assumes the queue->mutex has been acquired.
+ * Results:
+ * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
+ * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
+ * (as defined by the queue size).
+ * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
+ * Otherwise the number of bytes dequeued is returned.
+ * Side effects:
+ * Updates the head pointer of the consume queue.
+ */
+static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q,
+ const u64 consume_q_size,
+ void *buf,
+ size_t buf_size,
+ vmci_memcpy_from_queue_func memcpy_from_queue,
+ bool update_consumer,
+ bool can_block)
+{
+ s64 buf_ready;
+ u64 head;
+ size_t read;
+ ssize_t result;
+
+ result = qp_map_queue_headers(produce_q, consume_q, can_block);
+ if (unlikely(result != VMCI_SUCCESS))
+ return result;
+
+ buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
+ produce_q->q_header,
+ consume_q_size);
+ if (buf_ready == 0)
+ return VMCI_ERROR_QUEUEPAIR_NODATA;
+
+ if (buf_ready < VMCI_SUCCESS)
+ return (ssize_t) buf_ready;
+
+ read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
+ head = vmci_q_header_consumer_head(produce_q->q_header);
+ if (likely(head + read < consume_q_size)) {
+ result = memcpy_from_queue(buf, 0, consume_q, head, read);
+ } else {
+ /* Head pointer wraps around. */
+
+ const size_t tmp = (size_t) (consume_q_size - head);
+
+ result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
+ if (result >= VMCI_SUCCESS)
+ result = memcpy_from_queue(buf, tmp, consume_q, 0,
+ read - tmp);
+
+ }
+
+ if (result < VMCI_SUCCESS)
+ return result;
+
+ if (update_consumer)
+ vmci_q_header_add_consumer_head(produce_q->q_header,
+ read, consume_q_size);
+
+ return read;
+}
+
+/*
+ * vmci_qpair_alloc() - Allocates a queue pair.
+ * @qpair: Pointer for the new vmci_qp struct.
+ * @handle: Handle to track the resource.
+ * @produce_qsize: Desired size of the producer queue.
+ * @consume_qsize: Desired size of the consumer queue.
+ * @peer: ContextID of the peer.
+ * @flags: VMCI flags.
+ * @priv_flags: VMCI privilege flags.
+ *
+ * This is the client interface for allocating the memory for a
+ * vmci_qp structure and then attaching to the underlying
+ * queue. If an error occurs allocating the memory for the
+ * vmci_qp structure no attempt is made to attach. If an
+ * error occurs attaching, then the structure is freed.
+ */
+int vmci_qpair_alloc(struct vmci_qp **qpair,
+ struct vmci_handle *handle,
+ u64 produce_qsize,
+ u64 consume_qsize,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags)
+{
+ struct vmci_qp *my_qpair;
+ int retval;
+ struct vmci_handle src = VMCI_INVALID_HANDLE;
+ struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
+ enum vmci_route route;
+ vmci_event_release_cb wakeup_cb;
+ void *client_data;
+
+ /*
+ * Restrict the size of a queuepair. The device already
+ * enforces a limit on the total amount of memory that can be
+ * allocated to queuepairs for a guest. However, we try to
+ * allocate this memory before we make the queuepair
+ * allocation hypercall. On Linux, we allocate each page
+ * separately, which means rather than fail, the guest will
+ * thrash while it tries to allocate, and will become
+ * increasingly unresponsive to the point where it appears to
+ * be hung. So we place a limit on the size of an individual
+ * queuepair here, and leave the device to enforce the
+ * restriction on total queuepair memory. (Note that this
+ * doesn't prevent all cases; a user with only this much
+ * physical memory could still get into trouble.) The error
+ * used by the device is NO_RESOURCES, so use that here too.
+ */
+
+ if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
+ produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
+ return VMCI_ERROR_NO_RESOURCES;
+
+ retval = vmci_route(&src, &dst, false, &route);
+ if (retval < VMCI_SUCCESS)
+ route = vmci_guest_code_active() ?
+ VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
+
+ /* If NONBLOCK or PINNED is set, we better be the guest personality. */
+ if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) &&
+ VMCI_ROUTE_AS_GUEST != route) {
+ pr_devel("Not guest personality w/ NONBLOCK OR PINNED set");
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ /*
+ * Limit the size of pinned QPs and check sanity.
+ *
+ * Pinned pages implies non-blocking mode. Mutexes aren't acquired
+ * when the NONBLOCK flag is set in qpair code, nor should they be
+ * acquired when the PINNED flag is set. Since pinning pages
+ * implies we want speed, it makes no sense not to have NONBLOCK
+ * set if PINNED is set. Hence enforce this implication.
+ */
+ if (vmci_qp_pinned(flags)) {
+ if (vmci_can_block(flags)) {
+ pr_err("Attempted to enable pinning w/o non-blocking");
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY)
+ return VMCI_ERROR_NO_RESOURCES;
+ }
+
+ my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
+ if (!my_qpair)
+ return VMCI_ERROR_NO_MEM;
+
+ my_qpair->produce_q_size = produce_qsize;
+ my_qpair->consume_q_size = consume_qsize;
+ my_qpair->peer = peer;
+ my_qpair->flags = flags;
+ my_qpair->priv_flags = priv_flags;
+
+ wakeup_cb = NULL;
+ client_data = NULL;
+
+ if (VMCI_ROUTE_AS_HOST == route) {
+ my_qpair->guest_endpoint = false;
+ if (!(flags & VMCI_QPFLAG_LOCAL)) {
+ my_qpair->blocked = 0;
+ my_qpair->generation = 0;
+ init_waitqueue_head(&my_qpair->event);
+ wakeup_cb = qp_wakeup_cb;
+ client_data = (void *)my_qpair;
+ }
+ } else {
+ my_qpair->guest_endpoint = true;
+ }
+
+ retval = vmci_qp_alloc(handle,
+ &my_qpair->produce_q,
+ my_qpair->produce_q_size,
+ &my_qpair->consume_q,
+ my_qpair->consume_q_size,
+ my_qpair->peer,
+ my_qpair->flags,
+ my_qpair->priv_flags,
+ my_qpair->guest_endpoint,
+ wakeup_cb, client_data);
+
+ if (retval < VMCI_SUCCESS) {
+ kfree(my_qpair);
+ return retval;
+ }
+
+ *qpair = my_qpair;
+ my_qpair->handle = *handle;
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
+
+/*
+ * vmci_qpair_detach() - Detaches the client from a queue pair.
+ * @qpair: Reference of a pointer to the qpair struct.
+ *
+ * This is the client interface for detaching from a VMCIQPair.
+ * Note that this routine will free the memory allocated for the
+ * vmci_qp structure too.
+ */
+int vmci_qpair_detach(struct vmci_qp **qpair)
+{
+ int result;
+ struct vmci_qp *old_qpair;
+
+ if (!qpair || !(*qpair))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ old_qpair = *qpair;
+ result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
+
+ /*
+ * The guest can fail to detach for a number of reasons, and
+ * if it does so, it will cleanup the entry (if there is one).
+ * The host can fail too, but it won't cleanup the entry
+ * immediately, it will do that later when the context is
+ * freed. Either way, we need to release the qpair struct
+ * here; there isn't much the caller can do, and we don't want
+ * to leak.
+ */
+
+ memset(old_qpair, 0, sizeof(*old_qpair));
+ old_qpair->handle = VMCI_INVALID_HANDLE;
+ old_qpair->peer = VMCI_INVALID_ID;
+ kfree(old_qpair);
+ *qpair = NULL;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_detach);
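A minimal usage sketch of the exported client API, assuming a blocking (flags == 0) queue pair; it is not part of the patch. peer_cid, the 64 KiB queue sizes and example_qpair_roundtrip() are placeholders, and error handling is abbreviated.

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/printk.h>

static int example_qpair_roundtrip(u32 peer_cid)
{
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	struct vmci_qp *qpair = NULL;
	char msg[] = "hello";
	ssize_t n;
	int rv;

	rv = vmci_qpair_alloc(&qpair, &handle, 64 * 1024, 64 * 1024,
			      peer_cid, 0 /* flags */, 0 /* priv_flags */);
	if (rv < VMCI_SUCCESS)
		return rv;

	n = vmci_qpair_enqueue(qpair, msg, sizeof(msg), 0);
	if (n < 0)
		pr_warn("example: enqueue failed: %zd\n", n);

	return vmci_qpair_detach(&qpair);	/* also frees the vmci_qp */
}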
+
+/*
+ * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
+ * @qpair: Pointer to the queue pair struct.
+ * @producer_tail: Reference used for storing producer tail index.
+ * @consumer_head: Reference used for storing the consumer head index.
+ *
+ * This is the client interface for getting the current indexes of the
+ * QPair from the point of view of the caller as the producer.
+ */
+int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
+ u64 *producer_tail,
+ u64 *consumer_head)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ int result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ vmci_q_header_get_pointers(produce_q_header, consume_q_header,
+ producer_tail, consumer_head);
+ qp_unlock(qpair);
+
+ if (result == VMCI_SUCCESS &&
+ ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
+ (consumer_head && *consumer_head >= qpair->produce_q_size)))
+ return VMCI_ERROR_INVALID_SIZE;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
+
+/*
+ * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
+ * @qpair: Pointer to the queue pair struct.
+ * @consumer_tail: Reference used for storing consumer tail index.
+ * @producer_head: Reference used for storing the producer head index.
+ *
+ * This is the client interface for getting the current indexes of the
+ * QPair from the point of view of the caller as the consumer.
+ */
+int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
+ u64 *consumer_tail,
+ u64 *producer_head)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ int result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ vmci_q_header_get_pointers(consume_q_header, produce_q_header,
+ consumer_tail, producer_head);
+ qp_unlock(qpair);
+
+ if (result == VMCI_SUCCESS &&
+ ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
+ (producer_head && *producer_head >= qpair->consume_q_size)))
+ return VMCI_ERROR_INVALID_SIZE;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
+
+/*
+ * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of free
+ * space in the QPair from the point of view of the caller as
+ * the producer, which is the common case. Returns < 0 on error,
+ * otherwise the number of bytes available for enqueueing data.
+ */
+s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ s64 result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ result = vmci_q_header_free_space(produce_q_header,
+ consume_q_header,
+ qpair->produce_q_size);
+ else
+ result = 0;
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
+
+/*
+ * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of free
+ * space in the QPair from the point of view of the caller as
+ * the consumer, which is not the common case. Returns < 0 on error,
+ * otherwise the number of bytes available for enqueueing data.
+ */
+s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ s64 result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ result = vmci_q_header_free_space(consume_q_header,
+ produce_q_header,
+ qpair->consume_q_size);
+ else
+ result = 0;
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
+
+/*
+ * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
+ * producer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of
+ * enqueued data in the QPair from the point of view of the
+ * caller as the producer, which is not the common case. Returns < 0 on
+ * error, otherwise the number of bytes that may be read.
+ */
+s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ s64 result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ result = vmci_q_header_buf_ready(produce_q_header,
+ consume_q_header,
+ qpair->produce_q_size);
+ else
+ result = 0;
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
+
+/*
+ * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
+ * consumer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of
+ * enqueued data in the QPair from the point of view of the
+ * caller as the consumer, which is the normal case. Returns < 0 on
+ * error, otherwise the number of bytes that may be read.
+ */
+s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ s64 result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ result = vmci_q_header_buf_ready(consume_q_header,
+ produce_q_header,
+ qpair->consume_q_size);
+ else
+ result = 0;
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
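The free-space/buf-ready accessors pair naturally with the enqueue/dequeue calls below. A consumer-side sketch, illustration only and not from the patch (the 256-byte buffer and example_drain() are placeholders), that polls vmci_qpair_consume_buf_ready() and drains whatever is available:

static void example_drain(struct vmci_qp *qpair)
{
	char buf[256];
	ssize_t n;

	while (vmci_qpair_consume_buf_ready(qpair) > 0) {
		n = vmci_qpair_dequeue(qpair, buf, sizeof(buf), 0);
		if (n < 0)
			break;	/* e.g. VMCI_ERROR_QUEUEPAIR_NODATA on a race */
		/* process n bytes from buf here */
	}
}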
+
+/*
+ * vmci_qpair_enqueue() - Throw data on the queue.
+ * @qpair: Pointer to the queue pair struct.
+ * @buf: Pointer to buffer containing data
+ * @buf_size: Length of buffer.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for enqueueing data into the queue.
+ * Returns number of bytes enqueued or < 0 on error.
+ */
+ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
+ const void *buf,
+ size_t buf_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !buf)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_enqueue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->produce_q_size,
+ buf, buf_size,
+ qp_memcpy_to_queue,
+ vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
+
+/*
+ * vmci_qpair_dequeue() - Get data from the queue.
+ * @qpair: Pointer to the queue pair struct.
+ * @buf: Pointer to buffer for the data
+ * @buf_size: Length of buffer.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for dequeueing data from the queue.
+ * Returns number of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
+ void *buf,
+ size_t buf_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !buf)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_dequeue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->consume_q_size,
+ buf, buf_size,
+ qp_memcpy_from_queue, true,
+ vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
+
+/*
+ * vmci_qpair_peek() - Peek at the data in the queue.
+ * @qpair: Pointer to the queue pair struct.
+ * @buf: Pointer to buffer for the data
+ * @buf_size: Length of buffer.
+ * @buf_type: Buffer type (Unused on Linux).
+ *
+ * This is the client interface for peeking into a queue. (I.e.,
+ * copy data from the queue without updating the head pointer.)
+ * Returns number of bytes peeked or < 0 on error.
+ */
+ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
+ void *buf,
+ size_t buf_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !buf)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_dequeue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->consume_q_size,
+ buf, buf_size,
+ qp_memcpy_from_queue, false,
+ vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_peek);
+
+/*
+ * vmci_qpair_enquev() - Throw data on the queue using iov.
+ * @qpair: Pointer to the queue pair struct.
+ * @iov: Pointer to buffer containing data
+ * @iov_size: Length of buffer.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for enqueueing data into the queue.
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes enqueued or < 0 on error.
+ */
+ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
+ void *iov,
+ size_t iov_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !iov)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_enqueue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->produce_q_size,
+ iov, iov_size,
+ qp_memcpy_to_queue_iov,
+ vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
+
+/*
+ * vmci_qpair_dequev() - Get data from the queue using iov.
+ * @qpair: Pointer to the queue pair struct.
+ * @iov: Pointer to buffer for the data
+ * @iov_size: Length of buffer.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for dequeueing data from the queue.
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
+ void *iov,
+ size_t iov_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !iov)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_dequeue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->consume_q_size,
+ iov, iov_size,
+ qp_memcpy_from_queue_iov,
+ true, vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
+
+/*
+ * vmci_qpair_peekv() - Peek at the data in the queue using iov.
+ * @qpair: Pointer to the queue pair struct.
+ * @iov: Pointer to buffer for the data
+ * @iov_size: Length of buffer.
+ * @buf_type: Buffer type (Unused on Linux).
+ *
+ * This is the client interface for peeking into a queue. (I.e.,
+ * copy data from the queue without updating the head pointer.)
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes peeked or < 0 on error.
+ */
+ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
+ void *iov,
+ size_t iov_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !iov)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_dequeue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->consume_q_size,
+ iov, iov_size,
+ qp_memcpy_from_queue_iov,
+ false, vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
new file mode 100644
index 000000000000..58c6959f6b6d
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -0,0 +1,191 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_QUEUE_PAIR_H_
+#define _VMCI_QUEUE_PAIR_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_context.h"
+
+/* Callback needed for correctly waiting on events. */
+typedef int (*vmci_event_release_cb) (void *client_data);
+
+/* Guest device port I/O. */
+struct ppn_set {
+ u64 num_produce_pages;
+ u64 num_consume_pages;
+ u32 *produce_ppns;
+ u32 *consume_ppns;
+ bool initialized;
+};
+
+/* VMCIqueue_pairAllocInfo */
+struct vmci_qp_alloc_info {
+ struct vmci_handle handle;
+ u32 peer;
+ u32 flags;
+ u64 produce_size;
+ u64 consume_size;
+ u64 ppn_va; /* Start VA of queue pair PPNs. */
+ u64 num_ppns;
+ s32 result;
+ u32 version;
+};
+
+/* VMCIqueue_pairSetVAInfo */
+struct vmci_qp_set_va_info {
+ struct vmci_handle handle;
+ u64 va; /* Start VA of queue pair PPNs. */
+ u64 num_ppns;
+ u32 version;
+ s32 result;
+};
+
+/*
+ * For backwards compatibility, here is a version of the
+ * VMCIqueue_pairPageFileInfo before host support for end-points was added.
+ * Note that the current version of that structure requires VMX to
+ * pass down the VA of the mapped file. Before host support was added
+ * there was nothing of the sort. So, when the driver sees the ioctl
+ * with a parameter that is the size of
+ * VMCIqueue_pairPageFileInfo_NoHostQP, it can infer that the version
+ * of VMX running can't attach to host end points because it doesn't
+ * provide the VA of the mapped files.
+ *
+ * The Linux driver doesn't get an indication of the size of the
+ * structure passed down from user space. So, to fix a long standing
+ * but unfiled bug, the _pad field has been renamed to version.
+ * Existing versions of VMX always initialize the PageFileInfo
+ * structure so that _pad, er, version is set to 0.
+ *
+ * A version value of 1 indicates that the size of the structure has
+ * been increased to include two UVA's: produce_uva and consume_uva.
+ * These UVA's are of the mmap()'d queue contents backing files.
+ *
+ * In addition, if VMX gets an error when sending down the
+ * VMCIqueue_pairPageFileInfo structure, it will try again with the
+ * _NoHostQP version of the structure to see if an older
+ * VMCI kernel module is running.
+ */
+
+/* VMCIqueue_pairPageFileInfo */
+struct vmci_qp_page_file_info {
+ struct vmci_handle handle;
+ u64 produce_page_file; /* User VA. */
+ u64 consume_page_file; /* User VA. */
+ u64 produce_page_file_size; /* Size of the file name array. */
+ u64 consume_page_file_size; /* Size of the file name array. */
+ s32 result;
+ u32 version; /* Was _pad. */
+ u64 produce_va; /* User VA of the mapped file. */
+ u64 consume_va; /* User VA of the mapped file. */
+};
+
+/* vmci queuepair detach info */
+struct vmci_qp_dtch_info {
+ struct vmci_handle handle;
+ s32 result;
+ u32 _pad;
+};
+
+/*
+ * struct vmci_qp_page_store describes how the memory of a given queue pair
+ * is backed. When the queue pair is between the host and a guest, the
+ * page store consists of references to the guest pages. On vmkernel,
+ * this is a list of PPNs, and on hosted, it is a user VA where the
+ * queue pair is mapped into the VMX address space.
+ */
+struct vmci_qp_page_store {
+ /* Reference to pages backing the queue pair. */
+ u64 pages;
+ /* Length of pageList/virtual address range (in pages). */
+ u32 len;
+};
+
+/*
+ * This data type contains the information about a queue.
+ * There are two queues (hence, queue pairs) per transaction model between a
+ * pair of end points, A & B. One queue is used by end point A to transmit
+ * commands and responses to B. The other queue is used by B to transmit
+ * commands and responses to A.
+ *
+ * struct vmci_queue_kern_if is a per-OS defined Queue structure. It contains
+ * either a direct pointer to the linear address of the buffer contents or a
+ * pointer to structures which help the OS locate those data pages. See
+ * each platform's vmciKernelIf.c for its definition.
+ */
+struct vmci_queue {
+ struct vmci_queue_header *q_header;
+ struct vmci_queue_header *saved_header;
+ struct vmci_queue_kern_if *kernel_if;
+};
+
+/*
+ * Utility function that checks whether the fields of the page
+ * store contain valid values.
+ * Result:
+ * true if the page store is well-formed, false otherwise.
+ */
+static inline bool
+VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
+{
+ return page_store->len >= 2;
+}
+
+/*
+ * Helper function to check whether a queue pair can block,
+ * i.e. whether the non-blocking flag is not set.
+ */
+static inline bool vmci_can_block(u32 flags)
+{
+ return !(flags & VMCI_QPFLAG_NONBLOCK);
+}
+
+/*
+ * Helper function to check if the queue pair is pinned
+ * into memory.
+ */
+static inline bool vmci_qp_pinned(u32 flags)
+{
+ return flags & VMCI_QPFLAG_PINNED;
+}
+
+void vmci_qp_broker_exit(void);
+int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
+ u32 flags, u32 priv_flags,
+ u64 produce_size, u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context);
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+ u64 produce_uva, u64 consume_uva,
+ struct vmci_ctx *context);
+int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context);
+
+void vmci_qp_guest_endpoints_exit(void);
+
+int vmci_qp_alloc(struct vmci_handle *handle,
+ struct vmci_queue **produce_q, u64 produce_size,
+ struct vmci_queue **consume_q, u64 consume_size,
+ u32 peer, u32 flags, u32 priv_flags,
+ bool guest_endpoint, vmci_event_release_cb wakeup_cb,
+ void *client_data);
+int vmci_qp_broker_map(struct vmci_handle handle,
+ struct vmci_ctx *context, u64 guest_mem);
+int vmci_qp_broker_unmap(struct vmci_handle handle,
+ struct vmci_ctx *context, u32 gid);
+
+#endif /* _VMCI_QUEUE_PAIR_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
new file mode 100644
index 000000000000..9a53a30de445
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -0,0 +1,227 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/rculist.h>
+
+#include "vmci_resource.h"
+#include "vmci_driver.h"
+
+
+#define VMCI_RESOURCE_HASH_BITS 7
+#define VMCI_RESOURCE_HASH_BUCKETS (1 << VMCI_RESOURCE_HASH_BITS)
+
+struct vmci_hash_table {
+ spinlock_t lock;
+ struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS];
+};
+
+static struct vmci_hash_table vmci_resource_table = {
+ .lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock),
+};
+
+static unsigned int vmci_resource_hash(struct vmci_handle handle)
+{
+ return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS);
+}
+
+/*
+ * Gets a resource (if one exists) matching the given handle from the hash table.
+ */
+static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
+ enum vmci_resource_type type)
+{
+ struct vmci_resource *r, *resource = NULL;
+ unsigned int idx = vmci_resource_hash(handle);
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(r,
+ &vmci_resource_table.entries[idx], node) {
+ u32 cid = r->handle.context;
+ u32 rid = r->handle.resource;
+
+ if (r->type == type &&
+ rid == handle.resource &&
+ (cid == handle.context || cid == VMCI_INVALID_ID)) {
+ resource = r;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return resource;
+}
+
+/*
+ * Find an unused resource ID and return it. The first
+ * VMCI_RESERVED_RESOURCE_ID_MAX are reserved so we start from
+ * its value + 1.
+ * Returns VMCI resource id on success, VMCI_INVALID_ID on failure.
+ */
+static u32 vmci_resource_find_id(u32 context_id,
+ enum vmci_resource_type resource_type)
+{
+ static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
+ u32 old_rid = resource_id;
+ u32 current_rid;
+
+ /*
+ * Generate a unique resource ID. Keep on trying until we wrap around
+ * in the RID space.
+ */
+ do {
+ struct vmci_handle handle;
+
+ current_rid = resource_id;
+ resource_id++;
+ if (unlikely(resource_id == VMCI_INVALID_ID)) {
+ /* Skip the reserved rids. */
+ resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
+ }
+
+ handle = vmci_make_handle(context_id, current_rid);
+ if (!vmci_resource_lookup(handle, resource_type))
+ return current_rid;
+ } while (resource_id != old_rid);
+
+ return VMCI_INVALID_ID;
+}
+
+
+int vmci_resource_add(struct vmci_resource *resource,
+ enum vmci_resource_type resource_type,
+ struct vmci_handle handle)
+
+{
+ unsigned int idx;
+ int result;
+
+ spin_lock(&vmci_resource_table.lock);
+
+ if (handle.resource == VMCI_INVALID_ID) {
+ handle.resource = vmci_resource_find_id(handle.context,
+ resource_type);
+ if (handle.resource == VMCI_INVALID_ID) {
+ result = VMCI_ERROR_NO_HANDLE;
+ goto out;
+ }
+ } else if (vmci_resource_lookup(handle, resource_type)) {
+ result = VMCI_ERROR_ALREADY_EXISTS;
+ goto out;
+ }
+
+ resource->handle = handle;
+ resource->type = resource_type;
+ INIT_HLIST_NODE(&resource->node);
+ kref_init(&resource->kref);
+ init_completion(&resource->done);
+
+ idx = vmci_resource_hash(resource->handle);
+ hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);
+
+ result = VMCI_SUCCESS;
+
+out:
+ spin_unlock(&vmci_resource_table.lock);
+ return result;
+}
+
+void vmci_resource_remove(struct vmci_resource *resource)
+{
+ struct vmci_handle handle = resource->handle;
+ unsigned int idx = vmci_resource_hash(handle);
+ struct vmci_resource *r;
+
+ /* Remove resource from hash table. */
+ spin_lock(&vmci_resource_table.lock);
+
+ hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
+ if (vmci_handle_is_equal(r->handle, resource->handle)) {
+ hlist_del_init_rcu(&r->node);
+ break;
+ }
+ }
+
+ spin_unlock(&vmci_resource_table.lock);
+ synchronize_rcu();
+
+ vmci_resource_put(resource);
+ wait_for_completion(&resource->done);
+}
+
+struct vmci_resource *
+vmci_resource_by_handle(struct vmci_handle resource_handle,
+ enum vmci_resource_type resource_type)
+{
+ struct vmci_resource *r, *resource = NULL;
+
+ rcu_read_lock();
+
+ r = vmci_resource_lookup(resource_handle, resource_type);
+ if (r &&
+ (resource_type == r->type ||
+ resource_type == VMCI_RESOURCE_TYPE_ANY)) {
+ resource = vmci_resource_get(r);
+ }
+
+ rcu_read_unlock();
+
+ return resource;
+}
+
+/*
+ * Get a reference to given resource.
+ */
+struct vmci_resource *vmci_resource_get(struct vmci_resource *resource)
+{
+ kref_get(&resource->kref);
+
+ return resource;
+}
+
+static void vmci_release_resource(struct kref *kref)
+{
+ struct vmci_resource *resource =
+ container_of(kref, struct vmci_resource, kref);
+
+ /* Verify the resource has been unlinked from hash table */
+ WARN_ON(!hlist_unhashed(&resource->node));
+
+ /* Signal that container of this resource can now be destroyed */
+ complete(&resource->done);
+}
+
+/*
+ * The resource's release function will get called on the last put.
+ * If it is the last reference, then we are sure that nobody else
+ * can increment the count again (it's gone from the resource hash
+ * table), so there's no need for locking here.
+ */
+int vmci_resource_put(struct vmci_resource *resource)
+{
+ /*
+ * We propagate the information back to caller in case it wants to know
+ * whether entry was freed.
+ */
+ return kref_put(&resource->kref, vmci_release_resource) ?
+ VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS;
+}
+
+struct vmci_handle vmci_resource_handle(struct vmci_resource *resource)
+{
+ return resource->handle;
+}
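A sketch of the intended embedding pattern for struct vmci_resource, shown here for illustration only; it is not part of the patch. struct example_doorbell and the example_* helpers are placeholders, but the add / lookup-with-reference / put / remove sequence is the one implemented by the functions above.

#include <linux/kernel.h>
#include <linux/slab.h>
#include "vmci_resource.h"

struct example_doorbell {
	struct vmci_resource resource;
	/* client state ... */
};

static int example_register(struct example_doorbell *db, struct vmci_handle h)
{
	/* Publish: after this, lookups by handle can find the resource. */
	return vmci_resource_add(&db->resource,
				 VMCI_RESOURCE_TYPE_DOORBELL, h);
}

static struct example_doorbell *example_find(struct vmci_handle h)
{
	struct vmci_resource *r;

	r = vmci_resource_by_handle(h, VMCI_RESOURCE_TYPE_DOORBELL);
	if (!r)
		return NULL;

	/*
	 * r is returned with a reference held; the caller drops it later
	 * with vmci_resource_put().
	 */
	return container_of(r, struct example_doorbell, resource);
}

static void example_unregister(struct example_doorbell *db)
{
	/*
	 * vmci_resource_remove() unhashes the resource, drops the reference
	 * taken at add time and waits on ->done until every outstanding
	 * reference has been put, so freeing db is safe afterwards.
	 */
	vmci_resource_remove(&db->resource);
	kfree(db);
}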
diff --git a/drivers/misc/vmw_vmci/vmci_resource.h b/drivers/misc/vmw_vmci/vmci_resource.h
new file mode 100644
index 000000000000..9190cd298bee
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_resource.h
@@ -0,0 +1,59 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_RESOURCE_H_
+#define _VMCI_RESOURCE_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_context.h"
+
+
+enum vmci_resource_type {
+ VMCI_RESOURCE_TYPE_ANY,
+ VMCI_RESOURCE_TYPE_API,
+ VMCI_RESOURCE_TYPE_GROUP,
+ VMCI_RESOURCE_TYPE_DATAGRAM,
+ VMCI_RESOURCE_TYPE_DOORBELL,
+ VMCI_RESOURCE_TYPE_QPAIR_GUEST,
+ VMCI_RESOURCE_TYPE_QPAIR_HOST
+};
+
+struct vmci_resource {
+ struct vmci_handle handle;
+ enum vmci_resource_type type;
+ struct hlist_node node;
+ struct kref kref;
+ struct completion done;
+};
+
+
+int vmci_resource_add(struct vmci_resource *resource,
+ enum vmci_resource_type resource_type,
+ struct vmci_handle handle);
+
+void vmci_resource_remove(struct vmci_resource *resource);
+
+struct vmci_resource *
+vmci_resource_by_handle(struct vmci_handle resource_handle,
+ enum vmci_resource_type resource_type);
+
+struct vmci_resource *vmci_resource_get(struct vmci_resource *resource);
+int vmci_resource_put(struct vmci_resource *resource);
+
+struct vmci_handle vmci_resource_handle(struct vmci_resource *resource);
+
+#endif /* _VMCI_RESOURCE_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_route.c b/drivers/misc/vmw_vmci/vmci_route.c
new file mode 100644
index 000000000000..91090658b929
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_route.c
@@ -0,0 +1,226 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_route.h"
+
+/*
+ * Make a routing decision for the given source and destination handles.
+ * This will try to determine the route using the handles and the available
+ * devices. Will set the source context if it is invalid.
+ */
+int vmci_route(struct vmci_handle *src,
+ const struct vmci_handle *dst,
+ bool from_guest,
+ enum vmci_route *route)
+{
+ bool has_host_device = vmci_host_code_active();
+ bool has_guest_device = vmci_guest_code_active();
+
+ *route = VMCI_ROUTE_NONE;
+
+ /*
+ * "from_guest" is only ever set to true by
+ * IOCTL_VMCI_DATAGRAM_SEND (or by the vmkernel equivalent),
+ * which comes from the VMX, so we know it is coming from a
+ * guest.
+ *
+ * To avoid inconsistencies, test these once. We will test
+ * them again when we do the actual send to ensure that we do
+ * not touch a non-existent device.
+ */
+
+ /* Must have a valid destination context. */
+ if (VMCI_INVALID_ID == dst->context)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /* Anywhere to hypervisor. */
+ if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) {
+
+ /*
+ * If this message already came from a guest then we
+ * cannot send it to the hypervisor. It must come
+ * from a local client.
+ */
+ if (from_guest)
+ return VMCI_ERROR_DST_UNREACHABLE;
+
+ /*
+ * We must be acting as a guest in order to send to
+ * the hypervisor.
+ */
+ if (!has_guest_device)
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+
+ /* And we cannot send if the source is the host context. */
+ if (VMCI_HOST_CONTEXT_ID == src->context)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * If the client passed the ANON source handle then
+ * respect it (both context and resource are invalid).
+ * However, if they passed only an invalid context,
+ * then they probably mean ANY, in which case we
+ * should set the real context here before passing it
+ * down.
+ */
+ if (VMCI_INVALID_ID == src->context &&
+ VMCI_INVALID_ID != src->resource)
+ src->context = vmci_get_context_id();
+
+ /* Send from local client down to the hypervisor. */
+ *route = VMCI_ROUTE_AS_GUEST;
+ return VMCI_SUCCESS;
+ }
+
+ /* Anywhere to local client on host. */
+ if (VMCI_HOST_CONTEXT_ID == dst->context) {
+ /*
+ * If it is not from a guest but we are acting as a
+ * guest, then we need to send it down to the host.
+ * Note that if we are also acting as a host then this
+ * will prevent us from sending from local client to
+ * local client, but we accept that restriction as a
+ * way to remove any ambiguity from the host context.
+ */
+ if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) {
+ /*
+ * If the hypervisor is the source, this is
+ * host local communication. The hypervisor
+ * may send vmci event datagrams to the host
+ * itself, but it will never send datagrams to
+ * an "outer host" through the guest device.
+ */
+
+ if (has_host_device) {
+ *route = VMCI_ROUTE_AS_HOST;
+ return VMCI_SUCCESS;
+ } else {
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+ }
+ }
+
+ if (!from_guest && has_guest_device) {
+ /* If no source context then use the current. */
+ if (VMCI_INVALID_ID == src->context)
+ src->context = vmci_get_context_id();
+
+ /* Send it from local client down to the host. */
+ *route = VMCI_ROUTE_AS_GUEST;
+ return VMCI_SUCCESS;
+ }
+
+ /*
+ * Otherwise we already received it from a guest and
+ * it is destined for a local client on this host, or
+ * it is from another local client on this host. We
+ * must be acting as a host to service it.
+ */
+ if (!has_host_device)
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+
+ if (VMCI_INVALID_ID == src->context) {
+ /*
+ * If it came from a guest then it must have a
+ * valid context. Otherwise we can use the
+ * host context.
+ */
+ if (from_guest)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src->context = VMCI_HOST_CONTEXT_ID;
+ }
+
+ /* Route to local client. */
+ *route = VMCI_ROUTE_AS_HOST;
+ return VMCI_SUCCESS;
+ }
+
+ /*
+ * If we are acting as a host then this might be destined for
+ * a guest.
+ */
+ if (has_host_device) {
+ /* It will have a context if it is meant for a guest. */
+ if (vmci_ctx_exists(dst->context)) {
+ if (VMCI_INVALID_ID == src->context) {
+ /*
+ * If it came from a guest then it
+ * must have a valid context.
+ * Otherwise we can use the host
+ * context.
+ */
+
+ if (from_guest)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src->context = VMCI_HOST_CONTEXT_ID;
+ } else if (VMCI_CONTEXT_IS_VM(src->context) &&
+ src->context != dst->context) {
+ /*
+ * VM to VM communication is not
+ * allowed. Since we catch all
+ * communication destined for the host
+ * above, this must be destined for a
+ * VM since there is a valid context.
+ */
+
+ return VMCI_ERROR_DST_UNREACHABLE;
+ }
+
+ /* Pass it up to the guest. */
+ *route = VMCI_ROUTE_AS_HOST;
+ return VMCI_SUCCESS;
+ } else if (!has_guest_device) {
+ /*
+ * The host is attempting to reach a CID
+ * without an active context, and we can't
+ * send it down, since we have no guest
+ * device.
+ */
+
+ return VMCI_ERROR_DST_UNREACHABLE;
+ }
+ }
+
+ /*
+ * We must be a guest trying to send to another guest, which means
+ * we need to send it down to the host. We do not filter out VM to
+ * VM communication here, since we want to be able to use the guest
+ * driver on older versions that do support VM to VM communication.
+ */
+ if (!has_guest_device) {
+ /*
+ * Ending up here means we have neither guest nor host
+ * device.
+ */
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+ }
+
+ /* If no source context then use the current context. */
+ if (VMCI_INVALID_ID == src->context)
+ src->context = vmci_get_context_id();
+
+ /*
+ * Send it from local client down to the host, which will
+ * route it to the other guest for us.
+ */
+ *route = VMCI_ROUTE_AS_GUEST;
+ return VMCI_SUCCESS;
+}
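Illustration only, not part of the patch: a few representative routing outcomes when both the guest and host personalities are present, followed by a placeholder example_route() caller (the resource ID 1 and the pr_info() text are made up).

/*
 *   destination context          from_guest   typical *route
 *   VMCI_HYPERVISOR_CONTEXT_ID   false        VMCI_ROUTE_AS_GUEST (down to the hypervisor)
 *   VMCI_HOST_CONTEXT_ID         true         VMCI_ROUTE_AS_HOST  (to a host local client)
 *   CID of a guest on this host  false        VMCI_ROUTE_AS_HOST  (passed up to that guest)
 */
static void example_route(void)
{
	struct vmci_handle src = VMCI_INVALID_HANDLE;
	struct vmci_handle dst =
		vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 1);
	enum vmci_route route;

	if (vmci_route(&src, &dst, false, &route) == VMCI_SUCCESS)
		pr_info("would send %s\n",
			route == VMCI_ROUTE_AS_GUEST ? "as guest" : "as host");
}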
diff --git a/drivers/misc/vmw_vmci/vmci_route.h b/drivers/misc/vmw_vmci/vmci_route.h
new file mode 100644
index 000000000000..3b30e82419c3
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_route.h
@@ -0,0 +1,30 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_ROUTE_H_
+#define _VMCI_ROUTE_H_
+
+#include <linux/vmw_vmci_defs.h>
+
+enum vmci_route {
+ VMCI_ROUTE_NONE,
+ VMCI_ROUTE_AS_HOST,
+ VMCI_ROUTE_AS_GUEST,
+};
+
+int vmci_route(struct vmci_handle *src, const struct vmci_handle *dst,
+ bool from_guest, enum vmci_route *route);
+
+#endif /* _VMCI_ROUTE_H_ */
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783bf924..5562308699bc 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -52,6 +52,7 @@ config MMC_BLOCK_BOUNCE
config SDIO_UART
tristate "SDIO UART/GPS class support"
+ depends on TTY
help
SDIO function driver for SDIO cards that implements the UART
class, as well as the GPS class which appears like a UART.
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 21056b9ef0a0..5bab73b91c20 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,6 +59,12 @@ MODULE_ALIAS("mmc:block");
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
+ (req->cmd_flags & REQ_META)) && \
+ (rq_data_dir(req) == WRITE))
+#define PACKED_CMD_VER 0x01
+#define PACKED_CMD_WR 0x02
+
static DEFINE_MUTEX(block_mutex);
/*
@@ -89,6 +95,7 @@ struct mmc_blk_data {
unsigned int flags;
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
+#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
unsigned int usage;
unsigned int read_only;
@@ -113,15 +120,10 @@ struct mmc_blk_data {
static DEFINE_MUTEX(open_lock);
-enum mmc_blk_status {
- MMC_BLK_SUCCESS = 0,
- MMC_BLK_PARTIAL,
- MMC_BLK_CMD_ERR,
- MMC_BLK_RETRY,
- MMC_BLK_ABORT,
- MMC_BLK_DATA_ERR,
- MMC_BLK_ECC_ERR,
- MMC_BLK_NOMEDIUM,
+enum {
+ MMC_PACKED_NR_IDX = -1,
+ MMC_PACKED_NR_ZERO,
+ MMC_PACKED_NR_SINGLE,
};
module_param(perdev_minors, int, 0444);
@@ -131,6 +133,19 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
+static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
+{
+ struct mmc_packed *packed = mqrq->packed;
+
+ BUG_ON(!packed);
+
+ mqrq->cmd_type = MMC_PACKED_NONE;
+ packed->nr_entries = MMC_PACKED_NR_ZERO;
+ packed->idx_failure = MMC_PACKED_NR_IDX;
+ packed->retries = 0;
+ packed->blocks = 0;
+}
+
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
@@ -1148,12 +1163,78 @@ static int mmc_blk_err_check(struct mmc_card *card,
if (!brq->data.bytes_xfered)
return MMC_BLK_RETRY;
+ if (mmc_packed_cmd(mq_mrq->cmd_type)) {
+ if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+ return MMC_BLK_PARTIAL;
+ else
+ return MMC_BLK_SUCCESS;
+ }
+
if (blk_rq_bytes(req) != brq->data.bytes_xfered)
return MMC_BLK_PARTIAL;
return MMC_BLK_SUCCESS;
}
+static int mmc_blk_packed_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct request *req = mq_rq->req;
+ struct mmc_packed *packed = mq_rq->packed;
+ int err, check, status;
+ u8 *ext_csd;
+
+ BUG_ON(!packed);
+
+ packed->retries--;
+ check = mmc_blk_err_check(card, areq);
+ err = get_card_status(card, &status, 0);
+ if (err) {
+ pr_err("%s: error %d sending status command\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_ABORT;
+ }
+
+ if (status & R1_EXCEPTION_EVENT) {
+ ext_csd = kzalloc(512, GFP_KERNEL);
+ if (!ext_csd) {
+ pr_err("%s: unable to allocate buffer for ext_csd\n",
+ req->rq_disk->disk_name);
+ return -ENOMEM;
+ }
+
+ err = mmc_send_ext_csd(card, ext_csd);
+ if (err) {
+ pr_err("%s: error %d sending ext_csd\n",
+ req->rq_disk->disk_name, err);
+ check = MMC_BLK_ABORT;
+ goto free;
+ }
+
+ if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+ EXT_CSD_PACKED_FAILURE) &&
+ (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_GENERIC_ERROR)) {
+ if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_INDEXED_ERROR) {
+ packed->idx_failure =
+ ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
+ check = MMC_BLK_PARTIAL;
+ }
+ pr_err("%s: packed cmd failed, nr %u, sectors %u, "
+ "failure index: %d\n",
+ req->rq_disk->disk_name, packed->nr_entries,
+ packed->blocks, packed->idx_failure);
+ }
+free:
+ kfree(ext_csd);
+ }
+
+ return check;
+}
+
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1308,10 +1389,221 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
mmc_queue_bounce_pre(mqrq);
}
+static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
+ struct mmc_card *card)
+{
+ unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
+ unsigned int max_seg_sz = queue_max_segment_size(q);
+ unsigned int len, nr_segs = 0;
+
+ do {
+ len = min(hdr_sz, max_seg_sz);
+ hdr_sz -= len;
+ nr_segs++;
+ } while (hdr_sz);
+
+ return nr_segs;
+}
+
+static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
+{
+ struct request_queue *q = mq->queue;
+ struct mmc_card *card = mq->card;
+ struct request *cur = req, *next = NULL;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_queue_req *mqrq = mq->mqrq_cur;
+ bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
+ unsigned int req_sectors = 0, phys_segments = 0;
+ unsigned int max_blk_count, max_phys_segs;
+ bool put_back = true;
+ u8 max_packed_rw = 0;
+ u8 reqs = 0;
+
+ if (!(md->flags & MMC_BLK_PACKED_CMD))
+ goto no_packed;
+
+ if ((rq_data_dir(cur) == WRITE) &&
+ mmc_host_packed_wr(card->host))
+ max_packed_rw = card->ext_csd.max_packed_writes;
+
+ if (max_packed_rw == 0)
+ goto no_packed;
+
+ if (mmc_req_rel_wr(cur) &&
+ (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+ goto no_packed;
+
+ if (mmc_large_sector(card) &&
+ !IS_ALIGNED(blk_rq_sectors(cur), 8))
+ goto no_packed;
+
+ mmc_blk_clear_packed(mqrq);
+
+ max_blk_count = min(card->host->max_blk_count,
+ card->host->max_req_size >> 9);
+ if (unlikely(max_blk_count > 0xffff))
+ max_blk_count = 0xffff;
+
+ max_phys_segs = queue_max_segments(q);
+ req_sectors += blk_rq_sectors(cur);
+ phys_segments += cur->nr_phys_segments;
+
+ if (rq_data_dir(cur) == WRITE) {
+ req_sectors += mmc_large_sector(card) ? 8 : 1;
+ phys_segments += mmc_calc_packed_hdr_segs(q, card);
+ }
+
+ do {
+ if (reqs >= max_packed_rw - 1) {
+ put_back = false;
+ break;
+ }
+
+ spin_lock_irq(q->queue_lock);
+ next = blk_fetch_request(q);
+ spin_unlock_irq(q->queue_lock);
+ if (!next) {
+ put_back = false;
+ break;
+ }
+
+ if (mmc_large_sector(card) &&
+ !IS_ALIGNED(blk_rq_sectors(next), 8))
+ break;
+
+ if (next->cmd_flags & REQ_DISCARD ||
+ next->cmd_flags & REQ_FLUSH)
+ break;
+
+ if (rq_data_dir(cur) != rq_data_dir(next))
+ break;
+
+ if (mmc_req_rel_wr(next) &&
+ (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+ break;
+
+ req_sectors += blk_rq_sectors(next);
+ if (req_sectors > max_blk_count)
+ break;
+
+ phys_segments += next->nr_phys_segments;
+ if (phys_segments > max_phys_segs)
+ break;
+
+ list_add_tail(&next->queuelist, &mqrq->packed->list);
+ cur = next;
+ reqs++;
+ } while (1);
+
+ if (put_back) {
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(q, next);
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ if (reqs > 0) {
+ list_add(&req->queuelist, &mqrq->packed->list);
+ mqrq->packed->nr_entries = ++reqs;
+ mqrq->packed->retries = reqs;
+ return reqs;
+ }
+
+no_packed:
+ mqrq->cmd_type = MMC_PACKED_NONE;
+ return 0;
+}
+
+static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ struct mmc_queue *mq)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct request *prq;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_packed *packed = mqrq->packed;
+ bool do_rel_wr, do_data_tag;
+ u32 *packed_cmd_hdr;
+ u8 hdr_blocks;
+ u8 i = 1;
+
+ BUG_ON(!packed);
+
+ mqrq->cmd_type = MMC_PACKED_WRITE;
+ packed->blocks = 0;
+ packed->idx_failure = MMC_PACKED_NR_IDX;
+
+ packed_cmd_hdr = packed->cmd_hdr;
+ memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
+ packed_cmd_hdr[0] = (packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+ hdr_blocks = mmc_large_sector(card) ? 8 : 1;
+
+ /*
+ * Argument for each entry of packed group
+ */
+ list_for_each_entry(prq, &packed->list, queuelist) {
+ do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
+ do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+ (prq->cmd_flags & REQ_META) &&
+ (rq_data_dir(prq) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+ /* Argument of CMD23 */
+ packed_cmd_hdr[(i * 2)] =
+ (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+ (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
+ blk_rq_sectors(prq);
+ /* Argument of CMD18 or CMD25 */
+ packed_cmd_hdr[((i * 2)) + 1] =
+ mmc_card_blockaddr(card) ?
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ packed->blocks += blk_rq_sectors(prq);
+ i++;
+ }
+
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
+ brq->mrq.sbc = &brq->sbc;
+ brq->mrq.stop = &brq->stop;
+
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ brq->data.blksz = 512;
+ brq->data.blocks = packed->blocks + hdr_blocks;
+ brq->data.flags |= MMC_DATA_WRITE;
+
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ mmc_queue_bounce_pre(mqrq);
+}
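A worked example of the header that mmc_blk_packed_hdr_wrq_prep() builds (values assumed for illustration): two writes packed on a block-addressed, 512-byte-sector card, 8 sectors at LBA 0x1000 and 16 sectors at LBA 0x2000, with neither reliable write nor data tag:

/*
 *   packed_cmd_hdr[0] = (2 << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER
 *                     = 0x00020201
 *   packed_cmd_hdr[2] = 8      packed_cmd_hdr[3] = 0x1000   (entry 1: CMD23 / CMD25 args)
 *   packed_cmd_hdr[4] = 16     packed_cmd_hdr[5] = 0x2000   (entry 2)
 *
 *   packed->blocks   = 24, hdr_blocks = 1
 *   brq->sbc.arg     = MMC_CMD23_ARG_PACKED | 25
 *   brq->data.blocks = 25   (one header block plus 24 data blocks)
 */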
+
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
struct mmc_blk_request *brq, struct request *req,
int ret)
{
+ struct mmc_queue_req *mq_rq;
+ mq_rq = container_of(brq, struct mmc_queue_req, brq);
+
/*
* If this is an SD card and we're writing, we can first
* mark the known good sectors as ok.
@@ -1328,11 +1620,84 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
ret = blk_end_request(req, 0, blocks << 9);
}
} else {
- ret = blk_end_request(req, 0, brq->data.bytes_xfered);
+ if (!mmc_packed_cmd(mq_rq->cmd_type))
+ ret = blk_end_request(req, 0, brq->data.bytes_xfered);
}
return ret;
}
+static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
+{
+ struct request *prq;
+ struct mmc_packed *packed = mq_rq->packed;
+ int idx = packed->idx_failure, i = 0;
+ int ret = 0;
+
+ BUG_ON(!packed);
+
+ while (!list_empty(&packed->list)) {
+ prq = list_entry_rq(packed->list.next);
+ if (idx == i) {
+ /* retry from error index */
+ packed->nr_entries -= idx;
+ mq_rq->req = prq;
+ ret = 1;
+
+ if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
+ list_del_init(&prq->queuelist);
+ mmc_blk_clear_packed(mq_rq);
+ }
+ return ret;
+ }
+ list_del_init(&prq->queuelist);
+ blk_end_request(prq, 0, blk_rq_bytes(prq));
+ i++;
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+ return ret;
+}
+
+static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
+{
+ struct request *prq;
+ struct mmc_packed *packed = mq_rq->packed;
+
+ BUG_ON(!packed);
+
+ while (!list_empty(&packed->list)) {
+ prq = list_entry_rq(packed->list.next);
+ list_del_init(&prq->queuelist);
+ blk_end_request(prq, -EIO, blk_rq_bytes(prq));
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+}
+
+static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
+ struct mmc_queue_req *mq_rq)
+{
+ struct request *prq;
+ struct request_queue *q = mq->queue;
+ struct mmc_packed *packed = mq_rq->packed;
+
+ BUG_ON(!packed);
+
+ while (!list_empty(&packed->list)) {
+ prq = list_entry_rq(packed->list.prev);
+ if (prq->queuelist.prev != &packed->list) {
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(mq->queue, prq);
+ spin_unlock_irq(q->queue_lock);
+ } else {
+ list_del_init(&prq->queuelist);
+ }
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+}
+
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
struct mmc_blk_data *md = mq->data;
@@ -1343,10 +1708,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
struct mmc_queue_req *mq_rq;
struct request *req = rqc;
struct mmc_async_req *areq;
+ const u8 packed_nr = 2;
+ u8 reqs = 0;
if (!rqc && !mq->mqrq_prev->req)
return 0;
+ if (rqc)
+ reqs = mmc_blk_prep_packed_list(mq, rqc);
+
do {
if (rqc) {
/*
@@ -1357,15 +1727,24 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
(card->ext_csd.data_sector_size == 4096)) {
pr_err("%s: Transfer size is not 4KB sector size aligned\n",
req->rq_disk->disk_name);
+ mq_rq = mq->mqrq_cur;
goto cmd_abort;
}
- mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+
+ if (reqs >= packed_nr)
+ mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
+ card, mq);
+ else
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
areq = &mq->mqrq_cur->mmc_active;
} else
areq = NULL;
areq = mmc_start_req(card->host, areq, (int *) &status);
- if (!areq)
+ if (!areq) {
+ if (status == MMC_BLK_NEW_REQUEST)
+ mq->flags |= MMC_QUEUE_NEW_REQUEST;
return 0;
+ }
mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
brq = &mq_rq->brq;
@@ -1380,8 +1759,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
* A block was successfully transferred.
*/
mmc_blk_reset_success(md, type);
- ret = blk_end_request(req, 0,
+
+ if (mmc_packed_cmd(mq_rq->cmd_type)) {
+ ret = mmc_blk_end_packed_req(mq_rq);
+ break;
+ } else {
+ ret = blk_end_request(req, 0,
brq->data.bytes_xfered);
+ }
+
/*
* If the blk_end_request function returns non-zero even
* though all data has been transferred and no errors
@@ -1414,7 +1800,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
err = mmc_blk_reset(md, card->host, type);
if (!err)
break;
- if (err == -ENODEV)
+ if (err == -ENODEV ||
+ mmc_packed_cmd(mq_rq->cmd_type))
goto cmd_abort;
/* Fall through */
}
@@ -1438,30 +1825,62 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
break;
case MMC_BLK_NOMEDIUM:
goto cmd_abort;
+ default:
+ pr_err("%s: Unhandled return value (%d)\n",
+ req->rq_disk->disk_name, status);
+ goto cmd_abort;
}
if (ret) {
- /*
- * In case of a incomplete request
- * prepare it again and resend.
- */
- mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
- mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ if (mmc_packed_cmd(mq_rq->cmd_type)) {
+ if (!mq_rq->packed->retries)
+ goto cmd_abort;
+ mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
+ mmc_start_req(card->host,
+ &mq_rq->mmc_active, NULL);
+ } else {
+
+ /*
+ * In case of an incomplete request
+ * prepare it again and resend.
+ */
+ mmc_blk_rw_rq_prep(mq_rq, card,
+ disable_multi, mq);
+ mmc_start_req(card->host,
+ &mq_rq->mmc_active, NULL);
+ }
}
} while (ret);
return 1;
cmd_abort:
- if (mmc_card_removed(card))
- req->cmd_flags |= REQ_QUIET;
- while (ret)
- ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
+ if (mmc_packed_cmd(mq_rq->cmd_type)) {
+ mmc_blk_abort_packed_req(mq_rq);
+ } else {
+ if (mmc_card_removed(card))
+ req->cmd_flags |= REQ_QUIET;
+ while (ret)
+ ret = blk_end_request(req, -EIO,
+ blk_rq_cur_bytes(req));
+ }
start_new_req:
if (rqc) {
- mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
- mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+ if (mmc_card_removed(card)) {
+ rqc->cmd_flags |= REQ_QUIET;
+ blk_end_request_all(rqc, -EIO);
+ } else {
+ /*
+ * If the current request is packed, it needs to be put back.
+ */
+ if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
+ mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
+
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ mmc_start_req(card->host,
+ &mq->mqrq_cur->mmc_active, NULL);
+ }
}
return 0;
@@ -1472,6 +1891,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
int ret;
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
+ struct mmc_host *host = card->host;
+ unsigned long flags;
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
@@ -1486,6 +1907,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
goto out;
}
+ mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
if (req && req->cmd_flags & REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
@@ -1501,11 +1923,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
mmc_blk_issue_rw_rq(mq, NULL);
ret = mmc_blk_issue_flush(mq, req);
} else {
+ if (!req && host->areq) {
+ spin_lock_irqsave(&host->context_info.lock, flags);
+ host->context_info.is_waiting_last_req = true;
+ spin_unlock_irqrestore(&host->context_info.lock, flags);
+ }
ret = mmc_blk_issue_rw_rq(mq, req);
}
out:
- if (!req)
+ if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST))
/* release host only when there are no more requests */
mmc_release_host(card->host);
return ret;
@@ -1624,6 +2051,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
}
+ if (mmc_card_mmc(card) &&
+ (area_type == MMC_BLK_DATA_AREA_MAIN) &&
+ (md->flags & MMC_BLK_CMD23) &&
+ card->ext_csd.packed_event_en) {
+ if (!mmc_packed_init(&md->queue, card))
+ md->flags |= MMC_BLK_PACKED_CMD;
+ }
+
return md;
err_putdisk:
@@ -1732,6 +2167,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
/* Then flush out any already in there */
mmc_cleanup_queue(&md->queue);
+ if (md->flags & MMC_BLK_PACKED_CMD)
+ mmc_packed_clean(&md->queue);
mmc_blk_put(md);
}
}
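
The packed WRITE prepared by mmc_blk_packed_hdr_wrq_prep() above is issued as a single transfer: one header sector followed by the data of every request on packed->list, with the block count covering packed->blocks + hdr_blocks. As a rough sketch of what that header carries - an assumption based on the eMMC 4.5 packed-command format, not code taken from this patch, and with made-up names - the 32-bit words could be filled like this:

#include <linux/types.h>

/* Illustrative only: assumed layout of a packed WRITE header sector
 * (struct packed_entry and fill_packed_wr_hdr() are hypothetical names,
 * not part of the patch). */
struct packed_entry {
	u32 nr_blocks;		/* becomes the per-entry CMD23 argument */
	u32 start_sector;	/* becomes the per-entry CMD25 argument */
};

static void fill_packed_wr_hdr(u32 *hdr, const struct packed_entry *e,
			       unsigned int nr_entries)
{
	unsigned int i;

	/* word 0: version 0x01, R/W field 0x02 (write), number of entries */
	hdr[0] = 0x01 | (0x02 << 8) | (nr_entries << 16);

	/* entry i occupies words 2*i and 2*i + 1, starting from i = 1 */
	for (i = 1; i <= nr_entries; i++) {
		hdr[i * 2] = e[i - 1].nr_blocks;
		hdr[i * 2 + 1] = e[i - 1].start_sector;
	}
}

In the patch the header lives in the kzalloc()ed packed->cmd_hdr[], so the unused words of the header sector are already zero.
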
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fadf52eb5d70..fa4e44ee7961 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -22,7 +22,8 @@
#define MMC_QUEUE_BOUNCESZ 65536
-#define MMC_QUEUE_SUSPENDED (1 << 0)
+
+#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
/*
* Prepare a MMC request. This just filters out odd stuff.
@@ -58,6 +59,7 @@ static int mmc_queue_thread(void *d)
do {
struct request *req = NULL;
struct mmc_queue_req *tmp;
+ unsigned int cmd_flags = 0;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
@@ -67,12 +69,23 @@ static int mmc_queue_thread(void *d)
if (req || mq->mqrq_prev->req) {
set_current_state(TASK_RUNNING);
+ cmd_flags = req ? req->cmd_flags : 0;
mq->issue_fn(mq, req);
+ if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
+ mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ continue; /* fetch again */
+ }
/*
* Current request becomes previous request
* and vice versa.
+ * In case of special requests, the current
+ * request has already been finished. Do not
+ * assign it to the previous request.
*/
+ if (cmd_flags & MMC_REQ_SPECIAL_MASK)
+ mq->mqrq_cur->req = NULL;
+
mq->mqrq_prev->brq.mrq.data = NULL;
mq->mqrq_prev->req = NULL;
tmp = mq->mqrq_prev;
@@ -103,6 +116,8 @@ static void mmc_request_fn(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
+ unsigned long flags;
+ struct mmc_context_info *cntx;
if (!mq) {
while ((req = blk_fetch_request(q)) != NULL) {
@@ -112,7 +127,20 @@ static void mmc_request_fn(struct request_queue *q)
return;
}
- if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+ cntx = &mq->card->host->context_info;
+ if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
+ /*
+ * A new MMC request arrived while the MMC thread may be
+ * blocked waiting for the previous request to complete,
+ * with no current request fetched yet.
+ */
+ spin_lock_irqsave(&cntx->lock, flags);
+ if (cntx->is_waiting_last_req) {
+ cntx->is_new_req = true;
+ wake_up_interruptible(&cntx->wait);
+ }
+ spin_unlock_irqrestore(&cntx->lock, flags);
+ } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
wake_up_process(mq->thread);
}
@@ -334,6 +362,49 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
}
EXPORT_SYMBOL(mmc_cleanup_queue);
+int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+ int ret = 0;
+
+ mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+ if (!mqrq_cur->packed) {
+ pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
+ mmc_card_name(card));
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+ if (!mqrq_prev->packed) {
+ pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
+ mmc_card_name(card));
+ kfree(mqrq_cur->packed);
+ mqrq_cur->packed = NULL;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&mqrq_cur->packed->list);
+ INIT_LIST_HEAD(&mqrq_prev->packed->list);
+
+out:
+ return ret;
+}
+
+void mmc_packed_clean(struct mmc_queue *mq)
+{
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+ kfree(mqrq_cur->packed);
+ mqrq_cur->packed = NULL;
+ kfree(mqrq_prev->packed);
+ mqrq_prev->packed = NULL;
+}
+
/**
* mmc_queue_suspend - suspend a MMC request queue
* @mq: MMC queue to suspend
@@ -378,6 +449,41 @@ void mmc_queue_resume(struct mmc_queue *mq)
}
}
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+ struct mmc_packed *packed,
+ struct scatterlist *sg,
+ enum mmc_packed_type cmd_type)
+{
+ struct scatterlist *__sg = sg;
+ unsigned int sg_len = 0;
+ struct request *req;
+
+ if (mmc_packed_wr(cmd_type)) {
+ unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
+ unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
+ unsigned int len, remain, offset = 0;
+ u8 *buf = (u8 *)packed->cmd_hdr;
+
+ remain = hdr_sz;
+ do {
+ len = min(remain, max_seg_sz);
+ sg_set_buf(__sg, buf + offset, len);
+ offset += len;
+ remain -= len;
+ (__sg++)->page_link &= ~0x02;
+ sg_len++;
+ } while (remain);
+ }
+
+ list_for_each_entry(req, &packed->list, queuelist) {
+ sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+ __sg = sg + (sg_len - 1);
+ (__sg++)->page_link &= ~0x02;
+ }
+ sg_mark_end(sg + (sg_len - 1));
+ return sg_len;
+}
+
/*
 * Prepare the sg list(s) to be handed off to the host driver
*/
@@ -386,14 +492,26 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
+ enum mmc_packed_type cmd_type;
int i;
- if (!mqrq->bounce_buf)
- return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+ cmd_type = mqrq->cmd_type;
+
+ if (!mqrq->bounce_buf) {
+ if (mmc_packed_cmd(cmd_type))
+ return mmc_queue_packed_map_sg(mq, mqrq->packed,
+ mqrq->sg, cmd_type);
+ else
+ return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+ }
BUG_ON(!mqrq->bounce_sg);
- sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+ if (mmc_packed_cmd(cmd_type))
+ sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
+ mqrq->bounce_sg, cmd_type);
+ else
+ sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
mqrq->bounce_sg_len = sg_len;
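
One subtlety in mmc_queue_packed_map_sg() above: blk_rq_map_sg() marks the last entry it fills as the end of a scatterlist, so when the header segments and several requests are concatenated into a single list, each intermediate end mark has to be cleared (the page_link &= ~0x02 lines) and only the very last entry terminated with sg_mark_end(). A minimal standalone sketch of that idea, with a made-up helper name:

#include <linux/scatterlist.h>

/* Illustrative helper (hypothetical name): after concatenating several
 * independently built scatterlists into sg[0..nents-1], clear the end
 * mark on every intermediate entry and terminate only the final one. */
static void sg_join_tail(struct scatterlist *sg, unsigned int nents)
{
	unsigned int i;

	for (i = 0; i < nents - 1; i++)
		sg[i].page_link &= ~0x02;	/* clear intermediate end marks */

	sg_mark_end(&sg[nents - 1]);		/* terminate the combined list */
}
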
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d2a1eb4b9f9f..031bf6376c99 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,6 +12,23 @@ struct mmc_blk_request {
struct mmc_data data;
};
+enum mmc_packed_type {
+ MMC_PACKED_NONE = 0,
+ MMC_PACKED_WRITE,
+};
+
+#define mmc_packed_cmd(type) ((type) != MMC_PACKED_NONE)
+#define mmc_packed_wr(type) ((type) == MMC_PACKED_WRITE)
+
+struct mmc_packed {
+ struct list_head list;
+ u32 cmd_hdr[1024];
+ unsigned int blocks;
+ u8 nr_entries;
+ u8 retries;
+ s16 idx_failure;
+};
+
struct mmc_queue_req {
struct request *req;
struct mmc_blk_request brq;
@@ -20,6 +37,8 @@ struct mmc_queue_req {
struct scatterlist *bounce_sg;
unsigned int bounce_sg_len;
struct mmc_async_req mmc_active;
+ enum mmc_packed_type cmd_type;
+ struct mmc_packed *packed;
};
struct mmc_queue {
@@ -27,6 +46,9 @@ struct mmc_queue {
struct task_struct *thread;
struct semaphore thread_sem;
unsigned int flags;
+#define MMC_QUEUE_SUSPENDED (1 << 0)
+#define MMC_QUEUE_NEW_REQUEST (1 << 1)
+
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;
@@ -46,4 +68,7 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
extern void mmc_queue_bounce_post(struct mmc_queue_req *);
+extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
+extern void mmc_packed_clean(struct mmc_queue *);
+
#endif
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index bd57a11acc79..c931dfe6a59c 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -381,7 +381,6 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port)
static void sdio_uart_receive_chars(struct sdio_uart_port *port,
unsigned int *status)
{
- struct tty_struct *tty = tty_port_tty_get(&port->port);
unsigned int ch, flag;
int max_count = 256;
@@ -418,23 +417,19 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port,
}
if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0)
- if (tty)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(&port->port, ch, flag);
/*
* Overrun is special. Since it's reported immediately,
* it doesn't affect the current character.
*/
if (*status & ~port->ignore_status_mask & UART_LSR_OE)
- if (tty)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
*status = sdio_in(port, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+
+ tty_flip_buffer_push(&port->port);
}
static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 420cb6753c1e..e219c97a02a4 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -321,6 +321,7 @@ int mmc_add_card(struct mmc_card *card)
#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
#endif
+ mmc_init_context_info(card->host);
ret = device_add(&card->dev);
if (ret)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index aaed7687cf09..08a3cf2a7610 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -319,11 +319,45 @@ out:
}
EXPORT_SYMBOL(mmc_start_bkops);
+/*
+ * mmc_wait_data_done() - done callback for data request
+ * @mrq: done data request
+ *
+ * Wakes up the mmc context; passed as a callback to the host controller driver
+ */
+static void mmc_wait_data_done(struct mmc_request *mrq)
+{
+ mrq->host->context_info.is_done_rcv = true;
+ wake_up_interruptible(&mrq->host->context_info.wait);
+}
+
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
}
+/*
+ *__mmc_start_data_req() - starts data request
+ * @host: MMC host to start the request
+ * @mrq: data request to start
+ *
+ * Sets the done callback to be called when the request is completed by the
+ * card, then starts execution of the data request.
+ */
+static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mrq->done = mmc_wait_data_done;
+ mrq->host = host;
+ if (mmc_card_removed(host->card)) {
+ mrq->cmd->error = -ENOMEDIUM;
+ mmc_wait_data_done(mrq);
+ return -ENOMEDIUM;
+ }
+ mmc_start_request(host, mrq);
+
+ return 0;
+}
+
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
init_completion(&mrq->completion);
@@ -337,6 +371,62 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
+/*
+ * mmc_wait_for_data_req_done() - wait for the request to complete
+ * @host: MMC host to prepare the command.
+ * @mrq: MMC request to wait for
+ *
+ * Blocks the MMC context until the host controller acknowledges the end of
+ * the data request or a new request notification arrives from the block layer.
+ * Handles command retries.
+ *
+ * Returns enum mmc_blk_status after checking errors.
+ */
+static int mmc_wait_for_data_req_done(struct mmc_host *host,
+ struct mmc_request *mrq,
+ struct mmc_async_req *next_req)
+{
+ struct mmc_command *cmd;
+ struct mmc_context_info *context_info = &host->context_info;
+ int err;
+ unsigned long flags;
+
+ while (1) {
+ wait_event_interruptible(context_info->wait,
+ (context_info->is_done_rcv ||
+ context_info->is_new_req));
+ spin_lock_irqsave(&context_info->lock, flags);
+ context_info->is_waiting_last_req = false;
+ spin_unlock_irqrestore(&context_info->lock, flags);
+ if (context_info->is_done_rcv) {
+ context_info->is_done_rcv = false;
+ context_info->is_new_req = false;
+ cmd = mrq->cmd;
+ if (!cmd->error || !cmd->retries ||
+ mmc_card_removed(host->card)) {
+ err = host->areq->err_check(host->card,
+ host->areq);
+ break; /* return err */
+ } else {
+ pr_info("%s: req failed (CMD%u): %d, retrying...\n",
+ mmc_hostname(host),
+ cmd->opcode, cmd->error);
+ cmd->retries--;
+ cmd->error = 0;
+ host->ops->request(host, mrq);
+ continue; /* wait for done/new event again */
+ }
+ } else if (context_info->is_new_req) {
+ context_info->is_new_req = false;
+ if (!next_req) {
+ err = MMC_BLK_NEW_REQUEST;
+ break; /* return err */
+ }
+ }
+ }
+ return err;
+}
+
static void mmc_wait_for_req_done(struct mmc_host *host,
struct mmc_request *mrq)
{
@@ -426,8 +516,16 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
mmc_pre_req(host, areq->mrq, !host->areq);
if (host->areq) {
- mmc_wait_for_req_done(host, host->areq->mrq);
- err = host->areq->err_check(host->card, host->areq);
+ err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
+ if (err == MMC_BLK_NEW_REQUEST) {
+ if (error)
+ *error = err;
+ /*
+ * The previous request was not completed,
+ * nothing to return
+ */
+ return NULL;
+ }
/*
* Check BKOPS urgency for each R1 response
*/
@@ -439,14 +537,14 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
}
if (!err && areq)
- start_err = __mmc_start_req(host, areq->mrq);
+ start_err = __mmc_start_data_req(host, areq->mrq);
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
/* Cancel a prepared request if it was not started. */
if ((err || start_err) && areq)
- mmc_post_req(host, areq->mrq, -EINVAL);
+ mmc_post_req(host, areq->mrq, -EINVAL);
if (err)
host->areq = NULL;
@@ -1137,7 +1235,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
*/
voltage = regulator_get_voltage(supply);
- if (regulator_count_voltages(supply) == 1)
+ if (!regulator_can_change_voltage(supply))
min_uV = max_uV = voltage;
if (voltage < 0)
@@ -1219,10 +1317,30 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
return ocr;
}
-int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
+int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
+{
+ int err = 0;
+ int old_signal_voltage = host->ios.signal_voltage;
+
+ host->ios.signal_voltage = signal_voltage;
+ if (host->ops->start_signal_voltage_switch) {
+ mmc_host_clk_hold(host);
+ err = host->ops->start_signal_voltage_switch(host, &host->ios);
+ mmc_host_clk_release(host);
+ }
+
+ if (err)
+ host->ios.signal_voltage = old_signal_voltage;
+
+ return err;
+}
+
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
struct mmc_command cmd = {0};
int err = 0;
+ u32 clock;
BUG_ON(!host);
@@ -1230,27 +1348,81 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
* Send CMD11 only if the request is to switch the card to
* 1.8V signalling.
*/
- if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
- cmd.opcode = SD_SWITCH_VOLTAGE;
- cmd.arg = 0;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ return __mmc_set_signal_voltage(host, signal_voltage);
- err = mmc_wait_for_cmd(host, &cmd, 0);
- if (err)
- return err;
+ /*
+ * If we cannot switch voltages, return failure so the caller
+ * can continue without UHS mode
+ */
+ if (!host->ops->start_signal_voltage_switch)
+ return -EPERM;
+ if (!host->ops->card_busy)
+ pr_warning("%s: cannot verify signal voltage switch\n",
+ mmc_hostname(host));
+
+ cmd.opcode = SD_SWITCH_VOLTAGE;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
- return -EIO;
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ return err;
+
+ if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
+ return -EIO;
+
+ mmc_host_clk_hold(host);
+ /*
+ * The card should drive cmd and dat[0:3] low immediately
+ * after the response of cmd11, but wait 1 ms to be sure
+ */
+ mmc_delay(1);
+ if (host->ops->card_busy && !host->ops->card_busy(host)) {
+ err = -EAGAIN;
+ goto power_cycle;
}
+ /*
+ * During a signal voltage level switch, the clock must be gated
+ * for 5 ms according to the SD spec
+ */
+ clock = host->ios.clock;
+ host->ios.clock = 0;
+ mmc_set_ios(host);
- host->ios.signal_voltage = signal_voltage;
+ if (__mmc_set_signal_voltage(host, signal_voltage)) {
+ /*
+ * Voltages may not have been switched, but we've already
+ * sent CMD11, so a power cycle is required anyway
+ */
+ err = -EAGAIN;
+ goto power_cycle;
+ }
- if (host->ops->start_signal_voltage_switch) {
- mmc_host_clk_hold(host);
- err = host->ops->start_signal_voltage_switch(host, &host->ios);
- mmc_host_clk_release(host);
+ /* Keep clock gated for at least 5 ms */
+ mmc_delay(5);
+ host->ios.clock = clock;
+ mmc_set_ios(host);
+
+ /* Wait for at least 1 ms according to spec */
+ mmc_delay(1);
+
+ /*
+ * Failure to switch is indicated by the card holding
+ * dat[0:3] low
+ */
+ if (host->ops->card_busy && host->ops->card_busy(host))
+ err = -EAGAIN;
+
+power_cycle:
+ if (err) {
+ pr_debug("%s: Signal voltage switch failed, "
+ "power cycling card\n", mmc_hostname(host));
+ mmc_power_cycle(host);
}
+ mmc_host_clk_release(host);
+
return err;
}
@@ -1314,7 +1486,7 @@ static void mmc_power_up(struct mmc_host *host)
mmc_set_ios(host);
/* Set signal voltage to 3.3V */
- mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
+ __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
/*
* This delay should be sufficient to allow the power supply
@@ -1372,6 +1544,14 @@ void mmc_power_off(struct mmc_host *host)
mmc_host_clk_release(host);
}
+void mmc_power_cycle(struct mmc_host *host)
+{
+ mmc_power_off(host);
+ /* Wait at least 1 ms according to SD spec */
+ mmc_delay(1);
+ mmc_power_up(host);
+}
+
/*
* Cleanup when the last reference to the bus operator is dropped.
*/
@@ -2388,6 +2568,7 @@ EXPORT_SYMBOL(mmc_flush_cache);
* Turn the cache ON/OFF.
* Turning the cache OFF shall trigger flushing of the data
* to the non-volatile storage.
+ * This function should be called with host claimed
*/
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
@@ -2399,7 +2580,6 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
mmc_card_is_removable(host))
return err;
- mmc_claim_host(host);
if (card && mmc_card_mmc(card) &&
(card->ext_csd.cache_size > 0)) {
enable = !!enable;
@@ -2417,7 +2597,6 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
card->ext_csd.cache_ctrl = enable;
}
}
- mmc_release_host(host);
return err;
}
@@ -2436,10 +2615,6 @@ int mmc_suspend_host(struct mmc_host *host)
cancel_delayed_work(&host->detect);
mmc_flush_scheduled_work();
- err = mmc_cache_ctrl(host, 0);
- if (err)
- goto out;
-
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
if (host->bus_ops->suspend) {
@@ -2581,6 +2756,23 @@ int mmc_pm_notify(struct notifier_block *notify_block,
}
#endif
+/**
+ * mmc_init_context_info() - init synchronization context
+ * @host: mmc host
+ *
+ * Init the context_info structure needed to implement the asynchronous
+ * request mechanism, used by the mmc core, host drivers and mmc request
+ * suppliers.
+ */
+void mmc_init_context_info(struct mmc_host *host)
+{
+ spin_lock_init(&host->context_info.lock);
+ host->context_info.is_new_req = false;
+ host->context_info.is_done_rcv = false;
+ host->context_info.is_waiting_last_req = false;
+ init_waitqueue_head(&host->context_info.wait);
+}
+
static int __init mmc_init(void)
{
int ret;
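
Tying the core.c pieces together: __mmc_start_data_req() installs mmc_wait_data_done() as mrq->done, and the host controller driver's usual completion path, mmc_request_done(), ends up invoking that callback, which sets context_info.is_done_rcv and wakes the thread sleeping in mmc_wait_for_data_req_done(). A minimal sketch of the host side, using made-up foo_mmc names:

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

/* Hypothetical host-controller completion path (foo_* is a made-up name):
 * once the hardware finishes a transfer, the driver calls
 * mmc_request_done(), which invokes mrq->done() - here mmc_wait_data_done()
 * - so the waiting mmc context is woken through host->context_info.wait. */
static void foo_mmc_finish_request(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	/* ... read the command response and data status from hardware ... */
	mmc_request_done(mmc, mrq);
}
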
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 3bdafbca354f..b9f18a2a8874 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -40,11 +40,12 @@ void mmc_set_ungated(struct mmc_host *host);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
-int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage,
- bool cmd11);
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
+int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
void mmc_power_off(struct mmc_host *host);
+void mmc_power_cycle(struct mmc_host *host);
static inline void mmc_delay(unsigned int ms)
{
@@ -76,5 +77,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host);
void mmc_add_card_debugfs(struct mmc_card *card);
void mmc_remove_card_debugfs(struct mmc_card *card);
+void mmc_init_context_info(struct mmc_host *host);
#endif
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index ee2e16b17017..2a3593d9f87d 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/leds.h>
@@ -23,6 +25,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/slot-gpio.h>
#include "core.h"
#include "host.h"
@@ -295,6 +298,126 @@ static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
#endif
/**
+ * mmc_of_parse() - parse host's device-tree node
+ * @host: host whose node should be parsed.
+ *
+ * To keep the rest of the MMC subsystem unaware of whether DT has been
+ * used to instantiate and configure this host instance or not, we
+ * parse the properties and set respective generic mmc-host flags and
+ * parameters.
+ */
+void mmc_of_parse(struct mmc_host *host)
+{
+ struct device_node *np;
+ u32 bus_width;
+ bool explicit_inv_wp, gpio_inv_wp = false;
+ enum of_gpio_flags flags;
+ int len, ret, gpio;
+
+ if (!host->parent || !host->parent->of_node)
+ return;
+
+ np = host->parent->of_node;
+
+ /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
+ if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
+ dev_dbg(host->parent,
+ "\"bus-width\" property is missing, assuming 1 bit.\n");
+ bus_width = 1;
+ }
+
+ switch (bus_width) {
+ case 8:
+ host->caps |= MMC_CAP_8_BIT_DATA;
+ /* Hosts capable of 8-bit transfers can also do 4 bits */
+ case 4:
+ host->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ break;
+ default:
+ dev_err(host->parent,
+ "Invalid \"bus-width\" value %u!\n", bus_width);
+ }
+
+ /* f_max is obtained from the optional "max-frequency" property */
+ of_property_read_u32(np, "max-frequency", &host->f_max);
+
+ /*
+ * Configure CD and WP pins. They are both by default active low to
+ * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the
+ * mmc-gpio helpers are used to attach, configure and use them. If
+ * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH
+ * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the
+ * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability
+ * is set. If the "non-removable" property is found, the
+ * MMC_CAP_NONREMOVABLE capability is set and no card-detection
+ * configuration is performed.
+ */
+
+ /* Parse Card Detection */
+ if (of_find_property(np, "non-removable", &len)) {
+ host->caps |= MMC_CAP_NONREMOVABLE;
+ } else {
+ bool explicit_inv_cd, gpio_inv_cd = false;
+
+ explicit_inv_cd = of_property_read_bool(np, "cd-inverted");
+
+ if (of_find_property(np, "broken-cd", &len))
+ host->caps |= MMC_CAP_NEEDS_POLL;
+
+ gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
+ if (gpio_is_valid(gpio)) {
+ if (!(flags & OF_GPIO_ACTIVE_LOW))
+ gpio_inv_cd = true;
+
+ ret = mmc_gpio_request_cd(host, gpio);
+ if (ret < 0)
+ dev_err(host->parent,
+ "Failed to request CD GPIO #%d: %d!\n",
+ gpio, ret);
+ else
+ dev_info(host->parent, "Got CD GPIO #%d.\n",
+ gpio);
+ }
+
+ if (explicit_inv_cd ^ gpio_inv_cd)
+ host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+ }
+
+ /* Parse Write Protection */
+ explicit_inv_wp = of_property_read_bool(np, "wp-inverted");
+
+ gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags);
+ if (gpio_is_valid(gpio)) {
+ if (!(flags & OF_GPIO_ACTIVE_LOW))
+ gpio_inv_wp = true;
+
+ ret = mmc_gpio_request_ro(host, gpio);
+ if (ret < 0)
+ dev_err(host->parent,
+ "Failed to request WP GPIO: %d!\n", ret);
+ }
+ if (explicit_inv_wp ^ gpio_inv_wp)
+ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ if (of_find_property(np, "cap-sd-highspeed", &len))
+ host->caps |= MMC_CAP_SD_HIGHSPEED;
+ if (of_find_property(np, "cap-mmc-highspeed", &len))
+ host->caps |= MMC_CAP_MMC_HIGHSPEED;
+ if (of_find_property(np, "cap-power-off-card", &len))
+ host->caps |= MMC_CAP_POWER_OFF_CARD;
+ if (of_find_property(np, "cap-sdio-irq", &len))
+ host->caps |= MMC_CAP_SDIO_IRQ;
+ if (of_find_property(np, "keep-power-in-suspend", &len))
+ host->pm_caps |= MMC_PM_KEEP_POWER;
+ if (of_find_property(np, "enable-sdio-wakeup", &len))
+ host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+}
+EXPORT_SYMBOL(mmc_of_parse);
+
+/**
* mmc_alloc_host - initialise the per-host structure.
* @extra: sizeof private data structure
* @dev: pointer to host device model structure
@@ -306,19 +429,20 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
int err;
struct mmc_host *host;
- if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
- return NULL;
-
host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
if (!host)
return NULL;
/* scanning will be enabled when we're ready */
host->rescan_disable = 1;
+ idr_preload(GFP_KERNEL);
spin_lock(&mmc_host_lock);
- err = idr_get_new(&mmc_host_idr, host, &host->index);
+ err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
+ if (err >= 0)
+ host->index = err;
spin_unlock(&mmc_host_lock);
- if (err)
+ idr_preload_end();
+ if (err < 0)
goto free;
dev_set_name(&host->class_dev, "mmc%d", host->index);
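
With mmc_of_parse() in place, a DT-aware host driver no longer open-codes properties such as "bus-width", "cd-gpios", "wp-gpios" or "non-removable". A minimal, hypothetical probe sketch (foo_mmc_probe() is a made-up name, not part of this patch):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>

/* Hypothetical probe showing the intended use of mmc_of_parse(): the device
 * node attached to &pdev->dev (host->parent) is parsed into host caps/f_max,
 * and the CD/WP GPIOs are requested through the slot-gpio helpers. */
static int foo_mmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;

	mmc = mmc_alloc_host(0, &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	mmc_of_parse(mmc);

	/* controller-specific setup, then mmc_add_host(mmc) ... */
	return 0;
}
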
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index e6e39111e05b..c8f3d6e0684e 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -496,7 +496,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
* RPMB regions are defined in multiples of 128K.
*/
card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
- if (ext_csd[EXT_CSD_RPMB_MULT]) {
+ if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
EXT_CSD_PART_CONFIG_ACC_RPMB,
"rpmb", 0, false,
@@ -538,6 +538,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
} else {
card->ext_csd.data_tag_unit_size = 0;
}
+
+ card->ext_csd.max_packed_writes =
+ ext_csd[EXT_CSD_MAX_PACKED_WRITES];
+ card->ext_csd.max_packed_reads =
+ ext_csd[EXT_CSD_MAX_PACKED_READS];
} else {
card->ext_csd.data_sector_size = 512;
}
@@ -769,11 +774,11 @@ static int mmc_select_hs200(struct mmc_card *card)
if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0);
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0);
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
/* If fails try again during next card power cycle */
if (err)
@@ -1221,8 +1226,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* WARNING: eMMC rules are NOT the same as SD DDR
*/
if (ddr == MMC_1_2V_DDR_MODE) {
- err = mmc_set_signal_voltage(host,
- MMC_SIGNAL_VOLTAGE_120, 0);
+ err = __mmc_set_signal_voltage(host,
+ MMC_SIGNAL_VOLTAGE_120);
if (err)
goto err;
}
@@ -1275,6 +1280,29 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
}
+ /*
+ * The spec defines mandatory minimum values for packed command support:
+ * read: 5, write: 3.
+ */
+ if (card->ext_csd.max_packed_writes >= 3 &&
+ card->ext_csd.max_packed_reads >= 5 &&
+ host->caps2 & MMC_CAP2_PACKED_CMD) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_EXP_EVENTS_CTRL,
+ EXT_CSD_PACKED_EVENT_EN,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warn("%s: Enabling packed event failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.packed_event_en = 0;
+ err = 0;
+ } else {
+ card->ext_csd.packed_event_en = 1;
+ }
+ }
+
if (!oldcard)
host->card = card;
@@ -1379,6 +1407,11 @@ static int mmc_suspend(struct mmc_host *host)
BUG_ON(!host->card);
mmc_claim_host(host);
+
+ err = mmc_cache_ctrl(host, 0);
+ if (err)
+ goto out;
+
if (mmc_can_poweroff_notify(host->card))
err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
else if (mmc_card_can_sleep(host))
@@ -1386,8 +1419,9 @@ static int mmc_suspend(struct mmc_host *host)
else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
- mmc_release_host(host);
+out:
+ mmc_release_host(host);
return err;
}
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 6d8f7012d73a..49f04bc9d0eb 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -363,6 +363,7 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
ext_csd, 512);
}
+EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 74972c241dff..9e645e19cec6 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -444,8 +444,7 @@ static void sd_update_bus_speed_mode(struct mmc_card *card)
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
- if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
+ if (!mmc_host_uhs(card->host)) {
card->sd_bus_speed = 0;
return;
}
@@ -713,6 +712,14 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
{
int err;
u32 max_current;
+ int retries = 10;
+
+try_again:
+ if (!retries) {
+ ocr &= ~SD_OCR_S18R;
+ pr_warning("%s: Skipping voltage switch\n",
+ mmc_hostname(host));
+ }
/*
* Since we're changing the OCR value, we seem to
@@ -734,10 +741,10 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
/*
* If the host supports one of UHS-I modes, request the card
- * to switch to 1.8V signaling level.
+ * to switch to 1.8V signaling level. However, if the card has
+ * repeatedly failed to switch, skip this.
*/
- if (host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))
+ if (retries && mmc_host_uhs(host))
ocr |= SD_OCR_S18R;
/*
@@ -748,7 +755,6 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
if (max_current > 150)
ocr |= SD_OCR_XPC;
-try_again:
err = mmc_send_app_op_cond(host, ocr, rocr);
if (err)
return err;
@@ -759,9 +765,12 @@ try_again:
*/
if (!mmc_host_is_spi(host) && rocr &&
((*rocr & 0x41000000) == 0x41000000)) {
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true);
- if (err) {
- ocr &= ~SD_OCR_S18R;
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ if (err == -EAGAIN) {
+ retries--;
+ goto try_again;
+ } else if (err) {
+ retries = 0;
goto try_again;
}
}
@@ -960,16 +969,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
/* Card is an ultra-high-speed card */
mmc_card_set_uhs(card);
-
- /*
- * Since initialization is now complete, enable preset
- * value registers for UHS-I cards.
- */
- if (host->ops->enable_preset_value) {
- mmc_host_clk_hold(card->host);
- host->ops->enable_preset_value(host, true);
- mmc_host_clk_release(card->host);
- }
} else {
/*
* Attempt to change to high-speed (if supported)
@@ -1148,13 +1147,6 @@ int mmc_attach_sd(struct mmc_host *host)
BUG_ON(!host);
WARN_ON(!host->claimed);
- /* Disable preset value enable if already set since last time */
- if (host->ops->enable_preset_value) {
- mmc_host_clk_hold(host);
- host->ops->enable_preset_value(host, false);
- mmc_host_clk_release(host);
- }
-
err = mmc_send_app_op_cond(host, 0, &ocr);
if (err)
return err;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2273ce6b6c1a..aa0719a4dfd1 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -157,10 +157,7 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
if (ret)
goto out;
- if (card->host->caps &
- (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
- MMC_CAP_UHS_DDR50)) {
+ if (mmc_host_uhs(card->host)) {
if (data & SDIO_UHS_DDR50)
card->sw_caps.sd3_bus_mode
|= SD_MODE_UHS_DDR50;
@@ -478,8 +475,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
- if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
+ if (!mmc_host_uhs(card->host))
return 0;
bus_speed = SDIO_SPEED_SDR12;
@@ -489,23 +485,27 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
bus_speed = SDIO_SPEED_SDR104;
timing = MMC_TIMING_UHS_SDR104;
card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
bus_speed = SDIO_SPEED_DDR50;
timing = MMC_TIMING_UHS_DDR50;
card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR50)) {
bus_speed = SDIO_SPEED_SDR50;
timing = MMC_TIMING_UHS_SDR50;
card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
bus_speed = SDIO_SPEED_SDR25;
timing = MMC_TIMING_UHS_SDR25;
card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
@@ -513,6 +513,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
bus_speed = SDIO_SPEED_SDR12;
timing = MMC_TIMING_UHS_SDR12;
card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
}
err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
@@ -583,10 +584,19 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
{
struct mmc_card *card;
int err;
+ int retries = 10;
BUG_ON(!host);
WARN_ON(!host->claimed);
+try_again:
+ if (!retries) {
+ pr_warning("%s: Skipping voltage switch\n",
+ mmc_hostname(host));
+ ocr &= ~R4_18V_PRESENT;
+ host->ocr &= ~R4_18V_PRESENT;
+ }
+
/*
* Inform the card of the voltage
*/
@@ -645,14 +655,16 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
* systems that claim 1.8v signalling in fact do not support
* it.
*/
- if ((ocr & R4_18V_PRESENT) &&
- (host->caps &
- (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
- MMC_CAP_UHS_DDR50))) {
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
- true);
- if (err) {
+ if (!powered_resume && (ocr & R4_18V_PRESENT) && mmc_host_uhs(host)) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ if (err == -EAGAIN) {
+ sdio_reset(host);
+ mmc_go_idle(host);
+ mmc_send_if_cond(host, host->ocr_avail);
+ mmc_remove_card(card);
+ retries--;
+ goto try_again;
+ } else if (err) {
ocr &= ~R4_18V_PRESENT;
host->ocr &= ~R4_18V_PRESENT;
}
@@ -937,10 +949,12 @@ static int mmc_sdio_resume(struct mmc_host *host)
mmc_claim_host(host);
/* No need to reinitialize powered-resumed nonremovable cards */
- if (mmc_card_is_removable(host) || !mmc_card_keep_power(host))
+ if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) {
+ sdio_reset(host);
+ mmc_go_idle(host);
err = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
- else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
+ } else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
/* We may have switched to 1-bit mode during suspend */
err = sdio_enable_4bit_bus(host->card);
if (err > 0) {
@@ -1020,6 +1034,10 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
goto out;
}
+ if (mmc_host_uhs(host))
+ /* to query card if 1.8V signalling is supported */
+ host->ocr |= R4_18V_PRESENT;
+
ret = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
if (!ret && host->sdio_irqs)
@@ -1085,6 +1103,10 @@ int mmc_attach_sdio(struct mmc_host *host)
/*
* Detect and init the card.
*/
+ if (mmc_host_uhs(host))
+ /* to query card if 1.8V signalling is supported */
+ host->ocr |= R4_18V_PRESENT;
+
err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
if (err) {
if (err == -EAGAIN) {
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 16a1c0b6f264..324235105519 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -92,6 +92,20 @@ int mmc_gpio_get_cd(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_gpio_get_cd);
+/**
+ * mmc_gpio_request_ro - request a gpio for write-protection
+ * @host: mmc host
+ * @gpio: gpio number requested
+ *
+ * As devm_* managed functions are used in mmc_gpio_request_ro(), client
+ * drivers do not need to call mmc_gpio_free_ro() explicitly when the gpio
+ * is only requested at probe time and freed at unbind time. However, if
+ * client drivers do something special like runtime switching of
+ * write-protection, they are responsible for calling mmc_gpio_request_ro()
+ * and mmc_gpio_free_ro() as a pair on their own.
+ *
+ * Returns zero on success, else an error.
+ */
int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
{
struct mmc_gpio *ctx;
@@ -106,7 +120,8 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
ctx = host->slot.handler_priv;
- ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label);
+ ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN,
+ ctx->ro_label);
if (ret < 0)
return ret;
@@ -116,6 +131,20 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
}
EXPORT_SYMBOL(mmc_gpio_request_ro);
+/**
+ * mmc_gpio_request_cd - request a gpio for card-detection
+ * @host: mmc host
+ * @gpio: gpio number requested
+ *
+ * As devm_* managed functions are used in mmc_gpio_request_cd(), client
+ * drivers do not need to call mmc_gpio_free_cd() explicitly when the gpio
+ * is only requested at probe time and freed at unbind time. However, if
+ * client drivers do something special like runtime switching of
+ * card-detection, they are responsible for calling mmc_gpio_request_cd()
+ * and mmc_gpio_free_cd() as a pair on their own.
+ *
+ * Returns zero on success, else an error.
+ */
int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
{
struct mmc_gpio *ctx;
@@ -128,7 +157,8 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
ctx = host->slot.handler_priv;
- ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->cd_label);
+ ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN,
+ ctx->cd_label);
if (ret < 0)
/*
* don't bother freeing memory. It might still get used by other
@@ -146,7 +176,8 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
irq = -EINVAL;
if (irq >= 0) {
- ret = request_threaded_irq(irq, NULL, mmc_gpio_cd_irqt,
+ ret = devm_request_threaded_irq(&host->class_dev, irq,
+ NULL, mmc_gpio_cd_irqt,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
ctx->cd_label, host);
if (ret < 0)
@@ -164,6 +195,13 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
}
EXPORT_SYMBOL(mmc_gpio_request_cd);
+/**
+ * mmc_gpio_free_ro - free the write-protection gpio
+ * @host: mmc host
+ *
+ * It is provided only for cases where client drivers need to manually free
+ * up the write-protection gpio requested by mmc_gpio_request_ro().
+ */
void mmc_gpio_free_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
@@ -175,10 +213,17 @@ void mmc_gpio_free_ro(struct mmc_host *host)
gpio = ctx->ro_gpio;
ctx->ro_gpio = -EINVAL;
- gpio_free(gpio);
+ devm_gpio_free(&host->class_dev, gpio);
}
EXPORT_SYMBOL(mmc_gpio_free_ro);
+/**
+ * mmc_gpio_free_cd - free the card-detection gpio
+ * @host: mmc host
+ *
+ * It is provided only for cases where client drivers need to manually free
+ * up the card-detection gpio requested by mmc_gpio_request_cd().
+ */
void mmc_gpio_free_cd(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
@@ -188,13 +233,13 @@ void mmc_gpio_free_cd(struct mmc_host *host)
return;
if (host->slot.cd_irq >= 0) {
- free_irq(host->slot.cd_irq, host);
+ devm_free_irq(&host->class_dev, host->slot.cd_irq, host);
host->slot.cd_irq = -EINVAL;
}
gpio = ctx->cd_gpio;
ctx->cd_gpio = -EINVAL;
- gpio_free(gpio);
+ devm_gpio_free(&host->class_dev, gpio);
}
EXPORT_SYMBOL(mmc_gpio_free_cd);
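
Because the slot-gpio helpers above are now devm_* managed, a typical host driver only has to do the request side; the GPIOs and the CD interrupt are released automatically when the device is unbound. A short hypothetical usage sketch (foo_mmc_setup_gpios() is a made-up name):

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

/* Hypothetical helper called from a host driver's probe: request the
 * card-detect and write-protect GPIOs; no matching free call is needed
 * on remove because the helpers use devm_* resource management. */
static int foo_mmc_setup_gpios(struct mmc_host *mmc, unsigned int cd_gpio,
			       unsigned int wp_gpio)
{
	int ret;

	ret = mmc_gpio_request_cd(mmc, cd_gpio);
	if (ret)
		return ret;

	return mmc_gpio_request_ro(mmc, wp_gpio);
}
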
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index cc8a8fad455a..d88219e1d86e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -238,6 +238,17 @@ config MMC_SDHCI_S3C_DMA
YMMV.
+config MMC_SDHCI_BCM2835
+ tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
+ depends on ARCH_BCM2835
+ depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the BCM2835 SD/MMC controller. If you have a BCM2835
+ platform with SD or MMC devices, say Y or M here.
+
+ If unsure, say N.
+
config MMC_OMAP
tristate "TI OMAP Multimedia Card Interface support"
depends on ARCH_OMAP
@@ -361,6 +372,13 @@ config MMC_DAVINCI
If you have an DAVINCI board with a Multimedia Card slot,
say Y or M here. If unsure, say N.
+config MMC_GOLDFISH
+ tristate "goldfish qemu Multimedia Card Interface support"
+ depends on GOLDFISH
+ help
+ This selects the Goldfish Multimedia Card Interface driver used by
+ the Goldfish Android virtual device emulator.
+
config MMC_SPI
tristate "MMC/SD/SDIO over SPI"
depends on SPI_MASTER && !HIGHMEM && HAS_DMA
@@ -457,7 +475,7 @@ config MMC_SDHI
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select CB710_CORE
help
This option enables support for MMC/SD part of ENE CB710/720 Flash
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e4e218c930bd..c380e3cf0a3b 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
obj-$(CONFIG_MMC_MSM) += msm_sdcc.o
obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o
+obj-$(CONFIG_MMC_GOLDFISH) += android-goldfish.o
obj-$(CONFIG_MMC_SPI) += mmc_spi.o
ifeq ($(CONFIG_OF),y)
obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o
@@ -58,6 +59,7 @@ obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
+obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
new file mode 100644
index 000000000000..ef3aef0f376d
--- /dev/null
+++ b/drivers/mmc/host/android-goldfish.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright 2007, Google Inc.
+ * Copyright 2012, Intel Inc.
+ *
+ * based on omap.c driver, which was
+ * Copyright (C) 2004 Nokia Corporation
+ * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
+ * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
+ * Other hacks (DMA, SD, etc) by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/major.h>
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/scatterlist.h>
+
+#include <asm/types.h>
+#include <asm/uaccess.h>
+
+#define DRIVER_NAME "goldfish_mmc"
+
+#define BUFFER_SIZE 16384
+
+#define GOLDFISH_MMC_READ(host, addr) (readl(host->reg_base + addr))
+#define GOLDFISH_MMC_WRITE(host, addr, x) (writel(x, host->reg_base + addr))
+
+enum {
+ /* status register */
+ MMC_INT_STATUS = 0x00,
+ /* set this to enable IRQ */
+ MMC_INT_ENABLE = 0x04,
+ /* set this to specify buffer address */
+ MMC_SET_BUFFER = 0x08,
+
+ /* MMC command number */
+ MMC_CMD = 0x0C,
+
+ /* MMC argument */
+ MMC_ARG = 0x10,
+
+ /* MMC response (or R2 bits 0 - 31) */
+ MMC_RESP_0 = 0x14,
+
+ /* MMC R2 response bits 32 - 63 */
+ MMC_RESP_1 = 0x18,
+
+ /* MMC R2 response bits 64 - 95 */
+ MMC_RESP_2 = 0x1C,
+
+ /* MMC R2 response bits 96 - 127 */
+ MMC_RESP_3 = 0x20,
+
+ MMC_BLOCK_LENGTH = 0x24,
+ MMC_BLOCK_COUNT = 0x28,
+
+ /* MMC state flags */
+ MMC_STATE = 0x2C,
+
+ /* MMC_INT_STATUS bits */
+
+ MMC_STAT_END_OF_CMD = 1U << 0,
+ MMC_STAT_END_OF_DATA = 1U << 1,
+ MMC_STAT_STATE_CHANGE = 1U << 2,
+ MMC_STAT_CMD_TIMEOUT = 1U << 3,
+
+ /* MMC_STATE bits */
+ MMC_STATE_INSERTED = 1U << 0,
+ MMC_STATE_READ_ONLY = 1U << 1,
+};
+
+/*
+ * Command types
+ */
+#define OMAP_MMC_CMDTYPE_BC 0
+#define OMAP_MMC_CMDTYPE_BCR 1
+#define OMAP_MMC_CMDTYPE_AC 2
+#define OMAP_MMC_CMDTYPE_ADTC 3
+
+
+struct goldfish_mmc_host {
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+ struct device *dev;
+ unsigned char id; /* 16xx chips have 2 MMC blocks */
+ void __iomem *virt_base;
+ unsigned int phys_base;
+ int irq;
+ unsigned char bus_mode;
+ unsigned char hw_bus_mode;
+
+ unsigned int sg_len;
+ unsigned dma_done:1;
+ unsigned dma_in_use:1;
+
+ void __iomem *reg_base;
+};
+
+static inline int
+goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host)
+{
+ return 0;
+}
+
+static ssize_t
+goldfish_mmc_show_cover_switch(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct goldfish_mmc_host *host = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" :
+ "closed");
+}
+
+static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL);
+
+static void
+goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd)
+{
+ u32 cmdreg;
+ u32 resptype;
+ u32 cmdtype;
+
+ host->cmd = cmd;
+
+ resptype = 0;
+ cmdtype = 0;
+
+ /* Our hardware needs to know exact type */
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ break;
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ /* resp 1, 1b, 6, 7 */
+ resptype = 1;
+ break;
+ case MMC_RSP_R2:
+ resptype = 2;
+ break;
+ case MMC_RSP_R3:
+ resptype = 3;
+ break;
+ default:
+ dev_err(mmc_dev(host->mmc),
+ "Invalid response type: %04x\n", mmc_resp_type(cmd));
+ break;
+ }
+
+ if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
+ cmdtype = OMAP_MMC_CMDTYPE_ADTC;
+ else if (mmc_cmd_type(cmd) == MMC_CMD_BC)
+ cmdtype = OMAP_MMC_CMDTYPE_BC;
+ else if (mmc_cmd_type(cmd) == MMC_CMD_BCR)
+ cmdtype = OMAP_MMC_CMDTYPE_BCR;
+ else
+ cmdtype = OMAP_MMC_CMDTYPE_AC;
+
+ cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
+
+ if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ cmdreg |= 1 << 6;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ cmdreg |= 1 << 11;
+
+ if (host->data && !(host->data->flags & MMC_DATA_WRITE))
+ cmdreg |= 1 << 15;
+
+ GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg);
+ GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg);
+}
+
+static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
+ struct mmc_data *data)
+{
+ if (host->dma_in_use) {
+ enum dma_data_direction dma_data_dir;
+
+ if (data->flags & MMC_DATA_WRITE)
+ dma_data_dir = DMA_TO_DEVICE;
+ else
+ dma_data_dir = DMA_FROM_DEVICE;
+
+ if (dma_data_dir == DMA_FROM_DEVICE) {
+ /*
+ * We don't really have DMA, so we need
+ * to copy from our platform driver buffer
+ */
+ uint8_t *dest = (uint8_t *)sg_virt(data->sg);
+ memcpy(dest, host->virt_base, data->sg->length);
+ }
+ host->data->bytes_xfered += data->sg->length;
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
+ dma_data_dir);
+ }
+
+ host->data = NULL;
+ host->sg_len = 0;
+
+ /*
+ * NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
+ * dozens of requests until the card finishes writing data.
+ * It'd be cheaper to just wait till an EOFB interrupt arrives...
+ */
+
+ if (!data->stop) {
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, data->mrq);
+ return;
+ }
+
+ goldfish_mmc_start_command(host, data->stop);
+}
+
+static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host,
+ struct mmc_data *data)
+{
+ if (!host->dma_in_use) {
+ goldfish_mmc_xfer_done(host, data);
+ return;
+ }
+ if (host->dma_done)
+ goldfish_mmc_xfer_done(host, data);
+}
+
+static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ host->cmd = NULL;
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ /* response type 2 */
+ cmd->resp[3] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_0);
+ cmd->resp[2] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_1);
+ cmd->resp[1] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_2);
+ cmd->resp[0] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_3);
+ } else {
+ /* response types 1, 1b, 3, 4, 5, 6 */
+ cmd->resp[0] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_0);
+ }
+ }
+
+ if (host->data == NULL || cmd->error) {
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, cmd->mrq);
+ }
+}
+
+static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id)
+{
+ struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id;
+ u16 status;
+ int end_command = 0;
+ int end_transfer = 0;
+ int transfer_error = 0;
+ int state_changed = 0;
+ int cmd_timeout = 0;
+
+ while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) {
+ GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
+
+ if (status & MMC_STAT_END_OF_CMD)
+ end_command = 1;
+
+ if (status & MMC_STAT_END_OF_DATA)
+ end_transfer = 1;
+
+ if (status & MMC_STAT_STATE_CHANGE)
+ state_changed = 1;
+
+ if (status & MMC_STAT_CMD_TIMEOUT) {
+ end_command = 0;
+ cmd_timeout = 1;
+ }
+ }
+
+ if (cmd_timeout) {
+ struct mmc_request *mrq = host->mrq;
+ mrq->cmd->error = -ETIMEDOUT;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+ }
+
+ if (end_command)
+ goldfish_mmc_cmd_done(host, host->cmd);
+
+ if (transfer_error)
+ goldfish_mmc_xfer_done(host, host->data);
+ else if (end_transfer) {
+ host->dma_done = 1;
+ goldfish_mmc_end_of_data(host, host->data);
+ } else if (host->data != NULL) {
+ /*
+ * WORKAROUND -- after porting this driver from 2.6 to 3.4,
+ * during device initialization, cases where host->data is
+ * non-null but end_transfer is false would occur. Doing
+ * nothing in such cases results in no further interrupts,
+ * and initialization failure.
+ * TODO -- find the real cause.
+ */
+ host->dma_done = 1;
+ goldfish_mmc_end_of_data(host, host->data);
+ }
+
+ if (state_changed) {
+ u32 state = GOLDFISH_MMC_READ(host, MMC_STATE);
+ pr_info("%s: Card detect now %d\n", __func__,
+ (state & MMC_STATE_INSERTED));
+ mmc_detect_change(host->mmc, 0);
+ }
+
+ if (!end_command && !end_transfer &&
+ !transfer_error && !state_changed && !cmd_timeout) {
+ status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS);
+ dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status);
+ if (status != 0) {
+ GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
+ GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
+ struct mmc_request *req)
+{
+ struct mmc_data *data = req->data;
+ int block_size;
+ unsigned sg_len;
+ enum dma_data_direction dma_data_dir;
+
+ host->data = data;
+ if (data == NULL) {
+ GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0);
+ GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0);
+ host->dma_in_use = 0;
+ return;
+ }
+
+ block_size = data->blksz;
+
+ GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1);
+ GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1);
+
+ /*
+ * Cope with calling layer confusion; it issues "single
+ * block" writes using multi-block scatterlists.
+ */
+ sg_len = (data->blocks == 1) ? 1 : data->sg_len;
+
+ if (data->flags & MMC_DATA_WRITE)
+ dma_data_dir = DMA_TO_DEVICE;
+ else
+ dma_data_dir = DMA_FROM_DEVICE;
+
+ host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
+ sg_len, dma_data_dir);
+ host->dma_done = 0;
+ host->dma_in_use = 1;
+
+ if (dma_data_dir == DMA_TO_DEVICE) {
+ /*
+ * We don't really have DMA, so we need to copy to our
+ * platform driver buffer
+ */
+ const uint8_t *src = (uint8_t *)sg_virt(data->sg);
+ memcpy(host->virt_base, src, data->sg->length);
+ }
+}
+
+static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct goldfish_mmc_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != NULL);
+
+ host->mrq = req;
+ goldfish_mmc_prepare_data(host, req);
+ goldfish_mmc_start_command(host, req->cmd);
+
+ /*
+ * This is to avoid accidentally being detected as an SDIO card
+ * in mmc_attach_sdio().
+ */
+ if (req->cmd->opcode == SD_IO_SEND_OP_COND &&
+ req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR))
+ req->cmd->error = -EINVAL;
+}
+
+static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct goldfish_mmc_host *host = mmc_priv(mmc);
+
+ host->bus_mode = ios->bus_mode;
+ host->hw_bus_mode = host->bus_mode;
+}
+
+static int goldfish_mmc_get_ro(struct mmc_host *mmc)
+{
+ uint32_t state;
+ struct goldfish_mmc_host *host = mmc_priv(mmc);
+
+ state = GOLDFISH_MMC_READ(host, MMC_STATE);
+ return ((state & MMC_STATE_READ_ONLY) != 0);
+}
+
+static const struct mmc_host_ops goldfish_mmc_ops = {
+ .request = goldfish_mmc_request,
+ .set_ios = goldfish_mmc_set_ios,
+ .get_ro = goldfish_mmc_get_ro,
+};
+
+static int goldfish_mmc_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct goldfish_mmc_host *host = NULL;
+ struct resource *res;
+ int ret = 0;
+ int irq;
+ dma_addr_t buf_addr;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (res == NULL || irq < 0)
+ return -ENXIO;
+
+ mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
+ if (mmc == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_host_failed;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end);
+ host->reg_base = ioremap(res->start, res->end - res->start + 1);
+ if (host->reg_base == NULL) {
+ ret = -ENOMEM;
+ goto ioremap_failed;
+ }
+ host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
+ &buf_addr, GFP_KERNEL);
+
+ if (host->virt_base == NULL) {
+ ret = -ENOMEM;
+ goto dma_alloc_failed;
+ }
+ host->phys_base = buf_addr;
+
+ host->id = pdev->id;
+ host->irq = irq;
+
+ mmc->ops = &goldfish_mmc_ops;
+ mmc->f_min = 400000;
+ mmc->f_max = 24000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->caps = MMC_CAP_4_BIT_DATA;
+
+ /* Use scatterlist DMA to reduce per-transfer costs.
+ * NOTE: max_seg_size assumes that small blocks aren't
+ * normally used (except e.g. for reading SD registers).
+ */
+ mmc->max_segs = 32;
+ mmc->max_blk_size = 2048; /* MMC_BLOCK_LENGTH is 11 bits (+1) */
+ mmc->max_blk_count = 2048; /* MMC_BLOCK_COUNT is 11 bits (+1) */
+ mmc->max_req_size = BUFFER_SIZE;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed IRQ Adding goldfish MMC\n");
+ goto err_request_irq_failed;
+ }
+
+ host->dev = &pdev->dev;
+ platform_set_drvdata(pdev, host);
+
+ ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
+ if (ret)
+ dev_warn(mmc_dev(host->mmc),
+ "Unable to create sysfs attributes\n");
+
+ GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
+ GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
+ MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA |
+ MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT);
+
+ mmc_add_host(mmc);
+ return 0;
+
+err_request_irq_failed:
+ dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base,
+ host->phys_base);
+dma_alloc_failed:
+ iounmap(host->reg_base);
+ioremap_failed:
+ mmc_free_host(host->mmc);
+err_alloc_host_failed:
+ return ret;
+}
+
+static int goldfish_mmc_remove(struct platform_device *pdev)
+{
+ struct goldfish_mmc_host *host = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ BUG_ON(host == NULL);
+
+ mmc_remove_host(host->mmc);
+ free_irq(host->irq, host);
+ dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base);
+ iounmap(host->reg_base);
+ mmc_free_host(host->mmc);
+ return 0;
+}
+
+static struct platform_driver goldfish_mmc_driver = {
+ .probe = goldfish_mmc_probe,
+ .remove = goldfish_mmc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(goldfish_mmc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 4d50da618166..72fd0f2c9013 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -175,16 +175,6 @@ static int dw_mci_exynos_setup_bus(struct dw_mci *host,
}
}
- gpio = of_get_named_gpio(slot_np, "wp-gpios", 0);
- if (gpio_is_valid(gpio)) {
- if (devm_gpio_request(host->dev, gpio, "dw-mci-wp"))
- dev_info(host->dev, "gpio [%d] request failed\n",
- gpio);
- } else {
- dev_info(host->dev, "wp gpio not available");
- host->pdata->quirks |= DW_MCI_QUIRK_NO_WRITE_PROTECT;
- }
-
if (host->pdata->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
return 0;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 323c5022c2ca..98342213ed21 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -34,6 +34,7 @@
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/of_gpio.h>
#include "dw_mmc.h"
@@ -74,6 +75,8 @@ struct idmac_desc {
* struct dw_mci_slot - MMC slot state
* @mmc: The mmc_host representing this slot.
* @host: The MMC controller this slot is using.
+ * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX)
+ * @wp_gpio: If gpio_is_valid() we'll use this to read write protect.
* @ctype: Card type for this slot.
* @mrq: mmc_request currently being processed or waiting to be
* processed, or NULL when the slot is idle.
@@ -88,6 +91,9 @@ struct dw_mci_slot {
struct mmc_host *mmc;
struct dw_mci *host;
+ int quirks;
+ int wp_gpio;
+
u32 ctype;
struct mmc_request *mrq;
@@ -825,10 +831,12 @@ static int dw_mci_get_ro(struct mmc_host *mmc)
struct dw_mci_board *brd = slot->host->pdata;
/* Use platform get_ro function, else try on board write protect */
- if (brd->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT)
+ if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
read_only = 0;
else if (brd->get_ro)
read_only = brd->get_ro(slot->id);
+ else if (gpio_is_valid(slot->wp_gpio))
+ read_only = gpio_get_value(slot->wp_gpio);
else
read_only =
mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
@@ -1445,7 +1453,7 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
if (!sg_miter_next(sg_miter))
goto done;
- host->sg = sg_miter->__sg;
+ host->sg = sg_miter->piter.sg;
buf = sg_miter->addr;
remain = sg_miter->length;
offset = 0;
@@ -1500,7 +1508,7 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
if (!sg_miter_next(sg_miter))
goto done;
- host->sg = sg_miter->__sg;
+ host->sg = sg_miter->piter.sg;
buf = sg_miter->addr;
remain = sg_miter->length;
offset = 0;
@@ -1785,6 +1793,30 @@ static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
return NULL;
}
+static struct dw_mci_of_slot_quirks {
+ char *quirk;
+ int id;
+} of_slot_quirks[] = {
+ {
+ .quirk = "disable-wp",
+ .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
+ },
+};
+
+static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
+{
+ struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
+ int quirks = 0;
+ int idx;
+
+ /* get quirks */
+ for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
+ if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
+ quirks |= of_slot_quirks[idx].id;
+
+ return quirks;
+}
+
/* find out bus-width for a given slot */
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
@@ -1799,7 +1831,34 @@ static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
" as 1\n");
return bus_wd;
}
+
+/* find the write protect gpio for a given slot; or -1 if none specified */
+static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
+{
+ struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
+ int gpio;
+
+ if (!np)
+ return -EINVAL;
+
+ gpio = of_get_named_gpio(np, "wp-gpios", 0);
+
+ /* Having a missing entry is valid; return silently */
+ if (!gpio_is_valid(gpio))
+ return -EINVAL;
+
+ if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
+ dev_warn(dev, "gpio [%d] request failed\n", gpio);
+ return -EINVAL;
+ }
+
+ return gpio;
+}
#else /* CONFIG_OF */
+static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
+{
+ return 0;
+}
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
return 1;
@@ -1808,6 +1867,10 @@ static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
return NULL;
}
+static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_OF */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
@@ -1828,6 +1891,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
slot->host = host;
host->slot[id] = slot;
+ slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
+
mmc->ops = &dw_mci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
mmc->f_max = host->bus_hz;
@@ -1923,6 +1988,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
else
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
+
mmc_add_host(mmc);
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index f8dd36102949..145cdaf000d1 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -21,7 +21,11 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/pinctrl/consumer.h>
#include <asm/sizes.h>
#include <asm/unaligned.h>
@@ -51,8 +55,6 @@ struct mvsd_host {
struct mmc_host *mmc;
struct device *dev;
struct clk *clk;
- int gpio_card_detect;
- int gpio_write_protect;
};
#define mvsd_write(offs, val) writel(val, iobase + (offs))
@@ -538,13 +540,6 @@ static void mvsd_timeout_timer(unsigned long data)
mmc_request_done(host->mmc, mrq);
}
-static irqreturn_t mvsd_card_detect_irq(int irq, void *dev)
-{
- struct mvsd_host *host = dev;
- mmc_detect_change(host->mmc, msecs_to_jiffies(100));
- return IRQ_HANDLED;
-}
-
static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct mvsd_host *host = mmc_priv(mmc);
@@ -564,20 +559,6 @@ static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
spin_unlock_irqrestore(&host->lock, flags);
}
-static int mvsd_get_ro(struct mmc_host *mmc)
-{
- struct mvsd_host *host = mmc_priv(mmc);
-
- if (host->gpio_write_protect)
- return gpio_get_value(host->gpio_write_protect);
-
- /*
- * Board doesn't support read only detection; let the mmc core
- * decide what to do.
- */
- return -ENOSYS;
-}
-
static void mvsd_power_up(struct mvsd_host *host)
{
void __iomem *iobase = host->base;
@@ -674,7 +655,7 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static const struct mmc_host_ops mvsd_ops = {
.request = mvsd_request,
- .get_ro = mvsd_get_ro,
+ .get_ro = mmc_gpio_get_ro,
.set_ios = mvsd_set_ios,
.enable_sdio_irq = mvsd_enable_sdio_irq,
};
@@ -703,17 +684,18 @@ mv_conf_mbus_windows(struct mvsd_host *host,
static int __init mvsd_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct mmc_host *mmc = NULL;
struct mvsd_host *host = NULL;
- const struct mvsdio_platform_data *mvsd_data;
const struct mbus_dram_target_info *dram;
struct resource *r;
int ret, irq;
+ int gpio_card_detect, gpio_write_protect;
+ struct pinctrl *pinctrl;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- mvsd_data = pdev->dev.platform_data;
- if (!r || irq < 0 || !mvsd_data)
+ if (!r || irq < 0)
return -ENXIO;
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
@@ -725,8 +707,43 @@ static int __init mvsd_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->mmc = mmc;
host->dev = &pdev->dev;
- host->base_clock = mvsd_data->clock / 2;
- host->clk = ERR_PTR(-EINVAL);
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "no pins associated\n");
+
+ /*
+ * Some non-DT platforms do not pass a clock, and the clock
+ * frequency is passed through platform_data. On DT platforms,
+ * a clock must always be passed, even if there is no gatable
+ * clock associated to the SDIO interface (it can simply be a
+ * fixed rate clock).
+ */
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(host->clk))
+ clk_prepare_enable(host->clk);
+
+ if (np) {
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "DT platforms must have a clock associated\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ host->base_clock = clk_get_rate(host->clk) / 2;
+ gpio_card_detect = of_get_named_gpio(np, "cd-gpios", 0);
+ gpio_write_protect = of_get_named_gpio(np, "wp-gpios", 0);
+ } else {
+ const struct mvsdio_platform_data *mvsd_data;
+ mvsd_data = pdev->dev.platform_data;
+ if (!mvsd_data) {
+ ret = -ENXIO;
+ goto out;
+ }
+ host->base_clock = mvsd_data->clock / 2;
+ gpio_card_detect = mvsd_data->gpio_card_detect;
+ gpio_write_protect = mvsd_data->gpio_write_protect;
+ }
mmc->ops = &mvsd_ops;
@@ -765,43 +782,14 @@ static int __init mvsd_probe(struct platform_device *pdev)
goto out;
}
- /* Not all platforms can gate the clock, so it is not
- an error if the clock does not exists. */
- host->clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR(host->clk))
- clk_prepare_enable(host->clk);
-
- if (mvsd_data->gpio_card_detect) {
- ret = devm_gpio_request_one(&pdev->dev,
- mvsd_data->gpio_card_detect,
- GPIOF_IN, DRIVER_NAME " cd");
- if (ret == 0) {
- irq = gpio_to_irq(mvsd_data->gpio_card_detect);
- ret = devm_request_irq(&pdev->dev, irq,
- mvsd_card_detect_irq,
- IRQ_TYPE_EDGE_RISING |
- IRQ_TYPE_EDGE_FALLING,
- DRIVER_NAME " cd", host);
- if (ret == 0)
- host->gpio_card_detect =
- mvsd_data->gpio_card_detect;
- else
- devm_gpio_free(&pdev->dev,
- mvsd_data->gpio_card_detect);
- }
- }
- if (!host->gpio_card_detect)
+ if (gpio_is_valid(gpio_card_detect)) {
+ ret = mmc_gpio_request_cd(mmc, gpio_card_detect);
+ if (ret)
+ goto out;
+ } else
mmc->caps |= MMC_CAP_NEEDS_POLL;
- if (mvsd_data->gpio_write_protect) {
- ret = devm_gpio_request_one(&pdev->dev,
- mvsd_data->gpio_write_protect,
- GPIOF_IN, DRIVER_NAME " wp");
- if (ret == 0) {
- host->gpio_write_protect =
- mvsd_data->gpio_write_protect;
- }
- }
+ mmc_gpio_request_ro(mmc, gpio_write_protect);
setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
platform_set_drvdata(pdev, mmc);
@@ -811,15 +799,17 @@ static int __init mvsd_probe(struct platform_device *pdev)
pr_notice("%s: %s driver initialized, ",
mmc_hostname(mmc), DRIVER_NAME);
- if (host->gpio_card_detect)
+ if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
printk("using GPIO %d for card detection\n",
- host->gpio_card_detect);
+ gpio_card_detect);
else
printk("lacking card detect (fall back to polling)\n");
return 0;
out:
if (mmc) {
+ mmc_gpio_free_cd(mmc);
+ mmc_gpio_free_ro(mmc);
if (!IS_ERR(host->clk))
clk_disable_unprepare(host->clk);
mmc_free_host(mmc);
@@ -834,6 +824,8 @@ static int __exit mvsd_remove(struct platform_device *pdev)
struct mvsd_host *host = mmc_priv(mmc);
+ mmc_gpio_free_cd(mmc);
+ mmc_gpio_free_ro(mmc);
mmc_remove_host(mmc);
del_timer_sync(&host->timer);
mvsd_power_down(host);
@@ -873,12 +865,19 @@ static int mvsd_resume(struct platform_device *dev)
#define mvsd_resume NULL
#endif
+static const struct of_device_id mvsdio_dt_ids[] = {
+ { .compatible = "marvell,orion-sdio" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);
+
static struct platform_driver mvsd_driver = {
.remove = __exit_p(mvsd_remove),
.suspend = mvsd_suspend,
.resume = mvsd_resume,
.driver = {
.name = DRIVER_NAME,
+ .of_match_table = mvsdio_dt_ids,
},
};
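The mvsdio conversion above is an instance of the generic slot-gpio helpers from linux/mmc/slot-gpio.h: the card-detect and write-protect GPIOs are registered once against the mmc_host, the core then services the CD interrupt and answers get_ro via mmc_gpio_get_ro, and the driver drops its private IRQ handler and get_ro callback. The following is a minimal, hedged sketch of the same pattern, not part of the patch; foo_probe() and foo_remove() are hypothetical names and the GPIO numbers come from wherever the platform provides them.

#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int foo_probe(struct mmc_host *mmc, int cd_gpio, int wp_gpio)
{
	int ret;

	if (gpio_is_valid(cd_gpio)) {
		/* core requests the GPIO and its IRQ, and calls
		 * mmc_detect_change() for us on level changes */
		ret = mmc_gpio_request_cd(mmc, cd_gpio);
		if (ret)
			return ret;
	} else {
		mmc->caps |= MMC_CAP_NEEDS_POLL;	/* fall back to polling */
	}

	if (gpio_is_valid(wp_gpio))
		mmc_gpio_request_ro(mmc, wp_gpio);	/* pair with .get_ro = mmc_gpio_get_ro */

	return 0;
}

static void foo_remove(struct mmc_host *mmc)
{
	mmc_gpio_free_cd(mmc);
	mmc_gpio_free_ro(mmc);
}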
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 5b665551a6f3..4efe3021b217 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -354,7 +354,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
struct dma_async_tx_descriptor *desc;
struct scatterlist *sgl = data->sg, *sg;
unsigned int sg_len = data->sg_len;
- int i;
+ unsigned int i;
unsigned short dma_data_dir, timeout;
enum dma_transfer_direction slave_dirn;
@@ -804,3 +804,4 @@ module_platform_driver(mxs_mmc_driver);
MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
MODULE_AUTHOR("Freescale Semiconductor");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 1534b582c419..d720b5e05b9c 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -146,7 +146,7 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
oms->pdata.get_ro = of_mmc_spi_get_ro;
oms->detect_irq = irq_of_parse_and_map(np, 0);
- if (oms->detect_irq != NO_IRQ) {
+ if (oms->detect_irq != 0) {
oms->pdata.init = of_mmc_spi_init;
oms->pdata.exit = of_mmc_spi_exit;
} else {
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index f74b5adca642..f981f7d1f6e3 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -678,12 +678,19 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct mmc_command *cmd = mrq->cmd;
struct mmc_data *data = mrq->data;
unsigned int data_size = 0;
+ int err;
if (host->eject) {
cmd->error = -ENOMEDIUM;
goto finish;
}
+ err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD);
+ if (err) {
+ cmd->error = err;
+ goto finish;
+ }
+
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_start_run(pcr);
@@ -901,6 +908,9 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->eject)
return;
+ if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD))
+ return;
+
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_start_run(pcr);
@@ -1073,6 +1083,10 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->eject)
return -ENOMEDIUM;
+ err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD);
+ if (err)
+ return err;
+
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_start_run(pcr);
@@ -1083,11 +1097,6 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
voltage = OUTPUT_1V8;
if (voltage == OUTPUT_1V8) {
- err = rtsx_pci_write_register(pcr,
- SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
- if (err < 0)
- goto out;
-
err = sd_wait_voltage_stable_1(host);
if (err < 0)
goto out;
@@ -1122,6 +1131,10 @@ static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->eject)
return -ENOMEDIUM;
+ err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD);
+ if (err)
+ return err;
+
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_start_run(pcr);
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
new file mode 100644
index 000000000000..8ffea05152c6
--- /dev/null
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -0,0 +1,210 @@
+/*
+ * BCM2835 SDHCI
+ * Copyright (C) 2012 Stephen Warren
+ * Based on U-Boot's MMC driver for the BCM2835 by Oleksandr Tymoshenko & me
+ * Portions of the code there were obviously based on the Linux kernel at:
+ * git://github.com/raspberrypi/linux.git rpi-3.6.y
+ * commit f5b930b "Main bcm2708 linux port" signed-off-by Dom Cobley.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mmc/host.h>
+#include "sdhci-pltfm.h"
+
+/*
+ * 400KHz is max freq for card ID etc. Use that as min card clock. We need to
+ * know the min to enable static calculation of max BCM2835_SDHCI_WRITE_DELAY.
+ */
+#define MIN_FREQ 400000
+
+/*
+ * The Arasan has a bugette whereby it may lose the content of successive
+ * writes to registers that are within two SD-card clock cycles of each other
+ * (a clock domain crossing problem). It seems, however, that the data
+ * register does not have this problem, which is just as well - otherwise we'd
+ * have to nobble the DMA engine too.
+ *
+ * This should probably be dynamically calculated based on the actual card
+ * frequency. However, this is the longest we'll have to wait, and doesn't
+ * seem to slow access down too much, so the added complexity doesn't seem
+ * worth it for now.
+ *
+ * 1/MIN_FREQ is (max) time per tick of eMMC clock.
+ * 2/MIN_FREQ is time for two ticks.
+ * Multiply by 1000000 to get uS per two ticks.
+ * +1 for hack rounding.
+ */
+#define BCM2835_SDHCI_WRITE_DELAY (((2 * 1000000) / MIN_FREQ) + 1)
+
+struct bcm2835_sdhci {
+ u32 shadow;
+};
+
+static void bcm2835_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ writel(val, host->ioaddr + reg);
+
+ udelay(BCM2835_SDHCI_WRITE_DELAY);
+}
+
+static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + reg);
+
+ if (reg == SDHCI_CAPABILITIES)
+ val |= SDHCI_CAN_VDD_330;
+
+ return val;
+}
+
+static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv;
+ u32 oldval = (reg == SDHCI_COMMAND) ? bcm2835_host->shadow :
+ bcm2835_sdhci_readl(host, reg & ~3);
+ u32 word_num = (reg >> 1) & 1;
+ u32 word_shift = word_num * 16;
+ u32 mask = 0xffff << word_shift;
+ u32 newval = (oldval & ~mask) | (val << word_shift);
+
+ if (reg == SDHCI_TRANSFER_MODE)
+ bcm2835_host->shadow = newval;
+ else
+ bcm2835_sdhci_writel(host, newval, reg & ~3);
+}
+
+static u16 bcm2835_sdhci_readw(struct sdhci_host *host, int reg)
+{
+ u32 val = bcm2835_sdhci_readl(host, (reg & ~3));
+ u32 word_num = (reg >> 1) & 1;
+ u32 word_shift = word_num * 16;
+ u32 word = (val >> word_shift) & 0xffff;
+
+ return word;
+}
+
+static void bcm2835_sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ u32 oldval = bcm2835_sdhci_readl(host, reg & ~3);
+ u32 byte_num = reg & 3;
+ u32 byte_shift = byte_num * 8;
+ u32 mask = 0xff << byte_shift;
+ u32 newval = (oldval & ~mask) | (val << byte_shift);
+
+ bcm2835_sdhci_writel(host, newval, reg & ~3);
+}
+
+static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg)
+{
+ u32 val = bcm2835_sdhci_readl(host, (reg & ~3));
+ u32 byte_num = reg & 3;
+ u32 byte_shift = byte_num * 8;
+ u32 byte = (val >> byte_shift) & 0xff;
+
+ return byte;
+}
+
+static unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
+{
+ return MIN_FREQ;
+}
+
+static struct sdhci_ops bcm2835_sdhci_ops = {
+ .write_l = bcm2835_sdhci_writel,
+ .write_w = bcm2835_sdhci_writew,
+ .write_b = bcm2835_sdhci_writeb,
+ .read_l = bcm2835_sdhci_readl,
+ .read_w = bcm2835_sdhci_readw,
+ .read_b = bcm2835_sdhci_readb,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_min_clock = bcm2835_sdhci_get_min_clock,
+};
+
+static struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+ .ops = &bcm2835_sdhci_ops,
+};
+
+static int bcm2835_sdhci_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct bcm2835_sdhci *bcm2835_host;
+ struct sdhci_pltfm_host *pltfm_host;
+ int ret;
+
+ host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ bcm2835_host = devm_kzalloc(&pdev->dev, sizeof(*bcm2835_host),
+ GFP_KERNEL);
+ if (!bcm2835_host) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to allocate bcm2835_sdhci\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = bcm2835_host;
+
+ pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pltfm_host->clk)) {
+ ret = PTR_ERR(pltfm_host->clk);
+ goto err;
+ }
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int bcm2835_sdhci_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+
+ sdhci_remove_host(host, dead);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835_sdhci_of_match[] = {
+ { .compatible = "brcm,bcm2835-sdhci" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bcm2835_sdhci_of_match);
+
+static struct platform_driver bcm2835_sdhci_driver = {
+ .driver = {
+ .name = "sdhci-bcm2835",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm2835_sdhci_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
+ },
+ .probe = bcm2835_sdhci_probe,
+ .remove = bcm2835_sdhci_remove,
+};
+module_platform_driver(bcm2835_sdhci_driver);
+
+MODULE_DESCRIPTION("BCM2835 SDHCI driver");
+MODULE_AUTHOR("Stephen Warren");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index e07df812ff1e..78ac00227c1a 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -21,6 +21,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -29,12 +30,22 @@
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
-#define SDHCI_CTRL_D3CD 0x08
+#define ESDHC_CTRL_D3CD 0x08
/* VENDOR SPEC register */
-#define SDHCI_VENDOR_SPEC 0xC0
-#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
-#define SDHCI_WTMK_LVL 0x44
-#define SDHCI_MIX_CTRL 0x48
+#define ESDHC_VENDOR_SPEC 0xc0
+#define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1)
+#define ESDHC_WTMK_LVL 0x44
+#define ESDHC_MIX_CTRL 0x48
+#define ESDHC_MIX_CTRL_AC23EN (1 << 7)
+/* Bits 3 and 6 are not SDHCI standard definitions */
+#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7
+
+/*
+ * Our interpretation of the SDHCI_HOST_CONTROL register
+ */
+#define ESDHC_CTRL_4BITBUS (0x1 << 1)
+#define ESDHC_CTRL_8BITBUS (0x2 << 1)
+#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
/*
* There is an INT DMA ERR mis-match between eSDHC and STD SDHC SPEC:
@@ -42,7 +53,7 @@
* but bit28 is used as the INT DMA ERR in fsl eSDHC design.
* Define this macro DMA error INT for fsl eSDHC
*/
-#define SDHCI_INT_VENDOR_SPEC_DMA_ERR 0x10000000
+#define ESDHC_INT_VENDOR_SPEC_DMA_ERR (1 << 28)
/*
* The CMDTYPE of the CMD register (offset 0xE) should be set to
@@ -143,23 +154,8 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
- struct esdhc_platform_data *boarddata = &imx_data->boarddata;
-
- /* fake CARD_PRESENT flag */
u32 val = readl(host->ioaddr + reg);
- if (unlikely((reg == SDHCI_PRESENT_STATE)
- && gpio_is_valid(boarddata->cd_gpio))) {
- if (gpio_get_value(boarddata->cd_gpio))
- /* no card, if a valid gpio says so... */
- val &= ~SDHCI_CARD_PRESENT;
- else
- /* ... in all other cases assume card is present */
- val |= SDHCI_CARD_PRESENT;
- }
-
if (unlikely(reg == SDHCI_CAPABILITIES)) {
/* In FSL esdhc IC module, only bit20 is used to indicate the
* ADMA2 capability of esdhc, but this bit is messed up on
@@ -175,8 +171,8 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
}
if (unlikely(reg == SDHCI_INT_STATUS)) {
- if (val & SDHCI_INT_VENDOR_SPEC_DMA_ERR) {
- val &= ~SDHCI_INT_VENDOR_SPEC_DMA_ERR;
+ if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) {
+ val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
val |= SDHCI_INT_ADMA_ERROR;
}
}
@@ -188,17 +184,9 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
- struct esdhc_platform_data *boarddata = &imx_data->boarddata;
u32 data;
if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
- if (boarddata->cd_type == ESDHC_CD_GPIO)
- /*
- * These interrupts won't work with a custom
- * card_detect gpio (only applied to mx25/35)
- */
- val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
-
if (val & SDHCI_INT_CARD_INT) {
/*
* Clear and then set D3CD bit to avoid missing the
@@ -209,9 +197,9 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
* re-sample it by the following steps.
*/
data = readl(host->ioaddr + SDHCI_HOST_CONTROL);
- data &= ~SDHCI_CTRL_D3CD;
+ data &= ~ESDHC_CTRL_D3CD;
writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
- data |= SDHCI_CTRL_D3CD;
+ data |= ESDHC_CTRL_D3CD;
writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
}
}
@@ -220,15 +208,15 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
&& (reg == SDHCI_INT_STATUS)
&& (val & SDHCI_INT_DATA_END))) {
u32 v;
- v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
- v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK;
- writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
+ v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK;
+ writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
}
if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
if (val & SDHCI_INT_ADMA_ERROR) {
val &= ~SDHCI_INT_ADMA_ERROR;
- val |= SDHCI_INT_VENDOR_SPEC_DMA_ERR;
+ val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR;
}
}
@@ -237,15 +225,18 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
if (unlikely(reg == SDHCI_HOST_VERSION)) {
- u16 val = readw(host->ioaddr + (reg ^ 2));
- /*
- * uSDHC supports SDHCI v3.0, but it's encoded as value
- * 0x3 in host controller version register, which violates
- * SDHCI_SPEC_300 definition. Work it around here.
- */
- if ((val & SDHCI_SPEC_VER_MASK) == 3)
- return --val;
+ reg ^= 2;
+ if (is_imx6q_usdhc(imx_data)) {
+ /*
+ * The usdhc register returns a wrong host version.
+ * Correct it here.
+ */
+ return SDHCI_SPEC_300;
+ }
}
return readw(host->ioaddr + reg);
@@ -258,20 +249,32 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
switch (reg) {
case SDHCI_TRANSFER_MODE:
- /*
- * Postpone this write, we must do it together with a
- * command write that is down below.
- */
if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
&& (host->cmd->opcode == SD_IO_RW_EXTENDED)
&& (host->cmd->data->blocks > 1)
&& (host->cmd->data->flags & MMC_DATA_READ)) {
u32 v;
- v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
- v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK;
- writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
+ v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ v |= ESDHC_VENDOR_SPEC_SDIO_QUIRK;
+ writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
+ }
+
+ if (is_imx6q_usdhc(imx_data)) {
+ u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ /* Swap AC23 bit */
+ if (val & SDHCI_TRNS_AUTO_CMD23) {
+ val &= ~SDHCI_TRNS_AUTO_CMD23;
+ val |= ESDHC_MIX_CTRL_AC23EN;
+ }
+ m = val | (m & ~ESDHC_MIX_CTRL_SDHCI_MASK);
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+ } else {
+ /*
+ * Postpone this write, we must do it together with a
+ * command write that is down below.
+ */
+ imx_data->scratchpad = val;
}
- imx_data->scratchpad = val;
return;
case SDHCI_COMMAND:
if ((host->cmd->opcode == MMC_STOP_TRANSMISSION ||
@@ -279,16 +282,12 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
(imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
val |= SDHCI_CMD_ABORTCMD;
- if (is_imx6q_usdhc(imx_data)) {
- u32 m = readl(host->ioaddr + SDHCI_MIX_CTRL);
- m = imx_data->scratchpad | (m & 0xffff0000);
- writel(m, host->ioaddr + SDHCI_MIX_CTRL);
+ if (is_imx6q_usdhc(imx_data))
writel(val << 16,
host->ioaddr + SDHCI_TRANSFER_MODE);
- } else {
+ else
writel(val << 16 | imx_data->scratchpad,
host->ioaddr + SDHCI_TRANSFER_MODE);
- }
return;
case SDHCI_BLOCK_SIZE:
val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
@@ -302,6 +301,7 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
u32 new_val;
+ u32 mask;
switch (reg) {
case SDHCI_POWER_CONTROL:
@@ -311,10 +311,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
*/
return;
case SDHCI_HOST_CONTROL:
- /* FSL messed up here, so we can just keep those three */
- new_val = val & (SDHCI_CTRL_LED | \
- SDHCI_CTRL_4BITBUS | \
- SDHCI_CTRL_D3CD);
+ /* FSL messed up here, so we need to manually compose it. */
+ new_val = val & SDHCI_CTRL_LED;
/* ensure the endianness */
new_val |= ESDHC_HOST_CONTROL_LE;
/* bits 8&9 are reserved on mx25 */
@@ -323,7 +321,13 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5;
}
- esdhc_clrset_le(host, 0xffff, new_val, reg);
+ /*
+ * Do not touch buswidth bits here. This is done in
+ * esdhc_pltfm_bus_width.
+ */
+ mask = 0xffff & ~ESDHC_CTRL_BUSWIDTH_MASK;
+
+ esdhc_clrset_le(host, mask, new_val, reg);
return;
}
esdhc_clrset_le(host, 0xff, val, reg);
@@ -336,15 +340,15 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
* circuit relies on. To work around it, we turn the clocks on back
* to keep card detection circuit functional.
*/
- if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1))
+ if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1)) {
esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
-}
-
-static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-
- return clk_get_rate(pltfm_host->clk);
+ /*
+ * The reset on usdhc fails to clear MIX_CTRL register.
+ * Do it manually here.
+ */
+ if (is_imx6q_usdhc(imx_data))
+ writel(0, host->ioaddr + ESDHC_MIX_CTRL);
+ }
}
static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
@@ -362,8 +366,7 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
switch (boarddata->wp_type) {
case ESDHC_WP_GPIO:
- if (gpio_is_valid(boarddata->wp_gpio))
- return gpio_get_value(boarddata->wp_gpio);
+ return mmc_gpio_get_ro(host->mmc);
case ESDHC_WP_CONTROLLER:
return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
SDHCI_WRITE_PROTECT);
@@ -374,6 +377,28 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
return -ENOSYS;
}
+static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
+{
+ u32 ctrl;
+
+ switch (width) {
+ case MMC_BUS_WIDTH_8:
+ ctrl = ESDHC_CTRL_8BITBUS;
+ break;
+ case MMC_BUS_WIDTH_4:
+ ctrl = ESDHC_CTRL_4BITBUS;
+ break;
+ default:
+ ctrl = 0;
+ break;
+ }
+
+ esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl,
+ SDHCI_HOST_CONTROL);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
@@ -381,9 +406,10 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.write_w = esdhc_writew_le,
.write_b = esdhc_writeb_le,
.set_clock = esdhc_set_clock,
- .get_max_clock = esdhc_pltfm_get_max_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_min_clock = esdhc_pltfm_get_min_clock,
.get_ro = esdhc_pltfm_get_ro,
+ .platform_bus_width = esdhc_pltfm_bus_width,
};
static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
@@ -394,14 +420,6 @@ static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
.ops = &sdhci_esdhc_ops,
};
-static irqreturn_t cd_irq(int irq, void *data)
-{
- struct sdhci_host *sdhost = (struct sdhci_host *)data;
-
- tasklet_schedule(&sdhost->card_tasklet);
- return IRQ_HANDLED;
-};
-
#ifdef CONFIG_OF
static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
@@ -429,6 +447,8 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
if (gpio_is_valid(boarddata->wp_gpio))
boarddata->wp_type = ESDHC_WP_GPIO;
+ of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
+
return 0;
}
#else
@@ -512,7 +532,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
* to something insane. Change it back here.
*/
if (is_imx6q_usdhc(imx_data))
- writel(0x08100810, host->ioaddr + SDHCI_WTMK_LVL);
+ writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL);
boarddata = &imx_data->boarddata;
if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
@@ -527,37 +547,22 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
/* write_protect */
if (boarddata->wp_type == ESDHC_WP_GPIO) {
- err = devm_gpio_request_one(&pdev->dev, boarddata->wp_gpio,
- GPIOF_IN, "ESDHC_WP");
+ err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
if (err) {
- dev_warn(mmc_dev(host->mmc),
- "no write-protect pin available!\n");
- boarddata->wp_gpio = -EINVAL;
+ dev_err(mmc_dev(host->mmc),
+ "failed to request write-protect gpio!\n");
+ goto disable_clk;
}
- } else {
- boarddata->wp_gpio = -EINVAL;
+ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
}
/* card_detect */
- if (boarddata->cd_type != ESDHC_CD_GPIO)
- boarddata->cd_gpio = -EINVAL;
-
switch (boarddata->cd_type) {
case ESDHC_CD_GPIO:
- err = devm_gpio_request_one(&pdev->dev, boarddata->cd_gpio,
- GPIOF_IN, "ESDHC_CD");
+ err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio);
if (err) {
dev_err(mmc_dev(host->mmc),
- "no card-detect pin available!\n");
- goto disable_clk;
- }
-
- err = devm_request_irq(&pdev->dev,
- gpio_to_irq(boarddata->cd_gpio), cd_irq,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- mmc_hostname(host->mmc), host);
- if (err) {
- dev_err(mmc_dev(host->mmc), "request irq error\n");
+ "failed to request card-detect gpio!\n");
goto disable_clk;
}
/* fall through */
@@ -575,6 +580,19 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
break;
}
+ switch (boarddata->max_bus_width) {
+ case 8:
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
+ break;
+ case 4:
+ host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ default:
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+ break;
+ }
+
err = sdhci_add_host(host);
if (err)
goto disable_clk;
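The bus-width handling added above works in two stages: at probe time the DT "bus-width" property only sets host capability flags, and later, when the core selects a width, it calls the new esdhc_pltfm_bus_width() callback, which programs ESDHC_CTRL_4BITBUS (0x1 << 1) or ESDHC_CTRL_8BITBUS (0x2 << 1) into the field covered by ESDHC_CTRL_BUSWIDTH_MASK. A small standalone check of that field update follows; the constants are copied from the patch and the previous register value is illustrative.

#include <stdio.h>

#define ESDHC_CTRL_4BITBUS		(0x1 << 1)
#define ESDHC_CTRL_8BITBUS		(0x2 << 1)
#define ESDHC_CTRL_BUSWIDTH_MASK	(0x3 << 1)

int main(void)
{
	unsigned host_control = 0xff;		/* illustrative previous value */
	unsigned ctrl = ESDHC_CTRL_8BITBUS;

	/* clear the width field, then set the new width, as esdhc_clrset_le()
	 * is asked to do in esdhc_pltfm_bus_width() */
	host_control = (host_control & ~ESDHC_CTRL_BUSWIDTH_MASK) | ctrl;
	printf("0x%02x\n", host_control);	/* 0xfd: bits 2:1 are now 10b = 8-bit */
	return 0;
}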
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index c7dd0cbc99de..c7ccf3034dad 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -935,7 +935,7 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
return 0;
}
-static int sdhci_pci_8bit_width(struct sdhci_host *host, int width)
+static int sdhci_pci_bus_width(struct sdhci_host *host, int width)
{
u8 ctrl;
@@ -977,7 +977,7 @@ static void sdhci_pci_hw_reset(struct sdhci_host *host)
static struct sdhci_ops sdhci_pci_ops = {
.enable_dma = sdhci_pci_enable_dma,
- .platform_8bit_width = sdhci_pci_8bit_width,
+ .platform_bus_width = sdhci_pci_bus_width,
.hw_reset = sdhci_pci_hw_reset,
};
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index d4283ef5917a..3145a780b035 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -36,6 +36,14 @@
#endif
#include "sdhci-pltfm.h"
+unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk);
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock);
+
static struct sdhci_ops sdhci_pltfm_ops = {
};
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 37e0e184a0bb..153b6c509ebe 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -98,6 +98,8 @@ extern int sdhci_pltfm_register(struct platform_device *pdev,
struct sdhci_pltfm_data *pdata);
extern int sdhci_pltfm_unregister(struct platform_device *pdev);
+extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host);
+
#ifdef CONFIG_PM
extern const struct dev_pm_ops sdhci_pltfm_pmops;
#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index ac854aa192a8..eeb7d439db1d 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -111,17 +111,10 @@ static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
return 0;
}
-static u32 pxav2_get_max_clock(struct sdhci_host *host)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-
- return clk_get_rate(pltfm_host->clk);
-}
-
static struct sdhci_ops pxav2_sdhci_ops = {
- .get_max_clock = pxav2_get_max_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
.platform_reset_exit = pxav2_set_private_registers,
- .platform_8bit_width = pxav2_mmc_set_width,
+ .platform_bus_width = pxav2_mmc_set_width,
};
#ifdef CONFIG_OF
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index fad0966427fd..a0cdbc570a83 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -32,10 +32,14 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
+#define PXAV3_RPM_DELAY_MS 50
+
#define SD_CLOCK_BURST_SIZE_SETUP 0x10A
#define SDCLK_SEL 0x100
#define SDCLK_DELAY_SHIFT 9
@@ -163,18 +167,11 @@ static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
return 0;
}
-static u32 pxav3_get_max_clock(struct sdhci_host *host)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-
- return clk_get_rate(pltfm_host->clk);
-}
-
static struct sdhci_ops pxav3_sdhci_ops = {
.platform_reset_exit = pxav3_set_private_registers,
.set_uhs_signaling = pxav3_set_uhs_signaling,
.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
- .get_max_clock = pxav3_get_max_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
};
#ifdef CONFIG_OF
@@ -303,20 +300,37 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_suspend_ignore_children(&pdev->dev, 1);
+ pm_runtime_get_noresume(&pdev->dev);
+
ret = sdhci_add_host(host);
if (ret) {
dev_err(&pdev->dev, "failed to add host\n");
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
goto err_add_host;
}
platform_set_drvdata(pdev, host);
+ if (pdata->pm_caps & MMC_PM_KEEP_POWER) {
+ device_init_wakeup(&pdev->dev, 1);
+ host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ } else {
+ device_init_wakeup(&pdev->dev, 0);
+ }
+
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
err_add_host:
clk_disable_unprepare(clk);
clk_put(clk);
- mmc_gpio_free_cd(host->mmc);
err_cd_req:
err_clk_get:
sdhci_pltfm_free(pdev);
@@ -329,16 +343,14 @@ static int sdhci_pxav3_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_pxa *pxa = pltfm_host->priv;
- struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ pm_runtime_get_sync(&pdev->dev);
sdhci_remove_host(host, 1);
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
- if (gpio_is_valid(pdata->ext_cd_gpio))
- mmc_gpio_free_cd(host->mmc);
-
sdhci_pltfm_free(pdev);
kfree(pxa);
@@ -347,6 +359,83 @@ static int sdhci_pxav3_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pxav3_suspend(struct device *dev)
+{
+ int ret;
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ ret = sdhci_suspend_host(host);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int sdhci_pxav3_resume(struct device *dev)
+{
+ int ret;
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ ret = sdhci_resume_host(host);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int sdhci_pxav3_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ unsigned long flags;
+
+ if (pltfm_host->clk) {
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ clk_disable_unprepare(pltfm_host->clk);
+ }
+
+ return 0;
+}
+
+static int sdhci_pxav3_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ unsigned long flags;
+
+ if (pltfm_host->clk) {
+ clk_prepare_enable(pltfm_host->clk);
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops sdhci_pxav3_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume)
+ SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend,
+ sdhci_pxav3_runtime_resume, NULL)
+};
+
+#define SDHCI_PXAV3_PMOPS (&sdhci_pxav3_pmops)
+
+#else
+#define SDHCI_PXAV3_PMOPS NULL
+#endif
+
static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
@@ -354,7 +443,7 @@ static struct platform_driver sdhci_pxav3_driver = {
.of_match_table = sdhci_pxav3_of_match,
#endif
.owner = THIS_MODULE,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = SDHCI_PXAV3_PMOPS,
},
.probe = sdhci_pxav3_probe,
.remove = sdhci_pxav3_remove,
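The runtime-PM additions above follow the usual bring-up order for a host driver: mark the device active and enable runtime PM before registering the host, hold a get_noresume() reference across registration, and drop it with put_autosuspend() only once setup is complete; remove does the reverse with get_sync() before tearing the host down. A condensed sketch of that ordering is below; it is not part of the patch, and foo_probe(), foo_remove(), foo_hw_setup() and foo_hw_teardown() are hypothetical stand-ins for the controller-specific work (e.g. sdhci_add_host()/sdhci_remove_host()).

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

int foo_hw_setup(struct platform_device *pdev);		/* hypothetical */
void foo_hw_teardown(struct platform_device *pdev);	/* hypothetical */

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_set_active(&pdev->dev);	/* hardware is already powered */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);	/* stay resumed while registering */

	ret = foo_hw_setup(pdev);
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	pm_runtime_put_autosuspend(&pdev->dev);	/* allow autosuspend from now on */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	pm_runtime_get_sync(&pdev->dev);	/* resume before touching hardware */
	foo_hw_teardown(pdev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}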
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index a0c621421ee8..7363efe72287 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -332,14 +332,14 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
}
/**
- * sdhci_s3c_platform_8bit_width - support 8bit buswidth
+ * sdhci_s3c_platform_bus_width - support 8bit buswidth
* @host: The SDHCI host being queried
* @width: MMC_BUS_WIDTH_ macro for the bus width being requested
*
 * We have 8-bit width support but this is not a v3 controller.
- * So we add platform_8bit_width() and support 8bit width.
+ * So we add platform_bus_width() and support 8bit width.
*/
-static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
+static int sdhci_s3c_platform_bus_width(struct sdhci_host *host, int width)
{
u8 ctrl;
@@ -369,7 +369,7 @@ static struct sdhci_ops sdhci_s3c_ops = {
.get_max_clock = sdhci_s3c_get_max_clk,
.set_clock = sdhci_s3c_set_clock,
.get_min_clock = sdhci_s3c_get_min_clock,
- .platform_8bit_width = sdhci_s3c_platform_8bit_width,
+ .platform_bus_width = sdhci_s3c_platform_bus_width,
};
static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 3695b2e0cbd2..08b06e9a3a21 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -27,8 +27,6 @@
#include <asm/gpio.h>
-#include <linux/platform_data/mmc-sdhci-tegra.h>
-
#include "sdhci-pltfm.h"
/* Tegra SDHOST controller vendor register definitions */
@@ -45,8 +43,11 @@ struct sdhci_tegra_soc_data {
};
struct sdhci_tegra {
- const struct tegra_sdhci_platform_data *plat;
const struct sdhci_tegra_soc_data *soc_data;
+ int cd_gpio;
+ int wp_gpio;
+ int power_gpio;
+ int is_8bit;
};
static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
@@ -108,12 +109,11 @@ static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
- const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
- if (!gpio_is_valid(plat->wp_gpio))
+ if (!gpio_is_valid(tegra_host->wp_gpio))
return -1;
- return gpio_get_value(plat->wp_gpio);
+ return gpio_get_value(tegra_host->wp_gpio);
}
static irqreturn_t carddetect_irq(int irq, void *data)
@@ -143,15 +143,14 @@ static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
}
}
-static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
+static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
- const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
u32 ctrl;
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
- if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
+ if (tegra_host->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
ctrl &= ~SDHCI_CTRL_4BITBUS;
ctrl |= SDHCI_CTRL_8BITBUS;
} else {
@@ -170,7 +169,7 @@ static struct sdhci_ops tegra_sdhci_ops = {
.read_l = tegra_sdhci_readl,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
- .platform_8bit_width = tegra_sdhci_8bit,
+ .platform_bus_width = tegra_sdhci_buswidth,
.platform_reset_exit = tegra_sdhci_reset_exit,
};
@@ -217,31 +216,19 @@ static const struct of_device_id sdhci_tegra_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, sdhci_dt_ids);
-static struct tegra_sdhci_platform_data *sdhci_tegra_dt_parse_pdata(
- struct platform_device *pdev)
+static void sdhci_tegra_parse_dt(struct device *dev,
+ struct sdhci_tegra *tegra_host)
{
- struct tegra_sdhci_platform_data *plat;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
u32 bus_width;
- if (!np)
- return NULL;
-
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
- if (!plat) {
- dev_err(&pdev->dev, "Can't allocate platform data\n");
- return NULL;
- }
-
- plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
- plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
- plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
+ tegra_host->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
+ tegra_host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+ tegra_host->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
if (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
bus_width == 8)
- plat->is_8bit = 1;
-
- return plat;
+ tegra_host->is_8bit = 1;
}
static int sdhci_tegra_probe(struct platform_device *pdev)
@@ -250,7 +237,6 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
const struct sdhci_tegra_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
- struct tegra_sdhci_platform_data *plat;
struct sdhci_tegra *tegra_host;
struct clk *clk;
int rc;
@@ -263,52 +249,40 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
host = sdhci_pltfm_init(pdev, soc_data->pdata);
if (IS_ERR(host))
return PTR_ERR(host);
-
pltfm_host = sdhci_priv(host);
- plat = pdev->dev.platform_data;
-
- if (plat == NULL)
- plat = sdhci_tegra_dt_parse_pdata(pdev);
-
- if (plat == NULL) {
- dev_err(mmc_dev(host->mmc), "missing platform data\n");
- rc = -ENXIO;
- goto err_no_plat;
- }
-
tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
if (!tegra_host) {
dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
rc = -ENOMEM;
- goto err_no_plat;
+ goto err_alloc_tegra_host;
}
-
- tegra_host->plat = plat;
tegra_host->soc_data = soc_data;
-
pltfm_host->priv = tegra_host;
- if (gpio_is_valid(plat->power_gpio)) {
- rc = gpio_request(plat->power_gpio, "sdhci_power");
+ sdhci_tegra_parse_dt(&pdev->dev, tegra_host);
+
+ if (gpio_is_valid(tegra_host->power_gpio)) {
+ rc = gpio_request(tegra_host->power_gpio, "sdhci_power");
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate power gpio\n");
goto err_power_req;
}
- gpio_direction_output(plat->power_gpio, 1);
+ gpio_direction_output(tegra_host->power_gpio, 1);
}
- if (gpio_is_valid(plat->cd_gpio)) {
- rc = gpio_request(plat->cd_gpio, "sdhci_cd");
+ if (gpio_is_valid(tegra_host->cd_gpio)) {
+ rc = gpio_request(tegra_host->cd_gpio, "sdhci_cd");
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate cd gpio\n");
goto err_cd_req;
}
- gpio_direction_input(plat->cd_gpio);
+ gpio_direction_input(tegra_host->cd_gpio);
- rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
+ rc = request_irq(gpio_to_irq(tegra_host->cd_gpio),
+ carddetect_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
mmc_hostname(host->mmc), host);
@@ -319,14 +293,14 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
}
- if (gpio_is_valid(plat->wp_gpio)) {
- rc = gpio_request(plat->wp_gpio, "sdhci_wp");
+ if (gpio_is_valid(tegra_host->wp_gpio)) {
+ rc = gpio_request(tegra_host->wp_gpio, "sdhci_wp");
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate wp gpio\n");
goto err_wp_req;
}
- gpio_direction_input(plat->wp_gpio);
+ gpio_direction_input(tegra_host->wp_gpio);
}
clk = clk_get(mmc_dev(host->mmc), NULL);
@@ -338,9 +312,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
clk_prepare_enable(clk);
pltfm_host->clk = clk;
- host->mmc->pm_caps = plat->pm_flags;
-
- if (plat->is_8bit)
+ if (tegra_host->is_8bit)
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
rc = sdhci_add_host(host);
@@ -353,19 +325,19 @@ err_add_host:
clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
err_clk_get:
- if (gpio_is_valid(plat->wp_gpio))
- gpio_free(plat->wp_gpio);
+ if (gpio_is_valid(tegra_host->wp_gpio))
+ gpio_free(tegra_host->wp_gpio);
err_wp_req:
- if (gpio_is_valid(plat->cd_gpio))
- free_irq(gpio_to_irq(plat->cd_gpio), host);
+ if (gpio_is_valid(tegra_host->cd_gpio))
+ free_irq(gpio_to_irq(tegra_host->cd_gpio), host);
err_cd_irq_req:
- if (gpio_is_valid(plat->cd_gpio))
- gpio_free(plat->cd_gpio);
+ if (gpio_is_valid(tegra_host->cd_gpio))
+ gpio_free(tegra_host->cd_gpio);
err_cd_req:
- if (gpio_is_valid(plat->power_gpio))
- gpio_free(plat->power_gpio);
+ if (gpio_is_valid(tegra_host->power_gpio))
+ gpio_free(tegra_host->power_gpio);
err_power_req:
-err_no_plat:
+err_alloc_tegra_host:
sdhci_pltfm_free(pdev);
return rc;
}
@@ -375,21 +347,20 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
- const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
sdhci_remove_host(host, dead);
- if (gpio_is_valid(plat->wp_gpio))
- gpio_free(plat->wp_gpio);
+ if (gpio_is_valid(tegra_host->wp_gpio))
+ gpio_free(tegra_host->wp_gpio);
- if (gpio_is_valid(plat->cd_gpio)) {
- free_irq(gpio_to_irq(plat->cd_gpio), host);
- gpio_free(plat->cd_gpio);
+ if (gpio_is_valid(tegra_host->cd_gpio)) {
+ free_irq(gpio_to_irq(tegra_host->cd_gpio), host);
+ gpio_free(tegra_host->cd_gpio);
}
- if (gpio_is_valid(plat->power_gpio))
- gpio_free(plat->power_gpio);
+ if (gpio_is_valid(tegra_host->power_gpio))
+ gpio_free(tegra_host->power_gpio);
clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6f0bfc0c8c9c..51bbba486f38 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -53,6 +53,7 @@ static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
+static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
#ifdef CONFIG_PM_RUNTIME
static int sdhci_runtime_pm_get(struct sdhci_host *host);
@@ -1082,6 +1083,37 @@ static void sdhci_finish_command(struct sdhci_host *host)
}
}
+static u16 sdhci_get_preset_value(struct sdhci_host *host)
+{
+ u16 ctrl, preset = 0;
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ switch (ctrl & SDHCI_CTRL_UHS_MASK) {
+ case SDHCI_CTRL_UHS_SDR12:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
+ break;
+ case SDHCI_CTRL_UHS_SDR25:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
+ break;
+ case SDHCI_CTRL_UHS_SDR50:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
+ break;
+ case SDHCI_CTRL_UHS_SDR104:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
+ break;
+ case SDHCI_CTRL_UHS_DDR50:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
+ break;
+ default:
+ pr_warn("%s: Invalid UHS-I mode selected\n",
+ mmc_hostname(host->mmc));
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
+ break;
+ }
+ return preset;
+}
+
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
int div = 0; /* Initialized for compiler warning */
@@ -1106,35 +1138,43 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
goto out;
if (host->version >= SDHCI_SPEC_300) {
+ if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
+ SDHCI_CTRL_PRESET_VAL_ENABLE) {
+ u16 pre_val;
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ pre_val = sdhci_get_preset_value(host);
+ div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
+ >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
+ if (host->clk_mul &&
+ (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
+ clk = SDHCI_PROG_CLOCK_MODE;
+ real_div = div + 1;
+ clk_mul = host->clk_mul;
+ } else {
+ real_div = max_t(int, 1, div << 1);
+ }
+ goto clock_set;
+ }
+
/*
* Check if the Host Controller supports Programmable Clock
* Mode.
*/
if (host->clk_mul) {
- u16 ctrl;
-
+ for (div = 1; div <= 1024; div++) {
+ if ((host->max_clk * host->clk_mul / div)
+ <= clock)
+ break;
+ }
/*
- * We need to figure out whether the Host Driver needs
- * to select Programmable Clock Mode, or the value can
- * be set automatically by the Host Controller based on
- * the Preset Value registers.
+ * Set Programmable Clock Mode in the Clock
+ * Control register.
*/
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- if (!(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
- for (div = 1; div <= 1024; div++) {
- if (((host->max_clk * host->clk_mul) /
- div) <= clock)
- break;
- }
- /*
- * Set Programmable Clock Mode in the Clock
- * Control register.
- */
- clk = SDHCI_PROG_CLOCK_MODE;
- real_div = div;
- clk_mul = host->clk_mul;
- div--;
- }
+ clk = SDHCI_PROG_CLOCK_MODE;
+ real_div = div;
+ clk_mul = host->clk_mul;
+ div--;
} else {
/* Version 3.00 divisors must be a multiple of 2. */
if (host->max_clk <= clock)
@@ -1159,6 +1199,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
div >>= 1;
}
+clock_set:
if (real_div)
host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
@@ -1189,6 +1230,15 @@ out:
host->clock = clock;
}
+static inline void sdhci_update_clock(struct sdhci_host *host)
+{
+ unsigned int clock;
+
+ clock = host->clock;
+ host->clock = 0;
+ sdhci_set_clock(host, clock);
+}
+
static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
u8 pwr = 0;
@@ -1258,7 +1308,7 @@ static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdhci_host *host;
- bool present;
+ int present;
unsigned long flags;
u32 tuning_opcode;
@@ -1287,18 +1337,21 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq = mrq;
- /* If polling, assume that the card is always present. */
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
- present = true;
- else
- present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
- SDHCI_CARD_PRESENT;
-
- /* If we're using a cd-gpio, testing the presence bit might fail. */
- if (!present) {
- int ret = mmc_gpio_get_cd(host->mmc);
- if (ret > 0)
- present = true;
+ /*
+ * First, check card presence from the cd-gpio. The return could
+ * be one of the following possibilities:
+ * negative: cd-gpio is not available
+ * zero: cd-gpio is used, and card is removed
+ * one: cd-gpio is used, and card is present
+ */
+ present = mmc_gpio_get_cd(host->mmc);
+ if (present < 0) {
+ /* If polling, assume that the card is always present. */
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ present = 1;
+ else
+ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
}
if (!present || host->flags & SDHCI_DEVICE_DEAD) {
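For clarity, the detection order introduced above can be read as one small helper. This is only an illustrative sketch (the helper name is invented); it restates the logic now used in sdhci_request(), where the cd-gpio answer wins over the broken-card-detection quirk, which in turn wins over the controller's PRESENT_STATE register.

/* Illustrative sketch of the card-detect precedence in sdhci_request(). */
static int sdhci_sketch_card_present(struct sdhci_host *host)
{
	int present = mmc_gpio_get_cd(host->mmc);	/* < 0: no cd-gpio */

	if (present >= 0)
		return present;				/* gpio answer is authoritative */

	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;				/* polling: assume present */

	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}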
@@ -1364,6 +1417,10 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
sdhci_reinit(host);
}
+ if (host->version >= SDHCI_SPEC_300 &&
+ (ios->power_mode == MMC_POWER_UP))
+ sdhci_enable_preset_value(host, false);
+
sdhci_set_clock(host, ios->clock);
if (ios->power_mode == MMC_POWER_OFF)
@@ -1383,11 +1440,11 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
/*
* If your platform has 8-bit width support but is not a v3 controller,
* or if it requires special setup code, you should implement that in
- * platform_8bit_width().
+ * platform_bus_width().
*/
- if (host->ops->platform_8bit_width)
- host->ops->platform_8bit_width(host, ios->bus_width);
- else {
+ if (host->ops->platform_bus_width) {
+ host->ops->platform_bus_width(host, ios->bus_width);
+ } else {
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
if (ios->bus_width == MMC_BUS_WIDTH_8) {
ctrl &= ~SDHCI_CTRL_4BITBUS;
@@ -1415,7 +1472,6 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
if (host->version >= SDHCI_SPEC_300) {
u16 clk, ctrl_2;
- unsigned int clock;
/* In case of UHS-I modes, set High Speed Enable */
if ((ios->timing == MMC_TIMING_MMC_HS200) ||
@@ -1455,9 +1511,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/* Re-enable SD Clock */
- clock = host->clock;
- host->clock = 0;
- sdhci_set_clock(host, clock);
+ sdhci_update_clock(host);
}
@@ -1487,10 +1541,22 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
+ if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
+ ((ios->timing == MMC_TIMING_UHS_SDR12) ||
+ (ios->timing == MMC_TIMING_UHS_SDR25) ||
+ (ios->timing == MMC_TIMING_UHS_SDR50) ||
+ (ios->timing == MMC_TIMING_UHS_SDR104) ||
+ (ios->timing == MMC_TIMING_UHS_DDR50))) {
+ u16 preset;
+
+ sdhci_enable_preset_value(host, true);
+ preset = sdhci_get_preset_value(host);
+ ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
+ >> SDHCI_PRESET_DRV_SHIFT;
+ }
+
/* Re-enable SD Clock */
- clock = host->clock;
- host->clock = 0;
- sdhci_set_clock(host, clock);
+ sdhci_update_clock(host);
} else
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
@@ -1608,141 +1674,91 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
spin_unlock_irqrestore(&host->lock, flags);
}
-static int sdhci_do_3_3v_signal_voltage_switch(struct sdhci_host *host,
- u16 ctrl)
+static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
+ struct mmc_ios *ios)
{
+ u16 ctrl;
int ret;
- /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
- ctrl &= ~SDHCI_CTRL_VDD_180;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
-
- if (host->vqmmc) {
- ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000);
- if (ret) {
- pr_warning("%s: Switching to 3.3V signalling voltage "
- " failed\n", mmc_hostname(host->mmc));
- return -EIO;
- }
- }
- /* Wait for 5ms */
- usleep_range(5000, 5500);
+ /*
+ * Signal Voltage Switching is only applicable for Host Controllers
+ * v3.00 and above.
+ */
+ if (host->version < SDHCI_SPEC_300)
+ return 0;
- /* 3.3V regulator output should be stable within 5 ms */
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- if (!(ctrl & SDHCI_CTRL_VDD_180))
- return 0;
- pr_warning("%s: 3.3V regulator output did not became stable\n",
- mmc_hostname(host->mmc));
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
+ ctrl &= ~SDHCI_CTRL_VDD_180;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
- return -EIO;
-}
+ if (host->vqmmc) {
+ ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000);
+ if (ret) {
+ pr_warning("%s: Switching to 3.3V signalling voltage "
+ " failed\n", mmc_hostname(host->mmc));
+ return -EIO;
+ }
+ }
+ /* Wait for 5ms */
+ usleep_range(5000, 5500);
-static int sdhci_do_1_8v_signal_voltage_switch(struct sdhci_host *host,
- u16 ctrl)
-{
- u8 pwr;
- u16 clk;
- u32 present_state;
- int ret;
+ /* 3.3V regulator output should be stable within 5 ms */
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (!(ctrl & SDHCI_CTRL_VDD_180))
+ return 0;
- /* Stop SDCLK */
- clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- clk &= ~SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ pr_warning("%s: 3.3V regulator output did not became stable\n",
+ mmc_hostname(host->mmc));
+
+ return -EAGAIN;
+ case MMC_SIGNAL_VOLTAGE_180:
+ if (host->vqmmc) {
+ ret = regulator_set_voltage(host->vqmmc,
+ 1700000, 1950000);
+ if (ret) {
+ pr_warning("%s: Switching to 1.8V signalling voltage "
+ " failed\n", mmc_hostname(host->mmc));
+ return -EIO;
+ }
+ }
- /* Check whether DAT[3:0] is 0000 */
- present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
- if (!((present_state & SDHCI_DATA_LVL_MASK) >>
- SDHCI_DATA_LVL_SHIFT)) {
/*
* Enable 1.8V Signal Enable in the Host Control2
* register
*/
- if (host->vqmmc)
- ret = regulator_set_voltage(host->vqmmc,
- 1700000, 1950000);
- else
- ret = 0;
+ ctrl |= SDHCI_CTRL_VDD_180;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
- if (!ret) {
- ctrl |= SDHCI_CTRL_VDD_180;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ /* Wait for 5ms */
+ usleep_range(5000, 5500);
- /* Wait for 5ms */
- usleep_range(5000, 5500);
+ /* 1.8V regulator output should be stable within 5 ms */
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (ctrl & SDHCI_CTRL_VDD_180)
+ return 0;
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- if (ctrl & SDHCI_CTRL_VDD_180) {
- /* Provide SDCLK again and wait for 1ms */
- clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- clk |= SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
- usleep_range(1000, 1500);
+ pr_warning("%s: 1.8V regulator output did not became stable\n",
+ mmc_hostname(host->mmc));
- /*
- * If DAT[3:0] level is 1111b, then the card
- * was successfully switched to 1.8V signaling.
- */
- present_state = sdhci_readl(host,
- SDHCI_PRESENT_STATE);
- if ((present_state & SDHCI_DATA_LVL_MASK) ==
- SDHCI_DATA_LVL_MASK)
- return 0;
+ return -EAGAIN;
+ case MMC_SIGNAL_VOLTAGE_120:
+ if (host->vqmmc) {
+ ret = regulator_set_voltage(host->vqmmc, 1100000, 1300000);
+ if (ret) {
+ pr_warning("%s: Switching to 1.2V signalling voltage "
+ " failed\n", mmc_hostname(host->mmc));
+ return -EIO;
}
}
- }
-
- /*
- * If we are here, that means the switch to 1.8V signaling
- * failed. We power cycle the card, and retry initialization
- * sequence by setting S18R to 0.
- */
- pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
- pwr &= ~SDHCI_POWER_ON;
- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
- if (host->vmmc)
- regulator_disable(host->vmmc);
-
- /* Wait for 1ms as per the spec */
- usleep_range(1000, 1500);
- pwr |= SDHCI_POWER_ON;
- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
- if (host->vmmc)
- regulator_enable(host->vmmc);
-
- pr_warning("%s: Switching to 1.8V signalling voltage failed, "
- "retrying with S18R set to 0\n", mmc_hostname(host->mmc));
-
- return -EAGAIN;
-}
-
-static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
- struct mmc_ios *ios)
-{
- u16 ctrl;
-
- /*
- * Signal Voltage Switching is only applicable for Host Controllers
- * v3.00 and above.
- */
- if (host->version < SDHCI_SPEC_300)
return 0;
-
- /*
- * We first check whether the request is to set signalling voltage
- * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
- */
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
- return sdhci_do_3_3v_signal_voltage_switch(host, ctrl);
- else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
- (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180))
- return sdhci_do_1_8v_signal_voltage_switch(host, ctrl);
- else
+ default:
/* No signal voltage switch required */
return 0;
+ }
}
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
@@ -1759,6 +1775,19 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
return err;
}
+static int sdhci_card_busy(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 present_state;
+
+ sdhci_runtime_pm_get(host);
+ /* Check whether DAT[3:0] is 0000 */
+ present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ sdhci_runtime_pm_put(host);
+
+ return !(present_state & SDHCI_DATA_LVL_MASK);
+}
+
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host;
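The new .card_busy callback reports whether DAT[3:0] are still driven low. A caller (for example, a voltage-switch sequence in the core) could poll it roughly as in the hedged sketch below; the helper name and the 10 ms budget are illustrative and not part of this patch.

/* Illustrative: poll a host's ->card_busy() until the data lines go high
 * or a small, arbitrary timeout expires. */
static int sketch_wait_card_not_busy(struct mmc_host *mmc)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	if (!mmc->ops->card_busy)
		return 0;			/* nothing to poll */

	while (mmc->ops->card_busy(mmc)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(100, 200);
	}
	return 0;
}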
@@ -1955,17 +1984,15 @@ out:
return err;
}
-static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable)
+
+static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
u16 ctrl;
- unsigned long flags;
/* Host Controller v3.00 defines preset value registers */
if (host->version < SDHCI_SPEC_300)
return;
- spin_lock_irqsave(&host->lock, flags);
-
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/*
@@ -1981,17 +2008,6 @@ static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable)
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
host->flags &= ~SDHCI_PV_ENABLED;
}
-
- spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
-{
- struct sdhci_host *host = mmc_priv(mmc);
-
- sdhci_runtime_pm_get(host);
- sdhci_do_enable_preset_value(host, enable);
- sdhci_runtime_pm_put(host);
}
static void sdhci_card_event(struct mmc_host *mmc)
@@ -2027,8 +2043,8 @@ static const struct mmc_host_ops sdhci_ops = {
.enable_sdio_irq = sdhci_enable_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
.execute_tuning = sdhci_execute_tuning,
- .enable_preset_value = sdhci_enable_preset_value,
.card_event = sdhci_card_event,
+ .card_busy = sdhci_card_busy,
};
/*****************************************************************************\
@@ -2080,14 +2096,9 @@ static void sdhci_tasklet_finish(unsigned long param)
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
/* Some controllers need this kick or reset won't work here */
- if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
- unsigned int clock;
-
+ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
- clock = host->clock;
- host->clock = 0;
- sdhci_set_clock(host, clock);
- }
+ sdhci_update_clock(host);
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
@@ -2455,6 +2466,32 @@ out:
\*****************************************************************************/
#ifdef CONFIG_PM
+void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+{
+ u8 val;
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
+ | SDHCI_WAKE_ON_INT;
+
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val |= mask;
+ /* Avoid fake wake up */
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
+ sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+}
+EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
+
+void sdhci_disable_irq_wakeups(struct sdhci_host *host)
+{
+ u8 val;
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
+ | SDHCI_WAKE_ON_INT;
+
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val &= ~mask;
+ sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+}
+EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
int sdhci_suspend_host(struct sdhci_host *host)
{
@@ -2484,8 +2521,13 @@ int sdhci_suspend_host(struct sdhci_host *host)
return ret;
}
- free_irq(host->irq, host);
-
+ if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
+ free_irq(host->irq, host);
+ } else {
+ sdhci_enable_irq_wakeups(host);
+ enable_irq_wake(host->irq);
+ }
return ret;
}
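Since sdhci_enable_irq_wakeups()/sdhci_disable_irq_wakeups() are now exported, platform glue drivers can arm extra wake sources around the core suspend path. A hedged sketch of such a caller follows; the function name, the drvdata assumption (drvdata holding the sdhci_host, as sdhci-pltfm based drivers do), and the pm_caps check are illustrative, not taken from this patch.

/* Illustrative glue suspend hook keeping the card-interrupt wake source
 * armed for SDIO wake-up, on top of the core's own handling. */
static int example_glue_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret;

	ret = sdhci_suspend_host(host);
	if (ret)
		return ret;

	/* Keep the SDIO card interrupt as a wake source */
	if (host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ)
		sdhci_enable_irq_wakeups(host);

	return 0;
}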
@@ -2500,10 +2542,15 @@ int sdhci_resume_host(struct sdhci_host *host)
host->ops->enable_dma(host);
}
- ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
- mmc_hostname(host->mmc), host);
- if (ret)
- return ret;
+ if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
+ mmc_hostname(host->mmc), host);
+ if (ret)
+ return ret;
+ } else {
+ sdhci_disable_irq_wakeups(host);
+ disable_irq_wake(host->irq);
+ }
if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
(host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
@@ -2531,17 +2578,6 @@ int sdhci_resume_host(struct sdhci_host *host)
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);
-
-void sdhci_enable_irq_wakeups(struct sdhci_host *host)
-{
- u8 val;
- val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
- val |= SDHCI_WAKE_ON_INT;
- sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
-}
-
-EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
-
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME
@@ -2600,8 +2636,12 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
sdhci_do_set_ios(host, &host->mmc->ios);
sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
- if (host_flags & SDHCI_PV_ENABLED)
- sdhci_do_enable_preset_value(host, true);
+ if ((host_flags & SDHCI_PV_ENABLED) &&
+ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_enable_preset_value(host, true);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
/* Set the re-tuning expiration flag */
if (host->flags & SDHCI_USING_RETUNING_TIMER)
@@ -2936,7 +2976,11 @@ int sdhci_add_host(struct sdhci_host *host)
}
#ifdef CONFIG_REGULATOR
- if (host->vmmc) {
+ /*
+ * Voltage range check makes sense only if regulator reports
+ * any voltage value.
+ */
+ if (host->vmmc && regulator_get_voltage(host->vmmc) > 0) {
ret = regulator_is_supported_voltage(host->vmmc, 2700000,
3600000);
if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330)))
@@ -3139,6 +3183,7 @@ int sdhci_add_host(struct sdhci_host *host)
#ifdef SDHCI_USE_LEDS_CLASS
reset:
sdhci_reset(host, SDHCI_RESET_ALL);
+ sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
free_irq(host->irq, host);
#endif
untasklet:
@@ -3181,6 +3226,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
if (!dead)
sdhci_reset(host, SDHCI_RESET_ALL);
+ sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
free_irq(host->irq, host);
del_timer_sync(&host->timer);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index a6d69b7bdea2..379e09d9f3c1 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -229,6 +229,18 @@
/* 60-FB reserved */
+#define SDHCI_PRESET_FOR_SDR12 0x66
+#define SDHCI_PRESET_FOR_SDR25 0x68
+#define SDHCI_PRESET_FOR_SDR50 0x6A
+#define SDHCI_PRESET_FOR_SDR104 0x6C
+#define SDHCI_PRESET_FOR_DDR50 0x6E
+#define SDHCI_PRESET_DRV_MASK 0xC000
+#define SDHCI_PRESET_DRV_SHIFT 14
+#define SDHCI_PRESET_CLKGEN_SEL_MASK 0x400
+#define SDHCI_PRESET_CLKGEN_SEL_SHIFT 10
+#define SDHCI_PRESET_SDCLK_FREQ_MASK 0x3FF
+#define SDHCI_PRESET_SDCLK_FREQ_SHIFT 0
+
#define SDHCI_SLOT_INT_STATUS 0xFC
#define SDHCI_HOST_VERSION 0xFE
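The preset-value registers added above pack three fields into one 16-bit word: the SDCLK frequency select, the clock-generator select, and the driver strength. A minimal decode sketch using only these masks (the helper name and the sample value are illustrative):

/* Pure-arithmetic decode of a raw 16-bit preset value. */
static void sketch_decode_preset(u16 preset)
{
	unsigned int freq_sel = (preset & SDHCI_PRESET_SDCLK_FREQ_MASK)
					>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
	unsigned int clkgen   = (preset & SDHCI_PRESET_CLKGEN_SEL_MASK)
					>> SDHCI_PRESET_CLKGEN_SEL_SHIFT;
	unsigned int drv_type = (preset & SDHCI_PRESET_DRV_MASK)
					>> SDHCI_PRESET_DRV_SHIFT;

	/* e.g. preset == 0x4402: freq_sel = 2, clkgen = 1 (programmable
	 * clock mode, as used in sdhci_set_clock()), drv_type = 1 (fed to
	 * ios->drv_type in sdhci_do_set_ios()). */
	pr_debug("freq_sel=%u clkgen=%u drv_type=%u\n",
		 freq_sel, clkgen, drv_type);
}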
@@ -269,7 +281,7 @@ struct sdhci_ops {
unsigned int (*get_max_clock)(struct sdhci_host *host);
unsigned int (*get_min_clock)(struct sdhci_host *host);
unsigned int (*get_timeout_clock)(struct sdhci_host *host);
- int (*platform_8bit_width)(struct sdhci_host *host,
+ int (*platform_bus_width)(struct sdhci_host *host,
int width);
void (*platform_send_init_74_clocks)(struct sdhci_host *host,
u8 power_mode);
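With the rename, the callback now owns the whole bus-width selection instead of only the 8-bit case. A hedged sketch of a host-specific implementation; only the hook signature and the MMC_BUS_WIDTH_* constants come from the tree, while MY_VENDOR_BUS_CTRL and its bits are invented placeholders.

/* Illustrative implementation of the renamed ->platform_bus_width() hook. */
static int example_platform_bus_width(struct sdhci_host *host, int width)
{
	u32 ctrl = sdhci_readl(host, MY_VENDOR_BUS_CTRL);

	ctrl &= ~(MY_VENDOR_BUS_8BIT | MY_VENDOR_BUS_4BIT);
	switch (width) {
	case MMC_BUS_WIDTH_8:
		ctrl |= MY_VENDOR_BUS_8BIT;
		break;
	case MMC_BUS_WIDTH_4:
		ctrl |= MY_VENDOR_BUS_4BIT;
		break;
	default:
		break;		/* 1-bit: leave both bits clear */
	}
	sdhci_writel(host, ctrl, MY_VENDOR_BUS_CTRL);

	return 0;
}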
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 9a4c151067dd..ba76a532ae30 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -56,6 +56,7 @@
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
@@ -88,6 +89,7 @@
#define CMD_SET_TBIT (1 << 7) /* 1: tran mission bit "Low" */
#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH (1 << 5)
+#define CMD_SET_DARS (1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
@@ -127,6 +129,10 @@
INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
INT_RDATTO | INT_RBSYTO | INT_RSPTO)
+#define INT_ALL (INT_RBSYE | INT_CRSPE | INT_BUFREN | \
+ INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
+ INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
+
/* CE_INT_MASK */
#define MASK_ALL 0x00000000
#define MASK_MCCSDE (1 << 29)
@@ -158,6 +164,11 @@
MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
+#define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \
+ MASK_MBUFREN | MASK_MBUFWEN | \
+ MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE | \
+ MASK_MCMD12RBE | MASK_MCMD12CRE)
+
/* CE_HOST_STS1 */
#define STS1_CMDSEQ (1 << 31)
@@ -195,6 +206,7 @@ enum mmcif_state {
STATE_IDLE,
STATE_REQUEST,
STATE_IOS,
+ STATE_TIMEOUT,
};
enum mmcif_wait_for {
@@ -216,6 +228,7 @@ struct sh_mmcif_host {
struct clk *hclk;
unsigned int clk;
int bus_width;
+ unsigned char timing;
bool sd_error;
bool dying;
long timeout;
@@ -230,6 +243,7 @@ struct sh_mmcif_host {
int sg_blkidx;
bool power;
bool card_present;
+ struct mutex thread_lock;
/* DMA support */
struct dma_chan *chan_rx;
@@ -253,23 +267,14 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
static void mmcif_dma_complete(void *arg)
{
struct sh_mmcif_host *host = arg;
- struct mmc_data *data = host->mrq->data;
+ struct mmc_request *mrq = host->mrq;
dev_dbg(&host->pd->dev, "Command completed\n");
- if (WARN(!data, "%s: NULL data in DMA completion!\n",
+ if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
dev_name(&host->pd->dev)))
return;
- if (data->flags & MMC_DATA_READ)
- dma_unmap_sg(host->chan_rx->device->dev,
- data->sg, data->sg_len,
- DMA_FROM_DEVICE);
- else
- dma_unmap_sg(host->chan_tx->device->dev,
- data->sg, data->sg_len,
- DMA_TO_DEVICE);
-
complete(&host->dma_complete);
}
@@ -423,8 +428,6 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
if (ret < 0)
goto ecfgrx;
- init_completion(&host->dma_complete);
-
return;
ecfgrx:
@@ -520,13 +523,16 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
}
if (state2 & STS2_CRC_ERR) {
- dev_dbg(&host->pd->dev, ": CRC error\n");
+ dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
+ host->state, host->wait_for);
ret = -EIO;
} else if (state2 & STS2_TIMEOUT_ERR) {
- dev_dbg(&host->pd->dev, ": Timeout\n");
+ dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
+ host->state, host->wait_for);
ret = -ETIMEDOUT;
} else {
- dev_dbg(&host->pd->dev, ": End/Index error\n");
+ dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
+ host->state, host->wait_for);
ret = -EIO;
}
return ret;
@@ -549,10 +555,7 @@ static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
host->pio_ptr = p;
}
- if (host->sg_idx == data->sg_len)
- return false;
-
- return true;
+ return host->sg_idx != data->sg_len;
}
static void sh_mmcif_single_read(struct sh_mmcif_host *host,
@@ -562,7 +565,6 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host,
BLOCK_SIZE_MASK) + 3;
host->wait_for = MMCIF_WAIT_FOR_READ;
- schedule_delayed_work(&host->timeout_work, host->timeout);
/* buf read enable */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
@@ -576,6 +578,7 @@ static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
return false;
}
@@ -604,7 +607,7 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
host->sg_idx = 0;
host->sg_blkidx = 0;
host->pio_ptr = sg_virt(data->sg);
- schedule_delayed_work(&host->timeout_work, host->timeout);
+
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
@@ -616,6 +619,7 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
return false;
}
@@ -627,7 +631,6 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
if (!sh_mmcif_next_block(host, p))
return false;
- schedule_delayed_work(&host->timeout_work, host->timeout);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
return true;
@@ -640,7 +643,6 @@ static void sh_mmcif_single_write(struct sh_mmcif_host *host,
BLOCK_SIZE_MASK) + 3;
host->wait_for = MMCIF_WAIT_FOR_WRITE;
- schedule_delayed_work(&host->timeout_work, host->timeout);
/* buf write enable */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
@@ -654,6 +656,7 @@ static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
return false;
}
@@ -682,7 +685,7 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
host->sg_idx = 0;
host->sg_blkidx = 0;
host->pio_ptr = sg_virt(data->sg);
- schedule_delayed_work(&host->timeout_work, host->timeout);
+
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
@@ -694,6 +697,7 @@ static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
return false;
}
@@ -705,7 +709,6 @@ static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
if (!sh_mmcif_next_block(host, p))
return false;
- schedule_delayed_work(&host->timeout_work, host->timeout);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
return true;
@@ -756,6 +759,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
}
switch (opc) {
/* RBSY */
+ case MMC_SLEEP_AWAKE:
case MMC_SWITCH:
case MMC_STOP_TRANSMISSION:
case MMC_SET_WRITE_PROT:
@@ -781,6 +785,17 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
dev_err(&host->pd->dev, "Unsupported bus width.\n");
break;
}
+ switch (host->timing) {
+ case MMC_TIMING_UHS_DDR50:
+ /*
+ * MMC core will only set this timing, if the host
+ * advertises the MMC_CAP_UHS_DDR50 capability. MMCIF
+ * implementations with this capability, e.g. sh73a0,
+ * will have to set it in their platform data.
+ */
+ tmp |= CMD_SET_DARS;
+ break;
+ }
}
/* DWEN */
if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
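As the comment above notes, the core only selects MMC_TIMING_UHS_DDR50 when the host advertises the capability, which on MMCIF comes from platform data (pd->caps is ORed into mmc->caps during probe). A hedged board-file fragment illustrating that opt-in; the capability values are examples only.

/* Illustrative sh73a0-style board data enabling DDR mode on MMCIF. */
static struct sh_mmcif_plat_data example_mmcif_pdata = {
	/* advertise DDR support so the core may pick MMC_TIMING_UHS_DDR50 */
	.caps	= MMC_CAP_UHS_DDR50 | MMC_CAP_8_BIT_DATA,
};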
@@ -824,7 +839,7 @@ static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
sh_mmcif_single_read(host, mrq);
return 0;
default:
- dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
+ dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
return -EINVAL;
}
}
@@ -838,6 +853,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
switch (opc) {
/* response busy check */
+ case MMC_SLEEP_AWAKE:
case MMC_SWITCH:
case MMC_STOP_TRANSMISSION:
case MMC_SET_WRITE_PROT:
@@ -885,7 +901,6 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
}
host->wait_for = MMCIF_WAIT_FOR_STOP;
- schedule_delayed_work(&host->timeout_work, host->timeout);
}
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -895,6 +910,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_lock_irqsave(&host->lock, flags);
if (host->state != STATE_IDLE) {
+ dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
spin_unlock_irqrestore(&host->lock, flags);
mrq->cmd->error = -EAGAIN;
mmc_request_done(mmc, mrq);
@@ -911,6 +927,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
break;
case MMC_APP_CMD:
+ case SD_IO_RW_DIRECT:
host->state = STATE_IDLE;
mrq->cmd->error = -ETIMEDOUT;
mmc_request_done(mmc, mrq);
@@ -957,6 +974,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_lock_irqsave(&host->lock, flags);
if (host->state != STATE_IDLE) {
+ dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
spin_unlock_irqrestore(&host->lock, flags);
return;
}
@@ -981,7 +999,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
if (host->power) {
- pm_runtime_put(&host->pd->dev);
+ pm_runtime_put_sync(&host->pd->dev);
clk_disable(host->hclk);
host->power = false;
if (ios->power_mode == MMC_POWER_OFF)
@@ -1001,6 +1019,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sh_mmcif_clock_control(host, ios->clock);
}
+ host->timing = ios->timing;
host->bus_width = ios->bus_width;
host->state = STATE_IDLE;
}
@@ -1038,14 +1057,14 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
case MMC_SELECT_CARD:
case MMC_APP_CMD:
cmd->error = -ETIMEDOUT;
- host->sd_error = false;
break;
default:
cmd->error = sh_mmcif_error_manage(host);
- dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
- cmd->opcode, cmd->error);
break;
}
+ dev_dbg(&host->pd->dev, "CMD%d error %d\n",
+ cmd->opcode, cmd->error);
+ host->sd_error = false;
return false;
}
if (!(cmd->flags & MMC_RSP_PRESENT)) {
@@ -1058,6 +1077,12 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
if (!data)
return false;
+ /*
+ * Completion can be signalled from DMA callback and error, so, have to
+ * reset here, before setting .dma_active
+ */
+ init_completion(&host->dma_complete);
+
if (data->flags & MMC_DATA_READ) {
if (host->chan_rx)
sh_mmcif_start_dma_rx(host);
@@ -1068,34 +1093,47 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
if (!host->dma_active) {
data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
- if (!data->error)
- return true;
- return false;
+ return !data->error;
}
/* Running in the IRQ thread, can sleep */
time = wait_for_completion_interruptible_timeout(&host->dma_complete,
host->timeout);
+
+ if (data->flags & MMC_DATA_READ)
+ dma_unmap_sg(host->chan_rx->device->dev,
+ data->sg, data->sg_len,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(host->chan_tx->device->dev,
+ data->sg, data->sg_len,
+ DMA_TO_DEVICE);
+
if (host->sd_error) {
dev_err(host->mmc->parent,
"Error IRQ while waiting for DMA completion!\n");
/* Woken up by an error IRQ: abort DMA */
- if (data->flags & MMC_DATA_READ)
- dmaengine_terminate_all(host->chan_rx);
- else
- dmaengine_terminate_all(host->chan_tx);
data->error = sh_mmcif_error_manage(host);
} else if (!time) {
+ dev_err(host->mmc->parent, "DMA timeout!\n");
data->error = -ETIMEDOUT;
} else if (time < 0) {
+ dev_err(host->mmc->parent,
+ "wait_for_completion_...() error %ld!\n", time);
data->error = time;
}
sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
host->dma_active = false;
- if (data->error)
+ if (data->error) {
data->bytes_xfered = 0;
+ /* Abort DMA */
+ if (data->flags & MMC_DATA_READ)
+ dmaengine_terminate_all(host->chan_rx);
+ else
+ dmaengine_terminate_all(host->chan_tx);
+ }
return false;
}
@@ -1103,10 +1141,21 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
struct sh_mmcif_host *host = dev_id;
- struct mmc_request *mrq = host->mrq;
+ struct mmc_request *mrq;
+ bool wait = false;
cancel_delayed_work_sync(&host->timeout_work);
+ mutex_lock(&host->thread_lock);
+
+ mrq = host->mrq;
+ if (!mrq) {
+ dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
+ host->state, host->wait_for);
+ mutex_unlock(&host->thread_lock);
+ return IRQ_HANDLED;
+ }
+
/*
* All handlers return true, if processing continues, and false, if the
* request has to be completed - successfully or not
@@ -1114,35 +1163,32 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
switch (host->wait_for) {
case MMCIF_WAIT_FOR_REQUEST:
/* We're too late, the timeout has already kicked in */
+ mutex_unlock(&host->thread_lock);
return IRQ_HANDLED;
case MMCIF_WAIT_FOR_CMD:
- if (sh_mmcif_end_cmd(host))
- /* Wait for data */
- return IRQ_HANDLED;
+ /* Wait for data? */
+ wait = sh_mmcif_end_cmd(host);
break;
case MMCIF_WAIT_FOR_MREAD:
- if (sh_mmcif_mread_block(host))
- /* Wait for more data */
- return IRQ_HANDLED;
+ /* Wait for more data? */
+ wait = sh_mmcif_mread_block(host);
break;
case MMCIF_WAIT_FOR_READ:
- if (sh_mmcif_read_block(host))
- /* Wait for data end */
- return IRQ_HANDLED;
+ /* Wait for data end? */
+ wait = sh_mmcif_read_block(host);
break;
case MMCIF_WAIT_FOR_MWRITE:
- if (sh_mmcif_mwrite_block(host))
- /* Wait data to write */
- return IRQ_HANDLED;
+ /* Wait for data to write? */
+ wait = sh_mmcif_mwrite_block(host);
break;
case MMCIF_WAIT_FOR_WRITE:
- if (sh_mmcif_write_block(host))
- /* Wait for data end */
- return IRQ_HANDLED;
+ /* Wait for data end? */
+ wait = sh_mmcif_write_block(host);
break;
case MMCIF_WAIT_FOR_STOP:
if (host->sd_error) {
mrq->stop->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
break;
}
sh_mmcif_get_cmd12response(host, mrq->stop);
@@ -1150,13 +1196,22 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
break;
case MMCIF_WAIT_FOR_READ_END:
case MMCIF_WAIT_FOR_WRITE_END:
- if (host->sd_error)
+ if (host->sd_error) {
mrq->data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
+ }
break;
default:
BUG();
}
+ if (wait) {
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ /* Wait for more data */
+ mutex_unlock(&host->thread_lock);
+ return IRQ_HANDLED;
+ }
+
if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
struct mmc_data *data = mrq->data;
if (!mrq->cmd->error && data && !data->error)
@@ -1165,8 +1220,11 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
sh_mmcif_stop_cmd(host, mrq);
- if (!mrq->stop->error)
+ if (!mrq->stop->error) {
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ mutex_unlock(&host->thread_lock);
return IRQ_HANDLED;
+ }
}
}
@@ -1175,6 +1233,8 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
host->mrq = NULL;
mmc_request_done(host->mmc, mrq);
+ mutex_unlock(&host->thread_lock);
+
return IRQ_HANDLED;
}
@@ -1182,56 +1242,22 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
struct sh_mmcif_host *host = dev_id;
u32 state;
- int err = 0;
state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
- if (state & INT_ERR_STS) {
- /* error interrupts - process first */
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
- err = 1;
- } else if (state & INT_RBSYE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT,
- ~(INT_RBSYE | INT_CRSPE));
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
- } else if (state & INT_CRSPE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
- } else if (state & INT_BUFREN) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
- } else if (state & INT_BUFWEN) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
- } else if (state & INT_CMD12DRE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT,
- ~(INT_CMD12DRE | INT_CMD12RBE |
- INT_CMD12CRE | INT_BUFRE));
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
- } else if (state & INT_BUFRE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
- } else if (state & INT_DTRANE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT,
- ~(INT_CMD12DRE | INT_CMD12RBE |
- INT_CMD12CRE | INT_DTRANE));
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
- } else if (state & INT_CMD12RBE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT,
- ~(INT_CMD12RBE | INT_CMD12CRE));
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
- } else {
- dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
- err = 1;
- }
- if (err) {
+ if (state & ~MASK_CLEAN)
+ dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
+ state);
+
+ if (state & INT_ERR_STS || state & ~INT_ALL) {
host->sd_error = true;
- dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
+ dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
}
if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
+ if (!host->mrq)
+ dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
if (!host->dma_active)
return IRQ_WAKE_THREAD;
else if (host->sd_error)
@@ -1248,11 +1274,24 @@ static void mmcif_timeout_work(struct work_struct *work)
struct delayed_work *d = container_of(work, struct delayed_work, work);
struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
struct mmc_request *mrq = host->mrq;
+ unsigned long flags;
if (host->dying)
/* Don't run after mmc_remove_host() */
return;
+ dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
+ host->wait_for, mrq->cmd->opcode);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->state == STATE_IDLE) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ host->state = STATE_TIMEOUT;
+ spin_unlock_irqrestore(&host->lock, flags);
+
/*
* Handle races with cancel_delayed_work(), unless
* cancel_delayed_work_sync() is used
@@ -1306,10 +1345,11 @@ static int sh_mmcif_probe(struct platform_device *pdev)
struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
struct resource *res;
void __iomem *reg;
+ const char *name;
irq[0] = platform_get_irq(pdev, 0);
irq[1] = platform_get_irq(pdev, 1);
- if (irq[0] < 0 || irq[1] < 0) {
+ if (irq[0] < 0) {
dev_err(&pdev->dev, "Get irq error\n");
return -ENXIO;
}
@@ -1329,10 +1369,11 @@ static int sh_mmcif_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto ealloch;
}
+ mmc_of_parse(mmc);
host = mmc_priv(mmc);
host->mmc = mmc;
host->addr = reg;
- host->timeout = 1000;
+ host->timeout = msecs_to_jiffies(1000);
host->pd = pdev;
@@ -1341,7 +1382,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
mmc->ops = &sh_mmcif_ops;
sh_mmcif_init_ocr(host);
- mmc->caps = MMC_CAP_MMC_HIGHSPEED;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
if (pd && pd->caps)
mmc->caps |= pd->caps;
mmc->max_segs = 32;
@@ -1374,15 +1415,19 @@ static int sh_mmcif_probe(struct platform_device *pdev)
sh_mmcif_sync_reset(host);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
- ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
+ name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
+ ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host);
if (ret) {
- dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
+ dev_err(&pdev->dev, "request_irq error (%s)\n", name);
goto ereqirq0;
}
- ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
- if (ret) {
- dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
- goto ereqirq1;
+ if (irq[1] >= 0) {
+ ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt,
+ 0, "sh_mmc:int", host);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
+ goto ereqirq1;
+ }
}
if (pd && pd->use_cd_gpio) {
@@ -1391,6 +1436,8 @@ static int sh_mmcif_probe(struct platform_device *pdev)
goto erqcd;
}
+ mutex_init(&host->thread_lock);
+
clk_disable(host->hclk);
ret = mmc_add_host(mmc);
if (ret < 0)
@@ -1404,10 +1451,9 @@ static int sh_mmcif_probe(struct platform_device *pdev)
return ret;
emmcaddh:
- if (pd && pd->use_cd_gpio)
- mmc_gpio_free_cd(mmc);
erqcd:
- free_irq(irq[1], host);
+ if (irq[1] >= 0)
+ free_irq(irq[1], host);
ereqirq1:
free_irq(irq[0], host);
ereqirq0:
@@ -1427,7 +1473,6 @@ ealloch:
static int sh_mmcif_remove(struct platform_device *pdev)
{
struct sh_mmcif_host *host = platform_get_drvdata(pdev);
- struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
int irq[2];
host->dying = true;
@@ -1436,9 +1481,6 @@ static int sh_mmcif_remove(struct platform_device *pdev)
dev_pm_qos_hide_latency_limit(&pdev->dev);
- if (pd && pd->use_cd_gpio)
- mmc_gpio_free_cd(host->mmc);
-
mmc_remove_host(host->mmc);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
@@ -1456,7 +1498,8 @@ static int sh_mmcif_remove(struct platform_device *pdev)
irq[1] = platform_get_irq(pdev, 1);
free_irq(irq[0], host);
- free_irq(irq[1], host);
+ if (irq[1] >= 0)
+ free_irq(irq[1], host);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 524a7f773820..fe90853900b4 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
@@ -32,6 +33,16 @@
#include "tmio_mmc.h"
+struct sh_mobile_sdhi_of_data {
+ unsigned long tmio_flags;
+};
+
+static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = {
+ {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
+ },
+};
+
struct sh_mobile_sdhi {
struct clk *clk;
struct tmio_mmc_data mmc_data;
@@ -117,8 +128,18 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
.cd_wakeup = sh_mobile_sdhi_cd_wakeup,
};
+static const struct of_device_id sh_mobile_sdhi_of_match[] = {
+ { .compatible = "renesas,shmobile-sdhi" },
+ { .compatible = "renesas,sh7372-sdhi" },
+ { .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
+
static int sh_mobile_sdhi_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(sh_mobile_sdhi_of_match, &pdev->dev);
struct sh_mobile_sdhi *priv;
struct tmio_mmc_data *mmc_data;
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
@@ -126,7 +147,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
int irq, ret, i = 0;
bool multiplexed_isr = true;
- priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
if (priv == NULL) {
dev_err(&pdev->dev, "kzalloc failed\n");
return -ENOMEM;
@@ -135,15 +156,14 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data = &priv->mmc_data;
if (p) {
- p->pdata = mmc_data;
if (p->init) {
ret = p->init(pdev, &sdhi_ops);
if (ret)
- goto einit;
+ return ret;
}
}
- priv->clk = clk_get(&pdev->dev, NULL);
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
@@ -153,10 +173,9 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->clk_enable = sh_mobile_sdhi_clk_enable;
mmc_data->clk_disable = sh_mobile_sdhi_clk_disable;
mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
+ mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
if (p) {
mmc_data->flags = p->tmio_flags;
- if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
- mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
mmc_data->ocr_mask = p->tmio_ocr_mask;
mmc_data->capabilities |= p->tmio_caps;
mmc_data->capabilities2 |= p->tmio_caps2;
@@ -187,6 +206,11 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
*/
mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
+ if (of_id && of_id->data) {
+ const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
+ mmc_data->flags |= of_data->tmio_flags;
+ }
+
ret = tmio_mmc_host_probe(&host, pdev, mmc_data);
if (ret < 0)
goto eprobe;
@@ -199,33 +223,33 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT);
if (irq >= 0) {
multiplexed_isr = false;
- ret = request_irq(irq, tmio_mmc_card_detect_irq, 0,
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_card_detect_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
- goto eirq_card_detect;
+ goto eirq;
}
irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO);
if (irq >= 0) {
multiplexed_isr = false;
- ret = request_irq(irq, tmio_mmc_sdio_irq, 0,
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdio_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
- goto eirq_sdio;
+ goto eirq;
}
irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD);
if (irq >= 0) {
multiplexed_isr = false;
- ret = request_irq(irq, tmio_mmc_sdcard_irq, 0,
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdcard_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
- goto eirq_sdcard;
+ goto eirq;
} else if (!multiplexed_isr) {
dev_err(&pdev->dev,
"Principal SD-card IRQ is missing among named interrupts\n");
ret = irq;
- goto eirq_sdcard;
+ goto eirq;
}
if (multiplexed_isr) {
@@ -234,15 +258,15 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
if (irq < 0)
break;
i++;
- ret = request_irq(irq, tmio_mmc_irq, 0,
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
- goto eirq_multiplexed;
+ goto eirq;
}
/* There must be at least one IRQ source */
if (!i)
- goto eirq_multiplexed;
+ goto eirq;
}
dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n",
@@ -252,28 +276,12 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
return ret;
-eirq_multiplexed:
- while (i--) {
- irq = platform_get_irq(pdev, i);
- free_irq(irq, host);
- }
-eirq_sdcard:
- irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO);
- if (irq >= 0)
- free_irq(irq, host);
-eirq_sdio:
- irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT);
- if (irq >= 0)
- free_irq(irq, host);
-eirq_card_detect:
+eirq:
tmio_mmc_host_remove(host);
eprobe:
- clk_put(priv->clk);
eclkget:
if (p && p->cleanup)
p->cleanup(pdev);
-einit:
- kfree(priv);
return ret;
}
@@ -281,29 +289,13 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct tmio_mmc_host *host = mmc_priv(mmc);
- struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
- int i = 0, irq;
-
- if (p)
- p->pdata = NULL;
tmio_mmc_host_remove(host);
- while (1) {
- irq = platform_get_irq(pdev, i++);
- if (irq < 0)
- break;
- free_irq(irq, host);
- }
-
- clk_put(priv->clk);
-
if (p && p->cleanup)
p->cleanup(pdev);
- kfree(priv);
-
return 0;
}
@@ -314,12 +306,6 @@ static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
.runtime_resume = tmio_mmc_host_runtime_resume,
};
-static const struct of_device_id sh_mobile_sdhi_of_match[] = {
- { .compatible = "renesas,shmobile-sdhi" },
- { }
-};
-MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
-
static struct platform_driver sh_mobile_sdhi_driver = {
.driver = {
.name = "sh_mobile_sdhi",
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 50bf495a988b..f508ecb5b8a7 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -43,6 +43,7 @@
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -155,6 +156,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
host->set_clk_div(host->pdev, (clk>>22) & 1);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
+ msleep(10);
}
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
@@ -768,16 +770,48 @@ static int tmio_mmc_clk_update(struct mmc_host *mmc)
return ret;
}
-static void tmio_mmc_set_power(struct tmio_mmc_host *host, struct mmc_ios *ios)
+static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
+ int ret = 0;
+
+ /* .set_ios() is returning void, so, no chance to report an error */
if (host->set_pwr)
- host->set_pwr(host->pdev, ios->power_mode != MMC_POWER_OFF);
+ host->set_pwr(host->pdev, 1);
+
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ /*
+ * Attention: empirical value. With a b43 WiFi SDIO card this
+ * delay proved necessary for reliable card-insertion probing.
+ * 100us were not enough. Is this the same 140us delay as in
+ * tmio_mmc_set_ios()?
+ */
+ udelay(200);
+ }
+ /*
+ * It seems VccQ should be switched on after Vcc; this is also what the
+ * omap_hsmmc.c driver does.
+ */
+ if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
+ regulator_enable(mmc->supply.vqmmc);
+ udelay(200);
+ }
+}
+
+static void tmio_mmc_power_off(struct tmio_mmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (!IS_ERR(mmc->supply.vqmmc))
+ regulator_disable(mmc->supply.vqmmc);
+
if (!IS_ERR(mmc->supply.vmmc))
- /* Errors ignored... */
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
- ios->power_mode ? ios->vdd : 0);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, 0);
}
/* Set MMC clock / power.
@@ -828,18 +862,20 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (!host->power) {
tmio_mmc_clk_update(mmc);
pm_runtime_get_sync(dev);
- host->power = true;
}
tmio_mmc_set_clock(host, ios->clock);
- /* power up SD bus */
- tmio_mmc_set_power(host, ios);
+ if (!host->power) {
+ /* power up SD card and the bus */
+ tmio_mmc_power_on(host, ios->vdd);
+ host->power = true;
+ }
/* start bus clock */
tmio_mmc_clk_start(host);
} else if (ios->power_mode != MMC_POWER_UP) {
- if (ios->power_mode == MMC_POWER_OFF)
- tmio_mmc_set_power(host, ios);
if (host->power) {
struct tmio_mmc_data *pdata = host->pdata;
+ if (ios->power_mode == MMC_POWER_OFF)
+ tmio_mmc_power_off(host);
tmio_mmc_clk_stop(host);
host->power = false;
pm_runtime_put(dev);
@@ -918,6 +954,17 @@ static void tmio_mmc_init_ocr(struct tmio_mmc_host *host)
dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}
+static void tmio_mmc_of_parse(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata)
+{
+ const struct device_node *np = pdev->dev.of_node;
+ if (!np)
+ return;
+
+ if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
+ pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
+}
+
int tmio_mmc_host_probe(struct tmio_mmc_host **host,
struct platform_device *pdev,
struct tmio_mmc_data *pdata)
@@ -928,6 +975,11 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
int ret;
u32 irq_mask = TMIO_MASK_CMD;
+ tmio_mmc_of_parse(pdev, pdata);
+
+ if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
+ pdata->write16_hook = NULL;
+
res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_ctl)
return -EINVAL;
@@ -936,6 +988,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
if (!mmc)
return -ENOMEM;
+ mmc_of_parse(mmc);
+
pdata->dev = &pdev->dev;
_host = mmc_priv(mmc);
_host->pdata = pdata;
@@ -956,7 +1010,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
}
mmc->ops = &tmio_mmc_ops;
- mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
+ mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 = pdata->capabilities2;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
@@ -968,7 +1022,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
- mmc->caps & MMC_CAP_NONREMOVABLE);
+ mmc->caps & MMC_CAP_NONREMOVABLE ||
+ mmc->slot.cd_irq >= 0);
_host->power = false;
pm_runtime_enable(&pdev->dev);
@@ -1060,16 +1115,8 @@ EXPORT_SYMBOL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
struct platform_device *pdev = host->pdev;
- struct tmio_mmc_data *pdata = host->pdata;
struct mmc_host *mmc = host->mmc;
- if (pdata->flags & TMIO_MMC_USE_GPIO_CD)
- /*
- * This means we can miss a card-eject, but this is anyway
- * possible, because of delayed processing of hotplug events.
- */
- mmc_gpio_free_cd(mmc);
-
if (!host->native_hotplug)
pm_runtime_get_sync(&pdev->dev);
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 154f0e8e931c..c6d001509e5a 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -1012,7 +1012,7 @@ static const struct dev_pm_ops wmt_mci_pm = {
static struct platform_driver wmt_mci_driver = {
.probe = wmt_mci_probe,
- .remove = __exit_p(wmt_mci_remove),
+ .remove = wmt_mci_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 03f2eb5627ec..557bec599f4f 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -74,8 +74,8 @@ config MTD_REDBOOT_PARTS_READONLY
endif # MTD_REDBOOT_PARTS
config MTD_CMDLINE_PARTS
- bool "Command line partition table parsing"
- depends on MTD = "y"
+ tristate "Command line partition table parsing"
+ depends on MTD
---help---
Allow generic configuration of the MTD partition tables via the kernel
command line. Multiple flash resources are supported for hardware where
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 7c057a05adb6..ddc0a4287a4b 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -142,7 +142,13 @@ static int __init ar7_parser_init(void)
return register_mtd_parser(&ar7_parser);
}
+static void __exit ar7_parser_exit(void)
+{
+ deregister_mtd_parser(&ar7_parser);
+}
+
module_init(ar7_parser_init);
+module_exit(ar7_parser_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index e06d782489a6..63feb75cc8e0 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -14,17 +14,11 @@
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
/* 10 parts were found on sflash on Netgear WNDR4500 */
#define BCM47XXPART_MAX_PARTS 12
-/*
- * Amount of bytes we read when analyzing each block of flash memory.
- * Set it big enough to allow detecting partition and reading important data.
- */
-#define BCM47XXPART_BYTES_TO_READ 0x404
-
/* Magics */
#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
#define POT_MAGIC1 0x54544f50 /* POTT */
@@ -59,13 +53,21 @@ static int bcm47xxpart_parse(struct mtd_info *master,
uint32_t *buf;
size_t bytes_read;
uint32_t offset;
- uint32_t blocksize = 0x10000;
+ uint32_t blocksize = master->erasesize;
struct trx_header *trx;
+ int trx_part = -1;
+ int last_trx_part = -1;
+ int max_bytes_to_read = 0x8004;
+
+ if (blocksize <= 0x10000)
+ blocksize = 0x10000;
+ if (blocksize == 0x20000)
+ max_bytes_to_read = 0x18004;
/* Alloc */
parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
GFP_KERNEL);
- buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+ buf = kzalloc(max_bytes_to_read, GFP_KERNEL);
/* Parse block by block looking for magics */
for (offset = 0; offset <= master->size - blocksize;
@@ -80,7 +82,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Read beginning of the block */
- if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+ if (mtd_read(master, offset, max_bytes_to_read,
&bytes_read, (uint8_t *)buf) < 0) {
pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
offset);
@@ -95,9 +97,16 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Standard NVRAM */
- if (buf[0x000 / 4] == NVRAM_HEADER) {
+ if (buf[0x000 / 4] == NVRAM_HEADER ||
+ buf[0x1000 / 4] == NVRAM_HEADER ||
+ buf[0x8000 / 4] == NVRAM_HEADER ||
+ (blocksize == 0x20000 && (
+ buf[0x10000 / 4] == NVRAM_HEADER ||
+ buf[0x11000 / 4] == NVRAM_HEADER ||
+ buf[0x18000 / 4] == NVRAM_HEADER))) {
bcm47xxpart_add_part(&parts[curr_part++], "nvram",
offset, 0);
+ offset = rounddown(offset, blocksize);
continue;
}
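The NVRAM check above now probes several fixed offsets within a block, with the higher offsets only meaningful on 128 KiB erase blocks. The same test expressed as a table-driven loop, as a sketch only (the parser itself keeps the explicit conditions); the helper name is invented, NVRAM_HEADER is the magic used above.

/* Sketch: probe the candidate in-block offsets for an NVRAM header. */
static bool sketch_block_has_nvram(const uint32_t *buf, uint32_t blocksize)
{
	static const unsigned int offs[] =
		{ 0x0, 0x1000, 0x8000, 0x10000, 0x11000, 0x18000 };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(offs); i++) {
		if (offs[i] >= 0x10000 && blocksize != 0x20000)
			continue;	/* large offsets need 128 KiB blocks */
		if (buf[offs[i] / 4] == NVRAM_HEADER)
			return true;
	}
	return false;
}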
@@ -131,6 +140,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
if (buf[0x000 / 4] == TRX_MAGIC) {
trx = (struct trx_header *)buf;
+ trx_part = curr_part;
+ bcm47xxpart_add_part(&parts[curr_part++], "firmware",
+ offset, 0);
+
i = 0;
/* We have LZMA loader if offset[2] points to sth */
if (trx->offset[2]) {
@@ -154,6 +167,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
offset + trx->offset[i], 0);
i++;
+ last_trx_part = curr_part - 1;
+
/*
* We have whole TRX scanned, skip to the next part. Use
* roundown (not roundup), as the loop will increase
@@ -169,11 +184,15 @@ static int bcm47xxpart_parse(struct mtd_info *master,
* Assume that partitions end at the beginning of the one they are
* followed by.
*/
- for (i = 0; i < curr_part - 1; i++)
- parts[i].size = parts[i + 1].offset - parts[i].offset;
- if (curr_part > 0)
- parts[curr_part - 1].size =
- master->size - parts[curr_part - 1].offset;
+ for (i = 0; i < curr_part; i++) {
+ u64 next_part_offset = (i < curr_part - 1) ?
+ parts[i + 1].offset : master->size;
+
+ parts[i].size = next_part_offset - parts[i].offset;
+ if (i == last_trx_part && trx_part >= 0)
+ parts[trx_part].size = next_part_offset -
+ parts[trx_part].offset;
+ }
*pparts = parts;
return curr_part;
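The sizing loop above lets each partition run up to the next one's offset and, when it reaches the last TRX sub-partition, stretches the wrapping "firmware" partition over all of them. A worked example with made-up offsets on an 8 MiB flash:

/* Assumed layout (offsets invented for illustration):
 *   parts[0] "boot"      offset 0x000000
 *   parts[1] "nvram"     offset 0x060000
 *   parts[2] "firmware"  offset 0x080000   (trx_part)
 *   parts[3] "linux"     offset 0x08001c
 *   parts[4] "rootfs"    offset 0x200000   (last_trx_part)
 * After the loop:
 *   boot     = 0x060000, nvram = 0x020000,
 *   linux    = 0x200000 - 0x08001c, rootfs = 0x800000 - 0x200000,
 *   firmware = 0x800000 - 0x080000, because when i == last_trx_part the
 *   loop resizes parts[trx_part] to end at the same next_part_offset.
 */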
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index b86197286f24..fff665d59a0d 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -33,6 +33,8 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
@@ -74,6 +76,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
.probe = NULL, /* Not usable directly */
.destroy = cfi_amdstd_destroy,
@@ -496,6 +502,7 @@ static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
+ struct device_node __maybe_unused *np = map->device_node;
struct mtd_info *mtd;
int i;
@@ -570,6 +577,17 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
cfi_tell_features(extp);
#endif
+#ifdef CONFIG_OF
+ if (np && of_property_read_bool(
+ np, "use-advanced-sector-protection")
+ && extp->BlkProtUnprot == 8) {
+ printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
+ mtd->_lock = cfi_ppb_lock;
+ mtd->_unlock = cfi_ppb_unlock;
+ mtd->_is_locked = cfi_ppb_is_locked;
+ }
+#endif
+
bootloc = extp->TopBottom;
if ((bootloc < 2) || (bootloc > 5)) {
printk(KERN_WARNING "%s: CFI contains unrecognised boot "
@@ -2172,6 +2190,205 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
+/*
+ * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
+ */
+
+struct ppb_lock {
+ struct flchip *chip;
+ loff_t offset;
+ int locked;
+};
+
+#define MAX_SECTORS 512
+
+#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
+#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
+#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
+
+static int __maybe_unused do_ppb_xxlock(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ /* PPB entry command */
+ cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+ chip->state = FL_LOCKING;
+ map_write(map, CMD(0xA0), chip->start + adr);
+ map_write(map, CMD(0x00), chip->start + adr);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+ /*
+ * Unlocking of one specific sector is not supported, so we
+ * have to unlock all sectors of this device instead
+ */
+ chip->state = FL_UNLOCKING;
+ map_write(map, CMD(0x80), chip->start);
+ map_write(map, CMD(0x30), chip->start);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
+ chip->state = FL_JEDEC_QUERY;
+ /* The PPB bit reads back 0 when locked; invert so we return 1 for a locked sector */
+ ret = !cfi_read_query(map, adr);
+ } else
+ BUG();
+
+ /*
+ * Wait for some time as unlocking of all sectors takes quite long
+ */
+ timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
+ for (;;) {
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+ ret = -EIO;
+ break;
+ }
+
+ UDELAY(map, chip, adr, 1);
+ }
+
+ /* Exit BC commands */
+ map_write(map, CMD(0x90), chip->start);
+ map_write(map, CMD(0x00), chip->start);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr + chip->start);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+}
+
+static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ struct mtd_erase_region_info *regions = mtd->eraseregions;
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct ppb_lock *sect;
+ unsigned long adr;
+ loff_t offset;
+ uint64_t length;
+ int chipnum;
+ int i;
+ int sectors;
+ int ret;
+
+ /*
+ * PPB unlocking always unlocks all sectors of the flash chip.
+ * We need to re-lock all previously locked sectors. So let's
+ * first check the locking status of all sectors and save
+ * it for future use.
+ */
+ sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
+ if (!sect)
+ return -ENOMEM;
+
+ /*
+ * This code to walk all sectors is a slightly modified version
+ * of the cfi_varsize_frob() code.
+ */
+ i = 0;
+ chipnum = 0;
+ adr = 0;
+ sectors = 0;
+ offset = 0;
+ length = mtd->size;
+
+ while (length) {
+ int size = regions[i].erasesize;
+
+ /*
+ * Only test sectors that shall not be unlocked. The other
+ * sectors shall be unlocked, so let's keep their locking
+ * status at "unlocked" (locked=0) for the final re-locking.
+ */
+ if ((adr < ofs) || (adr >= (ofs + len))) {
+ sect[sectors].chip = &cfi->chips[chipnum];
+ sect[sectors].offset = offset;
+ sect[sectors].locked = do_ppb_xxlock(
+ map, &cfi->chips[chipnum], adr, 0,
+ DO_XXLOCK_ONEBLOCK_GETLOCK);
+ }
+
+ adr += size;
+ offset += size;
+ length -= size;
+
+ if (offset == regions[i].offset + size * regions[i].numblocks)
+ i++;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+
+ sectors++;
+ if (sectors >= MAX_SECTORS) {
+ printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
+ MAX_SECTORS);
+ kfree(sect);
+ return -EINVAL;
+ }
+ }
+
+ /* Now unlock the whole chip */
+ ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_UNLOCK);
+ if (ret) {
+ kfree(sect);
+ return ret;
+ }
+
+ /*
+ * PPB unlocking always unlocks all sectors of the flash chip.
+ * We need to re-lock all previously locked sectors.
+ */
+ for (i = 0; i < sectors; i++) {
+ if (sect[i].locked)
+ do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+ }
+
+ kfree(sect);
+ return ret;
+}
+
+static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
+}
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
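A minimal sketch, not part of the patch, of how the new hooks can be exercised through the generic MTD API, assuming a chip whose map set use-advanced-sector-protection and therefore had _lock/_unlock/_is_locked wired up above:

#include <linux/mtd/mtd.h>

/* Sketch only: lock one erase block, verify it, then unlock it again.
 * Note that unlocking goes through cfi_ppb_unlock(), which unlocks the
 * whole chip and then re-locks the sectors it saved beforehand. */
static int ppb_lock_demo(struct mtd_info *mtd, loff_t ofs)
{
	int ret;

	ret = mtd_lock(mtd, ofs, mtd->erasesize);		/* -> cfi_ppb_lock() */
	if (ret)
		return ret;

	if (mtd_is_locked(mtd, ofs, mtd->erasesize) != 1)	/* -> cfi_ppb_is_locked() */
		return -EIO;

	return mtd_unlock(mtd, ofs, mtd->erasesize);		/* -> cfi_ppb_unlock() */
}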
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index c533f27d863f..721caebbc5cc 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -22,11 +22,22 @@
*
* mtdparts=<mtddef>[;<mtddef]
* <mtddef> := <mtd-id>:<partdef>[,<partdef>]
- * where <mtd-id> is the name from the "cat /proc/mtd" command
- * <partdef> := <size>[@offset][<name>][ro][lk]
+ * <partdef> := <size>[@<offset>][<name>][ro][lk]
* <mtd-id> := unique name used in mapping driver/device (mtd->name)
* <size> := standard linux memsize OR "-" to denote all remaining space
+ * size is automatically truncated at end of device
* if the specified or truncated size is 0 the part is skipped
+ * <offset> := standard linux memsize
+ * if omitted the part will immediately follow the previous part
+ * or 0 if the first part
* <name> := '(' NAME ')'
+ * NAME will appear in /proc/mtd
+ *
+ * <size> and <offset> can be specified such that the parts are out of order
+ * in physical memory and may even overlap.
+ *
+ * The parts are assigned MTD numbers in the order they are specified in the
+ * command line regardless of their order in physical memory.
*
* Examples:
*
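A hedged illustration of the grammar above (device and partition names are made up): mtdparts=physmap-flash.0:512k(bootloader)ro,128k(env),4m(kernel),-(rootfs) creates four partitions on the "physmap-flash.0" master, the first read-only and the last taking all remaining space; adding an explicit offset, e.g. 1m@0x200000(data), places a partition out of order as the updated comment allows.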
@@ -70,6 +81,7 @@ struct cmdline_mtd_partition {
static struct cmdline_mtd_partition *partitions;
/* the command line passed to mtdpart_setup() */
+static char *mtdparts;
static char *cmdline;
static int cmdline_parsed;
@@ -330,6 +342,14 @@ static int parse_cmdline_partitions(struct mtd_info *master,
if (part->parts[i].size == SIZE_REMAINING)
part->parts[i].size = master->size - offset;
+ if (offset + part->parts[i].size > master->size) {
+ printk(KERN_WARNING ERRP
+ "%s: partitioning exceeds flash size, truncating\n",
+ part->mtd_id);
+ part->parts[i].size = master->size - offset;
+ }
+ offset += part->parts[i].size;
+
if (part->parts[i].size == 0) {
printk(KERN_WARNING ERRP
"%s: skipping zero sized partition\n",
@@ -337,16 +357,8 @@ static int parse_cmdline_partitions(struct mtd_info *master,
part->num_parts--;
memmove(&part->parts[i], &part->parts[i + 1],
sizeof(*part->parts) * (part->num_parts - i));
- continue;
- }
-
- if (offset + part->parts[i].size > master->size) {
- printk(KERN_WARNING ERRP
- "%s: partitioning exceeds flash size, truncating\n",
- part->mtd_id);
- part->parts[i].size = master->size - offset;
+ i--;
}
- offset += part->parts[i].size;
}
*pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
@@ -365,7 +377,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
*
* This function needs to be visible for bootloaders.
*/
-static int mtdpart_setup(char *s)
+static int __init mtdpart_setup(char *s)
{
cmdline = s;
return 1;
@@ -381,10 +393,21 @@ static struct mtd_part_parser cmdline_parser = {
static int __init cmdline_parser_init(void)
{
+ if (mtdparts)
+ mtdpart_setup(mtdparts);
return register_mtd_parser(&cmdline_parser);
}
+static void __exit cmdline_parser_exit(void)
+{
+ deregister_mtd_parser(&cmdline_parser);
+}
+
module_init(cmdline_parser_init);
+module_exit(cmdline_parser_exit);
+
+MODULE_PARM_DESC(mtdparts, "Partitioning specification");
+module_param(mtdparts, charp, 0);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 395733a30ef4..369a1943ca25 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -17,8 +17,10 @@ obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
+obj-$(CONFIG_MTD_NAND_OMAP_BCH) += elm.o
obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
-CFLAGS_docg3.o += -I$(src)
\ No newline at end of file
+
+CFLAGS_docg3.o += -I$(src)
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 4714584aa993..95266285acb1 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -5,6 +5,8 @@
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
+#include "bcm47xxsflash.h"
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
@@ -13,26 +15,28 @@ static const char *probes[] = { "bcm47xxpart", NULL };
static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
- struct bcma_sflash *sflash = mtd->priv;
+ struct bcm47xxsflash *b47s = mtd->priv;
/* Check address range */
if ((from + len) > mtd->size)
return -EINVAL;
- memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(sflash->window + from),
+ memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from),
len);
+ *retlen = len;
return len;
}
-static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
- struct mtd_info *mtd)
+static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s)
{
- mtd->priv = sflash;
+ struct mtd_info *mtd = &b47s->mtd;
+
+ mtd->priv = b47s;
mtd->name = "bcm47xxsflash";
mtd->owner = THIS_MODULE;
mtd->type = MTD_ROM;
- mtd->size = sflash->size;
+ mtd->size = b47s->size;
mtd->_read = bcm47xxsflash_read;
/* TODO: implement writing support and verify/change following code */
@@ -40,19 +44,30 @@ static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
mtd->writebufsize = mtd->writesize = 1;
}
-static int bcm47xxsflash_probe(struct platform_device *pdev)
+/**************************************************
+ * BCMA
+ **************************************************/
+
+static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
{
struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxsflash *b47s;
int err;
- sflash->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
- if (!sflash->mtd) {
+ b47s = kzalloc(sizeof(*b47s), GFP_KERNEL);
+ if (!b47s) {
err = -ENOMEM;
goto out;
}
- bcm47xxsflash_fill_mtd(sflash, sflash->mtd);
+ sflash->priv = b47s;
- err = mtd_device_parse_register(sflash->mtd, probes, NULL, NULL, 0);
+ b47s->window = sflash->window;
+ b47s->blocksize = sflash->blocksize;
+ b47s->numblocks = sflash->numblocks;
+ b47s->size = sflash->size;
+ bcm47xxsflash_fill_mtd(b47s);
+
+ err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
if (err) {
pr_err("Failed to register MTD device: %d\n", err);
goto err_dev_reg;
@@ -61,34 +76,40 @@ static int bcm47xxsflash_probe(struct platform_device *pdev)
return 0;
err_dev_reg:
- kfree(sflash->mtd);
+ kfree(&b47s->mtd);
out:
return err;
}
-static int bcm47xxsflash_remove(struct platform_device *pdev)
+static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
{
struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxsflash *b47s = sflash->priv;
- mtd_device_unregister(sflash->mtd);
- kfree(sflash->mtd);
+ mtd_device_unregister(&b47s->mtd);
+ kfree(b47s);
return 0;
}
static struct platform_driver bcma_sflash_driver = {
- .remove = bcm47xxsflash_remove,
+ .probe = bcm47xxsflash_bcma_probe,
+ .remove = bcm47xxsflash_bcma_remove,
.driver = {
.name = "bcma_sflash",
.owner = THIS_MODULE,
},
};
+/**************************************************
+ * Init
+ **************************************************/
+
static int __init bcm47xxsflash_init(void)
{
int err;
- err = platform_driver_probe(&bcma_sflash_driver, bcm47xxsflash_probe);
+ err = platform_driver_register(&bcma_sflash_driver);
if (err)
pr_err("Failed to register BCMA serial flash driver: %d\n",
err);
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
new file mode 100644
index 000000000000..ebf6f710e23c
--- /dev/null
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -0,0 +1,15 @@
+#ifndef __BCM47XXSFLASH_H
+#define __BCM47XXSFLASH_H
+
+#include <linux/mtd/mtd.h>
+
+struct bcm47xxsflash {
+ u32 window;
+ u32 blocksize;
+ u16 numblocks;
+ u32 size;
+
+ struct mtd_info mtd;
+};
+
+#endif /* __BCM47XXSFLASH_H */
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
new file mode 100644
index 000000000000..2ec5da9ee248
--- /dev/null
+++ b/drivers/mtd/devices/elm.c
@@ -0,0 +1,404 @@
+/*
+ * Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/elm.h>
+
+#define ELM_IRQSTATUS 0x018
+#define ELM_IRQENABLE 0x01c
+#define ELM_LOCATION_CONFIG 0x020
+#define ELM_PAGE_CTRL 0x080
+#define ELM_SYNDROME_FRAGMENT_0 0x400
+#define ELM_SYNDROME_FRAGMENT_6 0x418
+#define ELM_LOCATION_STATUS 0x800
+#define ELM_ERROR_LOCATION_0 0x880
+
+/* ELM Interrupt Status Register */
+#define INTR_STATUS_PAGE_VALID BIT(8)
+
+/* ELM Interrupt Enable Register */
+#define INTR_EN_PAGE_MASK BIT(8)
+
+/* ELM Location Configuration Register */
+#define ECC_BCH_LEVEL_MASK 0x3
+
+/* ELM syndrome */
+#define ELM_SYNDROME_VALID BIT(16)
+
+/* ELM_LOCATION_STATUS Register */
+#define ECC_CORRECTABLE_MASK BIT(8)
+#define ECC_NB_ERRORS_MASK 0x1f
+
+/* ELM_ERROR_LOCATION_0-15 Registers */
+#define ECC_ERROR_LOCATION_MASK 0x1fff
+
+#define ELM_ECC_SIZE 0x7ff
+
+#define SYNDROME_FRAGMENT_REG_SIZE 0x40
+#define ERROR_LOCATION_SIZE 0x100
+
+struct elm_info {
+ struct device *dev;
+ void __iomem *elm_base;
+ struct completion elm_completion;
+ struct list_head list;
+ enum bch_ecc bch_type;
+};
+
+static LIST_HEAD(elm_devices);
+
+static void elm_write_reg(struct elm_info *info, int offset, u32 val)
+{
+ writel(val, info->elm_base + offset);
+}
+
+static u32 elm_read_reg(struct elm_info *info, int offset)
+{
+ return readl(info->elm_base + offset);
+}
+
+/**
+ * elm_config - Configure ELM module
+ * @dev: ELM device
+ * @bch_type: Type of BCH ecc
+ */
+void elm_config(struct device *dev, enum bch_ecc bch_type)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_get_drvdata(dev);
+
+ reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
+ elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
+ info->bch_type = bch_type;
+}
+EXPORT_SYMBOL(elm_config);
+
+/**
+ * elm_configure_page_mode - Enable/Disable page mode
+ * @info: elm info
+ * @index: index number of syndrome fragment vector
+ * @enable: enable/disable flag for page mode
+ *
+ * Enable page mode for syndrome fragment index
+ */
+static void elm_configure_page_mode(struct elm_info *info, int index,
+ bool enable)
+{
+ u32 reg_val;
+
+ reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
+ if (enable)
+ reg_val |= BIT(index); /* enable page mode */
+ else
+ reg_val &= ~BIT(index); /* disable page mode */
+
+ elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
+}
+
+/**
+ * elm_load_syndrome - Load ELM syndrome reg
+ * @info: elm info
+ * @err_vec: elm error vectors
+ * @ecc: buffer with calculated ecc
+ *
+ * Load syndrome fragment registers with calculated ecc in reverse order.
+ */
+static void elm_load_syndrome(struct elm_info *info,
+ struct elm_errorvec *err_vec, u8 *ecc)
+{
+ int i, offset;
+ u32 val;
+
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ elm_configure_page_mode(info, i, true);
+ offset = ELM_SYNDROME_FRAGMENT_0 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+
+ /* BCH8 */
+ if (info->bch_type) {
+
+ /* syndrome fragment 0 = ecc[9-12B] */
+ val = cpu_to_be32(*(u32 *) &ecc[9]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[5-8B] */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[5]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 2 = ecc[1-4B] */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[1]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 3 = ecc[0B] */
+ offset += 4;
+ val = ecc[0];
+ elm_write_reg(info, offset, val);
+ } else {
+ /* syndrome fragment 0 = ecc[20-52b] bits */
+ val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
+ ((ecc[2] & 0xf) << 28);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[0-20b] bits */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
+ elm_write_reg(info, offset, val);
+ }
+ }
+
+ /* Update ecc pointer with ecc byte size */
+ ecc += info->bch_type ? BCH8_SIZE : BCH4_SIZE;
+ }
+}
+
+/**
+ * elm_start_processing - start elm syndrome processing
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * Set the syndrome valid bit for the syndrome fragment registers that
+ * have been loaded. This enables the ELM module to start processing
+ * the syndrome vectors.
+ */
+static void elm_start_processing(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, offset;
+ u32 reg_val;
+
+ /*
+ * Mark the syndrome vectors valid so that the ELM module
+ * processes the vectors for which an error was reported
+ */
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ if (err_vec[i].error_reported) {
+ offset = ELM_SYNDROME_FRAGMENT_6 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+ reg_val |= ELM_SYNDROME_VALID;
+ elm_write_reg(info, offset, reg_val);
+ }
+ }
+}
+
+/**
+ * elm_error_correction - locate correctable error position
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * On completion of processing, the ELM error location status register
+ * is updated with correctable/uncorrectable error information. For
+ * correctable errors, the error count is read from the ELM location
+ * status register and the error positions from the ELM error location
+ * registers.
+ */
+static void elm_error_correction(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, j, errors = 0;
+ int offset;
+ u32 reg_val;
+
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+
+ /* Check correctable error or not */
+ if (reg_val & ECC_CORRECTABLE_MASK) {
+ offset = ELM_ERROR_LOCATION_0 +
+ ERROR_LOCATION_SIZE * i;
+
+ /* Read count of correctable errors */
+ err_vec[i].error_count = reg_val &
+ ECC_NB_ERRORS_MASK;
+
+ /* Update the error locations in error vector */
+ for (j = 0; j < err_vec[i].error_count; j++) {
+
+ reg_val = elm_read_reg(info, offset);
+ err_vec[i].error_loc[j] = reg_val &
+ ECC_ERROR_LOCATION_MASK;
+
+ /* Update error location register */
+ offset += 4;
+ }
+
+ errors += err_vec[i].error_count;
+ } else {
+ err_vec[i].error_uncorrectable = true;
+ }
+
+ /* Clearing interrupts for processed error vectors */
+ elm_write_reg(info, ELM_IRQSTATUS, BIT(i));
+
+ /* Disable page mode */
+ elm_configure_page_mode(info, i, false);
+ }
+ }
+}
+
+/**
+ * elm_decode_bch_error_page - Locate error position
+ * @dev: device pointer
+ * @ecc_calc: calculated ECC bytes from GPMC
+ * @err_vec: elm error vectors
+ *
+ * Called with one or more vectors that reported an error; such vectors
+ * are flagged in err_vec[].error_reported
+ */
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+ struct elm_errorvec *err_vec)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ u32 reg_val;
+
+ /* Enable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+ elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
+ elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);
+
+ /* Load valid ecc byte to syndrome fragment register */
+ elm_load_syndrome(info, err_vec, ecc_calc);
+
+ /* Enable syndrome processing for which syndrome fragment is updated */
+ elm_start_processing(info, err_vec);
+
+ /* Wait for ELM module to finish locating error correction */
+ wait_for_completion(&info->elm_completion);
+
+ /* Disable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQENABLE);
+ elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
+ elm_error_correction(info, err_vec);
+}
+EXPORT_SYMBOL(elm_decode_bch_error_page);
+
+static irqreturn_t elm_isr(int this_irq, void *dev_id)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_id;
+
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+
+ /* All error vectors processed */
+ if (reg_val & INTR_STATUS_PAGE_VALID) {
+ elm_write_reg(info, ELM_IRQSTATUS,
+ reg_val & INTR_STATUS_PAGE_VALID);
+ complete(&info->elm_completion);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int elm_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res, *irq;
+ struct elm_info *info;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ info->dev = &pdev->dev;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource defined\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ info->elm_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!info->elm_base)
+ return -EADDRNOTAVAIL;
+
+ ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
+ pdev->name, info);
+ if (ret) {
+ dev_err(&pdev->dev, "failure requesting irq %i\n", irq->start);
+ return ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ if (pm_runtime_get_sync(&pdev->dev)) {
+ ret = -EINVAL;
+ pm_runtime_disable(&pdev->dev);
+ dev_err(&pdev->dev, "can't enable clock\n");
+ return ret;
+ }
+
+ init_completion(&info->elm_completion);
+ INIT_LIST_HEAD(&info->list);
+ list_add(&info->list, &elm_devices);
+ platform_set_drvdata(pdev, info);
+ return ret;
+}
+
+static int elm_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id elm_of_match[] = {
+ { .compatible = "ti,am3352-elm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, elm_of_match);
+#endif
+
+static struct platform_driver elm_driver = {
+ .driver = {
+ .name = "elm",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(elm_of_match),
+ },
+ .probe = elm_probe,
+ .remove = elm_remove,
+};
+
+module_platform_driver(elm_driver);
+
+MODULE_DESCRIPTION("ELM driver for BCH error correction");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_ALIAS("platform: elm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 4eeeb2d7f6ea..5b6b0728be21 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -565,6 +565,96 @@ time_out:
return ret;
}
+static int m25p80_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct m25p *flash = mtd_to_m25p(mtd);
+ uint32_t offset = ofs;
+ uint8_t status_old, status_new;
+ int res = 0;
+
+ mutex_lock(&flash->lock);
+ /* Wait until finished previous command */
+ if (wait_till_ready(flash)) {
+ res = 1;
+ goto err;
+ }
+
+ status_old = read_sr(flash);
+
+ if (offset < flash->mtd.size-(flash->mtd.size/2))
+ status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
+ else if (offset < flash->mtd.size-(flash->mtd.size/4))
+ status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+ else if (offset < flash->mtd.size-(flash->mtd.size/8))
+ status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+ else if (offset < flash->mtd.size-(flash->mtd.size/16))
+ status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
+ else if (offset < flash->mtd.size-(flash->mtd.size/32))
+ status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+ else if (offset < flash->mtd.size-(flash->mtd.size/64))
+ status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
+ else
+ status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
+
+ /* Only modify protection if it will not unlock other areas */
+ if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) >
+ (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
+ write_enable(flash);
+ if (write_sr(flash, status_new) < 0) {
+ res = 1;
+ goto err;
+ }
+ }
+
+err: mutex_unlock(&flash->lock);
+ return res;
+}
+
+static int m25p80_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct m25p *flash = mtd_to_m25p(mtd);
+ uint32_t offset = ofs;
+ uint8_t status_old, status_new;
+ int res = 0;
+
+ mutex_lock(&flash->lock);
+ /* Wait until finished previous command */
+ if (wait_till_ready(flash)) {
+ res = 1;
+ goto err;
+ }
+
+ status_old = read_sr(flash);
+
+ if (offset+len > flash->mtd.size-(flash->mtd.size/64))
+ status_new = status_old & ~(SR_BP2|SR_BP1|SR_BP0);
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/32))
+ status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/16))
+ status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/8))
+ status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/4))
+ status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/2))
+ status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+ else
+ status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) <
+ (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
+ write_enable(flash);
+ if (write_sr(flash, status_new) < 0) {
+ res = 1;
+ goto err;
+ }
+ }
+
+err: mutex_unlock(&flash->lock);
+ return res;
+}
+
/****************************************************************************/
/*
@@ -642,6 +732,10 @@ static const struct spi_device_id m25p_ids[] = {
/* Everspin */
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
+ /* GigaDevice */
+ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
+
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
@@ -899,6 +993,12 @@ static int m25p_probe(struct spi_device *spi)
flash->mtd._erase = m25p80_erase;
flash->mtd._read = m25p80_read;
+ /* flash protection support for STmicro chips */
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
+ flash->mtd._lock = m25p80_lock;
+ flash->mtd._unlock = m25p80_unlock;
+ }
+
/* sst flash chips use AAI word program */
if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
flash->mtd._write = sst_write;
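A worked example of the BP-bit selection, assuming a hypothetical 4 MiB STmicro part: m25p80_lock(mtd, 0x380000, 0x80000) computes offset == size - size/8, fails the "offset < size - size/8" test, passes "offset < size - size/16", and so sets BP2 only, protecting the top 1/8 of the chip (0x380000-0x3fffff). Locking anything in the lower half would require BP2|BP1|BP0, i.e. the whole device; and because both helpers refuse to move the BP bits in the direction that would affect other areas, lock only ever widens and unlock only ever narrows the protected range.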
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 62ba82c396c2..3ed17c4d4358 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -429,7 +429,7 @@ config MTD_GPIO_ADDR
config MTD_UCLINUX
bool "Generic uClinux RAM/ROM filesystem support"
- depends on MTD_RAM=y && (!MMU || COLDFIRE)
+ depends on (MTD_RAM=y || MTD_ROM=y) && (!MMU || COLDFIRE)
help
Map driver to support image based filesystems for uClinux.
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 7901d72c9242..363939dfad05 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -68,9 +68,6 @@ static int of_flash_remove(struct platform_device *dev)
kfree(info->list[i].res);
}
}
-
- kfree(info);
-
return 0;
}
@@ -199,8 +196,9 @@ static int of_flash_probe(struct platform_device *dev)
map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
err = -ENOMEM;
- info = kzalloc(sizeof(struct of_flash) +
- sizeof(struct of_flash_list) * count, GFP_KERNEL);
+ info = devm_kzalloc(&dev->dev,
+ sizeof(struct of_flash) +
+ sizeof(struct of_flash_list) * count, GFP_KERNEL);
if (!info)
goto err_flash_remove;
@@ -241,6 +239,7 @@ static int of_flash_probe(struct platform_device *dev)
info->list[i].map.phys = res.start;
info->list[i].map.size = res_size;
info->list[i].map.bankwidth = be32_to_cpup(width);
+ info->list[i].map.device_node = dp;
err = -ENOMEM;
info->list[i].map.virt = ioremap(info->list[i].map.phys,
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 299bf88a6f41..c1af83db5202 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -23,12 +23,26 @@
/****************************************************************************/
+#ifdef CONFIG_MTD_ROM
+#define MAP_NAME "rom"
+#else
+#define MAP_NAME "ram"
+#endif
+
+/*
+ * Blackfin uses uclinux_ram_map during startup, so it must not be static.
+ * Provide a dummy declaration to make sparse happy.
+ */
+extern struct map_info uclinux_ram_map;
+
struct map_info uclinux_ram_map = {
- .name = "RAM",
- .phys = (unsigned long)__bss_stop,
+ .name = MAP_NAME,
.size = 0,
};
+static unsigned long physaddr = -1;
+module_param(physaddr, ulong, S_IRUGO);
+
static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
@@ -60,11 +74,17 @@ static int __init uclinux_mtd_init(void)
struct map_info *mapp;
mapp = &uclinux_ram_map;
+
+ if (physaddr == -1)
+ mapp->phys = (resource_size_t)__bss_stop;
+ else
+ mapp->phys = physaddr;
+
if (!mapp->size)
mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
mapp->bankwidth = 4;
- printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
+ printk("uclinux[mtd]: probe address=0x%x size=0x%x\n",
(int) mapp->phys, (int) mapp->size);
/*
@@ -82,7 +102,7 @@ static int __init uclinux_mtd_init(void)
simple_map_init(mapp);
- mtd = do_map_probe("map_ram", mapp);
+ mtd = do_map_probe("map_" MAP_NAME, mapp);
if (!mtd) {
printk("uclinux[mtd]: failed to find a mapping?\n");
return(-ENXIO);
@@ -118,6 +138,6 @@ module_exit(uclinux_mtd_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
-MODULE_DESCRIPTION("Generic RAM based MTD for uClinux");
+MODULE_DESCRIPTION("Generic MTD for uClinux");
/****************************************************************************/
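With the new physaddr parameter the image no longer has to sit right after __bss_stop; a hypothetical invocation is uclinux.physaddr=0x20100000 on the kernel command line (or modprobe uclinux physaddr=0x20100000 when built as a module), after which the probe maps and sizes the filesystem image at that address instead.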
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index ec794a72975d..61d5f56473e1 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -349,13 +349,8 @@ int add_mtd_device(struct mtd_info *mtd)
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
- do {
- if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
- goto fail_locked;
- error = idr_get_new(&mtd_idr, mtd, &i);
- } while (error == -EAGAIN);
-
- if (error)
+ i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+ if (i < 0)
goto fail_locked;
mtd->index = i;
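For reference, idr_alloc() (added in 3.9) returns the allocated ID directly or a negative errno, so the old preload/retry dance disappears; a minimal sketch of the general pattern, with some_idr and ptr as placeholder names:

	int id;

	/* start = 0, end = 0 means "any non-negative ID"; can fail
	 * with -ENOMEM or -ENOSPC */
	id = idr_alloc(&some_idr, ptr, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;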
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index c516a9408087..ffcbcca2fd2d 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -101,6 +101,8 @@ struct atmel_nand_host {
u8 pmecc_corr_cap;
u16 pmecc_sector_size;
u32 pmecc_lookup_table_offset;
+ u32 pmecc_lookup_table_offset_512;
+ u32 pmecc_lookup_table_offset_1024;
int pmecc_bytes_per_sector;
int pmecc_sector_number;
@@ -908,6 +910,84 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
}
+/*
+ * Get the ECC requirement from the ONFI parameters. Returns -1 if the
+ * ONFI parameters are not supported, 0 on success.
+ */
+static int get_onfi_ecc_param(struct nand_chip *chip,
+ int *ecc_bits, int *sector_size)
+{
+ *ecc_bits = *sector_size = 0;
+
+ if (chip->onfi_params.ecc_bits == 0xff)
+ /* TODO: the sector_size and ecc_bits need to be found in the
+ * extended ECC parameters; we don't support that yet.
+ */
+ return -1;
+
+ *ecc_bits = chip->onfi_params.ecc_bits;
+
+ /* The default sector size (ecc codeword size) is 512 */
+ *sector_size = 512;
+
+ return 0;
+}
+
+/*
+ * Derive the ECC requirement from the ONFI parameters.
+ * If pmecc-cap and pmecc-sector-size are not specified in the DTS, this
+ * function sets them according to the ONFI ECC requirement. Otherwise the
+ * DTS values are used.
+ * Returns 0 on success, otherwise an error code.
+ */
+static int pmecc_choose_ecc(struct atmel_nand_host *host,
+ int *cap, int *sector_size)
+{
+ /* Get ECC requirement from ONFI parameters */
+ *cap = *sector_size = 0;
+ if (host->nand_chip.onfi_version) {
+ if (!get_onfi_ecc_param(&host->nand_chip, cap, sector_size))
+ dev_info(host->dev, "ONFI params, minimum required ECC: %d bits in %d bytes\n",
+ *cap, *sector_size);
+ else
+ dev_info(host->dev, "NAND chip ECC reqirement is in Extended ONFI parameter, we don't support yet.\n");
+ } else {
+ dev_info(host->dev, "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes");
+ }
+ if (*cap == 0 && *sector_size == 0) {
+ *cap = 2;
+ *sector_size = 512;
+ }
+
+ /* If dts file doesn't specify then use the one in ONFI parameters */
+ if (host->pmecc_corr_cap == 0) {
+ /* use the closest supported ECC strength (round up) */
+ if (*cap <= 2)
+ host->pmecc_corr_cap = 2;
+ else if (*cap <= 4)
+ host->pmecc_corr_cap = 4;
+ else if (*cap < 8)
+ host->pmecc_corr_cap = 8;
+ else if (*cap < 12)
+ host->pmecc_corr_cap = 12;
+ else if (*cap < 24)
+ host->pmecc_corr_cap = 24;
+ else
+ return -EINVAL;
+ }
+ if (host->pmecc_sector_size == 0) {
+ /* use the closest supported sector size (round down) */
+ if (*sector_size >= 1024)
+ host->pmecc_sector_size = 1024;
+ else if (*sector_size >= 512)
+ host->pmecc_sector_size = 512;
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
struct atmel_nand_host *host)
{
@@ -916,8 +996,22 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
struct resource *regs, *regs_pmerr, *regs_rom;
int cap, sector_size, err_no;
+ err_no = pmecc_choose_ecc(host, &cap, &sector_size);
+ if (err_no) {
+ dev_err(host->dev, "The NAND flash's ECC requirement are not support!");
+ return err_no;
+ }
+
+ if (cap != host->pmecc_corr_cap ||
+ sector_size != host->pmecc_sector_size)
+ dev_info(host->dev, "WARNING: Be Caution! Using different PMECC parameters from Nand ONFI ECC reqirement.\n");
+
cap = host->pmecc_corr_cap;
sector_size = host->pmecc_sector_size;
+ host->pmecc_lookup_table_offset = (sector_size == 512) ?
+ host->pmecc_lookup_table_offset_512 :
+ host->pmecc_lookup_table_offset_1024;
+
dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
cap, sector_size);
@@ -1215,7 +1309,7 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
static int atmel_of_init_port(struct atmel_nand_host *host,
struct device_node *np)
{
- u32 val, table_offset;
+ u32 val;
u32 offset[2];
int ecc_mode;
struct atmel_nand_data *board = &host->board;
@@ -1259,42 +1353,41 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
/* use PMECC, get correction capability, sector size and lookup
* table offset.
+ * If correction bits and sector size are not specified, then find
+ * them from NAND ONFI parameters.
*/
- if (of_property_read_u32(np, "atmel,pmecc-cap", &val) != 0) {
- dev_err(host->dev, "Cannot decide PMECC Capability\n");
- return -EINVAL;
- } else if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
- (val != 24)) {
- dev_err(host->dev,
- "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
- val);
- return -EINVAL;
+ if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
+ if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
+ (val != 24)) {
+ dev_err(host->dev,
+ "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
+ val);
+ return -EINVAL;
+ }
+ host->pmecc_corr_cap = (u8)val;
}
- host->pmecc_corr_cap = (u8)val;
- if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) != 0) {
- dev_err(host->dev, "Cannot decide PMECC Sector Size\n");
- return -EINVAL;
- } else if ((val != 512) && (val != 1024)) {
- dev_err(host->dev,
- "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
- val);
- return -EINVAL;
+ if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
+ if ((val != 512) && (val != 1024)) {
+ dev_err(host->dev,
+ "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
+ val);
+ return -EINVAL;
+ }
+ host->pmecc_sector_size = (u16)val;
}
- host->pmecc_sector_size = (u16)val;
if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
offset, 2) != 0) {
dev_err(host->dev, "Cannot get PMECC lookup table offset\n");
return -EINVAL;
}
- table_offset = host->pmecc_sector_size == 512 ? offset[0] : offset[1];
-
- if (!table_offset) {
+ if (!offset[0] && !offset[1]) {
dev_err(host->dev, "Invalid PMECC lookup table offset\n");
return -EINVAL;
}
- host->pmecc_lookup_table_offset = table_offset;
+ host->pmecc_lookup_table_offset_512 = offset[0];
+ host->pmecc_lookup_table_offset_1024 = offset[1];
return 0;
}
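As a concrete example of the new fallback (hypothetical chip): an ONFI part reporting ecc_bits = 4, combined with a DTS that omits atmel,pmecc-cap and atmel,pmecc-sector-size, ends up with pmecc_corr_cap = 4 and pmecc_sector_size = 512 (the default ONFI codeword), so the first cell of atmel,pmecc-lookup-table-offset is used; a non-ONFI chip falls back to 2 bits per 512 bytes, and a DTS that does specify the properties still wins, with only the informational warning above printed if it disagrees with ONFI.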
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
index 0bdb2ce4da75..c005a62330b1 100644
--- a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
+++ b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
@@ -1,6 +1,10 @@
#ifndef __BCM47XXNFLASH_H
#define __BCM47XXNFLASH_H
+#ifndef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#endif
+
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
index 8363a9a5fa3f..7bae569fdc79 100644
--- a/drivers/mtd/nand/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -9,14 +9,14 @@
*
*/
+#include "bcm47xxnflash.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
-#include "bcm47xxnflash.h"
-
MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rafał Miłecki");
@@ -77,6 +77,7 @@ static int bcm47xxnflash_remove(struct platform_device *pdev)
}
static struct platform_driver bcm47xxnflash_driver = {
+ .probe = bcm47xxnflash_probe,
.remove = bcm47xxnflash_remove,
.driver = {
.name = "bcma_nflash",
@@ -88,13 +89,10 @@ static int __init bcm47xxnflash_init(void)
{
int err;
- /*
- * Platform device "bcma_nflash" exists on SoCs and is registered very
- * early, it won't be added during runtime (use platform_driver_probe).
- */
- err = platform_driver_probe(&bcm47xxnflash_driver, bcm47xxnflash_probe);
+ err = platform_driver_register(&bcm47xxnflash_driver);
if (err)
- pr_err("Failed to register serial flash driver: %d\n", err);
+ pr_err("Failed to register bcm47xx nand flash driver: %d\n",
+ err);
return err;
}
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
index 595de4012e71..b2ab373c9eef 100644
--- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -9,13 +9,13 @@
*
*/
+#include "bcm47xxnflash.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/bcma/bcma.h>
-#include "bcm47xxnflash.h"
-
/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 have
* shown ~1000 retries as a maximum. */
#define NFLASH_READY_RETRIES 10000
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index feae55c7b880..94e17af8e450 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -606,7 +606,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
if (pdev->id < 0 || pdev->id > 3)
return -ENODEV;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "unable to allocate memory\n");
ret = -ENOMEM;
@@ -623,11 +623,11 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_nomem;
}
- vaddr = ioremap(res1->start, resource_size(res1));
- base = ioremap(res2->start, resource_size(res2));
+ vaddr = devm_request_and_ioremap(&pdev->dev, res1);
+ base = devm_request_and_ioremap(&pdev->dev, res2);
if (!vaddr || !base) {
dev_err(&pdev->dev, "ioremap failed\n");
- ret = -EINVAL;
+ ret = -EADDRNOTAVAIL;
goto err_ioremap;
}
@@ -717,7 +717,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
}
info->chip.ecc.mode = ecc_mode;
- info->clk = clk_get(&pdev->dev, "aemif");
+ info->clk = devm_clk_get(&pdev->dev, "aemif");
if (IS_ERR(info->clk)) {
ret = PTR_ERR(info->clk);
dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
@@ -845,8 +845,6 @@ err_timing:
clk_disable_unprepare(info->clk);
err_clk_enable:
- clk_put(info->clk);
-
spin_lock_irq(&davinci_nand_lock);
if (ecc_mode == NAND_ECC_HW_SYNDROME)
ecc4_busy = false;
@@ -855,13 +853,7 @@ err_clk_enable:
err_ecc:
err_clk:
err_ioremap:
- if (base)
- iounmap(base);
- if (vaddr)
- iounmap(vaddr);
-
err_nomem:
- kfree(info);
return ret;
}
@@ -874,15 +866,9 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
- iounmap(info->base);
- iounmap(info->vaddr);
-
nand_release(&info->mtd);
clk_disable_unprepare(info->clk);
- clk_put(info->clk);
-
- kfree(info);
return 0;
}
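The conversions above lean on the managed-resource API: memory, mappings and clocks obtained with devm_kzalloc(), devm_request_and_ioremap() and devm_clk_get() are released automatically when probe fails or the device is unbound, which is what lets the explicit kfree()/iounmap()/clk_put() calls go away. A minimal sketch of the pattern (names illustrative only):

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)
		return -EADDRNOTAVAIL;	/* mapping auto-released, no iounmap() needed */

	return 0;
}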
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index ad6222627fed..f1f7f12ab501 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -176,8 +176,8 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
ifc_nand_ctrl->page = page_addr;
/* Program ROW0/COL0 */
- out_be32(&ifc->ifc_nand.row0, page_addr);
- out_be32(&ifc->ifc_nand.col0, (oob ? IFC_NAND_COL_MS : 0) | column);
+ iowrite32be(page_addr, &ifc->ifc_nand.row0);
+ iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
buf_num = page_addr & priv->bufnum_mask;
@@ -239,18 +239,19 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int i;
/* set the chip select for NAND Transaction */
- out_be32(&ifc->ifc_nand.nand_csel, priv->bank << IFC_NAND_CSEL_SHIFT);
+ iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
+ &ifc->ifc_nand.nand_csel);
dev_vdbg(priv->dev,
"%s: fir0=%08x fcr0=%08x\n",
__func__,
- in_be32(&ifc->ifc_nand.nand_fir0),
- in_be32(&ifc->ifc_nand.nand_fcr0));
+ ioread32be(&ifc->ifc_nand.nand_fir0),
+ ioread32be(&ifc->ifc_nand.nand_fcr0));
ctrl->nand_stat = 0;
/* start read/write seq */
- out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+ iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -273,7 +274,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int sector_end = sector + chip->ecc.steps - 1;
for (i = sector / 4; i <= sector_end / 4; i++)
- eccstat[i] = in_be32(&ifc->ifc_nand.nand_eccstat[i]);
+ eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
for (i = sector; i <= sector_end; i++) {
errors = check_read_ecc(mtd, ctrl, eccstat, i);
@@ -313,31 +314,33 @@ static void fsl_ifc_do_read(struct nand_chip *chip,
/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
if (mtd->writesize > 512) {
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
-
- out_be32(&ifc->ifc_nand.nand_fcr0,
- (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
- (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT));
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+
+ iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
} else {
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
if (oob)
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT);
+ iowrite32be(NAND_CMD_READOOB <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
else
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT);
+ iowrite32be(NAND_CMD_READ0 <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
}
}
@@ -357,7 +360,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
switch (command) {
/* READ0 read the entire buffer to use hardware ECC. */
case NAND_CMD_READ0:
- out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, page_addr, 0);
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -372,7 +375,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* READOOB reads only the OOB because no ECC is performed. */
case NAND_CMD_READOOB:
- out_be32(&ifc->ifc_nand.nand_fbcr, mtd->oobsize - column);
+ iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, column, page_addr, 1);
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -388,19 +391,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
if (command == NAND_CMD_PARAM)
timing = IFC_FIR_OP_RBCD;
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (timing << IFC_NAND_FIR0_OP2_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fcr0,
- command << IFC_NAND_FCR0_CMD0_SHIFT);
- out_be32(&ifc->ifc_nand.row3, column);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (timing << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(column, &ifc->ifc_nand.row3);
/*
* although currently it's 8 bytes for READID, we always read
* the maximum 256 bytes(for PARAM)
*/
- out_be32(&ifc->ifc_nand.nand_fbcr, 256);
+ iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
ifc_nand_ctrl->read_bytes = 256;
set_addr(mtd, 0, 0, 0);
@@ -415,16 +418,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* ERASE2 uses the block and page address from ERASE1 */
case NAND_CMD_ERASE2:
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT));
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
- out_be32(&ifc->ifc_nand.nand_fcr0,
- (NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
- (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT));
+ iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
- out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
ifc_nand_ctrl->read_bytes = 0;
fsl_ifc_run_command(mtd);
return;
@@ -440,26 +443,28 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT));
+ iowrite32be(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
} else {
nand_fcr0 = ((NAND_CMD_PAGEPROG <<
IFC_NAND_FCR0_CMD1_SHIFT) |
(NAND_CMD_SEQIN <<
IFC_NAND_FCR0_CMD2_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fir1,
- (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT));
+ iowrite32be(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT,
+ &ifc->ifc_nand.nand_fir1);
if (column >= mtd->writesize)
nand_fcr0 |=
@@ -474,7 +479,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
column -= mtd->writesize;
ifc_nand_ctrl->oob = 1;
}
- out_be32(&ifc->ifc_nand.nand_fcr0, nand_fcr0);
+ iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
return;
}
@@ -482,10 +487,11 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
case NAND_CMD_PAGEPROG: {
if (ifc_nand_ctrl->oob) {
- out_be32(&ifc->ifc_nand.nand_fbcr,
- ifc_nand_ctrl->index - ifc_nand_ctrl->column);
+ iowrite32be(ifc_nand_ctrl->index -
+ ifc_nand_ctrl->column,
+ &ifc->ifc_nand.nand_fbcr);
} else {
- out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
}
fsl_ifc_run_command(mtd);
@@ -493,12 +499,12 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
}
case NAND_CMD_STATUS:
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT);
- out_be32(&ifc->ifc_nand.nand_fbcr, 1);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, 0, 0);
ifc_nand_ctrl->read_bytes = 1;
@@ -512,10 +518,10 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
return;
case NAND_CMD_RESET:
- out_be32(&ifc->ifc_nand.nand_fir0,
- IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT);
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT);
+ iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
fsl_ifc_run_command(mtd);
return;
@@ -639,18 +645,18 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
u32 nand_fsr;
/* Use READ_STATUS command, but wait for the device to be ready */
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fcr0, NAND_CMD_STATUS <<
- IFC_NAND_FCR0_CMD0_SHIFT);
- out_be32(&ifc->ifc_nand.nand_fbcr, 1);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, 0, 0);
ifc_nand_ctrl->read_bytes = 1;
fsl_ifc_run_command(mtd);
- nand_fsr = in_be32(&ifc->ifc_nand.nand_fsr);
+ nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
/*
* The chip always seems to report that it is
@@ -744,34 +750,34 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
uint32_t cs = priv->bank;
/* Save CSOR and CSOR_ext */
- csor = in_be32(&ifc->csor_cs[cs].csor);
- csor_ext = in_be32(&ifc->csor_cs[cs].csor_ext);
+ csor = ioread32be(&ifc->csor_cs[cs].csor);
+ csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
/* chage PageSize 8K and SpareSize 1K*/
csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
- out_be32(&ifc->csor_cs[cs].csor, csor_8k);
- out_be32(&ifc->csor_cs[cs].csor_ext, 0x0000400);
+ iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
+ iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
/* READID */
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
- out_be32(&ifc->ifc_nand.row3, 0x0);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(0x0, &ifc->ifc_nand.row3);
- out_be32(&ifc->ifc_nand.nand_fbcr, 0x0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
/* Program ROW0/COL0 */
- out_be32(&ifc->ifc_nand.row0, 0x0);
- out_be32(&ifc->ifc_nand.col0, 0x0);
+ iowrite32be(0x0, &ifc->ifc_nand.row0);
+ iowrite32be(0x0, &ifc->ifc_nand.col0);
/* set the chip select for NAND Transaction */
- out_be32(&ifc->ifc_nand.nand_csel, cs << IFC_NAND_CSEL_SHIFT);
+ iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
/* start read seq */
- out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+ iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -781,8 +787,8 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
/* Restore CSOR and CSOR_ext */
- out_be32(&ifc->csor_cs[cs].csor, csor);
- out_be32(&ifc->csor_cs[cs].csor_ext, csor_ext);
+ iowrite32be(csor, &ifc->csor_cs[cs].csor);
+ iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
}
static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
@@ -799,7 +805,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
/* fill in nand_chip structure */
/* set up function call table */
- if ((in_be32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+ if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
chip->read_byte = fsl_ifc_read_byte16;
else
chip->read_byte = fsl_ifc_read_byte;
@@ -813,13 +819,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
- out_be32(&ifc->ifc_nand.ncfgr, 0x0);
+ iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
/* set up nand options */
chip->bbt_options = NAND_BBT_USE_FLASH;
- if (in_be32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+ if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
chip->read_byte = fsl_ifc_read_byte16;
chip->options |= NAND_BUSWIDTH_16;
} else {
@@ -832,7 +838,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.read_page = fsl_ifc_read_page;
chip->ecc.write_page = fsl_ifc_write_page;
- csor = in_be32(&ifc->csor_cs[priv->bank].csor);
+ csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
/* Hardware generates ECC per 512 Bytes */
chip->ecc.size = 512;
@@ -884,7 +890,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.mode = NAND_ECC_SOFT;
}
- ver = in_be32(&ifc->ifc_rev);
+ ver = ioread32be(&ifc->ifc_rev);
if (ver == FSL_IFC_V1_1_0)
fsl_ifc_sram_init(priv);
@@ -910,7 +916,7 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
phys_addr_t addr)
{
- u32 cspr = in_be32(&ifc->cspr_cs[bank].cspr);
+ u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
if (!(cspr & CSPR_V))
return 0;
@@ -997,17 +1003,16 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
dev_set_drvdata(priv->dev, priv);
- out_be32(&ifc->ifc_nand.nand_evter_en,
- IFC_NAND_EVTER_EN_OPC_EN |
- IFC_NAND_EVTER_EN_FTOER_EN |
- IFC_NAND_EVTER_EN_WPER_EN);
+ iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
+ IFC_NAND_EVTER_EN_FTOER_EN |
+ IFC_NAND_EVTER_EN_WPER_EN,
+ &ifc->ifc_nand.nand_evter_en);
/* enable NAND Machine Interrupts */
- out_be32(&ifc->ifc_nand.nand_evter_intr_en,
- IFC_NAND_EVTER_INTR_OPCIR_EN |
- IFC_NAND_EVTER_INTR_FTOERIR_EN |
- IFC_NAND_EVTER_INTR_WPERIR_EN);
-
+ iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
+ IFC_NAND_EVTER_INTR_FTOERIR_EN |
+ IFC_NAND_EVTER_INTR_WPERIR_EN,
+ &ifc->ifc_nand.nand_evter_intr_en);
priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
if (!priv->mtd.name) {
ret = -ENOMEM;
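
The fsl_ifc_nand hunks above all perform the same mechanical conversion from the PowerPC-only out_be32()/in_be32() accessors to the arch-neutral iowrite32be()/ioread32be() helpers; note that the value/address argument order flips. A minimal sketch of the pattern (register names here are illustrative, not the driver's):

#include <linux/io.h>

/* Old, PowerPC-specific style:
 *	out_be32(&regs->ctrl, val);
 *	val = in_be32(&regs->stat);
 * New, portable big-endian MMIO style (value first, address second):
 */
static void sketch_write_ctrl(void __iomem *ctrl, u32 val)
{
	iowrite32be(val, ctrl);
}

static u32 sketch_read_stat(void __iomem *stat)
{
	return ioread32be(stat);
}
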
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index c543cc09f193..05ba3f0c2d19 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -573,23 +573,22 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_dev = chan->device;
dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+ flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
+
if (direction == DMA_TO_DEVICE) {
dma_src = dma_addr;
dma_dst = host->data_pa;
- flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
} else {
dma_src = host->data_pa;
dma_dst = dma_addr;
- flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
}
tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
len, flags);
-
if (!tx) {
dev_err(host->dev, "device_prep_dma_memcpy error\n");
- dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
- return -EIO;
+ ret = -EIO;
+ goto unmap_dma;
}
tx->callback = dma_complete;
@@ -599,7 +598,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
ret = dma_submit_error(cookie);
if (ret) {
dev_err(host->dev, "dma_submit_error %d\n", cookie);
- return ret;
+ goto unmap_dma;
}
dma_async_issue_pending(chan);
@@ -610,10 +609,17 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
if (ret <= 0) {
chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
dev_err(host->dev, "wait_for_completion_timeout\n");
- return ret ? ret : -ETIMEDOUT;
+ if (!ret)
+ ret = -ETIMEDOUT;
+ goto unmap_dma;
}
- return 0;
+ ret = 0;
+
+unmap_dma:
+ dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+
+ return ret;
}
/*
@@ -1211,6 +1217,7 @@ static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
#ifdef CONFIG_OF
static const struct of_device_id fsmc_nand_id_table[] = {
{ .compatible = "st,spear600-fsmc-nand" },
+ { .compatible = "stericsson,fsmc-nand" },
{}
};
MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
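
The dma_xfer() rework above drops the per-direction DMA_COMPL_*_UNMAP_SINGLE flags, tells the DMA engine to skip unmapping entirely, and funnels every exit through one unmap label. A reduced sketch of that error-unwind shape, with do_transfer() and wait_done() as hypothetical stand-ins for the prep/submit/wait sequence:

#include <linux/dma-mapping.h>

/* hypothetical stand-ins, only here to make the sketch self-contained */
static int do_transfer(struct device *dev, dma_addr_t addr, size_t len)
{
	return 0;
}

static int wait_done(struct device *dev)
{
	return 0;
}

static int xfer_sketch(struct device *dev, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	dma_addr_t addr;
	int ret;

	addr = dma_map_single(dev, buf, len, dir);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	ret = do_transfer(dev, addr, len);
	if (ret)
		goto unmap;

	ret = wait_done(dev);
unmap:
	/* single unmap point instead of one per failure branch */
	dma_unmap_single(dev, addr, len, dir);
	return ret;
}
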
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
index a0924515c396..588f5374047c 100644
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -61,6 +61,16 @@
& BM_BCH_FLASH0LAYOUT0_ECC0) \
)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT0_GF(v, x) \
+ ((GPMI_IS_MX6Q(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14) \
+ : 0 \
+ )
+
#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
(0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
@@ -93,6 +103,16 @@
& BM_BCH_FLASH0LAYOUT1_ECCN) \
)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT1_GF(v, x) \
+ ((GPMI_IS_MX6Q(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14) \
+ : 0 \
+ )
+
#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
(0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
@@ -103,4 +123,6 @@
? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
: ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
)
+
+#define HW_BCH_VERSION 0x00000160
#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index d84699c7968e..4f8857fa48a7 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -208,6 +208,11 @@ void gpmi_dump_info(struct gpmi_nand_data *this)
}
/* start to print out the BCH info */
+ pr_err("Show BCH registers :\n");
+ for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
+ reg = readl(r->bch_regs + i * 0x10);
+ pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
pr_err("BCH Geometry :\n");
pr_err("GF length : %u\n", geo->gf_len);
pr_err("ECC Strength : %u\n", geo->ecc_strength);
@@ -232,6 +237,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
unsigned int metadata_size;
unsigned int ecc_strength;
unsigned int page_size;
+ unsigned int gf_len;
int ret;
if (common_nfc_set_geometry(this))
@@ -242,6 +248,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
metadata_size = bch_geo->metadata_size;
ecc_strength = bch_geo->ecc_strength >> 1;
page_size = bch_geo->page_size;
+ gf_len = bch_geo->gf_len;
ret = gpmi_enable_clk(this);
if (ret)
@@ -263,11 +270,13 @@ int bch_set_geometry(struct gpmi_nand_data *this)
writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
| BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
| BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
| BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
r->bch_regs + HW_BCH_FLASH0LAYOUT0);
writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
| BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
| BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
r->bch_regs + HW_BCH_FLASH0LAYOUT1);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index e9b1c47e3cf9..717881a3d1b8 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -94,6 +94,25 @@ static inline int get_ecc_strength(struct gpmi_nand_data *this)
return round_down(ecc_strength, 2);
}
+static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ /* Do the sanity check. */
+ if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
+ /* The mx23/mx28 only support the GF13. */
+ if (geo->gf_len == 14)
+ return false;
+
+ if (geo->ecc_strength > MXS_ECC_STRENGTH_MAX)
+ return false;
+ } else if (GPMI_IS_MX6Q(this)) {
+ if (geo->ecc_strength > MX6_ECC_STRENGTH_MAX)
+ return false;
+ }
+ return true;
+}
+
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
@@ -112,17 +131,24 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
/* The default for the length of Galois Field. */
geo->gf_len = 13;
- /* The default for chunk size. There is no oobsize greater then 512. */
+ /* The default for chunk size. */
geo->ecc_chunk_size = 512;
- while (geo->ecc_chunk_size < mtd->oobsize)
+ while (geo->ecc_chunk_size < mtd->oobsize) {
geo->ecc_chunk_size *= 2; /* keep C >= O */
+ geo->gf_len = 14;
+ }
geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
/* We use the same ECC strength for all chunks. */
geo->ecc_strength = get_ecc_strength(this);
- if (!geo->ecc_strength) {
- pr_err("wrong ECC strength.\n");
+ if (!gpmi_check_ecc(this)) {
+ dev_err(this->dev,
+			"We cannot support this NAND chip."
+ " Its required ecc strength(%d) is beyond our"
+ " capability(%d).\n", geo->ecc_strength,
+ (GPMI_IS_MX6Q(this) ? MX6_ECC_STRENGTH_MAX
+ : MXS_ECC_STRENGTH_MAX));
return -EINVAL;
}
@@ -920,8 +946,7 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
dma_addr_t auxiliary_phys;
unsigned int i;
unsigned char *status;
- unsigned int failed;
- unsigned int corrected;
+ unsigned int max_bitflips = 0;
int ret;
pr_debug("page number is : %d\n", page);
@@ -945,35 +970,25 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
payload_virt, payload_phys);
if (ret) {
pr_err("Error in ECC-based read: %d\n", ret);
- goto exit_nfc;
+ return ret;
}
/* handle the block mark swapping */
block_mark_swapping(this, payload_virt, auxiliary_virt);
/* Loop over status bytes, accumulating ECC status. */
- failed = 0;
- corrected = 0;
- status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+ status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
continue;
if (*status == STATUS_UNCORRECTABLE) {
- failed++;
+ mtd->ecc_stats.failed++;
continue;
}
- corrected += *status;
- }
-
- /*
- * Propagate ECC status to the owning MTD only when failed or
- * corrected times nearly reaches our ECC correction threshold.
- */
- if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
- mtd->ecc_stats.failed += failed;
- mtd->ecc_stats.corrected += corrected;
+ mtd->ecc_stats.corrected += *status;
+ max_bitflips = max_t(unsigned int, max_bitflips, *status);
}
if (oob_required) {
@@ -995,8 +1010,8 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
this->payload_virt, this->payload_phys,
nfc_geo->payload_size,
payload_virt, payload_phys);
-exit_nfc:
- return ret;
+
+ return max_bitflips;
}
static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1668,8 +1683,8 @@ exit_nfc_init:
release_resources(this);
exit_acquire_resources:
platform_set_drvdata(pdev, NULL);
- kfree(this);
dev_err(this->dev, "driver registration failed: %d\n", ret);
+ kfree(this);
return ret;
}
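
The geometry change above grows the ECC chunk in powers of two until it covers the OOB area and switches to a 14-bit Galois field as soon as the chunk exceeds 512 bytes; gpmi_check_ecc() then rejects combinations the BCH block cannot handle. A standalone sketch of that selection rule (the strength limits mirror the MXS/MX6 defines added in gpmi-nand.h; this is not the driver code itself):

#include <linux/types.h>

static void pick_geometry_sketch(unsigned int oobsize,
				 unsigned int *chunk_size,
				 unsigned int *gf_len)
{
	*chunk_size = 512;	/* default ECC chunk */
	*gf_len = 13;		/* GF(2^13) is enough for 512-byte chunks */

	while (*chunk_size < oobsize) {
		*chunk_size *= 2;	/* keep chunk >= oobsize */
		*gf_len = 14;		/* larger chunks need GF(2^14) */
	}
}

static bool ecc_fits_sketch(bool is_mx6q, unsigned int gf_len,
			    unsigned int ecc_strength)
{
	if (!is_mx6q && gf_len == 14)
		return false;			/* mx23/mx28: GF13 only */
	return ecc_strength <= (is_mx6q ? 40 : 20);
}
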
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 3d93a5e39090..072947731277 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -284,6 +284,10 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
#define STATUS_ERASED 0xff
#define STATUS_UNCORRECTABLE 0xfe
+/* BCH's bit correction capability. */
+#define MXS_ECC_STRENGTH_MAX 20 /* mx23 and mx28 */
+#define MX6_ECC_STRENGTH_MAX 40
+
/* Use the platform_id to distinguish different Archs. */
#define IS_MX23 0x0
#define IS_MX28 0x1
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 60ac5b98b718..07e5784e5cd3 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -530,12 +530,23 @@ static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
static void send_read_id_v3(struct mxc_nand_host *host)
{
+ struct nand_chip *this = &host->nand;
+
/* Read ID into main buffer */
writel(NFC_ID, NFC_V3_LAUNCH);
wait_op_done(host, true);
memcpy32_fromio(host->data_buf, host->main_area0, 16);
+
+ if (this->options & NAND_BUSWIDTH_16) {
+ /* compress the ID info */
+ host->data_buf[1] = host->data_buf[2];
+ host->data_buf[2] = host->data_buf[4];
+ host->data_buf[3] = host->data_buf[6];
+ host->data_buf[4] = host->data_buf[8];
+ host->data_buf[5] = host->data_buf[10];
+ }
}
/* Request the NANDFC to perform a read of the NAND device ID. */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3766682a0289..43214151b882 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -825,13 +825,8 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
- unsigned long timeo = jiffies;
int status, state = chip->state;
-
- if (state == FL_ERASING)
- timeo += (HZ * 400) / 1000;
- else
- timeo += (HZ * 20) / 1000;
+ unsigned long timeo = (state == FL_ERASING ? 400 : 20);
led_trigger_event(nand_led_trigger, LED_FULL);
@@ -849,6 +844,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (in_interrupt() || oops_in_progress)
panic_nand_wait(mtd, chip, timeo);
else {
+ timeo = jiffies + msecs_to_jiffies(timeo);
while (time_before(jiffies, timeo)) {
if (chip->dev_ready) {
if (chip->dev_ready(mtd))
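
The nand_wait() change above keeps the timeout in milliseconds, so the panic path can consume it directly, and only converts to jiffies right before the polling loop. Roughly, as a sketch rather than the function itself:

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static void wait_ready_sketch(bool erasing)
{
	/* 400 ms for erase, 20 ms otherwise -- stays in ms until needed */
	unsigned long timeo_ms = erasing ? 400 : 20;
	unsigned long deadline = jiffies + msecs_to_jiffies(timeo_ms);

	while (time_before(jiffies, deadline)) {
		/* poll the chip's ready pin / status register here */
		cond_resched();
	}
}
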
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index b7cfe0d37121..053c9a2d47c3 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -55,8 +55,7 @@ struct mtd_info;
#define MODULE_AUTHOR(x) /* x */
#define MODULE_DESCRIPTION(x) /* x */
-#define printk printf
-#define KERN_ERR ""
+#define pr_err printf
#endif
/*
@@ -507,7 +506,7 @@ int __nand_correct_data(unsigned char *buf,
if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
return 1; /* error in ECC data; no action needed */
- printk(KERN_ERR "uncorrectable error : ");
+ pr_err("%s: uncorrectable ECC error", __func__);
return -1;
}
EXPORT_SYMBOL(__nand_correct_data);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 818b65c85d12..891c52a30e6a 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1408,40 +1408,32 @@ static void clear_memalloc(int memalloc)
current->flags &= ~PF_MEMALLOC;
}
-static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
{
- mm_segment_t old_fs;
ssize_t tx;
int err, memalloc;
- err = get_pages(ns, file, count, *pos);
+ err = get_pages(ns, file, count, pos);
if (err)
return err;
- old_fs = get_fs();
- set_fs(get_ds());
memalloc = set_memalloc();
- tx = vfs_read(file, (char __user *)buf, count, pos);
+ tx = kernel_read(file, pos, buf, count);
clear_memalloc(memalloc);
- set_fs(old_fs);
put_pages(ns);
return tx;
}
-static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
{
- mm_segment_t old_fs;
ssize_t tx;
int err, memalloc;
- err = get_pages(ns, file, count, *pos);
+ err = get_pages(ns, file, count, pos);
if (err)
return err;
- old_fs = get_fs();
- set_fs(get_ds());
memalloc = set_memalloc();
- tx = vfs_write(file, (char __user *)buf, count, pos);
+ tx = kernel_write(file, buf, count, pos);
clear_memalloc(memalloc);
- set_fs(old_fs);
put_pages(ns);
return tx;
}
@@ -1476,12 +1468,12 @@ int do_read_error(struct nandsim *ns, int num)
void do_bit_flips(struct nandsim *ns, int num)
{
- if (bitflips && random32() < (1 << 22)) {
+ if (bitflips && prandom_u32() < (1 << 22)) {
int flips = 1;
if (bitflips > 1)
- flips = (random32() % (int) bitflips) + 1;
+ flips = (prandom_u32() % (int) bitflips) + 1;
while (flips--) {
- int pos = random32() % (num * 8);
+ int pos = prandom_u32() % (num * 8);
ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
NS_WARN("read_page: flipping bit %d in page %d "
"reading from %d ecc: corrected=%u failed=%u\n",
@@ -1511,7 +1503,7 @@ static void read_page(struct nandsim *ns, int num)
if (do_read_error(ns, num))
return;
pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
- tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos);
+ tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
if (tx != num) {
NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
return;
@@ -1573,7 +1565,7 @@ static int prog_page(struct nandsim *ns, int num)
u_char *pg_off;
if (ns->cfile) {
- loff_t off, pos;
+ loff_t off;
ssize_t tx;
int all;
@@ -1585,8 +1577,7 @@ static int prog_page(struct nandsim *ns, int num)
memset(ns->file_buf, 0xff, ns->geom.pgszoob);
} else {
all = 0;
- pos = off;
- tx = read_file(ns, ns->cfile, pg_off, num, &pos);
+ tx = read_file(ns, ns->cfile, pg_off, num, off);
if (tx != num) {
NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
@@ -1595,16 +1586,15 @@ static int prog_page(struct nandsim *ns, int num)
for (i = 0; i < num; i++)
pg_off[i] &= ns->buf.byte[i];
if (all) {
- pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
- tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos);
+ loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
+ tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
if (tx != ns->geom.pgszoob) {
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
}
ns->pages_written[ns->regs.row] = 1;
} else {
- pos = off;
- tx = write_file(ns, ns->cfile, pg_off, num, &pos);
+ tx = write_file(ns, ns->cfile, pg_off, num, off);
if (tx != num) {
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
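
The nandsim conversion above swaps random32() for prandom_u32() without changing the injection logic: with some probability a handful of random bit positions inside the page buffer are toggled. The core of that logic, as a small sketch:

#include <linux/types.h>
#include <linux/random.h>

/* Toggle one randomly chosen bit in an nbytes-long buffer. */
static void flip_random_bit(u8 *buf, int nbytes)
{
	int pos = prandom_u32() % (nbytes * 8);

	buf[pos / 8] ^= 1 << (pos % 8);
}
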
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 0002d5e94f0d..8e820ddf4e08 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -22,9 +22,12 @@
#include <linux/omap-dma.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#ifdef CONFIG_MTD_NAND_OMAP_BCH
#include <linux/bch.h>
+#include <linux/platform_data/elm.h>
#endif
#include <linux/platform_data/mtd-nand-omap2.h>
@@ -117,6 +120,33 @@
#define OMAP24XX_DMA_GPMC 4
+#define BCH8_MAX_ERROR		8	/* up to 8 bits correctable */
+#define BCH4_MAX_ERROR		4	/* up to 4 bits correctable */
+
+#define SECTOR_BYTES 512
+/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
+#define BCH4_BIT_PAD 4
+#define BCH8_ECC_MAX ((SECTOR_BYTES + BCH8_ECC_OOB_BYTES) * 8)
+#define BCH4_ECC_MAX ((SECTOR_BYTES + BCH4_ECC_OOB_BYTES) * 8)
+
+/* GPMC ecc engine settings for read */
+#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
+#define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */
+#define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */
+#define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */
+#define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */
+
+/* GPMC ecc engine settings for write */
+#define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */
+#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
+#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
+
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
+ 0xac, 0x6b, 0xff, 0x99, 0x7b};
+static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
+#endif
+
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
/* Define some generic bad / good block scan pattern which are used
@@ -156,6 +186,9 @@ struct omap_nand_info {
#ifdef CONFIG_MTD_NAND_OMAP_BCH
struct bch_control *bch;
struct nand_ecclayout ecclayout;
+ bool is_elm_used;
+ struct device *elm_dev;
+ struct device_node *of_node;
#endif
};
@@ -1031,6 +1064,13 @@ static int omap_dev_ready(struct mtd_info *mtd)
* omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
* @mtd: MTD device structure
* @mode: Read/Write mode
+ *
+ * When using BCH, sector size is hardcoded to 512 bytes.
+ * Wrapping mode 6 is used for both reading and writing when the ELM
+ * module is not used for error correction.
+ * On writing,
+ * eccsize0 = 0 (no additional protected byte in spare area)
+ * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
*/
static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
{
@@ -1039,32 +1079,57 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
struct nand_chip *chip = mtd->priv;
- u32 val;
+ u32 val, wr_mode;
+ unsigned int ecc_size1, ecc_size0;
+
+ /* Using wrapping mode 6 for writing */
+ wr_mode = BCH_WRAPMODE_6;
- nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
- dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
- nsectors = 1;
/*
- * Program GPMC to perform correction on one 512-byte sector at a time.
- * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
- * gives a slight (5%) performance gain (but requires additional code).
+ * ECC engine enabled for valid ecc_size0 nibbles
+ * and disabled for ecc_size1 nibbles.
*/
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+
+ /* Perform ecc calculation on 512-byte sector */
+ nsectors = 1;
+
+ /* Update number of error correction */
+ nerrors = info->nand.ecc.strength;
+
+ /* Multi sector reading/writing for NAND flash with page size < 4096 */
+ if (info->is_elm_used && (mtd->writesize <= 4096)) {
+ if (mode == NAND_ECC_READ) {
+ /* Using wrapping mode 1 for reading */
+ wr_mode = BCH_WRAPMODE_1;
+
+ /*
+ * ECC engine enabled for ecc_size0 nibbles
+ * and disabled for ecc_size1 nibbles.
+ */
+ ecc_size0 = (nerrors == 8) ?
+ BCH8R_ECC_SIZE0 : BCH4R_ECC_SIZE0;
+ ecc_size1 = (nerrors == 8) ?
+ BCH8R_ECC_SIZE1 : BCH4R_ECC_SIZE1;
+ }
+
+ /* Perform ecc calculation for one page (< 4096) */
+ nsectors = info->nand.ecc.steps;
+ }
writel(ECC1, info->reg.gpmc_ecc_control);
- /*
- * When using BCH, sector size is hardcoded to 512 bytes.
- * Here we are using wrapping mode 6 both for reading and writing, with:
- * size0 = 0 (no additional protected byte in spare area)
- * size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
- */
- val = (32 << ECCSIZE1_SHIFT) | (0 << ECCSIZE0_SHIFT);
+ /* Configure ecc size for BCH */
+ val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
writel(val, info->reg.gpmc_ecc_size_config);
+ dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+
/* BCH configuration */
val = ((1 << 16) | /* enable BCH */
(((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
- (0x06 << 8) | /* wrap mode = 6 */
+ (wr_mode << 8) | /* wrap mode */
(dev_width << 7) | /* bus width */
(((nsectors-1) & 0x7) << 4) | /* number of sectors */
(info->gpmc_cs << 1) | /* ECC CS */
@@ -1072,7 +1137,7 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
writel(val, info->reg.gpmc_ecc_config);
- /* clear ecc and enable bits */
+ /* Clear ecc and enable bits */
writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
}
@@ -1162,6 +1227,298 @@ static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
}
/**
+ * omap3_calculate_ecc_bch - Generate ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Supports calculation of BCH4/BCH8 ecc vectors for the page
+ */
+static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+ int i, eccbchtsel;
+
+ nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
+ /*
+ * find BCH scheme used
+ * 0 -> BCH4
+ * 1 -> BCH8
+ */
+ eccbchtsel = ((readl(info->reg.gpmc_ecc_config) >> 12) & 0x3);
+
+ for (i = 0; i < nsectors; i++) {
+
+ /* Read hw-computed remainder */
+ bch_val1 = readl(info->reg.gpmc_bch_result0[i]);
+ bch_val2 = readl(info->reg.gpmc_bch_result1[i]);
+ if (eccbchtsel) {
+ bch_val3 = readl(info->reg.gpmc_bch_result2[i]);
+ bch_val4 = readl(info->reg.gpmc_bch_result3[i]);
+ }
+
+ if (eccbchtsel) {
+ /* BCH8 ecc scheme */
+ *ecc_code++ = (bch_val4 & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val3 & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val2 & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val1 & 0xFF);
+ /*
+ * Setting 14th byte to zero to handle
+ * erased page & maintain compatibility
+ * with RBL
+ */
+ *ecc_code++ = 0x0;
+ } else {
+ /* BCH4 ecc scheme */
+ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+ ((bch_val1 >> 28) & 0xF);
+ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val1 & 0xF) << 4);
+ /*
+ * Setting 8th byte to zero to handle
+ * erased page
+ */
+ *ecc_code++ = 0x0;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * erased_sector_bitflips - count bit flips
+ * @data: data sector buffer
+ * @oob: oob buffer
+ * @info: omap_nand_info
+ *
+ * Check whether the number of bit flips in an erased page falls below
+ * the correctable level. If it does, report the page as erased with
+ * correctable bit flips; otherwise report the page as uncorrectable.
+ */
+static int erased_sector_bitflips(u_char *data, u_char *oob,
+ struct omap_nand_info *info)
+{
+ int flip_bits = 0, i;
+
+ for (i = 0; i < info->nand.ecc.size; i++) {
+ flip_bits += hweight8(~data[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
+ flip_bits += hweight8(~oob[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ /*
+ * Bit flips falls in correctable level.
+ * Fill data area with 0xFF
+ */
+ if (flip_bits) {
+ memset(data, 0xFF, info->nand.ecc.size);
+ memset(oob, 0xFF, info->nand.ecc.bytes);
+ }
+
+ return flip_bits;
+}
+
+/**
+ * omap_elm_correct_data - corrects page data area in case error reported
+ * @mtd: MTD device structure
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * The calculated ecc vector is reported as zero for error-free pages.
+ * For error or erased pages a non-zero ecc vector is reported.
+ * When the ecc vector is non-zero, read_ecc is checked at a fixed offset
+ * (x = 13 for BCH8, x = 7 for BCH4) to decide whether the page is
+ * programmed. To tolerate bit flips in that byte, the number of 0 bits
+ * in read_ecc[x] is counted: at or above the threshold the page is
+ * treated as programmed, below it as erased.
+ *
+ * 1. If the page is erased, compare against the standard ecc vector of
+ *    an erased page to detect any bit flip. If the comparison fails,
+ *    bit flips are present in the erased page; count them and, if the
+ *    count is within the correctable level, report the page as 0xFF and
+ *    update the corrected-bit statistics.
+ * 2. If an error is reported on a programmed page, update the elm error
+ *    vector and correct the page with the ELM error correction routine.
+ *
+ */
+static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ int eccsteps = info->nand.ecc.steps;
+ int i , j, stat = 0;
+ int eccsize, eccflag, ecc_vector_size;
+ struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
+ u_char *ecc_vec = calc_ecc;
+ u_char *spare_ecc = read_ecc;
+ u_char *erased_ecc_vec;
+ enum bch_ecc type;
+ bool is_error_reported = false;
+
+ /* Initialize elm error vector to zero */
+ memset(err_vec, 0, sizeof(err_vec));
+
+ if (info->nand.ecc.strength == BCH8_MAX_ERROR) {
+ type = BCH8_ECC;
+ erased_ecc_vec = bch8_vector;
+ } else {
+ type = BCH4_ECC;
+ erased_ecc_vec = bch4_vector;
+ }
+
+ ecc_vector_size = info->nand.ecc.bytes;
+
+ /*
+ * Remove extra byte padding for BCH8 RBL
+ * compatibility and erased page handling
+ */
+ eccsize = ecc_vector_size - 1;
+
+ for (i = 0; i < eccsteps ; i++) {
+ eccflag = 0; /* initialize eccflag */
+
+ /*
+ * Check any error reported,
+ * In case of error, non zero ecc reported.
+ */
+
+ for (j = 0; (j < eccsize); j++) {
+ if (calc_ecc[j] != 0) {
+ eccflag = 1; /* non zero ecc, error present */
+ break;
+ }
+ }
+
+ if (eccflag == 1) {
+ /*
+			 * Set the threshold to min(4, ecc.strength / 2) so that
+			 * at most 4 bit flips are tolerated in that byte.
+ */
+ unsigned int threshold = min_t(unsigned int, 4,
+ info->nand.ecc.strength / 2);
+
+ /*
+			 * Determine whether the data area is programmed by
+			 * counting the 0 bits at a fixed offset in the spare
+			 * area and comparing that count with the threshold:
+			 * a programmed page is expected to carry at least
+			 * `threshold` zero bits in that byte.  Too few zeros
+			 * for a programmed page, or too many for an erased
+			 * one, means the sector may end up being reported
+			 * as uncorrectable.
+ */
+ if (hweight8(~read_ecc[eccsize]) >= threshold) {
+ /*
+ * Update elm error vector as
+ * data area is programmed
+ */
+ err_vec[i].error_reported = true;
+ is_error_reported = true;
+ } else {
+ /* Error reported in erased page */
+ int bitflip_count;
+ u_char *buf = &data[info->nand.ecc.size * i];
+
+ if (memcmp(calc_ecc, erased_ecc_vec, eccsize)) {
+ bitflip_count = erased_sector_bitflips(
+ buf, read_ecc, info);
+
+ if (bitflip_count)
+ stat += bitflip_count;
+ else
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Update the ecc vector */
+ calc_ecc += ecc_vector_size;
+ read_ecc += ecc_vector_size;
+ }
+
+ /* Check if any error reported */
+ if (!is_error_reported)
+ return 0;
+
+ /* Decode BCH error using ELM module */
+ elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+
+ for (i = 0; i < eccsteps; i++) {
+ if (err_vec[i].error_reported) {
+ for (j = 0; j < err_vec[i].error_count; j++) {
+ u32 bit_pos, byte_pos, error_max, pos;
+
+ if (type == BCH8_ECC)
+ error_max = BCH8_ECC_MAX;
+ else
+ error_max = BCH4_ECC_MAX;
+
+ if (info->nand.ecc.strength == BCH8_MAX_ERROR)
+ pos = err_vec[i].error_loc[j];
+ else
+ /* Add 4 to take care 4 bit padding */
+ pos = err_vec[i].error_loc[j] +
+ BCH4_BIT_PAD;
+
+ /* Calculate bit position of error */
+ bit_pos = pos % 8;
+
+ /* Calculate byte position of error */
+ byte_pos = (error_max - pos - 1) / 8;
+
+ if (pos < error_max) {
+ if (byte_pos < 512)
+ data[byte_pos] ^= 1 << bit_pos;
+ else
+ spare_ecc[byte_pos - 512] ^=
+ 1 << bit_pos;
+ }
+ /* else, not interested to correct ecc */
+ }
+ }
+
+ /* Update number of correctable errors */
+ stat += err_vec[i].error_count;
+
+ /* Update page data with sector size */
+ data += info->nand.ecc.size;
+ spare_ecc += ecc_vector_size;
+ }
+
+ for (i = 0; i < eccsteps; i++)
+ /* Return error if uncorrectable error present */
+ if (err_vec[i].error_uncorrectable)
+ return -EINVAL;
+
+ return stat;
+}
+
+/**
* omap3_correct_data_bch - Decode received data and correct errors
* @mtd: MTD device structure
* @data: page data
@@ -1194,6 +1551,92 @@ static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
}
/**
+ * omap_write_page_bch - BCH ecc based write page function for entire page
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * Custom write page method that supports multi-sector writing in one shot
+ */
+static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ int i;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->write_buf(mtd, buf, mtd->writesize);
+
+ /* Update ecc vector from GPMC result registers */
+ chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ /* Write ecc vector to OOB area */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/**
+ * omap_read_page_bch - BCH ecc based page read function for entire page
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * For the BCH ecc scheme, the GPMC is used for syndrome calculation and
+ * the ELM module for error correction.
+ * This custom method supports ELM error correction and multi-sector
+ * reading: the page data area is read along with the OOB data while the
+ * ecc engine is enabled, and the ecc vector is updated after the OOB read.
+ * For error-free pages the ecc vector is reported as zero.
+ */
+static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *oob = &chip->oob_poi[eccpos[0]];
+ uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0];
+ int stat;
+ unsigned int max_bitflips = 0;
+
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+
+ /* Read data */
+ chip->read_buf(mtd, buf, mtd->writesize);
+
+ /* Read oob bytes */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
+ chip->read_buf(mtd, oob, chip->ecc.total);
+
+ /* Calculate ecc bytes */
+ chip->ecc.calculate(mtd, buf, ecc_calc);
+
+ memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total);
+
+ stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ return max_bitflips;
+}
+
+/**
* omap3_free_bch - Release BCH ecc resources
* @mtd: MTD device structure
*/
@@ -1218,43 +1661,86 @@ static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
#ifdef CONFIG_MTD_NAND_OMAP_BCH8
- const int hw_errors = 8;
+ const int hw_errors = BCH8_MAX_ERROR;
#else
- const int hw_errors = 4;
+ const int hw_errors = BCH4_MAX_ERROR;
#endif
+ enum bch_ecc bch_type;
+ const __be32 *parp;
+ int lenp;
+ struct device_node *elm_node;
+
info->bch = NULL;
- max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
+ max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ?
+ BCH8_MAX_ERROR : BCH4_MAX_ERROR;
if (max_errors != hw_errors) {
pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
max_errors, hw_errors);
goto fail;
}
- /* software bch library is only used to detect and locate errors */
- info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
- if (!info->bch)
- goto fail;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
+ info->nand.ecc.mode = NAND_ECC_HW;
+ info->nand.ecc.strength = max_errors;
- info->nand.ecc.size = 512;
- info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
- info->nand.ecc.correct = omap3_correct_data_bch;
- info->nand.ecc.mode = NAND_ECC_HW;
+ if (hw_errors == BCH8_MAX_ERROR)
+ bch_type = BCH8_ECC;
+ else
+ bch_type = BCH4_ECC;
- /*
- * The number of corrected errors in an ecc block that will trigger
- * block scrubbing defaults to the ecc strength (4 or 8).
- * Set mtd->bitflip_threshold here to define a custom threshold.
- */
+ /* Detect availability of ELM module */
+ parp = of_get_property(info->of_node, "elm_id", &lenp);
+ if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
+ pr_err("Missing elm_id property, fall back to Software BCH\n");
+ info->is_elm_used = false;
+ } else {
+ struct platform_device *pdev;
- if (max_errors == 8) {
- info->nand.ecc.strength = 8;
- info->nand.ecc.bytes = 13;
- info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+ elm_node = of_find_node_by_phandle(be32_to_cpup(parp));
+ pdev = of_find_device_by_node(elm_node);
+ info->elm_dev = &pdev->dev;
+ elm_config(info->elm_dev, bch_type);
+ info->is_elm_used = true;
+ }
+
+ if (info->is_elm_used && (mtd->writesize <= 4096)) {
+
+ if (hw_errors == BCH8_MAX_ERROR)
+ info->nand.ecc.bytes = BCH8_SIZE;
+ else
+ info->nand.ecc.bytes = BCH4_SIZE;
+
+ info->nand.ecc.correct = omap_elm_correct_data;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch;
+ info->nand.ecc.read_page = omap_read_page_bch;
+ info->nand.ecc.write_page = omap_write_page_bch;
} else {
- info->nand.ecc.strength = 4;
- info->nand.ecc.bytes = 7;
- info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+ /*
+ * software bch library is only used to detect and
+ * locate errors
+ */
+ info->bch = init_bch(13, max_errors,
+ 0x201b /* hw polynomial */);
+ if (!info->bch)
+ goto fail;
+
+ info->nand.ecc.correct = omap3_correct_data_bch;
+
+ /*
+ * The number of corrected errors in an ecc block that will
+ * trigger block scrubbing defaults to the ecc strength (4 or 8)
+ * Set mtd->bitflip_threshold here to define a custom threshold.
+ */
+
+ if (max_errors == 8) {
+ info->nand.ecc.bytes = 13;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+ } else {
+ info->nand.ecc.bytes = 7;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+ }
}
pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
@@ -1270,7 +1756,7 @@ fail:
*/
static int omap3_init_bch_tail(struct mtd_info *mtd)
{
- int i, steps;
+ int i, steps, offset;
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
struct nand_ecclayout *layout = &info->ecclayout;
@@ -1292,11 +1778,21 @@ static int omap3_init_bch_tail(struct mtd_info *mtd)
goto fail;
}
+ /* ECC layout compatible with RBL for BCH8 */
+ if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
+ offset = 2;
+ else
+ offset = mtd->oobsize - layout->eccbytes;
+
/* put ecc bytes at oob tail */
for (i = 0; i < layout->eccbytes; i++)
- layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+ layout->eccpos[i] = offset + i;
+
+ if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
+ layout->oobfree[0].offset = 2 + layout->eccbytes * steps;
+ else
+ layout->oobfree[0].offset = 2;
- layout->oobfree[0].offset = 2;
layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
info->nand.ecc.layout = layout;
@@ -1332,6 +1828,7 @@ static int omap_nand_probe(struct platform_device *pdev)
dma_cap_mask_t mask;
unsigned sig;
struct resource *res;
+ struct mtd_part_parser_data ppdata = {};
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -1359,6 +1856,9 @@ static int omap_nand_probe(struct platform_device *pdev)
info->nand.options = pdata->devsize;
info->nand.options |= NAND_SKIP_BBTSCAN;
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ info->of_node = pdata->of_node;
+#endif
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
@@ -1557,7 +2057,8 @@ static int omap_nand_probe(struct platform_device *pdev)
goto out_release_mem_region;
}
- mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
+ ppdata.of_node = pdata->of_node;
+ mtd_device_parse_register(&info->mtd, NULL, &ppdata, pdata->parts,
pdata->nr_parts);
platform_set_drvdata(pdev, &info->mtd);
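
In the ELM path added above, each reported error location is translated into a byte/bit position; positions beyond the 512-byte data area fall into the spare-area ECC bytes, and BCH4 locations are shifted by the 4-bit pad before this step. A reduced sketch of that translation (the arithmetic follows the hunk, the function itself is only illustrative):

#include <linux/types.h>

/*
 * Flip the erroneous bit reported at `loc` (already padded for BCH4).
 * `error_max` is the total number of protected bits in the sector
 * (data plus spare ECC bytes); anything past it is ignored.
 */
static void apply_elm_fix_sketch(u8 *data, u8 *spare_ecc,
				 u32 loc, u32 error_max)
{
	u32 bit_pos = loc % 8;
	u32 byte_pos = (error_max - loc - 1) / 8;

	if (loc >= error_max)
		return;

	if (byte_pos < 512)
		data[byte_pos] ^= 1 << bit_pos;
	else
		spare_ecc[byte_pos - 512] ^= 1 << bit_pos;
}
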
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index dbd3aa574eaf..30bd907a260a 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -174,7 +174,14 @@ out:
return rc;
}
+static void __exit ofpart_parser_exit(void)
+{
+ deregister_mtd_parser(&ofpart_parser);
+ deregister_mtd_parser(&ofoldpart_parser);
+}
+
module_init(ofpart_parser_init);
+module_exit(ofpart_parser_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 065f3fe02a2f..eec2aedb4ab8 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -637,6 +637,7 @@ static int omap2_onenand_probe(struct platform_device *pdev)
struct onenand_chip *this;
int r;
struct resource *res;
+ struct mtd_part_parser_data ppdata = {};
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -767,7 +768,8 @@ static int omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
- r = mtd_device_parse_register(&c->mtd, NULL, NULL,
+ ppdata.of_node = pdata->of_node;
+ r = mtd_device_parse_register(&c->mtd, NULL, &ppdata,
pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
if (r)
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index 1eee264509a8..70106607c247 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -44,7 +44,7 @@ struct nand_ecc_test {
static void single_bit_error_data(void *error_data, void *correct_data,
size_t size)
{
- unsigned int offset = random32() % (size * BITS_PER_BYTE);
+ unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE);
memcpy(error_data, correct_data, size);
__change_bit_le(offset, error_data);
@@ -55,9 +55,9 @@ static void double_bit_error_data(void *error_data, void *correct_data,
{
unsigned int offset[2];
- offset[0] = random32() % (size * BITS_PER_BYTE);
+ offset[0] = prandom_u32() % (size * BITS_PER_BYTE);
do {
- offset[1] = random32() % (size * BITS_PER_BYTE);
+ offset[1] = prandom_u32() % (size * BITS_PER_BYTE);
} while (offset[0] == offset[1]);
memcpy(error_data, correct_data, size);
@@ -68,7 +68,7 @@ static void double_bit_error_data(void *error_data, void *correct_data,
static unsigned int random_ecc_bit(size_t size)
{
- unsigned int offset = random32() % (3 * BITS_PER_BYTE);
+ unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE);
if (size == 256) {
/*
@@ -76,7 +76,7 @@ static unsigned int random_ecc_bit(size_t size)
* and 17th bit) in ECC code for 256 byte data block
*/
while (offset == 16 || offset == 17)
- offset = random32() % (3 * BITS_PER_BYTE);
+ offset = prandom_u32() % (3 * BITS_PER_BYTE);
}
return offset;
@@ -256,7 +256,7 @@ static int nand_ecc_test_run(const size_t size)
goto error;
}
- get_random_bytes(correct_data, size);
+ prandom_bytes(correct_data, size);
__nand_calculate_ecc(correct_data, size, correct_ecc);
for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index e827fa8cd844..3e24b379ffa4 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -29,6 +29,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/random.h>
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
@@ -46,26 +47,7 @@ static int use_offset;
static int use_len;
static int use_len_max;
static int vary_offset;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; ++i)
- buf[i] = simple_rand();
-}
+static struct rnd_state rnd_state;
static int erase_eraseblock(int ebnum)
{
@@ -129,7 +111,7 @@ static int write_eraseblock(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
- set_random_data(writebuf, use_len);
+ prandom_bytes_state(&rnd_state, writebuf, use_len);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
@@ -182,7 +164,7 @@ static int verify_eraseblock(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
- set_random_data(writebuf, use_len);
+ prandom_bytes_state(&rnd_state, writebuf, use_len);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
@@ -273,7 +255,7 @@ static int verify_eraseblock_in_one_go(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
size_t len = mtd->ecclayout->oobavail * pgcnt;
- set_random_data(writebuf, len);
+ prandom_bytes_state(&rnd_state, writebuf, len);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
@@ -424,12 +406,12 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
err = write_whole_device();
if (err)
goto out;
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
err = verify_all_eraseblocks();
if (err)
goto out;
@@ -444,13 +426,13 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
- simple_srand(3);
+ prandom_seed_state(&rnd_state, 3);
err = write_whole_device();
if (err)
goto out;
/* Check all eraseblocks */
- simple_srand(3);
+ prandom_seed_state(&rnd_state, 3);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -479,7 +461,7 @@ static int __init mtd_oobtest_init(void)
use_len = mtd->ecclayout->oobavail;
use_len_max = mtd->ecclayout->oobavail;
vary_offset = 1;
- simple_srand(5);
+ prandom_seed_state(&rnd_state, 5);
err = write_whole_device();
if (err)
@@ -490,7 +472,7 @@ static int __init mtd_oobtest_init(void)
use_len = mtd->ecclayout->oobavail;
use_len_max = mtd->ecclayout->oobavail;
vary_offset = 1;
- simple_srand(5);
+ prandom_seed_state(&rnd_state, 5);
err = verify_all_eraseblocks();
if (err)
goto out;
@@ -649,7 +631,7 @@ static int __init mtd_oobtest_init(void)
goto out;
/* Write all eraseblocks */
- simple_srand(11);
+ prandom_seed_state(&rnd_state, 11);
pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt - 1; ++i) {
int cnt = 2;
@@ -659,7 +641,7 @@ static int __init mtd_oobtest_init(void)
continue;
addr = (i + 1) * mtd->erasesize - mtd->writesize;
for (pg = 0; pg < cnt; ++pg) {
- set_random_data(writebuf, sz);
+ prandom_bytes_state(&rnd_state, writebuf, sz);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
@@ -680,12 +662,13 @@ static int __init mtd_oobtest_init(void)
pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
- simple_srand(11);
+ prandom_seed_state(&rnd_state, 11);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt - 1; ++i) {
if (bbt[i] || bbt[i + 1])
continue;
- set_random_data(writebuf, mtd->ecclayout->oobavail * 2);
+ prandom_bytes_state(&rnd_state, writebuf,
+ mtd->ecclayout->oobavail * 2);
addr = (i + 1) * mtd->erasesize - mtd->writesize;
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
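
All of the mtd test conversions in this series follow the same idea: replace the ad-hoc simple_rand() generator with a private struct rnd_state and reseed it with the same value before the verify pass, so the expected data can be regenerated instead of stored. A sketch of that write/verify pairing (the mtd_write()/mtd_read() plumbing is elided and assumed):

#include <linux/types.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/errno.h>

static struct rnd_state sketch_state;

static int write_then_verify_sketch(u8 *writebuf, u8 *readbuf, size_t len)
{
	/* write pass: deterministic pseudo-random payload */
	prandom_seed_state(&sketch_state, 1);
	prandom_bytes_state(&sketch_state, writebuf, len);
	/* ... write writebuf to flash, later read it back into readbuf ... */

	/* verify pass: regenerate the same bytes from the same seed */
	prandom_seed_state(&sketch_state, 1);
	prandom_bytes_state(&sketch_state, writebuf, len);

	return memcmp(readbuf, writebuf, len) ? -EIO : 0;
}
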
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index f93a76f88113..0c1140b6c286 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -29,6 +29,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/random.h>
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
@@ -45,26 +46,7 @@ static int bufsize;
static int ebcnt;
static int pgcnt;
static int errcnt;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; ++i)
- buf[i] = simple_rand();
-}
+static struct rnd_state rnd_state;
static int erase_eraseblock(int ebnum)
{
@@ -98,7 +80,7 @@ static int write_eraseblock(int ebnum)
size_t written;
loff_t addr = ebnum * mtd->erasesize;
- set_random_data(writebuf, mtd->erasesize);
+ prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
cond_resched();
err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
if (err || written != mtd->erasesize)
@@ -124,7 +106,7 @@ static int verify_eraseblock(int ebnum)
for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
addrn -= mtd->erasesize;
- set_random_data(writebuf, mtd->erasesize);
+ prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
/* Do a read to set the internal dataRAMs to different data */
err = mtd_read(mtd, addr0, bufsize, &read, twopages);
@@ -160,7 +142,8 @@ static int verify_eraseblock(int ebnum)
}
/* Check boundary between eraseblocks */
if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
- unsigned long oldnext = next;
+ struct rnd_state old_state = rnd_state;
+
/* Do a read to set the internal dataRAMs to different data */
err = mtd_read(mtd, addr0, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
@@ -188,13 +171,13 @@ static int verify_eraseblock(int ebnum)
return err;
}
memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
- set_random_data(boundary + pgsize, pgsize);
+ prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);
if (memcmp(twopages, boundary, bufsize)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
- next = oldnext;
+ rnd_state = old_state;
}
return err;
}
@@ -326,7 +309,7 @@ static int erasecrosstest(void)
return err;
pr_info("writing 1st page of block %d\n", ebnum);
- set_random_data(writebuf, pgsize);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
@@ -359,7 +342,7 @@ static int erasecrosstest(void)
return err;
pr_info("writing 1st page of block %d\n", ebnum);
- set_random_data(writebuf, pgsize);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
@@ -417,7 +400,7 @@ static int erasetest(void)
return err;
pr_info("writing 1st page of block %d\n", ebnum);
- set_random_data(writebuf, pgsize);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
pr_err("error: write failed at %#llx\n",
@@ -565,7 +548,7 @@ static int __init mtd_pagetest_init(void)
pr_info("erased %u eraseblocks\n", i);
/* Write all eraseblocks */
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -580,7 +563,7 @@ static int __init mtd_pagetest_init(void)
pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 596cbea8df4c..a6ce9c1fa6c5 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -49,13 +49,6 @@ static int pgcnt;
static int goodebcnt;
static struct timeval start, finish;
-static void set_random_data(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; ++i)
- buf[i] = random32();
-}
static int erase_eraseblock(int ebnum)
{
@@ -396,7 +389,7 @@ static int __init mtd_speedtest_init(void)
goto out;
}
- set_random_data(iobuf, mtd->erasesize);
+ prandom_bytes(iobuf, mtd->erasesize);
err = scan_for_bad_eraseblocks();
if (err)
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 3729f679ae5d..787f539d16ca 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -55,7 +55,7 @@ static int rand_eb(void)
unsigned int eb;
again:
- eb = random32();
+ eb = prandom_u32();
/* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
eb %= (ebcnt - 1);
if (bbt[eb])
@@ -67,7 +67,7 @@ static int rand_offs(void)
{
unsigned int offs;
- offs = random32();
+ offs = prandom_u32();
offs %= bufsize;
return offs;
}
@@ -76,7 +76,7 @@ static int rand_len(int offs)
{
unsigned int len;
- len = random32();
+ len = prandom_u32();
len %= (bufsize - offs);
return len;
}
@@ -191,7 +191,7 @@ static int do_write(void)
static int do_operation(void)
{
- if (random32() & 1)
+ if (prandom_u32() & 1)
return do_read();
else
return do_write();
@@ -282,8 +282,7 @@ static int __init mtd_stresstest_init(void)
}
for (i = 0; i < ebcnt; i++)
offsets[i] = mtd->erasesize;
- for (i = 0; i < bufsize; i++)
- writebuf[i] = random32();
+ prandom_bytes(writebuf, bufsize);
err = scan_for_bad_eraseblocks();
if (err)
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index c880c2229c59..aade56f27945 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -28,6 +28,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/random.h>
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
@@ -43,26 +44,7 @@ static int bufsize;
static int ebcnt;
static int pgcnt;
static int errcnt;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; ++i)
- buf[i] = simple_rand();
-}
+static struct rnd_state rnd_state;
static inline void clear_data(unsigned char *buf, size_t len)
{
@@ -119,7 +101,7 @@ static int write_eraseblock(int ebnum)
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- set_random_data(writebuf, subpgsize);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
pr_err("error: write failed at %#llx\n",
@@ -133,7 +115,7 @@ static int write_eraseblock(int ebnum)
addr += subpgsize;
- set_random_data(writebuf, subpgsize);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
pr_err("error: write failed at %#llx\n",
@@ -157,7 +139,7 @@ static int write_eraseblock2(int ebnum)
for (k = 1; k < 33; ++k) {
if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
break;
- set_random_data(writebuf, subpgsize * k);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
if (unlikely(err || written != subpgsize * k)) {
pr_err("error: write failed at %#llx\n",
@@ -193,7 +175,7 @@ static int verify_eraseblock(int ebnum)
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- set_random_data(writebuf, subpgsize);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
clear_data(readbuf, subpgsize);
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
@@ -220,7 +202,7 @@ static int verify_eraseblock(int ebnum)
addr += subpgsize;
- set_random_data(writebuf, subpgsize);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
clear_data(readbuf, subpgsize);
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
@@ -257,7 +239,7 @@ static int verify_eraseblock2(int ebnum)
for (k = 1; k < 33; ++k) {
if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
break;
- set_random_data(writebuf, subpgsize * k);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
clear_data(readbuf, subpgsize * k);
err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
if (unlikely(err || read != subpgsize * k)) {
@@ -430,7 +412,7 @@ static int __init mtd_subpagetest_init(void)
goto out;
pr_info("writing whole device\n");
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -443,7 +425,7 @@ static int __init mtd_subpagetest_init(void)
}
pr_info("written %u eraseblocks\n", i);
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -466,7 +448,7 @@ static int __init mtd_subpagetest_init(void)
goto out;
/* Write all eraseblocks */
- simple_srand(3);
+ prandom_seed_state(&rnd_state, 3);
pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -481,7 +463,7 @@ static int __init mtd_subpagetest_init(void)
pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
- simple_srand(3);
+ prandom_seed_state(&rnd_state, 3);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index c4cde1e9eddb..3a9f6a6a79f9 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -208,7 +208,7 @@ static inline int write_pattern(int ebnum, void *buf)
static int __init tort_init(void)
{
int err = 0, i, infinite = !cycles_count;
- int bad_ebs[ebcnt];
+ int *bad_ebs;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
@@ -250,28 +250,24 @@ static int __init tort_init(void)
err = -ENOMEM;
patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!patt_5A5) {
- pr_err("error: cannot allocate memory\n");
+ if (!patt_5A5)
goto out_mtd;
- }
patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!patt_A5A) {
- pr_err("error: cannot allocate memory\n");
+ if (!patt_A5A)
goto out_patt_5A5;
- }
patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!patt_FF) {
- pr_err("error: cannot allocate memory\n");
+ if (!patt_FF)
goto out_patt_A5A;
- }
check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!check_buf) {
- pr_err("error: cannot allocate memory\n");
+ if (!check_buf)
goto out_patt_FF;
- }
+
+ bad_ebs = kcalloc(ebcnt, sizeof(*bad_ebs), GFP_KERNEL);
+ if (!bad_ebs)
+ goto out_check_buf;
err = 0;
@@ -290,7 +286,6 @@ static int __init tort_init(void)
/*
* Check if there is a bad eraseblock among those we are going to test.
*/
- memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
if (mtd_can_have_bb(mtd)) {
for (i = eb; i < eb + ebcnt; i++) {
err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
@@ -394,6 +389,8 @@ out:
pr_info("finished after %u erase cycles\n",
erase_cycles);
+ kfree(bad_ebs);
+out_check_buf:
kfree(check_buf);
out_patt_FF:
kfree(patt_FF);
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index dfcc65b33e99..4f02848bb2bc 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -194,7 +194,7 @@ static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_device *ubi = desc->vol->ubi;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int err;
mutex_lock(&inode->i_mutex);
err = ubi_sync(ubi->ubi_num);
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 33f8f3b2c9b2..cba89fcd1587 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -86,7 +86,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_bitflips)
- return !(random32() % 200);
+ return !(prandom_u32() % 200);
return 0;
}
@@ -100,7 +100,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_io_failures)
- return !(random32() % 500);
+ return !(prandom_u32() % 500);
return 0;
}
@@ -114,7 +114,7 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_io_failures)
- return !(random32() % 400);
+ return !(prandom_u32() % 400);
return 0;
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2334190ff8d2..87f1d39ca551 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -150,7 +150,7 @@ config MACVTAP
config VXLAN
tristate "Virtual eXtensible Local Area Network (VXLAN)"
- depends on EXPERIMENTAL && INET
+ depends on INET
---help---
This allows one to create vxlan virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. VXLAN is often used
@@ -188,6 +188,10 @@ config NETPOLL_TRAP
config NET_POLL_CONTROLLER
def_bool NETPOLL
+config NTB_NETDEV
+ tristate "Virtual Ethernet over NTB"
+ depends on NTB
+
config RIONET
tristate "RapidIO Ethernet over messaging driver support"
depends on RAPIDIO
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 335db78fd987..ef3d090efedf 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -71,3 +71,4 @@ obj-$(CONFIG_USB_IPHETH) += usb/
obj-$(CONFIG_USB_CDC_PHONET) += usb/
obj-$(CONFIG_HYPERV_NET) += hyperv/
+obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index abf4d7a9dcce..60c2142373c9 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -6,7 +6,7 @@ comment "CAIF transport drivers"
config CAIF_TTY
tristate "CAIF TTY transport driver"
- depends on CAIF
+ depends on CAIF && TTY
default n
---help---
The CAIF TTY transport driver is a Line Discipline (ldisc)
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 5de74e762021..666891a9a248 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -91,7 +91,7 @@ static inline void update_tty_status(struct ser_device *ser)
ser->tty->hw_stopped << 4 |
ser->tty->flow_stopped << 3 |
ser->tty->packet << 2 |
- ser->tty->low_latency << 1 |
+ ser->tty->port->low_latency << 1 |
ser->tty->warned;
}
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 1cca19f1c490..9862b2e07644 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -11,6 +11,7 @@ config CAN_VCAN
config CAN_SLCAN
tristate "Serial / USB serial CAN Adaptors (slcan)"
+ depends on TTY
---help---
CAN driver for several 'low cost' CAN interfaces that are attached
via serial lines or via USB-to-serial adapters using the LAWICEL
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index a7efec293037..9b017d9c58e9 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -381,7 +381,7 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
}
#ifdef CONFIG_BCM47XX
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
char buf[20];
@@ -393,7 +393,7 @@ static void b44_wap54g10_workaround(struct b44 *bp)
* see https://dev.openwrt.org/ticket/146
* check and reset bit "isolate"
*/
- if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
+ if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
return;
if (simple_strtoul(buf, NULL, 0) == 2) {
err = __b44_readphy(bp, 0, MII_BMCR, &val);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 3fd32880e526..639049d7e92d 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -15,7 +15,7 @@
#include <linux/mii.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
static const struct bcma_device_id bgmac_bcma_tbl[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
@@ -436,6 +436,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
}
for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ int j;
+
ring = &bgmac->rx_ring[i];
ring->num_slots = BGMAC_RX_RING_SLOTS;
ring->mmio_base = ring_base[i];
@@ -458,8 +460,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
/* Alloc RX slots */
- for (i = 0; i < ring->num_slots; i++) {
- err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[i]);
+ for (j = 0; j < ring->num_slots; j++) {
+ err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
if (err) {
bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
goto err_dma_free;
@@ -496,6 +498,8 @@ static void bgmac_dma_init(struct bgmac *bgmac)
}
for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ int j;
+
ring = &bgmac->rx_ring[i];
/* We don't implement unaligned addressing, so enable first */
@@ -505,11 +509,11 @@ static void bgmac_dma_init(struct bgmac *bgmac)
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
upper_32_bits(ring->dma_base));
- for (i = 0, dma_desc = ring->cpu_base; i < ring->num_slots;
- i++, dma_desc++) {
+ for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
+ j++, dma_desc++) {
ctl0 = ctl1 = 0;
- if (i == ring->num_slots - 1)
+ if (j == ring->num_slots - 1)
ctl0 |= BGMAC_DESC_CTL0_EOT;
ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
/* Is there any BGMAC device that requires extension? */
@@ -517,8 +521,8 @@ static void bgmac_dma_init(struct bgmac *bgmac)
* B43_DMA64_DCTL1_ADDREXT_MASK;
*/
- dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[i].dma_addr));
- dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[i].dma_addr));
+ dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
+ dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
dma_desc->ctl0 = cpu_to_le32(ctl0);
dma_desc->ctl1 = cpu_to_le32(ctl1);
}
@@ -904,7 +908,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
BGMAC_CHIPCTL_1_IF_TYPE_RMII;
char buf[2];
- if (nvram_getenv("et_swtype", buf, 1) > 0) {
+ if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
if (kstrtou8(buf, 0, &et_swtype))
bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
buf);
@@ -1382,7 +1386,7 @@ static int bgmac_probe(struct bcma_device *core)
}
bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
- if (nvram_getenv("et0_no_txint", NULL, 0) == 0)
+ if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
/* TODO: reset the external phy. Specs are needed */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index c6da77fa9d07..1663e0b6b5a0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13013,64 +13013,6 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
return 0;
}
-static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
- struct bnx2x_phy *phy,
- u8 port)
-{
- u16 val, cnt;
- /* Wait for FW completing its initialization. */
- for (cnt = 0; cnt < 1500; cnt++) {
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, &val);
- if (!(val & (1<<15)))
- break;
- usleep_range(1000, 2000);
- }
- if (cnt >= 1500) {
- DP(NETIF_MSG_LINK, "84833 reset timeout\n");
- return -EINVAL;
- }
-
- /* Put the port in super isolate mode. */
- bnx2x_cl45_read(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
- val |= MDIO_84833_SUPER_ISOLATE;
- bnx2x_cl45_write(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
-
- /* Save spirom version */
- bnx2x_save_848xx_spirom_version(phy, bp, port);
- return 0;
-}
-
-int bnx2x_pre_init_phy(struct bnx2x *bp,
- u32 shmem_base,
- u32 shmem2_base,
- u32 chip_id,
- u8 port)
-{
- int rc = 0;
- struct bnx2x_phy phy;
- if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
- port, &phy) != 0) {
- DP(NETIF_MSG_LINK, "populate_phy failed\n");
- return -EINVAL;
- }
- bnx2x_set_mdio_clk(bp, chip_id, phy.mdio_ctrl);
- switch (phy.type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
- rc = bnx2x_84833_pre_init_phy(bp, &phy, port);
- break;
- default:
- break;
- }
- return rc;
-}
-
static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 ext_phy_type, u32 chip_id)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c4daee1b7286..e81a747ea8ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8843,7 +8843,7 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
(!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
(val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
} else {
- /* Prevent incomming interrupts in IGU */
+ /* Prevent incoming interrupts in IGU */
val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 36246129864c..531eebf40d60 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -98,7 +98,7 @@ static inline int bnx2x_pfvf_status_codes(int rc)
}
}
-int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
+static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
struct cstorm_vf_zone_data __iomem *zone_data =
REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
@@ -141,7 +141,7 @@ int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
return 0;
}
-int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
+static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
{
u32 me_reg;
int tout = 10, interval = 100; /* Wait for 1 sec */
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index ceb0de0cf62c..1194446f859a 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,6 +22,7 @@ if NET_CADENCE
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
+ depends on GENERIC_HARDIRQS
select NET_CORE
select MACB
---help---
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c6c05bfef0e0..e707e31abd81 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2347,7 +2347,7 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
loff_t pos = *ppos;
- loff_t avail = file->f_path.dentry->d_inode->i_size;
+ loff_t avail = file_inode(file)->i_size;
unsigned int mem = (uintptr_t)file->private_data & 3;
struct adapter *adap = file->private_data - mem;
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 29d82cf1528e..fccc3bf2141d 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -1782,24 +1782,6 @@ fec_probe(struct platform_device *pdev)
fep->phy_interface = ret;
}
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq < 0) {
- if (i)
- break;
- ret = irq;
- goto failed_irq;
- }
- ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
- if (ret) {
- while (--i >= 0) {
- irq = platform_get_irq(pdev, i);
- free_irq(irq, ndev);
- }
- goto failed_irq;
- }
- }
-
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
@@ -1850,6 +1832,24 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_init;
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ if (i)
+ break;
+ ret = irq;
+ goto failed_irq;
+ }
+ ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
+ if (ret) {
+ while (--i >= 0) {
+ irq = platform_get_irq(pdev, i);
+ free_irq(irq, ndev);
+ }
+ goto failed_irq;
+ }
+ }
+
ret = fec_enet_mii_init(pdev);
if (ret)
goto failed_mii_init;
@@ -1867,6 +1867,12 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq > 0)
+ free_irq(irq, ndev);
+ }
+failed_irq:
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
@@ -1874,12 +1880,6 @@ failed_regulator:
clk_disable_unprepare(fep->clk_ptp);
failed_pin:
failed_clk:
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq > 0)
- free_irq(irq, ndev);
- }
-failed_irq:
iounmap(fep->hwp);
failed_ioremap:
free_netdev(ndev);
@@ -1899,17 +1899,17 @@ fec_drv_remove(struct platform_device *pdev)
unregister_netdev(ndev);
fec_enet_mii_remove(fep);
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- int irq = platform_get_irq(pdev, i);
- if (irq > 0)
- free_irq(irq, ndev);
- }
del_timer_sync(&fep->time_keep);
clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ int irq = platform_get_irq(pdev, i);
+ if (irq > 0)
+ free_irq(irq, ndev);
+ }
iounmap(fep->hwp);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 7f91b0c5c264..77943a6a1b8c 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -41,8 +41,8 @@
#include <asm/delay.h>
#include <asm/mpc52xx.h>
-#include <sysdev/bestcomm/bestcomm.h>
-#include <sysdev/bestcomm/fec.h>
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/fec.h>
#include "fec_mpc52xx.h"
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4b5e8a692481..d2c5441d1bf0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2906,21 +2906,23 @@ static void gfar_netpoll(struct net_device *dev)
/* If the device has multiple interrupts, run tx/rx */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
for (i = 0; i < priv->num_grps; i++) {
- disable_irq(priv->gfargrp[i].interruptTransmit);
- disable_irq(priv->gfargrp[i].interruptReceive);
- disable_irq(priv->gfargrp[i].interruptError);
- gfar_interrupt(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
- enable_irq(priv->gfargrp[i].interruptError);
- enable_irq(priv->gfargrp[i].interruptReceive);
- enable_irq(priv->gfargrp[i].interruptTransmit);
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
+
+ disable_irq(gfar_irq(grp, TX)->irq);
+ disable_irq(gfar_irq(grp, RX)->irq);
+ disable_irq(gfar_irq(grp, ER)->irq);
+ gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
+ enable_irq(gfar_irq(grp, ER)->irq);
+ enable_irq(gfar_irq(grp, RX)->irq);
+ enable_irq(gfar_irq(grp, TX)->irq);
}
} else {
for (i = 0; i < priv->num_grps; i++) {
- disable_irq(priv->gfargrp[i].interruptTransmit);
- gfar_interrupt(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
- enable_irq(priv->gfargrp[i].interruptTransmit);
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
+
+ disable_irq(gfar_irq(grp, TX)->irq);
+ gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
+ enable_irq(gfar_irq(grp, TX)->irq);
}
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index f4d2e9e3c6d5..c3f1afd86906 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2197,13 +2197,13 @@ static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
union ixgbe_atr_input *mask = &adapter->fdir_mask;
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
- struct hlist_node *node, *node2;
+ struct hlist_node *node2;
struct ixgbe_fdir_filter *rule = NULL;
/* report total rule count */
cmd->data = (1024 << adapter->fdir_pballoc) - 2;
- hlist_for_each_entry_safe(rule, node, node2,
+ hlist_for_each_entry_safe(rule, node2,
&adapter->fdir_filter_list, fdir_node) {
if (fsp->location <= rule->sw_idx)
break;
@@ -2264,14 +2264,14 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
- struct hlist_node *node, *node2;
+ struct hlist_node *node2;
struct ixgbe_fdir_filter *rule;
int cnt = 0;
/* report total rule count */
cmd->data = (1024 << adapter->fdir_pballoc) - 2;
- hlist_for_each_entry_safe(rule, node, node2,
+ hlist_for_each_entry_safe(rule, node2,
&adapter->fdir_filter_list, fdir_node) {
if (cnt == cmd->rule_cnt)
return -EMSGSIZE;
@@ -2358,19 +2358,19 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
u16 sw_idx)
{
struct ixgbe_hw *hw = &adapter->hw;
- struct hlist_node *node, *node2, *parent;
- struct ixgbe_fdir_filter *rule;
+ struct hlist_node *node2;
+ struct ixgbe_fdir_filter *rule, *parent;
int err = -EINVAL;
parent = NULL;
rule = NULL;
- hlist_for_each_entry_safe(rule, node, node2,
+ hlist_for_each_entry_safe(rule, node2,
&adapter->fdir_filter_list, fdir_node) {
/* hash found, or no matching entry */
if (rule->sw_idx >= sw_idx)
break;
- parent = node;
+ parent = rule;
}
/* if there is an old rule occupying our place remove it */
@@ -2399,7 +2399,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
/* add filter to the list */
if (parent)
- hlist_add_after(parent, &input->fdir_node);
+ hlist_add_after(&parent->fdir_node, &input->fdir_node);
else
hlist_add_head(&input->fdir_node,
&adapter->fdir_filter_list);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 68478d6dfa2d..db5611ae407e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3891,7 +3891,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- struct hlist_node *node, *node2;
+ struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
spin_lock(&adapter->fdir_perfect_lock);
@@ -3899,7 +3899,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
if (!hlist_empty(&adapter->fdir_filter_list))
ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
- hlist_for_each_entry_safe(filter, node, node2,
+ hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
ixgbe_fdir_write_perfect_filter_82599(hw,
&filter->filter,
@@ -4356,12 +4356,12 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
{
- struct hlist_node *node, *node2;
+ struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
spin_lock(&adapter->fdir_perfect_lock);
- hlist_for_each_entry_safe(filter, node, node2,
+ hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
hlist_del(&filter->fdir_node);
kfree(filter);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b6025c305e10..cd345b8969bc 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -12,7 +12,6 @@
*/
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index e3c3d122979e..fc27800e9c38 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -64,7 +64,7 @@ static const char mlx4_en_version[] =
/* Enable RSS UDP traffic */
MLX4_EN_PARM_INT(udp_rss, 1,
- "Enable RSS for incomming UDP traffic or disabled (0)");
+ "Enable RSS for incoming UDP traffic or disabled (0)");
/* Priority pausing */
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
@@ -198,7 +198,7 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
- mlx4_mr_free(dev, &mdev->mr);
+ (void) mlx4_mr_free(dev, &mdev->mr);
iounmap(mdev->uar_map);
mlx4_uar_free(dev, &mdev->priv_uar);
mlx4_pd_free(dev, mdev->priv_pdn);
@@ -303,7 +303,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
return mdev;
err_mr:
- mlx4_mr_free(dev, &mdev->mr);
+ (void) mlx4_mr_free(dev, &mdev->mr);
err_map:
if (!mdev->uar_map)
iounmap(mdev->uar_map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5088dc5c3d1a..bb4d8d99f36d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -225,11 +225,10 @@ static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
__be16 src_port, __be16 dst_port)
{
- struct hlist_node *elem;
struct mlx4_en_filter *filter;
struct mlx4_en_filter *ret = NULL;
- hlist_for_each_entry(filter, elem,
+ hlist_for_each_entry(filter,
filter_hash_bucket(priv, src_ip, dst_ip,
src_port, dst_port),
filter_chain) {
@@ -574,13 +573,13 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
struct mlx4_mac_entry *entry;
- struct hlist_node *n, *tmp;
+ struct hlist_node *tmp;
struct hlist_head *bucket;
unsigned int mac_hash;
mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
bucket = &priv->mac_hash[mac_hash];
- hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac,
priv->dev->dev_addr)) {
en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
@@ -609,11 +608,11 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
struct hlist_head *bucket;
unsigned int mac_hash;
struct mlx4_mac_entry *entry;
- struct hlist_node *n, *tmp;
+ struct hlist_node *tmp;
u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
- hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
mlx4_en_uc_steer_release(priv, entry->mac,
qpn, entry->reg_id);
@@ -1019,7 +1018,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
{
struct netdev_hw_addr *ha;
struct mlx4_mac_entry *entry;
- struct hlist_node *n, *tmp;
+ struct hlist_node *tmp;
bool found;
u64 mac;
int err = 0;
@@ -1035,7 +1034,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
/* find what to remove */
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
- hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
found = false;
netdev_for_each_uc_addr(ha, dev) {
if (ether_addr_equal_64bits(entry->mac,
@@ -1078,7 +1077,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
netdev_for_each_uc_addr(ha, dev) {
found = false;
bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
- hlist_for_each_entry(entry, n, bucket, hlist) {
+ hlist_for_each_entry(entry, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
found = true;
break;
@@ -1829,7 +1828,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
}
#ifdef CONFIG_RFS_ACCEL
- priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
+ priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
if (!priv->dev->rx_cpu_rmap)
goto err;
#endif
@@ -2067,7 +2066,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
err = -ENOMEM;
goto out;
}
- priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_RX_RINGS,
+ priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
GFP_KERNEL);
if (!priv->tx_cq) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index ce38654bbdd0..c7f856308e1a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
+#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
@@ -617,7 +618,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (is_multicast_ether_addr(ethh->h_dest)) {
struct mlx4_mac_entry *entry;
- struct hlist_node *n;
struct hlist_head *bucket;
unsigned int mac_hash;
@@ -625,7 +625,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
bucket = &priv->mac_hash[mac_hash];
rcu_read_lock();
- hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
+ hlist_for_each_entry_rcu(entry, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac,
ethh->h_source)) {
rcu_read_unlock();
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 38b62c78d5da..50917eb3013e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -762,15 +762,19 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
u64 flags;
int err = 0;
u8 field;
+ u32 bmme_flags;
err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
return err;
- /* add port mng change event capability unconditionally to slaves */
+ /* add port mng change event capability and disable mw type 1
+ * unconditionally to slaves
+ */
MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
+ flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
/* For guests, report Blueflame disabled */
@@ -778,6 +782,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
+ /* For guests, disable mw type 2 */
+ MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+ bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
+ MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+
return 0;
}
@@ -1203,6 +1212,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
+#define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08)
#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
@@ -1319,6 +1329,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
/* TPT attributes */
MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
+ MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
@@ -1415,6 +1426,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* TPT attributes */
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
+ MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 3af33ff669cc..151c2bb380a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -170,6 +170,7 @@ struct mlx4_init_hca_param {
u8 log_mc_table_sz;
u8 log_mpt_sz;
u8 log_uar_sz;
+ u8 mw_enabled; /* Enable memory windows */
u8 uar_page_sz; /* log pg sz in 4k chunks */
u8 steering_mode; /* for QUERY_HCA */
u64 dev_cap_enabled;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b9dde139dac5..d180bc46826a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1431,6 +1431,10 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
init_hca.uar_page_sz = PAGE_SHIFT - 12;
+ init_hca.mw_enabled = 0;
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
+ dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
+ init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ed4a6959e828..cf883345af88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -60,6 +60,8 @@
#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7
#define MLX4_FS_NUM_MCG (1 << 17)
+#define INIT_HCA_TPT_MW_ENABLE (1 << 7)
+
#define MLX4_NUM_UP 8
#define MLX4_NUM_TC 8
#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
@@ -113,10 +115,10 @@ enum {
MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
};
-enum mlx4_mr_state {
- MLX4_MR_DISABLED = 0,
- MLX4_MR_EN_HW,
- MLX4_MR_EN_SW
+enum mlx4_mpt_state {
+ MLX4_MPT_DISABLED = 0,
+ MLX4_MPT_EN_HW,
+ MLX4_MPT_EN_SW
};
#define MLX4_COMM_TIME 10000
@@ -263,6 +265,22 @@ struct mlx4_icm_table {
struct mlx4_icm **icm;
};
+#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
+#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
+#define MLX4_MPT_FLAG_MIO (1 << 17)
+#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
+#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
+#define MLX4_MPT_FLAG_REGION (1 << 8)
+
+#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27)
+#define MLX4_MPT_PD_FLAG_RAE (1 << 28)
+#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
+
+#define MLX4_MPT_QP_FLAG_BOUND_QP (1 << 7)
+
+#define MLX4_MPT_STATUS_SW 0xF0
+#define MLX4_MPT_STATUS_HW 0x00
+
/*
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
@@ -863,10 +881,10 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
-int __mlx4_mr_reserve(struct mlx4_dev *dev);
-void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
-int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
-void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+int __mlx4_mpt_reserve(struct mlx4_dev *dev);
+void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index c202d3ad2a0e..602ca9bf78e4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -44,20 +44,6 @@
#include "mlx4.h"
#include "icm.h"
-#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
-#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
-#define MLX4_MPT_FLAG_MIO (1 << 17)
-#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
-#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
-#define MLX4_MPT_FLAG_REGION (1 << 8)
-
-#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27)
-#define MLX4_MPT_PD_FLAG_RAE (1 << 28)
-#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
-
-#define MLX4_MPT_STATUS_SW 0xF0
-#define MLX4_MPT_STATUS_HW 0x00
-
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
int o;
@@ -321,7 +307,7 @@ static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
mr->size = size;
mr->pd = pd;
mr->access = access;
- mr->enabled = MLX4_MR_DISABLED;
+ mr->enabled = MLX4_MPT_DISABLED;
mr->key = hw_index_to_key(mridx);
return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
@@ -335,14 +321,14 @@ static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
-int __mlx4_mr_reserve(struct mlx4_dev *dev)
+int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}
-static int mlx4_mr_reserve(struct mlx4_dev *dev)
+static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
u64 out_param;
@@ -353,17 +339,17 @@ static int mlx4_mr_reserve(struct mlx4_dev *dev)
return -1;
return get_param_l(&out_param);
}
- return __mlx4_mr_reserve(dev);
+ return __mlx4_mpt_reserve(dev);
}
-void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
}
-static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
u64 in_param;
@@ -376,17 +362,17 @@ static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
index);
return;
}
- __mlx4_mr_release(dev, index);
+ __mlx4_mpt_release(dev, index);
}
-int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}
-static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
u64 param;
@@ -397,17 +383,17 @@ static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
- return __mlx4_mr_alloc_icm(dev, index);
+ return __mlx4_mpt_alloc_icm(dev, index);
}
-void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
mlx4_table_put(dev, &mr_table->dmpt_table, index);
}
-static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
u64 in_param;
@@ -420,7 +406,7 @@ static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
index);
return;
}
- return __mlx4_mr_free_icm(dev, index);
+ return __mlx4_mpt_free_icm(dev, index);
}
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
@@ -429,41 +415,52 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
u32 index;
int err;
- index = mlx4_mr_reserve(dev);
+ index = mlx4_mpt_reserve(dev);
if (index == -1)
return -ENOMEM;
err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
access, npages, page_shift, mr);
if (err)
- mlx4_mr_release(dev, index);
+ mlx4_mpt_release(dev, index);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
-static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
+static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
int err;
- if (mr->enabled == MLX4_MR_EN_HW) {
+ if (mr->enabled == MLX4_MPT_EN_HW) {
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
- if (err)
- mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
+ if (err) {
+ mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
+ mlx4_warn(dev, "MR has MWs bound to it.\n");
+ return err;
+ }
- mr->enabled = MLX4_MR_EN_SW;
+ mr->enabled = MLX4_MPT_EN_SW;
}
mlx4_mtt_cleanup(dev, &mr->mtt);
+
+ return 0;
}
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
- mlx4_mr_free_reserved(dev, mr);
+ int ret;
+
+ ret = mlx4_mr_free_reserved(dev, mr);
+ if (ret)
+ return ret;
if (mr->enabled)
- mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
- mlx4_mr_release(dev, key_to_hw_index(mr->key));
+ mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
+ mlx4_mpt_release(dev, key_to_hw_index(mr->key));
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
@@ -473,7 +470,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
+ err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
if (err)
return err;
@@ -520,7 +517,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_cmd;
}
- mr->enabled = MLX4_MR_EN_HW;
+ mr->enabled = MLX4_MPT_EN_HW;
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -530,7 +527,7 @@ err_cmd:
mlx4_free_cmd_mailbox(dev, mailbox);
err_table:
- mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+ mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -657,6 +654,101 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
+int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
+ struct mlx4_mw *mw)
+{
+ u32 index;
+
+ if ((type == MLX4_MW_TYPE_1 &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
+ (type == MLX4_MW_TYPE_2 &&
+ !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
+ return -ENOTSUPP;
+
+ index = mlx4_mpt_reserve(dev);
+ if (index == -1)
+ return -ENOMEM;
+
+ mw->key = hw_index_to_key(index);
+ mw->pd = pd;
+ mw->type = type;
+ mw->enabled = MLX4_MPT_DISABLED;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mw_alloc);
+
+int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mpt_entry *mpt_entry;
+ int err;
+
+ err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
+ if (err)
+ return err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto err_table;
+ }
+ mpt_entry = mailbox->buf;
+
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
+ * off, thus creating a memory window and not a memory region.
+ */
+ mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key));
+ mpt_entry->pd_flags = cpu_to_be32(mw->pd);
+ if (mw->type == MLX4_MW_TYPE_2) {
+ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+ mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
+ mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
+ }
+
+ err = mlx4_SW2HW_MPT(dev, mailbox,
+ key_to_hw_index(mw->key) &
+ (dev->caps.num_mpts - 1));
+ if (err) {
+ mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
+ goto err_cmd;
+ }
+ mw->enabled = MLX4_MPT_EN_HW;
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return 0;
+
+err_cmd:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+err_table:
+ mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_mw_enable);
+
+void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
+{
+ int err;
+
+ if (mw->enabled == MLX4_MPT_EN_HW) {
+ err = mlx4_HW2SW_MPT(dev, NULL,
+ key_to_hw_index(mw->key) &
+ (dev->caps.num_mpts - 1));
+ if (err)
+ mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
+
+ mw->enabled = MLX4_MPT_EN_SW;
+ }
+ if (mw->enabled)
+ mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
+ mlx4_mpt_release(dev, key_to_hw_index(mw->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mw_free);
+
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -831,7 +923,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
return 0;
err_free:
- mlx4_mr_free(dev, &fmr->mr);
+ (void) mlx4_mr_free(dev, &fmr->mr);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
@@ -882,17 +974,21 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
err);
return;
}
- fmr->mr.enabled = MLX4_MR_EN_SW;
+ fmr->mr.enabled = MLX4_MPT_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
+ int ret;
+
if (fmr->maps)
return -EBUSY;
- mlx4_mr_free(dev, &fmr->mr);
- fmr->mr.enabled = MLX4_MR_DISABLED;
+ ret = mlx4_mr_free(dev, &fmr->mr);
+ if (ret)
+ return ret;
+ fmr->mr.enabled = MLX4_MPT_DISABLED;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 5997adc943d0..083fb48dc3d7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1231,14 +1231,14 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE:
- index = __mlx4_mr_reserve(dev);
+ index = __mlx4_mpt_reserve(dev);
if (index == -1)
break;
id = index & mpt_mask(dev);
err = add_res_range(dev, slave, id, 1, RES_MPT, index);
if (err) {
- __mlx4_mr_release(dev, index);
+ __mlx4_mpt_release(dev, index);
break;
}
set_param_l(out_param, index);
@@ -1251,7 +1251,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
return err;
- err = __mlx4_mr_alloc_icm(dev, mpt->key);
+ err = __mlx4_mpt_alloc_icm(dev, mpt->key);
if (err) {
res_abort_move(dev, slave, RES_MPT, id);
return err;
@@ -1586,7 +1586,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
if (err)
break;
- __mlx4_mr_release(dev, index);
+ __mlx4_mpt_release(dev, index);
break;
case RES_OP_MAP_ICM:
index = get_param_l(&in_param);
@@ -1596,7 +1596,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
return err;
- __mlx4_mr_free_icm(dev, mpt->key);
+ __mlx4_mpt_free_icm(dev, mpt->key);
res_end_move(dev, slave, RES_MPT, id);
return err;
break;
@@ -1796,6 +1796,26 @@ static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
return be32_to_cpu(mpt->mtt_sz);
}
+static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
+}
+
+static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
+}
+
+static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
+}
+
+static int mr_is_region(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
+}
+
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
@@ -1856,12 +1876,41 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
int phys;
int id;
+ u32 pd;
+ int pd_slave;
id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
if (err)
return err;
+ /* Disable memory windows for VFs. */
+ if (!mr_is_region(inbox->buf)) {
+ err = -EPERM;
+ goto ex_abort;
+ }
+
+ /* Make sure that the PD bits related to the slave id are zeros. */
+ pd = mr_get_pd(inbox->buf);
+ pd_slave = (pd >> 17) & 0x7f;
+ if (pd_slave != 0 && pd_slave != slave) {
+ err = -EPERM;
+ goto ex_abort;
+ }
+
+ if (mr_is_fmr(inbox->buf)) {
+ /* FMR and Bind Enable are forbidden in slave devices. */
+ if (mr_is_bind_enabled(inbox->buf)) {
+ err = -EPERM;
+ goto ex_abort;
+ }
+ /* FMR and Memory Windows are also forbidden. */
+ if (!mr_is_region(inbox->buf)) {
+ err = -EPERM;
+ goto ex_abort;
+ }
+ }
+
phys = mr_phys_mpt(inbox->buf);
if (!phys) {
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
@@ -3480,7 +3529,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
while (state != 0) {
switch (state) {
case RES_MPT_RESERVED:
- __mlx4_mr_release(dev, mpt->key);
+ __mlx4_mpt_release(dev, mpt->key);
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&mpt->com.node,
&tracker->res_tree[RES_MPT]);
@@ -3491,7 +3540,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
break;
case RES_MPT_MAPPED:
- __mlx4_mr_free_icm(dev, mpt->key);
+ __mlx4_mpt_free_icm(dev, mpt->key);
state = RES_MPT_RESERVED;
break;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 0be5844d6372..b1cfbb75ff1e 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -579,8 +579,9 @@ static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
(TX_RING_SIZE-1)].dma;
freed = pasemi_mac_unmap_tx_skb(mac, nfrags,
info->skb, dmas);
- } else
+ } else {
freed = 2;
+ }
}
kfree(txring->ring_info);
@@ -808,8 +809,9 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
XCT_MACRX_CSUM_S;
- } else
+ } else {
skb_checksum_none_assert(skb);
+ }
packets++;
tot_bytes += len;
@@ -1829,10 +1831,11 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
err);
goto out;
- } else if netif_msg_probe(mac)
+ } else if (netif_msg_probe(mac)) {
printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %pM\n",
dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
mac->dma_if, dev->dev_addr);
+ }
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 11c3db6daffd..ba3c72fce1f2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 1
-#define _QLCNIC_LINUX_SUBVERSION 34
-#define QLCNIC_LINUX_VERSIONID "5.1.34"
+#define _QLCNIC_LINUX_SUBVERSION 35
+#define QLCNIC_LINUX_VERSIONID "5.1.35"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -1755,7 +1755,7 @@ static inline int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
static inline int qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
- return adapter->ahw->hw_ops->config_loopback(adapter, mode);
+ return adapter->ahw->hw_ops->clear_loopback(adapter, mode);
}
static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index c53832b02b3e..5c033f268ca5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -589,13 +589,6 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
qlcnic_83xx_register_nic_idc_func(adapter, 1);
qlcnic_83xx_enable_mbx_intrpt(adapter);
- if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
- if (qlcnic_83xx_config_intrpt(adapter, 1)) {
- netdev_err(adapter->netdev,
- "Failed to enable mbx intr\n");
- return -EIO;
- }
- }
if (qlcnic_83xx_configure_opmode(adapter)) {
qlcnic_83xx_idc_enter_failed_state(adapter, 1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 325e11e1ce0f..f89cc7a3fe6c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -576,7 +576,7 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
{
struct qlcnic_filter *tmp_fil;
- struct hlist_node *tmp_hnode, *n;
+ struct hlist_node *n;
struct hlist_head *head;
int i;
unsigned long time;
@@ -584,7 +584,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
for (i = 0; i < adapter->fhash.fbucket_size; i++) {
head = &(adapter->fhash.fhead[i]);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
QLCNIC_MAC_DEL;
time = tmp_fil->ftime;
@@ -604,7 +604,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
head = &(adapter->rx_fhash.fhead[i]);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
{
time = tmp_fil->ftime;
if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
@@ -621,14 +621,14 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
{
struct qlcnic_filter *tmp_fil;
- struct hlist_node *tmp_hnode, *n;
+ struct hlist_node *n;
struct hlist_head *head;
int i;
u8 cmd;
for (i = 0; i < adapter->fhash.fbucket_size; i++) {
head = &(adapter->fhash.fhead[i]);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
QLCNIC_MAC_DEL;
qlcnic_sre_macaddr_change(adapter,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 6387e0cc3ea9..0e630061bff3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -162,7 +162,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
{
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
struct qlcnic_filter *fil, *tmp_fil;
- struct hlist_node *tmp_hnode, *n;
+ struct hlist_node *n;
struct hlist_head *head;
unsigned long time;
u64 src_addr = 0;
@@ -179,7 +179,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
(adapter->fhash.fbucket_size - 1);
head = &(adapter->rx_fhash.fhead[hindex]);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
time = tmp_fil->ftime;
@@ -205,7 +205,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
(adapter->fhash.fbucket_size - 1);
head = &(adapter->rx_fhash.fhead[hindex]);
spin_lock(&adapter->rx_mac_learn_lock);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
found = 1;
@@ -272,7 +272,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct sk_buff *skb)
{
struct qlcnic_filter *fil, *tmp_fil;
- struct hlist_node *tmp_hnode, *n;
+ struct hlist_node *n;
struct hlist_head *head;
struct net_device *netdev = adapter->netdev;
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -294,7 +294,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
head = &(adapter->fhash.fhead[hindex]);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 5d5fd06c4b42..28a6d4838364 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1150,7 +1150,7 @@ static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
}
if (!npar_opt_timeo) {
dev_err(&adapter->pdev->dev,
- "Waiting for NPAR state to opertional timeout\n");
+ "Waiting for NPAR state to operational timeout\n");
return -EIO;
}
return 0;
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
index ae3a3557293f..3074aa374c6b 100644
--- a/drivers/net/ethernet/sun/Kconfig
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -61,7 +61,7 @@ config SUNGEM
select SUNGEM_PHY
---help---
Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
- <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.
+ <http://docs.oracle.com/cd/E19455-01/806-3985-10/806-3985-10.pdf>.
config CASSINI
tristate "Sun Cassini support"
@@ -69,7 +69,7 @@ config CASSINI
select CRC32
---help---
Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
- <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
+ <http://docs.oracle.com/cd/E19113-01/giga.ether.pci/817-4341-10/817-4341-10.pdf>.
config SUNVNET
tristate "Sun Virtual Network support"
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 289b4eefb42f..1df0ff3839e8 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -614,10 +614,9 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
unsigned int hash = vnet_hashfn(skb->data);
struct hlist_head *hp = &vp->port_hash[hash];
- struct hlist_node *n;
struct vnet_port *port;
- hlist_for_each_entry(port, n, hp, hash) {
+ hlist_for_each_entry(port, hp, hash) {
if (ether_addr_equal(port->raddr, skb->data))
return port;
}
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 68c3418160ba..ee13dc78430c 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -492,11 +492,13 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
+EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler)
@@ -1028,3 +1030,4 @@ unlock_ret:
spin_unlock_irqrestore(&ctlr->lock, flags);
return ret;
}
+EXPORT_SYMBOL_GPL(cpdma_control_set);
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 95dbcfdf131d..bf5e59687680 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -1,6 +1,6 @@
config MKISS
tristate "Serial port KISS driver"
- depends on AX25
+ depends on AX25 && TTY
select CRC16
---help---
KISS is a protocol used for the exchange of data between a computer
@@ -18,7 +18,7 @@ config MKISS
config 6PACK
tristate "Serial port 6PACK driver"
- depends on AX25
+ depends on AX25 && TTY
---help---
6pack is a transmission protocol for the data exchange between your
PC and your TNC (the Terminal Node Controller acts as a kind of
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d5202a4b0877..5f85205cd12b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -498,8 +498,7 @@ static int netvsc_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Network guid */
- { VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
- 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
+ { HV_NIC_GUID, },
{ },
};
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 59e9d9e1fd0f..2a30193d0d50 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -5,7 +5,7 @@ comment "SIR device drivers"
config IRTTY_SIR
tristate "IrTTY (uses Linux serial driver)"
- depends on IRDA
+ depends on IRDA && TTY
help
Say Y here if you want to build support for the IrTTY line
discipline. To compile it as a module, choose M here: the module
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 84872043b5c6..9cea451a6081 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -993,7 +993,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
/* Enable Interuupt */
self->ier = IER_EOM; // benjamin 2000/11/20 07:24PM
- /* Be ready for incomming frames */
+ /* Be ready for incoming frames */
ali_ircc_dma_receive(self); // benajmin 2000/11/8 07:46PM not complete
}
/* Go to SIR Speed */
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 6e4d4b62c9a8..a41267197839 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -210,7 +210,7 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
* been received, which can now be decapsulated and delivered for
* further processing
*
- * calling context depends on underlying driver and tty->low_latency!
+ * calling context depends on underlying driver and tty->port->low_latency!
* for example (low_latency: 1 / 0):
* serial.c: uart-interrupt / softint
* usbserial: urb-complete-interrupt / softint
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index defcd8a85744..417b2af1aa80 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -55,9 +55,8 @@ static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
const unsigned char *addr)
{
struct macvlan_dev *vlan;
- struct hlist_node *n;
- hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) {
+ hlist_for_each_entry_rcu(vlan, &port->vlan_hash[addr[5]], hlist) {
if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
return vlan;
}
@@ -149,7 +148,6 @@ static void macvlan_broadcast(struct sk_buff *skb,
{
const struct ethhdr *eth = eth_hdr(skb);
const struct macvlan_dev *vlan;
- struct hlist_node *n;
struct sk_buff *nskb;
unsigned int i;
int err;
@@ -159,7 +157,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
return;
for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
+ hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) {
if (vlan->dev == src || !(vlan->mode & mode))
continue;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 97243011d319..a449439bd653 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -279,28 +279,17 @@ static int macvtap_receive(struct sk_buff *skb)
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
int retval = -ENOMEM;
- int id;
mutex_lock(&minor_lock);
- if (idr_pre_get(&minor_idr, GFP_KERNEL) == 0)
- goto exit;
-
- retval = idr_get_new_above(&minor_idr, vlan, 1, &id);
- if (retval < 0) {
- if (retval == -EAGAIN)
- retval = -ENOMEM;
- goto exit;
- }
- if (id < MACVTAP_NUM_DEVS) {
- vlan->minor = id;
- } else {
+ retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
+ if (retval >= 0) {
+ vlan->minor = retval;
+ } else if (retval == -ENOSPC) {
printk(KERN_ERR "too many macvtap devices\n");
retval = -EINVAL;
- idr_remove(&minor_idr, id);
}
-exit:
mutex_unlock(&minor_lock);
- return retval;
+ return retval < 0 ? retval : 0;
}
static void macvtap_free_minor(struct macvlan_dev *vlan)
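The macvtap conversion above shows the new idr API: the idr_pre_get()/idr_get_new_above() pair collapses into a single idr_alloc(idr, ptr, start, end, gfp) call that returns the allocated id, -ENOMEM on allocation failure, or -ENOSPC when the [start, end) range is exhausted. A minimal sketch with hypothetical names:

static DEFINE_IDR(example_idr);

static int example_get_id(void *ptr)
{
	int id;

	/* lowest free id in [1, 100); GFP flags are passed directly */
	id = idr_alloc(&example_idr, ptr, 1, 100, GFP_KERNEL);
	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */
	return id;
}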
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
new file mode 100644
index 000000000000..ed947dd76fbd
--- /dev/null
+++ b/drivers/net/ntb_netdev.c
@@ -0,0 +1,408 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copy
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Network Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ntb.h>
+
+#define NTB_NETDEV_VER "0.7"
+
+MODULE_DESCRIPTION(KBUILD_MODNAME);
+MODULE_VERSION(NTB_NETDEV_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+struct ntb_netdev {
+ struct list_head list;
+ struct pci_dev *pdev;
+ struct net_device *ndev;
+ struct ntb_transport_qp *qp;
+};
+
+#define NTB_TX_TIMEOUT_MS 1000
+#define NTB_RXQ_SIZE 100
+
+static LIST_HEAD(dev_list);
+
+static void ntb_netdev_event_handler(void *data, int status)
+{
+ struct net_device *ndev = data;
+ struct ntb_netdev *dev = netdev_priv(ndev);
+
+ netdev_dbg(ndev, "Event %x, Link %x\n", status,
+ ntb_transport_link_query(dev->qp));
+
+ /* Currently, only link status event is supported */
+ if (status)
+ netif_carrier_on(ndev);
+ else
+ netif_carrier_off(ndev);
+}
+
+static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len)
+{
+ struct net_device *ndev = qp_data;
+ struct sk_buff *skb;
+ int rc;
+
+ skb = data;
+ if (!skb)
+ return;
+
+ netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (netif_rx(skb) == NET_RX_DROP) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ } else {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ }
+
+ skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
+ if (!skb) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_frame_errors++;
+ return;
+ }
+
+ rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
+ if (rc) {
+ dev_kfree_skb(skb);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_fifo_errors++;
+ }
+}
+
+static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len)
+{
+ struct net_device *ndev = qp_data;
+ struct sk_buff *skb;
+
+ skb = data;
+ if (!skb || !ndev)
+ return;
+
+ if (len > 0) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ } else {
+ ndev->stats.tx_errors++;
+ ndev->stats.tx_aborted_errors++;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+ int rc;
+
+ netdev_dbg(ndev, "%s: skb len %d\n", __func__, skb->len);
+
+ rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
+ if (rc)
+ goto err;
+
+ return NETDEV_TX_OK;
+
+err:
+ ndev->stats.tx_dropped++;
+ ndev->stats.tx_errors++;
+ return NETDEV_TX_BUSY;
+}
+
+static int ntb_netdev_open(struct net_device *ndev)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int rc, i, len;
+
+ /* Add some empty rx bufs */
+ for (i = 0; i < NTB_RXQ_SIZE; i++) {
+ skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
+ ndev->mtu + ETH_HLEN);
+ if (rc == -EINVAL)
+ goto err;
+ }
+
+ netif_carrier_off(ndev);
+ ntb_transport_link_up(dev->qp);
+
+ return 0;
+
+err:
+ while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+ dev_kfree_skb(skb);
+ return rc;
+}
+
+static int ntb_netdev_close(struct net_device *ndev)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int len;
+
+ ntb_transport_link_down(dev->qp);
+
+ while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int len, rc;
+
+ if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
+ return -EINVAL;
+
+ if (!netif_running(ndev)) {
+ ndev->mtu = new_mtu;
+ return 0;
+ }
+
+ /* Bring down the link and dispose of posted rx entries */
+ ntb_transport_link_down(dev->qp);
+
+ if (ndev->mtu < new_mtu) {
+ int i;
+
+ for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
+ dev_kfree_skb(skb);
+
+ for (; i; i--) {
+ skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
+ new_mtu + ETH_HLEN);
+ if (rc) {
+ dev_kfree_skb(skb);
+ goto err;
+ }
+ }
+ }
+
+ ndev->mtu = new_mtu;
+
+ ntb_transport_link_up(dev->qp);
+
+ return 0;
+
+err:
+ ntb_transport_link_down(dev->qp);
+
+ while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+ dev_kfree_skb(skb);
+
+ netdev_err(ndev, "Error changing MTU, device inoperable\n");
+ return rc;
+}
+
+static const struct net_device_ops ntb_netdev_ops = {
+ .ndo_open = ntb_netdev_open,
+ .ndo_stop = ntb_netdev_close,
+ .ndo_start_xmit = ntb_netdev_start_xmit,
+ .ndo_change_mtu = ntb_netdev_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static void ntb_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
+}
+
+static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = SUPPORTED_Backplane;
+ cmd->advertising = ADVERTISED_Backplane;
+ cmd->speed = SPEED_UNKNOWN;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_FULL;
+ cmd->port = PORT_OTHER;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_DUMMY1;
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+
+ return 0;
+}
+
+static const struct ethtool_ops ntb_ethtool_ops = {
+ .get_drvinfo = ntb_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_settings = ntb_get_settings,
+};
+
+static const struct ntb_queue_handlers ntb_netdev_handlers = {
+ .tx_handler = ntb_netdev_tx_handler,
+ .rx_handler = ntb_netdev_rx_handler,
+ .event_handler = ntb_netdev_event_handler,
+};
+
+static int ntb_netdev_probe(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+ struct ntb_netdev *dev;
+ int rc;
+
+ ndev = alloc_etherdev(sizeof(struct ntb_netdev));
+ if (!ndev)
+ return -ENOMEM;
+
+ dev = netdev_priv(ndev);
+ dev->ndev = ndev;
+ dev->pdev = pdev;
+ BUG_ON(!dev->pdev);
+ ndev->features = NETIF_F_HIGHDMA;
+
+ ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
+ ndev->hw_features = ndev->features;
+ ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
+
+ random_ether_addr(ndev->perm_addr);
+ memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
+
+ ndev->netdev_ops = &ntb_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops);
+
+ dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
+ if (!dev->qp) {
+ rc = -EIO;
+ goto err;
+ }
+
+ ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;
+
+ rc = register_netdev(ndev);
+ if (rc)
+ goto err1;
+
+ list_add(&dev->list, &dev_list);
+ dev_info(&pdev->dev, "%s created\n", ndev->name);
+ return 0;
+
+err1:
+ ntb_transport_free_queue(dev->qp);
+err:
+ free_netdev(ndev);
+ return rc;
+}
+
+static void ntb_netdev_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+ struct ntb_netdev *dev;
+
+ list_for_each_entry(dev, &dev_list, list) {
+ if (dev->pdev == pdev)
+ break;
+ }
+ if (dev == NULL)
+ return;
+
+ ndev = dev->ndev;
+
+ unregister_netdev(ndev);
+ ntb_transport_free_queue(dev->qp);
+ free_netdev(ndev);
+}
+
+static struct ntb_client ntb_netdev_client = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .probe = ntb_netdev_probe,
+ .remove = ntb_netdev_remove,
+};
+
+static int __init ntb_netdev_init_module(void)
+{
+ int rc;
+
+ rc = ntb_register_client_dev(KBUILD_MODNAME);
+ if (rc)
+ return rc;
+ return ntb_register_client(&ntb_netdev_client);
+}
+module_init(ntb_netdev_init_module);
+
+static void __exit ntb_netdev_exit_module(void)
+{
+ ntb_unregister_client(&ntb_netdev_client);
+ ntb_unregister_client_dev(KBUILD_MODNAME);
+}
+module_exit(ntb_netdev_exit_module);
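ntb_netdev.c is the first consumer of the ntb_transport queue API added below. Judging from the calls used here, a minimal client creates a queue with rx/tx/event handlers, posts receive buffers, then raises the link; the sketch below follows that shape with placeholder handler bodies and a hypothetical MY_BUF_SIZE:

static void my_rx(struct ntb_transport_qp *qp, void *qp_data, void *buf, int len)
{
	/* consume the received bytes in 'buf', then repost a buffer */
	ntb_transport_rx_enqueue(qp, buf, buf, MY_BUF_SIZE);
}

static void my_tx(struct ntb_transport_qp *qp, void *qp_data, void *buf, int len)
{
	/* 'buf' was sent (len > 0) or aborted (len <= 0); reclaim it here */
}

static void my_event(void *data, int link_is_up)
{
	/* react to transport link changes */
}

static const struct ntb_queue_handlers my_handlers = {
	.tx_handler	= my_tx,
	.rx_handler	= my_rx,
	.event_handler	= my_event,
};

	/* in probe: */
	qp = ntb_transport_create_queue(priv, pdev, &my_handlers);
	if (!qp)
		return -EIO;
	ntb_transport_rx_enqueue(qp, buf, buf, MY_BUF_SIZE);
	ntb_transport_link_up(qp);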
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 961f0b293913..450345261bd3 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -4,7 +4,6 @@
menuconfig PHYLIB
tristate "PHY Device support and infrastructure"
- depends on !S390
depends on NETDEVICES
help
Ethernet controllers are usually attached to PHY
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index 278dea0c4c98..1373c6d7278d 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -147,6 +147,7 @@ config PPPOL2TP
Support for PPP-over-L2TP socket family. L2TP is a protocol
used by ISPs and enterprises to tunnel PPP traffic over UDP
tunnels. L2TP is replacing PPTP for VPN uses.
+if TTY
config PPP_ASYNC
tristate "PPP support for async serial ports"
@@ -172,4 +173,6 @@ config PPP_SYNC_TTY
To compile this driver as a module, choose M here.
+endif # TTY
+
endif # PPP
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 3db9131e9229..72ff14b811c6 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2953,46 +2953,21 @@ static void __exit ppp_cleanup(void)
* by holding all_ppp_mutex
*/
-static int __unit_alloc(struct idr *p, void *ptr, int n)
-{
- int unit, err;
-
-again:
- if (!idr_pre_get(p, GFP_KERNEL)) {
- pr_err("PPP: No free memory for idr\n");
- return -ENOMEM;
- }
-
- err = idr_get_new_above(p, ptr, n, &unit);
- if (err < 0) {
- if (err == -EAGAIN)
- goto again;
- return err;
- }
-
- return unit;
-}
-
/* associate pointer with specified number */
static int unit_set(struct idr *p, void *ptr, int n)
{
int unit;
- unit = __unit_alloc(p, ptr, n);
- if (unit < 0)
- return unit;
- else if (unit != n) {
- idr_remove(p, unit);
- return -EINVAL;
- }
-
+ unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
+ if (unit == -ENOSPC)
+ unit = -EINVAL;
return unit;
}
/* get new free unit number and associate pointer with it */
static int unit_get(struct idr *p, void *ptr)
{
- return __unit_alloc(p, ptr, 0);
+ return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
}
/* put unit number back to a pool */
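The same idr_alloc() call also covers the "claim exactly this id" idiom that unit_set() needs: a one-slot range [n, n + 1) either returns n or fails with -ENOSPC when the id is taken (remapped to -EINVAL above), while unit_get() passes end = 0, meaning "no upper bound". A two-line sketch:

	/* reserve exactly id 5; -ENOSPC here means id 5 is already in use */
	err = idr_alloc(&example_idr, ptr, 5, 6, GFP_KERNEL);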
diff --git a/drivers/net/slip/Kconfig b/drivers/net/slip/Kconfig
index 211b160e4e9c..48e68714eef3 100644
--- a/drivers/net/slip/Kconfig
+++ b/drivers/net/slip/Kconfig
@@ -4,6 +4,7 @@
config SLIP
tristate "SLIP (serial line) support"
+ depends on TTY
---help---
Say Y if you intend to use SLIP or CSLIP (compressed SLIP) to
connect to your Internet service provider or to connect to some
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b6f45c5d84d5..2c6a22e278ea 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -197,9 +197,8 @@ static inline u32 tun_hashfn(u32 rxhash)
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
struct tun_flow_entry *e;
- struct hlist_node *n;
- hlist_for_each_entry_rcu(e, n, head, hash_link) {
+ hlist_for_each_entry_rcu(e, head, hash_link) {
if (e->rxhash == rxhash)
return e;
}
@@ -241,9 +240,9 @@ static void tun_flow_flush(struct tun_struct *tun)
spin_lock_bh(&tun->lock);
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
struct tun_flow_entry *e;
- struct hlist_node *h, *n;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
+ hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
tun_flow_delete(tun, e);
}
spin_unlock_bh(&tun->lock);
@@ -256,9 +255,9 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
spin_lock_bh(&tun->lock);
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
struct tun_flow_entry *e;
- struct hlist_node *h, *n;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+ hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
if (e->queue_index == queue_index)
tun_flow_delete(tun, e);
}
@@ -279,9 +278,9 @@ static void tun_flow_cleanup(unsigned long data)
spin_lock_bh(&tun->lock);
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
struct tun_flow_entry *e;
- struct hlist_node *h, *n;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+ hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
unsigned long this_timer;
count++;
this_timer = e->updated + delay;
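The _safe variant changes the same way in the tun.c hunks above: it keeps one struct hlist_node * purely as the lookahead pointer, so the cursor node disappears and the loop becomes (pos, n, head, member). A small removal sketch with a hypothetical expired() test:

	struct item *it;
	struct hlist_node *next;

	hlist_for_each_entry_safe(it, next, bucket, node) {
		if (expired(it)) {
			hlist_del(&it->node);
			kfree(it);
		}
	}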
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3a44a5d7bf9e..da92ed3797aa 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -443,7 +443,7 @@ config USB_NET_QMI_WWAN
config USB_HSO
tristate "Option USB High Speed Mobile Devices"
- depends on USB && RFKILL
+ depends on USB && RFKILL && TTY
default n
help
Choose this option if you have an Option HSDPA/HSUPA card.
@@ -491,7 +491,7 @@ config USB_SIERRA_NET
config USB_VL600
tristate "LG VL600 modem dongle"
- depends on USB_NET_CDCETHER
+ depends on USB_NET_CDCETHER && TTY
select USB_ACM
help
Select this if you want to use an LG Electronics 4G/LTE usb modem
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 41e5dfb5ee64..e2dd3249b6bd 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2035,25 +2035,23 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
tty = tty_port_tty_get(&serial->port);
/* Push data to tty */
- if (tty) {
- write_length_remaining = urb->actual_length -
- serial->curr_rx_urb_offset;
- D1("data to push to tty");
- while (write_length_remaining) {
- if (test_bit(TTY_THROTTLED, &tty->flags)) {
- tty_kref_put(tty);
- return -1;
- }
- curr_write_len = tty_insert_flip_string
- (tty, urb->transfer_buffer +
- serial->curr_rx_urb_offset,
- write_length_remaining);
- serial->curr_rx_urb_offset += curr_write_len;
- write_length_remaining -= curr_write_len;
- tty_flip_buffer_push(tty);
+ write_length_remaining = urb->actual_length -
+ serial->curr_rx_urb_offset;
+ D1("data to push to tty");
+ while (write_length_remaining) {
+ if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+ tty_kref_put(tty);
+ return -1;
}
- tty_kref_put(tty);
+ curr_write_len = tty_insert_flip_string(&serial->port,
+ urb->transfer_buffer + serial->curr_rx_urb_offset,
+ write_length_remaining);
+ serial->curr_rx_urb_offset += curr_write_len;
+ write_length_remaining -= curr_write_len;
+ tty_flip_buffer_push(&serial->port);
}
+ tty_kref_put(tty);
+
if (write_length_remaining == 0) {
serial->curr_rx_urb_offset = 0;
serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
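The hso rework follows the 3.9 tty flip-buffer change: received data is now pushed into the struct tty_port rather than the struct tty_struct, so the driver can keep buffering even when no tty is attached (only the throttle check still needs the tty reference). The new pair of calls, sketched for a driver that embeds a tty_port in its private data:

	/* copy 'count' received bytes into the port's flip buffer ... */
	copied = tty_insert_flip_string(&priv->port, buf, count);
	/* ... then hand whatever is queued up to the line discipline */
	tty_flip_buffer_push(&priv->port);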
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index ff4fa37dfd1d..e6d2dea1373c 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -53,7 +53,7 @@
#define FEATURE_8_WAKEUP_FILTERS (0x01)
#define FEATURE_PHY_NLP_CROSSOVER (0x02)
-#define FEATURE_AUTOSUSPEND (0x04)
+#define FEATURE_REMOTE_WAKEUP (0x04)
#define SUSPEND_SUSPEND0 (0x01)
#define SUSPEND_SUSPEND1 (0x02)
@@ -1146,7 +1146,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
(val == ID_REV_CHIP_ID_89530_) || (val == ID_REV_CHIP_ID_9730_))
pdata->features = (FEATURE_8_WAKEUP_FILTERS |
FEATURE_PHY_NLP_CROSSOVER |
- FEATURE_AUTOSUSPEND);
+ FEATURE_REMOTE_WAKEUP);
else if (val == ID_REV_CHIP_ID_9512_)
pdata->features = FEATURE_8_WAKEUP_FILTERS;
@@ -1247,10 +1247,12 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
/* read back PM_CTRL */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ if (ret < 0)
+ return ret;
pdata->suspend_flags |= SUSPEND_SUSPEND0;
- return ret;
+ return 0;
}
static int smsc95xx_enter_suspend1(struct usbnet *dev)
@@ -1293,10 +1295,12 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
val |= (PM_CTL_WUPS_ED_ | PM_CTL_ED_EN_);
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0)
+ return ret;
pdata->suspend_flags |= SUSPEND_SUSPEND1;
- return ret;
+ return 0;
}
static int smsc95xx_enter_suspend2(struct usbnet *dev)
@@ -1313,10 +1317,12 @@ static int smsc95xx_enter_suspend2(struct usbnet *dev)
val |= PM_CTL_SUS_MODE_2;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0)
+ return ret;
pdata->suspend_flags |= SUSPEND_SUSPEND2;
- return ret;
+ return 0;
}
static int smsc95xx_enter_suspend3(struct usbnet *dev)
@@ -1372,7 +1378,7 @@ static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
if (!link_up) {
/* link is down so enter EDPD mode, but only if device can
* reliably resume from it. This check should be redundant
- * as current FEATURE_AUTOSUSPEND parts also support
+ * as current FEATURE_REMOTE_WAKEUP parts also support
* FEATURE_PHY_NLP_CROSSOVER but it's included for clarity */
if (!(pdata->features & FEATURE_PHY_NLP_CROSSOVER)) {
netdev_warn(dev->net, "EDPD not supported\n");
@@ -1412,15 +1418,6 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
u32 val, link_up;
int ret;
- /* TODO: don't indicate this feature to usb framework if
- * our current hardware doesn't have the capability
- */
- if ((message.event == PM_EVENT_AUTO_SUSPEND) &&
- (!(pdata->features & FEATURE_AUTOSUSPEND))) {
- netdev_warn(dev->net, "autosuspend not supported\n");
- return -EBUSY;
- }
-
ret = usbnet_suspend(intf, message);
if (ret < 0) {
netdev_warn(dev->net, "usbnet_suspend error\n");
@@ -1435,7 +1432,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
/* determine if link is up using only _nopm functions */
link_up = smsc95xx_link_ok_nopm(dev);
- if (message.event == PM_EVENT_AUTO_SUSPEND) {
+ if (message.event == PM_EVENT_AUTO_SUSPEND &&
+ (pdata->features & FEATURE_REMOTE_WAKEUP)) {
ret = smsc95xx_autosuspend(dev, link_up);
goto done;
}
@@ -1872,11 +1870,11 @@ static int smsc95xx_manage_power(struct usbnet *dev, int on)
dev->intf->needs_remote_wakeup = on;
- if (pdata->features & FEATURE_AUTOSUSPEND)
+ if (pdata->features & FEATURE_REMOTE_WAKEUP)
return 0;
- /* this chip revision doesn't support autosuspend */
- netdev_info(dev->net, "hardware doesn't support USB autosuspend\n");
+ /* this chip revision isn't capable of remote wakeup */
+ netdev_info(dev->net, "hardware isn't capable of remote wakeup\n");
if (on)
usb_autopm_get_interface_no_resume(dev->intf);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 192c91c8e799..57ac4b0294bc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1736,17 +1736,7 @@ static struct virtio_driver virtio_net_driver = {
#endif
};
-static int __init init(void)
-{
- return register_virtio_driver(&virtio_net_driver);
-}
-
-static void __exit fini(void)
-{
- unregister_virtio_driver(&virtio_net_driver);
-}
-module_init(init);
-module_exit(fini);
+module_virtio_driver(virtio_net_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
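module_virtio_driver() is the virtio flavour of the module_driver() helper; it expands to the module_init()/module_exit() boilerplate that the removed init()/fini() pair spelled out by hand. A skeleton for a virtio driver, with placeholder probe/remove names:

static struct virtio_driver my_virtio_driver = {
	.driver.name	= KBUILD_MODNAME,
	.driver.owner	= THIS_MODULE,
	.id_table	= id_table,
	.probe		= my_probe,
	.remove		= my_remove,
};
/* registers the driver on module load, unregisters it on unload */
module_virtio_driver(my_virtio_driver);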
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index ffb97b2a15a0..4aad350e4dae 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1385,8 +1385,8 @@ vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
}
-void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
- struct vmxnet3_adapter *adapter)
+static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
+ struct vmxnet3_adapter *adapter)
{
int i;
int j;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 9bc542be2937..a0feb17a0238 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -448,10 +448,8 @@ vmxnet3_get_ringparam(struct net_device *netdev,
param->rx_mini_max_pending = 0;
param->rx_jumbo_max_pending = 0;
- param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
- adapter->num_rx_queues;
- param->tx_pending = adapter->tx_queue[0].tx_ring.size *
- adapter->num_tx_queues;
+ param->rx_pending = adapter->rx_queue[0].rx_ring[0].size;
+ param->tx_pending = adapter->tx_queue[0].tx_ring.size;
param->rx_mini_pending = 0;
param->rx_jumbo_pending = 0;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 9d70421cf3a0..f10e58ac9c1b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -33,6 +33,7 @@
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
+#include <net/ipip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
@@ -144,9 +145,8 @@ static inline struct hlist_head *vni_head(struct net *net, u32 id)
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
{
struct vxlan_dev *vxlan;
- struct hlist_node *node;
- hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
+ hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) {
if (vxlan->vni == id)
return vxlan;
}
@@ -291,9 +291,8 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
{
struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
struct vxlan_fdb *f;
- struct hlist_node *node;
- hlist_for_each_entry_rcu(f, node, head, hlist) {
+ hlist_for_each_entry_rcu(f, head, hlist) {
if (compare_ether_addr(mac, f->eth_addr) == 0)
return f;
}
@@ -421,10 +420,9 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct vxlan_fdb *f;
- struct hlist_node *n;
int err;
- hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
+ hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
if (idx < cb->args[0])
goto skip;
@@ -482,11 +480,10 @@ static bool vxlan_group_used(struct vxlan_net *vn,
const struct vxlan_dev *this)
{
const struct vxlan_dev *vxlan;
- struct hlist_node *node;
unsigned h;
for (h = 0; h < VNI_HASH_SIZE; ++h)
- hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
+ hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) {
if (vxlan == this)
continue;
@@ -962,13 +959,13 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
iph->daddr = dst;
iph->saddr = fl4.saddr;
iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+ tunnel_ip_select_ident(skb, old_iph, &rt->dst);
vxlan_set_owner(dev, skb);
/* See iptunnel_xmit() */
if (skb->ip_summed != CHECKSUM_PARTIAL)
skb->ip_summed = CHECKSUM_NONE;
- ip_select_ident(iph, &rt->dst, NULL);
err = ip_local_out(skb);
if (likely(net_xmit_eval(err) == 0)) {
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 13daec88d918..94e234975c61 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -375,7 +375,7 @@ config LAPBETHER
config X25_ASY
tristate "X.25 async driver"
- depends on LAPB && X25
+ depends on LAPB && X25 && TTY
---help---
Send and receive X.25 frames over regular asynchronous serial
lines such as telephone lines equipped with ordinary modems.
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 0179cefae438..84734a805092 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -938,14 +938,14 @@ static int cosa_open(struct inode *inode, struct file *file)
int ret = 0;
mutex_lock(&cosa_chardev_mutex);
- if ((n=iminor(file->f_path.dentry->d_inode)>>CARD_MINOR_BITS)
+ if ((n=iminor(file_inode(file))>>CARD_MINOR_BITS)
>= nr_cards) {
ret = -ENODEV;
goto out;
}
cosa = cosa_cards+n;
- if ((n=iminor(file->f_path.dentry->d_inode)
+ if ((n=iminor(file_inode(file))
& ((1<<CARD_MINOR_BITS)-1)) >= cosa->nchannels) {
ret = -ENODEV;
goto out;
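file_inode(file) is the new accessor used here (and in ray_cs.c below) in place of the open-coded dentry dereference. Before and after, for any struct file *file:

	/* old */
	struct inode *inode = file->f_path.dentry->d_inode;
	/* new */
	struct inode *inode = file_inode(file);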
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 806e34c19281..05682736e466 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4214,7 +4214,6 @@ redo:
mutex_unlock(&wl->mutex);
cancel_delayed_work_sync(&dev->periodic_work);
cancel_work_sync(&wl->tx_work);
- cancel_work_sync(&wl->firmware_load);
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (!dev || b43_status(dev) < B43_STAT_STARTED) {
@@ -5434,6 +5433,7 @@ static void b43_bcma_remove(struct bcma_device *core)
/* We must cancel any work here before unregistering from ieee80211,
* as the ieee80211 unreg will destroy the workqueue. */
cancel_work_sync(&wldev->restart_work);
+ cancel_work_sync(&wl->firmware_load);
B43_WARN_ON(!wl);
if (!wldev->fw.ucode.data)
@@ -5510,6 +5510,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
/* We must cancel any work here before unregistering from ieee80211,
* as the ieee80211 unreg will destroy the workqueue. */
cancel_work_sync(&wldev->restart_work);
+ cancel_work_sync(&wl->firmware_load);
B43_WARN_ON(!wl);
if (!wldev->fw.ucode.data)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index cecc3eff72e9..2af9c0f0798d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4615,8 +4615,10 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
switch (ifevent->action) {
case BRCMF_E_IF_ADD:
/* waiting process may have timed out */
- if (!cfg->vif_event.vif)
+ if (!cfg->vif_event.vif) {
+ mutex_unlock(&event->vif_event_lock);
return -EBADF;
+ }
ifp->vif = vif;
vif->ifp = ifp;
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 4b54bcf382f3..35c79722c361 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -171,7 +171,7 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mwifiex_adapter *adapter;
struct pcie_service_card *card;
- int hs_actived, i;
+ int hs_actived;
if (pdev) {
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
@@ -191,9 +191,6 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
/* Indicate device suspended */
adapter->is_suspended = true;
- for (i = 0; i < adapter->priv_num; i++)
- netif_carrier_off(adapter->priv[i]->netdev);
-
return 0;
}
@@ -209,7 +206,6 @@ static int mwifiex_pcie_resume(struct pci_dev *pdev)
{
struct mwifiex_adapter *adapter;
struct pcie_service_card *card;
- int i;
if (pdev) {
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
@@ -231,10 +227,6 @@ static int mwifiex_pcie_resume(struct pci_dev *pdev)
adapter->is_suspended = false;
- for (i = 0; i < adapter->priv_num; i++)
- if (adapter->priv[i]->media_connected)
- netif_carrier_on(adapter->priv[i]->netdev);
-
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_ASYNC_CMD);
@@ -916,17 +908,8 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter)
static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- u32 rdptr;
-
- /* Read the TX ring read pointer set by firmware */
- if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
- dev_err(adapter->dev,
- "Flush TXBD: failed to read reg->tx_rdptr\n");
- return -1;
- }
- if (!mwifiex_pcie_txbd_empty(card, rdptr)) {
+ if (!mwifiex_pcie_txbd_empty(card, card->txbd_rdptr)) {
card->txbd_flush = 1;
/* write pointer already set at last send
* send dnld-rdy intr again, wait for completion.
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index e63f646a260e..363ba31b58bf 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -161,7 +161,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
mmc_pm_flag_t pm_flag = 0;
- int i;
int ret = 0;
if (func) {
@@ -198,9 +197,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
/* Indicate device suspended */
adapter->is_suspended = true;
- for (i = 0; i < adapter->priv_num; i++)
- netif_carrier_off(adapter->priv[i]->netdev);
-
return ret;
}
@@ -220,7 +216,6 @@ static int mwifiex_sdio_resume(struct device *dev)
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
mmc_pm_flag_t pm_flag = 0;
- int i;
if (func) {
pm_flag = sdio_get_host_pm_caps(func);
@@ -243,10 +238,6 @@ static int mwifiex_sdio_resume(struct device *dev)
adapter->is_suspended = false;
- for (i = 0; i < adapter->priv_num; i++)
- if (adapter->priv[i]->media_connected)
- netif_carrier_on(adapter->priv[i]->netdev);
-
/* Disable Host Sleep */
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_ASYNC_CMD);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index e7cf37f550d1..3109c0db66e1 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2778,7 +2778,7 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer,
nr = nr * 10 + c;
p++;
} while (--len);
- *(int *)PDE(file->f_path.dentry->d_inode)->data = nr;
+ *(int *)PDE(file_inode(file))->data = nr;
return count;
}
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 48273dd05b63..4941f201d6c8 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -309,7 +309,6 @@ static void zd1201_usbrx(struct urb *urb)
if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) {
int datalen = urb->actual_length-1;
unsigned short len, fc, seq;
- struct hlist_node *node;
len = ntohs(*(__be16 *)&data[datalen-2]);
if (len>datalen)
@@ -362,7 +361,7 @@ static void zd1201_usbrx(struct urb *urb)
hlist_add_head(&frag->fnode, &zd->fraglist);
goto resubmit;
}
- hlist_for_each_entry(frag, node, &zd->fraglist, fnode)
+ hlist_for_each_entry(frag, &zd->fraglist, fnode)
if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
break;
if (!frag)
@@ -1831,14 +1830,14 @@ err_zd:
static void zd1201_disconnect(struct usb_interface *interface)
{
struct zd1201 *zd = usb_get_intfdata(interface);
- struct hlist_node *node, *node2;
+ struct hlist_node *node2;
struct zd1201_frag *frag;
if (!zd)
return;
usb_set_intfdata(interface, NULL);
- hlist_for_each_entry_safe(frag, node, node2, &zd->fraglist, fnode) {
+ hlist_for_each_entry_safe(frag, node2, &zd->fraglist, fnode) {
hlist_del_init(&frag->fnode);
kfree_skb(frag->skb);
kfree(frag);
diff --git a/drivers/ntb/Kconfig b/drivers/ntb/Kconfig
new file mode 100644
index 000000000000..37ee6495acc1
--- /dev/null
+++ b/drivers/ntb/Kconfig
@@ -0,0 +1,13 @@
+config NTB
+ tristate "Intel Non-Transparent Bridge support"
+ depends on PCI
+ depends on X86_64
+ help
+ The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
+ connecting 2 systems. When configured, writes to the device's PCI
+ mapped memory will be mirrored to a buffer on the remote system. The
+ ntb Linux driver uses this point-to-point communication as a method to
+ transfer data from one system to the other.
+
+ If unsure, say N.
+
diff --git a/drivers/ntb/Makefile b/drivers/ntb/Makefile
new file mode 100644
index 000000000000..15cb59fd354e
--- /dev/null
+++ b/drivers/ntb/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_NTB) += ntb.o
+
+ntb-objs := ntb_hw.o ntb_transport.o
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
new file mode 100644
index 000000000000..f802e7c92356
--- /dev/null
+++ b/drivers/ntb/ntb_hw.c
@@ -0,0 +1,1141 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copy
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "ntb_hw.h"
+#include "ntb_regs.h"
+
+#define NTB_NAME "Intel(R) PCI-E Non-Transparent Bridge Driver"
+#define NTB_VER "0.25"
+
+MODULE_DESCRIPTION(NTB_NAME);
+MODULE_VERSION(NTB_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+enum {
+ NTB_CONN_CLASSIC = 0,
+ NTB_CONN_B2B,
+ NTB_CONN_RP,
+};
+
+enum {
+ NTB_DEV_USD = 0,
+ NTB_DEV_DSD,
+};
+
+enum {
+ SNB_HW = 0,
+ BWD_HW,
+};
+
+/* Translate memory window 0,1 to BAR 2,4 */
+#define MW_TO_BAR(mw) (mw * 2 + 2)
+
+static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_JSF)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_SNB)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB)},
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, ntb_pci_tbl);
+
+/**
+ * ntb_register_event_callback() - register event callback
+ * @ndev: pointer to ntb_device instance
+ * @func: callback function to register
+ *
+ * This function registers a callback for any HW driver events such as link
+ * up/down, power management notices and etc.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_register_event_callback(struct ntb_device *ndev,
+ void (*func)(void *handle, enum ntb_hw_event event))
+{
+ if (ndev->event_cb)
+ return -EINVAL;
+
+ ndev->event_cb = func;
+
+ return 0;
+}
+
+/**
+ * ntb_unregister_event_callback() - unregisters the event callback
+ * @ndev: pointer to ntb_device instance
+ *
+ * This function unregisters the existing callback from transport
+ */
+void ntb_unregister_event_callback(struct ntb_device *ndev)
+{
+ ndev->event_cb = NULL;
+}
+
+/**
+ * ntb_register_db_callback() - register a callback for doorbell interrupt
+ * @ndev: pointer to ntb_device instance
+ * @idx: doorbell index to register callback, zero based
+ * @func: callback function to register
+ *
+ * This function registers a callback function for the doorbell interrupt
+ * on the primary side. The function will unmask the doorbell as well to
+ * allow interrupt.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
+ void *data, void (*func)(void *data, int db_num))
+{
+ unsigned long mask;
+
+ if (idx >= ndev->max_cbs || ndev->db_cb[idx].callback) {
+ dev_warn(&ndev->pdev->dev, "Invalid Index.\n");
+ return -EINVAL;
+ }
+
+ ndev->db_cb[idx].callback = func;
+ ndev->db_cb[idx].data = data;
+
+ /* unmask interrupt */
+ mask = readw(ndev->reg_ofs.pdb_mask);
+ clear_bit(idx * ndev->bits_per_vector, &mask);
+ writew(mask, ndev->reg_ofs.pdb_mask);
+
+ return 0;
+}
+
+/**
+ * ntb_unregister_db_callback() - unregister a callback for doorbell interrupt
+ * @ndev: pointer to ntb_device instance
+ * @idx: doorbell index to register callback, zero based
+ *
+ * This function unregisters a callback function for the doorbell interrupt
+ * on the primary side. The function will also mask the said doorbell.
+ */
+void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
+{
+ unsigned long mask;
+
+ if (idx >= ndev->max_cbs || !ndev->db_cb[idx].callback)
+ return;
+
+ mask = readw(ndev->reg_ofs.pdb_mask);
+ set_bit(idx * ndev->bits_per_vector, &mask);
+ writew(mask, ndev->reg_ofs.pdb_mask);
+
+ ndev->db_cb[idx].callback = NULL;
+}
+
+/**
+ * ntb_find_transport() - find the transport pointer
+ * @transport: pointer to pci device
+ *
+ * Given the pci device pointer, return the transport pointer passed in when
+ * the transport was attached at init time.
+ *
+ * RETURNS: pointer to transport.
+ */
+void *ntb_find_transport(struct pci_dev *pdev)
+{
+ struct ntb_device *ndev = pci_get_drvdata(pdev);
+ return ndev->ntb_transport;
+}
+
+/**
+ * ntb_register_transport() - Register NTB transport with NTB HW driver
+ * @transport: transport identifier
+ *
+ * This function allows a transport to reserve the hardware driver for
+ * NTB usage.
+ *
+ * RETURNS: pointer to ntb_device, NULL on error.
+ */
+struct ntb_device *ntb_register_transport(struct pci_dev *pdev, void *transport)
+{
+ struct ntb_device *ndev = pci_get_drvdata(pdev);
+
+ if (ndev->ntb_transport)
+ return NULL;
+
+ ndev->ntb_transport = transport;
+ return ndev;
+}
+
+/**
+ * ntb_unregister_transport() - Unregister the transport with the NTB HW driver
+ * @ndev - ntb_device of the transport to be freed
+ *
+ * This function unregisters the transport from the HW driver and performs any
+ * necessary cleanups.
+ */
+void ntb_unregister_transport(struct ntb_device *ndev)
+{
+ int i;
+
+ if (!ndev->ntb_transport)
+ return;
+
+ for (i = 0; i < ndev->max_cbs; i++)
+ ntb_unregister_db_callback(ndev, i);
+
+ ntb_unregister_event_callback(ndev);
+ ndev->ntb_transport = NULL;
+}
+
+/**
+ * ntb_write_local_spad() - write to the secondary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to the scratchpad register, 0 based
+ * @val: the data value to put into the register
+ *
+ * This function allows writing of a 32bit value to the indexed scratchpad
+ * register. This writes over the data mirrored to the local scratchpad register
+ * by the remote system.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
+{
+ if (idx >= ndev->limits.max_spads)
+ return -EINVAL;
+
+ dev_dbg(&ndev->pdev->dev, "Writing %x to local scratch pad index %d\n",
+ val, idx);
+ writel(val, ndev->reg_ofs.spad_read + idx * 4);
+
+ return 0;
+}
+
+/**
+ * ntb_read_local_spad() - read from the primary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to scratchpad register, 0 based
+ * @val: pointer to 32bit integer for storing the register value
+ *
+ * This function allows reading of the 32bit scratchpad register on
+ * the primary (internal) side. This allows the local system to read data
+ * written and mirrored to the scratchpad register by the remote system.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
+{
+ if (idx >= ndev->limits.max_spads)
+ return -EINVAL;
+
+ *val = readl(ndev->reg_ofs.spad_write + idx * 4);
+ dev_dbg(&ndev->pdev->dev,
+ "Reading %x from local scratch pad index %d\n", *val, idx);
+
+ return 0;
+}
+
+/**
+ * ntb_write_remote_spad() - write to the secondary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to the scratchpad register, 0 based
+ * @val: the data value to put into the register
+ *
+ * This function allows writing of a 32bit value to the indexed scratchpad
+ * register. The register resides on the secondary (external) side. This allows
+ * the local system to write data to be mirrored to the remote systems
+ * scratchpad register.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
+{
+ if (idx >= ndev->limits.max_spads)
+ return -EINVAL;
+
+ dev_dbg(&ndev->pdev->dev, "Writing %x to remote scratch pad index %d\n",
+ val, idx);
+ writel(val, ndev->reg_ofs.spad_write + idx * 4);
+
+ return 0;
+}
+
+/**
+ * ntb_read_remote_spad() - read from the primary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to scratchpad register, 0 based
+ * @val: pointer to 32bit integer for storing the register value
+ *
+ * This function allows reading of the 32bit scratchpad register on
+ * the primary (internal) side. This allows the local system to read the data
+ * it wrote to be mirrored on the remote system.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
+{
+ if (idx >= ndev->limits.max_spads)
+ return -EINVAL;
+
+ *val = readl(ndev->reg_ofs.spad_read + idx * 4);
+ dev_dbg(&ndev->pdev->dev,
+ "Reading %x from remote scratch pad index %d\n", *val, idx);
+
+ return 0;
+}
+
+/**
+ * ntb_get_mw_vbase() - get virtual addr for the NTB memory window
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ *
+ * This function provides the base virtual address of the memory window
+ * specified.
+ *
+ * RETURNS: pointer to virtual address, or NULL on error.
+ */
+void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
+{
+ if (mw > NTB_NUM_MW)
+ return NULL;
+
+ return ndev->mw[mw].vbase;
+}
+
+/**
+ * ntb_get_mw_size() - return size of NTB memory window
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ *
+ * This function provides the physical size of the memory window specified
+ *
+ * RETURNS: the size of the memory window or zero on error
+ */
+resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
+{
+ if (mw > NTB_NUM_MW)
+ return 0;
+
+ return ndev->mw[mw].bar_sz;
+}
+
+/**
+ * ntb_set_mw_addr - set the memory window address
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ * @addr: base address for data
+ *
+ * This function sets the base physical address of the memory window. This
+ * memory address is where data from the remote system will be transferred into
+ * or out of depending on how the transport is configured.
+ */
+void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
+{
+ if (mw > NTB_NUM_MW)
+ return;
+
+ dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
+ MW_TO_BAR(mw));
+
+ ndev->mw[mw].phys_addr = addr;
+
+ switch (MW_TO_BAR(mw)) {
+ case NTB_BAR_23:
+ writeq(addr, ndev->reg_ofs.sbar2_xlat);
+ break;
+ case NTB_BAR_45:
+ writeq(addr, ndev->reg_ofs.sbar4_xlat);
+ break;
+ }
+}
+
+/**
+ * ntb_ring_sdb() - Set the doorbell on the secondary/external side
+ * @ndev: pointer to ntb_device instance
+ * @db: doorbell to ring
+ *
+ * This function allows triggering of a doorbell on the secondary/external
+ * side that will initiate an interrupt on the remote host
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+void ntb_ring_sdb(struct ntb_device *ndev, unsigned int db)
+{
+ dev_dbg(&ndev->pdev->dev, "%s: ringing doorbell %d\n", __func__, db);
+
+ if (ndev->hw_type == BWD_HW)
+ writeq((u64) 1 << db, ndev->reg_ofs.sdb);
+ else
+ writew(((1 << ndev->bits_per_vector) - 1) <<
+ (db * ndev->bits_per_vector), ndev->reg_ofs.sdb);
+}
+
+static void ntb_link_event(struct ntb_device *ndev, int link_state)
+{
+ unsigned int event;
+
+ if (ndev->link_status == link_state)
+ return;
+
+ if (link_state == NTB_LINK_UP) {
+ u16 status;
+
+ dev_info(&ndev->pdev->dev, "Link Up\n");
+ ndev->link_status = NTB_LINK_UP;
+ event = NTB_EVENT_HW_LINK_UP;
+
+ if (ndev->hw_type == BWD_HW)
+ status = readw(ndev->reg_ofs.lnk_stat);
+ else {
+ int rc = pci_read_config_word(ndev->pdev,
+ SNB_LINK_STATUS_OFFSET,
+ &status);
+ if (rc)
+ return;
+ }
+ dev_info(&ndev->pdev->dev, "Link Width %d, Link Speed %d\n",
+ (status & NTB_LINK_WIDTH_MASK) >> 4,
+ (status & NTB_LINK_SPEED_MASK));
+ } else {
+ dev_info(&ndev->pdev->dev, "Link Down\n");
+ ndev->link_status = NTB_LINK_DOWN;
+ event = NTB_EVENT_HW_LINK_DOWN;
+ }
+
+ /* notify the upper layer if we have an event change */
+ if (ndev->event_cb)
+ ndev->event_cb(ndev->ntb_transport, event);
+}
+
+static int ntb_link_status(struct ntb_device *ndev)
+{
+ int link_state;
+
+ if (ndev->hw_type == BWD_HW) {
+ u32 ntb_cntl;
+
+ ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
+ if (ntb_cntl & BWD_CNTL_LINK_DOWN)
+ link_state = NTB_LINK_DOWN;
+ else
+ link_state = NTB_LINK_UP;
+ } else {
+ u16 status;
+ int rc;
+
+ rc = pci_read_config_word(ndev->pdev, SNB_LINK_STATUS_OFFSET,
+ &status);
+ if (rc)
+ return rc;
+
+ if (status & NTB_LINK_STATUS_ACTIVE)
+ link_state = NTB_LINK_UP;
+ else
+ link_state = NTB_LINK_DOWN;
+ }
+
+ ntb_link_event(ndev, link_state);
+
+ return 0;
+}
+
+/* BWD doesn't have link status interrupt, poll on that platform */
+static void bwd_link_poll(struct work_struct *work)
+{
+ struct ntb_device *ndev = container_of(work, struct ntb_device,
+ hb_timer.work);
+ unsigned long ts = jiffies;
+
+ /* If we haven't gotten an interrupt in a while, check the BWD link
+ * status bit
+ */
+ if (ts > ndev->last_ts + NTB_HB_TIMEOUT) {
+ int rc = ntb_link_status(ndev);
+ if (rc)
+ dev_err(&ndev->pdev->dev,
+ "Error determining link status\n");
+ }
+
+ schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
+}
+
+static int ntb_xeon_setup(struct ntb_device *ndev)
+{
+ int rc;
+ u8 val;
+
+ ndev->hw_type = SNB_HW;
+
+ rc = pci_read_config_byte(ndev->pdev, NTB_PPD_OFFSET, &val);
+ if (rc)
+ return rc;
+
+ switch (val & SNB_PPD_CONN_TYPE) {
+ case NTB_CONN_B2B:
+ ndev->conn_type = NTB_CONN_B2B;
+ break;
+ case NTB_CONN_CLASSIC:
+ case NTB_CONN_RP:
+ default:
+ dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n");
+ return -EINVAL;
+ }
+
+ if (val & SNB_PPD_DEV_TYPE)
+ ndev->dev_type = NTB_DEV_DSD;
+ else
+ ndev->dev_type = NTB_DEV_USD;
+
+ ndev->reg_ofs.pdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
+ ndev->reg_ofs.pdb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
+ ndev->reg_ofs.sbar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
+ ndev->reg_ofs.sbar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
+ ndev->reg_ofs.lnk_cntl = ndev->reg_base + SNB_NTBCNTL_OFFSET;
+ ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_LINK_STATUS_OFFSET;
+ ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
+ ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
+
+ if (ndev->conn_type == NTB_CONN_B2B) {
+ ndev->reg_ofs.sdb = ndev->reg_base + SNB_B2B_DOORBELL_OFFSET;
+ ndev->reg_ofs.spad_write = ndev->reg_base + SNB_B2B_SPAD_OFFSET;
+ ndev->limits.max_spads = SNB_MAX_SPADS;
+ } else {
+ ndev->reg_ofs.sdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
+ ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
+ ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS;
+ }
+
+ ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
+ ndev->limits.msix_cnt = SNB_MSIX_CNT;
+ ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
+
+ return 0;
+}
+
+static int ntb_bwd_setup(struct ntb_device *ndev)
+{
+ int rc;
+ u32 val;
+
+ ndev->hw_type = BWD_HW;
+
+ rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &val);
+ if (rc)
+ return rc;
+
+ switch ((val & BWD_PPD_CONN_TYPE) >> 8) {
+ case NTB_CONN_B2B:
+ ndev->conn_type = NTB_CONN_B2B;
+ break;
+ case NTB_CONN_RP:
+ default:
+ dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n");
+ return -EINVAL;
+ }
+
+ if (val & BWD_PPD_DEV_TYPE)
+ ndev->dev_type = NTB_DEV_DSD;
+ else
+ ndev->dev_type = NTB_DEV_USD;
+
+ /* Initiate PCI-E link training */
+ rc = pci_write_config_dword(ndev->pdev, NTB_PPD_OFFSET,
+ val | BWD_PPD_INIT_LINK);
+ if (rc)
+ return rc;
+
+ ndev->reg_ofs.pdb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
+ ndev->reg_ofs.pdb_mask = ndev->reg_base + BWD_PDBMSK_OFFSET;
+ ndev->reg_ofs.sbar2_xlat = ndev->reg_base + BWD_SBAR2XLAT_OFFSET;
+ ndev->reg_ofs.sbar4_xlat = ndev->reg_base + BWD_SBAR4XLAT_OFFSET;
+ ndev->reg_ofs.lnk_cntl = ndev->reg_base + BWD_NTBCNTL_OFFSET;
+ ndev->reg_ofs.lnk_stat = ndev->reg_base + BWD_LINK_STATUS_OFFSET;
+ ndev->reg_ofs.spad_read = ndev->reg_base + BWD_SPAD_OFFSET;
+ ndev->reg_ofs.spci_cmd = ndev->reg_base + BWD_PCICMD_OFFSET;
+
+ if (ndev->conn_type == NTB_CONN_B2B) {
+ ndev->reg_ofs.sdb = ndev->reg_base + BWD_B2B_DOORBELL_OFFSET;
+ ndev->reg_ofs.spad_write = ndev->reg_base + BWD_B2B_SPAD_OFFSET;
+ ndev->limits.max_spads = BWD_MAX_SPADS;
+ } else {
+ ndev->reg_ofs.sdb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
+ ndev->reg_ofs.spad_write = ndev->reg_base + BWD_SPAD_OFFSET;
+ ndev->limits.max_spads = BWD_MAX_COMPAT_SPADS;
+ }
+
+ ndev->limits.max_db_bits = BWD_MAX_DB_BITS;
+ ndev->limits.msix_cnt = BWD_MSIX_CNT;
+ ndev->bits_per_vector = BWD_DB_BITS_PER_VEC;
+
+ /* Since bwd doesn't have a link interrupt, setup a poll timer */
+ INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_poll);
+ schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
+
+ return 0;
+}
+
+static int ntb_device_setup(struct ntb_device *ndev)
+{
+ int rc;
+
+ switch (ndev->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_NTB_2ND_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_RP_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_RP_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+ rc = ntb_xeon_setup(ndev);
+ break;
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
+ rc = ntb_bwd_setup(ndev);
+ break;
+ default:
+ rc = -ENODEV;
+ }
+
+ /* Enable Bus Master and Memory Space on the secondary side */
+ writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, ndev->reg_ofs.spci_cmd);
+
+ return rc;
+}
+
+static void ntb_device_free(struct ntb_device *ndev)
+{
+ if (ndev->hw_type == BWD_HW)
+ cancel_delayed_work_sync(&ndev->hb_timer);
+}
+
+static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
+{
+ struct ntb_db_cb *db_cb = data;
+ struct ntb_device *ndev = db_cb->ndev;
+
+ dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
+ db_cb->db_num);
+
+ if (db_cb->callback)
+ db_cb->callback(db_cb->data, db_cb->db_num);
+
+ /* No need to check for the specific HB irq, any interrupt means
+ * we're connected.
+ */
+ ndev->last_ts = jiffies;
+
+ writeq((u64) 1 << db_cb->db_num, ndev->reg_ofs.pdb);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
+{
+ struct ntb_db_cb *db_cb = data;
+ struct ntb_device *ndev = db_cb->ndev;
+
+ dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
+ db_cb->db_num);
+
+ if (db_cb->callback)
+ db_cb->callback(db_cb->data, db_cb->db_num);
+
+ /* On Sandybridge, there are 16 bits in the interrupt register
+ * but only 4 vectors. So, 5 bits are assigned to the first 3
+ * vectors, with the 4th having a single bit for link
+ * interrupts.
+ */
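+	/* For example, with bits_per_vector == 5, db_num 1 clears bits 5-9
+	 * (mask 0x03e0); these numbers only illustrate the shift below and
+	 * are not taken from hardware documentation.
+	 */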
+ writew(((1 << ndev->bits_per_vector) - 1) <<
+ (db_cb->db_num * ndev->bits_per_vector), ndev->reg_ofs.pdb);
+
+ return IRQ_HANDLED;
+}
+
+/* Since we do not have a HW doorbell in BWD, this is only used in JF/JT */
+static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
+{
+ struct ntb_device *ndev = dev;
+ int rc;
+
+ dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for Events\n", irq);
+
+ rc = ntb_link_status(ndev);
+ if (rc)
+ dev_err(&ndev->pdev->dev, "Error determining link status\n");
+
+ /* bit 15 is always the link bit */
+ writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.pdb);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ntb_interrupt(int irq, void *dev)
+{
+ struct ntb_device *ndev = dev;
+ unsigned int i = 0;
+
+ if (ndev->hw_type == BWD_HW) {
+ u64 pdb = readq(ndev->reg_ofs.pdb);
+
+ dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %Lx\n", irq, pdb);
+
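+		/* Walk the pending doorbell bits: __ffs() finds the lowest set
+		 * bit and "pdb &= pdb - 1" clears it, so each doorbell is
+		 * dispatched exactly once per interrupt.
+		 */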
+ while (pdb) {
+ i = __ffs(pdb);
+ pdb &= pdb - 1;
+ bwd_callback_msix_irq(irq, &ndev->db_cb[i]);
+ }
+ } else {
+ u16 pdb = readw(ndev->reg_ofs.pdb);
+
+ dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %x sdb %x\n", irq,
+ pdb, readw(ndev->reg_ofs.sdb));
+
+ if (pdb & SNB_DB_HW_LINK) {
+ xeon_event_msix_irq(irq, dev);
+ pdb &= ~SNB_DB_HW_LINK;
+ }
+
+ while (pdb) {
+ i = __ffs(pdb);
+ pdb &= pdb - 1;
+ xeon_callback_msix_irq(irq, &ndev->db_cb[i]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ntb_setup_msix(struct ntb_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+ struct msix_entry *msix;
+ int msix_entries;
+ int rc, i, pos;
+ u16 val;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (!pos) {
+ rc = -EIO;
+ goto err;
+ }
+
+ rc = pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &val);
+ if (rc)
+ goto err;
+
+ msix_entries = msix_table_size(val);
+ if (msix_entries > ndev->limits.msix_cnt) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
+ GFP_KERNEL);
+ if (!ndev->msix_entries) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < msix_entries; i++)
+ ndev->msix_entries[i].entry = i;
+
+ rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
+ if (rc < 0)
+ goto err1;
+ if (rc > 0) {
+ /* On SNB, the link interrupt is always tied to 4th vector. If
+ * we can't get all 4, then we can't use MSI-X.
+ */
+ if (ndev->hw_type != BWD_HW) {
+ rc = -EIO;
+ goto err1;
+ }
+
+ dev_warn(&pdev->dev,
+ "Only %d MSI-X vectors. Limiting the number of queues to that number.\n",
+ rc);
+ msix_entries = rc;
+ }
+
+ for (i = 0; i < msix_entries; i++) {
+ msix = &ndev->msix_entries[i];
+ WARN_ON(!msix->vector);
+
+ /* Use the last MSI-X vector for Link status */
+ if (ndev->hw_type == BWD_HW) {
+ rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
+ "ntb-callback-msix", &ndev->db_cb[i]);
+ if (rc)
+ goto err2;
+ } else {
+ if (i == msix_entries - 1) {
+ rc = request_irq(msix->vector,
+ xeon_event_msix_irq, 0,
+ "ntb-event-msix", ndev);
+ if (rc)
+ goto err2;
+ } else {
+ rc = request_irq(msix->vector,
+ xeon_callback_msix_irq, 0,
+ "ntb-callback-msix",
+ &ndev->db_cb[i]);
+ if (rc)
+ goto err2;
+ }
+ }
+ }
+
+ ndev->num_msix = msix_entries;
+ if (ndev->hw_type == BWD_HW)
+ ndev->max_cbs = msix_entries;
+ else
+ ndev->max_cbs = msix_entries - 1;
+
+ return 0;
+
+err2:
+ while (--i >= 0) {
+ msix = &ndev->msix_entries[i];
+ if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
+ free_irq(msix->vector, ndev);
+ else
+ free_irq(msix->vector, &ndev->db_cb[i]);
+ }
+ pci_disable_msix(pdev);
+err1:
+ kfree(ndev->msix_entries);
+ dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
+err:
+ ndev->num_msix = 0;
+ return rc;
+}
+
+static int ntb_setup_msi(struct ntb_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+ int rc;
+
+ rc = pci_enable_msi(pdev);
+ if (rc)
+ return rc;
+
+ rc = request_irq(pdev->irq, ntb_interrupt, 0, "ntb-msi", ndev);
+ if (rc) {
+ pci_disable_msi(pdev);
+ dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int ntb_setup_intx(struct ntb_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+ int rc;
+
+ pci_msi_off(pdev);
+
+ /* Verify intx is enabled */
+ pci_intx(pdev, 1);
+
+ rc = request_irq(pdev->irq, ntb_interrupt, IRQF_SHARED, "ntb-intx",
+ ndev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int ntb_setup_interrupts(struct ntb_device *ndev)
+{
+ int rc;
+
+ /* On BWD, disable all interrupts. On SNB, disable all but Link
+ * Interrupt. The rest will be unmasked as callbacks are registered.
+ */
+ if (ndev->hw_type == BWD_HW)
+ writeq(~0, ndev->reg_ofs.pdb_mask);
+ else
+ writew(~(1 << ndev->limits.max_db_bits),
+ ndev->reg_ofs.pdb_mask);
+
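+	/* Try MSI-X first, then fall back to MSI and finally to legacy INTx.
+	 * The fallbacks share a single interrupt and demultiplex the doorbell
+	 * bits in ntb_interrupt().
+	 */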
+ rc = ntb_setup_msix(ndev);
+ if (!rc)
+ goto done;
+
+ ndev->bits_per_vector = 1;
+ ndev->max_cbs = ndev->limits.max_db_bits;
+
+ rc = ntb_setup_msi(ndev);
+ if (!rc)
+ goto done;
+
+ rc = ntb_setup_intx(ndev);
+ if (rc) {
+ dev_err(&ndev->pdev->dev, "no usable interrupts\n");
+ return rc;
+ }
+
+done:
+ return 0;
+}
+
+static void ntb_free_interrupts(struct ntb_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+
+ /* mask interrupts */
+ if (ndev->hw_type == BWD_HW)
+ writeq(~0, ndev->reg_ofs.pdb_mask);
+ else
+ writew(~0, ndev->reg_ofs.pdb_mask);
+
+ if (ndev->num_msix) {
+ struct msix_entry *msix;
+ u32 i;
+
+ for (i = 0; i < ndev->num_msix; i++) {
+ msix = &ndev->msix_entries[i];
+ if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
+ free_irq(msix->vector, ndev);
+ else
+ free_irq(msix->vector, &ndev->db_cb[i]);
+ }
+ pci_disable_msix(pdev);
+ } else {
+ free_irq(pdev->irq, ndev);
+
+ if (pci_dev_msi_enabled(pdev))
+ pci_disable_msi(pdev);
+ }
+}
+
+static int ntb_create_callbacks(struct ntb_device *ndev)
+{
+ int i;
+
+	/* Chicken-and-egg issue. We won't know how many callbacks are necessary
+	 * until we see how many MSI-X vectors we get, but these pointers need
+	 * to be passed into the MSI-X register function. So, we allocate the
+ * max, knowing that they might not all be used, to work around this.
+ */
+ ndev->db_cb = kcalloc(ndev->limits.max_db_bits,
+ sizeof(struct ntb_db_cb),
+ GFP_KERNEL);
+ if (!ndev->db_cb)
+ return -ENOMEM;
+
+ for (i = 0; i < ndev->limits.max_db_bits; i++) {
+ ndev->db_cb[i].db_num = i;
+ ndev->db_cb[i].ndev = ndev;
+ }
+
+ return 0;
+}
+
+static void ntb_free_callbacks(struct ntb_device *ndev)
+{
+ int i;
+
+ for (i = 0; i < ndev->limits.max_db_bits; i++)
+ ntb_unregister_db_callback(ndev, i);
+
+ kfree(ndev->db_cb);
+}
+
+static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ntb_device *ndev;
+ int rc, i;
+
+ ndev = kzalloc(sizeof(struct ntb_device), GFP_KERNEL);
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->pdev = pdev;
+ ndev->link_status = NTB_LINK_DOWN;
+ pci_set_drvdata(pdev, ndev);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err;
+
+ pci_set_master(ndev->pdev);
+
+ rc = pci_request_selected_regions(pdev, NTB_BAR_MASK, KBUILD_MODNAME);
+ if (rc)
+ goto err1;
+
+ ndev->reg_base = pci_ioremap_bar(pdev, NTB_BAR_MMIO);
+ if (!ndev->reg_base) {
+ dev_warn(&pdev->dev, "Cannot remap BAR 0\n");
+ rc = -EIO;
+ goto err2;
+ }
+
+ for (i = 0; i < NTB_NUM_MW; i++) {
+ ndev->mw[i].bar_sz = pci_resource_len(pdev, MW_TO_BAR(i));
+ ndev->mw[i].vbase =
+ ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
+ ndev->mw[i].bar_sz);
+ dev_info(&pdev->dev, "MW %d size %d\n", i,
+ (u32) pci_resource_len(pdev, MW_TO_BAR(i)));
+ if (!ndev->mw[i].vbase) {
+ dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
+ MW_TO_BAR(i));
+ rc = -EIO;
+ goto err3;
+ }
+ }
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ goto err3;
+
+ dev_warn(&pdev->dev, "Cannot DMA highmem\n");
+ }
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ goto err3;
+
+ dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
+ }
+
+ rc = ntb_device_setup(ndev);
+ if (rc)
+ goto err3;
+
+ rc = ntb_create_callbacks(ndev);
+ if (rc)
+ goto err4;
+
+ rc = ntb_setup_interrupts(ndev);
+ if (rc)
+ goto err5;
+
+ /* The scratchpad registers keep the values between rmmod/insmod,
+ * blast them now
+ */
+ for (i = 0; i < ndev->limits.max_spads; i++) {
+ ntb_write_local_spad(ndev, i, 0);
+ ntb_write_remote_spad(ndev, i, 0);
+ }
+
+ rc = ntb_transport_init(pdev);
+ if (rc)
+ goto err6;
+
+ /* Let's bring the NTB link up */
+ writel(NTB_CNTL_BAR23_SNOOP | NTB_CNTL_BAR45_SNOOP,
+ ndev->reg_ofs.lnk_cntl);
+
+ return 0;
+
+err6:
+ ntb_free_interrupts(ndev);
+err5:
+ ntb_free_callbacks(ndev);
+err4:
+ ntb_device_free(ndev);
+err3:
+ for (i--; i >= 0; i--)
+ iounmap(ndev->mw[i].vbase);
+ iounmap(ndev->reg_base);
+err2:
+ pci_release_selected_regions(pdev, NTB_BAR_MASK);
+err1:
+ pci_disable_device(pdev);
+err:
+ kfree(ndev);
+
+ dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME);
+ return rc;
+}
+
+static void ntb_pci_remove(struct pci_dev *pdev)
+{
+ struct ntb_device *ndev = pci_get_drvdata(pdev);
+ int i;
+ u32 ntb_cntl;
+
+ /* Bring NTB link down */
+ ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
+ ntb_cntl |= NTB_LINK_DISABLE;
+ writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
+
+ ntb_transport_free(ndev->ntb_transport);
+
+ ntb_free_interrupts(ndev);
+ ntb_free_callbacks(ndev);
+ ntb_device_free(ndev);
+
+ for (i = 0; i < NTB_NUM_MW; i++)
+ iounmap(ndev->mw[i].vbase);
+
+ iounmap(ndev->reg_base);
+ pci_release_selected_regions(pdev, NTB_BAR_MASK);
+ pci_disable_device(pdev);
+ kfree(ndev);
+}
+
+static struct pci_driver ntb_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ntb_pci_tbl,
+ .probe = ntb_pci_probe,
+ .remove = ntb_pci_remove,
+};
+module_pci_driver(ntb_pci_driver);
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
new file mode 100644
index 000000000000..3a3038ca83e6
--- /dev/null
+++ b/drivers/ntb/ntb_hw.h
@@ -0,0 +1,181 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
+#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF 0x3726
+#define PCI_DEVICE_ID_INTEL_NTB_RP_JSF 0x3727
+#define PCI_DEVICE_ID_INTEL_NTB_RP_SNB 0x3C08
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB 0x3C0D
+#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB 0x3C0E
+#define PCI_DEVICE_ID_INTEL_NTB_2ND_SNB 0x3C0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
+
+#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
+
+#define NTB_BAR_MMIO 0
+#define NTB_BAR_23 2
+#define NTB_BAR_45 4
+#define NTB_BAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
+ (1 << NTB_BAR_45))
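+
+/* BAR 0 maps the NTB register space; BARs 2 and 4 back the two memory
+ * windows used for data transfer.
+ */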
+
+#define NTB_LINK_DOWN 0
+#define NTB_LINK_UP 1
+
+#define NTB_HB_TIMEOUT msecs_to_jiffies(1000)
+
+#define NTB_NUM_MW 2
+
+enum ntb_hw_event {
+ NTB_EVENT_SW_EVENT0 = 0,
+ NTB_EVENT_SW_EVENT1,
+ NTB_EVENT_SW_EVENT2,
+ NTB_EVENT_HW_ERROR,
+ NTB_EVENT_HW_LINK_UP,
+ NTB_EVENT_HW_LINK_DOWN,
+};
+
+struct ntb_mw {
+ dma_addr_t phys_addr;
+ void __iomem *vbase;
+ resource_size_t bar_sz;
+};
+
+struct ntb_db_cb {
+ void (*callback) (void *data, int db_num);
+ unsigned int db_num;
+ void *data;
+ struct ntb_device *ndev;
+};
+
+struct ntb_device {
+ struct pci_dev *pdev;
+ struct msix_entry *msix_entries;
+ void __iomem *reg_base;
+ struct ntb_mw mw[NTB_NUM_MW];
+ struct {
+ unsigned int max_spads;
+ unsigned int max_db_bits;
+ unsigned int msix_cnt;
+ } limits;
+ struct {
+ void __iomem *pdb;
+ void __iomem *pdb_mask;
+ void __iomem *sdb;
+ void __iomem *sbar2_xlat;
+ void __iomem *sbar4_xlat;
+ void __iomem *spad_write;
+ void __iomem *spad_read;
+ void __iomem *lnk_cntl;
+ void __iomem *lnk_stat;
+ void __iomem *spci_cmd;
+ } reg_ofs;
+ struct ntb_transport *ntb_transport;
+ void (*event_cb)(void *handle, enum ntb_hw_event event);
+
+ struct ntb_db_cb *db_cb;
+ unsigned char hw_type;
+ unsigned char conn_type;
+ unsigned char dev_type;
+ unsigned char num_msix;
+ unsigned char bits_per_vector;
+ unsigned char max_cbs;
+ unsigned char link_status;
+ struct delayed_work hb_timer;
+ unsigned long last_ts;
+};
+
+/**
+ * ntb_hw_link_status() - return the hardware link status
+ * @ndev: pointer to ntb_device instance
+ *
+ * Returns true if the hardware is connected to the remote system
+ *
+ * RETURNS: true or false based on the hardware link state
+ */
+static inline bool ntb_hw_link_status(struct ntb_device *ndev)
+{
+ return ndev->link_status == NTB_LINK_UP;
+}
+
+/**
+ * ntb_query_pdev() - return the pci_dev pointer
+ * @ndev: pointer to ntb_device instance
+ *
+ * Given the ntb pointer, return the pci_dev pointer for the NTB hardware device
+ *
+ * RETURNS: a pointer to the ntb pci_dev
+ */
+static inline struct pci_dev *ntb_query_pdev(struct ntb_device *ndev)
+{
+ return ndev->pdev;
+}
+
+struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
+ void *transport);
+void ntb_unregister_transport(struct ntb_device *ndev);
+void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
+int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
+ void *data, void (*db_cb_func) (void *data,
+ int db_num));
+void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
+int ntb_register_event_callback(struct ntb_device *ndev,
+ void (*event_cb_func) (void *handle,
+ enum ntb_hw_event event));
+void ntb_unregister_event_callback(struct ntb_device *ndev);
+int ntb_get_max_spads(struct ntb_device *ndev);
+int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
+int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
+int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
+int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
+void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw);
+resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw);
+void ntb_ring_sdb(struct ntb_device *ndev, unsigned int idx);
+void *ntb_find_transport(struct pci_dev *pdev);
+
+int ntb_transport_init(struct pci_dev *pdev);
+void ntb_transport_free(void *transport);
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
new file mode 100644
index 000000000000..5bfa8c06c059
--- /dev/null
+++ b/drivers/ntb/ntb_regs.h
@@ -0,0 +1,139 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#define NTB_LINK_ENABLE 0x0000
+#define NTB_LINK_DISABLE 0x0002
+#define NTB_LINK_STATUS_ACTIVE 0x2000
+#define NTB_LINK_SPEED_MASK 0x000f
+#define NTB_LINK_WIDTH_MASK 0x03f0
+
+#define SNB_MSIX_CNT 4
+#define SNB_MAX_SPADS 16
+#define SNB_MAX_COMPAT_SPADS 8
+/* Reserve the uppermost bit for link interrupt */
+#define SNB_MAX_DB_BITS 15
+#define SNB_DB_BITS_PER_VEC 5
+
+#define SNB_DB_HW_LINK 0x8000
+
+#define SNB_PCICMD_OFFSET 0x0504
+#define SNB_DEVCTRL_OFFSET 0x0598
+#define SNB_LINK_STATUS_OFFSET 0x01A2
+
+#define SNB_PBAR2LMT_OFFSET 0x0000
+#define SNB_PBAR4LMT_OFFSET 0x0008
+#define SNB_PBAR2XLAT_OFFSET 0x0010
+#define SNB_PBAR4XLAT_OFFSET 0x0018
+#define SNB_SBAR2LMT_OFFSET 0x0020
+#define SNB_SBAR4LMT_OFFSET 0x0028
+#define SNB_SBAR2XLAT_OFFSET 0x0030
+#define SNB_SBAR4XLAT_OFFSET 0x0038
+#define SNB_SBAR0BASE_OFFSET 0x0040
+#define SNB_SBAR2BASE_OFFSET 0x0048
+#define SNB_SBAR4BASE_OFFSET 0x0050
+#define SNB_NTBCNTL_OFFSET 0x0058
+#define SNB_SBDF_OFFSET 0x005C
+#define SNB_PDOORBELL_OFFSET 0x0060
+#define SNB_PDBMSK_OFFSET 0x0062
+#define SNB_SDOORBELL_OFFSET 0x0064
+#define SNB_SDBMSK_OFFSET 0x0066
+#define SNB_USMEMMISS 0x0070
+#define SNB_SPAD_OFFSET 0x0080
+#define SNB_SPADSEMA4_OFFSET 0x00c0
+#define SNB_WCCNTRL_OFFSET 0x00e0
+#define SNB_B2B_SPAD_OFFSET 0x0100
+#define SNB_B2B_DOORBELL_OFFSET 0x0140
+#define SNB_B2B_XLAT_OFFSET 0x0144
+
+#define BWD_MSIX_CNT 34
+#define BWD_MAX_SPADS 16
+#define BWD_MAX_COMPAT_SPADS 16
+#define BWD_MAX_DB_BITS 34
+#define BWD_DB_BITS_PER_VEC 1
+
+#define BWD_PCICMD_OFFSET 0xb004
+#define BWD_MBAR23_OFFSET 0xb018
+#define BWD_MBAR45_OFFSET 0xb020
+#define BWD_DEVCTRL_OFFSET 0xb048
+#define BWD_LINK_STATUS_OFFSET 0xb052
+
+#define BWD_SBAR2XLAT_OFFSET 0x0008
+#define BWD_SBAR4XLAT_OFFSET 0x0010
+#define BWD_PDOORBELL_OFFSET 0x0020
+#define BWD_PDBMSK_OFFSET 0x0028
+#define BWD_NTBCNTL_OFFSET 0x0060
+#define BWD_EBDF_OFFSET 0x0064
+#define BWD_SPAD_OFFSET 0x0080
+#define BWD_SPADSEMA_OFFSET 0x00c0
+#define BWD_STKYSPAD_OFFSET 0x00c4
+#define BWD_PBAR2XLAT_OFFSET 0x8008
+#define BWD_PBAR4XLAT_OFFSET 0x8010
+#define BWD_B2B_DOORBELL_OFFSET 0x8020
+#define BWD_B2B_SPAD_OFFSET 0x8080
+#define BWD_B2B_SPADSEMA_OFFSET 0x80c0
+#define BWD_B2B_STKYSPAD_OFFSET 0x80c4
+
+#define NTB_CNTL_BAR23_SNOOP (1 << 2)
+#define NTB_CNTL_BAR45_SNOOP (1 << 6)
+#define BWD_CNTL_LINK_DOWN (1 << 16)
+
+#define NTB_PPD_OFFSET 0x00D4
+#define SNB_PPD_CONN_TYPE 0x0003
+#define SNB_PPD_DEV_TYPE 0x0010
+#define BWD_PPD_INIT_LINK 0x0008
+#define BWD_PPD_CONN_TYPE 0x0300
+#define BWD_PPD_DEV_TYPE 0x1000
+
+#define BWD_PBAR2XLAT_USD_ADDR 0x0000004000000000
+#define BWD_PBAR4XLAT_USD_ADDR 0x0000008000000000
+#define BWD_MBAR23_USD_ADDR 0x000000410000000C
+#define BWD_MBAR45_USD_ADDR 0x000000810000000C
+#define BWD_PBAR2XLAT_DSD_ADDR 0x0000004100000000
+#define BWD_PBAR4XLAT_DSD_ADDR 0x0000008100000000
+#define BWD_MBAR23_DSD_ADDR 0x000000400000000C
+#define BWD_MBAR45_DSD_ADDR 0x000000800000000C
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
new file mode 100644
index 000000000000..e0bdfd7f9930
--- /dev/null
+++ b/drivers/ntb/ntb_transport.c
@@ -0,0 +1,1441 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/ntb.h>
+#include "ntb_hw.h"
+
+#define NTB_TRANSPORT_VERSION 2
+
+static unsigned int transport_mtu = 0x401E;
+module_param(transport_mtu, uint, 0644);
+MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
+
+static unsigned char max_num_clients = 2;
+module_param(max_num_clients, byte, 0644);
+MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
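+
+/* Both can be overridden at module load time, e.g.
+ * "modprobe ntb transport_mtu=0x8000 max_num_clients=4"; the module name
+ * and values here are illustrative only.
+ */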
+
+struct ntb_queue_entry {
+ /* ntb_queue list reference */
+ struct list_head entry;
+	/* pointers to data to be transferred */
+ void *cb_data;
+ void *buf;
+ unsigned int len;
+ unsigned int flags;
+};
+
+struct ntb_rx_info {
+ unsigned int entry;
+};
+
+struct ntb_transport_qp {
+ struct ntb_transport *transport;
+ struct ntb_device *ndev;
+ void *cb_data;
+
+ bool client_ready;
+ bool qp_link;
+	u8 qp_num;	/* Only 64 QPs are allowed, 0-63 */
+
+ struct ntb_rx_info __iomem *rx_info;
+ struct ntb_rx_info *remote_rx_info;
+
+ void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
+ struct list_head tx_free_q;
+ spinlock_t ntb_tx_free_q_lock;
+ void __iomem *tx_mw;
+ unsigned int tx_index;
+ unsigned int tx_max_entry;
+ unsigned int tx_max_frame;
+
+ void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
+ struct tasklet_struct rx_work;
+ struct list_head rx_pend_q;
+ struct list_head rx_free_q;
+ spinlock_t ntb_rx_pend_q_lock;
+ spinlock_t ntb_rx_free_q_lock;
+ void *rx_buff;
+ unsigned int rx_index;
+ unsigned int rx_max_entry;
+ unsigned int rx_max_frame;
+
+ void (*event_handler) (void *data, int status);
+ struct delayed_work link_work;
+ struct work_struct link_cleanup;
+
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_stats;
+
+ /* Stats */
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_ring_empty;
+ u64 rx_err_no_buf;
+ u64 rx_err_oflow;
+ u64 rx_err_ver;
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_ring_full;
+};
+
+struct ntb_transport_mw {
+ size_t size;
+ void *virt_addr;
+ dma_addr_t dma_addr;
+};
+
+struct ntb_transport_client_dev {
+ struct list_head entry;
+ struct device dev;
+};
+
+struct ntb_transport {
+ struct list_head entry;
+ struct list_head client_devs;
+
+ struct ntb_device *ndev;
+ struct ntb_transport_mw mw[NTB_NUM_MW];
+ struct ntb_transport_qp *qps;
+ unsigned int max_qps;
+ unsigned long qp_bitmap;
+ bool transport_link;
+ struct delayed_work link_work;
+ struct work_struct link_cleanup;
+ struct dentry *debugfs_dir;
+};
+
+enum {
+ DESC_DONE_FLAG = 1 << 0,
+ LINK_DOWN_FLAG = 1 << 1,
+};
+
+struct ntb_payload_header {
+ unsigned int ver;
+ unsigned int len;
+ unsigned int flags;
+};
+
+enum {
+ VERSION = 0,
+ MW0_SZ,
+ MW1_SZ,
+ NUM_QPS,
+ QP_LINKS,
+ MAX_SPAD,
+};
+
+#define QP_TO_MW(qp) ((qp) % NTB_NUM_MW)
+#define NTB_QP_DEF_NUM_ENTRIES 100
+#define NTB_LINK_DOWN_TIMEOUT 10
+
+static int ntb_match_bus(struct device *dev, struct device_driver *drv)
+{
+ return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
+}
+
+static int ntb_client_probe(struct device *dev)
+{
+ const struct ntb_client *drv = container_of(dev->driver,
+ struct ntb_client, driver);
+ struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
+ int rc = -EINVAL;
+
+ get_device(dev);
+ if (drv && drv->probe)
+ rc = drv->probe(pdev);
+ if (rc)
+ put_device(dev);
+
+ return rc;
+}
+
+static int ntb_client_remove(struct device *dev)
+{
+ const struct ntb_client *drv = container_of(dev->driver,
+ struct ntb_client, driver);
+ struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
+
+ if (drv && drv->remove)
+ drv->remove(pdev);
+
+ put_device(dev);
+
+ return 0;
+}
+
+static struct bus_type ntb_bus_type = {
+ .name = "ntb_bus",
+ .match = ntb_match_bus,
+ .probe = ntb_client_probe,
+ .remove = ntb_client_remove,
+};
+
+static LIST_HEAD(ntb_transport_list);
+
+static int ntb_bus_init(struct ntb_transport *nt)
+{
+ if (list_empty(&ntb_transport_list)) {
+ int rc = bus_register(&ntb_bus_type);
+ if (rc)
+ return rc;
+ }
+
+ list_add(&nt->entry, &ntb_transport_list);
+
+ return 0;
+}
+
+static void ntb_bus_remove(struct ntb_transport *nt)
+{
+ struct ntb_transport_client_dev *client_dev, *cd;
+
+ list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
+ dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
+ dev_name(&client_dev->dev));
+ list_del(&client_dev->entry);
+ device_unregister(&client_dev->dev);
+ }
+
+ list_del(&nt->entry);
+
+ if (list_empty(&ntb_transport_list))
+ bus_unregister(&ntb_bus_type);
+}
+
+static void ntb_client_release(struct device *dev)
+{
+ struct ntb_transport_client_dev *client_dev;
+ client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
+
+ kfree(client_dev);
+}
+
+/**
+ * ntb_unregister_client_dev - Unregister NTB client device
+ * @device_name: Name of NTB client device
+ *
+ * Unregister an NTB client device with the NTB transport layer
+ */
+void ntb_unregister_client_dev(char *device_name)
+{
+ struct ntb_transport_client_dev *client, *cd;
+ struct ntb_transport *nt;
+
+ list_for_each_entry(nt, &ntb_transport_list, entry)
+ list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
+ if (!strncmp(dev_name(&client->dev), device_name,
+ strlen(device_name))) {
+ list_del(&client->entry);
+ device_unregister(&client->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
+
+/**
+ * ntb_register_client_dev - Register NTB client device
+ * @device_name: Name of NTB client device
+ *
+ * Register an NTB client device with the NTB transport layer
+ */
+int ntb_register_client_dev(char *device_name)
+{
+ struct ntb_transport_client_dev *client_dev;
+ struct ntb_transport *nt;
+ int rc;
+
+ if (list_empty(&ntb_transport_list))
+ return -ENODEV;
+
+ list_for_each_entry(nt, &ntb_transport_list, entry) {
+ struct device *dev;
+
+ client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
+ GFP_KERNEL);
+ if (!client_dev) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ dev = &client_dev->dev;
+
+ /* setup and register client devices */
+ dev_set_name(dev, "%s", device_name);
+ dev->bus = &ntb_bus_type;
+ dev->release = ntb_client_release;
+ dev->parent = &ntb_query_pdev(nt->ndev)->dev;
+
+ rc = device_register(dev);
+ if (rc) {
+ kfree(client_dev);
+ goto err;
+ }
+
+ list_add_tail(&client_dev->entry, &nt->client_devs);
+ }
+
+ return 0;
+
+err:
+ ntb_unregister_client_dev(device_name);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ntb_register_client_dev);
+
+/**
+ * ntb_register_client - Register NTB client driver
+ * @drv: NTB client driver to be registered
+ *
+ * Register an NTB client driver with the NTB transport layer
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_register_client(struct ntb_client *drv)
+{
+ drv->driver.bus = &ntb_bus_type;
+
+ if (list_empty(&ntb_transport_list))
+ return -ENODEV;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(ntb_register_client);
+
+/**
+ * ntb_unregister_client - Unregister NTB client driver
+ * @drv: NTB client driver to be unregistered
+ *
+ * Unregister an NTB client driver with the NTB transport layer
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+void ntb_unregister_client(struct ntb_client *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(ntb_unregister_client);
+
+static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
+ loff_t *offp)
+{
+ struct ntb_transport_qp *qp;
+ char *buf;
+ ssize_t ret, out_offset, out_count;
+
+ out_count = 600;
+
+ buf = kmalloc(out_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ qp = filp->private_data;
+ out_offset = 0;
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "NTB QP stats\n");
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_bytes - \t%llu\n", qp->rx_bytes);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_pkts - \t%llu\n", qp->rx_pkts);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_ring_empty - %llu\n", qp->rx_ring_empty);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_err_ver - \t%llu\n", qp->rx_err_ver);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_buff - \t%p\n", qp->rx_buff);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_index - \t%u\n", qp->rx_index);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_max_entry - \t%u\n", qp->rx_max_entry);
+
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_bytes - \t%llu\n", qp->tx_bytes);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_pkts - \t%llu\n", qp->tx_pkts);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_ring_full - \t%llu\n", qp->tx_ring_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_mw - \t%p\n", qp->tx_mw);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_index - \t%u\n", qp->tx_index);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_max_entry - \t%u\n", qp->tx_max_entry);
+
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
+ "Up" : "Down");
+ if (out_offset > out_count)
+ out_offset = out_count;
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations ntb_qp_debugfs_stats = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = debugfs_read,
+};
+
+static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
+ struct list_head *list)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ list_add_tail(entry, list);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
+ struct list_head *list)
+{
+ struct ntb_queue_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ if (list_empty(list)) {
+ entry = NULL;
+ goto out;
+ }
+ entry = list_first_entry(list, struct ntb_queue_entry, entry);
+ list_del(&entry->entry);
+out:
+ spin_unlock_irqrestore(lock, flags);
+
+ return entry;
+}
+
+static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
+ unsigned int qp_num)
+{
+ struct ntb_transport_qp *qp = &nt->qps[qp_num];
+ unsigned int rx_size, num_qps_mw;
+ u8 mw_num = QP_TO_MW(qp_num);
+ unsigned int i;
+
+ WARN_ON(nt->mw[mw_num].virt_addr == NULL);
+
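+	/* QPs are striped across the memory windows; when max_qps does not
+	 * divide evenly, the lower-numbered windows each carry one extra QP.
+	 */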
+ if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
+ num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
+ else
+ num_qps_mw = nt->max_qps / NTB_NUM_MW;
+
+ rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
+ qp->remote_rx_info = nt->mw[mw_num].virt_addr +
+ (qp_num / NTB_NUM_MW * rx_size);
+ rx_size -= sizeof(struct ntb_rx_info);
+
+ qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info);
+ qp->rx_max_frame = min(transport_mtu, rx_size);
+ qp->rx_max_entry = rx_size / qp->rx_max_frame;
+ qp->rx_index = 0;
+
+ qp->remote_rx_info->entry = qp->rx_max_entry;
+
+ /* setup the hdr offsets with 0's */
+ for (i = 0; i < qp->rx_max_entry; i++) {
+ void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
+ sizeof(struct ntb_payload_header);
+ memset(offset, 0, sizeof(struct ntb_payload_header));
+ }
+
+ qp->rx_pkts = 0;
+ qp->tx_pkts = 0;
+}
+
+static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
+{
+ struct ntb_transport_mw *mw = &nt->mw[num_mw];
+ struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+ /* Alloc memory for receiving data. Must be 4k aligned */
+ mw->size = ALIGN(size, 4096);
+
+ mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
+ GFP_KERNEL);
+ if (!mw->virt_addr) {
+ dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
+ (int) mw->size);
+ return -ENOMEM;
+ }
+
+ /* Notify HW the memory location of the receive buffer */
+ ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
+
+ return 0;
+}
+
+static void ntb_qp_link_cleanup(struct work_struct *work)
+{
+ struct ntb_transport_qp *qp = container_of(work,
+ struct ntb_transport_qp,
+ link_cleanup);
+ struct ntb_transport *nt = qp->transport;
+ struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+ if (qp->qp_link == NTB_LINK_DOWN) {
+ cancel_delayed_work_sync(&qp->link_work);
+ return;
+ }
+
+ if (qp->event_handler)
+ qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
+
+ dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
+ qp->qp_link = NTB_LINK_DOWN;
+
+ if (nt->transport_link == NTB_LINK_UP)
+ schedule_delayed_work(&qp->link_work,
+ msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+static void ntb_qp_link_down(struct ntb_transport_qp *qp)
+{
+ schedule_work(&qp->link_cleanup);
+}
+
+static void ntb_transport_link_cleanup(struct work_struct *work)
+{
+ struct ntb_transport *nt = container_of(work, struct ntb_transport,
+ link_cleanup);
+ int i;
+
+ if (nt->transport_link == NTB_LINK_DOWN)
+ cancel_delayed_work_sync(&nt->link_work);
+ else
+ nt->transport_link = NTB_LINK_DOWN;
+
+ /* Pass along the info to any clients */
+ for (i = 0; i < nt->max_qps; i++)
+ if (!test_bit(i, &nt->qp_bitmap))
+ ntb_qp_link_down(&nt->qps[i]);
+
+ /* The scratchpad registers keep the values if the remote side
+ * goes down, blast them now to give them a sane value the next
+ * time they are accessed
+ */
+ for (i = 0; i < MAX_SPAD; i++)
+ ntb_write_local_spad(nt->ndev, i, 0);
+}
+
+static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
+{
+ struct ntb_transport *nt = data;
+
+ switch (event) {
+ case NTB_EVENT_HW_LINK_UP:
+ schedule_delayed_work(&nt->link_work, 0);
+ break;
+ case NTB_EVENT_HW_LINK_DOWN:
+ schedule_work(&nt->link_cleanup);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void ntb_transport_link_work(struct work_struct *work)
+{
+ struct ntb_transport *nt = container_of(work, struct ntb_transport,
+ link_work.work);
+ struct ntb_device *ndev = nt->ndev;
+ struct pci_dev *pdev = ntb_query_pdev(ndev);
+ u32 val;
+ int rc, i;
+
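+	/* The link handshake runs over the scratchpad registers: each side
+	 * publishes its version, memory window sizes and QP count, then reads
+	 * back the remote values and only marks the transport up when they
+	 * match.
+	 */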
+ /* send the local info */
+ rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ 0, VERSION);
+ goto out;
+ }
+
+ rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ (u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
+ goto out;
+ }
+
+ rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ (u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
+ goto out;
+ }
+
+ rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ nt->max_qps, NUM_QPS);
+ goto out;
+ }
+
+ rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
+ goto out;
+ }
+
+ rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ val, QP_LINKS);
+ goto out;
+ }
+
+ /* Query the remote side for its info */
+ rc = ntb_read_remote_spad(ndev, VERSION, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
+ goto out;
+ }
+
+ if (val != NTB_TRANSPORT_VERSION)
+ goto out;
+ dev_dbg(&pdev->dev, "Remote version = %d\n", val);
+
+ rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
+ goto out;
+ }
+
+ if (val != nt->max_qps)
+ goto out;
+ dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
+
+ rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
+ goto out;
+ }
+
+ if (!val)
+ goto out;
+ dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
+
+ rc = ntb_set_mw(nt, 0, val);
+ if (rc)
+ goto out;
+
+ rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
+ goto out;
+ }
+
+ if (!val)
+ goto out;
+ dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
+
+ rc = ntb_set_mw(nt, 1, val);
+ if (rc)
+ goto out;
+
+ nt->transport_link = NTB_LINK_UP;
+
+ for (i = 0; i < nt->max_qps; i++) {
+ struct ntb_transport_qp *qp = &nt->qps[i];
+
+ ntb_transport_setup_qp_mw(nt, i);
+
+ if (qp->client_ready == NTB_LINK_UP)
+ schedule_delayed_work(&qp->link_work, 0);
+ }
+
+ return;
+
+out:
+ if (ntb_hw_link_status(ndev))
+ schedule_delayed_work(&nt->link_work,
+ msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+static void ntb_qp_link_work(struct work_struct *work)
+{
+ struct ntb_transport_qp *qp = container_of(work,
+ struct ntb_transport_qp,
+ link_work.work);
+ struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct ntb_transport *nt = qp->transport;
+ int rc, val;
+
+ WARN_ON(nt->transport_link != NTB_LINK_UP);
+
+ rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
+ return;
+ }
+
+ rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
+ if (rc)
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ val | 1 << qp->qp_num, QP_LINKS);
+
+ /* query remote spad for qp ready bits */
+ rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
+ if (rc)
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
+
+ dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
+
+ /* See if the remote side is up */
+ if (1 << qp->qp_num & val) {
+ qp->qp_link = NTB_LINK_UP;
+
+ dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
+ if (qp->event_handler)
+ qp->event_handler(qp->cb_data, NTB_LINK_UP);
+ } else if (nt->transport_link == NTB_LINK_UP)
+ schedule_delayed_work(&qp->link_work,
+ msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+static void ntb_transport_init_queue(struct ntb_transport *nt,
+ unsigned int qp_num)
+{
+ struct ntb_transport_qp *qp;
+ unsigned int num_qps_mw, tx_size;
+ u8 mw_num = QP_TO_MW(qp_num);
+
+ qp = &nt->qps[qp_num];
+ qp->qp_num = qp_num;
+ qp->transport = nt;
+ qp->ndev = nt->ndev;
+ qp->qp_link = NTB_LINK_DOWN;
+ qp->client_ready = NTB_LINK_DOWN;
+ qp->event_handler = NULL;
+
+ if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
+ num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
+ else
+ num_qps_mw = nt->max_qps / NTB_NUM_MW;
+
+ tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
+ qp->rx_info = ntb_get_mw_vbase(nt->ndev, mw_num) +
+ (qp_num / NTB_NUM_MW * tx_size);
+ tx_size -= sizeof(struct ntb_rx_info);
+
+ qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info);
+ qp->tx_max_frame = min(transport_mtu, tx_size);
+ qp->tx_max_entry = tx_size / qp->tx_max_frame;
+ qp->tx_index = 0;
+
+ if (nt->debugfs_dir) {
+		char debugfs_name[8];
+
+		snprintf(debugfs_name, 8, "qp%d", qp_num);
+ qp->debugfs_dir = debugfs_create_dir(debugfs_name,
+ nt->debugfs_dir);
+
+ qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
+ qp->debugfs_dir, qp,
+ &ntb_qp_debugfs_stats);
+ }
+
+ INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
+ INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
+
+ spin_lock_init(&qp->ntb_rx_pend_q_lock);
+ spin_lock_init(&qp->ntb_rx_free_q_lock);
+ spin_lock_init(&qp->ntb_tx_free_q_lock);
+
+ INIT_LIST_HEAD(&qp->rx_pend_q);
+ INIT_LIST_HEAD(&qp->rx_free_q);
+ INIT_LIST_HEAD(&qp->tx_free_q);
+}
+
+int ntb_transport_init(struct pci_dev *pdev)
+{
+ struct ntb_transport *nt;
+ int rc, i;
+
+ nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
+ if (!nt)
+ return -ENOMEM;
+
+ if (debugfs_initialized())
+ nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ else
+ nt->debugfs_dir = NULL;
+
+ nt->ndev = ntb_register_transport(pdev, nt);
+ if (!nt->ndev) {
+ rc = -EIO;
+ goto err;
+ }
+
+ nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);
+
+ nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
+ GFP_KERNEL);
+ if (!nt->qps) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
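+	/* A set bit in qp_bitmap marks a free queue; bits are cleared when a
+	 * queue is handed out by ntb_transport_create_queue() and set again
+	 * when it is freed.
+	 */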
+ nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
+
+ for (i = 0; i < nt->max_qps; i++)
+ ntb_transport_init_queue(nt, i);
+
+ INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
+ INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
+
+ rc = ntb_register_event_callback(nt->ndev,
+ ntb_transport_event_callback);
+ if (rc)
+ goto err2;
+
+ INIT_LIST_HEAD(&nt->client_devs);
+ rc = ntb_bus_init(nt);
+ if (rc)
+ goto err3;
+
+ if (ntb_hw_link_status(nt->ndev))
+ schedule_delayed_work(&nt->link_work, 0);
+
+ return 0;
+
+err3:
+ ntb_unregister_event_callback(nt->ndev);
+err2:
+ kfree(nt->qps);
+err1:
+ ntb_unregister_transport(nt->ndev);
+err:
+ debugfs_remove_recursive(nt->debugfs_dir);
+ kfree(nt);
+ return rc;
+}
+
+void ntb_transport_free(void *transport)
+{
+ struct ntb_transport *nt = transport;
+ struct pci_dev *pdev;
+ int i;
+
+ nt->transport_link = NTB_LINK_DOWN;
+
+	/* verify that all the qps are freed */
+ for (i = 0; i < nt->max_qps; i++)
+ if (!test_bit(i, &nt->qp_bitmap))
+ ntb_transport_free_queue(&nt->qps[i]);
+
+ ntb_bus_remove(nt);
+
+ cancel_delayed_work_sync(&nt->link_work);
+
+ debugfs_remove_recursive(nt->debugfs_dir);
+
+ ntb_unregister_event_callback(nt->ndev);
+
+ pdev = ntb_query_pdev(nt->ndev);
+
+ for (i = 0; i < NTB_NUM_MW; i++)
+ if (nt->mw[i].virt_addr)
+ dma_free_coherent(&pdev->dev, nt->mw[i].size,
+ nt->mw[i].virt_addr,
+ nt->mw[i].dma_addr);
+
+ kfree(nt->qps);
+ ntb_unregister_transport(nt->ndev);
+ kfree(nt);
+}
+
+static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry, void *offset)
+{
+ void *cb_data = entry->cb_data;
+ unsigned int len = entry->len;
+
+ memcpy(entry->buf, offset, entry->len);
+
+ ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+
+ if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
+ qp->rx_handler(qp, qp->cb_data, cb_data, len);
+}
+
+static int ntb_process_rxc(struct ntb_transport_qp *qp)
+{
+ struct ntb_payload_header *hdr;
+ struct ntb_queue_entry *entry;
+ void *offset;
+
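+	/* Each receive frame holds the payload first, with the
+	 * ntb_payload_header stored in the last bytes of the frame.
+	 */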
+ offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
+ hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
+
+ entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ if (!entry) {
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+ "no buffer - HDR ver %u, len %d, flags %x\n",
+ hdr->ver, hdr->len, hdr->flags);
+ qp->rx_err_no_buf++;
+ return -ENOMEM;
+ }
+
+ if (!(hdr->flags & DESC_DONE_FLAG)) {
+ ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+ &qp->rx_pend_q);
+ qp->rx_ring_empty++;
+ return -EAGAIN;
+ }
+
+ if (hdr->ver != (u32) qp->rx_pkts) {
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+ "qp %d: version mismatch, expected %llu - got %u\n",
+ qp->qp_num, qp->rx_pkts, hdr->ver);
+ ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+ &qp->rx_pend_q);
+ qp->rx_err_ver++;
+ return -EIO;
+ }
+
+ if (hdr->flags & LINK_DOWN_FLAG) {
+ ntb_qp_link_down(qp);
+
+ ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+ &qp->rx_pend_q);
+ goto out;
+ }
+
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+ "rx offset %u, ver %u - %d payload received, buf size %d\n",
+ qp->rx_index, hdr->ver, hdr->len, entry->len);
+
+ if (hdr->len <= entry->len) {
+ entry->len = hdr->len;
+ ntb_rx_copy_task(qp, entry, offset);
+ } else {
+ ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+ &qp->rx_pend_q);
+
+ qp->rx_err_oflow++;
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+ "RX overflow! Wanted %d got %d\n",
+ hdr->len, entry->len);
+ }
+
+ qp->rx_bytes += hdr->len;
+ qp->rx_pkts++;
+
+out:
+ /* Ensure that the data is fully copied out before clearing the flag */
+ wmb();
+ hdr->flags = 0;
+ iowrite32(qp->rx_index, &qp->rx_info->entry);
+
+ qp->rx_index++;
+ qp->rx_index %= qp->rx_max_entry;
+
+ return 0;
+}
+
+static void ntb_transport_rx(unsigned long data)
+{
+ struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
+ int rc;
+
+ do {
+ rc = ntb_process_rxc(qp);
+ } while (!rc);
+}
+
+static void ntb_transport_rxc_db(void *data, int db_num)
+{
+ struct ntb_transport_qp *qp = data;
+
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
+ __func__, db_num);
+
+ tasklet_schedule(&qp->rx_work);
+}
+
+static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry,
+ void __iomem *offset)
+{
+ struct ntb_payload_header __iomem *hdr;
+
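+	/* Mirror of the receive layout: the payload is copied to the start of
+	 * the frame and the header is written into its last bytes.
+	 */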
+ memcpy_toio(offset, entry->buf, entry->len);
+
+ hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
+ iowrite32(entry->len, &hdr->len);
+ iowrite32((u32) qp->tx_pkts, &hdr->ver);
+
+ /* Ensure that the data is fully copied out before setting the flag */
+ wmb();
+ iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
+
+ ntb_ring_sdb(qp->ndev, qp->qp_num);
+
+ /* The entry length can only be zero if the packet is intended to be a
+ * "link down" or similar. Since no payload is being sent in these
+ * cases, there is nothing to add to the completion queue.
+ */
+ if (entry->len > 0) {
+ qp->tx_bytes += entry->len;
+
+ if (qp->tx_handler)
+ qp->tx_handler(qp, qp->cb_data, entry->cb_data,
+ entry->len);
+ }
+
+ ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
+}
+
+static int ntb_process_tx(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry)
+{
+ void __iomem *offset;
+
+ offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
+
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %u, entry len %d flags %x buff %p\n",
+ qp->tx_pkts, offset, qp->tx_index, entry->len, entry->flags,
+ entry->buf);
+ if (qp->tx_index == qp->remote_rx_info->entry) {
+ qp->tx_ring_full++;
+ return -EAGAIN;
+ }
+
+ if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
+ if (qp->tx_handler)
+			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
+
+ ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+ &qp->tx_free_q);
+ return 0;
+ }
+
+ ntb_tx_copy_task(qp, entry, offset);
+
+ qp->tx_index++;
+ qp->tx_index %= qp->tx_max_entry;
+
+ qp->tx_pkts++;
+
+ return 0;
+}
+
+static void ntb_send_link_down(struct ntb_transport_qp *qp)
+{
+ struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct ntb_queue_entry *entry;
+ int i, rc;
+
+ if (qp->qp_link == NTB_LINK_DOWN)
+ return;
+
+ qp->qp_link = NTB_LINK_DOWN;
+ dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
+
+ for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
+ entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
+ if (entry)
+ break;
+ msleep(100);
+ }
+
+ if (!entry)
+ return;
+
+ entry->cb_data = NULL;
+ entry->buf = NULL;
+ entry->len = 0;
+ entry->flags = LINK_DOWN_FLAG;
+
+ rc = ntb_process_tx(qp, entry);
+ if (rc)
+ dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
+ qp->qp_num);
+}
+
+/**
+ * ntb_transport_create_queue - Create a new NTB transport layer queue
+ * @rx_handler: receive callback function
+ * @tx_handler: transmit callback function
+ * @event_handler: event callback function
+ *
+ * Create a new NTB transport layer queue and provide the queue with a callback
+ * routine for both transmit and receive. The receive callback routine will be
+ * used to pass up data when the transport has received it on the queue. The
+ * transmit callback routine will be called when the transport has completed the
+ * transmission of the data on the queue and the data is ready to be freed.
+ *
+ * RETURNS: pointer to newly created ntb_queue, NULL on error.
+ */
+struct ntb_transport_qp *
+ntb_transport_create_queue(void *data, struct pci_dev *pdev,
+ const struct ntb_queue_handlers *handlers)
+{
+ struct ntb_queue_entry *entry;
+ struct ntb_transport_qp *qp;
+ struct ntb_transport *nt;
+ unsigned int free_queue;
+ int rc, i;
+
+ nt = ntb_find_transport(pdev);
+ if (!nt)
+ goto err;
+
+ free_queue = ffs(nt->qp_bitmap);
+ if (!free_queue)
+ goto err;
+
+ /* decrement free_queue to make it zero based */
+ free_queue--;
+
+ clear_bit(free_queue, &nt->qp_bitmap);
+
+ qp = &nt->qps[free_queue];
+ qp->cb_data = data;
+ qp->rx_handler = handlers->rx_handler;
+ qp->tx_handler = handlers->tx_handler;
+ qp->event_handler = handlers->event_handler;
+
+ for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
+ entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
+ if (!entry)
+ goto err1;
+
+ ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+ &qp->rx_free_q);
+ }
+
+ for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
+ entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
+ if (!entry)
+ goto err2;
+
+ ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+ &qp->tx_free_q);
+ }
+
+ tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
+
+ rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
+ ntb_transport_rxc_db);
+ if (rc)
+ goto err3;
+
+ dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
+
+ return qp;
+
+err3:
+ tasklet_disable(&qp->rx_work);
+err2:
+ while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
+ kfree(entry);
+err1:
+ while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+ kfree(entry);
+ set_bit(free_queue, &nt->qp_bitmap);
+err:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
+
+/**
+ * ntb_transport_free_queue - Frees NTB transport queue
+ * @qp: NTB queue to be freed
+ *
+ * Frees NTB transport queue
+ */
+void ntb_transport_free_queue(struct ntb_transport_qp *qp)
+{
+ struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct ntb_queue_entry *entry;
+
+ if (!qp)
+ return;
+
+ cancel_delayed_work_sync(&qp->link_work);
+
+ ntb_unregister_db_callback(qp->ndev, qp->qp_num);
+ tasklet_disable(&qp->rx_work);
+
+ while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+ kfree(entry);
+
+ while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
+ dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+ kfree(entry);
+ }
+
+ while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
+ kfree(entry);
+
+ set_bit(qp->qp_num, &qp->transport->qp_bitmap);
+
+ dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
+
+/**
+ * ntb_transport_rx_remove - Dequeues enqueued rx packet
+ * @qp: NTB queue to be freed
+ * @len: pointer to variable to write enqueued buffers length
+ *
+ * Dequeues unused buffers from receive queue. Should only be used during
+ * shutdown of qp.
+ *
+ * RETURNS: NULL on error, or a pointer to the dequeued buffer on success.
+ */
+void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
+{
+ struct ntb_queue_entry *entry;
+ void *buf;
+
+ if (!qp || qp->client_ready == NTB_LINK_UP)
+ return NULL;
+
+ entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ if (!entry)
+ return NULL;
+
+ buf = entry->cb_data;
+ *len = entry->len;
+
+ ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+
+ return buf;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
+
+/**
+ * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
+ * @qp: NTB transport layer queue the entry is to be enqueued on
+ * @cb: per buffer pointer for callback function to use
+ * @data: pointer to data buffer that incoming packets will be copied into
+ * @len: length of the data buffer
+ *
+ * Enqueue a new receive buffer onto the transport queue, into which an NTB
+ * payload can be received.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+ unsigned int len)
+{
+ struct ntb_queue_entry *entry;
+
+ if (!qp)
+ return -EINVAL;
+
+ entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->cb_data = cb;
+ entry->buf = data;
+ entry->len = len;
+
+ ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
+
+/**
+ * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
+ * @qp: NTB transport layer queue the entry is to be enqueued on
+ * @cb: per buffer pointer for callback function to use
+ * @data: pointer to data buffer that will be sent
+ * @len: length of the data buffer
+ *
+ * Enqueue a new transmit buffer onto the transport queue from which an NTB
+ * payload will be transmitted. This assumes that a lock is being held to
+ * serialize access to the qp.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+ unsigned int len)
+{
+ struct ntb_queue_entry *entry;
+ int rc;
+
+ if (!qp || qp->qp_link != NTB_LINK_UP || !len)
+ return -EINVAL;
+
+ entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->cb_data = cb;
+ entry->buf = data;
+ entry->len = len;
+ entry->flags = 0;
+
+ rc = ntb_process_tx(qp, entry);
+ if (rc)
+ ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+ &qp->tx_free_q);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
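+
+/*
+ * Illustrative sketch, not part of the transport API: a minimal transmit
+ * helper built on ntb_transport_tx_enqueue(), assuming the caller already
+ * serializes access to the qp. The client_send() name and the -EAGAIN
+ * backoff policy are assumptions; a real client would retry once its
+ * tx_handler callback signals completed entries.
+ *
+ *	static int client_send(struct ntb_transport_qp *qp, void *buf,
+ *			       unsigned int len)
+ *	{
+ *		int rc;
+ *
+ *		if (!ntb_transport_link_query(qp))
+ *			return -ENOTCONN;
+ *		if (len > ntb_transport_max_size(qp))
+ *			return -EMSGSIZE;
+ *
+ *		rc = ntb_transport_tx_enqueue(qp, buf, buf, len);
+ *		if (rc == -ENOMEM)
+ *			return -EAGAIN;
+ *		return rc;
+ *	}
+ */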
+
+/**
+ * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
+ * @qp: NTB transport layer queue to be enabled
+ *
+ * Notify NTB transport layer of client readiness to use queue
+ */
+void ntb_transport_link_up(struct ntb_transport_qp *qp)
+{
+ if (!qp)
+ return;
+
+ qp->client_ready = NTB_LINK_UP;
+
+ if (qp->transport->transport_link == NTB_LINK_UP)
+ schedule_delayed_work(&qp->link_work, 0);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_up);
+
+/**
+ * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
+ * @qp: NTB transport layer queue to be disabled
+ *
+ * Notify the NTB transport layer of the client's desire to no longer receive
+ * data on the specified transport queue. It is the client's responsibility to
+ * ensure all entries on the queue are purged or otherwise handled
+ * appropriately.
+ */
+void ntb_transport_link_down(struct ntb_transport_qp *qp)
+{
+ struct pci_dev *pdev;
+ int rc, val;
+
+ if (!qp)
+ return;
+
+ pdev = ntb_query_pdev(qp->ndev);
+
+ qp->client_ready = NTB_LINK_DOWN;
+
+ rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
+ return;
+ }
+
+ rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
+ val & ~(1 << qp->qp_num));
+ if (rc)
+ dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+ val & ~(1 << qp->qp_num), QP_LINKS);
+
+ if (qp->qp_link == NTB_LINK_UP)
+ ntb_send_link_down(qp);
+ else
+ cancel_delayed_work_sync(&qp->link_work);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_down);
+
+/**
+ * ntb_transport_link_query - Query transport link state
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query connectivity to the remote system of the NTB transport queue
+ *
+ * RETURNS: true for link up or false for link down
+ */
+bool ntb_transport_link_query(struct ntb_transport_qp *qp)
+{
+ return qp->qp_link == NTB_LINK_UP;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_query);
+
+/**
+ * ntb_transport_qp_num - Query the qp number
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query qp number of the NTB transport queue
+ *
+ * RETURNS: a zero-based number specifying the qp number
+ */
+unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
+{
+ return qp->qp_num;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
+
+/**
+ * ntb_transport_max_size - Query the max payload size of a qp
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query the maximum payload size permissible on the given qp
+ *
+ * RETURNS: the max payload size of a qp
+ */
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
+{
+ return qp->tx_max_frame - sizeof(struct ntb_payload_header);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_max_size);
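+
+/*
+ * Illustrative bring-up order, not part of the transport API, tying the
+ * exported calls together. client_post_rx() refers to the hypothetical
+ * helper sketched above; the ntb_transport_create_queue() arguments are
+ * elided here.
+ *
+ *	qp = ntb_transport_create_queue(...);
+ *	if (!qp)
+ *		return -ENODEV;
+ *	rc = client_post_rx(qp);
+ *	if (rc)
+ *		goto err_free_queue;
+ *	ntb_transport_link_up(qp);
+ *	(transmit only after the event handler reports the queue link up)
+ */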
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 0125524c08c4..04da786c84d2 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -429,7 +429,7 @@ static u64 __of_translate_address(struct device_node *dev,
goto bail;
bus = of_match_bus(parent);
- /* Cound address cells & copy address locally */
+ /* Count address cells & copy address locally */
bus->count_cells(dev, &na, &ns);
if (!OF_CHECK_COUNTS(na, ns)) {
printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b80891b43816..e0a6514ab46c 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -436,6 +436,7 @@ EXPORT_SYMBOL(of_platform_bus_probe);
* of_platform_populate() - Populate platform_devices from device tree data
* @root: parent of the first level to probe or NULL for the root of the tree
* @matches: match table, NULL to use the default
+ * @lookup: auxdata table for matching id and platform_data with device nodes
* @parent: parent to hook devices from, NULL for toplevel
*
* Similar to of_platform_bus_probe(), this function walks the device tree
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 849357c1045c..445ffda715ad 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -139,17 +139,22 @@ static int __oprofilefs_create_file(struct super_block *sb,
struct dentry *dentry;
struct inode *inode;
+ mutex_lock(&root->d_inode->i_mutex);
dentry = d_alloc_name(root, name);
- if (!dentry)
+ if (!dentry) {
+ mutex_unlock(&root->d_inode->i_mutex);
return -ENOMEM;
+ }
inode = oprofilefs_get_inode(sb, S_IFREG | perm);
if (!inode) {
dput(dentry);
+ mutex_unlock(&root->d_inode->i_mutex);
return -ENOMEM;
}
inode->i_fop = fops;
+ inode->i_private = priv;
d_add(dentry, inode);
- dentry->d_inode->i_private = priv;
+ mutex_unlock(&root->d_inode->i_mutex);
return 0;
}
@@ -212,17 +217,22 @@ struct dentry *oprofilefs_mkdir(struct super_block *sb,
struct dentry *dentry;
struct inode *inode;
+ mutex_lock(&root->d_inode->i_mutex);
dentry = d_alloc_name(root, name);
- if (!dentry)
+ if (!dentry) {
+ mutex_unlock(&root->d_inode->i_mutex);
return NULL;
+ }
inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
if (!inode) {
dput(dentry);
+ mutex_unlock(&root->d_inode->i_mutex);
return NULL;
}
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
d_add(dentry, inode);
+ mutex_unlock(&root->d_inode->i_mutex);
return dentry;
}
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig
index 620264936341..592de566e72f 100644
--- a/drivers/parisc/Kconfig
+++ b/drivers/parisc/Kconfig
@@ -128,6 +128,7 @@ config SUPERIO
config CHASSIS_LCD_LED
bool "Chassis LCD and LED support"
default y
+ select VM_EVENT_COUNTERS
help
Say Y here if you want to enable support for the Heartbeat,
Disk/Network activities LEDs on some PA-RISC machines,
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 8e4e86b78428..9eae9834bcc7 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -580,15 +580,13 @@ dino_fixup_bus(struct pci_bus *bus)
}
- DBG("DEBUG %s assigning %d [0x%lx,0x%lx]\n",
+ DBG("DEBUG %s assigning %d [%pR]\n",
dev_name(&bus->self->dev), i,
- bus->self->resource[i].start,
- bus->self->resource[i].end);
+ &bus->self->resource[i]);
WARN_ON(pci_assign_resource(bus->self, i));
- DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n",
+ DBG("DEBUG %s after assign %d [%pR]\n",
dev_name(&bus->self->dev), i,
- bus->self->resource[i].start,
- bus->self->resource[i].end);
+ &bus->self->resource[i]);
}
}
@@ -772,8 +770,7 @@ dino_bridge_init(struct dino_device *dino_dev, const char *name)
result = ccio_request_resource(dino_dev->hba.dev, &res[i]);
if (result < 0) {
printk(KERN_ERR "%s: failed to claim PCI Bus address "
- "space %d (0x%lx-0x%lx)!\n", name, i,
- (unsigned long)res[i].start, (unsigned long)res[i].end);
+ "space %d (%pR)!\n", name, i, &res[i]);
return result;
}
}
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index 815db175d427..898208e4f302 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -74,10 +74,8 @@ static int hppb_probe(struct parisc_device *dev)
status = ccio_request_resource(dev, &card->mmio_region);
if(status < 0) {
- printk(KERN_ERR "%s: failed to claim HP-PB "
- "bus space (0x%08llx, 0x%08llx)\n",
- __FILE__, (unsigned long long) card->mmio_region.start,
- (unsigned long long) card->mmio_region.end);
+ printk(KERN_ERR "%s: failed to claim HP-PB bus space (%pR)\n",
+ __FILE__, &card->mmio_region);
}
return 0;
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index f2f501e5b6a0..d4d800c54d86 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -179,7 +179,7 @@ static int led_proc_open(struct inode *inode, struct file *file)
static ssize_t led_proc_write(struct file *file, const char *buf,
size_t count, loff_t *pos)
{
- void *data = PDE(file->f_path.dentry->d_inode)->data;
+ void *data = PDE(file_inode(file))->data;
char *cur, lbuf[32];
int d;
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 246a92f677e4..0f54ab6260df 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -212,12 +212,10 @@ pdcspath_store(struct pdcspath_entry *entry)
entry, devpath, entry->addr);
/* addr, devpath and count must be word aligned */
- if (pdc_stable_write(entry->addr, devpath, sizeof(*devpath)) != PDC_OK) {
- printk(KERN_ERR "%s: an error occurred when writing to PDC.\n"
+ if (pdc_stable_write(entry->addr, devpath, sizeof(*devpath)) != PDC_OK)
+ WARN(1, KERN_ERR "%s: an error occurred when writing to PDC.\n"
"It is likely that the Stable Storage data has been corrupted.\n"
"Please check it carefully upon next reboot.\n", __func__);
- WARN_ON(1);
- }
/* kobject is already registered */
entry->ready = 2;
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 5003458980d3..ac6e8e7a02df 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -274,7 +274,7 @@ superio_init(struct pci_dev *pcidev)
else
printk(KERN_ERR PFX "USB regulator not initialized!\n");
- if (request_irq(pdev->irq, superio_interrupt, IRQF_DISABLED,
+ if (request_irq(pdev->irq, superio_interrupt, 0,
SUPERIO, (void *)sio)) {
printk(KERN_ERR PFX "could not get irq\n");
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 0e60438ebe30..24e12d4d1769 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -35,7 +35,7 @@ if PARPORT
config PARPORT_PC
tristate "PC-style hardware"
- depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \
+ depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
(!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && !XTENSA
---help---
You should say Y here if you have a PC-style parallel port. All
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index ef6169adb845..1b8bdb7e9bf4 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -63,6 +63,7 @@ enum parport_pc_pci_cards {
timedia_9079b,
timedia_9079c,
wch_ch353_2s1p,
+ sunix_2s1p,
};
/* each element directly indexed from enum list, above */
@@ -148,8 +149,12 @@ static struct parport_pc_pci cards[] = {
/* timedia_9079b */ { 1, { { 2, 3 }, } },
/* timedia_9079c */ { 1, { { 2, 3 }, } },
/* wch_ch353_2s1p*/ { 1, { { 2, -1}, } },
+ /* sunix_2s1p */ { 1, { { 3, -1 }, } },
};
+#define PCI_VENDOR_ID_SUNIX 0x1fd4
+#define PCI_DEVICE_ID_SUNIX_1999 0x1999
+
static struct pci_device_id parport_serial_pci_tbl[] = {
/* PCI cards */
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_110L,
@@ -246,8 +251,18 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
{ 0x1409, 0x7168, 0x1409, 0xb079, 0, 0, timedia_9079a },
{ 0x1409, 0x7168, 0x1409, 0xc079, 0, 0, timedia_9079b },
{ 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c },
+
/* WCH CARDS */
{ 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p},
+
+ /*
+ * More SUNIX variations. At least one of these has part number
+ * '5079A but subdevice 0x102. That board reports 0x0708 as
+ * its PCI Class.
+ */
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
+ 0x0102, 0, 0, sunix_2s1p },
+
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl);
@@ -470,6 +485,12 @@ static struct pciserial_board pci_parport_serial_boards[] = {
.base_baud = 115200,
.uart_offset = 8,
},
+ [sunix_2s1p] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
};
struct parport_serial_private {
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 3af0478c057b..1cc23661f79b 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -472,7 +472,7 @@ EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
static inline int pcie_cap_version(const struct pci_dev *dev)
{
- return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS;
+ return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}
static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
@@ -497,7 +497,7 @@ static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
(type == PCI_EXP_TYPE_DOWNSTREAM &&
- dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
+ pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT);
}
static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
@@ -515,7 +515,7 @@ static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
return false;
switch (pos) {
- case PCI_EXP_FLAGS_TYPE:
+ case PCI_EXP_FLAGS:
return true;
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index ad6a8b635692..8647dc6f52d0 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -158,69 +158,38 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
return ret;
}
+void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
+
/**
- * pci_bus_add_device - add a single device
+ * pci_bus_add_device - start driver for a single device
* @dev: device to add
*
- * This adds a single pci device to the global
- * device list and adds sysfs and procfs entries
+ * This adds sysfs entries and starts the device driver
*/
int pci_bus_add_device(struct pci_dev *dev)
{
int retval;
- pci_fixup_device(pci_fixup_final, dev);
-
- retval = pcibios_add_device(dev);
- if (retval)
- return retval;
-
- retval = device_add(&dev->dev);
- if (retval)
- return retval;
-
- dev->is_added = 1;
- pci_proc_attach_device(dev);
+ /*
+ * Cannot be done in pci_device_add() yet because resources
+ * are not assigned yet for some devices.
+ */
pci_create_sysfs_dev_files(dev);
- return 0;
-}
-
-/**
- * pci_bus_add_child - add a child bus
- * @bus: bus to add
- *
- * This adds sysfs entries for a single bus
- */
-int pci_bus_add_child(struct pci_bus *bus)
-{
- int retval;
-
- if (bus->bridge)
- bus->dev.parent = bus->bridge;
- retval = device_register(&bus->dev);
- if (retval)
- return retval;
+ dev->match_driver = true;
+ retval = device_attach(&dev->dev);
+ WARN_ON(retval < 0);
- bus->is_added = 1;
-
- /* Create legacy_io and legacy_mem files for this bus */
- pci_create_legacy_files(bus);
+ dev->is_added = 1;
- return retval;
+ return 0;
}
/**
- * pci_bus_add_devices - insert newly discovered PCI devices
+ * pci_bus_add_devices - start drivers for PCI devices
* @bus: bus to check for new devices
*
- * Add newly discovered PCI devices (which are on the bus->devices
- * list) to the global PCI device list, add the sysfs and procfs
- * entries. Where a bridge is found, add the discovered bus to
- * the parents list of child buses, and recurse (breadth-first
- * to be compatible with 2.4)
- *
- * Call hotplug for each new devices.
+ * Start drivers for PCI devices and add some sysfs entries.
*/
void pci_bus_add_devices(const struct pci_bus *bus)
{
@@ -233,36 +202,20 @@ void pci_bus_add_devices(const struct pci_bus *bus)
if (dev->is_added)
continue;
retval = pci_bus_add_device(dev);
- if (retval)
- dev_err(&dev->dev, "Error adding device, continuing\n");
}
list_for_each_entry(dev, &bus->devices, bus_list) {
BUG_ON(!dev->is_added);
child = dev->subordinate;
- /*
- * If there is an unattached subordinate bus, attach
- * it and then scan for unattached PCI devices.
- */
+
if (!child)
continue;
- if (list_empty(&child->node)) {
- down_write(&pci_bus_sem);
- list_add_tail(&child->node, &dev->bus->children);
- up_write(&pci_bus_sem);
- }
pci_bus_add_devices(child);
- /*
- * register the bus with sysfs as the parent is now
- * properly registered.
- */
if (child->is_added)
continue;
- retval = pci_bus_add_child(child);
- if (retval)
- dev_err(&dev->dev, "Error adding bus, continuing\n");
+ child->is_added = 1;
}
}
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index a1afb5b39ad4..b70ac00a117e 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -79,7 +79,6 @@ struct acpiphp_bridge {
/* Ejectable PCI-to-PCI bridge (PCI bridge and PCI function) */
struct acpiphp_func *func;
- int type;
int nr_slots;
u32 flags;
@@ -146,10 +145,6 @@ struct acpiphp_attention_info
/* PCI bus bridge HID */
#define ACPI_PCI_HOST_HID "PNP0A03"
-/* PCI BRIDGE type */
-#define BRIDGE_TYPE_HOST 0
-#define BRIDGE_TYPE_P2P 1
-
/* ACPI _STA method value (ignore bit 4; battery present) */
#define ACPI_STA_PRESENT (0x00000001)
#define ACPI_STA_ENABLED (0x00000002)
@@ -158,13 +153,7 @@ struct acpiphp_attention_info
#define ACPI_STA_ALL (0x0000000f)
/* bridge flags */
-#define BRIDGE_HAS_STA (0x00000001)
-#define BRIDGE_HAS_EJ0 (0x00000002)
-#define BRIDGE_HAS_HPP (0x00000004)
-#define BRIDGE_HAS_PS0 (0x00000010)
-#define BRIDGE_HAS_PS1 (0x00000020)
-#define BRIDGE_HAS_PS2 (0x00000040)
-#define BRIDGE_HAS_PS3 (0x00000080)
+#define BRIDGE_HAS_EJ0 (0x00000001)
/* slot flags */
@@ -193,7 +182,6 @@ extern void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
/* acpiphp_glue.c */
extern int acpiphp_glue_init (void);
extern void acpiphp_glue_exit (void);
-extern int acpiphp_get_num_slots (void);
typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data);
extern int acpiphp_enable_slot (struct acpiphp_slot *slot);
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 96316b74969f..c2fd3095701f 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -50,7 +50,6 @@
bool acpiphp_debug;
/* local variables */
-static int num_slots;
static struct acpiphp_attention_info *attention_info;
#define DRIVER_VERSION "0.5"
@@ -272,25 +271,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static int __init init_acpi(void)
-{
- int retval;
-
- /* initialize internal data structure etc. */
- retval = acpiphp_glue_init();
-
- /* read initial number of slots */
- if (!retval) {
- num_slots = acpiphp_get_num_slots();
- if (num_slots == 0) {
- acpiphp_glue_exit();
- retval = -ENODEV;
- }
- }
-
- return retval;
-}
-
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
@@ -379,7 +359,8 @@ static int __init acpiphp_init(void)
return 0;
/* read all the ACPI info from the system */
- return init_acpi();
+ /* initialize internal data structure etc. */
+ return acpiphp_glue_init();
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a951c22921d1..270fdbadc19c 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -325,8 +325,8 @@ static void init_bridge_misc(struct acpiphp_bridge *bridge)
return;
}
- /* install notify handler */
- if (bridge->type != BRIDGE_TYPE_HOST) {
+ /* install notify handler for P2P bridges */
+ if (!pci_is_root_bus(bridge->pci_bus)) {
if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
status = acpi_remove_notify_handler(bridge->func->handle,
ACPI_SYSTEM_NOTIFY,
@@ -369,27 +369,12 @@ static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle
static inline void config_p2p_bridge_flags(struct acpiphp_bridge *bridge)
{
acpi_handle dummy_handle;
+ struct acpiphp_func *func;
if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_STA", &dummy_handle)))
- bridge->flags |= BRIDGE_HAS_STA;
-
- if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_EJ0", &dummy_handle)))
+ "_EJ0", &dummy_handle))) {
bridge->flags |= BRIDGE_HAS_EJ0;
- if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_PS0", &dummy_handle)))
- bridge->flags |= BRIDGE_HAS_PS0;
-
- if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_PS3", &dummy_handle)))
- bridge->flags |= BRIDGE_HAS_PS3;
-
- /* is this ejectable p2p bridge? */
- if (bridge->flags & BRIDGE_HAS_EJ0) {
- struct acpiphp_func *func;
-
dbg("found ejectable p2p bridge\n");
/* make link between PCI bridge and PCI function */
@@ -412,7 +397,6 @@ static void add_host_bridge(struct acpi_pci_root *root)
if (bridge == NULL)
return;
- bridge->type = BRIDGE_TYPE_HOST;
bridge->handle = handle;
bridge->pci_bus = root->bus;
@@ -432,7 +416,6 @@ static void add_p2p_bridge(acpi_handle *handle)
return;
}
- bridge->type = BRIDGE_TYPE_P2P;
bridge->handle = handle;
config_p2p_bridge_flags(bridge);
@@ -543,13 +526,15 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
acpi_status status;
acpi_handle handle = bridge->handle;
- status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ if (!pci_is_root_bus(bridge->pci_bus)) {
+ status = acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
handle_hotplug_event_bridge);
- if (ACPI_FAILURE(status))
- err("failed to remove notify handler\n");
+ if (ACPI_FAILURE(status))
+ err("failed to remove notify handler\n");
+ }
- if ((bridge->type != BRIDGE_TYPE_HOST) &&
- ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func)) {
+ if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
status = acpi_install_notify_handler(bridge->func->handle,
ACPI_SYSTEM_NOTIFY,
handle_hotplug_event_func,
@@ -630,9 +615,6 @@ static void remove_bridge(struct acpi_pci_root *root)
bridge = acpiphp_handle_to_bridge(handle);
if (bridge)
cleanup_bridge(bridge);
- else
- acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge);
}
static int power_on_slot(struct acpiphp_slot *slot)
@@ -793,6 +775,29 @@ static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
}
}
+static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev)
+{
+ struct acpiphp_func *func;
+
+ if (!dev->subordinate)
+ return;
+
+ /* quirk, or pcie could set it already */
+ if (dev->is_hotplug_bridge)
+ return;
+
+ if (PCI_SLOT(dev->devfn) != slot->device)
+ return;
+
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ if (PCI_FUNC(dev->devfn) == func->function) {
+ /* check if this bridge has ejectable slots */
+ if ((detect_ejectable_slots(func->handle) > 0))
+ dev->is_hotplug_bridge = 1;
+ break;
+ }
+ }
+}
/**
* enable_device - enable, configure a slot
* @slot: slot to be enabled
@@ -812,6 +817,9 @@ static int __ref enable_device(struct acpiphp_slot *slot)
if (slot->flags & SLOT_ENABLED)
goto err_exit;
+ list_for_each_entry(func, &slot->funcs, sibling)
+ acpiphp_bus_add(func);
+
num = pci_scan_slot(bus, PCI_DEVFN(slot->device, 0));
if (num == 0) {
/* Maybe only part of funcs are added. */
@@ -827,15 +835,14 @@ static int __ref enable_device(struct acpiphp_slot *slot)
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
max = pci_scan_bridge(bus, dev, max, pass);
- if (pass && dev->subordinate)
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
pci_bus_size_bridges(dev->subordinate);
+ }
}
}
}
- list_for_each_entry(func, &slot->funcs, sibling)
- acpiphp_bus_add(func);
-
pci_bus_assign_resources(bus);
acpiphp_sanitize_bus(bus);
acpiphp_set_hpp_values(bus);
@@ -1093,70 +1100,11 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
}
}
-/* Program resources in newly inserted bridge */
-static int acpiphp_configure_bridge (acpi_handle handle)
-{
- struct pci_bus *bus;
-
- if (acpi_is_root_bridge(handle)) {
- struct acpi_pci_root *root = acpi_pci_find_root(handle);
- bus = root->bus;
- } else {
- struct pci_dev *pdev = acpi_get_pci_dev(handle);
- bus = pdev->subordinate;
- pci_dev_put(pdev);
- }
-
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
- acpiphp_sanitize_bus(bus);
- acpiphp_set_hpp_values(bus);
- pci_enable_bridges(bus);
- return 0;
-}
-
-static void handle_bridge_insertion(acpi_handle handle, u32 type)
-{
- struct acpi_device *device;
-
- if ((type != ACPI_NOTIFY_BUS_CHECK) &&
- (type != ACPI_NOTIFY_DEVICE_CHECK)) {
- err("unexpected notification type %d\n", type);
- return;
- }
-
- if (acpi_bus_scan(handle)) {
- err("cannot add bridge to acpi list\n");
- return;
- }
- if (acpi_bus_get_device(handle, &device)) {
- err("ACPI device object missing\n");
- return;
- }
- if (!acpiphp_configure_bridge(handle))
- add_bridge(handle);
- else
- err("cannot configure and start bridge\n");
-
-}
-
/*
* ACPI event handlers
*/
static acpi_status
-count_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- int *count = (int *)context;
- struct acpiphp_bridge *bridge;
-
- bridge = acpiphp_handle_to_bridge(handle);
- if (bridge)
- (*count)++;
- return AE_OK ;
-}
-
-static acpi_status
check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
struct acpiphp_bridge *bridge;
@@ -1174,83 +1122,33 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK ;
}
-struct acpiphp_hp_work {
- struct work_struct work;
- acpi_handle handle;
- u32 type;
- void *context;
-};
-
-static void alloc_acpiphp_hp_work(acpi_handle handle, u32 type,
- void *context,
- void (*func)(struct work_struct *work))
-{
- struct acpiphp_hp_work *hp_work;
- int ret;
-
- hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
- if (!hp_work)
- return;
-
- hp_work->handle = handle;
- hp_work->type = type;
- hp_work->context = context;
-
- INIT_WORK(&hp_work->work, func);
- ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
- if (!ret)
- kfree(hp_work);
-}
-
static void _handle_hotplug_event_bridge(struct work_struct *work)
{
struct acpiphp_bridge *bridge;
char objname[64];
struct acpi_buffer buffer = { .length = sizeof(objname),
.pointer = objname };
- struct acpi_device *device;
- int num_sub_bridges = 0;
- struct acpiphp_hp_work *hp_work;
+ struct acpi_hp_work *hp_work;
acpi_handle handle;
u32 type;
- hp_work = container_of(work, struct acpiphp_hp_work, work);
+ hp_work = container_of(work, struct acpi_hp_work, work);
handle = hp_work->handle;
type = hp_work->type;
+ bridge = (struct acpiphp_bridge *)hp_work->context;
acpi_scan_lock_acquire();
- if (acpi_bus_get_device(handle, &device)) {
- /* This bridge must have just been physically inserted */
- handle_bridge_insertion(handle, type);
- goto out;
- }
-
- bridge = acpiphp_handle_to_bridge(handle);
- if (type == ACPI_NOTIFY_BUS_CHECK) {
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, ACPI_UINT32_MAX,
- count_sub_bridges, NULL, &num_sub_bridges, NULL);
- }
-
- if (!bridge && !num_sub_bridges) {
- err("cannot get bridge info\n");
- goto out;
- }
-
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
/* bus re-enumerate */
dbg("%s: Bus check notify on %s\n", __func__, objname);
- if (bridge) {
- dbg("%s: re-enumerating slots under %s\n",
- __func__, objname);
- acpiphp_check_bridge(bridge);
- }
- if (num_sub_bridges)
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
- ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
+ dbg("%s: re-enumerating slots under %s\n", __func__, objname);
+ acpiphp_check_bridge(bridge);
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+ ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
break;
case ACPI_NOTIFY_DEVICE_CHECK:
@@ -1267,8 +1165,7 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
case ACPI_NOTIFY_EJECT_REQUEST:
/* request device eject */
dbg("%s: Device eject notify on %s\n", __func__, objname);
- if ((bridge->type != BRIDGE_TYPE_HOST) &&
- (bridge->flags & BRIDGE_HAS_EJ0)) {
+ if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
struct acpiphp_slot *slot;
slot = bridge->func->slot;
if (!acpiphp_disable_slot(slot))
@@ -1296,7 +1193,6 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
break;
}
-out:
acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
}
@@ -1320,8 +1216,7 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
* For now just re-add this work to the kacpi_hotplug_wq so we
* don't deadlock on hotplug actions.
*/
- alloc_acpiphp_hp_work(handle, type, context,
- _handle_hotplug_event_bridge);
+ alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
}
static void _handle_hotplug_event_func(struct work_struct *work)
@@ -1330,22 +1225,19 @@ static void _handle_hotplug_event_func(struct work_struct *work)
char objname[64];
struct acpi_buffer buffer = { .length = sizeof(objname),
.pointer = objname };
- struct acpiphp_hp_work *hp_work;
+ struct acpi_hp_work *hp_work;
acpi_handle handle;
u32 type;
- void *context;
- hp_work = container_of(work, struct acpiphp_hp_work, work);
+ hp_work = container_of(work, struct acpi_hp_work, work);
handle = hp_work->handle;
type = hp_work->type;
- context = hp_work->context;
-
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
-
- func = (struct acpiphp_func *)context;
+ func = (struct acpiphp_func *)hp_work->context;
acpi_scan_lock_acquire();
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
/* bus re-enumerate */
@@ -1399,23 +1291,7 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
* For now just re-add this work to the kacpi_hotplug_wq so we
* don't deadlock on hotplug actions.
*/
- alloc_acpiphp_hp_work(handle, type, context,
- _handle_hotplug_event_func);
-}
-
-static acpi_status
-find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- int *count = (int *)context;
-
- if (!acpi_is_root_bridge(handle))
- return AE_OK;
-
- (*count)++;
- acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge, NULL);
-
- return AE_OK ;
+ alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_func);
}
static struct acpi_pci_driver acpi_pci_hp_driver = {
@@ -1428,15 +1304,7 @@ static struct acpi_pci_driver acpi_pci_hp_driver = {
*/
int __init acpiphp_glue_init(void)
{
- int num = 0;
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_root_bridges, NULL, &num, NULL);
-
- if (num <= 0)
- return -1;
- else
- acpi_pci_register_driver(&acpi_pci_hp_driver);
+ acpi_pci_register_driver(&acpi_pci_hp_driver);
return 0;
}
@@ -1452,28 +1320,6 @@ void acpiphp_glue_exit(void)
acpi_pci_unregister_driver(&acpi_pci_hp_driver);
}
-
-/**
- * acpiphp_get_num_slots - count number of slots in a system
- */
-int __init acpiphp_get_num_slots(void)
-{
- struct acpiphp_bridge *bridge;
- int num_slots = 0;
-
- list_for_each_entry(bridge, &bridge_list, list) {
- dbg("Bus %04x:%02x has %d slot%s\n",
- pci_domain_nr(bridge->pci_bus),
- bridge->pci_bus->number, bridge->nr_slots,
- bridge->nr_slots == 1 ? "" : "s");
- num_slots += bridge->nr_slots;
- }
-
- dbg("Total %d slots\n", num_slots);
- return num_slots;
-}
-
-
/**
* acpiphp_enable_slot - power on slot
* @slot: ACPI PHP slot
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index dcc75c785443..d8add34177f2 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -252,8 +252,8 @@ int cpci_led_off(struct slot* slot)
int __ref cpci_configure_slot(struct slot *slot)
{
+ struct pci_dev *dev;
struct pci_bus *parent;
- int fn;
dbg("%s - enter", __func__);
@@ -282,18 +282,13 @@ int __ref cpci_configure_slot(struct slot *slot)
}
parent = slot->dev->bus;
- for (fn = 0; fn < 8; fn++) {
- struct pci_dev *dev;
-
- dev = pci_get_slot(parent,
- PCI_DEVFN(PCI_SLOT(slot->devfn), fn));
- if (!dev)
+ list_for_each_entry(dev, &parent->devices, bus_list)
+ if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
continue;
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS))
pci_hp_add_bridge(dev);
- pci_dev_put(dev);
- }
+
pci_assign_unassigned_bridge_resources(parent->self);
@@ -305,8 +300,7 @@ int __ref cpci_configure_slot(struct slot *slot)
int cpci_unconfigure_slot(struct slot* slot)
{
- int i;
- struct pci_dev *dev;
+ struct pci_dev *dev, *temp;
dbg("%s - enter", __func__);
if (!slot->dev) {
@@ -314,13 +308,12 @@ int cpci_unconfigure_slot(struct slot* slot)
return -ENODEV;
}
- for (i = 0; i < 8; i++) {
- dev = pci_get_slot(slot->bus,
- PCI_DEVFN(PCI_SLOT(slot->devfn), i));
- if (dev) {
- pci_stop_and_remove_bus_device(dev);
- pci_dev_put(dev);
- }
+ list_for_each_entry_safe(dev, temp, &slot->bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
+ continue;
+ pci_dev_get(dev);
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
}
pci_dev_put(slot->dev);
slot->dev = NULL;
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 36112fe212d3..d282019cda5f 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1900,8 +1900,7 @@ static void interrupt_event_handler(struct controller *ctrl)
dbg("power fault\n");
} else {
/* refresh notification */
- if (p_slot)
- update_slot_info(ctrl, p_slot);
+ update_slot_info(ctrl, p_slot);
}
ctrl->event_queue[loop].event_type = 0;
@@ -2520,44 +2519,28 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
/* If we have IO resources copy them and fill in the bridge's
* IO range registers */
- if (io_node) {
- memcpy(hold_IO_node, io_node, sizeof(struct pci_resource));
- io_node->next = NULL;
+ memcpy(hold_IO_node, io_node, sizeof(struct pci_resource));
+ io_node->next = NULL;
- /* set IO base and Limit registers */
- temp_byte = io_node->base >> 8;
- rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_BASE, temp_byte);
+ /* set IO base and Limit registers */
+ temp_byte = io_node->base >> 8;
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_BASE, temp_byte);
- temp_byte = (io_node->base + io_node->length - 1) >> 8;
- rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte);
- } else {
- kfree(hold_IO_node);
- hold_IO_node = NULL;
- }
-
- /* If we have memory resources copy them and fill in the
- * bridge's memory range registers. Otherwise, fill in the
- * range registers with values that disable them. */
- if (mem_node) {
- memcpy(hold_mem_node, mem_node, sizeof(struct pci_resource));
- mem_node->next = NULL;
-
- /* set Mem base and Limit registers */
- temp_word = mem_node->base >> 16;
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word);
+ temp_byte = (io_node->base + io_node->length - 1) >> 8;
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte);
- temp_word = (mem_node->base + mem_node->length - 1) >> 16;
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
- } else {
- temp_word = 0xFFFF;
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word);
+ /* Copy the memory resources and fill in the bridge's memory
+ * range registers.
+ */
+ memcpy(hold_mem_node, mem_node, sizeof(struct pci_resource));
+ mem_node->next = NULL;
- temp_word = 0x0000;
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
+ /* set Mem base and Limit registers */
+ temp_word = mem_node->base >> 16;
+ rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word);
- kfree(hold_mem_node);
- hold_mem_node = NULL;
- }
+ temp_word = (mem_node->base + mem_node->length - 1) >> 16;
+ rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
memcpy(hold_p_mem_node, p_mem_node, sizeof(struct pci_resource));
p_mem_node->next = NULL;
@@ -2627,7 +2610,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
/* Return unused bus resources
* First use the temporary node to store information for
* the board */
- if (hold_bus_node && bus_node && temp_resources.bus_head) {
+ if (bus_node && temp_resources.bus_head) {
hold_bus_node->length = bus_node->base - hold_bus_node->base;
hold_bus_node->next = func->bus_head;
@@ -2751,7 +2734,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
}
/* If we have prefetchable memory space available and there
* is some left at the end, return the unused portion */
- if (hold_p_mem_node && temp_resources.p_mem_head) {
+ if (temp_resources.p_mem_head) {
p_mem_node = do_pre_bridge_resource_split(&(temp_resources.p_mem_head),
&hold_p_mem_node, 0x100000);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 939bd1d4b5b1..7d72c5e2eba9 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -293,7 +293,6 @@ static void pciehp_remove(struct pcie_device *dev)
#ifdef CONFIG_PM
static int pciehp_suspend (struct pcie_device *dev)
{
- dev_info(&dev->device, "%s ENTRY\n", __func__);
return 0;
}
@@ -303,7 +302,6 @@ static int pciehp_resume (struct pcie_device *dev)
struct slot *slot;
u8 status;
- dev_info(&dev->device, "%s ENTRY\n", __func__);
ctrl = get_service_data(dev);
/* reinitialize the chipset's event detection logic */
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 09cecaf450c5..aac7a40e4a4a 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -39,7 +39,7 @@ int pciehp_configure_device(struct slot *p_slot)
struct pci_dev *dev;
struct pci_dev *bridge = p_slot->ctrl->pcie->port;
struct pci_bus *parent = bridge->subordinate;
- int num, fn;
+ int num;
struct controller *ctrl = p_slot->ctrl;
dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
@@ -57,28 +57,18 @@ int pciehp_configure_device(struct slot *p_slot)
return -ENODEV;
}
- for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
- if (!dev)
- continue;
+ list_for_each_entry(dev, &parent->devices, bus_list)
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS))
pci_hp_add_bridge(dev);
- pci_dev_put(dev);
- }
pci_assign_unassigned_bridge_resources(bridge);
- for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
- if (!dev)
+ list_for_each_entry(dev, &parent->devices, bus_list) {
+ if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
continue;
- if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- pci_dev_put(dev);
- continue;
- }
+
pci_configure_slot(dev);
- pci_dev_put(dev);
}
pci_bus_add_devices(parent);
@@ -89,9 +79,9 @@ int pciehp_configure_device(struct slot *p_slot)
int pciehp_unconfigure_device(struct slot *p_slot)
{
int ret, rc = 0;
- int j;
u8 bctl = 0;
u8 presence = 0;
+ struct pci_dev *dev, *temp;
struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
u16 command;
struct controller *ctrl = p_slot->ctrl;
@@ -102,33 +92,31 @@ int pciehp_unconfigure_device(struct slot *p_slot)
if (ret)
presence = 0;
- for (j = 0; j < 8; j++) {
- struct pci_dev *temp = pci_get_slot(parent, PCI_DEVFN(0, j));
- if (!temp)
- continue;
- if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
- pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
+ list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) {
+ pci_dev_get(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
+ pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
if (bctl & PCI_BRIDGE_CTL_VGA) {
ctrl_err(ctrl,
"Cannot remove display device %s\n",
- pci_name(temp));
- pci_dev_put(temp);
+ pci_name(dev));
+ pci_dev_put(dev);
rc = -EINVAL;
break;
}
}
- pci_stop_and_remove_bus_device(temp);
+ pci_stop_and_remove_bus_device(dev);
/*
* Ensure that no new Requests will be generated from
* the device.
*/
if (presence) {
- pci_read_config_word(temp, PCI_COMMAND, &command);
+ pci_read_config_word(dev, PCI_COMMAND, &command);
command &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
command |= PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(temp, PCI_COMMAND, command);
+ pci_write_config_word(dev, PCI_COMMAND, command);
}
- pci_dev_put(temp);
+ pci_dev_put(dev);
}
return rc;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index dee68e0698e1..7db249a25016 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -172,25 +172,6 @@ error:
return -ENOMEM;
}
-static int __init init_pci_slots(void)
-{
- struct zpci_dev *zdev;
- int device = 0;
-
- /*
- * Create a structure for each slot, and register that slot
- * with the pci_hotplug subsystem.
- */
- mutex_lock(&zpci_list_lock);
- list_for_each_entry(zdev, &zpci_list, entry) {
- init_pci_slot(zdev);
- device++;
- }
-
- mutex_unlock(&zpci_list_lock);
- return (device) ? 0 : -ENODEV;
-}
-
static void exit_pci_slot(struct zpci_dev *zdev)
{
struct list_head *tmp, *n;
@@ -205,6 +186,26 @@ static void exit_pci_slot(struct zpci_dev *zdev)
}
}
+static struct pci_hp_callback_ops hp_ops = {
+ .create_slot = init_pci_slot,
+ .remove_slot = exit_pci_slot,
+};
+
+static void __init init_pci_slots(void)
+{
+ struct zpci_dev *zdev;
+
+ /*
+ * Create a structure for each slot, and register that slot
+ * with the pci_hotplug subsystem.
+ */
+ mutex_lock(&zpci_list_lock);
+ list_for_each_entry(zdev, &zpci_list, entry) {
+ init_pci_slot(zdev);
+ }
+ mutex_unlock(&zpci_list_lock);
+}
+
static void __exit exit_pci_slots(void)
{
struct list_head *tmp, *n;
@@ -224,28 +225,19 @@ static void __exit exit_pci_slots(void)
static int __init pci_hotplug_s390_init(void)
{
- /*
- * Do specific initialization stuff for your driver here
- * like initializing your controller hardware (if any) and
- * determining the number of slots you have in the system
- * right now.
- */
-
- if (!pci_probe)
+ if (!s390_pci_probe)
return -EOPNOTSUPP;
- /* register callbacks for slot handling from arch code */
- mutex_lock(&zpci_list_lock);
- hotplug_ops.create_slot = init_pci_slot;
- hotplug_ops.remove_slot = exit_pci_slot;
- mutex_unlock(&zpci_list_lock);
- pr_info("registered hotplug slot callbacks\n");
- return init_pci_slots();
+ zpci_register_hp_ops(&hp_ops);
+ init_pci_slots();
+
+ return 0;
}
static void __exit pci_hotplug_s390_exit(void)
{
exit_pci_slots();
+ zpci_deregister_hp_ops();
}
module_init(pci_hotplug_s390_init);
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 574421bc2fa6..b2781dfe60e9 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -334,7 +334,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
struct slot *slot = bss_hotplug_slot->private;
struct pci_bus *new_bus = NULL;
struct pci_dev *dev;
- int func, num_funcs;
+ int num_funcs;
int new_ppb = 0;
int rc;
char *ssdt = NULL;
@@ -381,29 +381,26 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
* to the Linux PCI interface and tell the drivers
* about them.
*/
- for (func = 0; func < num_funcs; func++) {
- dev = pci_get_slot(slot->pci_bus,
- PCI_DEVFN(slot->device_num + 1,
- PCI_FUNC(func)));
- if (dev) {
- /* Need to do slot fixup on PPB before fixup of children
- * (PPB's pcidev_info needs to be in pcidev_info list
- * before child's SN_PCIDEV_INFO() call to setup
- * pdi_host_pcidev_info).
- */
- pcibios_fixup_device_resources(dev);
- if (SN_ACPI_BASE_SUPPORT())
- sn_acpi_slot_fixup(dev);
- else
- sn_io_slot_fixup(dev);
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- pci_hp_add_bridge(dev);
- if (dev->subordinate) {
- new_bus = dev->subordinate;
- new_ppb = 1;
- }
+ list_for_each_entry(dev, &slot->pci_bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
+ continue;
+
+ /* Need to do slot fixup on PPB before fixup of children
+ * (PPB's pcidev_info needs to be in pcidev_info list
+ * before child's SN_PCIDEV_INFO() call to setup
+ * pdi_host_pcidev_info).
+ */
+ pcibios_fixup_device_resources(dev);
+ if (SN_ACPI_BASE_SUPPORT())
+ sn_acpi_slot_fixup(dev);
+ else
+ sn_io_slot_fixup(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_hp_add_bridge(dev);
+ if (dev->subordinate) {
+ new_bus = dev->subordinate;
+ new_ppb = 1;
}
- pci_dev_put(dev);
}
}
@@ -483,8 +480,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
{
struct slot *slot = bss_hotplug_slot->private;
- struct pci_dev *dev;
- int func;
+ struct pci_dev *dev, *temp;
int rc;
acpi_owner_id ssdt_id = 0;
@@ -545,15 +541,14 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
}
/* Free the SN resources assigned to the Linux device.*/
- for (func = 0; func < 8; func++) {
- dev = pci_get_slot(slot->pci_bus,
- PCI_DEVFN(slot->device_num + 1,
- PCI_FUNC(func)));
- if (dev) {
- sn_bus_free_data(dev);
- pci_stop_and_remove_bus_device(dev);
- pci_dev_put(dev);
- }
+ list_for_each_entry_safe(dev, temp, &slot->pci_bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
+ continue;
+
+ pci_dev_get(dev);
+ sn_bus_free_data(dev);
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
}
/* Remove the SSDT for the slot from the ACPI namespace */
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index c627ed9957d1..b0e83132542e 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -40,7 +40,7 @@ int __ref shpchp_configure_device(struct slot *p_slot)
struct controller *ctrl = p_slot->ctrl;
struct pci_dev *bridge = ctrl->pci_dev;
struct pci_bus *parent = bridge->subordinate;
- int num, fn;
+ int num;
dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
if (dev) {
@@ -57,24 +57,20 @@ int __ref shpchp_configure_device(struct slot *p_slot)
return -ENODEV;
}
- for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn));
- if (!dev)
+ list_for_each_entry(dev, &parent->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != p_slot->device)
continue;
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS))
pci_hp_add_bridge(dev);
- pci_dev_put(dev);
}
pci_assign_unassigned_bridge_resources(bridge);
- for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn));
- if (!dev)
+ list_for_each_entry(dev, &parent->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != p_slot->device)
continue;
pci_configure_slot(dev);
- pci_dev_put(dev);
}
pci_bus_add_devices(parent);
@@ -85,32 +81,32 @@ int __ref shpchp_configure_device(struct slot *p_slot)
int shpchp_unconfigure_device(struct slot *p_slot)
{
int rc = 0;
- int j;
u8 bctl = 0;
struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
+ struct pci_dev *dev, *temp;
struct controller *ctrl = p_slot->ctrl;
ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
__func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);
- for (j = 0; j < 8 ; j++) {
- struct pci_dev *temp = pci_get_slot(parent,
- (p_slot->device << 3) | j);
- if (!temp)
+ list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != p_slot->device)
continue;
- if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
+
+ pci_dev_get(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
if (bctl & PCI_BRIDGE_CTL_VGA) {
ctrl_err(ctrl,
"Cannot remove display device %s\n",
- pci_name(temp));
- pci_dev_put(temp);
+ pci_name(dev));
+ pci_dev_put(dev);
rc = -EINVAL;
break;
}
}
- pci_stop_and_remove_bus_device(temp);
- pci_dev_put(temp);
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
}
return rc;
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index c18e5bf444fa..ee599f274f05 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -33,7 +33,6 @@ static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
- int rc;
struct pci_bus *child;
if (bus->number == busnr)
@@ -48,12 +47,7 @@ static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
return NULL;
pci_bus_insert_busn_res(child, busnr, busnr);
- child->dev.parent = bus->bridge;
- rc = pci_bus_add_child(child);
- if (rc) {
- pci_remove_bus(child);
- return NULL;
- }
+ bus->is_added = 1;
return child;
}
@@ -123,8 +117,6 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
virtfn->is_virtfn = 1;
rc = pci_bus_add_device(virtfn);
- if (rc)
- goto failed1;
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index e407c61559ca..39c937f9b426 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -302,48 +302,11 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
return 0;
}
-static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle)
-{
- int num;
- unsigned int seg, bus;
-
- /*
- * The string should be the same as root bridge's name
- * Please look at 'pci_scan_bus_parented'
- */
- num = sscanf(dev_name(dev), "pci%04x:%02x", &seg, &bus);
- if (num != 2)
- return -ENODEV;
- *handle = acpi_get_pci_rootbridge_handle(seg, bus);
- if (!*handle)
- return -ENODEV;
- return 0;
-}
-
static void pci_acpi_setup(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
- acpi_status status;
- acpi_handle dummy;
-
- /*
- * Evaluate and parse _PRT, if exists. This code allows parsing of
- * _PRT objects within the scope of non-bridge devices. Note that
- * _PRTs within the scope of a PCI bridge assume the bridge's
- * subordinate bus number.
- *
- * TBD: Can _PRTs exist within the scope of non-bridge PCI devices?
- */
- status = acpi_get_handle(handle, METHOD_NAME__PRT, &dummy);
- if (ACPI_SUCCESS(status)) {
- unsigned char bus;
-
- bus = pci_dev->subordinate ?
- pci_dev->subordinate->number : pci_dev->bus->number;
- acpi_pci_irq_add_prt(handle, pci_domain_nr(pci_dev->bus), bus);
- }
if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid)
return;
@@ -358,7 +321,6 @@ static void pci_acpi_setup(struct device *dev)
static void pci_acpi_cleanup(struct device *dev)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
@@ -367,16 +329,11 @@ static void pci_acpi_cleanup(struct device *dev)
device_set_run_wake(dev, false);
pci_acpi_remove_pm_notifier(adev);
}
-
- if (pci_dev->subordinate)
- acpi_pci_irq_del_prt(pci_domain_nr(pci_dev->bus),
- pci_dev->subordinate->number);
}
static struct acpi_bus_type acpi_pci_bus = {
.bus = &pci_bus_type,
.find_device = acpi_pci_find_device,
- .find_bridge = acpi_pci_find_root_bridge,
.setup = pci_acpi_setup,
.cleanup = pci_acpi_cleanup,
};
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f79cbcd3944b..1fa1e482a999 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -392,7 +392,7 @@ static void pci_device_shutdown(struct device *dev)
* Turn off Bus Master bit on the device to tell it to not
* continue to do DMA
*/
- pci_disable_device(pci_dev);
+ pci_clear_master(pci_dev);
}
#ifdef CONFIG_PM
@@ -628,6 +628,7 @@ static int pci_pm_suspend(struct device *dev)
goto Fixup;
}
+ pci_dev->state_saved = false;
if (pm->suspend) {
pci_power_t prev = pci_dev->current_state;
int error;
@@ -774,6 +775,7 @@ static int pci_pm_freeze(struct device *dev)
return 0;
}
+ pci_dev->state_saved = false;
if (pm->freeze) {
int error;
@@ -862,6 +864,7 @@ static int pci_pm_poweroff(struct device *dev)
goto Fixup;
}
+ pci_dev->state_saved = false;
if (pm->poweroff) {
int error;
@@ -987,6 +990,7 @@ static int pci_pm_runtime_suspend(struct device *dev)
if (!pm || !pm->runtime_suspend)
return -ENOSYS;
+ pci_dev->state_saved = false;
pci_dev->no_d3cold = false;
error = pm->runtime_suspend(dev);
suspend_report_result(pm->runtime_suspend, error);
@@ -1186,9 +1190,13 @@ pci_dev_driver(const struct pci_dev *dev)
static int pci_bus_match(struct device *dev, struct device_driver *drv)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *pci_drv = to_pci_driver(drv);
+ struct pci_driver *pci_drv;
const struct pci_device_id *found_id;
+ if (!pci_dev->match_driver)
+ return 0;
+
+ pci_drv = to_pci_driver(drv);
found_id = pci_match_device(pci_drv, pci_dev);
if (found_id)
return 1;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0c4f641b7be1..b099e0025d2b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -842,9 +842,8 @@ static struct pci_cap_saved_state *pci_find_saved_cap(
struct pci_dev *pci_dev, char cap)
{
struct pci_cap_saved_state *tmp;
- struct hlist_node *pos;
- hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
+ hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
if (tmp->cap.cap_nr == cap)
return tmp;
}
@@ -1041,7 +1040,6 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
struct pci_saved_state *state;
struct pci_cap_saved_state *tmp;
struct pci_cap_saved_data *cap;
- struct hlist_node *pos;
size_t size;
if (!dev->state_saved)
@@ -1049,7 +1047,7 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
- hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
+ hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
state = kzalloc(size, GFP_KERNEL);
@@ -1060,7 +1058,7 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
sizeof(state->config_space));
cap = state->cap;
- hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
+ hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
memcpy(cap, &tmp->cap, len);
cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
@@ -1151,8 +1149,7 @@ int pci_reenable_device(struct pci_dev *dev)
return 0;
}
-static int __pci_enable_device_flags(struct pci_dev *dev,
- resource_size_t flags)
+static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
int err;
int i, bars = 0;
@@ -1169,7 +1166,7 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
}
- if (atomic_add_return(1, &dev->enable_cnt) > 1)
+ if (atomic_inc_return(&dev->enable_cnt) > 1)
return 0; /* already enabled */
/* only skip sriov related */
@@ -1196,7 +1193,7 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
*/
int pci_enable_device_io(struct pci_dev *dev)
{
- return __pci_enable_device_flags(dev, IORESOURCE_IO);
+ return pci_enable_device_flags(dev, IORESOURCE_IO);
}
/**
@@ -1209,7 +1206,7 @@ int pci_enable_device_io(struct pci_dev *dev)
*/
int pci_enable_device_mem(struct pci_dev *dev)
{
- return __pci_enable_device_flags(dev, IORESOURCE_MEM);
+ return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
/**
@@ -1225,7 +1222,7 @@ int pci_enable_device_mem(struct pci_dev *dev)
*/
int pci_enable_device(struct pci_dev *dev)
{
- return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
+ return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
/*
@@ -1396,7 +1393,10 @@ pci_disable_device(struct pci_dev *dev)
if (dr)
dr->enabled = 0;
- if (atomic_sub_return(1, &dev->enable_cnt) != 0)
+ dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
+ "disabling already-disabled device");
+
+ if (atomic_dec_return(&dev->enable_cnt) != 0)
return;
do_pci_disable_device(dev);
@@ -2036,17 +2036,20 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
void pci_free_cap_save_buffers(struct pci_dev *dev)
{
struct pci_cap_saved_state *tmp;
- struct hlist_node *pos, *n;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
+ hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
kfree(tmp);
}
/**
- * pci_enable_ari - enable ARI forwarding if hardware support it
+ * pci_configure_ari - enable or disable ARI forwarding
* @dev: the PCI device
+ *
+ * If @dev and its upstream bridge both support ARI, enable ARI in the
+ * bridge. Otherwise, disable ARI in the bridge.
*/
-void pci_enable_ari(struct pci_dev *dev)
+void pci_configure_ari(struct pci_dev *dev)
{
u32 cap;
struct pci_dev *bridge;
@@ -2054,9 +2057,6 @@ void pci_enable_ari(struct pci_dev *dev)
if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
return;
- if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI))
- return;
-
bridge = dev->bus->self;
if (!bridge)
return;
@@ -2065,8 +2065,15 @@ void pci_enable_ari(struct pci_dev *dev)
if (!(cap & PCI_EXP_DEVCAP2_ARI))
return;
- pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI);
- bridge->ari_enabled = 1;
+ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
+ pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ARI);
+ bridge->ari_enabled = 1;
+ } else {
+ pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ARI);
+ bridge->ari_enabled = 0;
+ }
}
/**
@@ -3742,18 +3749,6 @@ resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
return align;
}
-/**
- * pci_is_reassigndev - check if specified PCI is target device to reassign
- * @dev: the PCI device to check
- *
- * RETURNS: non-zero for PCI device is a target device to reassign,
- * or zero is not.
- */
-int pci_is_reassigndev(struct pci_dev *dev)
-{
- return (pci_specified_resource_alignment(dev) != 0);
-}
-
/*
* This function disables memory decoding and releases memory resources
* of the device specified by kernel's boot parameter 'pci=resource_alignment='.
@@ -3768,7 +3763,9 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
resource_size_t align, size;
u16 command;
- if (!pci_is_reassigndev(dev))
+ /* check if specified PCI is target device to reassign */
+ align = pci_specified_resource_alignment(dev);
+ if (!align)
return;
if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
@@ -3784,7 +3781,6 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
command &= ~PCI_COMMAND_MEMORY;
pci_write_config_word(dev, PCI_COMMAND, command);
- align = pci_specified_resource_alignment(dev);
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
r = &dev->resource[i];
if (!(r->flags & IORESOURCE_MEM))
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index adfd172c5b9b..7346ee68f47d 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -203,8 +203,8 @@ extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
extern int pci_resource_bar(struct pci_dev *dev, int resno,
enum pci_bar_type *type);
-extern int pci_bus_add_child(struct pci_bus *bus);
-extern void pci_enable_ari(struct pci_dev *dev);
+extern void pci_configure_ari(struct pci_dev *dev);
+
/**
* pci_ari_enabled - query ARI forwarding status
* @bus: the PCI bus
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 8474b6a4fc9b..d320df6375a2 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -556,6 +556,9 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
struct pcie_link_state *link;
int blacklist = !!pcie_aspm_sanity_check(pdev);
+ if (!aspm_support_enabled)
+ return;
+
if (!pci_is_pcie(pdev) || pdev->link_state)
return;
if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
@@ -634,10 +637,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link, *root, *parent_link;
- if (!pci_is_pcie(pdev) || !parent || !parent->link_state)
- return;
- if ((pci_pcie_type(parent) != PCI_EXP_TYPE_ROOT_PORT) &&
- (pci_pcie_type(parent) != PCI_EXP_TYPE_DOWNSTREAM))
+ if (!parent || !parent->link_state)
return;
down_read(&pci_bus_sem);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index b42133afca98..31063ac30992 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -272,7 +272,7 @@ static int get_port_device_capability(struct pci_dev *dev)
/* Hot-Plug Capable */
if ((cap_mask & PCIE_PORT_SERVICE_HP) &&
- dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT) {
+ pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT) {
pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, &reg32);
if (reg32 & PCI_EXP_SLTCAP_HPC) {
services |= PCIE_PORT_SERVICE_HP;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2dcd22d9c816..b494066ef32f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -623,6 +623,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
{
struct pci_bus *child;
int i;
+ int ret;
/*
* Allocate a new bus, and inherit stuff from the parent..
@@ -637,8 +638,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
child->bus_flags = parent->bus_flags;
/* initialize some portions of the bus device, but don't register it
- * now as the parent is not properly set up yet. This device will get
- * registered later in pci_bus_add_devices()
+ * now as the parent is not properly set up yet.
*/
child->dev.class = &pcibus_class;
dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
@@ -651,11 +651,14 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
child->primary = parent->busn_res.start;
child->busn_res.end = 0xff;
- if (!bridge)
- return child;
+ if (!bridge) {
+ child->dev.parent = parent->bridge;
+ goto add_dev;
+ }
child->self = bridge;
child->bridge = get_device(&bridge->dev);
+ child->dev.parent = child->bridge;
pci_set_bus_of_node(child);
pci_set_bus_speed(child);
@@ -666,6 +669,13 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
}
bridge->subordinate = child;
+add_dev:
+ ret = device_register(&child->dev);
+ WARN_ON(ret < 0);
+
+ /* Create legacy_io and legacy_mem files for this bus */
+ pci_create_legacy_files(child);
+
return child;
}
@@ -1285,7 +1295,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_vpd_pci22_init(dev);
/* Alternative Routing-ID Forwarding */
- pci_enable_ari(dev);
+ pci_configure_ari(dev);
/* Single Root I/O Virtualization */
pci_iov_init(dev);
@@ -1296,10 +1306,12 @@ static void pci_init_capabilities(struct pci_dev *dev)
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
+ int ret;
+
device_initialize(&dev->dev);
dev->dev.release = pci_release_dev;
- pci_dev_get(dev);
+ set_dev_node(&dev->dev, pcibus_to_node(bus));
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
@@ -1326,6 +1338,17 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
down_write(&pci_bus_sem);
list_add_tail(&dev->bus_list, &bus->devices);
up_write(&pci_bus_sem);
+
+ pci_fixup_device(pci_fixup_final, dev);
+ ret = pcibios_add_device(dev);
+ WARN_ON(ret < 0);
+
+ /* Notifier could use PCI capabilities */
+ dev->match_driver = false;
+ ret = device_add(&dev->dev);
+ WARN_ON(ret < 0);
+
+ pci_proc_attach_device(dev);
}
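
pci_device_add() now calls device_add() itself, with dev->match_driver cleared so that driver binding is deferred until the bus scan decides to attach drivers. A hedged sketch of the generic two-step device_initialize()/device_add() registration this relies on; my_dev and my_dev_create() are illustrative, not the PCI code.

#include <linux/device.h>
#include <linux/slab.h>

struct my_dev {
        struct device dev;
};

static void my_dev_release(struct device *dev)
{
        kfree(container_of(dev, struct my_dev, dev));
}

static struct my_dev *my_dev_create(struct device *parent, const char *name)
{
        struct my_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
                return NULL;

        device_initialize(&d->dev);             /* refcount is live from here */
        d->dev.parent = parent;
        d->dev.release = my_dev_release;
        dev_set_name(&d->dev, "%s", name);

        if (device_add(&d->dev)) {              /* publish: sysfs, uevent, ... */
                put_device(&d->dev);            /* release() frees the memory */
                return NULL;
        }
        return d;
}
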
struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
@@ -1348,31 +1371,31 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
}
EXPORT_SYMBOL(pci_scan_single_device);
-static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
+static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
- u16 cap;
- unsigned pos, next_fn;
+ int pos;
+ u16 cap = 0;
+ unsigned next_fn;
- if (!dev)
- return 0;
+ if (pci_ari_enabled(bus)) {
+ if (!dev)
+ return 0;
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+ if (!pos)
+ return 0;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
- if (!pos)
- return 0;
- pci_read_config_word(dev, pos + 4, &cap);
- next_fn = cap >> 8;
- if (next_fn <= fn)
- return 0;
- return next_fn;
-}
+ pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
+ next_fn = PCI_ARI_CAP_NFN(cap);
+ if (next_fn <= fn)
+ return 0; /* protect against malformed list */
-static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
-{
- return (fn + 1) % 8;
-}
+ return next_fn;
+ }
+
+ /* dev may be NULL for non-contiguous multifunction devices */
+ if (!dev || dev->multifunction)
+ return (fn + 1) % 8;
-static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
-{
return 0;
}
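
The unified next_fn() above decodes the ARI "next function" pointer from bits 15:8 of the capability word at offset PCI_ARI_CAP, and otherwise falls back to the traditional modulo-8 walk. A stand-alone sketch of the ARI decode, with made-up capability values.

#include <stdint.h>
#include <stdio.h>

#define ARI_CAP_NFN(x)  (((x) >> 8) & 0xff)     /* next function number */

int main(void)
{
        /* Fake capability words for functions 0, 5 and 32 of one device. */
        uint16_t caps[256] = { 0 };

        caps[0]  = 5 << 8;      /* fn 0  -> fn 5  */
        caps[5]  = 32 << 8;     /* fn 5  -> fn 32 */
        caps[32] = 0;           /* fn 32 is the last one */

        for (unsigned fn = 0;;) {
                printf("function %u\n", fn);
                unsigned next = ARI_CAP_NFN(caps[fn]);
                if (next <= fn)         /* 0 or malformed list: stop */
                        break;
                fn = next;
        }
        return 0;
}
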
@@ -1405,7 +1428,6 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
{
unsigned fn, nr = 0;
struct pci_dev *dev;
- unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;
if (only_one_child(bus) && (devfn > 0))
return 0; /* Already scanned the entire slot */
@@ -1416,12 +1438,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
if (!dev->is_added)
nr++;
- if (pci_ari_enabled(bus))
- next_fn = next_ari_fn;
- else if (dev->multifunction)
- next_fn = next_trad_fn;
-
- for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
+ for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
if (!dev->is_added)
@@ -1632,6 +1649,18 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
return max;
}
+/**
+ * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
+ * @bridge: Host bridge to set up.
+ *
+ * Default empty implementation. Replace with an architecture-specific setup
+ * routine, if necessary.
+ */
+int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ return 0;
+}
+
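
pcibios_root_bridge_prepare() is added as a __weak stub so that an architecture can supply its own version without touching the core. The same linker behaviour can be shown in plain C with GCC/Clang weak symbols; bridge_prepare() and struct host_bridge are illustrative stand-ins, not kernel API.

#include <stdio.h>

struct host_bridge { const char *name; };

/* Weak default: a strong definition in another object file would win. */
__attribute__((weak)) int bridge_prepare(struct host_bridge *bridge)
{
        return 0;       /* nothing platform-specific to do */
}

int main(void)
{
        struct host_bridge b = { .name = "pci0000:00" };

        if (bridge_prepare(&b))
                fprintf(stderr, "bridge prepare failed\n");
        else
                printf("prepared %s with the default hook\n", b.name);
        return 0;
}
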
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
@@ -1644,13 +1673,13 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
char bus_addr[64];
char *fmt;
-
b = pci_alloc_bus();
if (!b)
return NULL;
b->sysdata = sysdata;
b->ops = ops;
+ b->number = b->busn_res.start = bus;
b2 = pci_find_bus(pci_domain_nr(b), bus);
if (b2) {
/* If we already got to this bus through a different bridge, ignore it */
@@ -1665,6 +1694,10 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
bridge->dev.parent = parent;
bridge->dev.release = pci_release_bus_bridge_dev;
dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
+ error = pcibios_root_bridge_prepare(bridge);
+ if (error)
+ goto bridge_dev_reg_err;
+
error = device_register(&bridge->dev);
if (error)
goto bridge_dev_reg_err;
@@ -1685,8 +1718,6 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files(b);
- b->number = b->busn_res.start = bus;
-
if (parent)
dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
else
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 9b8505ccc56d..0b009470e6db 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -21,7 +21,7 @@ static loff_t
proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
{
loff_t new = -1;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
mutex_lock(&inode->i_mutex);
switch (whence) {
@@ -46,7 +46,7 @@ proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
static ssize_t
proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- const struct inode *ino = file->f_path.dentry->d_inode;
+ const struct inode *ino = file_inode(file);
const struct proc_dir_entry *dp = PDE(ino);
struct pci_dev *dev = dp->data;
unsigned int pos = *ppos;
@@ -132,7 +132,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
static ssize_t
proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos)
{
- struct inode *ino = file->f_path.dentry->d_inode;
+ struct inode *ino = file_inode(file);
const struct proc_dir_entry *dp = PDE(ino);
struct pci_dev *dev = dp->data;
int pos = *ppos;
@@ -212,7 +212,7 @@ struct pci_filp_private {
static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- const struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ const struct proc_dir_entry *dp = PDE(file_inode(file));
struct pci_dev *dev = dp->data;
#ifdef HAVE_PCI_MMAP
struct pci_filp_private *fpriv = file->private_data;
@@ -253,7 +253,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
#ifdef HAVE_PCI_MMAP
static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
const struct proc_dir_entry *dp = PDE(inode);
struct pci_dev *dev = dp->data;
struct pci_filp_private *fpriv = file->private_data;
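
The proc.c conversions replace open-coded file->f_path.dentry->d_inode chains with the file_inode() accessor. A simplified stand-in for that accessor pattern (not the kernel's actual definition), showing why callers should not reach into the struct layout themselves.

#include <stdio.h>

struct inode { unsigned long ino; };
struct file  { struct inode *f_inode; /* other fields elided */ };

/* Keep callers behind an accessor so the field can move without
 * touching every user. */
static inline struct inode *file_inode_mock(const struct file *f)
{
        return f->f_inode;
}

int main(void)
{
        struct inode ino  = { .ino = 42 };
        struct file  filp = { .f_inode = &ino };

        printf("inode %lu\n", file_inode_mock(&filp)->ino);
        return 0;
}
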
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 84954a726a94..cc875e6ed159 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -24,7 +24,7 @@ static void pci_stop_dev(struct pci_dev *dev)
if (dev->is_added) {
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
- device_unregister(&dev->dev);
+ device_del(&dev->dev);
dev->is_added = 0;
}
@@ -39,7 +39,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
up_write(&pci_bus_sem);
pci_free_resources(dev);
- pci_dev_put(dev);
+ put_device(&dev->dev);
}
void pci_remove_bus(struct pci_bus *bus)
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index bf969ba58e59..d0627fa9f368 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -319,13 +319,13 @@ int pci_dev_present(const struct pci_device_id *ids)
WARN_ON(in_interrupt());
while (ids->vendor || ids->subvendor || ids->class_mask) {
found = pci_get_dev_by_id(ids, NULL);
- if (found)
- goto exit;
+ if (found) {
+ pci_dev_put(found);
+ return 1;
+ }
ids++;
}
-exit:
- if (found)
- return 1;
+
return 0;
}
EXPORT_SYMBOL(pci_dev_present);
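
pci_dev_present() now drops the reference taken by pci_get_dev_by_id() before answering a plain yes/no question, instead of returning through the old exit label with the reference still held. A userspace model of that get/put discipline; lookup()/put_ref() are illustrative names only.

#include <stdio.h>

struct obj { int refcount; };

static struct obj pool[1] = { { .refcount = 1 } };

static struct obj *lookup(int id)
{
        if (id != 0)
                return NULL;
        pool[0].refcount++;             /* reference handed to the caller */
        return &pool[0];
}

static void put_ref(struct obj *o)
{
        o->refcount--;
}

static int obj_present(int id)
{
        struct obj *o = lookup(id);

        if (o) {
                put_ref(o);             /* we only wanted a yes/no answer */
                return 1;
        }
        return 0;
}

int main(void)
{
        int present = obj_present(0);

        printf("present: %d, refcount after query: %d\n",
               present, pool[0].refcount);      /* prints 1 and 1 */
        return 0;
}
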
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 6d3591d57ea0..7e8739e25b9e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -283,7 +283,7 @@ static void assign_requested_resources_sorted(struct list_head *head,
idx = res - &dev_res->dev->resource[0];
if (resource_size(res) &&
pci_assign_resource(dev_res->dev, idx)) {
- if (fail_head && !pci_is_root_bus(dev_res->dev->bus)) {
+ if (fail_head) {
/*
* if the failed res is for ROM BAR, and it will
* be enabled later, don't add it to the list
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 673c14ea11e3..5292db69c426 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -484,7 +484,7 @@ static int socket_early_resume(struct pcmcia_socket *skt)
static int socket_late_resume(struct pcmcia_socket *skt)
{
- int ret;
+ int ret = 0;
mutex_lock(&skt->ops_mutex);
skt->state &= ~SOCKET_SUSPEND;
@@ -511,19 +511,31 @@ static int socket_late_resume(struct pcmcia_socket *skt)
return socket_insert(skt);
}
+ if (!(skt->state & SOCKET_CARDBUS) && (skt->callback))
+ ret = skt->callback->early_resume(skt);
+ return ret;
+}
+
+/*
+ * Finalize the resume. In case of a CardBus socket, we have
+ * to rebind the devices as we can't be certain whether the card
+ * has been replaced or not.
+ */
+static int socket_complete_resume(struct pcmcia_socket *skt)
+{
+ int ret = 0;
#ifdef CONFIG_CARDBUS
if (skt->state & SOCKET_CARDBUS) {
/* We can't be sure the CardBus card is the same
* as the one previously inserted. Therefore, remove
* and re-add... */
cb_free(skt);
- cb_alloc(skt);
- return 0;
+ ret = cb_alloc(skt);
+ if (ret)
+ cb_free(skt);
}
#endif
- if (!(skt->state & SOCKET_CARDBUS) && (skt->callback))
- skt->callback->early_resume(skt);
- return 0;
+ return ret;
}
/*
@@ -533,11 +545,15 @@ static int socket_late_resume(struct pcmcia_socket *skt)
*/
static int socket_resume(struct pcmcia_socket *skt)
{
+ int err;
if (!(skt->state & SOCKET_SUSPEND))
return -EBUSY;
socket_early_resume(skt);
- return socket_late_resume(skt);
+ err = socket_late_resume(skt);
+ if (!err)
+ err = socket_complete_resume(skt);
+ return err;
}
static void socket_remove(struct pcmcia_socket *skt)
@@ -848,6 +864,12 @@ static int __used pcmcia_socket_dev_resume(struct device *dev)
return __pcmcia_pm_op(dev, socket_late_resume);
}
+static void __used pcmcia_socket_dev_complete(struct device *dev)
+{
+ WARN(__pcmcia_pm_op(dev, socket_complete_resume),
+ "failed to complete resume");
+}
+
static const struct dev_pm_ops pcmcia_socket_pm_ops = {
/* dev_resume may be called with IRQs enabled */
SET_SYSTEM_SLEEP_PM_OPS(NULL,
@@ -862,6 +884,7 @@ static const struct dev_pm_ops pcmcia_socket_pm_ops = {
.resume_noirq = pcmcia_socket_dev_resume_noirq,
.thaw_noirq = pcmcia_socket_dev_resume_noirq,
.restore_noirq = pcmcia_socket_dev_resume_noirq,
+ .complete = pcmcia_socket_dev_complete,
};
#define PCMCIA_SOCKET_CLASS_PM_OPS (&pcmcia_socket_pm_ops)
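
The cs.c changes split resume into socket_late_resume() plus a new socket_complete_resume() wired to the PM core's .complete callback, so the slow CardBus re-probe runs only after every device has resumed. A hedged sketch of a dev_pm_ops using a .complete stage; my_resume()/my_complete() are placeholder names, not the pcmcia code.

#include <linux/device.h>
#include <linux/pm.h>

static int my_resume(struct device *dev)
{
        /* quick state restore; leave heavy re-enumeration for later */
        return 0;
}

static void my_complete(struct device *dev)
{
        /* heavier work, e.g. re-probing a possibly replaced card */
}

static const struct dev_pm_ops my_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(NULL, my_resume)
        .complete = my_complete,
};
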
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 3578e1ca97a0..519c4d6003a6 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -133,8 +133,6 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
goto err_out_free_res;
}
- pci_set_drvdata(dev, &sockets[i].socket);
-
for (i = 0; i<socket_count; i++) {
sockets[i].socket.dev.parent = &dev->dev;
sockets[i].socket.ops = &i82092aa_operations;
@@ -164,14 +162,14 @@ err_out_disable:
static void i82092aa_pci_remove(struct pci_dev *dev)
{
- struct pcmcia_socket *socket = pci_get_drvdata(dev);
+ int i;
enter("i82092aa_pci_remove");
free_irq(dev->irq, i82092aa_interrupt);
- if (socket)
- pcmcia_unregister_socket(socket);
+ for (i = 0; i < socket_count; i++)
+ pcmcia_unregister_socket(&sockets[i].socket);
leave("i82092aa_pci_remove");
}
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 430a9ac56091..065704c605d5 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -369,12 +369,12 @@ static int do_validate_mem(struct pcmcia_socket *s,
}
}
- free_region(res2);
- free_region(res1);
-
dev_dbg(&s->dev, "cs: memory probe 0x%06lx-0x%06lx: %p %p %u %u %u",
base, base+size-1, res1, res2, ret, info1, info2);
+ free_region(res2);
+ free_region(res1);
+
if ((ret) || (info1 != info2) || (info1 == 0))
return -EINVAL;
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index 75806be344e5..d98a08612492 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock)
socket = &vrc4171_sockets[slot];
socket->csc_irq = search_nonuse_irq();
socket->io_irq = search_nonuse_irq();
+ spin_lock_init(&socket->lock);
return 0;
}
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 393b0ecf4ca4..34f51d2d90d2 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -227,7 +227,7 @@ config PINCTRL_EXYNOS5440
select PINCONF
source "drivers/pinctrl/mvebu/Kconfig"
-
+source "drivers/pinctrl/sh-pfc/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
config PINCTRL_XWAY
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 0fd5f57fcb57..f82cc5baf767 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -49,4 +49,6 @@ obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
obj-$(CONFIG_PLAT_ORION) += mvebu/
+obj-$(CONFIG_ARCH_SHMOBILE) += sh-pfc/
+obj-$(CONFIG_SUPERH) += sh-pfc/
obj-$(CONFIG_PLAT_SPEAR) += spear/
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 3c80dd98304b..36d20293de5c 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -2104,6 +2104,10 @@ static struct pinctrl_desc nmk_pinctrl_desc = {
static const struct of_device_id nmk_pinctrl_match[] = {
{
+ .compatible = "stericsson,nmk-pinctrl-stn8815",
+ .data = (void *)PINCTRL_NMK_STN8815,
+ },
+ {
.compatible = "stericsson,nmk-pinctrl",
.data = (void *)PINCTRL_NMK_DB8500,
},
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c
index 5f8441ef59ca..f206df175656 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/pinctrl-samsung.c
@@ -944,9 +944,9 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
}
static const struct of_device_id samsung_pinctrl_dt_match[] = {
- { .compatible = "samsung,pinctrl-exynos4210",
+ { .compatible = "samsung,exynos4210-pinctrl",
.data = (void *)exynos4210_pin_ctrl },
- { .compatible = "samsung,pinctrl-exynos4x12",
+ { .compatible = "samsung,exynos4x12-pinctrl",
.data = (void *)exynos4x12_pin_ctrl },
{},
};
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
new file mode 100644
index 000000000000..c3340f54d2ad
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -0,0 +1,116 @@
+#
+# Renesas SH and SH Mobile PINCTRL drivers
+#
+
+if ARCH_SHMOBILE || SUPERH
+
+config PINCTRL_SH_PFC
+ # XXX move off the gpio dependency
+ depends on GENERIC_GPIO
+ select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB
+ select PINMUX
+ select PINCONF
+ def_bool y
+ help
+ This enables pin control drivers for SH and SH Mobile platforms.
+
+config GPIO_SH_PFC
+ bool "SuperH PFC GPIO support"
+ depends on PINCTRL_SH_PFC && GPIOLIB
+ help
+ This enables support for GPIOs within the SoC's pin function
+ controller.
+
+config PINCTRL_PFC_R8A7740
+ def_bool y
+ depends on ARCH_R8A7740
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A7779
+ def_bool y
+ depends on ARCH_R8A7779
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7203
+ def_bool y
+ depends on CPU_SUBTYPE_SH7203
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7264
+ def_bool y
+ depends on CPU_SUBTYPE_SH7264
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7269
+ def_bool y
+ depends on CPU_SUBTYPE_SH7269
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7372
+ def_bool y
+ depends on ARCH_SH7372
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH73A0
+ def_bool y
+ depends on ARCH_SH73A0
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7720
+ def_bool y
+ depends on CPU_SUBTYPE_SH7720
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7722
+ def_bool y
+ depends on CPU_SUBTYPE_SH7722
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7723
+ def_bool y
+ depends on CPU_SUBTYPE_SH7723
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7724
+ def_bool y
+ depends on CPU_SUBTYPE_SH7724
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7734
+ def_bool y
+ depends on CPU_SUBTYPE_SH7734
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7757
+ def_bool y
+ depends on CPU_SUBTYPE_SH7757
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7785
+ def_bool y
+ depends on CPU_SUBTYPE_SH7785
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SH7786
+ def_bool y
+ depends on CPU_SUBTYPE_SH7786
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_SHX3
+ def_bool y
+ depends on CPU_SUBTYPE_SHX3
+ depends on GENERIC_GPIO
+ select PINCTRL_SH_PFC
+
+endif
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
new file mode 100644
index 000000000000..e8b9562c47e1
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -0,0 +1,21 @@
+sh-pfc-objs = core.o pinctrl.o
+ifeq ($(CONFIG_GPIO_SH_PFC),y)
+sh-pfc-objs += gpio.o
+endif
+obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
+obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
+obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
+obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
+obj-$(CONFIG_PINCTRL_PFC_SH7372) += pfc-sh7372.o
+obj-$(CONFIG_PINCTRL_PFC_SH73A0) += pfc-sh73a0.o
+obj-$(CONFIG_PINCTRL_PFC_SH7720) += pfc-sh7720.o
+obj-$(CONFIG_PINCTRL_PFC_SH7722) += pfc-sh7722.o
+obj-$(CONFIG_PINCTRL_PFC_SH7723) += pfc-sh7723.o
+obj-$(CONFIG_PINCTRL_PFC_SH7724) += pfc-sh7724.o
+obj-$(CONFIG_PINCTRL_PFC_SH7734) += pfc-sh7734.o
+obj-$(CONFIG_PINCTRL_PFC_SH7757) += pfc-sh7757.o
+obj-$(CONFIG_PINCTRL_PFC_SH7785) += pfc-sh7785.o
+obj-$(CONFIG_PINCTRL_PFC_SH7786) += pfc-sh7786.o
+obj-$(CONFIG_PINCTRL_PFC_SHX3) += pfc-shx3.o
diff --git a/drivers/sh/pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 68169373c98b..970ddff2b0b6 100644
--- a/drivers/sh/pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -8,78 +8,61 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-#define pr_fmt(fmt) "sh_pfc " KBUILD_MODNAME ": " fmt
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sh_pfc.h>
-#include <linux/module.h>
+#define DRV_NAME "sh-pfc"
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
#include <linux/err.h>
+#include <linux/errno.h>
#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/slab.h>
#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pinctrl/machine.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
-static struct sh_pfc *sh_pfc __read_mostly;
-
-static inline bool sh_pfc_initialized(void)
-{
- return !!sh_pfc;
-}
-
-static void pfc_iounmap(struct sh_pfc *pfc)
-{
- int k;
-
- for (k = 0; k < pfc->num_resources; k++)
- if (pfc->window[k].virt)
- iounmap(pfc->window[k].virt);
-
- kfree(pfc->window);
- pfc->window = NULL;
-}
+#include "core.h"
-static int pfc_ioremap(struct sh_pfc *pfc)
+static int sh_pfc_ioremap(struct sh_pfc *pfc, struct platform_device *pdev)
{
struct resource *res;
int k;
- if (!pfc->num_resources)
+ if (pdev->num_resources == 0) {
+ pfc->num_windows = 0;
return 0;
+ }
- pfc->window = kzalloc(pfc->num_resources * sizeof(*pfc->window),
- GFP_NOWAIT);
+ pfc->window = devm_kzalloc(pfc->dev, pdev->num_resources *
+ sizeof(*pfc->window), GFP_NOWAIT);
if (!pfc->window)
- goto err1;
+ return -ENOMEM;
- for (k = 0; k < pfc->num_resources; k++) {
- res = pfc->resource + k;
+ pfc->num_windows = pdev->num_resources;
+
+ for (k = 0, res = pdev->resource; k < pdev->num_resources; k++, res++) {
WARN_ON(resource_type(res) != IORESOURCE_MEM);
pfc->window[k].phys = res->start;
pfc->window[k].size = resource_size(res);
- pfc->window[k].virt = ioremap_nocache(res->start,
- resource_size(res));
+ pfc->window[k].virt = devm_ioremap_nocache(pfc->dev, res->start,
+ resource_size(res));
if (!pfc->window[k].virt)
- goto err2;
+ return -ENOMEM;
}
return 0;
-
-err2:
- pfc_iounmap(pfc);
-err1:
- return -1;
}
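
sh_pfc_ioremap() now allocates its window array and MMIO mappings with devm_* helpers, so the old err1:/err2: unwind labels disappear: managed allocations are released automatically when probe fails or the device goes away. A hedged probe sketch of that pattern; my_priv/my_probe are illustrative names.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_priv {
        void __iomem *regs;
};

static int my_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct my_priv *priv;

        if (!res)
                return -ENODEV;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (!priv->regs)
                return -ENOMEM;         /* devm frees priv for us */

        platform_set_drvdata(pdev, priv);
        return 0;
}
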
-static void __iomem *pfc_phys_to_virt(struct sh_pfc *pfc,
- unsigned long address)
+static void __iomem *sh_pfc_phys_to_virt(struct sh_pfc *pfc,
+ unsigned long address)
{
- struct pfc_window *window;
+ struct sh_pfc_window *window;
int k;
/* scan through physical windows and convert address */
- for (k = 0; k < pfc->num_resources; k++) {
+ for (k = 0; k < pfc->num_windows; k++) {
window = pfc->window + k;
if (address < window->phys)
@@ -95,7 +78,7 @@ static void __iomem *pfc_phys_to_virt(struct sh_pfc *pfc,
return (void __iomem *)address;
}
-static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
+static int sh_pfc_enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
{
if (enum_id < r->begin)
return 0;
@@ -106,8 +89,8 @@ static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
return 1;
}
-static unsigned long gpio_read_raw_reg(void __iomem *mapped_reg,
- unsigned long reg_width)
+static unsigned long sh_pfc_read_raw_reg(void __iomem *mapped_reg,
+ unsigned long reg_width)
{
switch (reg_width) {
case 8:
@@ -122,9 +105,8 @@ static unsigned long gpio_read_raw_reg(void __iomem *mapped_reg,
return 0;
}
-static void gpio_write_raw_reg(void __iomem *mapped_reg,
- unsigned long reg_width,
- unsigned long data)
+static void sh_pfc_write_raw_reg(void __iomem *mapped_reg,
+ unsigned long reg_width, unsigned long data)
{
switch (reg_width) {
case 8:
@@ -150,9 +132,8 @@ int sh_pfc_read_bit(struct pinmux_data_reg *dr, unsigned long in_pos)
pr_debug("read_bit: addr = %lx, pos = %ld, "
"r_width = %ld\n", dr->reg, pos, dr->reg_width);
- return (gpio_read_raw_reg(dr->mapped_reg, dr->reg_width) >> pos) & 1;
+ return (sh_pfc_read_raw_reg(dr->mapped_reg, dr->reg_width) >> pos) & 1;
}
-EXPORT_SYMBOL_GPL(sh_pfc_read_bit);
void sh_pfc_write_bit(struct pinmux_data_reg *dr, unsigned long in_pos,
unsigned long value)
@@ -170,20 +151,19 @@ void sh_pfc_write_bit(struct pinmux_data_reg *dr, unsigned long in_pos,
else
clear_bit(pos, &dr->reg_shadow);
- gpio_write_raw_reg(dr->mapped_reg, dr->reg_width, dr->reg_shadow);
+ sh_pfc_write_raw_reg(dr->mapped_reg, dr->reg_width, dr->reg_shadow);
}
-EXPORT_SYMBOL_GPL(sh_pfc_write_bit);
-
-static void config_reg_helper(struct sh_pfc *pfc,
- struct pinmux_cfg_reg *crp,
- unsigned long in_pos,
- void __iomem **mapped_regp,
- unsigned long *maskp,
- unsigned long *posp)
+
+static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long in_pos,
+ void __iomem **mapped_regp,
+ unsigned long *maskp,
+ unsigned long *posp)
{
int k;
- *mapped_regp = pfc_phys_to_virt(pfc, crp->reg);
+ *mapped_regp = sh_pfc_phys_to_virt(pfc, crp->reg);
if (crp->field_width) {
*maskp = (1 << crp->field_width) - 1;
@@ -196,30 +176,30 @@ static void config_reg_helper(struct sh_pfc *pfc,
}
}
-static int read_config_reg(struct sh_pfc *pfc,
- struct pinmux_cfg_reg *crp,
- unsigned long field)
+static int sh_pfc_read_config_reg(struct sh_pfc *pfc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long field)
{
void __iomem *mapped_reg;
unsigned long mask, pos;
- config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
+ sh_pfc_config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
pr_debug("read_reg: addr = %lx, field = %ld, "
"r_width = %ld, f_width = %ld\n",
crp->reg, field, crp->reg_width, crp->field_width);
- return (gpio_read_raw_reg(mapped_reg, crp->reg_width) >> pos) & mask;
+ return (sh_pfc_read_raw_reg(mapped_reg, crp->reg_width) >> pos) & mask;
}
-static void write_config_reg(struct sh_pfc *pfc,
- struct pinmux_cfg_reg *crp,
- unsigned long field, unsigned long value)
+static void sh_pfc_write_config_reg(struct sh_pfc *pfc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long field, unsigned long value)
{
void __iomem *mapped_reg;
unsigned long mask, pos, data;
- config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
+ sh_pfc_config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
pr_debug("write_reg addr = %lx, value = %ld, field = %ld, "
"r_width = %ld, f_width = %ld\n",
@@ -228,34 +208,35 @@ static void write_config_reg(struct sh_pfc *pfc,
mask = ~(mask << pos);
value = value << pos;
- data = gpio_read_raw_reg(mapped_reg, crp->reg_width);
+ data = sh_pfc_read_raw_reg(mapped_reg, crp->reg_width);
data &= mask;
data |= value;
- if (pfc->unlock_reg)
- gpio_write_raw_reg(pfc_phys_to_virt(pfc, pfc->unlock_reg),
- 32, ~data);
+ if (pfc->info->unlock_reg)
+ sh_pfc_write_raw_reg(
+ sh_pfc_phys_to_virt(pfc, pfc->info->unlock_reg), 32,
+ ~data);
- gpio_write_raw_reg(mapped_reg, crp->reg_width, data);
+ sh_pfc_write_raw_reg(mapped_reg, crp->reg_width, data);
}
-static int setup_data_reg(struct sh_pfc *pfc, unsigned gpio)
+static int sh_pfc_setup_data_reg(struct sh_pfc *pfc, unsigned gpio)
{
- struct pinmux_gpio *gpiop = &pfc->gpios[gpio];
+ struct pinmux_gpio *gpiop = &pfc->info->gpios[gpio];
struct pinmux_data_reg *data_reg;
int k, n;
- if (!enum_in_range(gpiop->enum_id, &pfc->data))
+ if (!sh_pfc_enum_in_range(gpiop->enum_id, &pfc->info->data))
return -1;
k = 0;
while (1) {
- data_reg = pfc->data_regs + k;
+ data_reg = pfc->info->data_regs + k;
if (!data_reg->reg_width)
break;
- data_reg->mapped_reg = pfc_phys_to_virt(pfc, data_reg->reg);
+ data_reg->mapped_reg = sh_pfc_phys_to_virt(pfc, data_reg->reg);
for (n = 0; n < data_reg->reg_width; n++) {
if (data_reg->enum_ids[n] == gpiop->enum_id) {
@@ -274,23 +255,23 @@ static int setup_data_reg(struct sh_pfc *pfc, unsigned gpio)
return -1;
}
-static void setup_data_regs(struct sh_pfc *pfc)
+static void sh_pfc_setup_data_regs(struct sh_pfc *pfc)
{
struct pinmux_data_reg *drp;
int k;
- for (k = pfc->first_gpio; k <= pfc->last_gpio; k++)
- setup_data_reg(pfc, k);
+ for (k = pfc->info->first_gpio; k <= pfc->info->last_gpio; k++)
+ sh_pfc_setup_data_reg(pfc, k);
k = 0;
while (1) {
- drp = pfc->data_regs + k;
+ drp = pfc->info->data_regs + k;
if (!drp->reg_width)
break;
- drp->reg_shadow = gpio_read_raw_reg(drp->mapped_reg,
- drp->reg_width);
+ drp->reg_shadow = sh_pfc_read_raw_reg(drp->mapped_reg,
+ drp->reg_width);
k++;
}
}
@@ -298,24 +279,22 @@ static void setup_data_regs(struct sh_pfc *pfc)
int sh_pfc_get_data_reg(struct sh_pfc *pfc, unsigned gpio,
struct pinmux_data_reg **drp, int *bitp)
{
- struct pinmux_gpio *gpiop = &pfc->gpios[gpio];
+ struct pinmux_gpio *gpiop = &pfc->info->gpios[gpio];
int k, n;
- if (!enum_in_range(gpiop->enum_id, &pfc->data))
+ if (!sh_pfc_enum_in_range(gpiop->enum_id, &pfc->info->data))
return -1;
k = (gpiop->flags & PINMUX_FLAG_DREG) >> PINMUX_FLAG_DREG_SHIFT;
n = (gpiop->flags & PINMUX_FLAG_DBIT) >> PINMUX_FLAG_DBIT_SHIFT;
- *drp = pfc->data_regs + k;
+ *drp = pfc->info->data_regs + k;
*bitp = n;
return 0;
}
-EXPORT_SYMBOL_GPL(sh_pfc_get_data_reg);
-static int get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
- struct pinmux_cfg_reg **crp,
- int *fieldp, int *valuep,
- unsigned long **cntp)
+static int sh_pfc_get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
+ struct pinmux_cfg_reg **crp, int *fieldp,
+ int *valuep, unsigned long **cntp)
{
struct pinmux_cfg_reg *config_reg;
unsigned long r_width, f_width, curr_width, ncomb;
@@ -323,7 +302,7 @@ static int get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
k = 0;
while (1) {
- config_reg = pfc->cfg_regs + k;
+ config_reg = pfc->info->cfg_regs + k;
r_width = config_reg->reg_width;
f_width = config_reg->field_width;
@@ -361,12 +340,12 @@ static int get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
pinmux_enum_t *enum_idp)
{
- pinmux_enum_t enum_id = pfc->gpios[gpio].enum_id;
- pinmux_enum_t *data = pfc->gpio_data;
+ pinmux_enum_t enum_id = pfc->info->gpios[gpio].enum_id;
+ pinmux_enum_t *data = pfc->info->gpio_data;
int k;
- if (!enum_in_range(enum_id, &pfc->data)) {
- if (!enum_in_range(enum_id, &pfc->mark)) {
+ if (!sh_pfc_enum_in_range(enum_id, &pfc->info->data)) {
+ if (!sh_pfc_enum_in_range(enum_id, &pfc->info->mark)) {
pr_err("non data/mark enum_id for gpio %d\n", gpio);
return -1;
}
@@ -377,7 +356,7 @@ int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
return pos + 1;
}
- for (k = 0; k < pfc->gpio_data_size; k++) {
+ for (k = 0; k < pfc->info->gpio_data_size; k++) {
if (data[k] == enum_id) {
*enum_idp = data[k + 1];
return k + 1;
@@ -387,7 +366,6 @@ int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
pr_err("cannot locate data/mark enum_id for gpio %d\n", gpio);
return -1;
}
-EXPORT_SYMBOL_GPL(sh_pfc_gpio_to_enum);
int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
int cfg_mode)
@@ -405,19 +383,19 @@ int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
break;
case PINMUX_TYPE_OUTPUT:
- range = &pfc->output;
+ range = &pfc->info->output;
break;
case PINMUX_TYPE_INPUT:
- range = &pfc->input;
+ range = &pfc->info->input;
break;
case PINMUX_TYPE_INPUT_PULLUP:
- range = &pfc->input_pu;
+ range = &pfc->info->input_pu;
break;
case PINMUX_TYPE_INPUT_PULLDOWN:
- range = &pfc->input_pd;
+ range = &pfc->info->input_pd;
break;
default:
@@ -437,7 +415,7 @@ int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
break;
/* first check if this is a function enum */
- in_range = enum_in_range(enum_id, &pfc->function);
+ in_range = sh_pfc_enum_in_range(enum_id, &pfc->info->function);
if (!in_range) {
/* not a function enum */
if (range) {
@@ -449,7 +427,7 @@ int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
* for this case we only allow function enums
* and the enums that match the other range.
*/
- in_range = enum_in_range(enum_id, range);
+ in_range = sh_pfc_enum_in_range(enum_id, range);
/*
* special case pass through for fixed
@@ -474,19 +452,19 @@ int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
if (!in_range)
continue;
- if (get_config_reg(pfc, enum_id, &cr,
- &field, &value, &cntp) != 0)
+ if (sh_pfc_get_config_reg(pfc, enum_id, &cr,
+ &field, &value, &cntp) != 0)
goto out_err;
switch (cfg_mode) {
case GPIO_CFG_DRYRUN:
if (!*cntp ||
- (read_config_reg(pfc, cr, field) != value))
+ (sh_pfc_read_config_reg(pfc, cr, field) != value))
continue;
break;
case GPIO_CFG_REQ:
- write_config_reg(pfc, cr, field, value);
+ sh_pfc_write_config_reg(pfc, cr, field, value);
*cntp = *cntp + 1;
break;
@@ -500,11 +478,11 @@ int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
out_err:
return -1;
}
-EXPORT_SYMBOL_GPL(sh_pfc_config_gpio);
-int register_sh_pfc(struct sh_pfc *pfc)
+static int sh_pfc_probe(struct platform_device *pdev)
{
- int (*initroutine)(struct sh_pfc *) = NULL;
+ struct sh_pfc_soc_info *info;
+ struct sh_pfc *pfc;
int ret;
/*
@@ -512,61 +490,146 @@ int register_sh_pfc(struct sh_pfc *pfc)
*/
BUILD_BUG_ON(PINMUX_FLAG_TYPE > ((1 << PINMUX_FLAG_DBIT_SHIFT) - 1));
- if (sh_pfc)
- return -EBUSY;
+ info = pdev->id_entry->driver_data
+ ? (void *)pdev->id_entry->driver_data : pdev->dev.platform_data;
+ if (info == NULL)
+ return -ENODEV;
+
+ pfc = devm_kzalloc(&pdev->dev, sizeof(*pfc), GFP_KERNEL);
+ if (pfc == NULL)
+ return -ENOMEM;
+
+ pfc->info = info;
+ pfc->dev = &pdev->dev;
- ret = pfc_ioremap(pfc);
+ ret = sh_pfc_ioremap(pfc, pdev);
if (unlikely(ret < 0))
return ret;
spin_lock_init(&pfc->lock);
pinctrl_provide_dummies();
- setup_data_regs(pfc);
-
- sh_pfc = pfc;
+ sh_pfc_setup_data_regs(pfc);
/*
* Initialize pinctrl bindings first
*/
- initroutine = symbol_request(sh_pfc_register_pinctrl);
- if (initroutine) {
- ret = (*initroutine)(pfc);
- symbol_put_addr(initroutine);
-
- if (unlikely(ret != 0))
- goto err;
- } else {
- pr_err("failed to initialize pinctrl bindings\n");
- goto err;
- }
+ ret = sh_pfc_register_pinctrl(pfc);
+ if (unlikely(ret != 0))
+ return ret;
+#ifdef CONFIG_GPIO_SH_PFC
/*
* Then the GPIO chip
*/
- initroutine = symbol_request(sh_pfc_register_gpiochip);
- if (initroutine) {
- ret = (*initroutine)(pfc);
- symbol_put_addr(initroutine);
-
+ ret = sh_pfc_register_gpiochip(pfc);
+ if (unlikely(ret != 0)) {
/*
* If the GPIO chip fails to come up we still leave the
* PFC state as it is, given that there are already
* extant users of it that have succeeded by this point.
*/
- if (unlikely(ret != 0)) {
- pr_notice("failed to init GPIO chip, ignoring...\n");
- ret = 0;
- }
+ pr_notice("failed to init GPIO chip, ignoring...\n");
}
+#endif
+
+ platform_set_drvdata(pdev, pfc);
- pr_info("%s support registered\n", pfc->name);
+ pr_info("%s support registered\n", info->name);
return 0;
+}
-err:
- pfc_iounmap(pfc);
- sh_pfc = NULL;
+static int sh_pfc_remove(struct platform_device *pdev)
+{
+ struct sh_pfc *pfc = platform_get_drvdata(pdev);
+
+#ifdef CONFIG_GPIO_SH_PFC
+ sh_pfc_unregister_gpiochip(pfc);
+#endif
+ sh_pfc_unregister_pinctrl(pfc);
+
+ platform_set_drvdata(pdev, NULL);
- return ret;
+ return 0;
}
+
+static const struct platform_device_id sh_pfc_id_table[] = {
+#ifdef CONFIG_PINCTRL_PFC_R8A7740
+ { "pfc-r8a7740", (kernel_ulong_t)&r8a7740_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7779
+ { "pfc-r8a7779", (kernel_ulong_t)&r8a7779_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7203
+ { "pfc-sh7203", (kernel_ulong_t)&sh7203_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7264
+ { "pfc-sh7264", (kernel_ulong_t)&sh7264_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7269
+ { "pfc-sh7269", (kernel_ulong_t)&sh7269_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7372
+ { "pfc-sh7372", (kernel_ulong_t)&sh7372_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH73A0
+ { "pfc-sh73a0", (kernel_ulong_t)&sh73a0_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7720
+ { "pfc-sh7720", (kernel_ulong_t)&sh7720_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7722
+ { "pfc-sh7722", (kernel_ulong_t)&sh7722_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7723
+ { "pfc-sh7723", (kernel_ulong_t)&sh7723_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7724
+ { "pfc-sh7724", (kernel_ulong_t)&sh7724_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7734
+ { "pfc-sh7734", (kernel_ulong_t)&sh7734_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7757
+ { "pfc-sh7757", (kernel_ulong_t)&sh7757_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7785
+ { "pfc-sh7785", (kernel_ulong_t)&sh7785_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SH7786
+ { "pfc-sh7786", (kernel_ulong_t)&sh7786_pinmux_info },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_SHX3
+ { "pfc-shx3", (kernel_ulong_t)&shx3_pinmux_info },
+#endif
+ { "sh-pfc", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, sh_pfc_id_table);
+
+static struct platform_driver sh_pfc_driver = {
+ .probe = sh_pfc_probe,
+ .remove = sh_pfc_remove,
+ .id_table = sh_pfc_id_table,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sh_pfc_init(void)
+{
+ return platform_driver_register(&sh_pfc_driver);
+}
+postcore_initcall(sh_pfc_init);
+
+static void __exit sh_pfc_exit(void)
+{
+ platform_driver_unregister(&sh_pfc_driver);
+}
+module_exit(sh_pfc_exit);
+
+MODULE_AUTHOR("Magnus Damm, Paul Mundt, Laurent Pinchart");
+MODULE_DESCRIPTION("Pin Control and GPIO driver for SuperH pin function controller");
+MODULE_LICENSE("GPL v2");
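
The core is now a regular platform driver whose id_table carries per-SoC pinmux data through driver_data, with platform_data as a fallback. A minimal sketch of that matching pattern; every name here ("my-variant-a", my_soc_info, ...) is made up for illustration.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct my_soc_info {
        const char *name;
        unsigned int nr_pins;
};

static const struct my_soc_info variant_a_info = {
        .name = "variant-a", .nr_pins = 128,
};

static int my_probe(struct platform_device *pdev)
{
        const struct my_soc_info *info =
                (const void *)pdev->id_entry->driver_data;

        if (!info)
                info = pdev->dev.platform_data; /* board-file fallback */
        if (!info)
                return -ENODEV;

        dev_info(&pdev->dev, "%s: %u pins\n", info->name, info->nr_pins);
        return 0;
}

static const struct platform_device_id my_id_table[] = {
        { "my-variant-a", (kernel_ulong_t)&variant_a_info },
        { },
};
MODULE_DEVICE_TABLE(platform, my_id_table);

static struct platform_driver my_driver = {
        .probe          = my_probe,
        .id_table       = my_id_table,
        .driver         = {
                .name   = "my-driver",
                .owner  = THIS_MODULE,
        },
};
module_platform_driver(my_driver);

MODULE_LICENSE("GPL v2");
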
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
new file mode 100644
index 000000000000..ba7c33c33599
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -0,0 +1,72 @@
+/*
+ * SuperH Pin Function Controller support.
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __SH_PFC_CORE_H__
+#define __SH_PFC_CORE_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "sh_pfc.h"
+
+struct sh_pfc_window {
+ phys_addr_t phys;
+ void __iomem *virt;
+ unsigned long size;
+};
+
+struct sh_pfc_chip;
+struct sh_pfc_pinctrl;
+
+struct sh_pfc {
+ struct device *dev;
+ struct sh_pfc_soc_info *info;
+ spinlock_t lock;
+
+ unsigned int num_windows;
+ struct sh_pfc_window *window;
+
+ struct sh_pfc_chip *gpio;
+ struct sh_pfc_pinctrl *pinctrl;
+};
+
+int sh_pfc_register_gpiochip(struct sh_pfc *pfc);
+int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc);
+
+int sh_pfc_register_pinctrl(struct sh_pfc *pfc);
+int sh_pfc_unregister_pinctrl(struct sh_pfc *pfc);
+
+int sh_pfc_read_bit(struct pinmux_data_reg *dr, unsigned long in_pos);
+void sh_pfc_write_bit(struct pinmux_data_reg *dr, unsigned long in_pos,
+ unsigned long value);
+int sh_pfc_get_data_reg(struct sh_pfc *pfc, unsigned gpio,
+ struct pinmux_data_reg **drp, int *bitp);
+int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
+ pinmux_enum_t *enum_idp);
+int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
+ int cfg_mode);
+
+extern struct sh_pfc_soc_info r8a7740_pinmux_info;
+extern struct sh_pfc_soc_info r8a7779_pinmux_info;
+extern struct sh_pfc_soc_info sh7203_pinmux_info;
+extern struct sh_pfc_soc_info sh7264_pinmux_info;
+extern struct sh_pfc_soc_info sh7269_pinmux_info;
+extern struct sh_pfc_soc_info sh7372_pinmux_info;
+extern struct sh_pfc_soc_info sh73a0_pinmux_info;
+extern struct sh_pfc_soc_info sh7720_pinmux_info;
+extern struct sh_pfc_soc_info sh7722_pinmux_info;
+extern struct sh_pfc_soc_info sh7723_pinmux_info;
+extern struct sh_pfc_soc_info sh7724_pinmux_info;
+extern struct sh_pfc_soc_info sh7734_pinmux_info;
+extern struct sh_pfc_soc_info sh7757_pinmux_info;
+extern struct sh_pfc_soc_info sh7785_pinmux_info;
+extern struct sh_pfc_soc_info sh7786_pinmux_info;
+extern struct sh_pfc_soc_info shx3_pinmux_info;
+
+#endif /* __SH_PFC_CORE_H__ */
diff --git a/drivers/sh/pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 6a24f07c2013..a535075c8b69 100644
--- a/drivers/sh/pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -8,16 +8,18 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-#define pr_fmt(fmt) "sh_pfc " KBUILD_MODNAME ": " fmt
-#include <linux/init.h>
+#define pr_fmt(fmt) KBUILD_MODNAME " gpio: " fmt
+
+#include <linux/device.h>
#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
+#include <linux/init.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/sh_pfc.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "core.h"
struct sh_pfc_chip {
struct sh_pfc *pfc;
@@ -49,7 +51,7 @@ static void sh_gpio_set_value(struct sh_pfc *pfc, unsigned gpio, int value)
struct pinmux_data_reg *dr = NULL;
int bit = 0;
- if (!pfc || sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
+ if (sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
BUG();
else
sh_pfc_write_bit(dr, bit, value);
@@ -60,7 +62,7 @@ static int sh_gpio_get_value(struct sh_pfc *pfc, unsigned gpio)
struct pinmux_data_reg *dr = NULL;
int bit = 0;
- if (!pfc || sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
+ if (sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
return -EINVAL;
return sh_pfc_read_bit(dr, bit);
@@ -103,11 +105,11 @@ static int sh_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
if (pos <= 0 || !enum_id)
break;
- for (i = 0; i < pfc->gpio_irq_size; i++) {
- enum_ids = pfc->gpio_irq[i].enum_ids;
+ for (i = 0; i < pfc->info->gpio_irq_size; i++) {
+ enum_ids = pfc->info->gpio_irq[i].enum_ids;
for (k = 0; enum_ids[k]; k++) {
if (enum_ids[k] == enum_id)
- return pfc->gpio_irq[i].irq;
+ return pfc->info->gpio_irq[i].irq;
}
}
}
@@ -128,12 +130,12 @@ static void sh_pfc_gpio_setup(struct sh_pfc_chip *chip)
gc->set = sh_gpio_set;
gc->to_irq = sh_gpio_to_irq;
- WARN_ON(pfc->first_gpio != 0); /* needs testing */
+ WARN_ON(pfc->info->first_gpio != 0); /* needs testing */
- gc->label = pfc->name;
+ gc->label = pfc->info->name;
gc->owner = THIS_MODULE;
- gc->base = pfc->first_gpio;
- gc->ngpio = (pfc->last_gpio - pfc->first_gpio) + 1;
+ gc->base = pfc->info->first_gpio;
+ gc->ngpio = (pfc->info->last_gpio - pfc->info->first_gpio) + 1;
}
int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
@@ -141,7 +143,7 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
struct sh_pfc_chip *chip;
int ret;
- chip = kzalloc(sizeof(struct sh_pfc_chip), GFP_KERNEL);
+ chip = devm_kzalloc(pfc->dev, sizeof(*chip), GFP_KERNEL);
if (unlikely(!chip))
return -ENOMEM;
@@ -151,90 +153,26 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
ret = gpiochip_add(&chip->gpio_chip);
if (unlikely(ret < 0))
- kfree(chip);
-
- pr_info("%s handling gpio %d -> %d\n",
- pfc->name, pfc->first_gpio, pfc->last_gpio);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(sh_pfc_register_gpiochip);
-
-static int sh_pfc_gpio_match(struct gpio_chip *gc, void *data)
-{
- return !!strstr(gc->label, data);
-}
-
-static int sh_pfc_gpio_probe(struct platform_device *pdev)
-{
- struct sh_pfc_chip *chip;
- struct gpio_chip *gc;
-
- gc = gpiochip_find("_pfc", sh_pfc_gpio_match);
- if (unlikely(!gc)) {
- pr_err("Cant find gpio chip\n");
- return -ENODEV;
- }
+ return ret;
- chip = gpio_to_pfc_chip(gc);
- platform_set_drvdata(pdev, chip);
+ pfc->gpio = chip;
- pr_info("attaching to GPIO chip %s\n", chip->pfc->name);
+ pr_info("%s handling gpio %d -> %d\n",
+ pfc->info->name, pfc->info->first_gpio,
+ pfc->info->last_gpio);
return 0;
}
-static int sh_pfc_gpio_remove(struct platform_device *pdev)
+int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc)
{
- struct sh_pfc_chip *chip = platform_get_drvdata(pdev);
+ struct sh_pfc_chip *chip = pfc->gpio;
int ret;
ret = gpiochip_remove(&chip->gpio_chip);
if (unlikely(ret < 0))
return ret;
- kfree(chip);
+ pfc->gpio = NULL;
return 0;
}
-
-static struct platform_driver sh_pfc_gpio_driver = {
- .probe = sh_pfc_gpio_probe,
- .remove = sh_pfc_gpio_remove,
- .driver = {
- .name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
- },
-};
-
-static struct platform_device sh_pfc_gpio_device = {
- .name = KBUILD_MODNAME,
- .id = -1,
-};
-
-static int __init sh_pfc_gpio_init(void)
-{
- int rc;
-
- rc = platform_driver_register(&sh_pfc_gpio_driver);
- if (likely(!rc)) {
- rc = platform_device_register(&sh_pfc_gpio_device);
- if (unlikely(rc))
- platform_driver_unregister(&sh_pfc_gpio_driver);
- }
-
- return rc;
-}
-
-static void __exit sh_pfc_gpio_exit(void)
-{
- platform_device_unregister(&sh_pfc_gpio_device);
- platform_driver_unregister(&sh_pfc_gpio_driver);
-}
-
-module_init(sh_pfc_gpio_init);
-module_exit(sh_pfc_gpio_exit);
-
-MODULE_AUTHOR("Magnus Damm, Paul Mundt");
-MODULE_DESCRIPTION("GPIO driver for SuperH pin function controller");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:pfc-gpio");
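
The GPIO half no longer creates its own platform device and looks the chip up by label; the owning driver registers the gpio_chip directly and keeps the pointer for later removal. A hedged sketch of registering a chip that way; the callback bodies and the my_chip wrapper are illustrative only.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/slab.h>

struct my_chip {
        struct gpio_chip gc;
        /* driver state needed by the callbacks would live here */
};

static int my_gpio_get(struct gpio_chip *gc, unsigned offset)
{
        return 0;       /* a real driver reads its data register here */
}

static void my_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
        /* a real driver writes its data register here */
}

static int my_register_gpiochip(struct device *dev, int base, int ngpio)
{
        struct my_chip *chip;

        chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        chip->gc.label  = dev_name(dev);
        chip->gc.owner  = THIS_MODULE;
        chip->gc.get    = my_gpio_get;
        chip->gc.set    = my_gpio_set;
        chip->gc.base   = base;
        chip->gc.ngpio  = ngpio;

        return gpiochip_add(&chip->gc); /* no separate platform device */
}
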
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
new file mode 100644
index 000000000000..214788c4a606
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -0,0 +1,2612 @@
+/*
+ * R8A7740 processor support
+ *
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ * Copyright (C) 2011 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <mach/r8a7740.h>
+#include <mach/irqs.h>
+
+#include "sh_pfc.h"
+
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
+ PORT_10(fn, pfx##10, sfx), PORT_90(fn, pfx##1, sfx), \
+ PORT_10(fn, pfx##20, sfx), \
+ PORT_1(fn, pfx##210, sfx), PORT_1(fn, pfx##211, sfx)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ /* PORT0_DATA -> PORT211_DATA */
+ PINMUX_DATA_BEGIN,
+ PORT_ALL(DATA),
+ PINMUX_DATA_END,
+
+ /* PORT0_IN -> PORT211_IN */
+ PINMUX_INPUT_BEGIN,
+ PORT_ALL(IN),
+ PINMUX_INPUT_END,
+
+ /* PORT0_IN_PU -> PORT211_IN_PU */
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PORT_ALL(IN_PU),
+ PINMUX_INPUT_PULLUP_END,
+
+ /* PORT0_IN_PD -> PORT211_IN_PD */
+ PINMUX_INPUT_PULLDOWN_BEGIN,
+ PORT_ALL(IN_PD),
+ PINMUX_INPUT_PULLDOWN_END,
+
+ /* PORT0_OUT -> PORT211_OUT */
+ PINMUX_OUTPUT_BEGIN,
+ PORT_ALL(OUT),
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT211_FN_IN */
+ PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT211_FN_OUT */
+ PORT_ALL(FN0), /* PORT0_FN0 -> PORT211_FN0 */
+ PORT_ALL(FN1), /* PORT0_FN1 -> PORT211_FN1 */
+ PORT_ALL(FN2), /* PORT0_FN2 -> PORT211_FN2 */
+ PORT_ALL(FN3), /* PORT0_FN3 -> PORT211_FN3 */
+ PORT_ALL(FN4), /* PORT0_FN4 -> PORT211_FN4 */
+ PORT_ALL(FN5), /* PORT0_FN5 -> PORT211_FN5 */
+ PORT_ALL(FN6), /* PORT0_FN6 -> PORT211_FN6 */
+ PORT_ALL(FN7), /* PORT0_FN7 -> PORT211_FN7 */
+
+ MSEL1CR_31_0, MSEL1CR_31_1,
+ MSEL1CR_30_0, MSEL1CR_30_1,
+ MSEL1CR_29_0, MSEL1CR_29_1,
+ MSEL1CR_28_0, MSEL1CR_28_1,
+ MSEL1CR_27_0, MSEL1CR_27_1,
+ MSEL1CR_26_0, MSEL1CR_26_1,
+ MSEL1CR_16_0, MSEL1CR_16_1,
+ MSEL1CR_15_0, MSEL1CR_15_1,
+ MSEL1CR_14_0, MSEL1CR_14_1,
+ MSEL1CR_13_0, MSEL1CR_13_1,
+ MSEL1CR_12_0, MSEL1CR_12_1,
+ MSEL1CR_9_0, MSEL1CR_9_1,
+ MSEL1CR_7_0, MSEL1CR_7_1,
+ MSEL1CR_6_0, MSEL1CR_6_1,
+ MSEL1CR_5_0, MSEL1CR_5_1,
+ MSEL1CR_4_0, MSEL1CR_4_1,
+ MSEL1CR_3_0, MSEL1CR_3_1,
+ MSEL1CR_2_0, MSEL1CR_2_1,
+ MSEL1CR_0_0, MSEL1CR_0_1,
+
+ MSEL3CR_15_0, MSEL3CR_15_1, /* Trace / Debug ? */
+ MSEL3CR_6_0, MSEL3CR_6_1,
+
+ MSEL4CR_19_0, MSEL4CR_19_1,
+ MSEL4CR_18_0, MSEL4CR_18_1,
+ MSEL4CR_15_0, MSEL4CR_15_1,
+ MSEL4CR_10_0, MSEL4CR_10_1,
+ MSEL4CR_6_0, MSEL4CR_6_1,
+ MSEL4CR_4_0, MSEL4CR_4_1,
+ MSEL4CR_1_0, MSEL4CR_1_1,
+
+ MSEL5CR_31_0, MSEL5CR_31_1, /* irq/fiq output */
+ MSEL5CR_30_0, MSEL5CR_30_1,
+ MSEL5CR_29_0, MSEL5CR_29_1,
+ MSEL5CR_27_0, MSEL5CR_27_1,
+ MSEL5CR_25_0, MSEL5CR_25_1,
+ MSEL5CR_23_0, MSEL5CR_23_1,
+ MSEL5CR_21_0, MSEL5CR_21_1,
+ MSEL5CR_19_0, MSEL5CR_19_1,
+ MSEL5CR_17_0, MSEL5CR_17_1,
+ MSEL5CR_15_0, MSEL5CR_15_1,
+ MSEL5CR_14_0, MSEL5CR_14_1,
+ MSEL5CR_13_0, MSEL5CR_13_1,
+ MSEL5CR_12_0, MSEL5CR_12_1,
+ MSEL5CR_11_0, MSEL5CR_11_1,
+ MSEL5CR_10_0, MSEL5CR_10_1,
+ MSEL5CR_8_0, MSEL5CR_8_1,
+ MSEL5CR_7_0, MSEL5CR_7_1,
+ MSEL5CR_6_0, MSEL5CR_6_1,
+ MSEL5CR_5_0, MSEL5CR_5_1,
+ MSEL5CR_4_0, MSEL5CR_4_1,
+ MSEL5CR_3_0, MSEL5CR_3_1,
+ MSEL5CR_2_0, MSEL5CR_2_1,
+ MSEL5CR_0_0, MSEL5CR_0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ /* IRQ */
+ IRQ0_PORT2_MARK, IRQ0_PORT13_MARK,
+ IRQ1_MARK,
+ IRQ2_PORT11_MARK, IRQ2_PORT12_MARK,
+ IRQ3_PORT10_MARK, IRQ3_PORT14_MARK,
+ IRQ4_PORT15_MARK, IRQ4_PORT172_MARK,
+ IRQ5_PORT0_MARK, IRQ5_PORT1_MARK,
+ IRQ6_PORT121_MARK, IRQ6_PORT173_MARK,
+ IRQ7_PORT120_MARK, IRQ7_PORT209_MARK,
+ IRQ8_MARK,
+ IRQ9_PORT118_MARK, IRQ9_PORT210_MARK,
+ IRQ10_MARK,
+ IRQ11_MARK,
+ IRQ12_PORT42_MARK, IRQ12_PORT97_MARK,
+ IRQ13_PORT64_MARK, IRQ13_PORT98_MARK,
+ IRQ14_PORT63_MARK, IRQ14_PORT99_MARK,
+ IRQ15_PORT62_MARK, IRQ15_PORT100_MARK,
+ IRQ16_PORT68_MARK, IRQ16_PORT211_MARK,
+ IRQ17_MARK,
+ IRQ18_MARK,
+ IRQ19_MARK,
+ IRQ20_MARK,
+ IRQ21_MARK,
+ IRQ22_MARK,
+ IRQ23_MARK,
+ IRQ24_MARK,
+ IRQ25_MARK,
+ IRQ26_PORT58_MARK, IRQ26_PORT81_MARK,
+ IRQ27_PORT57_MARK, IRQ27_PORT168_MARK,
+ IRQ28_PORT56_MARK, IRQ28_PORT169_MARK,
+ IRQ29_PORT50_MARK, IRQ29_PORT170_MARK,
+ IRQ30_PORT49_MARK, IRQ30_PORT171_MARK,
+ IRQ31_PORT41_MARK, IRQ31_PORT167_MARK,
+
+ /* Function */
+
+ /* DBGT */
+ DBGMDT2_MARK, DBGMDT1_MARK, DBGMDT0_MARK,
+ DBGMD10_MARK, DBGMD11_MARK, DBGMD20_MARK,
+ DBGMD21_MARK,
+
+ /* FSI-A */
+ FSIAISLD_PORT0_MARK, /* FSIAISLD Port 0/5 */
+ FSIAISLD_PORT5_MARK,
+ FSIASPDIF_PORT9_MARK, /* FSIASPDIF Port 9/18 */
+ FSIASPDIF_PORT18_MARK,
+ FSIAOSLD1_MARK, FSIAOSLD2_MARK, FSIAOLR_MARK,
+ FSIAOBT_MARK, FSIAOSLD_MARK, FSIAOMC_MARK,
+ FSIACK_MARK, FSIAILR_MARK, FSIAIBT_MARK,
+
+ /* FSI-B */
+ FSIBCK_MARK,
+
+ /* FMSI */
+ FMSISLD_PORT1_MARK, /* FMSISLD Port 1/6 */
+ FMSISLD_PORT6_MARK,
+ FMSIILR_MARK, FMSIIBT_MARK, FMSIOLR_MARK, FMSIOBT_MARK,
+ FMSICK_MARK, FMSOILR_MARK, FMSOIBT_MARK, FMSOOLR_MARK,
+ FMSOOBT_MARK, FMSOSLD_MARK, FMSOCK_MARK,
+
+ /* SCIFA0 */
+ SCIFA0_SCK_MARK, SCIFA0_CTS_MARK, SCIFA0_RTS_MARK,
+ SCIFA0_RXD_MARK, SCIFA0_TXD_MARK,
+
+ /* SCIFA1 */
+ SCIFA1_CTS_MARK, SCIFA1_SCK_MARK, SCIFA1_RXD_MARK,
+ SCIFA1_TXD_MARK, SCIFA1_RTS_MARK,
+
+ /* SCIFA2 */
+ SCIFA2_SCK_PORT22_MARK, /* SCIFA2_SCK Port 22/199 */
+ SCIFA2_SCK_PORT199_MARK,
+ SCIFA2_RXD_MARK, SCIFA2_TXD_MARK,
+ SCIFA2_CTS_MARK, SCIFA2_RTS_MARK,
+
+ /* SCIFA3 */
+ SCIFA3_RTS_PORT105_MARK, /* MSEL5CR_8_0 */
+ SCIFA3_SCK_PORT116_MARK,
+ SCIFA3_CTS_PORT117_MARK,
+ SCIFA3_RXD_PORT174_MARK,
+ SCIFA3_TXD_PORT175_MARK,
+
+ SCIFA3_RTS_PORT161_MARK, /* MSEL5CR_8_1 */
+ SCIFA3_SCK_PORT158_MARK,
+ SCIFA3_CTS_PORT162_MARK,
+ SCIFA3_RXD_PORT159_MARK,
+ SCIFA3_TXD_PORT160_MARK,
+
+ /* SCIFA4 */
+ SCIFA4_RXD_PORT12_MARK, /* MSEL5CR[12:11] = 00 */
+ SCIFA4_TXD_PORT13_MARK,
+
+ SCIFA4_RXD_PORT204_MARK, /* MSEL5CR[12:11] = 01 */
+ SCIFA4_TXD_PORT203_MARK,
+
+ SCIFA4_RXD_PORT94_MARK, /* MSEL5CR[12:11] = 10 */
+ SCIFA4_TXD_PORT93_MARK,
+
+ SCIFA4_SCK_PORT21_MARK, /* SCIFA4_SCK Port 21/205 */
+ SCIFA4_SCK_PORT205_MARK,
+
+ /* SCIFA5 */
+ SCIFA5_TXD_PORT20_MARK, /* MSEL5CR[15:14] = 00 */
+ SCIFA5_RXD_PORT10_MARK,
+
+ SCIFA5_RXD_PORT207_MARK, /* MSEL5CR[15:14] = 01 */
+ SCIFA5_TXD_PORT208_MARK,
+
+ SCIFA5_TXD_PORT91_MARK, /* MSEL5CR[15:14] = 10 */
+ SCIFA5_RXD_PORT92_MARK,
+
+ SCIFA5_SCK_PORT23_MARK, /* SCIFA5_SCK Port 23/206 */
+ SCIFA5_SCK_PORT206_MARK,
+
+ /* SCIFA6 */
+ SCIFA6_SCK_MARK, SCIFA6_RXD_MARK, SCIFA6_TXD_MARK,
+
+ /* SCIFA7 */
+ SCIFA7_TXD_MARK, SCIFA7_RXD_MARK,
+
+ /* SCIFAB */
+ SCIFB_SCK_PORT190_MARK, /* MSEL5CR_17_0 */
+ SCIFB_RXD_PORT191_MARK,
+ SCIFB_TXD_PORT192_MARK,
+ SCIFB_RTS_PORT186_MARK,
+ SCIFB_CTS_PORT187_MARK,
+
+ SCIFB_SCK_PORT2_MARK, /* MSEL5CR_17_1 */
+ SCIFB_RXD_PORT3_MARK,
+ SCIFB_TXD_PORT4_MARK,
+ SCIFB_RTS_PORT172_MARK,
+ SCIFB_CTS_PORT173_MARK,
+
+ /* LCD0 */
+ LCDC0_SELECT_MARK,
+
+ LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK,
+ LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK,
+ LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK,
+ LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK,
+ LCD0_D16_MARK, LCD0_D17_MARK,
+ LCD0_DON_MARK, LCD0_VCPWC_MARK, LCD0_VEPWC_MARK,
+ LCD0_DCK_MARK, LCD0_VSYN_MARK, /* for RGB */
+ LCD0_HSYN_MARK, LCD0_DISP_MARK, /* for RGB */
+ LCD0_WR_MARK, LCD0_RD_MARK, /* for SYS */
+ LCD0_CS_MARK, LCD0_RS_MARK, /* for SYS */
+
+ LCD0_D21_PORT158_MARK, LCD0_D23_PORT159_MARK, /* MSEL5CR_6_1 */
+ LCD0_D22_PORT160_MARK, LCD0_D20_PORT161_MARK,
+ LCD0_D19_PORT162_MARK, LCD0_D18_PORT163_MARK,
+ LCD0_LCLK_PORT165_MARK,
+
+ LCD0_D18_PORT40_MARK, LCD0_D22_PORT0_MARK, /* MSEL5CR_6_0 */
+ LCD0_D23_PORT1_MARK, LCD0_D21_PORT2_MARK,
+ LCD0_D20_PORT3_MARK, LCD0_D19_PORT4_MARK,
+ LCD0_LCLK_PORT102_MARK,
+
+ /* LCD1 */
+ LCDC1_SELECT_MARK,
+
+ LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK,
+ LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK,
+ LCD1_D8_MARK, LCD1_D9_MARK, LCD1_D10_MARK, LCD1_D11_MARK,
+ LCD1_D12_MARK, LCD1_D13_MARK, LCD1_D14_MARK, LCD1_D15_MARK,
+ LCD1_D16_MARK, LCD1_D17_MARK, LCD1_D18_MARK, LCD1_D19_MARK,
+ LCD1_D20_MARK, LCD1_D21_MARK, LCD1_D22_MARK, LCD1_D23_MARK,
+ LCD1_DON_MARK, LCD1_VCPWC_MARK,
+ LCD1_LCLK_MARK, LCD1_VEPWC_MARK,
+
+ LCD1_DCK_MARK, LCD1_VSYN_MARK, /* for RGB */
+ LCD1_HSYN_MARK, LCD1_DISP_MARK, /* for RGB */
+ LCD1_RS_MARK, LCD1_CS_MARK, /* for SYS */
+ LCD1_RD_MARK, LCD1_WR_MARK, /* for SYS */
+
+ /* RSPI */
+ RSPI_SSL0_A_MARK, RSPI_SSL1_A_MARK, RSPI_SSL2_A_MARK,
+ RSPI_SSL3_A_MARK, RSPI_CK_A_MARK, RSPI_MOSI_A_MARK,
+ RSPI_MISO_A_MARK,
+
+ /* VIO CKO */
+ VIO_CKO1_MARK, /* needs fixup */
+ VIO_CKO2_MARK,
+ VIO_CKO_1_MARK,
+ VIO_CKO_MARK,
+
+ /* VIO0 */
+ VIO0_D0_MARK, VIO0_D1_MARK, VIO0_D2_MARK, VIO0_D3_MARK,
+ VIO0_D4_MARK, VIO0_D5_MARK, VIO0_D6_MARK, VIO0_D7_MARK,
+ VIO0_D8_MARK, VIO0_D9_MARK, VIO0_D10_MARK, VIO0_D11_MARK,
+ VIO0_D12_MARK, VIO0_VD_MARK, VIO0_HD_MARK, VIO0_CLK_MARK,
+ VIO0_FIELD_MARK,
+
+ VIO0_D13_PORT26_MARK, /* MSEL5CR_27_0 */
+ VIO0_D14_PORT25_MARK,
+ VIO0_D15_PORT24_MARK,
+
+ VIO0_D13_PORT22_MARK, /* MSEL5CR_27_1 */
+ VIO0_D14_PORT95_MARK,
+ VIO0_D15_PORT96_MARK,
+
+ /* VIO1 */
+ VIO1_D0_MARK, VIO1_D1_MARK, VIO1_D2_MARK, VIO1_D3_MARK,
+ VIO1_D4_MARK, VIO1_D5_MARK, VIO1_D6_MARK, VIO1_D7_MARK,
+ VIO1_VD_MARK, VIO1_HD_MARK, VIO1_CLK_MARK, VIO1_FIELD_MARK,
+
+ /* TPU0 */
+ TPU0TO0_MARK, TPU0TO1_MARK, TPU0TO3_MARK,
+ TPU0TO2_PORT66_MARK, /* TPU0TO2 Port 66/202 */
+ TPU0TO2_PORT202_MARK,
+
+ /* SSP1 0 */
+ STP0_IPD0_MARK, STP0_IPD1_MARK, STP0_IPD2_MARK, STP0_IPD3_MARK,
+ STP0_IPD4_MARK, STP0_IPD5_MARK, STP0_IPD6_MARK, STP0_IPD7_MARK,
+ STP0_IPEN_MARK, STP0_IPCLK_MARK, STP0_IPSYNC_MARK,
+
+ /* SSP1 1 */
+ STP1_IPD1_MARK, STP1_IPD2_MARK, STP1_IPD3_MARK, STP1_IPD4_MARK,
+ STP1_IPD5_MARK, STP1_IPD6_MARK, STP1_IPD7_MARK, STP1_IPCLK_MARK,
+ STP1_IPSYNC_MARK,
+
+ STP1_IPD0_PORT186_MARK, /* MSEL5CR_23_0 */
+ STP1_IPEN_PORT187_MARK,
+
+ STP1_IPD0_PORT194_MARK, /* MSEL5CR_23_1 */
+ STP1_IPEN_PORT193_MARK,
+
+ /* SIM */
+ SIM_RST_MARK, SIM_CLK_MARK,
+ SIM_D_PORT22_MARK, /* SIM_D Port 22/199 */
+ SIM_D_PORT199_MARK,
+
+ /* SDHI0 */
+ SDHI0_D0_MARK, SDHI0_D1_MARK, SDHI0_D2_MARK, SDHI0_D3_MARK,
+ SDHI0_CD_MARK, SDHI0_WP_MARK, SDHI0_CMD_MARK, SDHI0_CLK_MARK,
+
+ /* SDHI1 */
+ SDHI1_D0_MARK, SDHI1_D1_MARK, SDHI1_D2_MARK, SDHI1_D3_MARK,
+ SDHI1_CD_MARK, SDHI1_WP_MARK, SDHI1_CMD_MARK, SDHI1_CLK_MARK,
+
+ /* SDHI2 */
+ SDHI2_D0_MARK, SDHI2_D1_MARK, SDHI2_D2_MARK, SDHI2_D3_MARK,
+ SDHI2_CLK_MARK, SDHI2_CMD_MARK,
+
+ SDHI2_CD_PORT24_MARK, /* MSEL5CR_19_0 */
+ SDHI2_WP_PORT25_MARK,
+
+ SDHI2_WP_PORT177_MARK, /* MSEL5CR_19_1 */
+ SDHI2_CD_PORT202_MARK,
+
+ /* MSIOF2 */
+ MSIOF2_TXD_MARK, MSIOF2_RXD_MARK, MSIOF2_TSCK_MARK,
+ MSIOF2_SS2_MARK, MSIOF2_TSYNC_MARK, MSIOF2_SS1_MARK,
+ MSIOF2_MCK1_MARK, MSIOF2_MCK0_MARK, MSIOF2_RSYNC_MARK,
+ MSIOF2_RSCK_MARK,
+
+ /* KEYSC */
+ KEYIN4_MARK, KEYIN5_MARK, KEYIN6_MARK, KEYIN7_MARK,
+ KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
+ KEYOUT4_MARK, KEYOUT5_MARK, KEYOUT6_MARK, KEYOUT7_MARK,
+
+ KEYIN0_PORT43_MARK, /* MSEL4CR_18_0 */
+ KEYIN1_PORT44_MARK,
+ KEYIN2_PORT45_MARK,
+ KEYIN3_PORT46_MARK,
+
+ KEYIN0_PORT58_MARK, /* MSEL4CR_18_1 */
+ KEYIN1_PORT57_MARK,
+ KEYIN2_PORT56_MARK,
+ KEYIN3_PORT55_MARK,
+
+ /* VOU */
+ DV_D0_MARK, DV_D1_MARK, DV_D2_MARK, DV_D3_MARK,
+ DV_D4_MARK, DV_D5_MARK, DV_D6_MARK, DV_D7_MARK,
+ DV_D8_MARK, DV_D9_MARK, DV_D10_MARK, DV_D11_MARK,
+ DV_D12_MARK, DV_D13_MARK, DV_D14_MARK, DV_D15_MARK,
+ DV_CLK_MARK, DV_VSYNC_MARK, DV_HSYNC_MARK,
+
+ /* MEMC */
+ MEMC_AD0_MARK, MEMC_AD1_MARK, MEMC_AD2_MARK, MEMC_AD3_MARK,
+ MEMC_AD4_MARK, MEMC_AD5_MARK, MEMC_AD6_MARK, MEMC_AD7_MARK,
+ MEMC_AD8_MARK, MEMC_AD9_MARK, MEMC_AD10_MARK, MEMC_AD11_MARK,
+ MEMC_AD12_MARK, MEMC_AD13_MARK, MEMC_AD14_MARK, MEMC_AD15_MARK,
+ MEMC_CS0_MARK, MEMC_INT_MARK, MEMC_NWE_MARK, MEMC_NOE_MARK,
+
+ MEMC_CS1_MARK, /* MSEL4CR_6_0 */
+ MEMC_ADV_MARK,
+ MEMC_WAIT_MARK,
+ MEMC_BUSCLK_MARK,
+
+ MEMC_A1_MARK, /* MSEL4CR_6_1 */
+ MEMC_DREQ0_MARK,
+ MEMC_DREQ1_MARK,
+ MEMC_A0_MARK,
+
+ /* MMC */
+ MMC0_D0_PORT68_MARK, MMC0_D1_PORT69_MARK, MMC0_D2_PORT70_MARK,
+ MMC0_D3_PORT71_MARK, MMC0_D4_PORT72_MARK, MMC0_D5_PORT73_MARK,
+ MMC0_D6_PORT74_MARK, MMC0_D7_PORT75_MARK, MMC0_CLK_PORT66_MARK,
+ MMC0_CMD_PORT67_MARK, /* MSEL4CR_15_0 */
+
+ MMC1_D0_PORT149_MARK, MMC1_D1_PORT148_MARK, MMC1_D2_PORT147_MARK,
+ MMC1_D3_PORT146_MARK, MMC1_D4_PORT145_MARK, MMC1_D5_PORT144_MARK,
+ MMC1_D6_PORT143_MARK, MMC1_D7_PORT142_MARK, MMC1_CLK_PORT103_MARK,
+ MMC1_CMD_PORT104_MARK, /* MSEL4CR_15_1 */
+
+ /* MSIOF0 */
+ MSIOF0_SS1_MARK, MSIOF0_SS2_MARK, MSIOF0_RXD_MARK,
+ MSIOF0_TXD_MARK, MSIOF0_MCK0_MARK, MSIOF0_MCK1_MARK,
+ MSIOF0_RSYNC_MARK, MSIOF0_RSCK_MARK, MSIOF0_TSCK_MARK,
+ MSIOF0_TSYNC_MARK,
+
+ /* MSIOF1 */
+ MSIOF1_RSCK_MARK, MSIOF1_RSYNC_MARK,
+ MSIOF1_MCK0_MARK, MSIOF1_MCK1_MARK,
+
+ MSIOF1_SS2_PORT116_MARK, MSIOF1_SS1_PORT117_MARK,
+ MSIOF1_RXD_PORT118_MARK, MSIOF1_TXD_PORT119_MARK,
+ MSIOF1_TSYNC_PORT120_MARK,
+ MSIOF1_TSCK_PORT121_MARK, /* MSEL4CR_10_0 */
+
+ MSIOF1_SS1_PORT67_MARK, MSIOF1_TSCK_PORT72_MARK,
+ MSIOF1_TSYNC_PORT73_MARK, MSIOF1_TXD_PORT74_MARK,
+ MSIOF1_RXD_PORT75_MARK,
+ MSIOF1_SS2_PORT202_MARK, /* MSEL4CR_10_1 */
+
+ /* GPIO */
+ GPO0_MARK, GPI0_MARK, GPO1_MARK, GPI1_MARK,
+
+ /* USB0 */
+ USB0_OCI_MARK, USB0_PPON_MARK, VBUS_MARK,
+
+ /* USB1 */
+ USB1_OCI_MARK, USB1_PPON_MARK,
+
+ /* BBIF1 */
+ BBIF1_RXD_MARK, BBIF1_TXD_MARK, BBIF1_TSYNC_MARK,
+ BBIF1_TSCK_MARK, BBIF1_RSCK_MARK, BBIF1_RSYNC_MARK,
+ BBIF1_FLOW_MARK, BBIF1_RX_FLOW_N_MARK,
+
+ /* BBIF2 */
+ BBIF2_TXD2_PORT5_MARK, /* MSEL5CR_0_0 */
+ BBIF2_RXD2_PORT60_MARK,
+ BBIF2_TSYNC2_PORT6_MARK,
+ BBIF2_TSCK2_PORT59_MARK,
+
+ BBIF2_RXD2_PORT90_MARK, /* MSEL5CR_0_1 */
+ BBIF2_TXD2_PORT183_MARK,
+ BBIF2_TSCK2_PORT89_MARK,
+ BBIF2_TSYNC2_PORT184_MARK,
+
+ /* BSC / FLCTL / PCMCIA */
+ CS0_MARK, CS2_MARK, CS4_MARK,
+ CS5B_MARK, CS6A_MARK,
+ CS5A_PORT105_MARK, /* CS5A PORT 19/105 */
+ CS5A_PORT19_MARK,
+ IOIS16_MARK, /* ? */
+
+ A0_MARK, A1_MARK, A2_MARK, A3_MARK,
+ A4_FOE_MARK, /* share with FLCTL */
+ A5_FCDE_MARK, /* share with FLCTL */
+ A6_MARK, A7_MARK, A8_MARK, A9_MARK,
+ A10_MARK, A11_MARK, A12_MARK, A13_MARK,
+ A14_MARK, A15_MARK, A16_MARK, A17_MARK,
+ A18_MARK, A19_MARK, A20_MARK, A21_MARK,
+ A22_MARK, A23_MARK, A24_MARK, A25_MARK,
+ A26_MARK,
+
+ D0_NAF0_MARK, D1_NAF1_MARK, D2_NAF2_MARK, /* share with FLCTL */
+ D3_NAF3_MARK, D4_NAF4_MARK, D5_NAF5_MARK, /* share with FLCTL */
+ D6_NAF6_MARK, D7_NAF7_MARK, D8_NAF8_MARK, /* share with FLCTL */
+ D9_NAF9_MARK, D10_NAF10_MARK, D11_NAF11_MARK, /* share with FLCTL */
+ D12_NAF12_MARK, D13_NAF13_MARK, D14_NAF14_MARK, /* share with FLCTL */
+ D15_NAF15_MARK, /* share with FLCTL */
+ D16_MARK, D17_MARK, D18_MARK, D19_MARK,
+ D20_MARK, D21_MARK, D22_MARK, D23_MARK,
+ D24_MARK, D25_MARK, D26_MARK, D27_MARK,
+ D28_MARK, D29_MARK, D30_MARK, D31_MARK,
+
+ WE0_FWE_MARK, /* share with FLCTL */
+ WE1_MARK,
+ WE2_ICIORD_MARK, /* share with PCMCIA */
+ WE3_ICIOWR_MARK, /* share with PCMCIA */
+ CKO_MARK, BS_MARK, RDWR_MARK,
+ RD_FSC_MARK, /* share with FLCTL */
+ WAIT_PORT177_MARK, /* WAIT Port 90/177 */
+ WAIT_PORT90_MARK,
+
+ FCE0_MARK, FCE1_MARK, FRB_MARK, /* FLCTL */
+
+ /* IRDA */
+ IRDA_FIRSEL_MARK, IRDA_IN_MARK, IRDA_OUT_MARK,
+
+ /* ATAPI */
+ IDE_D0_MARK, IDE_D1_MARK, IDE_D2_MARK, IDE_D3_MARK,
+ IDE_D4_MARK, IDE_D5_MARK, IDE_D6_MARK, IDE_D7_MARK,
+ IDE_D8_MARK, IDE_D9_MARK, IDE_D10_MARK, IDE_D11_MARK,
+ IDE_D12_MARK, IDE_D13_MARK, IDE_D14_MARK, IDE_D15_MARK,
+ IDE_A0_MARK, IDE_A1_MARK, IDE_A2_MARK, IDE_CS0_MARK,
+ IDE_CS1_MARK, IDE_IOWR_MARK, IDE_IORD_MARK, IDE_IORDY_MARK,
+ IDE_INT_MARK, IDE_RST_MARK, IDE_DIRECTION_MARK,
+ IDE_EXBUF_ENB_MARK, IDE_IODACK_MARK, IDE_IODREQ_MARK,
+
+ /* RMII */
+ RMII_CRS_DV_MARK, RMII_RX_ER_MARK, RMII_RXD0_MARK,
+ RMII_RXD1_MARK, RMII_TX_EN_MARK, RMII_TXD0_MARK,
+ RMII_MDC_MARK, RMII_TXD1_MARK, RMII_MDIO_MARK,
+ RMII_REF50CK_MARK, /* for RMII */
+ RMII_REF125CK_MARK, /* for GMII */
+
+ /* GEther */
+ ET_TX_CLK_MARK, ET_TX_EN_MARK, ET_ETXD0_MARK, ET_ETXD1_MARK,
+ ET_ETXD2_MARK, ET_ETXD3_MARK,
+ ET_ETXD4_MARK, ET_ETXD5_MARK, /* for GEther */
+ ET_ETXD6_MARK, ET_ETXD7_MARK, /* for GEther */
+ ET_COL_MARK, ET_TX_ER_MARK, ET_RX_CLK_MARK, ET_RX_DV_MARK,
+ ET_ERXD0_MARK, ET_ERXD1_MARK, ET_ERXD2_MARK, ET_ERXD3_MARK,
+ ET_ERXD4_MARK, ET_ERXD5_MARK, /* for GEther */
+ ET_ERXD6_MARK, ET_ERXD7_MARK, /* for GEther */
+ ET_RX_ER_MARK, ET_CRS_MARK, ET_MDC_MARK, ET_MDIO_MARK,
+ ET_LINK_MARK, ET_PHY_INT_MARK, ET_WOL_MARK, ET_GTX_CLK_MARK,
+
+ /* DMA0 */
+ DREQ0_MARK, DACK0_MARK,
+
+ /* DMA1 */
+ DREQ1_MARK, DACK1_MARK,
+
+ /* SYSC */
+ RESETOUTS_MARK, RESETP_PULLUP_MARK, RESETP_PLAIN_MARK,
+
+ /* IRREM */
+ IROUT_MARK,
+
+ /* SDENC */
+ SDENC_CPG_MARK, SDENC_DV_CLKI_MARK,
+
+ /* HDMI */
+ HDMI_HPD_MARK, HDMI_CEC_MARK,
+
+ /* DEBUG */
+ EDEBGREQ_PULLUP_MARK, /* for JTAG */
+ EDEBGREQ_PULLDOWN_MARK,
+
+ TRACEAUD_FROM_VIO_MARK, /* for TRACE/AUD */
+ TRACEAUD_FROM_LCDC0_MARK,
+ TRACEAUD_FROM_MEMC_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* specify valid pin states for each pin in GPIO mode */
+
+ /* I/O and Pull U/D */
+ PORT_DATA_IO_PD(0), PORT_DATA_IO_PD(1),
+ PORT_DATA_IO_PD(2), PORT_DATA_IO_PD(3),
+ PORT_DATA_IO_PD(4), PORT_DATA_IO_PD(5),
+ PORT_DATA_IO_PD(6), PORT_DATA_IO(7),
+ PORT_DATA_IO(8), PORT_DATA_IO(9),
+
+ PORT_DATA_IO_PD(10), PORT_DATA_IO_PD(11),
+ PORT_DATA_IO_PD(12), PORT_DATA_IO_PU_PD(13),
+ PORT_DATA_IO_PD(14), PORT_DATA_IO_PD(15),
+ PORT_DATA_IO_PD(16), PORT_DATA_IO_PD(17),
+ PORT_DATA_IO(18), PORT_DATA_IO_PU(19),
+
+ PORT_DATA_IO_PU_PD(20), PORT_DATA_IO_PD(21),
+ PORT_DATA_IO_PU_PD(22), PORT_DATA_IO(23),
+ PORT_DATA_IO_PU(24), PORT_DATA_IO_PU(25),
+ PORT_DATA_IO_PU(26), PORT_DATA_IO_PU(27),
+ PORT_DATA_IO_PU(28), PORT_DATA_IO_PU(29),
+
+ PORT_DATA_IO_PU(30), PORT_DATA_IO_PD(31),
+ PORT_DATA_IO_PD(32), PORT_DATA_IO_PD(33),
+ PORT_DATA_IO_PD(34), PORT_DATA_IO_PU(35),
+ PORT_DATA_IO_PU(36), PORT_DATA_IO_PD(37),
+ PORT_DATA_IO_PU(38), PORT_DATA_IO_PD(39),
+
+ PORT_DATA_IO_PU_PD(40), PORT_DATA_IO_PD(41),
+ PORT_DATA_IO_PD(42), PORT_DATA_IO_PU_PD(43),
+ PORT_DATA_IO_PU_PD(44), PORT_DATA_IO_PU_PD(45),
+ PORT_DATA_IO_PU_PD(46), PORT_DATA_IO_PU_PD(47),
+ PORT_DATA_IO_PU_PD(48), PORT_DATA_IO_PU_PD(49),
+
+ PORT_DATA_IO_PU_PD(50), PORT_DATA_IO_PD(51),
+ PORT_DATA_IO_PD(52), PORT_DATA_IO_PD(53),
+ PORT_DATA_IO_PD(54), PORT_DATA_IO_PU_PD(55),
+ PORT_DATA_IO_PU_PD(56), PORT_DATA_IO_PU_PD(57),
+ PORT_DATA_IO_PU_PD(58), PORT_DATA_IO_PU_PD(59),
+
+ PORT_DATA_IO_PU_PD(60), PORT_DATA_IO_PD(61),
+ PORT_DATA_IO_PD(62), PORT_DATA_IO_PD(63),
+ PORT_DATA_IO_PD(64), PORT_DATA_IO_PD(65),
+ PORT_DATA_IO_PU_PD(66), PORT_DATA_IO_PU_PD(67),
+ PORT_DATA_IO_PU_PD(68), PORT_DATA_IO_PU_PD(69),
+
+ PORT_DATA_IO_PU_PD(70), PORT_DATA_IO_PU_PD(71),
+ PORT_DATA_IO_PU_PD(72), PORT_DATA_IO_PU_PD(73),
+ PORT_DATA_IO_PU_PD(74), PORT_DATA_IO_PU_PD(75),
+ PORT_DATA_IO_PU_PD(76), PORT_DATA_IO_PU_PD(77),
+ PORT_DATA_IO_PU_PD(78), PORT_DATA_IO_PU_PD(79),
+
+ PORT_DATA_IO_PU_PD(80), PORT_DATA_IO_PU_PD(81),
+ PORT_DATA_IO(82), PORT_DATA_IO_PU_PD(83),
+ PORT_DATA_IO(84), PORT_DATA_IO_PD(85),
+ PORT_DATA_IO_PD(86), PORT_DATA_IO_PD(87),
+ PORT_DATA_IO_PD(88), PORT_DATA_IO_PD(89),
+
+ PORT_DATA_IO_PD(90), PORT_DATA_IO_PU_PD(91),
+ PORT_DATA_IO_PU_PD(92), PORT_DATA_IO_PU_PD(93),
+ PORT_DATA_IO_PU_PD(94), PORT_DATA_IO_PU_PD(95),
+ PORT_DATA_IO_PU_PD(96), PORT_DATA_IO_PU_PD(97),
+ PORT_DATA_IO_PU_PD(98), PORT_DATA_IO_PU_PD(99),
+
+ PORT_DATA_IO_PU_PD(100), PORT_DATA_IO(101),
+ PORT_DATA_IO_PU(102), PORT_DATA_IO_PU_PD(103),
+ PORT_DATA_IO_PU(104), PORT_DATA_IO_PU(105),
+ PORT_DATA_IO_PU_PD(106), PORT_DATA_IO(107),
+ PORT_DATA_IO(108), PORT_DATA_IO(109),
+
+ PORT_DATA_IO(110), PORT_DATA_IO(111),
+ PORT_DATA_IO(112), PORT_DATA_IO(113),
+ PORT_DATA_IO_PU_PD(114), PORT_DATA_IO(115),
+ PORT_DATA_IO_PD(116), PORT_DATA_IO_PD(117),
+ PORT_DATA_IO_PD(118), PORT_DATA_IO_PD(119),
+
+ PORT_DATA_IO_PD(120), PORT_DATA_IO_PD(121),
+ PORT_DATA_IO_PD(122), PORT_DATA_IO_PD(123),
+ PORT_DATA_IO_PD(124), PORT_DATA_IO(125),
+ PORT_DATA_IO(126), PORT_DATA_IO(127),
+ PORT_DATA_IO(128), PORT_DATA_IO(129),
+
+ PORT_DATA_IO(130), PORT_DATA_IO(131),
+ PORT_DATA_IO(132), PORT_DATA_IO(133),
+ PORT_DATA_IO(134), PORT_DATA_IO(135),
+ PORT_DATA_IO(136), PORT_DATA_IO(137),
+ PORT_DATA_IO(138), PORT_DATA_IO(139),
+
+ PORT_DATA_IO(140), PORT_DATA_IO(141),
+ PORT_DATA_IO_PU(142), PORT_DATA_IO_PU(143),
+ PORT_DATA_IO_PU(144), PORT_DATA_IO_PU(145),
+ PORT_DATA_IO_PU(146), PORT_DATA_IO_PU(147),
+ PORT_DATA_IO_PU(148), PORT_DATA_IO_PU(149),
+
+ PORT_DATA_IO_PU(150), PORT_DATA_IO_PU(151),
+ PORT_DATA_IO_PU(152), PORT_DATA_IO_PU(153),
+ PORT_DATA_IO_PU(154), PORT_DATA_IO_PU(155),
+ PORT_DATA_IO_PU(156), PORT_DATA_IO_PU(157),
+ PORT_DATA_IO_PD(158), PORT_DATA_IO_PD(159),
+
+ PORT_DATA_IO_PU_PD(160), PORT_DATA_IO_PD(161),
+ PORT_DATA_IO_PD(162), PORT_DATA_IO_PD(163),
+ PORT_DATA_IO_PD(164), PORT_DATA_IO_PD(165),
+ PORT_DATA_IO_PU(166), PORT_DATA_IO_PU(167),
+ PORT_DATA_IO_PU(168), PORT_DATA_IO_PU(169),
+
+ PORT_DATA_IO_PU(170), PORT_DATA_IO_PU(171),
+ PORT_DATA_IO_PD(172), PORT_DATA_IO_PD(173),
+ PORT_DATA_IO_PD(174), PORT_DATA_IO_PD(175),
+ PORT_DATA_IO_PU(176), PORT_DATA_IO_PU_PD(177),
+ PORT_DATA_IO_PU(178), PORT_DATA_IO_PD(179),
+
+ PORT_DATA_IO_PD(180), PORT_DATA_IO_PU(181),
+ PORT_DATA_IO_PU(182), PORT_DATA_IO(183),
+ PORT_DATA_IO_PD(184), PORT_DATA_IO_PD(185),
+ PORT_DATA_IO_PD(186), PORT_DATA_IO_PD(187),
+ PORT_DATA_IO_PD(188), PORT_DATA_IO_PD(189),
+
+ PORT_DATA_IO_PD(190), PORT_DATA_IO_PD(191),
+ PORT_DATA_IO_PD(192), PORT_DATA_IO_PU_PD(193),
+ PORT_DATA_IO_PU_PD(194), PORT_DATA_IO_PD(195),
+ PORT_DATA_IO_PU_PD(196), PORT_DATA_IO_PD(197),
+ PORT_DATA_IO_PU_PD(198), PORT_DATA_IO_PU_PD(199),
+
+ PORT_DATA_IO_PU_PD(200), PORT_DATA_IO_PU(201),
+ PORT_DATA_IO_PU_PD(202), PORT_DATA_IO(203),
+ PORT_DATA_IO_PU_PD(204), PORT_DATA_IO_PU_PD(205),
+ PORT_DATA_IO_PU_PD(206), PORT_DATA_IO_PU_PD(207),
+ PORT_DATA_IO_PU_PD(208), PORT_DATA_IO_PD(209),
+
+ PORT_DATA_IO_PD(210), PORT_DATA_IO_PD(211),
+
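+ /*
+  * Descriptive note (inferred from the entries themselves, not from the
+  * hardware manual): each PINMUX_DATA() below ties a function mark to
+  * the port function-select value that enables it, and any MSEL*CR_n_0/1
+  * IDs also listed must be programmed to that mode-select setting for
+  * the function to be valid.
+  */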
+ /* Port0 */
+ PINMUX_DATA(DBGMDT2_MARK, PORT0_FN1),
+ PINMUX_DATA(FSIAISLD_PORT0_MARK, PORT0_FN2, MSEL5CR_3_0),
+ PINMUX_DATA(FSIAOSLD1_MARK, PORT0_FN3),
+ PINMUX_DATA(LCD0_D22_PORT0_MARK, PORT0_FN4, MSEL5CR_6_0),
+ PINMUX_DATA(SCIFA7_RXD_MARK, PORT0_FN6),
+ PINMUX_DATA(LCD1_D4_MARK, PORT0_FN7),
+ PINMUX_DATA(IRQ5_PORT0_MARK, PORT0_FN0, MSEL1CR_5_0),
+
+ /* Port1 */
+ PINMUX_DATA(DBGMDT1_MARK, PORT1_FN1),
+ PINMUX_DATA(FMSISLD_PORT1_MARK, PORT1_FN2, MSEL5CR_5_0),
+ PINMUX_DATA(FSIAOSLD2_MARK, PORT1_FN3),
+ PINMUX_DATA(LCD0_D23_PORT1_MARK, PORT1_FN4, MSEL5CR_6_0),
+ PINMUX_DATA(SCIFA7_TXD_MARK, PORT1_FN6),
+ PINMUX_DATA(LCD1_D3_MARK, PORT1_FN7),
+ PINMUX_DATA(IRQ5_PORT1_MARK, PORT1_FN0, MSEL1CR_5_1),
+
+ /* Port2 */
+ PINMUX_DATA(DBGMDT0_MARK, PORT2_FN1),
+ PINMUX_DATA(SCIFB_SCK_PORT2_MARK, PORT2_FN2, MSEL5CR_17_1),
+ PINMUX_DATA(LCD0_D21_PORT2_MARK, PORT2_FN4, MSEL5CR_6_0),
+ PINMUX_DATA(LCD1_D2_MARK, PORT2_FN7),
+ PINMUX_DATA(IRQ0_PORT2_MARK, PORT2_FN0, MSEL1CR_0_1),
+
+ /* Port3 */
+ PINMUX_DATA(DBGMD21_MARK, PORT3_FN1),
+ PINMUX_DATA(SCIFB_RXD_PORT3_MARK, PORT3_FN2, MSEL5CR_17_1),
+ PINMUX_DATA(LCD0_D20_PORT3_MARK, PORT3_FN4, MSEL5CR_6_0),
+ PINMUX_DATA(LCD1_D1_MARK, PORT3_FN7),
+
+ /* Port4 */
+ PINMUX_DATA(DBGMD20_MARK, PORT4_FN1),
+ PINMUX_DATA(SCIFB_TXD_PORT4_MARK, PORT4_FN2, MSEL5CR_17_1),
+ PINMUX_DATA(LCD0_D19_PORT4_MARK, PORT4_FN4, MSEL5CR_6_0),
+ PINMUX_DATA(LCD1_D0_MARK, PORT4_FN7),
+
+ /* Port5 */
+ PINMUX_DATA(DBGMD11_MARK, PORT5_FN1),
+ PINMUX_DATA(BBIF2_TXD2_PORT5_MARK, PORT5_FN2, MSEL5CR_0_0),
+ PINMUX_DATA(FSIAISLD_PORT5_MARK, PORT5_FN4, MSEL5CR_3_1),
+ PINMUX_DATA(RSPI_SSL0_A_MARK, PORT5_FN6),
+ PINMUX_DATA(LCD1_VCPWC_MARK, PORT5_FN7),
+
+ /* Port6 */
+ PINMUX_DATA(DBGMD10_MARK, PORT6_FN1),
+ PINMUX_DATA(BBIF2_TSYNC2_PORT6_MARK, PORT6_FN2, MSEL5CR_0_0),
+ PINMUX_DATA(FMSISLD_PORT6_MARK, PORT6_FN4, MSEL5CR_5_1),
+ PINMUX_DATA(RSPI_SSL1_A_MARK, PORT6_FN6),
+ PINMUX_DATA(LCD1_VEPWC_MARK, PORT6_FN7),
+
+ /* Port7 */
+ PINMUX_DATA(FSIAOLR_MARK, PORT7_FN1),
+
+ /* Port8 */
+ PINMUX_DATA(FSIAOBT_MARK, PORT8_FN1),
+
+ /* Port9 */
+ PINMUX_DATA(FSIAOSLD_MARK, PORT9_FN1),
+ PINMUX_DATA(FSIASPDIF_PORT9_MARK, PORT9_FN2, MSEL5CR_4_0),
+
+ /* Port10 */
+ PINMUX_DATA(FSIAOMC_MARK, PORT10_FN1),
+ PINMUX_DATA(SCIFA5_RXD_PORT10_MARK, PORT10_FN3, MSEL5CR_14_0, MSEL5CR_15_0),
+ PINMUX_DATA(IRQ3_PORT10_MARK, PORT10_FN0, MSEL1CR_3_0),
+
+ /* Port11 */
+ PINMUX_DATA(FSIACK_MARK, PORT11_FN1),
+ PINMUX_DATA(FSIBCK_MARK, PORT11_FN2),
+ PINMUX_DATA(IRQ2_PORT11_MARK, PORT11_FN0, MSEL1CR_2_0),
+
+ /* Port12 */
+ PINMUX_DATA(FSIAILR_MARK, PORT12_FN1),
+ PINMUX_DATA(SCIFA4_RXD_PORT12_MARK, PORT12_FN2, MSEL5CR_12_0, MSEL5CR_11_0),
+ PINMUX_DATA(LCD1_RS_MARK, PORT12_FN6),
+ PINMUX_DATA(LCD1_DISP_MARK, PORT12_FN7),
+ PINMUX_DATA(IRQ2_PORT12_MARK, PORT12_FN0, MSEL1CR_2_1),
+
+ /* Port13 */
+ PINMUX_DATA(FSIAIBT_MARK, PORT13_FN1),
+ PINMUX_DATA(SCIFA4_TXD_PORT13_MARK, PORT13_FN2, MSEL5CR_12_0, MSEL5CR_11_0),
+ PINMUX_DATA(LCD1_RD_MARK, PORT13_FN7),
+ PINMUX_DATA(IRQ0_PORT13_MARK, PORT13_FN0, MSEL1CR_0_0),
+
+ /* Port14 */
+ PINMUX_DATA(FMSOILR_MARK, PORT14_FN1),
+ PINMUX_DATA(FMSIILR_MARK, PORT14_FN2),
+ PINMUX_DATA(VIO_CKO1_MARK, PORT14_FN3),
+ PINMUX_DATA(LCD1_D23_MARK, PORT14_FN7),
+ PINMUX_DATA(IRQ3_PORT14_MARK, PORT14_FN0, MSEL1CR_3_1),
+
+ /* Port15 */
+ PINMUX_DATA(FMSOIBT_MARK, PORT15_FN1),
+ PINMUX_DATA(FMSIIBT_MARK, PORT15_FN2),
+ PINMUX_DATA(VIO_CKO2_MARK, PORT15_FN3),
+ PINMUX_DATA(LCD1_D22_MARK, PORT15_FN7),
+ PINMUX_DATA(IRQ4_PORT15_MARK, PORT15_FN0, MSEL1CR_4_0),
+
+ /* Port16 */
+ PINMUX_DATA(FMSOOLR_MARK, PORT16_FN1),
+ PINMUX_DATA(FMSIOLR_MARK, PORT16_FN2),
+
+ /* Port17 */
+ PINMUX_DATA(FMSOOBT_MARK, PORT17_FN1),
+ PINMUX_DATA(FMSIOBT_MARK, PORT17_FN2),
+
+ /* Port18 */
+ PINMUX_DATA(FMSOSLD_MARK, PORT18_FN1),
+ PINMUX_DATA(FSIASPDIF_PORT18_MARK, PORT18_FN2, MSEL5CR_4_1),
+
+ /* Port19 */
+ PINMUX_DATA(FMSICK_MARK, PORT19_FN1),
+ PINMUX_DATA(CS5A_PORT19_MARK, PORT19_FN7, MSEL5CR_2_1),
+ PINMUX_DATA(IRQ10_MARK, PORT19_FN0),
+
+ /* Port20 */
+ PINMUX_DATA(FMSOCK_MARK, PORT20_FN1),
+ PINMUX_DATA(SCIFA5_TXD_PORT20_MARK, PORT20_FN3, MSEL5CR_15_0, MSEL5CR_14_0),
+ PINMUX_DATA(IRQ1_MARK, PORT20_FN0),
+
+ /* Port21 */
+ PINMUX_DATA(SCIFA1_CTS_MARK, PORT21_FN1),
+ PINMUX_DATA(SCIFA4_SCK_PORT21_MARK, PORT21_FN2, MSEL5CR_10_0),
+ PINMUX_DATA(TPU0TO1_MARK, PORT21_FN4),
+ PINMUX_DATA(VIO1_FIELD_MARK, PORT21_FN5),
+ PINMUX_DATA(STP0_IPD5_MARK, PORT21_FN6),
+ PINMUX_DATA(LCD1_D10_MARK, PORT21_FN7),
+
+ /* Port22 */
+ PINMUX_DATA(SCIFA2_SCK_PORT22_MARK, PORT22_FN1, MSEL5CR_7_0),
+ PINMUX_DATA(SIM_D_PORT22_MARK, PORT22_FN4, MSEL5CR_21_0),
+ PINMUX_DATA(VIO0_D13_PORT22_MARK, PORT22_FN7, MSEL5CR_27_1),
+
+ /* Port23 */
+ PINMUX_DATA(SCIFA1_RTS_MARK, PORT23_FN1),
+ PINMUX_DATA(SCIFA5_SCK_PORT23_MARK, PORT23_FN3, MSEL5CR_13_0),
+ PINMUX_DATA(TPU0TO0_MARK, PORT23_FN4),
+ PINMUX_DATA(VIO_CKO_1_MARK, PORT23_FN5),
+ PINMUX_DATA(STP0_IPD2_MARK, PORT23_FN6),
+ PINMUX_DATA(LCD1_D7_MARK, PORT23_FN7),
+
+ /* Port24 */
+ PINMUX_DATA(VIO0_D15_PORT24_MARK, PORT24_FN1, MSEL5CR_27_0),
+ PINMUX_DATA(VIO1_D7_MARK, PORT24_FN5),
+ PINMUX_DATA(SCIFA6_SCK_MARK, PORT24_FN6),
+ PINMUX_DATA(SDHI2_CD_PORT24_MARK, PORT24_FN7, MSEL5CR_19_0),
+
+ /* Port25 */
+ PINMUX_DATA(VIO0_D14_PORT25_MARK, PORT25_FN1, MSEL5CR_27_0),
+ PINMUX_DATA(VIO1_D6_MARK, PORT25_FN5),
+ PINMUX_DATA(SCIFA6_RXD_MARK, PORT25_FN6),
+ PINMUX_DATA(SDHI2_WP_PORT25_MARK, PORT25_FN7, MSEL5CR_19_0),
+
+ /* Port26 */
+ PINMUX_DATA(VIO0_D13_PORT26_MARK, PORT26_FN1, MSEL5CR_27_0),
+ PINMUX_DATA(VIO1_D5_MARK, PORT26_FN5),
+ PINMUX_DATA(SCIFA6_TXD_MARK, PORT26_FN6),
+
+ /* Port27 - Port39 Function */
+ PINMUX_DATA(VIO0_D7_MARK, PORT27_FN1),
+ PINMUX_DATA(VIO0_D6_MARK, PORT28_FN1),
+ PINMUX_DATA(VIO0_D5_MARK, PORT29_FN1),
+ PINMUX_DATA(VIO0_D4_MARK, PORT30_FN1),
+ PINMUX_DATA(VIO0_D3_MARK, PORT31_FN1),
+ PINMUX_DATA(VIO0_D2_MARK, PORT32_FN1),
+ PINMUX_DATA(VIO0_D1_MARK, PORT33_FN1),
+ PINMUX_DATA(VIO0_D0_MARK, PORT34_FN1),
+ PINMUX_DATA(VIO0_CLK_MARK, PORT35_FN1),
+ PINMUX_DATA(VIO_CKO_MARK, PORT36_FN1),
+ PINMUX_DATA(VIO0_HD_MARK, PORT37_FN1),
+ PINMUX_DATA(VIO0_FIELD_MARK, PORT38_FN1),
+ PINMUX_DATA(VIO0_VD_MARK, PORT39_FN1),
+
+ /* Port38 IRQ */
+ PINMUX_DATA(IRQ25_MARK, PORT38_FN0),
+
+ /* Port40 */
+ PINMUX_DATA(LCD0_D18_PORT40_MARK, PORT40_FN4, MSEL5CR_6_0),
+ PINMUX_DATA(RSPI_CK_A_MARK, PORT40_FN6),
+ PINMUX_DATA(LCD1_LCLK_MARK, PORT40_FN7),
+
+ /* Port41 */
+ PINMUX_DATA(LCD0_D17_MARK, PORT41_FN1),
+ PINMUX_DATA(MSIOF2_SS1_MARK, PORT41_FN2),
+ PINMUX_DATA(IRQ31_PORT41_MARK, PORT41_FN0, MSEL1CR_31_1),
+
+ /* Port42 */
+ PINMUX_DATA(LCD0_D16_MARK, PORT42_FN1),
+ PINMUX_DATA(MSIOF2_MCK1_MARK, PORT42_FN2),
+ PINMUX_DATA(IRQ12_PORT42_MARK, PORT42_FN0, MSEL1CR_12_1),
+
+ /* Port43 */
+ PINMUX_DATA(LCD0_D15_MARK, PORT43_FN1),
+ PINMUX_DATA(MSIOF2_MCK0_MARK, PORT43_FN2),
+ PINMUX_DATA(KEYIN0_PORT43_MARK, PORT43_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(DV_D15_MARK, PORT43_FN6),
+
+ /* Port44 */
+ PINMUX_DATA(LCD0_D14_MARK, PORT44_FN1),
+ PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT44_FN2),
+ PINMUX_DATA(KEYIN1_PORT44_MARK, PORT44_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(DV_D14_MARK, PORT44_FN6),
+
+ /* Port45 */
+ PINMUX_DATA(LCD0_D13_MARK, PORT45_FN1),
+ PINMUX_DATA(MSIOF2_RSCK_MARK, PORT45_FN2),
+ PINMUX_DATA(KEYIN2_PORT45_MARK, PORT45_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(DV_D13_MARK, PORT45_FN6),
+
+ /* Port46 */
+ PINMUX_DATA(LCD0_D12_MARK, PORT46_FN1),
+ PINMUX_DATA(KEYIN3_PORT46_MARK, PORT46_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(DV_D12_MARK, PORT46_FN6),
+
+ /* Port47 */
+ PINMUX_DATA(LCD0_D11_MARK, PORT47_FN1),
+ PINMUX_DATA(KEYIN4_MARK, PORT47_FN3),
+ PINMUX_DATA(DV_D11_MARK, PORT47_FN6),
+
+ /* Port48 */
+ PINMUX_DATA(LCD0_D10_MARK, PORT48_FN1),
+ PINMUX_DATA(KEYIN5_MARK, PORT48_FN3),
+ PINMUX_DATA(DV_D10_MARK, PORT48_FN6),
+
+ /* Port49 */
+ PINMUX_DATA(LCD0_D9_MARK, PORT49_FN1),
+ PINMUX_DATA(KEYIN6_MARK, PORT49_FN3),
+ PINMUX_DATA(DV_D9_MARK, PORT49_FN6),
+ PINMUX_DATA(IRQ30_PORT49_MARK, PORT49_FN0, MSEL1CR_30_1),
+
+ /* Port50 */
+ PINMUX_DATA(LCD0_D8_MARK, PORT50_FN1),
+ PINMUX_DATA(KEYIN7_MARK, PORT50_FN3),
+ PINMUX_DATA(DV_D8_MARK, PORT50_FN6),
+ PINMUX_DATA(IRQ29_PORT50_MARK, PORT50_FN0, MSEL1CR_29_1),
+
+ /* Port51 */
+ PINMUX_DATA(LCD0_D7_MARK, PORT51_FN1),
+ PINMUX_DATA(KEYOUT0_MARK, PORT51_FN3),
+ PINMUX_DATA(DV_D7_MARK, PORT51_FN6),
+
+ /* Port52 */
+ PINMUX_DATA(LCD0_D6_MARK, PORT52_FN1),
+ PINMUX_DATA(KEYOUT1_MARK, PORT52_FN3),
+ PINMUX_DATA(DV_D6_MARK, PORT52_FN6),
+
+ /* Port53 */
+ PINMUX_DATA(LCD0_D5_MARK, PORT53_FN1),
+ PINMUX_DATA(KEYOUT2_MARK, PORT53_FN3),
+ PINMUX_DATA(DV_D5_MARK, PORT53_FN6),
+
+ /* Port54 */
+ PINMUX_DATA(LCD0_D4_MARK, PORT54_FN1),
+ PINMUX_DATA(KEYOUT3_MARK, PORT54_FN3),
+ PINMUX_DATA(DV_D4_MARK, PORT54_FN6),
+
+ /* Port55 */
+ PINMUX_DATA(LCD0_D3_MARK, PORT55_FN1),
+ PINMUX_DATA(KEYOUT4_MARK, PORT55_FN3),
+ PINMUX_DATA(KEYIN3_PORT55_MARK, PORT55_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(DV_D3_MARK, PORT55_FN6),
+
+ /* Port56 */
+ PINMUX_DATA(LCD0_D2_MARK, PORT56_FN1),
+ PINMUX_DATA(KEYOUT5_MARK, PORT56_FN3),
+ PINMUX_DATA(KEYIN2_PORT56_MARK, PORT56_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(DV_D2_MARK, PORT56_FN6),
+ PINMUX_DATA(IRQ28_PORT56_MARK, PORT56_FN0, MSEL1CR_28_1),
+
+ /* Port57 */
+ PINMUX_DATA(LCD0_D1_MARK, PORT57_FN1),
+ PINMUX_DATA(KEYOUT6_MARK, PORT57_FN3),
+ PINMUX_DATA(KEYIN1_PORT57_MARK, PORT57_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(DV_D1_MARK, PORT57_FN6),
+ PINMUX_DATA(IRQ27_PORT57_MARK, PORT57_FN0, MSEL1CR_27_1),
+
+ /* Port58 */
+ PINMUX_DATA(LCD0_D0_MARK, PORT58_FN1),
+ PINMUX_DATA(KEYOUT7_MARK, PORT58_FN3),
+ PINMUX_DATA(KEYIN0_PORT58_MARK, PORT58_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(DV_D0_MARK, PORT58_FN6),
+ PINMUX_DATA(IRQ26_PORT58_MARK, PORT58_FN0, MSEL1CR_26_1),
+
+ /* Port59 */
+ PINMUX_DATA(LCD0_VCPWC_MARK, PORT59_FN1),
+ PINMUX_DATA(BBIF2_TSCK2_PORT59_MARK, PORT59_FN2, MSEL5CR_0_0),
+ PINMUX_DATA(RSPI_MOSI_A_MARK, PORT59_FN6),
+
+ /* Port60 */
+ PINMUX_DATA(LCD0_VEPWC_MARK, PORT60_FN1),
+ PINMUX_DATA(BBIF2_RXD2_PORT60_MARK, PORT60_FN2, MSEL5CR_0_0),
+ PINMUX_DATA(RSPI_MISO_A_MARK, PORT60_FN6),
+
+ /* Port61 */
+ PINMUX_DATA(LCD0_DON_MARK, PORT61_FN1),
+ PINMUX_DATA(MSIOF2_TXD_MARK, PORT61_FN2),
+
+ /* Port62 */
+ PINMUX_DATA(LCD0_DCK_MARK, PORT62_FN1),
+ PINMUX_DATA(LCD0_WR_MARK, PORT62_FN4),
+ PINMUX_DATA(DV_CLK_MARK, PORT62_FN6),
+ PINMUX_DATA(IRQ15_PORT62_MARK, PORT62_FN0, MSEL1CR_15_1),
+
+ /* Port63 */
+ PINMUX_DATA(LCD0_VSYN_MARK, PORT63_FN1),
+ PINMUX_DATA(DV_VSYNC_MARK, PORT63_FN6),
+ PINMUX_DATA(IRQ14_PORT63_MARK, PORT63_FN0, MSEL1CR_14_1),
+
+ /* Port64 */
+ PINMUX_DATA(LCD0_HSYN_MARK, PORT64_FN1),
+ PINMUX_DATA(LCD0_CS_MARK, PORT64_FN4),
+ PINMUX_DATA(DV_HSYNC_MARK, PORT64_FN6),
+ PINMUX_DATA(IRQ13_PORT64_MARK, PORT64_FN0, MSEL1CR_13_1),
+
+ /* Port65 */
+ PINMUX_DATA(LCD0_DISP_MARK, PORT65_FN1),
+ PINMUX_DATA(MSIOF2_TSCK_MARK, PORT65_FN2),
+ PINMUX_DATA(LCD0_RS_MARK, PORT65_FN4),
+
+ /* Port66 */
+ PINMUX_DATA(MEMC_INT_MARK, PORT66_FN1),
+ PINMUX_DATA(TPU0TO2_PORT66_MARK, PORT66_FN3, MSEL5CR_25_0),
+ PINMUX_DATA(MMC0_CLK_PORT66_MARK, PORT66_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(SDHI1_CLK_MARK, PORT66_FN6),
+
+ /* Port67 - Port73 Function1 */
+ PINMUX_DATA(MEMC_CS0_MARK, PORT67_FN1),
+ PINMUX_DATA(MEMC_AD8_MARK, PORT68_FN1),
+ PINMUX_DATA(MEMC_AD9_MARK, PORT69_FN1),
+ PINMUX_DATA(MEMC_AD10_MARK, PORT70_FN1),
+ PINMUX_DATA(MEMC_AD11_MARK, PORT71_FN1),
+ PINMUX_DATA(MEMC_AD12_MARK, PORT72_FN1),
+ PINMUX_DATA(MEMC_AD13_MARK, PORT73_FN1),
+
+ /* Port67 - Port73 Function2 */
+ PINMUX_DATA(MSIOF1_SS1_PORT67_MARK, PORT67_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PORT68_FN2),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT69_FN2),
+ PINMUX_DATA(MSIOF1_MCK0_MARK, PORT70_FN2),
+ PINMUX_DATA(MSIOF1_MCK1_MARK, PORT71_FN2),
+ PINMUX_DATA(MSIOF1_TSCK_PORT72_MARK, PORT72_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_TSYNC_PORT73_MARK, PORT73_FN2, MSEL4CR_10_1),
+
+ /* Port67 - Port73 Function4 */
+ PINMUX_DATA(MMC0_CMD_PORT67_MARK, PORT67_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMC0_D0_PORT68_MARK, PORT68_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMC0_D1_PORT69_MARK, PORT69_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMC0_D2_PORT70_MARK, PORT70_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMC0_D3_PORT71_MARK, PORT71_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMC0_D4_PORT72_MARK, PORT72_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMC0_D5_PORT73_MARK, PORT73_FN4, MSEL4CR_15_0),
+
+ /* Port67 - Port73 Function6 */
+ PINMUX_DATA(SDHI1_CMD_MARK, PORT67_FN6),
+ PINMUX_DATA(SDHI1_D0_MARK, PORT68_FN6),
+ PINMUX_DATA(SDHI1_D1_MARK, PORT69_FN6),
+ PINMUX_DATA(SDHI1_D2_MARK, PORT70_FN6),
+ PINMUX_DATA(SDHI1_D3_MARK, PORT71_FN6),
+ PINMUX_DATA(SDHI1_CD_MARK, PORT72_FN6),
+ PINMUX_DATA(SDHI1_WP_MARK, PORT73_FN6),
+
+ /* Port67 - Port71 IRQ */
+ PINMUX_DATA(IRQ20_MARK, PORT67_FN0),
+ PINMUX_DATA(IRQ16_PORT68_MARK, PORT68_FN0, MSEL1CR_16_0),
+ PINMUX_DATA(IRQ17_MARK, PORT69_FN0),
+ PINMUX_DATA(IRQ18_MARK, PORT70_FN0),
+ PINMUX_DATA(IRQ19_MARK, PORT71_FN0),
+
+ /* Port74 */
+ PINMUX_DATA(MEMC_AD14_MARK, PORT74_FN1),
+ PINMUX_DATA(MSIOF1_TXD_PORT74_MARK, PORT74_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MMC0_D6_PORT74_MARK, PORT74_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(STP1_IPD7_MARK, PORT74_FN6),
+ PINMUX_DATA(LCD1_D21_MARK, PORT74_FN7),
+
+ /* Port75 */
+ PINMUX_DATA(MEMC_AD15_MARK, PORT75_FN1),
+ PINMUX_DATA(MSIOF1_RXD_PORT75_MARK, PORT75_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MMC0_D7_PORT75_MARK, PORT75_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(STP1_IPD6_MARK, PORT75_FN6),
+ PINMUX_DATA(LCD1_D20_MARK, PORT75_FN7),
+
+ /* Port76 - Port80 Function */
+ PINMUX_DATA(SDHI0_CMD_MARK, PORT76_FN1),
+ PINMUX_DATA(SDHI0_D0_MARK, PORT77_FN1),
+ PINMUX_DATA(SDHI0_D1_MARK, PORT78_FN1),
+ PINMUX_DATA(SDHI0_D2_MARK, PORT79_FN1),
+ PINMUX_DATA(SDHI0_D3_MARK, PORT80_FN1),
+
+ /* Port81 */
+ PINMUX_DATA(SDHI0_CD_MARK, PORT81_FN1),
+ PINMUX_DATA(IRQ26_PORT81_MARK, PORT81_FN0, MSEL1CR_26_0),
+
+ /* Port82 - Port88 Function */
+ PINMUX_DATA(SDHI0_CLK_MARK, PORT82_FN1),
+ PINMUX_DATA(SDHI0_WP_MARK, PORT83_FN1),
+ PINMUX_DATA(RESETOUTS_MARK, PORT84_FN1),
+ PINMUX_DATA(USB0_PPON_MARK, PORT85_FN1),
+ PINMUX_DATA(USB0_OCI_MARK, PORT86_FN1),
+ PINMUX_DATA(USB1_PPON_MARK, PORT87_FN1),
+ PINMUX_DATA(USB1_OCI_MARK, PORT88_FN1),
+
+ /* Port89 */
+ PINMUX_DATA(DREQ0_MARK, PORT89_FN1),
+ PINMUX_DATA(BBIF2_TSCK2_PORT89_MARK, PORT89_FN2, MSEL5CR_0_1),
+ PINMUX_DATA(RSPI_SSL3_A_MARK, PORT89_FN6),
+
+ /* Port90 */
+ PINMUX_DATA(DACK0_MARK, PORT90_FN1),
+ PINMUX_DATA(BBIF2_RXD2_PORT90_MARK, PORT90_FN2, MSEL5CR_0_1),
+ PINMUX_DATA(RSPI_SSL2_A_MARK, PORT90_FN6),
+ PINMUX_DATA(WAIT_PORT90_MARK, PORT90_FN7, MSEL5CR_2_1),
+
+ /* Port91 */
+ PINMUX_DATA(MEMC_AD0_MARK, PORT91_FN1),
+ PINMUX_DATA(BBIF1_RXD_MARK, PORT91_FN2),
+ PINMUX_DATA(SCIFA5_TXD_PORT91_MARK, PORT91_FN3, MSEL5CR_15_1, MSEL5CR_14_0),
+ PINMUX_DATA(LCD1_D5_MARK, PORT91_FN7),
+
+ /* Port92 */
+ PINMUX_DATA(MEMC_AD1_MARK, PORT92_FN1),
+ PINMUX_DATA(BBIF1_TSYNC_MARK, PORT92_FN2),
+ PINMUX_DATA(SCIFA5_RXD_PORT92_MARK, PORT92_FN3, MSEL5CR_15_1, MSEL5CR_14_0),
+ PINMUX_DATA(STP0_IPD1_MARK, PORT92_FN6),
+ PINMUX_DATA(LCD1_D6_MARK, PORT92_FN7),
+
+ /* Port93 */
+ PINMUX_DATA(MEMC_AD2_MARK, PORT93_FN1),
+ PINMUX_DATA(BBIF1_TSCK_MARK, PORT93_FN2),
+ PINMUX_DATA(SCIFA4_TXD_PORT93_MARK, PORT93_FN3, MSEL5CR_12_1, MSEL5CR_11_0),
+ PINMUX_DATA(STP0_IPD3_MARK, PORT93_FN6),
+ PINMUX_DATA(LCD1_D8_MARK, PORT93_FN7),
+
+ /* Port94 */
+ PINMUX_DATA(MEMC_AD3_MARK, PORT94_FN1),
+ PINMUX_DATA(BBIF1_TXD_MARK, PORT94_FN2),
+ PINMUX_DATA(SCIFA4_RXD_PORT94_MARK, PORT94_FN3, MSEL5CR_12_1, MSEL5CR_11_0),
+ PINMUX_DATA(STP0_IPD4_MARK, PORT94_FN6),
+ PINMUX_DATA(LCD1_D9_MARK, PORT94_FN7),
+
+ /* Port95 */
+ PINMUX_DATA(MEMC_CS1_MARK, PORT95_FN1, MSEL4CR_6_0),
+ PINMUX_DATA(MEMC_A1_MARK, PORT95_FN1, MSEL4CR_6_1),
+
+ PINMUX_DATA(SCIFA2_CTS_MARK, PORT95_FN2),
+ PINMUX_DATA(SIM_RST_MARK, PORT95_FN4),
+ PINMUX_DATA(VIO0_D14_PORT95_MARK, PORT95_FN7, MSEL5CR_27_1),
+ PINMUX_DATA(IRQ22_MARK, PORT95_FN0),
+
+ /* Port96 */
+ PINMUX_DATA(MEMC_ADV_MARK, PORT96_FN1, MSEL4CR_6_0),
+ PINMUX_DATA(MEMC_DREQ0_MARK, PORT96_FN1, MSEL4CR_6_1),
+
+ PINMUX_DATA(SCIFA2_RTS_MARK, PORT96_FN2),
+ PINMUX_DATA(SIM_CLK_MARK, PORT96_FN4),
+ PINMUX_DATA(VIO0_D15_PORT96_MARK, PORT96_FN7, MSEL5CR_27_1),
+ PINMUX_DATA(IRQ23_MARK, PORT96_FN0),
+
+ /* Port97 */
+ PINMUX_DATA(MEMC_AD4_MARK, PORT97_FN1),
+ PINMUX_DATA(BBIF1_RSCK_MARK, PORT97_FN2),
+ PINMUX_DATA(LCD1_CS_MARK, PORT97_FN6),
+ PINMUX_DATA(LCD1_HSYN_MARK, PORT97_FN7),
+ PINMUX_DATA(IRQ12_PORT97_MARK, PORT97_FN0, MSEL1CR_12_0),
+
+ /* Port98 */
+ PINMUX_DATA(MEMC_AD5_MARK, PORT98_FN1),
+ PINMUX_DATA(BBIF1_RSYNC_MARK, PORT98_FN2),
+ PINMUX_DATA(LCD1_VSYN_MARK, PORT98_FN7),
+ PINMUX_DATA(IRQ13_PORT98_MARK, PORT98_FN0, MSEL1CR_13_0),
+
+ /* Port99 */
+ PINMUX_DATA(MEMC_AD6_MARK, PORT99_FN1),
+ PINMUX_DATA(BBIF1_FLOW_MARK, PORT99_FN2),
+ PINMUX_DATA(LCD1_WR_MARK, PORT99_FN6),
+ PINMUX_DATA(LCD1_DCK_MARK, PORT99_FN7),
+ PINMUX_DATA(IRQ14_PORT99_MARK, PORT99_FN0, MSEL1CR_14_0),
+
+ /* Port100 */
+ PINMUX_DATA(MEMC_AD7_MARK, PORT100_FN1),
+ PINMUX_DATA(BBIF1_RX_FLOW_N_MARK, PORT100_FN2),
+ PINMUX_DATA(LCD1_DON_MARK, PORT100_FN7),
+ PINMUX_DATA(IRQ15_PORT100_MARK, PORT100_FN0, MSEL1CR_15_0),
+
+ /* Port101 */
+ PINMUX_DATA(FCE0_MARK, PORT101_FN1),
+
+ /* Port102 */
+ PINMUX_DATA(FRB_MARK, PORT102_FN1),
+ PINMUX_DATA(LCD0_LCLK_PORT102_MARK, PORT102_FN4, MSEL5CR_6_0),
+
+ /* Port103 */
+ PINMUX_DATA(CS5B_MARK, PORT103_FN1),
+ PINMUX_DATA(FCE1_MARK, PORT103_FN2),
+ PINMUX_DATA(MMC1_CLK_PORT103_MARK, PORT103_FN3, MSEL4CR_15_1),
+
+ /* Port104 */
+ PINMUX_DATA(CS6A_MARK, PORT104_FN1),
+ PINMUX_DATA(MMC1_CMD_PORT104_MARK, PORT104_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(IRQ11_MARK, PORT104_FN0),
+
+ /* Port105 */
+ PINMUX_DATA(CS5A_PORT105_MARK, PORT105_FN1, MSEL5CR_2_0),
+ PINMUX_DATA(SCIFA3_RTS_PORT105_MARK, PORT105_FN4, MSEL5CR_8_0),
+
+ /* Port106 */
+ PINMUX_DATA(IOIS16_MARK, PORT106_FN1),
+ PINMUX_DATA(IDE_EXBUF_ENB_MARK, PORT106_FN6),
+
+ /* Port107 - Port115 Function */
+ PINMUX_DATA(WE3_ICIOWR_MARK, PORT107_FN1),
+ PINMUX_DATA(WE2_ICIORD_MARK, PORT108_FN1),
+ PINMUX_DATA(CS0_MARK, PORT109_FN1),
+ PINMUX_DATA(CS2_MARK, PORT110_FN1),
+ PINMUX_DATA(CS4_MARK, PORT111_FN1),
+ PINMUX_DATA(WE1_MARK, PORT112_FN1),
+ PINMUX_DATA(WE0_FWE_MARK, PORT113_FN1),
+ PINMUX_DATA(RDWR_MARK, PORT114_FN1),
+ PINMUX_DATA(RD_FSC_MARK, PORT115_FN1),
+
+ /* Port116 */
+ PINMUX_DATA(A25_MARK, PORT116_FN1),
+ PINMUX_DATA(MSIOF0_SS2_MARK, PORT116_FN2),
+ PINMUX_DATA(MSIOF1_SS2_PORT116_MARK, PORT116_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(SCIFA3_SCK_PORT116_MARK, PORT116_FN4, MSEL5CR_8_0),
+ PINMUX_DATA(GPO1_MARK, PORT116_FN5),
+
+ /* Port117 */
+ PINMUX_DATA(A24_MARK, PORT117_FN1),
+ PINMUX_DATA(MSIOF0_SS1_MARK, PORT117_FN2),
+ PINMUX_DATA(MSIOF1_SS1_PORT117_MARK, PORT117_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(SCIFA3_CTS_PORT117_MARK, PORT117_FN4, MSEL5CR_8_0),
+ PINMUX_DATA(GPO0_MARK, PORT117_FN5),
+
+ /* Port118 */
+ PINMUX_DATA(A23_MARK, PORT118_FN1),
+ PINMUX_DATA(MSIOF0_MCK1_MARK, PORT118_FN2),
+ PINMUX_DATA(MSIOF1_RXD_PORT118_MARK, PORT118_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(GPI1_MARK, PORT118_FN5),
+ PINMUX_DATA(IRQ9_PORT118_MARK, PORT118_FN0, MSEL1CR_9_0),
+
+ /* Port119 */
+ PINMUX_DATA(A22_MARK, PORT119_FN1),
+ PINMUX_DATA(MSIOF0_MCK0_MARK, PORT119_FN2),
+ PINMUX_DATA(MSIOF1_TXD_PORT119_MARK, PORT119_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(GPI0_MARK, PORT119_FN5),
+ PINMUX_DATA(IRQ8_MARK, PORT119_FN0),
+
+ /* Port120 */
+ PINMUX_DATA(A21_MARK, PORT120_FN1),
+ PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT120_FN2),
+ PINMUX_DATA(MSIOF1_TSYNC_PORT120_MARK, PORT120_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(IRQ7_PORT120_MARK, PORT120_FN0, MSEL1CR_7_1),
+
+ /* Port121 */
+ PINMUX_DATA(A20_MARK, PORT121_FN1),
+ PINMUX_DATA(MSIOF0_RSCK_MARK, PORT121_FN2),
+ PINMUX_DATA(MSIOF1_TSCK_PORT121_MARK, PORT121_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(IRQ6_PORT121_MARK, PORT121_FN0, MSEL1CR_6_0),
+
+ /* Port122 */
+ PINMUX_DATA(A19_MARK, PORT122_FN1),
+ PINMUX_DATA(MSIOF0_RXD_MARK, PORT122_FN2),
+
+ /* Port123 */
+ PINMUX_DATA(A18_MARK, PORT123_FN1),
+ PINMUX_DATA(MSIOF0_TSCK_MARK, PORT123_FN2),
+
+ /* Port124 */
+ PINMUX_DATA(A17_MARK, PORT124_FN1),
+ PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT124_FN2),
+
+ /* Port125 - Port141 Function */
+ PINMUX_DATA(A16_MARK, PORT125_FN1),
+ PINMUX_DATA(A15_MARK, PORT126_FN1),
+ PINMUX_DATA(A14_MARK, PORT127_FN1),
+ PINMUX_DATA(A13_MARK, PORT128_FN1),
+ PINMUX_DATA(A12_MARK, PORT129_FN1),
+ PINMUX_DATA(A11_MARK, PORT130_FN1),
+ PINMUX_DATA(A10_MARK, PORT131_FN1),
+ PINMUX_DATA(A9_MARK, PORT132_FN1),
+ PINMUX_DATA(A8_MARK, PORT133_FN1),
+ PINMUX_DATA(A7_MARK, PORT134_FN1),
+ PINMUX_DATA(A6_MARK, PORT135_FN1),
+ PINMUX_DATA(A5_FCDE_MARK, PORT136_FN1),
+ PINMUX_DATA(A4_FOE_MARK, PORT137_FN1),
+ PINMUX_DATA(A3_MARK, PORT138_FN1),
+ PINMUX_DATA(A2_MARK, PORT139_FN1),
+ PINMUX_DATA(A1_MARK, PORT140_FN1),
+ PINMUX_DATA(CKO_MARK, PORT141_FN1),
+
+ /* Port142 - Port157 Function1 */
+ PINMUX_DATA(D15_NAF15_MARK, PORT142_FN1),
+ PINMUX_DATA(D14_NAF14_MARK, PORT143_FN1),
+ PINMUX_DATA(D13_NAF13_MARK, PORT144_FN1),
+ PINMUX_DATA(D12_NAF12_MARK, PORT145_FN1),
+ PINMUX_DATA(D11_NAF11_MARK, PORT146_FN1),
+ PINMUX_DATA(D10_NAF10_MARK, PORT147_FN1),
+ PINMUX_DATA(D9_NAF9_MARK, PORT148_FN1),
+ PINMUX_DATA(D8_NAF8_MARK, PORT149_FN1),
+ PINMUX_DATA(D7_NAF7_MARK, PORT150_FN1),
+ PINMUX_DATA(D6_NAF6_MARK, PORT151_FN1),
+ PINMUX_DATA(D5_NAF5_MARK, PORT152_FN1),
+ PINMUX_DATA(D4_NAF4_MARK, PORT153_FN1),
+ PINMUX_DATA(D3_NAF3_MARK, PORT154_FN1),
+ PINMUX_DATA(D2_NAF2_MARK, PORT155_FN1),
+ PINMUX_DATA(D1_NAF1_MARK, PORT156_FN1),
+ PINMUX_DATA(D0_NAF0_MARK, PORT157_FN1),
+
+ /* Port142 - Port149 Function3 */
+ PINMUX_DATA(MMC1_D7_PORT142_MARK, PORT142_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D6_PORT143_MARK, PORT143_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D5_PORT144_MARK, PORT144_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D4_PORT145_MARK, PORT145_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D3_PORT146_MARK, PORT146_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D2_PORT147_MARK, PORT147_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D1_PORT148_MARK, PORT148_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMC1_D0_PORT149_MARK, PORT149_FN3, MSEL4CR_15_1),
+
+ /* Port158 */
+ PINMUX_DATA(D31_MARK, PORT158_FN1),
+ PINMUX_DATA(SCIFA3_SCK_PORT158_MARK, PORT158_FN2, MSEL5CR_8_1),
+ PINMUX_DATA(RMII_REF125CK_MARK, PORT158_FN3),
+ PINMUX_DATA(LCD0_D21_PORT158_MARK, PORT158_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(IRDA_FIRSEL_MARK, PORT158_FN5),
+ PINMUX_DATA(IDE_D15_MARK, PORT158_FN6),
+
+ /* Port159 */
+ PINMUX_DATA(D30_MARK, PORT159_FN1),
+ PINMUX_DATA(SCIFA3_RXD_PORT159_MARK, PORT159_FN2, MSEL5CR_8_1),
+ PINMUX_DATA(RMII_REF50CK_MARK, PORT159_FN3),
+ PINMUX_DATA(LCD0_D23_PORT159_MARK, PORT159_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(IDE_D14_MARK, PORT159_FN6),
+
+ /* Port160 */
+ PINMUX_DATA(D29_MARK, PORT160_FN1),
+ PINMUX_DATA(SCIFA3_TXD_PORT160_MARK, PORT160_FN2, MSEL5CR_8_1),
+ PINMUX_DATA(LCD0_D22_PORT160_MARK, PORT160_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(VIO1_HD_MARK, PORT160_FN5),
+ PINMUX_DATA(IDE_D13_MARK, PORT160_FN6),
+
+ /* Port161 */
+ PINMUX_DATA(D28_MARK, PORT161_FN1),
+ PINMUX_DATA(SCIFA3_RTS_PORT161_MARK, PORT161_FN2, MSEL5CR_8_1),
+ PINMUX_DATA(ET_RX_DV_MARK, PORT161_FN3),
+ PINMUX_DATA(LCD0_D20_PORT161_MARK, PORT161_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(IRDA_IN_MARK, PORT161_FN5),
+ PINMUX_DATA(IDE_D12_MARK, PORT161_FN6),
+
+ /* Port162 */
+ PINMUX_DATA(D27_MARK, PORT162_FN1),
+ PINMUX_DATA(SCIFA3_CTS_PORT162_MARK, PORT162_FN2, MSEL5CR_8_1),
+ PINMUX_DATA(LCD0_D19_PORT162_MARK, PORT162_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(IRDA_OUT_MARK, PORT162_FN5),
+ PINMUX_DATA(IDE_D11_MARK, PORT162_FN6),
+
+ /* Port163 */
+ PINMUX_DATA(D26_MARK, PORT163_FN1),
+ PINMUX_DATA(MSIOF2_SS2_MARK, PORT163_FN2),
+ PINMUX_DATA(ET_COL_MARK, PORT163_FN3),
+ PINMUX_DATA(LCD0_D18_PORT163_MARK, PORT163_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(IROUT_MARK, PORT163_FN5),
+ PINMUX_DATA(IDE_D10_MARK, PORT163_FN6),
+
+ /* Port164 */
+ PINMUX_DATA(D25_MARK, PORT164_FN1),
+ PINMUX_DATA(MSIOF2_TSYNC_MARK, PORT164_FN2),
+ PINMUX_DATA(ET_PHY_INT_MARK, PORT164_FN3),
+ PINMUX_DATA(LCD0_RD_MARK, PORT164_FN4),
+ PINMUX_DATA(IDE_D9_MARK, PORT164_FN6),
+
+ /* Port165 */
+ PINMUX_DATA(D24_MARK, PORT165_FN1),
+ PINMUX_DATA(MSIOF2_RXD_MARK, PORT165_FN2),
+ PINMUX_DATA(LCD0_LCLK_PORT165_MARK, PORT165_FN4, MSEL5CR_6_1),
+ PINMUX_DATA(IDE_D8_MARK, PORT165_FN6),
+
+ /* Port166 - Port171 Function1 */
+ PINMUX_DATA(D21_MARK, PORT166_FN1),
+ PINMUX_DATA(D20_MARK, PORT167_FN1),
+ PINMUX_DATA(D19_MARK, PORT168_FN1),
+ PINMUX_DATA(D18_MARK, PORT169_FN1),
+ PINMUX_DATA(D17_MARK, PORT170_FN1),
+ PINMUX_DATA(D16_MARK, PORT171_FN1),
+
+ /* Port166 - Port171 Function3 */
+ PINMUX_DATA(ET_ETXD5_MARK, PORT166_FN3),
+ PINMUX_DATA(ET_ETXD4_MARK, PORT167_FN3),
+ PINMUX_DATA(ET_ETXD3_MARK, PORT168_FN3),
+ PINMUX_DATA(ET_ETXD2_MARK, PORT169_FN3),
+ PINMUX_DATA(ET_ETXD1_MARK, PORT170_FN3),
+ PINMUX_DATA(ET_ETXD0_MARK, PORT171_FN3),
+
+ /* Port166 - Port171 Function6 */
+ PINMUX_DATA(IDE_D5_MARK, PORT166_FN6),
+ PINMUX_DATA(IDE_D4_MARK, PORT167_FN6),
+ PINMUX_DATA(IDE_D3_MARK, PORT168_FN6),
+ PINMUX_DATA(IDE_D2_MARK, PORT169_FN6),
+ PINMUX_DATA(IDE_D1_MARK, PORT170_FN6),
+ PINMUX_DATA(IDE_D0_MARK, PORT171_FN6),
+
+ /* Port167 - Port171 IRQ */
+ PINMUX_DATA(IRQ31_PORT167_MARK, PORT167_FN0, MSEL1CR_31_0),
+ PINMUX_DATA(IRQ27_PORT168_MARK, PORT168_FN0, MSEL1CR_27_0),
+ PINMUX_DATA(IRQ28_PORT169_MARK, PORT169_FN0, MSEL1CR_28_0),
+ PINMUX_DATA(IRQ29_PORT170_MARK, PORT170_FN0, MSEL1CR_29_0),
+ PINMUX_DATA(IRQ30_PORT171_MARK, PORT171_FN0, MSEL1CR_30_0),
+
+ /* Port172 */
+ PINMUX_DATA(D23_MARK, PORT172_FN1),
+ PINMUX_DATA(SCIFB_RTS_PORT172_MARK, PORT172_FN2, MSEL5CR_17_1),
+ PINMUX_DATA(ET_ETXD7_MARK, PORT172_FN3),
+ PINMUX_DATA(IDE_D7_MARK, PORT172_FN6),
+ PINMUX_DATA(IRQ4_PORT172_MARK, PORT172_FN0, MSEL1CR_4_1),
+
+ /* Port173 */
+ PINMUX_DATA(D22_MARK, PORT173_FN1),
+ PINMUX_DATA(SCIFB_CTS_PORT173_MARK, PORT173_FN2, MSEL5CR_17_1),
+ PINMUX_DATA(ET_ETXD6_MARK, PORT173_FN3),
+ PINMUX_DATA(IDE_D6_MARK, PORT173_FN6),
+ PINMUX_DATA(IRQ6_PORT173_MARK, PORT173_FN0, MSEL1CR_6_1),
+
+ /* Port174 */
+ PINMUX_DATA(A26_MARK, PORT174_FN1),
+ PINMUX_DATA(MSIOF0_TXD_MARK, PORT174_FN2),
+ PINMUX_DATA(ET_RX_CLK_MARK, PORT174_FN3),
+ PINMUX_DATA(SCIFA3_RXD_PORT174_MARK, PORT174_FN4, MSEL5CR_8_0),
+
+ /* Port175 */
+ PINMUX_DATA(A0_MARK, PORT175_FN1),
+ PINMUX_DATA(BS_MARK, PORT175_FN2),
+ PINMUX_DATA(ET_WOL_MARK, PORT175_FN3),
+ PINMUX_DATA(SCIFA3_TXD_PORT175_MARK, PORT175_FN4, MSEL5CR_8_0),
+
+ /* Port176 */
+ PINMUX_DATA(ET_GTX_CLK_MARK, PORT176_FN3),
+
+ /* Port177 */
+ PINMUX_DATA(WAIT_PORT177_MARK, PORT177_FN1, MSEL5CR_2_0),
+ PINMUX_DATA(ET_LINK_MARK, PORT177_FN3),
+ PINMUX_DATA(IDE_IOWR_MARK, PORT177_FN6),
+ PINMUX_DATA(SDHI2_WP_PORT177_MARK, PORT177_FN7, MSEL5CR_19_1),
+
+ /* Port178 */
+ PINMUX_DATA(VIO0_D12_MARK, PORT178_FN1),
+ PINMUX_DATA(VIO1_D4_MARK, PORT178_FN5),
+ PINMUX_DATA(IDE_IORD_MARK, PORT178_FN6),
+
+ /* Port179 */
+ PINMUX_DATA(VIO0_D11_MARK, PORT179_FN1),
+ PINMUX_DATA(VIO1_D3_MARK, PORT179_FN5),
+ PINMUX_DATA(IDE_IORDY_MARK, PORT179_FN6),
+
+ /* Port180 */
+ PINMUX_DATA(VIO0_D10_MARK, PORT180_FN1),
+ PINMUX_DATA(TPU0TO3_MARK, PORT180_FN4),
+ PINMUX_DATA(VIO1_D2_MARK, PORT180_FN5),
+ PINMUX_DATA(IDE_INT_MARK, PORT180_FN6),
+ PINMUX_DATA(IRQ24_MARK, PORT180_FN0),
+
+ /* Port181 */
+ PINMUX_DATA(VIO0_D9_MARK, PORT181_FN1),
+ PINMUX_DATA(VIO1_D1_MARK, PORT181_FN5),
+ PINMUX_DATA(IDE_RST_MARK, PORT181_FN6),
+
+ /* Port182 */
+ PINMUX_DATA(VIO0_D8_MARK, PORT182_FN1),
+ PINMUX_DATA(VIO1_D0_MARK, PORT182_FN5),
+ PINMUX_DATA(IDE_DIRECTION_MARK, PORT182_FN6),
+
+ /* Port183 */
+ PINMUX_DATA(DREQ1_MARK, PORT183_FN1),
+ PINMUX_DATA(BBIF2_TXD2_PORT183_MARK, PORT183_FN2, MSEL5CR_0_1),
+ PINMUX_DATA(ET_TX_EN_MARK, PORT183_FN3),
+
+ /* Port184 */
+ PINMUX_DATA(DACK1_MARK, PORT184_FN1),
+ PINMUX_DATA(BBIF2_TSYNC2_PORT184_MARK, PORT184_FN2, MSEL5CR_0_1),
+ PINMUX_DATA(ET_TX_CLK_MARK, PORT184_FN3),
+
+ /* Port185 - Port192 Function1 */
+ PINMUX_DATA(SCIFA1_SCK_MARK, PORT185_FN1),
+ PINMUX_DATA(SCIFB_RTS_PORT186_MARK, PORT186_FN1, MSEL5CR_17_0),
+ PINMUX_DATA(SCIFB_CTS_PORT187_MARK, PORT187_FN1, MSEL5CR_17_0),
+ PINMUX_DATA(SCIFA0_SCK_MARK, PORT188_FN1),
+ PINMUX_DATA(SCIFB_SCK_PORT190_MARK, PORT190_FN1, MSEL5CR_17_0),
+ PINMUX_DATA(SCIFB_RXD_PORT191_MARK, PORT191_FN1, MSEL5CR_17_0),
+ PINMUX_DATA(SCIFB_TXD_PORT192_MARK, PORT192_FN1, MSEL5CR_17_0),
+
+ /* Port185 - Port192 Function3 */
+ PINMUX_DATA(ET_ERXD0_MARK, PORT185_FN3),
+ PINMUX_DATA(ET_ERXD1_MARK, PORT186_FN3),
+ PINMUX_DATA(ET_ERXD2_MARK, PORT187_FN3),
+ PINMUX_DATA(ET_ERXD3_MARK, PORT188_FN3),
+ PINMUX_DATA(ET_ERXD4_MARK, PORT189_FN3),
+ PINMUX_DATA(ET_ERXD5_MARK, PORT190_FN3),
+ PINMUX_DATA(ET_ERXD6_MARK, PORT191_FN3),
+ PINMUX_DATA(ET_ERXD7_MARK, PORT192_FN3),
+
+ /* Port185 - Port192 Function6 */
+ PINMUX_DATA(STP1_IPCLK_MARK, PORT185_FN6),
+ PINMUX_DATA(STP1_IPD0_PORT186_MARK, PORT186_FN6, MSEL5CR_23_0),
+ PINMUX_DATA(STP1_IPEN_PORT187_MARK, PORT187_FN6, MSEL5CR_23_0),
+ PINMUX_DATA(STP1_IPSYNC_MARK, PORT188_FN6),
+ PINMUX_DATA(STP0_IPCLK_MARK, PORT189_FN6),
+ PINMUX_DATA(STP0_IPD0_MARK, PORT190_FN6),
+ PINMUX_DATA(STP0_IPEN_MARK, PORT191_FN6),
+ PINMUX_DATA(STP0_IPSYNC_MARK, PORT192_FN6),
+
+ /* Port193 */
+ PINMUX_DATA(SCIFA0_CTS_MARK, PORT193_FN1),
+ PINMUX_DATA(RMII_CRS_DV_MARK, PORT193_FN3),
+ PINMUX_DATA(STP1_IPEN_PORT193_MARK, PORT193_FN6, MSEL5CR_23_1), /* ? */
+ PINMUX_DATA(LCD1_D17_MARK, PORT193_FN7),
+
+ /* Port194 */
+ PINMUX_DATA(SCIFA0_RTS_MARK, PORT194_FN1),
+ PINMUX_DATA(RMII_RX_ER_MARK, PORT194_FN3),
+ PINMUX_DATA(STP1_IPD0_PORT194_MARK, PORT194_FN6, MSEL5CR_23_1), /* ? */
+ PINMUX_DATA(LCD1_D16_MARK, PORT194_FN7),
+
+ /* Port195 */
+ PINMUX_DATA(SCIFA1_RXD_MARK, PORT195_FN1),
+ PINMUX_DATA(RMII_RXD0_MARK, PORT195_FN3),
+ PINMUX_DATA(STP1_IPD3_MARK, PORT195_FN6),
+ PINMUX_DATA(LCD1_D15_MARK, PORT195_FN7),
+
+ /* Port196 */
+ PINMUX_DATA(SCIFA1_TXD_MARK, PORT196_FN1),
+ PINMUX_DATA(RMII_RXD1_MARK, PORT196_FN3),
+ PINMUX_DATA(STP1_IPD2_MARK, PORT196_FN6),
+ PINMUX_DATA(LCD1_D14_MARK, PORT196_FN7),
+
+ /* Port197 */
+ PINMUX_DATA(SCIFA0_RXD_MARK, PORT197_FN1),
+ PINMUX_DATA(VIO1_CLK_MARK, PORT197_FN5),
+ PINMUX_DATA(STP1_IPD5_MARK, PORT197_FN6),
+ PINMUX_DATA(LCD1_D19_MARK, PORT197_FN7),
+
+ /* Port198 */
+ PINMUX_DATA(SCIFA0_TXD_MARK, PORT198_FN1),
+ PINMUX_DATA(VIO1_VD_MARK, PORT198_FN5),
+ PINMUX_DATA(STP1_IPD4_MARK, PORT198_FN6),
+ PINMUX_DATA(LCD1_D18_MARK, PORT198_FN7),
+
+ /* Port199 */
+ PINMUX_DATA(MEMC_NWE_MARK, PORT199_FN1),
+ PINMUX_DATA(SCIFA2_SCK_PORT199_MARK, PORT199_FN2, MSEL5CR_7_1),
+ PINMUX_DATA(RMII_TX_EN_MARK, PORT199_FN3),
+ PINMUX_DATA(SIM_D_PORT199_MARK, PORT199_FN4, MSEL5CR_21_1),
+ PINMUX_DATA(STP1_IPD1_MARK, PORT199_FN6),
+ PINMUX_DATA(LCD1_D13_MARK, PORT199_FN7),
+
+ /* Port200 */
+ PINMUX_DATA(MEMC_NOE_MARK, PORT200_FN1),
+ PINMUX_DATA(SCIFA2_RXD_MARK, PORT200_FN2),
+ PINMUX_DATA(RMII_TXD0_MARK, PORT200_FN3),
+ PINMUX_DATA(STP0_IPD7_MARK, PORT200_FN6),
+ PINMUX_DATA(LCD1_D12_MARK, PORT200_FN7),
+
+ /* Port201 */
+ PINMUX_DATA(MEMC_WAIT_MARK, PORT201_FN1, MSEL4CR_6_0),
+ PINMUX_DATA(MEMC_DREQ1_MARK, PORT201_FN1, MSEL4CR_6_1),
+
+ PINMUX_DATA(SCIFA2_TXD_MARK, PORT201_FN2),
+ PINMUX_DATA(RMII_TXD1_MARK, PORT201_FN3),
+ PINMUX_DATA(STP0_IPD6_MARK, PORT201_FN6),
+ PINMUX_DATA(LCD1_D11_MARK, PORT201_FN7),
+
+ /* Port202 */
+ PINMUX_DATA(MEMC_BUSCLK_MARK, PORT202_FN1, MSEL4CR_6_0),
+ PINMUX_DATA(MEMC_A0_MARK, PORT202_FN1, MSEL4CR_6_1),
+
+ PINMUX_DATA(MSIOF1_SS2_PORT202_MARK, PORT202_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(RMII_MDC_MARK, PORT202_FN3),
+ PINMUX_DATA(TPU0TO2_PORT202_MARK, PORT202_FN4, MSEL5CR_25_1),
+ PINMUX_DATA(IDE_CS0_MARK, PORT202_FN6),
+ PINMUX_DATA(SDHI2_CD_PORT202_MARK, PORT202_FN7, MSEL5CR_19_1),
+ PINMUX_DATA(IRQ21_MARK, PORT202_FN0),
+
+ /* Port203 - Port208 Function1 */
+ PINMUX_DATA(SDHI2_CLK_MARK, PORT203_FN1),
+ PINMUX_DATA(SDHI2_CMD_MARK, PORT204_FN1),
+ PINMUX_DATA(SDHI2_D0_MARK, PORT205_FN1),
+ PINMUX_DATA(SDHI2_D1_MARK, PORT206_FN1),
+ PINMUX_DATA(SDHI2_D2_MARK, PORT207_FN1),
+ PINMUX_DATA(SDHI2_D3_MARK, PORT208_FN1),
+
+ /* Port203 - Port208 Function3 */
+ PINMUX_DATA(ET_TX_ER_MARK, PORT203_FN3),
+ PINMUX_DATA(ET_RX_ER_MARK, PORT204_FN3),
+ PINMUX_DATA(ET_CRS_MARK, PORT205_FN3),
+ PINMUX_DATA(ET_MDC_MARK, PORT206_FN3),
+ PINMUX_DATA(ET_MDIO_MARK, PORT207_FN3),
+ PINMUX_DATA(RMII_MDIO_MARK, PORT208_FN3),
+
+ /* Port203 - Port208 Function6 */
+ PINMUX_DATA(IDE_A2_MARK, PORT203_FN6),
+ PINMUX_DATA(IDE_A1_MARK, PORT204_FN6),
+ PINMUX_DATA(IDE_A0_MARK, PORT205_FN6),
+ PINMUX_DATA(IDE_IODACK_MARK, PORT206_FN6),
+ PINMUX_DATA(IDE_IODREQ_MARK, PORT207_FN6),
+ PINMUX_DATA(IDE_CS1_MARK, PORT208_FN6),
+
+ /* Port203 - Port208 Function7 */
+ PINMUX_DATA(SCIFA4_TXD_PORT203_MARK, PORT203_FN7, MSEL5CR_12_0, MSEL5CR_11_1),
+ PINMUX_DATA(SCIFA4_RXD_PORT204_MARK, PORT204_FN7, MSEL5CR_12_0, MSEL5CR_11_1),
+ PINMUX_DATA(SCIFA4_SCK_PORT205_MARK, PORT205_FN7, MSEL5CR_10_1),
+ PINMUX_DATA(SCIFA5_SCK_PORT206_MARK, PORT206_FN7, MSEL5CR_13_1),
+ PINMUX_DATA(SCIFA5_RXD_PORT207_MARK, PORT207_FN7, MSEL5CR_15_0, MSEL5CR_14_1),
+ PINMUX_DATA(SCIFA5_TXD_PORT208_MARK, PORT208_FN7, MSEL5CR_15_0, MSEL5CR_14_1),
+
+ /* Port209 */
+ PINMUX_DATA(VBUS_MARK, PORT209_FN1),
+ PINMUX_DATA(IRQ7_PORT209_MARK, PORT209_FN0, MSEL1CR_7_0),
+
+ /* Port210 */
+ PINMUX_DATA(IRQ9_PORT210_MARK, PORT210_FN0, MSEL1CR_9_1),
+ PINMUX_DATA(HDMI_HPD_MARK, PORT210_FN1),
+
+ /* Port211 */
+ PINMUX_DATA(IRQ16_PORT211_MARK, PORT211_FN0, MSEL1CR_16_1),
+ PINMUX_DATA(HDMI_CEC_MARK, PORT211_FN1),
+
+ /* LCDC select */
+ PINMUX_DATA(LCDC0_SELECT_MARK, MSEL3CR_6_0),
+ PINMUX_DATA(LCDC1_SELECT_MARK, MSEL3CR_6_1),
+
+ /* SDENC */
+ PINMUX_DATA(SDENC_CPG_MARK, MSEL4CR_19_0),
+ PINMUX_DATA(SDENC_DV_CLKI_MARK, MSEL4CR_19_1),
+
+ /* SYSC */
+ PINMUX_DATA(RESETP_PULLUP_MARK, MSEL4CR_4_0),
+ PINMUX_DATA(RESETP_PLAIN_MARK, MSEL4CR_4_1),
+
+ /* DEBUG */
+ PINMUX_DATA(EDEBGREQ_PULLDOWN_MARK, MSEL4CR_1_0),
+ PINMUX_DATA(EDEBGREQ_PULLUP_MARK, MSEL4CR_1_1),
+
+ PINMUX_DATA(TRACEAUD_FROM_VIO_MARK, MSEL5CR_30_0, MSEL5CR_29_0),
+ PINMUX_DATA(TRACEAUD_FROM_LCDC0_MARK, MSEL5CR_30_0, MSEL5CR_29_1),
+ PINMUX_DATA(TRACEAUD_FROM_MEMC_MARK, MSEL5CR_30_1, MSEL5CR_29_0),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+
+ /* PORT */
+ GPIO_PORT_ALL(),
+
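+ /*
+  * Descriptive note: GPIO_FN(x) publishes the corresponding x_MARK
+  * function above as a requestable entry in this GPIO table (the macro
+  * itself comes from the shared sh_pfc header).
+  */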
+ /* IRQ */
+ GPIO_FN(IRQ0_PORT2), GPIO_FN(IRQ0_PORT13),
+ GPIO_FN(IRQ1),
+ GPIO_FN(IRQ2_PORT11), GPIO_FN(IRQ2_PORT12),
+ GPIO_FN(IRQ3_PORT10), GPIO_FN(IRQ3_PORT14),
+ GPIO_FN(IRQ4_PORT15), GPIO_FN(IRQ4_PORT172),
+ GPIO_FN(IRQ5_PORT0), GPIO_FN(IRQ5_PORT1),
+ GPIO_FN(IRQ6_PORT121), GPIO_FN(IRQ6_PORT173),
+ GPIO_FN(IRQ7_PORT120), GPIO_FN(IRQ7_PORT209),
+ GPIO_FN(IRQ8),
+ GPIO_FN(IRQ9_PORT118), GPIO_FN(IRQ9_PORT210),
+ GPIO_FN(IRQ10),
+ GPIO_FN(IRQ11),
+ GPIO_FN(IRQ12_PORT42), GPIO_FN(IRQ12_PORT97),
+ GPIO_FN(IRQ13_PORT64), GPIO_FN(IRQ13_PORT98),
+ GPIO_FN(IRQ14_PORT63), GPIO_FN(IRQ14_PORT99),
+ GPIO_FN(IRQ15_PORT62), GPIO_FN(IRQ15_PORT100),
+ GPIO_FN(IRQ16_PORT68), GPIO_FN(IRQ16_PORT211),
+ GPIO_FN(IRQ17),
+ GPIO_FN(IRQ18),
+ GPIO_FN(IRQ19),
+ GPIO_FN(IRQ20),
+ GPIO_FN(IRQ21),
+ GPIO_FN(IRQ22),
+ GPIO_FN(IRQ23),
+ GPIO_FN(IRQ24),
+ GPIO_FN(IRQ25),
+ GPIO_FN(IRQ26_PORT58), GPIO_FN(IRQ26_PORT81),
+ GPIO_FN(IRQ27_PORT57), GPIO_FN(IRQ27_PORT168),
+ GPIO_FN(IRQ28_PORT56), GPIO_FN(IRQ28_PORT169),
+ GPIO_FN(IRQ29_PORT50), GPIO_FN(IRQ29_PORT170),
+ GPIO_FN(IRQ30_PORT49), GPIO_FN(IRQ30_PORT171),
+ GPIO_FN(IRQ31_PORT41), GPIO_FN(IRQ31_PORT167),
+
+ /* Function */
+
+ /* DBGT */
+ GPIO_FN(DBGMDT2), GPIO_FN(DBGMDT1), GPIO_FN(DBGMDT0),
+ GPIO_FN(DBGMD10), GPIO_FN(DBGMD11), GPIO_FN(DBGMD20),
+ GPIO_FN(DBGMD21),
+
+ /* FSI-A */
+ GPIO_FN(FSIAISLD_PORT0), /* FSIAISLD Port 0/5 */
+ GPIO_FN(FSIAISLD_PORT5),
+ GPIO_FN(FSIASPDIF_PORT9), /* FSIASPDIF Port 9/18 */
+ GPIO_FN(FSIASPDIF_PORT18),
+ GPIO_FN(FSIAOSLD1), GPIO_FN(FSIAOSLD2), GPIO_FN(FSIAOLR),
+ GPIO_FN(FSIAOBT), GPIO_FN(FSIAOSLD), GPIO_FN(FSIAOMC),
+ GPIO_FN(FSIACK), GPIO_FN(FSIAILR), GPIO_FN(FSIAIBT),
+
+ /* FSI-B */
+ GPIO_FN(FSIBCK),
+
+ /* FMSI */
+ GPIO_FN(FMSISLD_PORT1), /* FMSISLD Port 1/6 */
+ GPIO_FN(FMSISLD_PORT6),
+ GPIO_FN(FMSIILR), GPIO_FN(FMSIIBT), GPIO_FN(FMSIOLR),
+ GPIO_FN(FMSIOBT), GPIO_FN(FMSICK), GPIO_FN(FMSOILR),
+ GPIO_FN(FMSOIBT), GPIO_FN(FMSOOLR), GPIO_FN(FMSOOBT),
+ GPIO_FN(FMSOSLD), GPIO_FN(FMSOCK),
+
+ /* SCIFA0 */
+ GPIO_FN(SCIFA0_SCK), GPIO_FN(SCIFA0_CTS), GPIO_FN(SCIFA0_RTS),
+ GPIO_FN(SCIFA0_RXD), GPIO_FN(SCIFA0_TXD),
+
+ /* SCIFA1 */
+ GPIO_FN(SCIFA1_CTS), GPIO_FN(SCIFA1_SCK),
+ GPIO_FN(SCIFA1_RXD), GPIO_FN(SCIFA1_TXD), GPIO_FN(SCIFA1_RTS),
+
+ /* SCIFA2 */
+ GPIO_FN(SCIFA2_SCK_PORT22), /* SCIFA2_SCK Port 22/199 */
+ GPIO_FN(SCIFA2_SCK_PORT199),
+ GPIO_FN(SCIFA2_RXD), GPIO_FN(SCIFA2_TXD),
+ GPIO_FN(SCIFA2_CTS), GPIO_FN(SCIFA2_RTS),
+
+ /* SCIFA3 */
+ GPIO_FN(SCIFA3_RTS_PORT105), /* MSEL5CR_8_0 */
+ GPIO_FN(SCIFA3_SCK_PORT116),
+ GPIO_FN(SCIFA3_CTS_PORT117),
+ GPIO_FN(SCIFA3_RXD_PORT174),
+ GPIO_FN(SCIFA3_TXD_PORT175),
+
+ GPIO_FN(SCIFA3_RTS_PORT161), /* MSEL5CR_8_1 */
+ GPIO_FN(SCIFA3_SCK_PORT158),
+ GPIO_FN(SCIFA3_CTS_PORT162),
+ GPIO_FN(SCIFA3_RXD_PORT159),
+ GPIO_FN(SCIFA3_TXD_PORT160),
+
+ /* SCIFA4 */
+ GPIO_FN(SCIFA4_RXD_PORT12), /* MSEL5CR[12:11] = 00 */
+ GPIO_FN(SCIFA4_TXD_PORT13),
+
+ GPIO_FN(SCIFA4_RXD_PORT204), /* MSEL5CR[12:11] = 01 */
+ GPIO_FN(SCIFA4_TXD_PORT203),
+
+ GPIO_FN(SCIFA4_RXD_PORT94), /* MSEL5CR[12:11] = 10 */
+ GPIO_FN(SCIFA4_TXD_PORT93),
+
+ GPIO_FN(SCIFA4_SCK_PORT21), /* SCIFA4_SCK Port 21/205 */
+ GPIO_FN(SCIFA4_SCK_PORT205),
+
+ /* SCIFA5 */
+ GPIO_FN(SCIFA5_TXD_PORT20), /* MSEL5CR[15:14] = 00 */
+ GPIO_FN(SCIFA5_RXD_PORT10),
+
+ GPIO_FN(SCIFA5_RXD_PORT207), /* MSEL5CR[15:14] = 01 */
+ GPIO_FN(SCIFA5_TXD_PORT208),
+
+ GPIO_FN(SCIFA5_TXD_PORT91), /* MSEL5CR[15:14] = 10 */
+ GPIO_FN(SCIFA5_RXD_PORT92),
+
+ GPIO_FN(SCIFA5_SCK_PORT23), /* SCIFA5_SCK Port 23/206 */
+ GPIO_FN(SCIFA5_SCK_PORT206),
+
+ /* SCIFA6 */
+ GPIO_FN(SCIFA6_SCK), GPIO_FN(SCIFA6_RXD), GPIO_FN(SCIFA6_TXD),
+
+ /* SCIFA7 */
+ GPIO_FN(SCIFA7_TXD), GPIO_FN(SCIFA7_RXD),
+
+ /* SCIFB */
+ GPIO_FN(SCIFB_SCK_PORT190), /* MSEL5CR_17_0 */
+ GPIO_FN(SCIFB_RXD_PORT191),
+ GPIO_FN(SCIFB_TXD_PORT192),
+ GPIO_FN(SCIFB_RTS_PORT186),
+ GPIO_FN(SCIFB_CTS_PORT187),
+
+ GPIO_FN(SCIFB_SCK_PORT2), /* MSEL5CR_17_1 */
+ GPIO_FN(SCIFB_RXD_PORT3),
+ GPIO_FN(SCIFB_TXD_PORT4),
+ GPIO_FN(SCIFB_RTS_PORT172),
+ GPIO_FN(SCIFB_CTS_PORT173),
+
+ /* LCD0 */
+ GPIO_FN(LCD0_D0), GPIO_FN(LCD0_D1), GPIO_FN(LCD0_D2),
+ GPIO_FN(LCD0_D3), GPIO_FN(LCD0_D4), GPIO_FN(LCD0_D5),
+ GPIO_FN(LCD0_D6), GPIO_FN(LCD0_D7), GPIO_FN(LCD0_D8),
+ GPIO_FN(LCD0_D9), GPIO_FN(LCD0_D10), GPIO_FN(LCD0_D11),
+ GPIO_FN(LCD0_D12), GPIO_FN(LCD0_D13), GPIO_FN(LCD0_D14),
+ GPIO_FN(LCD0_D15), GPIO_FN(LCD0_D16), GPIO_FN(LCD0_D17),
+ GPIO_FN(LCD0_DON), GPIO_FN(LCD0_VCPWC), GPIO_FN(LCD0_VEPWC),
+ GPIO_FN(LCD0_DCK), GPIO_FN(LCD0_VSYN),
+ GPIO_FN(LCD0_HSYN), GPIO_FN(LCD0_DISP),
+ GPIO_FN(LCD0_WR), GPIO_FN(LCD0_RD),
+ GPIO_FN(LCD0_CS), GPIO_FN(LCD0_RS),
+
+ GPIO_FN(LCD0_D18_PORT163), GPIO_FN(LCD0_D19_PORT162),
+ GPIO_FN(LCD0_D20_PORT161), GPIO_FN(LCD0_D21_PORT158),
+ GPIO_FN(LCD0_D22_PORT160), GPIO_FN(LCD0_D23_PORT159),
+ GPIO_FN(LCD0_LCLK_PORT165), /* MSEL5CR_6_1 */
+
+ GPIO_FN(LCD0_D18_PORT40), GPIO_FN(LCD0_D19_PORT4),
+ GPIO_FN(LCD0_D20_PORT3), GPIO_FN(LCD0_D21_PORT2),
+ GPIO_FN(LCD0_D22_PORT0), GPIO_FN(LCD0_D23_PORT1),
+ GPIO_FN(LCD0_LCLK_PORT102), /* MSEL5CR_6_0 */
+
+ /* LCD1 */
+ GPIO_FN(LCD1_D0), GPIO_FN(LCD1_D1), GPIO_FN(LCD1_D2),
+ GPIO_FN(LCD1_D3), GPIO_FN(LCD1_D4), GPIO_FN(LCD1_D5),
+ GPIO_FN(LCD1_D6), GPIO_FN(LCD1_D7), GPIO_FN(LCD1_D8),
+ GPIO_FN(LCD1_D9), GPIO_FN(LCD1_D10), GPIO_FN(LCD1_D11),
+ GPIO_FN(LCD1_D12), GPIO_FN(LCD1_D13), GPIO_FN(LCD1_D14),
+ GPIO_FN(LCD1_D15), GPIO_FN(LCD1_D16), GPIO_FN(LCD1_D17),
+ GPIO_FN(LCD1_D18), GPIO_FN(LCD1_D19), GPIO_FN(LCD1_D20),
+ GPIO_FN(LCD1_D21), GPIO_FN(LCD1_D22), GPIO_FN(LCD1_D23),
+ GPIO_FN(LCD1_RS), GPIO_FN(LCD1_RD), GPIO_FN(LCD1_CS),
+ GPIO_FN(LCD1_WR), GPIO_FN(LCD1_DCK), GPIO_FN(LCD1_DON),
+ GPIO_FN(LCD1_VCPWC), GPIO_FN(LCD1_LCLK), GPIO_FN(LCD1_HSYN),
+ GPIO_FN(LCD1_VSYN), GPIO_FN(LCD1_VEPWC), GPIO_FN(LCD1_DISP),
+
+ /* RSPI */
+ GPIO_FN(RSPI_SSL0_A), GPIO_FN(RSPI_SSL1_A), GPIO_FN(RSPI_SSL2_A),
+ GPIO_FN(RSPI_SSL3_A), GPIO_FN(RSPI_CK_A), GPIO_FN(RSPI_MOSI_A),
+ GPIO_FN(RSPI_MISO_A),
+
+ /* VIO CKO */
+ GPIO_FN(VIO_CKO1),
+ GPIO_FN(VIO_CKO2),
+ GPIO_FN(VIO_CKO_1),
+ GPIO_FN(VIO_CKO),
+
+ /* VIO0 */
+ GPIO_FN(VIO0_D0), GPIO_FN(VIO0_D1), GPIO_FN(VIO0_D2),
+ GPIO_FN(VIO0_D3), GPIO_FN(VIO0_D4), GPIO_FN(VIO0_D5),
+ GPIO_FN(VIO0_D6), GPIO_FN(VIO0_D7), GPIO_FN(VIO0_D8),
+ GPIO_FN(VIO0_D9), GPIO_FN(VIO0_D10), GPIO_FN(VIO0_D11),
+ GPIO_FN(VIO0_D12), GPIO_FN(VIO0_VD), GPIO_FN(VIO0_HD),
+ GPIO_FN(VIO0_CLK), GPIO_FN(VIO0_FIELD),
+
+ GPIO_FN(VIO0_D13_PORT26), /* MSEL5CR_27_0 */
+ GPIO_FN(VIO0_D14_PORT25),
+ GPIO_FN(VIO0_D15_PORT24),
+
+ GPIO_FN(VIO0_D13_PORT22), /* MSEL5CR_27_1 */
+ GPIO_FN(VIO0_D14_PORT95),
+ GPIO_FN(VIO0_D15_PORT96),
+
+ /* VIO1 */
+ GPIO_FN(VIO1_D0), GPIO_FN(VIO1_D1), GPIO_FN(VIO1_D2),
+ GPIO_FN(VIO1_D3), GPIO_FN(VIO1_D4), GPIO_FN(VIO1_D5),
+ GPIO_FN(VIO1_D6), GPIO_FN(VIO1_D7), GPIO_FN(VIO1_VD),
+ GPIO_FN(VIO1_HD), GPIO_FN(VIO1_CLK), GPIO_FN(VIO1_FIELD),
+
+ /* TPU0 */
+ GPIO_FN(TPU0TO0), GPIO_FN(TPU0TO1), GPIO_FN(TPU0TO3),
+ GPIO_FN(TPU0TO2_PORT66), /* TPU0TO2 Port 66/202 */
+ GPIO_FN(TPU0TO2_PORT202),
+
+ /* SSP1 0 */
+ GPIO_FN(STP0_IPD0), GPIO_FN(STP0_IPD1), GPIO_FN(STP0_IPD2),
+ GPIO_FN(STP0_IPD3), GPIO_FN(STP0_IPD4), GPIO_FN(STP0_IPD5),
+ GPIO_FN(STP0_IPD6), GPIO_FN(STP0_IPD7), GPIO_FN(STP0_IPEN),
+ GPIO_FN(STP0_IPCLK), GPIO_FN(STP0_IPSYNC),
+
+ /* SSP1 1 */
+ GPIO_FN(STP1_IPD1), GPIO_FN(STP1_IPD2), GPIO_FN(STP1_IPD3),
+ GPIO_FN(STP1_IPD4), GPIO_FN(STP1_IPD5), GPIO_FN(STP1_IPD6),
+ GPIO_FN(STP1_IPD7), GPIO_FN(STP1_IPCLK), GPIO_FN(STP1_IPSYNC),
+
+ GPIO_FN(STP1_IPD0_PORT186), /* MSEL5CR_23_0 */
+ GPIO_FN(STP1_IPEN_PORT187),
+
+ GPIO_FN(STP1_IPD0_PORT194), /* MSEL5CR_23_1 */
+ GPIO_FN(STP1_IPEN_PORT193),
+
+ /* SIM */
+ GPIO_FN(SIM_RST), GPIO_FN(SIM_CLK),
+ GPIO_FN(SIM_D_PORT22), /* SIM_D Port 22/199 */
+ GPIO_FN(SIM_D_PORT199),
+
+ /* SDHI0 */
+ GPIO_FN(SDHI0_D0), GPIO_FN(SDHI0_D1), GPIO_FN(SDHI0_D2),
+ GPIO_FN(SDHI0_D3), GPIO_FN(SDHI0_CD), GPIO_FN(SDHI0_WP),
+ GPIO_FN(SDHI0_CMD), GPIO_FN(SDHI0_CLK),
+
+ /* SDHI1 */
+ GPIO_FN(SDHI1_D0), GPIO_FN(SDHI1_D1), GPIO_FN(SDHI1_D2),
+ GPIO_FN(SDHI1_D3), GPIO_FN(SDHI1_CD), GPIO_FN(SDHI1_WP),
+ GPIO_FN(SDHI1_CMD), GPIO_FN(SDHI1_CLK),
+
+ /* SDHI2 */
+ GPIO_FN(SDHI2_D0), GPIO_FN(SDHI2_D1), GPIO_FN(SDHI2_D2),
+ GPIO_FN(SDHI2_D3), GPIO_FN(SDHI2_CLK), GPIO_FN(SDHI2_CMD),
+
+ GPIO_FN(SDHI2_CD_PORT24), /* MSEL5CR_19_0 */
+ GPIO_FN(SDHI2_WP_PORT25),
+
+ GPIO_FN(SDHI2_WP_PORT177), /* MSEL5CR_19_1 */
+ GPIO_FN(SDHI2_CD_PORT202),
+
+ /* MSIOF2 */
+ GPIO_FN(MSIOF2_TXD), GPIO_FN(MSIOF2_RXD), GPIO_FN(MSIOF2_TSCK),
+ GPIO_FN(MSIOF2_SS2), GPIO_FN(MSIOF2_TSYNC), GPIO_FN(MSIOF2_SS1),
+ GPIO_FN(MSIOF2_MCK1), GPIO_FN(MSIOF2_MCK0), GPIO_FN(MSIOF2_RSYNC),
+ GPIO_FN(MSIOF2_RSCK),
+
+ /* KEYSC */
+ GPIO_FN(KEYIN4), GPIO_FN(KEYIN5),
+ GPIO_FN(KEYIN6), GPIO_FN(KEYIN7),
+ GPIO_FN(KEYOUT0), GPIO_FN(KEYOUT1), GPIO_FN(KEYOUT2),
+ GPIO_FN(KEYOUT3), GPIO_FN(KEYOUT4), GPIO_FN(KEYOUT5),
+ GPIO_FN(KEYOUT6), GPIO_FN(KEYOUT7),
+
+ GPIO_FN(KEYIN0_PORT43), /* MSEL4CR_18_0 */
+ GPIO_FN(KEYIN1_PORT44),
+ GPIO_FN(KEYIN2_PORT45),
+ GPIO_FN(KEYIN3_PORT46),
+
+ GPIO_FN(KEYIN0_PORT58), /* MSEL4CR_18_1 */
+ GPIO_FN(KEYIN1_PORT57),
+ GPIO_FN(KEYIN2_PORT56),
+ GPIO_FN(KEYIN3_PORT55),
+
+ /* VOU */
+ GPIO_FN(DV_D0), GPIO_FN(DV_D1), GPIO_FN(DV_D2),
+ GPIO_FN(DV_D3), GPIO_FN(DV_D4), GPIO_FN(DV_D5),
+ GPIO_FN(DV_D6), GPIO_FN(DV_D7), GPIO_FN(DV_D8),
+ GPIO_FN(DV_D9), GPIO_FN(DV_D10), GPIO_FN(DV_D11),
+ GPIO_FN(DV_D12), GPIO_FN(DV_D13), GPIO_FN(DV_D14),
+ GPIO_FN(DV_D15), GPIO_FN(DV_CLK),
+ GPIO_FN(DV_VSYNC), GPIO_FN(DV_HSYNC),
+
+ /* MEMC */
+ GPIO_FN(MEMC_AD0), GPIO_FN(MEMC_AD1), GPIO_FN(MEMC_AD2),
+ GPIO_FN(MEMC_AD3), GPIO_FN(MEMC_AD4), GPIO_FN(MEMC_AD5),
+ GPIO_FN(MEMC_AD6), GPIO_FN(MEMC_AD7), GPIO_FN(MEMC_AD8),
+ GPIO_FN(MEMC_AD9), GPIO_FN(MEMC_AD10), GPIO_FN(MEMC_AD11),
+ GPIO_FN(MEMC_AD12), GPIO_FN(MEMC_AD13), GPIO_FN(MEMC_AD14),
+ GPIO_FN(MEMC_AD15), GPIO_FN(MEMC_CS0), GPIO_FN(MEMC_INT),
+ GPIO_FN(MEMC_NWE), GPIO_FN(MEMC_NOE), GPIO_FN(MEMC_CS1),
+ GPIO_FN(MEMC_A1), GPIO_FN(MEMC_ADV), GPIO_FN(MEMC_DREQ0),
+ GPIO_FN(MEMC_WAIT), GPIO_FN(MEMC_DREQ1), GPIO_FN(MEMC_BUSCLK),
+ GPIO_FN(MEMC_A0),
+
+ /* MMC */
+ GPIO_FN(MMC0_D0_PORT68), GPIO_FN(MMC0_D1_PORT69),
+ GPIO_FN(MMC0_D2_PORT70), GPIO_FN(MMC0_D3_PORT71),
+ GPIO_FN(MMC0_D4_PORT72), GPIO_FN(MMC0_D5_PORT73),
+ GPIO_FN(MMC0_D6_PORT74), GPIO_FN(MMC0_D7_PORT75),
+ GPIO_FN(MMC0_CLK_PORT66),
+ GPIO_FN(MMC0_CMD_PORT67), /* MSEL4CR_15_0 */
+
+ GPIO_FN(MMC1_D0_PORT149), GPIO_FN(MMC1_D1_PORT148),
+ GPIO_FN(MMC1_D2_PORT147), GPIO_FN(MMC1_D3_PORT146),
+ GPIO_FN(MMC1_D4_PORT145), GPIO_FN(MMC1_D5_PORT144),
+ GPIO_FN(MMC1_D6_PORT143), GPIO_FN(MMC1_D7_PORT142),
+ GPIO_FN(MMC1_CLK_PORT103),
+ GPIO_FN(MMC1_CMD_PORT104), /* MSEL4CR_15_1 */
+
+ /* MSIOF0 */
+ GPIO_FN(MSIOF0_SS1), GPIO_FN(MSIOF0_SS2), GPIO_FN(MSIOF0_RXD),
+ GPIO_FN(MSIOF0_TXD), GPIO_FN(MSIOF0_MCK0), GPIO_FN(MSIOF0_MCK1),
+ GPIO_FN(MSIOF0_RSYNC), GPIO_FN(MSIOF0_RSCK), GPIO_FN(MSIOF0_TSCK),
+ GPIO_FN(MSIOF0_TSYNC),
+
+ /* MSIOF1 */
+ GPIO_FN(MSIOF1_RSCK), GPIO_FN(MSIOF1_RSYNC),
+ GPIO_FN(MSIOF1_MCK0), GPIO_FN(MSIOF1_MCK1),
+
+ GPIO_FN(MSIOF1_SS2_PORT116), GPIO_FN(MSIOF1_SS1_PORT117),
+ GPIO_FN(MSIOF1_RXD_PORT118), GPIO_FN(MSIOF1_TXD_PORT119),
+ GPIO_FN(MSIOF1_TSYNC_PORT120),
+ GPIO_FN(MSIOF1_TSCK_PORT121), /* MSEL4CR_10_0 */
+
+ GPIO_FN(MSIOF1_SS1_PORT67), GPIO_FN(MSIOF1_TSCK_PORT72),
+ GPIO_FN(MSIOF1_TSYNC_PORT73), GPIO_FN(MSIOF1_TXD_PORT74),
+ GPIO_FN(MSIOF1_RXD_PORT75),
+ GPIO_FN(MSIOF1_SS2_PORT202), /* MSEL4CR_10_1 */
+
+ /* GPIO */
+ GPIO_FN(GPO0), GPIO_FN(GPI0),
+ GPIO_FN(GPO1), GPIO_FN(GPI1),
+
+ /* USB0 */
+ GPIO_FN(USB0_OCI), GPIO_FN(USB0_PPON), GPIO_FN(VBUS),
+
+ /* USB1 */
+ GPIO_FN(USB1_OCI), GPIO_FN(USB1_PPON),
+
+ /* BBIF1 */
+ GPIO_FN(BBIF1_RXD), GPIO_FN(BBIF1_TXD), GPIO_FN(BBIF1_TSYNC),
+ GPIO_FN(BBIF1_TSCK), GPIO_FN(BBIF1_RSCK), GPIO_FN(BBIF1_RSYNC),
+ GPIO_FN(BBIF1_FLOW), GPIO_FN(BBIF1_RX_FLOW_N),
+
+ /* BBIF2 */
+ GPIO_FN(BBIF2_TXD2_PORT5), /* MSEL5CR_0_0 */
+ GPIO_FN(BBIF2_RXD2_PORT60),
+ GPIO_FN(BBIF2_TSYNC2_PORT6),
+ GPIO_FN(BBIF2_TSCK2_PORT59),
+
+ GPIO_FN(BBIF2_RXD2_PORT90), /* MSEL5CR_0_1 */
+ GPIO_FN(BBIF2_TXD2_PORT183),
+ GPIO_FN(BBIF2_TSCK2_PORT89),
+ GPIO_FN(BBIF2_TSYNC2_PORT184),
+
+ /* BSC / FLCTL / PCMCIA */
+ GPIO_FN(CS0), GPIO_FN(CS2), GPIO_FN(CS4),
+ GPIO_FN(CS5B), GPIO_FN(CS6A),
+ GPIO_FN(CS5A_PORT105), /* CS5A PORT 19/105 */
+ GPIO_FN(CS5A_PORT19),
+ GPIO_FN(IOIS16), /* ? */
+
+ GPIO_FN(A0), GPIO_FN(A1), GPIO_FN(A2), GPIO_FN(A3),
+ GPIO_FN(A4_FOE), GPIO_FN(A5_FCDE), /* share with FLCTL */
+ GPIO_FN(A6), GPIO_FN(A7), GPIO_FN(A8), GPIO_FN(A9),
+ GPIO_FN(A10), GPIO_FN(A11), GPIO_FN(A12), GPIO_FN(A13),
+ GPIO_FN(A14), GPIO_FN(A15), GPIO_FN(A16), GPIO_FN(A17),
+ GPIO_FN(A18), GPIO_FN(A19), GPIO_FN(A20), GPIO_FN(A21),
+ GPIO_FN(A22), GPIO_FN(A23), GPIO_FN(A24), GPIO_FN(A25),
+ GPIO_FN(A26),
+
+ GPIO_FN(D0_NAF0), GPIO_FN(D1_NAF1), /* share with FLCTL */
+ GPIO_FN(D2_NAF2), GPIO_FN(D3_NAF3), /* share with FLCTL */
+ GPIO_FN(D4_NAF4), GPIO_FN(D5_NAF5), /* share with FLCTL */
+ GPIO_FN(D6_NAF6), GPIO_FN(D7_NAF7), /* share with FLCTL */
+ GPIO_FN(D8_NAF8), GPIO_FN(D9_NAF9), /* share with FLCTL */
+ GPIO_FN(D10_NAF10), GPIO_FN(D11_NAF11), /* share with FLCTL */
+ GPIO_FN(D12_NAF12), GPIO_FN(D13_NAF13), /* share with FLCTL */
+ GPIO_FN(D14_NAF14), GPIO_FN(D15_NAF15), /* share with FLCTL */
+ GPIO_FN(D16), GPIO_FN(D17), GPIO_FN(D18), GPIO_FN(D19),
+ GPIO_FN(D20), GPIO_FN(D21), GPIO_FN(D22), GPIO_FN(D23),
+ GPIO_FN(D24), GPIO_FN(D25), GPIO_FN(D26), GPIO_FN(D27),
+ GPIO_FN(D28), GPIO_FN(D29), GPIO_FN(D30), GPIO_FN(D31),
+
+ GPIO_FN(WE0_FWE), /* share with FLCTL */
+ GPIO_FN(WE1),
+ GPIO_FN(WE2_ICIORD), /* share with PCMCIA */
+ GPIO_FN(WE3_ICIOWR), /* share with PCMCIA */
+ GPIO_FN(CKO), GPIO_FN(BS), GPIO_FN(RDWR),
+ GPIO_FN(RD_FSC), /* share with FLCTL */
+ GPIO_FN(WAIT_PORT177), /* WAIT Port 90/177 */
+ GPIO_FN(WAIT_PORT90),
+
+ GPIO_FN(FCE0), GPIO_FN(FCE1), GPIO_FN(FRB), /* FLCTL */
+
+ /* IRDA */
+ GPIO_FN(IRDA_FIRSEL), GPIO_FN(IRDA_IN), GPIO_FN(IRDA_OUT),
+
+ /* ATAPI */
+ GPIO_FN(IDE_D0), GPIO_FN(IDE_D1), GPIO_FN(IDE_D2),
+ GPIO_FN(IDE_D3), GPIO_FN(IDE_D4), GPIO_FN(IDE_D5),
+ GPIO_FN(IDE_D6), GPIO_FN(IDE_D7), GPIO_FN(IDE_D8),
+ GPIO_FN(IDE_D9), GPIO_FN(IDE_D10), GPIO_FN(IDE_D11),
+ GPIO_FN(IDE_D12), GPIO_FN(IDE_D13), GPIO_FN(IDE_D14),
+ GPIO_FN(IDE_D15), GPIO_FN(IDE_A0), GPIO_FN(IDE_A1),
+ GPIO_FN(IDE_A2), GPIO_FN(IDE_CS0), GPIO_FN(IDE_CS1),
+ GPIO_FN(IDE_IOWR), GPIO_FN(IDE_IORD), GPIO_FN(IDE_IORDY),
+ GPIO_FN(IDE_INT), GPIO_FN(IDE_RST), GPIO_FN(IDE_DIRECTION),
+ GPIO_FN(IDE_EXBUF_ENB), GPIO_FN(IDE_IODACK), GPIO_FN(IDE_IODREQ),
+
+ /* RMII */
+ GPIO_FN(RMII_CRS_DV), GPIO_FN(RMII_RX_ER), GPIO_FN(RMII_RXD0),
+ GPIO_FN(RMII_RXD1), GPIO_FN(RMII_TX_EN), GPIO_FN(RMII_TXD0),
+ GPIO_FN(RMII_MDC), GPIO_FN(RMII_TXD1), GPIO_FN(RMII_MDIO),
+ GPIO_FN(RMII_REF50CK), GPIO_FN(RMII_REF125CK), /* for GMII */
+
+ /* GEther */
+ GPIO_FN(ET_TX_CLK), GPIO_FN(ET_TX_EN), GPIO_FN(ET_ETXD0),
+ GPIO_FN(ET_ETXD1), GPIO_FN(ET_ETXD2), GPIO_FN(ET_ETXD3),
+ GPIO_FN(ET_ETXD4), GPIO_FN(ET_ETXD5), /* for GEther */
+ GPIO_FN(ET_ETXD6), GPIO_FN(ET_ETXD7), /* for GEther */
+ GPIO_FN(ET_COL), GPIO_FN(ET_TX_ER), GPIO_FN(ET_RX_CLK),
+ GPIO_FN(ET_RX_DV), GPIO_FN(ET_ERXD0), GPIO_FN(ET_ERXD1),
+ GPIO_FN(ET_ERXD2), GPIO_FN(ET_ERXD3),
+ GPIO_FN(ET_ERXD4), GPIO_FN(ET_ERXD5), /* for GEther */
+ GPIO_FN(ET_ERXD6), GPIO_FN(ET_ERXD7), /* for GEther */
+ GPIO_FN(ET_RX_ER), GPIO_FN(ET_CRS), GPIO_FN(ET_MDC),
+ GPIO_FN(ET_MDIO), GPIO_FN(ET_LINK), GPIO_FN(ET_PHY_INT),
+ GPIO_FN(ET_WOL), GPIO_FN(ET_GTX_CLK),
+
+ /* DMA0 */
+ GPIO_FN(DREQ0), GPIO_FN(DACK0),
+
+ /* DMA1 */
+ GPIO_FN(DREQ1), GPIO_FN(DACK1),
+
+ /* SYSC */
+ GPIO_FN(RESETOUTS),
+
+ /* IRREM */
+ GPIO_FN(IROUT),
+
+ /* LCDC */
+ GPIO_FN(LCDC0_SELECT),
+ GPIO_FN(LCDC1_SELECT),
+
+ /* SDENC */
+ GPIO_FN(SDENC_CPG),
+ GPIO_FN(SDENC_DV_CLKI),
+
+ /* HDMI */
+ GPIO_FN(HDMI_HPD),
+ GPIO_FN(HDMI_CEC),
+
+ /* SYSC */
+ GPIO_FN(RESETP_PULLUP),
+ GPIO_FN(RESETP_PLAIN),
+
+ /* DEBUG */
+ GPIO_FN(EDEBGREQ_PULLDOWN),
+ GPIO_FN(EDEBGREQ_PULLUP),
+
+ GPIO_FN(TRACEAUD_FROM_VIO),
+ GPIO_FN(TRACEAUD_FROM_LCDC0),
+ GPIO_FN(TRACEAUD_FROM_MEMC),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ PORTCR(0, 0xe6050000), /* PORT0CR */
+ PORTCR(1, 0xe6050001), /* PORT1CR */
+ PORTCR(2, 0xe6050002), /* PORT2CR */
+ PORTCR(3, 0xe6050003), /* PORT3CR */
+ PORTCR(4, 0xe6050004), /* PORT4CR */
+ PORTCR(5, 0xe6050005), /* PORT5CR */
+ PORTCR(6, 0xe6050006), /* PORT6CR */
+ PORTCR(7, 0xe6050007), /* PORT7CR */
+ PORTCR(8, 0xe6050008), /* PORT8CR */
+ PORTCR(9, 0xe6050009), /* PORT9CR */
+ PORTCR(10, 0xe605000a), /* PORT10CR */
+ PORTCR(11, 0xe605000b), /* PORT11CR */
+ PORTCR(12, 0xe605000c), /* PORT12CR */
+ PORTCR(13, 0xe605000d), /* PORT13CR */
+ PORTCR(14, 0xe605000e), /* PORT14CR */
+ PORTCR(15, 0xe605000f), /* PORT15CR */
+ PORTCR(16, 0xe6050010), /* PORT16CR */
+ PORTCR(17, 0xe6050011), /* PORT17CR */
+ PORTCR(18, 0xe6050012), /* PORT18CR */
+ PORTCR(19, 0xe6050013), /* PORT19CR */
+ PORTCR(20, 0xe6050014), /* PORT20CR */
+ PORTCR(21, 0xe6050015), /* PORT21CR */
+ PORTCR(22, 0xe6050016), /* PORT22CR */
+ PORTCR(23, 0xe6050017), /* PORT23CR */
+ PORTCR(24, 0xe6050018), /* PORT24CR */
+ PORTCR(25, 0xe6050019), /* PORT25CR */
+ PORTCR(26, 0xe605001a), /* PORT26CR */
+ PORTCR(27, 0xe605001b), /* PORT27CR */
+ PORTCR(28, 0xe605001c), /* PORT28CR */
+ PORTCR(29, 0xe605001d), /* PORT29CR */
+ PORTCR(30, 0xe605001e), /* PORT30CR */
+ PORTCR(31, 0xe605001f), /* PORT31CR */
+ PORTCR(32, 0xe6050020), /* PORT32CR */
+ PORTCR(33, 0xe6050021), /* PORT33CR */
+ PORTCR(34, 0xe6050022), /* PORT34CR */
+ PORTCR(35, 0xe6050023), /* PORT35CR */
+ PORTCR(36, 0xe6050024), /* PORT36CR */
+ PORTCR(37, 0xe6050025), /* PORT37CR */
+ PORTCR(38, 0xe6050026), /* PORT38CR */
+ PORTCR(39, 0xe6050027), /* PORT39CR */
+ PORTCR(40, 0xe6050028), /* PORT40CR */
+ PORTCR(41, 0xe6050029), /* PORT41CR */
+ PORTCR(42, 0xe605002a), /* PORT42CR */
+ PORTCR(43, 0xe605002b), /* PORT43CR */
+ PORTCR(44, 0xe605002c), /* PORT44CR */
+ PORTCR(45, 0xe605002d), /* PORT45CR */
+ PORTCR(46, 0xe605002e), /* PORT46CR */
+ PORTCR(47, 0xe605002f), /* PORT47CR */
+ PORTCR(48, 0xe6050030), /* PORT48CR */
+ PORTCR(49, 0xe6050031), /* PORT49CR */
+ PORTCR(50, 0xe6050032), /* PORT50CR */
+ PORTCR(51, 0xe6050033), /* PORT51CR */
+ PORTCR(52, 0xe6050034), /* PORT52CR */
+ PORTCR(53, 0xe6050035), /* PORT53CR */
+ PORTCR(54, 0xe6050036), /* PORT54CR */
+ PORTCR(55, 0xe6050037), /* PORT55CR */
+ PORTCR(56, 0xe6050038), /* PORT56CR */
+ PORTCR(57, 0xe6050039), /* PORT57CR */
+ PORTCR(58, 0xe605003a), /* PORT58CR */
+ PORTCR(59, 0xe605003b), /* PORT59CR */
+ PORTCR(60, 0xe605003c), /* PORT60CR */
+ PORTCR(61, 0xe605003d), /* PORT61CR */
+ PORTCR(62, 0xe605003e), /* PORT62CR */
+ PORTCR(63, 0xe605003f), /* PORT63CR */
+ PORTCR(64, 0xe6050040), /* PORT64CR */
+ PORTCR(65, 0xe6050041), /* PORT65CR */
+ PORTCR(66, 0xe6050042), /* PORT66CR */
+ PORTCR(67, 0xe6050043), /* PORT67CR */
+ PORTCR(68, 0xe6050044), /* PORT68CR */
+ PORTCR(69, 0xe6050045), /* PORT69CR */
+ PORTCR(70, 0xe6050046), /* PORT70CR */
+ PORTCR(71, 0xe6050047), /* PORT71CR */
+ PORTCR(72, 0xe6050048), /* PORT72CR */
+ PORTCR(73, 0xe6050049), /* PORT73CR */
+ PORTCR(74, 0xe605004a), /* PORT74CR */
+ PORTCR(75, 0xe605004b), /* PORT75CR */
+ PORTCR(76, 0xe605004c), /* PORT76CR */
+ PORTCR(77, 0xe605004d), /* PORT77CR */
+ PORTCR(78, 0xe605004e), /* PORT78CR */
+ PORTCR(79, 0xe605004f), /* PORT79CR */
+ PORTCR(80, 0xe6050050), /* PORT80CR */
+ PORTCR(81, 0xe6050051), /* PORT81CR */
+ PORTCR(82, 0xe6050052), /* PORT82CR */
+ PORTCR(83, 0xe6050053), /* PORT83CR */
+
+ PORTCR(84, 0xe6051054), /* PORT84CR */
+ PORTCR(85, 0xe6051055), /* PORT85CR */
+ PORTCR(86, 0xe6051056), /* PORT86CR */
+ PORTCR(87, 0xe6051057), /* PORT87CR */
+ PORTCR(88, 0xe6051058), /* PORT88CR */
+ PORTCR(89, 0xe6051059), /* PORT89CR */
+ PORTCR(90, 0xe605105a), /* PORT90CR */
+ PORTCR(91, 0xe605105b), /* PORT91CR */
+ PORTCR(92, 0xe605105c), /* PORT92CR */
+ PORTCR(93, 0xe605105d), /* PORT93CR */
+ PORTCR(94, 0xe605105e), /* PORT94CR */
+ PORTCR(95, 0xe605105f), /* PORT95CR */
+ PORTCR(96, 0xe6051060), /* PORT96CR */
+ PORTCR(97, 0xe6051061), /* PORT97CR */
+ PORTCR(98, 0xe6051062), /* PORT98CR */
+ PORTCR(99, 0xe6051063), /* PORT99CR */
+ PORTCR(100, 0xe6051064), /* PORT100CR */
+ PORTCR(101, 0xe6051065), /* PORT101CR */
+ PORTCR(102, 0xe6051066), /* PORT102CR */
+ PORTCR(103, 0xe6051067), /* PORT103CR */
+ PORTCR(104, 0xe6051068), /* PORT104CR */
+ PORTCR(105, 0xe6051069), /* PORT105CR */
+ PORTCR(106, 0xe605106a), /* PORT106CR */
+ PORTCR(107, 0xe605106b), /* PORT107CR */
+ PORTCR(108, 0xe605106c), /* PORT108CR */
+ PORTCR(109, 0xe605106d), /* PORT109CR */
+ PORTCR(110, 0xe605106e), /* PORT110CR */
+ PORTCR(111, 0xe605106f), /* PORT111CR */
+ PORTCR(112, 0xe6051070), /* PORT112CR */
+ PORTCR(113, 0xe6051071), /* PORT113CR */
+ PORTCR(114, 0xe6051072), /* PORT114CR */
+
+ PORTCR(115, 0xe6052073), /* PORT115CR */
+ PORTCR(116, 0xe6052074), /* PORT116CR */
+ PORTCR(117, 0xe6052075), /* PORT117CR */
+ PORTCR(118, 0xe6052076), /* PORT118CR */
+ PORTCR(119, 0xe6052077), /* PORT119CR */
+ PORTCR(120, 0xe6052078), /* PORT120CR */
+ PORTCR(121, 0xe6052079), /* PORT121CR */
+ PORTCR(122, 0xe605207a), /* PORT122CR */
+ PORTCR(123, 0xe605207b), /* PORT123CR */
+ PORTCR(124, 0xe605207c), /* PORT124CR */
+ PORTCR(125, 0xe605207d), /* PORT125CR */
+ PORTCR(126, 0xe605207e), /* PORT126CR */
+ PORTCR(127, 0xe605207f), /* PORT127CR */
+ PORTCR(128, 0xe6052080), /* PORT128CR */
+ PORTCR(129, 0xe6052081), /* PORT129CR */
+ PORTCR(130, 0xe6052082), /* PORT130CR */
+ PORTCR(131, 0xe6052083), /* PORT131CR */
+ PORTCR(132, 0xe6052084), /* PORT132CR */
+ PORTCR(133, 0xe6052085), /* PORT133CR */
+ PORTCR(134, 0xe6052086), /* PORT134CR */
+ PORTCR(135, 0xe6052087), /* PORT135CR */
+ PORTCR(136, 0xe6052088), /* PORT136CR */
+ PORTCR(137, 0xe6052089), /* PORT137CR */
+ PORTCR(138, 0xe605208a), /* PORT138CR */
+ PORTCR(139, 0xe605208b), /* PORT139CR */
+ PORTCR(140, 0xe605208c), /* PORT140CR */
+ PORTCR(141, 0xe605208d), /* PORT141CR */
+ PORTCR(142, 0xe605208e), /* PORT142CR */
+ PORTCR(143, 0xe605208f), /* PORT143CR */
+ PORTCR(144, 0xe6052090), /* PORT144CR */
+ PORTCR(145, 0xe6052091), /* PORT145CR */
+ PORTCR(146, 0xe6052092), /* PORT146CR */
+ PORTCR(147, 0xe6052093), /* PORT147CR */
+ PORTCR(148, 0xe6052094), /* PORT148CR */
+ PORTCR(149, 0xe6052095), /* PORT149CR */
+ PORTCR(150, 0xe6052096), /* PORT150CR */
+ PORTCR(151, 0xe6052097), /* PORT151CR */
+ PORTCR(152, 0xe6052098), /* PORT152CR */
+ PORTCR(153, 0xe6052099), /* PORT153CR */
+ PORTCR(154, 0xe605209a), /* PORT154CR */
+ PORTCR(155, 0xe605209b), /* PORT155CR */
+ PORTCR(156, 0xe605209c), /* PORT156CR */
+ PORTCR(157, 0xe605209d), /* PORT157CR */
+ PORTCR(158, 0xe605209e), /* PORT158CR */
+ PORTCR(159, 0xe605209f), /* PORT159CR */
+ PORTCR(160, 0xe60520a0), /* PORT160CR */
+ PORTCR(161, 0xe60520a1), /* PORT161CR */
+ PORTCR(162, 0xe60520a2), /* PORT162CR */
+ PORTCR(163, 0xe60520a3), /* PORT163CR */
+ PORTCR(164, 0xe60520a4), /* PORT164CR */
+ PORTCR(165, 0xe60520a5), /* PORT165CR */
+ PORTCR(166, 0xe60520a6), /* PORT166CR */
+ PORTCR(167, 0xe60520a7), /* PORT167CR */
+ PORTCR(168, 0xe60520a8), /* PORT168CR */
+ PORTCR(169, 0xe60520a9), /* PORT169CR */
+ PORTCR(170, 0xe60520aa), /* PORT170CR */
+ PORTCR(171, 0xe60520ab), /* PORT171CR */
+ PORTCR(172, 0xe60520ac), /* PORT172CR */
+ PORTCR(173, 0xe60520ad), /* PORT173CR */
+ PORTCR(174, 0xe60520ae), /* PORT174CR */
+ PORTCR(175, 0xe60520af), /* PORT175CR */
+ PORTCR(176, 0xe60520b0), /* PORT176CR */
+ PORTCR(177, 0xe60520b1), /* PORT177CR */
+ PORTCR(178, 0xe60520b2), /* PORT178CR */
+ PORTCR(179, 0xe60520b3), /* PORT179CR */
+ PORTCR(180, 0xe60520b4), /* PORT180CR */
+ PORTCR(181, 0xe60520b5), /* PORT181CR */
+ PORTCR(182, 0xe60520b6), /* PORT182CR */
+ PORTCR(183, 0xe60520b7), /* PORT183CR */
+ PORTCR(184, 0xe60520b8), /* PORT184CR */
+ PORTCR(185, 0xe60520b9), /* PORT185CR */
+ PORTCR(186, 0xe60520ba), /* PORT186CR */
+ PORTCR(187, 0xe60520bb), /* PORT187CR */
+ PORTCR(188, 0xe60520bc), /* PORT188CR */
+ PORTCR(189, 0xe60520bd), /* PORT189CR */
+ PORTCR(190, 0xe60520be), /* PORT190CR */
+ PORTCR(191, 0xe60520bf), /* PORT191CR */
+ PORTCR(192, 0xe60520c0), /* PORT192CR */
+ PORTCR(193, 0xe60520c1), /* PORT193CR */
+ PORTCR(194, 0xe60520c2), /* PORT194CR */
+ PORTCR(195, 0xe60520c3), /* PORT195CR */
+ PORTCR(196, 0xe60520c4), /* PORT196CR */
+ PORTCR(197, 0xe60520c5), /* PORT197CR */
+ PORTCR(198, 0xe60520c6), /* PORT198CR */
+ PORTCR(199, 0xe60520c7), /* PORT199CR */
+ PORTCR(200, 0xe60520c8), /* PORT200CR */
+ PORTCR(201, 0xe60520c9), /* PORT201CR */
+ PORTCR(202, 0xe60520ca), /* PORT202CR */
+ PORTCR(203, 0xe60520cb), /* PORT203CR */
+ PORTCR(204, 0xe60520cc), /* PORT204CR */
+ PORTCR(205, 0xe60520cd), /* PORT205CR */
+ PORTCR(206, 0xe60520ce), /* PORT206CR */
+ PORTCR(207, 0xe60520cf), /* PORT207CR */
+ PORTCR(208, 0xe60520d0), /* PORT208CR */
+ PORTCR(209, 0xe60520d1), /* PORT209CR */
+
+ PORTCR(210, 0xe60530d2), /* PORT210CR */
+ PORTCR(211, 0xe60530d3), /* PORT211CR */
+
+ { PINMUX_CFG_REG("MSEL1CR", 0xe605800c, 32, 1) {
+ MSEL1CR_31_0, MSEL1CR_31_1,
+ MSEL1CR_30_0, MSEL1CR_30_1,
+ MSEL1CR_29_0, MSEL1CR_29_1,
+ MSEL1CR_28_0, MSEL1CR_28_1,
+ MSEL1CR_27_0, MSEL1CR_27_1,
+ MSEL1CR_26_0, MSEL1CR_26_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MSEL1CR_16_0, MSEL1CR_16_1,
+ MSEL1CR_15_0, MSEL1CR_15_1,
+ MSEL1CR_14_0, MSEL1CR_14_1,
+ MSEL1CR_13_0, MSEL1CR_13_1,
+ MSEL1CR_12_0, MSEL1CR_12_1,
+ 0, 0, 0, 0,
+ MSEL1CR_9_0, MSEL1CR_9_1,
+ 0, 0,
+ MSEL1CR_7_0, MSEL1CR_7_1,
+ MSEL1CR_6_0, MSEL1CR_6_1,
+ MSEL1CR_5_0, MSEL1CR_5_1,
+ MSEL1CR_4_0, MSEL1CR_4_1,
+ MSEL1CR_3_0, MSEL1CR_3_1,
+ MSEL1CR_2_0, MSEL1CR_2_1,
+ 0, 0,
+ MSEL1CR_0_0, MSEL1CR_0_1,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL3CR", 0xE6058020, 32, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MSEL3CR_15_0, MSEL3CR_15_1,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MSEL3CR_6_0, MSEL3CR_6_1,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL4CR", 0xE6058024, 32, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MSEL4CR_19_0, MSEL4CR_19_1,
+ MSEL4CR_18_0, MSEL4CR_18_1,
+ 0, 0, 0, 0,
+ MSEL4CR_15_0, MSEL4CR_15_1,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MSEL4CR_10_0, MSEL4CR_10_1,
+ 0, 0, 0, 0, 0, 0,
+ MSEL4CR_6_0, MSEL4CR_6_1,
+ 0, 0,
+ MSEL4CR_4_0, MSEL4CR_4_1,
+ 0, 0, 0, 0,
+ MSEL4CR_1_0, MSEL4CR_1_1,
+ 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL5CR", 0xE6058028, 32, 1) {
+ MSEL5CR_31_0, MSEL5CR_31_1,
+ MSEL5CR_30_0, MSEL5CR_30_1,
+ MSEL5CR_29_0, MSEL5CR_29_1,
+ 0, 0,
+ MSEL5CR_27_0, MSEL5CR_27_1,
+ 0, 0,
+ MSEL5CR_25_0, MSEL5CR_25_1,
+ 0, 0,
+ MSEL5CR_23_0, MSEL5CR_23_1,
+ 0, 0,
+ MSEL5CR_21_0, MSEL5CR_21_1,
+ 0, 0,
+ MSEL5CR_19_0, MSEL5CR_19_1,
+ 0, 0,
+ MSEL5CR_17_0, MSEL5CR_17_1,
+ 0, 0,
+ MSEL5CR_15_0, MSEL5CR_15_1,
+ MSEL5CR_14_0, MSEL5CR_14_1,
+ MSEL5CR_13_0, MSEL5CR_13_1,
+ MSEL5CR_12_0, MSEL5CR_12_1,
+ MSEL5CR_11_0, MSEL5CR_11_1,
+ MSEL5CR_10_0, MSEL5CR_10_1,
+ 0, 0,
+ MSEL5CR_8_0, MSEL5CR_8_1,
+ MSEL5CR_7_0, MSEL5CR_7_1,
+ MSEL5CR_6_0, MSEL5CR_6_1,
+ MSEL5CR_5_0, MSEL5CR_5_1,
+ MSEL5CR_4_0, MSEL5CR_4_1,
+ MSEL5CR_3_0, MSEL5CR_3_1,
+ MSEL5CR_2_0, MSEL5CR_2_1,
+ 0, 0,
+ MSEL5CR_0_0, MSEL5CR_0_1,
+ }
+ },
+ { },
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054800, 32) {
+ PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
+ PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
+ PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
+ PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA,
+ PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
+ PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
+ PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
+ PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA }
+ },
+ { PINMUX_DATA_REG("PORTL063_032DR", 0xe6054804, 32) {
+ PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA,
+ PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA,
+ PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA,
+ PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA,
+ PORT47_DATA, PORT46_DATA, PORT45_DATA, PORT44_DATA,
+ PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA,
+ PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
+ PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA }
+ },
+ { PINMUX_DATA_REG("PORTL095_064DR", 0xe6054808, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA,
+ PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
+ PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
+ PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
+ PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA }
+ },
+ { PINMUX_DATA_REG("PORTD095_064DR", 0xe6055808, 32) {
+ PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
+ PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
+ PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PORTD127_096DR", 0xe605580c, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, PORT114_DATA, PORT113_DATA, PORT112_DATA,
+ PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
+ PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
+ PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
+ PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR127_096DR", 0xe605680C, 32) {
+ PORT127_DATA, PORT126_DATA, PORT125_DATA, PORT124_DATA,
+ PORT123_DATA, PORT122_DATA, PORT121_DATA, PORT120_DATA,
+ PORT119_DATA, PORT118_DATA, PORT117_DATA, PORT116_DATA,
+ PORT115_DATA, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PORTR159_128DR", 0xe6056810, 32) {
+ PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA,
+ PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA,
+ PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA,
+ PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA,
+ PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA,
+ PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA,
+ PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA,
+ PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056814, 32) {
+ PORT191_DATA, PORT190_DATA, PORT189_DATA, PORT188_DATA,
+ PORT187_DATA, PORT186_DATA, PORT185_DATA, PORT184_DATA,
+ PORT183_DATA, PORT182_DATA, PORT181_DATA, PORT180_DATA,
+ PORT179_DATA, PORT178_DATA, PORT177_DATA, PORT176_DATA,
+ PORT175_DATA, PORT174_DATA, PORT173_DATA, PORT172_DATA,
+ PORT171_DATA, PORT170_DATA, PORT169_DATA, PORT168_DATA,
+ PORT167_DATA, PORT166_DATA, PORT165_DATA, PORT164_DATA,
+ PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR223_192DR", 0xe6056818, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, PORT209_DATA, PORT208_DATA,
+ PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA,
+ PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA,
+ PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA,
+ PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA }
+ },
+ { PINMUX_DATA_REG("PORTU223_192DR", 0xe6057818, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PORT211_DATA, PORT210_DATA, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { },
+};
+
+static struct pinmux_irq pinmux_irqs[] = {
+ PINMUX_IRQ(evt2irq(0x0200), PORT2_FN0, PORT13_FN0), /* IRQ0A */
+ PINMUX_IRQ(evt2irq(0x0220), PORT20_FN0), /* IRQ1A */
+ PINMUX_IRQ(evt2irq(0x0240), PORT11_FN0, PORT12_FN0), /* IRQ2A */
+ PINMUX_IRQ(evt2irq(0x0260), PORT10_FN0, PORT14_FN0), /* IRQ3A */
+ PINMUX_IRQ(evt2irq(0x0280), PORT15_FN0, PORT172_FN0), /* IRQ4A */
+ PINMUX_IRQ(evt2irq(0x02A0), PORT0_FN0, PORT1_FN0), /* IRQ5A */
+ PINMUX_IRQ(evt2irq(0x02C0), PORT121_FN0, PORT173_FN0), /* IRQ6A */
+ PINMUX_IRQ(evt2irq(0x02E0), PORT120_FN0, PORT209_FN0), /* IRQ7A */
+ PINMUX_IRQ(evt2irq(0x0300), PORT119_FN0), /* IRQ8A */
+ PINMUX_IRQ(evt2irq(0x0320), PORT118_FN0, PORT210_FN0), /* IRQ9A */
+ PINMUX_IRQ(evt2irq(0x0340), PORT19_FN0), /* IRQ10A */
+ PINMUX_IRQ(evt2irq(0x0360), PORT104_FN0), /* IRQ11A */
+ PINMUX_IRQ(evt2irq(0x0380), PORT42_FN0, PORT97_FN0), /* IRQ12A */
+ PINMUX_IRQ(evt2irq(0x03A0), PORT64_FN0, PORT98_FN0), /* IRQ13A */
+ PINMUX_IRQ(evt2irq(0x03C0), PORT63_FN0, PORT99_FN0), /* IRQ14A */
+ PINMUX_IRQ(evt2irq(0x03E0), PORT62_FN0, PORT100_FN0), /* IRQ15A */
+ PINMUX_IRQ(evt2irq(0x3200), PORT68_FN0, PORT211_FN0), /* IRQ16A */
+ PINMUX_IRQ(evt2irq(0x3220), PORT69_FN0), /* IRQ17A */
+ PINMUX_IRQ(evt2irq(0x3240), PORT70_FN0), /* IRQ18A */
+ PINMUX_IRQ(evt2irq(0x3260), PORT71_FN0), /* IRQ19A */
+ PINMUX_IRQ(evt2irq(0x3280), PORT67_FN0), /* IRQ20A */
+ PINMUX_IRQ(evt2irq(0x32A0), PORT202_FN0), /* IRQ21A */
+ PINMUX_IRQ(evt2irq(0x32C0), PORT95_FN0), /* IRQ22A */
+ PINMUX_IRQ(evt2irq(0x32E0), PORT96_FN0), /* IRQ23A */
+ PINMUX_IRQ(evt2irq(0x3300), PORT180_FN0), /* IRQ24A */
+ PINMUX_IRQ(evt2irq(0x3320), PORT38_FN0), /* IRQ25A */
+ PINMUX_IRQ(evt2irq(0x3340), PORT58_FN0, PORT81_FN0), /* IRQ26A */
+ PINMUX_IRQ(evt2irq(0x3360), PORT57_FN0, PORT168_FN0), /* IRQ27A */
+ PINMUX_IRQ(evt2irq(0x3380), PORT56_FN0, PORT169_FN0), /* IRQ28A */
+ PINMUX_IRQ(evt2irq(0x33A0), PORT50_FN0, PORT170_FN0), /* IRQ29A */
+ PINMUX_IRQ(evt2irq(0x33C0), PORT49_FN0, PORT171_FN0), /* IRQ30A */
+ PINMUX_IRQ(evt2irq(0x33E0), PORT41_FN0, PORT167_FN0), /* IRQ31A */
+};
+
+struct sh_pfc_soc_info r8a7740_pinmux_info = {
+ .name = "r8a7740_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN,
+ PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN,
+ PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN,
+ PINMUX_INPUT_PULLUP_END },
+ .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN,
+ PINMUX_INPUT_PULLDOWN_END },
+ .output = { PINMUX_OUTPUT_BEGIN,
+ PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN,
+ PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN,
+ PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PORT0,
+ .last_gpio = GPIO_FN_TRACEAUD_FROM_MEMC,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+
+ .gpio_irq = pinmux_irqs,
+ .gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
new file mode 100644
index 000000000000..13feaa0c0eb7
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -0,0 +1,2624 @@
+/*
+ * r8a7779 processor support - PFC hardware block
+ *
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ * Copyright (C) 2011 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <mach/r8a7779.h>
+
+#include "sh_pfc.h"
+
+#define CPU_32_PORT(fn, pfx, sfx) \
+ PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
+ PORT_10(fn, pfx##2, sfx), PORT_1(fn, pfx##30, sfx), \
+ PORT_1(fn, pfx##31, sfx)
+
+#define CPU_32_PORT6(fn, pfx, sfx) \
+ PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \
+ PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \
+ PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx), \
+ PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \
+ PORT_1(fn, pfx##8, sfx)
+
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ CPU_32_PORT(fn, pfx##_0_, sfx), \
+ CPU_32_PORT(fn, pfx##_1_, sfx), \
+ CPU_32_PORT(fn, pfx##_2_, sfx), \
+ CPU_32_PORT(fn, pfx##_3_, sfx), \
+ CPU_32_PORT(fn, pfx##_4_, sfx), \
+ CPU_32_PORT(fn, pfx##_5_, sfx), \
+ CPU_32_PORT6(fn, pfx##_6_, sfx)
+
+#define _GP_GPIO(pfx, sfx) PINMUX_GPIO(GPIO_GP##pfx, GP##pfx##_DATA)
+#define _GP_DATA(pfx, sfx) PINMUX_DATA(GP##pfx##_DATA, GP##pfx##_FN, \
+ GP##pfx##_IN, GP##pfx##_OUT)
+
+#define _GP_INOUTSEL(pfx, sfx) GP##pfx##_IN, GP##pfx##_OUT
+#define _GP_INDT(pfx, sfx) GP##pfx##_DATA
+
+#define GP_ALL(str) CPU_ALL_PORT(_PORT_ALL, GP, str)
+#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, , unused)
+#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, , unused)
+
+
+#define PORT_10_REV(fn, pfx, sfx) \
+ PORT_1(fn, pfx##9, sfx), PORT_1(fn, pfx##8, sfx), \
+ PORT_1(fn, pfx##7, sfx), PORT_1(fn, pfx##6, sfx), \
+ PORT_1(fn, pfx##5, sfx), PORT_1(fn, pfx##4, sfx), \
+ PORT_1(fn, pfx##3, sfx), PORT_1(fn, pfx##2, sfx), \
+ PORT_1(fn, pfx##1, sfx), PORT_1(fn, pfx##0, sfx)
+
+#define CPU_32_PORT_REV(fn, pfx, sfx) \
+ PORT_1(fn, pfx##31, sfx), PORT_1(fn, pfx##30, sfx), \
+ PORT_10_REV(fn, pfx##2, sfx), PORT_10_REV(fn, pfx##1, sfx), \
+ PORT_10_REV(fn, pfx, sfx)
+
+#define GP_INOUTSEL(bank) CPU_32_PORT_REV(_GP_INOUTSEL, _##bank##_, unused)
+#define GP_INDT(bank) CPU_32_PORT_REV(_GP_INDT, _##bank##_, unused)
+
+#define PINMUX_IPSR_DATA(ipsr, fn) PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##fn)
+#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms) PINMUX_DATA(fn##_MARK, FN_##ms, \
+ FN_##ipsr, FN_##fn)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA), /* GP_0_0_DATA -> GP_6_8_DATA */
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ GP_ALL(IN), /* GP_0_0_IN -> GP_6_8_IN */
+ PINMUX_INPUT_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ GP_ALL(OUT), /* GP_0_0_OUT -> GP_6_8_OUT */
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN), /* GP_0_0_FN -> GP_6_8_FN */
+
+ /* GPSR0 */
+ FN_AVS1, FN_AVS2, FN_IP0_7_6, FN_A17,
+ FN_A18, FN_A19, FN_IP0_9_8, FN_IP0_11_10,
+ FN_IP0_13_12, FN_IP0_15_14, FN_IP0_18_16, FN_IP0_22_19,
+ FN_IP0_24_23, FN_IP0_25, FN_IP0_27_26, FN_IP1_1_0,
+ FN_IP1_3_2, FN_IP1_6_4, FN_IP1_10_7, FN_IP1_14_11,
+ FN_IP1_18_15, FN_IP0_5_3, FN_IP0_30_28, FN_IP2_18_16,
+ FN_IP2_21_19, FN_IP2_30_28, FN_IP3_2_0, FN_IP3_11_9,
+ FN_IP3_14_12, FN_IP3_22_21, FN_IP3_26_24, FN_IP3_31_29,
+
+ /* GPSR1 */
+ FN_IP4_1_0, FN_IP4_4_2, FN_IP4_7_5, FN_IP4_10_8,
+ FN_IP4_11, FN_IP4_12, FN_IP4_13, FN_IP4_14,
+ FN_IP4_15, FN_IP4_16, FN_IP4_19_17, FN_IP4_22_20,
+ FN_IP4_23, FN_IP4_24, FN_IP4_25, FN_IP4_26,
+ FN_IP4_27, FN_IP4_28, FN_IP4_31_29, FN_IP5_2_0,
+ FN_IP5_3, FN_IP5_4, FN_IP5_5, FN_IP5_6,
+ FN_IP5_7, FN_IP5_8, FN_IP5_10_9, FN_IP5_12_11,
+ FN_IP5_14_13, FN_IP5_16_15, FN_IP5_20_17, FN_IP5_23_21,
+
+ /* GPSR2 */
+ FN_IP5_27_24, FN_IP8_20, FN_IP8_22_21, FN_IP8_24_23,
+ FN_IP8_27_25, FN_IP8_30_28, FN_IP9_1_0, FN_IP9_3_2,
+ FN_IP9_4, FN_IP9_5, FN_IP9_6, FN_IP9_7,
+ FN_IP9_9_8, FN_IP9_11_10, FN_IP9_13_12, FN_IP9_15_14,
+ FN_IP9_18_16, FN_IP9_21_19, FN_IP9_23_22, FN_IP9_25_24,
+ FN_IP9_27_26, FN_IP9_29_28, FN_IP10_2_0, FN_IP10_5_3,
+ FN_IP10_8_6, FN_IP10_11_9, FN_IP10_14_12, FN_IP10_17_15,
+ FN_IP10_20_18, FN_IP10_23_21, FN_IP10_25_24, FN_IP10_28_26,
+
+ /* GPSR3 */
+ FN_IP10_31_29, FN_IP11_2_0, FN_IP11_5_3, FN_IP11_8_6,
+ FN_IP11_11_9, FN_IP11_14_12, FN_IP11_17_15, FN_IP11_20_18,
+ FN_IP11_23_21, FN_IP11_26_24, FN_IP11_29_27, FN_IP12_2_0,
+ FN_IP12_5_3, FN_IP12_8_6, FN_IP12_11_9, FN_IP12_14_12,
+ FN_IP12_17_15, FN_IP7_16_15, FN_IP7_18_17, FN_IP7_28_27,
+ FN_IP7_30_29, FN_IP7_20_19, FN_IP7_22_21, FN_IP7_24_23,
+ FN_IP7_26_25, FN_IP1_20_19, FN_IP1_22_21, FN_IP1_24_23,
+ FN_IP5_28, FN_IP5_30_29, FN_IP6_1_0, FN_IP6_3_2,
+
+ /* GPSR4 */
+ FN_IP6_5_4, FN_IP6_7_6, FN_IP6_8, FN_IP6_11_9,
+ FN_IP6_14_12, FN_IP6_17_15, FN_IP6_19_18, FN_IP6_22_20,
+ FN_IP6_24_23, FN_IP6_26_25, FN_IP6_30_29, FN_IP7_1_0,
+ FN_IP7_3_2, FN_IP7_6_4, FN_IP7_9_7, FN_IP7_12_10,
+ FN_IP7_14_13, FN_IP2_7_4, FN_IP2_11_8, FN_IP2_15_12,
+ FN_IP1_28_25, FN_IP2_3_0, FN_IP8_3_0, FN_IP8_7_4,
+ FN_IP8_11_8, FN_IP8_15_12, FN_USB_PENC0, FN_USB_PENC1,
+ FN_IP0_2_0, FN_IP8_17_16, FN_IP8_18, FN_IP8_19,
+
+ /* GPSR5 */
+ FN_A1, FN_A2, FN_A3, FN_A4,
+ FN_A5, FN_A6, FN_A7, FN_A8,
+ FN_A9, FN_A10, FN_A11, FN_A12,
+ FN_A13, FN_A14, FN_A15, FN_A16,
+ FN_RD, FN_WE0, FN_WE1, FN_EX_WAIT0,
+ FN_IP3_23, FN_IP3_27, FN_IP3_28, FN_IP2_22,
+ FN_IP2_23, FN_IP2_24, FN_IP2_25, FN_IP2_26,
+ FN_IP2_27, FN_IP3_3, FN_IP3_4, FN_IP3_5,
+
+ /* GPSR6 */
+ FN_IP3_6, FN_IP3_7, FN_IP3_8, FN_IP3_15,
+ FN_IP3_16, FN_IP3_17, FN_IP3_18, FN_IP3_19,
+ FN_IP3_20,
+
+ /* IPSR0 */
+ FN_RD_WR, FN_FWE, FN_ATAG0, FN_VI1_R7,
+ FN_HRTS1, FN_RX4_C,
+ FN_CS1_A26, FN_HSPI_TX2, FN_SDSELF_B,
+ FN_CS0, FN_HSPI_CS2_B,
+ FN_CLKOUT, FN_TX3C_IRDA_TX_C, FN_PWM0_B,
+ FN_A25, FN_SD1_WP, FN_MMC0_D5, FN_FD5,
+ FN_HSPI_RX2, FN_VI1_R3, FN_TX5_B, FN_SSI_SDATA7_B,
+ FN_CTS0_B,
+ FN_A24, FN_SD1_CD, FN_MMC0_D4, FN_FD4,
+ FN_HSPI_CS2, FN_VI1_R2, FN_SSI_WS78_B,
+ FN_A23, FN_FCLE, FN_HSPI_CLK2, FN_VI1_R1,
+ FN_A22, FN_RX5_D, FN_HSPI_RX2_B, FN_VI1_R0,
+ FN_A21, FN_SCK5_D, FN_HSPI_CLK2_B,
+ FN_A20, FN_TX5_D, FN_HSPI_TX2_B,
+ FN_A0, FN_SD1_DAT3, FN_MMC0_D3, FN_FD3,
+ FN_BS, FN_SD1_DAT2, FN_MMC0_D2, FN_FD2,
+ FN_ATADIR0, FN_SDSELF, FN_HCTS1, FN_TX4_C,
+ FN_USB_PENC2, FN_SCK0, FN_PWM1, FN_PWMFSW0,
+ FN_SCIF_CLK, FN_TCLK0_C,
+
+ /* IPSR1 */
+ FN_EX_CS0, FN_RX3_C_IRDA_RX_C, FN_MMC0_D6,
+ FN_FD6, FN_EX_CS1, FN_MMC0_D7, FN_FD7,
+ FN_EX_CS2, FN_SD1_CLK, FN_MMC0_CLK, FN_FALE,
+ FN_ATACS00, FN_EX_CS3, FN_SD1_CMD, FN_MMC0_CMD,
+ FN_FRE, FN_ATACS10, FN_VI1_R4, FN_RX5_B,
+ FN_HSCK1, FN_SSI_SDATA8_B, FN_RTS0_B_TANS_B, FN_SSI_SDATA9,
+ FN_EX_CS4, FN_SD1_DAT0, FN_MMC0_D0, FN_FD0,
+ FN_ATARD0, FN_VI1_R5, FN_SCK5_B, FN_HTX1,
+ FN_TX2_E, FN_TX0_B, FN_SSI_SCK9, FN_EX_CS5,
+ FN_SD1_DAT1, FN_MMC0_D1, FN_FD1, FN_ATAWR0,
+ FN_VI1_R6, FN_HRX1, FN_RX2_E, FN_RX0_B,
+ FN_SSI_WS9, FN_MLB_CLK, FN_PWM2, FN_SCK4,
+ FN_MLB_SIG, FN_PWM3, FN_TX4, FN_MLB_DAT,
+ FN_PWM4, FN_RX4, FN_HTX0, FN_TX1,
+ FN_SDATA, FN_CTS0_C, FN_SUB_TCK, FN_CC5_STATE2,
+ FN_CC5_STATE10, FN_CC5_STATE18, FN_CC5_STATE26, FN_CC5_STATE34,
+
+ /* IPSR2 */
+ FN_HRX0, FN_RX1, FN_SCKZ, FN_RTS0_C_TANS_C,
+ FN_SUB_TDI, FN_CC5_STATE3, FN_CC5_STATE11, FN_CC5_STATE19,
+ FN_CC5_STATE27, FN_CC5_STATE35, FN_HSCK0, FN_SCK1,
+ FN_MTS, FN_PWM5, FN_SCK0_C, FN_SSI_SDATA9_B,
+ FN_SUB_TDO, FN_CC5_STATE0, FN_CC5_STATE8, FN_CC5_STATE16,
+ FN_CC5_STATE24, FN_CC5_STATE32, FN_HCTS0, FN_CTS1,
+ FN_STM, FN_PWM0_D, FN_RX0_C, FN_SCIF_CLK_C,
+ FN_SUB_TRST, FN_TCLK1_B, FN_CC5_OSCOUT, FN_HRTS0,
+ FN_RTS1_TANS, FN_MDATA, FN_TX0_C, FN_SUB_TMS,
+ FN_CC5_STATE1, FN_CC5_STATE9, FN_CC5_STATE17, FN_CC5_STATE25,
+ FN_CC5_STATE33, FN_DU0_DR0, FN_LCDOUT0, FN_DREQ0,
+ FN_GPS_CLK_B, FN_AUDATA0, FN_TX5_C, FN_DU0_DR1,
+ FN_LCDOUT1, FN_DACK0, FN_DRACK0, FN_GPS_SIGN_B,
+ FN_AUDATA1, FN_RX5_C, FN_DU0_DR2, FN_LCDOUT2,
+ FN_DU0_DR3, FN_LCDOUT3, FN_DU0_DR4, FN_LCDOUT4,
+ FN_DU0_DR5, FN_LCDOUT5, FN_DU0_DR6, FN_LCDOUT6,
+ FN_DU0_DR7, FN_LCDOUT7, FN_DU0_DG0, FN_LCDOUT8,
+ FN_DREQ1, FN_SCL2, FN_AUDATA2,
+
+ /* IPSR3 */
+ FN_DU0_DG1, FN_LCDOUT9, FN_DACK1, FN_SDA2,
+ FN_AUDATA3, FN_DU0_DG2, FN_LCDOUT10, FN_DU0_DG3,
+ FN_LCDOUT11, FN_DU0_DG4, FN_LCDOUT12, FN_DU0_DG5,
+ FN_LCDOUT13, FN_DU0_DG6, FN_LCDOUT14, FN_DU0_DG7,
+ FN_LCDOUT15, FN_DU0_DB0, FN_LCDOUT16, FN_EX_WAIT1,
+ FN_SCL1, FN_TCLK1, FN_AUDATA4, FN_DU0_DB1,
+ FN_LCDOUT17, FN_EX_WAIT2, FN_SDA1, FN_GPS_MAG_B,
+ FN_AUDATA5, FN_SCK5_C, FN_DU0_DB2, FN_LCDOUT18,
+ FN_DU0_DB3, FN_LCDOUT19, FN_DU0_DB4, FN_LCDOUT20,
+ FN_DU0_DB5, FN_LCDOUT21, FN_DU0_DB6, FN_LCDOUT22,
+ FN_DU0_DB7, FN_LCDOUT23, FN_DU0_DOTCLKIN, FN_QSTVA_QVS,
+ FN_TX3_D_IRDA_TX_D, FN_SCL3_B, FN_DU0_DOTCLKOUT0, FN_QCLK,
+ FN_DU0_DOTCLKOUT1, FN_QSTVB_QVE, FN_RX3_D_IRDA_RX_D, FN_SDA3_B,
+ FN_SDA2_C, FN_DACK0_B, FN_DRACK0_B, FN_DU0_EXHSYNC_DU0_HSYNC,
+ FN_QSTH_QHS, FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE,
+ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_QCPV_QDE, FN_CAN1_TX,
+ FN_TX2_C, FN_SCL2_C, FN_REMOCON,
+
+ /* IPSR4 */
+ FN_DU0_DISP, FN_QPOLA, FN_CAN_CLK_C, FN_SCK2_C,
+ FN_DU0_CDE, FN_QPOLB, FN_CAN1_RX, FN_RX2_C,
+ FN_DREQ0_B, FN_SSI_SCK78_B, FN_SCK0_B, FN_DU1_DR0,
+ FN_VI2_DATA0_VI2_B0, FN_PWM6, FN_SD3_CLK, FN_TX3_E_IRDA_TX_E,
+ FN_AUDCK, FN_PWMFSW0_B, FN_DU1_DR1, FN_VI2_DATA1_VI2_B1,
+ FN_PWM0, FN_SD3_CMD, FN_RX3_E_IRDA_RX_E, FN_AUDSYNC,
+ FN_CTS0_D, FN_DU1_DR2, FN_VI2_G0, FN_DU1_DR3,
+ FN_VI2_G1, FN_DU1_DR4, FN_VI2_G2, FN_DU1_DR5,
+ FN_VI2_G3, FN_DU1_DR6, FN_VI2_G4, FN_DU1_DR7,
+ FN_VI2_G5, FN_DU1_DG0, FN_VI2_DATA2_VI2_B2, FN_SCL1_B,
+ FN_SD3_DAT2, FN_SCK3_E, FN_AUDATA6, FN_TX0_D,
+ FN_DU1_DG1, FN_VI2_DATA3_VI2_B3, FN_SDA1_B, FN_SD3_DAT3,
+ FN_SCK5, FN_AUDATA7, FN_RX0_D, FN_DU1_DG2,
+ FN_VI2_G6, FN_DU1_DG3, FN_VI2_G7, FN_DU1_DG4,
+ FN_VI2_R0, FN_DU1_DG5, FN_VI2_R1, FN_DU1_DG6,
+ FN_VI2_R2, FN_DU1_DG7, FN_VI2_R3, FN_DU1_DB0,
+ FN_VI2_DATA4_VI2_B4, FN_SCL2_B, FN_SD3_DAT0, FN_TX5,
+ FN_SCK0_D,
+
+ /* IPSR5 */
+ FN_DU1_DB1, FN_VI2_DATA5_VI2_B5, FN_SDA2_B, FN_SD3_DAT1,
+ FN_RX5, FN_RTS0_D_TANS_D, FN_DU1_DB2, FN_VI2_R4,
+ FN_DU1_DB3, FN_VI2_R5, FN_DU1_DB4, FN_VI2_R6,
+ FN_DU1_DB5, FN_VI2_R7, FN_DU1_DB6, FN_SCL2_D,
+ FN_DU1_DB7, FN_SDA2_D, FN_DU1_DOTCLKIN, FN_VI2_CLKENB,
+ FN_HSPI_CS1, FN_SCL1_D, FN_DU1_DOTCLKOUT, FN_VI2_FIELD,
+ FN_SDA1_D, FN_DU1_EXHSYNC_DU1_HSYNC, FN_VI2_HSYNC,
+ FN_VI3_HSYNC, FN_DU1_EXVSYNC_DU1_VSYNC, FN_VI2_VSYNC, FN_VI3_VSYNC,
+ FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, FN_VI2_CLK, FN_TX3_B_IRDA_TX_B,
+ FN_SD3_CD, FN_HSPI_TX1, FN_VI1_CLKENB, FN_VI3_CLKENB,
+ FN_AUDIO_CLKC, FN_TX2_D, FN_SPEEDIN, FN_GPS_SIGN_D,
+ FN_DU1_DISP, FN_VI2_DATA6_VI2_B6, FN_TCLK0, FN_QSTVA_B_QVS_B,
+ FN_HSPI_CLK1, FN_SCK2_D, FN_AUDIO_CLKOUT_B, FN_GPS_MAG_D,
+ FN_DU1_CDE, FN_VI2_DATA7_VI2_B7, FN_RX3_B_IRDA_RX_B,
+ FN_SD3_WP, FN_HSPI_RX1, FN_VI1_FIELD, FN_VI3_FIELD,
+ FN_AUDIO_CLKOUT, FN_RX2_D, FN_GPS_CLK_C, FN_GPS_CLK_D,
+ FN_AUDIO_CLKA, FN_CAN_TXCLK, FN_AUDIO_CLKB, FN_USB_OVC2,
+ FN_CAN_DEBUGOUT0, FN_MOUT0,
+
+ /* IPSR6 */
+ FN_SSI_SCK0129, FN_CAN_DEBUGOUT1, FN_MOUT1, FN_SSI_WS0129,
+ FN_CAN_DEBUGOUT2, FN_MOUT2, FN_SSI_SDATA0, FN_CAN_DEBUGOUT3,
+ FN_MOUT5, FN_SSI_SDATA1, FN_CAN_DEBUGOUT4, FN_MOUT6,
+ FN_SSI_SDATA2, FN_CAN_DEBUGOUT5, FN_SSI_SCK34, FN_CAN_DEBUGOUT6,
+ FN_CAN0_TX_B, FN_IERX, FN_SSI_SCK9_C, FN_SSI_WS34,
+ FN_CAN_DEBUGOUT7, FN_CAN0_RX_B, FN_IETX, FN_SSI_WS9_C,
+ FN_SSI_SDATA3, FN_PWM0_C, FN_CAN_DEBUGOUT8, FN_CAN_CLK_B,
+ FN_IECLK, FN_SCIF_CLK_B, FN_TCLK0_B, FN_SSI_SDATA4,
+ FN_CAN_DEBUGOUT9, FN_SSI_SDATA9_C, FN_SSI_SCK5, FN_ADICLK,
+ FN_CAN_DEBUGOUT10, FN_SCK3, FN_TCLK0_D, FN_SSI_WS5,
+ FN_ADICS_SAMP, FN_CAN_DEBUGOUT11, FN_TX3_IRDA_TX, FN_SSI_SDATA5,
+ FN_ADIDATA, FN_CAN_DEBUGOUT12, FN_RX3_IRDA_RX, FN_SSI_SCK6,
+ FN_ADICHS0, FN_CAN0_TX, FN_IERX_B,
+
+ /* IPSR7 */
+ FN_SSI_WS6, FN_ADICHS1, FN_CAN0_RX, FN_IETX_B,
+ FN_SSI_SDATA6, FN_ADICHS2, FN_CAN_CLK, FN_IECLK_B,
+ FN_SSI_SCK78, FN_CAN_DEBUGOUT13, FN_IRQ0_B, FN_SSI_SCK9_B,
+ FN_HSPI_CLK1_C, FN_SSI_WS78, FN_CAN_DEBUGOUT14, FN_IRQ1_B,
+ FN_SSI_WS9_B, FN_HSPI_CS1_C, FN_SSI_SDATA7, FN_CAN_DEBUGOUT15,
+ FN_IRQ2_B, FN_TCLK1_C, FN_HSPI_TX1_C, FN_SSI_SDATA8,
+ FN_VSP, FN_IRQ3_B, FN_HSPI_RX1_C, FN_SD0_CLK,
+ FN_ATACS01, FN_SCK1_B, FN_SD0_CMD, FN_ATACS11,
+ FN_TX1_B, FN_CC5_TDO, FN_SD0_DAT0, FN_ATADIR1,
+ FN_RX1_B, FN_CC5_TRST, FN_SD0_DAT1, FN_ATAG1,
+ FN_SCK2_B, FN_CC5_TMS, FN_SD0_DAT2, FN_ATARD1,
+ FN_TX2_B, FN_CC5_TCK, FN_SD0_DAT3, FN_ATAWR1,
+ FN_RX2_B, FN_CC5_TDI, FN_SD0_CD, FN_DREQ2,
+ FN_RTS1_B_TANS_B, FN_SD0_WP, FN_DACK2, FN_CTS1_B,
+
+ /* IPSR8 */
+ FN_HSPI_CLK0, FN_CTS0, FN_USB_OVC0, FN_AD_CLK,
+ FN_CC5_STATE4, FN_CC5_STATE12, FN_CC5_STATE20, FN_CC5_STATE28,
+ FN_CC5_STATE36, FN_HSPI_CS0, FN_RTS0_TANS, FN_USB_OVC1,
+ FN_AD_DI, FN_CC5_STATE5, FN_CC5_STATE13, FN_CC5_STATE21,
+ FN_CC5_STATE29, FN_CC5_STATE37, FN_HSPI_TX0, FN_TX0,
+ FN_CAN_DEBUG_HW_TRIGGER, FN_AD_DO, FN_CC5_STATE6, FN_CC5_STATE14,
+ FN_CC5_STATE22, FN_CC5_STATE30, FN_CC5_STATE38, FN_HSPI_RX0,
+ FN_RX0, FN_CAN_STEP0, FN_AD_NCS, FN_CC5_STATE7,
+ FN_CC5_STATE15, FN_CC5_STATE23, FN_CC5_STATE31, FN_CC5_STATE39,
+ FN_FMCLK, FN_RDS_CLK, FN_PCMOE, FN_BPFCLK,
+ FN_PCMWE, FN_FMIN, FN_RDS_DATA, FN_VI0_CLK,
+ FN_MMC1_CLK, FN_VI0_CLKENB, FN_TX1_C, FN_HTX1_B,
+ FN_MT1_SYNC, FN_VI0_FIELD, FN_RX1_C, FN_HRX1_B,
+ FN_VI0_HSYNC, FN_VI0_DATA0_B_VI0_B0_B, FN_CTS1_C, FN_TX4_D,
+ FN_MMC1_CMD, FN_HSCK1_B, FN_VI0_VSYNC, FN_VI0_DATA1_B_VI0_B1_B,
+ FN_RTS1_C_TANS_C, FN_RX4_D, FN_PWMFSW0_C,
+
+ /* IPSR9 */
+ FN_VI0_DATA0_VI0_B0, FN_HRTS1_B, FN_MT1_VCXO, FN_VI0_DATA1_VI0_B1,
+ FN_HCTS1_B, FN_MT1_PWM, FN_VI0_DATA2_VI0_B2, FN_MMC1_D0,
+ FN_VI0_DATA3_VI0_B3, FN_MMC1_D1, FN_VI0_DATA4_VI0_B4, FN_MMC1_D2,
+ FN_VI0_DATA5_VI0_B5, FN_MMC1_D3, FN_VI0_DATA6_VI0_B6, FN_MMC1_D4,
+ FN_ARM_TRACEDATA_0, FN_VI0_DATA7_VI0_B7, FN_MMC1_D5,
+ FN_ARM_TRACEDATA_1, FN_VI0_G0, FN_SSI_SCK78_C, FN_IRQ0,
+ FN_ARM_TRACEDATA_2, FN_VI0_G1, FN_SSI_WS78_C, FN_IRQ1,
+ FN_ARM_TRACEDATA_3, FN_VI0_G2, FN_ETH_TXD1, FN_MMC1_D6,
+ FN_ARM_TRACEDATA_4, FN_TS_SPSYNC0, FN_VI0_G3, FN_ETH_CRS_DV,
+ FN_MMC1_D7, FN_ARM_TRACEDATA_5, FN_TS_SDAT0, FN_VI0_G4,
+ FN_ETH_TX_EN, FN_SD2_DAT0_B, FN_ARM_TRACEDATA_6, FN_VI0_G5,
+ FN_ETH_RX_ER, FN_SD2_DAT1_B, FN_ARM_TRACEDATA_7, FN_VI0_G6,
+ FN_ETH_RXD0, FN_SD2_DAT2_B, FN_ARM_TRACEDATA_8, FN_VI0_G7,
+ FN_ETH_RXD1, FN_SD2_DAT3_B, FN_ARM_TRACEDATA_9,
+
+ /* IPSR10 */
+ FN_VI0_R0, FN_SSI_SDATA7_C, FN_SCK1_C, FN_DREQ1_B,
+ FN_ARM_TRACEDATA_10, FN_DREQ0_C, FN_VI0_R1, FN_SSI_SDATA8_C,
+ FN_DACK1_B, FN_ARM_TRACEDATA_11, FN_DACK0_C, FN_DRACK0_C,
+ FN_VI0_R2, FN_ETH_LINK, FN_SD2_CLK_B, FN_IRQ2,
+ FN_ARM_TRACEDATA_12, FN_VI0_R3, FN_ETH_MAGIC, FN_SD2_CMD_B,
+ FN_IRQ3, FN_ARM_TRACEDATA_13, FN_VI0_R4, FN_ETH_REFCLK,
+ FN_SD2_CD_B, FN_HSPI_CLK1_B, FN_ARM_TRACEDATA_14, FN_MT1_CLK,
+ FN_TS_SCK0, FN_VI0_R5, FN_ETH_TXD0, FN_SD2_WP_B, FN_HSPI_CS1_B,
+ FN_ARM_TRACEDATA_15, FN_MT1_D, FN_TS_SDEN0, FN_VI0_R6,
+ FN_ETH_MDC, FN_DREQ2_C, FN_HSPI_TX1_B, FN_TRACECLK,
+ FN_MT1_BEN, FN_PWMFSW0_D, FN_VI0_R7, FN_ETH_MDIO,
+ FN_DACK2_C, FN_HSPI_RX1_B, FN_SCIF_CLK_D, FN_TRACECTL,
+ FN_MT1_PEN, FN_VI1_CLK, FN_SIM_D, FN_SDA3,
+ FN_VI1_HSYNC, FN_VI3_CLK, FN_SSI_SCK4, FN_GPS_SIGN_C,
+ FN_PWMFSW0_E, FN_VI1_VSYNC, FN_AUDIO_CLKOUT_C, FN_SSI_WS4,
+ FN_SIM_CLK, FN_GPS_MAG_C, FN_SPV_TRST, FN_SCL3,
+
+ /* IPSR11 */
+ FN_VI1_DATA0_VI1_B0, FN_SD2_DAT0, FN_SIM_RST, FN_SPV_TCK,
+ FN_ADICLK_B, FN_VI1_DATA1_VI1_B1, FN_SD2_DAT1, FN_MT0_CLK,
+ FN_SPV_TMS, FN_ADICS_B_SAMP_B, FN_VI1_DATA2_VI1_B2, FN_SD2_DAT2,
+ FN_MT0_D, FN_SPVTDI, FN_ADIDATA_B, FN_VI1_DATA3_VI1_B3,
+ FN_SD2_DAT3, FN_MT0_BEN, FN_SPV_TDO, FN_ADICHS0_B,
+ FN_VI1_DATA4_VI1_B4, FN_SD2_CLK, FN_MT0_PEN, FN_SPA_TRST,
+ FN_HSPI_CLK1_D, FN_ADICHS1_B, FN_VI1_DATA5_VI1_B5, FN_SD2_CMD,
+ FN_MT0_SYNC, FN_SPA_TCK, FN_HSPI_CS1_D, FN_ADICHS2_B,
+ FN_VI1_DATA6_VI1_B6, FN_SD2_CD, FN_MT0_VCXO, FN_SPA_TMS,
+ FN_HSPI_TX1_D, FN_VI1_DATA7_VI1_B7, FN_SD2_WP, FN_MT0_PWM,
+ FN_SPA_TDI, FN_HSPI_RX1_D, FN_VI1_G0, FN_VI3_DATA0,
+ FN_DU1_DOTCLKOUT1, FN_TS_SCK1, FN_DREQ2_B, FN_TX2,
+ FN_SPA_TDO, FN_HCTS0_B, FN_VI1_G1, FN_VI3_DATA1,
+ FN_SSI_SCK1, FN_TS_SDEN1, FN_DACK2_B, FN_RX2, FN_HRTS0_B,
+
+ /* IPSR12 */
+ FN_VI1_G2, FN_VI3_DATA2, FN_SSI_WS1, FN_TS_SPSYNC1,
+ FN_SCK2, FN_HSCK0_B, FN_VI1_G3, FN_VI3_DATA3,
+ FN_SSI_SCK2, FN_TS_SDAT1, FN_SCL1_C, FN_HTX0_B,
+ FN_VI1_G4, FN_VI3_DATA4, FN_SSI_WS2, FN_SDA1_C,
+ FN_SIM_RST_B, FN_HRX0_B, FN_VI1_G5, FN_VI3_DATA5,
+ FN_GPS_CLK, FN_FSE, FN_TX4_B, FN_SIM_D_B,
+ FN_VI1_G6, FN_VI3_DATA6, FN_GPS_SIGN, FN_FRB,
+ FN_RX4_B, FN_SIM_CLK_B, FN_VI1_G7, FN_VI3_DATA7,
+ FN_GPS_MAG, FN_FCE, FN_SCK4_B,
+
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1, FN_SEL_SCIF5_2, FN_SEL_SCIF5_3,
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, FN_SEL_SCIF4_3,
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2,
+ FN_SEL_SCIF3_3, FN_SEL_SCIF3_4,
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2,
+ FN_SEL_SCIF2_3, FN_SEL_SCIF2_4,
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2,
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3,
+ FN_SEL_SSI9_0, FN_SEL_SSI9_1, FN_SEL_SSI9_2,
+ FN_SEL_SSI8_0, FN_SEL_SSI8_1, FN_SEL_SSI8_2,
+ FN_SEL_SSI7_0, FN_SEL_SSI7_1, FN_SEL_SSI7_2,
+ FN_SEL_VI0_0, FN_SEL_VI0_1,
+ FN_SEL_SD2_0, FN_SEL_SD2_1,
+ FN_SEL_INT3_0, FN_SEL_INT3_1,
+ FN_SEL_INT2_0, FN_SEL_INT2_1,
+ FN_SEL_INT1_0, FN_SEL_INT1_1,
+ FN_SEL_INT0_0, FN_SEL_INT0_1,
+ FN_SEL_IE_0, FN_SEL_IE_1,
+ FN_SEL_EXBUS2_0, FN_SEL_EXBUS2_1, FN_SEL_EXBUS2_2,
+ FN_SEL_EXBUS1_0, FN_SEL_EXBUS1_1,
+ FN_SEL_EXBUS0_0, FN_SEL_EXBUS0_1, FN_SEL_EXBUS0_2,
+
+ FN_SEL_TMU1_0, FN_SEL_TMU1_1, FN_SEL_TMU1_2,
+ FN_SEL_TMU0_0, FN_SEL_TMU0_1, FN_SEL_TMU0_2, FN_SEL_TMU0_3,
+ FN_SEL_SCIF_0, FN_SEL_SCIF_1, FN_SEL_SCIF_2, FN_SEL_SCIF_3,
+ FN_SEL_CANCLK_0, FN_SEL_CANCLK_1, FN_SEL_CANCLK_2,
+ FN_SEL_CAN0_0, FN_SEL_CAN0_1,
+ FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1,
+ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1,
+ FN_SEL_PWMFSW_0, FN_SEL_PWMFSW_1, FN_SEL_PWMFSW_2,
+ FN_SEL_PWMFSW_3, FN_SEL_PWMFSW_4,
+ FN_SEL_ADI_0, FN_SEL_ADI_1,
+ FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, FN_SEL_GPS_3,
+ FN_SEL_SIM_0, FN_SEL_SIM_1,
+ FN_SEL_HSPI2_0, FN_SEL_HSPI2_1,
+ FN_SEL_HSPI1_0, FN_SEL_HSPI1_1, FN_SEL_HSPI1_2, FN_SEL_HSPI1_3,
+ FN_SEL_I2C3_0, FN_SEL_I2C3_1,
+ FN_SEL_I2C2_0, FN_SEL_I2C2_1, FN_SEL_I2C2_2, FN_SEL_I2C2_3,
+ FN_SEL_I2C1_0, FN_SEL_I2C1_1, FN_SEL_I2C1_2, FN_SEL_I2C1_3,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ AVS1_MARK, AVS2_MARK, A17_MARK, A18_MARK,
+ A19_MARK,
+
+ RD_WR_MARK, FWE_MARK, ATAG0_MARK, VI1_R7_MARK,
+ HRTS1_MARK, RX4_C_MARK,
+ CS1_A26_MARK, HSPI_TX2_MARK, SDSELF_B_MARK,
+ CS0_MARK, HSPI_CS2_B_MARK,
+ CLKOUT_MARK, TX3C_IRDA_TX_C_MARK, PWM0_B_MARK,
+ A25_MARK, SD1_WP_MARK, MMC0_D5_MARK, FD5_MARK,
+ HSPI_RX2_MARK, VI1_R3_MARK, TX5_B_MARK, SSI_SDATA7_B_MARK, CTS0_B_MARK,
+ A24_MARK, SD1_CD_MARK, MMC0_D4_MARK, FD4_MARK,
+ HSPI_CS2_MARK, VI1_R2_MARK, SSI_WS78_B_MARK,
+ A23_MARK, FCLE_MARK, HSPI_CLK2_MARK, VI1_R1_MARK,
+ A22_MARK, RX5_D_MARK, HSPI_RX2_B_MARK, VI1_R0_MARK,
+ A21_MARK, SCK5_D_MARK, HSPI_CLK2_B_MARK,
+ A20_MARK, TX5_D_MARK, HSPI_TX2_B_MARK,
+ A0_MARK, SD1_DAT3_MARK, MMC0_D3_MARK, FD3_MARK,
+ BS_MARK, SD1_DAT2_MARK, MMC0_D2_MARK, FD2_MARK,
+ ATADIR0_MARK, SDSELF_MARK, HCTS1_MARK, TX4_C_MARK,
+ USB_PENC2_MARK, SCK0_MARK, PWM1_MARK, PWMFSW0_MARK,
+ SCIF_CLK_MARK, TCLK0_C_MARK,
+
+ EX_CS0_MARK, RX3_C_IRDA_RX_C_MARK, MMC0_D6_MARK,
+ FD6_MARK, EX_CS1_MARK, MMC0_D7_MARK, FD7_MARK,
+ EX_CS2_MARK, SD1_CLK_MARK, MMC0_CLK_MARK, FALE_MARK,
+ ATACS00_MARK, EX_CS3_MARK, SD1_CMD_MARK, MMC0_CMD_MARK,
+ FRE_MARK, ATACS10_MARK, VI1_R4_MARK, RX5_B_MARK,
+ HSCK1_MARK, SSI_SDATA8_B_MARK, RTS0_B_TANS_B_MARK, SSI_SDATA9_MARK,
+ EX_CS4_MARK, SD1_DAT0_MARK, MMC0_D0_MARK, FD0_MARK,
+ ATARD0_MARK, VI1_R5_MARK, SCK5_B_MARK, HTX1_MARK,
+ TX2_E_MARK, TX0_B_MARK, SSI_SCK9_MARK, EX_CS5_MARK,
+ SD1_DAT1_MARK, MMC0_D1_MARK, FD1_MARK, ATAWR0_MARK,
+ VI1_R6_MARK, HRX1_MARK, RX2_E_MARK, RX0_B_MARK,
+ SSI_WS9_MARK, MLB_CLK_MARK, PWM2_MARK, SCK4_MARK,
+ MLB_SIG_MARK, PWM3_MARK, TX4_MARK, MLB_DAT_MARK,
+ PWM4_MARK, RX4_MARK, HTX0_MARK, TX1_MARK,
+ SDATA_MARK, CTS0_C_MARK, SUB_TCK_MARK, CC5_STATE2_MARK,
+ CC5_STATE10_MARK, CC5_STATE18_MARK, CC5_STATE26_MARK, CC5_STATE34_MARK,
+
+ HRX0_MARK, RX1_MARK, SCKZ_MARK, RTS0_C_TANS_C_MARK,
+ SUB_TDI_MARK, CC5_STATE3_MARK, CC5_STATE11_MARK, CC5_STATE19_MARK,
+ CC5_STATE27_MARK, CC5_STATE35_MARK, HSCK0_MARK, SCK1_MARK,
+ MTS_MARK, PWM5_MARK, SCK0_C_MARK, SSI_SDATA9_B_MARK,
+ SUB_TDO_MARK, CC5_STATE0_MARK, CC5_STATE8_MARK, CC5_STATE16_MARK,
+ CC5_STATE24_MARK, CC5_STATE32_MARK, HCTS0_MARK, CTS1_MARK,
+ STM_MARK, PWM0_D_MARK, RX0_C_MARK, SCIF_CLK_C_MARK,
+ SUB_TRST_MARK, TCLK1_B_MARK, CC5_OSCOUT_MARK, HRTS0_MARK,
+ RTS1_TANS_MARK, MDATA_MARK, TX0_C_MARK, SUB_TMS_MARK,
+ CC5_STATE1_MARK, CC5_STATE9_MARK, CC5_STATE17_MARK, CC5_STATE25_MARK,
+ CC5_STATE33_MARK, DU0_DR0_MARK, LCDOUT0_MARK, DREQ0_MARK,
+ GPS_CLK_B_MARK, AUDATA0_MARK, TX5_C_MARK, DU0_DR1_MARK,
+ LCDOUT1_MARK, DACK0_MARK, DRACK0_MARK, GPS_SIGN_B_MARK,
+ AUDATA1_MARK, RX5_C_MARK, DU0_DR2_MARK, LCDOUT2_MARK,
+ DU0_DR3_MARK, LCDOUT3_MARK, DU0_DR4_MARK, LCDOUT4_MARK,
+ DU0_DR5_MARK, LCDOUT5_MARK, DU0_DR6_MARK, LCDOUT6_MARK,
+ DU0_DR7_MARK, LCDOUT7_MARK, DU0_DG0_MARK, LCDOUT8_MARK,
+ DREQ1_MARK, SCL2_MARK, AUDATA2_MARK,
+
+ DU0_DG1_MARK, LCDOUT9_MARK, DACK1_MARK, SDA2_MARK,
+ AUDATA3_MARK, DU0_DG2_MARK, LCDOUT10_MARK, DU0_DG3_MARK,
+ LCDOUT11_MARK, DU0_DG4_MARK, LCDOUT12_MARK, DU0_DG5_MARK,
+ LCDOUT13_MARK, DU0_DG6_MARK, LCDOUT14_MARK, DU0_DG7_MARK,
+ LCDOUT15_MARK, DU0_DB0_MARK, LCDOUT16_MARK, EX_WAIT1_MARK,
+ SCL1_MARK, TCLK1_MARK, AUDATA4_MARK, DU0_DB1_MARK,
+ LCDOUT17_MARK, EX_WAIT2_MARK, SDA1_MARK, GPS_MAG_B_MARK,
+ AUDATA5_MARK, SCK5_C_MARK, DU0_DB2_MARK, LCDOUT18_MARK,
+ DU0_DB3_MARK, LCDOUT19_MARK, DU0_DB4_MARK, LCDOUT20_MARK,
+ DU0_DB5_MARK, LCDOUT21_MARK, DU0_DB6_MARK, LCDOUT22_MARK,
+ DU0_DB7_MARK, LCDOUT23_MARK, DU0_DOTCLKIN_MARK, QSTVA_QVS_MARK,
+ TX3_D_IRDA_TX_D_MARK, SCL3_B_MARK, DU0_DOTCLKOUT0_MARK, QCLK_MARK,
+ DU0_DOTCLKOUT1_MARK, QSTVB_QVE_MARK, RX3_D_IRDA_RX_D_MARK, SDA3_B_MARK,
+ SDA2_C_MARK, DACK0_B_MARK, DRACK0_B_MARK, DU0_EXHSYNC_DU0_HSYNC_MARK,
+ QSTH_QHS_MARK, DU0_EXVSYNC_DU0_VSYNC_MARK, QSTB_QHE_MARK,
+ DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK, QCPV_QDE_MARK, CAN1_TX_MARK,
+ TX2_C_MARK, SCL2_C_MARK, REMOCON_MARK,
+
+ DU0_DISP_MARK, QPOLA_MARK, CAN_CLK_C_MARK, SCK2_C_MARK,
+ DU0_CDE_MARK, QPOLB_MARK, CAN1_RX_MARK, RX2_C_MARK,
+ DREQ0_B_MARK, SSI_SCK78_B_MARK, SCK0_B_MARK, DU1_DR0_MARK,
+ VI2_DATA0_VI2_B0_MARK, PWM6_MARK, SD3_CLK_MARK, TX3_E_IRDA_TX_E_MARK,
+ AUDCK_MARK, PWMFSW0_B_MARK, DU1_DR1_MARK, VI2_DATA1_VI2_B1_MARK,
+ PWM0_MARK, SD3_CMD_MARK, RX3_E_IRDA_RX_E_MARK, AUDSYNC_MARK,
+ CTS0_D_MARK, DU1_DR2_MARK, VI2_G0_MARK, DU1_DR3_MARK,
+ VI2_G1_MARK, DU1_DR4_MARK, VI2_G2_MARK, DU1_DR5_MARK,
+ VI2_G3_MARK, DU1_DR6_MARK, VI2_G4_MARK, DU1_DR7_MARK,
+ VI2_G5_MARK, DU1_DG0_MARK, VI2_DATA2_VI2_B2_MARK, SCL1_B_MARK,
+ SD3_DAT2_MARK, SCK3_E_MARK, AUDATA6_MARK, TX0_D_MARK,
+ DU1_DG1_MARK, VI2_DATA3_VI2_B3_MARK, SDA1_B_MARK, SD3_DAT3_MARK,
+ SCK5_MARK, AUDATA7_MARK, RX0_D_MARK, DU1_DG2_MARK,
+ VI2_G6_MARK, DU1_DG3_MARK, VI2_G7_MARK, DU1_DG4_MARK,
+ VI2_R0_MARK, DU1_DG5_MARK, VI2_R1_MARK, DU1_DG6_MARK,
+ VI2_R2_MARK, DU1_DG7_MARK, VI2_R3_MARK, DU1_DB0_MARK,
+ VI2_DATA4_VI2_B4_MARK, SCL2_B_MARK, SD3_DAT0_MARK, TX5_MARK,
+ SCK0_D_MARK,
+
+ DU1_DB1_MARK, VI2_DATA5_VI2_B5_MARK, SDA2_B_MARK, SD3_DAT1_MARK,
+ RX5_MARK, RTS0_D_TANS_D_MARK, DU1_DB2_MARK, VI2_R4_MARK,
+ DU1_DB3_MARK, VI2_R5_MARK, DU1_DB4_MARK, VI2_R6_MARK,
+ DU1_DB5_MARK, VI2_R7_MARK, DU1_DB6_MARK, SCL2_D_MARK,
+ DU1_DB7_MARK, SDA2_D_MARK, DU1_DOTCLKIN_MARK, VI2_CLKENB_MARK,
+ HSPI_CS1_MARK, SCL1_D_MARK, DU1_DOTCLKOUT_MARK, VI2_FIELD_MARK,
+ SDA1_D_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK, VI2_HSYNC_MARK,
+ VI3_HSYNC_MARK, DU1_EXVSYNC_DU1_VSYNC_MARK, VI2_VSYNC_MARK,
+ VI3_VSYNC_MARK, DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK, VI2_CLK_MARK,
+ TX3_B_IRDA_TX_B_MARK, SD3_CD_MARK, HSPI_TX1_MARK, VI1_CLKENB_MARK,
+ VI3_CLKENB_MARK, AUDIO_CLKC_MARK, TX2_D_MARK, SPEEDIN_MARK,
+ GPS_SIGN_D_MARK, DU1_DISP_MARK, VI2_DATA6_VI2_B6_MARK, TCLK0_MARK,
+ QSTVA_B_QVS_B_MARK, HSPI_CLK1_MARK, SCK2_D_MARK, AUDIO_CLKOUT_B_MARK,
+ GPS_MAG_D_MARK, DU1_CDE_MARK, VI2_DATA7_VI2_B7_MARK,
+ RX3_B_IRDA_RX_B_MARK, SD3_WP_MARK, HSPI_RX1_MARK, VI1_FIELD_MARK,
+ VI3_FIELD_MARK, AUDIO_CLKOUT_MARK, RX2_D_MARK, GPS_CLK_C_MARK,
+ GPS_CLK_D_MARK, AUDIO_CLKA_MARK, CAN_TXCLK_MARK, AUDIO_CLKB_MARK,
+ USB_OVC2_MARK, CAN_DEBUGOUT0_MARK, MOUT0_MARK,
+
+ SSI_SCK0129_MARK, CAN_DEBUGOUT1_MARK, MOUT1_MARK, SSI_WS0129_MARK,
+ CAN_DEBUGOUT2_MARK, MOUT2_MARK, SSI_SDATA0_MARK, CAN_DEBUGOUT3_MARK,
+ MOUT5_MARK, SSI_SDATA1_MARK, CAN_DEBUGOUT4_MARK, MOUT6_MARK,
+ SSI_SDATA2_MARK, CAN_DEBUGOUT5_MARK, SSI_SCK34_MARK,
+ CAN_DEBUGOUT6_MARK, CAN0_TX_B_MARK, IERX_MARK, SSI_SCK9_C_MARK,
+ SSI_WS34_MARK, CAN_DEBUGOUT7_MARK, CAN0_RX_B_MARK, IETX_MARK,
+ SSI_WS9_C_MARK, SSI_SDATA3_MARK, PWM0_C_MARK, CAN_DEBUGOUT8_MARK,
+ CAN_CLK_B_MARK, IECLK_MARK, SCIF_CLK_B_MARK, TCLK0_B_MARK,
+ SSI_SDATA4_MARK, CAN_DEBUGOUT9_MARK, SSI_SDATA9_C_MARK, SSI_SCK5_MARK,
+ ADICLK_MARK, CAN_DEBUGOUT10_MARK, SCK3_MARK, TCLK0_D_MARK,
+ SSI_WS5_MARK, ADICS_SAMP_MARK, CAN_DEBUGOUT11_MARK, TX3_IRDA_TX_MARK,
+ SSI_SDATA5_MARK, ADIDATA_MARK, CAN_DEBUGOUT12_MARK, RX3_IRDA_RX_MARK,
+ SSI_SCK6_MARK, ADICHS0_MARK, CAN0_TX_MARK, IERX_B_MARK,
+
+ SSI_WS6_MARK, ADICHS1_MARK, CAN0_RX_MARK, IETX_B_MARK,
+ SSI_SDATA6_MARK, ADICHS2_MARK, CAN_CLK_MARK, IECLK_B_MARK,
+ SSI_SCK78_MARK, CAN_DEBUGOUT13_MARK, IRQ0_B_MARK, SSI_SCK9_B_MARK,
+ HSPI_CLK1_C_MARK, SSI_WS78_MARK, CAN_DEBUGOUT14_MARK, IRQ1_B_MARK,
+ SSI_WS9_B_MARK, HSPI_CS1_C_MARK, SSI_SDATA7_MARK, CAN_DEBUGOUT15_MARK,
+ IRQ2_B_MARK, TCLK1_C_MARK, HSPI_TX1_C_MARK, SSI_SDATA8_MARK,
+ VSP_MARK, IRQ3_B_MARK, HSPI_RX1_C_MARK, SD0_CLK_MARK,
+ ATACS01_MARK, SCK1_B_MARK, SD0_CMD_MARK, ATACS11_MARK,
+ TX1_B_MARK, CC5_TDO_MARK, SD0_DAT0_MARK, ATADIR1_MARK,
+ RX1_B_MARK, CC5_TRST_MARK, SD0_DAT1_MARK, ATAG1_MARK,
+ SCK2_B_MARK, CC5_TMS_MARK, SD0_DAT2_MARK, ATARD1_MARK,
+ TX2_B_MARK, CC5_TCK_MARK, SD0_DAT3_MARK, ATAWR1_MARK,
+ RX2_B_MARK, CC5_TDI_MARK, SD0_CD_MARK, DREQ2_MARK,
+ RTS1_B_TANS_B_MARK, SD0_WP_MARK, DACK2_MARK, CTS1_B_MARK,
+
+ HSPI_CLK0_MARK, CTS0_MARK, USB_OVC0_MARK, AD_CLK_MARK,
+ CC5_STATE4_MARK, CC5_STATE12_MARK, CC5_STATE20_MARK, CC5_STATE28_MARK,
+ CC5_STATE36_MARK, HSPI_CS0_MARK, RTS0_TANS_MARK, USB_OVC1_MARK,
+ AD_DI_MARK, CC5_STATE5_MARK, CC5_STATE13_MARK, CC5_STATE21_MARK,
+ CC5_STATE29_MARK, CC5_STATE37_MARK, HSPI_TX0_MARK, TX0_MARK,
+ CAN_DEBUG_HW_TRIGGER_MARK, AD_DO_MARK, CC5_STATE6_MARK,
+ CC5_STATE14_MARK, CC5_STATE22_MARK, CC5_STATE30_MARK,
+ CC5_STATE38_MARK, HSPI_RX0_MARK, RX0_MARK, CAN_STEP0_MARK,
+ AD_NCS_MARK, CC5_STATE7_MARK, CC5_STATE15_MARK, CC5_STATE23_MARK,
+ CC5_STATE31_MARK, CC5_STATE39_MARK, FMCLK_MARK, RDS_CLK_MARK,
+ PCMOE_MARK, BPFCLK_MARK, PCMWE_MARK, FMIN_MARK, RDS_DATA_MARK,
+ VI0_CLK_MARK, MMC1_CLK_MARK, VI0_CLKENB_MARK, TX1_C_MARK, HTX1_B_MARK,
+ MT1_SYNC_MARK, VI0_FIELD_MARK, RX1_C_MARK, HRX1_B_MARK,
+ VI0_HSYNC_MARK, VI0_DATA0_B_VI0_B0_B_MARK, CTS1_C_MARK, TX4_D_MARK,
+ MMC1_CMD_MARK, HSCK1_B_MARK, VI0_VSYNC_MARK, VI0_DATA1_B_VI0_B1_B_MARK,
+ RTS1_C_TANS_C_MARK, RX4_D_MARK, PWMFSW0_C_MARK,
+
+ VI0_DATA0_VI0_B0_MARK, HRTS1_B_MARK, MT1_VCXO_MARK,
+ VI0_DATA1_VI0_B1_MARK, HCTS1_B_MARK, MT1_PWM_MARK,
+ VI0_DATA2_VI0_B2_MARK, MMC1_D0_MARK, VI0_DATA3_VI0_B3_MARK,
+ MMC1_D1_MARK, VI0_DATA4_VI0_B4_MARK, MMC1_D2_MARK,
+ VI0_DATA5_VI0_B5_MARK, MMC1_D3_MARK, VI0_DATA6_VI0_B6_MARK,
+ MMC1_D4_MARK, ARM_TRACEDATA_0_MARK, VI0_DATA7_VI0_B7_MARK,
+ MMC1_D5_MARK, ARM_TRACEDATA_1_MARK, VI0_G0_MARK, SSI_SCK78_C_MARK,
+ IRQ0_MARK, ARM_TRACEDATA_2_MARK, VI0_G1_MARK, SSI_WS78_C_MARK,
+ IRQ1_MARK, ARM_TRACEDATA_3_MARK, VI0_G2_MARK, ETH_TXD1_MARK,
+ MMC1_D6_MARK, ARM_TRACEDATA_4_MARK, TS_SPSYNC0_MARK, VI0_G3_MARK,
+ ETH_CRS_DV_MARK, MMC1_D7_MARK, ARM_TRACEDATA_5_MARK, TS_SDAT0_MARK,
+ VI0_G4_MARK, ETH_TX_EN_MARK, SD2_DAT0_B_MARK, ARM_TRACEDATA_6_MARK,
+ VI0_G5_MARK, ETH_RX_ER_MARK, SD2_DAT1_B_MARK, ARM_TRACEDATA_7_MARK,
+ VI0_G6_MARK, ETH_RXD0_MARK, SD2_DAT2_B_MARK, ARM_TRACEDATA_8_MARK,
+ VI0_G7_MARK, ETH_RXD1_MARK, SD2_DAT3_B_MARK, ARM_TRACEDATA_9_MARK,
+
+ VI0_R0_MARK, SSI_SDATA7_C_MARK, SCK1_C_MARK, DREQ1_B_MARK,
+ ARM_TRACEDATA_10_MARK, DREQ0_C_MARK, VI0_R1_MARK, SSI_SDATA8_C_MARK,
+ DACK1_B_MARK, ARM_TRACEDATA_11_MARK, DACK0_C_MARK, DRACK0_C_MARK,
+ VI0_R2_MARK, ETH_LINK_MARK, SD2_CLK_B_MARK, IRQ2_MARK,
+ ARM_TRACEDATA_12_MARK, VI0_R3_MARK, ETH_MAGIC_MARK, SD2_CMD_B_MARK,
+ IRQ3_MARK, ARM_TRACEDATA_13_MARK, VI0_R4_MARK, ETH_REFCLK_MARK,
+ SD2_CD_B_MARK, HSPI_CLK1_B_MARK, ARM_TRACEDATA_14_MARK, MT1_CLK_MARK,
+ TS_SCK0_MARK, VI0_R5_MARK, ETH_TXD0_MARK, SD2_WP_B_MARK,
+ HSPI_CS1_B_MARK, ARM_TRACEDATA_15_MARK, MT1_D_MARK, TS_SDEN0_MARK,
+ VI0_R6_MARK, ETH_MDC_MARK, DREQ2_C_MARK, HSPI_TX1_B_MARK,
+ TRACECLK_MARK, MT1_BEN_MARK, PWMFSW0_D_MARK, VI0_R7_MARK,
+ ETH_MDIO_MARK, DACK2_C_MARK, HSPI_RX1_B_MARK, SCIF_CLK_D_MARK,
+ TRACECTL_MARK, MT1_PEN_MARK, VI1_CLK_MARK, SIM_D_MARK, SDA3_MARK,
+ VI1_HSYNC_MARK, VI3_CLK_MARK, SSI_SCK4_MARK, GPS_SIGN_C_MARK,
+ PWMFSW0_E_MARK, VI1_VSYNC_MARK, AUDIO_CLKOUT_C_MARK, SSI_WS4_MARK,
+ SIM_CLK_MARK, GPS_MAG_C_MARK, SPV_TRST_MARK, SCL3_MARK,
+
+ VI1_DATA0_VI1_B0_MARK, SD2_DAT0_MARK, SIM_RST_MARK, SPV_TCK_MARK,
+ ADICLK_B_MARK, VI1_DATA1_VI1_B1_MARK, SD2_DAT1_MARK, MT0_CLK_MARK,
+ SPV_TMS_MARK, ADICS_B_SAMP_B_MARK, VI1_DATA2_VI1_B2_MARK,
+ SD2_DAT2_MARK, MT0_D_MARK, SPVTDI_MARK, ADIDATA_B_MARK,
+ VI1_DATA3_VI1_B3_MARK, SD2_DAT3_MARK, MT0_BEN_MARK, SPV_TDO_MARK,
+ ADICHS0_B_MARK, VI1_DATA4_VI1_B4_MARK, SD2_CLK_MARK, MT0_PEN_MARK,
+ SPA_TRST_MARK, HSPI_CLK1_D_MARK, ADICHS1_B_MARK,
+ VI1_DATA5_VI1_B5_MARK, SD2_CMD_MARK, MT0_SYNC_MARK, SPA_TCK_MARK,
+ HSPI_CS1_D_MARK, ADICHS2_B_MARK, VI1_DATA6_VI1_B6_MARK, SD2_CD_MARK,
+ MT0_VCXO_MARK, SPA_TMS_MARK, HSPI_TX1_D_MARK, VI1_DATA7_VI1_B7_MARK,
+ SD2_WP_MARK, MT0_PWM_MARK, SPA_TDI_MARK, HSPI_RX1_D_MARK,
+ VI1_G0_MARK, VI3_DATA0_MARK, DU1_DOTCLKOUT1_MARK, TS_SCK1_MARK,
+ DREQ2_B_MARK, TX2_MARK, SPA_TDO_MARK, HCTS0_B_MARK,
+ VI1_G1_MARK, VI3_DATA1_MARK, SSI_SCK1_MARK, TS_SDEN1_MARK,
+ DACK2_B_MARK, RX2_MARK, HRTS0_B_MARK,
+
+ VI1_G2_MARK, VI3_DATA2_MARK, SSI_WS1_MARK, TS_SPSYNC1_MARK,
+ SCK2_MARK, HSCK0_B_MARK, VI1_G3_MARK, VI3_DATA3_MARK,
+ SSI_SCK2_MARK, TS_SDAT1_MARK, SCL1_C_MARK, HTX0_B_MARK,
+ VI1_G4_MARK, VI3_DATA4_MARK, SSI_WS2_MARK, SDA1_C_MARK,
+ SIM_RST_B_MARK, HRX0_B_MARK, VI1_G5_MARK, VI3_DATA5_MARK,
+ GPS_CLK_MARK, FSE_MARK, TX4_B_MARK, SIM_D_B_MARK,
+ VI1_G6_MARK, VI3_DATA6_MARK, GPS_SIGN_MARK, FRB_MARK,
+ RX4_B_MARK, SIM_CLK_B_MARK, VI1_G7_MARK, VI3_DATA7_MARK,
+ GPS_MAG_MARK, FCE_MARK, SCK4_B_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
+
+ PINMUX_DATA(AVS1_MARK, FN_AVS1),
+	PINMUX_DATA(AVS2_MARK, FN_AVS2),
+ PINMUX_DATA(A17_MARK, FN_A17),
+ PINMUX_DATA(A18_MARK, FN_A18),
+ PINMUX_DATA(A19_MARK, FN_A19),
+
+ PINMUX_IPSR_DATA(IP0_2_0, USB_PENC2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_2_0, SCK0, SEL_SCIF0_0),
+ PINMUX_IPSR_DATA(IP0_2_0, PWM1),
+ PINMUX_IPSR_MODSEL_DATA(IP0_2_0, PWMFSW0, SEL_PWMFSW_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_2_0, SCIF_CLK, SEL_SCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_2_0, TCLK0_C, SEL_TMU0_2),
+ PINMUX_IPSR_DATA(IP0_5_3, BS),
+ PINMUX_IPSR_DATA(IP0_5_3, SD1_DAT2),
+ PINMUX_IPSR_DATA(IP0_5_3, MMC0_D2),
+ PINMUX_IPSR_DATA(IP0_5_3, FD2),
+ PINMUX_IPSR_DATA(IP0_5_3, ATADIR0),
+ PINMUX_IPSR_DATA(IP0_5_3, SDSELF),
+ PINMUX_IPSR_MODSEL_DATA(IP0_5_3, HCTS1, SEL_HSCIF1_0),
+ PINMUX_IPSR_DATA(IP0_5_3, TX4_C),
+ PINMUX_IPSR_DATA(IP0_7_6, A0),
+ PINMUX_IPSR_DATA(IP0_7_6, SD1_DAT3),
+ PINMUX_IPSR_DATA(IP0_7_6, MMC0_D3),
+ PINMUX_IPSR_DATA(IP0_7_6, FD3),
+ PINMUX_IPSR_DATA(IP0_9_8, A20),
+ PINMUX_IPSR_DATA(IP0_9_8, TX5_D),
+ PINMUX_IPSR_DATA(IP0_9_8, HSPI_TX2_B),
+ PINMUX_IPSR_DATA(IP0_11_10, A21),
+ PINMUX_IPSR_MODSEL_DATA(IP0_11_10, SCK5_D, SEL_SCIF5_3),
+ PINMUX_IPSR_MODSEL_DATA(IP0_11_10, HSPI_CLK2_B, SEL_HSPI2_1),
+ PINMUX_IPSR_DATA(IP0_13_12, A22),
+ PINMUX_IPSR_MODSEL_DATA(IP0_13_12, RX5_D, SEL_SCIF5_3),
+ PINMUX_IPSR_MODSEL_DATA(IP0_13_12, HSPI_RX2_B, SEL_HSPI2_1),
+ PINMUX_IPSR_DATA(IP0_13_12, VI1_R0),
+ PINMUX_IPSR_DATA(IP0_15_14, A23),
+ PINMUX_IPSR_DATA(IP0_15_14, FCLE),
+ PINMUX_IPSR_MODSEL_DATA(IP0_15_14, HSPI_CLK2, SEL_HSPI2_0),
+ PINMUX_IPSR_DATA(IP0_15_14, VI1_R1),
+ PINMUX_IPSR_DATA(IP0_18_16, A24),
+ PINMUX_IPSR_DATA(IP0_18_16, SD1_CD),
+ PINMUX_IPSR_DATA(IP0_18_16, MMC0_D4),
+ PINMUX_IPSR_DATA(IP0_18_16, FD4),
+ PINMUX_IPSR_MODSEL_DATA(IP0_18_16, HSPI_CS2, SEL_HSPI2_0),
+ PINMUX_IPSR_DATA(IP0_18_16, VI1_R2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_18_16, SSI_WS78_B, SEL_SSI7_1),
+ PINMUX_IPSR_DATA(IP0_22_19, A25),
+ PINMUX_IPSR_DATA(IP0_22_19, SD1_WP),
+ PINMUX_IPSR_DATA(IP0_22_19, MMC0_D5),
+ PINMUX_IPSR_DATA(IP0_22_19, FD5),
+ PINMUX_IPSR_MODSEL_DATA(IP0_22_19, HSPI_RX2, SEL_HSPI2_0),
+ PINMUX_IPSR_DATA(IP0_22_19, VI1_R3),
+ PINMUX_IPSR_DATA(IP0_22_19, TX5_B),
+ PINMUX_IPSR_MODSEL_DATA(IP0_22_19, SSI_SDATA7_B, SEL_SSI7_1),
+ PINMUX_IPSR_MODSEL_DATA(IP0_22_19, CTS0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_DATA(IP0_24_23, CLKOUT),
+ PINMUX_IPSR_DATA(IP0_24_23, TX3C_IRDA_TX_C),
+ PINMUX_IPSR_DATA(IP0_24_23, PWM0_B),
+ PINMUX_IPSR_DATA(IP0_25, CS0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_25, HSPI_CS2_B, SEL_HSPI2_1),
+ PINMUX_IPSR_DATA(IP0_27_26, CS1_A26),
+ PINMUX_IPSR_DATA(IP0_27_26, HSPI_TX2),
+ PINMUX_IPSR_DATA(IP0_27_26, SDSELF_B),
+ PINMUX_IPSR_DATA(IP0_30_28, RD_WR),
+ PINMUX_IPSR_DATA(IP0_30_28, FWE),
+ PINMUX_IPSR_DATA(IP0_30_28, ATAG0),
+ PINMUX_IPSR_DATA(IP0_30_28, VI1_R7),
+ PINMUX_IPSR_MODSEL_DATA(IP0_30_28, HRTS1, SEL_HSCIF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_30_28, RX4_C, SEL_SCIF4_2),
+
+ PINMUX_IPSR_DATA(IP1_1_0, EX_CS0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_1_0, RX3_C_IRDA_RX_C, SEL_SCIF3_2),
+ PINMUX_IPSR_DATA(IP1_1_0, MMC0_D6),
+ PINMUX_IPSR_DATA(IP1_1_0, FD6),
+ PINMUX_IPSR_DATA(IP1_3_2, EX_CS1),
+ PINMUX_IPSR_DATA(IP1_3_2, MMC0_D7),
+ PINMUX_IPSR_DATA(IP1_3_2, FD7),
+ PINMUX_IPSR_DATA(IP1_6_4, EX_CS2),
+ PINMUX_IPSR_DATA(IP1_6_4, SD1_CLK),
+ PINMUX_IPSR_DATA(IP1_6_4, MMC0_CLK),
+ PINMUX_IPSR_DATA(IP1_6_4, FALE),
+ PINMUX_IPSR_DATA(IP1_6_4, ATACS00),
+ PINMUX_IPSR_DATA(IP1_10_7, EX_CS3),
+ PINMUX_IPSR_DATA(IP1_10_7, SD1_CMD),
+ PINMUX_IPSR_DATA(IP1_10_7, MMC0_CMD),
+ PINMUX_IPSR_DATA(IP1_10_7, FRE),
+ PINMUX_IPSR_DATA(IP1_10_7, ATACS10),
+ PINMUX_IPSR_DATA(IP1_10_7, VI1_R4),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_7, RX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_7, HSCK1, SEL_HSCIF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_7, SSI_SDATA8_B, SEL_SSI8_1),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_7, RTS0_B_TANS_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP1_10_7, SSI_SDATA9, SEL_SSI9_0),
+ PINMUX_IPSR_DATA(IP1_14_11, EX_CS4),
+ PINMUX_IPSR_DATA(IP1_14_11, SD1_DAT0),
+ PINMUX_IPSR_DATA(IP1_14_11, MMC0_D0),
+ PINMUX_IPSR_DATA(IP1_14_11, FD0),
+ PINMUX_IPSR_DATA(IP1_14_11, ATARD0),
+ PINMUX_IPSR_DATA(IP1_14_11, VI1_R5),
+ PINMUX_IPSR_MODSEL_DATA(IP1_14_11, SCK5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_DATA(IP1_14_11, HTX1),
+ PINMUX_IPSR_DATA(IP1_14_11, TX2_E),
+ PINMUX_IPSR_DATA(IP1_14_11, TX0_B),
+ PINMUX_IPSR_MODSEL_DATA(IP1_14_11, SSI_SCK9, SEL_SSI9_0),
+ PINMUX_IPSR_DATA(IP1_18_15, EX_CS5),
+ PINMUX_IPSR_DATA(IP1_18_15, SD1_DAT1),
+ PINMUX_IPSR_DATA(IP1_18_15, MMC0_D1),
+ PINMUX_IPSR_DATA(IP1_18_15, FD1),
+ PINMUX_IPSR_DATA(IP1_18_15, ATAWR0),
+ PINMUX_IPSR_DATA(IP1_18_15, VI1_R6),
+ PINMUX_IPSR_MODSEL_DATA(IP1_18_15, HRX1, SEL_HSCIF1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_18_15, RX2_E, SEL_SCIF2_4),
+ PINMUX_IPSR_MODSEL_DATA(IP1_18_15, RX0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP1_18_15, SSI_WS9, SEL_SSI9_0),
+ PINMUX_IPSR_DATA(IP1_20_19, MLB_CLK),
+ PINMUX_IPSR_DATA(IP1_20_19, PWM2),
+ PINMUX_IPSR_MODSEL_DATA(IP1_20_19, SCK4, SEL_SCIF4_0),
+ PINMUX_IPSR_DATA(IP1_22_21, MLB_SIG),
+ PINMUX_IPSR_DATA(IP1_22_21, PWM3),
+ PINMUX_IPSR_DATA(IP1_22_21, TX4),
+ PINMUX_IPSR_DATA(IP1_24_23, MLB_DAT),
+ PINMUX_IPSR_DATA(IP1_24_23, PWM4),
+ PINMUX_IPSR_MODSEL_DATA(IP1_24_23, RX4, SEL_SCIF4_0),
+ PINMUX_IPSR_DATA(IP1_28_25, HTX0),
+ PINMUX_IPSR_DATA(IP1_28_25, TX1),
+ PINMUX_IPSR_DATA(IP1_28_25, SDATA),
+ PINMUX_IPSR_MODSEL_DATA(IP1_28_25, CTS0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_DATA(IP1_28_25, SUB_TCK),
+ PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE2),
+ PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE10),
+ PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE18),
+ PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE26),
+ PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE34),
+
+ PINMUX_IPSR_MODSEL_DATA(IP2_3_0, HRX0, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_3_0, RX1, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP2_3_0, SCKZ),
+ PINMUX_IPSR_MODSEL_DATA(IP2_3_0, RTS0_C_TANS_C, SEL_SCIF0_2),
+ PINMUX_IPSR_DATA(IP2_3_0, SUB_TDI),
+ PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE3),
+ PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE11),
+ PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE19),
+ PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE27),
+ PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE35),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_4, HSCK0, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_4, SCK1, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP2_7_4, MTS),
+ PINMUX_IPSR_DATA(IP2_7_4, PWM5),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_4, SCK0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_4, SSI_SDATA9_B, SEL_SSI9_1),
+ PINMUX_IPSR_DATA(IP2_7_4, SUB_TDO),
+ PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE0),
+ PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE8),
+ PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE16),
+ PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE24),
+ PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE32),
+ PINMUX_IPSR_MODSEL_DATA(IP2_11_8, HCTS0, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_11_8, CTS1, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP2_11_8, STM),
+ PINMUX_IPSR_DATA(IP2_11_8, PWM0_D),
+ PINMUX_IPSR_MODSEL_DATA(IP2_11_8, RX0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP2_11_8, SCIF_CLK_C, SEL_SCIF_2),
+ PINMUX_IPSR_DATA(IP2_11_8, SUB_TRST),
+ PINMUX_IPSR_MODSEL_DATA(IP2_11_8, TCLK1_B, SEL_TMU1_1),
+ PINMUX_IPSR_DATA(IP2_11_8, CC5_OSCOUT),
+ PINMUX_IPSR_MODSEL_DATA(IP2_15_12, HRTS0, SEL_HSCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_15_12, RTS1_TANS, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP2_15_12, MDATA),
+ PINMUX_IPSR_DATA(IP2_15_12, TX0_C),
+ PINMUX_IPSR_DATA(IP2_15_12, SUB_TMS),
+ PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE1),
+ PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE9),
+ PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE17),
+ PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE25),
+ PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE33),
+ PINMUX_IPSR_DATA(IP2_18_16, DU0_DR0),
+ PINMUX_IPSR_DATA(IP2_18_16, LCDOUT0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_18_16, DREQ0, SEL_EXBUS0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_18_16, GPS_CLK_B, SEL_GPS_1),
+ PINMUX_IPSR_DATA(IP2_18_16, AUDATA0),
+ PINMUX_IPSR_DATA(IP2_18_16, TX5_C),
+ PINMUX_IPSR_DATA(IP2_21_19, DU0_DR1),
+ PINMUX_IPSR_DATA(IP2_21_19, LCDOUT1),
+ PINMUX_IPSR_DATA(IP2_21_19, DACK0),
+ PINMUX_IPSR_DATA(IP2_21_19, DRACK0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_21_19, GPS_SIGN_B, SEL_GPS_1),
+ PINMUX_IPSR_DATA(IP2_21_19, AUDATA1),
+ PINMUX_IPSR_MODSEL_DATA(IP2_21_19, RX5_C, SEL_SCIF5_2),
+ PINMUX_IPSR_DATA(IP2_22, DU0_DR2),
+ PINMUX_IPSR_DATA(IP2_22, LCDOUT2),
+ PINMUX_IPSR_DATA(IP2_23, DU0_DR3),
+ PINMUX_IPSR_DATA(IP2_23, LCDOUT3),
+ PINMUX_IPSR_DATA(IP2_24, DU0_DR4),
+ PINMUX_IPSR_DATA(IP2_24, LCDOUT4),
+ PINMUX_IPSR_DATA(IP2_25, DU0_DR5),
+ PINMUX_IPSR_DATA(IP2_25, LCDOUT5),
+ PINMUX_IPSR_DATA(IP2_26, DU0_DR6),
+ PINMUX_IPSR_DATA(IP2_26, LCDOUT6),
+ PINMUX_IPSR_DATA(IP2_27, DU0_DR7),
+ PINMUX_IPSR_DATA(IP2_27, LCDOUT7),
+ PINMUX_IPSR_DATA(IP2_30_28, DU0_DG0),
+ PINMUX_IPSR_DATA(IP2_30_28, LCDOUT8),
+ PINMUX_IPSR_MODSEL_DATA(IP2_30_28, DREQ1, SEL_EXBUS1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_30_28, SCL2, SEL_I2C2_0),
+ PINMUX_IPSR_DATA(IP2_30_28, AUDATA2),
+
+ PINMUX_IPSR_DATA(IP3_2_0, DU0_DG1),
+ PINMUX_IPSR_DATA(IP3_2_0, LCDOUT9),
+ PINMUX_IPSR_DATA(IP3_2_0, DACK1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_2_0, SDA2, SEL_I2C2_0),
+ PINMUX_IPSR_DATA(IP3_2_0, AUDATA3),
+ PINMUX_IPSR_DATA(IP3_3, DU0_DG2),
+ PINMUX_IPSR_DATA(IP3_3, LCDOUT10),
+ PINMUX_IPSR_DATA(IP3_4, DU0_DG3),
+ PINMUX_IPSR_DATA(IP3_4, LCDOUT11),
+ PINMUX_IPSR_DATA(IP3_5, DU0_DG4),
+ PINMUX_IPSR_DATA(IP3_5, LCDOUT12),
+ PINMUX_IPSR_DATA(IP3_6, DU0_DG5),
+ PINMUX_IPSR_DATA(IP3_6, LCDOUT13),
+ PINMUX_IPSR_DATA(IP3_7, DU0_DG6),
+ PINMUX_IPSR_DATA(IP3_7, LCDOUT14),
+ PINMUX_IPSR_DATA(IP3_8, DU0_DG7),
+ PINMUX_IPSR_DATA(IP3_8, LCDOUT15),
+ PINMUX_IPSR_DATA(IP3_11_9, DU0_DB0),
+ PINMUX_IPSR_DATA(IP3_11_9, LCDOUT16),
+ PINMUX_IPSR_DATA(IP3_11_9, EX_WAIT1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, SCL1, SEL_I2C1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, TCLK1, SEL_TMU1_0),
+ PINMUX_IPSR_DATA(IP3_11_9, AUDATA4),
+ PINMUX_IPSR_DATA(IP3_14_12, DU0_DB1),
+ PINMUX_IPSR_DATA(IP3_14_12, LCDOUT17),
+ PINMUX_IPSR_DATA(IP3_14_12, EX_WAIT2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SDA1, SEL_I2C1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_14_12, GPS_MAG_B, SEL_GPS_1),
+ PINMUX_IPSR_DATA(IP3_14_12, AUDATA5),
+ PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SCK5_C, SEL_SCIF5_2),
+ PINMUX_IPSR_DATA(IP3_15, DU0_DB2),
+ PINMUX_IPSR_DATA(IP3_15, LCDOUT18),
+ PINMUX_IPSR_DATA(IP3_16, DU0_DB3),
+ PINMUX_IPSR_DATA(IP3_16, LCDOUT19),
+ PINMUX_IPSR_DATA(IP3_17, DU0_DB4),
+ PINMUX_IPSR_DATA(IP3_17, LCDOUT20),
+ PINMUX_IPSR_DATA(IP3_18, DU0_DB5),
+ PINMUX_IPSR_DATA(IP3_18, LCDOUT21),
+ PINMUX_IPSR_DATA(IP3_19, DU0_DB6),
+ PINMUX_IPSR_DATA(IP3_19, LCDOUT22),
+ PINMUX_IPSR_DATA(IP3_20, DU0_DB7),
+ PINMUX_IPSR_DATA(IP3_20, LCDOUT23),
+ PINMUX_IPSR_DATA(IP3_22_21, DU0_DOTCLKIN),
+ PINMUX_IPSR_DATA(IP3_22_21, QSTVA_QVS),
+ PINMUX_IPSR_DATA(IP3_22_21, TX3_D_IRDA_TX_D),
+ PINMUX_IPSR_MODSEL_DATA(IP3_22_21, SCL3_B, SEL_I2C3_1),
+ PINMUX_IPSR_DATA(IP3_23, DU0_DOTCLKOUT0),
+ PINMUX_IPSR_DATA(IP3_23, QCLK),
+ PINMUX_IPSR_DATA(IP3_26_24, DU0_DOTCLKOUT1),
+ PINMUX_IPSR_DATA(IP3_26_24, QSTVB_QVE),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, RX3_D_IRDA_RX_D, SEL_SCIF3_3),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SDA3_B, SEL_I2C3_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SDA2_C, SEL_I2C2_2),
+ PINMUX_IPSR_DATA(IP3_26_24, DACK0_B),
+ PINMUX_IPSR_DATA(IP3_26_24, DRACK0_B),
+ PINMUX_IPSR_DATA(IP3_27, DU0_EXHSYNC_DU0_HSYNC),
+ PINMUX_IPSR_DATA(IP3_27, QSTH_QHS),
+ PINMUX_IPSR_DATA(IP3_28, DU0_EXVSYNC_DU0_VSYNC),
+ PINMUX_IPSR_DATA(IP3_28, QSTB_QHE),
+ PINMUX_IPSR_DATA(IP3_31_29, DU0_EXODDF_DU0_ODDF_DISP_CDE),
+ PINMUX_IPSR_DATA(IP3_31_29, QCPV_QDE),
+ PINMUX_IPSR_DATA(IP3_31_29, CAN1_TX),
+ PINMUX_IPSR_DATA(IP3_31_29, TX2_C),
+ PINMUX_IPSR_MODSEL_DATA(IP3_31_29, SCL2_C, SEL_I2C2_2),
+ PINMUX_IPSR_DATA(IP3_31_29, REMOCON),
+
+ PINMUX_IPSR_DATA(IP4_1_0, DU0_DISP),
+ PINMUX_IPSR_DATA(IP4_1_0, QPOLA),
+ PINMUX_IPSR_MODSEL_DATA(IP4_1_0, CAN_CLK_C, SEL_CANCLK_2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SCK2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_DATA(IP4_4_2, DU0_CDE),
+ PINMUX_IPSR_DATA(IP4_4_2, QPOLB),
+ PINMUX_IPSR_DATA(IP4_4_2, CAN1_RX),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, RX2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, DREQ0_B, SEL_EXBUS0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SSI_SCK78_B, SEL_SSI7_1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SCK0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_DATA(IP4_7_5, DU1_DR0),
+ PINMUX_IPSR_DATA(IP4_7_5, VI2_DATA0_VI2_B0),
+ PINMUX_IPSR_DATA(IP4_7_5, PWM6),
+ PINMUX_IPSR_DATA(IP4_7_5, SD3_CLK),
+ PINMUX_IPSR_DATA(IP4_7_5, TX3_E_IRDA_TX_E),
+ PINMUX_IPSR_DATA(IP4_7_5, AUDCK),
+ PINMUX_IPSR_MODSEL_DATA(IP4_7_5, PWMFSW0_B, SEL_PWMFSW_1),
+ PINMUX_IPSR_DATA(IP4_10_8, DU1_DR1),
+ PINMUX_IPSR_DATA(IP4_10_8, VI2_DATA1_VI2_B1),
+ PINMUX_IPSR_DATA(IP4_10_8, PWM0),
+ PINMUX_IPSR_DATA(IP4_10_8, SD3_CMD),
+ PINMUX_IPSR_MODSEL_DATA(IP4_10_8, RX3_E_IRDA_RX_E, SEL_SCIF3_4),
+ PINMUX_IPSR_DATA(IP4_10_8, AUDSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP4_10_8, CTS0_D, SEL_SCIF0_3),
+ PINMUX_IPSR_DATA(IP4_11, DU1_DR2),
+ PINMUX_IPSR_DATA(IP4_11, VI2_G0),
+ PINMUX_IPSR_DATA(IP4_12, DU1_DR3),
+ PINMUX_IPSR_DATA(IP4_12, VI2_G1),
+ PINMUX_IPSR_DATA(IP4_13, DU1_DR4),
+ PINMUX_IPSR_DATA(IP4_13, VI2_G2),
+ PINMUX_IPSR_DATA(IP4_14, DU1_DR5),
+ PINMUX_IPSR_DATA(IP4_14, VI2_G3),
+ PINMUX_IPSR_DATA(IP4_15, DU1_DR6),
+ PINMUX_IPSR_DATA(IP4_15, VI2_G4),
+ PINMUX_IPSR_DATA(IP4_16, DU1_DR7),
+ PINMUX_IPSR_DATA(IP4_16, VI2_G5),
+ PINMUX_IPSR_DATA(IP4_19_17, DU1_DG0),
+ PINMUX_IPSR_DATA(IP4_19_17, VI2_DATA2_VI2_B2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_19_17, SCL1_B, SEL_I2C1_1),
+ PINMUX_IPSR_DATA(IP4_19_17, SD3_DAT2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_19_17, SCK3_E, SEL_SCIF3_4),
+ PINMUX_IPSR_DATA(IP4_19_17, AUDATA6),
+ PINMUX_IPSR_DATA(IP4_19_17, TX0_D),
+ PINMUX_IPSR_DATA(IP4_22_20, DU1_DG1),
+ PINMUX_IPSR_DATA(IP4_22_20, VI2_DATA3_VI2_B3),
+ PINMUX_IPSR_MODSEL_DATA(IP4_22_20, SDA1_B, SEL_I2C1_1),
+ PINMUX_IPSR_DATA(IP4_22_20, SD3_DAT3),
+ PINMUX_IPSR_MODSEL_DATA(IP4_22_20, SCK5, SEL_SCIF5_0),
+ PINMUX_IPSR_DATA(IP4_22_20, AUDATA7),
+ PINMUX_IPSR_MODSEL_DATA(IP4_22_20, RX0_D, SEL_SCIF0_3),
+ PINMUX_IPSR_DATA(IP4_23, DU1_DG2),
+ PINMUX_IPSR_DATA(IP4_23, VI2_G6),
+ PINMUX_IPSR_DATA(IP4_24, DU1_DG3),
+ PINMUX_IPSR_DATA(IP4_24, VI2_G7),
+ PINMUX_IPSR_DATA(IP4_25, DU1_DG4),
+ PINMUX_IPSR_DATA(IP4_25, VI2_R0),
+ PINMUX_IPSR_DATA(IP4_26, DU1_DG5),
+ PINMUX_IPSR_DATA(IP4_26, VI2_R1),
+ PINMUX_IPSR_DATA(IP4_27, DU1_DG6),
+ PINMUX_IPSR_DATA(IP4_27, VI2_R2),
+ PINMUX_IPSR_DATA(IP4_28, DU1_DG7),
+ PINMUX_IPSR_DATA(IP4_28, VI2_R3),
+ PINMUX_IPSR_DATA(IP4_31_29, DU1_DB0),
+ PINMUX_IPSR_DATA(IP4_31_29, VI2_DATA4_VI2_B4),
+ PINMUX_IPSR_MODSEL_DATA(IP4_31_29, SCL2_B, SEL_I2C2_1),
+ PINMUX_IPSR_DATA(IP4_31_29, SD3_DAT0),
+ PINMUX_IPSR_DATA(IP4_31_29, TX5),
+ PINMUX_IPSR_MODSEL_DATA(IP4_31_29, SCK0_D, SEL_SCIF0_3),
+
+ PINMUX_IPSR_DATA(IP5_2_0, DU1_DB1),
+ PINMUX_IPSR_DATA(IP5_2_0, VI2_DATA5_VI2_B5),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, SDA2_B, SEL_I2C2_1),
+ PINMUX_IPSR_DATA(IP5_2_0, SD3_DAT1),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, RX5, SEL_SCIF5_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, RTS0_D_TANS_D, SEL_SCIF0_3),
+ PINMUX_IPSR_DATA(IP5_3, DU1_DB2),
+ PINMUX_IPSR_DATA(IP5_3, VI2_R4),
+ PINMUX_IPSR_DATA(IP5_4, DU1_DB3),
+ PINMUX_IPSR_DATA(IP5_4, VI2_R5),
+ PINMUX_IPSR_DATA(IP5_5, DU1_DB4),
+ PINMUX_IPSR_DATA(IP5_5, VI2_R6),
+ PINMUX_IPSR_DATA(IP5_6, DU1_DB5),
+ PINMUX_IPSR_DATA(IP5_6, VI2_R7),
+ PINMUX_IPSR_DATA(IP5_7, DU1_DB6),
+ PINMUX_IPSR_MODSEL_DATA(IP5_7, SCL2_D, SEL_I2C2_3),
+ PINMUX_IPSR_DATA(IP5_8, DU1_DB7),
+ PINMUX_IPSR_MODSEL_DATA(IP5_8, SDA2_D, SEL_I2C2_3),
+ PINMUX_IPSR_DATA(IP5_10_9, DU1_DOTCLKIN),
+ PINMUX_IPSR_DATA(IP5_10_9, VI2_CLKENB),
+ PINMUX_IPSR_MODSEL_DATA(IP5_10_9, HSPI_CS1, SEL_HSPI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_10_9, SCL1_D, SEL_I2C1_3),
+ PINMUX_IPSR_DATA(IP5_12_11, DU1_DOTCLKOUT),
+ PINMUX_IPSR_DATA(IP5_12_11, VI2_FIELD),
+ PINMUX_IPSR_MODSEL_DATA(IP5_12_11, SDA1_D, SEL_I2C1_3),
+ PINMUX_IPSR_DATA(IP5_14_13, DU1_EXHSYNC_DU1_HSYNC),
+ PINMUX_IPSR_DATA(IP5_14_13, VI2_HSYNC),
+ PINMUX_IPSR_DATA(IP5_14_13, VI3_HSYNC),
+ PINMUX_IPSR_DATA(IP5_16_15, DU1_EXVSYNC_DU1_VSYNC),
+ PINMUX_IPSR_DATA(IP5_16_15, VI2_VSYNC),
+ PINMUX_IPSR_DATA(IP5_16_15, VI3_VSYNC),
+ PINMUX_IPSR_DATA(IP5_20_17, DU1_EXODDF_DU1_ODDF_DISP_CDE),
+ PINMUX_IPSR_DATA(IP5_20_17, VI2_CLK),
+ PINMUX_IPSR_DATA(IP5_20_17, TX3_B_IRDA_TX_B),
+ PINMUX_IPSR_DATA(IP5_20_17, SD3_CD),
+ PINMUX_IPSR_DATA(IP5_20_17, HSPI_TX1),
+ PINMUX_IPSR_DATA(IP5_20_17, VI1_CLKENB),
+ PINMUX_IPSR_DATA(IP5_20_17, VI3_CLKENB),
+ PINMUX_IPSR_DATA(IP5_20_17, AUDIO_CLKC),
+ PINMUX_IPSR_DATA(IP5_20_17, TX2_D),
+ PINMUX_IPSR_DATA(IP5_20_17, SPEEDIN),
+ PINMUX_IPSR_MODSEL_DATA(IP5_20_17, GPS_SIGN_D, SEL_GPS_3),
+ PINMUX_IPSR_DATA(IP5_23_21, DU1_DISP),
+ PINMUX_IPSR_DATA(IP5_23_21, VI2_DATA6_VI2_B6),
+ PINMUX_IPSR_MODSEL_DATA(IP5_23_21, TCLK0, SEL_TMU0_0),
+ PINMUX_IPSR_DATA(IP5_23_21, QSTVA_B_QVS_B),
+ PINMUX_IPSR_MODSEL_DATA(IP5_23_21, HSPI_CLK1, SEL_HSPI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_23_21, SCK2_D, SEL_SCIF2_3),
+ PINMUX_IPSR_DATA(IP5_23_21, AUDIO_CLKOUT_B),
+ PINMUX_IPSR_MODSEL_DATA(IP5_23_21, GPS_MAG_D, SEL_GPS_3),
+ PINMUX_IPSR_DATA(IP5_27_24, DU1_CDE),
+ PINMUX_IPSR_DATA(IP5_27_24, VI2_DATA7_VI2_B7),
+ PINMUX_IPSR_MODSEL_DATA(IP5_27_24, RX3_B_IRDA_RX_B, SEL_SCIF3_1),
+ PINMUX_IPSR_DATA(IP5_27_24, SD3_WP),
+ PINMUX_IPSR_MODSEL_DATA(IP5_27_24, HSPI_RX1, SEL_HSPI1_0),
+ PINMUX_IPSR_DATA(IP5_27_24, VI1_FIELD),
+ PINMUX_IPSR_DATA(IP5_27_24, VI3_FIELD),
+ PINMUX_IPSR_DATA(IP5_27_24, AUDIO_CLKOUT),
+ PINMUX_IPSR_MODSEL_DATA(IP5_27_24, RX2_D, SEL_SCIF2_3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_27_24, GPS_CLK_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP5_27_24, GPS_CLK_D, SEL_GPS_3),
+ PINMUX_IPSR_DATA(IP5_28, AUDIO_CLKA),
+ PINMUX_IPSR_DATA(IP5_28, CAN_TXCLK),
+ PINMUX_IPSR_DATA(IP5_30_29, AUDIO_CLKB),
+ PINMUX_IPSR_DATA(IP5_30_29, USB_OVC2),
+ PINMUX_IPSR_DATA(IP5_30_29, CAN_DEBUGOUT0),
+ PINMUX_IPSR_DATA(IP5_30_29, MOUT0),
+
+ PINMUX_IPSR_DATA(IP6_1_0, SSI_SCK0129),
+ PINMUX_IPSR_DATA(IP6_1_0, CAN_DEBUGOUT1),
+ PINMUX_IPSR_DATA(IP6_1_0, MOUT1),
+ PINMUX_IPSR_DATA(IP6_3_2, SSI_WS0129),
+ PINMUX_IPSR_DATA(IP6_3_2, CAN_DEBUGOUT2),
+ PINMUX_IPSR_DATA(IP6_3_2, MOUT2),
+ PINMUX_IPSR_DATA(IP6_5_4, SSI_SDATA0),
+ PINMUX_IPSR_DATA(IP6_5_4, CAN_DEBUGOUT3),
+ PINMUX_IPSR_DATA(IP6_5_4, MOUT5),
+ PINMUX_IPSR_DATA(IP6_7_6, SSI_SDATA1),
+ PINMUX_IPSR_DATA(IP6_7_6, CAN_DEBUGOUT4),
+ PINMUX_IPSR_DATA(IP6_7_6, MOUT6),
+ PINMUX_IPSR_DATA(IP6_8, SSI_SDATA2),
+ PINMUX_IPSR_DATA(IP6_8, CAN_DEBUGOUT5),
+ PINMUX_IPSR_DATA(IP6_11_9, SSI_SCK34),
+ PINMUX_IPSR_DATA(IP6_11_9, CAN_DEBUGOUT6),
+ PINMUX_IPSR_DATA(IP6_11_9, CAN0_TX_B),
+ PINMUX_IPSR_MODSEL_DATA(IP6_11_9, IERX, SEL_IE_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_11_9, SSI_SCK9_C, SEL_SSI9_2),
+ PINMUX_IPSR_DATA(IP6_14_12, SSI_WS34),
+ PINMUX_IPSR_DATA(IP6_14_12, CAN_DEBUGOUT7),
+ PINMUX_IPSR_MODSEL_DATA(IP6_14_12, CAN0_RX_B, SEL_CAN0_1),
+ PINMUX_IPSR_DATA(IP6_14_12, IETX),
+ PINMUX_IPSR_MODSEL_DATA(IP6_14_12, SSI_WS9_C, SEL_SSI9_2),
+ PINMUX_IPSR_DATA(IP6_17_15, SSI_SDATA3),
+ PINMUX_IPSR_DATA(IP6_17_15, PWM0_C),
+ PINMUX_IPSR_DATA(IP6_17_15, CAN_DEBUGOUT8),
+ PINMUX_IPSR_MODSEL_DATA(IP6_17_15, CAN_CLK_B, SEL_CANCLK_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_17_15, IECLK, SEL_IE_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_17_15, SCIF_CLK_B, SEL_SCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_17_15, TCLK0_B, SEL_TMU0_1),
+ PINMUX_IPSR_DATA(IP6_19_18, SSI_SDATA4),
+ PINMUX_IPSR_DATA(IP6_19_18, CAN_DEBUGOUT9),
+ PINMUX_IPSR_MODSEL_DATA(IP6_19_18, SSI_SDATA9_C, SEL_SSI9_2),
+ PINMUX_IPSR_DATA(IP6_22_20, SSI_SCK5),
+ PINMUX_IPSR_DATA(IP6_22_20, ADICLK),
+ PINMUX_IPSR_DATA(IP6_22_20, CAN_DEBUGOUT10),
+ PINMUX_IPSR_MODSEL_DATA(IP6_22_20, SCK3, SEL_SCIF3_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_22_20, TCLK0_D, SEL_TMU0_3),
+ PINMUX_IPSR_DATA(IP6_24_23, SSI_WS5),
+ PINMUX_IPSR_MODSEL_DATA(IP6_24_23, ADICS_SAMP, SEL_ADI_0),
+ PINMUX_IPSR_DATA(IP6_24_23, CAN_DEBUGOUT11),
+ PINMUX_IPSR_DATA(IP6_24_23, TX3_IRDA_TX),
+ PINMUX_IPSR_DATA(IP6_26_25, SSI_SDATA5),
+ PINMUX_IPSR_MODSEL_DATA(IP6_26_25, ADIDATA, SEL_ADI_0),
+ PINMUX_IPSR_DATA(IP6_26_25, CAN_DEBUGOUT12),
+ PINMUX_IPSR_MODSEL_DATA(IP6_26_25, RX3_IRDA_RX, SEL_SCIF3_0),
+ PINMUX_IPSR_DATA(IP6_30_29, SSI_SCK6),
+ PINMUX_IPSR_DATA(IP6_30_29, ADICHS0),
+ PINMUX_IPSR_DATA(IP6_30_29, CAN0_TX),
+ PINMUX_IPSR_MODSEL_DATA(IP6_30_29, IERX_B, SEL_IE_1),
+
+ PINMUX_IPSR_DATA(IP7_1_0, SSI_WS6),
+ PINMUX_IPSR_DATA(IP7_1_0, ADICHS1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_1_0, CAN0_RX, SEL_CAN0_0),
+ PINMUX_IPSR_DATA(IP7_1_0, IETX_B),
+ PINMUX_IPSR_DATA(IP7_3_2, SSI_SDATA6),
+ PINMUX_IPSR_DATA(IP7_3_2, ADICHS2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_3_2, CAN_CLK, SEL_CANCLK_0),
+ PINMUX_IPSR_MODSEL_DATA(IP7_3_2, IECLK_B, SEL_IE_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_6_4, SSI_SCK78, SEL_SSI7_0),
+ PINMUX_IPSR_DATA(IP7_6_4, CAN_DEBUGOUT13),
+ PINMUX_IPSR_MODSEL_DATA(IP7_6_4, IRQ0_B, SEL_INT0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_6_4, SSI_SCK9_B, SEL_SSI9_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_6_4, HSPI_CLK1_C, SEL_HSPI1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_9_7, SSI_WS78, SEL_SSI7_0),
+ PINMUX_IPSR_DATA(IP7_9_7, CAN_DEBUGOUT14),
+ PINMUX_IPSR_MODSEL_DATA(IP7_9_7, IRQ1_B, SEL_INT1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_9_7, SSI_WS9_B, SEL_SSI9_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_9_7, HSPI_CS1_C, SEL_HSPI1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_12_10, SSI_SDATA7, SEL_SSI7_0),
+ PINMUX_IPSR_DATA(IP7_12_10, CAN_DEBUGOUT15),
+ PINMUX_IPSR_MODSEL_DATA(IP7_12_10, IRQ2_B, SEL_INT2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_12_10, TCLK1_C, SEL_TMU1_2),
+ PINMUX_IPSR_DATA(IP7_12_10, HSPI_TX1_C),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_13, SSI_SDATA8, SEL_SSI8_0),
+ PINMUX_IPSR_DATA(IP7_14_13, VSP),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_13, IRQ3_B, SEL_INT3_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_13, HSPI_RX1_C, SEL_HSPI1_2),
+ PINMUX_IPSR_DATA(IP7_16_15, SD0_CLK),
+ PINMUX_IPSR_DATA(IP7_16_15, ATACS01),
+ PINMUX_IPSR_MODSEL_DATA(IP7_16_15, SCK1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP7_18_17, SD0_CMD),
+ PINMUX_IPSR_DATA(IP7_18_17, ATACS11),
+ PINMUX_IPSR_DATA(IP7_18_17, TX1_B),
+ PINMUX_IPSR_DATA(IP7_18_17, CC5_TDO),
+ PINMUX_IPSR_DATA(IP7_20_19, SD0_DAT0),
+ PINMUX_IPSR_DATA(IP7_20_19, ATADIR1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_20_19, RX1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP7_20_19, CC5_TRST),
+ PINMUX_IPSR_DATA(IP7_22_21, SD0_DAT1),
+ PINMUX_IPSR_DATA(IP7_22_21, ATAG1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_22_21, SCK2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_DATA(IP7_22_21, CC5_TMS),
+ PINMUX_IPSR_DATA(IP7_24_23, SD0_DAT2),
+ PINMUX_IPSR_DATA(IP7_24_23, ATARD1),
+ PINMUX_IPSR_DATA(IP7_24_23, TX2_B),
+ PINMUX_IPSR_DATA(IP7_24_23, CC5_TCK),
+ PINMUX_IPSR_DATA(IP7_26_25, SD0_DAT3),
+ PINMUX_IPSR_DATA(IP7_26_25, ATAWR1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_26_25, RX2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_DATA(IP7_26_25, CC5_TDI),
+ PINMUX_IPSR_DATA(IP7_28_27, SD0_CD),
+ PINMUX_IPSR_MODSEL_DATA(IP7_28_27, DREQ2, SEL_EXBUS2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP7_28_27, RTS1_B_TANS_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP7_30_29, SD0_WP),
+ PINMUX_IPSR_DATA(IP7_30_29, DACK2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_30_29, CTS1_B, SEL_SCIF1_1),
+
+ PINMUX_IPSR_DATA(IP8_3_0, HSPI_CLK0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_3_0, CTS0, SEL_SCIF0_0),
+ PINMUX_IPSR_DATA(IP8_3_0, USB_OVC0),
+ PINMUX_IPSR_DATA(IP8_3_0, AD_CLK),
+ PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE4),
+ PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE12),
+ PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE20),
+ PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE28),
+ PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE36),
+ PINMUX_IPSR_DATA(IP8_7_4, HSPI_CS0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_7_4, RTS0_TANS, SEL_SCIF0_0),
+ PINMUX_IPSR_DATA(IP8_7_4, USB_OVC1),
+ PINMUX_IPSR_DATA(IP8_7_4, AD_DI),
+ PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE5),
+ PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE13),
+ PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE21),
+ PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE29),
+ PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE37),
+ PINMUX_IPSR_DATA(IP8_11_8, HSPI_TX0),
+ PINMUX_IPSR_DATA(IP8_11_8, TX0),
+ PINMUX_IPSR_DATA(IP8_11_8, CAN_DEBUG_HW_TRIGGER),
+ PINMUX_IPSR_DATA(IP8_11_8, AD_DO),
+ PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE6),
+ PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE14),
+ PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE22),
+ PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE30),
+ PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE38),
+ PINMUX_IPSR_DATA(IP8_15_12, HSPI_RX0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_15_12, RX0, SEL_SCIF0_0),
+ PINMUX_IPSR_DATA(IP8_15_12, CAN_STEP0),
+ PINMUX_IPSR_DATA(IP8_15_12, AD_NCS),
+ PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE7),
+ PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE15),
+ PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE23),
+ PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE31),
+ PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE39),
+ PINMUX_IPSR_DATA(IP8_17_16, FMCLK),
+ PINMUX_IPSR_DATA(IP8_17_16, RDS_CLK),
+ PINMUX_IPSR_DATA(IP8_17_16, PCMOE),
+ PINMUX_IPSR_DATA(IP8_18, BPFCLK),
+ PINMUX_IPSR_DATA(IP8_18, PCMWE),
+ PINMUX_IPSR_DATA(IP8_19, FMIN),
+ PINMUX_IPSR_DATA(IP8_19, RDS_DATA),
+ PINMUX_IPSR_DATA(IP8_20, VI0_CLK),
+ PINMUX_IPSR_DATA(IP8_20, MMC1_CLK),
+ PINMUX_IPSR_DATA(IP8_22_21, VI0_CLKENB),
+ PINMUX_IPSR_DATA(IP8_22_21, TX1_C),
+ PINMUX_IPSR_DATA(IP8_22_21, HTX1_B),
+ PINMUX_IPSR_DATA(IP8_22_21, MT1_SYNC),
+ PINMUX_IPSR_DATA(IP8_24_23, VI0_FIELD),
+ PINMUX_IPSR_MODSEL_DATA(IP8_24_23, RX1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP8_24_23, HRX1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_DATA(IP8_27_25, VI0_HSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_25, VI0_DATA0_B_VI0_B0_B, SEL_VI0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_25, CTS1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_DATA(IP8_27_25, TX4_D),
+ PINMUX_IPSR_DATA(IP8_27_25, MMC1_CMD),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_25, HSCK1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_DATA(IP8_30_28, VI0_VSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP8_30_28, VI0_DATA1_B_VI0_B1_B, SEL_VI0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_30_28, RTS1_C_TANS_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP8_30_28, RX4_D, SEL_SCIF4_3),
+ PINMUX_IPSR_MODSEL_DATA(IP8_30_28, PWMFSW0_C, SEL_PWMFSW_2),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_1_0, VI0_DATA0_VI0_B0, SEL_VI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_1_0, HRTS1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_DATA(IP9_1_0, MT1_VCXO),
+ PINMUX_IPSR_MODSEL_DATA(IP9_3_2, VI0_DATA1_VI0_B1, SEL_VI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_3_2, HCTS1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_DATA(IP9_3_2, MT1_PWM),
+ PINMUX_IPSR_DATA(IP9_4, VI0_DATA2_VI0_B2),
+ PINMUX_IPSR_DATA(IP9_4, MMC1_D0),
+ PINMUX_IPSR_DATA(IP9_5, VI0_DATA3_VI0_B3),
+ PINMUX_IPSR_DATA(IP9_5, MMC1_D1),
+ PINMUX_IPSR_DATA(IP9_6, VI0_DATA4_VI0_B4),
+ PINMUX_IPSR_DATA(IP9_6, MMC1_D2),
+ PINMUX_IPSR_DATA(IP9_7, VI0_DATA5_VI0_B5),
+ PINMUX_IPSR_DATA(IP9_7, MMC1_D3),
+ PINMUX_IPSR_DATA(IP9_9_8, VI0_DATA6_VI0_B6),
+ PINMUX_IPSR_DATA(IP9_9_8, MMC1_D4),
+ PINMUX_IPSR_DATA(IP9_9_8, ARM_TRACEDATA_0),
+ PINMUX_IPSR_DATA(IP9_11_10, VI0_DATA7_VI0_B7),
+ PINMUX_IPSR_DATA(IP9_11_10, MMC1_D5),
+ PINMUX_IPSR_DATA(IP9_11_10, ARM_TRACEDATA_1),
+ PINMUX_IPSR_DATA(IP9_13_12, VI0_G0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_13_12, SSI_SCK78_C, SEL_SSI7_2),
+ PINMUX_IPSR_MODSEL_DATA(IP9_13_12, IRQ0, SEL_INT0_0),
+ PINMUX_IPSR_DATA(IP9_13_12, ARM_TRACEDATA_2),
+ PINMUX_IPSR_DATA(IP9_15_14, VI0_G1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_14, SSI_WS78_C, SEL_SSI7_2),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_14, IRQ1, SEL_INT1_0),
+ PINMUX_IPSR_DATA(IP9_15_14, ARM_TRACEDATA_3),
+ PINMUX_IPSR_DATA(IP9_18_16, VI0_G2),
+ PINMUX_IPSR_DATA(IP9_18_16, ETH_TXD1),
+ PINMUX_IPSR_DATA(IP9_18_16, MMC1_D6),
+ PINMUX_IPSR_DATA(IP9_18_16, ARM_TRACEDATA_4),
+ PINMUX_IPSR_DATA(IP9_18_16, TS_SPSYNC0),
+ PINMUX_IPSR_DATA(IP9_21_19, VI0_G3),
+ PINMUX_IPSR_DATA(IP9_21_19, ETH_CRS_DV),
+ PINMUX_IPSR_DATA(IP9_21_19, MMC1_D7),
+ PINMUX_IPSR_DATA(IP9_21_19, ARM_TRACEDATA_5),
+ PINMUX_IPSR_DATA(IP9_21_19, TS_SDAT0),
+ PINMUX_IPSR_DATA(IP9_23_22, VI0_G4),
+ PINMUX_IPSR_DATA(IP9_23_22, ETH_TX_EN),
+ PINMUX_IPSR_MODSEL_DATA(IP9_23_22, SD2_DAT0_B, SEL_SD2_1),
+ PINMUX_IPSR_DATA(IP9_23_22, ARM_TRACEDATA_6),
+ PINMUX_IPSR_DATA(IP9_25_24, VI0_G5),
+ PINMUX_IPSR_DATA(IP9_25_24, ETH_RX_ER),
+ PINMUX_IPSR_MODSEL_DATA(IP9_25_24, SD2_DAT1_B, SEL_SD2_1),
+ PINMUX_IPSR_DATA(IP9_25_24, ARM_TRACEDATA_7),
+ PINMUX_IPSR_DATA(IP9_27_26, VI0_G6),
+ PINMUX_IPSR_DATA(IP9_27_26, ETH_RXD0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_27_26, SD2_DAT2_B, SEL_SD2_1),
+ PINMUX_IPSR_DATA(IP9_27_26, ARM_TRACEDATA_8),
+ PINMUX_IPSR_DATA(IP9_29_28, VI0_G7),
+ PINMUX_IPSR_DATA(IP9_29_28, ETH_RXD1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_29_28, SD2_DAT3_B, SEL_SD2_1),
+ PINMUX_IPSR_DATA(IP9_29_28, ARM_TRACEDATA_9),
+
+ PINMUX_IPSR_DATA(IP10_2_0, VI0_R0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SSI_SDATA7_C, SEL_SSI7_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SCK1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, DREQ1_B, SEL_EXBUS1_0),
+ PINMUX_IPSR_DATA(IP10_2_0, ARM_TRACEDATA_10),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, DREQ0_C, SEL_EXBUS0_2),
+ PINMUX_IPSR_DATA(IP10_5_3, VI0_R1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SSI_SDATA8_C, SEL_SSI8_2),
+ PINMUX_IPSR_DATA(IP10_5_3, DACK1_B),
+ PINMUX_IPSR_DATA(IP10_5_3, ARM_TRACEDATA_11),
+ PINMUX_IPSR_DATA(IP10_5_3, DACK0_C),
+ PINMUX_IPSR_DATA(IP10_5_3, DRACK0_C),
+ PINMUX_IPSR_DATA(IP10_8_6, VI0_R2),
+ PINMUX_IPSR_DATA(IP10_8_6, ETH_LINK),
+ PINMUX_IPSR_DATA(IP10_8_6, SD2_CLK_B),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, IRQ2, SEL_INT2_0),
+ PINMUX_IPSR_DATA(IP10_8_6, ARM_TRACEDATA_12),
+ PINMUX_IPSR_DATA(IP10_11_9, VI0_R3),
+ PINMUX_IPSR_DATA(IP10_11_9, ETH_MAGIC),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SD2_CMD_B, SEL_SD2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, IRQ3, SEL_INT3_0),
+ PINMUX_IPSR_DATA(IP10_11_9, ARM_TRACEDATA_13),
+ PINMUX_IPSR_DATA(IP10_14_12, VI0_R4),
+ PINMUX_IPSR_DATA(IP10_14_12, ETH_REFCLK),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SD2_CD_B, SEL_SD2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, HSPI_CLK1_B, SEL_HSPI1_1),
+ PINMUX_IPSR_DATA(IP10_14_12, ARM_TRACEDATA_14),
+ PINMUX_IPSR_DATA(IP10_14_12, MT1_CLK),
+ PINMUX_IPSR_DATA(IP10_14_12, TS_SCK0),
+ PINMUX_IPSR_DATA(IP10_17_15, VI0_R5),
+ PINMUX_IPSR_DATA(IP10_17_15, ETH_TXD0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_17_15, SD2_WP_B, SEL_SD2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_17_15, HSPI_CS1_B, SEL_HSPI1_1),
+ PINMUX_IPSR_DATA(IP10_17_15, ARM_TRACEDATA_15),
+ PINMUX_IPSR_DATA(IP10_17_15, MT1_D),
+ PINMUX_IPSR_DATA(IP10_17_15, TS_SDEN0),
+ PINMUX_IPSR_DATA(IP10_20_18, VI0_R6),
+ PINMUX_IPSR_DATA(IP10_20_18, ETH_MDC),
+ PINMUX_IPSR_MODSEL_DATA(IP10_20_18, DREQ2_C, SEL_EXBUS2_2),
+ PINMUX_IPSR_DATA(IP10_20_18, HSPI_TX1_B),
+ PINMUX_IPSR_DATA(IP10_20_18, TRACECLK),
+ PINMUX_IPSR_DATA(IP10_20_18, MT1_BEN),
+ PINMUX_IPSR_MODSEL_DATA(IP10_20_18, PWMFSW0_D, SEL_PWMFSW_3),
+ PINMUX_IPSR_DATA(IP10_23_21, VI0_R7),
+ PINMUX_IPSR_DATA(IP10_23_21, ETH_MDIO),
+ PINMUX_IPSR_DATA(IP10_23_21, DACK2_C),
+ PINMUX_IPSR_MODSEL_DATA(IP10_23_21, HSPI_RX1_B, SEL_HSPI1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_23_21, SCIF_CLK_D, SEL_SCIF_3),
+ PINMUX_IPSR_DATA(IP10_23_21, TRACECTL),
+ PINMUX_IPSR_DATA(IP10_23_21, MT1_PEN),
+ PINMUX_IPSR_DATA(IP10_25_24, VI1_CLK),
+ PINMUX_IPSR_MODSEL_DATA(IP10_25_24, SIM_D, SEL_SIM_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_25_24, SDA3, SEL_I2C3_0),
+ PINMUX_IPSR_DATA(IP10_28_26, VI1_HSYNC),
+ PINMUX_IPSR_DATA(IP10_28_26, VI3_CLK),
+ PINMUX_IPSR_DATA(IP10_28_26, SSI_SCK4),
+ PINMUX_IPSR_MODSEL_DATA(IP10_28_26, GPS_SIGN_C, SEL_GPS_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_28_26, PWMFSW0_E, SEL_PWMFSW_4),
+ PINMUX_IPSR_DATA(IP10_31_29, VI1_VSYNC),
+ PINMUX_IPSR_DATA(IP10_31_29, AUDIO_CLKOUT_C),
+ PINMUX_IPSR_DATA(IP10_31_29, SSI_WS4),
+ PINMUX_IPSR_DATA(IP10_31_29, SIM_CLK),
+ PINMUX_IPSR_MODSEL_DATA(IP10_31_29, GPS_MAG_C, SEL_GPS_2),
+ PINMUX_IPSR_DATA(IP10_31_29, SPV_TRST),
+ PINMUX_IPSR_MODSEL_DATA(IP10_31_29, SCL3, SEL_I2C3_0),
+
+ PINMUX_IPSR_DATA(IP11_2_0, VI1_DATA0_VI1_B0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_2_0, SD2_DAT0, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_2_0, SIM_RST),
+ PINMUX_IPSR_DATA(IP11_2_0, SPV_TCK),
+ PINMUX_IPSR_DATA(IP11_2_0, ADICLK_B),
+ PINMUX_IPSR_DATA(IP11_5_3, VI1_DATA1_VI1_B1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_5_3, SD2_DAT1, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_5_3, MT0_CLK),
+ PINMUX_IPSR_DATA(IP11_5_3, SPV_TMS),
+ PINMUX_IPSR_MODSEL_DATA(IP11_5_3, ADICS_B_SAMP_B, SEL_ADI_1),
+ PINMUX_IPSR_DATA(IP11_8_6, VI1_DATA2_VI1_B2),
+ PINMUX_IPSR_MODSEL_DATA(IP11_8_6, SD2_DAT2, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_8_6, MT0_D),
+ PINMUX_IPSR_DATA(IP11_8_6, SPVTDI),
+ PINMUX_IPSR_MODSEL_DATA(IP11_8_6, ADIDATA_B, SEL_ADI_1),
+ PINMUX_IPSR_DATA(IP11_11_9, VI1_DATA3_VI1_B3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_11_9, SD2_DAT3, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_11_9, MT0_BEN),
+ PINMUX_IPSR_DATA(IP11_11_9, SPV_TDO),
+ PINMUX_IPSR_DATA(IP11_11_9, ADICHS0_B),
+ PINMUX_IPSR_DATA(IP11_14_12, VI1_DATA4_VI1_B4),
+ PINMUX_IPSR_DATA(IP11_14_12, SD2_CLK),
+ PINMUX_IPSR_DATA(IP11_14_12, MT0_PEN),
+ PINMUX_IPSR_DATA(IP11_14_12, SPA_TRST),
+ PINMUX_IPSR_MODSEL_DATA(IP11_14_12, HSPI_CLK1_D, SEL_HSPI1_3),
+ PINMUX_IPSR_DATA(IP11_14_12, ADICHS1_B),
+ PINMUX_IPSR_DATA(IP11_17_15, VI1_DATA5_VI1_B5),
+ PINMUX_IPSR_MODSEL_DATA(IP11_17_15, SD2_CMD, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_17_15, MT0_SYNC),
+ PINMUX_IPSR_DATA(IP11_17_15, SPA_TCK),
+ PINMUX_IPSR_MODSEL_DATA(IP11_17_15, HSPI_CS1_D, SEL_HSPI1_3),
+ PINMUX_IPSR_DATA(IP11_17_15, ADICHS2_B),
+ PINMUX_IPSR_DATA(IP11_20_18, VI1_DATA6_VI1_B6),
+ PINMUX_IPSR_MODSEL_DATA(IP11_20_18, SD2_CD, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_20_18, MT0_VCXO),
+ PINMUX_IPSR_DATA(IP11_20_18, SPA_TMS),
+ PINMUX_IPSR_DATA(IP11_20_18, HSPI_TX1_D),
+ PINMUX_IPSR_DATA(IP11_23_21, VI1_DATA7_VI1_B7),
+ PINMUX_IPSR_MODSEL_DATA(IP11_23_21, SD2_WP, SEL_SD2_0),
+ PINMUX_IPSR_DATA(IP11_23_21, MT0_PWM),
+ PINMUX_IPSR_DATA(IP11_23_21, SPA_TDI),
+ PINMUX_IPSR_MODSEL_DATA(IP11_23_21, HSPI_RX1_D, SEL_HSPI1_3),
+ PINMUX_IPSR_DATA(IP11_26_24, VI1_G0),
+ PINMUX_IPSR_DATA(IP11_26_24, VI3_DATA0),
+ PINMUX_IPSR_DATA(IP11_26_24, DU1_DOTCLKOUT1),
+ PINMUX_IPSR_DATA(IP11_26_24, TS_SCK1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_26_24, DREQ2_B, SEL_EXBUS2_1),
+ PINMUX_IPSR_DATA(IP11_26_24, TX2),
+ PINMUX_IPSR_DATA(IP11_26_24, SPA_TDO),
+ PINMUX_IPSR_MODSEL_DATA(IP11_26_24, HCTS0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_DATA(IP11_29_27, VI1_G1),
+ PINMUX_IPSR_DATA(IP11_29_27, VI3_DATA1),
+ PINMUX_IPSR_DATA(IP11_29_27, SSI_SCK1),
+ PINMUX_IPSR_DATA(IP11_29_27, TS_SDEN1),
+ PINMUX_IPSR_DATA(IP11_29_27, DACK2_B),
+ PINMUX_IPSR_MODSEL_DATA(IP11_29_27, RX2, SEL_SCIF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_29_27, HRTS0_B, SEL_HSCIF0_1),
+
+ PINMUX_IPSR_DATA(IP12_2_0, VI1_G2),
+ PINMUX_IPSR_DATA(IP12_2_0, VI3_DATA2),
+ PINMUX_IPSR_DATA(IP12_2_0, SSI_WS1),
+ PINMUX_IPSR_DATA(IP12_2_0, TS_SPSYNC1),
+ PINMUX_IPSR_MODSEL_DATA(IP12_2_0, SCK2, SEL_SCIF2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP12_2_0, HSCK0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_DATA(IP12_5_3, VI1_G3),
+ PINMUX_IPSR_DATA(IP12_5_3, VI3_DATA3),
+ PINMUX_IPSR_DATA(IP12_5_3, SSI_SCK2),
+ PINMUX_IPSR_DATA(IP12_5_3, TS_SDAT1),
+ PINMUX_IPSR_MODSEL_DATA(IP12_5_3, SCL1_C, SEL_I2C1_2),
+ PINMUX_IPSR_DATA(IP12_5_3, HTX0_B),
+ PINMUX_IPSR_DATA(IP12_8_6, VI1_G4),
+ PINMUX_IPSR_DATA(IP12_8_6, VI3_DATA4),
+ PINMUX_IPSR_DATA(IP12_8_6, SSI_WS2),
+ PINMUX_IPSR_MODSEL_DATA(IP12_8_6, SDA1_C, SEL_I2C1_2),
+ PINMUX_IPSR_DATA(IP12_8_6, SIM_RST_B),
+ PINMUX_IPSR_MODSEL_DATA(IP12_8_6, HRX0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_DATA(IP12_11_9, VI1_G5),
+ PINMUX_IPSR_DATA(IP12_11_9, VI3_DATA5),
+ PINMUX_IPSR_MODSEL_DATA(IP12_11_9, GPS_CLK, SEL_GPS_0),
+ PINMUX_IPSR_DATA(IP12_11_9, FSE),
+ PINMUX_IPSR_DATA(IP12_11_9, TX4_B),
+ PINMUX_IPSR_MODSEL_DATA(IP12_11_9, SIM_D_B, SEL_SIM_1),
+ PINMUX_IPSR_DATA(IP12_14_12, VI1_G6),
+ PINMUX_IPSR_DATA(IP12_14_12, VI3_DATA6),
+ PINMUX_IPSR_MODSEL_DATA(IP12_14_12, GPS_SIGN, SEL_GPS_0),
+ PINMUX_IPSR_DATA(IP12_14_12, FRB),
+ PINMUX_IPSR_MODSEL_DATA(IP12_14_12, RX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_DATA(IP12_14_12, SIM_CLK_B),
+ PINMUX_IPSR_DATA(IP12_17_15, VI1_G7),
+ PINMUX_IPSR_DATA(IP12_17_15, VI3_DATA7),
+ PINMUX_IPSR_MODSEL_DATA(IP12_17_15, GPS_MAG, SEL_GPS_0),
+ PINMUX_IPSR_DATA(IP12_17_15, FCE),
+ PINMUX_IPSR_MODSEL_DATA(IP12_17_15, SCK4_B, SEL_SCIF4_1),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ PINMUX_GPIO_GP_ALL(),
+ GPIO_FN(AVS1), GPIO_FN(AVS2), GPIO_FN(A17), GPIO_FN(A18),
+ GPIO_FN(A19),
+
+ /* IPSR0 */
+ GPIO_FN(USB_PENC2), GPIO_FN(SCK0), GPIO_FN(PWM1), GPIO_FN(PWMFSW0),
+ GPIO_FN(SCIF_CLK), GPIO_FN(TCLK0_C), GPIO_FN(BS), GPIO_FN(SD1_DAT2),
+ GPIO_FN(MMC0_D2), GPIO_FN(FD2), GPIO_FN(ATADIR0), GPIO_FN(SDSELF),
+ GPIO_FN(HCTS1), GPIO_FN(TX4_C), GPIO_FN(A0), GPIO_FN(SD1_DAT3),
+ GPIO_FN(MMC0_D3), GPIO_FN(FD3), GPIO_FN(A20), GPIO_FN(TX5_D),
+ GPIO_FN(HSPI_TX2_B), GPIO_FN(A21), GPIO_FN(SCK5_D),
+ GPIO_FN(HSPI_CLK2_B), GPIO_FN(A22), GPIO_FN(RX5_D),
+ GPIO_FN(HSPI_RX2_B), GPIO_FN(VI1_R0), GPIO_FN(A23), GPIO_FN(FCLE),
+ GPIO_FN(HSPI_CLK2), GPIO_FN(VI1_R1), GPIO_FN(A24), GPIO_FN(SD1_CD),
+ GPIO_FN(MMC0_D4), GPIO_FN(FD4), GPIO_FN(HSPI_CS2), GPIO_FN(VI1_R2),
+ GPIO_FN(SSI_WS78_B), GPIO_FN(A25), GPIO_FN(SD1_WP), GPIO_FN(MMC0_D5),
+ GPIO_FN(FD5), GPIO_FN(HSPI_RX2), GPIO_FN(VI1_R3), GPIO_FN(TX5_B),
+ GPIO_FN(SSI_SDATA7_B), GPIO_FN(CTS0_B), GPIO_FN(CLKOUT),
+ GPIO_FN(TX3C_IRDA_TX_C), GPIO_FN(PWM0_B), GPIO_FN(CS0),
+ GPIO_FN(HSPI_CS2_B), GPIO_FN(CS1_A26), GPIO_FN(HSPI_TX2),
+ GPIO_FN(SDSELF_B), GPIO_FN(RD_WR), GPIO_FN(FWE), GPIO_FN(ATAG0),
+ GPIO_FN(VI1_R7), GPIO_FN(HRTS1), GPIO_FN(RX4_C),
+
+ /* IPSR1 */
+ GPIO_FN(EX_CS0), GPIO_FN(RX3_C_IRDA_RX_C), GPIO_FN(MMC0_D6),
+ GPIO_FN(FD6), GPIO_FN(EX_CS1), GPIO_FN(MMC0_D7), GPIO_FN(FD7),
+ GPIO_FN(EX_CS2), GPIO_FN(SD1_CLK), GPIO_FN(MMC0_CLK), GPIO_FN(FALE),
+ GPIO_FN(ATACS00), GPIO_FN(EX_CS3), GPIO_FN(SD1_CMD), GPIO_FN(MMC0_CMD),
+ GPIO_FN(FRE), GPIO_FN(ATACS10), GPIO_FN(VI1_R4), GPIO_FN(RX5_B),
+ GPIO_FN(HSCK1), GPIO_FN(SSI_SDATA8_B), GPIO_FN(RTS0_B_TANS_B),
+ GPIO_FN(SSI_SDATA9), GPIO_FN(EX_CS4), GPIO_FN(SD1_DAT0),
+ GPIO_FN(MMC0_D0), GPIO_FN(FD0), GPIO_FN(ATARD0), GPIO_FN(VI1_R5),
+ GPIO_FN(SCK5_B), GPIO_FN(HTX1), GPIO_FN(TX2_E), GPIO_FN(TX0_B),
+ GPIO_FN(SSI_SCK9), GPIO_FN(EX_CS5), GPIO_FN(SD1_DAT1),
+ GPIO_FN(MMC0_D1), GPIO_FN(FD1), GPIO_FN(ATAWR0), GPIO_FN(VI1_R6),
+ GPIO_FN(HRX1), GPIO_FN(RX2_E), GPIO_FN(RX0_B), GPIO_FN(SSI_WS9),
+ GPIO_FN(MLB_CLK), GPIO_FN(PWM2), GPIO_FN(SCK4), GPIO_FN(MLB_SIG),
+ GPIO_FN(PWM3), GPIO_FN(TX4), GPIO_FN(MLB_DAT), GPIO_FN(PWM4),
+ GPIO_FN(RX4), GPIO_FN(HTX0), GPIO_FN(TX1), GPIO_FN(SDATA),
+ GPIO_FN(CTS0_C), GPIO_FN(SUB_TCK), GPIO_FN(CC5_STATE2),
+ GPIO_FN(CC5_STATE10), GPIO_FN(CC5_STATE18), GPIO_FN(CC5_STATE26),
+ GPIO_FN(CC5_STATE34),
+
+ /* IPSR2 */
+ GPIO_FN(HRX0), GPIO_FN(RX1), GPIO_FN(SCKZ), GPIO_FN(RTS0_C_TANS_C),
+ GPIO_FN(SUB_TDI), GPIO_FN(CC5_STATE3), GPIO_FN(CC5_STATE11),
+ GPIO_FN(CC5_STATE19), GPIO_FN(CC5_STATE27), GPIO_FN(CC5_STATE35),
+ GPIO_FN(HSCK0), GPIO_FN(SCK1), GPIO_FN(MTS), GPIO_FN(PWM5),
+ GPIO_FN(SCK0_C), GPIO_FN(SSI_SDATA9_B), GPIO_FN(SUB_TDO),
+ GPIO_FN(CC5_STATE0), GPIO_FN(CC5_STATE8), GPIO_FN(CC5_STATE16),
+ GPIO_FN(CC5_STATE24), GPIO_FN(CC5_STATE32), GPIO_FN(HCTS0),
+ GPIO_FN(CTS1), GPIO_FN(STM), GPIO_FN(PWM0_D), GPIO_FN(RX0_C),
+ GPIO_FN(SCIF_CLK_C), GPIO_FN(SUB_TRST), GPIO_FN(TCLK1_B),
+ GPIO_FN(CC5_OSCOUT), GPIO_FN(HRTS0), GPIO_FN(RTS1_TANS),
+ GPIO_FN(MDATA), GPIO_FN(TX0_C), GPIO_FN(SUB_TMS), GPIO_FN(CC5_STATE1),
+ GPIO_FN(CC5_STATE9), GPIO_FN(CC5_STATE17), GPIO_FN(CC5_STATE25),
+ GPIO_FN(CC5_STATE33), GPIO_FN(DU0_DR0), GPIO_FN(LCDOUT0),
+ GPIO_FN(DREQ0), GPIO_FN(GPS_CLK_B), GPIO_FN(AUDATA0),
+ GPIO_FN(TX5_C), GPIO_FN(DU0_DR1), GPIO_FN(LCDOUT1), GPIO_FN(DACK0),
+ GPIO_FN(DRACK0), GPIO_FN(GPS_SIGN_B), GPIO_FN(AUDATA1), GPIO_FN(RX5_C),
+ GPIO_FN(DU0_DR2), GPIO_FN(LCDOUT2), GPIO_FN(DU0_DR3), GPIO_FN(LCDOUT3),
+ GPIO_FN(DU0_DR4), GPIO_FN(LCDOUT4), GPIO_FN(DU0_DR5), GPIO_FN(LCDOUT5),
+ GPIO_FN(DU0_DR6), GPIO_FN(LCDOUT6), GPIO_FN(DU0_DR7), GPIO_FN(LCDOUT7),
+ GPIO_FN(DU0_DG0), GPIO_FN(LCDOUT8), GPIO_FN(DREQ1), GPIO_FN(SCL2),
+ GPIO_FN(AUDATA2),
+
+ /* IPSR3 */
+ GPIO_FN(DU0_DG1), GPIO_FN(LCDOUT9), GPIO_FN(DACK1), GPIO_FN(SDA2),
+ GPIO_FN(AUDATA3), GPIO_FN(DU0_DG2), GPIO_FN(LCDOUT10),
+ GPIO_FN(DU0_DG3), GPIO_FN(LCDOUT11), GPIO_FN(DU0_DG4),
+ GPIO_FN(LCDOUT12), GPIO_FN(DU0_DG5), GPIO_FN(LCDOUT13),
+ GPIO_FN(DU0_DG6), GPIO_FN(LCDOUT14), GPIO_FN(DU0_DG7),
+ GPIO_FN(LCDOUT15), GPIO_FN(DU0_DB0), GPIO_FN(LCDOUT16),
+ GPIO_FN(EX_WAIT1), GPIO_FN(SCL1), GPIO_FN(TCLK1), GPIO_FN(AUDATA4),
+ GPIO_FN(DU0_DB1), GPIO_FN(LCDOUT17), GPIO_FN(EX_WAIT2), GPIO_FN(SDA1),
+ GPIO_FN(GPS_MAG_B), GPIO_FN(AUDATA5), GPIO_FN(SCK5_C),
+ GPIO_FN(DU0_DB2), GPIO_FN(LCDOUT18), GPIO_FN(DU0_DB3),
+ GPIO_FN(LCDOUT19), GPIO_FN(DU0_DB4), GPIO_FN(LCDOUT20),
+ GPIO_FN(DU0_DB5), GPIO_FN(LCDOUT21), GPIO_FN(DU0_DB6),
+ GPIO_FN(LCDOUT22), GPIO_FN(DU0_DB7), GPIO_FN(LCDOUT23),
+ GPIO_FN(DU0_DOTCLKIN), GPIO_FN(QSTVA_QVS), GPIO_FN(TX3_D_IRDA_TX_D),
+ GPIO_FN(SCL3_B), GPIO_FN(DU0_DOTCLKOUT0), GPIO_FN(QCLK),
+ GPIO_FN(DU0_DOTCLKOUT1), GPIO_FN(QSTVB_QVE), GPIO_FN(RX3_D_IRDA_RX_D),
+ GPIO_FN(SDA3_B), GPIO_FN(SDA2_C), GPIO_FN(DACK0_B), GPIO_FN(DRACK0_B),
+ GPIO_FN(DU0_EXHSYNC_DU0_HSYNC), GPIO_FN(QSTH_QHS),
+ GPIO_FN(DU0_EXVSYNC_DU0_VSYNC), GPIO_FN(QSTB_QHE),
+ GPIO_FN(DU0_EXODDF_DU0_ODDF_DISP_CDE), GPIO_FN(QCPV_QDE),
+ GPIO_FN(CAN1_TX), GPIO_FN(TX2_C), GPIO_FN(SCL2_C), GPIO_FN(REMOCON),
+
+ /* IPSR4 */
+ GPIO_FN(DU0_DISP), GPIO_FN(QPOLA), GPIO_FN(CAN_CLK_C), GPIO_FN(SCK2_C),
+ GPIO_FN(DU0_CDE), GPIO_FN(QPOLB), GPIO_FN(CAN1_RX), GPIO_FN(RX2_C),
+ GPIO_FN(DREQ0_B), GPIO_FN(SSI_SCK78_B), GPIO_FN(SCK0_B),
+ GPIO_FN(DU1_DR0), GPIO_FN(VI2_DATA0_VI2_B0), GPIO_FN(PWM6),
+ GPIO_FN(SD3_CLK), GPIO_FN(TX3_E_IRDA_TX_E), GPIO_FN(AUDCK),
+ GPIO_FN(PWMFSW0_B), GPIO_FN(DU1_DR1), GPIO_FN(VI2_DATA1_VI2_B1),
+ GPIO_FN(PWM0), GPIO_FN(SD3_CMD), GPIO_FN(RX3_E_IRDA_RX_E),
+ GPIO_FN(AUDSYNC), GPIO_FN(CTS0_D), GPIO_FN(DU1_DR2), GPIO_FN(VI2_G0),
+ GPIO_FN(DU1_DR3), GPIO_FN(VI2_G1), GPIO_FN(DU1_DR4), GPIO_FN(VI2_G2),
+ GPIO_FN(DU1_DR5), GPIO_FN(VI2_G3), GPIO_FN(DU1_DR6), GPIO_FN(VI2_G4),
+ GPIO_FN(DU1_DR7), GPIO_FN(VI2_G5), GPIO_FN(DU1_DG0),
+ GPIO_FN(VI2_DATA2_VI2_B2), GPIO_FN(SCL1_B), GPIO_FN(SD3_DAT2),
+ GPIO_FN(SCK3_E), GPIO_FN(AUDATA6), GPIO_FN(TX0_D), GPIO_FN(DU1_DG1),
+ GPIO_FN(VI2_DATA3_VI2_B3), GPIO_FN(SDA1_B), GPIO_FN(SD3_DAT3),
+ GPIO_FN(SCK5), GPIO_FN(AUDATA7), GPIO_FN(RX0_D), GPIO_FN(DU1_DG2),
+ GPIO_FN(VI2_G6), GPIO_FN(DU1_DG3), GPIO_FN(VI2_G7), GPIO_FN(DU1_DG4),
+ GPIO_FN(VI2_R0), GPIO_FN(DU1_DG5), GPIO_FN(VI2_R1), GPIO_FN(DU1_DG6),
+ GPIO_FN(VI2_R2), GPIO_FN(DU1_DG7), GPIO_FN(VI2_R3), GPIO_FN(DU1_DB0),
+ GPIO_FN(VI2_DATA4_VI2_B4), GPIO_FN(SCL2_B), GPIO_FN(SD3_DAT0),
+ GPIO_FN(TX5), GPIO_FN(SCK0_D),
+
+ /* IPSR5 */
+ GPIO_FN(DU1_DB1), GPIO_FN(VI2_DATA5_VI2_B5), GPIO_FN(SDA2_B),
+ GPIO_FN(SD3_DAT1), GPIO_FN(RX5), GPIO_FN(RTS0_D_TANS_D),
+ GPIO_FN(DU1_DB2), GPIO_FN(VI2_R4), GPIO_FN(DU1_DB3), GPIO_FN(VI2_R5),
+ GPIO_FN(DU1_DB4), GPIO_FN(VI2_R6), GPIO_FN(DU1_DB5), GPIO_FN(VI2_R7),
+ GPIO_FN(DU1_DB6), GPIO_FN(SCL2_D), GPIO_FN(DU1_DB7), GPIO_FN(SDA2_D),
+ GPIO_FN(DU1_DOTCLKIN), GPIO_FN(VI2_CLKENB), GPIO_FN(HSPI_CS1),
+ GPIO_FN(SCL1_D), GPIO_FN(DU1_DOTCLKOUT), GPIO_FN(VI2_FIELD),
+ GPIO_FN(SDA1_D), GPIO_FN(DU1_EXHSYNC_DU1_HSYNC), GPIO_FN(VI2_HSYNC),
+ GPIO_FN(VI3_HSYNC), GPIO_FN(DU1_EXVSYNC_DU1_VSYNC), GPIO_FN(VI2_VSYNC),
+ GPIO_FN(VI3_VSYNC), GPIO_FN(DU1_EXODDF_DU1_ODDF_DISP_CDE),
+ GPIO_FN(VI2_CLK), GPIO_FN(TX3_B_IRDA_TX_B), GPIO_FN(SD3_CD),
+ GPIO_FN(HSPI_TX1), GPIO_FN(VI1_CLKENB), GPIO_FN(VI3_CLKENB),
+ GPIO_FN(AUDIO_CLKC), GPIO_FN(TX2_D), GPIO_FN(SPEEDIN),
+ GPIO_FN(GPS_SIGN_D), GPIO_FN(DU1_DISP), GPIO_FN(VI2_DATA6_VI2_B6),
+ GPIO_FN(TCLK0), GPIO_FN(QSTVA_B_QVS_B), GPIO_FN(HSPI_CLK1),
+ GPIO_FN(SCK2_D), GPIO_FN(AUDIO_CLKOUT_B), GPIO_FN(GPS_MAG_D),
+ GPIO_FN(DU1_CDE), GPIO_FN(VI2_DATA7_VI2_B7), GPIO_FN(RX3_B_IRDA_RX_B),
+ GPIO_FN(SD3_WP), GPIO_FN(HSPI_RX1), GPIO_FN(VI1_FIELD),
+ GPIO_FN(VI3_FIELD), GPIO_FN(AUDIO_CLKOUT), GPIO_FN(RX2_D),
+ GPIO_FN(GPS_CLK_C), GPIO_FN(GPS_CLK_D), GPIO_FN(AUDIO_CLKA),
+ GPIO_FN(CAN_TXCLK), GPIO_FN(AUDIO_CLKB), GPIO_FN(USB_OVC2),
+ GPIO_FN(CAN_DEBUGOUT0), GPIO_FN(MOUT0),
+
+ /* IPSR6 */
+ GPIO_FN(SSI_SCK0129), GPIO_FN(CAN_DEBUGOUT1), GPIO_FN(MOUT1),
+ GPIO_FN(SSI_WS0129), GPIO_FN(CAN_DEBUGOUT2), GPIO_FN(MOUT2),
+ GPIO_FN(SSI_SDATA0), GPIO_FN(CAN_DEBUGOUT3), GPIO_FN(MOUT5),
+ GPIO_FN(SSI_SDATA1), GPIO_FN(CAN_DEBUGOUT4), GPIO_FN(MOUT6),
+ GPIO_FN(SSI_SDATA2), GPIO_FN(CAN_DEBUGOUT5), GPIO_FN(SSI_SCK34),
+ GPIO_FN(CAN_DEBUGOUT6), GPIO_FN(CAN0_TX_B), GPIO_FN(IERX),
+ GPIO_FN(SSI_SCK9_C), GPIO_FN(SSI_WS34), GPIO_FN(CAN_DEBUGOUT7),
+ GPIO_FN(CAN0_RX_B), GPIO_FN(IETX), GPIO_FN(SSI_WS9_C),
+ GPIO_FN(SSI_SDATA3), GPIO_FN(PWM0_C), GPIO_FN(CAN_DEBUGOUT8),
+ GPIO_FN(CAN_CLK_B), GPIO_FN(IECLK), GPIO_FN(SCIF_CLK_B),
+ GPIO_FN(TCLK0_B), GPIO_FN(SSI_SDATA4), GPIO_FN(CAN_DEBUGOUT9),
+ GPIO_FN(SSI_SDATA9_C), GPIO_FN(SSI_SCK5), GPIO_FN(ADICLK),
+ GPIO_FN(CAN_DEBUGOUT10), GPIO_FN(SCK3), GPIO_FN(TCLK0_D),
+ GPIO_FN(SSI_WS5), GPIO_FN(ADICS_SAMP), GPIO_FN(CAN_DEBUGOUT11),
+ GPIO_FN(TX3_IRDA_TX), GPIO_FN(SSI_SDATA5), GPIO_FN(ADIDATA),
+ GPIO_FN(CAN_DEBUGOUT12), GPIO_FN(RX3_IRDA_RX), GPIO_FN(SSI_SCK6),
+ GPIO_FN(ADICHS0), GPIO_FN(CAN0_TX), GPIO_FN(IERX_B),
+
+ /* IPSR7 */
+ GPIO_FN(SSI_WS6), GPIO_FN(ADICHS1), GPIO_FN(CAN0_RX), GPIO_FN(IETX_B),
+ GPIO_FN(SSI_SDATA6), GPIO_FN(ADICHS2), GPIO_FN(CAN_CLK),
+ GPIO_FN(IECLK_B), GPIO_FN(SSI_SCK78), GPIO_FN(CAN_DEBUGOUT13),
+ GPIO_FN(IRQ0_B), GPIO_FN(SSI_SCK9_B), GPIO_FN(HSPI_CLK1_C),
+ GPIO_FN(SSI_WS78), GPIO_FN(CAN_DEBUGOUT14), GPIO_FN(IRQ1_B),
+ GPIO_FN(SSI_WS9_B), GPIO_FN(HSPI_CS1_C), GPIO_FN(SSI_SDATA7),
+ GPIO_FN(CAN_DEBUGOUT15), GPIO_FN(IRQ2_B), GPIO_FN(TCLK1_C),
+ GPIO_FN(HSPI_TX1_C), GPIO_FN(SSI_SDATA8), GPIO_FN(VSP),
+ GPIO_FN(IRQ3_B), GPIO_FN(HSPI_RX1_C), GPIO_FN(SD0_CLK),
+ GPIO_FN(ATACS01), GPIO_FN(SCK1_B), GPIO_FN(SD0_CMD), GPIO_FN(ATACS11),
+ GPIO_FN(TX1_B), GPIO_FN(CC5_TDO), GPIO_FN(SD0_DAT0), GPIO_FN(ATADIR1),
+ GPIO_FN(RX1_B), GPIO_FN(CC5_TRST), GPIO_FN(SD0_DAT1), GPIO_FN(ATAG1),
+ GPIO_FN(SCK2_B), GPIO_FN(CC5_TMS), GPIO_FN(SD0_DAT2), GPIO_FN(ATARD1),
+ GPIO_FN(TX2_B), GPIO_FN(CC5_TCK), GPIO_FN(SD0_DAT3), GPIO_FN(ATAWR1),
+ GPIO_FN(RX2_B), GPIO_FN(CC5_TDI), GPIO_FN(SD0_CD), GPIO_FN(DREQ2),
+ GPIO_FN(RTS1_B_TANS_B), GPIO_FN(SD0_WP), GPIO_FN(DACK2),
+ GPIO_FN(CTS1_B),
+
+ /* IPSR8 */
+ GPIO_FN(HSPI_CLK0), GPIO_FN(CTS0), GPIO_FN(USB_OVC0), GPIO_FN(AD_CLK),
+ GPIO_FN(CC5_STATE4), GPIO_FN(CC5_STATE12), GPIO_FN(CC5_STATE20),
+ GPIO_FN(CC5_STATE28), GPIO_FN(CC5_STATE36), GPIO_FN(HSPI_CS0),
+ GPIO_FN(RTS0_TANS), GPIO_FN(USB_OVC1), GPIO_FN(AD_DI),
+ GPIO_FN(CC5_STATE5), GPIO_FN(CC5_STATE13), GPIO_FN(CC5_STATE21),
+ GPIO_FN(CC5_STATE29), GPIO_FN(CC5_STATE37), GPIO_FN(HSPI_TX0),
+ GPIO_FN(TX0), GPIO_FN(CAN_DEBUG_HW_TRIGGER), GPIO_FN(AD_DO),
+ GPIO_FN(CC5_STATE6), GPIO_FN(CC5_STATE14), GPIO_FN(CC5_STATE22),
+ GPIO_FN(CC5_STATE30), GPIO_FN(CC5_STATE38), GPIO_FN(HSPI_RX0),
+ GPIO_FN(RX0), GPIO_FN(CAN_STEP0), GPIO_FN(AD_NCS), GPIO_FN(CC5_STATE7),
+ GPIO_FN(CC5_STATE15), GPIO_FN(CC5_STATE23), GPIO_FN(CC5_STATE31),
+ GPIO_FN(CC5_STATE39), GPIO_FN(FMCLK), GPIO_FN(RDS_CLK), GPIO_FN(PCMOE),
+ GPIO_FN(BPFCLK), GPIO_FN(PCMWE), GPIO_FN(FMIN), GPIO_FN(RDS_DATA),
+ GPIO_FN(VI0_CLK), GPIO_FN(MMC1_CLK), GPIO_FN(VI0_CLKENB),
+ GPIO_FN(TX1_C), GPIO_FN(HTX1_B), GPIO_FN(MT1_SYNC),
+ GPIO_FN(VI0_FIELD), GPIO_FN(RX1_C), GPIO_FN(HRX1_B),
+ GPIO_FN(VI0_HSYNC), GPIO_FN(VI0_DATA0_B_VI0_B0_B), GPIO_FN(CTS1_C),
+ GPIO_FN(TX4_D), GPIO_FN(MMC1_CMD), GPIO_FN(HSCK1_B),
+ GPIO_FN(VI0_VSYNC), GPIO_FN(VI0_DATA1_B_VI0_B1_B),
+ GPIO_FN(RTS1_C_TANS_C), GPIO_FN(RX4_D), GPIO_FN(PWMFSW0_C),
+
+ /* IPSR9 */
+ GPIO_FN(VI0_DATA0_VI0_B0), GPIO_FN(HRTS1_B), GPIO_FN(MT1_VCXO),
+ GPIO_FN(VI0_DATA1_VI0_B1), GPIO_FN(HCTS1_B), GPIO_FN(MT1_PWM),
+ GPIO_FN(VI0_DATA2_VI0_B2), GPIO_FN(MMC1_D0), GPIO_FN(VI0_DATA3_VI0_B3),
+ GPIO_FN(MMC1_D1), GPIO_FN(VI0_DATA4_VI0_B4), GPIO_FN(MMC1_D2),
+ GPIO_FN(VI0_DATA5_VI0_B5), GPIO_FN(MMC1_D3), GPIO_FN(VI0_DATA6_VI0_B6),
+ GPIO_FN(MMC1_D4), GPIO_FN(ARM_TRACEDATA_0), GPIO_FN(VI0_DATA7_VI0_B7),
+ GPIO_FN(MMC1_D5), GPIO_FN(ARM_TRACEDATA_1), GPIO_FN(VI0_G0),
+ GPIO_FN(SSI_SCK78_C), GPIO_FN(IRQ0), GPIO_FN(ARM_TRACEDATA_2),
+ GPIO_FN(VI0_G1), GPIO_FN(SSI_WS78_C), GPIO_FN(IRQ1),
+ GPIO_FN(ARM_TRACEDATA_3), GPIO_FN(VI0_G2), GPIO_FN(ETH_TXD1),
+ GPIO_FN(MMC1_D6), GPIO_FN(ARM_TRACEDATA_4), GPIO_FN(TS_SPSYNC0),
+ GPIO_FN(VI0_G3), GPIO_FN(ETH_CRS_DV), GPIO_FN(MMC1_D7),
+ GPIO_FN(ARM_TRACEDATA_5), GPIO_FN(TS_SDAT0), GPIO_FN(VI0_G4),
+ GPIO_FN(ETH_TX_EN), GPIO_FN(SD2_DAT0_B), GPIO_FN(ARM_TRACEDATA_6),
+ GPIO_FN(VI0_G5), GPIO_FN(ETH_RX_ER), GPIO_FN(SD2_DAT1_B),
+ GPIO_FN(ARM_TRACEDATA_7), GPIO_FN(VI0_G6), GPIO_FN(ETH_RXD0),
+ GPIO_FN(SD2_DAT2_B), GPIO_FN(ARM_TRACEDATA_8), GPIO_FN(VI0_G7),
+ GPIO_FN(ETH_RXD1), GPIO_FN(SD2_DAT3_B), GPIO_FN(ARM_TRACEDATA_9),
+
+ /* IPSR10 */
+ GPIO_FN(VI0_R0), GPIO_FN(SSI_SDATA7_C), GPIO_FN(SCK1_C),
+ GPIO_FN(DREQ1_B), GPIO_FN(ARM_TRACEDATA_10), GPIO_FN(DREQ0_C),
+ GPIO_FN(VI0_R1), GPIO_FN(SSI_SDATA8_C), GPIO_FN(DACK1_B),
+ GPIO_FN(ARM_TRACEDATA_11), GPIO_FN(DACK0_C), GPIO_FN(DRACK0_C),
+ GPIO_FN(VI0_R2), GPIO_FN(ETH_LINK), GPIO_FN(SD2_CLK_B), GPIO_FN(IRQ2),
+ GPIO_FN(ARM_TRACEDATA_12), GPIO_FN(VI0_R3), GPIO_FN(ETH_MAGIC),
+ GPIO_FN(SD2_CMD_B), GPIO_FN(IRQ3), GPIO_FN(ARM_TRACEDATA_13),
+ GPIO_FN(VI0_R4), GPIO_FN(ETH_REFCLK), GPIO_FN(SD2_CD_B),
+ GPIO_FN(HSPI_CLK1_B), GPIO_FN(ARM_TRACEDATA_14), GPIO_FN(MT1_CLK),
+ GPIO_FN(TS_SCK0), GPIO_FN(VI0_R5), GPIO_FN(ETH_TXD0),
+ GPIO_FN(SD2_WP_B), GPIO_FN(HSPI_CS1_B), GPIO_FN(ARM_TRACEDATA_15),
+ GPIO_FN(MT1_D), GPIO_FN(TS_SDEN0), GPIO_FN(VI0_R6), GPIO_FN(ETH_MDC),
+ GPIO_FN(DREQ2_C), GPIO_FN(HSPI_TX1_B), GPIO_FN(TRACECLK),
+ GPIO_FN(MT1_BEN), GPIO_FN(PWMFSW0_D), GPIO_FN(VI0_R7),
+ GPIO_FN(ETH_MDIO), GPIO_FN(DACK2_C), GPIO_FN(HSPI_RX1_B),
+ GPIO_FN(SCIF_CLK_D), GPIO_FN(TRACECTL), GPIO_FN(MT1_PEN),
+ GPIO_FN(VI1_CLK), GPIO_FN(SIM_D), GPIO_FN(SDA3), GPIO_FN(VI1_HSYNC),
+ GPIO_FN(VI3_CLK), GPIO_FN(SSI_SCK4), GPIO_FN(GPS_SIGN_C),
+ GPIO_FN(PWMFSW0_E), GPIO_FN(VI1_VSYNC), GPIO_FN(AUDIO_CLKOUT_C),
+ GPIO_FN(SSI_WS4), GPIO_FN(SIM_CLK), GPIO_FN(GPS_MAG_C),
+ GPIO_FN(SPV_TRST), GPIO_FN(SCL3),
+
+ /* IPSR11 */
+ GPIO_FN(VI1_DATA0_VI1_B0), GPIO_FN(SD2_DAT0), GPIO_FN(SIM_RST),
+ GPIO_FN(SPV_TCK), GPIO_FN(ADICLK_B), GPIO_FN(VI1_DATA1_VI1_B1),
+ GPIO_FN(SD2_DAT1), GPIO_FN(MT0_CLK), GPIO_FN(SPV_TMS),
+ GPIO_FN(ADICS_B_SAMP_B), GPIO_FN(VI1_DATA2_VI1_B2), GPIO_FN(SD2_DAT2),
+ GPIO_FN(MT0_D), GPIO_FN(SPVTDI), GPIO_FN(ADIDATA_B),
+ GPIO_FN(VI1_DATA3_VI1_B3), GPIO_FN(SD2_DAT3), GPIO_FN(MT0_BEN),
+ GPIO_FN(SPV_TDO), GPIO_FN(ADICHS0_B), GPIO_FN(VI1_DATA4_VI1_B4),
+ GPIO_FN(SD2_CLK), GPIO_FN(MT0_PEN), GPIO_FN(SPA_TRST),
+ GPIO_FN(HSPI_CLK1_D), GPIO_FN(ADICHS1_B), GPIO_FN(VI1_DATA5_VI1_B5),
+ GPIO_FN(SD2_CMD), GPIO_FN(MT0_SYNC), GPIO_FN(SPA_TCK),
+ GPIO_FN(HSPI_CS1_D), GPIO_FN(ADICHS2_B), GPIO_FN(VI1_DATA6_VI1_B6),
+ GPIO_FN(SD2_CD), GPIO_FN(MT0_VCXO), GPIO_FN(SPA_TMS),
+ GPIO_FN(HSPI_TX1_D), GPIO_FN(VI1_DATA7_VI1_B7), GPIO_FN(SD2_WP),
+ GPIO_FN(MT0_PWM), GPIO_FN(SPA_TDI), GPIO_FN(HSPI_RX1_D),
+ GPIO_FN(VI1_G0), GPIO_FN(VI3_DATA0), GPIO_FN(DU1_DOTCLKOUT1),
+ GPIO_FN(TS_SCK1), GPIO_FN(DREQ2_B), GPIO_FN(TX2), GPIO_FN(SPA_TDO),
+ GPIO_FN(HCTS0_B), GPIO_FN(VI1_G1), GPIO_FN(VI3_DATA1),
+ GPIO_FN(SSI_SCK1), GPIO_FN(TS_SDEN1), GPIO_FN(DACK2_B), GPIO_FN(RX2),
+ GPIO_FN(HRTS0_B),
+
+ /* IPSR12 */
+ GPIO_FN(VI1_G2), GPIO_FN(VI3_DATA2), GPIO_FN(SSI_WS1),
+ GPIO_FN(TS_SPSYNC1), GPIO_FN(SCK2), GPIO_FN(HSCK0_B), GPIO_FN(VI1_G3),
+ GPIO_FN(VI3_DATA3), GPIO_FN(SSI_SCK2), GPIO_FN(TS_SDAT1),
+ GPIO_FN(SCL1_C), GPIO_FN(HTX0_B), GPIO_FN(VI1_G4), GPIO_FN(VI3_DATA4),
+ GPIO_FN(SSI_WS2), GPIO_FN(SDA1_C), GPIO_FN(SIM_RST_B),
+ GPIO_FN(HRX0_B), GPIO_FN(VI1_G5), GPIO_FN(VI3_DATA5),
+ GPIO_FN(GPS_CLK), GPIO_FN(FSE), GPIO_FN(TX4_B), GPIO_FN(SIM_D_B),
+ GPIO_FN(VI1_G6), GPIO_FN(VI3_DATA6), GPIO_FN(GPS_SIGN), GPIO_FN(FRB),
+ GPIO_FN(RX4_B), GPIO_FN(SIM_CLK_B), GPIO_FN(VI1_G7),
+ GPIO_FN(VI3_DATA7), GPIO_FN(GPS_MAG), GPIO_FN(FCE), GPIO_FN(SCK4_B),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("GPSR0", 0xfffc0004, 32, 1) {
+ GP_0_31_FN, FN_IP3_31_29,
+ GP_0_30_FN, FN_IP3_26_24,
+ GP_0_29_FN, FN_IP3_22_21,
+ GP_0_28_FN, FN_IP3_14_12,
+ GP_0_27_FN, FN_IP3_11_9,
+ GP_0_26_FN, FN_IP3_2_0,
+ GP_0_25_FN, FN_IP2_30_28,
+ GP_0_24_FN, FN_IP2_21_19,
+ GP_0_23_FN, FN_IP2_18_16,
+ GP_0_22_FN, FN_IP0_30_28,
+ GP_0_21_FN, FN_IP0_5_3,
+ GP_0_20_FN, FN_IP1_18_15,
+ GP_0_19_FN, FN_IP1_14_11,
+ GP_0_18_FN, FN_IP1_10_7,
+ GP_0_17_FN, FN_IP1_6_4,
+ GP_0_16_FN, FN_IP1_3_2,
+ GP_0_15_FN, FN_IP1_1_0,
+ GP_0_14_FN, FN_IP0_27_26,
+ GP_0_13_FN, FN_IP0_25,
+ GP_0_12_FN, FN_IP0_24_23,
+ GP_0_11_FN, FN_IP0_22_19,
+ GP_0_10_FN, FN_IP0_18_16,
+ GP_0_9_FN, FN_IP0_15_14,
+ GP_0_8_FN, FN_IP0_13_12,
+ GP_0_7_FN, FN_IP0_11_10,
+ GP_0_6_FN, FN_IP0_9_8,
+ GP_0_5_FN, FN_A19,
+ GP_0_4_FN, FN_A18,
+ GP_0_3_FN, FN_A17,
+ GP_0_2_FN, FN_IP0_7_6,
+ GP_0_1_FN, FN_AVS2,
+ GP_0_0_FN, FN_AVS1 }
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xfffc0008, 32, 1) {
+ GP_1_31_FN, FN_IP5_23_21,
+ GP_1_30_FN, FN_IP5_20_17,
+ GP_1_29_FN, FN_IP5_16_15,
+ GP_1_28_FN, FN_IP5_14_13,
+ GP_1_27_FN, FN_IP5_12_11,
+ GP_1_26_FN, FN_IP5_10_9,
+ GP_1_25_FN, FN_IP5_8,
+ GP_1_24_FN, FN_IP5_7,
+ GP_1_23_FN, FN_IP5_6,
+ GP_1_22_FN, FN_IP5_5,
+ GP_1_21_FN, FN_IP5_4,
+ GP_1_20_FN, FN_IP5_3,
+ GP_1_19_FN, FN_IP5_2_0,
+ GP_1_18_FN, FN_IP4_31_29,
+ GP_1_17_FN, FN_IP4_28,
+ GP_1_16_FN, FN_IP4_27,
+ GP_1_15_FN, FN_IP4_26,
+ GP_1_14_FN, FN_IP4_25,
+ GP_1_13_FN, FN_IP4_24,
+ GP_1_12_FN, FN_IP4_23,
+ GP_1_11_FN, FN_IP4_22_20,
+ GP_1_10_FN, FN_IP4_19_17,
+ GP_1_9_FN, FN_IP4_16,
+ GP_1_8_FN, FN_IP4_15,
+ GP_1_7_FN, FN_IP4_14,
+ GP_1_6_FN, FN_IP4_13,
+ GP_1_5_FN, FN_IP4_12,
+ GP_1_4_FN, FN_IP4_11,
+ GP_1_3_FN, FN_IP4_10_8,
+ GP_1_2_FN, FN_IP4_7_5,
+ GP_1_1_FN, FN_IP4_4_2,
+ GP_1_0_FN, FN_IP4_1_0 }
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xfffc000c, 32, 1) {
+ GP_2_31_FN, FN_IP10_28_26,
+ GP_2_30_FN, FN_IP10_25_24,
+ GP_2_29_FN, FN_IP10_23_21,
+ GP_2_28_FN, FN_IP10_20_18,
+ GP_2_27_FN, FN_IP10_17_15,
+ GP_2_26_FN, FN_IP10_14_12,
+ GP_2_25_FN, FN_IP10_11_9,
+ GP_2_24_FN, FN_IP10_8_6,
+ GP_2_23_FN, FN_IP10_5_3,
+ GP_2_22_FN, FN_IP10_2_0,
+ GP_2_21_FN, FN_IP9_29_28,
+ GP_2_20_FN, FN_IP9_27_26,
+ GP_2_19_FN, FN_IP9_25_24,
+ GP_2_18_FN, FN_IP9_23_22,
+ GP_2_17_FN, FN_IP9_21_19,
+ GP_2_16_FN, FN_IP9_18_16,
+ GP_2_15_FN, FN_IP9_15_14,
+ GP_2_14_FN, FN_IP9_13_12,
+ GP_2_13_FN, FN_IP9_11_10,
+ GP_2_12_FN, FN_IP9_9_8,
+ GP_2_11_FN, FN_IP9_7,
+ GP_2_10_FN, FN_IP9_6,
+ GP_2_9_FN, FN_IP9_5,
+ GP_2_8_FN, FN_IP9_4,
+ GP_2_7_FN, FN_IP9_3_2,
+ GP_2_6_FN, FN_IP9_1_0,
+ GP_2_5_FN, FN_IP8_30_28,
+ GP_2_4_FN, FN_IP8_27_25,
+ GP_2_3_FN, FN_IP8_24_23,
+ GP_2_2_FN, FN_IP8_22_21,
+ GP_2_1_FN, FN_IP8_20,
+ GP_2_0_FN, FN_IP5_27_24 }
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xfffc0010, 32, 1) {
+ GP_3_31_FN, FN_IP6_3_2,
+ GP_3_30_FN, FN_IP6_1_0,
+ GP_3_29_FN, FN_IP5_30_29,
+ GP_3_28_FN, FN_IP5_28,
+ GP_3_27_FN, FN_IP1_24_23,
+ GP_3_26_FN, FN_IP1_22_21,
+ GP_3_25_FN, FN_IP1_20_19,
+ GP_3_24_FN, FN_IP7_26_25,
+ GP_3_23_FN, FN_IP7_24_23,
+ GP_3_22_FN, FN_IP7_22_21,
+ GP_3_21_FN, FN_IP7_20_19,
+ GP_3_20_FN, FN_IP7_30_29,
+ GP_3_19_FN, FN_IP7_28_27,
+ GP_3_18_FN, FN_IP7_18_17,
+ GP_3_17_FN, FN_IP7_16_15,
+ GP_3_16_FN, FN_IP12_17_15,
+ GP_3_15_FN, FN_IP12_14_12,
+ GP_3_14_FN, FN_IP12_11_9,
+ GP_3_13_FN, FN_IP12_8_6,
+ GP_3_12_FN, FN_IP12_5_3,
+ GP_3_11_FN, FN_IP12_2_0,
+ GP_3_10_FN, FN_IP11_29_27,
+ GP_3_9_FN, FN_IP11_26_24,
+ GP_3_8_FN, FN_IP11_23_21,
+ GP_3_7_FN, FN_IP11_20_18,
+ GP_3_6_FN, FN_IP11_17_15,
+ GP_3_5_FN, FN_IP11_14_12,
+ GP_3_4_FN, FN_IP11_11_9,
+ GP_3_3_FN, FN_IP11_8_6,
+ GP_3_2_FN, FN_IP11_5_3,
+ GP_3_1_FN, FN_IP11_2_0,
+ GP_3_0_FN, FN_IP10_31_29 }
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xfffc0014, 32, 1) {
+ GP_4_31_FN, FN_IP8_19,
+ GP_4_30_FN, FN_IP8_18,
+ GP_4_29_FN, FN_IP8_17_16,
+ GP_4_28_FN, FN_IP0_2_0,
+ GP_4_27_FN, FN_USB_PENC1,
+ GP_4_26_FN, FN_USB_PENC0,
+ GP_4_25_FN, FN_IP8_15_12,
+ GP_4_24_FN, FN_IP8_11_8,
+ GP_4_23_FN, FN_IP8_7_4,
+ GP_4_22_FN, FN_IP8_3_0,
+ GP_4_21_FN, FN_IP2_3_0,
+ GP_4_20_FN, FN_IP1_28_25,
+ GP_4_19_FN, FN_IP2_15_12,
+ GP_4_18_FN, FN_IP2_11_8,
+ GP_4_17_FN, FN_IP2_7_4,
+ GP_4_16_FN, FN_IP7_14_13,
+ GP_4_15_FN, FN_IP7_12_10,
+ GP_4_14_FN, FN_IP7_9_7,
+ GP_4_13_FN, FN_IP7_6_4,
+ GP_4_12_FN, FN_IP7_3_2,
+ GP_4_11_FN, FN_IP7_1_0,
+ GP_4_10_FN, FN_IP6_30_29,
+ GP_4_9_FN, FN_IP6_26_25,
+ GP_4_8_FN, FN_IP6_24_23,
+ GP_4_7_FN, FN_IP6_22_20,
+ GP_4_6_FN, FN_IP6_19_18,
+ GP_4_5_FN, FN_IP6_17_15,
+ GP_4_4_FN, FN_IP6_14_12,
+ GP_4_3_FN, FN_IP6_11_9,
+ GP_4_2_FN, FN_IP6_8,
+ GP_4_1_FN, FN_IP6_7_6,
+ GP_4_0_FN, FN_IP6_5_4 }
+ },
+ { PINMUX_CFG_REG("GPSR5", 0xfffc0018, 32, 1) {
+ GP_5_31_FN, FN_IP3_5,
+ GP_5_30_FN, FN_IP3_4,
+ GP_5_29_FN, FN_IP3_3,
+ GP_5_28_FN, FN_IP2_27,
+ GP_5_27_FN, FN_IP2_26,
+ GP_5_26_FN, FN_IP2_25,
+ GP_5_25_FN, FN_IP2_24,
+ GP_5_24_FN, FN_IP2_23,
+ GP_5_23_FN, FN_IP2_22,
+ GP_5_22_FN, FN_IP3_28,
+ GP_5_21_FN, FN_IP3_27,
+ GP_5_20_FN, FN_IP3_23,
+ GP_5_19_FN, FN_EX_WAIT0,
+ GP_5_18_FN, FN_WE1,
+ GP_5_17_FN, FN_WE0,
+ GP_5_16_FN, FN_RD,
+ GP_5_15_FN, FN_A16,
+ GP_5_14_FN, FN_A15,
+ GP_5_13_FN, FN_A14,
+ GP_5_12_FN, FN_A13,
+ GP_5_11_FN, FN_A12,
+ GP_5_10_FN, FN_A11,
+ GP_5_9_FN, FN_A10,
+ GP_5_8_FN, FN_A9,
+ GP_5_7_FN, FN_A8,
+ GP_5_6_FN, FN_A7,
+ GP_5_5_FN, FN_A6,
+ GP_5_4_FN, FN_A5,
+ GP_5_3_FN, FN_A4,
+ GP_5_2_FN, FN_A3,
+ GP_5_1_FN, FN_A2,
+ GP_5_0_FN, FN_A1 }
+ },
+ { PINMUX_CFG_REG("GPSR6", 0xfffc001c, 32, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_6_8_FN, FN_IP3_20,
+ GP_6_7_FN, FN_IP3_19,
+ GP_6_6_FN, FN_IP3_18,
+ GP_6_5_FN, FN_IP3_17,
+ GP_6_4_FN, FN_IP3_16,
+ GP_6_3_FN, FN_IP3_15,
+ GP_6_2_FN, FN_IP3_8,
+ GP_6_1_FN, FN_IP3_7,
+ GP_6_0_FN, FN_IP3_6 }
+ },
+
+ { PINMUX_CFG_REG_VAR("IPSR0", 0xfffc0020, 32,
+ 1, 3, 2, 1, 2, 4, 3, 2, 2, 2, 2, 2, 3, 3) {
+ /* IP0_31 [1] */
+ 0, 0,
+ /* IP0_30_28 [3] */
+ FN_RD_WR, FN_FWE, FN_ATAG0, FN_VI1_R7,
+ FN_HRTS1, FN_RX4_C, 0, 0,
+ /* IP0_27_26 [2] */
+ FN_CS1_A26, FN_HSPI_TX2, FN_SDSELF_B, 0,
+ /* IP0_25 [1] */
+ FN_CS0, FN_HSPI_CS2_B,
+ /* IP0_24_23 [2] */
+ FN_CLKOUT, FN_TX3C_IRDA_TX_C, FN_PWM0_B, 0,
+ /* IP0_22_19 [4] */
+ FN_A25, FN_SD1_WP, FN_MMC0_D5, FN_FD5,
+ FN_HSPI_RX2, FN_VI1_R3, FN_TX5_B, FN_SSI_SDATA7_B,
+ FN_CTS0_B, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP0_18_16 [3] */
+ FN_A24, FN_SD1_CD, FN_MMC0_D4, FN_FD4,
+ FN_HSPI_CS2, FN_VI1_R2, FN_SSI_WS78_B, 0,
+ /* IP0_15_14 [2] */
+ FN_A23, FN_FCLE, FN_HSPI_CLK2, FN_VI1_R1,
+ /* IP0_13_12 [2] */
+ FN_A22, FN_RX5_D, FN_HSPI_RX2_B, FN_VI1_R0,
+ /* IP0_11_10 [2] */
+ FN_A21, FN_SCK5_D, FN_HSPI_CLK2_B, 0,
+ /* IP0_9_8 [2] */
+ FN_A20, FN_TX5_D, FN_HSPI_TX2_B, 0,
+ /* IP0_7_6 [2] */
+ FN_A0, FN_SD1_DAT3, FN_MMC0_D3, FN_FD3,
+ /* IP0_5_3 [3] */
+ FN_BS, FN_SD1_DAT2, FN_MMC0_D2, FN_FD2,
+ FN_ATADIR0, FN_SDSELF, FN_HCTS1, FN_TX4_C,
+ /* IP0_2_0 [3] */
+ FN_USB_PENC2, FN_SCK0, FN_PWM1, FN_PWMFSW0,
+ FN_SCIF_CLK, FN_TCLK0_C, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR1", 0xfffc0024, 32,
+ 3, 4, 2, 2, 2, 4, 4, 4, 3, 2, 2) {
+ /* IP1_31_29 [3] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_28_25 [4] */
+ FN_HTX0, FN_TX1, FN_SDATA, FN_CTS0_C,
+ FN_SUB_TCK, FN_CC5_STATE2, FN_CC5_STATE10, FN_CC5_STATE18,
+ FN_CC5_STATE26, FN_CC5_STATE34, 0, 0,
+ 0, 0, 0, 0,
+ /* IP1_24_23 [2] */
+ FN_MLB_DAT, FN_PWM4, FN_RX4, 0,
+ /* IP1_22_21 [2] */
+ FN_MLB_SIG, FN_PWM3, FN_TX4, 0,
+ /* IP1_20_19 [2] */
+ FN_MLB_CLK, FN_PWM2, FN_SCK4, 0,
+ /* IP1_18_15 [4] */
+ FN_EX_CS5, FN_SD1_DAT1, FN_MMC0_D1, FN_FD1,
+ FN_ATAWR0, FN_VI1_R6, FN_HRX1, FN_RX2_E,
+ FN_RX0_B, FN_SSI_WS9, 0, 0,
+ 0, 0, 0, 0,
+ /* IP1_14_11 [4] */
+ FN_EX_CS4, FN_SD1_DAT0, FN_MMC0_D0, FN_FD0,
+ FN_ATARD0, FN_VI1_R5, FN_SCK5_B, FN_HTX1,
+ FN_TX2_E, FN_TX0_B, FN_SSI_SCK9, 0,
+ 0, 0, 0, 0,
+ /* IP1_10_7 [4] */
+ FN_EX_CS3, FN_SD1_CMD, FN_MMC0_CMD, FN_FRE,
+ FN_ATACS10, FN_VI1_R4, FN_RX5_B, FN_HSCK1,
+ FN_SSI_SDATA8_B, FN_RTS0_B_TANS_B, FN_SSI_SDATA9, 0,
+ 0, 0, 0, 0,
+ /* IP1_6_4 [3] */
+ FN_EX_CS2, FN_SD1_CLK, FN_MMC0_CLK, FN_FALE,
+ FN_ATACS00, 0, 0, 0,
+ /* IP1_3_2 [2] */
+ FN_EX_CS1, FN_MMC0_D7, FN_FD7, 0,
+ /* IP1_1_0 [2] */
+ FN_EX_CS0, FN_RX3_C_IRDA_RX_C, FN_MMC0_D6, FN_FD6 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR2", 0xfffc0028, 32,
+ 1, 3, 1, 1, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4) {
+ /* IP2_31 [1] */
+ 0, 0,
+ /* IP2_30_28 [3] */
+ FN_DU0_DG0, FN_LCDOUT8, FN_DREQ1, FN_SCL2,
+ FN_AUDATA2, 0, 0, 0,
+ /* IP2_27 [1] */
+ FN_DU0_DR7, FN_LCDOUT7,
+ /* IP2_26 [1] */
+ FN_DU0_DR6, FN_LCDOUT6,
+ /* IP2_25 [1] */
+ FN_DU0_DR5, FN_LCDOUT5,
+ /* IP2_24 [1] */
+ FN_DU0_DR4, FN_LCDOUT4,
+ /* IP2_23 [1] */
+ FN_DU0_DR3, FN_LCDOUT3,
+ /* IP2_22 [1] */
+ FN_DU0_DR2, FN_LCDOUT2,
+ /* IP2_21_19 [3] */
+ FN_DU0_DR1, FN_LCDOUT1, FN_DACK0, FN_DRACK0,
+ FN_GPS_SIGN_B, FN_AUDATA1, FN_RX5_C, 0,
+ /* IP2_18_16 [3] */
+ FN_DU0_DR0, FN_LCDOUT0, FN_DREQ0, FN_GPS_CLK_B,
+ FN_AUDATA0, FN_TX5_C, 0, 0,
+ /* IP2_15_12 [4] */
+ FN_HRTS0, FN_RTS1_TANS, FN_MDATA, FN_TX0_C,
+ FN_SUB_TMS, FN_CC5_STATE1, FN_CC5_STATE9, FN_CC5_STATE17,
+ FN_CC5_STATE25, FN_CC5_STATE33, 0, 0,
+ 0, 0, 0, 0,
+ /* IP2_11_8 [4] */
+ FN_HCTS0, FN_CTS1, FN_STM, FN_PWM0_D,
+ FN_RX0_C, FN_SCIF_CLK_C, FN_SUB_TRST, FN_TCLK1_B,
+ FN_CC5_OSCOUT, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP2_7_4 [4] */
+ FN_HSCK0, FN_SCK1, FN_MTS, FN_PWM5,
+ FN_SCK0_C, FN_SSI_SDATA9_B, FN_SUB_TDO, FN_CC5_STATE0,
+ FN_CC5_STATE8, FN_CC5_STATE16, FN_CC5_STATE24, FN_CC5_STATE32,
+ 0, 0, 0, 0,
+ /* IP2_3_0 [4] */
+ FN_HRX0, FN_RX1, FN_SCKZ, FN_RTS0_C_TANS_C,
+ FN_SUB_TDI, FN_CC5_STATE3, FN_CC5_STATE11, FN_CC5_STATE19,
+ FN_CC5_STATE27, FN_CC5_STATE35, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR3", 0xfffc002c, 32,
+ 3, 1, 1, 3, 1, 2, 1, 1, 1, 1, 1,
+ 1, 3, 3, 1, 1, 1, 1, 1, 1, 3) {
+ /* IP3_31_29 [3] */
+ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_QCPV_QDE, FN_CAN1_TX, FN_TX2_C,
+ FN_SCL2_C, FN_REMOCON, 0, 0,
+ /* IP3_28 [1] */
+ FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE,
+ /* IP3_27 [1] */
+ FN_DU0_EXHSYNC_DU0_HSYNC, FN_QSTH_QHS,
+ /* IP3_26_24 [3] */
+ FN_DU0_DOTCLKOUT1, FN_QSTVB_QVE, FN_RX3_D_IRDA_RX_D, FN_SDA3_B,
+ FN_SDA2_C, FN_DACK0_B, FN_DRACK0_B, 0,
+ /* IP3_23 [1] */
+ FN_DU0_DOTCLKOUT0, FN_QCLK,
+ /* IP3_22_21 [2] */
+ FN_DU0_DOTCLKIN, FN_QSTVA_QVS, FN_TX3_D_IRDA_TX_D, FN_SCL3_B,
+ /* IP3_20 [1] */
+ FN_DU0_DB7, FN_LCDOUT23,
+ /* IP3_19 [1] */
+ FN_DU0_DB6, FN_LCDOUT22,
+ /* IP3_18 [1] */
+ FN_DU0_DB5, FN_LCDOUT21,
+ /* IP3_17 [1] */
+ FN_DU0_DB4, FN_LCDOUT20,
+ /* IP3_16 [1] */
+ FN_DU0_DB3, FN_LCDOUT19,
+ /* IP3_15 [1] */
+ FN_DU0_DB2, FN_LCDOUT18,
+ /* IP3_14_12 [3] */
+ FN_DU0_DB1, FN_LCDOUT17, FN_EX_WAIT2, FN_SDA1,
+ FN_GPS_MAG_B, FN_AUDATA5, FN_SCK5_C, 0,
+ /* IP3_11_9 [3] */
+ FN_DU0_DB0, FN_LCDOUT16, FN_EX_WAIT1, FN_SCL1,
+ FN_TCLK1, FN_AUDATA4, 0, 0,
+ /* IP3_8 [1] */
+ FN_DU0_DG7, FN_LCDOUT15,
+ /* IP3_7 [1] */
+ FN_DU0_DG6, FN_LCDOUT14,
+ /* IP3_6 [1] */
+ FN_DU0_DG5, FN_LCDOUT13,
+ /* IP3_5 [1] */
+ FN_DU0_DG4, FN_LCDOUT12,
+ /* IP3_4 [1] */
+ FN_DU0_DG3, FN_LCDOUT11,
+ /* IP3_3 [1] */
+ FN_DU0_DG2, FN_LCDOUT10,
+ /* IP3_2_0 [3] */
+ FN_DU0_DG1, FN_LCDOUT9, FN_DACK1, FN_SDA2,
+ FN_AUDATA3, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR4", 0xfffc0030, 32,
+ 3, 1, 1, 1, 1, 1, 1, 3, 3,
+ 1, 1, 1, 1, 1, 1, 3, 3, 3, 2) {
+ /* IP4_31_29 [3] */
+ FN_DU1_DB0, FN_VI2_DATA4_VI2_B4, FN_SCL2_B, FN_SD3_DAT0,
+ FN_TX5, FN_SCK0_D, 0, 0,
+ /* IP4_28 [1] */
+ FN_DU1_DG7, FN_VI2_R3,
+ /* IP4_27 [1] */
+ FN_DU1_DG6, FN_VI2_R2,
+ /* IP4_26 [1] */
+ FN_DU1_DG5, FN_VI2_R1,
+ /* IP4_25 [1] */
+ FN_DU1_DG4, FN_VI2_R0,
+ /* IP4_24 [1] */
+ FN_DU1_DG3, FN_VI2_G7,
+ /* IP4_23 [1] */
+ FN_DU1_DG2, FN_VI2_G6,
+ /* IP4_22_20 [3] */
+ FN_DU1_DG1, FN_VI2_DATA3_VI2_B3, FN_SDA1_B, FN_SD3_DAT3,
+ FN_SCK5, FN_AUDATA7, FN_RX0_D, 0,
+ /* IP4_19_17 [3] */
+ FN_DU1_DG0, FN_VI2_DATA2_VI2_B2, FN_SCL1_B, FN_SD3_DAT2,
+ FN_SCK3_E, FN_AUDATA6, FN_TX0_D, 0,
+ /* IP4_16 [1] */
+ FN_DU1_DR7, FN_VI2_G5,
+ /* IP4_15 [1] */
+ FN_DU1_DR6, FN_VI2_G4,
+ /* IP4_14 [1] */
+ FN_DU1_DR5, FN_VI2_G3,
+ /* IP4_13 [1] */
+ FN_DU1_DR4, FN_VI2_G2,
+ /* IP4_12 [1] */
+ FN_DU1_DR3, FN_VI2_G1,
+ /* IP4_11 [1] */
+ FN_DU1_DR2, FN_VI2_G0,
+ /* IP4_10_8 [3] */
+ FN_DU1_DR1, FN_VI2_DATA1_VI2_B1, FN_PWM0, FN_SD3_CMD,
+ FN_RX3_E_IRDA_RX_E, FN_AUDSYNC, FN_CTS0_D, 0,
+ /* IP4_7_5 [3] */
+ FN_DU1_DR0, FN_VI2_DATA0_VI2_B0, FN_PWM6, FN_SD3_CLK,
+ FN_TX3_E_IRDA_TX_E, FN_AUDCK, FN_PWMFSW0_B, 0,
+ /* IP4_4_2 [3] */
+ FN_DU0_CDE, FN_QPOLB, FN_CAN1_RX, FN_RX2_C,
+ FN_DREQ0_B, FN_SSI_SCK78_B, FN_SCK0_B, 0,
+ /* IP4_1_0 [2] */
+ FN_DU0_DISP, FN_QPOLA, FN_CAN_CLK_C, FN_SCK2_C }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR5", 0xfffc0034, 32,
+ 1, 2, 1, 4, 3, 4, 2, 2,
+ 2, 2, 1, 1, 1, 1, 1, 1, 3) {
+ /* IP5_31 [1] */
+ 0, 0,
+ /* IP5_30_29 [2] */
+ FN_AUDIO_CLKB, FN_USB_OVC2, FN_CAN_DEBUGOUT0, FN_MOUT0,
+ /* IP5_28 [1] */
+ FN_AUDIO_CLKA, FN_CAN_TXCLK,
+ /* IP5_27_24 [4] */
+ FN_DU1_CDE, FN_VI2_DATA7_VI2_B7, FN_RX3_B_IRDA_RX_B, FN_SD3_WP,
+ FN_HSPI_RX1, FN_VI1_FIELD, FN_VI3_FIELD, FN_AUDIO_CLKOUT,
+ FN_RX2_D, FN_GPS_CLK_C, FN_GPS_CLK_D, 0,
+ 0, 0, 0, 0,
+ /* IP5_23_21 [3] */
+ FN_DU1_DISP, FN_VI2_DATA6_VI2_B6, FN_TCLK0, FN_QSTVA_B_QVS_B,
+ FN_HSPI_CLK1, FN_SCK2_D, FN_AUDIO_CLKOUT_B, FN_GPS_MAG_D,
+ /* IP5_20_17 [4] */
+ FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, FN_VI2_CLK, FN_TX3_B_IRDA_TX_B,
+ FN_SD3_CD, FN_HSPI_TX1, FN_VI1_CLKENB, FN_VI3_CLKENB,
+ FN_AUDIO_CLKC, FN_TX2_D, FN_SPEEDIN, FN_GPS_SIGN_D, 0,
+ 0, 0, 0, 0,
+ /* IP5_16_15 [2] */
+ FN_DU1_EXVSYNC_DU1_VSYNC, FN_VI2_VSYNC, FN_VI3_VSYNC, 0,
+ /* IP5_14_13 [2] */
+ FN_DU1_EXHSYNC_DU1_HSYNC, FN_VI2_HSYNC, FN_VI3_HSYNC, 0,
+ /* IP5_12_11 [2] */
+ FN_DU1_DOTCLKOUT, FN_VI2_FIELD, FN_SDA1_D, 0,
+ /* IP5_10_9 [2] */
+ FN_DU1_DOTCLKIN, FN_VI2_CLKENB, FN_HSPI_CS1, FN_SCL1_D,
+ /* IP5_8 [1] */
+ FN_DU1_DB7, FN_SDA2_D,
+ /* IP5_7 [1] */
+ FN_DU1_DB6, FN_SCL2_D,
+ /* IP5_6 [1] */
+ FN_DU1_DB5, FN_VI2_R7,
+ /* IP5_5 [1] */
+ FN_DU1_DB4, FN_VI2_R6,
+ /* IP5_4 [1] */
+ FN_DU1_DB3, FN_VI2_R5,
+ /* IP5_3 [1] */
+ FN_DU1_DB2, FN_VI2_R4,
+ /* IP5_2_0 [3] */
+ FN_DU1_DB1, FN_VI2_DATA5_VI2_B5, FN_SDA2_B, FN_SD3_DAT1,
+ FN_RX5, FN_RTS0_D_TANS_D, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR6", 0xfffc0038, 32,
+ 1, 2, 2, 2, 2, 3, 2, 3, 3, 3, 1, 2, 2, 2, 2) {
+ /* IP6_31 [1] */
+ 0, 0,
+ /* IP6_30_29 [2] */
+ FN_SSI_SCK6, FN_ADICHS0, FN_CAN0_TX, FN_IERX_B,
+ /* IP6_28_27 [2] */
+ 0, 0, 0, 0,
+ /* IP6_26_25 [2] */
+ FN_SSI_SDATA5, FN_ADIDATA, FN_CAN_DEBUGOUT12, FN_RX3_IRDA_RX,
+ /* IP6_24_23 [2] */
+ FN_SSI_WS5, FN_ADICS_SAMP, FN_CAN_DEBUGOUT11, FN_TX3_IRDA_TX,
+ /* IP6_22_20 [3] */
+ FN_SSI_SCK5, FN_ADICLK, FN_CAN_DEBUGOUT10, FN_SCK3,
+ FN_TCLK0_D, 0, 0, 0,
+ /* IP6_19_18 [2] */
+ FN_SSI_SDATA4, FN_CAN_DEBUGOUT9, FN_SSI_SDATA9_C, 0,
+ /* IP6_17_15 [3] */
+ FN_SSI_SDATA3, FN_PWM0_C, FN_CAN_DEBUGOUT8, FN_CAN_CLK_B,
+ FN_IECLK, FN_SCIF_CLK_B, FN_TCLK0_B, 0,
+ /* IP6_14_12 [3] */
+ FN_SSI_WS34, FN_CAN_DEBUGOUT7, FN_CAN0_RX_B, FN_IETX,
+ FN_SSI_WS9_C, 0, 0, 0,
+ /* IP6_11_9 [3] */
+ FN_SSI_SCK34, FN_CAN_DEBUGOUT6, FN_CAN0_TX_B, FN_IERX,
+ FN_SSI_SCK9_C, 0, 0, 0,
+ /* IP6_8 [1] */
+ FN_SSI_SDATA2, FN_CAN_DEBUGOUT5,
+ /* IP6_7_6 [2] */
+ FN_SSI_SDATA1, FN_CAN_DEBUGOUT4, FN_MOUT6, 0,
+ /* IP6_5_4 [2] */
+ FN_SSI_SDATA0, FN_CAN_DEBUGOUT3, FN_MOUT5, 0,
+ /* IP6_3_2 [2] */
+ FN_SSI_WS0129, FN_CAN_DEBUGOUT2, FN_MOUT2, 0,
+ /* IP6_1_0 [2] */
+ FN_SSI_SCK0129, FN_CAN_DEBUGOUT1, FN_MOUT1, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR7", 0xfffc003c, 32,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2) {
+ /* IP7_31 [1] */
+ 0, 0,
+ /* IP7_30_29 [2] */
+ FN_SD0_WP, FN_DACK2, FN_CTS1_B, 0,
+ /* IP7_28_27 [2] */
+ FN_SD0_CD, FN_DREQ2, FN_RTS1_B_TANS_B, 0,
+ /* IP7_26_25 [2] */
+ FN_SD0_DAT3, FN_ATAWR1, FN_RX2_B, FN_CC5_TDI,
+ /* IP7_24_23 [2] */
+ FN_SD0_DAT2, FN_ATARD1, FN_TX2_B, FN_CC5_TCK,
+ /* IP7_22_21 [2] */
+ FN_SD0_DAT1, FN_ATAG1, FN_SCK2_B, FN_CC5_TMS,
+ /* IP7_20_19 [2] */
+ FN_SD0_DAT0, FN_ATADIR1, FN_RX1_B, FN_CC5_TRST,
+ /* IP7_18_17 [2] */
+ FN_SD0_CMD, FN_ATACS11, FN_TX1_B, FN_CC5_TDO,
+ /* IP7_16_15 [2] */
+ FN_SD0_CLK, FN_ATACS01, FN_SCK1_B, 0,
+ /* IP7_14_13 [2] */
+ FN_SSI_SDATA8, FN_VSP, FN_IRQ3_B, FN_HSPI_RX1_C,
+ /* IP7_12_10 [3] */
+ FN_SSI_SDATA7, FN_CAN_DEBUGOUT15, FN_IRQ2_B, FN_TCLK1_C,
+ FN_HSPI_TX1_C, 0, 0, 0,
+ /* IP7_9_7 [3] */
+ FN_SSI_WS78, FN_CAN_DEBUGOUT14, FN_IRQ1_B, FN_SSI_WS9_B,
+ FN_HSPI_CS1_C, 0, 0, 0,
+ /* IP7_6_4 [3] */
+ FN_SSI_SCK78, FN_CAN_DEBUGOUT13, FN_IRQ0_B, FN_SSI_SCK9_B,
+ FN_HSPI_CLK1_C, 0, 0, 0,
+ /* IP7_3_2 [2] */
+ FN_SSI_SDATA6, FN_ADICHS2, FN_CAN_CLK, FN_IECLK_B,
+ /* IP7_1_0 [2] */
+ FN_SSI_WS6, FN_ADICHS1, FN_CAN0_RX, FN_IETX_B }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR8", 0xfffc0040, 32,
+ 1, 3, 3, 2, 2, 1, 1, 1, 2, 4, 4, 4, 4) {
+ /* IP8_31 [1] */
+ 0, 0,
+ /* IP8_30_28 [3] */
+ FN_VI0_VSYNC, FN_VI0_DATA1_B_VI0_B1_B, FN_RTS1_C_TANS_C, FN_RX4_D,
+ FN_PWMFSW0_C, 0, 0, 0,
+ /* IP8_27_25 [3] */
+ FN_VI0_HSYNC, FN_VI0_DATA0_B_VI0_B0_B, FN_CTS1_C, FN_TX4_D,
+ FN_MMC1_CMD, FN_HSCK1_B, 0, 0,
+ /* IP8_24_23 [2] */
+ FN_VI0_FIELD, FN_RX1_C, FN_HRX1_B, 0,
+ /* IP8_22_21 [2] */
+ FN_VI0_CLKENB, FN_TX1_C, FN_HTX1_B, FN_MT1_SYNC,
+ /* IP8_20 [1] */
+ FN_VI0_CLK, FN_MMC1_CLK,
+ /* IP8_19 [1] */
+ FN_FMIN, FN_RDS_DATA,
+ /* IP8_18 [1] */
+ FN_BPFCLK, FN_PCMWE,
+ /* IP8_17_16 [2] */
+ FN_FMCLK, FN_RDS_CLK, FN_PCMOE, 0,
+ /* IP8_15_12 [4] */
+ FN_HSPI_RX0, FN_RX0, FN_CAN_STEP0, FN_AD_NCS,
+ FN_CC5_STATE7, FN_CC5_STATE15, FN_CC5_STATE23, FN_CC5_STATE31,
+ FN_CC5_STATE39, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP8_11_8 [4] */
+ FN_HSPI_TX0, FN_TX0, FN_CAN_DEBUG_HW_TRIGGER, FN_AD_DO,
+ FN_CC5_STATE6, FN_CC5_STATE14, FN_CC5_STATE22, FN_CC5_STATE30,
+ FN_CC5_STATE38, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP8_7_4 [4] */
+ FN_HSPI_CS0, FN_RTS0_TANS, FN_USB_OVC1, FN_AD_DI,
+ FN_CC5_STATE5, FN_CC5_STATE13, FN_CC5_STATE21, FN_CC5_STATE29,
+ FN_CC5_STATE37, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP8_3_0 [4] */
+ FN_HSPI_CLK0, FN_CTS0, FN_USB_OVC0, FN_AD_CLK,
+ FN_CC5_STATE4, FN_CC5_STATE12, FN_CC5_STATE20, FN_CC5_STATE28,
+ FN_CC5_STATE36, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR9", 0xfffc0044, 32,
+ 2, 2, 2, 2, 2, 3, 3, 2, 2,
+ 2, 2, 1, 1, 1, 1, 2, 2) {
+ /* IP9_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP9_29_28 [2] */
+ FN_VI0_G7, FN_ETH_RXD1, FN_SD2_DAT3_B, FN_ARM_TRACEDATA_9,
+ /* IP9_27_26 [2] */
+ FN_VI0_G6, FN_ETH_RXD0, FN_SD2_DAT2_B, FN_ARM_TRACEDATA_8,
+ /* IP9_25_24 [2] */
+ FN_VI0_G5, FN_ETH_RX_ER, FN_SD2_DAT1_B, FN_ARM_TRACEDATA_7,
+ /* IP9_23_22 [2] */
+ FN_VI0_G4, FN_ETH_TX_EN, FN_SD2_DAT0_B, FN_ARM_TRACEDATA_6,
+ /* IP9_21_19 [3] */
+ FN_VI0_G3, FN_ETH_CRS_DV, FN_MMC1_D7, FN_ARM_TRACEDATA_5,
+ FN_TS_SDAT0, 0, 0, 0,
+ /* IP9_18_16 [3] */
+ FN_VI0_G2, FN_ETH_TXD1, FN_MMC1_D6, FN_ARM_TRACEDATA_4,
+ FN_TS_SPSYNC0, 0, 0, 0,
+ /* IP9_15_14 [2] */
+ FN_VI0_G1, FN_SSI_WS78_C, FN_IRQ1, FN_ARM_TRACEDATA_3,
+ /* IP9_13_12 [2] */
+ FN_VI0_G0, FN_SSI_SCK78_C, FN_IRQ0, FN_ARM_TRACEDATA_2,
+ /* IP9_11_10 [2] */
+ FN_VI0_DATA7_VI0_B7, FN_MMC1_D5, FN_ARM_TRACEDATA_1, 0,
+ /* IP9_9_8 [2] */
+ FN_VI0_DATA6_VI0_B6, FN_MMC1_D4, FN_ARM_TRACEDATA_0, 0,
+ /* IP9_7 [1] */
+ FN_VI0_DATA5_VI0_B5, FN_MMC1_D3,
+ /* IP9_6 [1] */
+ FN_VI0_DATA4_VI0_B4, FN_MMC1_D2,
+ /* IP9_5 [1] */
+ FN_VI0_DATA3_VI0_B3, FN_MMC1_D1,
+ /* IP9_4 [1] */
+ FN_VI0_DATA2_VI0_B2, FN_MMC1_D0,
+ /* IP9_3_2 [2] */
+ FN_VI0_DATA1_VI0_B1, FN_HCTS1_B, FN_MT1_PWM, 0,
+ /* IP9_1_0 [2] */
+ FN_VI0_DATA0_VI0_B0, FN_HRTS1_B, FN_MT1_VCXO, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR10", 0xfffc0048, 32,
+ 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3) {
+ /* IP10_31_29 [3] */
+ FN_VI1_VSYNC, FN_AUDIO_CLKOUT_C, FN_SSI_WS4, FN_SIM_CLK,
+ FN_GPS_MAG_C, FN_SPV_TRST, FN_SCL3, 0,
+ /* IP10_28_26 [3] */
+ FN_VI1_HSYNC, FN_VI3_CLK, FN_SSI_SCK4, FN_GPS_SIGN_C,
+ FN_PWMFSW0_E, 0, 0, 0,
+ /* IP10_25_24 [2] */
+ FN_VI1_CLK, FN_SIM_D, FN_SDA3, 0,
+ /* IP10_23_21 [3] */
+ FN_VI0_R7, FN_ETH_MDIO, FN_DACK2_C, FN_HSPI_RX1_B,
+ FN_SCIF_CLK_D, FN_TRACECTL, FN_MT1_PEN, 0,
+ /* IP10_20_18 [3] */
+ FN_VI0_R6, FN_ETH_MDC, FN_DREQ2_C, FN_HSPI_TX1_B,
+ FN_TRACECLK, FN_MT1_BEN, FN_PWMFSW0_D, 0,
+ /* IP10_17_15 [3] */
+ FN_VI0_R5, FN_ETH_TXD0, FN_SD2_WP_B, FN_HSPI_CS1_B,
+ FN_ARM_TRACEDATA_15, FN_MT1_D, FN_TS_SDEN0, 0,
+ /* IP10_14_12 [3] */
+ FN_VI0_R4, FN_ETH_REFCLK, FN_SD2_CD_B, FN_HSPI_CLK1_B,
+ FN_ARM_TRACEDATA_14, FN_MT1_CLK, FN_TS_SCK0, 0,
+ /* IP10_11_9 [3] */
+ FN_VI0_R3, FN_ETH_MAGIC, FN_SD2_CMD_B, FN_IRQ3,
+ FN_ARM_TRACEDATA_13, 0, 0, 0,
+ /* IP10_8_6 [3] */
+ FN_VI0_R2, FN_ETH_LINK, FN_SD2_CLK_B, FN_IRQ2,
+ FN_ARM_TRACEDATA_12, 0, 0, 0,
+ /* IP10_5_3 [3] */
+ FN_VI0_R1, FN_SSI_SDATA8_C, FN_DACK1_B, FN_ARM_TRACEDATA_11,
+ FN_DACK0_C, FN_DRACK0_C, 0, 0,
+ /* IP10_2_0 [3] */
+ FN_VI0_R0, FN_SSI_SDATA7_C, FN_SCK1_C, FN_DREQ1_B,
+ FN_ARM_TRACEDATA_10, FN_DREQ0_C, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR11", 0xfffc004c, 32,
+ 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
+ /* IP11_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP11_29_27 [3] */
+ FN_VI1_G1, FN_VI3_DATA1, FN_SSI_SCK1, FN_TS_SDEN1,
+ FN_DACK2_B, FN_RX2, FN_HRTS0_B, 0,
+ /* IP11_26_24 [3] */
+ FN_VI1_G0, FN_VI3_DATA0, FN_DU1_DOTCLKOUT1, FN_TS_SCK1,
+ FN_DREQ2_B, FN_TX2, FN_SPA_TDO, FN_HCTS0_B,
+ /* IP11_23_21 [3] */
+ FN_VI1_DATA7_VI1_B7, FN_SD2_WP, FN_MT0_PWM, FN_SPA_TDI,
+ FN_HSPI_RX1_D, 0, 0, 0,
+ /* IP11_20_18 [3] */
+ FN_VI1_DATA6_VI1_B6, FN_SD2_CD, FN_MT0_VCXO, FN_SPA_TMS,
+ FN_HSPI_TX1_D, 0, 0, 0,
+ /* IP11_17_15 [3] */
+ FN_VI1_DATA5_VI1_B5, FN_SD2_CMD, FN_MT0_SYNC, FN_SPA_TCK,
+ FN_HSPI_CS1_D, FN_ADICHS2_B, 0, 0,
+ /* IP11_14_12 [3] */
+ FN_VI1_DATA4_VI1_B4, FN_SD2_CLK, FN_MT0_PEN, FN_SPA_TRST,
+ FN_HSPI_CLK1_D, FN_ADICHS1_B, 0, 0,
+ /* IP11_11_9 [3] */
+ FN_VI1_DATA3_VI1_B3, FN_SD2_DAT3, FN_MT0_BEN, FN_SPV_TDO,
+ FN_ADICHS0_B, 0, 0, 0,
+ /* IP11_8_6 [3] */
+ FN_VI1_DATA2_VI1_B2, FN_SD2_DAT2, FN_MT0_D, FN_SPVTDI,
+ FN_ADIDATA_B, 0, 0, 0,
+ /* IP11_5_3 [3] */
+ FN_VI1_DATA1_VI1_B1, FN_SD2_DAT1, FN_MT0_CLK, FN_SPV_TMS,
+ FN_ADICS_B_SAMP_B, 0, 0, 0,
+ /* IP11_2_0 [3] */
+ FN_VI1_DATA0_VI1_B0, FN_SD2_DAT0, FN_SIM_RST, FN_SPV_TCK,
+ FN_ADICLK_B, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR12", 0xfffc0050, 32,
+ 4, 4, 4, 2, 3, 3, 3, 3, 3, 3) {
+ /* IP12_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_27_24 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_23_20 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_19_18 [2] */
+ 0, 0, 0, 0,
+ /* IP12_17_15 [3] */
+ FN_VI1_G7, FN_VI3_DATA7, FN_GPS_MAG, FN_FCE,
+ FN_SCK4_B, 0, 0, 0,
+ /* IP12_14_12 [3] */
+ FN_VI1_G6, FN_VI3_DATA6, FN_GPS_SIGN, FN_FRB,
+ FN_RX4_B, FN_SIM_CLK_B, 0, 0,
+ /* IP12_11_9 [3] */
+ FN_VI1_G5, FN_VI3_DATA5, FN_GPS_CLK, FN_FSE,
+ FN_TX4_B, FN_SIM_D_B, 0, 0,
+ /* IP12_8_6 [3] */
+ FN_VI1_G4, FN_VI3_DATA4, FN_SSI_WS2, FN_SDA1_C,
+ FN_SIM_RST_B, FN_HRX0_B, 0, 0,
+ /* IP12_5_3 [3] */
+ FN_VI1_G3, FN_VI3_DATA3, FN_SSI_SCK2, FN_TS_SDAT1,
+ FN_SCL1_C, FN_HTX0_B, 0, 0,
+ /* IP12_2_0 [3] */
+ FN_VI1_G2, FN_VI3_DATA2, FN_SSI_WS1, FN_TS_SPSYNC1,
+ FN_SCK2, FN_HSCK0_B, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL", 0xfffc0090, 32,
+ 2, 2, 3, 3, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 2, 1, 2) {
+ /* SEL_SCIF5 [2] */
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1, FN_SEL_SCIF5_2, FN_SEL_SCIF5_3,
+ /* SEL_SCIF4 [2] */
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, FN_SEL_SCIF4_3,
+ /* SEL_SCIF3 [3] */
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
+ FN_SEL_SCIF3_4, 0, 0, 0,
+ /* SEL_SCIF2 [3] */
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2, FN_SEL_SCIF2_3,
+ FN_SEL_SCIF2_4, 0, 0, 0,
+ /* SEL_SCIF1 [2] */
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, 0,
+ /* SEL_SCIF0 [2] */
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3,
+ /* SEL_SSI9 [2] */
+ FN_SEL_SSI9_0, FN_SEL_SSI9_1, FN_SEL_SSI9_2, 0,
+ /* SEL_SSI8 [2] */
+ FN_SEL_SSI8_0, FN_SEL_SSI8_1, FN_SEL_SSI8_2, 0,
+ /* SEL_SSI7 [2] */
+ FN_SEL_SSI7_0, FN_SEL_SSI7_1, FN_SEL_SSI7_2, 0,
+ /* SEL_VI0 [1] */
+ FN_SEL_VI0_0, FN_SEL_VI0_1,
+ /* SEL_SD2 [1] */
+ FN_SEL_SD2_0, FN_SEL_SD2_1,
+ /* SEL_INT3 [1] */
+ FN_SEL_INT3_0, FN_SEL_INT3_1,
+ /* SEL_INT2 [1] */
+ FN_SEL_INT2_0, FN_SEL_INT2_1,
+ /* SEL_INT1 [1] */
+ FN_SEL_INT1_0, FN_SEL_INT1_1,
+ /* SEL_INT0 [1] */
+ FN_SEL_INT0_0, FN_SEL_INT0_1,
+ /* SEL_IE [1] */
+ FN_SEL_IE_0, FN_SEL_IE_1,
+ /* SEL_EXBUS2 [2] */
+ FN_SEL_EXBUS2_0, FN_SEL_EXBUS2_1, FN_SEL_EXBUS2_2, 0,
+ /* SEL_EXBUS1 [1] */
+ FN_SEL_EXBUS1_0, FN_SEL_EXBUS1_1,
+ /* SEL_EXBUS0 [2] */
+ FN_SEL_EXBUS0_0, FN_SEL_EXBUS0_1, FN_SEL_EXBUS0_2, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xfffc0094, 32,
+ 2, 2, 2, 2, 1, 1, 1, 3, 1,
+ 2, 2, 2, 2, 1, 1, 2, 1, 2, 2) {
+ /* SEL_TMU1 [2] */
+ FN_SEL_TMU1_0, FN_SEL_TMU1_1, FN_SEL_TMU1_2, 0,
+ /* SEL_TMU0 [2] */
+ FN_SEL_TMU0_0, FN_SEL_TMU0_1, FN_SEL_TMU0_2, FN_SEL_TMU0_3,
+ /* SEL_SCIF [2] */
+ FN_SEL_SCIF_0, FN_SEL_SCIF_1, FN_SEL_SCIF_2, FN_SEL_SCIF_3,
+ /* SEL_CANCLK [2] */
+ FN_SEL_CANCLK_0, FN_SEL_CANCLK_1, FN_SEL_CANCLK_2, 0,
+ /* SEL_CAN0 [1] */
+ FN_SEL_CAN0_0, FN_SEL_CAN0_1,
+ /* SEL_HSCIF1 [1] */
+ FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1,
+ /* SEL_HSCIF0 [1] */
+ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1,
+ /* SEL_PWMFSW [3] */
+ FN_SEL_PWMFSW_0, FN_SEL_PWMFSW_1, FN_SEL_PWMFSW_2,
+ FN_SEL_PWMFSW_3, FN_SEL_PWMFSW_4, 0, 0, 0,
+ /* SEL_ADI [1] */
+ FN_SEL_ADI_0, FN_SEL_ADI_1,
+ /* [2] */
+ 0, 0, 0, 0,
+ /* [2] */
+ 0, 0, 0, 0,
+ /* [2] */
+ 0, 0, 0, 0,
+ /* SEL_GPS [2] */
+ FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, FN_SEL_GPS_3,
+ /* SEL_SIM [1] */
+ FN_SEL_SIM_0, FN_SEL_SIM_1,
+ /* SEL_HSPI2 [1] */
+ FN_SEL_HSPI2_0, FN_SEL_HSPI2_1,
+ /* SEL_HSPI1 [2] */
+ FN_SEL_HSPI1_0, FN_SEL_HSPI1_1, FN_SEL_HSPI1_2, FN_SEL_HSPI1_3,
+ /* SEL_I2C3 [1] */
+ FN_SEL_I2C3_0, FN_SEL_I2C3_1,
+ /* SEL_I2C2 [2] */
+ FN_SEL_I2C2_0, FN_SEL_I2C2_1, FN_SEL_I2C2_2, FN_SEL_I2C2_3,
+ /* SEL_I2C1 [2] */
+ FN_SEL_I2C1_0, FN_SEL_I2C1_1, FN_SEL_I2C1_2, FN_SEL_I2C1_3 }
+ },
+ { PINMUX_CFG_REG("INOUTSEL0", 0xffc40004, 32, 1) { GP_INOUTSEL(0) } },
+ { PINMUX_CFG_REG("INOUTSEL1", 0xffc41004, 32, 1) { GP_INOUTSEL(1) } },
+ { PINMUX_CFG_REG("INOUTSEL2", 0xffc42004, 32, 1) { GP_INOUTSEL(2) } },
+ { PINMUX_CFG_REG("INOUTSEL3", 0xffc43004, 32, 1) { GP_INOUTSEL(3) } },
+ { PINMUX_CFG_REG("INOUTSEL4", 0xffc44004, 32, 1) { GP_INOUTSEL(4) } },
+ { PINMUX_CFG_REG("INOUTSEL5", 0xffc45004, 32, 1) { GP_INOUTSEL(5) } },
+ { PINMUX_CFG_REG("INOUTSEL6", 0xffc46004, 32, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_6_8_IN, GP_6_8_OUT,
+ GP_6_7_IN, GP_6_7_OUT,
+ GP_6_6_IN, GP_6_6_OUT,
+ GP_6_5_IN, GP_6_5_OUT,
+ GP_6_4_IN, GP_6_4_OUT,
+ GP_6_3_IN, GP_6_3_OUT,
+ GP_6_2_IN, GP_6_2_OUT,
+ GP_6_1_IN, GP_6_1_OUT,
+ GP_6_0_IN, GP_6_0_OUT }
+ },
+ { },
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("INDT0", 0xffc40008, 32) { GP_INDT(0) } },
+ { PINMUX_DATA_REG("INDT1", 0xffc41008, 32) { GP_INDT(1) } },
+ { PINMUX_DATA_REG("INDT2", 0xffc42008, 32) { GP_INDT(2) } },
+ { PINMUX_DATA_REG("INDT3", 0xffc43008, 32) { GP_INDT(3) } },
+ { PINMUX_DATA_REG("INDT4", 0xffc44008, 32) { GP_INDT(4) } },
+ { PINMUX_DATA_REG("INDT5", 0xffc45008, 32) { GP_INDT(5) } },
+ { PINMUX_DATA_REG("INDT6", 0xffc46008, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, GP_6_8_DATA,
+ GP_6_7_DATA, GP_6_6_DATA, GP_6_5_DATA, GP_6_4_DATA,
+ GP_6_3_DATA, GP_6_2_DATA, GP_6_1_DATA, GP_6_0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info r8a7779_pinmux_info = {
+ .name = "r8a7779_pfc",
+
+ .unlock_reg = 0xfffc0000, /* PMMR */
+
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_GP_0_0,
+ .last_gpio = GPIO_FN_SCK4_B,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7203.c b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
new file mode 100644
index 000000000000..01b425dfd162
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
@@ -0,0 +1,1592 @@
+/*
+ * SH7203 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7203.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC14_DATA, PC13_DATA, PC12_DATA,
+ PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA,
+ PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ PF30_DATA, PF29_DATA, PF28_DATA,
+ PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA,
+ PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
+ PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA,
+ PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ FORCE_IN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB11_IN, PB10_IN, PB9_IN, PB8_IN,
+ PC14_IN, PC13_IN, PC12_IN,
+ PC11_IN, PC10_IN, PC9_IN, PC8_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD15_IN, PD14_IN, PD13_IN, PD12_IN,
+ PD11_IN, PD10_IN, PD9_IN, PD8_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE15_IN, PE14_IN, PE13_IN, PE12_IN,
+ PE11_IN, PE10_IN, PE9_IN, PE8_IN,
+ PE7_IN, PE6_IN, PE5_IN, PE4_IN,
+ PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ PF30_IN, PF29_IN, PF28_IN,
+ PF27_IN, PF26_IN, PF25_IN, PF24_IN,
+ PF23_IN, PF22_IN, PF21_IN, PF20_IN,
+ PF19_IN, PF18_IN, PF17_IN, PF16_IN,
+ PF15_IN, PF14_IN, PF13_IN, PF12_IN,
+ PF11_IN, PF10_IN, PF9_IN, PF8_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ FORCE_OUT,
+ PB11_OUT, PB10_OUT, PB9_OUT, PB8_OUT,
+ PC14_OUT, PC13_OUT, PC12_OUT,
+ PC11_OUT, PC10_OUT, PC9_OUT, PC8_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD15_OUT, PD14_OUT, PD13_OUT, PD12_OUT,
+ PD11_OUT, PD10_OUT, PD9_OUT, PD8_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE15_OUT, PE14_OUT, PE13_OUT, PE12_OUT,
+ PE11_OUT, PE10_OUT, PE9_OUT, PE8_OUT,
+ PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT,
+ PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ PF30_OUT, PF29_OUT, PF28_OUT,
+ PF27_OUT, PF26_OUT, PF25_OUT, PF24_OUT,
+ PF23_OUT, PF22_OUT, PF21_OUT, PF20_OUT,
+ PF19_OUT, PF18_OUT, PF17_OUT, PF16_OUT,
+ PF15_OUT, PF14_OUT, PF13_OUT, PF12_OUT,
+ PF11_OUT, PF10_OUT, PF9_OUT, PF8_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PB11_IOR_IN, PB11_IOR_OUT,
+ PB10_IOR_IN, PB10_IOR_OUT,
+ PB9_IOR_IN, PB9_IOR_OUT,
+ PB8_IOR_IN, PB8_IOR_OUT,
+ PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
+ PB11MD_0, PB11MD_1,
+ PB10MD_0, PB10MD_1,
+ PB9MD_00, PB9MD_01, PB9MD_10,
+ PB8MD_00, PB8MD_01, PB8MD_10,
+ PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
+ PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11,
+ PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11,
+ PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
+ PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11,
+ PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11,
+ PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11,
+ PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11,
+
+ PB12IRQ_00, PB12IRQ_01, PB12IRQ_10,
+
+ PC14MD_0, PC14MD_1,
+ PC13MD_0, PC13MD_1,
+ PC12MD_0, PC12MD_1,
+ PC11MD_00, PC11MD_01, PC11MD_10,
+ PC10MD_00, PC10MD_01, PC10MD_10,
+ PC9MD_0, PC9MD_1,
+ PC8MD_0, PC8MD_1,
+ PC7MD_0, PC7MD_1,
+ PC6MD_0, PC6MD_1,
+ PC5MD_0, PC5MD_1,
+ PC4MD_0, PC4MD_1,
+ PC3MD_0, PC3MD_1,
+ PC2MD_0, PC2MD_1,
+ PC1MD_0, PC1MD_1,
+ PC0MD_00, PC0MD_01, PC0MD_10,
+
+ PD15MD_000, PD15MD_001, PD15MD_010, PD15MD_100, PD15MD_101,
+ PD14MD_000, PD14MD_001, PD14MD_010, PD14MD_101,
+ PD13MD_000, PD13MD_001, PD13MD_010, PD13MD_100, PD13MD_101,
+ PD12MD_000, PD12MD_001, PD12MD_010, PD12MD_100, PD12MD_101,
+ PD11MD_000, PD11MD_001, PD11MD_010, PD11MD_100, PD11MD_101,
+ PD10MD_000, PD10MD_001, PD10MD_010, PD10MD_100, PD10MD_101,
+ PD9MD_000, PD9MD_001, PD9MD_010, PD9MD_100, PD9MD_101,
+ PD8MD_000, PD8MD_001, PD8MD_010, PD8MD_100, PD8MD_101,
+ PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011, PD7MD_100, PD7MD_101,
+ PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011, PD6MD_100, PD6MD_101,
+ PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011, PD5MD_100, PD5MD_101,
+ PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011, PD4MD_100, PD4MD_101,
+ PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011, PD3MD_100, PD3MD_101,
+ PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011, PD2MD_100, PD2MD_101,
+ PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011, PD1MD_100, PD1MD_101,
+ PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011, PD0MD_100, PD0MD_101,
+
+ PE15MD_00, PE15MD_01, PE15MD_11,
+ PE14MD_00, PE14MD_01, PE14MD_11,
+ PE13MD_00, PE13MD_11,
+ PE12MD_00, PE12MD_11,
+ PE11MD_000, PE11MD_001, PE11MD_010, PE11MD_100,
+ PE10MD_000, PE10MD_001, PE10MD_010, PE10MD_100,
+ PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11,
+ PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11,
+ PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011, PE7MD_100,
+ PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011, PE6MD_100,
+ PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011, PE5MD_100,
+ PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011, PE4MD_100,
+ PE3MD_00, PE3MD_01, PE3MD_11,
+ PE2MD_00, PE2MD_01, PE2MD_11,
+ PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11,
+ PE0MD_000, PE0MD_001, PE0MD_011, PE0MD_100,
+
+ PF30MD_0, PF30MD_1,
+ PF29MD_0, PF29MD_1,
+ PF28MD_0, PF28MD_1,
+ PF27MD_0, PF27MD_1,
+ PF26MD_0, PF26MD_1,
+ PF25MD_0, PF25MD_1,
+ PF24MD_0, PF24MD_1,
+ PF23MD_00, PF23MD_01, PF23MD_10,
+ PF22MD_00, PF22MD_01, PF22MD_10,
+ PF21MD_00, PF21MD_01, PF21MD_10,
+ PF20MD_00, PF20MD_01, PF20MD_10,
+ PF19MD_00, PF19MD_01, PF19MD_10,
+ PF18MD_00, PF18MD_01, PF18MD_10,
+ PF17MD_00, PF17MD_01, PF17MD_10,
+ PF16MD_00, PF16MD_01, PF16MD_10,
+ PF15MD_00, PF15MD_01, PF15MD_10,
+ PF14MD_00, PF14MD_01, PF14MD_10,
+ PF13MD_00, PF13MD_01, PF13MD_10,
+ PF12MD_00, PF12MD_01, PF12MD_10,
+ PF11MD_00, PF11MD_01, PF11MD_10,
+ PF10MD_00, PF10MD_01, PF10MD_10,
+ PF9MD_00, PF9MD_01, PF9MD_10,
+ PF8MD_00, PF8MD_01, PF8MD_10,
+ PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11,
+ PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11,
+ PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11,
+ PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11,
+ PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11,
+ PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11,
+ PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11,
+ PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ PINT7_PB_MARK, PINT6_PB_MARK, PINT5_PB_MARK, PINT4_PB_MARK,
+ PINT3_PB_MARK, PINT2_PB_MARK, PINT1_PB_MARK, PINT0_PB_MARK,
+ PINT7_PD_MARK, PINT6_PD_MARK, PINT5_PD_MARK, PINT4_PD_MARK,
+ PINT3_PD_MARK, PINT2_PD_MARK, PINT1_PD_MARK, PINT0_PD_MARK,
+ IRQ7_PB_MARK, IRQ6_PB_MARK, IRQ5_PB_MARK, IRQ4_PB_MARK,
+ IRQ3_PB_MARK, IRQ2_PB_MARK, IRQ1_PB_MARK, IRQ0_PB_MARK,
+ IRQ7_PD_MARK, IRQ6_PD_MARK, IRQ5_PD_MARK, IRQ4_PD_MARK,
+ IRQ3_PD_MARK, IRQ2_PD_MARK, IRQ1_PD_MARK, IRQ0_PD_MARK,
+ IRQ7_PE_MARK, IRQ6_PE_MARK, IRQ5_PE_MARK, IRQ4_PE_MARK,
+ IRQ3_PE_MARK, IRQ2_PE_MARK, IRQ1_PE_MARK, IRQ0_PE_MARK,
+ WDTOVF_MARK, IRQOUT_MARK, REFOUT_MARK, IRQOUT_REFOUT_MARK,
+ UBCTRG_MARK,
+ CTX1_MARK, CRX1_MARK, CTX0_MARK, CTX0_CTX1_MARK,
+ CRX0_MARK, CRX0_CRX1_MARK,
+ SDA3_MARK, SCL3_MARK,
+ SDA2_MARK, SCL2_MARK,
+ SDA1_MARK, SCL1_MARK,
+ SDA0_MARK, SCL0_MARK,
+ TEND0_PD_MARK, TEND0_PE_MARK, DACK0_PD_MARK, DACK0_PE_MARK,
+ DREQ0_PD_MARK, DREQ0_PE_MARK, TEND1_PD_MARK, TEND1_PE_MARK,
+ DACK1_PD_MARK, DACK1_PE_MARK, DREQ1_PD_MARK, DREQ1_PE_MARK,
+ DACK2_MARK, DREQ2_MARK, DACK3_MARK, DREQ3_MARK,
+ ADTRG_PD_MARK, ADTRG_PE_MARK,
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+ D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+ D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+ A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+ A21_MARK, CS4_MARK, MRES_MARK, BS_MARK,
+ IOIS16_MARK, CS1_MARK, CS6_CE1B_MARK, CE2B_MARK,
+ CS5_CE1A_MARK, CE2A_MARK, FRAME_MARK, WAIT_MARK,
+ RDWR_MARK, CKE_MARK, CASU_MARK, BREQ_MARK,
+ RASU_MARK, BACK_MARK, CASL_MARK, RASL_MARK,
+ WE3_DQMUU_AH_ICIO_WR_MARK, WE2_DQMUL_ICIORD_MARK,
+ WE1_DQMLU_WE_MARK, WE0_DQMLL_MARK,
+ CS3_MARK, CS2_MARK, A1_MARK, A0_MARK, CS7_MARK,
+ TIOC4D_MARK, TIOC4C_MARK, TIOC4B_MARK, TIOC4A_MARK,
+ TIOC3D_MARK, TIOC3C_MARK, TIOC3B_MARK, TIOC3A_MARK,
+ TIOC2B_MARK, TIOC1B_MARK, TIOC2A_MARK, TIOC1A_MARK,
+ TIOC0D_MARK, TIOC0C_MARK, TIOC0B_MARK, TIOC0A_MARK,
+ TCLKD_PD_MARK, TCLKC_PD_MARK, TCLKB_PD_MARK, TCLKA_PD_MARK,
+ TCLKD_PF_MARK, TCLKC_PF_MARK, TCLKB_PF_MARK, TCLKA_PF_MARK,
+ SCS0_PD_MARK, SSO0_PD_MARK, SSI0_PD_MARK, SSCK0_PD_MARK,
+ SCS0_PF_MARK, SSO0_PF_MARK, SSI0_PF_MARK, SSCK0_PF_MARK,
+ SCS1_PD_MARK, SSO1_PD_MARK, SSI1_PD_MARK, SSCK1_PD_MARK,
+ SCS1_PF_MARK, SSO1_PF_MARK, SSI1_PF_MARK, SSCK1_PF_MARK,
+ TXD0_MARK, RXD0_MARK, SCK0_MARK,
+ TXD1_MARK, RXD1_MARK, SCK1_MARK,
+ TXD2_MARK, RXD2_MARK, SCK2_MARK,
+ RTS3_MARK, CTS3_MARK, TXD3_MARK,
+ RXD3_MARK, SCK3_MARK,
+ AUDIO_CLK_MARK,
+ SSIDATA3_MARK, SSIWS3_MARK, SSISCK3_MARK,
+ SSIDATA2_MARK, SSIWS2_MARK, SSISCK2_MARK,
+ SSIDATA1_MARK, SSIWS1_MARK, SSISCK1_MARK,
+ SSIDATA0_MARK, SSIWS0_MARK, SSISCK0_MARK,
+ FCE_MARK, FRB_MARK,
+ NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
+ NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK,
+ FSC_MARK, FOE_MARK, FCDE_MARK, FWE_MARK,
+ LCD_VEPWC_MARK, LCD_VCPWC_MARK, LCD_CLK_MARK, LCD_FLM_MARK,
+ LCD_M_DISP_MARK, LCD_CL2_MARK, LCD_CL1_MARK, LCD_DON_MARK,
+ LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK,
+ LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK,
+ LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK,
+ LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+
+ /* PA */
+ PINMUX_DATA(PA7_DATA, PA7_IN),
+ PINMUX_DATA(PA6_DATA, PA6_IN),
+ PINMUX_DATA(PA5_DATA, PA5_IN),
+ PINMUX_DATA(PA4_DATA, PA4_IN),
+ PINMUX_DATA(PA3_DATA, PA3_IN),
+ PINMUX_DATA(PA2_DATA, PA2_IN),
+ PINMUX_DATA(PA1_DATA, PA1_IN),
+ PINMUX_DATA(PA0_DATA, PA0_IN),
+
+ /* PB */
+ PINMUX_DATA(PB12_DATA, PB12MD_00, FORCE_OUT),
+ PINMUX_DATA(WDTOVF_MARK, PB12MD_01),
+ PINMUX_DATA(IRQOUT_MARK, PB12MD_10, PB12IRQ_00),
+ PINMUX_DATA(REFOUT_MARK, PB12MD_10, PB12IRQ_01),
+ PINMUX_DATA(IRQOUT_REFOUT_MARK, PB12MD_10, PB12IRQ_10),
+ PINMUX_DATA(UBCTRG_MARK, PB12MD_11),
+
+ PINMUX_DATA(PB11_DATA, PB11MD_0, PB11_IN, PB11_OUT),
+ PINMUX_DATA(CTX1_MARK, PB11MD_1),
+
+ PINMUX_DATA(PB10_DATA, PB10MD_0, PB10_IN, PB10_OUT),
+ PINMUX_DATA(CRX1_MARK, PB10MD_1),
+
+ PINMUX_DATA(PB9_DATA, PB9MD_00, PB9_IN, PB9_OUT),
+ PINMUX_DATA(CTX0_MARK, PB9MD_01),
+ PINMUX_DATA(CTX0_CTX1_MARK, PB9MD_10),
+
+ PINMUX_DATA(PB8_DATA, PB8MD_00, PB8_IN, PB8_OUT),
+ PINMUX_DATA(CRX0_MARK, PB8MD_01),
+ PINMUX_DATA(CRX0_CRX1_MARK, PB8MD_10),
+
+ PINMUX_DATA(PB7_DATA, PB7MD_00, FORCE_IN),
+ PINMUX_DATA(SDA3_MARK, PB7MD_01),
+ PINMUX_DATA(PINT7_PB_MARK, PB7MD_10),
+ PINMUX_DATA(IRQ7_PB_MARK, PB7MD_11),
+
+ PINMUX_DATA(PB6_DATA, PB6MD_00, FORCE_IN),
+ PINMUX_DATA(SCL3_MARK, PB6MD_01),
+ PINMUX_DATA(PINT6_PB_MARK, PB6MD_10),
+ PINMUX_DATA(IRQ6_PB_MARK, PB6MD_11),
+
+ PINMUX_DATA(PB5_DATA, PB5MD_00, FORCE_IN),
+ PINMUX_DATA(SDA2_MARK, PB5MD_01),
+ PINMUX_DATA(PINT5_PB_MARK, PB5MD_10),
+ PINMUX_DATA(IRQ5_PB_MARK, PB5MD_11),
+
+ PINMUX_DATA(PB4_DATA, PB4MD_00, FORCE_IN),
+ PINMUX_DATA(SCL2_MARK, PB4MD_01),
+ PINMUX_DATA(PINT4_PB_MARK, PB4MD_10),
+ PINMUX_DATA(IRQ4_PB_MARK, PB4MD_11),
+
+ PINMUX_DATA(PB3_DATA, PB3MD_00, FORCE_IN),
+ PINMUX_DATA(SDA1_MARK, PB3MD_01),
+ PINMUX_DATA(PINT3_PB_MARK, PB3MD_10),
+ PINMUX_DATA(IRQ3_PB_MARK, PB3MD_11),
+
+ PINMUX_DATA(PB2_DATA, PB2MD_00, FORCE_IN),
+ PINMUX_DATA(SCL1_MARK, PB2MD_01),
+ PINMUX_DATA(PINT2_PB_MARK, PB2MD_10),
+ PINMUX_DATA(IRQ2_PB_MARK, PB2MD_11),
+
+ PINMUX_DATA(PB1_DATA, PB1MD_00, FORCE_IN),
+ PINMUX_DATA(SDA0_MARK, PB1MD_01),
+ PINMUX_DATA(PINT1_PB_MARK, PB1MD_10),
+ PINMUX_DATA(IRQ1_PB_MARK, PB1MD_11),
+
+ PINMUX_DATA(PB0_DATA, PB0MD_00, FORCE_IN),
+ PINMUX_DATA(SCL0_MARK, PB0MD_01),
+ PINMUX_DATA(PINT0_PB_MARK, PB0MD_10),
+ PINMUX_DATA(IRQ0_PB_MARK, PB0MD_11),
+
+ /* PC */
+ PINMUX_DATA(PC14_DATA, PC14MD_0, PC14_IN, PC14_OUT),
+ PINMUX_DATA(WAIT_MARK, PC14MD_1),
+
+ PINMUX_DATA(PC13_DATA, PC13MD_0, PC13_IN, PC13_OUT),
+ PINMUX_DATA(RDWR_MARK, PC13MD_1),
+
+ PINMUX_DATA(PC12_DATA, PC12MD_0, PC12_IN, PC12_OUT),
+ PINMUX_DATA(CKE_MARK, PC12MD_1),
+
+ PINMUX_DATA(PC11_DATA, PC11MD_00, PC11_IN, PC11_OUT),
+ PINMUX_DATA(CASU_MARK, PC11MD_01),
+ PINMUX_DATA(BREQ_MARK, PC11MD_10),
+
+ PINMUX_DATA(PC10_DATA, PC10MD_00, PC10_IN, PC10_OUT),
+ PINMUX_DATA(RASU_MARK, PC10MD_01),
+ PINMUX_DATA(BACK_MARK, PC10MD_10),
+
+ PINMUX_DATA(PC9_DATA, PC9MD_0, PC9_IN, PC9_OUT),
+ PINMUX_DATA(CASL_MARK, PC9MD_1),
+
+ PINMUX_DATA(PC8_DATA, PC8MD_0, PC8_IN, PC8_OUT),
+ PINMUX_DATA(RASL_MARK, PC8MD_1),
+
+ PINMUX_DATA(PC7_DATA, PC7MD_0, PC7_IN, PC7_OUT),
+ PINMUX_DATA(WE3_DQMUU_AH_ICIO_WR_MARK, PC7MD_1),
+
+ PINMUX_DATA(PC6_DATA, PC6MD_0, PC6_IN, PC6_OUT),
+ PINMUX_DATA(WE2_DQMUL_ICIORD_MARK, PC6MD_1),
+
+ PINMUX_DATA(PC5_DATA, PC5MD_0, PC5_IN, PC5_OUT),
+ PINMUX_DATA(WE1_DQMLU_WE_MARK, PC5MD_1),
+
+ PINMUX_DATA(PC4_DATA, PC4MD_0, PC4_IN, PC4_OUT),
+ PINMUX_DATA(WE0_DQMLL_MARK, PC4MD_1),
+
+ PINMUX_DATA(PC3_DATA, PC3MD_0, PC3_IN, PC3_OUT),
+ PINMUX_DATA(CS3_MARK, PC3MD_1),
+
+ PINMUX_DATA(PC2_DATA, PC2MD_0, PC2_IN, PC2_OUT),
+ PINMUX_DATA(CS2_MARK, PC2MD_1),
+
+ PINMUX_DATA(PC1_DATA, PC1MD_0, PC1_IN, PC1_OUT),
+ PINMUX_DATA(A1_MARK, PC1MD_1),
+
+ PINMUX_DATA(PC0_DATA, PC0MD_00, PC0_IN, PC0_OUT),
+ PINMUX_DATA(A0_MARK, PC0MD_01),
+ PINMUX_DATA(CS7_MARK, PC0MD_10),
+
+ /* PD */
+ PINMUX_DATA(PD15_DATA, PD15MD_000, PD15_IN, PD15_OUT),
+ PINMUX_DATA(D31_MARK, PD15MD_001),
+ PINMUX_DATA(PINT7_PD_MARK, PD15MD_010),
+ PINMUX_DATA(ADTRG_PD_MARK, PD15MD_100),
+ PINMUX_DATA(TIOC4D_MARK, PD15MD_101),
+
+ PINMUX_DATA(PD14_DATA, PD14MD_000, PD14_IN, PD14_OUT),
+ PINMUX_DATA(D30_MARK, PD14MD_001),
+ PINMUX_DATA(PINT6_PD_MARK, PD14MD_010),
+ PINMUX_DATA(TIOC4C_MARK, PD14MD_101),
+
+ PINMUX_DATA(PD13_DATA, PD13MD_000, PD13_IN, PD13_OUT),
+ PINMUX_DATA(D29_MARK, PD13MD_001),
+ PINMUX_DATA(PINT5_PD_MARK, PD13MD_010),
+ PINMUX_DATA(TEND1_PD_MARK, PD13MD_100),
+ PINMUX_DATA(TIOC4B_MARK, PD13MD_101),
+
+ PINMUX_DATA(PD12_DATA, PD12MD_000, PD12_IN, PD12_OUT),
+ PINMUX_DATA(D28_MARK, PD12MD_001),
+ PINMUX_DATA(PINT4_PD_MARK, PD12MD_010),
+ PINMUX_DATA(DACK1_PD_MARK, PD12MD_100),
+ PINMUX_DATA(TIOC4A_MARK, PD12MD_101),
+
+ PINMUX_DATA(PD11_DATA, PD11MD_000, PD11_IN, PD11_OUT),
+ PINMUX_DATA(D27_MARK, PD11MD_001),
+ PINMUX_DATA(PINT3_PD_MARK, PD11MD_010),
+ PINMUX_DATA(DREQ1_PD_MARK, PD11MD_100),
+ PINMUX_DATA(TIOC3D_MARK, PD11MD_101),
+
+ PINMUX_DATA(PD10_DATA, PD10MD_000, PD10_IN, PD10_OUT),
+ PINMUX_DATA(D26_MARK, PD10MD_001),
+ PINMUX_DATA(PINT2_PD_MARK, PD10MD_010),
+ PINMUX_DATA(TEND0_PD_MARK, PD10MD_100),
+ PINMUX_DATA(TIOC3C_MARK, PD10MD_101),
+
+ PINMUX_DATA(PD9_DATA, PD9MD_000, PD9_IN, PD9_OUT),
+ PINMUX_DATA(D25_MARK, PD9MD_001),
+ PINMUX_DATA(PINT1_PD_MARK, PD9MD_010),
+ PINMUX_DATA(DACK0_PD_MARK, PD9MD_100),
+ PINMUX_DATA(TIOC3B_MARK, PD9MD_101),
+
+ PINMUX_DATA(PD8_DATA, PD8MD_000, PD8_IN, PD8_OUT),
+ PINMUX_DATA(D24_MARK, PD8MD_001),
+ PINMUX_DATA(PINT0_PD_MARK, PD8MD_010),
+ PINMUX_DATA(DREQ0_PD_MARK, PD8MD_100),
+ PINMUX_DATA(TIOC3A_MARK, PD8MD_101),
+
+ PINMUX_DATA(PD7_DATA, PD7MD_000, PD7_IN, PD7_OUT),
+ PINMUX_DATA(D23_MARK, PD7MD_001),
+ PINMUX_DATA(IRQ7_PD_MARK, PD7MD_010),
+ PINMUX_DATA(SCS1_PD_MARK, PD7MD_011),
+ PINMUX_DATA(TCLKD_PD_MARK, PD7MD_100),
+ PINMUX_DATA(TIOC2B_MARK, PD7MD_101),
+
+ PINMUX_DATA(PD6_DATA, PD6MD_000, PD6_IN, PD6_OUT),
+ PINMUX_DATA(D22_MARK, PD6MD_001),
+ PINMUX_DATA(IRQ6_PD_MARK, PD6MD_010),
+ PINMUX_DATA(SSO1_PD_MARK, PD6MD_011),
+ PINMUX_DATA(TCLKC_PD_MARK, PD6MD_100),
+ PINMUX_DATA(TIOC2A_MARK, PD6MD_101),
+
+ PINMUX_DATA(PD5_DATA, PD5MD_000, PD5_IN, PD5_OUT),
+ PINMUX_DATA(D21_MARK, PD5MD_001),
+ PINMUX_DATA(IRQ5_PD_MARK, PD5MD_010),
+ PINMUX_DATA(SSI1_PD_MARK, PD5MD_011),
+ PINMUX_DATA(TCLKB_PD_MARK, PD5MD_100),
+ PINMUX_DATA(TIOC1B_MARK, PD5MD_101),
+
+ PINMUX_DATA(PD4_DATA, PD4MD_000, PD4_IN, PD4_OUT),
+ PINMUX_DATA(D20_MARK, PD4MD_001),
+ PINMUX_DATA(IRQ4_PD_MARK, PD4MD_010),
+ PINMUX_DATA(SSCK1_PD_MARK, PD4MD_011),
+ PINMUX_DATA(TCLKA_PD_MARK, PD4MD_100),
+ PINMUX_DATA(TIOC1A_MARK, PD4MD_101),
+
+ PINMUX_DATA(PD3_DATA, PD3MD_000, PD3_IN, PD3_OUT),
+ PINMUX_DATA(D19_MARK, PD3MD_001),
+ PINMUX_DATA(IRQ3_PD_MARK, PD3MD_010),
+ PINMUX_DATA(SCS0_PD_MARK, PD3MD_011),
+ PINMUX_DATA(DACK3_MARK, PD3MD_100),
+ PINMUX_DATA(TIOC0D_MARK, PD3MD_101),
+
+ PINMUX_DATA(PD2_DATA, PD2MD_000, PD2_IN, PD2_OUT),
+ PINMUX_DATA(D18_MARK, PD2MD_001),
+ PINMUX_DATA(IRQ2_PD_MARK, PD2MD_010),
+ PINMUX_DATA(SSO0_PD_MARK, PD2MD_011),
+ PINMUX_DATA(DREQ3_MARK, PD2MD_100),
+ PINMUX_DATA(TIOC0C_MARK, PD2MD_101),
+
+ PINMUX_DATA(PD1_DATA, PD1MD_000, PD1_IN, PD1_OUT),
+ PINMUX_DATA(D17_MARK, PD1MD_001),
+ PINMUX_DATA(IRQ1_PD_MARK, PD1MD_010),
+ PINMUX_DATA(SSI0_PD_MARK, PD1MD_011),
+ PINMUX_DATA(DACK2_MARK, PD1MD_100),
+ PINMUX_DATA(TIOC0B_MARK, PD1MD_101),
+
+ PINMUX_DATA(PD0_DATA, PD0MD_000, PD0_IN, PD0_OUT),
+ PINMUX_DATA(D16_MARK, PD0MD_001),
+ PINMUX_DATA(IRQ0_PD_MARK, PD0MD_010),
+ PINMUX_DATA(SSCK0_PD_MARK, PD0MD_011),
+ PINMUX_DATA(DREQ2_MARK, PD0MD_100),
+ PINMUX_DATA(TIOC0A_MARK, PD0MD_101),
+
+ /* PE */
+ PINMUX_DATA(PE15_DATA, PE15MD_00, PE15_IN, PE15_OUT),
+ PINMUX_DATA(IOIS16_MARK, PE15MD_01),
+ PINMUX_DATA(RTS3_MARK, PE15MD_11),
+
+ PINMUX_DATA(PE14_DATA, PE14MD_00, PE14_IN, PE14_OUT),
+ PINMUX_DATA(CS1_MARK, PE14MD_01),
+ PINMUX_DATA(CTS3_MARK, PE14MD_11),
+
+ PINMUX_DATA(PE13_DATA, PE13MD_00, PE13_IN, PE13_OUT),
+ PINMUX_DATA(TXD3_MARK, PE13MD_11),
+
+ PINMUX_DATA(PE12_DATA, PE12MD_00, PE12_IN, PE12_OUT),
+ PINMUX_DATA(RXD3_MARK, PE12MD_11),
+
+ PINMUX_DATA(PE11_DATA, PE11MD_000, PE11_IN, PE11_OUT),
+ PINMUX_DATA(CS6_CE1B_MARK, PE11MD_001),
+ PINMUX_DATA(IRQ7_PE_MARK, PE11MD_010),
+ PINMUX_DATA(TEND1_PE_MARK, PE11MD_100),
+
+ PINMUX_DATA(PE10_DATA, PE10MD_000, PE10_IN, PE10_OUT),
+ PINMUX_DATA(CE2B_MARK, PE10MD_001),
+ PINMUX_DATA(IRQ6_PE_MARK, PE10MD_010),
+ PINMUX_DATA(TEND0_PE_MARK, PE10MD_100),
+
+ PINMUX_DATA(PE9_DATA, PE9MD_00, PE9_IN, PE9_OUT),
+ PINMUX_DATA(CS5_CE1A_MARK, PE9MD_01),
+ PINMUX_DATA(IRQ5_PE_MARK, PE9MD_10),
+ PINMUX_DATA(SCK3_MARK, PE9MD_11),
+
+ PINMUX_DATA(PE8_DATA, PE8MD_00, PE8_IN, PE8_OUT),
+ PINMUX_DATA(CE2A_MARK, PE8MD_01),
+ PINMUX_DATA(IRQ4_PE_MARK, PE8MD_10),
+ PINMUX_DATA(SCK2_MARK, PE8MD_11),
+
+ PINMUX_DATA(PE7_DATA, PE7MD_000, PE7_IN, PE7_OUT),
+ PINMUX_DATA(FRAME_MARK, PE7MD_001),
+ PINMUX_DATA(IRQ3_PE_MARK, PE7MD_010),
+ PINMUX_DATA(TXD2_MARK, PE7MD_011),
+ PINMUX_DATA(DACK1_PE_MARK, PE7MD_100),
+
+ PINMUX_DATA(PE6_DATA, PE6MD_000, PE6_IN, PE6_OUT),
+ PINMUX_DATA(A25_MARK, PE6MD_001),
+ PINMUX_DATA(IRQ2_PE_MARK, PE6MD_010),
+ PINMUX_DATA(RXD2_MARK, PE6MD_011),
+ PINMUX_DATA(DREQ1_PE_MARK, PE6MD_100),
+
+ PINMUX_DATA(PE5_DATA, PE5MD_000, PE5_IN, PE5_OUT),
+ PINMUX_DATA(A24_MARK, PE5MD_001),
+ PINMUX_DATA(IRQ1_PE_MARK, PE5MD_010),
+ PINMUX_DATA(TXD1_MARK, PE5MD_011),
+ PINMUX_DATA(DACK0_PE_MARK, PE5MD_100),
+
+ PINMUX_DATA(PE4_DATA, PE4MD_000, PE4_IN, PE4_OUT),
+ PINMUX_DATA(A23_MARK, PE4MD_001),
+ PINMUX_DATA(IRQ0_PE_MARK, PE4MD_010),
+ PINMUX_DATA(RXD1_MARK, PE4MD_011),
+ PINMUX_DATA(DREQ0_PE_MARK, PE4MD_100),
+
+ PINMUX_DATA(PE3_DATA, PE3MD_00, PE3_IN, PE3_OUT),
+ PINMUX_DATA(A22_MARK, PE3MD_01),
+ PINMUX_DATA(SCK1_MARK, PE3MD_11),
+
+ PINMUX_DATA(PE2_DATA, PE2MD_00, PE2_IN, PE2_OUT),
+ PINMUX_DATA(A21_MARK, PE2MD_01),
+ PINMUX_DATA(SCK0_MARK, PE2MD_11),
+
+ PINMUX_DATA(PE1_DATA, PE1MD_00, PE1_IN, PE1_OUT),
+ PINMUX_DATA(CS4_MARK, PE1MD_01),
+ PINMUX_DATA(MRES_MARK, PE1MD_10),
+ PINMUX_DATA(TXD0_MARK, PE1MD_11),
+
+ PINMUX_DATA(PE0_DATA, PE0MD_000, PE0_IN, PE0_OUT),
+ PINMUX_DATA(BS_MARK, PE0MD_001),
+ PINMUX_DATA(RXD0_MARK, PE0MD_011),
+ PINMUX_DATA(ADTRG_PE_MARK, PE0MD_100),
+
+ /* PF */
+ PINMUX_DATA(PF30_DATA, PF30MD_0, PF30_IN, PF30_OUT),
+ PINMUX_DATA(AUDIO_CLK_MARK, PF30MD_1),
+
+ PINMUX_DATA(PF29_DATA, PF29MD_0, PF29_IN, PF29_OUT),
+ PINMUX_DATA(SSIDATA3_MARK, PF29MD_1),
+
+ PINMUX_DATA(PF28_DATA, PF28MD_0, PF28_IN, PF28_OUT),
+ PINMUX_DATA(SSIWS3_MARK, PF28MD_1),
+
+ PINMUX_DATA(PF27_DATA, PF27MD_0, PF27_IN, PF27_OUT),
+ PINMUX_DATA(SSISCK3_MARK, PF27MD_1),
+
+ PINMUX_DATA(PF26_DATA, PF26MD_0, PF26_IN, PF26_OUT),
+ PINMUX_DATA(SSIDATA2_MARK, PF26MD_1),
+
+ PINMUX_DATA(PF25_DATA, PF25MD_0, PF25_IN, PF25_OUT),
+ PINMUX_DATA(SSIWS2_MARK, PF25MD_1),
+
+ PINMUX_DATA(PF24_DATA, PF24MD_0, PF24_IN, PF24_OUT),
+ PINMUX_DATA(SSISCK2_MARK, PF24MD_1),
+
+ PINMUX_DATA(PF23_DATA, PF23MD_00, PF23_IN, PF23_OUT),
+ PINMUX_DATA(SSIDATA1_MARK, PF23MD_01),
+ PINMUX_DATA(LCD_VEPWC_MARK, PF23MD_10),
+
+ PINMUX_DATA(PF22_DATA, PF22MD_00, PF22_IN, PF22_OUT),
+ PINMUX_DATA(SSIWS1_MARK, PF22MD_01),
+ PINMUX_DATA(LCD_VCPWC_MARK, PF22MD_10),
+
+ PINMUX_DATA(PF21_DATA, PF21MD_00, PF21_IN, PF21_OUT),
+ PINMUX_DATA(SSISCK1_MARK, PF21MD_01),
+ PINMUX_DATA(LCD_CLK_MARK, PF21MD_10),
+
+ PINMUX_DATA(PF20_DATA, PF20MD_00, PF20_IN, PF20_OUT),
+ PINMUX_DATA(SSIDATA0_MARK, PF20MD_01),
+ PINMUX_DATA(LCD_FLM_MARK, PF20MD_10),
+
+ PINMUX_DATA(PF19_DATA, PF19MD_00, PF19_IN, PF19_OUT),
+ PINMUX_DATA(SSIWS0_MARK, PF19MD_01),
+ PINMUX_DATA(LCD_M_DISP_MARK, PF19MD_10),
+
+ PINMUX_DATA(PF18_DATA, PF18MD_00, PF18_IN, PF18_OUT),
+ PINMUX_DATA(SSISCK0_MARK, PF18MD_01),
+ PINMUX_DATA(LCD_CL2_MARK, PF18MD_10),
+
+ PINMUX_DATA(PF17_DATA, PF17MD_00, PF17_IN, PF17_OUT),
+ PINMUX_DATA(FCE_MARK, PF17MD_01),
+ PINMUX_DATA(LCD_CL1_MARK, PF17MD_10),
+
+ PINMUX_DATA(PF16_DATA, PF16MD_00, PF16_IN, PF16_OUT),
+ PINMUX_DATA(FRB_MARK, PF16MD_01),
+ PINMUX_DATA(LCD_DON_MARK, PF16MD_10),
+
+ PINMUX_DATA(PF15_DATA, PF15MD_00, PF15_IN, PF15_OUT),
+ PINMUX_DATA(NAF7_MARK, PF15MD_01),
+ PINMUX_DATA(LCD_DATA15_MARK, PF15MD_10),
+
+ PINMUX_DATA(PF14_DATA, PF14MD_00, PF14_IN, PF14_OUT),
+ PINMUX_DATA(NAF6_MARK, PF14MD_01),
+ PINMUX_DATA(LCD_DATA14_MARK, PF14MD_10),
+
+ PINMUX_DATA(PF13_DATA, PF13MD_00, PF13_IN, PF13_OUT),
+ PINMUX_DATA(NAF5_MARK, PF13MD_01),
+ PINMUX_DATA(LCD_DATA13_MARK, PF13MD_10),
+
+ PINMUX_DATA(PF12_DATA, PF12MD_00, PF12_IN, PF12_OUT),
+ PINMUX_DATA(NAF4_MARK, PF12MD_01),
+ PINMUX_DATA(LCD_DATA12_MARK, PF12MD_10),
+
+ PINMUX_DATA(PF11_DATA, PF11MD_00, PF11_IN, PF11_OUT),
+ PINMUX_DATA(NAF3_MARK, PF11MD_01),
+ PINMUX_DATA(LCD_DATA11_MARK, PF11MD_10),
+
+ PINMUX_DATA(PF10_DATA, PF10MD_00, PF10_IN, PF10_OUT),
+ PINMUX_DATA(NAF2_MARK, PF10MD_01),
+ PINMUX_DATA(LCD_DATA10_MARK, PF10MD_10),
+
+ PINMUX_DATA(PF9_DATA, PF9MD_00, PF9_IN, PF9_OUT),
+ PINMUX_DATA(NAF1_MARK, PF9MD_01),
+ PINMUX_DATA(LCD_DATA9_MARK, PF9MD_10),
+
+ PINMUX_DATA(PF8_DATA, PF8MD_00, PF8_IN, PF8_OUT),
+ PINMUX_DATA(NAF0_MARK, PF8MD_01),
+ PINMUX_DATA(LCD_DATA8_MARK, PF8MD_10),
+
+ PINMUX_DATA(PF7_DATA, PF7MD_00, PF7_IN, PF7_OUT),
+ PINMUX_DATA(FSC_MARK, PF7MD_01),
+ PINMUX_DATA(LCD_DATA7_MARK, PF7MD_10),
+ PINMUX_DATA(SCS1_PF_MARK, PF7MD_11),
+
+ PINMUX_DATA(PF6_DATA, PF6MD_00, PF6_IN, PF6_OUT),
+ PINMUX_DATA(FOE_MARK, PF6MD_01),
+ PINMUX_DATA(LCD_DATA6_MARK, PF6MD_10),
+ PINMUX_DATA(SSO1_PF_MARK, PF6MD_11),
+
+ PINMUX_DATA(PF5_DATA, PF5MD_00, PF5_IN, PF5_OUT),
+ PINMUX_DATA(FCDE_MARK, PF5MD_01),
+ PINMUX_DATA(LCD_DATA5_MARK, PF5MD_10),
+ PINMUX_DATA(SSI1_PF_MARK, PF5MD_11),
+
+ PINMUX_DATA(PF4_DATA, PF4MD_00, PF4_IN, PF4_OUT),
+ PINMUX_DATA(FWE_MARK, PF4MD_01),
+ PINMUX_DATA(LCD_DATA4_MARK, PF4MD_10),
+ PINMUX_DATA(SSCK1_PF_MARK, PF4MD_11),
+
+ PINMUX_DATA(PF3_DATA, PF3MD_00, PF3_IN, PF3_OUT),
+ PINMUX_DATA(TCLKD_PF_MARK, PF3MD_01),
+ PINMUX_DATA(LCD_DATA3_MARK, PF3MD_10),
+ PINMUX_DATA(SCS0_PF_MARK, PF3MD_11),
+
+ PINMUX_DATA(PF2_DATA, PF2MD_00, PF2_IN, PF2_OUT),
+ PINMUX_DATA(TCLKC_PF_MARK, PF2MD_01),
+ PINMUX_DATA(LCD_DATA2_MARK, PF2MD_10),
+ PINMUX_DATA(SSO0_PF_MARK, PF2MD_11),
+
+ PINMUX_DATA(PF1_DATA, PF1MD_00, PF1_IN, PF1_OUT),
+ PINMUX_DATA(TCLKB_PF_MARK, PF1MD_01),
+ PINMUX_DATA(LCD_DATA1_MARK, PF1MD_10),
+ PINMUX_DATA(SSI0_PF_MARK, PF1MD_11),
+
+ PINMUX_DATA(PF0_DATA, PF0MD_00, PF0_IN, PF0_OUT),
+ PINMUX_DATA(TCLKA_PF_MARK, PF0MD_01),
+ PINMUX_DATA(LCD_DATA0_MARK, PF0MD_10),
+ PINMUX_DATA(SSCK0_PF_MARK, PF0MD_11),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB12, PB12_DATA),
+ PINMUX_GPIO(GPIO_PB11, PB11_DATA),
+ PINMUX_GPIO(GPIO_PB10, PB10_DATA),
+ PINMUX_GPIO(GPIO_PB9, PB9_DATA),
+ PINMUX_GPIO(GPIO_PB8, PB8_DATA),
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC14, PC14_DATA),
+ PINMUX_GPIO(GPIO_PC13, PC13_DATA),
+ PINMUX_GPIO(GPIO_PC12, PC12_DATA),
+ PINMUX_GPIO(GPIO_PC11, PC11_DATA),
+ PINMUX_GPIO(GPIO_PC10, PC10_DATA),
+ PINMUX_GPIO(GPIO_PC9, PC9_DATA),
+ PINMUX_GPIO(GPIO_PC8, PC8_DATA),
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD15, PD15_DATA),
+ PINMUX_GPIO(GPIO_PD14, PD14_DATA),
+ PINMUX_GPIO(GPIO_PD13, PD13_DATA),
+ PINMUX_GPIO(GPIO_PD12, PD12_DATA),
+ PINMUX_GPIO(GPIO_PD11, PD11_DATA),
+ PINMUX_GPIO(GPIO_PD10, PD10_DATA),
+ PINMUX_GPIO(GPIO_PD9, PD9_DATA),
+ PINMUX_GPIO(GPIO_PD8, PD8_DATA),
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE15, PE15_DATA),
+ PINMUX_GPIO(GPIO_PE14, PE14_DATA),
+ PINMUX_GPIO(GPIO_PE13, PE13_DATA),
+ PINMUX_GPIO(GPIO_PE12, PE12_DATA),
+ PINMUX_GPIO(GPIO_PE11, PE11_DATA),
+ PINMUX_GPIO(GPIO_PE10, PE10_DATA),
+ PINMUX_GPIO(GPIO_PE9, PE9_DATA),
+ PINMUX_GPIO(GPIO_PE8, PE8_DATA),
+ PINMUX_GPIO(GPIO_PE7, PE7_DATA),
+ PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF30, PF30_DATA),
+ PINMUX_GPIO(GPIO_PF29, PF29_DATA),
+ PINMUX_GPIO(GPIO_PF28, PF28_DATA),
+ PINMUX_GPIO(GPIO_PF27, PF27_DATA),
+ PINMUX_GPIO(GPIO_PF26, PF26_DATA),
+ PINMUX_GPIO(GPIO_PF25, PF25_DATA),
+ PINMUX_GPIO(GPIO_PF24, PF24_DATA),
+ PINMUX_GPIO(GPIO_PF23, PF23_DATA),
+ PINMUX_GPIO(GPIO_PF22, PF22_DATA),
+ PINMUX_GPIO(GPIO_PF21, PF21_DATA),
+ PINMUX_GPIO(GPIO_PF20, PF20_DATA),
+ PINMUX_GPIO(GPIO_PF19, PF19_DATA),
+ PINMUX_GPIO(GPIO_PF18, PF18_DATA),
+ PINMUX_GPIO(GPIO_PF17, PF17_DATA),
+ PINMUX_GPIO(GPIO_PF16, PF16_DATA),
+ PINMUX_GPIO(GPIO_PF15, PF15_DATA),
+ PINMUX_GPIO(GPIO_PF14, PF14_DATA),
+ PINMUX_GPIO(GPIO_PF13, PF13_DATA),
+ PINMUX_GPIO(GPIO_PF12, PF12_DATA),
+ PINMUX_GPIO(GPIO_PF11, PF11_DATA),
+ PINMUX_GPIO(GPIO_PF10, PF10_DATA),
+ PINMUX_GPIO(GPIO_PF9, PF9_DATA),
+ PINMUX_GPIO(GPIO_PF8, PF8_DATA),
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* INTC */
+ PINMUX_GPIO(GPIO_FN_PINT7_PB, PINT7_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PB, PINT6_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PB, PINT5_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PB, PINT4_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PB, PINT3_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PB, PINT2_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PB, PINT1_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PB, PINT0_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT7_PD, PINT7_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PD, PINT6_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PD, PINT5_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PD, PINT4_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PD, PINT3_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PD, PINT2_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PD, PINT1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PD, PINT0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7_PB, IRQ7_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PB, IRQ6_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PB, IRQ5_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PB, IRQ4_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PB, IRQ3_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PB, IRQ2_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PB, IRQ1_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PB, IRQ0_PB_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7_PD, IRQ7_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PD, IRQ6_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PD, IRQ5_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PD, IRQ4_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PD, IRQ3_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PD, IRQ2_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PD, IRQ1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PD, IRQ0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7_PE, IRQ7_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PE, IRQ6_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PE, IRQ5_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PE, IRQ4_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PE, IRQ3_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PE, IRQ2_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PE, IRQ1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PE, IRQ0_PE_MARK),
+
+ PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_REFOUT, REFOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT_REFOUT, IRQOUT_REFOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_UBCTRG, UBCTRG_MARK),
+
+ /* CAN */
+ PINMUX_GPIO(GPIO_FN_CTX1, CTX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX1, CRX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CTX0, CTX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CTX0_CTX1, CTX0_CTX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0, CRX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0_CRX1, CRX0_CRX1_MARK),
+
+ /* IIC3 */
+ PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_TEND0_PD, TEND0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND0_PE, TEND0_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0_PD, DACK0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0_PE, DACK0_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0_PD, DREQ0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0_PE, DREQ0_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1_PD, TEND1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1_PE, TEND1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1_PD, DACK1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1_PE, DACK1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1_PD, DREQ1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1_PE, DREQ1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_ADTRG_PD, ADTRG_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG_PE, ADTRG_PE_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_MRES, MRES_MARK),
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_CS1, CS1_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6_CE1B, CS6_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5_CE1A, CS5_CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_FRAME, FRAME_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
+ PINMUX_GPIO(GPIO_FN_CASU, CASU_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_RASU, RASU_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_CASL, CASL_MARK),
+ PINMUX_GPIO(GPIO_FN_RASL, RASL_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3_DQMUU_AH_ICIO_WR, WE3_DQMUU_AH_ICIO_WR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2_DQMUL_ICIORD, WE2_DQMUL_ICIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_WE1_DQMLU_WE, WE1_DQMLU_WE_MARK),
+ PINMUX_GPIO(GPIO_FN_WE0_DQMLL, WE0_DQMLL_MARK),
+ PINMUX_GPIO(GPIO_FN_CS3, CS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CS2, CS2_MARK),
+ PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+ PINMUX_GPIO(GPIO_FN_CS7, CS7_MARK),
+
+ /* TMU */
+ PINMUX_GPIO(GPIO_FN_TIOC4D, TIOC4D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4C, TIOC4C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4B, TIOC4B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4A, TIOC4A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3D, TIOC3D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3C, TIOC3C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3B, TIOC3B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3A, TIOC3A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2B, TIOC2B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1B, TIOC1B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2A, TIOC2A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1A, TIOC1A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0D, TIOC0D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0C, TIOC0C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0B, TIOC0B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0A, TIOC0A_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKD_PD, TCLKD_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKC_PD, TCLKC_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKB_PD, TCLKB_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKA_PD, TCLKA_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKD_PF, TCLKD_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKC_PF, TCLKC_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKB_PF, TCLKB_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKA_PF, TCLKA_PF_MARK),
+
+ /* SSU */
+ PINMUX_GPIO(GPIO_FN_SCS0_PD, SCS0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSO0_PD, SSO0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_PD, SSI0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSCK0_PD, SSCK0_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCS0_PF, SCS0_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSO0_PF, SSO0_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_PF, SSI0_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSCK0_PF, SSCK0_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SCS1_PD, SCS1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSO1_PD, SSO1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_PD, SSI1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SSCK1_PD, SSCK1_PD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCS1_PF, SCS1_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSO1_PF, SSO1_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_PF, SSI1_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_SSCK1_PF, SSCK1_PF_MARK),
+
+ /* SCIF */
+ PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+
+ /* SSI */
+ PINMUX_GPIO(GPIO_FN_AUDIO_CLK, AUDIO_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA3, SSIDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS3, SSIWS3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK3, SSISCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA2, SSIDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS2, SSIWS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK2, SSISCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA1, SSIDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS1, SSIWS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK1, SSISCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA0, SSIDATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS0, SSIWS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK0, SSISCK0_MARK),
+
+ /* FLCTL */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
+ PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
+ PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
+ PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
+ PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PBIORL", 0xfffe3886, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PB11_IN, PB11_OUT,
+ PB10_IN, PB10_OUT,
+ PB9_IN, PB9_OUT,
+ PB8_IN, PB8_OUT,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCRL4", 0xfffe3890, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCRL3", 0xfffe3892, 16, 4) {
+ PB11MD_0, PB11MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB10MD_0, PB10MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB9MD_00, PB9MD_01, PB9MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB8MD_00, PB8MD_01, PB8MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCRL2", 0xfffe3894, 16, 4) {
+ PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCRL1", 0xfffe3896, 16, 4) {
+ PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("IFCR", 0xfffe38a2, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB12IRQ_00, PB12IRQ_01, PB12IRQ_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCIORL", 0xfffe3906, 16, 1) {
+ 0, 0,
+ PC14_IN, PC14_OUT,
+ PC13_IN, PC13_OUT,
+ PC12_IN, PC12_OUT,
+ PC11_IN, PC11_OUT,
+ PC10_IN, PC10_OUT,
+ PC9_IN, PC9_OUT,
+ PC8_IN, PC8_OUT,
+ PC7_IN, PC7_OUT,
+ PC6_IN, PC6_OUT,
+ PC5_IN, PC5_OUT,
+ PC4_IN, PC4_OUT,
+ PC3_IN, PC3_OUT,
+ PC2_IN, PC2_OUT,
+ PC1_IN, PC1_OUT,
+ PC0_IN, PC0_OUT }
+ },
+ { PINMUX_CFG_REG("PCCRL4", 0xfffe3910, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC14MD_0, PC14MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC13MD_0, PC13MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC12MD_0, PC12MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCRL3", 0xfffe3912, 16, 4) {
+ PC11MD_00, PC11MD_01, PC11MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC10MD_00, PC10MD_01, PC10MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC9MD_0, PC9MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC8MD_0, PC8MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCRL2", 0xfffe3914, 16, 4) {
+ PC7MD_0, PC7MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC6MD_0, PC6MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC5MD_0, PC5MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC4MD_0, PC4MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCRL1", 0xfffe3916, 16, 4) {
+ PC3MD_0, PC3MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC2MD_0, PC2MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC1MD_0, PC1MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC0MD_00, PC0MD_01, PC0MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDIORL", 0xfffe3986, 16, 1) {
+ PD15_IN, PD15_OUT,
+ PD14_IN, PD14_OUT,
+ PD13_IN, PD13_OUT,
+ PD12_IN, PD12_OUT,
+ PD11_IN, PD11_OUT,
+ PD10_IN, PD10_OUT,
+ PD9_IN, PD9_OUT,
+ PD8_IN, PD8_OUT,
+ PD7_IN, PD7_OUT,
+ PD6_IN, PD6_OUT,
+ PD5_IN, PD5_OUT,
+ PD4_IN, PD4_OUT,
+ PD3_IN, PD3_OUT,
+ PD2_IN, PD2_OUT,
+ PD1_IN, PD1_OUT,
+ PD0_IN, PD0_OUT }
+ },
+ { PINMUX_CFG_REG("PDCRL4", 0xfffe3990, 16, 4) {
+ PD15MD_000, PD15MD_001, PD15MD_010, 0,
+ PD15MD_100, PD15MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD14MD_000, PD14MD_001, PD14MD_010, 0,
+ 0, PD14MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD13MD_000, PD13MD_001, PD13MD_010, 0,
+ PD13MD_100, PD13MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD12MD_000, PD12MD_001, PD12MD_010, 0,
+ PD12MD_100, PD12MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCRL3", 0xfffe3992, 16, 4) {
+ PD11MD_000, PD11MD_001, PD11MD_010, 0,
+ PD11MD_100, PD11MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD10MD_000, PD10MD_001, PD10MD_010, 0,
+ PD10MD_100, PD10MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD9MD_000, PD9MD_001, PD9MD_010, 0,
+ PD9MD_100, PD9MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD8MD_000, PD8MD_001, PD8MD_010, 0,
+ PD8MD_100, PD8MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCRL2", 0xfffe3994, 16, 4) {
+ PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011,
+ PD7MD_100, PD7MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011,
+ PD6MD_100, PD6MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011,
+ PD5MD_100, PD5MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011,
+ PD4MD_100, PD4MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCRL1", 0xfffe3996, 16, 4) {
+ PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011,
+ PD3MD_100, PD3MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011,
+ PD2MD_100, PD2MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011,
+ PD1MD_100, PD1MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011,
+ PD0MD_100, PD0MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PEIORL", 0xfffe3a06, 16, 1) {
+ PE15_IN, PE15_OUT,
+ PE14_IN, PE14_OUT,
+ PE13_IN, PE13_OUT,
+ PE12_IN, PE12_OUT,
+ PE11_IN, PE11_OUT,
+ PE10_IN, PE10_OUT,
+ PE9_IN, PE9_OUT,
+ PE8_IN, PE8_OUT,
+ PE7_IN, PE7_OUT,
+ PE6_IN, PE6_OUT,
+ PE5_IN, PE5_OUT,
+ PE4_IN, PE4_OUT,
+ PE3_IN, PE3_OUT,
+ PE2_IN, PE2_OUT,
+ PE1_IN, PE1_OUT,
+ PE0_IN, PE0_OUT }
+ },
+ { PINMUX_CFG_REG("PECRL4", 0xfffe3a10, 16, 4) {
+ PE15MD_00, PE15MD_01, 0, PE15MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE14MD_00, PE14MD_01, 0, PE14MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE13MD_00, 0, 0, PE13MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE12MD_00, 0, 0, PE12MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECRL3", 0xfffe3a12, 16, 4) {
+ PE11MD_000, PE11MD_001, PE11MD_010, 0,
+ PE11MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE10MD_000, PE10MD_001, PE10MD_010, 0,
+ PE10MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECRL2", 0xfffe3a14, 16, 4) {
+ PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011,
+ PE7MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011,
+ PE6MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011,
+ PE5MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011,
+ PE4MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECRL1", 0xfffe3a16, 16, 4) {
+ PE3MD_00, PE3MD_01, 0, PE3MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE2MD_00, PE2MD_01, 0, PE2MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE0MD_000, PE0MD_001, 0, PE0MD_011,
+ PE0MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFIORH", 0xfffe3a84, 16, 1) {
+ 0, 0,
+ PF30_IN, PF30_OUT,
+ PF29_IN, PF29_OUT,
+ PF28_IN, PF28_OUT,
+ PF27_IN, PF27_OUT,
+ PF26_IN, PF26_OUT,
+ PF25_IN, PF25_OUT,
+ PF24_IN, PF24_OUT,
+ PF23_IN, PF23_OUT,
+ PF22_IN, PF22_OUT,
+ PF21_IN, PF21_OUT,
+ PF20_IN, PF20_OUT,
+ PF19_IN, PF19_OUT,
+ PF18_IN, PF18_OUT,
+ PF17_IN, PF17_OUT,
+ PF16_IN, PF16_OUT }
+ },
+ { PINMUX_CFG_REG("PFIORL", 0xfffe3a86, 16, 1) {
+ PF15_IN, PF15_OUT,
+ PF14_IN, PF14_OUT,
+ PF13_IN, PF13_OUT,
+ PF12_IN, PF12_OUT,
+ PF11_IN, PF11_OUT,
+ PF10_IN, PF10_OUT,
+ PF9_IN, PF9_OUT,
+ PF8_IN, PF8_OUT,
+ PF7_IN, PF7_OUT,
+ PF6_IN, PF6_OUT,
+ PF5_IN, PF5_OUT,
+ PF4_IN, PF4_OUT,
+ PF3_IN, PF3_OUT,
+ PF2_IN, PF2_OUT,
+ PF1_IN, PF1_OUT,
+ PF0_IN, PF0_OUT }
+ },
+ { PINMUX_CFG_REG("PFCRH4", 0xfffe3a88, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF30MD_0, PF30MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF29MD_0, PF29MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF28MD_0, PF28MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRH3", 0xfffe3a8a, 16, 4) {
+ PF27MD_0, PF27MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF26MD_0, PF26MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF25MD_0, PF25MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF24MD_0, PF24MD_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRH2", 0xfffe3a8c, 16, 4) {
+ PF23MD_00, PF23MD_01, PF23MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF22MD_00, PF22MD_01, PF22MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF21MD_00, PF21MD_01, PF21MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF20MD_00, PF20MD_01, PF20MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRH1", 0xfffe3a8e, 16, 4) {
+ PF19MD_00, PF19MD_01, PF19MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF18MD_00, PF18MD_01, PF18MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF17MD_00, PF17MD_01, PF17MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF16MD_00, PF16MD_01, PF16MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRL4", 0xfffe3a90, 16, 4) {
+ PF15MD_00, PF15MD_01, PF15MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF14MD_00, PF14MD_01, PF14MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF13MD_00, PF13MD_01, PF13MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF12MD_00, PF12MD_01, PF12MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRL3", 0xfffe3a92, 16, 4) {
+ PF11MD_00, PF11MD_01, PF11MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF10MD_00, PF10MD_01, PF10MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF9MD_00, PF9MD_01, PF9MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF8MD_00, PF8MD_01, PF8MD_10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRL2", 0xfffe3a94, 16, 4) {
+ PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCRL1", 0xfffe3a96, 16, 4) {
+ PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADRL", 0xfffe3802, 16) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDRL", 0xfffe3882, 16) {
+ 0, 0, 0, PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDRL", 0xfffe3902, 16) {
+ 0, PC14_DATA, PC13_DATA, PC12_DATA,
+ PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDRL", 0xfffe3982, 16) {
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDRL", 0xfffe3a02, 16) {
+ PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA,
+ PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDRH", 0xfffe3a80, 16) {
+ 0, PF30_DATA, PF29_DATA, PF28_DATA,
+ PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA,
+ PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
+ PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA }
+ },
+ { PINMUX_DATA_REG("PFDRL", 0xfffe3a82, 16) {
+ PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7203_pinmux_info = {
+ .name = "sh7203_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_LCD_DATA0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
new file mode 100644
index 000000000000..2ba5639dcf34
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -0,0 +1,2131 @@
+/*
+ * SH7264 Pinmux
+ *
+ * Copyright (C) 2012 Renesas Electronics Europe Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7264.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ /* Port A */
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ /* Port B */
+ PB22_DATA, PB21_DATA, PB20_DATA,
+ PB19_DATA, PB18_DATA, PB17_DATA, PB16_DATA,
+ PB15_DATA, PB14_DATA, PB13_DATA, PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA,
+ /* Port C */
+ PC10_DATA, PC9_DATA, PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ /* Port D */
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ /* Port E */
+ PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ /* Port F */
+ PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ /* Port G */
+ PG24_DATA,
+ PG23_DATA, PG22_DATA, PG21_DATA, PG20_DATA,
+ PG19_DATA, PG18_DATA, PG17_DATA, PG16_DATA,
+ PG15_DATA, PG14_DATA, PG13_DATA, PG12_DATA,
+ PG11_DATA, PG10_DATA, PG9_DATA, PG8_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+ /* Port H */
+ /* NOTE - Port H does not have a Data Register, but the PH pin data
+ is accessible through the PH Port Register */
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ /* Port I - not on device */
+ /* Port J */
+ PJ12_DATA,
+ PJ11_DATA, PJ10_DATA, PJ9_DATA, PJ8_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA,
+ /* Port K */
+ PK12_DATA,
+ PK11_DATA, PK10_DATA, PK9_DATA, PK8_DATA,
+ PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA,
+ PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ FORCE_IN,
+ /* Port A */
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ /* Port B */
+ PB22_IN, PB21_IN, PB20_IN,
+ PB19_IN, PB18_IN, PB17_IN, PB16_IN,
+ PB15_IN, PB14_IN, PB13_IN, PB12_IN,
+ PB11_IN, PB10_IN, PB9_IN, PB8_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN,
+ /* Port C */
+ PC10_IN, PC9_IN, PC8_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ /* Port D */
+ PD15_IN, PD14_IN, PD13_IN, PD12_IN,
+ PD11_IN, PD10_IN, PD9_IN, PD8_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ /* Port E */
+ PE5_IN, PE4_IN,
+ PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ /* Port F */
+ PF12_IN,
+ PF11_IN, PF10_IN, PF9_IN, PF8_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ /* Port G */
+ PG24_IN,
+ PG23_IN, PG22_IN, PG21_IN, PG20_IN,
+ PG19_IN, PG18_IN, PG17_IN, PG16_IN,
+ PG15_IN, PG14_IN, PG13_IN, PG12_IN,
+ PG11_IN, PG10_IN, PG9_IN, PG8_IN,
+ PG7_IN, PG6_IN, PG5_IN, PG4_IN,
+ PG3_IN, PG2_IN, PG1_IN, PG0_IN,
+ /* Port H - Port H does not have a Data Register */
+ /* Port I - not on device */
+ /* Port J */
+ PJ12_IN,
+ PJ11_IN, PJ10_IN, PJ9_IN, PJ8_IN,
+ PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN,
+ PJ3_IN, PJ2_IN, PJ1_IN, PJ0_IN,
+ /* Port K */
+ PK12_IN,
+ PK11_IN, PK10_IN, PK9_IN, PK8_IN,
+ PK7_IN, PK6_IN, PK5_IN, PK4_IN,
+ PK3_IN, PK2_IN, PK1_IN, PK0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ FORCE_OUT,
+ /* Port A */
+ PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
+ /* Port B */
+ PB22_OUT, PB21_OUT, PB20_OUT,
+ PB19_OUT, PB18_OUT, PB17_OUT, PB16_OUT,
+ PB15_OUT, PB14_OUT, PB13_OUT, PB12_OUT,
+ PB11_OUT, PB10_OUT, PB9_OUT, PB8_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT,
+ /* Port C */
+ PC10_OUT, PC9_OUT, PC8_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ /* Port D */
+ PD15_OUT, PD14_OUT, PD13_OUT, PD12_OUT,
+ PD11_OUT, PD10_OUT, PD9_OUT, PD8_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ /* Port E */
+ PE5_OUT, PE4_OUT,
+ PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ /* Port F */
+ PF12_OUT,
+ PF11_OUT, PF10_OUT, PF9_OUT, PF8_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ /* Port G */
+ PG24_OUT,
+ PG23_OUT, PG22_OUT, PG21_OUT, PG20_OUT,
+ PG19_OUT, PG18_OUT, PG17_OUT, PG16_OUT,
+ PG15_OUT, PG14_OUT, PG13_OUT, PG12_OUT,
+ PG11_OUT, PG10_OUT, PG9_OUT, PG8_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT,
+ PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT,
+ /* Port H - Port H does not have a Data Register */
+ /* Port I - not on device */
+ /* Port J */
+ PJ12_OUT,
+ PJ11_OUT, PJ10_OUT, PJ9_OUT, PJ8_OUT,
+ PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT,
+ PJ3_OUT, PJ2_OUT, PJ1_OUT, PJ0_OUT,
+ /* Port K */
+ PK12_OUT,
+ PK11_OUT, PK10_OUT, PK9_OUT, PK8_OUT,
+ PK7_OUT, PK6_OUT, PK5_OUT, PK4_OUT,
+ PK3_OUT, PK2_OUT, PK1_OUT, PK0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ /* Port A */
+ PA3_IOR_IN, PA3_IOR_OUT,
+ PA2_IOR_IN, PA2_IOR_OUT,
+ PA1_IOR_IN, PA1_IOR_OUT,
+ PA0_IOR_IN, PA0_IOR_OUT,
+
+ /* Port B */
+ PB11_IOR_IN, PB11_IOR_OUT,
+ PB10_IOR_IN, PB10_IOR_OUT,
+ PB9_IOR_IN, PB9_IOR_OUT,
+ PB8_IOR_IN, PB8_IOR_OUT,
+
+ PB22MD_00, PB22MD_01, PB22MD_10,
+ PB21MD_0, PB21MD_1,
+ PB20MD_0, PB20MD_1,
+ PB19MD_00, PB19MD_01, PB19MD_10, PB19MD_11,
+ PB18MD_00, PB18MD_01, PB18MD_10, PB18MD_11,
+ PB17MD_00, PB17MD_01, PB17MD_10, PB17MD_11,
+ PB16MD_00, PB16MD_01, PB16MD_10, PB16MD_11,
+ PB15MD_00, PB15MD_01, PB15MD_10, PB15MD_11,
+ PB14MD_00, PB14MD_01, PB14MD_10, PB14MD_11,
+ PB13MD_00, PB13MD_01, PB13MD_10, PB13MD_11,
+ PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
+ PB11MD_00, PB11MD_01, PB11MD_10, PB11MD_11,
+ PB10MD_00, PB10MD_01, PB10MD_10, PB10MD_11,
+ PB9MD_00, PB9MD_01, PB9MD_10, PB9MD_11,
+ PB8MD_00, PB8MD_01, PB8MD_10, PB8MD_11,
+ PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
+ PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11,
+ PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11,
+ PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
+ PB3MD_0, PB3MD_1,
+ PB2MD_0, PB2MD_1,
+ PB1MD_0, PB1MD_1,
+
+ /* Port C */
+ PC14_IOR_IN, PC14_IOR_OUT,
+ PC13_IOR_IN, PC13_IOR_OUT,
+ PC12_IOR_IN, PC12_IOR_OUT,
+ PC11_IOR_IN, PC11_IOR_OUT,
+ PC10_IOR_IN, PC10_IOR_OUT,
+ PC9_IOR_IN, PC9_IOR_OUT,
+ PC8_IOR_IN, PC8_IOR_OUT,
+ PC7_IOR_IN, PC7_IOR_OUT,
+ PC6_IOR_IN, PC6_IOR_OUT,
+ PC5_IOR_IN, PC5_IOR_OUT,
+ PC4_IOR_IN, PC4_IOR_OUT,
+ PC3_IOR_IN, PC3_IOR_OUT,
+ PC2_IOR_IN, PC2_IOR_OUT,
+ PC1_IOR_IN, PC1_IOR_OUT,
+ PC0_IOR_IN, PC0_IOR_OUT,
+
+ PC10MD_0, PC10MD_1,
+ PC9MD_0, PC9MD_1,
+ PC8MD_00, PC8MD_01, PC8MD_10, PC8MD_11,
+ PC7MD_00, PC7MD_01, PC7MD_10, PC7MD_11,
+ PC6MD_00, PC6MD_01, PC6MD_10, PC6MD_11,
+ PC5MD_00, PC5MD_01, PC5MD_10, PC5MD_11,
+ PC4MD_0, PC4MD_1,
+ PC3MD_0, PC3MD_1,
+ PC2MD_0, PC2MD_1,
+ PC1MD_0, PC1MD_1,
+ PC0MD_0, PC0MD_1,
+
+ /* Port D */
+ PD15_IOR_IN, PD15_IOR_OUT,
+ PD14_IOR_IN, PD14_IOR_OUT,
+ PD13_IOR_IN, PD13_IOR_OUT,
+ PD12_IOR_IN, PD12_IOR_OUT,
+ PD11_IOR_IN, PD11_IOR_OUT,
+ PD10_IOR_IN, PD10_IOR_OUT,
+ PD9_IOR_IN, PD9_IOR_OUT,
+ PD8_IOR_IN, PD8_IOR_OUT,
+ PD7_IOR_IN, PD7_IOR_OUT,
+ PD6_IOR_IN, PD6_IOR_OUT,
+ PD5_IOR_IN, PD5_IOR_OUT,
+ PD4_IOR_IN, PD4_IOR_OUT,
+ PD3_IOR_IN, PD3_IOR_OUT,
+ PD2_IOR_IN, PD2_IOR_OUT,
+ PD1_IOR_IN, PD1_IOR_OUT,
+ PD0_IOR_IN, PD0_IOR_OUT,
+
+ PD15MD_00, PD15MD_01, PD15MD_10, PD15MD_11,
+ PD14MD_00, PD14MD_01, PD14MD_10, PD14MD_11,
+ PD13MD_00, PD13MD_01, PD13MD_10, PD13MD_11,
+ PD12MD_00, PD12MD_01, PD12MD_10, PD12MD_11,
+ PD11MD_00, PD11MD_01, PD11MD_10, PD11MD_11,
+ PD10MD_00, PD10MD_01, PD10MD_10, PD10MD_11,
+ PD9MD_00, PD9MD_01, PD9MD_10, PD9MD_11,
+ PD8MD_00, PD8MD_01, PD8MD_10, PD8MD_11,
+ PD7MD_00, PD7MD_01, PD7MD_10, PD7MD_11,
+ PD6MD_00, PD6MD_01, PD6MD_10, PD6MD_11,
+ PD5MD_00, PD5MD_01, PD5MD_10, PD5MD_11,
+ PD4MD_00, PD4MD_01, PD4MD_10, PD4MD_11,
+ PD3MD_00, PD3MD_01, PD3MD_10, PD3MD_11,
+ PD2MD_00, PD2MD_01, PD2MD_10, PD2MD_11,
+ PD1MD_00, PD1MD_01, PD1MD_10, PD1MD_11,
+ PD0MD_00, PD0MD_01, PD0MD_10, PD0MD_11,
+
+ /* Port E */
+ PE5_IOR_IN, PE5_IOR_OUT,
+ PE4_IOR_IN, PE4_IOR_OUT,
+ PE3_IOR_IN, PE3_IOR_OUT,
+ PE2_IOR_IN, PE2_IOR_OUT,
+ PE1_IOR_IN, PE1_IOR_OUT,
+ PE0_IOR_IN, PE0_IOR_OUT,
+
+ PE5MD_00, PE5MD_01, PE5MD_10, PE5MD_11,
+ PE4MD_00, PE4MD_01, PE4MD_10, PE4MD_11,
+ PE3MD_00, PE3MD_01, PE3MD_10, PE3MD_11,
+ PE2MD_00, PE2MD_01, PE2MD_10, PE2MD_11,
+ PE1MD_000, PE1MD_001, PE1MD_010, PE1MD_011,
+ PE1MD_100, PE1MD_101, PE1MD_110, PE1MD_111,
+ PE0MD_00, PE0MD_01, PE0MD_10, PE0MD_11,
+
+ /* Port F */
+ PF12_IOR_IN, PF12_IOR_OUT,
+ PF11_IOR_IN, PF11_IOR_OUT,
+ PF10_IOR_IN, PF10_IOR_OUT,
+ PF9_IOR_IN, PF9_IOR_OUT,
+ PF8_IOR_IN, PF8_IOR_OUT,
+ PF7_IOR_IN, PF7_IOR_OUT,
+ PF6_IOR_IN, PF6_IOR_OUT,
+ PF5_IOR_IN, PF5_IOR_OUT,
+ PF4_IOR_IN, PF4_IOR_OUT,
+ PF3_IOR_IN, PF3_IOR_OUT,
+ PF2_IOR_IN, PF2_IOR_OUT,
+ PF1_IOR_IN, PF1_IOR_OUT,
+ PF0_IOR_IN, PF0_IOR_OUT,
+
+ PF12MD_000, PF12MD_001, PF12MD_010, PF12MD_011,
+ PF12MD_100, PF12MD_101, PF12MD_110, PF12MD_111,
+ PF11MD_000, PF11MD_001, PF11MD_010, PF11MD_011,
+ PF11MD_100, PF11MD_101, PF11MD_110, PF11MD_111,
+ PF10MD_000, PF10MD_001, PF10MD_010, PF10MD_011,
+ PF10MD_100, PF10MD_101, PF10MD_110, PF10MD_111,
+ PF9MD_000, PF9MD_001, PF9MD_010, PF9MD_011,
+ PF9MD_100, PF9MD_101, PF9MD_110, PF9MD_111,
+ PF8MD_00, PF8MD_01, PF8MD_10, PF8MD_11,
+ PF7MD_000, PF7MD_001, PF7MD_010, PF7MD_011,
+ PF7MD_100, PF7MD_101, PF7MD_110, PF7MD_111,
+ PF6MD_000, PF6MD_001, PF6MD_010, PF6MD_011,
+ PF6MD_100, PF6MD_101, PF6MD_110, PF6MD_111,
+ PF5MD_000, PF5MD_001, PF5MD_010, PF5MD_011,
+ PF5MD_100, PF5MD_101, PF5MD_110, PF5MD_111,
+ PF4MD_000, PF4MD_001, PF4MD_010, PF4MD_011,
+ PF4MD_100, PF4MD_101, PF4MD_110, PF4MD_111,
+ PF3MD_000, PF3MD_001, PF3MD_010, PF3MD_011,
+ PF3MD_100, PF3MD_101, PF3MD_110, PF3MD_111,
+ PF2MD_000, PF2MD_001, PF2MD_010, PF2MD_011,
+ PF2MD_100, PF2MD_101, PF2MD_110, PF2MD_111,
+ PF1MD_000, PF1MD_001, PF1MD_010, PF1MD_011,
+ PF1MD_100, PF1MD_101, PF1MD_110, PF1MD_111,
+ PF0MD_000, PF0MD_001, PF0MD_010, PF0MD_011,
+ PF0MD_100, PF0MD_101, PF0MD_110, PF0MD_111,
+
+ /* Port G */
+ PG24_IOR_IN, PG24_IOR_OUT,
+ PG23_IOR_IN, PG23_IOR_OUT,
+ PG22_IOR_IN, PG22_IOR_OUT,
+ PG21_IOR_IN, PG21_IOR_OUT,
+ PG20_IOR_IN, PG20_IOR_OUT,
+ PG19_IOR_IN, PG19_IOR_OUT,
+ PG18_IOR_IN, PG18_IOR_OUT,
+ PG17_IOR_IN, PG17_IOR_OUT,
+ PG16_IOR_IN, PG16_IOR_OUT,
+ PG15_IOR_IN, PG15_IOR_OUT,
+ PG14_IOR_IN, PG14_IOR_OUT,
+ PG13_IOR_IN, PG13_IOR_OUT,
+ PG12_IOR_IN, PG12_IOR_OUT,
+ PG11_IOR_IN, PG11_IOR_OUT,
+ PG10_IOR_IN, PG10_IOR_OUT,
+ PG9_IOR_IN, PG9_IOR_OUT,
+ PG8_IOR_IN, PG8_IOR_OUT,
+ PG7_IOR_IN, PG7_IOR_OUT,
+ PG6_IOR_IN, PG6_IOR_OUT,
+ PG5_IOR_IN, PG5_IOR_OUT,
+ PG4_IOR_IN, PG4_IOR_OUT,
+ PG3_IOR_IN, PG3_IOR_OUT,
+ PG2_IOR_IN, PG2_IOR_OUT,
+ PG1_IOR_IN, PG1_IOR_OUT,
+ PG0_IOR_IN, PG0_IOR_OUT,
+
+ PG24MD_00, PG24MD_01, PG24MD_10, PG24MD_11,
+ PG23MD_00, PG23MD_01, PG23MD_10, PG23MD_11,
+ PG22MD_00, PG22MD_01, PG22MD_10, PG22MD_11,
+ PG21MD_00, PG21MD_01, PG21MD_10, PG21MD_11,
+ PG20MD_000, PG20MD_001, PG20MD_010, PG20MD_011,
+ PG20MD_100, PG20MD_101, PG20MD_110, PG20MD_111,
+ PG19MD_000, PG19MD_001, PG19MD_010, PG19MD_011,
+ PG19MD_100, PG19MD_101, PG19MD_110, PG19MD_111,
+ PG18MD_000, PG18MD_001, PG18MD_010, PG18MD_011,
+ PG18MD_100, PG18MD_101, PG18MD_110, PG18MD_111,
+ PG17MD_000, PG17MD_001, PG17MD_010, PG17MD_011,
+ PG17MD_100, PG17MD_101, PG17MD_110, PG17MD_111,
+ PG16MD_000, PG16MD_001, PG16MD_010, PG16MD_011,
+ PG16MD_100, PG16MD_101, PG16MD_110, PG16MD_111,
+ PG15MD_000, PG15MD_001, PG15MD_010, PG15MD_011,
+ PG15MD_100, PG15MD_101, PG15MD_110, PG15MD_111,
+ PG14MD_000, PG14MD_001, PG14MD_010, PG14MD_011,
+ PG14MD_100, PG14MD_101, PG14MD_110, PG14MD_111,
+ PG13MD_000, PG13MD_001, PG13MD_010, PG13MD_011,
+ PG13MD_100, PG13MD_101, PG13MD_110, PG13MD_111,
+ PG12MD_000, PG12MD_001, PG12MD_010, PG12MD_011,
+ PG12MD_100, PG12MD_101, PG12MD_110, PG12MD_111,
+ PG11MD_000, PG11MD_001, PG11MD_010, PG11MD_011,
+ PG11MD_100, PG11MD_101, PG11MD_110, PG11MD_111,
+ PG10MD_000, PG10MD_001, PG10MD_010, PG10MD_011,
+ PG10MD_100, PG10MD_101, PG10MD_110, PG10MD_111,
+ PG9MD_000, PG9MD_001, PG9MD_010, PG9MD_011,
+ PG9MD_100, PG9MD_101, PG9MD_110, PG9MD_111,
+ PG8MD_000, PG8MD_001, PG8MD_010, PG8MD_011,
+ PG8MD_100, PG8MD_101, PG8MD_110, PG8MD_111,
+ PG7MD_00, PG7MD_01, PG7MD_10, PG7MD_11,
+ PG6MD_00, PG6MD_01, PG6MD_10, PG6MD_11,
+ PG5MD_00, PG5MD_01, PG5MD_10, PG5MD_11,
+ PG4MD_00, PG4MD_01, PG4MD_10, PG4MD_11,
+ PG3MD_00, PG3MD_01, PG3MD_10, PG3MD_11,
+ PG2MD_00, PG2MD_01, PG2MD_10, PG2MD_11,
+ PG1MD_00, PG1MD_01, PG1MD_10, PG1MD_11,
+ PG0MD_000, PG0MD_001, PG0MD_010, PG0MD_011,
+ PG0MD_100, PG0MD_101, PG0MD_110, PG0MD_111,
+
+ /* Port H */
+ PH7MD_0, PH7MD_1,
+ PH6MD_0, PH6MD_1,
+ PH5MD_0, PH5MD_1,
+ PH4MD_0, PH4MD_1,
+ PH3MD_0, PH3MD_1,
+ PH2MD_0, PH2MD_1,
+ PH1MD_0, PH1MD_1,
+ PH0MD_0, PH0MD_1,
+
+ /* Port I - not on device */
+
+ /* Port J */
+ PJ11_IOR_IN, PJ11_IOR_OUT,
+ PJ10_IOR_IN, PJ10_IOR_OUT,
+ PJ9_IOR_IN, PJ9_IOR_OUT,
+ PJ8_IOR_IN, PJ8_IOR_OUT,
+ PJ7_IOR_IN, PJ7_IOR_OUT,
+ PJ6_IOR_IN, PJ6_IOR_OUT,
+ PJ5_IOR_IN, PJ5_IOR_OUT,
+ PJ4_IOR_IN, PJ4_IOR_OUT,
+ PJ3_IOR_IN, PJ3_IOR_OUT,
+ PJ2_IOR_IN, PJ2_IOR_OUT,
+ PJ1_IOR_IN, PJ1_IOR_OUT,
+ PJ0_IOR_IN, PJ0_IOR_OUT,
+
+ PJ11MD_00, PJ11MD_01, PJ11MD_10, PJ11MD_11,
+ PJ10MD_00, PJ10MD_01, PJ10MD_10, PJ10MD_11,
+ PJ9MD_00, PJ9MD_01, PJ9MD_10, PJ9MD_11,
+ PJ8MD_00, PJ8MD_01, PJ8MD_10, PJ8MD_11,
+ PJ7MD_00, PJ7MD_01, PJ7MD_10, PJ7MD_11,
+ PJ6MD_00, PJ6MD_01, PJ6MD_10, PJ6MD_11,
+ PJ5MD_00, PJ5MD_01, PJ5MD_10, PJ5MD_11,
+ PJ4MD_00, PJ4MD_01, PJ4MD_10, PJ4MD_11,
+ PJ3MD_00, PJ3MD_01, PJ3MD_10, PJ3MD_11,
+ PJ2MD_000, PJ2MD_001, PJ2MD_010, PJ2MD_011,
+ PJ2MD_100, PJ2MD_101, PJ2MD_110, PJ2MD_111,
+ PJ1MD_000, PJ1MD_001, PJ1MD_010, PJ1MD_011,
+ PJ1MD_100, PJ1MD_101, PJ1MD_110, PJ1MD_111,
+ PJ0MD_000, PJ0MD_001, PJ0MD_010, PJ0MD_011,
+ PJ0MD_100, PJ0MD_101, PJ0MD_110, PJ0MD_111,
+
+ /* Port K */
+ PK11_IOR_IN, PK11_IOR_OUT,
+ PK10_IOR_IN, PK10_IOR_OUT,
+ PK9_IOR_IN, PK9_IOR_OUT,
+ PK8_IOR_IN, PK8_IOR_OUT,
+ PK7_IOR_IN, PK7_IOR_OUT,
+ PK6_IOR_IN, PK6_IOR_OUT,
+ PK5_IOR_IN, PK5_IOR_OUT,
+ PK4_IOR_IN, PK4_IOR_OUT,
+ PK3_IOR_IN, PK3_IOR_OUT,
+ PK2_IOR_IN, PK2_IOR_OUT,
+ PK1_IOR_IN, PK1_IOR_OUT,
+ PK0_IOR_IN, PK0_IOR_OUT,
+
+ PK11MD_00, PK11MD_01, PK11MD_10, PK11MD_11,
+ PK10MD_00, PK10MD_01, PK10MD_10, PK10MD_11,
+ PK9MD_00, PK9MD_01, PK9MD_10, PK9MD_11,
+ PK8MD_00, PK8MD_01, PK8MD_10, PK8MD_11,
+ PK7MD_00, PK7MD_01, PK7MD_10, PK7MD_11,
+ PK6MD_00, PK6MD_01, PK6MD_10, PK6MD_11,
+ PK5MD_00, PK5MD_01, PK5MD_10, PK5MD_11,
+ PK4MD_00, PK4MD_01, PK4MD_10, PK4MD_11,
+ PK3MD_00, PK3MD_01, PK3MD_10, PK3MD_11,
+ PK2MD_00, PK2MD_01, PK2MD_10, PK2MD_11,
+ PK1MD_00, PK1MD_01, PK1MD_10, PK1MD_11,
+ PK0MD_00, PK0MD_01, PK0MD_10, PK0MD_11,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /* Port A */
+
+ /* Port B */
+
+ /* Port C */
+
+ /* Port D */
+
+ /* Port E */
+
+ /* Port F */
+
+ /* Port G */
+
+ /* Port H */
+ PHAN7_MARK, PHAN6_MARK, PHAN5_MARK, PHAN4_MARK,
+ PHAN3_MARK, PHAN2_MARK, PHAN1_MARK, PHAN0_MARK,
+
+ /* Port I - not on device */
+
+ /* Port J */
+
+ /* Port K */
+
+ IRQ7_PC_MARK, IRQ6_PC_MARK, IRQ5_PC_MARK, IRQ4_PC_MARK,
+ IRQ3_PG_MARK, IRQ2_PG_MARK, IRQ1_PJ_MARK, IRQ0_PJ_MARK,
+ IRQ3_PE_MARK, IRQ2_PE_MARK, IRQ1_PE_MARK, IRQ0_PE_MARK,
+
+ PINT7_PG_MARK, PINT6_PG_MARK, PINT5_PG_MARK, PINT4_PG_MARK,
+ PINT3_PG_MARK, PINT2_PG_MARK, PINT1_PG_MARK, PINT0_PG_MARK,
+
+ SD_CD_MARK, SD_D0_MARK, SD_D1_MARK, SD_D2_MARK, SD_D3_MARK,
+ SD_WP_MARK, SD_CLK_MARK, SD_CMD_MARK,
+ CRX0_MARK, CRX1_MARK,
+ CTX0_MARK, CTX1_MARK,
+
+ PWM1A_MARK, PWM1B_MARK, PWM1C_MARK, PWM1D_MARK,
+ PWM1E_MARK, PWM1F_MARK, PWM1G_MARK, PWM1H_MARK,
+ PWM2A_MARK, PWM2B_MARK, PWM2C_MARK, PWM2D_MARK,
+ PWM2E_MARK, PWM2F_MARK, PWM2G_MARK, PWM2H_MARK,
+ IERXD_MARK, IETXD_MARK,
+ CRX0_CRX1_MARK,
+ WDTOVF_MARK,
+
+ CRX0X1_MARK,
+
+ /* DMAC */
+ TEND0_MARK, DACK0_MARK, DREQ0_MARK,
+ TEND1_MARK, DACK1_MARK, DREQ1_MARK,
+
+ /* ADC */
+ ADTRG_MARK,
+
+ /* BSC */
+ A25_MARK, A24_MARK,
+ A23_MARK, A22_MARK, A21_MARK, A20_MARK,
+ A19_MARK, A18_MARK, A17_MARK, A16_MARK,
+ A15_MARK, A14_MARK, A13_MARK, A12_MARK,
+ A11_MARK, A10_MARK, A9_MARK, A8_MARK,
+ A7_MARK, A6_MARK, A5_MARK, A4_MARK,
+ A3_MARK, A2_MARK, A1_MARK, A0_MARK,
+ D15_MARK, D14_MARK, D13_MARK, D12_MARK,
+ D11_MARK, D10_MARK, D9_MARK, D8_MARK,
+ D7_MARK, D6_MARK, D5_MARK, D4_MARK,
+ D3_MARK, D2_MARK, D1_MARK, D0_MARK,
+ BS_MARK,
+ CS4_MARK, CS3_MARK, CS2_MARK, CS1_MARK, CS0_MARK,
+ CS6CE1B_MARK, CS5CE1A_MARK,
+ CE2A_MARK, CE2B_MARK,
+ RD_MARK, RDWR_MARK,
+ ICIOWRAH_MARK,
+ ICIORD_MARK,
+ WE1DQMUWE_MARK,
+ WE0DQML_MARK,
+ RAS_MARK, CAS_MARK, CKE_MARK,
+ WAIT_MARK, BREQ_MARK, BACK_MARK, IOIS16_MARK,
+
+ /* TMU */
+ TIOC0A_MARK, TIOC0B_MARK, TIOC0C_MARK, TIOC0D_MARK,
+ TIOC1A_MARK, TIOC1B_MARK,
+ TIOC2A_MARK, TIOC2B_MARK,
+ TIOC3A_MARK, TIOC3B_MARK, TIOC3C_MARK, TIOC3D_MARK,
+ TIOC4A_MARK, TIOC4B_MARK, TIOC4C_MARK, TIOC4D_MARK,
+ TCLKA_MARK, TCLKB_MARK, TCLKC_MARK, TCLKD_MARK,
+
+ /* SCIF */
+ SCK0_MARK, SCK1_MARK, SCK2_MARK, SCK3_MARK,
+ RXD0_MARK, RXD1_MARK, RXD2_MARK, RXD3_MARK,
+ TXD0_MARK, TXD1_MARK, TXD2_MARK, TXD3_MARK,
+ RXD4_MARK, RXD5_MARK, RXD6_MARK, RXD7_MARK,
+ TXD4_MARK, TXD5_MARK, TXD6_MARK, TXD7_MARK,
+ RTS1_MARK, RTS3_MARK,
+ CTS1_MARK, CTS3_MARK,
+
+ /* RSPI */
+ RSPCK0_MARK, RSPCK1_MARK,
+ MOSI0_MARK, MOSI1_MARK,
+ MISO0_PF12_MARK, MISO1_MARK, MISO1_PG19_MARK,
+ SSL00_MARK, SSL10_MARK,
+
+ /* IIC3 */
+ SCL0_MARK, SCL1_MARK, SCL2_MARK,
+ SDA0_MARK, SDA1_MARK, SDA2_MARK,
+
+ /* SSI */
+ SSISCK0_MARK,
+ SSIWS0_MARK,
+ SSITXD0_MARK,
+ SSIRXD0_MARK,
+ SSIWS1_MARK, SSIWS2_MARK, SSIWS3_MARK,
+ SSISCK1_MARK, SSISCK2_MARK, SSISCK3_MARK,
+ SSIDATA1_MARK, SSIDATA2_MARK, SSIDATA3_MARK,
+ AUDIO_CLK_MARK,
+
+ /* SIOF */ /* NOTE Shares AUDIO_CLK with SSI */
+ SIOFTXD_MARK, SIOFRXD_MARK, SIOFSYNC_MARK, SIOFSCK_MARK,
+
+ /* SPDIF */ /* NOTE Shares AUDIO_CLK with SSI */
+ SPDIF_IN_MARK, SPDIF_OUT_MARK,
+
+ /* NANDFMC */ /* NOTE Controller is not available in boot mode 0 */
+ FCE_MARK,
+ FRB_MARK,
+
+ /* VDC3 */
+ DV_CLK_MARK,
+ DV_VSYNC_MARK, DV_HSYNC_MARK,
+ DV_DATA7_MARK, DV_DATA6_MARK, DV_DATA5_MARK, DV_DATA4_MARK,
+ DV_DATA3_MARK, DV_DATA2_MARK, DV_DATA1_MARK, DV_DATA0_MARK,
+ LCD_CLK_MARK, LCD_EXTCLK_MARK,
+ LCD_VSYNC_MARK, LCD_HSYNC_MARK, LCD_DE_MARK,
+ LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK,
+ LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK,
+ LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK,
+ LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK,
+ LCD_M_DISP_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+
+ /* Port A */
+ PINMUX_DATA(PA3_DATA, PA3_IN),
+ PINMUX_DATA(PA2_DATA, PA2_IN),
+ PINMUX_DATA(PA1_DATA, PA1_IN),
+ PINMUX_DATA(PA0_DATA, PA0_IN),
+
+ /* Port B */
+ PINMUX_DATA(PB22_DATA, PB22MD_00, PB22_IN, PB22_OUT),
+ PINMUX_DATA(A22_MARK, PB22MD_01),
+ PINMUX_DATA(CS4_MARK, PB22MD_10),
+
+ PINMUX_DATA(PB21_DATA, PB21MD_0, PB21_IN, PB21_OUT),
+ PINMUX_DATA(A21_MARK, PB21MD_1),
+ PINMUX_DATA(A20_MARK, PB20MD_1),
+ PINMUX_DATA(A19_MARK, PB19MD_01),
+ PINMUX_DATA(A18_MARK, PB18MD_01),
+ PINMUX_DATA(A17_MARK, PB17MD_01),
+ PINMUX_DATA(A16_MARK, PB16MD_01),
+ PINMUX_DATA(A15_MARK, PB15MD_01),
+ PINMUX_DATA(A14_MARK, PB14MD_01),
+ PINMUX_DATA(A13_MARK, PB13MD_01),
+ PINMUX_DATA(A12_MARK, PB12MD_01),
+ PINMUX_DATA(A11_MARK, PB11MD_01),
+ PINMUX_DATA(A10_MARK, PB10MD_01),
+ PINMUX_DATA(A9_MARK, PB9MD_01),
+ PINMUX_DATA(A8_MARK, PB8MD_01),
+ PINMUX_DATA(A7_MARK, PB7MD_01),
+ PINMUX_DATA(A6_MARK, PB6MD_01),
+ PINMUX_DATA(A5_MARK, PB5MD_01),
+ PINMUX_DATA(A4_MARK, PB4MD_01),
+ PINMUX_DATA(A3_MARK, PB3MD_1),
+ PINMUX_DATA(A2_MARK, PB2MD_1),
+ PINMUX_DATA(A1_MARK, PB1MD_1),
+
+ /* Port C */
+ PINMUX_DATA(PC10_DATA, PC10MD_0),
+ PINMUX_DATA(TIOC2B_MARK, PC10MD_1),
+ PINMUX_DATA(PC9_DATA, PC9MD_0),
+ PINMUX_DATA(TIOC2A_MARK, PC9MD_1),
+ PINMUX_DATA(PC8_DATA, PC8MD_00),
+ PINMUX_DATA(CS3_MARK, PC8MD_01),
+ PINMUX_DATA(TIOC4D_MARK, PC8MD_10),
+ PINMUX_DATA(IRQ7_PC_MARK, PC8MD_11),
+ PINMUX_DATA(PC7_DATA, PC7MD_00),
+ PINMUX_DATA(CKE_MARK, PC7MD_01),
+ PINMUX_DATA(TIOC4C_MARK, PC7MD_10),
+ PINMUX_DATA(IRQ6_PC_MARK, PC7MD_11),
+ PINMUX_DATA(PC6_DATA, PC6MD_00),
+ PINMUX_DATA(CAS_MARK, PC6MD_01),
+ PINMUX_DATA(TIOC4B_MARK, PC6MD_10),
+ PINMUX_DATA(IRQ5_PC_MARK, PC6MD_11),
+ PINMUX_DATA(PC5_DATA, PC5MD_00),
+ PINMUX_DATA(RAS_MARK, PC5MD_01),
+ PINMUX_DATA(TIOC4A_MARK, PC5MD_10),
+ PINMUX_DATA(IRQ4_PC_MARK, PC5MD_11),
+ PINMUX_DATA(PC4_DATA, PC4MD_0),
+ PINMUX_DATA(WE1DQMUWE_MARK, PC4MD_1),
+ PINMUX_DATA(PC3_DATA, PC3MD_0),
+ PINMUX_DATA(WE0DQML_MARK, PC3MD_1),
+ PINMUX_DATA(PC2_DATA, PC2MD_0),
+ PINMUX_DATA(RDWR_MARK, PC2MD_1),
+ PINMUX_DATA(PC1_DATA, PC1MD_0),
+ PINMUX_DATA(RD_MARK, PC1MD_1),
+ PINMUX_DATA(PC0_DATA, PC0MD_0),
+ PINMUX_DATA(CS0_MARK, PC0MD_1),
+
+ /* Port D */
+ PINMUX_DATA(D15_MARK, PD15MD_01),
+ PINMUX_DATA(D14_MARK, PD14MD_01),
+ PINMUX_DATA(D13_MARK, PD13MD_01),
+ PINMUX_DATA(D12_MARK, PD12MD_01),
+ PINMUX_DATA(D11_MARK, PD11MD_01),
+ PINMUX_DATA(D10_MARK, PD10MD_01),
+ PINMUX_DATA(D9_MARK, PD9MD_01),
+ PINMUX_DATA(D8_MARK, PD8MD_01),
+ PINMUX_DATA(D7_MARK, PD7MD_01),
+ PINMUX_DATA(D6_MARK, PD6MD_01),
+ PINMUX_DATA(D5_MARK, PD5MD_01),
+ PINMUX_DATA(D4_MARK, PD4MD_01),
+ PINMUX_DATA(D3_MARK, PD3MD_01),
+ PINMUX_DATA(D2_MARK, PD2MD_01),
+ PINMUX_DATA(D1_MARK, PD1MD_01),
+ PINMUX_DATA(D0_MARK, PD0MD_01),
+
+ /* Port E */
+ PINMUX_DATA(PE5_DATA, PE5MD_00),
+ PINMUX_DATA(SDA2_MARK, PE5MD_01),
+ PINMUX_DATA(DV_HSYNC_MARK, PE5MD_11),
+
+ PINMUX_DATA(PE4_DATA, PE4MD_00),
+ PINMUX_DATA(SCL2_MARK, PE4MD_01),
+ PINMUX_DATA(DV_VSYNC_MARK, PE4MD_11),
+
+ PINMUX_DATA(PE3_DATA, PE3MD_00),
+ PINMUX_DATA(SDA1_MARK, PE3MD_01),
+ PINMUX_DATA(IRQ3_PE_MARK, PE3MD_11),
+
+ PINMUX_DATA(PE2_DATA, PE2MD_00),
+ PINMUX_DATA(SCL1_MARK, PE2MD_01),
+ PINMUX_DATA(IRQ2_PE_MARK, PE2MD_11),
+
+ PINMUX_DATA(PE1_DATA, PE1MD_000),
+ PINMUX_DATA(SDA0_MARK, PE1MD_001),
+ PINMUX_DATA(IOIS16_MARK, PE1MD_010),
+ PINMUX_DATA(IRQ1_PE_MARK, PE1MD_011),
+ PINMUX_DATA(TCLKA_MARK, PE1MD_100),
+ PINMUX_DATA(ADTRG_MARK, PE1MD_101),
+
+ PINMUX_DATA(PE0_DATA, PE0MD_00),
+ PINMUX_DATA(SCL0_MARK, PE0MD_01),
+ PINMUX_DATA(AUDIO_CLK_MARK, PE0MD_10),
+ PINMUX_DATA(IRQ0_PE_MARK, PE0MD_11),
+
+ /* Port F */
+ PINMUX_DATA(PF12_DATA, PF12MD_000),
+ PINMUX_DATA(BS_MARK, PF12MD_001),
+ PINMUX_DATA(MISO0_PF12_MARK, PF12MD_011),
+ PINMUX_DATA(TIOC3D_MARK, PF12MD_100),
+ PINMUX_DATA(SPDIF_OUT_MARK, PF12MD_101),
+
+ PINMUX_DATA(PF11_DATA, PF11MD_000),
+ PINMUX_DATA(A25_MARK, PF11MD_001),
+ PINMUX_DATA(SSIDATA3_MARK, PF11MD_010),
+ PINMUX_DATA(MOSI0_MARK, PF11MD_011),
+ PINMUX_DATA(TIOC3C_MARK, PF11MD_100),
+ PINMUX_DATA(SPDIF_IN_MARK, PF11MD_101),
+
+ PINMUX_DATA(PF10_DATA, PF10MD_000),
+ PINMUX_DATA(A24_MARK, PF10MD_001),
+ PINMUX_DATA(SSIWS3_MARK, PF10MD_010),
+ PINMUX_DATA(SSL00_MARK, PF10MD_011),
+ PINMUX_DATA(TIOC3B_MARK, PF10MD_100),
+ PINMUX_DATA(FCE_MARK, PF10MD_101),
+
+ PINMUX_DATA(PF9_DATA, PF9MD_000),
+ PINMUX_DATA(A23_MARK, PF9MD_001),
+ PINMUX_DATA(SSISCK3_MARK, PF9MD_010),
+ PINMUX_DATA(RSPCK0_MARK, PF9MD_011),
+ PINMUX_DATA(TIOC3A_MARK, PF9MD_100),
+ PINMUX_DATA(FRB_MARK, PF9MD_101),
+
+ PINMUX_DATA(PF8_DATA, PF8MD_00),
+ PINMUX_DATA(CE2B_MARK, PF8MD_01),
+ PINMUX_DATA(SSIDATA3_MARK, PF8MD_10),
+ PINMUX_DATA(DV_CLK_MARK, PF8MD_11),
+
+ PINMUX_DATA(PF7_DATA, PF7MD_000),
+ PINMUX_DATA(CE2A_MARK, PF7MD_001),
+ PINMUX_DATA(SSIWS3_MARK, PF7MD_010),
+ PINMUX_DATA(DV_DATA7_MARK, PF7MD_011),
+ PINMUX_DATA(TCLKD_MARK, PF7MD_100),
+
+ PINMUX_DATA(PF6_DATA, PF6MD_000),
+ PINMUX_DATA(CS6CE1B_MARK, PF6MD_001),
+ PINMUX_DATA(SSISCK3_MARK, PF6MD_010),
+ PINMUX_DATA(DV_DATA6_MARK, PF6MD_011),
+ PINMUX_DATA(TCLKB_MARK, PF6MD_100),
+
+ PINMUX_DATA(PF5_DATA, PF5MD_000),
+ PINMUX_DATA(CS5CE1A_MARK, PF5MD_001),
+ PINMUX_DATA(SSIDATA2_MARK, PF5MD_010),
+ PINMUX_DATA(DV_DATA5_MARK, PF5MD_011),
+ PINMUX_DATA(TCLKC_MARK, PF5MD_100),
+
+ PINMUX_DATA(PF4_DATA, PF4MD_000),
+ PINMUX_DATA(ICIOWRAH_MARK, PF4MD_001),
+ PINMUX_DATA(SSIWS2_MARK, PF4MD_010),
+ PINMUX_DATA(DV_DATA4_MARK, PF4MD_011),
+ PINMUX_DATA(TXD3_MARK, PF4MD_100),
+
+ PINMUX_DATA(PF3_DATA, PF3MD_000),
+ PINMUX_DATA(ICIORD_MARK, PF3MD_001),
+ PINMUX_DATA(SSISCK2_MARK, PF3MD_010),
+ PINMUX_DATA(DV_DATA3_MARK, PF3MD_011),
+ PINMUX_DATA(RXD3_MARK, PF3MD_100),
+
+ PINMUX_DATA(PF2_DATA, PF2MD_000),
+ PINMUX_DATA(BACK_MARK, PF2MD_001),
+ PINMUX_DATA(SSIDATA1_MARK, PF2MD_010),
+ PINMUX_DATA(DV_DATA2_MARK, PF2MD_011),
+ PINMUX_DATA(TXD2_MARK, PF2MD_100),
+ PINMUX_DATA(DACK0_MARK, PF2MD_101),
+
+ PINMUX_DATA(PF1_DATA, PF1MD_000),
+ PINMUX_DATA(BREQ_MARK, PF1MD_001),
+ PINMUX_DATA(SSIWS1_MARK, PF1MD_010),
+ PINMUX_DATA(DV_DATA1_MARK, PF1MD_011),
+ PINMUX_DATA(RXD2_MARK, PF1MD_100),
+ PINMUX_DATA(DREQ0_MARK, PF1MD_101),
+
+ PINMUX_DATA(PF0_DATA, PF0MD_000),
+ PINMUX_DATA(WAIT_MARK, PF0MD_001),
+ PINMUX_DATA(SSISCK1_MARK, PF0MD_010),
+ PINMUX_DATA(DV_DATA0_MARK, PF0MD_011),
+ PINMUX_DATA(SCK2_MARK, PF0MD_100),
+ PINMUX_DATA(TEND0_MARK, PF0MD_101),
+
+ /* Port G */
+ PINMUX_DATA(PG24_DATA, PG24MD_00),
+ PINMUX_DATA(MOSI0_MARK, PG24MD_01),
+ PINMUX_DATA(TIOC0D_MARK, PG24MD_10),
+
+ PINMUX_DATA(PG23_DATA, PG23MD_00),
+ PINMUX_DATA(MOSI1_MARK, PG23MD_01),
+ PINMUX_DATA(TIOC0C_MARK, PG23MD_10),
+
+ PINMUX_DATA(PG22_DATA, PG22MD_00),
+ PINMUX_DATA(SSL10_MARK, PG22MD_01),
+ PINMUX_DATA(TIOC0B_MARK, PG22MD_10),
+
+ PINMUX_DATA(PG21_DATA, PG21MD_00),
+ PINMUX_DATA(RSPCK1_MARK, PG21MD_01),
+ PINMUX_DATA(TIOC0A_MARK, PG21MD_10),
+
+ PINMUX_DATA(PG20_DATA, PG20MD_000),
+ PINMUX_DATA(LCD_EXTCLK_MARK, PG20MD_001),
+ PINMUX_DATA(MISO1_MARK, PG20MD_011),
+ PINMUX_DATA(TXD7_MARK, PG20MD_100),
+
+ PINMUX_DATA(PG19_DATA, PG19MD_000),
+ PINMUX_DATA(LCD_CLK_MARK, PG19MD_001),
+ PINMUX_DATA(TIOC2B_MARK, PG19MD_010),
+ PINMUX_DATA(MISO1_PG19_MARK, PG19MD_011),
+ PINMUX_DATA(RXD7_MARK, PG19MD_100),
+
+ PINMUX_DATA(PG18_DATA, PG18MD_000),
+ PINMUX_DATA(LCD_DE_MARK, PG18MD_001),
+ PINMUX_DATA(TIOC2A_MARK, PG18MD_010),
+ PINMUX_DATA(SSL10_MARK, PG18MD_011),
+ PINMUX_DATA(TXD6_MARK, PG18MD_100),
+
+ PINMUX_DATA(PG17_DATA, PG17MD_000),
+ PINMUX_DATA(LCD_HSYNC_MARK, PG17MD_001),
+ PINMUX_DATA(TIOC1B_MARK, PG17MD_010),
+ PINMUX_DATA(RSPCK1_MARK, PG17MD_011),
+ PINMUX_DATA(RXD6_MARK, PG17MD_100),
+
+ PINMUX_DATA(PG16_DATA, PG16MD_000),
+ PINMUX_DATA(LCD_VSYNC_MARK, PG16MD_001),
+ PINMUX_DATA(TIOC1A_MARK, PG16MD_010),
+ PINMUX_DATA(TXD3_MARK, PG16MD_011),
+ PINMUX_DATA(CTS1_MARK, PG16MD_100),
+
+ PINMUX_DATA(PG15_DATA, PG15MD_000),
+ PINMUX_DATA(LCD_DATA15_MARK, PG15MD_001),
+ PINMUX_DATA(TIOC0D_MARK, PG15MD_010),
+ PINMUX_DATA(RXD3_MARK, PG15MD_011),
+ PINMUX_DATA(RTS1_MARK, PG15MD_100),
+
+ PINMUX_DATA(PG14_DATA, PG14MD_000),
+ PINMUX_DATA(LCD_DATA14_MARK, PG14MD_001),
+ PINMUX_DATA(TIOC0C_MARK, PG14MD_010),
+ PINMUX_DATA(SCK1_MARK, PG14MD_100),
+
+ PINMUX_DATA(PG13_DATA, PG13MD_000),
+ PINMUX_DATA(LCD_DATA13_MARK, PG13MD_001),
+ PINMUX_DATA(TIOC0B_MARK, PG13MD_010),
+ PINMUX_DATA(TXD1_MARK, PG13MD_100),
+
+ PINMUX_DATA(PG12_DATA, PG12MD_000),
+ PINMUX_DATA(LCD_DATA12_MARK, PG12MD_001),
+ PINMUX_DATA(TIOC0A_MARK, PG12MD_010),
+ PINMUX_DATA(RXD1_MARK, PG12MD_100),
+
+ PINMUX_DATA(PG11_DATA, PG11MD_000),
+ PINMUX_DATA(LCD_DATA11_MARK, PG11MD_001),
+ PINMUX_DATA(SSITXD0_MARK, PG11MD_010),
+ PINMUX_DATA(IRQ3_PG_MARK, PG11MD_011),
+ PINMUX_DATA(TXD5_MARK, PG11MD_100),
+ PINMUX_DATA(SIOFTXD_MARK, PG11MD_101),
+
+ PINMUX_DATA(PG10_DATA, PG10MD_000),
+ PINMUX_DATA(LCD_DATA10_MARK, PG10MD_001),
+ PINMUX_DATA(SSIRXD0_MARK, PG10MD_010),
+ PINMUX_DATA(IRQ2_PG_MARK, PG10MD_011),
+ PINMUX_DATA(RXD5_MARK, PG10MD_100),
+ PINMUX_DATA(SIOFRXD_MARK, PG10MD_101),
+
+ PINMUX_DATA(PG9_DATA, PG9MD_000),
+ PINMUX_DATA(LCD_DATA9_MARK, PG9MD_001),
+ PINMUX_DATA(SSIWS0_MARK, PG9MD_010),
+ PINMUX_DATA(TXD4_MARK, PG9MD_100),
+ PINMUX_DATA(SIOFSYNC_MARK, PG9MD_101),
+
+ PINMUX_DATA(PG8_DATA, PG8MD_000),
+ PINMUX_DATA(LCD_DATA8_MARK, PG8MD_001),
+ PINMUX_DATA(SSISCK0_MARK, PG8MD_010),
+ PINMUX_DATA(RXD4_MARK, PG8MD_100),
+ PINMUX_DATA(SIOFSCK_MARK, PG8MD_101),
+
+ PINMUX_DATA(PG7_DATA, PG7MD_00),
+ PINMUX_DATA(LCD_DATA7_MARK, PG7MD_01),
+ PINMUX_DATA(SD_CD_MARK, PG7MD_10),
+ PINMUX_DATA(PINT7_PG_MARK, PG7MD_11),
+
+ PINMUX_DATA(PG6_DATA, PG6MD_00),
+ PINMUX_DATA(LCD_DATA6_MARK, PG6MD_01),
+ PINMUX_DATA(SD_WP_MARK, PG6MD_10),
+ PINMUX_DATA(PINT6_PG_MARK, PG6MD_11),
+
+ PINMUX_DATA(PG5_DATA, PG5MD_00),
+ PINMUX_DATA(LCD_DATA5_MARK, PG5MD_01),
+ PINMUX_DATA(SD_D1_MARK, PG5MD_10),
+ PINMUX_DATA(PINT5_PG_MARK, PG5MD_11),
+
+ PINMUX_DATA(PG4_DATA, PG4MD_00),
+ PINMUX_DATA(LCD_DATA4_MARK, PG4MD_01),
+ PINMUX_DATA(SD_D0_MARK, PG4MD_10),
+ PINMUX_DATA(PINT4_PG_MARK, PG4MD_11),
+
+ PINMUX_DATA(PG3_DATA, PG3MD_00),
+ PINMUX_DATA(LCD_DATA3_MARK, PG3MD_01),
+ PINMUX_DATA(SD_CLK_MARK, PG3MD_10),
+ PINMUX_DATA(PINT3_PG_MARK, PG3MD_11),
+
+ PINMUX_DATA(PG2_DATA, PG2MD_00),
+ PINMUX_DATA(LCD_DATA2_MARK, PG2MD_01),
+ PINMUX_DATA(SD_CMD_MARK, PG2MD_10),
+ PINMUX_DATA(PINT2_PG_MARK, PG2MD_11),
+
+ PINMUX_DATA(PG1_DATA, PG1MD_00),
+ PINMUX_DATA(LCD_DATA1_MARK, PG1MD_01),
+ PINMUX_DATA(SD_D3_MARK, PG1MD_10),
+ PINMUX_DATA(PINT1_PG_MARK, PG1MD_11),
+
+ PINMUX_DATA(PG0_DATA, PG0MD_000),
+ PINMUX_DATA(LCD_DATA0_MARK, PG0MD_001),
+ PINMUX_DATA(SD_D2_MARK, PG0MD_010),
+ PINMUX_DATA(PINT0_PG_MARK, PG0MD_011),
+ PINMUX_DATA(WDTOVF_MARK, PG0MD_100),
+
+ /* Port H */
+ PINMUX_DATA(PH7_DATA, PH7MD_0),
+ PINMUX_DATA(PHAN7_MARK, PH7MD_1),
+
+ PINMUX_DATA(PH6_DATA, PH6MD_0),
+ PINMUX_DATA(PHAN6_MARK, PH6MD_1),
+
+ PINMUX_DATA(PH5_DATA, PH5MD_0),
+ PINMUX_DATA(PHAN5_MARK, PH5MD_1),
+
+ PINMUX_DATA(PH4_DATA, PH4MD_0),
+ PINMUX_DATA(PHAN4_MARK, PH4MD_1),
+
+ PINMUX_DATA(PH3_DATA, PH3MD_0),
+ PINMUX_DATA(PHAN3_MARK, PH3MD_1),
+
+ PINMUX_DATA(PH2_DATA, PH2MD_0),
+ PINMUX_DATA(PHAN2_MARK, PH2MD_1),
+
+ PINMUX_DATA(PH1_DATA, PH1MD_0),
+ PINMUX_DATA(PHAN1_MARK, PH1MD_1),
+
+ PINMUX_DATA(PH0_DATA, PH0MD_0),
+ PINMUX_DATA(PHAN0_MARK, PH0MD_1),
+
+ /* Port I - not on device */
+
+ /* Port J */
+ PINMUX_DATA(PJ11_DATA, PJ11MD_00),
+ PINMUX_DATA(PWM2H_MARK, PJ11MD_01),
+ PINMUX_DATA(DACK1_MARK, PJ11MD_10),
+
+ PINMUX_DATA(PJ10_DATA, PJ10MD_00),
+ PINMUX_DATA(PWM2G_MARK, PJ10MD_01),
+ PINMUX_DATA(DREQ1_MARK, PJ10MD_10),
+
+ PINMUX_DATA(PJ9_DATA, PJ9MD_00),
+ PINMUX_DATA(PWM2F_MARK, PJ9MD_01),
+ PINMUX_DATA(TEND1_MARK, PJ9MD_10),
+
+ PINMUX_DATA(PJ8_DATA, PJ8MD_00),
+ PINMUX_DATA(PWM2E_MARK, PJ8MD_01),
+ PINMUX_DATA(RTS3_MARK, PJ8MD_10),
+
+ PINMUX_DATA(PJ7_DATA, PJ7MD_00),
+ PINMUX_DATA(TIOC1B_MARK, PJ7MD_01),
+ PINMUX_DATA(CTS3_MARK, PJ7MD_10),
+
+ PINMUX_DATA(PJ6_DATA, PJ6MD_00),
+ PINMUX_DATA(TIOC1A_MARK, PJ6MD_01),
+ PINMUX_DATA(SCK3_MARK, PJ6MD_10),
+
+ PINMUX_DATA(PJ5_DATA, PJ5MD_00),
+ PINMUX_DATA(IERXD_MARK, PJ5MD_01),
+ PINMUX_DATA(TXD3_MARK, PJ5MD_10),
+
+ PINMUX_DATA(PJ4_DATA, PJ4MD_00),
+ PINMUX_DATA(IETXD_MARK, PJ4MD_01),
+ PINMUX_DATA(RXD3_MARK, PJ4MD_10),
+
+ PINMUX_DATA(PJ3_DATA, PJ3MD_00),
+ PINMUX_DATA(CRX1_MARK, PJ3MD_01),
+ PINMUX_DATA(CRX0X1_MARK, PJ3MD_10),
+ PINMUX_DATA(IRQ1_PJ_MARK, PJ3MD_11),
+
+ PINMUX_DATA(PJ2_DATA, PJ2MD_000),
+ PINMUX_DATA(CTX1_MARK, PJ2MD_001),
+ PINMUX_DATA(CRX0_CRX1_MARK, PJ2MD_010),
+ PINMUX_DATA(CS2_MARK, PJ2MD_011),
+ PINMUX_DATA(SCK0_MARK, PJ2MD_100),
+ PINMUX_DATA(LCD_M_DISP_MARK, PJ2MD_101),
+
+ PINMUX_DATA(PJ1_DATA, PJ1MD_000),
+ PINMUX_DATA(CRX0_MARK, PJ1MD_001),
+ PINMUX_DATA(IERXD_MARK, PJ1MD_010),
+ PINMUX_DATA(IRQ0_PJ_MARK, PJ1MD_011),
+ PINMUX_DATA(RXD0_MARK, PJ1MD_100),
+
+ PINMUX_DATA(PJ0_DATA, PJ0MD_000),
+ PINMUX_DATA(CTX0_MARK, PJ0MD_001),
+ PINMUX_DATA(IERXD_MARK, PJ0MD_010),
+ PINMUX_DATA(CS1_MARK, PJ0MD_011),
+ PINMUX_DATA(TXD0_MARK, PJ0MD_100),
+ PINMUX_DATA(A0_MARK, PJ0MD_101),
+
+ /* Port K */
+ PINMUX_DATA(PK11_DATA, PK11MD_00),
+ PINMUX_DATA(PWM2D_MARK, PK11MD_01),
+ PINMUX_DATA(SSITXD0_MARK, PK11MD_10),
+
+ PINMUX_DATA(PK10_DATA, PK10MD_00),
+ PINMUX_DATA(PWM2C_MARK, PK10MD_01),
+ PINMUX_DATA(SSIRXD0_MARK, PK10MD_10),
+
+ PINMUX_DATA(PK9_DATA, PK9MD_00),
+ PINMUX_DATA(PWM2B_MARK, PK9MD_01),
+ PINMUX_DATA(SSIWS0_MARK, PK9MD_10),
+
+ PINMUX_DATA(PK8_DATA, PK8MD_00),
+ PINMUX_DATA(PWM2A_MARK, PK8MD_01),
+ PINMUX_DATA(SSISCK0_MARK, PK8MD_10),
+
+ PINMUX_DATA(PK7_DATA, PK7MD_00),
+ PINMUX_DATA(PWM1H_MARK, PK7MD_01),
+ PINMUX_DATA(SD_CD_MARK, PK7MD_10),
+
+ PINMUX_DATA(PK6_DATA, PK6MD_00),
+ PINMUX_DATA(PWM1G_MARK, PK6MD_01),
+ PINMUX_DATA(SD_WP_MARK, PK6MD_10),
+
+ PINMUX_DATA(PK5_DATA, PK5MD_00),
+ PINMUX_DATA(PWM1F_MARK, PK5MD_01),
+ PINMUX_DATA(SD_D1_MARK, PK5MD_10),
+
+ PINMUX_DATA(PK4_DATA, PK4MD_00),
+ PINMUX_DATA(PWM1E_MARK, PK4MD_01),
+ PINMUX_DATA(SD_D0_MARK, PK4MD_10),
+
+ PINMUX_DATA(PK3_DATA, PK3MD_00),
+ PINMUX_DATA(PWM1D_MARK, PK3MD_01),
+ PINMUX_DATA(SD_CLK_MARK, PK3MD_10),
+
+ PINMUX_DATA(PK2_DATA, PK2MD_00),
+ PINMUX_DATA(PWM1C_MARK, PK2MD_01),
+ PINMUX_DATA(SD_CMD_MARK, PK2MD_10),
+
+ PINMUX_DATA(PK1_DATA, PK1MD_00),
+ PINMUX_DATA(PWM1B_MARK, PK1MD_01),
+ PINMUX_DATA(SD_D3_MARK, PK1MD_10),
+
+ PINMUX_DATA(PK0_DATA, PK0MD_00),
+ PINMUX_DATA(PWM1A_MARK, PK0MD_01),
+ PINMUX_DATA(SD_D2_MARK, PK0MD_10),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+
+ /* Port A */
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* Port B */
+ PINMUX_GPIO(GPIO_PB22, PB22_DATA),
+ PINMUX_GPIO(GPIO_PB21, PB21_DATA),
+ PINMUX_GPIO(GPIO_PB20, PB20_DATA),
+ PINMUX_GPIO(GPIO_PB19, PB19_DATA),
+ PINMUX_GPIO(GPIO_PB18, PB18_DATA),
+ PINMUX_GPIO(GPIO_PB17, PB17_DATA),
+ PINMUX_GPIO(GPIO_PB16, PB16_DATA),
+ PINMUX_GPIO(GPIO_PB15, PB15_DATA),
+ PINMUX_GPIO(GPIO_PB14, PB14_DATA),
+ PINMUX_GPIO(GPIO_PB13, PB13_DATA),
+ PINMUX_GPIO(GPIO_PB12, PB12_DATA),
+ PINMUX_GPIO(GPIO_PB11, PB11_DATA),
+ PINMUX_GPIO(GPIO_PB10, PB10_DATA),
+ PINMUX_GPIO(GPIO_PB9, PB9_DATA),
+ PINMUX_GPIO(GPIO_PB8, PB8_DATA),
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+
+ /* Port C */
+ PINMUX_GPIO(GPIO_PC10, PC10_DATA),
+ PINMUX_GPIO(GPIO_PC9, PC9_DATA),
+ PINMUX_GPIO(GPIO_PC8, PC8_DATA),
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* Port D */
+ PINMUX_GPIO(GPIO_PD15, PD15_DATA),
+ PINMUX_GPIO(GPIO_PD14, PD14_DATA),
+ PINMUX_GPIO(GPIO_PD13, PD13_DATA),
+ PINMUX_GPIO(GPIO_PD12, PD12_DATA),
+ PINMUX_GPIO(GPIO_PD11, PD11_DATA),
+ PINMUX_GPIO(GPIO_PD10, PD10_DATA),
+ PINMUX_GPIO(GPIO_PD9, PD9_DATA),
+ PINMUX_GPIO(GPIO_PD8, PD8_DATA),
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* Port E */
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* Port F */
+ PINMUX_GPIO(GPIO_PF12, PF12_DATA),
+ PINMUX_GPIO(GPIO_PF11, PF11_DATA),
+ PINMUX_GPIO(GPIO_PF10, PF10_DATA),
+ PINMUX_GPIO(GPIO_PF9, PF9_DATA),
+ PINMUX_GPIO(GPIO_PF8, PF8_DATA),
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* Port G */
+ PINMUX_GPIO(GPIO_PG24, PG24_DATA),
+ PINMUX_GPIO(GPIO_PG23, PG23_DATA),
+ PINMUX_GPIO(GPIO_PG22, PG22_DATA),
+ PINMUX_GPIO(GPIO_PG21, PG21_DATA),
+ PINMUX_GPIO(GPIO_PG20, PG20_DATA),
+ PINMUX_GPIO(GPIO_PG19, PG19_DATA),
+ PINMUX_GPIO(GPIO_PG18, PG18_DATA),
+ PINMUX_GPIO(GPIO_PG17, PG17_DATA),
+ PINMUX_GPIO(GPIO_PG16, PG16_DATA),
+ PINMUX_GPIO(GPIO_PG15, PG15_DATA),
+ PINMUX_GPIO(GPIO_PG14, PG14_DATA),
+ PINMUX_GPIO(GPIO_PG13, PG13_DATA),
+ PINMUX_GPIO(GPIO_PG12, PG12_DATA),
+ PINMUX_GPIO(GPIO_PG11, PG11_DATA),
+ PINMUX_GPIO(GPIO_PG10, PG10_DATA),
+ PINMUX_GPIO(GPIO_PG9, PG9_DATA),
+ PINMUX_GPIO(GPIO_PG8, PG8_DATA),
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+ PINMUX_GPIO(GPIO_PG4, PG4_DATA),
+ PINMUX_GPIO(GPIO_PG3, PG3_DATA),
+ PINMUX_GPIO(GPIO_PG2, PG2_DATA),
+ PINMUX_GPIO(GPIO_PG1, PG1_DATA),
+ PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+
+ /* Port H - Port H does not have a Data Register */
+
+ /* Port I - not on device */
+
+ /* Port J */
+ PINMUX_GPIO(GPIO_PJ11, PJ11_DATA),
+ PINMUX_GPIO(GPIO_PJ10, PJ10_DATA),
+ PINMUX_GPIO(GPIO_PJ9, PJ9_DATA),
+ PINMUX_GPIO(GPIO_PJ8, PJ8_DATA),
+ PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
+ PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
+ PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
+ PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
+ PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
+ PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
+ PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
+ PINMUX_GPIO(GPIO_PJ0, PJ0_DATA),
+
+ /* Port K */
+ PINMUX_GPIO(GPIO_PK11, PK11_DATA),
+ PINMUX_GPIO(GPIO_PK10, PK10_DATA),
+ PINMUX_GPIO(GPIO_PK9, PK9_DATA),
+ PINMUX_GPIO(GPIO_PK8, PK8_DATA),
+ PINMUX_GPIO(GPIO_PK7, PK7_DATA),
+ PINMUX_GPIO(GPIO_PK6, PK6_DATA),
+ PINMUX_GPIO(GPIO_PK5, PK5_DATA),
+ PINMUX_GPIO(GPIO_PK4, PK4_DATA),
+ PINMUX_GPIO(GPIO_PK3, PK3_DATA),
+ PINMUX_GPIO(GPIO_PK2, PK2_DATA),
+ PINMUX_GPIO(GPIO_PK1, PK1_DATA),
+ PINMUX_GPIO(GPIO_PK0, PK0_DATA),
+
+ /* INTC */
+ PINMUX_GPIO(GPIO_FN_PINT7_PG, PINT7_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PG, PINT6_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PG, PINT5_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PG, PINT4_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PG, PINT3_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PG, PINT2_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PG, PINT1_PG_MARK),
+
+ PINMUX_GPIO(GPIO_FN_IRQ7_PC, IRQ7_PC_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PC, IRQ6_PC_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PC, IRQ5_PC_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PC, IRQ4_PC_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PG, IRQ3_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PG, IRQ2_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PJ, IRQ1_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PJ, IRQ0_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PE, IRQ3_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PE, IRQ2_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PE, IRQ1_PE_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PE, IRQ0_PE_MARK),
+
+ /* WDT */
+ PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+
+ /* CAN */
+ PINMUX_GPIO(GPIO_FN_CTX1, CTX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX1, CRX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CTX0, CTX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0, CRX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0_CRX1, CRX0_CRX1_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
+ PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
+ PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
+ PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
+ PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
+ PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
+ PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
+ PINMUX_GPIO(GPIO_FN_A13, A13_MARK),
+ PINMUX_GPIO(GPIO_FN_A12, A12_MARK),
+ PINMUX_GPIO(GPIO_FN_A11, A11_MARK),
+ PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
+ PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
+ PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
+ PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
+ PINMUX_GPIO(GPIO_FN_A6, A6_MARK),
+ PINMUX_GPIO(GPIO_FN_A5, A5_MARK),
+ PINMUX_GPIO(GPIO_FN_A4, A4_MARK),
+ PINMUX_GPIO(GPIO_FN_A3, A3_MARK),
+ PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
+ PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
+ PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
+ PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
+ PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
+ PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
+ PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
+ PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
+ PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
+ PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
+ PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
+ PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
+ PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
+ PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
+ PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
+ PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
+ PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CS3, CS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CS2, CS2_MARK),
+ PINMUX_GPIO(GPIO_FN_CS1, CS1_MARK),
+ PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6CE1B, CS6CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5CE1A, CS5CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
+ PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_ICIOWRAH, ICIOWRAH_MARK),
+ PINMUX_GPIO(GPIO_FN_ICIORD, ICIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_WE1DQMUWE, WE1DQMUWE_MARK),
+ PINMUX_GPIO(GPIO_FN_WE0DQML, WE0DQML_MARK),
+ PINMUX_GPIO(GPIO_FN_RAS, RAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CAS, CAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+
+ /* TMU */
+ PINMUX_GPIO(GPIO_FN_TIOC4D, TIOC4D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4C, TIOC4C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4B, TIOC4B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4A, TIOC4A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3D, TIOC3D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3C, TIOC3C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3B, TIOC3B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3A, TIOC3A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2B, TIOC2B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1B, TIOC1B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2A, TIOC2A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1A, TIOC1A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0D, TIOC0D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0C, TIOC0C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0B, TIOC0B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0A, TIOC0A_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKD, TCLKD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKC, TCLKC_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKB, TCLKB_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKA, TCLKA_MARK),
+
+ /* SCIF */
+ PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD5, TXD5_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD5, RXD5_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD6, TXD6_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD6, RXD6_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD7, TXD7_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD7, RXD7_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS1, RTS1_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS1, CTS1_MARK),
+
+ /* RSPI */
+ PINMUX_GPIO(GPIO_FN_RSPCK0, RSPCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_MOSI0, MOSI0_MARK),
+ PINMUX_GPIO(GPIO_FN_MISO0_PF12, MISO0_PF12_MARK),
+ PINMUX_GPIO(GPIO_FN_MISO1, MISO1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSL00, SSL00_MARK),
+ PINMUX_GPIO(GPIO_FN_RSPCK1, RSPCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_MOSI1, MOSI1_MARK),
+ PINMUX_GPIO(GPIO_FN_MISO1_PG19, MISO1_PG19_MARK),
+ PINMUX_GPIO(GPIO_FN_SSL10, SSL10_MARK),
+
+ /* IIC3 */
+ PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+
+ /* SSI */
+ PINMUX_GPIO(GPIO_FN_SSISCK0, SSISCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS0, SSIWS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSITXD0, SSITXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIRXD0, SSIRXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS1, SSIWS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS2, SSIWS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS3, SSIWS3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK1, SSISCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK2, SSISCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK3, SSISCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA1, SSIDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA2, SSIDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA3, SSIDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDIO_CLK, AUDIO_CLK_MARK),
+
+ /* SIOF */ /* NOTE Shares AUDIO_CLK with SSI */
+ PINMUX_GPIO(GPIO_FN_SIOFTXD, SIOFTXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOFRXD, SIOFRXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOFSYNC, SIOFSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOFSCK, SIOFSCK_MARK),
+
+ /* SPDIF */ /* NOTE Shares AUDIO_CLK with SSI */
+ PINMUX_GPIO(GPIO_FN_SPDIF_IN, SPDIF_IN_MARK),
+ PINMUX_GPIO(GPIO_FN_SPDIF_OUT, SPDIF_OUT_MARK),
+
+ /* NANDFMC */ /* NOTE Controller is not available in boot mode 0 */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+
+ /* VDC3 */
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+
+ PINMUX_GPIO(GPIO_FN_DV_DATA7, DV_DATA7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA6, DV_DATA6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA5, DV_DATA5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA4, DV_DATA4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA3, DV_DATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA2, DV_DATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA1, DV_DATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA0, DV_DATA0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_EXTCLK, LCD_EXTCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VSYNC, LCD_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+};
+
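+/*
+ * Pin function control registers.  Each PINMUX_CFG_REG(name, addr, r_width,
+ * f_width) entry covers an r_width-bit register split into f_width-bit
+ * fields and lists 2^f_width enum values per field, starting with the most
+ * significant field; reserved fields are padded with zeros (see PGCR7 and
+ * PGCR6 below, which describe single-field registers).
+ */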
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PA3_IN, PA3_OUT,
+ PA2_IN, PA2_OUT,
+ PA1_IN, PA1_OUT,
+ PA0_IN, PA0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PBCR5", 0xfffe3824, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PB22MD_00, PB22MD_01, PB22MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PB21MD_0, PB21MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB20MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+
+ },
+ { PINMUX_CFG_REG("PBCR4", 0xfffe3826, 16, 4) {
+ 0, PB19MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB18MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB17MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB16MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR3", 0xfffe3828, 16, 4) {
+ 0, PB15MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB14MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB13MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB12MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR2", 0xfffe382a, 16, 4) {
+ 0, PB11MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB10MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB9MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB8MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR1", 0xfffe382c, 16, 4) {
+ 0, PB7MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB6MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB5MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB4MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR0", 0xfffe382e, 16, 4) {
+ 0, PB3MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB2MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB1MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PBIOR1", 0xfffe3830, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ PB22_IN, PB22_OUT,
+ PB21_IN, PB21_OUT,
+ PB20_IN, PB20_OUT,
+ PB19_IN, PB19_OUT,
+ PB18_IN, PB18_OUT,
+ PB17_IN, PB17_OUT,
+ PB16_IN, PB16_OUT }
+ },
+
+ { PINMUX_CFG_REG("PBIOR0", 0xfffe3832, 16, 1) {
+ PB15_IN, PB15_OUT,
+ PB14_IN, PB14_OUT,
+ PB13_IN, PB13_OUT,
+ PB12_IN, PB12_OUT,
+ PB11_IN, PB11_OUT,
+ PB10_IN, PB10_OUT,
+ PB9_IN, PB9_OUT,
+ PB8_IN, PB8_OUT,
+ PB7_IN, PB7_OUT,
+ PB6_IN, PB6_OUT,
+ PB5_IN, PB5_OUT,
+ PB4_IN, PB4_OUT,
+ PB3_IN, PB3_OUT,
+ PB2_IN, PB2_OUT,
+ PB1_IN, PB1_OUT,
+ 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PCCR2", 0xfffe384a, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC10MD_0, PC10MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC9MD_0, PC9MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC8MD_00, PC8MD_01, PC8MD_10, PC8MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCR1", 0xfffe384c, 16, 4) {
+ PC7MD_00, PC7MD_01, PC7MD_10, PC7MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC6MD_00, PC6MD_01, PC6MD_10, PC6MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC5MD_00, PC5MD_01, PC5MD_10, PC5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC4MD_0, PC4MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCR0", 0xfffe384e, 16, 4) {
+ PC3MD_0, PC3MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC2MD_0, PC2MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC1MD_0, PC1MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC0MD_0, PC0MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ PC10_IN, PC10_OUT,
+ PC9_IN, PC9_OUT,
+ PC8_IN, PC8_OUT,
+ PC7_IN, PC7_OUT,
+ PC6_IN, PC6_OUT,
+ PC5_IN, PC5_OUT,
+ PC4_IN, PC4_OUT,
+ PC3_IN, PC3_OUT,
+ PC2_IN, PC2_OUT,
+ PC1_IN, PC1_OUT,
+ PC0_IN, PC0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PDCR3", 0xfffe3868, 16, 4) {
+ 0, PD15MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD14MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD13MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD12MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR2", 0xfffe386a, 16, 4) {
+ 0, PD11MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD10MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD9MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD8MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR1", 0xfffe386c, 16, 4) {
+ 0, PD7MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD6MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD5MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD4MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR0", 0xfffe386e, 16, 4) {
+ 0, PD3MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD2MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD1MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PD0MD_01, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PDIOR0", 0xfffe3872, 16, 1) {
+ PD15_IN, PD15_OUT,
+ PD14_IN, PD14_OUT,
+ PD13_IN, PD13_OUT,
+ PD12_IN, PD12_OUT,
+ PD11_IN, PD11_OUT,
+ PD10_IN, PD10_OUT,
+ PD9_IN, PD9_OUT,
+ PD8_IN, PD8_OUT,
+ PD7_IN, PD7_OUT,
+ PD6_IN, PD6_OUT,
+ PD5_IN, PD5_OUT,
+ PD4_IN, PD4_OUT,
+ PD3_IN, PD3_OUT,
+ PD2_IN, PD2_OUT,
+ PD1_IN, PD1_OUT,
+ PD0_IN, PD0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PECR1", 0xfffe388c, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE5MD_00, PE5MD_01, 0, PE5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE4MD_00, PE4MD_01, 0, PE4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PECR0", 0xfffe388e, 16, 4) {
+ PE3MD_00, PE3MD_01, 0, PE3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE2MD_00, PE2MD_01, 0, PE2MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE1MD_000, PE1MD_001, PE1MD_010, PE1MD_011,
+ PE1MD_100, PE1MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE0MD_00, PE0MD_01, PE0MD_10, PE0MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PEIOR0", 0xfffe3892, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PE5_IN, PE5_OUT,
+ PE4_IN, PE4_OUT,
+ PE3_IN, PE3_OUT,
+ PE2_IN, PE2_OUT,
+ PE1_IN, PE1_OUT,
+ PE0_IN, PE0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF12MD_000, PF12MD_001, 0, PF12MD_011,
+ PF12MD_100, PF12MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PFCR2", 0xfffe38aa, 16, 4) {
+ PF11MD_000, PF11MD_001, PF11MD_010, PF11MD_011,
+ PF11MD_100, PF11MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF10MD_000, PF10MD_001, PF10MD_010, PF10MD_011,
+ PF10MD_100, PF10MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF9MD_000, PF9MD_001, PF9MD_010, PF9MD_011,
+ PF9MD_100, PF9MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF8MD_00, PF8MD_01, PF8MD_10, PF8MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PFCR1", 0xfffe38ac, 16, 4) {
+ PF7MD_000, PF7MD_001, PF7MD_010, PF7MD_011,
+ PF7MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF6MD_000, PF6MD_001, PF6MD_010, PF6MD_011,
+ PF6MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF5MD_000, PF5MD_001, PF5MD_010, PF5MD_011,
+ PF5MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF4MD_000, PF4MD_001, PF4MD_010, PF4MD_011,
+ PF4MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PFCR0", 0xfffe38ae, 16, 4) {
+ PF3MD_000, PF3MD_001, PF3MD_010, PF3MD_011,
+ PF3MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF2MD_000, PF2MD_001, PF2MD_010, PF2MD_011,
+ PF2MD_100, PF2MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF1MD_000, PF1MD_001, PF1MD_010, PF1MD_011,
+ PF1MD_100, PF1MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PFIOR0", 0xfffe38b2, 16, 1) {
+ 0, 0, 0, 0, 0, 0,
+ PF12_IN, PF12_OUT,
+ PF11_IN, PF11_OUT,
+ PF10_IN, PF10_OUT,
+ PF9_IN, PF9_OUT,
+ PF8_IN, PF8_OUT,
+ PF7_IN, PF7_OUT,
+ PF6_IN, PF6_OUT,
+ PF5_IN, PF5_OUT,
+ PF4_IN, PF4_OUT,
+ PF3_IN, PF3_OUT,
+ PF2_IN, PF2_OUT,
+ PF1_IN, PF1_OUT,
+ PF0_IN, PF0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PGCR7", 0xfffe38c0, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG0MD_000, PG0MD_001, PG0MD_010, PG0MD_011,
+ PG0MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGCR6", 0xfffe38c2, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG24MD_00, PG24MD_01, PG24MD_10, PG24MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGCR5", 0xfffe38c4, 16, 4) {
+ PG23MD_00, PG23MD_01, PG23MD_10, PG23MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG22MD_00, PG22MD_01, PG22MD_10, PG22MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG21MD_00, PG21MD_01, PG21MD_10, PG21MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG20MD_000, PG20MD_001, PG20MD_010, PG20MD_011,
+ PG20MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGCR4", 0xfffe38c6, 16, 4) {
+ PG19MD_000, PG19MD_001, PG19MD_010, PG19MD_011,
+ PG19MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG18MD_000, PG18MD_001, PG18MD_010, PG18MD_011,
+ PG18MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG17MD_000, PG17MD_001, PG17MD_010, PG17MD_011,
+ PG17MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG16MD_000, PG16MD_001, PG16MD_010, PG16MD_011,
+ PG16MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGCR3", 0xfffe38c8, 16, 4) {
+ PG15MD_000, PG15MD_001, PG15MD_010, PG15MD_011,
+ PG15MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG14MD_000, PG14MD_001, PG14MD_010, 0,
+ PG14MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG13MD_000, PG13MD_001, PG13MD_010, 0,
+ PG13MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG12MD_000, PG12MD_001, PG12MD_010, 0,
+ PG12MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR2", 0xfffe38ca, 16, 4) {
+ PG11MD_000, PG11MD_001, PG11MD_010, PG11MD_011,
+ PG11MD_100, PG11MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG10MD_000, PG10MD_001, PG10MD_010, PG10MD_011,
+ PG10MD_100, PG10MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG9MD_000, PG9MD_001, PG9MD_010, PG9MD_011,
+ PG9MD_100, PG9MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG8MD_000, PG8MD_001, PG8MD_010, PG8MD_011,
+ PG8MD_100, PG8MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGCR1", 0xfffe38cc, 16, 4) {
+ PG7MD_00, PG7MD_01, PG7MD_10, PG7MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG6MD_00, PG6MD_01, PG6MD_10, PG6MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG5MD_00, PG5MD_01, PG5MD_10, PG5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG4MD_00, PG4MD_01, PG4MD_10, PG4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR0", 0xfffe38ce, 16, 4) {
+ PG3MD_00, PG3MD_01, PG3MD_10, PG3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG2MD_00, PG2MD_01, PG2MD_10, PG2MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG1MD_00, PG1MD_01, PG1MD_10, PG1MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGIOR1", 0xfffe38d0, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ PG24_IN, PG24_OUT,
+ PG23_IN, PG23_OUT,
+ PG22_IN, PG22_OUT,
+ PG21_IN, PG21_OUT,
+ PG20_IN, PG20_OUT,
+ PG19_IN, PG19_OUT,
+ PG18_IN, PG18_OUT,
+ PG17_IN, PG17_OUT,
+ PG16_IN, PG16_OUT }
+ },
+
+ { PINMUX_CFG_REG("PGIOR0", 0xfffe38d2, 16, 1) {
+ PG15_IN, PG15_OUT,
+ PG14_IN, PG14_OUT,
+ PG13_IN, PG13_OUT,
+ PG12_IN, PG12_OUT,
+ PG11_IN, PG11_OUT,
+ PG10_IN, PG10_OUT,
+ PG9_IN, PG9_OUT,
+ PG8_IN, PG8_OUT,
+ PG7_IN, PG7_OUT,
+ PG6_IN, PG6_OUT,
+ PG5_IN, PG5_OUT,
+ PG4_IN, PG4_OUT,
+ PG3_IN, PG3_OUT,
+ PG2_IN, PG2_OUT,
+ PG1_IN, PG1_OUT,
+ PG0_IN, PG0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PHCR1", 0xfffe38ec, 16, 4) {
+ PH7MD_0, PH7MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PH6MD_0, PH6MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PH5MD_0, PH5MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PH4MD_0, PH4MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PHCR0", 0xfffe38ee, 16, 4) {
+ PH3MD_0, PH3MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PH2MD_0, PH2MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PH1MD_0, PH1MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PH0MD_0, PH0MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PJCR2", 0xfffe390a, 16, 4) {
+ PJ11MD_00, PJ11MD_01, PJ11MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ10MD_00, PJ10MD_01, PJ10MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ9MD_00, PJ9MD_01, PJ9MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ8MD_00, PJ8MD_01, PJ8MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR1", 0xfffe390c, 16, 4) {
+ PJ7MD_00, PJ7MD_01, PJ7MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ6MD_00, PJ6MD_01, PJ6MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ5MD_00, PJ5MD_01, PJ5MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ4MD_00, PJ4MD_01, PJ4MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR0", 0xfffe390e, 16, 4) {
+ PJ3MD_00, PJ3MD_01, PJ3MD_10, PJ3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ2MD_000, PJ2MD_001, PJ2MD_010, PJ2MD_011,
+ PJ2MD_100, PJ2MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ1MD_000, PJ1MD_001, PJ1MD_010, PJ1MD_011,
+ PJ1MD_100, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ0MD_000, PJ0MD_001, PJ0MD_010, PJ0MD_011,
+ PJ0MD_100, PJ0MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJIOR0", 0xfffe3912, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PJ11_IN, PJ11_OUT,
+ PJ10_IN, PJ10_OUT,
+ PJ9_IN, PJ9_OUT,
+ PJ8_IN, PJ8_OUT,
+ PJ7_IN, PJ7_OUT,
+ PJ6_IN, PJ6_OUT,
+ PJ5_IN, PJ5_OUT,
+ PJ4_IN, PJ4_OUT,
+ PJ3_IN, PJ3_OUT,
+ PJ2_IN, PJ2_OUT,
+ PJ1_IN, PJ1_OUT,
+ PJ0_IN, PJ0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PKCR2", 0xfffe392a, 16, 4) {
+ PK11MD_00, PK11MD_01, PK11MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK10MD_00, PK10MD_01, PK10MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK9MD_00, PK9MD_01, PK9MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK8MD_00, PK8MD_01, PK8MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PKCR1", 0xfffe392c, 16, 4) {
+ PK7MD_00, PK7MD_01, PK7MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK6MD_00, PK6MD_01, PK6MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK5MD_00, PK5MD_01, PK5MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK4MD_00, PK4MD_01, PK4MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PKCR0", 0xfffe392e, 16, 4) {
+ PK3MD_00, PK3MD_01, PK3MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK2MD_00, PK2MD_01, PK2MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK1MD_00, PK1MD_01, PK1MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK0MD_00, PK0MD_01, PK0MD_10, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PKIOR0", 0xfffe3932, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PK11_IN, PK11_OUT,
+ PK10_IN, PK10_OUT,
+ PK9_IN, PK9_OUT,
+ PK8_IN, PK8_OUT,
+ PK7_IN, PK7_OUT,
+ PK6_IN, PK6_OUT,
+ PK5_IN, PK5_OUT,
+ PK4_IN, PK4_OUT,
+ PK3_IN, PK3_OUT,
+ PK2_IN, PK2_OUT,
+ PK1_IN, PK1_OUT,
+ PK0_IN, PK0_OUT }
+ },
+ {}
+};
+
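+/*
+ * Port data registers.  Each PINMUX_DATA_REG(name, addr, width) entry lists
+ * one enum per register bit, from the most significant bit down to bit 0;
+ * bit positions with no pin behind them are listed as zero.
+ */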
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR1", 0xfffe3814, 16) {
+ 0, 0, 0, 0, 0, 0, 0, PA3_DATA,
+ 0, 0, 0, 0, 0, 0, 0, PA2_DATA }
+ },
+
+ { PINMUX_DATA_REG("PADR0", 0xfffe3816, 16) {
+ 0, 0, 0, 0, 0, 0, 0, PA1_DATA,
+ 0, 0, 0, 0, 0, 0, 0, PA0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PBDR1", 0xfffe3834, 16) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB22_DATA, PB21_DATA, PB20_DATA,
+ PB19_DATA, PB18_DATA, PB17_DATA, PB16_DATA }
+ },
+
+ { PINMUX_DATA_REG("PBDR0", 0xfffe3836, 16) {
+ PB15_DATA, PB14_DATA, PB13_DATA, PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, 0 }
+ },
+
+ { PINMUX_DATA_REG("PCDR0", 0xfffe3856, 16) {
+ 0, 0, 0, 0,
+ 0, PC10_DATA, PC9_DATA, PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PDDR0", 0xfffe3876, 16) {
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PEDR0", 0xfffe3896, 16) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PFDR0", 0xfffe38b6, 16) {
+ 0, 0, 0, PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PGDR1", 0xfffe38d4, 16) {
+ 0, 0, 0, 0, 0, 0, 0, PG24_DATA,
+ PG23_DATA, PG22_DATA, PG21_DATA, PG20_DATA,
+ PG19_DATA, PG18_DATA, PG17_DATA, PG16_DATA }
+ },
+
+ { PINMUX_DATA_REG("PGDR0", 0xfffe38d6, 16) {
+ PG15_DATA, PG14_DATA, PG13_DATA, PG12_DATA,
+ PG11_DATA, PG10_DATA, PG9_DATA, PG8_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR0", 0xfffe3916, 16) {
+ 0, 0, 0, PJ12_DATA,
+ PJ11_DATA, PJ10_DATA, PJ9_DATA, PJ8_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR0", 0xfffe3936, 16) {
+ 0, 0, 0, PK12_DATA,
+ PK11_DATA, PK10_DATA, PK9_DATA, PK8_DATA,
+ PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA,
+ PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA }
+ },
+ { }
+};
+
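+/*
+ * SoC description passed to the common SH PFC core: the GPIO table, the
+ * configuration and data register descriptions above, and the enum ranges
+ * that tell the core which identifiers are data, input, output, function
+ * and mark values.
+ */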
+struct sh_pfc_soc_info sh7264_pinmux_info = {
+ .name = "sh7264_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA3,
+ .last_gpio = GPIO_FN_LCD_M_DISP,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
new file mode 100644
index 000000000000..b1b5d6d4ad76
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -0,0 +1,2834 @@
+/*
+ * SH7269 Pinmux
+ *
+ * Copyright (C) 2012 Renesas Electronics Europe Ltd
+ * Copyright (C) 2012 Phil Edworthy
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7269.h>
+
+#include "sh_pfc.h"
+
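+/*
+ * All pinmux identifiers live in one enum space: pin data values first,
+ * then the input and output states, the function selectors (the *MD_* and
+ * *_IOR_* values programmed into the port control and I/O registers), and
+ * finally the function marks referenced from pinmux_data[] below.
+ */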
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ /* Port A */
+ PA1_DATA, PA0_DATA,
+ /* Port B */
+ PB22_DATA, PB21_DATA, PB20_DATA,
+ PB19_DATA, PB18_DATA, PB17_DATA, PB16_DATA,
+ PB15_DATA, PB14_DATA, PB13_DATA, PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA,
+ /* Port C */
+ PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ /* Port D */
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ /* Port E */
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ /* Port F */
+ PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
+ PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA,
+ PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ /* Port G */
+ PG27_DATA, PG26_DATA, PG25_DATA, PG24_DATA,
+ PG23_DATA, PG22_DATA, PG21_DATA, PG20_DATA,
+ PG19_DATA, PG18_DATA, PG17_DATA, PG16_DATA,
+ PG15_DATA, PG14_DATA, PG13_DATA, PG12_DATA,
+ PG11_DATA, PG10_DATA, PG9_DATA, PG8_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+ /* Port H */
+ /* NOTE - Port H does not have a Data Register, but PH Data is
+ connected to PH Port Register */
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ /* Port I - not on device */
+ /* Port J */
+ PJ31_DATA, PJ30_DATA, PJ29_DATA, PJ28_DATA,
+ PJ27_DATA, PJ26_DATA, PJ25_DATA, PJ24_DATA,
+ PJ23_DATA, PJ22_DATA, PJ21_DATA, PJ20_DATA,
+ PJ19_DATA, PJ18_DATA, PJ17_DATA, PJ16_DATA,
+ PJ15_DATA, PJ14_DATA, PJ13_DATA, PJ12_DATA,
+ PJ11_DATA, PJ10_DATA, PJ9_DATA, PJ8_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ FORCE_IN,
+ /* Port A */
+ PA1_IN, PA0_IN,
+ /* Port B */
+ PB22_IN, PB21_IN, PB20_IN,
+ PB19_IN, PB18_IN, PB17_IN, PB16_IN,
+ PB15_IN, PB14_IN, PB13_IN, PB12_IN,
+ PB11_IN, PB10_IN, PB9_IN, PB8_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN,
+ /* Port C */
+ PC8_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ /* Port D */
+ PD15_IN, PD14_IN, PD13_IN, PD12_IN,
+ PD11_IN, PD10_IN, PD9_IN, PD8_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ /* Port E */
+ PE7_IN, PE6_IN, PE5_IN, PE4_IN,
+ PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ /* Port F */
+ PF23_IN, PF22_IN, PF21_IN, PF20_IN,
+ PF19_IN, PF18_IN, PF17_IN, PF16_IN,
+ PF15_IN, PF14_IN, PF13_IN, PF12_IN,
+ PF11_IN, PF10_IN, PF9_IN, PF8_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ /* Port G */
+ PG27_IN, PG26_IN, PG25_IN, PG24_IN,
+ PG23_IN, PG22_IN, PG21_IN, PG20_IN,
+ PG19_IN, PG18_IN, PG17_IN, PG16_IN,
+ PG15_IN, PG14_IN, PG13_IN, PG12_IN,
+ PG11_IN, PG10_IN, PG9_IN, PG8_IN,
+ PG7_IN, PG6_IN, PG5_IN, PG4_IN,
+ PG3_IN, PG2_IN, PG1_IN, PG0_IN,
+ /* Port H - Port H does not have a Data Register */
+ /* Port I - not on device */
+ /* Port J */
+ PJ31_IN, PJ30_IN, PJ29_IN, PJ28_IN,
+ PJ27_IN, PJ26_IN, PJ25_IN, PJ24_IN,
+ PJ23_IN, PJ22_IN, PJ21_IN, PJ20_IN,
+ PJ19_IN, PJ18_IN, PJ17_IN, PJ16_IN,
+ PJ15_IN, PJ14_IN, PJ13_IN, PJ12_IN,
+ PJ11_IN, PJ10_IN, PJ9_IN, PJ8_IN,
+ PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN,
+ PJ3_IN, PJ2_IN, PJ1_IN, PJ0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ FORCE_OUT,
+ /* Port A */
+ PA1_OUT, PA0_OUT,
+ /* Port B */
+ PB22_OUT, PB21_OUT, PB20_OUT,
+ PB19_OUT, PB18_OUT, PB17_OUT, PB16_OUT,
+ PB15_OUT, PB14_OUT, PB13_OUT, PB12_OUT,
+ PB11_OUT, PB10_OUT, PB9_OUT, PB8_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT,
+ /* Port C */
+ PC8_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ /* Port D */
+ PD15_OUT, PD14_OUT, PD13_OUT, PD12_OUT,
+ PD11_OUT, PD10_OUT, PD9_OUT, PD8_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ /* Port E */
+ PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT,
+ PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ /* Port F */
+ PF23_OUT, PF22_OUT, PF21_OUT, PF20_OUT,
+ PF19_OUT, PF18_OUT, PF17_OUT, PF16_OUT,
+ PF15_OUT, PF14_OUT, PF13_OUT, PF12_OUT,
+ PF11_OUT, PF10_OUT, PF9_OUT, PF8_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ /* Port G */
+ PG27_OUT, PG26_OUT, PG25_OUT, PG24_OUT,
+ PG23_OUT, PG22_OUT, PG21_OUT, PG20_OUT,
+ PG19_OUT, PG18_OUT, PG17_OUT, PG16_OUT,
+ PG15_OUT, PG14_OUT, PG13_OUT, PG12_OUT,
+ PG11_OUT, PG10_OUT, PG9_OUT, PG8_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT,
+ PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT,
+ /* Port H - Port H does not have a Data Register */
+ /* Port I - not on device */
+ /* Port J */
+ PJ31_OUT, PJ30_OUT, PJ29_OUT, PJ28_OUT,
+ PJ27_OUT, PJ26_OUT, PJ25_OUT, PJ24_OUT,
+ PJ23_OUT, PJ22_OUT, PJ21_OUT, PJ20_OUT,
+ PJ19_OUT, PJ18_OUT, PJ17_OUT, PJ16_OUT,
+ PJ15_OUT, PJ14_OUT, PJ13_OUT, PJ12_OUT,
+ PJ11_OUT, PJ10_OUT, PJ9_OUT, PJ8_OUT,
+ PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT,
+ PJ3_OUT, PJ2_OUT, PJ1_OUT, PJ0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ /* Port A */
+ PA1_IOR_IN, PA1_IOR_OUT,
+ PA0_IOR_IN, PA0_IOR_OUT,
+
+ /* Port B */
+ PB22_IOR_IN, PB22_IOR_OUT,
+ PB21_IOR_IN, PB21_IOR_OUT,
+ PB20_IOR_IN, PB20_IOR_OUT,
+ PB19_IOR_IN, PB19_IOR_OUT,
+ PB18_IOR_IN, PB18_IOR_OUT,
+ PB17_IOR_IN, PB17_IOR_OUT,
+ PB16_IOR_IN, PB16_IOR_OUT,
+
+ PB15_IOR_IN, PB15_IOR_OUT,
+ PB14_IOR_IN, PB14_IOR_OUT,
+ PB13_IOR_IN, PB13_IOR_OUT,
+ PB12_IOR_IN, PB12_IOR_OUT,
+ PB11_IOR_IN, PB11_IOR_OUT,
+ PB10_IOR_IN, PB10_IOR_OUT,
+ PB9_IOR_IN, PB9_IOR_OUT,
+ PB8_IOR_IN, PB8_IOR_OUT,
+
+ PB7_IOR_IN, PB7_IOR_OUT,
+ PB6_IOR_IN, PB6_IOR_OUT,
+ PB5_IOR_IN, PB5_IOR_OUT,
+ PB4_IOR_IN, PB4_IOR_OUT,
+ PB3_IOR_IN, PB3_IOR_OUT,
+ PB2_IOR_IN, PB2_IOR_OUT,
+ PB1_IOR_IN, PB1_IOR_OUT,
+ PB0_IOR_IN, PB0_IOR_OUT,
+
+ PB22MD_000, PB22MD_001, PB22MD_010, PB22MD_011,
+ PB22MD_100, PB22MD_101, PB22MD_110, PB22MD_111,
+ PB21MD_00, PB21MD_01, PB21MD_10, PB21MD_11,
+ PB20MD_000, PB20MD_001, PB20MD_010, PB20MD_011,
+ PB20MD_100, PB20MD_101, PB20MD_110, PB20MD_111,
+ PB19MD_000, PB19MD_001, PB19MD_010, PB19MD_011,
+ PB19MD_100, PB19MD_101, PB19MD_110, PB19MD_111,
+ PB18MD_000, PB18MD_001, PB18MD_010, PB18MD_011,
+ PB18MD_100, PB18MD_101, PB18MD_110, PB18MD_111,
+ PB17MD_000, PB17MD_001, PB17MD_010, PB17MD_011,
+ PB17MD_100, PB17MD_101, PB17MD_110, PB17MD_111,
+ PB16MD_000, PB16MD_001, PB16MD_010, PB16MD_011,
+ PB16MD_100, PB16MD_101, PB16MD_110, PB16MD_111,
+ PB15MD_000, PB15MD_001, PB15MD_010, PB15MD_011,
+ PB15MD_100, PB15MD_101, PB15MD_110, PB15MD_111,
+ PB14MD_000, PB14MD_001, PB14MD_010, PB14MD_011,
+ PB14MD_100, PB14MD_101, PB14MD_110, PB14MD_111,
+ PB13MD_000, PB13MD_001, PB13MD_010, PB13MD_011,
+ PB13MD_100, PB13MD_101, PB13MD_110, PB13MD_111,
+ PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11,
+
+ PB11MD_00, PB11MD_01, PB11MD_10, PB11MD_11,
+ PB10MD_00, PB10MD_01, PB10MD_10, PB10MD_11,
+ PB9MD_00, PB9MD_01, PB9MD_10, PB9MD_11,
+ PB8MD_00, PB8MD_01, PB8MD_10, PB8MD_11,
+
+ PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11,
+ PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11,
+ PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11,
+ PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11,
+
+ PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11,
+ PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11,
+ PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11,
+
+ /* Port C */
+ PC8_IOR_IN, PC8_IOR_OUT,
+ PC7_IOR_IN, PC7_IOR_OUT,
+ PC6_IOR_IN, PC6_IOR_OUT,
+ PC5_IOR_IN, PC5_IOR_OUT,
+ PC4_IOR_IN, PC4_IOR_OUT,
+ PC3_IOR_IN, PC3_IOR_OUT,
+ PC2_IOR_IN, PC2_IOR_OUT,
+ PC1_IOR_IN, PC1_IOR_OUT,
+ PC0_IOR_IN, PC0_IOR_OUT,
+
+ PC8MD_000, PC8MD_001, PC8MD_010, PC8MD_011,
+ PC8MD_100, PC8MD_101, PC8MD_110, PC8MD_111,
+ PC7MD_000, PC7MD_001, PC7MD_010, PC7MD_011,
+ PC7MD_100, PC7MD_101, PC7MD_110, PC7MD_111,
+ PC6MD_000, PC6MD_001, PC6MD_010, PC6MD_011,
+ PC6MD_100, PC6MD_101, PC6MD_110, PC6MD_111,
+ PC5MD_000, PC5MD_001, PC5MD_010, PC5MD_011,
+ PC5MD_100, PC5MD_101, PC5MD_110, PC5MD_111,
+ PC4MD_00, PC4MD_01, PC4MD_10, PC4MD_11,
+
+ PC3MD_00, PC3MD_01, PC3MD_10, PC3MD_11,
+ PC2MD_00, PC2MD_01, PC2MD_10, PC2MD_11,
+ PC1MD_0, PC1MD_1,
+ PC0MD_0, PC0MD_1,
+
+ /* Port D */
+ PD15_IOR_IN, PD15_IOR_OUT,
+ PD14_IOR_IN, PD14_IOR_OUT,
+ PD13_IOR_IN, PD13_IOR_OUT,
+ PD12_IOR_IN, PD12_IOR_OUT,
+ PD11_IOR_IN, PD11_IOR_OUT,
+ PD10_IOR_IN, PD10_IOR_OUT,
+ PD9_IOR_IN, PD9_IOR_OUT,
+ PD8_IOR_IN, PD8_IOR_OUT,
+ PD7_IOR_IN, PD7_IOR_OUT,
+ PD6_IOR_IN, PD6_IOR_OUT,
+ PD5_IOR_IN, PD5_IOR_OUT,
+ PD4_IOR_IN, PD4_IOR_OUT,
+ PD3_IOR_IN, PD3_IOR_OUT,
+ PD2_IOR_IN, PD2_IOR_OUT,
+ PD1_IOR_IN, PD1_IOR_OUT,
+ PD0_IOR_IN, PD0_IOR_OUT,
+
+ PD15MD_00, PD15MD_01, PD15MD_10, PD15MD_11,
+ PD14MD_00, PD14MD_01, PD14MD_10, PD14MD_11,
+ PD13MD_00, PD13MD_01, PD13MD_10, PD13MD_11,
+ PD12MD_00, PD12MD_01, PD12MD_10, PD12MD_11,
+
+ PD11MD_00, PD11MD_01, PD11MD_10, PD11MD_11,
+ PD10MD_00, PD10MD_01, PD10MD_10, PD10MD_11,
+ PD9MD_00, PD9MD_01, PD9MD_10, PD9MD_11,
+ PD8MD_00, PD8MD_01, PD8MD_10, PD8MD_11,
+
+ PD7MD_00, PD7MD_01, PD7MD_10, PD7MD_11,
+ PD6MD_00, PD6MD_01, PD6MD_10, PD6MD_11,
+ PD5MD_00, PD5MD_01, PD5MD_10, PD5MD_11,
+ PD4MD_00, PD4MD_01, PD4MD_10, PD4MD_11,
+
+ PD3MD_00, PD3MD_01, PD3MD_10, PD3MD_11,
+ PD2MD_00, PD2MD_01, PD2MD_10, PD2MD_11,
+ PD1MD_00, PD1MD_01, PD1MD_10, PD1MD_11,
+ PD0MD_00, PD0MD_01, PD0MD_10, PD0MD_11,
+
+ /* Port E */
+ PE7_IOR_IN, PE7_IOR_OUT,
+ PE6_IOR_IN, PE6_IOR_OUT,
+ PE5_IOR_IN, PE5_IOR_OUT,
+ PE4_IOR_IN, PE4_IOR_OUT,
+ PE3_IOR_IN, PE3_IOR_OUT,
+ PE2_IOR_IN, PE2_IOR_OUT,
+ PE1_IOR_IN, PE1_IOR_OUT,
+ PE0_IOR_IN, PE0_IOR_OUT,
+
+ PE7MD_00, PE7MD_01, PE7MD_10, PE7MD_11,
+ PE6MD_00, PE6MD_01, PE6MD_10, PE6MD_11,
+ PE5MD_00, PE5MD_01, PE5MD_10, PE5MD_11,
+ PE4MD_00, PE4MD_01, PE4MD_10, PE4MD_11,
+
+ PE3MD_000, PE3MD_001, PE3MD_010, PE3MD_011,
+ PE3MD_100, PE3MD_101, PE3MD_110, PE3MD_111,
+ PE2MD_000, PE2MD_001, PE2MD_010, PE2MD_011,
+ PE2MD_100, PE2MD_101, PE2MD_110, PE2MD_111,
+ PE1MD_000, PE1MD_001, PE1MD_010, PE1MD_011,
+ PE1MD_100, PE1MD_101, PE1MD_110, PE1MD_111,
+ PE0MD_00, PE0MD_01, PE0MD_10, PE0MD_11,
+
+ /* Port F */
+ PF23_IOR_IN, PF23_IOR_OUT,
+ PF22_IOR_IN, PF22_IOR_OUT,
+ PF21_IOR_IN, PF21_IOR_OUT,
+ PF20_IOR_IN, PF20_IOR_OUT,
+ PF19_IOR_IN, PF19_IOR_OUT,
+ PF18_IOR_IN, PF18_IOR_OUT,
+ PF17_IOR_IN, PF17_IOR_OUT,
+ PF16_IOR_IN, PF16_IOR_OUT,
+ PF15_IOR_IN, PF15_IOR_OUT,
+ PF14_IOR_IN, PF14_IOR_OUT,
+ PF13_IOR_IN, PF13_IOR_OUT,
+ PF12_IOR_IN, PF12_IOR_OUT,
+ PF11_IOR_IN, PF11_IOR_OUT,
+ PF10_IOR_IN, PF10_IOR_OUT,
+ PF9_IOR_IN, PF9_IOR_OUT,
+ PF8_IOR_IN, PF8_IOR_OUT,
+ PF7_IOR_IN, PF7_IOR_OUT,
+ PF6_IOR_IN, PF6_IOR_OUT,
+ PF5_IOR_IN, PF5_IOR_OUT,
+ PF4_IOR_IN, PF4_IOR_OUT,
+ PF3_IOR_IN, PF3_IOR_OUT,
+ PF2_IOR_IN, PF2_IOR_OUT,
+ PF1_IOR_IN, PF1_IOR_OUT,
+ PF0_IOR_IN, PF0_IOR_OUT,
+
+ PF23MD_000, PF23MD_001, PF23MD_010, PF23MD_011,
+ PF23MD_100, PF23MD_101, PF23MD_110, PF23MD_111,
+ PF22MD_000, PF22MD_001, PF22MD_010, PF22MD_011,
+ PF22MD_100, PF22MD_101, PF22MD_110, PF22MD_111,
+ PF21MD_000, PF21MD_001, PF21MD_010, PF21MD_011,
+ PF21MD_100, PF21MD_101, PF21MD_110, PF21MD_111,
+ PF20MD_000, PF20MD_001, PF20MD_010, PF20MD_011,
+ PF20MD_100, PF20MD_101, PF20MD_110, PF20MD_111,
+
+ PF19MD_000, PF19MD_001, PF19MD_010, PF19MD_011,
+ PF19MD_100, PF19MD_101, PF19MD_110, PF19MD_111,
+ PF18MD_000, PF18MD_001, PF18MD_010, PF18MD_011,
+ PF18MD_100, PF18MD_101, PF18MD_110, PF18MD_111,
+ PF17MD_000, PF17MD_001, PF17MD_010, PF17MD_011,
+ PF17MD_100, PF17MD_101, PF17MD_110, PF17MD_111,
+ PF16MD_000, PF16MD_001, PF16MD_010, PF16MD_011,
+ PF16MD_100, PF16MD_101, PF16MD_110, PF16MD_111,
+
+ PF15MD_000, PF15MD_001, PF15MD_010, PF15MD_011,
+ PF15MD_100, PF15MD_101, PF15MD_110, PF15MD_111,
+ PF14MD_000, PF14MD_001, PF14MD_010, PF14MD_011,
+ PF14MD_100, PF14MD_101, PF14MD_110, PF14MD_111,
+ PF13MD_000, PF13MD_001, PF13MD_010, PF13MD_011,
+ PF13MD_100, PF13MD_101, PF13MD_110, PF13MD_111,
+ PF12MD_000, PF12MD_001, PF12MD_010, PF12MD_011,
+ PF12MD_100, PF12MD_101, PF12MD_110, PF12MD_111,
+
+ PF11MD_000, PF11MD_001, PF11MD_010, PF11MD_011,
+ PF11MD_100, PF11MD_101, PF11MD_110, PF11MD_111,
+ PF10MD_000, PF10MD_001, PF10MD_010, PF10MD_011,
+ PF10MD_100, PF10MD_101, PF10MD_110, PF10MD_111,
+ PF9MD_000, PF9MD_001, PF9MD_010, PF9MD_011,
+ PF9MD_100, PF9MD_101, PF9MD_110, PF9MD_111,
+ PF8MD_000, PF8MD_001, PF8MD_010, PF8MD_011,
+ PF8MD_100, PF8MD_101, PF8MD_110, PF8MD_111,
+
+ PF7MD_000, PF7MD_001, PF7MD_010, PF7MD_011,
+ PF7MD_100, PF7MD_101, PF7MD_110, PF7MD_111,
+ PF6MD_000, PF6MD_001, PF6MD_010, PF6MD_011,
+ PF6MD_100, PF6MD_101, PF6MD_110, PF6MD_111,
+ PF5MD_000, PF5MD_001, PF5MD_010, PF5MD_011,
+ PF5MD_100, PF5MD_101, PF5MD_110, PF5MD_111,
+ PF4MD_000, PF4MD_001, PF4MD_010, PF4MD_011,
+ PF4MD_100, PF4MD_101, PF4MD_110, PF4MD_111,
+
+ PF3MD_000, PF3MD_001, PF3MD_010, PF3MD_011,
+ PF3MD_100, PF3MD_101, PF3MD_110, PF3MD_111,
+ PF2MD_000, PF2MD_001, PF2MD_010, PF2MD_011,
+ PF2MD_100, PF2MD_101, PF2MD_110, PF2MD_111,
+ PF1MD_000, PF1MD_001, PF1MD_010, PF1MD_011,
+ PF1MD_100, PF1MD_101, PF1MD_110, PF1MD_111,
+ PF0MD_000, PF0MD_001, PF0MD_010, PF0MD_011,
+ PF0MD_100, PF0MD_101, PF0MD_110, PF0MD_111,
+
+ /* Port G */
+ PG27_IOR_IN, PG27_IOR_OUT,
+ PG26_IOR_IN, PG26_IOR_OUT,
+ PG25_IOR_IN, PG25_IOR_OUT,
+ PG24_IOR_IN, PG24_IOR_OUT,
+ PG23_IOR_IN, PG23_IOR_OUT,
+ PG22_IOR_IN, PG22_IOR_OUT,
+ PG21_IOR_IN, PG21_IOR_OUT,
+ PG20_IOR_IN, PG20_IOR_OUT,
+ PG19_IOR_IN, PG19_IOR_OUT,
+ PG18_IOR_IN, PG18_IOR_OUT,
+ PG17_IOR_IN, PG17_IOR_OUT,
+ PG16_IOR_IN, PG16_IOR_OUT,
+ PG15_IOR_IN, PG15_IOR_OUT,
+ PG14_IOR_IN, PG14_IOR_OUT,
+ PG13_IOR_IN, PG13_IOR_OUT,
+ PG12_IOR_IN, PG12_IOR_OUT,
+ PG11_IOR_IN, PG11_IOR_OUT,
+ PG10_IOR_IN, PG10_IOR_OUT,
+ PG9_IOR_IN, PG9_IOR_OUT,
+ PG8_IOR_IN, PG8_IOR_OUT,
+ PG7_IOR_IN, PG7_IOR_OUT,
+ PG6_IOR_IN, PG6_IOR_OUT,
+ PG5_IOR_IN, PG5_IOR_OUT,
+ PG4_IOR_IN, PG4_IOR_OUT,
+ PG3_IOR_IN, PG3_IOR_OUT,
+ PG2_IOR_IN, PG2_IOR_OUT,
+ PG1_IOR_IN, PG1_IOR_OUT,
+ PG0_IOR_IN, PG0_IOR_OUT,
+
+ PG27MD_00, PG27MD_01, PG27MD_10, PG27MD_11,
+ PG26MD_00, PG26MD_01, PG26MD_10, PG26MD_11,
+ PG25MD_00, PG25MD_01, PG25MD_10, PG25MD_11,
+ PG24MD_00, PG24MD_01, PG24MD_10, PG24MD_11,
+
+ PG23MD_000, PG23MD_001, PG23MD_010, PG23MD_011,
+ PG23MD_100, PG23MD_101, PG23MD_110, PG23MD_111,
+ PG22MD_000, PG22MD_001, PG22MD_010, PG22MD_011,
+ PG22MD_100, PG22MD_101, PG22MD_110, PG22MD_111,
+ PG21MD_000, PG21MD_001, PG21MD_010, PG21MD_011,
+ PG21MD_100, PG21MD_101, PG21MD_110, PG21MD_111,
+ PG20MD_000, PG20MD_001, PG20MD_010, PG20MD_011,
+ PG20MD_100, PG20MD_101, PG20MD_110, PG20MD_111,
+
+ PG19MD_000, PG19MD_001, PG19MD_010, PG19MD_011,
+ PG19MD_100, PG19MD_101, PG19MD_110, PG19MD_111,
+ PG18MD_000, PG18MD_001, PG18MD_010, PG18MD_011,
+ PG18MD_100, PG18MD_101, PG18MD_110, PG18MD_111,
+ PG17MD_00, PG17MD_01, PG17MD_10, PG17MD_11,
+ PG16MD_00, PG16MD_01, PG16MD_10, PG16MD_11,
+
+ PG15MD_00, PG15MD_01, PG15MD_10, PG15MD_11,
+ PG14MD_00, PG14MD_01, PG14MD_10, PG14MD_11,
+ PG13MD_00, PG13MD_01, PG13MD_10, PG13MD_11,
+ PG12MD_00, PG12MD_01, PG12MD_10, PG12MD_11,
+
+ PG11MD_000, PG11MD_001, PG11MD_010, PG11MD_011,
+ PG11MD_100, PG11MD_101, PG11MD_110, PG11MD_111,
+ PG10MD_000, PG10MD_001, PG10MD_010, PG10MD_011,
+ PG10MD_100, PG10MD_101, PG10MD_110, PG10MD_111,
+ PG9MD_000, PG9MD_001, PG9MD_010, PG9MD_011,
+ PG9MD_100, PG9MD_101, PG9MD_110, PG9MD_111,
+ PG8MD_000, PG8MD_001, PG8MD_010, PG8MD_011,
+ PG8MD_100, PG8MD_101, PG8MD_110, PG8MD_111,
+
+ PG7MD_000, PG7MD_001, PG7MD_010, PG7MD_011,
+ PG7MD_100, PG7MD_101, PG7MD_110, PG7MD_111,
+ PG6MD_000, PG6MD_001, PG6MD_010, PG6MD_011,
+ PG6MD_100, PG6MD_101, PG6MD_110, PG6MD_111,
+ PG5MD_000, PG5MD_001, PG5MD_010, PG5MD_011,
+ PG5MD_100, PG5MD_101, PG5MD_110, PG5MD_111,
+ PG4MD_000, PG4MD_001, PG4MD_010, PG4MD_011,
+ PG4MD_100, PG4MD_101, PG4MD_110, PG4MD_111,
+
+ PG3MD_000, PG3MD_001, PG3MD_010, PG3MD_011,
+ PG3MD_100, PG3MD_101, PG3MD_110, PG3MD_111,
+ PG2MD_000, PG2MD_001, PG2MD_010, PG2MD_011,
+ PG2MD_100, PG2MD_101, PG2MD_110, PG2MD_111,
+ PG1MD_000, PG1MD_001, PG1MD_010, PG1MD_011,
+ PG1MD_100, PG1MD_101, PG1MD_110, PG1MD_111,
+ PG0MD_000, PG0MD_001, PG0MD_010, PG0MD_011,
+ PG0MD_100, PG0MD_101, PG0MD_110, PG0MD_111,
+
+ /* Port H */
+ PH7MD_00, PH7MD_01, PH7MD_10, PH7MD_11,
+ PH6MD_00, PH6MD_01, PH6MD_10, PH6MD_11,
+ PH5MD_00, PH5MD_01, PH5MD_10, PH5MD_11,
+ PH4MD_00, PH4MD_01, PH4MD_10, PH4MD_11,
+
+ PH3MD_00, PH3MD_01, PH3MD_10, PH3MD_11,
+ PH2MD_00, PH2MD_01, PH2MD_10, PH2MD_11,
+ PH1MD_00, PH1MD_01, PH1MD_10, PH1MD_11,
+ PH0MD_00, PH0MD_01, PH0MD_10, PH0MD_11,
+
+ /* Port I - not on device */
+
+ /* Port J */
+ PJ31_IOR_IN, PJ31_IOR_OUT,
+ PJ30_IOR_IN, PJ30_IOR_OUT,
+ PJ29_IOR_IN, PJ29_IOR_OUT,
+ PJ28_IOR_IN, PJ28_IOR_OUT,
+ PJ27_IOR_IN, PJ27_IOR_OUT,
+ PJ26_IOR_IN, PJ26_IOR_OUT,
+ PJ25_IOR_IN, PJ25_IOR_OUT,
+ PJ24_IOR_IN, PJ24_IOR_OUT,
+ PJ23_IOR_IN, PJ23_IOR_OUT,
+ PJ22_IOR_IN, PJ22_IOR_OUT,
+ PJ21_IOR_IN, PJ21_IOR_OUT,
+ PJ20_IOR_IN, PJ20_IOR_OUT,
+ PJ19_IOR_IN, PJ19_IOR_OUT,
+ PJ18_IOR_IN, PJ18_IOR_OUT,
+ PJ17_IOR_IN, PJ17_IOR_OUT,
+ PJ16_IOR_IN, PJ16_IOR_OUT,
+ PJ15_IOR_IN, PJ15_IOR_OUT,
+ PJ14_IOR_IN, PJ14_IOR_OUT,
+ PJ13_IOR_IN, PJ13_IOR_OUT,
+ PJ12_IOR_IN, PJ12_IOR_OUT,
+ PJ11_IOR_IN, PJ11_IOR_OUT,
+ PJ10_IOR_IN, PJ10_IOR_OUT,
+ PJ9_IOR_IN, PJ9_IOR_OUT,
+ PJ8_IOR_IN, PJ8_IOR_OUT,
+ PJ7_IOR_IN, PJ7_IOR_OUT,
+ PJ6_IOR_IN, PJ6_IOR_OUT,
+ PJ5_IOR_IN, PJ5_IOR_OUT,
+ PJ4_IOR_IN, PJ4_IOR_OUT,
+ PJ3_IOR_IN, PJ3_IOR_OUT,
+ PJ2_IOR_IN, PJ2_IOR_OUT,
+ PJ1_IOR_IN, PJ1_IOR_OUT,
+ PJ0_IOR_IN, PJ0_IOR_OUT,
+
+ PJ31MD_0, PJ31MD_1,
+ PJ30MD_000, PJ30MD_001, PJ30MD_010, PJ30MD_011,
+ PJ30MD_100, PJ30MD_101, PJ30MD_110, PJ30MD_111,
+ PJ29MD_000, PJ29MD_001, PJ29MD_010, PJ29MD_011,
+ PJ29MD_100, PJ29MD_101, PJ29MD_110, PJ29MD_111,
+ PJ28MD_000, PJ28MD_001, PJ28MD_010, PJ28MD_011,
+ PJ28MD_100, PJ28MD_101, PJ28MD_110, PJ28MD_111,
+
+ PJ27MD_000, PJ27MD_001, PJ27MD_010, PJ27MD_011,
+ PJ27MD_100, PJ27MD_101, PJ27MD_110, PJ27MD_111,
+ PJ26MD_000, PJ26MD_001, PJ26MD_010, PJ26MD_011,
+ PJ26MD_100, PJ26MD_101, PJ26MD_110, PJ26MD_111,
+ PJ25MD_000, PJ25MD_001, PJ25MD_010, PJ25MD_011,
+ PJ25MD_100, PJ25MD_101, PJ25MD_110, PJ25MD_111,
+ PJ24MD_000, PJ24MD_001, PJ24MD_010, PJ24MD_011,
+ PJ24MD_100, PJ24MD_101, PJ24MD_110, PJ24MD_111,
+
+ PJ23MD_000, PJ23MD_001, PJ23MD_010, PJ23MD_011,
+ PJ23MD_100, PJ23MD_101, PJ23MD_110, PJ23MD_111,
+ PJ22MD_000, PJ22MD_001, PJ22MD_010, PJ22MD_011,
+ PJ22MD_100, PJ22MD_101, PJ22MD_110, PJ22MD_111,
+ PJ21MD_000, PJ21MD_001, PJ21MD_010, PJ21MD_011,
+ PJ21MD_100, PJ21MD_101, PJ21MD_110, PJ21MD_111,
+ PJ20MD_000, PJ20MD_001, PJ20MD_010, PJ20MD_011,
+ PJ20MD_100, PJ20MD_101, PJ20MD_110, PJ20MD_111,
+
+ PJ19MD_000, PJ19MD_001, PJ19MD_010, PJ19MD_011,
+ PJ19MD_100, PJ19MD_101, PJ19MD_110, PJ19MD_111,
+ PJ18MD_000, PJ18MD_001, PJ18MD_010, PJ18MD_011,
+ PJ18MD_100, PJ18MD_101, PJ18MD_110, PJ18MD_111,
+ PJ17MD_000, PJ17MD_001, PJ17MD_010, PJ17MD_011,
+ PJ17MD_100, PJ17MD_101, PJ17MD_110, PJ17MD_111,
+ PJ16MD_000, PJ16MD_001, PJ16MD_010, PJ16MD_011,
+ PJ16MD_100, PJ16MD_101, PJ16MD_110, PJ16MD_111,
+
+ PJ15MD_000, PJ15MD_001, PJ15MD_010, PJ15MD_011,
+ PJ15MD_100, PJ15MD_101, PJ15MD_110, PJ15MD_111,
+ PJ14MD_000, PJ14MD_001, PJ14MD_010, PJ14MD_011,
+ PJ14MD_100, PJ14MD_101, PJ14MD_110, PJ14MD_111,
+ PJ13MD_000, PJ13MD_001, PJ13MD_010, PJ13MD_011,
+ PJ13MD_100, PJ13MD_101, PJ13MD_110, PJ13MD_111,
+ PJ12MD_000, PJ12MD_001, PJ12MD_010, PJ12MD_011,
+ PJ12MD_100, PJ12MD_101, PJ12MD_110, PJ12MD_111,
+
+ PJ11MD_000, PJ11MD_001, PJ11MD_010, PJ11MD_011,
+ PJ11MD_100, PJ11MD_101, PJ11MD_110, PJ11MD_111,
+ PJ10MD_000, PJ10MD_001, PJ10MD_010, PJ10MD_011,
+ PJ10MD_100, PJ10MD_101, PJ10MD_110, PJ10MD_111,
+ PJ9MD_000, PJ9MD_001, PJ9MD_010, PJ9MD_011,
+ PJ9MD_100, PJ9MD_101, PJ9MD_110, PJ9MD_111,
+ PJ8MD_000, PJ8MD_001, PJ8MD_010, PJ8MD_011,
+ PJ8MD_100, PJ8MD_101, PJ8MD_110, PJ8MD_111,
+
+ PJ7MD_000, PJ7MD_001, PJ7MD_010, PJ7MD_011,
+ PJ7MD_100, PJ7MD_101, PJ7MD_110, PJ7MD_111,
+ PJ6MD_000, PJ6MD_001, PJ6MD_010, PJ6MD_011,
+ PJ6MD_100, PJ6MD_101, PJ6MD_110, PJ6MD_111,
+ PJ5MD_000, PJ5MD_001, PJ5MD_010, PJ5MD_011,
+ PJ5MD_100, PJ5MD_101, PJ5MD_110, PJ5MD_111,
+ PJ4MD_000, PJ4MD_001, PJ4MD_010, PJ4MD_011,
+ PJ4MD_100, PJ4MD_101, PJ4MD_110, PJ4MD_111,
+
+ PJ3MD_000, PJ3MD_001, PJ3MD_010, PJ3MD_011,
+ PJ3MD_100, PJ3MD_101, PJ3MD_110, PJ3MD_111,
+ PJ2MD_000, PJ2MD_001, PJ2MD_010, PJ2MD_011,
+ PJ2MD_100, PJ2MD_101, PJ2MD_110, PJ2MD_111,
+ PJ1MD_000, PJ1MD_001, PJ1MD_010, PJ1MD_011,
+ PJ1MD_100, PJ1MD_101, PJ1MD_110, PJ1MD_111,
+ PJ0MD_000, PJ0MD_001, PJ0MD_010, PJ0MD_011,
+ PJ0MD_100, PJ0MD_101, PJ0MD_110, PJ0MD_111,
+
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /* Port H */
+ PHAN7_MARK, PHAN6_MARK, PHAN5_MARK, PHAN4_MARK,
+ PHAN3_MARK, PHAN2_MARK, PHAN1_MARK, PHAN0_MARK,
+
+ /* IRQs */
+ IRQ7_PG_MARK, IRQ6_PG_MARK, IRQ5_PG_MARK, IRQ4_PG_MARK,
+ IRQ3_PG_MARK, IRQ2_PG_MARK, IRQ1_PG_MARK, IRQ0_PG_MARK,
+ IRQ7_PF_MARK, IRQ6_PF_MARK, IRQ5_PF_MARK, IRQ4_PF_MARK,
+ IRQ3_PJ_MARK, IRQ2_PJ_MARK, IRQ1_PJ_MARK, IRQ0_PJ_MARK,
+ IRQ1_PC_MARK, IRQ0_PC_MARK,
+
+ PINT7_PG_MARK, PINT6_PG_MARK, PINT5_PG_MARK, PINT4_PG_MARK,
+ PINT3_PG_MARK, PINT2_PG_MARK, PINT1_PG_MARK, PINT0_PG_MARK,
+ PINT7_PH_MARK, PINT6_PH_MARK, PINT5_PH_MARK, PINT4_PH_MARK,
+ PINT3_PH_MARK, PINT2_PH_MARK, PINT1_PH_MARK, PINT0_PH_MARK,
+ PINT7_PJ_MARK, PINT6_PJ_MARK, PINT5_PJ_MARK, PINT4_PJ_MARK,
+ PINT3_PJ_MARK, PINT2_PJ_MARK, PINT1_PJ_MARK, PINT0_PJ_MARK,
+
+ /* SD */
+ SD_D0_MARK, SD_D1_MARK, SD_D2_MARK, SD_D3_MARK,
+ SD_WP_MARK, SD_CLK_MARK, SD_CMD_MARK, SD_CD_MARK,
+
+ /* MMC */
+ MMC_D0_MARK, MMC_D1_MARK, MMC_D2_MARK, MMC_D3_MARK,
+ MMC_D4_MARK, MMC_D5_MARK, MMC_D6_MARK, MMC_D7_MARK,
+ MMC_CLK_MARK, MMC_CMD_MARK, MMC_CD_MARK,
+
+ /* PWM */
+ PWM1A_MARK, PWM1B_MARK, PWM1C_MARK, PWM1D_MARK,
+ PWM1E_MARK, PWM1F_MARK, PWM1G_MARK, PWM1H_MARK,
+ PWM2A_MARK, PWM2B_MARK, PWM2C_MARK, PWM2D_MARK,
+ PWM2E_MARK, PWM2F_MARK, PWM2G_MARK, PWM2H_MARK,
+
+ /* IEBus */
+ IERXD_MARK, IETXD_MARK,
+
+ /* WDT */
+ WDTOVF_MARK,
+
+ /* DMAC */
+ TEND0_MARK, DACK0_MARK, DREQ0_MARK,
+ TEND1_MARK, DACK1_MARK, DREQ1_MARK,
+
+ /* ADC */
+ ADTRG_MARK,
+
+ /* BSC */
+ A25_MARK, A24_MARK,
+ A23_MARK, A22_MARK, A21_MARK, A20_MARK,
+ A19_MARK, A18_MARK, A17_MARK, A16_MARK,
+ A15_MARK, A14_MARK, A13_MARK, A12_MARK,
+ A11_MARK, A10_MARK, A9_MARK, A8_MARK,
+ A7_MARK, A6_MARK, A5_MARK, A4_MARK,
+ A3_MARK, A2_MARK, A1_MARK, A0_MARK,
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+ D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+ D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+ D15_MARK, D14_MARK, D13_MARK, D12_MARK,
+ D11_MARK, D10_MARK, D9_MARK, D8_MARK,
+ D7_MARK, D6_MARK, D5_MARK, D4_MARK,
+ D3_MARK, D2_MARK, D1_MARK, D0_MARK,
+ BS_MARK,
+ CS4_MARK, CS3_MARK, CS2_MARK, CS1_MARK, CS0_MARK,
+ CS5CE1A_MARK,
+ CE2A_MARK, CE2B_MARK,
+ RD_MARK, RDWR_MARK,
+ WE3ICIOWRAHDQMUU_MARK,
+ WE2ICIORDDQMUL_MARK,
+ WE1DQMUWE_MARK,
+ WE0DQML_MARK,
+ RAS_MARK, CAS_MARK, CKE_MARK,
+ WAIT_MARK, BREQ_MARK, BACK_MARK, IOIS16_MARK,
+
+ /* TMU */
+ TIOC0A_MARK, TIOC0B_MARK, TIOC0C_MARK, TIOC0D_MARK,
+ TIOC1A_MARK, TIOC1B_MARK,
+ TIOC2A_MARK, TIOC2B_MARK,
+ TIOC3A_MARK, TIOC3B_MARK, TIOC3C_MARK, TIOC3D_MARK,
+ TIOC4A_MARK, TIOC4B_MARK, TIOC4C_MARK, TIOC4D_MARK,
+ TCLKA_MARK, TCLKB_MARK, TCLKC_MARK, TCLKD_MARK,
+
+ /* SCIF */
+ SCK0_MARK, RXD0_MARK, TXD0_MARK,
+ SCK1_MARK, RXD1_MARK, TXD1_MARK, RTS1_MARK, CTS1_MARK,
+ SCK2_MARK, RXD2_MARK, TXD2_MARK,
+ SCK3_MARK, RXD3_MARK, TXD3_MARK,
+ SCK4_MARK, RXD4_MARK, TXD4_MARK,
+ SCK5_MARK, RXD5_MARK, TXD5_MARK, RTS5_MARK, CTS5_MARK,
+ SCK6_MARK, RXD6_MARK, TXD6_MARK,
+ SCK7_MARK, RXD7_MARK, TXD7_MARK, RTS7_MARK, CTS7_MARK,
+
+ /* RSPI */
+ MISO0_PB20_MARK, MOSI0_PB19_MARK, SSL00_PB18_MARK, RSPCK0_PB17_MARK,
+ MISO0_PJ19_MARK, MOSI0_PJ18_MARK, SSL00_PJ17_MARK, RSPCK0_PJ16_MARK,
+ MISO1_MARK, MOSI1_MARK, SSL10_MARK, RSPCK1_MARK,
+
+ /* IIC3 */
+ SCL0_MARK, SDA0_MARK,
+ SCL1_MARK, SDA1_MARK,
+ SCL2_MARK, SDA2_MARK,
+ SCL3_MARK, SDA3_MARK,
+
+ /* SSI */
+ SSISCK0_MARK, SSIWS0_MARK, SSITXD0_MARK, SSIRXD0_MARK,
+ SSISCK1_MARK, SSIWS1_MARK, SSIDATA1_MARK,
+ SSISCK2_MARK, SSIWS2_MARK, SSIDATA2_MARK,
+ SSISCK3_MARK, SSIWS3_MARK, SSIDATA3_MARK,
+ SSISCK4_MARK, SSIWS4_MARK, SSIDATA4_MARK,
+ SSISCK5_MARK, SSIWS5_MARK, SSIDATA5_MARK,
+ AUDIO_CLK_MARK,
+ AUDIO_XOUT_MARK,
+
+ /* SIOF */ /* NOTE Shares AUDIO_CLK with SSI */
+ SIOFTXD_MARK, SIOFRXD_MARK, SIOFSYNC_MARK, SIOFSCK_MARK,
+
+ /* SPDIF */ /* NOTE Shares AUDIO_CLK with SSI */
+ SPDIF_IN_MARK, SPDIF_OUT_MARK,
+ SPDIF_IN_PJ24_MARK, SPDIF_OUT_PJ25_MARK,
+
+ /* NANDFMC */ /* NOTE Controller is not available in boot mode 0 */
+ FCE_MARK,
+ FRB_MARK,
+
+ /* CAN */
+ CRX0_MARK, CTX0_MARK,
+ CRX1_MARK, CTX1_MARK,
+ CRX2_MARK, CTX2_MARK,
+ CRX0_CRX1_MARK,
+ CRX0_CRX1_CRX2_MARK,
+ CTX0CTX1CTX2_MARK,
+ CRX1_PJ22_MARK, CTX1_PJ23_MARK,
+ CRX2_PJ20_MARK, CTX2_PJ21_MARK,
+ CRX0CRX1_PJ22_MARK,
+ CRX0CRX1CRX2_PJ20_MARK,
+
+ /* VDC */
+ DV_CLK_MARK,
+ DV_VSYNC_MARK, DV_HSYNC_MARK,
+ DV_DATA23_MARK, DV_DATA22_MARK, DV_DATA21_MARK, DV_DATA20_MARK,
+ DV_DATA19_MARK, DV_DATA18_MARK, DV_DATA17_MARK, DV_DATA16_MARK,
+ DV_DATA15_MARK, DV_DATA14_MARK, DV_DATA13_MARK, DV_DATA12_MARK,
+ DV_DATA11_MARK, DV_DATA10_MARK, DV_DATA9_MARK, DV_DATA8_MARK,
+ DV_DATA7_MARK, DV_DATA6_MARK, DV_DATA5_MARK, DV_DATA4_MARK,
+ DV_DATA3_MARK, DV_DATA2_MARK, DV_DATA1_MARK, DV_DATA0_MARK,
+ LCD_CLK_MARK, LCD_EXTCLK_MARK,
+ LCD_VSYNC_MARK, LCD_HSYNC_MARK, LCD_DE_MARK,
+ LCD_DATA23_PG23_MARK, LCD_DATA22_PG22_MARK, LCD_DATA21_PG21_MARK,
+ LCD_DATA20_PG20_MARK, LCD_DATA19_PG19_MARK, LCD_DATA18_PG18_MARK,
+ LCD_DATA17_PG17_MARK, LCD_DATA16_PG16_MARK, LCD_DATA15_PG15_MARK,
+ LCD_DATA14_PG14_MARK, LCD_DATA13_PG13_MARK, LCD_DATA12_PG12_MARK,
+ LCD_DATA11_PG11_MARK, LCD_DATA10_PG10_MARK, LCD_DATA9_PG9_MARK,
+ LCD_DATA8_PG8_MARK, LCD_DATA7_PG7_MARK, LCD_DATA6_PG6_MARK,
+ LCD_DATA5_PG5_MARK, LCD_DATA4_PG4_MARK, LCD_DATA3_PG3_MARK,
+ LCD_DATA2_PG2_MARK, LCD_DATA1_PG1_MARK, LCD_DATA0_PG0_MARK,
+ LCD_DATA23_PJ23_MARK, LCD_DATA22_PJ22_MARK, LCD_DATA21_PJ21_MARK,
+ LCD_DATA20_PJ20_MARK, LCD_DATA19_PJ19_MARK, LCD_DATA18_PJ18_MARK,
+ LCD_DATA17_PJ17_MARK, LCD_DATA16_PJ16_MARK, LCD_DATA15_PJ15_MARK,
+ LCD_DATA14_PJ14_MARK, LCD_DATA13_PJ13_MARK, LCD_DATA12_PJ12_MARK,
+ LCD_DATA11_PJ11_MARK, LCD_DATA10_PJ10_MARK, LCD_DATA9_PJ9_MARK,
+ LCD_DATA8_PJ8_MARK, LCD_DATA7_PJ7_MARK, LCD_DATA6_PJ6_MARK,
+ LCD_DATA5_PJ5_MARK, LCD_DATA4_PJ4_MARK, LCD_DATA3_PJ3_MARK,
+ LCD_DATA2_PJ2_MARK, LCD_DATA1_PJ1_MARK, LCD_DATA0_PJ0_MARK,
+ LCD_TCON6_MARK, LCD_TCON5_MARK, LCD_TCON4_MARK,
+ LCD_TCON3_MARK, LCD_TCON2_MARK, LCD_TCON1_MARK, LCD_TCON0_MARK,
+ LCD_M_DISP_MARK,
+ PINMUX_MARK_END,
+};
+
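+/*
+ * pinmux_data[] maps each pin data value or function mark to the register
+ * field values that select it: the first argument of PINMUX_DATA() is the
+ * resulting identifier, the remaining arguments are the mode and direction
+ * values the core must program to get it.
+ */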
+static pinmux_enum_t pinmux_data[] = {
+
+ /* Port A */
+ PINMUX_DATA(PA1_DATA, PA1_IN),
+ PINMUX_DATA(PA0_DATA, PA0_IN),
+
+ /* Port B */
+ PINMUX_DATA(PB22_DATA, PB22MD_000, PB22_IN, PB22_OUT),
+ PINMUX_DATA(A22_MARK, PB22MD_001),
+ PINMUX_DATA(CTX2_MARK, PB22MD_010),
+ PINMUX_DATA(IETXD_MARK, PB22MD_011),
+ PINMUX_DATA(CS4_MARK, PB22MD_100),
+
+ PINMUX_DATA(PB21_DATA, PB21MD_00, PB21_IN, PB21_OUT),
+ PINMUX_DATA(A21_MARK, PB21MD_01),
+ PINMUX_DATA(CRX2_MARK, PB21MD_10),
+ PINMUX_DATA(IERXD_MARK, PB21MD_11),
+
+ PINMUX_DATA(A20_MARK, PB20MD_001),
+ PINMUX_DATA(A19_MARK, PB19MD_001),
+ PINMUX_DATA(A18_MARK, PB18MD_001),
+ PINMUX_DATA(A17_MARK, PB17MD_001),
+ PINMUX_DATA(A16_MARK, PB16MD_001),
+ PINMUX_DATA(A15_MARK, PB15MD_001),
+ PINMUX_DATA(A14_MARK, PB14MD_001),
+ PINMUX_DATA(A13_MARK, PB13MD_001),
+ PINMUX_DATA(A12_MARK, PB12MD_01),
+ PINMUX_DATA(A11_MARK, PB11MD_01),
+ PINMUX_DATA(A10_MARK, PB10MD_01),
+ PINMUX_DATA(A9_MARK, PB9MD_01),
+ PINMUX_DATA(A8_MARK, PB8MD_01),
+ PINMUX_DATA(A7_MARK, PB7MD_01),
+ PINMUX_DATA(A6_MARK, PB6MD_01),
+ PINMUX_DATA(A5_MARK, PB5MD_01),
+ PINMUX_DATA(A4_MARK, PB4MD_01),
+ PINMUX_DATA(A3_MARK, PB3MD_01),
+ PINMUX_DATA(A2_MARK, PB2MD_01),
+ PINMUX_DATA(A1_MARK, PB1MD_01),
+
+ /* Port C */
+ PINMUX_DATA(PC8_DATA, PC8MD_000),
+ PINMUX_DATA(CS3_MARK, PC8MD_001),
+ PINMUX_DATA(TXD7_MARK, PC8MD_010),
+ PINMUX_DATA(CTX1_MARK, PC8MD_011),
+
+ PINMUX_DATA(PC7_DATA, PC7MD_000),
+ PINMUX_DATA(CKE_MARK, PC7MD_001),
+ PINMUX_DATA(RXD7_MARK, PC7MD_010),
+ PINMUX_DATA(CRX1_MARK, PC7MD_011),
+ PINMUX_DATA(CRX0_CRX1_MARK, PC7MD_100),
+ PINMUX_DATA(IRQ1_PC_MARK, PC7MD_101),
+
+ PINMUX_DATA(PC6_DATA, PC6MD_000),
+ PINMUX_DATA(CAS_MARK, PC6MD_001),
+ PINMUX_DATA(SCK7_MARK, PC6MD_010),
+ PINMUX_DATA(CTX0_MARK, PC6MD_011),
+
+ PINMUX_DATA(PC5_DATA, PC5MD_000),
+ PINMUX_DATA(RAS_MARK, PC5MD_001),
+ PINMUX_DATA(CRX0_MARK, PC5MD_011),
+ PINMUX_DATA(CTX0CTX1CTX2_MARK, PC5MD_100),
+ PINMUX_DATA(IRQ0_PC_MARK, PC5MD_101),
+
+ PINMUX_DATA(PC4_DATA, PC4MD_00),
+ PINMUX_DATA(WE1DQMUWE_MARK, PC4MD_01),
+ PINMUX_DATA(TXD6_MARK, PC4MD_10),
+
+ PINMUX_DATA(PC3_DATA, PC3MD_00),
+ PINMUX_DATA(WE0DQML_MARK, PC3MD_01),
+ PINMUX_DATA(RXD6_MARK, PC3MD_10),
+
+ PINMUX_DATA(PC2_DATA, PC2MD_00),
+ PINMUX_DATA(RDWR_MARK, PC2MD_01),
+ PINMUX_DATA(SCK5_MARK, PC2MD_10),
+
+ PINMUX_DATA(PC1_DATA, PC1MD_0),
+ PINMUX_DATA(RD_MARK, PC1MD_1),
+
+ PINMUX_DATA(PC0_DATA, PC0MD_0),
+ PINMUX_DATA(CS0_MARK, PC0MD_1),
+
+ /* Port D */
+ PINMUX_DATA(D15_MARK, PD15MD_01),
+ PINMUX_DATA(D14_MARK, PD14MD_01),
+
+ PINMUX_DATA(PD13_DATA, PD13MD_00),
+ PINMUX_DATA(D13_MARK, PD13MD_01),
+ PINMUX_DATA(PWM2F_MARK, PD13MD_10),
+
+ PINMUX_DATA(PD12_DATA, PD12MD_00),
+ PINMUX_DATA(D12_MARK, PD12MD_01),
+ PINMUX_DATA(PWM2E_MARK, PD12MD_10),
+
+ PINMUX_DATA(D11_MARK, PD11MD_01),
+ PINMUX_DATA(D10_MARK, PD10MD_01),
+ PINMUX_DATA(D9_MARK, PD9MD_01),
+ PINMUX_DATA(D8_MARK, PD8MD_01),
+ PINMUX_DATA(D7_MARK, PD7MD_01),
+ PINMUX_DATA(D6_MARK, PD6MD_01),
+ PINMUX_DATA(D5_MARK, PD5MD_01),
+ PINMUX_DATA(D4_MARK, PD4MD_01),
+ PINMUX_DATA(D3_MARK, PD3MD_01),
+ PINMUX_DATA(D2_MARK, PD2MD_01),
+ PINMUX_DATA(D1_MARK, PD1MD_01),
+ PINMUX_DATA(D0_MARK, PD0MD_01),
+
+ /* Port E */
+ PINMUX_DATA(PE7_DATA, PE7MD_00),
+ PINMUX_DATA(SDA3_MARK, PE7MD_01),
+ PINMUX_DATA(RXD7_MARK, PE7MD_10),
+
+ PINMUX_DATA(PE6_DATA, PE6MD_00),
+ PINMUX_DATA(SCL3_MARK, PE6MD_01),
+ PINMUX_DATA(RXD6_MARK, PE6MD_10),
+
+ PINMUX_DATA(PE5_DATA, PE5MD_00),
+ PINMUX_DATA(SDA2_MARK, PE5MD_01),
+ PINMUX_DATA(RXD5_MARK, PE5MD_10),
+ PINMUX_DATA(DV_HSYNC_MARK, PE5MD_11),
+
+ PINMUX_DATA(PE4_DATA, PE4MD_00),
+ PINMUX_DATA(SCL2_MARK, PE4MD_01),
+ PINMUX_DATA(DV_VSYNC_MARK, PE4MD_11),
+
+ PINMUX_DATA(PE3_DATA, PE3MD_000),
+ PINMUX_DATA(SDA1_MARK, PE3MD_001),
+ PINMUX_DATA(TCLKD_MARK, PE3MD_010),
+ PINMUX_DATA(ADTRG_MARK, PE3MD_011),
+ PINMUX_DATA(DV_HSYNC_MARK, PE3MD_100),
+
+ PINMUX_DATA(PE2_DATA, PE2MD_000),
+ PINMUX_DATA(SCL1_MARK, PE2MD_001),
+ PINMUX_DATA(TCLKD_MARK, PE2MD_010),
+ PINMUX_DATA(IOIS16_MARK, PE2MD_011),
+ PINMUX_DATA(DV_VSYNC_MARK, PE2MD_100),
+
+ PINMUX_DATA(PE1_DATA, PE1MD_000),
+ PINMUX_DATA(SDA0_MARK, PE1MD_001),
+ PINMUX_DATA(TCLKB_MARK, PE1MD_010),
+ PINMUX_DATA(AUDIO_CLK_MARK, PE1MD_010),
+ PINMUX_DATA(DV_CLK_MARK, PE1MD_100),
+
+ PINMUX_DATA(PE0_DATA, PE0MD_00),
+ PINMUX_DATA(SCL0_MARK, PE0MD_01),
+ PINMUX_DATA(TCLKA_MARK, PE0MD_10),
+ PINMUX_DATA(LCD_EXTCLK_MARK, PE0MD_11),
+
+ /* Port F */
+ PINMUX_DATA(PF23_DATA, PF23MD_000),
+ PINMUX_DATA(SD_D2_MARK, PF23MD_001),
+ PINMUX_DATA(TXD3_MARK, PF23MD_100),
+ PINMUX_DATA(MMC_D2_MARK, PF23MD_101),
+
+ PINMUX_DATA(PF22_DATA, PF22MD_000),
+ PINMUX_DATA(SD_D3_MARK, PF22MD_001),
+ PINMUX_DATA(RXD3_MARK, PF22MD_100),
+ PINMUX_DATA(MMC_D3_MARK, PF22MD_101),
+
+ PINMUX_DATA(PF21_DATA, PF21MD_000),
+ PINMUX_DATA(SD_CMD_MARK, PF21MD_001),
+ PINMUX_DATA(SCK3_MARK, PF21MD_100),
+ PINMUX_DATA(MMC_CMD_MARK, PF21MD_101),
+
+ PINMUX_DATA(PF20_DATA, PF20MD_000),
+ PINMUX_DATA(SD_CLK_MARK, PF20MD_001),
+ PINMUX_DATA(SSIDATA3_MARK, PF20MD_010),
+ PINMUX_DATA(MMC_CLK_MARK, PF20MD_101),
+
+ PINMUX_DATA(PF19_DATA, PF19MD_000),
+ PINMUX_DATA(SD_D0_MARK, PF19MD_001),
+ PINMUX_DATA(SSIWS3_MARK, PF19MD_010),
+ PINMUX_DATA(IRQ7_PF_MARK, PF19MD_100),
+ PINMUX_DATA(MMC_D0_MARK, PF19MD_101),
+
+ PINMUX_DATA(PF18_DATA, PF18MD_000),
+ PINMUX_DATA(SD_D1_MARK, PF18MD_001),
+ PINMUX_DATA(SSISCK3_MARK, PF18MD_010),
+ PINMUX_DATA(IRQ6_PF_MARK, PF18MD_100),
+ PINMUX_DATA(MMC_D1_MARK, PF18MD_101),
+
+ PINMUX_DATA(PF17_DATA, PF17MD_000),
+ PINMUX_DATA(SD_WP_MARK, PF17MD_001),
+ PINMUX_DATA(FRB_MARK, PF17MD_011),
+ PINMUX_DATA(IRQ5_PF_MARK, PF17MD_100),
+
+ PINMUX_DATA(PF16_DATA, PF16MD_000),
+ PINMUX_DATA(SD_CD_MARK, PF16MD_001),
+ PINMUX_DATA(FCE_MARK, PF16MD_011),
+ PINMUX_DATA(IRQ4_PF_MARK, PF16MD_100),
+ PINMUX_DATA(MMC_CD_MARK, PF16MD_101),
+
+ PINMUX_DATA(PF15_DATA, PF15MD_000),
+ PINMUX_DATA(A0_MARK, PF15MD_001),
+ PINMUX_DATA(SSIDATA2_MARK, PF15MD_010),
+ PINMUX_DATA(WDTOVF_MARK, PF15MD_011),
+ PINMUX_DATA(TXD2_MARK, PF15MD_100),
+
+ PINMUX_DATA(PF14_DATA, PF14MD_000),
+ PINMUX_DATA(A25_MARK, PF14MD_001),
+ PINMUX_DATA(SSIWS2_MARK, PF14MD_010),
+ PINMUX_DATA(RXD2_MARK, PF14MD_100),
+
+ PINMUX_DATA(PF13_DATA, PF13MD_000),
+ PINMUX_DATA(A24_MARK, PF13MD_001),
+ PINMUX_DATA(SSISCK2_MARK, PF13MD_010),
+ PINMUX_DATA(SCK2_MARK, PF13MD_100),
+
+ PINMUX_DATA(PF12_DATA, PF12MD_000),
+ PINMUX_DATA(SSIDATA1_MARK, PF12MD_010),
+ PINMUX_DATA(DV_DATA12_MARK, PF12MD_011),
+ PINMUX_DATA(TXD1_MARK, PF12MD_100),
+ PINMUX_DATA(MMC_D7_MARK, PF12MD_101),
+
+ PINMUX_DATA(PF11_DATA, PF11MD_000),
+ PINMUX_DATA(SSIWS1_MARK, PF11MD_010),
+ PINMUX_DATA(DV_DATA2_MARK, PF11MD_011),
+ PINMUX_DATA(RXD1_MARK, PF11MD_100),
+ PINMUX_DATA(MMC_D6_MARK, PF11MD_101),
+
+ PINMUX_DATA(PF10_DATA, PF10MD_000),
+ PINMUX_DATA(CS1_MARK, PF10MD_001),
+ PINMUX_DATA(SSISCK1_MARK, PF10MD_010),
+ PINMUX_DATA(DV_DATA1_MARK, PF10MD_011),
+ PINMUX_DATA(SCK1_MARK, PF10MD_100),
+ PINMUX_DATA(MMC_D5_MARK, PF10MD_101),
+
+ PINMUX_DATA(PF9_DATA, PF9MD_000),
+ PINMUX_DATA(BS_MARK, PF9MD_001),
+ PINMUX_DATA(DV_DATA0_MARK, PF9MD_011),
+ PINMUX_DATA(SCK0_MARK, PF9MD_100),
+ PINMUX_DATA(MMC_D4_MARK, PF9MD_101),
+ PINMUX_DATA(RTS1_MARK, PF9MD_110),
+
+ PINMUX_DATA(PF8_DATA, PF8MD_000),
+ PINMUX_DATA(A23_MARK, PF8MD_001),
+ PINMUX_DATA(TXD0_MARK, PF8MD_100),
+
+ PINMUX_DATA(PF7_DATA, PF7MD_000),
+ PINMUX_DATA(SSIRXD0_MARK, PF7MD_010),
+ PINMUX_DATA(RXD0_MARK, PF7MD_100),
+ PINMUX_DATA(CTS1_MARK, PF7MD_110),
+
+ PINMUX_DATA(PF6_DATA, PF6MD_000),
+ PINMUX_DATA(CE2A_MARK, PF6MD_001),
+ PINMUX_DATA(SSITXD0_MARK, PF6MD_010),
+
+ PINMUX_DATA(PF5_DATA, PF5MD_000),
+ PINMUX_DATA(SSIWS0_MARK, PF5MD_010),
+
+ PINMUX_DATA(PF4_DATA, PF4MD_000),
+ PINMUX_DATA(CS5CE1A_MARK, PF4MD_001),
+ PINMUX_DATA(SSISCK0_MARK, PF4MD_010),
+
+ PINMUX_DATA(PF3_DATA, PF3MD_000),
+ PINMUX_DATA(CS2_MARK, PF3MD_001),
+ PINMUX_DATA(MISO1_MARK, PF3MD_011),
+ PINMUX_DATA(TIOC4D_MARK, PF3MD_100),
+
+ PINMUX_DATA(PF2_DATA, PF2MD_000),
+ PINMUX_DATA(WAIT_MARK, PF2MD_001),
+ PINMUX_DATA(MOSI1_MARK, PF2MD_011),
+ PINMUX_DATA(TIOC4C_MARK, PF2MD_100),
+ PINMUX_DATA(TEND0_MARK, PF2MD_101),
+
+ PINMUX_DATA(PF1_DATA, PF1MD_000),
+ PINMUX_DATA(BACK_MARK, PF1MD_001),
+ PINMUX_DATA(SSL10_MARK, PF1MD_011),
+ PINMUX_DATA(TIOC4B_MARK, PF1MD_100),
+ PINMUX_DATA(DACK0_MARK, PF1MD_101),
+
+ PINMUX_DATA(PF0_DATA, PF0MD_000),
+ PINMUX_DATA(BREQ_MARK, PF0MD_001),
+ PINMUX_DATA(RSPCK1_MARK, PF0MD_011),
+ PINMUX_DATA(TIOC4A_MARK, PF0MD_100),
+ PINMUX_DATA(DREQ0_MARK, PF0MD_101),
+
+ /* Port G */
+ PINMUX_DATA(PG27_DATA, PG27MD_00),
+ PINMUX_DATA(LCD_TCON2_MARK, PG27MD_10),
+ PINMUX_DATA(LCD_EXTCLK_MARK, PG27MD_11),
+ PINMUX_DATA(LCD_DE_MARK, PG27MD_11),
+
+ PINMUX_DATA(PG26_DATA, PG26MD_00),
+ PINMUX_DATA(LCD_TCON1_MARK, PG26MD_10),
+ PINMUX_DATA(LCD_HSYNC_MARK, PG26MD_10),
+
+ PINMUX_DATA(PG25_DATA, PG25MD_00),
+ PINMUX_DATA(LCD_TCON0_MARK, PG25MD_10),
+ PINMUX_DATA(LCD_VSYNC_MARK, PG25MD_10),
+
+ PINMUX_DATA(PG24_DATA, PG24MD_00),
+ PINMUX_DATA(LCD_CLK_MARK, PG24MD_10),
+
+ PINMUX_DATA(PG23_DATA, PG23MD_000),
+ PINMUX_DATA(LCD_DATA23_PG23_MARK, PG23MD_010),
+ PINMUX_DATA(LCD_TCON6_MARK, PG23MD_011),
+ PINMUX_DATA(TXD5_MARK, PG23MD_100),
+
+ PINMUX_DATA(PG22_DATA, PG22MD_000),
+ PINMUX_DATA(LCD_DATA22_PG22_MARK, PG22MD_010),
+ PINMUX_DATA(LCD_TCON5_MARK, PG22MD_011),
+ PINMUX_DATA(RXD5_MARK, PG22MD_100),
+
+ PINMUX_DATA(PG21_DATA, PG21MD_000),
+ PINMUX_DATA(DV_DATA7_MARK, PG21MD_001),
+ PINMUX_DATA(LCD_DATA21_PG21_MARK, PG21MD_010),
+ PINMUX_DATA(LCD_TCON4_MARK, PG21MD_011),
+ PINMUX_DATA(TXD4_MARK, PG21MD_100),
+
+ PINMUX_DATA(PG20_DATA, PG20MD_000),
+ PINMUX_DATA(DV_DATA6_MARK, PG20MD_001),
+ PINMUX_DATA(LCD_DATA20_PG20_MARK, PG20MD_010),
+ PINMUX_DATA(LCD_TCON3_MARK, PG20MD_011),
+ PINMUX_DATA(RXD4_MARK, PG20MD_100),
+
+ PINMUX_DATA(PG19_DATA, PG19MD_000),
+ PINMUX_DATA(DV_DATA5_MARK, PG19MD_001),
+ PINMUX_DATA(LCD_DATA19_PG19_MARK, PG19MD_010),
+ PINMUX_DATA(SPDIF_OUT_MARK, PG19MD_011),
+ PINMUX_DATA(SCK5_MARK, PG19MD_100),
+
+ PINMUX_DATA(PG18_DATA, PG18MD_000),
+ PINMUX_DATA(DV_DATA4_MARK, PG18MD_001),
+ PINMUX_DATA(LCD_DATA18_PG18_MARK, PG18MD_010),
+ PINMUX_DATA(SPDIF_IN_MARK, PG18MD_011),
+ PINMUX_DATA(SCK4_MARK, PG18MD_100),
+
+/*
+ * TODO: the hardware manual shows PG17 as 3 bits wide in the register
+ * diagram but as 2 bits in the description; we go with 2 bits here.
+ */
+ PINMUX_DATA(PG17_DATA, PG17MD_00),
+ PINMUX_DATA(WE3ICIOWRAHDQMUU_MARK, PG17MD_01),
+ PINMUX_DATA(LCD_DATA17_PG17_MARK, PG17MD_10),
+
+/*
+ * TODO: the hardware manual shows PG16 as 3 bits wide in the register
+ * diagram but as 2 bits in the description; we go with 2 bits here.
+ */
+ PINMUX_DATA(PG16_DATA, PG16MD_00),
+ PINMUX_DATA(WE2ICIORDDQMUL_MARK, PG16MD_01),
+ PINMUX_DATA(LCD_DATA16_PG16_MARK, PG16MD_10),
+
+ PINMUX_DATA(PG15_DATA, PG15MD_00),
+ PINMUX_DATA(D31_MARK, PG15MD_01),
+ PINMUX_DATA(LCD_DATA15_PG15_MARK, PG15MD_10),
+ PINMUX_DATA(PINT7_PG_MARK, PG15MD_11),
+
+ PINMUX_DATA(PG14_DATA, PG14MD_00),
+ PINMUX_DATA(D30_MARK, PG14MD_01),
+ PINMUX_DATA(LCD_DATA14_PG14_MARK, PG14MD_10),
+ PINMUX_DATA(PINT6_PG_MARK, PG14MD_11),
+
+ PINMUX_DATA(PG13_DATA, PG13MD_00),
+ PINMUX_DATA(D29_MARK, PG13MD_01),
+ PINMUX_DATA(LCD_DATA13_PG13_MARK, PG13MD_10),
+ PINMUX_DATA(PINT5_PG_MARK, PG13MD_11),
+
+ PINMUX_DATA(PG12_DATA, PG12MD_00),
+ PINMUX_DATA(D28_MARK, PG12MD_01),
+ PINMUX_DATA(LCD_DATA12_PG12_MARK, PG12MD_10),
+ PINMUX_DATA(PINT4_PG_MARK, PG12MD_11),
+
+ PINMUX_DATA(PG11_DATA, PG11MD_000),
+ PINMUX_DATA(D27_MARK, PG11MD_001),
+ PINMUX_DATA(LCD_DATA11_PG11_MARK, PG11MD_010),
+ PINMUX_DATA(PINT3_PG_MARK, PG11MD_011),
+ PINMUX_DATA(TIOC3D_MARK, PG11MD_100),
+
+ PINMUX_DATA(PG10_DATA, PG10MD_000),
+ PINMUX_DATA(D26_MARK, PG10MD_001),
+ PINMUX_DATA(LCD_DATA10_PG10_MARK, PG10MD_010),
+ PINMUX_DATA(PINT2_PG_MARK, PG10MD_011),
+ PINMUX_DATA(TIOC3C_MARK, PG10MD_100),
+
+ PINMUX_DATA(PG9_DATA, PG9MD_000),
+ PINMUX_DATA(D25_MARK, PG9MD_001),
+ PINMUX_DATA(LCD_DATA9_PG9_MARK, PG9MD_010),
+ PINMUX_DATA(PINT1_PG_MARK, PG9MD_011),
+ PINMUX_DATA(TIOC3B_MARK, PG9MD_100),
+
+ PINMUX_DATA(PG8_DATA, PG8MD_000),
+ PINMUX_DATA(D24_MARK, PG8MD_001),
+ PINMUX_DATA(LCD_DATA8_PG8_MARK, PG8MD_010),
+ PINMUX_DATA(PINT0_PG_MARK, PG8MD_011),
+ PINMUX_DATA(TIOC3A_MARK, PG8MD_100),
+
+ PINMUX_DATA(PG7_DATA, PG7MD_000),
+ PINMUX_DATA(D23_MARK, PG7MD_001),
+ PINMUX_DATA(LCD_DATA7_PG7_MARK, PG7MD_010),
+ PINMUX_DATA(IRQ7_PG_MARK, PG7MD_011),
+ PINMUX_DATA(TIOC2B_MARK, PG7MD_100),
+
+ PINMUX_DATA(PG6_DATA, PG6MD_000),
+ PINMUX_DATA(D22_MARK, PG6MD_001),
+ PINMUX_DATA(LCD_DATA6_PG6_MARK, PG6MD_010),
+ PINMUX_DATA(IRQ6_PG_MARK, PG6MD_011),
+ PINMUX_DATA(TIOC2A_MARK, PG6MD_100),
+
+ PINMUX_DATA(PG5_DATA, PG5MD_000),
+ PINMUX_DATA(D21_MARK, PG5MD_001),
+ PINMUX_DATA(LCD_DATA5_PG5_MARK, PG5MD_010),
+ PINMUX_DATA(IRQ5_PG_MARK, PG5MD_011),
+ PINMUX_DATA(TIOC1B_MARK, PG5MD_100),
+
+ PINMUX_DATA(PG4_DATA, PG4MD_000),
+ PINMUX_DATA(D20_MARK, PG4MD_001),
+ PINMUX_DATA(LCD_DATA4_PG4_MARK, PG4MD_010),
+ PINMUX_DATA(IRQ4_PG_MARK, PG4MD_011),
+ PINMUX_DATA(TIOC1A_MARK, PG4MD_100),
+
+ PINMUX_DATA(PG3_DATA, PG3MD_000),
+ PINMUX_DATA(D19_MARK, PG3MD_001),
+ PINMUX_DATA(LCD_DATA3_PG3_MARK, PG3MD_010),
+ PINMUX_DATA(IRQ3_PG_MARK, PG3MD_011),
+ PINMUX_DATA(TIOC0D_MARK, PG3MD_100),
+
+ PINMUX_DATA(PG2_DATA, PG2MD_000),
+ PINMUX_DATA(D18_MARK, PG2MD_001),
+ PINMUX_DATA(LCD_DATA2_PG2_MARK, PG2MD_010),
+ PINMUX_DATA(IRQ2_PG_MARK, PG2MD_011),
+ PINMUX_DATA(TIOC0C_MARK, PG2MD_100),
+
+ PINMUX_DATA(PG1_DATA, PG1MD_000),
+ PINMUX_DATA(D17_MARK, PG1MD_001),
+ PINMUX_DATA(LCD_DATA1_PG1_MARK, PG1MD_010),
+ PINMUX_DATA(IRQ1_PG_MARK, PG1MD_011),
+ PINMUX_DATA(TIOC0B_MARK, PG1MD_100),
+
+ PINMUX_DATA(PG0_DATA, PG0MD_000),
+ PINMUX_DATA(D16_MARK, PG0MD_001),
+ PINMUX_DATA(LCD_DATA0_PG0_MARK, PG0MD_010),
+ PINMUX_DATA(IRQ0_PG_MARK, PG0MD_011),
+ PINMUX_DATA(TIOC0A_MARK, PG0MD_100),
+
+ /* Port H */
+ PINMUX_DATA(PH7_DATA, PH7MD_00),
+ PINMUX_DATA(PHAN7_MARK, PH7MD_01),
+ PINMUX_DATA(PINT7_PH_MARK, PH7MD_10),
+
+ PINMUX_DATA(PH6_DATA, PH6MD_00),
+ PINMUX_DATA(PHAN6_MARK, PH6MD_01),
+ PINMUX_DATA(PINT6_PH_MARK, PH6MD_10),
+
+ PINMUX_DATA(PH5_DATA, PH5MD_00),
+ PINMUX_DATA(PHAN5_MARK, PH5MD_01),
+ PINMUX_DATA(PINT5_PH_MARK, PH5MD_10),
+ PINMUX_DATA(LCD_EXTCLK_MARK, PH5MD_11),
+
+ PINMUX_DATA(PH4_DATA, PH4MD_00),
+ PINMUX_DATA(PHAN4_MARK, PH4MD_01),
+ PINMUX_DATA(PINT4_PH_MARK, PH4MD_10),
+
+ PINMUX_DATA(PH3_DATA, PH3MD_00),
+ PINMUX_DATA(PHAN3_MARK, PH3MD_01),
+ PINMUX_DATA(PINT3_PH_MARK, PH3MD_10),
+
+ PINMUX_DATA(PH2_DATA, PH2MD_00),
+ PINMUX_DATA(PHAN2_MARK, PH2MD_01),
+ PINMUX_DATA(PINT2_PH_MARK, PH2MD_10),
+
+ PINMUX_DATA(PH1_DATA, PH1MD_00),
+ PINMUX_DATA(PHAN1_MARK, PH1MD_01),
+ PINMUX_DATA(PINT1_PH_MARK, PH1MD_10),
+
+ PINMUX_DATA(PH0_DATA, PH0MD_00),
+ PINMUX_DATA(PHAN0_MARK, PH0MD_01),
+ PINMUX_DATA(PINT0_PH_MARK, PH0MD_10),
+
+ /* Port I - not on device */
+
+ /* Port J */
+ PINMUX_DATA(PJ31_DATA, PJ31MD_0),
+ PINMUX_DATA(DV_CLK_MARK, PJ31MD_1),
+
+ PINMUX_DATA(PJ30_DATA, PJ30MD_000),
+ PINMUX_DATA(SSIDATA5_MARK, PJ30MD_010),
+ PINMUX_DATA(TIOC2B_MARK, PJ30MD_100),
+ PINMUX_DATA(IETXD_MARK, PJ30MD_101),
+
+ PINMUX_DATA(PJ29_DATA, PJ29MD_000),
+ PINMUX_DATA(SSIWS5_MARK, PJ29MD_010),
+ PINMUX_DATA(TIOC2A_MARK, PJ29MD_100),
+ PINMUX_DATA(IERXD_MARK, PJ29MD_101),
+
+ PINMUX_DATA(PJ28_DATA, PJ28MD_000),
+ PINMUX_DATA(SSISCK5_MARK, PJ28MD_010),
+ PINMUX_DATA(TIOC1B_MARK, PJ28MD_100),
+ PINMUX_DATA(RTS7_MARK, PJ28MD_101),
+
+ PINMUX_DATA(PJ27_DATA, PJ27MD_000),
+ PINMUX_DATA(TIOC1A_MARK, PJ27MD_100),
+ PINMUX_DATA(CTS7_MARK, PJ27MD_101),
+
+ PINMUX_DATA(PJ26_DATA, PJ26MD_000),
+ PINMUX_DATA(SSIDATA4_MARK, PJ26MD_010),
+ PINMUX_DATA(LCD_TCON5_MARK, PJ26MD_011),
+ PINMUX_DATA(TXD7_MARK, PJ26MD_101),
+
+ PINMUX_DATA(PJ25_DATA, PJ25MD_000),
+ PINMUX_DATA(SSIWS4_MARK, PJ25MD_010),
+ PINMUX_DATA(LCD_TCON4_MARK, PJ25MD_011),
+ PINMUX_DATA(SPDIF_OUT_MARK, PJ25MD_100),
+ PINMUX_DATA(RXD7_MARK, PJ25MD_101),
+
+ PINMUX_DATA(PJ24_DATA, PJ24MD_000),
+ PINMUX_DATA(SSISCK4_MARK, PJ24MD_010),
+ PINMUX_DATA(LCD_TCON3_MARK, PJ24MD_011),
+ PINMUX_DATA(SPDIF_IN_MARK, PJ24MD_100),
+ PINMUX_DATA(SCK7_MARK, PJ24MD_101),
+
+ PINMUX_DATA(PJ23_DATA, PJ23MD_000),
+ PINMUX_DATA(DV_DATA23_MARK, PJ23MD_001),
+ PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
+ PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
+ PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
+ PINMUX_DATA(CTX1_MARK, PJ23MD_101),
+
+ PINMUX_DATA(PJ22_DATA, PJ22MD_000),
+ PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
+ PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
+ PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
+ PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
+ PINMUX_DATA(CRX1_MARK, PJ22MD_101),
+ PINMUX_DATA(CRX0_CRX1_MARK, PJ22MD_110),
+
+ PINMUX_DATA(PJ21_DATA, PJ21MD_000),
+ PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
+ PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
+ PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
+ PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
+ PINMUX_DATA(CTX2_MARK, PJ21MD_101),
+
+ PINMUX_DATA(PJ20_DATA, PJ20MD_000),
+ PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
+ PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
+ PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
+ PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
+ PINMUX_DATA(CRX2_MARK, PJ20MD_101),
+ PINMUX_DATA(CRX0CRX1CRX2_PJ20_MARK, PJ20MD_110),
+
+ PINMUX_DATA(PJ19_DATA, PJ19MD_000),
+ PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
+ PINMUX_DATA(LCD_DATA19_PJ19_MARK, PJ19MD_010),
+ PINMUX_DATA(MISO0_PJ19_MARK, PJ19MD_011),
+ PINMUX_DATA(TIOC0D_MARK, PJ19MD_100),
+ PINMUX_DATA(SIOFRXD_MARK, PJ19MD_101),
+ PINMUX_DATA(AUDIO_XOUT_MARK, PJ19MD_110),
+
+ PINMUX_DATA(PJ18_DATA, PJ18MD_000),
+ PINMUX_DATA(DV_DATA18_MARK, PJ18MD_001),
+ PINMUX_DATA(LCD_DATA18_PJ18_MARK, PJ18MD_010),
+ PINMUX_DATA(MOSI0_PJ18_MARK, PJ18MD_011),
+ PINMUX_DATA(TIOC0C_MARK, PJ18MD_100),
+ PINMUX_DATA(SIOFTXD_MARK, PJ18MD_101),
+
+ PINMUX_DATA(PJ17_DATA, PJ17MD_000),
+ PINMUX_DATA(DV_DATA17_MARK, PJ17MD_001),
+ PINMUX_DATA(LCD_DATA17_PJ17_MARK, PJ17MD_010),
+ PINMUX_DATA(SSL00_PJ17_MARK, PJ17MD_011),
+ PINMUX_DATA(TIOC0B_MARK, PJ17MD_100),
+ PINMUX_DATA(SIOFSYNC_MARK, PJ17MD_101),
+
+ PINMUX_DATA(PJ16_DATA, PJ16MD_000),
+ PINMUX_DATA(DV_DATA16_MARK, PJ16MD_001),
+ PINMUX_DATA(LCD_DATA16_PJ16_MARK, PJ16MD_010),
+ PINMUX_DATA(RSPCK0_PJ16_MARK, PJ16MD_011),
+ PINMUX_DATA(TIOC0A_MARK, PJ16MD_100),
+ PINMUX_DATA(SIOFSCK_MARK, PJ16MD_101),
+
+ PINMUX_DATA(PJ15_DATA, PJ15MD_000),
+ PINMUX_DATA(DV_DATA15_MARK, PJ15MD_001),
+ PINMUX_DATA(LCD_DATA15_PJ15_MARK, PJ15MD_010),
+ PINMUX_DATA(PINT7_PJ_MARK, PJ15MD_011),
+ PINMUX_DATA(PWM2H_MARK, PJ15MD_100),
+ PINMUX_DATA(TXD7_MARK, PJ15MD_101),
+
+ PINMUX_DATA(PJ14_DATA, PJ14MD_000),
+ PINMUX_DATA(DV_DATA14_MARK, PJ14MD_001),
+ PINMUX_DATA(LCD_DATA14_PJ14_MARK, PJ14MD_010),
+ PINMUX_DATA(PINT6_PJ_MARK, PJ14MD_011),
+ PINMUX_DATA(PWM2G_MARK, PJ14MD_100),
+ PINMUX_DATA(TXD6_MARK, PJ14MD_101),
+
+ PINMUX_DATA(PJ13_DATA, PJ13MD_000),
+ PINMUX_DATA(DV_DATA13_MARK, PJ13MD_001),
+ PINMUX_DATA(LCD_DATA13_PJ13_MARK, PJ13MD_010),
+ PINMUX_DATA(PINT5_PJ_MARK, PJ13MD_011),
+ PINMUX_DATA(PWM2F_MARK, PJ13MD_100),
+ PINMUX_DATA(TXD5_MARK, PJ13MD_101),
+
+ PINMUX_DATA(PJ12_DATA, PJ12MD_000),
+ PINMUX_DATA(DV_DATA12_MARK, PJ12MD_001),
+ PINMUX_DATA(LCD_DATA12_PJ12_MARK, PJ12MD_010),
+ PINMUX_DATA(PINT4_PJ_MARK, PJ12MD_011),
+ PINMUX_DATA(PWM2E_MARK, PJ12MD_100),
+ PINMUX_DATA(SCK7_MARK, PJ12MD_101),
+
+ PINMUX_DATA(PJ11_DATA, PJ11MD_000),
+ PINMUX_DATA(DV_DATA11_MARK, PJ11MD_001),
+ PINMUX_DATA(LCD_DATA11_PJ11_MARK, PJ11MD_010),
+ PINMUX_DATA(PINT3_PJ_MARK, PJ11MD_011),
+ PINMUX_DATA(PWM2D_MARK, PJ11MD_100),
+ PINMUX_DATA(SCK6_MARK, PJ11MD_101),
+
+ PINMUX_DATA(PJ10_DATA, PJ10MD_000),
+ PINMUX_DATA(DV_DATA10_MARK, PJ10MD_001),
+ PINMUX_DATA(LCD_DATA10_PJ10_MARK, PJ10MD_010),
+ PINMUX_DATA(PINT2_PJ_MARK, PJ10MD_011),
+ PINMUX_DATA(PWM2C_MARK, PJ10MD_100),
+ PINMUX_DATA(SCK5_MARK, PJ10MD_101),
+
+ PINMUX_DATA(PJ9_DATA, PJ9MD_000),
+ PINMUX_DATA(DV_DATA9_MARK, PJ9MD_001),
+ PINMUX_DATA(LCD_DATA9_PJ9_MARK, PJ9MD_010),
+ PINMUX_DATA(PINT1_PJ_MARK, PJ9MD_011),
+ PINMUX_DATA(PWM2B_MARK, PJ9MD_100),
+ PINMUX_DATA(RTS5_MARK, PJ9MD_101),
+
+ PINMUX_DATA(PJ8_DATA, PJ8MD_000),
+ PINMUX_DATA(DV_DATA8_MARK, PJ8MD_001),
+ PINMUX_DATA(LCD_DATA8_PJ8_MARK, PJ8MD_010),
+ PINMUX_DATA(PINT0_PJ_MARK, PJ8MD_011),
+ PINMUX_DATA(PWM2A_MARK, PJ8MD_100),
+ PINMUX_DATA(CTS5_MARK, PJ8MD_101),
+
+ PINMUX_DATA(PJ7_DATA, PJ7MD_000),
+ PINMUX_DATA(DV_DATA7_MARK, PJ7MD_001),
+ PINMUX_DATA(LCD_DATA7_PJ7_MARK, PJ7MD_010),
+ PINMUX_DATA(SD_D2_MARK, PJ7MD_011),
+ PINMUX_DATA(PWM1H_MARK, PJ7MD_100),
+
+ PINMUX_DATA(PJ6_DATA, PJ6MD_000),
+ PINMUX_DATA(DV_DATA6_MARK, PJ6MD_001),
+ PINMUX_DATA(LCD_DATA6_PJ6_MARK, PJ6MD_010),
+ PINMUX_DATA(SD_D3_MARK, PJ6MD_011),
+ PINMUX_DATA(PWM1G_MARK, PJ6MD_100),
+
+ PINMUX_DATA(PJ5_DATA, PJ5MD_000),
+ PINMUX_DATA(DV_DATA5_MARK, PJ5MD_001),
+ PINMUX_DATA(LCD_DATA5_PJ5_MARK, PJ5MD_010),
+ PINMUX_DATA(SD_CMD_MARK, PJ5MD_011),
+ PINMUX_DATA(PWM1F_MARK, PJ5MD_100),
+
+ PINMUX_DATA(PJ4_DATA, PJ4MD_000),
+ PINMUX_DATA(DV_DATA4_MARK, PJ4MD_001),
+ PINMUX_DATA(LCD_DATA4_PJ4_MARK, PJ4MD_010),
+ PINMUX_DATA(SD_CLK_MARK, PJ4MD_011),
+ PINMUX_DATA(PWM1E_MARK, PJ4MD_100),
+
+ PINMUX_DATA(PJ3_DATA, PJ3MD_000),
+ PINMUX_DATA(DV_DATA3_MARK, PJ3MD_001),
+ PINMUX_DATA(LCD_DATA3_PJ3_MARK, PJ3MD_010),
+ PINMUX_DATA(SD_D0_MARK, PJ3MD_011),
+ PINMUX_DATA(PWM1D_MARK, PJ3MD_100),
+
+ PINMUX_DATA(PJ2_DATA, PJ2MD_000),
+ PINMUX_DATA(DV_DATA2_MARK, PJ2MD_001),
+ PINMUX_DATA(LCD_DATA2_PJ2_MARK, PJ2MD_010),
+ PINMUX_DATA(SD_D1_MARK, PJ2MD_011),
+ PINMUX_DATA(PWM1C_MARK, PJ2MD_100),
+
+ PINMUX_DATA(PJ1_DATA, PJ1MD_000),
+ PINMUX_DATA(DV_DATA1_MARK, PJ1MD_001),
+ PINMUX_DATA(LCD_DATA1_PJ1_MARK, PJ1MD_010),
+ PINMUX_DATA(SD_WP_MARK, PJ1MD_011),
+ PINMUX_DATA(PWM1B_MARK, PJ1MD_100),
+
+ PINMUX_DATA(PJ0_DATA, PJ0MD_000),
+ PINMUX_DATA(DV_DATA0_MARK, PJ0MD_001),
+ PINMUX_DATA(LCD_DATA0_PJ0_MARK, PJ0MD_010),
+ PINMUX_DATA(SD_CD_MARK, PJ0MD_011),
+ PINMUX_DATA(PWM1A_MARK, PJ0MD_100),
+};
+
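+/*
+ * GPIO table: pin entries (GPIO_Pxn) map to their *_DATA bits, while
+ * function entries (GPIO_FN_*) map to the corresponding *_MARK mux
+ * setting listed in pinmux_data[] above.
+ */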
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* Port A */
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* Port B */
+ PINMUX_GPIO(GPIO_PB22, PB22_DATA),
+ PINMUX_GPIO(GPIO_PB21, PB21_DATA),
+ PINMUX_GPIO(GPIO_PB20, PB20_DATA),
+ PINMUX_GPIO(GPIO_PB19, PB19_DATA),
+ PINMUX_GPIO(GPIO_PB18, PB18_DATA),
+ PINMUX_GPIO(GPIO_PB17, PB17_DATA),
+ PINMUX_GPIO(GPIO_PB16, PB16_DATA),
+ PINMUX_GPIO(GPIO_PB15, PB15_DATA),
+ PINMUX_GPIO(GPIO_PB14, PB14_DATA),
+ PINMUX_GPIO(GPIO_PB13, PB13_DATA),
+ PINMUX_GPIO(GPIO_PB12, PB12_DATA),
+ PINMUX_GPIO(GPIO_PB11, PB11_DATA),
+ PINMUX_GPIO(GPIO_PB10, PB10_DATA),
+ PINMUX_GPIO(GPIO_PB9, PB9_DATA),
+ PINMUX_GPIO(GPIO_PB8, PB8_DATA),
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+
+ /* Port C */
+ PINMUX_GPIO(GPIO_PC8, PC8_DATA),
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* Port D */
+ PINMUX_GPIO(GPIO_PD15, PD15_DATA),
+ PINMUX_GPIO(GPIO_PD14, PD14_DATA),
+ PINMUX_GPIO(GPIO_PD13, PD13_DATA),
+ PINMUX_GPIO(GPIO_PD12, PD12_DATA),
+ PINMUX_GPIO(GPIO_PD11, PD11_DATA),
+ PINMUX_GPIO(GPIO_PD10, PD10_DATA),
+ PINMUX_GPIO(GPIO_PD9, PD9_DATA),
+ PINMUX_GPIO(GPIO_PD8, PD8_DATA),
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* Port E */
+ PINMUX_GPIO(GPIO_PE7, PE7_DATA),
+ PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* Port F */
+ PINMUX_GPIO(GPIO_PF23, PF23_DATA),
+ PINMUX_GPIO(GPIO_PF22, PF22_DATA),
+ PINMUX_GPIO(GPIO_PF21, PF21_DATA),
+ PINMUX_GPIO(GPIO_PF20, PF20_DATA),
+ PINMUX_GPIO(GPIO_PF19, PF19_DATA),
+ PINMUX_GPIO(GPIO_PF18, PF18_DATA),
+ PINMUX_GPIO(GPIO_PF17, PF17_DATA),
+ PINMUX_GPIO(GPIO_PF16, PF16_DATA),
+ PINMUX_GPIO(GPIO_PF15, PF15_DATA),
+ PINMUX_GPIO(GPIO_PF14, PF14_DATA),
+ PINMUX_GPIO(GPIO_PF13, PF13_DATA),
+ PINMUX_GPIO(GPIO_PF12, PF12_DATA),
+ PINMUX_GPIO(GPIO_PF11, PF11_DATA),
+ PINMUX_GPIO(GPIO_PF10, PF10_DATA),
+ PINMUX_GPIO(GPIO_PF9, PF9_DATA),
+ PINMUX_GPIO(GPIO_PF8, PF8_DATA),
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* Port G */
+ PINMUX_GPIO(GPIO_PG27, PG27_DATA),
+ PINMUX_GPIO(GPIO_PG26, PG26_DATA),
+ PINMUX_GPIO(GPIO_PG25, PG25_DATA),
+ PINMUX_GPIO(GPIO_PG24, PG24_DATA),
+ PINMUX_GPIO(GPIO_PG23, PG23_DATA),
+ PINMUX_GPIO(GPIO_PG22, PG22_DATA),
+ PINMUX_GPIO(GPIO_PG21, PG21_DATA),
+ PINMUX_GPIO(GPIO_PG20, PG20_DATA),
+ PINMUX_GPIO(GPIO_PG19, PG19_DATA),
+ PINMUX_GPIO(GPIO_PG18, PG18_DATA),
+ PINMUX_GPIO(GPIO_PG17, PG17_DATA),
+ PINMUX_GPIO(GPIO_PG16, PG16_DATA),
+ PINMUX_GPIO(GPIO_PG15, PG15_DATA),
+ PINMUX_GPIO(GPIO_PG14, PG14_DATA),
+ PINMUX_GPIO(GPIO_PG13, PG13_DATA),
+ PINMUX_GPIO(GPIO_PG12, PG12_DATA),
+ PINMUX_GPIO(GPIO_PG11, PG11_DATA),
+ PINMUX_GPIO(GPIO_PG10, PG10_DATA),
+ PINMUX_GPIO(GPIO_PG9, PG9_DATA),
+ PINMUX_GPIO(GPIO_PG8, PG8_DATA),
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+ PINMUX_GPIO(GPIO_PG4, PG4_DATA),
+ PINMUX_GPIO(GPIO_PG3, PG3_DATA),
+ PINMUX_GPIO(GPIO_PG2, PG2_DATA),
+ PINMUX_GPIO(GPIO_PG1, PG1_DATA),
+ PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+
+ /* Port H - Port H does not have a Data Register */
+
+ /* Port I - not on device */
+
+ /* Port J */
+ PINMUX_GPIO(GPIO_PJ31, PJ31_DATA),
+ PINMUX_GPIO(GPIO_PJ30, PJ30_DATA),
+ PINMUX_GPIO(GPIO_PJ29, PJ29_DATA),
+ PINMUX_GPIO(GPIO_PJ28, PJ28_DATA),
+ PINMUX_GPIO(GPIO_PJ27, PJ27_DATA),
+ PINMUX_GPIO(GPIO_PJ26, PJ26_DATA),
+ PINMUX_GPIO(GPIO_PJ25, PJ25_DATA),
+ PINMUX_GPIO(GPIO_PJ24, PJ24_DATA),
+ PINMUX_GPIO(GPIO_PJ23, PJ23_DATA),
+ PINMUX_GPIO(GPIO_PJ22, PJ22_DATA),
+ PINMUX_GPIO(GPIO_PJ21, PJ21_DATA),
+ PINMUX_GPIO(GPIO_PJ20, PJ20_DATA),
+ PINMUX_GPIO(GPIO_PJ19, PJ19_DATA),
+ PINMUX_GPIO(GPIO_PJ18, PJ18_DATA),
+ PINMUX_GPIO(GPIO_PJ17, PJ17_DATA),
+ PINMUX_GPIO(GPIO_PJ16, PJ16_DATA),
+ PINMUX_GPIO(GPIO_PJ15, PJ15_DATA),
+ PINMUX_GPIO(GPIO_PJ14, PJ14_DATA),
+ PINMUX_GPIO(GPIO_PJ13, PJ13_DATA),
+ PINMUX_GPIO(GPIO_PJ12, PJ12_DATA),
+ PINMUX_GPIO(GPIO_PJ11, PJ11_DATA),
+ PINMUX_GPIO(GPIO_PJ10, PJ10_DATA),
+ PINMUX_GPIO(GPIO_PJ9, PJ9_DATA),
+ PINMUX_GPIO(GPIO_PJ8, PJ8_DATA),
+ PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
+ PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
+ PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
+ PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
+ PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
+ PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
+ PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
+ PINMUX_GPIO(GPIO_PJ0, PJ0_DATA),
+
+ /* INTC */
+ PINMUX_GPIO(GPIO_FN_IRQ7_PG, IRQ7_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PG, IRQ6_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PG, IRQ5_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PG, IRQ4_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PG, IRQ3_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PG, IRQ2_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PG, IRQ1_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PG, IRQ0_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7_PF, IRQ7_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6_PF, IRQ6_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5_PF, IRQ5_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4_PF, IRQ4_PF_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_PJ, IRQ3_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_PJ, IRQ2_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PJ, IRQ1_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PJ, IRQ0_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_PC, IRQ1_PC_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_PC, IRQ0_PC_MARK),
+
+ PINMUX_GPIO(GPIO_FN_PINT7_PG, PINT7_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PG, PINT6_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PG, PINT5_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PG, PINT4_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PG, PINT3_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PG, PINT2_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PG, PINT1_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PG, PINT0_PG_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT7_PH, PINT7_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PH, PINT6_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PH, PINT5_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PH, PINT4_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PH, PINT3_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PH, PINT2_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PH, PINT1_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PH, PINT0_PH_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT7_PJ, PINT7_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT6_PJ, PINT6_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT5_PJ, PINT5_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT4_PJ, PINT4_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT3_PJ, PINT3_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT2_PJ, PINT2_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT1_PJ, PINT1_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_PINT0_PJ, PINT0_PJ_MARK),
+
+ /* WDT */
+ PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+
+ /* CAN */
+ PINMUX_GPIO(GPIO_FN_CTX1, CTX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX1, CRX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CTX0, CTX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0, CRX0_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0_CRX1, CRX0_CRX1_MARK),
+ PINMUX_GPIO(GPIO_FN_CRX0_CRX1_CRX2, CRX0_CRX1_CRX2_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
+ PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
+ PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
+ PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
+ PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
+ PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
+ PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
+ PINMUX_GPIO(GPIO_FN_A13, A13_MARK),
+ PINMUX_GPIO(GPIO_FN_A12, A12_MARK),
+ PINMUX_GPIO(GPIO_FN_A11, A11_MARK),
+ PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
+ PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
+ PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
+ PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
+ PINMUX_GPIO(GPIO_FN_A6, A6_MARK),
+ PINMUX_GPIO(GPIO_FN_A5, A5_MARK),
+ PINMUX_GPIO(GPIO_FN_A4, A4_MARK),
+ PINMUX_GPIO(GPIO_FN_A3, A3_MARK),
+ PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
+ PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
+ PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
+ PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
+ PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
+ PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
+ PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
+ PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
+ PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
+ PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
+ PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
+ PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
+ PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
+ PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
+ PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
+ PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
+ PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CS3, CS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CS2, CS2_MARK),
+ PINMUX_GPIO(GPIO_FN_CS1, CS1_MARK),
+ PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5CE1A, CS5CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
+ PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3ICIOWRAHDQMUU, WE3ICIOWRAHDQMUU_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2ICIORDDQMUL, WE2ICIORDDQMUL_MARK),
+ PINMUX_GPIO(GPIO_FN_WE1DQMUWE, WE1DQMUWE_MARK),
+ PINMUX_GPIO(GPIO_FN_WE0DQML, WE0DQML_MARK),
+ PINMUX_GPIO(GPIO_FN_RAS, RAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CAS, CAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+
+ /* MTU2 */
+ PINMUX_GPIO(GPIO_FN_TIOC4D, TIOC4D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4C, TIOC4C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4B, TIOC4B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC4A, TIOC4A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3D, TIOC3D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3C, TIOC3C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3B, TIOC3B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC3A, TIOC3A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2B, TIOC2B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1B, TIOC1B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC2A, TIOC2A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC1A, TIOC1A_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0D, TIOC0D_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0C, TIOC0C_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0B, TIOC0B_MARK),
+ PINMUX_GPIO(GPIO_FN_TIOC0A, TIOC0A_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKD, TCLKD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKC, TCLKC_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKB, TCLKB_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLKA, TCLKA_MARK),
+
+ /* SCIF */
+ PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS1, RTS1_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS1, CTS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK5, SCK5_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD5, TXD5_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD5, RXD5_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS5, RTS5_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS5, CTS5_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK6, SCK6_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD6, TXD6_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD6, RXD6_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK7, SCK7_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD7, TXD7_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD7, RXD7_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS7, RTS7_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS7, CTS7_MARK),
+
+ /* RSPI */
+ PINMUX_GPIO(GPIO_FN_RSPCK0_PJ16, RSPCK0_PJ16_MARK),
+ PINMUX_GPIO(GPIO_FN_SSL00_PJ17, SSL00_PJ17_MARK),
+ PINMUX_GPIO(GPIO_FN_MOSI0_PJ18, MOSI0_PJ18_MARK),
+ PINMUX_GPIO(GPIO_FN_MISO0_PJ19, MISO0_PJ19_MARK),
+ PINMUX_GPIO(GPIO_FN_RSPCK0_PB17, RSPCK0_PB17_MARK),
+ PINMUX_GPIO(GPIO_FN_SSL00_PB18, SSL00_PB18_MARK),
+ PINMUX_GPIO(GPIO_FN_MOSI0_PB19, MOSI0_PB19_MARK),
+ PINMUX_GPIO(GPIO_FN_MISO0_PB20, MISO0_PB20_MARK),
+ PINMUX_GPIO(GPIO_FN_RSPCK1, RSPCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_MOSI1, MOSI1_MARK),
+ PINMUX_GPIO(GPIO_FN_MISO1, MISO1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSL10, SSL10_MARK),
+
+ /* IIC3 */
+ PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+
+ /* SSI */
+ PINMUX_GPIO(GPIO_FN_SSISCK0, SSISCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS0, SSIWS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSITXD0, SSITXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIRXD0, SSIRXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS1, SSIWS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS2, SSIWS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIWS3, SSIWS3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK1, SSISCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK2, SSISCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSISCK3, SSISCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA1, SSIDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA2, SSIDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSIDATA3, SSIDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDIO_CLK, AUDIO_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDIO_XOUT, AUDIO_XOUT_MARK),
+
+ /* SIOF */ /* NOTE Shares AUDIO_CLK with SSI */
+ PINMUX_GPIO(GPIO_FN_SIOFTXD, SIOFTXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOFRXD, SIOFRXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOFSYNC, SIOFSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOFSCK, SIOFSCK_MARK),
+
+ /* SPDIF */ /* NOTE Shares AUDIO_CLK with SSI */
+ PINMUX_GPIO(GPIO_FN_SPDIF_IN, SPDIF_IN_MARK),
+ PINMUX_GPIO(GPIO_FN_SPDIF_OUT, SPDIF_OUT_MARK),
+
+ /* NANDFMC */ /* NOTE Controller is not available in boot mode 0 */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+
+ /* VDC3 */
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+
+ PINMUX_GPIO(GPIO_FN_DV_DATA23, DV_DATA23_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA22, DV_DATA22_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA21, DV_DATA21_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA20, DV_DATA20_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA19, DV_DATA19_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA18, DV_DATA18_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA17, DV_DATA17_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA16, DV_DATA16_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA15, DV_DATA15_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA14, DV_DATA14_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA13, DV_DATA13_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA12, DV_DATA12_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA11, DV_DATA11_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA10, DV_DATA10_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA9, DV_DATA9_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA8, DV_DATA8_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA7, DV_DATA7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA6, DV_DATA6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA5, DV_DATA5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA4, DV_DATA4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA3, DV_DATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA2, DV_DATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA1, DV_DATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_DATA0, DV_DATA0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_EXTCLK, LCD_EXTCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VSYNC, LCD_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_DATA23_PG23, LCD_DATA23_PG23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA22_PG22, LCD_DATA22_PG22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA21_PG21, LCD_DATA21_PG21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA20_PG20, LCD_DATA20_PG20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA19_PG19, LCD_DATA19_PG19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA18_PG18, LCD_DATA18_PG18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA17_PG17, LCD_DATA17_PG17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA16_PG16, LCD_DATA16_PG16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15_PG15, LCD_DATA15_PG15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14_PG14, LCD_DATA14_PG14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13_PG13, LCD_DATA13_PG13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12_PG12, LCD_DATA12_PG12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11_PG11, LCD_DATA11_PG11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10_PG10, LCD_DATA10_PG10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9_PG9, LCD_DATA9_PG9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8_PG8, LCD_DATA8_PG8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7_PG7, LCD_DATA7_PG7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6_PG6, LCD_DATA6_PG6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5_PG5, LCD_DATA5_PG5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4_PG4, LCD_DATA4_PG4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3_PG3, LCD_DATA3_PG3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2_PG2, LCD_DATA2_PG2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1_PG1, LCD_DATA1_PG1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0_PG0, LCD_DATA0_PG0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_DATA23_PJ23, LCD_DATA23_PJ23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA22_PJ22, LCD_DATA22_PJ22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA21_PJ21, LCD_DATA21_PJ21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA20_PJ20, LCD_DATA20_PJ20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA19_PJ19, LCD_DATA19_PJ19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA18_PJ18, LCD_DATA18_PJ18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA17_PJ17, LCD_DATA17_PJ17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA16_PJ16, LCD_DATA16_PJ16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15_PJ15, LCD_DATA15_PJ15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14_PJ14, LCD_DATA14_PJ14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13_PJ13, LCD_DATA13_PJ13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12_PJ12, LCD_DATA12_PJ12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11_PJ11, LCD_DATA11_PJ11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10_PJ10, LCD_DATA10_PJ10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9_PJ9, LCD_DATA9_PJ9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8_PJ8, LCD_DATA8_PJ8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7_PJ7, LCD_DATA7_PJ7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6_PJ6, LCD_DATA6_PJ6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5_PJ5, LCD_DATA5_PJ5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4_PJ4, LCD_DATA4_PJ4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3_PJ3, LCD_DATA3_PJ3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2_PJ2, LCD_DATA2_PJ2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1_PJ1, LCD_DATA1_PJ1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0_PJ0, LCD_DATA0_PJ0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ /* "name" addr register_size Field_Width */
+
+ /* where Field_Width is 1 for single mode registers or 4 for up to 16
+    mode registers and modes are described in ascending order [0..15] */
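+
+ /* For example, in PBCR5 below each 4-bit field lists all 16 possible
+    mode values in ascending order, with non-existent modes padded with 0
+    (the leading all-zero field covers PB23, which this part does not
+    have), while 1-bit I/O registers such as PAIOR0 list an IN/OUT pair
+    per pin. */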
+
+ { PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, PA1_IN, PA1_OUT,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, PA0_IN, PA0_OUT }
+ },
+ { PINMUX_CFG_REG("PBCR5", 0xfffe3824, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB22MD_000, PB22MD_001, PB22MD_010, PB22MD_011,
+ PB22MD_100, PB22MD_101, PB22MD_110, PB22MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB21MD_00, PB21MD_01, PB21MD_10, PB21MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB20MD_000, PB20MD_001, PB20MD_010, PB20MD_011,
+ PB20MD_100, PB20MD_101, PB20MD_110, PB20MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR4", 0xfffe3826, 16, 4) {
+ PB19MD_000, PB19MD_001, PB19MD_010, PB19MD_011,
+ PB19MD_100, PB19MD_101, PB19MD_110, PB19MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB18MD_000, PB18MD_001, PB18MD_010, PB18MD_011,
+ PB18MD_100, PB18MD_101, PB18MD_110, PB18MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB17MD_000, PB17MD_001, PB17MD_010, PB17MD_011,
+ PB17MD_100, PB17MD_101, PB17MD_110, PB17MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB16MD_000, PB16MD_001, PB16MD_010, PB16MD_011,
+ PB16MD_100, PB16MD_101, PB16MD_110, PB16MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR3", 0xfffe3828, 16, 4) {
+ PB15MD_000, PB15MD_001, PB15MD_010, PB15MD_011,
+ PB15MD_100, PB15MD_101, PB15MD_110, PB15MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB14MD_000, PB14MD_001, PB14MD_010, PB14MD_011,
+ PB14MD_100, PB14MD_101, PB14MD_110, PB14MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB13MD_000, PB13MD_001, PB13MD_010, PB13MD_011,
+ PB13MD_100, PB13MD_101, PB13MD_110, PB13MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR2", 0xfffe382a, 16, 4) {
+ PB11MD_00, PB11MD_01, PB11MD_10, PB11MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB10MD_00, PB10MD_01, PB10MD_10, PB10MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB9MD_00, PB9MD_01, PB9MD_10, PB9MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB8MD_00, PB8MD_01, PB8MD_10, PB8MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR1", 0xfffe382c, 16, 4) {
+ PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR0", 0xfffe382e, 16, 4) {
+ PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PBIOR1", 0xfffe3830, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ PB22_IN, PB22_OUT,
+ PB21_IN, PB21_OUT,
+ PB20_IN, PB20_OUT,
+ PB19_IN, PB19_OUT,
+ PB18_IN, PB18_OUT,
+ PB17_IN, PB17_OUT,
+ PB16_IN, PB16_OUT }
+ },
+ { PINMUX_CFG_REG("PBIOR0", 0xfffe3832, 16, 1) {
+ PB15_IN, PB15_OUT,
+ PB14_IN, PB14_OUT,
+ PB13_IN, PB13_OUT,
+ PB12_IN, PB12_OUT,
+ PB11_IN, PB11_OUT,
+ PB10_IN, PB10_OUT,
+ PB9_IN, PB9_OUT,
+ PB8_IN, PB8_OUT,
+ PB7_IN, PB7_OUT,
+ PB6_IN, PB6_OUT,
+ PB5_IN, PB5_OUT,
+ PB4_IN, PB4_OUT,
+ PB3_IN, PB3_OUT,
+ PB2_IN, PB2_OUT,
+ PB1_IN, PB1_OUT,
+ 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PCCR2", 0xfffe384a, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC8MD_000, PC8MD_001, PC8MD_010, PC8MD_011,
+ PC8MD_100, PC8MD_101, PC8MD_110, PC8MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCR1", 0xfffe384c, 16, 4) {
+ PC7MD_000, PC7MD_001, PC7MD_010, PC7MD_011,
+ PC7MD_100, PC7MD_101, PC7MD_110, PC7MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC6MD_000, PC6MD_001, PC6MD_010, PC6MD_011,
+ PC6MD_100, PC6MD_101, PC6MD_110, PC6MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC5MD_000, PC5MD_001, PC5MD_010, PC5MD_011,
+ PC5MD_100, PC5MD_101, PC5MD_110, PC5MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC4MD_00, PC4MD_01, PC4MD_10, PC4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PCCR0", 0xfffe384e, 16, 4) {
+ PC3MD_00, PC3MD_01, PC3MD_10, PC3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC2MD_00, PC2MD_01, PC2MD_10, PC2MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC1MD_0, PC1MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PC0MD_0, PC0MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ PC8_IN, PC8_OUT,
+ PC7_IN, PC7_OUT,
+ PC6_IN, PC6_OUT,
+ PC5_IN, PC5_OUT,
+ PC4_IN, PC4_OUT,
+ PC3_IN, PC3_OUT,
+ PC2_IN, PC2_OUT,
+ PC1_IN, PC1_OUT,
+ PC0_IN, PC0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PDCR3", 0xfffe3868, 16, 4) {
+ PD15MD_00, PD15MD_01, PD15MD_10, PD15MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD14MD_00, PD14MD_01, PD14MD_10, PD14MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD13MD_00, PD13MD_01, PD13MD_10, PD13MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD12MD_00, PD12MD_01, PD12MD_10, PD12MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR2", 0xfffe386a, 16, 4) {
+ PD11MD_00, PD11MD_01, PD11MD_10, PD11MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD10MD_00, PD10MD_01, PD10MD_10, PD10MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD9MD_00, PD9MD_01, PD9MD_10, PD9MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD8MD_00, PD8MD_01, PD8MD_10, PD8MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR1", 0xfffe386c, 16, 4) {
+ PD7MD_00, PD7MD_01, PD7MD_10, PD7MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD6MD_00, PD6MD_01, PD6MD_10, PD6MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD5MD_00, PD5MD_01, PD5MD_10, PD5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD4MD_00, PD4MD_01, PD4MD_10, PD4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR0", 0xfffe386e, 16, 4) {
+ PD3MD_00, PD3MD_01, PD3MD_10, PD3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD2MD_00, PD2MD_01, PD2MD_10, PD2MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD1MD_00, PD1MD_01, PD1MD_10, PD1MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PD0MD_00, PD0MD_01, PD0MD_10, PD0MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PDIOR0", 0xfffe3872, 16, 1) {
+ PD15_IN, PD15_OUT,
+ PD14_IN, PD14_OUT,
+ PD13_IN, PD13_OUT,
+ PD12_IN, PD12_OUT,
+ PD11_IN, PD11_OUT,
+ PD10_IN, PD10_OUT,
+ PD9_IN, PD9_OUT,
+ PD8_IN, PD8_OUT,
+ PD7_IN, PD7_OUT,
+ PD6_IN, PD6_OUT,
+ PD5_IN, PD5_OUT,
+ PD4_IN, PD4_OUT,
+ PD3_IN, PD3_OUT,
+ PD2_IN, PD2_OUT,
+ PD1_IN, PD1_OUT,
+ PD0_IN, PD0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PECR1", 0xfffe388c, 16, 4) {
+ PE7MD_00, PE7MD_01, PE7MD_10, PE7MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE6MD_00, PE6MD_01, PE6MD_10, PE6MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE5MD_00, PE5MD_01, PE5MD_10, PE5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE4MD_00, PE4MD_01, PE4MD_10, PE4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECR0", 0xfffe388e, 16, 4) {
+ PE3MD_000, PE3MD_001, PE3MD_010, PE3MD_011,
+ PE3MD_100, PE3MD_101, PE3MD_110, PE3MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE2MD_000, PE2MD_001, PE2MD_010, PE2MD_011,
+ PE2MD_100, PE2MD_101, PE2MD_110, PE2MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE1MD_000, PE1MD_001, PE1MD_010, PE1MD_011,
+ PE1MD_100, PE1MD_101, PE1MD_110, PE1MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PE0MD_00, PE0MD_01, PE0MD_10, PE0MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PEIOR0", 0xfffe3892, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE7_IN, PE7_OUT,
+ PE6_IN, PE6_OUT,
+ PE5_IN, PE5_OUT,
+ PE4_IN, PE4_OUT,
+ PE3_IN, PE3_OUT,
+ PE2_IN, PE2_OUT,
+ PE1_IN, PE1_OUT,
+ PE0_IN, PE0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PFCR6", 0xfffe38a2, 16, 4) {
+ PF23MD_000, PF23MD_001, PF23MD_010, PF23MD_011,
+ PF23MD_100, PF23MD_101, PF23MD_110, PF23MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF22MD_000, PF22MD_001, PF22MD_010, PF22MD_011,
+ PF22MD_100, PF22MD_101, PF22MD_110, PF22MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF21MD_000, PF21MD_001, PF21MD_010, PF21MD_011,
+ PF21MD_100, PF21MD_101, PF21MD_110, PF21MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF20MD_000, PF20MD_001, PF20MD_010, PF20MD_011,
+ PF20MD_100, PF20MD_101, PF20MD_110, PF20MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR5", 0xfffe38a4, 16, 4) {
+ PF19MD_000, PF19MD_001, PF19MD_010, PF19MD_011,
+ PF19MD_100, PF19MD_101, PF19MD_110, PF19MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF18MD_000, PF18MD_001, PF18MD_010, PF18MD_011,
+ PF18MD_100, PF18MD_101, PF18MD_110, PF18MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF17MD_000, PF17MD_001, PF17MD_010, PF17MD_011,
+ PF17MD_100, PF17MD_101, PF17MD_110, PF17MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF16MD_000, PF16MD_001, PF16MD_010, PF16MD_011,
+ PF16MD_100, PF16MD_101, PF16MD_110, PF16MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR4", 0xfffe38a6, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF15MD_000, PF15MD_001, PF15MD_010, PF15MD_011,
+ PF15MD_100, PF15MD_101, PF15MD_110, PF15MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF14MD_000, PF14MD_001, PF14MD_010, PF14MD_011,
+ PF14MD_100, PF14MD_101, PF14MD_110, PF14MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF13MD_000, PF13MD_001, PF13MD_010, PF13MD_011,
+ PF13MD_100, PF13MD_101, PF13MD_110, PF13MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF12MD_000, PF12MD_001, PF12MD_010, PF12MD_011,
+ PF12MD_100, PF12MD_101, PF12MD_110, PF12MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR2", 0xfffe38aa, 16, 4) {
+ PF11MD_000, PF11MD_001, PF11MD_010, PF11MD_011,
+ PF11MD_100, PF11MD_101, PF11MD_110, PF11MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF10MD_000, PF10MD_001, PF10MD_010, PF10MD_011,
+ PF10MD_100, PF10MD_101, PF10MD_110, PF10MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF9MD_000, PF9MD_001, PF9MD_010, PF9MD_011,
+ PF9MD_100, PF9MD_101, PF9MD_110, PF9MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF8MD_000, PF8MD_001, PF8MD_010, PF8MD_011,
+ PF8MD_100, PF8MD_101, PF8MD_110, PF8MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR1", 0xfffe38ac, 16, 4) {
+ PF7MD_000, PF7MD_001, PF7MD_010, PF7MD_011,
+ PF7MD_100, PF7MD_101, PF7MD_110, PF7MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF6MD_000, PF6MD_001, PF6MD_010, PF6MD_011,
+ PF6MD_100, PF6MD_101, PF6MD_110, PF6MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF5MD_000, PF5MD_001, PF5MD_010, PF5MD_011,
+ PF5MD_100, PF5MD_101, PF5MD_110, PF5MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF4MD_000, PF4MD_001, PF4MD_010, PF4MD_011,
+ PF4MD_100, PF4MD_101, PF4MD_110, PF4MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR0", 0xfffe38ae, 16, 4) {
+ PF3MD_000, PF3MD_001, PF3MD_010, PF3MD_011,
+ PF3MD_100, PF3MD_101, PF3MD_110, PF3MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF2MD_000, PF2MD_001, PF2MD_010, PF2MD_011,
+ PF2MD_100, PF2MD_101, PF2MD_110, PF2MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF1MD_000, PF1MD_001, PF1MD_010, PF1MD_011,
+ PF1MD_100, PF1MD_101, PF1MD_110, PF1MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PF0MD_000, PF0MD_001, PF0MD_010, PF0MD_011,
+ PF0MD_100, PF0MD_101, PF0MD_110, PF0MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PFIOR1", 0xfffe38b0, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF23_IN, PF23_OUT,
+ PF22_IN, PF22_OUT,
+ PF21_IN, PF21_OUT,
+ PF20_IN, PF20_OUT,
+ PF19_IN, PF19_OUT,
+ PF18_IN, PF18_OUT,
+ PF17_IN, PF17_OUT,
+ PF16_IN, PF16_OUT }
+ },
+ { PINMUX_CFG_REG("PFIOR0", 0xfffe38b2, 16, 1) {
+ PF15_IN, PF15_OUT,
+ PF14_IN, PF14_OUT,
+ PF13_IN, PF13_OUT,
+ PF12_IN, PF12_OUT,
+ PF11_IN, PF11_OUT,
+ PF10_IN, PF10_OUT,
+ PF9_IN, PF9_OUT,
+ PF8_IN, PF8_OUT,
+ PF7_IN, PF7_OUT,
+ PF6_IN, PF6_OUT,
+ PF5_IN, PF5_OUT,
+ PF4_IN, PF4_OUT,
+ PF3_IN, PF3_OUT,
+ PF2_IN, PF2_OUT,
+ PF1_IN, PF1_OUT,
+ PF0_IN, PF0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PGCR6", 0xfffe38c2, 16, 4) {
+ PG27MD_00, PG27MD_01, PG27MD_10, PG27MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG26MD_00, PG26MD_01, PG26MD_10, PG26MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG25MD_00, PG25MD_01, PG25MD_10, PG25MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG24MD_00, PG24MD_01, PG24MD_10, PG24MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR5", 0xfffe38c4, 16, 4) {
+ PG23MD_000, PG23MD_001, PG23MD_010, PG23MD_011,
+ PG23MD_100, PG23MD_101, PG23MD_110, PG23MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG22MD_000, PG22MD_001, PG22MD_010, PG22MD_011,
+ PG22MD_100, PG22MD_101, PG22MD_110, PG22MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG21MD_000, PG21MD_001, PG21MD_010, PG21MD_011,
+ PG21MD_100, PG21MD_101, PG21MD_110, PG21MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG20MD_000, PG20MD_001, PG20MD_010, PG20MD_011,
+ PG20MD_100, PG20MD_101, PG20MD_110, PG20MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR4", 0xfffe38c6, 16, 4) {
+ PG19MD_000, PG19MD_001, PG19MD_010, PG19MD_011,
+ PG19MD_100, PG19MD_101, PG19MD_110, PG19MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG18MD_000, PG18MD_001, PG18MD_010, PG18MD_011,
+ PG18MD_100, PG18MD_101, PG18MD_110, PG18MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG17MD_00, PG17MD_01, PG17MD_10, PG17MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG16MD_00, PG16MD_01, PG16MD_10, PG16MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR3", 0xfffe38c8, 16, 4) {
+ PG15MD_00, PG15MD_01, PG15MD_10, PG15MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG14MD_00, PG14MD_01, PG14MD_10, PG14MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG13MD_00, PG13MD_01, PG13MD_10, PG13MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG12MD_00, PG12MD_01, PG12MD_10, PG12MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR2", 0xfffe38ca, 16, 4) {
+ PG11MD_000, PG11MD_001, PG11MD_010, PG11MD_011,
+ PG11MD_100, PG11MD_101, PG11MD_110, PG11MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG10MD_000, PG10MD_001, PG10MD_010, PG10MD_011,
+ PG10MD_100, PG10MD_101, PG10MD_110, PG10MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG9MD_000, PG9MD_001, PG9MD_010, PG9MD_011,
+ PG9MD_100, PG9MD_101, PG9MD_110, PG9MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG8MD_000, PG8MD_001, PG8MD_010, PG8MD_011,
+ PG8MD_100, PG8MD_101, PG8MD_110, PG8MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGCR1", 0xfffe38cc, 16, 4) {
+ PG7MD_000, PG7MD_001, PG7MD_010, PG7MD_011,
+ PG7MD_100, PG7MD_101, PG7MD_110, PG7MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG6MD_000, PG6MD_001, PG6MD_010, PG6MD_011,
+ PG6MD_100, PG6MD_101, PG6MD_110, PG6MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG5MD_000, PG5MD_001, PG5MD_010, PG5MD_011,
+ PG5MD_100, PG5MD_101, PG5MD_110, PG5MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG4MD_000, PG4MD_001, PG4MD_010, PG4MD_011,
+ PG4MD_100, PG4MD_101, PG4MD_110, PG4MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR0", 0xfffe38ce, 16, 4) {
+ PG3MD_000, PG3MD_001, PG3MD_010, PG3MD_011,
+ PG3MD_100, PG3MD_101, PG3MD_110, PG3MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG2MD_000, PG2MD_001, PG2MD_010, PG2MD_011,
+ PG2MD_100, PG2MD_101, PG2MD_110, PG2MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG1MD_000, PG1MD_001, PG1MD_010, PG1MD_011,
+ PG1MD_100, PG1MD_101, PG1MD_110, PG1MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PG0MD_000, PG0MD_001, PG0MD_010, PG0MD_011,
+ PG0MD_100, PG0MD_101, PG0MD_110, PG0MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PGIOR1", 0xfffe38d0, 16, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG27_IN, PG27_OUT,
+ PG26_IN, PG26_OUT,
+ PG25_IN, PG25_OUT,
+ PG24_IN, PG24_OUT,
+ PG23_IN, PG23_OUT,
+ PG22_IN, PG22_OUT,
+ PG21_IN, PG21_OUT,
+ PG20_IN, PG20_OUT,
+ PG19_IN, PG19_OUT,
+ PG18_IN, PG18_OUT,
+ PG17_IN, PG17_OUT,
+ PG16_IN, PG16_OUT }
+ },
+ { PINMUX_CFG_REG("PGIOR0", 0xfffe38d2, 16, 1) {
+ PG15_IN, PG15_OUT,
+ PG14_IN, PG14_OUT,
+ PG13_IN, PG13_OUT,
+ PG12_IN, PG12_OUT,
+ PG11_IN, PG11_OUT,
+ PG10_IN, PG10_OUT,
+ PG9_IN, PG9_OUT,
+ PG8_IN, PG8_OUT,
+ PG7_IN, PG7_OUT,
+ PG6_IN, PG6_OUT,
+ PG5_IN, PG5_OUT,
+ PG4_IN, PG4_OUT,
+ PG3_IN, PG3_OUT,
+ PG2_IN, PG2_OUT,
+ PG1_IN, PG1_OUT,
+ PG0_IN, PG0_OUT }
+ },
+
+ { PINMUX_CFG_REG("PHCR1", 0xfffe38ec, 16, 4) {
+ PH7MD_00, PH7MD_01, PH7MD_10, PH7MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PH6MD_00, PH6MD_01, PH6MD_10, PH6MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PH5MD_00, PH5MD_01, PH5MD_10, PH5MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PH4MD_00, PH4MD_01, PH4MD_10, PH4MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PHCR0", 0xfffe38ee, 16, 4) {
+ PH3MD_00, PH3MD_01, PH3MD_10, PH3MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PH2MD_00, PH2MD_01, PH2MD_10, PH2MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PH1MD_00, PH1MD_01, PH1MD_10, PH1MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PH0MD_00, PH0MD_01, PH0MD_10, PH0MD_11, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PJCR7", 0xfffe3900, 16, 4) {
+ PJ31MD_0, PJ31MD_1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ30MD_000, PJ30MD_001, PJ30MD_010, PJ30MD_011,
+ PJ30MD_100, PJ30MD_101, PJ30MD_110, PJ30MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ29MD_000, PJ29MD_001, PJ29MD_010, PJ29MD_011,
+ PJ29MD_100, PJ29MD_101, PJ29MD_110, PJ29MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ28MD_000, PJ28MD_001, PJ28MD_010, PJ28MD_011,
+ PJ28MD_100, PJ28MD_101, PJ28MD_110, PJ28MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR6", 0xfffe3902, 16, 4) {
+ PJ27MD_000, PJ27MD_001, PJ27MD_010, PJ27MD_011,
+ PJ27MD_100, PJ27MD_101, PJ27MD_110, PJ27MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ26MD_000, PJ26MD_001, PJ26MD_010, PJ26MD_011,
+ PJ26MD_100, PJ26MD_101, PJ26MD_110, PJ26MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ25MD_000, PJ25MD_001, PJ25MD_010, PJ25MD_011,
+ PJ25MD_100, PJ25MD_101, PJ25MD_110, PJ25MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ24MD_000, PJ24MD_001, PJ24MD_010, PJ24MD_011,
+ PJ24MD_100, PJ24MD_101, PJ24MD_110, PJ24MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR5", 0xfffe3904, 16, 4) {
+ PJ23MD_000, PJ23MD_001, PJ23MD_010, PJ23MD_011,
+ PJ23MD_100, PJ23MD_101, PJ23MD_110, PJ23MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ22MD_000, PJ22MD_001, PJ22MD_010, PJ22MD_011,
+ PJ22MD_100, PJ22MD_101, PJ22MD_110, PJ22MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ21MD_000, PJ21MD_001, PJ21MD_010, PJ21MD_011,
+ PJ21MD_100, PJ21MD_101, PJ21MD_110, PJ21MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ20MD_000, PJ20MD_001, PJ20MD_010, PJ20MD_011,
+ PJ20MD_100, PJ20MD_101, PJ20MD_110, PJ20MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR4", 0xfffe3906, 16, 4) {
+ PJ19MD_000, PJ19MD_001, PJ19MD_010, PJ19MD_011,
+ PJ19MD_100, PJ19MD_101, PJ19MD_110, PJ19MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ18MD_000, PJ18MD_001, PJ18MD_010, PJ18MD_011,
+ PJ18MD_100, PJ18MD_101, PJ18MD_110, PJ18MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ17MD_000, PJ17MD_001, PJ17MD_010, PJ17MD_011,
+ PJ17MD_100, PJ17MD_101, PJ17MD_110, PJ17MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ16MD_000, PJ16MD_001, PJ16MD_010, PJ16MD_011,
+ PJ16MD_100, PJ16MD_101, PJ16MD_110, PJ16MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR3", 0xfffe3908, 16, 4) {
+ PJ15MD_000, PJ15MD_001, PJ15MD_010, PJ15MD_011,
+ PJ15MD_100, PJ15MD_101, PJ15MD_110, PJ15MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ14MD_000, PJ14MD_001, PJ14MD_010, PJ14MD_011,
+ PJ14MD_100, PJ14MD_101, PJ14MD_110, PJ14MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ13MD_000, PJ13MD_001, PJ13MD_010, PJ13MD_011,
+ PJ13MD_100, PJ13MD_101, PJ13MD_110, PJ13MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ12MD_000, PJ12MD_001, PJ12MD_010, PJ12MD_011,
+ PJ12MD_100, PJ12MD_101, PJ12MD_110, PJ12MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR2", 0xfffe390a, 16, 4) {
+ PJ11MD_000, PJ11MD_001, PJ11MD_010, PJ11MD_011,
+ PJ11MD_100, PJ11MD_101, PJ11MD_110, PJ11MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ10MD_000, PJ10MD_001, PJ10MD_010, PJ10MD_011,
+ PJ10MD_100, PJ10MD_101, PJ10MD_110, PJ10MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ9MD_000, PJ9MD_001, PJ9MD_010, PJ9MD_011,
+ PJ9MD_100, PJ9MD_101, PJ9MD_110, PJ9MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ8MD_000, PJ8MD_001, PJ8MD_010, PJ8MD_011,
+ PJ8MD_100, PJ8MD_101, PJ8MD_110, PJ8MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR1", 0xfffe390c, 16, 4) {
+ PJ7MD_000, PJ7MD_001, PJ7MD_010, PJ7MD_011,
+ PJ7MD_100, PJ7MD_101, PJ7MD_110, PJ7MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ6MD_000, PJ6MD_001, PJ6MD_010, PJ6MD_011,
+ PJ6MD_100, PJ6MD_101, PJ6MD_110, PJ6MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ5MD_000, PJ5MD_001, PJ5MD_010, PJ5MD_011,
+ PJ5MD_100, PJ5MD_101, PJ5MD_110, PJ5MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ4MD_000, PJ4MD_001, PJ4MD_010, PJ4MD_011,
+ PJ4MD_100, PJ4MD_101, PJ4MD_110, PJ4MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR0", 0xfffe390e, 16, 4) {
+ PJ3MD_000, PJ3MD_001, PJ3MD_010, PJ3MD_011,
+ PJ3MD_100, PJ3MD_101, PJ3MD_110, PJ3MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ2MD_000, PJ2MD_001, PJ2MD_010, PJ2MD_011,
+ PJ2MD_100, PJ2MD_101, PJ2MD_110, PJ2MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ1MD_000, PJ1MD_001, PJ1MD_010, PJ1MD_011,
+ PJ1MD_100, PJ1MD_101, PJ1MD_110, PJ1MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ PJ0MD_000, PJ0MD_001, PJ0MD_010, PJ0MD_011,
+ PJ0MD_100, PJ0MD_101, PJ0MD_110, PJ0MD_111,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+
+ { PINMUX_CFG_REG("PJIOR1", 0xfffe3910, 16, 1) {
+ PJ31_IN, PJ31_OUT,
+ PJ30_IN, PJ30_OUT,
+ PJ29_IN, PJ29_OUT,
+ PJ28_IN, PJ28_OUT,
+ PJ27_IN, PJ27_OUT,
+ PJ26_IN, PJ26_OUT,
+ PJ25_IN, PJ25_OUT,
+ PJ24_IN, PJ24_OUT,
+ PJ23_IN, PJ23_OUT,
+ PJ22_IN, PJ22_OUT,
+ PJ21_IN, PJ21_OUT,
+ PJ20_IN, PJ20_OUT,
+ PJ19_IN, PJ19_OUT,
+ PJ18_IN, PJ18_OUT,
+ PJ17_IN, PJ17_OUT,
+ PJ16_IN, PJ16_OUT }
+ },
+ { PINMUX_CFG_REG("PJIOR0", 0xfffe3912, 16, 1) {
+ PJ15_IN, PJ15_OUT,
+ PJ14_IN, PJ14_OUT,
+ PJ13_IN, PJ13_OUT,
+ PJ12_IN, PJ12_OUT,
+ PJ11_IN, PJ11_OUT,
+ PJ10_IN, PJ10_OUT,
+ PJ9_IN, PJ9_OUT,
+ PJ8_IN, PJ8_OUT,
+ PJ7_IN, PJ7_OUT,
+ PJ6_IN, PJ6_OUT,
+ PJ5_IN, PJ5_OUT,
+ PJ4_IN, PJ4_OUT,
+ PJ3_IN, PJ3_OUT,
+ PJ2_IN, PJ2_OUT,
+ PJ1_IN, PJ1_OUT,
+ PJ0_IN, PJ0_OUT }
+ },
+
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR0", 0xfffe3816, 16) {
+ 0, 0, 0, 0, 0, 0, 0, PA1_DATA,
+ 0, 0, 0, 0, 0, 0, 0, PA0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PBDR1", 0xfffe3834, 16) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PB22_DATA, PB21_DATA, PB20_DATA,
+ PB19_DATA, PB18_DATA, PB17_DATA, PB16_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR0", 0xfffe3836, 16) {
+ PB15_DATA, PB14_DATA, PB13_DATA, PB12_DATA,
+ PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, 0 }
+ },
+
+ { PINMUX_DATA_REG("PCDR0", 0xfffe3856, 16) {
+ 0, 0, 0, 0,
+ 0, 0, 0, PC8_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PDDR0", 0xfffe3876, 16) {
+ PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA,
+ PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PEDR0", 0xfffe3896, 16) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PFDR1", 0xfffe38b4, 16) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA,
+ PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR0", 0xfffe38b6, 16) {
+ PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA,
+ PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PGDR1", 0xfffe38d4, 16) {
+ 0, 0, 0, 0,
+ PG27_DATA, PG26_DATA, PG25_DATA, PG24_DATA,
+ PG23_DATA, PG22_DATA, PG21_DATA, PG20_DATA,
+ PG19_DATA, PG18_DATA, PG17_DATA, PG16_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR0", 0xfffe38d6, 16) {
+ PG15_DATA, PG14_DATA, PG13_DATA, PG12_DATA,
+ PG11_DATA, PG10_DATA, PG9_DATA, PG8_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA }
+ },
+
+ { PINMUX_DATA_REG("PJDR1", 0xfffe3914, 16) {
+ PJ31_DATA, PJ30_DATA, PJ29_DATA, PJ28_DATA,
+ PJ27_DATA, PJ26_DATA, PJ25_DATA, PJ24_DATA,
+ PJ23_DATA, PJ22_DATA, PJ21_DATA, PJ20_DATA,
+ PJ19_DATA, PJ18_DATA, PJ17_DATA, PJ16_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR0", 0xfffe3916, 16) {
+ PJ15_DATA, PJ14_DATA, PJ13_DATA, PJ12_DATA,
+ PJ11_DATA, PJ10_DATA, PJ9_DATA, PJ8_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA }
+ },
+
+ { }
+};
+
+struct sh_pfc_soc_info sh7269_pinmux_info = {
+ .name = "sh7269_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA1,
+ .last_gpio = GPIO_FN_LCD_M_DISP,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7372.c b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
new file mode 100644
index 000000000000..d44e7f02069b
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
@@ -0,0 +1,1658 @@
+/*
+ * sh7372 processor support - PFC hardware block
+ *
+ * Copyright (C) 2010 Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on
+ * sh7367 processor support - PFC hardware block
+ * Copyright (C) 2010 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <mach/irqs.h>
+#include <mach/sh7372.h>
+
+#include "sh_pfc.h"
+
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
+ PORT_10(fn, pfx##10, sfx), PORT_10(fn, pfx##11, sfx), \
+ PORT_10(fn, pfx##12, sfx), PORT_10(fn, pfx##13, sfx), \
+ PORT_10(fn, pfx##14, sfx), PORT_10(fn, pfx##15, sfx), \
+ PORT_10(fn, pfx##16, sfx), PORT_10(fn, pfx##17, sfx), \
+ PORT_10(fn, pfx##18, sfx), PORT_1(fn, pfx##190, sfx)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ /* PORT0_DATA -> PORT190_DATA */
+ PINMUX_DATA_BEGIN,
+ PORT_ALL(DATA),
+ PINMUX_DATA_END,
+
+ /* PORT0_IN -> PORT190_IN */
+ PINMUX_INPUT_BEGIN,
+ PORT_ALL(IN),
+ PINMUX_INPUT_END,
+
+ /* PORT0_IN_PU -> PORT190_IN_PU */
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PORT_ALL(IN_PU),
+ PINMUX_INPUT_PULLUP_END,
+
+ /* PORT0_IN_PD -> PORT190_IN_PD */
+ PINMUX_INPUT_PULLDOWN_BEGIN,
+ PORT_ALL(IN_PD),
+ PINMUX_INPUT_PULLDOWN_END,
+
+ /* PORT0_OUT -> PORT190_OUT */
+ PINMUX_OUTPUT_BEGIN,
+ PORT_ALL(OUT),
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT190_FN_IN */
+ PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT190_FN_OUT */
+ PORT_ALL(FN0), /* PORT0_FN0 -> PORT190_FN0 */
+ PORT_ALL(FN1), /* PORT0_FN1 -> PORT190_FN1 */
+ PORT_ALL(FN2), /* PORT0_FN2 -> PORT190_FN2 */
+ PORT_ALL(FN3), /* PORT0_FN3 -> PORT190_FN3 */
+ PORT_ALL(FN4), /* PORT0_FN4 -> PORT190_FN4 */
+ PORT_ALL(FN5), /* PORT0_FN5 -> PORT190_FN5 */
+ PORT_ALL(FN6), /* PORT0_FN6 -> PORT190_FN6 */
+ PORT_ALL(FN7), /* PORT0_FN7 -> PORT190_FN7 */
+
+ MSEL1CR_31_0, MSEL1CR_31_1,
+ MSEL1CR_30_0, MSEL1CR_30_1,
+ MSEL1CR_29_0, MSEL1CR_29_1,
+ MSEL1CR_28_0, MSEL1CR_28_1,
+ MSEL1CR_27_0, MSEL1CR_27_1,
+ MSEL1CR_26_0, MSEL1CR_26_1,
+ MSEL1CR_16_0, MSEL1CR_16_1,
+ MSEL1CR_15_0, MSEL1CR_15_1,
+ MSEL1CR_14_0, MSEL1CR_14_1,
+ MSEL1CR_13_0, MSEL1CR_13_1,
+ MSEL1CR_12_0, MSEL1CR_12_1,
+ MSEL1CR_9_0, MSEL1CR_9_1,
+ MSEL1CR_8_0, MSEL1CR_8_1,
+ MSEL1CR_7_0, MSEL1CR_7_1,
+ MSEL1CR_6_0, MSEL1CR_6_1,
+ MSEL1CR_4_0, MSEL1CR_4_1,
+ MSEL1CR_3_0, MSEL1CR_3_1,
+ MSEL1CR_2_0, MSEL1CR_2_1,
+ MSEL1CR_0_0, MSEL1CR_0_1,
+
+ MSEL3CR_27_0, MSEL3CR_27_1,
+ MSEL3CR_26_0, MSEL3CR_26_1,
+ MSEL3CR_21_0, MSEL3CR_21_1,
+ MSEL3CR_20_0, MSEL3CR_20_1,
+ MSEL3CR_15_0, MSEL3CR_15_1,
+ MSEL3CR_9_0, MSEL3CR_9_1,
+ MSEL3CR_6_0, MSEL3CR_6_1,
+
+ MSEL4CR_19_0, MSEL4CR_19_1,
+ MSEL4CR_18_0, MSEL4CR_18_1,
+ MSEL4CR_17_0, MSEL4CR_17_1,
+ MSEL4CR_16_0, MSEL4CR_16_1,
+ MSEL4CR_15_0, MSEL4CR_15_1,
+ MSEL4CR_14_0, MSEL4CR_14_1,
+ MSEL4CR_10_0, MSEL4CR_10_1,
+ MSEL4CR_6_0, MSEL4CR_6_1,
+ MSEL4CR_4_0, MSEL4CR_4_1,
+ MSEL4CR_1_0, MSEL4CR_1_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ /* IRQ */
+ IRQ0_6_MARK, IRQ0_162_MARK, IRQ1_MARK, IRQ2_4_MARK,
+ IRQ2_5_MARK, IRQ3_8_MARK, IRQ3_16_MARK, IRQ4_17_MARK,
+ IRQ4_163_MARK, IRQ5_MARK, IRQ6_39_MARK, IRQ6_164_MARK,
+ IRQ7_40_MARK, IRQ7_167_MARK, IRQ8_41_MARK, IRQ8_168_MARK,
+ IRQ9_42_MARK, IRQ9_169_MARK, IRQ10_MARK, IRQ11_MARK,
+ IRQ12_80_MARK, IRQ12_137_MARK, IRQ13_81_MARK, IRQ13_145_MARK,
+ IRQ14_82_MARK, IRQ14_146_MARK, IRQ15_83_MARK, IRQ15_147_MARK,
+ IRQ16_84_MARK, IRQ16_170_MARK, IRQ17_MARK, IRQ18_MARK,
+ IRQ19_MARK, IRQ20_MARK, IRQ21_MARK, IRQ22_MARK,
+ IRQ23_MARK, IRQ24_MARK, IRQ25_MARK, IRQ26_121_MARK,
+ IRQ26_172_MARK, IRQ27_122_MARK, IRQ27_180_MARK, IRQ28_123_MARK,
+ IRQ28_181_MARK, IRQ29_129_MARK, IRQ29_182_MARK, IRQ30_130_MARK,
+ IRQ30_183_MARK, IRQ31_138_MARK, IRQ31_184_MARK,
+
+ /* MSIOF0 */
+ MSIOF0_TSYNC_MARK, MSIOF0_TSCK_MARK, MSIOF0_RXD_MARK,
+ MSIOF0_RSCK_MARK, MSIOF0_RSYNC_MARK, MSIOF0_MCK0_MARK,
+ MSIOF0_MCK1_MARK, MSIOF0_SS1_MARK, MSIOF0_SS2_MARK,
+ MSIOF0_TXD_MARK,
+
+ /* MSIOF1 */
+ MSIOF1_TSCK_39_MARK, MSIOF1_TSYNC_40_MARK,
+ MSIOF1_TSCK_88_MARK, MSIOF1_TSYNC_89_MARK,
+ MSIOF1_TXD_41_MARK, MSIOF1_RXD_42_MARK,
+ MSIOF1_TXD_90_MARK, MSIOF1_RXD_91_MARK,
+ MSIOF1_SS1_43_MARK, MSIOF1_SS2_44_MARK,
+ MSIOF1_SS1_92_MARK, MSIOF1_SS2_93_MARK,
+ MSIOF1_RSCK_MARK, MSIOF1_RSYNC_MARK,
+ MSIOF1_MCK0_MARK, MSIOF1_MCK1_MARK,
+
+ /* MSIOF2 */
+ MSIOF2_RSCK_MARK, MSIOF2_RSYNC_MARK, MSIOF2_MCK0_MARK,
+ MSIOF2_MCK1_MARK, MSIOF2_SS1_MARK, MSIOF2_SS2_MARK,
+ MSIOF2_TSYNC_MARK, MSIOF2_TSCK_MARK, MSIOF2_RXD_MARK,
+ MSIOF2_TXD_MARK,
+
+ /* BBIF1 */
+ BBIF1_RXD_MARK, BBIF1_TSYNC_MARK, BBIF1_TSCK_MARK,
+ BBIF1_TXD_MARK, BBIF1_RSCK_MARK, BBIF1_RSYNC_MARK,
+ BBIF1_FLOW_MARK, BB_RX_FLOW_N_MARK,
+
+ /* BBIF2 */
+ BBIF2_TSCK1_MARK, BBIF2_TSYNC1_MARK,
+ BBIF2_TXD1_MARK, BBIF2_RXD_MARK,
+
+ /* FSI */
+ FSIACK_MARK, FSIBCK_MARK, FSIAILR_MARK, FSIAIBT_MARK,
+ FSIAISLD_MARK, FSIAOMC_MARK, FSIAOLR_MARK, FSIAOBT_MARK,
+ FSIAOSLD_MARK, FSIASPDIF_11_MARK, FSIASPDIF_15_MARK,
+
+ /* FMSI */
+ FMSOCK_MARK, FMSOOLR_MARK, FMSIOLR_MARK, FMSOOBT_MARK,
+ FMSIOBT_MARK, FMSOSLD_MARK, FMSOILR_MARK, FMSIILR_MARK,
+ FMSOIBT_MARK, FMSIIBT_MARK, FMSISLD_MARK, FMSICK_MARK,
+
+ /* SCIFA0 */
+ SCIFA0_TXD_MARK, SCIFA0_RXD_MARK, SCIFA0_SCK_MARK,
+ SCIFA0_RTS_MARK, SCIFA0_CTS_MARK,
+
+ /* SCIFA1 */
+ SCIFA1_TXD_MARK, SCIFA1_RXD_MARK, SCIFA1_SCK_MARK,
+ SCIFA1_RTS_MARK, SCIFA1_CTS_MARK,
+
+ /* SCIFA2 */
+ SCIFA2_CTS1_MARK, SCIFA2_RTS1_MARK, SCIFA2_TXD1_MARK,
+ SCIFA2_RXD1_MARK, SCIFA2_SCK1_MARK,
+
+ /* SCIFA3 */
+ SCIFA3_CTS_43_MARK, SCIFA3_CTS_140_MARK, SCIFA3_RTS_44_MARK,
+ SCIFA3_RTS_141_MARK, SCIFA3_SCK_MARK, SCIFA3_TXD_MARK,
+ SCIFA3_RXD_MARK,
+
+ /* SCIFA4 */
+ SCIFA4_RXD_MARK, SCIFA4_TXD_MARK,
+
+ /* SCIFA5 */
+ SCIFA5_RXD_MARK, SCIFA5_TXD_MARK,
+
+ /* SCIFB */
+ SCIFB_SCK_MARK, SCIFB_RTS_MARK, SCIFB_CTS_MARK,
+ SCIFB_TXD_MARK, SCIFB_RXD_MARK,
+
+ /* CEU */
+ VIO_HD_MARK, VIO_CKO1_MARK, VIO_CKO2_MARK, VIO_VD_MARK,
+ VIO_CLK_MARK, VIO_FIELD_MARK, VIO_CKO_MARK,
+ VIO_D0_MARK, VIO_D1_MARK, VIO_D2_MARK, VIO_D3_MARK,
+ VIO_D4_MARK, VIO_D5_MARK, VIO_D6_MARK, VIO_D7_MARK,
+ VIO_D8_MARK, VIO_D9_MARK, VIO_D10_MARK, VIO_D11_MARK,
+ VIO_D12_MARK, VIO_D13_MARK, VIO_D14_MARK, VIO_D15_MARK,
+
+ /* USB0 */
+ IDIN_0_MARK, EXTLP_0_MARK, OVCN2_0_MARK, PWEN_0_MARK,
+ OVCN_0_MARK, VBUS0_0_MARK,
+
+ /* USB1 */
+ IDIN_1_18_MARK, IDIN_1_113_MARK,
+ PWEN_1_115_MARK, PWEN_1_138_MARK,
+ OVCN_1_114_MARK, OVCN_1_162_MARK,
+ EXTLP_1_MARK, OVCN2_1_MARK,
+ VBUS0_1_MARK,
+
+ /* GPIO */
+ GPI0_MARK, GPI1_MARK, GPO0_MARK, GPO1_MARK,
+
+ /* BSC */
+ BS_MARK, WE1_MARK,
+ CKO_MARK, WAIT_MARK, RDWR_MARK,
+
+ A0_MARK, A1_MARK, A2_MARK, A3_MARK,
+ A6_MARK, A7_MARK, A8_MARK, A9_MARK,
+ A10_MARK, A11_MARK, A12_MARK, A13_MARK,
+ A14_MARK, A15_MARK, A16_MARK, A17_MARK,
+ A18_MARK, A19_MARK, A20_MARK, A21_MARK,
+ A22_MARK, A23_MARK, A24_MARK, A25_MARK,
+ A26_MARK,
+
+ CS0_MARK, CS2_MARK, CS4_MARK,
+ CS5A_MARK, CS5B_MARK, CS6A_MARK,
+
+ /* BSC/FLCTL */
+ RD_FSC_MARK, WE0_FWE_MARK, A4_FOE_MARK, A5_FCDE_MARK,
+ D0_NAF0_MARK, D1_NAF1_MARK, D2_NAF2_MARK, D3_NAF3_MARK,
+ D4_NAF4_MARK, D5_NAF5_MARK, D6_NAF6_MARK, D7_NAF7_MARK,
+ D8_NAF8_MARK, D9_NAF9_MARK, D10_NAF10_MARK, D11_NAF11_MARK,
+ D12_NAF12_MARK, D13_NAF13_MARK, D14_NAF14_MARK, D15_NAF15_MARK,
+
+ /* MMCIF(1) */
+ MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK,
+ MMCD0_4_MARK, MMCD0_5_MARK, MMCD0_6_MARK, MMCD0_7_MARK,
+ MMCCMD0_MARK, MMCCLK0_MARK,
+
+ /* MMCIF(2) */
+ MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK,
+ MMCD1_4_MARK, MMCD1_5_MARK, MMCD1_6_MARK, MMCD1_7_MARK,
+ MMCCLK1_MARK, MMCCMD1_MARK,
+
+ /* SPU2 */
+ VINT_I_MARK,
+
+ /* FLCTL */
+ FCE1_MARK, FCE0_MARK, FRB_MARK,
+
+ /* HSI */
+ GP_RX_FLAG_MARK, GP_RX_DATA_MARK, GP_TX_READY_MARK,
+ GP_RX_WAKE_MARK, MP_TX_FLAG_MARK, MP_TX_DATA_MARK,
+ MP_RX_READY_MARK, MP_TX_WAKE_MARK,
+
+ /* MFI */
+ MFIv6_MARK,
+ MFIv4_MARK,
+
+ MEMC_CS0_MARK, MEMC_BUSCLK_MEMC_A0_MARK,
+ MEMC_CS1_MEMC_A1_MARK, MEMC_ADV_MEMC_DREQ0_MARK,
+ MEMC_WAIT_MEMC_DREQ1_MARK, MEMC_NOE_MARK,
+ MEMC_NWE_MARK, MEMC_INT_MARK,
+
+ MEMC_AD0_MARK, MEMC_AD1_MARK, MEMC_AD2_MARK,
+ MEMC_AD3_MARK, MEMC_AD4_MARK, MEMC_AD5_MARK,
+ MEMC_AD6_MARK, MEMC_AD7_MARK, MEMC_AD8_MARK,
+ MEMC_AD9_MARK, MEMC_AD10_MARK, MEMC_AD11_MARK,
+ MEMC_AD12_MARK, MEMC_AD13_MARK, MEMC_AD14_MARK,
+ MEMC_AD15_MARK,
+
+ /* SIM */
+ SIM_RST_MARK, SIM_CLK_MARK, SIM_D_MARK,
+
+ /* TPU */
+ TPU0TO0_MARK, TPU0TO1_MARK,
+ TPU0TO2_93_MARK, TPU0TO2_99_MARK,
+ TPU0TO3_MARK,
+
+ /* I2C2 */
+ I2C_SCL2_MARK, I2C_SDA2_MARK,
+
+ /* I2C3(1) */
+ I2C_SCL3_MARK, I2C_SDA3_MARK,
+
+ /* I2C3(2) */
+ I2C_SCL3S_MARK, I2C_SDA3S_MARK,
+
+	/* I2C4(1) */
+ I2C_SCL4_MARK, I2C_SDA4_MARK,
+
+ /* I2C4(2) */
+ I2C_SCL4S_MARK, I2C_SDA4S_MARK,
+
+ /* KEYSC */
+ KEYOUT0_MARK, KEYIN0_121_MARK, KEYIN0_136_MARK,
+ KEYOUT1_MARK, KEYIN1_122_MARK, KEYIN1_135_MARK,
+ KEYOUT2_MARK, KEYIN2_123_MARK, KEYIN2_134_MARK,
+ KEYOUT3_MARK, KEYIN3_124_MARK, KEYIN3_133_MARK,
+ KEYOUT4_MARK, KEYIN4_MARK,
+ KEYOUT5_MARK, KEYIN5_MARK,
+ KEYOUT6_MARK, KEYIN6_MARK,
+ KEYOUT7_MARK, KEYIN7_MARK,
+
+ /* LCDC */
+ LCDC0_SELECT_MARK,
+ LCDC1_SELECT_MARK,
+ LCDHSYN_MARK, LCDCS_MARK, LCDVSYN_MARK, LCDDCK_MARK,
+ LCDWR_MARK, LCDRD_MARK, LCDDISP_MARK, LCDRS_MARK,
+ LCDLCLK_MARK, LCDDON_MARK,
+
+ LCDD0_MARK, LCDD1_MARK, LCDD2_MARK, LCDD3_MARK,
+ LCDD4_MARK, LCDD5_MARK, LCDD6_MARK, LCDD7_MARK,
+ LCDD8_MARK, LCDD9_MARK, LCDD10_MARK, LCDD11_MARK,
+ LCDD12_MARK, LCDD13_MARK, LCDD14_MARK, LCDD15_MARK,
+ LCDD16_MARK, LCDD17_MARK, LCDD18_MARK, LCDD19_MARK,
+ LCDD20_MARK, LCDD21_MARK, LCDD22_MARK, LCDD23_MARK,
+
+ /* IRDA */
+ IRDA_OUT_MARK, IRDA_IN_MARK, IRDA_FIRSEL_MARK,
+ IROUT_139_MARK, IROUT_140_MARK,
+
+ /* TSIF1 */
+ TS0_1SELECT_MARK,
+ TS0_2SELECT_MARK,
+ TS1_1SELECT_MARK,
+ TS1_2SELECT_MARK,
+
+ TS_SPSYNC1_MARK, TS_SDAT1_MARK,
+ TS_SDEN1_MARK, TS_SCK1_MARK,
+
+ /* TSIF2 */
+ TS_SPSYNC2_MARK, TS_SDAT2_MARK,
+ TS_SDEN2_MARK, TS_SCK2_MARK,
+
+ /* HDMI */
+ HDMI_HPD_MARK, HDMI_CEC_MARK,
+
+ /* SDHI0 */
+ SDHICLK0_MARK, SDHICD0_MARK,
+ SDHICMD0_MARK, SDHIWP0_MARK,
+ SDHID0_0_MARK, SDHID0_1_MARK,
+ SDHID0_2_MARK, SDHID0_3_MARK,
+
+ /* SDHI1 */
+ SDHICLK1_MARK, SDHICMD1_MARK, SDHID1_0_MARK,
+ SDHID1_1_MARK, SDHID1_2_MARK, SDHID1_3_MARK,
+
+ /* SDHI2 */
+ SDHICLK2_MARK, SDHICMD2_MARK, SDHID2_0_MARK,
+ SDHID2_1_MARK, SDHID2_2_MARK, SDHID2_3_MARK,
+
+ /* SDENC */
+ SDENC_CPG_MARK,
+ SDENC_DV_CLKI_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+
+ /* specify valid pin states for each pin in GPIO mode */
+ PORT_DATA_IO_PD(0), PORT_DATA_IO_PD(1),
+ PORT_DATA_O(2), PORT_DATA_I_PD(3),
+ PORT_DATA_I_PD(4), PORT_DATA_I_PD(5),
+ PORT_DATA_IO_PU_PD(6), PORT_DATA_I_PD(7),
+ PORT_DATA_IO_PD(8), PORT_DATA_O(9),
+
+ PORT_DATA_O(10), PORT_DATA_O(11),
+ PORT_DATA_IO_PU_PD(12), PORT_DATA_IO_PD(13),
+ PORT_DATA_IO_PD(14), PORT_DATA_O(15),
+ PORT_DATA_IO_PD(16), PORT_DATA_IO_PD(17),
+ PORT_DATA_I_PD(18), PORT_DATA_IO(19),
+
+ PORT_DATA_IO(20), PORT_DATA_IO(21),
+ PORT_DATA_IO(22), PORT_DATA_IO(23),
+ PORT_DATA_IO(24), PORT_DATA_IO(25),
+ PORT_DATA_IO(26), PORT_DATA_IO(27),
+ PORT_DATA_IO(28), PORT_DATA_IO(29),
+
+ PORT_DATA_IO(30), PORT_DATA_IO(31),
+ PORT_DATA_IO(32), PORT_DATA_IO(33),
+ PORT_DATA_IO(34), PORT_DATA_IO(35),
+ PORT_DATA_IO(36), PORT_DATA_IO(37),
+ PORT_DATA_IO(38), PORT_DATA_IO(39),
+
+ PORT_DATA_IO(40), PORT_DATA_IO(41),
+ PORT_DATA_IO(42), PORT_DATA_IO(43),
+ PORT_DATA_IO(44), PORT_DATA_IO(45),
+ PORT_DATA_IO_PU(46), PORT_DATA_IO_PU(47),
+ PORT_DATA_IO_PU(48), PORT_DATA_IO_PU(49),
+
+ PORT_DATA_IO_PU(50), PORT_DATA_IO_PU(51),
+ PORT_DATA_IO_PU(52), PORT_DATA_IO_PU(53),
+ PORT_DATA_IO_PU(54), PORT_DATA_IO_PU(55),
+ PORT_DATA_IO_PU(56), PORT_DATA_IO_PU(57),
+ PORT_DATA_IO_PU(58), PORT_DATA_IO_PU(59),
+
+ PORT_DATA_IO_PU(60), PORT_DATA_IO_PU(61),
+ PORT_DATA_IO(62), PORT_DATA_O(63),
+ PORT_DATA_O(64), PORT_DATA_IO_PU(65),
+ PORT_DATA_O(66), PORT_DATA_IO_PU(67), /*66?*/
+ PORT_DATA_O(68), PORT_DATA_IO(69),
+
+ PORT_DATA_IO(70), PORT_DATA_IO(71),
+ PORT_DATA_O(72), PORT_DATA_I_PU(73),
+ PORT_DATA_I_PU_PD(74), PORT_DATA_IO_PU_PD(75),
+ PORT_DATA_IO_PU_PD(76), PORT_DATA_IO_PU_PD(77),
+ PORT_DATA_IO_PU_PD(78), PORT_DATA_IO_PU_PD(79),
+
+ PORT_DATA_IO_PU_PD(80), PORT_DATA_IO_PU_PD(81),
+ PORT_DATA_IO_PU_PD(82), PORT_DATA_IO_PU_PD(83),
+ PORT_DATA_IO_PU_PD(84), PORT_DATA_IO_PU_PD(85),
+ PORT_DATA_IO_PU_PD(86), PORT_DATA_IO_PU_PD(87),
+ PORT_DATA_IO_PU_PD(88), PORT_DATA_IO_PU_PD(89),
+
+ PORT_DATA_IO_PU_PD(90), PORT_DATA_IO_PU_PD(91),
+ PORT_DATA_IO_PU_PD(92), PORT_DATA_IO_PU_PD(93),
+ PORT_DATA_IO_PU_PD(94), PORT_DATA_IO_PU_PD(95),
+ PORT_DATA_IO_PU(96), PORT_DATA_IO_PU_PD(97),
+ PORT_DATA_IO_PU_PD(98), PORT_DATA_O(99), /*99?*/
+
+ PORT_DATA_IO_PD(100), PORT_DATA_IO_PD(101),
+ PORT_DATA_IO_PD(102), PORT_DATA_IO_PD(103),
+ PORT_DATA_IO_PD(104), PORT_DATA_IO_PD(105),
+ PORT_DATA_IO_PU(106), PORT_DATA_IO_PU(107),
+ PORT_DATA_IO_PU(108), PORT_DATA_IO_PU(109),
+
+ PORT_DATA_IO_PU(110), PORT_DATA_IO_PU(111),
+ PORT_DATA_IO_PD(112), PORT_DATA_IO_PD(113),
+ PORT_DATA_IO_PU(114), PORT_DATA_IO_PU(115),
+ PORT_DATA_IO_PU(116), PORT_DATA_IO_PU(117),
+ PORT_DATA_IO_PU(118), PORT_DATA_IO_PU(119),
+
+ PORT_DATA_IO_PU(120), PORT_DATA_IO_PD(121),
+ PORT_DATA_IO_PD(122), PORT_DATA_IO_PD(123),
+ PORT_DATA_IO_PD(124), PORT_DATA_IO_PD(125),
+ PORT_DATA_IO_PD(126), PORT_DATA_IO_PD(127),
+ PORT_DATA_IO_PD(128), PORT_DATA_IO_PU_PD(129),
+
+ PORT_DATA_IO_PU_PD(130), PORT_DATA_IO_PU_PD(131),
+ PORT_DATA_IO_PU_PD(132), PORT_DATA_IO_PU_PD(133),
+ PORT_DATA_IO_PU_PD(134), PORT_DATA_IO_PU_PD(135),
+ PORT_DATA_IO_PD(136), PORT_DATA_IO_PD(137),
+ PORT_DATA_IO_PD(138), PORT_DATA_IO_PD(139),
+
+ PORT_DATA_IO_PD(140), PORT_DATA_IO_PD(141),
+ PORT_DATA_IO_PD(142), PORT_DATA_IO_PU_PD(143),
+ PORT_DATA_IO_PD(144), PORT_DATA_IO_PD(145),
+ PORT_DATA_IO_PD(146), PORT_DATA_IO_PD(147),
+ PORT_DATA_IO_PD(148), PORT_DATA_IO_PD(149),
+
+ PORT_DATA_IO_PD(150), PORT_DATA_IO_PD(151),
+ PORT_DATA_IO_PU_PD(152), PORT_DATA_I_PD(153),
+ PORT_DATA_IO_PU_PD(154), PORT_DATA_I_PD(155),
+ PORT_DATA_IO_PD(156), PORT_DATA_IO_PD(157),
+ PORT_DATA_I_PD(158), PORT_DATA_IO_PD(159),
+
+ PORT_DATA_O(160), PORT_DATA_IO_PD(161),
+ PORT_DATA_IO_PD(162), PORT_DATA_IO_PD(163),
+ PORT_DATA_I_PD(164), PORT_DATA_IO_PD(165),
+ PORT_DATA_I_PD(166), PORT_DATA_I_PD(167),
+ PORT_DATA_I_PD(168), PORT_DATA_I_PD(169),
+
+ PORT_DATA_I_PD(170), PORT_DATA_O(171),
+ PORT_DATA_IO_PU_PD(172), PORT_DATA_IO_PU_PD(173),
+ PORT_DATA_IO_PU_PD(174), PORT_DATA_IO_PU_PD(175),
+ PORT_DATA_IO_PU_PD(176), PORT_DATA_IO_PU_PD(177),
+ PORT_DATA_IO_PU_PD(178), PORT_DATA_O(179),
+
+ PORT_DATA_IO_PU_PD(180), PORT_DATA_IO_PU_PD(181),
+ PORT_DATA_IO_PU_PD(182), PORT_DATA_IO_PU_PD(183),
+ PORT_DATA_IO_PU_PD(184), PORT_DATA_O(185),
+ PORT_DATA_IO_PU_PD(186), PORT_DATA_IO_PU_PD(187),
+ PORT_DATA_IO_PU_PD(188), PORT_DATA_IO_PU_PD(189),
+
+ PORT_DATA_IO_PU_PD(190),
+
+ /* IRQ */
+ PINMUX_DATA(IRQ0_6_MARK, PORT6_FN0, MSEL1CR_0_0),
+ PINMUX_DATA(IRQ0_162_MARK, PORT162_FN0, MSEL1CR_0_1),
+ PINMUX_DATA(IRQ1_MARK, PORT12_FN0),
+ PINMUX_DATA(IRQ2_4_MARK, PORT4_FN0, MSEL1CR_2_0),
+ PINMUX_DATA(IRQ2_5_MARK, PORT5_FN0, MSEL1CR_2_1),
+ PINMUX_DATA(IRQ3_8_MARK, PORT8_FN0, MSEL1CR_3_0),
+ PINMUX_DATA(IRQ3_16_MARK, PORT16_FN0, MSEL1CR_3_1),
+ PINMUX_DATA(IRQ4_17_MARK, PORT17_FN0, MSEL1CR_4_0),
+ PINMUX_DATA(IRQ4_163_MARK, PORT163_FN0, MSEL1CR_4_1),
+ PINMUX_DATA(IRQ5_MARK, PORT18_FN0),
+ PINMUX_DATA(IRQ6_39_MARK, PORT39_FN0, MSEL1CR_6_0),
+ PINMUX_DATA(IRQ6_164_MARK, PORT164_FN0, MSEL1CR_6_1),
+ PINMUX_DATA(IRQ7_40_MARK, PORT40_FN0, MSEL1CR_7_1),
+ PINMUX_DATA(IRQ7_167_MARK, PORT167_FN0, MSEL1CR_7_0),
+ PINMUX_DATA(IRQ8_41_MARK, PORT41_FN0, MSEL1CR_8_1),
+ PINMUX_DATA(IRQ8_168_MARK, PORT168_FN0, MSEL1CR_8_0),
+ PINMUX_DATA(IRQ9_42_MARK, PORT42_FN0, MSEL1CR_9_0),
+ PINMUX_DATA(IRQ9_169_MARK, PORT169_FN0, MSEL1CR_9_1),
+ PINMUX_DATA(IRQ10_MARK, PORT65_FN0, MSEL1CR_9_1),
+ PINMUX_DATA(IRQ11_MARK, PORT67_FN0),
+ PINMUX_DATA(IRQ12_80_MARK, PORT80_FN0, MSEL1CR_12_0),
+ PINMUX_DATA(IRQ12_137_MARK, PORT137_FN0, MSEL1CR_12_1),
+ PINMUX_DATA(IRQ13_81_MARK, PORT81_FN0, MSEL1CR_13_0),
+ PINMUX_DATA(IRQ13_145_MARK, PORT145_FN0, MSEL1CR_13_1),
+ PINMUX_DATA(IRQ14_82_MARK, PORT82_FN0, MSEL1CR_14_0),
+ PINMUX_DATA(IRQ14_146_MARK, PORT146_FN0, MSEL1CR_14_1),
+ PINMUX_DATA(IRQ15_83_MARK, PORT83_FN0, MSEL1CR_15_0),
+ PINMUX_DATA(IRQ15_147_MARK, PORT147_FN0, MSEL1CR_15_1),
+ PINMUX_DATA(IRQ16_84_MARK, PORT84_FN0, MSEL1CR_16_0),
+ PINMUX_DATA(IRQ16_170_MARK, PORT170_FN0, MSEL1CR_16_1),
+ PINMUX_DATA(IRQ17_MARK, PORT85_FN0),
+ PINMUX_DATA(IRQ18_MARK, PORT86_FN0),
+ PINMUX_DATA(IRQ19_MARK, PORT87_FN0),
+ PINMUX_DATA(IRQ20_MARK, PORT92_FN0),
+ PINMUX_DATA(IRQ21_MARK, PORT93_FN0),
+ PINMUX_DATA(IRQ22_MARK, PORT94_FN0),
+ PINMUX_DATA(IRQ23_MARK, PORT95_FN0),
+ PINMUX_DATA(IRQ24_MARK, PORT112_FN0),
+ PINMUX_DATA(IRQ25_MARK, PORT119_FN0),
+ PINMUX_DATA(IRQ26_121_MARK, PORT121_FN0, MSEL1CR_26_1),
+ PINMUX_DATA(IRQ26_172_MARK, PORT172_FN0, MSEL1CR_26_0),
+ PINMUX_DATA(IRQ27_122_MARK, PORT122_FN0, MSEL1CR_27_1),
+ PINMUX_DATA(IRQ27_180_MARK, PORT180_FN0, MSEL1CR_27_0),
+ PINMUX_DATA(IRQ28_123_MARK, PORT123_FN0, MSEL1CR_28_1),
+ PINMUX_DATA(IRQ28_181_MARK, PORT181_FN0, MSEL1CR_28_0),
+ PINMUX_DATA(IRQ29_129_MARK, PORT129_FN0, MSEL1CR_29_1),
+ PINMUX_DATA(IRQ29_182_MARK, PORT182_FN0, MSEL1CR_29_0),
+ PINMUX_DATA(IRQ30_130_MARK, PORT130_FN0, MSEL1CR_30_1),
+ PINMUX_DATA(IRQ30_183_MARK, PORT183_FN0, MSEL1CR_30_0),
+ PINMUX_DATA(IRQ31_138_MARK, PORT138_FN0, MSEL1CR_31_1),
+ PINMUX_DATA(IRQ31_184_MARK, PORT184_FN0, MSEL1CR_31_0),
+
+ /* Function 1 */
+ PINMUX_DATA(BBIF2_TSCK1_MARK, PORT0_FN1),
+ PINMUX_DATA(BBIF2_TSYNC1_MARK, PORT1_FN1),
+ PINMUX_DATA(BBIF2_TXD1_MARK, PORT2_FN1),
+ PINMUX_DATA(BBIF2_RXD_MARK, PORT3_FN1),
+ PINMUX_DATA(FSIACK_MARK, PORT4_FN1),
+ PINMUX_DATA(FSIAILR_MARK, PORT5_FN1),
+ PINMUX_DATA(FSIAIBT_MARK, PORT6_FN1),
+ PINMUX_DATA(FSIAISLD_MARK, PORT7_FN1),
+ PINMUX_DATA(FSIAOMC_MARK, PORT8_FN1),
+ PINMUX_DATA(FSIAOLR_MARK, PORT9_FN1),
+ PINMUX_DATA(FSIAOBT_MARK, PORT10_FN1),
+ PINMUX_DATA(FSIAOSLD_MARK, PORT11_FN1),
+ PINMUX_DATA(FMSOCK_MARK, PORT12_FN1),
+ PINMUX_DATA(FMSOOLR_MARK, PORT13_FN1),
+ PINMUX_DATA(FMSOOBT_MARK, PORT14_FN1),
+ PINMUX_DATA(FMSOSLD_MARK, PORT15_FN1),
+ PINMUX_DATA(FMSOILR_MARK, PORT16_FN1),
+ PINMUX_DATA(FMSOIBT_MARK, PORT17_FN1),
+ PINMUX_DATA(FMSISLD_MARK, PORT18_FN1),
+ PINMUX_DATA(A0_MARK, PORT19_FN1),
+ PINMUX_DATA(A1_MARK, PORT20_FN1),
+ PINMUX_DATA(A2_MARK, PORT21_FN1),
+ PINMUX_DATA(A3_MARK, PORT22_FN1),
+ PINMUX_DATA(A4_FOE_MARK, PORT23_FN1),
+ PINMUX_DATA(A5_FCDE_MARK, PORT24_FN1),
+ PINMUX_DATA(A6_MARK, PORT25_FN1),
+ PINMUX_DATA(A7_MARK, PORT26_FN1),
+ PINMUX_DATA(A8_MARK, PORT27_FN1),
+ PINMUX_DATA(A9_MARK, PORT28_FN1),
+ PINMUX_DATA(A10_MARK, PORT29_FN1),
+ PINMUX_DATA(A11_MARK, PORT30_FN1),
+ PINMUX_DATA(A12_MARK, PORT31_FN1),
+ PINMUX_DATA(A13_MARK, PORT32_FN1),
+ PINMUX_DATA(A14_MARK, PORT33_FN1),
+ PINMUX_DATA(A15_MARK, PORT34_FN1),
+ PINMUX_DATA(A16_MARK, PORT35_FN1),
+ PINMUX_DATA(A17_MARK, PORT36_FN1),
+ PINMUX_DATA(A18_MARK, PORT37_FN1),
+ PINMUX_DATA(A19_MARK, PORT38_FN1),
+ PINMUX_DATA(A20_MARK, PORT39_FN1),
+ PINMUX_DATA(A21_MARK, PORT40_FN1),
+ PINMUX_DATA(A22_MARK, PORT41_FN1),
+ PINMUX_DATA(A23_MARK, PORT42_FN1),
+ PINMUX_DATA(A24_MARK, PORT43_FN1),
+ PINMUX_DATA(A25_MARK, PORT44_FN1),
+ PINMUX_DATA(A26_MARK, PORT45_FN1),
+ PINMUX_DATA(D0_NAF0_MARK, PORT46_FN1),
+ PINMUX_DATA(D1_NAF1_MARK, PORT47_FN1),
+ PINMUX_DATA(D2_NAF2_MARK, PORT48_FN1),
+ PINMUX_DATA(D3_NAF3_MARK, PORT49_FN1),
+ PINMUX_DATA(D4_NAF4_MARK, PORT50_FN1),
+ PINMUX_DATA(D5_NAF5_MARK, PORT51_FN1),
+ PINMUX_DATA(D6_NAF6_MARK, PORT52_FN1),
+ PINMUX_DATA(D7_NAF7_MARK, PORT53_FN1),
+ PINMUX_DATA(D8_NAF8_MARK, PORT54_FN1),
+ PINMUX_DATA(D9_NAF9_MARK, PORT55_FN1),
+ PINMUX_DATA(D10_NAF10_MARK, PORT56_FN1),
+ PINMUX_DATA(D11_NAF11_MARK, PORT57_FN1),
+ PINMUX_DATA(D12_NAF12_MARK, PORT58_FN1),
+ PINMUX_DATA(D13_NAF13_MARK, PORT59_FN1),
+ PINMUX_DATA(D14_NAF14_MARK, PORT60_FN1),
+ PINMUX_DATA(D15_NAF15_MARK, PORT61_FN1),
+ PINMUX_DATA(CS0_MARK, PORT62_FN1),
+ PINMUX_DATA(CS2_MARK, PORT63_FN1),
+ PINMUX_DATA(CS4_MARK, PORT64_FN1),
+ PINMUX_DATA(CS5A_MARK, PORT65_FN1),
+ PINMUX_DATA(CS5B_MARK, PORT66_FN1),
+ PINMUX_DATA(CS6A_MARK, PORT67_FN1),
+ PINMUX_DATA(FCE0_MARK, PORT68_FN1),
+ PINMUX_DATA(RD_FSC_MARK, PORT69_FN1),
+ PINMUX_DATA(WE0_FWE_MARK, PORT70_FN1),
+ PINMUX_DATA(WE1_MARK, PORT71_FN1),
+ PINMUX_DATA(CKO_MARK, PORT72_FN1),
+ PINMUX_DATA(FRB_MARK, PORT73_FN1),
+ PINMUX_DATA(WAIT_MARK, PORT74_FN1),
+ PINMUX_DATA(RDWR_MARK, PORT75_FN1),
+ PINMUX_DATA(MEMC_AD0_MARK, PORT76_FN1),
+ PINMUX_DATA(MEMC_AD1_MARK, PORT77_FN1),
+ PINMUX_DATA(MEMC_AD2_MARK, PORT78_FN1),
+ PINMUX_DATA(MEMC_AD3_MARK, PORT79_FN1),
+ PINMUX_DATA(MEMC_AD4_MARK, PORT80_FN1),
+ PINMUX_DATA(MEMC_AD5_MARK, PORT81_FN1),
+ PINMUX_DATA(MEMC_AD6_MARK, PORT82_FN1),
+ PINMUX_DATA(MEMC_AD7_MARK, PORT83_FN1),
+ PINMUX_DATA(MEMC_AD8_MARK, PORT84_FN1),
+ PINMUX_DATA(MEMC_AD9_MARK, PORT85_FN1),
+ PINMUX_DATA(MEMC_AD10_MARK, PORT86_FN1),
+ PINMUX_DATA(MEMC_AD11_MARK, PORT87_FN1),
+ PINMUX_DATA(MEMC_AD12_MARK, PORT88_FN1),
+ PINMUX_DATA(MEMC_AD13_MARK, PORT89_FN1),
+ PINMUX_DATA(MEMC_AD14_MARK, PORT90_FN1),
+ PINMUX_DATA(MEMC_AD15_MARK, PORT91_FN1),
+ PINMUX_DATA(MEMC_CS0_MARK, PORT92_FN1),
+ PINMUX_DATA(MEMC_BUSCLK_MEMC_A0_MARK, PORT93_FN1),
+ PINMUX_DATA(MEMC_CS1_MEMC_A1_MARK, PORT94_FN1),
+ PINMUX_DATA(MEMC_ADV_MEMC_DREQ0_MARK, PORT95_FN1),
+ PINMUX_DATA(MEMC_WAIT_MEMC_DREQ1_MARK, PORT96_FN1),
+ PINMUX_DATA(MEMC_NOE_MARK, PORT97_FN1),
+ PINMUX_DATA(MEMC_NWE_MARK, PORT98_FN1),
+ PINMUX_DATA(MEMC_INT_MARK, PORT99_FN1),
+ PINMUX_DATA(VIO_VD_MARK, PORT100_FN1),
+ PINMUX_DATA(VIO_HD_MARK, PORT101_FN1),
+ PINMUX_DATA(VIO_D0_MARK, PORT102_FN1),
+ PINMUX_DATA(VIO_D1_MARK, PORT103_FN1),
+ PINMUX_DATA(VIO_D2_MARK, PORT104_FN1),
+ PINMUX_DATA(VIO_D3_MARK, PORT105_FN1),
+ PINMUX_DATA(VIO_D4_MARK, PORT106_FN1),
+ PINMUX_DATA(VIO_D5_MARK, PORT107_FN1),
+ PINMUX_DATA(VIO_D6_MARK, PORT108_FN1),
+ PINMUX_DATA(VIO_D7_MARK, PORT109_FN1),
+ PINMUX_DATA(VIO_D8_MARK, PORT110_FN1),
+ PINMUX_DATA(VIO_D9_MARK, PORT111_FN1),
+ PINMUX_DATA(VIO_D10_MARK, PORT112_FN1),
+ PINMUX_DATA(VIO_D11_MARK, PORT113_FN1),
+ PINMUX_DATA(VIO_D12_MARK, PORT114_FN1),
+ PINMUX_DATA(VIO_D13_MARK, PORT115_FN1),
+ PINMUX_DATA(VIO_D14_MARK, PORT116_FN1),
+ PINMUX_DATA(VIO_D15_MARK, PORT117_FN1),
+ PINMUX_DATA(VIO_CLK_MARK, PORT118_FN1),
+ PINMUX_DATA(VIO_FIELD_MARK, PORT119_FN1),
+ PINMUX_DATA(VIO_CKO_MARK, PORT120_FN1),
+ PINMUX_DATA(LCDD0_MARK, PORT121_FN1),
+ PINMUX_DATA(LCDD1_MARK, PORT122_FN1),
+ PINMUX_DATA(LCDD2_MARK, PORT123_FN1),
+ PINMUX_DATA(LCDD3_MARK, PORT124_FN1),
+ PINMUX_DATA(LCDD4_MARK, PORT125_FN1),
+ PINMUX_DATA(LCDD5_MARK, PORT126_FN1),
+ PINMUX_DATA(LCDD6_MARK, PORT127_FN1),
+ PINMUX_DATA(LCDD7_MARK, PORT128_FN1),
+ PINMUX_DATA(LCDD8_MARK, PORT129_FN1),
+ PINMUX_DATA(LCDD9_MARK, PORT130_FN1),
+ PINMUX_DATA(LCDD10_MARK, PORT131_FN1),
+ PINMUX_DATA(LCDD11_MARK, PORT132_FN1),
+ PINMUX_DATA(LCDD12_MARK, PORT133_FN1),
+ PINMUX_DATA(LCDD13_MARK, PORT134_FN1),
+ PINMUX_DATA(LCDD14_MARK, PORT135_FN1),
+ PINMUX_DATA(LCDD15_MARK, PORT136_FN1),
+ PINMUX_DATA(LCDD16_MARK, PORT137_FN1),
+ PINMUX_DATA(LCDD17_MARK, PORT138_FN1),
+ PINMUX_DATA(LCDD18_MARK, PORT139_FN1),
+ PINMUX_DATA(LCDD19_MARK, PORT140_FN1),
+ PINMUX_DATA(LCDD20_MARK, PORT141_FN1),
+ PINMUX_DATA(LCDD21_MARK, PORT142_FN1),
+ PINMUX_DATA(LCDD22_MARK, PORT143_FN1),
+ PINMUX_DATA(LCDD23_MARK, PORT144_FN1),
+ PINMUX_DATA(LCDHSYN_MARK, PORT145_FN1),
+ PINMUX_DATA(LCDVSYN_MARK, PORT146_FN1),
+ PINMUX_DATA(LCDDCK_MARK, PORT147_FN1),
+ PINMUX_DATA(LCDRD_MARK, PORT148_FN1),
+ PINMUX_DATA(LCDDISP_MARK, PORT149_FN1),
+ PINMUX_DATA(LCDLCLK_MARK, PORT150_FN1),
+ PINMUX_DATA(LCDDON_MARK, PORT151_FN1),
+ PINMUX_DATA(SCIFA0_TXD_MARK, PORT152_FN1),
+ PINMUX_DATA(SCIFA0_RXD_MARK, PORT153_FN1),
+ PINMUX_DATA(SCIFA1_TXD_MARK, PORT154_FN1),
+ PINMUX_DATA(SCIFA1_RXD_MARK, PORT155_FN1),
+ PINMUX_DATA(TS_SPSYNC1_MARK, PORT156_FN1),
+ PINMUX_DATA(TS_SDAT1_MARK, PORT157_FN1),
+ PINMUX_DATA(TS_SDEN1_MARK, PORT158_FN1),
+ PINMUX_DATA(TS_SCK1_MARK, PORT159_FN1),
+ PINMUX_DATA(TPU0TO0_MARK, PORT160_FN1),
+ PINMUX_DATA(TPU0TO1_MARK, PORT161_FN1),
+ PINMUX_DATA(SCIFB_SCK_MARK, PORT162_FN1),
+ PINMUX_DATA(SCIFB_RTS_MARK, PORT163_FN1),
+ PINMUX_DATA(SCIFB_CTS_MARK, PORT164_FN1),
+ PINMUX_DATA(SCIFB_TXD_MARK, PORT165_FN1),
+ PINMUX_DATA(SCIFB_RXD_MARK, PORT166_FN1),
+ PINMUX_DATA(VBUS0_0_MARK, PORT167_FN1),
+ PINMUX_DATA(VBUS0_1_MARK, PORT168_FN1),
+ PINMUX_DATA(HDMI_HPD_MARK, PORT169_FN1),
+ PINMUX_DATA(HDMI_CEC_MARK, PORT170_FN1),
+ PINMUX_DATA(SDHICLK0_MARK, PORT171_FN1),
+ PINMUX_DATA(SDHICD0_MARK, PORT172_FN1),
+ PINMUX_DATA(SDHID0_0_MARK, PORT173_FN1),
+ PINMUX_DATA(SDHID0_1_MARK, PORT174_FN1),
+ PINMUX_DATA(SDHID0_2_MARK, PORT175_FN1),
+ PINMUX_DATA(SDHID0_3_MARK, PORT176_FN1),
+ PINMUX_DATA(SDHICMD0_MARK, PORT177_FN1),
+ PINMUX_DATA(SDHIWP0_MARK, PORT178_FN1),
+ PINMUX_DATA(SDHICLK1_MARK, PORT179_FN1),
+ PINMUX_DATA(SDHID1_0_MARK, PORT180_FN1),
+ PINMUX_DATA(SDHID1_1_MARK, PORT181_FN1),
+ PINMUX_DATA(SDHID1_2_MARK, PORT182_FN1),
+ PINMUX_DATA(SDHID1_3_MARK, PORT183_FN1),
+ PINMUX_DATA(SDHICMD1_MARK, PORT184_FN1),
+ PINMUX_DATA(SDHICLK2_MARK, PORT185_FN1),
+ PINMUX_DATA(SDHID2_0_MARK, PORT186_FN1),
+ PINMUX_DATA(SDHID2_1_MARK, PORT187_FN1),
+ PINMUX_DATA(SDHID2_2_MARK, PORT188_FN1),
+ PINMUX_DATA(SDHID2_3_MARK, PORT189_FN1),
+ PINMUX_DATA(SDHICMD2_MARK, PORT190_FN1),
+
+ /* Function 2 */
+ PINMUX_DATA(FSIBCK_MARK, PORT4_FN2),
+ PINMUX_DATA(SCIFA4_RXD_MARK, PORT5_FN2),
+ PINMUX_DATA(SCIFA4_TXD_MARK, PORT6_FN2),
+ PINMUX_DATA(SCIFA5_RXD_MARK, PORT8_FN2),
+ PINMUX_DATA(FSIASPDIF_11_MARK, PORT11_FN2),
+ PINMUX_DATA(SCIFA5_TXD_MARK, PORT12_FN2),
+ PINMUX_DATA(FMSIOLR_MARK, PORT13_FN2),
+ PINMUX_DATA(FMSIOBT_MARK, PORT14_FN2),
+ PINMUX_DATA(FSIASPDIF_15_MARK, PORT15_FN2),
+ PINMUX_DATA(FMSIILR_MARK, PORT16_FN2),
+ PINMUX_DATA(FMSIIBT_MARK, PORT17_FN2),
+ PINMUX_DATA(BS_MARK, PORT19_FN2),
+ PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT36_FN2),
+ PINMUX_DATA(MSIOF0_TSCK_MARK, PORT37_FN2),
+ PINMUX_DATA(MSIOF0_RXD_MARK, PORT38_FN2),
+ PINMUX_DATA(MSIOF0_RSCK_MARK, PORT39_FN2),
+ PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT40_FN2),
+ PINMUX_DATA(MSIOF0_MCK0_MARK, PORT41_FN2),
+ PINMUX_DATA(MSIOF0_MCK1_MARK, PORT42_FN2),
+ PINMUX_DATA(MSIOF0_SS1_MARK, PORT43_FN2),
+ PINMUX_DATA(MSIOF0_SS2_MARK, PORT44_FN2),
+ PINMUX_DATA(MSIOF0_TXD_MARK, PORT45_FN2),
+ PINMUX_DATA(FMSICK_MARK, PORT65_FN2),
+ PINMUX_DATA(FCE1_MARK, PORT66_FN2),
+ PINMUX_DATA(BBIF1_RXD_MARK, PORT76_FN2),
+ PINMUX_DATA(BBIF1_TSYNC_MARK, PORT77_FN2),
+ PINMUX_DATA(BBIF1_TSCK_MARK, PORT78_FN2),
+ PINMUX_DATA(BBIF1_TXD_MARK, PORT79_FN2),
+ PINMUX_DATA(BBIF1_RSCK_MARK, PORT80_FN2),
+ PINMUX_DATA(BBIF1_RSYNC_MARK, PORT81_FN2),
+ PINMUX_DATA(BBIF1_FLOW_MARK, PORT82_FN2),
+ PINMUX_DATA(BB_RX_FLOW_N_MARK, PORT83_FN2),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PORT84_FN2),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT85_FN2),
+ PINMUX_DATA(MSIOF1_MCK0_MARK, PORT86_FN2),
+ PINMUX_DATA(MSIOF1_MCK1_MARK, PORT87_FN2),
+ PINMUX_DATA(MSIOF1_TSCK_88_MARK, PORT88_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_TSYNC_89_MARK, PORT89_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_TXD_90_MARK, PORT90_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_RXD_91_MARK, PORT91_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_SS1_92_MARK, PORT92_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(MSIOF1_SS2_93_MARK, PORT93_FN2, MSEL4CR_10_1),
+ PINMUX_DATA(SCIFA2_CTS1_MARK, PORT94_FN2),
+ PINMUX_DATA(SCIFA2_RTS1_MARK, PORT95_FN2),
+ PINMUX_DATA(SCIFA2_TXD1_MARK, PORT96_FN2),
+ PINMUX_DATA(SCIFA2_RXD1_MARK, PORT97_FN2),
+ PINMUX_DATA(SCIFA2_SCK1_MARK, PORT98_FN2),
+ PINMUX_DATA(I2C_SCL2_MARK, PORT110_FN2),
+ PINMUX_DATA(I2C_SDA2_MARK, PORT111_FN2),
+ PINMUX_DATA(I2C_SCL3_MARK, PORT114_FN2, MSEL4CR_16_1),
+ PINMUX_DATA(I2C_SDA3_MARK, PORT115_FN2, MSEL4CR_16_1),
+ PINMUX_DATA(I2C_SCL4_MARK, PORT116_FN2, MSEL4CR_17_1),
+ PINMUX_DATA(I2C_SDA4_MARK, PORT117_FN2, MSEL4CR_17_1),
+ PINMUX_DATA(MSIOF2_RSCK_MARK, PORT134_FN2),
+ PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT135_FN2),
+ PINMUX_DATA(MSIOF2_MCK0_MARK, PORT136_FN2),
+ PINMUX_DATA(MSIOF2_MCK1_MARK, PORT137_FN2),
+ PINMUX_DATA(MSIOF2_SS1_MARK, PORT138_FN2),
+ PINMUX_DATA(MSIOF2_SS2_MARK, PORT139_FN2),
+ PINMUX_DATA(SCIFA3_CTS_140_MARK, PORT140_FN2, MSEL3CR_9_1),
+ PINMUX_DATA(SCIFA3_RTS_141_MARK, PORT141_FN2),
+ PINMUX_DATA(SCIFA3_SCK_MARK, PORT142_FN2),
+ PINMUX_DATA(SCIFA3_TXD_MARK, PORT143_FN2),
+ PINMUX_DATA(SCIFA3_RXD_MARK, PORT144_FN2),
+ PINMUX_DATA(MSIOF2_TSYNC_MARK, PORT148_FN2),
+ PINMUX_DATA(MSIOF2_TSCK_MARK, PORT149_FN2),
+ PINMUX_DATA(MSIOF2_RXD_MARK, PORT150_FN2),
+ PINMUX_DATA(MSIOF2_TXD_MARK, PORT151_FN2),
+ PINMUX_DATA(SCIFA0_SCK_MARK, PORT156_FN2),
+ PINMUX_DATA(SCIFA0_RTS_MARK, PORT157_FN2),
+ PINMUX_DATA(SCIFA0_CTS_MARK, PORT158_FN2),
+ PINMUX_DATA(SCIFA1_SCK_MARK, PORT159_FN2),
+ PINMUX_DATA(SCIFA1_RTS_MARK, PORT160_FN2),
+ PINMUX_DATA(SCIFA1_CTS_MARK, PORT161_FN2),
+
+ /* Function 3 */
+ PINMUX_DATA(VIO_CKO1_MARK, PORT16_FN3),
+ PINMUX_DATA(VIO_CKO2_MARK, PORT17_FN3),
+ PINMUX_DATA(IDIN_1_18_MARK, PORT18_FN3, MSEL4CR_14_1),
+ PINMUX_DATA(MSIOF1_TSCK_39_MARK, PORT39_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(MSIOF1_TSYNC_40_MARK, PORT40_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(MSIOF1_TXD_41_MARK, PORT41_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(MSIOF1_RXD_42_MARK, PORT42_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(MSIOF1_SS1_43_MARK, PORT43_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(MSIOF1_SS2_44_MARK, PORT44_FN3, MSEL4CR_10_0),
+ PINMUX_DATA(MMCD1_0_MARK, PORT54_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_1_MARK, PORT55_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_2_MARK, PORT56_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_3_MARK, PORT57_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_4_MARK, PORT58_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_5_MARK, PORT59_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_6_MARK, PORT60_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCD1_7_MARK, PORT61_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(VINT_I_MARK, PORT65_FN3),
+ PINMUX_DATA(MMCCLK1_MARK, PORT66_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(MMCCMD1_MARK, PORT67_FN3, MSEL4CR_15_1),
+ PINMUX_DATA(TPU0TO2_93_MARK, PORT93_FN3),
+ PINMUX_DATA(TPU0TO2_99_MARK, PORT99_FN3),
+ PINMUX_DATA(TPU0TO3_MARK, PORT112_FN3),
+ PINMUX_DATA(IDIN_0_MARK, PORT113_FN3),
+ PINMUX_DATA(EXTLP_0_MARK, PORT114_FN3),
+ PINMUX_DATA(OVCN2_0_MARK, PORT115_FN3),
+ PINMUX_DATA(PWEN_0_MARK, PORT116_FN3),
+ PINMUX_DATA(OVCN_0_MARK, PORT117_FN3),
+ PINMUX_DATA(KEYOUT7_MARK, PORT121_FN3),
+ PINMUX_DATA(KEYOUT6_MARK, PORT122_FN3),
+ PINMUX_DATA(KEYOUT5_MARK, PORT123_FN3),
+ PINMUX_DATA(KEYOUT4_MARK, PORT124_FN3),
+ PINMUX_DATA(KEYOUT3_MARK, PORT125_FN3),
+ PINMUX_DATA(KEYOUT2_MARK, PORT126_FN3),
+ PINMUX_DATA(KEYOUT1_MARK, PORT127_FN3),
+ PINMUX_DATA(KEYOUT0_MARK, PORT128_FN3),
+ PINMUX_DATA(KEYIN7_MARK, PORT129_FN3),
+ PINMUX_DATA(KEYIN6_MARK, PORT130_FN3),
+ PINMUX_DATA(KEYIN5_MARK, PORT131_FN3),
+ PINMUX_DATA(KEYIN4_MARK, PORT132_FN3),
+ PINMUX_DATA(KEYIN3_133_MARK, PORT133_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(KEYIN2_134_MARK, PORT134_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(KEYIN1_135_MARK, PORT135_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(KEYIN0_136_MARK, PORT136_FN3, MSEL4CR_18_0),
+ PINMUX_DATA(TS_SPSYNC2_MARK, PORT137_FN3),
+ PINMUX_DATA(IROUT_139_MARK, PORT139_FN3),
+ PINMUX_DATA(IRDA_OUT_MARK, PORT140_FN3),
+ PINMUX_DATA(IRDA_IN_MARK, PORT141_FN3),
+ PINMUX_DATA(IRDA_FIRSEL_MARK, PORT142_FN3),
+ PINMUX_DATA(TS_SDAT2_MARK, PORT145_FN3),
+ PINMUX_DATA(TS_SDEN2_MARK, PORT146_FN3),
+ PINMUX_DATA(TS_SCK2_MARK, PORT147_FN3),
+
+ /* Function 4 */
+ PINMUX_DATA(SCIFA3_CTS_43_MARK, PORT43_FN4, MSEL3CR_9_0),
+ PINMUX_DATA(SCIFA3_RTS_44_MARK, PORT44_FN4),
+ PINMUX_DATA(GP_RX_FLAG_MARK, PORT76_FN4),
+ PINMUX_DATA(GP_RX_DATA_MARK, PORT77_FN4),
+ PINMUX_DATA(GP_TX_READY_MARK, PORT78_FN4),
+ PINMUX_DATA(GP_RX_WAKE_MARK, PORT79_FN4),
+ PINMUX_DATA(MP_TX_FLAG_MARK, PORT80_FN4),
+ PINMUX_DATA(MP_TX_DATA_MARK, PORT81_FN4),
+ PINMUX_DATA(MP_RX_READY_MARK, PORT82_FN4),
+ PINMUX_DATA(MP_TX_WAKE_MARK, PORT83_FN4),
+ PINMUX_DATA(MMCD0_0_MARK, PORT84_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_1_MARK, PORT85_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_2_MARK, PORT86_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_3_MARK, PORT87_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_4_MARK, PORT88_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_5_MARK, PORT89_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_6_MARK, PORT90_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCD0_7_MARK, PORT91_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(MMCCMD0_MARK, PORT92_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(SIM_RST_MARK, PORT94_FN4),
+ PINMUX_DATA(SIM_CLK_MARK, PORT95_FN4),
+ PINMUX_DATA(SIM_D_MARK, PORT98_FN4),
+ PINMUX_DATA(MMCCLK0_MARK, PORT99_FN4, MSEL4CR_15_0),
+ PINMUX_DATA(IDIN_1_113_MARK, PORT113_FN4, MSEL4CR_14_0),
+ PINMUX_DATA(OVCN_1_114_MARK, PORT114_FN4, MSEL4CR_14_0),
+ PINMUX_DATA(PWEN_1_115_MARK, PORT115_FN4),
+ PINMUX_DATA(EXTLP_1_MARK, PORT116_FN4),
+ PINMUX_DATA(OVCN2_1_MARK, PORT117_FN4),
+ PINMUX_DATA(KEYIN0_121_MARK, PORT121_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(KEYIN1_122_MARK, PORT122_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(KEYIN2_123_MARK, PORT123_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(KEYIN3_124_MARK, PORT124_FN4, MSEL4CR_18_1),
+ PINMUX_DATA(PWEN_1_138_MARK, PORT138_FN4),
+ PINMUX_DATA(IROUT_140_MARK, PORT140_FN4),
+ PINMUX_DATA(LCDCS_MARK, PORT145_FN4),
+ PINMUX_DATA(LCDWR_MARK, PORT147_FN4),
+ PINMUX_DATA(LCDRS_MARK, PORT149_FN4),
+ PINMUX_DATA(OVCN_1_162_MARK, PORT162_FN4, MSEL4CR_14_1),
+
+ /* Function 5 */
+ PINMUX_DATA(GPI0_MARK, PORT41_FN5),
+ PINMUX_DATA(GPI1_MARK, PORT42_FN5),
+ PINMUX_DATA(GPO0_MARK, PORT43_FN5),
+ PINMUX_DATA(GPO1_MARK, PORT44_FN5),
+ PINMUX_DATA(I2C_SCL3S_MARK, PORT137_FN5, MSEL4CR_16_0),
+ PINMUX_DATA(I2C_SDA3S_MARK, PORT145_FN5, MSEL4CR_16_0),
+ PINMUX_DATA(I2C_SCL4S_MARK, PORT146_FN5, MSEL4CR_17_0),
+ PINMUX_DATA(I2C_SDA4S_MARK, PORT147_FN5, MSEL4CR_17_0),
+
+ /* Function select */
+ PINMUX_DATA(LCDC0_SELECT_MARK, MSEL3CR_6_0),
+ PINMUX_DATA(LCDC1_SELECT_MARK, MSEL3CR_6_1),
+
+ PINMUX_DATA(TS0_1SELECT_MARK, MSEL3CR_21_0, MSEL3CR_20_0),
+ PINMUX_DATA(TS0_2SELECT_MARK, MSEL3CR_21_0, MSEL3CR_20_1),
+ PINMUX_DATA(TS1_1SELECT_MARK, MSEL3CR_27_0, MSEL3CR_26_0),
+ PINMUX_DATA(TS1_2SELECT_MARK, MSEL3CR_27_0, MSEL3CR_26_1),
+
+ PINMUX_DATA(SDENC_CPG_MARK, MSEL4CR_19_0),
+ PINMUX_DATA(SDENC_DV_CLKI_MARK, MSEL4CR_19_1),
+
+ PINMUX_DATA(MFIv6_MARK, MSEL4CR_6_0),
+ PINMUX_DATA(MFIv4_MARK, MSEL4CR_6_1),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+
+ /* PORT */
+ GPIO_PORT_ALL(),
+
+ /* IRQ */
+ GPIO_FN(IRQ0_6), GPIO_FN(IRQ0_162), GPIO_FN(IRQ1),
+ GPIO_FN(IRQ2_4), GPIO_FN(IRQ2_5), GPIO_FN(IRQ3_8),
+ GPIO_FN(IRQ3_16), GPIO_FN(IRQ4_17), GPIO_FN(IRQ4_163),
+ GPIO_FN(IRQ5), GPIO_FN(IRQ6_39), GPIO_FN(IRQ6_164),
+ GPIO_FN(IRQ7_40), GPIO_FN(IRQ7_167), GPIO_FN(IRQ8_41),
+ GPIO_FN(IRQ8_168), GPIO_FN(IRQ9_42), GPIO_FN(IRQ9_169),
+ GPIO_FN(IRQ10), GPIO_FN(IRQ11), GPIO_FN(IRQ12_80),
+ GPIO_FN(IRQ12_137), GPIO_FN(IRQ13_81), GPIO_FN(IRQ13_145),
+ GPIO_FN(IRQ14_82), GPIO_FN(IRQ14_146), GPIO_FN(IRQ15_83),
+ GPIO_FN(IRQ15_147), GPIO_FN(IRQ16_84), GPIO_FN(IRQ16_170),
+ GPIO_FN(IRQ17), GPIO_FN(IRQ18), GPIO_FN(IRQ19),
+ GPIO_FN(IRQ20), GPIO_FN(IRQ21), GPIO_FN(IRQ22),
+ GPIO_FN(IRQ23), GPIO_FN(IRQ24), GPIO_FN(IRQ25),
+ GPIO_FN(IRQ26_121), GPIO_FN(IRQ26_172), GPIO_FN(IRQ27_122),
+ GPIO_FN(IRQ27_180), GPIO_FN(IRQ28_123), GPIO_FN(IRQ28_181),
+ GPIO_FN(IRQ29_129), GPIO_FN(IRQ29_182), GPIO_FN(IRQ30_130),
+ GPIO_FN(IRQ30_183), GPIO_FN(IRQ31_138), GPIO_FN(IRQ31_184),
+
+ /* MSIOF0 */
+ GPIO_FN(MSIOF0_TSYNC), GPIO_FN(MSIOF0_TSCK), GPIO_FN(MSIOF0_RXD),
+ GPIO_FN(MSIOF0_RSCK), GPIO_FN(MSIOF0_RSYNC), GPIO_FN(MSIOF0_MCK0),
+ GPIO_FN(MSIOF0_MCK1), GPIO_FN(MSIOF0_SS1), GPIO_FN(MSIOF0_SS2),
+ GPIO_FN(MSIOF0_TXD),
+
+ /* MSIOF1 */
+ GPIO_FN(MSIOF1_TSCK_39), GPIO_FN(MSIOF1_TSCK_88),
+ GPIO_FN(MSIOF1_TSYNC_40), GPIO_FN(MSIOF1_TSYNC_89),
+ GPIO_FN(MSIOF1_TXD_41), GPIO_FN(MSIOF1_TXD_90),
+ GPIO_FN(MSIOF1_RXD_42), GPIO_FN(MSIOF1_RXD_91),
+ GPIO_FN(MSIOF1_SS1_43), GPIO_FN(MSIOF1_SS1_92),
+ GPIO_FN(MSIOF1_SS2_44), GPIO_FN(MSIOF1_SS2_93),
+ GPIO_FN(MSIOF1_RSCK), GPIO_FN(MSIOF1_RSYNC),
+ GPIO_FN(MSIOF1_MCK0), GPIO_FN(MSIOF1_MCK1),
+
+ /* MSIOF2 */
+ GPIO_FN(MSIOF2_RSCK), GPIO_FN(MSIOF2_RSYNC), GPIO_FN(MSIOF2_MCK0),
+ GPIO_FN(MSIOF2_MCK1), GPIO_FN(MSIOF2_SS1), GPIO_FN(MSIOF2_SS2),
+ GPIO_FN(MSIOF2_TSYNC), GPIO_FN(MSIOF2_TSCK), GPIO_FN(MSIOF2_RXD),
+ GPIO_FN(MSIOF2_TXD),
+
+ /* BBIF1 */
+ GPIO_FN(BBIF1_RXD), GPIO_FN(BBIF1_TSYNC), GPIO_FN(BBIF1_TSCK),
+ GPIO_FN(BBIF1_TXD), GPIO_FN(BBIF1_RSCK), GPIO_FN(BBIF1_RSYNC),
+ GPIO_FN(BBIF1_FLOW), GPIO_FN(BB_RX_FLOW_N),
+
+ /* BBIF2 */
+ GPIO_FN(BBIF2_TSCK1), GPIO_FN(BBIF2_TSYNC1),
+ GPIO_FN(BBIF2_TXD1), GPIO_FN(BBIF2_RXD),
+
+ /* FSI */
+ GPIO_FN(FSIACK), GPIO_FN(FSIBCK), GPIO_FN(FSIAILR),
+ GPIO_FN(FSIAIBT), GPIO_FN(FSIAISLD), GPIO_FN(FSIAOMC),
+ GPIO_FN(FSIAOLR), GPIO_FN(FSIAOBT), GPIO_FN(FSIAOSLD),
+ GPIO_FN(FSIASPDIF_11), GPIO_FN(FSIASPDIF_15),
+
+ /* FMSI */
+ GPIO_FN(FMSOCK), GPIO_FN(FMSOOLR), GPIO_FN(FMSIOLR),
+ GPIO_FN(FMSOOBT), GPIO_FN(FMSIOBT), GPIO_FN(FMSOSLD),
+ GPIO_FN(FMSOILR), GPIO_FN(FMSIILR), GPIO_FN(FMSOIBT),
+ GPIO_FN(FMSIIBT), GPIO_FN(FMSISLD), GPIO_FN(FMSICK),
+
+ /* SCIFA0 */
+ GPIO_FN(SCIFA0_TXD), GPIO_FN(SCIFA0_RXD), GPIO_FN(SCIFA0_SCK),
+ GPIO_FN(SCIFA0_RTS), GPIO_FN(SCIFA0_CTS),
+
+ /* SCIFA1 */
+ GPIO_FN(SCIFA1_TXD), GPIO_FN(SCIFA1_RXD), GPIO_FN(SCIFA1_SCK),
+ GPIO_FN(SCIFA1_RTS), GPIO_FN(SCIFA1_CTS),
+
+ /* SCIFA2 */
+ GPIO_FN(SCIFA2_CTS1), GPIO_FN(SCIFA2_RTS1), GPIO_FN(SCIFA2_TXD1),
+ GPIO_FN(SCIFA2_RXD1), GPIO_FN(SCIFA2_SCK1),
+
+ /* SCIFA3 */
+ GPIO_FN(SCIFA3_CTS_43), GPIO_FN(SCIFA3_CTS_140),
+ GPIO_FN(SCIFA3_RTS_44), GPIO_FN(SCIFA3_RTS_141),
+ GPIO_FN(SCIFA3_SCK), GPIO_FN(SCIFA3_TXD),
+ GPIO_FN(SCIFA3_RXD),
+
+ /* SCIFA4 */
+ GPIO_FN(SCIFA4_RXD), GPIO_FN(SCIFA4_TXD),
+
+ /* SCIFA5 */
+ GPIO_FN(SCIFA5_RXD), GPIO_FN(SCIFA5_TXD),
+
+ /* SCIFB */
+ GPIO_FN(SCIFB_SCK), GPIO_FN(SCIFB_RTS), GPIO_FN(SCIFB_CTS),
+ GPIO_FN(SCIFB_TXD), GPIO_FN(SCIFB_RXD),
+
+ /* CEU */
+ GPIO_FN(VIO_HD), GPIO_FN(VIO_CKO1), GPIO_FN(VIO_CKO2),
+ GPIO_FN(VIO_VD), GPIO_FN(VIO_CLK), GPIO_FN(VIO_FIELD),
+ GPIO_FN(VIO_CKO), GPIO_FN(VIO_D0), GPIO_FN(VIO_D1),
+ GPIO_FN(VIO_D2), GPIO_FN(VIO_D3), GPIO_FN(VIO_D4),
+ GPIO_FN(VIO_D5), GPIO_FN(VIO_D6), GPIO_FN(VIO_D7),
+ GPIO_FN(VIO_D8), GPIO_FN(VIO_D9), GPIO_FN(VIO_D10),
+ GPIO_FN(VIO_D11), GPIO_FN(VIO_D12), GPIO_FN(VIO_D13),
+ GPIO_FN(VIO_D14), GPIO_FN(VIO_D15),
+
+ /* USB0 */
+ GPIO_FN(IDIN_0), GPIO_FN(EXTLP_0), GPIO_FN(OVCN2_0),
+ GPIO_FN(PWEN_0), GPIO_FN(OVCN_0), GPIO_FN(VBUS0_0),
+
+ /* USB1 */
+ GPIO_FN(IDIN_1_18), GPIO_FN(IDIN_1_113),
+ GPIO_FN(OVCN_1_114), GPIO_FN(OVCN_1_162),
+ GPIO_FN(PWEN_1_115), GPIO_FN(PWEN_1_138),
+ GPIO_FN(EXTLP_1), GPIO_FN(OVCN2_1),
+ GPIO_FN(VBUS0_1),
+
+ /* GPIO */
+ GPIO_FN(GPI0), GPIO_FN(GPI1), GPIO_FN(GPO0), GPIO_FN(GPO1),
+
+ /* BSC */
+ GPIO_FN(BS), GPIO_FN(WE1), GPIO_FN(CKO),
+ GPIO_FN(WAIT), GPIO_FN(RDWR),
+
+ GPIO_FN(A0), GPIO_FN(A1), GPIO_FN(A2),
+ GPIO_FN(A3), GPIO_FN(A6), GPIO_FN(A7),
+ GPIO_FN(A8), GPIO_FN(A9), GPIO_FN(A10),
+ GPIO_FN(A11), GPIO_FN(A12), GPIO_FN(A13),
+ GPIO_FN(A14), GPIO_FN(A15), GPIO_FN(A16),
+ GPIO_FN(A17), GPIO_FN(A18), GPIO_FN(A19),
+ GPIO_FN(A20), GPIO_FN(A21), GPIO_FN(A22),
+ GPIO_FN(A23), GPIO_FN(A24), GPIO_FN(A25),
+ GPIO_FN(A26),
+
+ GPIO_FN(CS0), GPIO_FN(CS2), GPIO_FN(CS4),
+ GPIO_FN(CS5A), GPIO_FN(CS5B), GPIO_FN(CS6A),
+
+ /* BSC/FLCTL */
+ GPIO_FN(RD_FSC), GPIO_FN(WE0_FWE), GPIO_FN(A4_FOE),
+ GPIO_FN(A5_FCDE), GPIO_FN(D0_NAF0), GPIO_FN(D1_NAF1),
+ GPIO_FN(D2_NAF2), GPIO_FN(D3_NAF3), GPIO_FN(D4_NAF4),
+ GPIO_FN(D5_NAF5), GPIO_FN(D6_NAF6), GPIO_FN(D7_NAF7),
+ GPIO_FN(D8_NAF8), GPIO_FN(D9_NAF9), GPIO_FN(D10_NAF10),
+ GPIO_FN(D11_NAF11), GPIO_FN(D12_NAF12), GPIO_FN(D13_NAF13),
+ GPIO_FN(D14_NAF14), GPIO_FN(D15_NAF15),
+
+ /* MMCIF(1) */
+ GPIO_FN(MMCD0_0), GPIO_FN(MMCD0_1), GPIO_FN(MMCD0_2),
+ GPIO_FN(MMCD0_3), GPIO_FN(MMCD0_4), GPIO_FN(MMCD0_5),
+ GPIO_FN(MMCD0_6), GPIO_FN(MMCD0_7), GPIO_FN(MMCCMD0),
+ GPIO_FN(MMCCLK0),
+
+ /* MMCIF(2) */
+ GPIO_FN(MMCD1_0), GPIO_FN(MMCD1_1), GPIO_FN(MMCD1_2),
+ GPIO_FN(MMCD1_3), GPIO_FN(MMCD1_4), GPIO_FN(MMCD1_5),
+ GPIO_FN(MMCD1_6), GPIO_FN(MMCD1_7), GPIO_FN(MMCCLK1),
+ GPIO_FN(MMCCMD1),
+
+ /* SPU2 */
+ GPIO_FN(VINT_I),
+
+ /* FLCTL */
+ GPIO_FN(FCE1), GPIO_FN(FCE0), GPIO_FN(FRB),
+
+ /* HSI */
+ GPIO_FN(GP_RX_FLAG), GPIO_FN(GP_RX_DATA), GPIO_FN(GP_TX_READY),
+ GPIO_FN(GP_RX_WAKE), GPIO_FN(MP_TX_FLAG), GPIO_FN(MP_TX_DATA),
+ GPIO_FN(MP_RX_READY), GPIO_FN(MP_TX_WAKE),
+
+ /* MFI */
+ GPIO_FN(MFIv6),
+ GPIO_FN(MFIv4),
+
+ GPIO_FN(MEMC_BUSCLK_MEMC_A0), GPIO_FN(MEMC_ADV_MEMC_DREQ0),
+ GPIO_FN(MEMC_WAIT_MEMC_DREQ1), GPIO_FN(MEMC_CS1_MEMC_A1),
+ GPIO_FN(MEMC_CS0), GPIO_FN(MEMC_NOE),
+ GPIO_FN(MEMC_NWE), GPIO_FN(MEMC_INT),
+
+ GPIO_FN(MEMC_AD0), GPIO_FN(MEMC_AD1), GPIO_FN(MEMC_AD2),
+ GPIO_FN(MEMC_AD3), GPIO_FN(MEMC_AD4), GPIO_FN(MEMC_AD5),
+ GPIO_FN(MEMC_AD6), GPIO_FN(MEMC_AD7), GPIO_FN(MEMC_AD8),
+ GPIO_FN(MEMC_AD9), GPIO_FN(MEMC_AD10), GPIO_FN(MEMC_AD11),
+ GPIO_FN(MEMC_AD12), GPIO_FN(MEMC_AD13), GPIO_FN(MEMC_AD14),
+ GPIO_FN(MEMC_AD15),
+
+ /* SIM */
+ GPIO_FN(SIM_RST), GPIO_FN(SIM_CLK), GPIO_FN(SIM_D),
+
+ /* TPU */
+ GPIO_FN(TPU0TO0), GPIO_FN(TPU0TO1), GPIO_FN(TPU0TO2_93),
+ GPIO_FN(TPU0TO2_99), GPIO_FN(TPU0TO3),
+
+ /* I2C2 */
+ GPIO_FN(I2C_SCL2), GPIO_FN(I2C_SDA2),
+
+ /* I2C3(1) */
+ GPIO_FN(I2C_SCL3), GPIO_FN(I2C_SDA3),
+
+ /* I2C3(2) */
+ GPIO_FN(I2C_SCL3S), GPIO_FN(I2C_SDA3S),
+
+	/* I2C4(1) */
+ GPIO_FN(I2C_SCL4), GPIO_FN(I2C_SDA4),
+
+ /* I2C4(2) */
+ GPIO_FN(I2C_SCL4S), GPIO_FN(I2C_SDA4S),
+
+ /* KEYSC */
+ GPIO_FN(KEYOUT0), GPIO_FN(KEYIN0_121), GPIO_FN(KEYIN0_136),
+ GPIO_FN(KEYOUT1), GPIO_FN(KEYIN1_122), GPIO_FN(KEYIN1_135),
+ GPIO_FN(KEYOUT2), GPIO_FN(KEYIN2_123), GPIO_FN(KEYIN2_134),
+ GPIO_FN(KEYOUT3), GPIO_FN(KEYIN3_124), GPIO_FN(KEYIN3_133),
+ GPIO_FN(KEYOUT4), GPIO_FN(KEYIN4), GPIO_FN(KEYOUT5),
+ GPIO_FN(KEYIN5), GPIO_FN(KEYOUT6), GPIO_FN(KEYIN6),
+ GPIO_FN(KEYOUT7), GPIO_FN(KEYIN7),
+
+ /* LCDC */
+ GPIO_FN(LCDHSYN), GPIO_FN(LCDCS), GPIO_FN(LCDVSYN),
+ GPIO_FN(LCDDCK), GPIO_FN(LCDWR), GPIO_FN(LCDRD),
+ GPIO_FN(LCDDISP), GPIO_FN(LCDRS), GPIO_FN(LCDLCLK),
+ GPIO_FN(LCDDON),
+
+ GPIO_FN(LCDD0), GPIO_FN(LCDD1), GPIO_FN(LCDD2),
+ GPIO_FN(LCDD3), GPIO_FN(LCDD4), GPIO_FN(LCDD5),
+ GPIO_FN(LCDD6), GPIO_FN(LCDD7), GPIO_FN(LCDD8),
+ GPIO_FN(LCDD9), GPIO_FN(LCDD10), GPIO_FN(LCDD11),
+ GPIO_FN(LCDD12), GPIO_FN(LCDD13), GPIO_FN(LCDD14),
+ GPIO_FN(LCDD15), GPIO_FN(LCDD16), GPIO_FN(LCDD17),
+ GPIO_FN(LCDD18), GPIO_FN(LCDD19), GPIO_FN(LCDD20),
+ GPIO_FN(LCDD21), GPIO_FN(LCDD22), GPIO_FN(LCDD23),
+
+ GPIO_FN(LCDC0_SELECT),
+ GPIO_FN(LCDC1_SELECT),
+
+ /* IRDA */
+ GPIO_FN(IRDA_OUT), GPIO_FN(IRDA_IN), GPIO_FN(IRDA_FIRSEL),
+ GPIO_FN(IROUT_139), GPIO_FN(IROUT_140),
+
+ /* TSIF1 */
+ GPIO_FN(TS0_1SELECT),
+ GPIO_FN(TS0_2SELECT),
+ GPIO_FN(TS1_1SELECT),
+ GPIO_FN(TS1_2SELECT),
+
+ GPIO_FN(TS_SPSYNC1), GPIO_FN(TS_SDAT1),
+ GPIO_FN(TS_SDEN1), GPIO_FN(TS_SCK1),
+
+ /* TSIF2 */
+ GPIO_FN(TS_SPSYNC2), GPIO_FN(TS_SDAT2),
+ GPIO_FN(TS_SDEN2), GPIO_FN(TS_SCK2),
+
+ /* HDMI */
+ GPIO_FN(HDMI_HPD), GPIO_FN(HDMI_CEC),
+
+ /* SDHI0 */
+ GPIO_FN(SDHICLK0), GPIO_FN(SDHICD0), GPIO_FN(SDHICMD0),
+ GPIO_FN(SDHIWP0), GPIO_FN(SDHID0_0), GPIO_FN(SDHID0_1),
+ GPIO_FN(SDHID0_2), GPIO_FN(SDHID0_3),
+
+ /* SDHI1 */
+ GPIO_FN(SDHICLK1), GPIO_FN(SDHICMD1), GPIO_FN(SDHID1_0),
+ GPIO_FN(SDHID1_1), GPIO_FN(SDHID1_2), GPIO_FN(SDHID1_3),
+
+ /* SDHI2 */
+ GPIO_FN(SDHICLK2), GPIO_FN(SDHICMD2), GPIO_FN(SDHID2_0),
+ GPIO_FN(SDHID2_1), GPIO_FN(SDHID2_2), GPIO_FN(SDHID2_3),
+
+ /* SDENC */
+ GPIO_FN(SDENC_CPG),
+ GPIO_FN(SDENC_DV_CLKI),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ PORTCR(0, 0xE6051000), /* PORT0CR */
+ PORTCR(1, 0xE6051001), /* PORT1CR */
+ PORTCR(2, 0xE6051002), /* PORT2CR */
+ PORTCR(3, 0xE6051003), /* PORT3CR */
+ PORTCR(4, 0xE6051004), /* PORT4CR */
+ PORTCR(5, 0xE6051005), /* PORT5CR */
+ PORTCR(6, 0xE6051006), /* PORT6CR */
+ PORTCR(7, 0xE6051007), /* PORT7CR */
+ PORTCR(8, 0xE6051008), /* PORT8CR */
+ PORTCR(9, 0xE6051009), /* PORT9CR */
+ PORTCR(10, 0xE605100A), /* PORT10CR */
+ PORTCR(11, 0xE605100B), /* PORT11CR */
+ PORTCR(12, 0xE605100C), /* PORT12CR */
+ PORTCR(13, 0xE605100D), /* PORT13CR */
+ PORTCR(14, 0xE605100E), /* PORT14CR */
+ PORTCR(15, 0xE605100F), /* PORT15CR */
+ PORTCR(16, 0xE6051010), /* PORT16CR */
+ PORTCR(17, 0xE6051011), /* PORT17CR */
+ PORTCR(18, 0xE6051012), /* PORT18CR */
+ PORTCR(19, 0xE6051013), /* PORT19CR */
+ PORTCR(20, 0xE6051014), /* PORT20CR */
+ PORTCR(21, 0xE6051015), /* PORT21CR */
+ PORTCR(22, 0xE6051016), /* PORT22CR */
+ PORTCR(23, 0xE6051017), /* PORT23CR */
+ PORTCR(24, 0xE6051018), /* PORT24CR */
+ PORTCR(25, 0xE6051019), /* PORT25CR */
+ PORTCR(26, 0xE605101A), /* PORT26CR */
+ PORTCR(27, 0xE605101B), /* PORT27CR */
+ PORTCR(28, 0xE605101C), /* PORT28CR */
+ PORTCR(29, 0xE605101D), /* PORT29CR */
+ PORTCR(30, 0xE605101E), /* PORT30CR */
+ PORTCR(31, 0xE605101F), /* PORT31CR */
+ PORTCR(32, 0xE6051020), /* PORT32CR */
+ PORTCR(33, 0xE6051021), /* PORT33CR */
+ PORTCR(34, 0xE6051022), /* PORT34CR */
+ PORTCR(35, 0xE6051023), /* PORT35CR */
+ PORTCR(36, 0xE6051024), /* PORT36CR */
+ PORTCR(37, 0xE6051025), /* PORT37CR */
+ PORTCR(38, 0xE6051026), /* PORT38CR */
+ PORTCR(39, 0xE6051027), /* PORT39CR */
+ PORTCR(40, 0xE6051028), /* PORT40CR */
+ PORTCR(41, 0xE6051029), /* PORT41CR */
+ PORTCR(42, 0xE605102A), /* PORT42CR */
+ PORTCR(43, 0xE605102B), /* PORT43CR */
+ PORTCR(44, 0xE605102C), /* PORT44CR */
+ PORTCR(45, 0xE605102D), /* PORT45CR */
+ PORTCR(46, 0xE605202E), /* PORT46CR */
+ PORTCR(47, 0xE605202F), /* PORT47CR */
+ PORTCR(48, 0xE6052030), /* PORT48CR */
+ PORTCR(49, 0xE6052031), /* PORT49CR */
+ PORTCR(50, 0xE6052032), /* PORT50CR */
+ PORTCR(51, 0xE6052033), /* PORT51CR */
+ PORTCR(52, 0xE6052034), /* PORT52CR */
+ PORTCR(53, 0xE6052035), /* PORT53CR */
+ PORTCR(54, 0xE6052036), /* PORT54CR */
+ PORTCR(55, 0xE6052037), /* PORT55CR */
+ PORTCR(56, 0xE6052038), /* PORT56CR */
+ PORTCR(57, 0xE6052039), /* PORT57CR */
+ PORTCR(58, 0xE605203A), /* PORT58CR */
+ PORTCR(59, 0xE605203B), /* PORT59CR */
+ PORTCR(60, 0xE605203C), /* PORT60CR */
+ PORTCR(61, 0xE605203D), /* PORT61CR */
+ PORTCR(62, 0xE605203E), /* PORT62CR */
+ PORTCR(63, 0xE605203F), /* PORT63CR */
+ PORTCR(64, 0xE6052040), /* PORT64CR */
+ PORTCR(65, 0xE6052041), /* PORT65CR */
+ PORTCR(66, 0xE6052042), /* PORT66CR */
+ PORTCR(67, 0xE6052043), /* PORT67CR */
+ PORTCR(68, 0xE6052044), /* PORT68CR */
+ PORTCR(69, 0xE6052045), /* PORT69CR */
+ PORTCR(70, 0xE6052046), /* PORT70CR */
+ PORTCR(71, 0xE6052047), /* PORT71CR */
+ PORTCR(72, 0xE6052048), /* PORT72CR */
+ PORTCR(73, 0xE6052049), /* PORT73CR */
+ PORTCR(74, 0xE605204A), /* PORT74CR */
+ PORTCR(75, 0xE605204B), /* PORT75CR */
+ PORTCR(76, 0xE605004C), /* PORT76CR */
+ PORTCR(77, 0xE605004D), /* PORT77CR */
+ PORTCR(78, 0xE605004E), /* PORT78CR */
+ PORTCR(79, 0xE605004F), /* PORT79CR */
+ PORTCR(80, 0xE6050050), /* PORT80CR */
+ PORTCR(81, 0xE6050051), /* PORT81CR */
+ PORTCR(82, 0xE6050052), /* PORT82CR */
+ PORTCR(83, 0xE6050053), /* PORT83CR */
+ PORTCR(84, 0xE6050054), /* PORT84CR */
+ PORTCR(85, 0xE6050055), /* PORT85CR */
+ PORTCR(86, 0xE6050056), /* PORT86CR */
+ PORTCR(87, 0xE6050057), /* PORT87CR */
+ PORTCR(88, 0xE6050058), /* PORT88CR */
+ PORTCR(89, 0xE6050059), /* PORT89CR */
+ PORTCR(90, 0xE605005A), /* PORT90CR */
+ PORTCR(91, 0xE605005B), /* PORT91CR */
+ PORTCR(92, 0xE605005C), /* PORT92CR */
+ PORTCR(93, 0xE605005D), /* PORT93CR */
+ PORTCR(94, 0xE605005E), /* PORT94CR */
+ PORTCR(95, 0xE605005F), /* PORT95CR */
+ PORTCR(96, 0xE6050060), /* PORT96CR */
+ PORTCR(97, 0xE6050061), /* PORT97CR */
+ PORTCR(98, 0xE6050062), /* PORT98CR */
+ PORTCR(99, 0xE6050063), /* PORT99CR */
+ PORTCR(100, 0xE6053064), /* PORT100CR */
+ PORTCR(101, 0xE6053065), /* PORT101CR */
+ PORTCR(102, 0xE6053066), /* PORT102CR */
+ PORTCR(103, 0xE6053067), /* PORT103CR */
+ PORTCR(104, 0xE6053068), /* PORT104CR */
+ PORTCR(105, 0xE6053069), /* PORT105CR */
+ PORTCR(106, 0xE605306A), /* PORT106CR */
+ PORTCR(107, 0xE605306B), /* PORT107CR */
+ PORTCR(108, 0xE605306C), /* PORT108CR */
+ PORTCR(109, 0xE605306D), /* PORT109CR */
+ PORTCR(110, 0xE605306E), /* PORT110CR */
+ PORTCR(111, 0xE605306F), /* PORT111CR */
+ PORTCR(112, 0xE6053070), /* PORT112CR */
+ PORTCR(113, 0xE6053071), /* PORT113CR */
+ PORTCR(114, 0xE6053072), /* PORT114CR */
+ PORTCR(115, 0xE6053073), /* PORT115CR */
+ PORTCR(116, 0xE6053074), /* PORT116CR */
+ PORTCR(117, 0xE6053075), /* PORT117CR */
+ PORTCR(118, 0xE6053076), /* PORT118CR */
+ PORTCR(119, 0xE6053077), /* PORT119CR */
+ PORTCR(120, 0xE6053078), /* PORT120CR */
+ PORTCR(121, 0xE6050079), /* PORT121CR */
+ PORTCR(122, 0xE605007A), /* PORT122CR */
+ PORTCR(123, 0xE605007B), /* PORT123CR */
+ PORTCR(124, 0xE605007C), /* PORT124CR */
+ PORTCR(125, 0xE605007D), /* PORT125CR */
+ PORTCR(126, 0xE605007E), /* PORT126CR */
+ PORTCR(127, 0xE605007F), /* PORT127CR */
+ PORTCR(128, 0xE6050080), /* PORT128CR */
+ PORTCR(129, 0xE6050081), /* PORT129CR */
+ PORTCR(130, 0xE6050082), /* PORT130CR */
+ PORTCR(131, 0xE6050083), /* PORT131CR */
+ PORTCR(132, 0xE6050084), /* PORT132CR */
+ PORTCR(133, 0xE6050085), /* PORT133CR */
+ PORTCR(134, 0xE6050086), /* PORT134CR */
+ PORTCR(135, 0xE6050087), /* PORT135CR */
+ PORTCR(136, 0xE6050088), /* PORT136CR */
+ PORTCR(137, 0xE6050089), /* PORT137CR */
+ PORTCR(138, 0xE605008A), /* PORT138CR */
+ PORTCR(139, 0xE605008B), /* PORT139CR */
+ PORTCR(140, 0xE605008C), /* PORT140CR */
+ PORTCR(141, 0xE605008D), /* PORT141CR */
+ PORTCR(142, 0xE605008E), /* PORT142CR */
+ PORTCR(143, 0xE605008F), /* PORT143CR */
+ PORTCR(144, 0xE6050090), /* PORT144CR */
+ PORTCR(145, 0xE6050091), /* PORT145CR */
+ PORTCR(146, 0xE6050092), /* PORT146CR */
+ PORTCR(147, 0xE6050093), /* PORT147CR */
+ PORTCR(148, 0xE6050094), /* PORT148CR */
+ PORTCR(149, 0xE6050095), /* PORT149CR */
+ PORTCR(150, 0xE6050096), /* PORT150CR */
+ PORTCR(151, 0xE6050097), /* PORT151CR */
+ PORTCR(152, 0xE6053098), /* PORT152CR */
+ PORTCR(153, 0xE6053099), /* PORT153CR */
+ PORTCR(154, 0xE605309A), /* PORT154CR */
+ PORTCR(155, 0xE605309B), /* PORT155CR */
+ PORTCR(156, 0xE605009C), /* PORT156CR */
+ PORTCR(157, 0xE605009D), /* PORT157CR */
+ PORTCR(158, 0xE605009E), /* PORT158CR */
+ PORTCR(159, 0xE605009F), /* PORT159CR */
+ PORTCR(160, 0xE60500A0), /* PORT160CR */
+ PORTCR(161, 0xE60500A1), /* PORT161CR */
+ PORTCR(162, 0xE60500A2), /* PORT162CR */
+ PORTCR(163, 0xE60500A3), /* PORT163CR */
+ PORTCR(164, 0xE60500A4), /* PORT164CR */
+ PORTCR(165, 0xE60500A5), /* PORT165CR */
+ PORTCR(166, 0xE60500A6), /* PORT166CR */
+ PORTCR(167, 0xE60520A7), /* PORT167CR */
+ PORTCR(168, 0xE60520A8), /* PORT168CR */
+ PORTCR(169, 0xE60520A9), /* PORT169CR */
+ PORTCR(170, 0xE60520AA), /* PORT170CR */
+ PORTCR(171, 0xE60520AB), /* PORT171CR */
+ PORTCR(172, 0xE60520AC), /* PORT172CR */
+ PORTCR(173, 0xE60520AD), /* PORT173CR */
+ PORTCR(174, 0xE60520AE), /* PORT174CR */
+ PORTCR(175, 0xE60520AF), /* PORT175CR */
+ PORTCR(176, 0xE60520B0), /* PORT176CR */
+ PORTCR(177, 0xE60520B1), /* PORT177CR */
+ PORTCR(178, 0xE60520B2), /* PORT178CR */
+ PORTCR(179, 0xE60520B3), /* PORT179CR */
+ PORTCR(180, 0xE60520B4), /* PORT180CR */
+ PORTCR(181, 0xE60520B5), /* PORT181CR */
+ PORTCR(182, 0xE60520B6), /* PORT182CR */
+ PORTCR(183, 0xE60520B7), /* PORT183CR */
+ PORTCR(184, 0xE60520B8), /* PORT184CR */
+ PORTCR(185, 0xE60520B9), /* PORT185CR */
+ PORTCR(186, 0xE60520BA), /* PORT186CR */
+ PORTCR(187, 0xE60520BB), /* PORT187CR */
+ PORTCR(188, 0xE60520BC), /* PORT188CR */
+ PORTCR(189, 0xE60520BD), /* PORT189CR */
+ PORTCR(190, 0xE60520BE), /* PORT190CR */
+
+ { PINMUX_CFG_REG("MSEL1CR", 0xE605800C, 32, 1) {
+ MSEL1CR_31_0, MSEL1CR_31_1,
+ MSEL1CR_30_0, MSEL1CR_30_1,
+ MSEL1CR_29_0, MSEL1CR_29_1,
+ MSEL1CR_28_0, MSEL1CR_28_1,
+ MSEL1CR_27_0, MSEL1CR_27_1,
+ MSEL1CR_26_0, MSEL1CR_26_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MSEL1CR_16_0, MSEL1CR_16_1,
+ MSEL1CR_15_0, MSEL1CR_15_1,
+ MSEL1CR_14_0, MSEL1CR_14_1,
+ MSEL1CR_13_0, MSEL1CR_13_1,
+ MSEL1CR_12_0, MSEL1CR_12_1,
+ 0, 0, 0, 0,
+ MSEL1CR_9_0, MSEL1CR_9_1,
+ MSEL1CR_8_0, MSEL1CR_8_1,
+ MSEL1CR_7_0, MSEL1CR_7_1,
+ MSEL1CR_6_0, MSEL1CR_6_1,
+ 0, 0,
+ MSEL1CR_4_0, MSEL1CR_4_1,
+ MSEL1CR_3_0, MSEL1CR_3_1,
+ MSEL1CR_2_0, MSEL1CR_2_1,
+ 0, 0,
+ MSEL1CR_0_0, MSEL1CR_0_1,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL3CR", 0xE6058020, 32, 1) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ MSEL3CR_27_0, MSEL3CR_27_1,
+ MSEL3CR_26_0, MSEL3CR_26_1,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ MSEL3CR_21_0, MSEL3CR_21_1,
+ MSEL3CR_20_0, MSEL3CR_20_1,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ MSEL3CR_15_0, MSEL3CR_15_1,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0,
+ MSEL3CR_9_0, MSEL3CR_9_1,
+ 0, 0, 0, 0,
+ MSEL3CR_6_0, MSEL3CR_6_1,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL4CR", 0xE6058024, 32, 1) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ MSEL4CR_19_0, MSEL4CR_19_1,
+ MSEL4CR_18_0, MSEL4CR_18_1,
+ MSEL4CR_17_0, MSEL4CR_17_1,
+ MSEL4CR_16_0, MSEL4CR_16_1,
+ MSEL4CR_15_0, MSEL4CR_15_1,
+ MSEL4CR_14_0, MSEL4CR_14_1,
+ 0, 0, 0, 0,
+ 0, 0,
+ MSEL4CR_10_0, MSEL4CR_10_1,
+ 0, 0, 0, 0,
+ 0, 0,
+ MSEL4CR_6_0, MSEL4CR_6_1,
+ 0, 0,
+ MSEL4CR_4_0, MSEL4CR_4_1,
+ 0, 0, 0, 0,
+ MSEL4CR_1_0, MSEL4CR_1_1,
+ 0, 0,
+ }
+ },
+ { },
+};
+
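+/*
+ * Each PINMUX_DATA_REG entry below lists the register's bits from bit 31
+ * down to bit 0; zero entries mark bits with no port assigned.
+ */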
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PORTL095_064DR", 0xE6054008, 32) {
+ PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
+ PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
+ PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA,
+ PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA,
+ PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ }
+ },
+ { PINMUX_DATA_REG("PORTL127_096DR", 0xE605400C, 32) {
+ PORT127_DATA, PORT126_DATA, PORT125_DATA, PORT124_DATA,
+ PORT123_DATA, PORT122_DATA, PORT121_DATA, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTL159_128DR", 0xE6054010, 32) {
+ PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA,
+ 0, 0, 0, 0,
+ PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA,
+ PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA,
+ PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA,
+ PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA,
+ PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA,
+ PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTL191_160DR", 0xE6054014, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, PORT166_DATA, PORT165_DATA, PORT164_DATA,
+ PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTD031_000DR", 0xE6055000, 32) {
+ PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
+ PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
+ PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
+ PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA,
+ PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
+ PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
+ PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
+ PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTD063_032DR", 0xE6055004, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, PORT45_DATA, PORT44_DATA,
+ PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA,
+ PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
+ PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTR063_032DR", 0xE6056004, 32) {
+ PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA,
+ PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA,
+ PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA,
+ PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA,
+ PORT47_DATA, PORT46_DATA, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ }
+ },
+ { PINMUX_DATA_REG("PORTR095_064DR", 0xE6056008, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
+ PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
+ PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA,
+ }
+ },
+ { PINMUX_DATA_REG("PORTR191_160DR", 0xE6056014, 32) {
+ 0, PORT190_DATA, PORT189_DATA, PORT188_DATA,
+ PORT187_DATA, PORT186_DATA, PORT185_DATA, PORT184_DATA,
+ PORT183_DATA, PORT182_DATA, PORT181_DATA, PORT180_DATA,
+ PORT179_DATA, PORT178_DATA, PORT177_DATA, PORT176_DATA,
+ PORT175_DATA, PORT174_DATA, PORT173_DATA, PORT172_DATA,
+ PORT171_DATA, PORT170_DATA, PORT169_DATA, PORT168_DATA,
+ PORT167_DATA, 0, 0, 0,
+ 0, 0, 0, 0,
+ }
+ },
+ { PINMUX_DATA_REG("PORTU127_096DR", 0xE605700C, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, PORT120_DATA,
+ PORT119_DATA, PORT118_DATA, PORT117_DATA, PORT116_DATA,
+ PORT115_DATA, PORT114_DATA, PORT113_DATA, PORT112_DATA,
+ PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
+ PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
+ PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
+ 0, 0, 0, 0,
+ }
+ },
+ { PINMUX_DATA_REG("PORTU159_128DR", 0xE6057010, 32) {
+ 0, 0, 0, 0,
+ PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ }
+ },
+ { },
+};
+
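+/*
+ * External IRQ pins: IRQ0-15 use interrupt event codes starting at 0x0200
+ * and IRQ16-31 start at 0x3200, spaced 0x20 apart; evt2irq() converts the
+ * event code to a Linux IRQ number.
+ */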
+#define EXT_IRQ16L(n) evt2irq(0x200 + ((n) << 5))
+#define EXT_IRQ16H(n) evt2irq(0x3200 + (((n) - 16) << 5))
+static struct pinmux_irq pinmux_irqs[] = {
+ PINMUX_IRQ(EXT_IRQ16L(0), PORT6_FN0, PORT162_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(1), PORT12_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(2), PORT4_FN0, PORT5_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(3), PORT8_FN0, PORT16_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(4), PORT17_FN0, PORT163_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(5), PORT18_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(6), PORT39_FN0, PORT164_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(7), PORT40_FN0, PORT167_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(8), PORT41_FN0, PORT168_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(9), PORT42_FN0, PORT169_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(10), PORT65_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(11), PORT67_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(12), PORT80_FN0, PORT137_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(13), PORT81_FN0, PORT145_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(14), PORT82_FN0, PORT146_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(15), PORT83_FN0, PORT147_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(16), PORT84_FN0, PORT170_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(17), PORT85_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(18), PORT86_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(19), PORT87_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(20), PORT92_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(21), PORT93_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(22), PORT94_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(23), PORT95_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(24), PORT112_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(25), PORT119_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(26), PORT121_FN0, PORT172_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(27), PORT122_FN0, PORT180_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(28), PORT123_FN0, PORT181_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(29), PORT129_FN0, PORT182_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(30), PORT130_FN0, PORT183_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(31), PORT138_FN0, PORT184_FN0),
+};
+
+struct sh_pfc_soc_info sh7372_pinmux_info = {
+ .name = "sh7372_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PORT0,
+ .last_gpio = GPIO_FN_SDENC_DV_CLKI,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+
+ .gpio_irq = pinmux_irqs,
+ .gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
new file mode 100644
index 000000000000..709008e94124
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -0,0 +1,2798 @@
+/*
+ * sh73a0 processor support - PFC hardware block
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Copyright (C) 2010 NISHIMOTO Hiroki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <mach/sh73a0.h>
+#include <mach/irqs.h>
+
+#include "sh_pfc.h"
+
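+/*
+ * CPU_ALL_PORT() expands over the SoC's non-contiguous GPIO port ranges:
+ * 0-118, 128-164, 192-282 and 288-309.
+ */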
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
+ PORT_10(fn, pfx##2, sfx), PORT_10(fn, pfx##3, sfx), \
+ PORT_10(fn, pfx##4, sfx), PORT_10(fn, pfx##5, sfx), \
+ PORT_10(fn, pfx##6, sfx), PORT_10(fn, pfx##7, sfx), \
+ PORT_10(fn, pfx##8, sfx), PORT_10(fn, pfx##9, sfx), \
+ PORT_10(fn, pfx##10, sfx), \
+ PORT_1(fn, pfx##110, sfx), PORT_1(fn, pfx##111, sfx), \
+ PORT_1(fn, pfx##112, sfx), PORT_1(fn, pfx##113, sfx), \
+ PORT_1(fn, pfx##114, sfx), PORT_1(fn, pfx##115, sfx), \
+ PORT_1(fn, pfx##116, sfx), PORT_1(fn, pfx##117, sfx), \
+ PORT_1(fn, pfx##118, sfx), \
+ PORT_1(fn, pfx##128, sfx), PORT_1(fn, pfx##129, sfx), \
+ PORT_10(fn, pfx##13, sfx), PORT_10(fn, pfx##14, sfx), \
+ PORT_10(fn, pfx##15, sfx), \
+ PORT_1(fn, pfx##160, sfx), PORT_1(fn, pfx##161, sfx), \
+ PORT_1(fn, pfx##162, sfx), PORT_1(fn, pfx##163, sfx), \
+ PORT_1(fn, pfx##164, sfx), \
+ PORT_1(fn, pfx##192, sfx), PORT_1(fn, pfx##193, sfx), \
+ PORT_1(fn, pfx##194, sfx), PORT_1(fn, pfx##195, sfx), \
+ PORT_1(fn, pfx##196, sfx), PORT_1(fn, pfx##197, sfx), \
+ PORT_1(fn, pfx##198, sfx), PORT_1(fn, pfx##199, sfx), \
+ PORT_10(fn, pfx##20, sfx), PORT_10(fn, pfx##21, sfx), \
+ PORT_10(fn, pfx##22, sfx), PORT_10(fn, pfx##23, sfx), \
+ PORT_10(fn, pfx##24, sfx), PORT_10(fn, pfx##25, sfx), \
+ PORT_10(fn, pfx##26, sfx), PORT_10(fn, pfx##27, sfx), \
+ PORT_1(fn, pfx##280, sfx), PORT_1(fn, pfx##281, sfx), \
+ PORT_1(fn, pfx##282, sfx), \
+ PORT_1(fn, pfx##288, sfx), PORT_1(fn, pfx##289, sfx), \
+ PORT_10(fn, pfx##29, sfx), PORT_10(fn, pfx##30, sfx)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PORT_ALL(DATA), /* PORT0_DATA -> PORT309_DATA */
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PORT_ALL(IN), /* PORT0_IN -> PORT309_IN */
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PORT_ALL(IN_PU), /* PORT0_IN_PU -> PORT309_IN_PU */
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_INPUT_PULLDOWN_BEGIN,
+ PORT_ALL(IN_PD), /* PORT0_IN_PD -> PORT309_IN_PD */
+ PINMUX_INPUT_PULLDOWN_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PORT_ALL(OUT), /* PORT0_OUT -> PORT309_OUT */
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT309_FN_IN */
+ PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT309_FN_OUT */
+ PORT_ALL(FN0), /* PORT0_FN0 -> PORT309_FN0 */
+ PORT_ALL(FN1), /* PORT0_FN1 -> PORT309_FN1 */
+ PORT_ALL(FN2), /* PORT0_FN2 -> PORT309_FN2 */
+ PORT_ALL(FN3), /* PORT0_FN3 -> PORT309_FN3 */
+ PORT_ALL(FN4), /* PORT0_FN4 -> PORT309_FN4 */
+ PORT_ALL(FN5), /* PORT0_FN5 -> PORT309_FN5 */
+ PORT_ALL(FN6), /* PORT0_FN6 -> PORT309_FN6 */
+ PORT_ALL(FN7), /* PORT0_FN7 -> PORT309_FN7 */
+
+ MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1,
+ MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1,
+ MSEL2CR_MSEL17_0, MSEL2CR_MSEL17_1,
+ MSEL2CR_MSEL16_0, MSEL2CR_MSEL16_1,
+ MSEL2CR_MSEL14_0, MSEL2CR_MSEL14_1,
+ MSEL2CR_MSEL13_0, MSEL2CR_MSEL13_1,
+ MSEL2CR_MSEL12_0, MSEL2CR_MSEL12_1,
+ MSEL2CR_MSEL11_0, MSEL2CR_MSEL11_1,
+ MSEL2CR_MSEL10_0, MSEL2CR_MSEL10_1,
+ MSEL2CR_MSEL9_0, MSEL2CR_MSEL9_1,
+ MSEL2CR_MSEL8_0, MSEL2CR_MSEL8_1,
+ MSEL2CR_MSEL7_0, MSEL2CR_MSEL7_1,
+ MSEL2CR_MSEL6_0, MSEL2CR_MSEL6_1,
+ MSEL2CR_MSEL4_0, MSEL2CR_MSEL4_1,
+ MSEL2CR_MSEL5_0, MSEL2CR_MSEL5_1,
+ MSEL2CR_MSEL3_0, MSEL2CR_MSEL3_1,
+ MSEL2CR_MSEL2_0, MSEL2CR_MSEL2_1,
+ MSEL2CR_MSEL1_0, MSEL2CR_MSEL1_1,
+ MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1,
+ MSEL3CR_MSEL28_0, MSEL3CR_MSEL28_1,
+ MSEL3CR_MSEL15_0, MSEL3CR_MSEL15_1,
+ MSEL3CR_MSEL11_0, MSEL3CR_MSEL11_1,
+ MSEL3CR_MSEL9_0, MSEL3CR_MSEL9_1,
+ MSEL3CR_MSEL6_0, MSEL3CR_MSEL6_1,
+ MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1,
+ MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1,
+ MSEL4CR_MSEL27_0, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0, MSEL4CR_MSEL26_1,
+ MSEL4CR_MSEL22_0, MSEL4CR_MSEL22_1,
+ MSEL4CR_MSEL21_0, MSEL4CR_MSEL21_1,
+ MSEL4CR_MSEL20_0, MSEL4CR_MSEL20_1,
+ MSEL4CR_MSEL19_0, MSEL4CR_MSEL19_1,
+ MSEL4CR_MSEL15_0, MSEL4CR_MSEL15_1,
+ MSEL4CR_MSEL13_0, MSEL4CR_MSEL13_1,
+ MSEL4CR_MSEL12_0, MSEL4CR_MSEL12_1,
+ MSEL4CR_MSEL11_0, MSEL4CR_MSEL11_1,
+ MSEL4CR_MSEL10_0, MSEL4CR_MSEL10_1,
+ MSEL4CR_MSEL9_0, MSEL4CR_MSEL9_1,
+ MSEL4CR_MSEL8_0, MSEL4CR_MSEL8_1,
+ MSEL4CR_MSEL7_0, MSEL4CR_MSEL7_1,
+ MSEL4CR_MSEL4_0, MSEL4CR_MSEL4_1,
+ MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /* Hardware manual Table 25-1 (Function 0-7) */
+ VBUS_0_MARK,
+ GPI0_MARK,
+ GPI1_MARK,
+ GPI2_MARK,
+ GPI3_MARK,
+ GPI4_MARK,
+ GPI5_MARK,
+ GPI6_MARK,
+ GPI7_MARK,
+ SCIFA7_RXD_MARK,
+ SCIFA7_CTS__MARK,
+ GPO7_MARK, MFG0_OUT2_MARK,
+ GPO6_MARK, MFG1_OUT2_MARK,
+ GPO5_MARK, SCIFA0_SCK_MARK, FSICOSLDT3_MARK, PORT16_VIO_CKOR_MARK,
+ SCIFA0_TXD_MARK,
+ SCIFA7_TXD_MARK,
+ SCIFA7_RTS__MARK, PORT19_VIO_CKO2_MARK,
+ GPO0_MARK,
+ GPO1_MARK,
+ GPO2_MARK, STATUS0_MARK,
+ GPO3_MARK, STATUS1_MARK,
+ GPO4_MARK, STATUS2_MARK,
+ VINT_MARK,
+ TCKON_MARK,
+ XDVFS1_MARK, PORT27_I2C_SCL2_MARK, PORT27_I2C_SCL3_MARK, \
+ MFG0_OUT1_MARK, PORT27_IROUT_MARK,
+ XDVFS2_MARK, PORT28_I2C_SDA2_MARK, PORT28_I2C_SDA3_MARK, \
+ PORT28_TPU1TO1_MARK,
+ SIM_RST_MARK, PORT29_TPU1TO1_MARK,
+ SIM_CLK_MARK, PORT30_VIO_CKOR_MARK,
+ SIM_D_MARK, PORT31_IROUT_MARK,
+ SCIFA4_TXD_MARK,
+ SCIFA4_RXD_MARK, XWUP_MARK,
+ SCIFA4_RTS__MARK,
+ SCIFA4_CTS__MARK,
+ FSIBOBT_MARK, FSIBIBT_MARK,
+ FSIBOLR_MARK, FSIBILR_MARK,
+ FSIBOSLD_MARK,
+ FSIBISLD_MARK,
+ VACK_MARK,
+ XTAL1L_MARK,
+ SCIFA0_RTS__MARK, FSICOSLDT2_MARK,
+ SCIFA0_RXD_MARK,
+ SCIFA0_CTS__MARK, FSICOSLDT1_MARK,
+ FSICOBT_MARK, FSICIBT_MARK, FSIDOBT_MARK, FSIDIBT_MARK,
+ FSICOLR_MARK, FSICILR_MARK, FSIDOLR_MARK, FSIDILR_MARK,
+ FSICOSLD_MARK, PORT47_FSICSPDIF_MARK,
+ FSICISLD_MARK, FSIDISLD_MARK,
+ FSIACK_MARK, PORT49_IRDA_OUT_MARK, PORT49_IROUT_MARK, FSIAOMC_MARK,
+ FSIAOLR_MARK, BBIF2_TSYNC2_MARK, TPU2TO2_MARK, FSIAILR_MARK,
+
+ FSIAOBT_MARK, BBIF2_TSCK2_MARK, TPU2TO3_MARK, FSIAIBT_MARK,
+ FSIAOSLD_MARK, BBIF2_TXD2_MARK,
+ FSIASPDIF_MARK, PORT53_IRDA_IN_MARK, TPU3TO3_MARK, FSIBSPDIF_MARK, \
+ PORT53_FSICSPDIF_MARK,
+ FSIBCK_MARK, PORT54_IRDA_FIRSEL_MARK, TPU3TO2_MARK, FSIBOMC_MARK, \
+ FSICCK_MARK, FSICOMC_MARK,
+ FSIAISLD_MARK, TPU0TO0_MARK,
+ A0_MARK, BS__MARK,
+ A12_MARK, PORT58_KEYOUT7_MARK, TPU4TO2_MARK,
+ A13_MARK, PORT59_KEYOUT6_MARK, TPU0TO1_MARK,
+ A14_MARK, KEYOUT5_MARK,
+ A15_MARK, KEYOUT4_MARK,
+ A16_MARK, KEYOUT3_MARK, MSIOF0_SS1_MARK,
+ A17_MARK, KEYOUT2_MARK, MSIOF0_TSYNC_MARK,
+ A18_MARK, KEYOUT1_MARK, MSIOF0_TSCK_MARK,
+ A19_MARK, KEYOUT0_MARK, MSIOF0_TXD_MARK,
+ A20_MARK, KEYIN0_MARK, MSIOF0_RSCK_MARK,
+ A21_MARK, KEYIN1_MARK, MSIOF0_RSYNC_MARK,
+ A22_MARK, KEYIN2_MARK, MSIOF0_MCK0_MARK,
+ A23_MARK, KEYIN3_MARK, MSIOF0_MCK1_MARK,
+ A24_MARK, KEYIN4_MARK, MSIOF0_RXD_MARK,
+ A25_MARK, KEYIN5_MARK, MSIOF0_SS2_MARK,
+ A26_MARK, KEYIN6_MARK,
+ KEYIN7_MARK,
+ D0_NAF0_MARK,
+ D1_NAF1_MARK,
+ D2_NAF2_MARK,
+ D3_NAF3_MARK,
+ D4_NAF4_MARK,
+ D5_NAF5_MARK,
+ D6_NAF6_MARK,
+ D7_NAF7_MARK,
+ D8_NAF8_MARK,
+ D9_NAF9_MARK,
+ D10_NAF10_MARK,
+ D11_NAF11_MARK,
+ D12_NAF12_MARK,
+ D13_NAF13_MARK,
+ D14_NAF14_MARK,
+ D15_NAF15_MARK,
+ CS4__MARK,
+ CS5A__MARK, PORT91_RDWR_MARK,
+ CS5B__MARK, FCE1__MARK,
+ CS6B__MARK, DACK0_MARK,
+ FCE0__MARK, CS6A__MARK,
+ WAIT__MARK, DREQ0_MARK,
+ RD__FSC_MARK,
+ WE0__FWE_MARK, RDWR_FWE_MARK,
+ WE1__MARK,
+ FRB_MARK,
+ CKO_MARK,
+ NBRSTOUT__MARK,
+ NBRST__MARK,
+ BBIF2_TXD_MARK,
+ BBIF2_RXD_MARK,
+ BBIF2_SYNC_MARK,
+ BBIF2_SCK_MARK,
+ SCIFA3_CTS__MARK, MFG3_IN2_MARK,
+ SCIFA3_RXD_MARK, MFG3_IN1_MARK,
+ BBIF1_SS2_MARK, SCIFA3_RTS__MARK, MFG3_OUT1_MARK,
+ SCIFA3_TXD_MARK,
+ HSI_RX_DATA_MARK, BBIF1_RXD_MARK,
+ HSI_TX_WAKE_MARK, BBIF1_TSCK_MARK,
+ HSI_TX_DATA_MARK, BBIF1_TSYNC_MARK,
+ HSI_TX_READY_MARK, BBIF1_TXD_MARK,
+ HSI_RX_READY_MARK, BBIF1_RSCK_MARK, PORT115_I2C_SCL2_MARK, \
+ PORT115_I2C_SCL3_MARK,
+ HSI_RX_WAKE_MARK, BBIF1_RSYNC_MARK, PORT116_I2C_SDA2_MARK, \
+ PORT116_I2C_SDA3_MARK,
+ HSI_RX_FLAG_MARK, BBIF1_SS1_MARK, BBIF1_FLOW_MARK,
+ HSI_TX_FLAG_MARK,
+ VIO_VD_MARK, PORT128_LCD2VSYN_MARK, VIO2_VD_MARK, LCD2D0_MARK,
+
+ VIO_HD_MARK, PORT129_LCD2HSYN_MARK, PORT129_LCD2CS__MARK, \
+ VIO2_HD_MARK, LCD2D1_MARK,
+ VIO_D0_MARK, PORT130_MSIOF2_RXD_MARK, LCD2D10_MARK,
+ VIO_D1_MARK, PORT131_KEYOUT6_MARK, PORT131_MSIOF2_SS1_MARK, \
+ PORT131_KEYOUT11_MARK, LCD2D11_MARK,
+ VIO_D2_MARK, PORT132_KEYOUT7_MARK, PORT132_MSIOF2_SS2_MARK, \
+ PORT132_KEYOUT10_MARK, LCD2D12_MARK,
+ VIO_D3_MARK, MSIOF2_TSYNC_MARK, LCD2D13_MARK,
+ VIO_D4_MARK, MSIOF2_TXD_MARK, LCD2D14_MARK,
+ VIO_D5_MARK, MSIOF2_TSCK_MARK, LCD2D15_MARK,
+ VIO_D6_MARK, PORT136_KEYOUT8_MARK, LCD2D16_MARK,
+ VIO_D7_MARK, PORT137_KEYOUT9_MARK, LCD2D17_MARK,
+ VIO_D8_MARK, PORT138_KEYOUT8_MARK, VIO2_D0_MARK, LCD2D6_MARK,
+ VIO_D9_MARK, PORT139_KEYOUT9_MARK, VIO2_D1_MARK, LCD2D7_MARK,
+ VIO_D10_MARK, TPU0TO2_MARK, VIO2_D2_MARK, LCD2D8_MARK,
+ VIO_D11_MARK, TPU0TO3_MARK, VIO2_D3_MARK, LCD2D9_MARK,
+ VIO_D12_MARK, PORT142_KEYOUT10_MARK, VIO2_D4_MARK, LCD2D2_MARK,
+ VIO_D13_MARK, PORT143_KEYOUT11_MARK, PORT143_KEYOUT6_MARK, \
+ VIO2_D5_MARK, LCD2D3_MARK,
+ VIO_D14_MARK, PORT144_KEYOUT7_MARK, VIO2_D6_MARK, LCD2D4_MARK,
+ VIO_D15_MARK, TPU1TO3_MARK, PORT145_LCD2DISP_MARK, \
+ PORT145_LCD2RS_MARK, VIO2_D7_MARK, LCD2D5_MARK,
+ VIO_CLK_MARK, LCD2DCK_MARK, PORT146_LCD2WR__MARK, VIO2_CLK_MARK, \
+ LCD2D18_MARK,
+ VIO_FIELD_MARK, LCD2RD__MARK, VIO2_FIELD_MARK, LCD2D19_MARK,
+ VIO_CKO_MARK,
+ A27_MARK, PORT149_RDWR_MARK, MFG0_IN1_MARK, PORT149_KEYOUT9_MARK,
+ MFG0_IN2_MARK,
+ TS_SPSYNC3_MARK, MSIOF2_RSCK_MARK,
+ TS_SDAT3_MARK, MSIOF2_RSYNC_MARK,
+ TPU1TO2_MARK, TS_SDEN3_MARK, PORT153_MSIOF2_SS1_MARK,
+ SCIFA2_TXD1_MARK, MSIOF2_MCK0_MARK,
+ SCIFA2_RXD1_MARK, MSIOF2_MCK1_MARK,
+ SCIFA2_RTS1__MARK, PORT156_MSIOF2_SS2_MARK,
+ SCIFA2_CTS1__MARK, PORT157_MSIOF2_RXD_MARK,
+ DINT__MARK, SCIFA2_SCK1_MARK, TS_SCK3_MARK,
+ PORT159_SCIFB_SCK_MARK, PORT159_SCIFA5_SCK_MARK, NMI_MARK,
+ PORT160_SCIFB_TXD_MARK, PORT160_SCIFA5_TXD_MARK,
+ PORT161_SCIFB_CTS__MARK, PORT161_SCIFA5_CTS__MARK,
+ PORT162_SCIFB_RXD_MARK, PORT162_SCIFA5_RXD_MARK,
+ PORT163_SCIFB_RTS__MARK, PORT163_SCIFA5_RTS__MARK, TPU3TO0_MARK,
+ LCDD0_MARK,
+ LCDD1_MARK, PORT193_SCIFA5_CTS__MARK, BBIF2_TSYNC1_MARK,
+ LCDD2_MARK, PORT194_SCIFA5_RTS__MARK, BBIF2_TSCK1_MARK,
+ LCDD3_MARK, PORT195_SCIFA5_RXD_MARK, BBIF2_TXD1_MARK,
+ LCDD4_MARK, PORT196_SCIFA5_TXD_MARK,
+ LCDD5_MARK, PORT197_SCIFA5_SCK_MARK, MFG2_OUT2_MARK, TPU2TO1_MARK,
+ LCDD6_MARK,
+ LCDD7_MARK, TPU4TO1_MARK, MFG4_OUT2_MARK,
+ LCDD8_MARK, D16_MARK,
+ LCDD9_MARK, D17_MARK,
+ LCDD10_MARK, D18_MARK,
+ LCDD11_MARK, D19_MARK,
+ LCDD12_MARK, D20_MARK,
+ LCDD13_MARK, D21_MARK,
+ LCDD14_MARK, D22_MARK,
+ LCDD15_MARK, PORT207_MSIOF0L_SS1_MARK, D23_MARK,
+ LCDD16_MARK, PORT208_MSIOF0L_SS2_MARK, D24_MARK,
+ LCDD17_MARK, D25_MARK,
+ LCDD18_MARK, DREQ2_MARK, PORT210_MSIOF0L_SS1_MARK, D26_MARK,
+ LCDD19_MARK, PORT211_MSIOF0L_SS2_MARK, D27_MARK,
+ LCDD20_MARK, TS_SPSYNC1_MARK, MSIOF0L_MCK0_MARK, D28_MARK,
+ LCDD21_MARK, TS_SDAT1_MARK, MSIOF0L_MCK1_MARK, D29_MARK,
+ LCDD22_MARK, TS_SDEN1_MARK, MSIOF0L_RSCK_MARK, D30_MARK,
+ LCDD23_MARK, TS_SCK1_MARK, MSIOF0L_RSYNC_MARK, D31_MARK,
+ LCDDCK_MARK, LCDWR__MARK,
+ LCDRD__MARK, DACK2_MARK, PORT217_LCD2RS_MARK, MSIOF0L_TSYNC_MARK, \
+ VIO2_FIELD3_MARK, PORT217_LCD2DISP_MARK,
+ LCDHSYN_MARK, LCDCS__MARK, LCDCS2__MARK, DACK3_MARK, \
+ PORT218_VIO_CKOR_MARK,
+ LCDDISP_MARK, LCDRS_MARK, PORT219_LCD2WR__MARK, DREQ3_MARK, \
+ MSIOF0L_TSCK_MARK, VIO2_CLK3_MARK, LCD2DCK_2_MARK,
+ LCDVSYN_MARK, LCDVSYN2_MARK,
+ LCDLCLK_MARK, DREQ1_MARK, PORT221_LCD2CS__MARK, PWEN_MARK, \
+ MSIOF0L_RXD_MARK, VIO2_HD3_MARK, PORT221_LCD2HSYN_MARK,
+ LCDDON_MARK, LCDDON2_MARK, DACK1_MARK, OVCN_MARK, MSIOF0L_TXD_MARK, \
+ VIO2_VD3_MARK, PORT222_LCD2VSYN_MARK,
+
+ SCIFA1_TXD_MARK, OVCN2_MARK,
+ EXTLP_MARK, SCIFA1_SCK_MARK, PORT226_VIO_CKO2_MARK,
+ SCIFA1_RTS__MARK, IDIN_MARK,
+ SCIFA1_RXD_MARK,
+ SCIFA1_CTS__MARK, MFG1_IN1_MARK,
+ MSIOF1_TXD_MARK, SCIFA2_TXD2_MARK,
+ MSIOF1_TSYNC_MARK, SCIFA2_CTS2__MARK,
+ MSIOF1_TSCK_MARK, SCIFA2_SCK2_MARK,
+ MSIOF1_RXD_MARK, SCIFA2_RXD2_MARK,
+ MSIOF1_RSCK_MARK, SCIFA2_RTS2__MARK, VIO2_CLK2_MARK, LCD2D20_MARK,
+ MSIOF1_RSYNC_MARK, MFG1_IN2_MARK, VIO2_VD2_MARK, LCD2D21_MARK,
+ MSIOF1_MCK0_MARK, PORT236_I2C_SDA2_MARK,
+ MSIOF1_MCK1_MARK, PORT237_I2C_SCL2_MARK,
+ MSIOF1_SS1_MARK, VIO2_FIELD2_MARK, LCD2D22_MARK,
+ MSIOF1_SS2_MARK, VIO2_HD2_MARK, LCD2D23_MARK,
+ SCIFA6_TXD_MARK,
+ PORT241_IRDA_OUT_MARK, PORT241_IROUT_MARK, MFG4_OUT1_MARK, TPU4TO0_MARK,
+ PORT242_IRDA_IN_MARK, MFG4_IN2_MARK,
+ PORT243_IRDA_FIRSEL_MARK, PORT243_VIO_CKO2_MARK,
+ PORT244_SCIFA5_CTS__MARK, MFG2_IN1_MARK, PORT244_SCIFB_CTS__MARK, \
+ MSIOF2R_RXD_MARK,
+ PORT245_SCIFA5_RTS__MARK, MFG2_IN2_MARK, PORT245_SCIFB_RTS__MARK, \
+ MSIOF2R_TXD_MARK,
+ PORT246_SCIFA5_RXD_MARK, MFG1_OUT1_MARK, PORT246_SCIFB_RXD_MARK, \
+ TPU1TO0_MARK,
+ PORT247_SCIFA5_TXD_MARK, MFG3_OUT2_MARK, PORT247_SCIFB_TXD_MARK, \
+ TPU3TO1_MARK,
+ PORT248_SCIFA5_SCK_MARK, MFG2_OUT1_MARK, PORT248_SCIFB_SCK_MARK, \
+ TPU2TO0_MARK, PORT248_I2C_SCL3_MARK, MSIOF2R_TSCK_MARK,
+ PORT249_IROUT_MARK, MFG4_IN1_MARK, PORT249_I2C_SDA3_MARK, \
+ MSIOF2R_TSYNC_MARK,
+ SDHICLK0_MARK,
+ SDHICD0_MARK,
+ SDHID0_0_MARK,
+ SDHID0_1_MARK,
+ SDHID0_2_MARK,
+ SDHID0_3_MARK,
+ SDHICMD0_MARK,
+ SDHIWP0_MARK,
+ SDHICLK1_MARK,
+ SDHID1_0_MARK, TS_SPSYNC2_MARK,
+ SDHID1_1_MARK, TS_SDAT2_MARK,
+ SDHID1_2_MARK, TS_SDEN2_MARK,
+ SDHID1_3_MARK, TS_SCK2_MARK,
+ SDHICMD1_MARK,
+ SDHICLK2_MARK,
+ SDHID2_0_MARK, TS_SPSYNC4_MARK,
+ SDHID2_1_MARK, TS_SDAT4_MARK,
+ SDHID2_2_MARK, TS_SDEN4_MARK,
+ SDHID2_3_MARK, TS_SCK4_MARK,
+ SDHICMD2_MARK,
+ MMCCLK0_MARK,
+ MMCD0_0_MARK,
+ MMCD0_1_MARK,
+ MMCD0_2_MARK,
+ MMCD0_3_MARK,
+ MMCD0_4_MARK, TS_SPSYNC5_MARK,
+ MMCD0_5_MARK, TS_SDAT5_MARK,
+ MMCD0_6_MARK, TS_SDEN5_MARK,
+ MMCD0_7_MARK, TS_SCK5_MARK,
+ MMCCMD0_MARK,
+ RESETOUTS__MARK, EXTAL2OUT_MARK,
+ MCP_WAIT__MCP_FRB_MARK,
+ MCP_CKO_MARK, MMCCLK1_MARK,
+ MCP_D15_MCP_NAF15_MARK,
+ MCP_D14_MCP_NAF14_MARK,
+ MCP_D13_MCP_NAF13_MARK,
+ MCP_D12_MCP_NAF12_MARK,
+ MCP_D11_MCP_NAF11_MARK,
+ MCP_D10_MCP_NAF10_MARK,
+ MCP_D9_MCP_NAF9_MARK,
+ MCP_D8_MCP_NAF8_MARK, MMCCMD1_MARK,
+ MCP_D7_MCP_NAF7_MARK, MMCD1_7_MARK,
+
+ MCP_D6_MCP_NAF6_MARK, MMCD1_6_MARK,
+ MCP_D5_MCP_NAF5_MARK, MMCD1_5_MARK,
+ MCP_D4_MCP_NAF4_MARK, MMCD1_4_MARK,
+ MCP_D3_MCP_NAF3_MARK, MMCD1_3_MARK,
+ MCP_D2_MCP_NAF2_MARK, MMCD1_2_MARK,
+ MCP_D1_MCP_NAF1_MARK, MMCD1_1_MARK,
+ MCP_D0_MCP_NAF0_MARK, MMCD1_0_MARK,
+ MCP_NBRSTOUT__MARK,
+ MCP_WE0__MCP_FWE_MARK, MCP_RDWR_MCP_FWE_MARK,
+
+ /* MSEL2 special cases */
+ TSIF2_TS_XX1_MARK,
+ TSIF2_TS_XX2_MARK,
+ TSIF2_TS_XX3_MARK,
+ TSIF2_TS_XX4_MARK,
+ TSIF2_TS_XX5_MARK,
+ TSIF1_TS_XX1_MARK,
+ TSIF1_TS_XX2_MARK,
+ TSIF1_TS_XX3_MARK,
+ TSIF1_TS_XX4_MARK,
+ TSIF1_TS_XX5_MARK,
+ TSIF0_TS_XX1_MARK,
+ TSIF0_TS_XX2_MARK,
+ TSIF0_TS_XX3_MARK,
+ TSIF0_TS_XX4_MARK,
+ TSIF0_TS_XX5_MARK,
+ MST1_TS_XX1_MARK,
+ MST1_TS_XX2_MARK,
+ MST1_TS_XX3_MARK,
+ MST1_TS_XX4_MARK,
+ MST1_TS_XX5_MARK,
+ MST0_TS_XX1_MARK,
+ MST0_TS_XX2_MARK,
+ MST0_TS_XX3_MARK,
+ MST0_TS_XX4_MARK,
+ MST0_TS_XX5_MARK,
+
+ /* MSEL3 special cases */
+ SDHI0_VCCQ_MC0_ON_MARK,
+ SDHI0_VCCQ_MC0_OFF_MARK,
+ DEBUG_MON_VIO_MARK,
+ DEBUG_MON_LCDD_MARK,
+ LCDC_LCDC0_MARK,
+ LCDC_LCDC1_MARK,
+
+ /* MSEL4 special cases */
+ IRQ9_MEM_INT_MARK,
+ IRQ9_MCP_INT_MARK,
+ A11_MARK,
+ KEYOUT8_MARK,
+ TPU4TO3_MARK,
+ RESETA_N_PU_ON_MARK,
+ RESETA_N_PU_OFF_MARK,
+ EDBGREQ_PD_MARK,
+ EDBGREQ_PU_MARK,
+
+ /* Functions with pull-ups */
+ KEYIN0_PU_MARK,
+ KEYIN1_PU_MARK,
+ KEYIN2_PU_MARK,
+ KEYIN3_PU_MARK,
+ KEYIN4_PU_MARK,
+ KEYIN5_PU_MARK,
+ KEYIN6_PU_MARK,
+ KEYIN7_PU_MARK,
+ SDHICD0_PU_MARK,
+ SDHID0_0_PU_MARK,
+ SDHID0_1_PU_MARK,
+ SDHID0_2_PU_MARK,
+ SDHID0_3_PU_MARK,
+ SDHICMD0_PU_MARK,
+ SDHIWP0_PU_MARK,
+ SDHID1_0_PU_MARK,
+ SDHID1_1_PU_MARK,
+ SDHID1_2_PU_MARK,
+ SDHID1_3_PU_MARK,
+ SDHICMD1_PU_MARK,
+ SDHID2_0_PU_MARK,
+ SDHID2_1_PU_MARK,
+ SDHID2_2_PU_MARK,
+ SDHID2_3_PU_MARK,
+ SDHICMD2_PU_MARK,
+ MMCCMD0_PU_MARK,
+ MMCCMD1_PU_MARK,
+ MMCD0_0_PU_MARK,
+ MMCD0_1_PU_MARK,
+ MMCD0_2_PU_MARK,
+ MMCD0_3_PU_MARK,
+ MMCD0_4_PU_MARK,
+ MMCD0_5_PU_MARK,
+ MMCD0_6_PU_MARK,
+ MMCD0_7_PU_MARK,
+ FSIBISLD_PU_MARK,
+ FSIACK_PU_MARK,
+ FSIAILR_PU_MARK,
+ FSIAIBT_PU_MARK,
+ FSIAISLD_PU_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* specify valid pin states for each pin in GPIO mode */
+
+ /* Table 25-1 (I/O and Pull U/D) */
+ PORT_DATA_I_PD(0),
+ PORT_DATA_I_PU(1),
+ PORT_DATA_I_PU(2),
+ PORT_DATA_I_PU(3),
+ PORT_DATA_I_PU(4),
+ PORT_DATA_I_PU(5),
+ PORT_DATA_I_PU(6),
+ PORT_DATA_I_PU(7),
+ PORT_DATA_I_PU(8),
+ PORT_DATA_I_PD(9),
+ PORT_DATA_I_PD(10),
+ PORT_DATA_I_PU_PD(11),
+ PORT_DATA_IO_PU_PD(12),
+ PORT_DATA_IO_PU_PD(13),
+ PORT_DATA_IO_PU_PD(14),
+ PORT_DATA_IO_PU_PD(15),
+ PORT_DATA_IO_PD(16),
+ PORT_DATA_IO_PD(17),
+ PORT_DATA_IO_PU(18),
+ PORT_DATA_IO_PU(19),
+ PORT_DATA_O(20),
+ PORT_DATA_O(21),
+ PORT_DATA_O(22),
+ PORT_DATA_O(23),
+ PORT_DATA_O(24),
+ PORT_DATA_I_PD(25),
+ PORT_DATA_I_PD(26),
+ PORT_DATA_IO_PU(27),
+ PORT_DATA_IO_PU(28),
+ PORT_DATA_IO_PD(29),
+ PORT_DATA_IO_PD(30),
+ PORT_DATA_IO_PU(31),
+ PORT_DATA_IO_PD(32),
+ PORT_DATA_I_PU_PD(33),
+ PORT_DATA_IO_PD(34),
+ PORT_DATA_I_PU_PD(35),
+ PORT_DATA_IO_PD(36),
+ PORT_DATA_IO(37),
+ PORT_DATA_O(38),
+ PORT_DATA_I_PU(39),
+ PORT_DATA_I_PU_PD(40),
+ PORT_DATA_O(41),
+ PORT_DATA_IO_PD(42),
+ PORT_DATA_IO_PU_PD(43),
+ PORT_DATA_IO_PU_PD(44),
+ PORT_DATA_IO_PD(45),
+ PORT_DATA_IO_PD(46),
+ PORT_DATA_IO_PD(47),
+ PORT_DATA_I_PD(48),
+ PORT_DATA_IO_PU_PD(49),
+ PORT_DATA_IO_PD(50),
+
+ PORT_DATA_IO_PD(51),
+ PORT_DATA_O(52),
+ PORT_DATA_IO_PU_PD(53),
+ PORT_DATA_IO_PU_PD(54),
+ PORT_DATA_IO_PD(55),
+ PORT_DATA_I_PU_PD(56),
+ PORT_DATA_IO(57),
+ PORT_DATA_IO(58),
+ PORT_DATA_IO(59),
+ PORT_DATA_IO(60),
+ PORT_DATA_IO(61),
+ PORT_DATA_IO_PD(62),
+ PORT_DATA_IO_PD(63),
+ PORT_DATA_IO_PU_PD(64),
+ PORT_DATA_IO_PD(65),
+ PORT_DATA_IO_PU_PD(66),
+ PORT_DATA_IO_PU_PD(67),
+ PORT_DATA_IO_PU_PD(68),
+ PORT_DATA_IO_PU_PD(69),
+ PORT_DATA_IO_PU_PD(70),
+ PORT_DATA_IO_PU_PD(71),
+ PORT_DATA_IO_PU_PD(72),
+ PORT_DATA_I_PU_PD(73),
+ PORT_DATA_IO_PU(74),
+ PORT_DATA_IO_PU(75),
+ PORT_DATA_IO_PU(76),
+ PORT_DATA_IO_PU(77),
+ PORT_DATA_IO_PU(78),
+ PORT_DATA_IO_PU(79),
+ PORT_DATA_IO_PU(80),
+ PORT_DATA_IO_PU(81),
+ PORT_DATA_IO_PU(82),
+ PORT_DATA_IO_PU(83),
+ PORT_DATA_IO_PU(84),
+ PORT_DATA_IO_PU(85),
+ PORT_DATA_IO_PU(86),
+ PORT_DATA_IO_PU(87),
+ PORT_DATA_IO_PU(88),
+ PORT_DATA_IO_PU(89),
+ PORT_DATA_O(90),
+ PORT_DATA_IO_PU(91),
+ PORT_DATA_O(92),
+ PORT_DATA_IO_PU(93),
+ PORT_DATA_O(94),
+ PORT_DATA_I_PU_PD(95),
+ PORT_DATA_IO(96),
+ PORT_DATA_IO(97),
+ PORT_DATA_IO(98),
+ PORT_DATA_I_PU(99),
+ PORT_DATA_O(100),
+ PORT_DATA_O(101),
+ PORT_DATA_I_PU(102),
+ PORT_DATA_IO_PD(103),
+ PORT_DATA_I_PU_PD(104),
+ PORT_DATA_I_PD(105),
+ PORT_DATA_I_PD(106),
+ PORT_DATA_I_PU_PD(107),
+ PORT_DATA_I_PU_PD(108),
+ PORT_DATA_IO_PD(109),
+ PORT_DATA_IO_PD(110),
+ PORT_DATA_IO_PU_PD(111),
+ PORT_DATA_IO_PU_PD(112),
+ PORT_DATA_IO_PU_PD(113),
+ PORT_DATA_IO_PD(114),
+ PORT_DATA_IO_PU(115),
+ PORT_DATA_IO_PU(116),
+ PORT_DATA_IO_PU_PD(117),
+ PORT_DATA_IO_PU_PD(118),
+ PORT_DATA_IO_PD(128),
+
+ PORT_DATA_IO_PD(129),
+ PORT_DATA_IO_PU_PD(130),
+ PORT_DATA_IO_PD(131),
+ PORT_DATA_IO_PD(132),
+ PORT_DATA_IO_PD(133),
+ PORT_DATA_IO_PU_PD(134),
+ PORT_DATA_IO_PU_PD(135),
+ PORT_DATA_IO_PU_PD(136),
+ PORT_DATA_IO_PU_PD(137),
+ PORT_DATA_IO_PD(138),
+ PORT_DATA_IO_PD(139),
+ PORT_DATA_IO_PD(140),
+ PORT_DATA_IO_PD(141),
+ PORT_DATA_IO_PD(142),
+ PORT_DATA_IO_PD(143),
+ PORT_DATA_IO_PU_PD(144),
+ PORT_DATA_IO_PD(145),
+ PORT_DATA_IO_PU_PD(146),
+ PORT_DATA_IO_PU_PD(147),
+ PORT_DATA_IO_PU_PD(148),
+ PORT_DATA_IO_PU_PD(149),
+ PORT_DATA_I_PU_PD(150),
+ PORT_DATA_IO_PU_PD(151),
+ PORT_DATA_IO_PU_PD(152),
+ PORT_DATA_IO_PD(153),
+ PORT_DATA_IO_PD(154),
+ PORT_DATA_I_PU_PD(155),
+ PORT_DATA_IO_PU_PD(156),
+ PORT_DATA_I_PD(157),
+ PORT_DATA_IO_PD(158),
+ PORT_DATA_IO_PU_PD(159),
+ PORT_DATA_IO_PU_PD(160),
+ PORT_DATA_I_PU_PD(161),
+ PORT_DATA_I_PU_PD(162),
+ PORT_DATA_IO_PU_PD(163),
+ PORT_DATA_I_PU_PD(164),
+ PORT_DATA_IO_PD(192),
+ PORT_DATA_IO_PU_PD(193),
+ PORT_DATA_IO_PD(194),
+ PORT_DATA_IO_PU_PD(195),
+ PORT_DATA_IO_PD(196),
+ PORT_DATA_IO_PD(197),
+ PORT_DATA_IO_PD(198),
+ PORT_DATA_IO_PD(199),
+ PORT_DATA_IO_PU_PD(200),
+ PORT_DATA_IO_PU_PD(201),
+ PORT_DATA_IO_PU_PD(202),
+ PORT_DATA_IO_PU_PD(203),
+ PORT_DATA_IO_PU_PD(204),
+ PORT_DATA_IO_PU_PD(205),
+ PORT_DATA_IO_PU_PD(206),
+ PORT_DATA_IO_PD(207),
+ PORT_DATA_IO_PD(208),
+ PORT_DATA_IO_PD(209),
+ PORT_DATA_IO_PD(210),
+ PORT_DATA_IO_PD(211),
+ PORT_DATA_IO_PD(212),
+ PORT_DATA_IO_PD(213),
+ PORT_DATA_IO_PU_PD(214),
+ PORT_DATA_IO_PU_PD(215),
+ PORT_DATA_IO_PD(216),
+ PORT_DATA_IO_PD(217),
+ PORT_DATA_O(218),
+ PORT_DATA_IO_PD(219),
+ PORT_DATA_IO_PD(220),
+ PORT_DATA_IO_PU_PD(221),
+ PORT_DATA_IO_PU_PD(222),
+ PORT_DATA_I_PU_PD(223),
+ PORT_DATA_I_PU_PD(224),
+
+ PORT_DATA_IO_PU_PD(225),
+ PORT_DATA_O(226),
+ PORT_DATA_IO_PU_PD(227),
+ PORT_DATA_I_PU_PD(228),
+ PORT_DATA_I_PD(229),
+ PORT_DATA_IO(230),
+ PORT_DATA_IO_PU_PD(231),
+ PORT_DATA_IO_PU_PD(232),
+ PORT_DATA_I_PU_PD(233),
+ PORT_DATA_IO_PU_PD(234),
+ PORT_DATA_IO_PU_PD(235),
+ PORT_DATA_IO_PU_PD(236),
+ PORT_DATA_IO_PD(237),
+ PORT_DATA_IO_PU_PD(238),
+ PORT_DATA_IO_PU_PD(239),
+ PORT_DATA_IO_PU_PD(240),
+ PORT_DATA_O(241),
+ PORT_DATA_I_PD(242),
+ PORT_DATA_IO_PU_PD(243),
+ PORT_DATA_IO_PU_PD(244),
+ PORT_DATA_IO_PU_PD(245),
+ PORT_DATA_IO_PU_PD(246),
+ PORT_DATA_IO_PU_PD(247),
+ PORT_DATA_IO_PU_PD(248),
+ PORT_DATA_IO_PU_PD(249),
+ PORT_DATA_IO_PU_PD(250),
+ PORT_DATA_IO_PU_PD(251),
+ PORT_DATA_IO_PU_PD(252),
+ PORT_DATA_IO_PU_PD(253),
+ PORT_DATA_IO_PU_PD(254),
+ PORT_DATA_IO_PU_PD(255),
+ PORT_DATA_IO_PU_PD(256),
+ PORT_DATA_IO_PU_PD(257),
+ PORT_DATA_IO_PU_PD(258),
+ PORT_DATA_IO_PU_PD(259),
+ PORT_DATA_IO_PU_PD(260),
+ PORT_DATA_IO_PU_PD(261),
+ PORT_DATA_IO_PU_PD(262),
+ PORT_DATA_IO_PU_PD(263),
+ PORT_DATA_IO_PU_PD(264),
+ PORT_DATA_IO_PU_PD(265),
+ PORT_DATA_IO_PU_PD(266),
+ PORT_DATA_IO_PU_PD(267),
+ PORT_DATA_IO_PU_PD(268),
+ PORT_DATA_IO_PU_PD(269),
+ PORT_DATA_IO_PU_PD(270),
+ PORT_DATA_IO_PU_PD(271),
+ PORT_DATA_IO_PU_PD(272),
+ PORT_DATA_IO_PU_PD(273),
+ PORT_DATA_IO_PU_PD(274),
+ PORT_DATA_IO_PU_PD(275),
+ PORT_DATA_IO_PU_PD(276),
+ PORT_DATA_IO_PU_PD(277),
+ PORT_DATA_IO_PU_PD(278),
+ PORT_DATA_IO_PU_PD(279),
+ PORT_DATA_IO_PU_PD(280),
+ PORT_DATA_O(281),
+ PORT_DATA_O(282),
+ PORT_DATA_I_PU(288),
+ PORT_DATA_IO_PU_PD(289),
+ PORT_DATA_IO_PU_PD(290),
+ PORT_DATA_IO_PU_PD(291),
+ PORT_DATA_IO_PU_PD(292),
+ PORT_DATA_IO_PU_PD(293),
+ PORT_DATA_IO_PU_PD(294),
+ PORT_DATA_IO_PU_PD(295),
+ PORT_DATA_IO_PU_PD(296),
+ PORT_DATA_IO_PU_PD(297),
+ PORT_DATA_IO_PU_PD(298),
+
+ PORT_DATA_IO_PU_PD(299),
+ PORT_DATA_IO_PU_PD(300),
+ PORT_DATA_IO_PU_PD(301),
+ PORT_DATA_IO_PU_PD(302),
+ PORT_DATA_IO_PU_PD(303),
+ PORT_DATA_IO_PU_PD(304),
+ PORT_DATA_IO_PU_PD(305),
+ PORT_DATA_O(306),
+ PORT_DATA_O(307),
+ PORT_DATA_I_PU(308),
+ PORT_DATA_O(309),
+
+ /* Table 25-1 (Function 0-7) */
+ PINMUX_DATA(VBUS_0_MARK, PORT0_FN1),
+ PINMUX_DATA(GPI0_MARK, PORT1_FN1),
+ PINMUX_DATA(GPI1_MARK, PORT2_FN1),
+ PINMUX_DATA(GPI2_MARK, PORT3_FN1),
+ PINMUX_DATA(GPI3_MARK, PORT4_FN1),
+ PINMUX_DATA(GPI4_MARK, PORT5_FN1),
+ PINMUX_DATA(GPI5_MARK, PORT6_FN1),
+ PINMUX_DATA(GPI6_MARK, PORT7_FN1),
+ PINMUX_DATA(GPI7_MARK, PORT8_FN1),
+ PINMUX_DATA(SCIFA7_RXD_MARK, PORT12_FN2),
+ PINMUX_DATA(SCIFA7_CTS__MARK, PORT13_FN2),
+ PINMUX_DATA(GPO7_MARK, PORT14_FN1), \
+ PINMUX_DATA(MFG0_OUT2_MARK, PORT14_FN4),
+ PINMUX_DATA(GPO6_MARK, PORT15_FN1), \
+ PINMUX_DATA(MFG1_OUT2_MARK, PORT15_FN4),
+ PINMUX_DATA(GPO5_MARK, PORT16_FN1), \
+ PINMUX_DATA(SCIFA0_SCK_MARK, PORT16_FN2), \
+ PINMUX_DATA(FSICOSLDT3_MARK, PORT16_FN3), \
+ PINMUX_DATA(PORT16_VIO_CKOR_MARK, PORT16_FN4),
+ PINMUX_DATA(SCIFA0_TXD_MARK, PORT17_FN2),
+ PINMUX_DATA(SCIFA7_TXD_MARK, PORT18_FN2),
+ PINMUX_DATA(SCIFA7_RTS__MARK, PORT19_FN2), \
+ PINMUX_DATA(PORT19_VIO_CKO2_MARK, PORT19_FN3),
+ PINMUX_DATA(GPO0_MARK, PORT20_FN1),
+ PINMUX_DATA(GPO1_MARK, PORT21_FN1),
+ PINMUX_DATA(GPO2_MARK, PORT22_FN1), \
+ PINMUX_DATA(STATUS0_MARK, PORT22_FN2),
+ PINMUX_DATA(GPO3_MARK, PORT23_FN1), \
+ PINMUX_DATA(STATUS1_MARK, PORT23_FN2),
+ PINMUX_DATA(GPO4_MARK, PORT24_FN1), \
+ PINMUX_DATA(STATUS2_MARK, PORT24_FN2),
+ PINMUX_DATA(VINT_MARK, PORT25_FN1),
+ PINMUX_DATA(TCKON_MARK, PORT26_FN1),
+ PINMUX_DATA(XDVFS1_MARK, PORT27_FN1), \
+ PINMUX_DATA(PORT27_I2C_SCL2_MARK, PORT27_FN2, MSEL2CR_MSEL17_0,
+ MSEL2CR_MSEL16_1), \
+ PINMUX_DATA(PORT27_I2C_SCL3_MARK, PORT27_FN3, MSEL2CR_MSEL19_0,
+ MSEL2CR_MSEL18_1), \
+ PINMUX_DATA(MFG0_OUT1_MARK, PORT27_FN4), \
+ PINMUX_DATA(PORT27_IROUT_MARK, PORT27_FN7),
+ PINMUX_DATA(XDVFS2_MARK, PORT28_FN1), \
+ PINMUX_DATA(PORT28_I2C_SDA2_MARK, PORT28_FN2, MSEL2CR_MSEL17_0,
+ MSEL2CR_MSEL16_1), \
+ PINMUX_DATA(PORT28_I2C_SDA3_MARK, PORT28_FN3, MSEL2CR_MSEL19_0,
+ MSEL2CR_MSEL18_1), \
+ PINMUX_DATA(PORT28_TPU1TO1_MARK, PORT28_FN7),
+ PINMUX_DATA(SIM_RST_MARK, PORT29_FN1), \
+ PINMUX_DATA(PORT29_TPU1TO1_MARK, PORT29_FN4),
+ PINMUX_DATA(SIM_CLK_MARK, PORT30_FN1), \
+ PINMUX_DATA(PORT30_VIO_CKOR_MARK, PORT30_FN4),
+ PINMUX_DATA(SIM_D_MARK, PORT31_FN1), \
+ PINMUX_DATA(PORT31_IROUT_MARK, PORT31_FN4),
+ PINMUX_DATA(SCIFA4_TXD_MARK, PORT32_FN2),
+ PINMUX_DATA(SCIFA4_RXD_MARK, PORT33_FN2), \
+ PINMUX_DATA(XWUP_MARK, PORT33_FN3),
+ PINMUX_DATA(SCIFA4_RTS__MARK, PORT34_FN2),
+ PINMUX_DATA(SCIFA4_CTS__MARK, PORT35_FN2),
+ PINMUX_DATA(FSIBOBT_MARK, PORT36_FN1), \
+ PINMUX_DATA(FSIBIBT_MARK, PORT36_FN2),
+ PINMUX_DATA(FSIBOLR_MARK, PORT37_FN1), \
+ PINMUX_DATA(FSIBILR_MARK, PORT37_FN2),
+ PINMUX_DATA(FSIBOSLD_MARK, PORT38_FN1),
+ PINMUX_DATA(FSIBISLD_MARK, PORT39_FN1),
+ PINMUX_DATA(VACK_MARK, PORT40_FN1),
+ PINMUX_DATA(XTAL1L_MARK, PORT41_FN1),
+ PINMUX_DATA(SCIFA0_RTS__MARK, PORT42_FN2), \
+ PINMUX_DATA(FSICOSLDT2_MARK, PORT42_FN3),
+ PINMUX_DATA(SCIFA0_RXD_MARK, PORT43_FN2),
+ PINMUX_DATA(SCIFA0_CTS__MARK, PORT44_FN2), \
+ PINMUX_DATA(FSICOSLDT1_MARK, PORT44_FN3),
+ PINMUX_DATA(FSICOBT_MARK, PORT45_FN1), \
+ PINMUX_DATA(FSICIBT_MARK, PORT45_FN2), \
+ PINMUX_DATA(FSIDOBT_MARK, PORT45_FN3), \
+ PINMUX_DATA(FSIDIBT_MARK, PORT45_FN4),
+ PINMUX_DATA(FSICOLR_MARK, PORT46_FN1), \
+ PINMUX_DATA(FSICILR_MARK, PORT46_FN2), \
+ PINMUX_DATA(FSIDOLR_MARK, PORT46_FN3), \
+ PINMUX_DATA(FSIDILR_MARK, PORT46_FN4),
+ PINMUX_DATA(FSICOSLD_MARK, PORT47_FN1), \
+ PINMUX_DATA(PORT47_FSICSPDIF_MARK, PORT47_FN2),
+ PINMUX_DATA(FSICISLD_MARK, PORT48_FN1), \
+ PINMUX_DATA(FSIDISLD_MARK, PORT48_FN3),
+ PINMUX_DATA(FSIACK_MARK, PORT49_FN1), \
+ PINMUX_DATA(PORT49_IRDA_OUT_MARK, PORT49_FN2, MSEL4CR_MSEL19_1), \
+ PINMUX_DATA(PORT49_IROUT_MARK, PORT49_FN4), \
+ PINMUX_DATA(FSIAOMC_MARK, PORT49_FN5),
+ PINMUX_DATA(FSIAOLR_MARK, PORT50_FN1), \
+ PINMUX_DATA(BBIF2_TSYNC2_MARK, PORT50_FN2), \
+ PINMUX_DATA(TPU2TO2_MARK, PORT50_FN3), \
+ PINMUX_DATA(FSIAILR_MARK, PORT50_FN5),
+
+ PINMUX_DATA(FSIAOBT_MARK, PORT51_FN1), \
+ PINMUX_DATA(BBIF2_TSCK2_MARK, PORT51_FN2), \
+ PINMUX_DATA(TPU2TO3_MARK, PORT51_FN3), \
+ PINMUX_DATA(FSIAIBT_MARK, PORT51_FN5),
+ PINMUX_DATA(FSIAOSLD_MARK, PORT52_FN1), \
+ PINMUX_DATA(BBIF2_TXD2_MARK, PORT52_FN2),
+ PINMUX_DATA(FSIASPDIF_MARK, PORT53_FN1), \
+ PINMUX_DATA(PORT53_IRDA_IN_MARK, PORT53_FN2, MSEL4CR_MSEL19_1), \
+ PINMUX_DATA(TPU3TO3_MARK, PORT53_FN3), \
+ PINMUX_DATA(FSIBSPDIF_MARK, PORT53_FN5), \
+ PINMUX_DATA(PORT53_FSICSPDIF_MARK, PORT53_FN6),
+ PINMUX_DATA(FSIBCK_MARK, PORT54_FN1), \
+ PINMUX_DATA(PORT54_IRDA_FIRSEL_MARK, PORT54_FN2, MSEL4CR_MSEL19_1), \
+ PINMUX_DATA(TPU3TO2_MARK, PORT54_FN3), \
+ PINMUX_DATA(FSIBOMC_MARK, PORT54_FN5), \
+ PINMUX_DATA(FSICCK_MARK, PORT54_FN6), \
+ PINMUX_DATA(FSICOMC_MARK, PORT54_FN7),
+ PINMUX_DATA(FSIAISLD_MARK, PORT55_FN1), \
+ PINMUX_DATA(TPU0TO0_MARK, PORT55_FN3),
+ PINMUX_DATA(A0_MARK, PORT57_FN1), \
+ PINMUX_DATA(BS__MARK, PORT57_FN2),
+ PINMUX_DATA(A12_MARK, PORT58_FN1), \
+ PINMUX_DATA(PORT58_KEYOUT7_MARK, PORT58_FN2), \
+ PINMUX_DATA(TPU4TO2_MARK, PORT58_FN4),
+ PINMUX_DATA(A13_MARK, PORT59_FN1), \
+ PINMUX_DATA(PORT59_KEYOUT6_MARK, PORT59_FN2), \
+ PINMUX_DATA(TPU0TO1_MARK, PORT59_FN4),
+ PINMUX_DATA(A14_MARK, PORT60_FN1), \
+ PINMUX_DATA(KEYOUT5_MARK, PORT60_FN2),
+ PINMUX_DATA(A15_MARK, PORT61_FN1), \
+ PINMUX_DATA(KEYOUT4_MARK, PORT61_FN2),
+ PINMUX_DATA(A16_MARK, PORT62_FN1), \
+ PINMUX_DATA(KEYOUT3_MARK, PORT62_FN2), \
+ PINMUX_DATA(MSIOF0_SS1_MARK, PORT62_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A17_MARK, PORT63_FN1), \
+ PINMUX_DATA(KEYOUT2_MARK, PORT63_FN2), \
+ PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT63_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A18_MARK, PORT64_FN1), \
+ PINMUX_DATA(KEYOUT1_MARK, PORT64_FN2), \
+ PINMUX_DATA(MSIOF0_TSCK_MARK, PORT64_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A19_MARK, PORT65_FN1), \
+ PINMUX_DATA(KEYOUT0_MARK, PORT65_FN2), \
+ PINMUX_DATA(MSIOF0_TXD_MARK, PORT65_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A20_MARK, PORT66_FN1), \
+ PINMUX_DATA(KEYIN0_MARK, PORT66_FN2), \
+ PINMUX_DATA(MSIOF0_RSCK_MARK, PORT66_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A21_MARK, PORT67_FN1), \
+ PINMUX_DATA(KEYIN1_MARK, PORT67_FN2), \
+ PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT67_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A22_MARK, PORT68_FN1), \
+ PINMUX_DATA(KEYIN2_MARK, PORT68_FN2), \
+ PINMUX_DATA(MSIOF0_MCK0_MARK, PORT68_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A23_MARK, PORT69_FN1), \
+ PINMUX_DATA(KEYIN3_MARK, PORT69_FN2), \
+ PINMUX_DATA(MSIOF0_MCK1_MARK, PORT69_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A24_MARK, PORT70_FN1), \
+ PINMUX_DATA(KEYIN4_MARK, PORT70_FN2), \
+ PINMUX_DATA(MSIOF0_RXD_MARK, PORT70_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A25_MARK, PORT71_FN1), \
+ PINMUX_DATA(KEYIN5_MARK, PORT71_FN2), \
+ PINMUX_DATA(MSIOF0_SS2_MARK, PORT71_FN4, MSEL3CR_MSEL11_0),
+ PINMUX_DATA(A26_MARK, PORT72_FN1), \
+ PINMUX_DATA(KEYIN6_MARK, PORT72_FN2),
+ PINMUX_DATA(KEYIN7_MARK, PORT73_FN2),
+ PINMUX_DATA(D0_NAF0_MARK, PORT74_FN1),
+ PINMUX_DATA(D1_NAF1_MARK, PORT75_FN1),
+ PINMUX_DATA(D2_NAF2_MARK, PORT76_FN1),
+ PINMUX_DATA(D3_NAF3_MARK, PORT77_FN1),
+ PINMUX_DATA(D4_NAF4_MARK, PORT78_FN1),
+ PINMUX_DATA(D5_NAF5_MARK, PORT79_FN1),
+ PINMUX_DATA(D6_NAF6_MARK, PORT80_FN1),
+ PINMUX_DATA(D7_NAF7_MARK, PORT81_FN1),
+ PINMUX_DATA(D8_NAF8_MARK, PORT82_FN1),
+ PINMUX_DATA(D9_NAF9_MARK, PORT83_FN1),
+ PINMUX_DATA(D10_NAF10_MARK, PORT84_FN1),
+ PINMUX_DATA(D11_NAF11_MARK, PORT85_FN1),
+ PINMUX_DATA(D12_NAF12_MARK, PORT86_FN1),
+ PINMUX_DATA(D13_NAF13_MARK, PORT87_FN1),
+ PINMUX_DATA(D14_NAF14_MARK, PORT88_FN1),
+ PINMUX_DATA(D15_NAF15_MARK, PORT89_FN1),
+ PINMUX_DATA(CS4__MARK, PORT90_FN1),
+ PINMUX_DATA(CS5A__MARK, PORT91_FN1), \
+ PINMUX_DATA(PORT91_RDWR_MARK, PORT91_FN2),
+ PINMUX_DATA(CS5B__MARK, PORT92_FN1), \
+ PINMUX_DATA(FCE1__MARK, PORT92_FN2),
+ PINMUX_DATA(CS6B__MARK, PORT93_FN1), \
+ PINMUX_DATA(DACK0_MARK, PORT93_FN4),
+ PINMUX_DATA(FCE0__MARK, PORT94_FN1), \
+ PINMUX_DATA(CS6A__MARK, PORT94_FN2),
+ PINMUX_DATA(WAIT__MARK, PORT95_FN1), \
+ PINMUX_DATA(DREQ0_MARK, PORT95_FN2),
+ PINMUX_DATA(RD__FSC_MARK, PORT96_FN1),
+ PINMUX_DATA(WE0__FWE_MARK, PORT97_FN1), \
+ PINMUX_DATA(RDWR_FWE_MARK, PORT97_FN2),
+ PINMUX_DATA(WE1__MARK, PORT98_FN1),
+ PINMUX_DATA(FRB_MARK, PORT99_FN1),
+ PINMUX_DATA(CKO_MARK, PORT100_FN1),
+ PINMUX_DATA(NBRSTOUT__MARK, PORT101_FN1),
+ PINMUX_DATA(NBRST__MARK, PORT102_FN1),
+ PINMUX_DATA(BBIF2_TXD_MARK, PORT103_FN3),
+ PINMUX_DATA(BBIF2_RXD_MARK, PORT104_FN3),
+ PINMUX_DATA(BBIF2_SYNC_MARK, PORT105_FN3),
+ PINMUX_DATA(BBIF2_SCK_MARK, PORT106_FN3),
+ PINMUX_DATA(SCIFA3_CTS__MARK, PORT107_FN3), \
+ PINMUX_DATA(MFG3_IN2_MARK, PORT107_FN4),
+ PINMUX_DATA(SCIFA3_RXD_MARK, PORT108_FN3), \
+ PINMUX_DATA(MFG3_IN1_MARK, PORT108_FN4),
+ PINMUX_DATA(BBIF1_SS2_MARK, PORT109_FN2), \
+ PINMUX_DATA(SCIFA3_RTS__MARK, PORT109_FN3), \
+ PINMUX_DATA(MFG3_OUT1_MARK, PORT109_FN4),
+ PINMUX_DATA(SCIFA3_TXD_MARK, PORT110_FN3),
+ PINMUX_DATA(HSI_RX_DATA_MARK, PORT111_FN1), \
+ PINMUX_DATA(BBIF1_RXD_MARK, PORT111_FN3),
+ PINMUX_DATA(HSI_TX_WAKE_MARK, PORT112_FN1), \
+ PINMUX_DATA(BBIF1_TSCK_MARK, PORT112_FN3),
+ PINMUX_DATA(HSI_TX_DATA_MARK, PORT113_FN1), \
+ PINMUX_DATA(BBIF1_TSYNC_MARK, PORT113_FN3),
+ PINMUX_DATA(HSI_TX_READY_MARK, PORT114_FN1), \
+ PINMUX_DATA(BBIF1_TXD_MARK, PORT114_FN3),
+ PINMUX_DATA(HSI_RX_READY_MARK, PORT115_FN1), \
+ PINMUX_DATA(BBIF1_RSCK_MARK, PORT115_FN3), \
+ PINMUX_DATA(PORT115_I2C_SCL2_MARK, PORT115_FN5, MSEL2CR_MSEL17_1), \
+ PINMUX_DATA(PORT115_I2C_SCL3_MARK, PORT115_FN6, MSEL2CR_MSEL19_1),
+ PINMUX_DATA(HSI_RX_WAKE_MARK, PORT116_FN1), \
+ PINMUX_DATA(BBIF1_RSYNC_MARK, PORT116_FN3), \
+ PINMUX_DATA(PORT116_I2C_SDA2_MARK, PORT116_FN5, MSEL2CR_MSEL17_1), \
+ PINMUX_DATA(PORT116_I2C_SDA3_MARK, PORT116_FN6, MSEL2CR_MSEL19_1),
+ PINMUX_DATA(HSI_RX_FLAG_MARK, PORT117_FN1), \
+ PINMUX_DATA(BBIF1_SS1_MARK, PORT117_FN2), \
+ PINMUX_DATA(BBIF1_FLOW_MARK, PORT117_FN3),
+ PINMUX_DATA(HSI_TX_FLAG_MARK, PORT118_FN1),
+ PINMUX_DATA(VIO_VD_MARK, PORT128_FN1), \
+ PINMUX_DATA(PORT128_LCD2VSYN_MARK, PORT128_FN4, MSEL3CR_MSEL2_0), \
+ PINMUX_DATA(VIO2_VD_MARK, PORT128_FN6, MSEL4CR_MSEL27_0), \
+ PINMUX_DATA(LCD2D0_MARK, PORT128_FN7),
+
+ PINMUX_DATA(VIO_HD_MARK, PORT129_FN1), \
+ PINMUX_DATA(PORT129_LCD2HSYN_MARK, PORT129_FN4), \
+ PINMUX_DATA(PORT129_LCD2CS__MARK, PORT129_FN5), \
+ PINMUX_DATA(VIO2_HD_MARK, PORT129_FN6, MSEL4CR_MSEL27_0), \
+ PINMUX_DATA(LCD2D1_MARK, PORT129_FN7),
+ PINMUX_DATA(VIO_D0_MARK, PORT130_FN1), \
+ PINMUX_DATA(PORT130_MSIOF2_RXD_MARK, PORT130_FN3, MSEL4CR_MSEL11_0,
+ MSEL4CR_MSEL10_1), \
+ PINMUX_DATA(LCD2D10_MARK, PORT130_FN7),
+ PINMUX_DATA(VIO_D1_MARK, PORT131_FN1), \
+ PINMUX_DATA(PORT131_KEYOUT6_MARK, PORT131_FN2), \
+ PINMUX_DATA(PORT131_MSIOF2_SS1_MARK, PORT131_FN3), \
+ PINMUX_DATA(PORT131_KEYOUT11_MARK, PORT131_FN4), \
+ PINMUX_DATA(LCD2D11_MARK, PORT131_FN7),
+ PINMUX_DATA(VIO_D2_MARK, PORT132_FN1), \
+ PINMUX_DATA(PORT132_KEYOUT7_MARK, PORT132_FN2), \
+ PINMUX_DATA(PORT132_MSIOF2_SS2_MARK, PORT132_FN3), \
+ PINMUX_DATA(PORT132_KEYOUT10_MARK, PORT132_FN4), \
+ PINMUX_DATA(LCD2D12_MARK, PORT132_FN7),
+ PINMUX_DATA(VIO_D3_MARK, PORT133_FN1), \
+ PINMUX_DATA(MSIOF2_TSYNC_MARK, PORT133_FN3, MSEL4CR_MSEL11_0), \
+ PINMUX_DATA(LCD2D13_MARK, PORT133_FN7),
+ PINMUX_DATA(VIO_D4_MARK, PORT134_FN1), \
+ PINMUX_DATA(MSIOF2_TXD_MARK, PORT134_FN3, MSEL4CR_MSEL11_0), \
+ PINMUX_DATA(LCD2D14_MARK, PORT134_FN7),
+ PINMUX_DATA(VIO_D5_MARK, PORT135_FN1), \
+ PINMUX_DATA(MSIOF2_TSCK_MARK, PORT135_FN3, MSEL4CR_MSEL11_0), \
+ PINMUX_DATA(LCD2D15_MARK, PORT135_FN7),
+ PINMUX_DATA(VIO_D6_MARK, PORT136_FN1), \
+ PINMUX_DATA(PORT136_KEYOUT8_MARK, PORT136_FN2), \
+ PINMUX_DATA(LCD2D16_MARK, PORT136_FN7),
+ PINMUX_DATA(VIO_D7_MARK, PORT137_FN1), \
+ PINMUX_DATA(PORT137_KEYOUT9_MARK, PORT137_FN2), \
+ PINMUX_DATA(LCD2D17_MARK, PORT137_FN7),
+ PINMUX_DATA(VIO_D8_MARK, PORT138_FN1), \
+ PINMUX_DATA(PORT138_KEYOUT8_MARK, PORT138_FN2), \
+ PINMUX_DATA(VIO2_D0_MARK, PORT138_FN6), \
+ PINMUX_DATA(LCD2D6_MARK, PORT138_FN7),
+ PINMUX_DATA(VIO_D9_MARK, PORT139_FN1), \
+ PINMUX_DATA(PORT139_KEYOUT9_MARK, PORT139_FN2), \
+ PINMUX_DATA(VIO2_D1_MARK, PORT139_FN6), \
+ PINMUX_DATA(LCD2D7_MARK, PORT139_FN7),
+ PINMUX_DATA(VIO_D10_MARK, PORT140_FN1), \
+ PINMUX_DATA(TPU0TO2_MARK, PORT140_FN4), \
+ PINMUX_DATA(VIO2_D2_MARK, PORT140_FN6), \
+ PINMUX_DATA(LCD2D8_MARK, PORT140_FN7),
+ PINMUX_DATA(VIO_D11_MARK, PORT141_FN1), \
+ PINMUX_DATA(TPU0TO3_MARK, PORT141_FN4), \
+ PINMUX_DATA(VIO2_D3_MARK, PORT141_FN6), \
+ PINMUX_DATA(LCD2D9_MARK, PORT141_FN7),
+ PINMUX_DATA(VIO_D12_MARK, PORT142_FN1), \
+ PINMUX_DATA(PORT142_KEYOUT10_MARK, PORT142_FN2), \
+ PINMUX_DATA(VIO2_D4_MARK, PORT142_FN6), \
+ PINMUX_DATA(LCD2D2_MARK, PORT142_FN7),
+ PINMUX_DATA(VIO_D13_MARK, PORT143_FN1), \
+ PINMUX_DATA(PORT143_KEYOUT11_MARK, PORT143_FN2), \
+ PINMUX_DATA(PORT143_KEYOUT6_MARK, PORT143_FN3), \
+ PINMUX_DATA(VIO2_D5_MARK, PORT143_FN6), \
+ PINMUX_DATA(LCD2D3_MARK, PORT143_FN7),
+ PINMUX_DATA(VIO_D14_MARK, PORT144_FN1), \
+ PINMUX_DATA(PORT144_KEYOUT7_MARK, PORT144_FN2), \
+ PINMUX_DATA(VIO2_D6_MARK, PORT144_FN6), \
+ PINMUX_DATA(LCD2D4_MARK, PORT144_FN7),
+ PINMUX_DATA(VIO_D15_MARK, PORT145_FN1), \
+ PINMUX_DATA(TPU1TO3_MARK, PORT145_FN3), \
+ PINMUX_DATA(PORT145_LCD2DISP_MARK, PORT145_FN4), \
+ PINMUX_DATA(PORT145_LCD2RS_MARK, PORT145_FN5), \
+ PINMUX_DATA(VIO2_D7_MARK, PORT145_FN6), \
+ PINMUX_DATA(LCD2D5_MARK, PORT145_FN7),
+ PINMUX_DATA(VIO_CLK_MARK, PORT146_FN1), \
+ PINMUX_DATA(LCD2DCK_MARK, PORT146_FN4), \
+ PINMUX_DATA(PORT146_LCD2WR__MARK, PORT146_FN5), \
+ PINMUX_DATA(VIO2_CLK_MARK, PORT146_FN6, MSEL4CR_MSEL27_0), \
+ PINMUX_DATA(LCD2D18_MARK, PORT146_FN7),
+ PINMUX_DATA(VIO_FIELD_MARK, PORT147_FN1), \
+ PINMUX_DATA(LCD2RD__MARK, PORT147_FN4), \
+ PINMUX_DATA(VIO2_FIELD_MARK, PORT147_FN6, MSEL4CR_MSEL27_0), \
+ PINMUX_DATA(LCD2D19_MARK, PORT147_FN7),
+ PINMUX_DATA(VIO_CKO_MARK, PORT148_FN1),
+ PINMUX_DATA(A27_MARK, PORT149_FN1), \
+ PINMUX_DATA(PORT149_RDWR_MARK, PORT149_FN2), \
+ PINMUX_DATA(MFG0_IN1_MARK, PORT149_FN3), \
+ PINMUX_DATA(PORT149_KEYOUT9_MARK, PORT149_FN4),
+ PINMUX_DATA(MFG0_IN2_MARK, PORT150_FN3),
+ PINMUX_DATA(TS_SPSYNC3_MARK, PORT151_FN4), \
+ PINMUX_DATA(MSIOF2_RSCK_MARK, PORT151_FN5),
+ PINMUX_DATA(TS_SDAT3_MARK, PORT152_FN4), \
+ PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT152_FN5),
+ PINMUX_DATA(TPU1TO2_MARK, PORT153_FN3), \
+ PINMUX_DATA(TS_SDEN3_MARK, PORT153_FN4), \
+ PINMUX_DATA(PORT153_MSIOF2_SS1_MARK, PORT153_FN5),
+ PINMUX_DATA(SCIFA2_TXD1_MARK, PORT154_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(MSIOF2_MCK0_MARK, PORT154_FN5),
+ PINMUX_DATA(SCIFA2_RXD1_MARK, PORT155_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(MSIOF2_MCK1_MARK, PORT155_FN5),
+ PINMUX_DATA(SCIFA2_RTS1__MARK, PORT156_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(PORT156_MSIOF2_SS2_MARK, PORT156_FN5),
+ PINMUX_DATA(SCIFA2_CTS1__MARK, PORT157_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(PORT157_MSIOF2_RXD_MARK, PORT157_FN5, MSEL4CR_MSEL11_0,
+ MSEL4CR_MSEL10_0),
+ PINMUX_DATA(DINT__MARK, PORT158_FN1), \
+ PINMUX_DATA(SCIFA2_SCK1_MARK, PORT158_FN2, MSEL3CR_MSEL9_0), \
+ PINMUX_DATA(TS_SCK3_MARK, PORT158_FN4),
+ PINMUX_DATA(PORT159_SCIFB_SCK_MARK, PORT159_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT159_SCIFA5_SCK_MARK, PORT159_FN2, MSEL4CR_MSEL21_1), \
+ PINMUX_DATA(NMI_MARK, PORT159_FN3),
+ PINMUX_DATA(PORT160_SCIFB_TXD_MARK, PORT160_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT160_SCIFA5_TXD_MARK, PORT160_FN2, MSEL4CR_MSEL21_1),
+ PINMUX_DATA(PORT161_SCIFB_CTS__MARK, PORT161_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT161_SCIFA5_CTS__MARK, PORT161_FN2, MSEL4CR_MSEL21_1),
+ PINMUX_DATA(PORT162_SCIFB_RXD_MARK, PORT162_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT162_SCIFA5_RXD_MARK, PORT162_FN2, MSEL4CR_MSEL21_1),
+ PINMUX_DATA(PORT163_SCIFB_RTS__MARK, PORT163_FN1, MSEL4CR_MSEL22_0), \
+ PINMUX_DATA(PORT163_SCIFA5_RTS__MARK, PORT163_FN2, MSEL4CR_MSEL21_1), \
+ PINMUX_DATA(TPU3TO0_MARK, PORT163_FN5),
+ PINMUX_DATA(LCDD0_MARK, PORT192_FN1),
+ PINMUX_DATA(LCDD1_MARK, PORT193_FN1), \
+ PINMUX_DATA(PORT193_SCIFA5_CTS__MARK, PORT193_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1), \
+ PINMUX_DATA(BBIF2_TSYNC1_MARK, PORT193_FN5),
+ PINMUX_DATA(LCDD2_MARK, PORT194_FN1), \
+ PINMUX_DATA(PORT194_SCIFA5_RTS__MARK, PORT194_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1), \
+ PINMUX_DATA(BBIF2_TSCK1_MARK, PORT194_FN5),
+ PINMUX_DATA(LCDD3_MARK, PORT195_FN1), \
+ PINMUX_DATA(PORT195_SCIFA5_RXD_MARK, PORT195_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1), \
+ PINMUX_DATA(BBIF2_TXD1_MARK, PORT195_FN5),
+ PINMUX_DATA(LCDD4_MARK, PORT196_FN1), \
+ PINMUX_DATA(PORT196_SCIFA5_TXD_MARK, PORT196_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1),
+ PINMUX_DATA(LCDD5_MARK, PORT197_FN1), \
+ PINMUX_DATA(PORT197_SCIFA5_SCK_MARK, PORT197_FN3, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_1), \
+ PINMUX_DATA(MFG2_OUT2_MARK, PORT197_FN5), \
+ PINMUX_DATA(TPU2TO1_MARK, PORT197_FN7),
+ PINMUX_DATA(LCDD6_MARK, PORT198_FN1),
+ PINMUX_DATA(LCDD7_MARK, PORT199_FN1), \
+ PINMUX_DATA(TPU4TO1_MARK, PORT199_FN2), \
+ PINMUX_DATA(MFG4_OUT2_MARK, PORT199_FN5),
+ PINMUX_DATA(LCDD8_MARK, PORT200_FN1), \
+ PINMUX_DATA(D16_MARK, PORT200_FN6),
+ PINMUX_DATA(LCDD9_MARK, PORT201_FN1), \
+ PINMUX_DATA(D17_MARK, PORT201_FN6),
+ PINMUX_DATA(LCDD10_MARK, PORT202_FN1), \
+ PINMUX_DATA(D18_MARK, PORT202_FN6),
+ PINMUX_DATA(LCDD11_MARK, PORT203_FN1), \
+ PINMUX_DATA(D19_MARK, PORT203_FN6),
+ PINMUX_DATA(LCDD12_MARK, PORT204_FN1), \
+ PINMUX_DATA(D20_MARK, PORT204_FN6),
+ PINMUX_DATA(LCDD13_MARK, PORT205_FN1), \
+ PINMUX_DATA(D21_MARK, PORT205_FN6),
+ PINMUX_DATA(LCDD14_MARK, PORT206_FN1), \
+ PINMUX_DATA(D22_MARK, PORT206_FN6),
+ PINMUX_DATA(LCDD15_MARK, PORT207_FN1), \
+ PINMUX_DATA(PORT207_MSIOF0L_SS1_MARK, PORT207_FN2, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D23_MARK, PORT207_FN6),
+ PINMUX_DATA(LCDD16_MARK, PORT208_FN1), \
+ PINMUX_DATA(PORT208_MSIOF0L_SS2_MARK, PORT208_FN2, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D24_MARK, PORT208_FN6),
+ PINMUX_DATA(LCDD17_MARK, PORT209_FN1), \
+ PINMUX_DATA(D25_MARK, PORT209_FN6),
+ PINMUX_DATA(LCDD18_MARK, PORT210_FN1), \
+ PINMUX_DATA(DREQ2_MARK, PORT210_FN2), \
+ PINMUX_DATA(PORT210_MSIOF0L_SS1_MARK, PORT210_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D26_MARK, PORT210_FN6),
+ PINMUX_DATA(LCDD19_MARK, PORT211_FN1), \
+ PINMUX_DATA(PORT211_MSIOF0L_SS2_MARK, PORT211_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D27_MARK, PORT211_FN6),
+ PINMUX_DATA(LCDD20_MARK, PORT212_FN1), \
+ PINMUX_DATA(TS_SPSYNC1_MARK, PORT212_FN2), \
+ PINMUX_DATA(MSIOF0L_MCK0_MARK, PORT212_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D28_MARK, PORT212_FN6),
+ PINMUX_DATA(LCDD21_MARK, PORT213_FN1), \
+ PINMUX_DATA(TS_SDAT1_MARK, PORT213_FN2), \
+ PINMUX_DATA(MSIOF0L_MCK1_MARK, PORT213_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D29_MARK, PORT213_FN6),
+ PINMUX_DATA(LCDD22_MARK, PORT214_FN1), \
+ PINMUX_DATA(TS_SDEN1_MARK, PORT214_FN2), \
+ PINMUX_DATA(MSIOF0L_RSCK_MARK, PORT214_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D30_MARK, PORT214_FN6),
+ PINMUX_DATA(LCDD23_MARK, PORT215_FN1), \
+ PINMUX_DATA(TS_SCK1_MARK, PORT215_FN2), \
+ PINMUX_DATA(MSIOF0L_RSYNC_MARK, PORT215_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(D31_MARK, PORT215_FN6),
+ PINMUX_DATA(LCDDCK_MARK, PORT216_FN1), \
+ PINMUX_DATA(LCDWR__MARK, PORT216_FN2),
+ PINMUX_DATA(LCDRD__MARK, PORT217_FN1), \
+ PINMUX_DATA(DACK2_MARK, PORT217_FN2), \
+ PINMUX_DATA(PORT217_LCD2RS_MARK, PORT217_FN3), \
+ PINMUX_DATA(MSIOF0L_TSYNC_MARK, PORT217_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(VIO2_FIELD3_MARK, PORT217_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_1), \
+ PINMUX_DATA(PORT217_LCD2DISP_MARK, PORT217_FN7),
+ PINMUX_DATA(LCDHSYN_MARK, PORT218_FN1), \
+ PINMUX_DATA(LCDCS__MARK, PORT218_FN2), \
+ PINMUX_DATA(LCDCS2__MARK, PORT218_FN3), \
+ PINMUX_DATA(DACK3_MARK, PORT218_FN4), \
+ PINMUX_DATA(PORT218_VIO_CKOR_MARK, PORT218_FN5),
+ PINMUX_DATA(LCDDISP_MARK, PORT219_FN1), \
+ PINMUX_DATA(LCDRS_MARK, PORT219_FN2), \
+ PINMUX_DATA(PORT219_LCD2WR__MARK, PORT219_FN3), \
+ PINMUX_DATA(DREQ3_MARK, PORT219_FN4), \
+ PINMUX_DATA(MSIOF0L_TSCK_MARK, PORT219_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(VIO2_CLK3_MARK, PORT219_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_1), \
+ PINMUX_DATA(LCD2DCK_2_MARK, PORT219_FN7),
+ PINMUX_DATA(LCDVSYN_MARK, PORT220_FN1), \
+ PINMUX_DATA(LCDVSYN2_MARK, PORT220_FN2),
+ PINMUX_DATA(LCDLCLK_MARK, PORT221_FN1), \
+ PINMUX_DATA(DREQ1_MARK, PORT221_FN2), \
+ PINMUX_DATA(PORT221_LCD2CS__MARK, PORT221_FN3), \
+ PINMUX_DATA(PWEN_MARK, PORT221_FN4), \
+ PINMUX_DATA(MSIOF0L_RXD_MARK, PORT221_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(VIO2_HD3_MARK, PORT221_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_1), \
+ PINMUX_DATA(PORT221_LCD2HSYN_MARK, PORT221_FN7),
+ PINMUX_DATA(LCDDON_MARK, PORT222_FN1), \
+ PINMUX_DATA(LCDDON2_MARK, PORT222_FN2), \
+ PINMUX_DATA(DACK1_MARK, PORT222_FN3), \
+ PINMUX_DATA(OVCN_MARK, PORT222_FN4), \
+ PINMUX_DATA(MSIOF0L_TXD_MARK, PORT222_FN5, MSEL3CR_MSEL11_1), \
+ PINMUX_DATA(VIO2_VD3_MARK, PORT222_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_1), \
+ PINMUX_DATA(PORT222_LCD2VSYN_MARK, PORT222_FN7, MSEL3CR_MSEL2_1),
+
+ PINMUX_DATA(SCIFA1_TXD_MARK, PORT225_FN2), \
+ PINMUX_DATA(OVCN2_MARK, PORT225_FN4),
+ PINMUX_DATA(EXTLP_MARK, PORT226_FN1), \
+ PINMUX_DATA(SCIFA1_SCK_MARK, PORT226_FN2), \
+ PINMUX_DATA(PORT226_VIO_CKO2_MARK, PORT226_FN5),
+ PINMUX_DATA(SCIFA1_RTS__MARK, PORT227_FN2), \
+ PINMUX_DATA(IDIN_MARK, PORT227_FN4),
+ PINMUX_DATA(SCIFA1_RXD_MARK, PORT228_FN2),
+ PINMUX_DATA(SCIFA1_CTS__MARK, PORT229_FN2), \
+ PINMUX_DATA(MFG1_IN1_MARK, PORT229_FN3),
+ PINMUX_DATA(MSIOF1_TXD_MARK, PORT230_FN1), \
+ PINMUX_DATA(SCIFA2_TXD2_MARK, PORT230_FN2, MSEL3CR_MSEL9_1),
+ PINMUX_DATA(MSIOF1_TSYNC_MARK, PORT231_FN1), \
+ PINMUX_DATA(SCIFA2_CTS2__MARK, PORT231_FN2, MSEL3CR_MSEL9_1),
+ PINMUX_DATA(MSIOF1_TSCK_MARK, PORT232_FN1), \
+ PINMUX_DATA(SCIFA2_SCK2_MARK, PORT232_FN2, MSEL3CR_MSEL9_1),
+ PINMUX_DATA(MSIOF1_RXD_MARK, PORT233_FN1), \
+ PINMUX_DATA(SCIFA2_RXD2_MARK, PORT233_FN2, MSEL3CR_MSEL9_1),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PORT234_FN1), \
+ PINMUX_DATA(SCIFA2_RTS2__MARK, PORT234_FN2, MSEL3CR_MSEL9_1), \
+ PINMUX_DATA(VIO2_CLK2_MARK, PORT234_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0), \
+ PINMUX_DATA(LCD2D20_MARK, PORT234_FN7),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT235_FN1), \
+ PINMUX_DATA(MFG1_IN2_MARK, PORT235_FN3), \
+ PINMUX_DATA(VIO2_VD2_MARK, PORT235_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0), \
+ PINMUX_DATA(LCD2D21_MARK, PORT235_FN7),
+ PINMUX_DATA(MSIOF1_MCK0_MARK, PORT236_FN1), \
+ PINMUX_DATA(PORT236_I2C_SDA2_MARK, PORT236_FN2, MSEL2CR_MSEL17_0,
+ MSEL2CR_MSEL16_0),
+ PINMUX_DATA(MSIOF1_MCK1_MARK, PORT237_FN1), \
+ PINMUX_DATA(PORT237_I2C_SCL2_MARK, PORT237_FN2, MSEL2CR_MSEL17_0,
+ MSEL2CR_MSEL16_0),
+ PINMUX_DATA(MSIOF1_SS1_MARK, PORT238_FN1), \
+ PINMUX_DATA(VIO2_FIELD2_MARK, PORT238_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0), \
+ PINMUX_DATA(LCD2D22_MARK, PORT238_FN7),
+ PINMUX_DATA(MSIOF1_SS2_MARK, PORT239_FN1), \
+ PINMUX_DATA(VIO2_HD2_MARK, PORT239_FN6, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0), \
+ PINMUX_DATA(LCD2D23_MARK, PORT239_FN7),
+ PINMUX_DATA(SCIFA6_TXD_MARK, PORT240_FN1),
+ PINMUX_DATA(PORT241_IRDA_OUT_MARK, PORT241_FN1, MSEL4CR_MSEL19_0), \
+ PINMUX_DATA(PORT241_IROUT_MARK, PORT241_FN2), \
+ PINMUX_DATA(MFG4_OUT1_MARK, PORT241_FN3), \
+ PINMUX_DATA(TPU4TO0_MARK, PORT241_FN4),
+ PINMUX_DATA(PORT242_IRDA_IN_MARK, PORT242_FN1, MSEL4CR_MSEL19_0), \
+ PINMUX_DATA(MFG4_IN2_MARK, PORT242_FN3),
+ PINMUX_DATA(PORT243_IRDA_FIRSEL_MARK, PORT243_FN1, MSEL4CR_MSEL19_0), \
+ PINMUX_DATA(PORT243_VIO_CKO2_MARK, PORT243_FN2),
+ PINMUX_DATA(PORT244_SCIFA5_CTS__MARK, PORT244_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG2_IN1_MARK, PORT244_FN2), \
+ PINMUX_DATA(PORT244_SCIFB_CTS__MARK, PORT244_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(MSIOF2R_RXD_MARK, PORT244_FN7, MSEL4CR_MSEL11_1),
+ PINMUX_DATA(PORT245_SCIFA5_RTS__MARK, PORT245_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG2_IN2_MARK, PORT245_FN2), \
+ PINMUX_DATA(PORT245_SCIFB_RTS__MARK, PORT245_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(MSIOF2R_TXD_MARK, PORT245_FN7, MSEL4CR_MSEL11_1),
+ PINMUX_DATA(PORT246_SCIFA5_RXD_MARK, PORT246_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG1_OUT1_MARK, PORT246_FN2), \
+ PINMUX_DATA(PORT246_SCIFB_RXD_MARK, PORT246_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(TPU1TO0_MARK, PORT246_FN4),
+ PINMUX_DATA(PORT247_SCIFA5_TXD_MARK, PORT247_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG3_OUT2_MARK, PORT247_FN2), \
+ PINMUX_DATA(PORT247_SCIFB_TXD_MARK, PORT247_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(TPU3TO1_MARK, PORT247_FN4),
+ PINMUX_DATA(PORT248_SCIFA5_SCK_MARK, PORT248_FN1, MSEL4CR_MSEL21_0,
+ MSEL4CR_MSEL20_0), \
+ PINMUX_DATA(MFG2_OUT1_MARK, PORT248_FN2), \
+ PINMUX_DATA(PORT248_SCIFB_SCK_MARK, PORT248_FN3, MSEL4CR_MSEL22_1), \
+ PINMUX_DATA(TPU2TO0_MARK, PORT248_FN4), \
+ PINMUX_DATA(PORT248_I2C_SCL3_MARK, PORT248_FN5, MSEL2CR_MSEL19_0,
+ MSEL2CR_MSEL18_0), \
+ PINMUX_DATA(MSIOF2R_TSCK_MARK, PORT248_FN7, MSEL4CR_MSEL11_1),
+ PINMUX_DATA(PORT249_IROUT_MARK, PORT249_FN1), \
+ PINMUX_DATA(MFG4_IN1_MARK, PORT249_FN2), \
+ PINMUX_DATA(PORT249_I2C_SDA3_MARK, PORT249_FN5, MSEL2CR_MSEL19_0,
+ MSEL2CR_MSEL18_0), \
+ PINMUX_DATA(MSIOF2R_TSYNC_MARK, PORT249_FN7, MSEL4CR_MSEL11_1),
+ PINMUX_DATA(SDHICLK0_MARK, PORT250_FN1),
+ PINMUX_DATA(SDHICD0_MARK, PORT251_FN1),
+ PINMUX_DATA(SDHID0_0_MARK, PORT252_FN1),
+ PINMUX_DATA(SDHID0_1_MARK, PORT253_FN1),
+ PINMUX_DATA(SDHID0_2_MARK, PORT254_FN1),
+ PINMUX_DATA(SDHID0_3_MARK, PORT255_FN1),
+ PINMUX_DATA(SDHICMD0_MARK, PORT256_FN1),
+ PINMUX_DATA(SDHIWP0_MARK, PORT257_FN1),
+ PINMUX_DATA(SDHICLK1_MARK, PORT258_FN1),
+ PINMUX_DATA(SDHID1_0_MARK, PORT259_FN1), \
+ PINMUX_DATA(TS_SPSYNC2_MARK, PORT259_FN3),
+ PINMUX_DATA(SDHID1_1_MARK, PORT260_FN1), \
+ PINMUX_DATA(TS_SDAT2_MARK, PORT260_FN3),
+ PINMUX_DATA(SDHID1_2_MARK, PORT261_FN1), \
+ PINMUX_DATA(TS_SDEN2_MARK, PORT261_FN3),
+ PINMUX_DATA(SDHID1_3_MARK, PORT262_FN1), \
+ PINMUX_DATA(TS_SCK2_MARK, PORT262_FN3),
+ PINMUX_DATA(SDHICMD1_MARK, PORT263_FN1),
+ PINMUX_DATA(SDHICLK2_MARK, PORT264_FN1),
+ PINMUX_DATA(SDHID2_0_MARK, PORT265_FN1), \
+ PINMUX_DATA(TS_SPSYNC4_MARK, PORT265_FN3),
+ PINMUX_DATA(SDHID2_1_MARK, PORT266_FN1), \
+ PINMUX_DATA(TS_SDAT4_MARK, PORT266_FN3),
+ PINMUX_DATA(SDHID2_2_MARK, PORT267_FN1), \
+ PINMUX_DATA(TS_SDEN4_MARK, PORT267_FN3),
+ PINMUX_DATA(SDHID2_3_MARK, PORT268_FN1), \
+ PINMUX_DATA(TS_SCK4_MARK, PORT268_FN3),
+ PINMUX_DATA(SDHICMD2_MARK, PORT269_FN1),
+ PINMUX_DATA(MMCCLK0_MARK, PORT270_FN1, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_0_MARK, PORT271_FN1, PORT271_IN_PU,
+ MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_1_MARK, PORT272_FN1, PORT272_IN_PU,
+ MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_2_MARK, PORT273_FN1, PORT273_IN_PU,
+ MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_3_MARK, PORT274_FN1, PORT274_IN_PU,
+ MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_4_MARK, PORT275_FN1, PORT275_IN_PU,
+ MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(TS_SPSYNC5_MARK, PORT275_FN3),
+ PINMUX_DATA(MMCD0_5_MARK, PORT276_FN1, PORT276_IN_PU,
+ MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(TS_SDAT5_MARK, PORT276_FN3),
+ PINMUX_DATA(MMCD0_6_MARK, PORT277_FN1, PORT277_IN_PU,
+ MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(TS_SDEN5_MARK, PORT277_FN3),
+ PINMUX_DATA(MMCD0_7_MARK, PORT278_FN1, PORT278_IN_PU,
+ MSEL4CR_MSEL15_0), \
+ PINMUX_DATA(TS_SCK5_MARK, PORT278_FN3),
+ PINMUX_DATA(MMCCMD0_MARK, PORT279_FN1, PORT279_IN_PU,
+ MSEL4CR_MSEL15_0),
+ PINMUX_DATA(RESETOUTS__MARK, PORT281_FN1), \
+ PINMUX_DATA(EXTAL2OUT_MARK, PORT281_FN2),
+ PINMUX_DATA(MCP_WAIT__MCP_FRB_MARK, PORT288_FN1),
+ PINMUX_DATA(MCP_CKO_MARK, PORT289_FN1), \
+ PINMUX_DATA(MMCCLK1_MARK, PORT289_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D15_MCP_NAF15_MARK, PORT290_FN1),
+ PINMUX_DATA(MCP_D14_MCP_NAF14_MARK, PORT291_FN1),
+ PINMUX_DATA(MCP_D13_MCP_NAF13_MARK, PORT292_FN1),
+ PINMUX_DATA(MCP_D12_MCP_NAF12_MARK, PORT293_FN1),
+ PINMUX_DATA(MCP_D11_MCP_NAF11_MARK, PORT294_FN1),
+ PINMUX_DATA(MCP_D10_MCP_NAF10_MARK, PORT295_FN1),
+ PINMUX_DATA(MCP_D9_MCP_NAF9_MARK, PORT296_FN1),
+ PINMUX_DATA(MCP_D8_MCP_NAF8_MARK, PORT297_FN1), \
+ PINMUX_DATA(MMCCMD1_MARK, PORT297_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D7_MCP_NAF7_MARK, PORT298_FN1), \
+ PINMUX_DATA(MMCD1_7_MARK, PORT298_FN2, MSEL4CR_MSEL15_1),
+
+ PINMUX_DATA(MCP_D6_MCP_NAF6_MARK, PORT299_FN1), \
+ PINMUX_DATA(MMCD1_6_MARK, PORT299_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D5_MCP_NAF5_MARK, PORT300_FN1), \
+ PINMUX_DATA(MMCD1_5_MARK, PORT300_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D4_MCP_NAF4_MARK, PORT301_FN1), \
+ PINMUX_DATA(MMCD1_4_MARK, PORT301_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D3_MCP_NAF3_MARK, PORT302_FN1), \
+ PINMUX_DATA(MMCD1_3_MARK, PORT302_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D2_MCP_NAF2_MARK, PORT303_FN1), \
+ PINMUX_DATA(MMCD1_2_MARK, PORT303_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D1_MCP_NAF1_MARK, PORT304_FN1), \
+ PINMUX_DATA(MMCD1_1_MARK, PORT304_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_D0_MCP_NAF0_MARK, PORT305_FN1), \
+ PINMUX_DATA(MMCD1_0_MARK, PORT305_FN2, MSEL4CR_MSEL15_1),
+ PINMUX_DATA(MCP_NBRSTOUT__MARK, PORT306_FN1),
+ PINMUX_DATA(MCP_WE0__MCP_FWE_MARK, PORT309_FN1), \
+ PINMUX_DATA(MCP_RDWR_MCP_FWE_MARK, PORT309_FN2),
+
+ /* MSEL2 special cases */
+ PINMUX_DATA(TSIF2_TS_XX1_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_0,
+ MSEL2CR_MSEL12_0),
+ PINMUX_DATA(TSIF2_TS_XX2_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_0,
+ MSEL2CR_MSEL12_1),
+ PINMUX_DATA(TSIF2_TS_XX3_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_1,
+ MSEL2CR_MSEL12_0),
+ PINMUX_DATA(TSIF2_TS_XX4_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_1,
+ MSEL2CR_MSEL12_1),
+ PINMUX_DATA(TSIF2_TS_XX5_MARK, MSEL2CR_MSEL14_1, MSEL2CR_MSEL13_0,
+ MSEL2CR_MSEL12_0),
+ PINMUX_DATA(TSIF1_TS_XX1_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_0,
+ MSEL2CR_MSEL9_0),
+ PINMUX_DATA(TSIF1_TS_XX2_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_0,
+ MSEL2CR_MSEL9_1),
+ PINMUX_DATA(TSIF1_TS_XX3_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_1,
+ MSEL2CR_MSEL9_0),
+ PINMUX_DATA(TSIF1_TS_XX4_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_1,
+ MSEL2CR_MSEL9_1),
+ PINMUX_DATA(TSIF1_TS_XX5_MARK, MSEL2CR_MSEL11_1, MSEL2CR_MSEL10_0,
+ MSEL2CR_MSEL9_0),
+ PINMUX_DATA(TSIF0_TS_XX1_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_0,
+ MSEL2CR_MSEL6_0),
+ PINMUX_DATA(TSIF0_TS_XX2_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_0,
+ MSEL2CR_MSEL6_1),
+ PINMUX_DATA(TSIF0_TS_XX3_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_1,
+ MSEL2CR_MSEL6_0),
+ PINMUX_DATA(TSIF0_TS_XX4_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_1,
+ MSEL2CR_MSEL6_1),
+ PINMUX_DATA(TSIF0_TS_XX5_MARK, MSEL2CR_MSEL8_1, MSEL2CR_MSEL7_0,
+ MSEL2CR_MSEL6_0),
+ PINMUX_DATA(MST1_TS_XX1_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_0,
+ MSEL2CR_MSEL3_0),
+ PINMUX_DATA(MST1_TS_XX2_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_0,
+ MSEL2CR_MSEL3_1),
+ PINMUX_DATA(MST1_TS_XX3_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_1,
+ MSEL2CR_MSEL3_0),
+ PINMUX_DATA(MST1_TS_XX4_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_1,
+ MSEL2CR_MSEL3_1),
+ PINMUX_DATA(MST1_TS_XX5_MARK, MSEL2CR_MSEL5_1, MSEL2CR_MSEL4_0,
+ MSEL2CR_MSEL3_0),
+ PINMUX_DATA(MST0_TS_XX1_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_0,
+ MSEL2CR_MSEL0_0),
+ PINMUX_DATA(MST0_TS_XX2_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_0,
+ MSEL2CR_MSEL0_1),
+ PINMUX_DATA(MST0_TS_XX3_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_1,
+ MSEL2CR_MSEL0_0),
+ PINMUX_DATA(MST0_TS_XX4_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_1,
+ MSEL2CR_MSEL0_1),
+ PINMUX_DATA(MST0_TS_XX5_MARK, MSEL2CR_MSEL2_1, MSEL2CR_MSEL1_0,
+ MSEL2CR_MSEL0_0),
+
+ /* MSEL3 special cases */
+ PINMUX_DATA(SDHI0_VCCQ_MC0_ON_MARK, MSEL3CR_MSEL28_1),
+ PINMUX_DATA(SDHI0_VCCQ_MC0_OFF_MARK, MSEL3CR_MSEL28_0),
+ PINMUX_DATA(DEBUG_MON_VIO_MARK, MSEL3CR_MSEL15_0),
+ PINMUX_DATA(DEBUG_MON_LCDD_MARK, MSEL3CR_MSEL15_1),
+ PINMUX_DATA(LCDC_LCDC0_MARK, MSEL3CR_MSEL6_0),
+ PINMUX_DATA(LCDC_LCDC1_MARK, MSEL3CR_MSEL6_1),
+
+ /* MSEL4 special cases */
+ PINMUX_DATA(IRQ9_MEM_INT_MARK, MSEL4CR_MSEL29_0),
+ PINMUX_DATA(IRQ9_MCP_INT_MARK, MSEL4CR_MSEL29_1),
+ PINMUX_DATA(A11_MARK, MSEL4CR_MSEL13_0, MSEL4CR_MSEL12_0),
+ PINMUX_DATA(KEYOUT8_MARK, MSEL4CR_MSEL13_0, MSEL4CR_MSEL12_1),
+ PINMUX_DATA(TPU4TO3_MARK, MSEL4CR_MSEL13_1, MSEL4CR_MSEL12_0),
+ PINMUX_DATA(RESETA_N_PU_ON_MARK, MSEL4CR_MSEL4_0),
+ PINMUX_DATA(RESETA_N_PU_OFF_MARK, MSEL4CR_MSEL4_1),
+ PINMUX_DATA(EDBGREQ_PD_MARK, MSEL4CR_MSEL1_0),
+ PINMUX_DATA(EDBGREQ_PU_MARK, MSEL4CR_MSEL1_1),
+
+ /* Functions with pull-ups */
+ PINMUX_DATA(KEYIN0_PU_MARK, PORT66_FN2, PORT66_IN_PU),
+ PINMUX_DATA(KEYIN1_PU_MARK, PORT67_FN2, PORT67_IN_PU),
+ PINMUX_DATA(KEYIN2_PU_MARK, PORT68_FN2, PORT68_IN_PU),
+ PINMUX_DATA(KEYIN3_PU_MARK, PORT69_FN2, PORT69_IN_PU),
+ PINMUX_DATA(KEYIN4_PU_MARK, PORT70_FN2, PORT70_IN_PU),
+ PINMUX_DATA(KEYIN5_PU_MARK, PORT71_FN2, PORT71_IN_PU),
+ PINMUX_DATA(KEYIN6_PU_MARK, PORT72_FN2, PORT72_IN_PU),
+ PINMUX_DATA(KEYIN7_PU_MARK, PORT73_FN2, PORT73_IN_PU),
+
+ PINMUX_DATA(SDHICD0_PU_MARK, PORT251_FN1, PORT251_IN_PU),
+ PINMUX_DATA(SDHID0_0_PU_MARK, PORT252_FN1, PORT252_IN_PU),
+ PINMUX_DATA(SDHID0_1_PU_MARK, PORT253_FN1, PORT253_IN_PU),
+ PINMUX_DATA(SDHID0_2_PU_MARK, PORT254_FN1, PORT254_IN_PU),
+ PINMUX_DATA(SDHID0_3_PU_MARK, PORT255_FN1, PORT255_IN_PU),
+ PINMUX_DATA(SDHICMD0_PU_MARK, PORT256_FN1, PORT256_IN_PU),
+ PINMUX_DATA(SDHIWP0_PU_MARK, PORT257_FN1, PORT257_IN_PU),
+ PINMUX_DATA(SDHID1_0_PU_MARK, PORT259_FN1, PORT259_IN_PU),
+ PINMUX_DATA(SDHID1_1_PU_MARK, PORT260_FN1, PORT260_IN_PU),
+ PINMUX_DATA(SDHID1_2_PU_MARK, PORT261_FN1, PORT261_IN_PU),
+ PINMUX_DATA(SDHID1_3_PU_MARK, PORT262_FN1, PORT262_IN_PU),
+ PINMUX_DATA(SDHICMD1_PU_MARK, PORT263_FN1, PORT263_IN_PU),
+ PINMUX_DATA(SDHID2_0_PU_MARK, PORT265_FN1, PORT265_IN_PU),
+ PINMUX_DATA(SDHID2_1_PU_MARK, PORT266_FN1, PORT266_IN_PU),
+ PINMUX_DATA(SDHID2_2_PU_MARK, PORT267_FN1, PORT267_IN_PU),
+ PINMUX_DATA(SDHID2_3_PU_MARK, PORT268_FN1, PORT268_IN_PU),
+ PINMUX_DATA(SDHICMD2_PU_MARK, PORT269_FN1, PORT269_IN_PU),
+
+ PINMUX_DATA(MMCCMD0_PU_MARK, PORT279_FN1, PORT279_IN_PU,
+ MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCCMD1_PU_MARK, PORT297_FN2, PORT297_IN_PU,
+ MSEL4CR_MSEL15_1),
+
+ PINMUX_DATA(MMCD0_0_PU_MARK,
+ PORT271_FN1, PORT271_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_1_PU_MARK,
+ PORT272_FN1, PORT272_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_2_PU_MARK,
+ PORT273_FN1, PORT273_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_3_PU_MARK,
+ PORT274_FN1, PORT274_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_4_PU_MARK,
+ PORT275_FN1, PORT275_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_5_PU_MARK,
+ PORT276_FN1, PORT276_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_6_PU_MARK,
+ PORT277_FN1, PORT277_IN_PU, MSEL4CR_MSEL15_0),
+ PINMUX_DATA(MMCD0_7_PU_MARK,
+ PORT278_FN1, PORT278_IN_PU, MSEL4CR_MSEL15_0),
+
+ PINMUX_DATA(FSIBISLD_PU_MARK, PORT39_FN1, PORT39_IN_PU),
+ PINMUX_DATA(FSIACK_PU_MARK, PORT49_FN1, PORT49_IN_PU),
+ PINMUX_DATA(FSIAILR_PU_MARK, PORT50_FN5, PORT50_IN_PU),
+ PINMUX_DATA(FSIAIBT_PU_MARK, PORT51_FN5, PORT51_IN_PU),
+ PINMUX_DATA(FSIAISLD_PU_MARK, PORT55_FN1, PORT55_IN_PU),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ GPIO_PORT_ALL(),
+
+ /* Table 25-1 (Functions 0-7) */
+ GPIO_FN(VBUS_0),
+ GPIO_FN(GPI0),
+ GPIO_FN(GPI1),
+ GPIO_FN(GPI2),
+ GPIO_FN(GPI3),
+ GPIO_FN(GPI4),
+ GPIO_FN(GPI5),
+ GPIO_FN(GPI6),
+ GPIO_FN(GPI7),
+ GPIO_FN(SCIFA7_RXD),
+ GPIO_FN(SCIFA7_CTS_),
+ GPIO_FN(GPO7), \
+ GPIO_FN(MFG0_OUT2),
+ GPIO_FN(GPO6), \
+ GPIO_FN(MFG1_OUT2),
+ GPIO_FN(GPO5), \
+ GPIO_FN(SCIFA0_SCK), \
+ GPIO_FN(FSICOSLDT3), \
+ GPIO_FN(PORT16_VIO_CKOR),
+ GPIO_FN(SCIFA0_TXD),
+ GPIO_FN(SCIFA7_TXD),
+ GPIO_FN(SCIFA7_RTS_), \
+ GPIO_FN(PORT19_VIO_CKO2),
+ GPIO_FN(GPO0),
+ GPIO_FN(GPO1),
+ GPIO_FN(GPO2), \
+ GPIO_FN(STATUS0),
+ GPIO_FN(GPO3), \
+ GPIO_FN(STATUS1),
+ GPIO_FN(GPO4), \
+ GPIO_FN(STATUS2),
+ GPIO_FN(VINT),
+ GPIO_FN(TCKON),
+ GPIO_FN(XDVFS1), \
+ GPIO_FN(PORT27_I2C_SCL2), \
+ GPIO_FN(PORT27_I2C_SCL3), \
+ GPIO_FN(MFG0_OUT1), \
+ GPIO_FN(PORT27_IROUT),
+ GPIO_FN(XDVFS2), \
+ GPIO_FN(PORT28_I2C_SDA2), \
+ GPIO_FN(PORT28_I2C_SDA3), \
+ GPIO_FN(PORT28_TPU1TO1),
+ GPIO_FN(SIM_RST), \
+ GPIO_FN(PORT29_TPU1TO1),
+ GPIO_FN(SIM_CLK), \
+ GPIO_FN(PORT30_VIO_CKOR),
+ GPIO_FN(SIM_D), \
+ GPIO_FN(PORT31_IROUT),
+ GPIO_FN(SCIFA4_TXD),
+ GPIO_FN(SCIFA4_RXD), \
+ GPIO_FN(XWUP),
+ GPIO_FN(SCIFA4_RTS_),
+ GPIO_FN(SCIFA4_CTS_),
+ GPIO_FN(FSIBOBT), \
+ GPIO_FN(FSIBIBT),
+ GPIO_FN(FSIBOLR), \
+ GPIO_FN(FSIBILR),
+ GPIO_FN(FSIBOSLD),
+ GPIO_FN(FSIBISLD),
+ GPIO_FN(VACK),
+ GPIO_FN(XTAL1L),
+ GPIO_FN(SCIFA0_RTS_), \
+ GPIO_FN(FSICOSLDT2),
+ GPIO_FN(SCIFA0_RXD),
+ GPIO_FN(SCIFA0_CTS_), \
+ GPIO_FN(FSICOSLDT1),
+ GPIO_FN(FSICOBT), \
+ GPIO_FN(FSICIBT), \
+ GPIO_FN(FSIDOBT), \
+ GPIO_FN(FSIDIBT),
+ GPIO_FN(FSICOLR), \
+ GPIO_FN(FSICILR), \
+ GPIO_FN(FSIDOLR), \
+ GPIO_FN(FSIDILR),
+ GPIO_FN(FSICOSLD), \
+ GPIO_FN(PORT47_FSICSPDIF),
+ GPIO_FN(FSICISLD), \
+ GPIO_FN(FSIDISLD),
+ GPIO_FN(FSIACK), \
+ GPIO_FN(PORT49_IRDA_OUT), \
+ GPIO_FN(PORT49_IROUT), \
+ GPIO_FN(FSIAOMC),
+ GPIO_FN(FSIAOLR), \
+ GPIO_FN(BBIF2_TSYNC2), \
+ GPIO_FN(TPU2TO2), \
+ GPIO_FN(FSIAILR),
+
+ GPIO_FN(FSIAOBT), \
+ GPIO_FN(BBIF2_TSCK2), \
+ GPIO_FN(TPU2TO3), \
+ GPIO_FN(FSIAIBT),
+ GPIO_FN(FSIAOSLD), \
+ GPIO_FN(BBIF2_TXD2),
+ GPIO_FN(FSIASPDIF), \
+ GPIO_FN(PORT53_IRDA_IN), \
+ GPIO_FN(TPU3TO3), \
+ GPIO_FN(FSIBSPDIF), \
+ GPIO_FN(PORT53_FSICSPDIF),
+ GPIO_FN(FSIBCK), \
+ GPIO_FN(PORT54_IRDA_FIRSEL), \
+ GPIO_FN(TPU3TO2), \
+ GPIO_FN(FSIBOMC), \
+ GPIO_FN(FSICCK), \
+ GPIO_FN(FSICOMC),
+ GPIO_FN(FSIAISLD), \
+ GPIO_FN(TPU0TO0),
+ GPIO_FN(A0), \
+ GPIO_FN(BS_),
+ GPIO_FN(A12), \
+ GPIO_FN(PORT58_KEYOUT7), \
+ GPIO_FN(TPU4TO2),
+ GPIO_FN(A13), \
+ GPIO_FN(PORT59_KEYOUT6), \
+ GPIO_FN(TPU0TO1),
+ GPIO_FN(A14), \
+ GPIO_FN(KEYOUT5),
+ GPIO_FN(A15), \
+ GPIO_FN(KEYOUT4),
+ GPIO_FN(A16), \
+ GPIO_FN(KEYOUT3), \
+ GPIO_FN(MSIOF0_SS1),
+ GPIO_FN(A17), \
+ GPIO_FN(KEYOUT2), \
+ GPIO_FN(MSIOF0_TSYNC),
+ GPIO_FN(A18), \
+ GPIO_FN(KEYOUT1), \
+ GPIO_FN(MSIOF0_TSCK),
+ GPIO_FN(A19), \
+ GPIO_FN(KEYOUT0), \
+ GPIO_FN(MSIOF0_TXD),
+ GPIO_FN(A20), \
+ GPIO_FN(KEYIN0), \
+ GPIO_FN(MSIOF0_RSCK),
+ GPIO_FN(A21), \
+ GPIO_FN(KEYIN1), \
+ GPIO_FN(MSIOF0_RSYNC),
+ GPIO_FN(A22), \
+ GPIO_FN(KEYIN2), \
+ GPIO_FN(MSIOF0_MCK0),
+ GPIO_FN(A23), \
+ GPIO_FN(KEYIN3), \
+ GPIO_FN(MSIOF0_MCK1),
+ GPIO_FN(A24), \
+ GPIO_FN(KEYIN4), \
+ GPIO_FN(MSIOF0_RXD),
+ GPIO_FN(A25), \
+ GPIO_FN(KEYIN5), \
+ GPIO_FN(MSIOF0_SS2),
+ GPIO_FN(A26), \
+ GPIO_FN(KEYIN6),
+ GPIO_FN(KEYIN7),
+ GPIO_FN(D0_NAF0),
+ GPIO_FN(D1_NAF1),
+ GPIO_FN(D2_NAF2),
+ GPIO_FN(D3_NAF3),
+ GPIO_FN(D4_NAF4),
+ GPIO_FN(D5_NAF5),
+ GPIO_FN(D6_NAF6),
+ GPIO_FN(D7_NAF7),
+ GPIO_FN(D8_NAF8),
+ GPIO_FN(D9_NAF9),
+ GPIO_FN(D10_NAF10),
+ GPIO_FN(D11_NAF11),
+ GPIO_FN(D12_NAF12),
+ GPIO_FN(D13_NAF13),
+ GPIO_FN(D14_NAF14),
+ GPIO_FN(D15_NAF15),
+ GPIO_FN(CS4_),
+ GPIO_FN(CS5A_), \
+ GPIO_FN(PORT91_RDWR),
+ GPIO_FN(CS5B_), \
+ GPIO_FN(FCE1_),
+ GPIO_FN(CS6B_), \
+ GPIO_FN(DACK0),
+ GPIO_FN(FCE0_), \
+ GPIO_FN(CS6A_),
+ GPIO_FN(WAIT_), \
+ GPIO_FN(DREQ0),
+ GPIO_FN(RD__FSC),
+ GPIO_FN(WE0__FWE), \
+ GPIO_FN(RDWR_FWE),
+ GPIO_FN(WE1_),
+ GPIO_FN(FRB),
+ GPIO_FN(CKO),
+ GPIO_FN(NBRSTOUT_),
+ GPIO_FN(NBRST_),
+ GPIO_FN(BBIF2_TXD),
+ GPIO_FN(BBIF2_RXD),
+ GPIO_FN(BBIF2_SYNC),
+ GPIO_FN(BBIF2_SCK),
+ GPIO_FN(SCIFA3_CTS_), \
+ GPIO_FN(MFG3_IN2),
+ GPIO_FN(SCIFA3_RXD), \
+ GPIO_FN(MFG3_IN1),
+ GPIO_FN(BBIF1_SS2), \
+ GPIO_FN(SCIFA3_RTS_), \
+ GPIO_FN(MFG3_OUT1),
+ GPIO_FN(SCIFA3_TXD),
+ GPIO_FN(HSI_RX_DATA), \
+ GPIO_FN(BBIF1_RXD),
+ GPIO_FN(HSI_TX_WAKE), \
+ GPIO_FN(BBIF1_TSCK),
+ GPIO_FN(HSI_TX_DATA), \
+ GPIO_FN(BBIF1_TSYNC),
+ GPIO_FN(HSI_TX_READY), \
+ GPIO_FN(BBIF1_TXD),
+ GPIO_FN(HSI_RX_READY), \
+ GPIO_FN(BBIF1_RSCK), \
+ GPIO_FN(PORT115_I2C_SCL2), \
+ GPIO_FN(PORT115_I2C_SCL3),
+ GPIO_FN(HSI_RX_WAKE), \
+ GPIO_FN(BBIF1_RSYNC), \
+ GPIO_FN(PORT116_I2C_SDA2), \
+ GPIO_FN(PORT116_I2C_SDA3),
+ GPIO_FN(HSI_RX_FLAG), \
+ GPIO_FN(BBIF1_SS1), \
+ GPIO_FN(BBIF1_FLOW),
+ GPIO_FN(HSI_TX_FLAG),
+ GPIO_FN(VIO_VD), \
+ GPIO_FN(PORT128_LCD2VSYN), \
+ GPIO_FN(VIO2_VD), \
+ GPIO_FN(LCD2D0),
+
+ GPIO_FN(VIO_HD), \
+ GPIO_FN(PORT129_LCD2HSYN), \
+ GPIO_FN(PORT129_LCD2CS_), \
+ GPIO_FN(VIO2_HD), \
+ GPIO_FN(LCD2D1),
+ GPIO_FN(VIO_D0), \
+ GPIO_FN(PORT130_MSIOF2_RXD), \
+ GPIO_FN(LCD2D10),
+ GPIO_FN(VIO_D1), \
+ GPIO_FN(PORT131_KEYOUT6), \
+ GPIO_FN(PORT131_MSIOF2_SS1), \
+ GPIO_FN(PORT131_KEYOUT11), \
+ GPIO_FN(LCD2D11),
+ GPIO_FN(VIO_D2), \
+ GPIO_FN(PORT132_KEYOUT7), \
+ GPIO_FN(PORT132_MSIOF2_SS2), \
+ GPIO_FN(PORT132_KEYOUT10), \
+ GPIO_FN(LCD2D12),
+ GPIO_FN(VIO_D3), \
+ GPIO_FN(MSIOF2_TSYNC), \
+ GPIO_FN(LCD2D13),
+ GPIO_FN(VIO_D4), \
+ GPIO_FN(MSIOF2_TXD), \
+ GPIO_FN(LCD2D14),
+ GPIO_FN(VIO_D5), \
+ GPIO_FN(MSIOF2_TSCK), \
+ GPIO_FN(LCD2D15),
+ GPIO_FN(VIO_D6), \
+ GPIO_FN(PORT136_KEYOUT8), \
+ GPIO_FN(LCD2D16),
+ GPIO_FN(VIO_D7), \
+ GPIO_FN(PORT137_KEYOUT9), \
+ GPIO_FN(LCD2D17),
+ GPIO_FN(VIO_D8), \
+ GPIO_FN(PORT138_KEYOUT8), \
+ GPIO_FN(VIO2_D0), \
+ GPIO_FN(LCD2D6),
+ GPIO_FN(VIO_D9), \
+ GPIO_FN(PORT139_KEYOUT9), \
+ GPIO_FN(VIO2_D1), \
+ GPIO_FN(LCD2D7),
+ GPIO_FN(VIO_D10), \
+ GPIO_FN(TPU0TO2), \
+ GPIO_FN(VIO2_D2), \
+ GPIO_FN(LCD2D8),
+ GPIO_FN(VIO_D11), \
+ GPIO_FN(TPU0TO3), \
+ GPIO_FN(VIO2_D3), \
+ GPIO_FN(LCD2D9),
+ GPIO_FN(VIO_D12), \
+ GPIO_FN(PORT142_KEYOUT10), \
+ GPIO_FN(VIO2_D4), \
+ GPIO_FN(LCD2D2),
+ GPIO_FN(VIO_D13), \
+ GPIO_FN(PORT143_KEYOUT11), \
+ GPIO_FN(PORT143_KEYOUT6), \
+ GPIO_FN(VIO2_D5), \
+ GPIO_FN(LCD2D3),
+ GPIO_FN(VIO_D14), \
+ GPIO_FN(PORT144_KEYOUT7), \
+ GPIO_FN(VIO2_D6), \
+ GPIO_FN(LCD2D4),
+ GPIO_FN(VIO_D15), \
+ GPIO_FN(TPU1TO3), \
+ GPIO_FN(PORT145_LCD2DISP), \
+ GPIO_FN(PORT145_LCD2RS), \
+ GPIO_FN(VIO2_D7), \
+ GPIO_FN(LCD2D5),
+ GPIO_FN(VIO_CLK), \
+ GPIO_FN(LCD2DCK), \
+ GPIO_FN(PORT146_LCD2WR_), \
+ GPIO_FN(VIO2_CLK), \
+ GPIO_FN(LCD2D18),
+ GPIO_FN(VIO_FIELD), \
+ GPIO_FN(LCD2RD_), \
+ GPIO_FN(VIO2_FIELD), \
+ GPIO_FN(LCD2D19),
+ GPIO_FN(VIO_CKO),
+ GPIO_FN(A27), \
+ GPIO_FN(PORT149_RDWR), \
+ GPIO_FN(MFG0_IN1), \
+ GPIO_FN(PORT149_KEYOUT9),
+ GPIO_FN(MFG0_IN2),
+ GPIO_FN(TS_SPSYNC3), \
+ GPIO_FN(MSIOF2_RSCK),
+ GPIO_FN(TS_SDAT3), \
+ GPIO_FN(MSIOF2_RSYNC),
+ GPIO_FN(TPU1TO2), \
+ GPIO_FN(TS_SDEN3), \
+ GPIO_FN(PORT153_MSIOF2_SS1),
+ GPIO_FN(SCIFA2_TXD1), \
+ GPIO_FN(MSIOF2_MCK0),
+ GPIO_FN(SCIFA2_RXD1), \
+ GPIO_FN(MSIOF2_MCK1),
+ GPIO_FN(SCIFA2_RTS1_), \
+ GPIO_FN(PORT156_MSIOF2_SS2),
+ GPIO_FN(SCIFA2_CTS1_), \
+ GPIO_FN(PORT157_MSIOF2_RXD),
+ GPIO_FN(DINT_), \
+ GPIO_FN(SCIFA2_SCK1), \
+ GPIO_FN(TS_SCK3),
+ GPIO_FN(PORT159_SCIFB_SCK), \
+ GPIO_FN(PORT159_SCIFA5_SCK), \
+ GPIO_FN(NMI),
+ GPIO_FN(PORT160_SCIFB_TXD), \
+ GPIO_FN(PORT160_SCIFA5_TXD),
+ GPIO_FN(PORT161_SCIFB_CTS_), \
+ GPIO_FN(PORT161_SCIFA5_CTS_),
+ GPIO_FN(PORT162_SCIFB_RXD), \
+ GPIO_FN(PORT162_SCIFA5_RXD),
+ GPIO_FN(PORT163_SCIFB_RTS_), \
+ GPIO_FN(PORT163_SCIFA5_RTS_), \
+ GPIO_FN(TPU3TO0),
+ GPIO_FN(LCDD0),
+ GPIO_FN(LCDD1), \
+ GPIO_FN(PORT193_SCIFA5_CTS_), \
+ GPIO_FN(BBIF2_TSYNC1),
+ GPIO_FN(LCDD2), \
+ GPIO_FN(PORT194_SCIFA5_RTS_), \
+ GPIO_FN(BBIF2_TSCK1),
+ GPIO_FN(LCDD3), \
+ GPIO_FN(PORT195_SCIFA5_RXD), \
+ GPIO_FN(BBIF2_TXD1),
+ GPIO_FN(LCDD4), \
+ GPIO_FN(PORT196_SCIFA5_TXD),
+ GPIO_FN(LCDD5), \
+ GPIO_FN(PORT197_SCIFA5_SCK), \
+ GPIO_FN(MFG2_OUT2), \
+ GPIO_FN(TPU2TO1),
+ GPIO_FN(LCDD6),
+ GPIO_FN(LCDD7), \
+ GPIO_FN(TPU4TO1), \
+ GPIO_FN(MFG4_OUT2),
+ GPIO_FN(LCDD8), \
+ GPIO_FN(D16),
+ GPIO_FN(LCDD9), \
+ GPIO_FN(D17),
+ GPIO_FN(LCDD10), \
+ GPIO_FN(D18),
+ GPIO_FN(LCDD11), \
+ GPIO_FN(D19),
+ GPIO_FN(LCDD12), \
+ GPIO_FN(D20),
+ GPIO_FN(LCDD13), \
+ GPIO_FN(D21),
+ GPIO_FN(LCDD14), \
+ GPIO_FN(D22),
+ GPIO_FN(LCDD15), \
+ GPIO_FN(PORT207_MSIOF0L_SS1), \
+ GPIO_FN(D23),
+ GPIO_FN(LCDD16), \
+ GPIO_FN(PORT208_MSIOF0L_SS2), \
+ GPIO_FN(D24),
+ GPIO_FN(LCDD17), \
+ GPIO_FN(D25),
+ GPIO_FN(LCDD18), \
+ GPIO_FN(DREQ2), \
+ GPIO_FN(PORT210_MSIOF0L_SS1), \
+ GPIO_FN(D26),
+ GPIO_FN(LCDD19), \
+ GPIO_FN(PORT211_MSIOF0L_SS2), \
+ GPIO_FN(D27),
+ GPIO_FN(LCDD20), \
+ GPIO_FN(TS_SPSYNC1), \
+ GPIO_FN(MSIOF0L_MCK0), \
+ GPIO_FN(D28),
+ GPIO_FN(LCDD21), \
+ GPIO_FN(TS_SDAT1), \
+ GPIO_FN(MSIOF0L_MCK1), \
+ GPIO_FN(D29),
+ GPIO_FN(LCDD22), \
+ GPIO_FN(TS_SDEN1), \
+ GPIO_FN(MSIOF0L_RSCK), \
+ GPIO_FN(D30),
+ GPIO_FN(LCDD23), \
+ GPIO_FN(TS_SCK1), \
+ GPIO_FN(MSIOF0L_RSYNC), \
+ GPIO_FN(D31),
+ GPIO_FN(LCDDCK), \
+ GPIO_FN(LCDWR_),
+ GPIO_FN(LCDRD_), \
+ GPIO_FN(DACK2), \
+ GPIO_FN(PORT217_LCD2RS), \
+ GPIO_FN(MSIOF0L_TSYNC), \
+ GPIO_FN(VIO2_FIELD3), \
+ GPIO_FN(PORT217_LCD2DISP),
+ GPIO_FN(LCDHSYN), \
+ GPIO_FN(LCDCS_), \
+ GPIO_FN(LCDCS2_), \
+ GPIO_FN(DACK3), \
+ GPIO_FN(PORT218_VIO_CKOR),
+ GPIO_FN(LCDDISP), \
+ GPIO_FN(LCDRS), \
+ GPIO_FN(PORT219_LCD2WR_), \
+ GPIO_FN(DREQ3), \
+ GPIO_FN(MSIOF0L_TSCK), \
+ GPIO_FN(VIO2_CLK3), \
+ GPIO_FN(LCD2DCK_2),
+ GPIO_FN(LCDVSYN), \
+ GPIO_FN(LCDVSYN2),
+ GPIO_FN(LCDLCLK), \
+ GPIO_FN(DREQ1), \
+ GPIO_FN(PORT221_LCD2CS_), \
+ GPIO_FN(PWEN), \
+ GPIO_FN(MSIOF0L_RXD), \
+ GPIO_FN(VIO2_HD3), \
+ GPIO_FN(PORT221_LCD2HSYN),
+ GPIO_FN(LCDDON), \
+ GPIO_FN(LCDDON2), \
+ GPIO_FN(DACK1), \
+ GPIO_FN(OVCN), \
+ GPIO_FN(MSIOF0L_TXD), \
+ GPIO_FN(VIO2_VD3), \
+ GPIO_FN(PORT222_LCD2VSYN),
+
+ GPIO_FN(SCIFA1_TXD), \
+ GPIO_FN(OVCN2),
+ GPIO_FN(EXTLP), \
+ GPIO_FN(SCIFA1_SCK), \
+ GPIO_FN(PORT226_VIO_CKO2),
+ GPIO_FN(SCIFA1_RTS_), \
+ GPIO_FN(IDIN),
+ GPIO_FN(SCIFA1_RXD),
+ GPIO_FN(SCIFA1_CTS_), \
+ GPIO_FN(MFG1_IN1),
+ GPIO_FN(MSIOF1_TXD), \
+ GPIO_FN(SCIFA2_TXD2),
+ GPIO_FN(MSIOF1_TSYNC), \
+ GPIO_FN(SCIFA2_CTS2_),
+ GPIO_FN(MSIOF1_TSCK), \
+ GPIO_FN(SCIFA2_SCK2),
+ GPIO_FN(MSIOF1_RXD), \
+ GPIO_FN(SCIFA2_RXD2),
+ GPIO_FN(MSIOF1_RSCK), \
+ GPIO_FN(SCIFA2_RTS2_), \
+ GPIO_FN(VIO2_CLK2), \
+ GPIO_FN(LCD2D20),
+ GPIO_FN(MSIOF1_RSYNC), \
+ GPIO_FN(MFG1_IN2), \
+ GPIO_FN(VIO2_VD2), \
+ GPIO_FN(LCD2D21),
+ GPIO_FN(MSIOF1_MCK0), \
+ GPIO_FN(PORT236_I2C_SDA2),
+ GPIO_FN(MSIOF1_MCK1), \
+ GPIO_FN(PORT237_I2C_SCL2),
+ GPIO_FN(MSIOF1_SS1), \
+ GPIO_FN(VIO2_FIELD2), \
+ GPIO_FN(LCD2D22),
+ GPIO_FN(MSIOF1_SS2), \
+ GPIO_FN(VIO2_HD2), \
+ GPIO_FN(LCD2D23),
+ GPIO_FN(SCIFA6_TXD),
+ GPIO_FN(PORT241_IRDA_OUT), \
+ GPIO_FN(PORT241_IROUT), \
+ GPIO_FN(MFG4_OUT1), \
+ GPIO_FN(TPU4TO0),
+ GPIO_FN(PORT242_IRDA_IN), \
+ GPIO_FN(MFG4_IN2),
+ GPIO_FN(PORT243_IRDA_FIRSEL), \
+ GPIO_FN(PORT243_VIO_CKO2),
+ GPIO_FN(PORT244_SCIFA5_CTS_), \
+ GPIO_FN(MFG2_IN1), \
+ GPIO_FN(PORT244_SCIFB_CTS_), \
+ GPIO_FN(MSIOF2R_RXD),
+ GPIO_FN(PORT245_SCIFA5_RTS_), \
+ GPIO_FN(MFG2_IN2), \
+ GPIO_FN(PORT245_SCIFB_RTS_), \
+ GPIO_FN(MSIOF2R_TXD),
+ GPIO_FN(PORT246_SCIFA5_RXD), \
+ GPIO_FN(MFG1_OUT1), \
+ GPIO_FN(PORT246_SCIFB_RXD), \
+ GPIO_FN(TPU1TO0),
+ GPIO_FN(PORT247_SCIFA5_TXD), \
+ GPIO_FN(MFG3_OUT2), \
+ GPIO_FN(PORT247_SCIFB_TXD), \
+ GPIO_FN(TPU3TO1),
+ GPIO_FN(PORT248_SCIFA5_SCK), \
+ GPIO_FN(MFG2_OUT1), \
+ GPIO_FN(PORT248_SCIFB_SCK), \
+ GPIO_FN(TPU2TO0), \
+ GPIO_FN(PORT248_I2C_SCL3), \
+ GPIO_FN(MSIOF2R_TSCK),
+ GPIO_FN(PORT249_IROUT), \
+ GPIO_FN(MFG4_IN1), \
+ GPIO_FN(PORT249_I2C_SDA3), \
+ GPIO_FN(MSIOF2R_TSYNC),
+ GPIO_FN(SDHICLK0),
+ GPIO_FN(SDHICD0),
+ GPIO_FN(SDHID0_0),
+ GPIO_FN(SDHID0_1),
+ GPIO_FN(SDHID0_2),
+ GPIO_FN(SDHID0_3),
+ GPIO_FN(SDHICMD0),
+ GPIO_FN(SDHIWP0),
+ GPIO_FN(SDHICLK1),
+ GPIO_FN(SDHID1_0), \
+ GPIO_FN(TS_SPSYNC2),
+ GPIO_FN(SDHID1_1), \
+ GPIO_FN(TS_SDAT2),
+ GPIO_FN(SDHID1_2), \
+ GPIO_FN(TS_SDEN2),
+ GPIO_FN(SDHID1_3), \
+ GPIO_FN(TS_SCK2),
+ GPIO_FN(SDHICMD1),
+ GPIO_FN(SDHICLK2),
+ GPIO_FN(SDHID2_0), \
+ GPIO_FN(TS_SPSYNC4),
+ GPIO_FN(SDHID2_1), \
+ GPIO_FN(TS_SDAT4),
+ GPIO_FN(SDHID2_2), \
+ GPIO_FN(TS_SDEN4),
+ GPIO_FN(SDHID2_3), \
+ GPIO_FN(TS_SCK4),
+ GPIO_FN(SDHICMD2),
+ GPIO_FN(MMCCLK0),
+ GPIO_FN(MMCD0_0),
+ GPIO_FN(MMCD0_1),
+ GPIO_FN(MMCD0_2),
+ GPIO_FN(MMCD0_3),
+ GPIO_FN(MMCD0_4), \
+ GPIO_FN(TS_SPSYNC5),
+ GPIO_FN(MMCD0_5), \
+ GPIO_FN(TS_SDAT5),
+ GPIO_FN(MMCD0_6), \
+ GPIO_FN(TS_SDEN5),
+ GPIO_FN(MMCD0_7), \
+ GPIO_FN(TS_SCK5),
+ GPIO_FN(MMCCMD0),
+ GPIO_FN(RESETOUTS_), \
+ GPIO_FN(EXTAL2OUT),
+ GPIO_FN(MCP_WAIT__MCP_FRB),
+ GPIO_FN(MCP_CKO), \
+ GPIO_FN(MMCCLK1),
+ GPIO_FN(MCP_D15_MCP_NAF15),
+ GPIO_FN(MCP_D14_MCP_NAF14),
+ GPIO_FN(MCP_D13_MCP_NAF13),
+ GPIO_FN(MCP_D12_MCP_NAF12),
+ GPIO_FN(MCP_D11_MCP_NAF11),
+ GPIO_FN(MCP_D10_MCP_NAF10),
+ GPIO_FN(MCP_D9_MCP_NAF9),
+ GPIO_FN(MCP_D8_MCP_NAF8), \
+ GPIO_FN(MMCCMD1),
+ GPIO_FN(MCP_D7_MCP_NAF7), \
+ GPIO_FN(MMCD1_7),
+
+ GPIO_FN(MCP_D6_MCP_NAF6), \
+ GPIO_FN(MMCD1_6),
+ GPIO_FN(MCP_D5_MCP_NAF5), \
+ GPIO_FN(MMCD1_5),
+ GPIO_FN(MCP_D4_MCP_NAF4), \
+ GPIO_FN(MMCD1_4),
+ GPIO_FN(MCP_D3_MCP_NAF3), \
+ GPIO_FN(MMCD1_3),
+ GPIO_FN(MCP_D2_MCP_NAF2), \
+ GPIO_FN(MMCD1_2),
+ GPIO_FN(MCP_D1_MCP_NAF1), \
+ GPIO_FN(MMCD1_1),
+ GPIO_FN(MCP_D0_MCP_NAF0), \
+ GPIO_FN(MMCD1_0),
+ GPIO_FN(MCP_NBRSTOUT_),
+ GPIO_FN(MCP_WE0__MCP_FWE), \
+ GPIO_FN(MCP_RDWR_MCP_FWE),
+
+ /* MSEL2 special cases */
+ GPIO_FN(TSIF2_TS_XX1),
+ GPIO_FN(TSIF2_TS_XX2),
+ GPIO_FN(TSIF2_TS_XX3),
+ GPIO_FN(TSIF2_TS_XX4),
+ GPIO_FN(TSIF2_TS_XX5),
+ GPIO_FN(TSIF1_TS_XX1),
+ GPIO_FN(TSIF1_TS_XX2),
+ GPIO_FN(TSIF1_TS_XX3),
+ GPIO_FN(TSIF1_TS_XX4),
+ GPIO_FN(TSIF1_TS_XX5),
+ GPIO_FN(TSIF0_TS_XX1),
+ GPIO_FN(TSIF0_TS_XX2),
+ GPIO_FN(TSIF0_TS_XX3),
+ GPIO_FN(TSIF0_TS_XX4),
+ GPIO_FN(TSIF0_TS_XX5),
+ GPIO_FN(MST1_TS_XX1),
+ GPIO_FN(MST1_TS_XX2),
+ GPIO_FN(MST1_TS_XX3),
+ GPIO_FN(MST1_TS_XX4),
+ GPIO_FN(MST1_TS_XX5),
+ GPIO_FN(MST0_TS_XX1),
+ GPIO_FN(MST0_TS_XX2),
+ GPIO_FN(MST0_TS_XX3),
+ GPIO_FN(MST0_TS_XX4),
+ GPIO_FN(MST0_TS_XX5),
+
+ /* MSEL3 special cases */
+ GPIO_FN(SDHI0_VCCQ_MC0_ON),
+ GPIO_FN(SDHI0_VCCQ_MC0_OFF),
+ GPIO_FN(DEBUG_MON_VIO),
+ GPIO_FN(DEBUG_MON_LCDD),
+ GPIO_FN(LCDC_LCDC0),
+ GPIO_FN(LCDC_LCDC1),
+
+ /* MSEL4 special cases */
+ GPIO_FN(IRQ9_MEM_INT),
+ GPIO_FN(IRQ9_MCP_INT),
+ GPIO_FN(A11),
+ GPIO_FN(KEYOUT8),
+ GPIO_FN(TPU4TO3),
+ GPIO_FN(RESETA_N_PU_ON),
+ GPIO_FN(RESETA_N_PU_OFF),
+ GPIO_FN(EDBGREQ_PD),
+ GPIO_FN(EDBGREQ_PU),
+
+ /* Functions with pull-ups */
+ GPIO_FN(KEYIN0_PU),
+ GPIO_FN(KEYIN1_PU),
+ GPIO_FN(KEYIN2_PU),
+ GPIO_FN(KEYIN3_PU),
+ GPIO_FN(KEYIN4_PU),
+ GPIO_FN(KEYIN5_PU),
+ GPIO_FN(KEYIN6_PU),
+ GPIO_FN(KEYIN7_PU),
+ GPIO_FN(SDHICD0_PU),
+ GPIO_FN(SDHID0_0_PU),
+ GPIO_FN(SDHID0_1_PU),
+ GPIO_FN(SDHID0_2_PU),
+ GPIO_FN(SDHID0_3_PU),
+ GPIO_FN(SDHICMD0_PU),
+ GPIO_FN(SDHIWP0_PU),
+ GPIO_FN(SDHID1_0_PU),
+ GPIO_FN(SDHID1_1_PU),
+ GPIO_FN(SDHID1_2_PU),
+ GPIO_FN(SDHID1_3_PU),
+ GPIO_FN(SDHICMD1_PU),
+ GPIO_FN(SDHID2_0_PU),
+ GPIO_FN(SDHID2_1_PU),
+ GPIO_FN(SDHID2_2_PU),
+ GPIO_FN(SDHID2_3_PU),
+ GPIO_FN(SDHICMD2_PU),
+ GPIO_FN(MMCCMD0_PU),
+ GPIO_FN(MMCCMD1_PU),
+ GPIO_FN(MMCD0_0_PU),
+ GPIO_FN(MMCD0_1_PU),
+ GPIO_FN(MMCD0_2_PU),
+ GPIO_FN(MMCD0_3_PU),
+ GPIO_FN(MMCD0_4_PU),
+ GPIO_FN(MMCD0_5_PU),
+ GPIO_FN(MMCD0_6_PU),
+ GPIO_FN(MMCD0_7_PU),
+ GPIO_FN(FSIACK_PU),
+ GPIO_FN(FSIAILR_PU),
+ GPIO_FN(FSIAIBT_PU),
+ GPIO_FN(FSIAISLD_PU),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ PORTCR(0, 0xe6050000), /* PORT0CR */
+ PORTCR(1, 0xe6050001), /* PORT1CR */
+ PORTCR(2, 0xe6050002), /* PORT2CR */
+ PORTCR(3, 0xe6050003), /* PORT3CR */
+ PORTCR(4, 0xe6050004), /* PORT4CR */
+ PORTCR(5, 0xe6050005), /* PORT5CR */
+ PORTCR(6, 0xe6050006), /* PORT6CR */
+ PORTCR(7, 0xe6050007), /* PORT7CR */
+ PORTCR(8, 0xe6050008), /* PORT8CR */
+ PORTCR(9, 0xe6050009), /* PORT9CR */
+
+ PORTCR(10, 0xe605000a), /* PORT10CR */
+ PORTCR(11, 0xe605000b), /* PORT11CR */
+ PORTCR(12, 0xe605000c), /* PORT12CR */
+ PORTCR(13, 0xe605000d), /* PORT13CR */
+ PORTCR(14, 0xe605000e), /* PORT14CR */
+ PORTCR(15, 0xe605000f), /* PORT15CR */
+ PORTCR(16, 0xe6050010), /* PORT16CR */
+ PORTCR(17, 0xe6050011), /* PORT17CR */
+ PORTCR(18, 0xe6050012), /* PORT18CR */
+ PORTCR(19, 0xe6050013), /* PORT19CR */
+
+ PORTCR(20, 0xe6050014), /* PORT20CR */
+ PORTCR(21, 0xe6050015), /* PORT21CR */
+ PORTCR(22, 0xe6050016), /* PORT22CR */
+ PORTCR(23, 0xe6050017), /* PORT23CR */
+ PORTCR(24, 0xe6050018), /* PORT24CR */
+ PORTCR(25, 0xe6050019), /* PORT25CR */
+ PORTCR(26, 0xe605001a), /* PORT26CR */
+ PORTCR(27, 0xe605001b), /* PORT27CR */
+ PORTCR(28, 0xe605001c), /* PORT28CR */
+ PORTCR(29, 0xe605001d), /* PORT29CR */
+
+ PORTCR(30, 0xe605001e), /* PORT30CR */
+ PORTCR(31, 0xe605001f), /* PORT31CR */
+ PORTCR(32, 0xe6051020), /* PORT32CR */
+ PORTCR(33, 0xe6051021), /* PORT33CR */
+ PORTCR(34, 0xe6051022), /* PORT34CR */
+ PORTCR(35, 0xe6051023), /* PORT35CR */
+ PORTCR(36, 0xe6051024), /* PORT36CR */
+ PORTCR(37, 0xe6051025), /* PORT37CR */
+ PORTCR(38, 0xe6051026), /* PORT38CR */
+ PORTCR(39, 0xe6051027), /* PORT39CR */
+
+ PORTCR(40, 0xe6051028), /* PORT40CR */
+ PORTCR(41, 0xe6051029), /* PORT41CR */
+ PORTCR(42, 0xe605102a), /* PORT42CR */
+ PORTCR(43, 0xe605102b), /* PORT43CR */
+ PORTCR(44, 0xe605102c), /* PORT44CR */
+ PORTCR(45, 0xe605102d), /* PORT45CR */
+ PORTCR(46, 0xe605102e), /* PORT46CR */
+ PORTCR(47, 0xe605102f), /* PORT47CR */
+ PORTCR(48, 0xe6051030), /* PORT48CR */
+ PORTCR(49, 0xe6051031), /* PORT49CR */
+
+ PORTCR(50, 0xe6051032), /* PORT50CR */
+ PORTCR(51, 0xe6051033), /* PORT51CR */
+ PORTCR(52, 0xe6051034), /* PORT52CR */
+ PORTCR(53, 0xe6051035), /* PORT53CR */
+ PORTCR(54, 0xe6051036), /* PORT54CR */
+ PORTCR(55, 0xe6051037), /* PORT55CR */
+ PORTCR(56, 0xe6051038), /* PORT56CR */
+ PORTCR(57, 0xe6051039), /* PORT57CR */
+ PORTCR(58, 0xe605103a), /* PORT58CR */
+ PORTCR(59, 0xe605103b), /* PORT59CR */
+
+ PORTCR(60, 0xe605103c), /* PORT60CR */
+ PORTCR(61, 0xe605103d), /* PORT61CR */
+ PORTCR(62, 0xe605103e), /* PORT62CR */
+ PORTCR(63, 0xe605103f), /* PORT63CR */
+ PORTCR(64, 0xe6051040), /* PORT64CR */
+ PORTCR(65, 0xe6051041), /* PORT65CR */
+ PORTCR(66, 0xe6051042), /* PORT66CR */
+ PORTCR(67, 0xe6051043), /* PORT67CR */
+ PORTCR(68, 0xe6051044), /* PORT68CR */
+ PORTCR(69, 0xe6051045), /* PORT69CR */
+
+ PORTCR(70, 0xe6051046), /* PORT70CR */
+ PORTCR(71, 0xe6051047), /* PORT71CR */
+ PORTCR(72, 0xe6051048), /* PORT72CR */
+ PORTCR(73, 0xe6051049), /* PORT73CR */
+ PORTCR(74, 0xe605104a), /* PORT74CR */
+ PORTCR(75, 0xe605104b), /* PORT75CR */
+ PORTCR(76, 0xe605104c), /* PORT76CR */
+ PORTCR(77, 0xe605104d), /* PORT77CR */
+ PORTCR(78, 0xe605104e), /* PORT78CR */
+ PORTCR(79, 0xe605104f), /* PORT79CR */
+
+ PORTCR(80, 0xe6051050), /* PORT80CR */
+ PORTCR(81, 0xe6051051), /* PORT81CR */
+ PORTCR(82, 0xe6051052), /* PORT82CR */
+ PORTCR(83, 0xe6051053), /* PORT83CR */
+ PORTCR(84, 0xe6051054), /* PORT84CR */
+ PORTCR(85, 0xe6051055), /* PORT85CR */
+ PORTCR(86, 0xe6051056), /* PORT86CR */
+ PORTCR(87, 0xe6051057), /* PORT87CR */
+ PORTCR(88, 0xe6051058), /* PORT88CR */
+ PORTCR(89, 0xe6051059), /* PORT89CR */
+
+ PORTCR(90, 0xe605105a), /* PORT90CR */
+ PORTCR(91, 0xe605105b), /* PORT91CR */
+ PORTCR(92, 0xe605105c), /* PORT92CR */
+ PORTCR(93, 0xe605105d), /* PORT93CR */
+ PORTCR(94, 0xe605105e), /* PORT94CR */
+ PORTCR(95, 0xe605105f), /* PORT95CR */
+ PORTCR(96, 0xe6052060), /* PORT96CR */
+ PORTCR(97, 0xe6052061), /* PORT97CR */
+ PORTCR(98, 0xe6052062), /* PORT98CR */
+ PORTCR(99, 0xe6052063), /* PORT99CR */
+
+ PORTCR(100, 0xe6052064), /* PORT100CR */
+ PORTCR(101, 0xe6052065), /* PORT101CR */
+ PORTCR(102, 0xe6052066), /* PORT102CR */
+ PORTCR(103, 0xe6052067), /* PORT103CR */
+ PORTCR(104, 0xe6052068), /* PORT104CR */
+ PORTCR(105, 0xe6052069), /* PORT105CR */
+ PORTCR(106, 0xe605206a), /* PORT106CR */
+ PORTCR(107, 0xe605206b), /* PORT107CR */
+ PORTCR(108, 0xe605206c), /* PORT108CR */
+ PORTCR(109, 0xe605206d), /* PORT109CR */
+
+ PORTCR(110, 0xe605206e), /* PORT110CR */
+ PORTCR(111, 0xe605206f), /* PORT111CR */
+ PORTCR(112, 0xe6052070), /* PORT112CR */
+ PORTCR(113, 0xe6052071), /* PORT113CR */
+ PORTCR(114, 0xe6052072), /* PORT114CR */
+ PORTCR(115, 0xe6052073), /* PORT115CR */
+ PORTCR(116, 0xe6052074), /* PORT116CR */
+ PORTCR(117, 0xe6052075), /* PORT117CR */
+ PORTCR(118, 0xe6052076), /* PORT118CR */
+
+ PORTCR(128, 0xe6052080), /* PORT128CR */
+ PORTCR(129, 0xe6052081), /* PORT129CR */
+
+ PORTCR(130, 0xe6052082), /* PORT130CR */
+ PORTCR(131, 0xe6052083), /* PORT131CR */
+ PORTCR(132, 0xe6052084), /* PORT132CR */
+ PORTCR(133, 0xe6052085), /* PORT133CR */
+ PORTCR(134, 0xe6052086), /* PORT134CR */
+ PORTCR(135, 0xe6052087), /* PORT135CR */
+ PORTCR(136, 0xe6052088), /* PORT136CR */
+ PORTCR(137, 0xe6052089), /* PORT137CR */
+ PORTCR(138, 0xe605208a), /* PORT138CR */
+ PORTCR(139, 0xe605208b), /* PORT139CR */
+
+ PORTCR(140, 0xe605208c), /* PORT140CR */
+ PORTCR(141, 0xe605208d), /* PORT141CR */
+ PORTCR(142, 0xe605208e), /* PORT142CR */
+ PORTCR(143, 0xe605208f), /* PORT143CR */
+ PORTCR(144, 0xe6052090), /* PORT144CR */
+ PORTCR(145, 0xe6052091), /* PORT145CR */
+ PORTCR(146, 0xe6052092), /* PORT146CR */
+ PORTCR(147, 0xe6052093), /* PORT147CR */
+ PORTCR(148, 0xe6052094), /* PORT148CR */
+ PORTCR(149, 0xe6052095), /* PORT149CR */
+
+ PORTCR(150, 0xe6052096), /* PORT150CR */
+ PORTCR(151, 0xe6052097), /* PORT151CR */
+ PORTCR(152, 0xe6052098), /* PORT152CR */
+ PORTCR(153, 0xe6052099), /* PORT153CR */
+ PORTCR(154, 0xe605209a), /* PORT154CR */
+ PORTCR(155, 0xe605209b), /* PORT155CR */
+ PORTCR(156, 0xe605209c), /* PORT156CR */
+ PORTCR(157, 0xe605209d), /* PORT157CR */
+ PORTCR(158, 0xe605209e), /* PORT158CR */
+ PORTCR(159, 0xe605209f), /* PORT159CR */
+
+ PORTCR(160, 0xe60520a0), /* PORT160CR */
+ PORTCR(161, 0xe60520a1), /* PORT161CR */
+ PORTCR(162, 0xe60520a2), /* PORT162CR */
+ PORTCR(163, 0xe60520a3), /* PORT163CR */
+ PORTCR(164, 0xe60520a4), /* PORT164CR */
+
+ PORTCR(192, 0xe60520c0), /* PORT192CR */
+ PORTCR(193, 0xe60520c1), /* PORT193CR */
+ PORTCR(194, 0xe60520c2), /* PORT194CR */
+ PORTCR(195, 0xe60520c3), /* PORT195CR */
+ PORTCR(196, 0xe60520c4), /* PORT196CR */
+ PORTCR(197, 0xe60520c5), /* PORT197CR */
+ PORTCR(198, 0xe60520c6), /* PORT198CR */
+ PORTCR(199, 0xe60520c7), /* PORT199CR */
+
+ PORTCR(200, 0xe60520c8), /* PORT200CR */
+ PORTCR(201, 0xe60520c9), /* PORT201CR */
+ PORTCR(202, 0xe60520ca), /* PORT202CR */
+ PORTCR(203, 0xe60520cb), /* PORT203CR */
+ PORTCR(204, 0xe60520cc), /* PORT204CR */
+ PORTCR(205, 0xe60520cd), /* PORT205CR */
+ PORTCR(206, 0xe60520ce), /* PORT206CR */
+ PORTCR(207, 0xe60520cf), /* PORT207CR */
+ PORTCR(208, 0xe60520d0), /* PORT208CR */
+ PORTCR(209, 0xe60520d1), /* PORT209CR */
+
+ PORTCR(210, 0xe60520d2), /* PORT210CR */
+ PORTCR(211, 0xe60520d3), /* PORT211CR */
+ PORTCR(212, 0xe60520d4), /* PORT212CR */
+ PORTCR(213, 0xe60520d5), /* PORT213CR */
+ PORTCR(214, 0xe60520d6), /* PORT214CR */
+ PORTCR(215, 0xe60520d7), /* PORT215CR */
+ PORTCR(216, 0xe60520d8), /* PORT216CR */
+ PORTCR(217, 0xe60520d9), /* PORT217CR */
+ PORTCR(218, 0xe60520da), /* PORT218CR */
+ PORTCR(219, 0xe60520db), /* PORT219CR */
+
+ PORTCR(220, 0xe60520dc), /* PORT220CR */
+ PORTCR(221, 0xe60520dd), /* PORT221CR */
+ PORTCR(222, 0xe60520de), /* PORT222CR */
+ PORTCR(223, 0xe60520df), /* PORT223CR */
+ PORTCR(224, 0xe60530e0), /* PORT224CR */
+ PORTCR(225, 0xe60530e1), /* PORT225CR */
+ PORTCR(226, 0xe60530e2), /* PORT226CR */
+ PORTCR(227, 0xe60530e3), /* PORT227CR */
+ PORTCR(228, 0xe60530e4), /* PORT228CR */
+ PORTCR(229, 0xe60530e5), /* PORT229CR */
+
+ PORTCR(230, 0xe60530e6), /* PORT230CR */
+ PORTCR(231, 0xe60530e7), /* PORT231CR */
+ PORTCR(232, 0xe60530e8), /* PORT232CR */
+ PORTCR(233, 0xe60530e9), /* PORT233CR */
+ PORTCR(234, 0xe60530ea), /* PORT234CR */
+ PORTCR(235, 0xe60530eb), /* PORT235CR */
+ PORTCR(236, 0xe60530ec), /* PORT236CR */
+ PORTCR(237, 0xe60530ed), /* PORT237CR */
+ PORTCR(238, 0xe60530ee), /* PORT238CR */
+ PORTCR(239, 0xe60530ef), /* PORT239CR */
+
+ PORTCR(240, 0xe60530f0), /* PORT240CR */
+ PORTCR(241, 0xe60530f1), /* PORT241CR */
+ PORTCR(242, 0xe60530f2), /* PORT242CR */
+ PORTCR(243, 0xe60530f3), /* PORT243CR */
+ PORTCR(244, 0xe60530f4), /* PORT244CR */
+ PORTCR(245, 0xe60530f5), /* PORT245CR */
+ PORTCR(246, 0xe60530f6), /* PORT246CR */
+ PORTCR(247, 0xe60530f7), /* PORT247CR */
+ PORTCR(248, 0xe60530f8), /* PORT248CR */
+ PORTCR(249, 0xe60530f9), /* PORT249CR */
+
+ PORTCR(250, 0xe60530fa), /* PORT250CR */
+ PORTCR(251, 0xe60530fb), /* PORT251CR */
+ PORTCR(252, 0xe60530fc), /* PORT252CR */
+ PORTCR(253, 0xe60530fd), /* PORT253CR */
+ PORTCR(254, 0xe60530fe), /* PORT254CR */
+ PORTCR(255, 0xe60530ff), /* PORT255CR */
+ PORTCR(256, 0xe6053100), /* PORT256CR */
+ PORTCR(257, 0xe6053101), /* PORT257CR */
+ PORTCR(258, 0xe6053102), /* PORT258CR */
+ PORTCR(259, 0xe6053103), /* PORT259CR */
+
+ PORTCR(260, 0xe6053104), /* PORT260CR */
+ PORTCR(261, 0xe6053105), /* PORT261CR */
+ PORTCR(262, 0xe6053106), /* PORT262CR */
+ PORTCR(263, 0xe6053107), /* PORT263CR */
+ PORTCR(264, 0xe6053108), /* PORT264CR */
+ PORTCR(265, 0xe6053109), /* PORT265CR */
+ PORTCR(266, 0xe605310a), /* PORT266CR */
+ PORTCR(267, 0xe605310b), /* PORT267CR */
+ PORTCR(268, 0xe605310c), /* PORT268CR */
+ PORTCR(269, 0xe605310d), /* PORT269CR */
+
+ PORTCR(270, 0xe605310e), /* PORT270CR */
+ PORTCR(271, 0xe605310f), /* PORT271CR */
+ PORTCR(272, 0xe6053110), /* PORT272CR */
+ PORTCR(273, 0xe6053111), /* PORT273CR */
+ PORTCR(274, 0xe6053112), /* PORT274CR */
+ PORTCR(275, 0xe6053113), /* PORT275CR */
+ PORTCR(276, 0xe6053114), /* PORT276CR */
+ PORTCR(277, 0xe6053115), /* PORT277CR */
+ PORTCR(278, 0xe6053116), /* PORT278CR */
+ PORTCR(279, 0xe6053117), /* PORT279CR */
+
+ PORTCR(280, 0xe6053118), /* PORT280CR */
+ PORTCR(281, 0xe6053119), /* PORT281CR */
+ PORTCR(282, 0xe605311a), /* PORT282CR */
+
+ PORTCR(288, 0xe6052120), /* PORT288CR */
+ PORTCR(289, 0xe6052121), /* PORT289CR */
+
+ PORTCR(290, 0xe6052122), /* PORT290CR */
+ PORTCR(291, 0xe6052123), /* PORT291CR */
+ PORTCR(292, 0xe6052124), /* PORT292CR */
+ PORTCR(293, 0xe6052125), /* PORT293CR */
+ PORTCR(294, 0xe6052126), /* PORT294CR */
+ PORTCR(295, 0xe6052127), /* PORT295CR */
+ PORTCR(296, 0xe6052128), /* PORT296CR */
+ PORTCR(297, 0xe6052129), /* PORT297CR */
+ PORTCR(298, 0xe605212a), /* PORT298CR */
+ PORTCR(299, 0xe605212b), /* PORT299CR */
+
+ PORTCR(300, 0xe605212c), /* PORT300CR */
+ PORTCR(301, 0xe605212d), /* PORT301CR */
+ PORTCR(302, 0xe605212e), /* PORT302CR */
+ PORTCR(303, 0xe605212f), /* PORT303CR */
+ PORTCR(304, 0xe6052130), /* PORT304CR */
+ PORTCR(305, 0xe6052131), /* PORT305CR */
+ PORTCR(306, 0xe6052132), /* PORT306CR */
+ PORTCR(307, 0xe6052133), /* PORT307CR */
+ PORTCR(308, 0xe6052134), /* PORT308CR */
+ PORTCR(309, 0xe6052135), /* PORT309CR */
+
+ { PINMUX_CFG_REG("MSEL2CR", 0xe605801c, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1,
+ MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1,
+ MSEL2CR_MSEL17_0, MSEL2CR_MSEL17_1,
+ MSEL2CR_MSEL16_0, MSEL2CR_MSEL16_1,
+ 0, 0,
+ MSEL2CR_MSEL14_0, MSEL2CR_MSEL14_1,
+ MSEL2CR_MSEL13_0, MSEL2CR_MSEL13_1,
+ MSEL2CR_MSEL12_0, MSEL2CR_MSEL12_1,
+ MSEL2CR_MSEL11_0, MSEL2CR_MSEL11_1,
+ MSEL2CR_MSEL10_0, MSEL2CR_MSEL10_1,
+ MSEL2CR_MSEL9_0, MSEL2CR_MSEL9_1,
+ MSEL2CR_MSEL8_0, MSEL2CR_MSEL8_1,
+ MSEL2CR_MSEL7_0, MSEL2CR_MSEL7_1,
+ MSEL2CR_MSEL6_0, MSEL2CR_MSEL6_1,
+ MSEL2CR_MSEL5_0, MSEL2CR_MSEL5_1,
+ MSEL2CR_MSEL4_0, MSEL2CR_MSEL4_1,
+ MSEL2CR_MSEL3_0, MSEL2CR_MSEL3_1,
+ MSEL2CR_MSEL2_0, MSEL2CR_MSEL2_1,
+ MSEL2CR_MSEL1_0, MSEL2CR_MSEL1_1,
+ MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL28_0, MSEL3CR_MSEL28_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL15_0, MSEL3CR_MSEL15_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL11_0, MSEL3CR_MSEL11_1,
+ 0, 0,
+ MSEL3CR_MSEL9_0, MSEL3CR_MSEL9_1,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL6_0, MSEL3CR_MSEL6_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1,
+ 0, 0,
+ 0, 0,
+ }
+ },
+ { PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1) {
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1,
+ 0, 0,
+ MSEL4CR_MSEL27_0, MSEL4CR_MSEL27_1,
+ MSEL4CR_MSEL26_0, MSEL4CR_MSEL26_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL22_0, MSEL4CR_MSEL22_1,
+ MSEL4CR_MSEL21_0, MSEL4CR_MSEL21_1,
+ MSEL4CR_MSEL20_0, MSEL4CR_MSEL20_1,
+ MSEL4CR_MSEL19_0, MSEL4CR_MSEL19_1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL15_0, MSEL4CR_MSEL15_1,
+ 0, 0,
+ MSEL4CR_MSEL13_0, MSEL4CR_MSEL13_1,
+ MSEL4CR_MSEL12_0, MSEL4CR_MSEL12_1,
+ MSEL4CR_MSEL11_0, MSEL4CR_MSEL11_1,
+ MSEL4CR_MSEL10_0, MSEL4CR_MSEL10_1,
+ MSEL4CR_MSEL9_0, MSEL4CR_MSEL9_1,
+ MSEL4CR_MSEL8_0, MSEL4CR_MSEL8_1,
+ MSEL4CR_MSEL7_0, MSEL4CR_MSEL7_1,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL4_0, MSEL4CR_MSEL4_1,
+ 0, 0,
+ 0, 0,
+ MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1,
+ 0, 0,
+ }
+ },
+ { },
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32) {
+ PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
+ PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
+ PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
+ PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA,
+ PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
+ PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
+ PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
+ PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA }
+ },
+ { PINMUX_DATA_REG("PORTD063_032DR", 0xe6055000, 32) {
+ PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA,
+ PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA,
+ PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA,
+ PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA,
+ PORT47_DATA, PORT46_DATA, PORT45_DATA, PORT44_DATA,
+ PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA,
+ PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
+ PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA }
+ },
+ { PINMUX_DATA_REG("PORTD095_064DR", 0xe6055004, 32) {
+ PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
+ PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
+ PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA,
+ PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA,
+ PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
+ PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
+ PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
+ PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR127_096DR", 0xe6056000, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, PORT118_DATA, PORT117_DATA, PORT116_DATA,
+ PORT115_DATA, PORT114_DATA, PORT113_DATA, PORT112_DATA,
+ PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
+ PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
+ PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
+ PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR159_128DR", 0xe6056004, 32) {
+ PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA,
+ PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA,
+ PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA,
+ PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA,
+ PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA,
+ PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA,
+ PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA,
+ PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056008, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PORT164_DATA,
+ PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR223_192DR", 0xe605600C, 32) {
+ PORT223_DATA, PORT222_DATA, PORT221_DATA, PORT220_DATA,
+ PORT219_DATA, PORT218_DATA, PORT217_DATA, PORT216_DATA,
+ PORT215_DATA, PORT214_DATA, PORT213_DATA, PORT212_DATA,
+ PORT211_DATA, PORT210_DATA, PORT209_DATA, PORT208_DATA,
+ PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA,
+ PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA,
+ PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA,
+ PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA }
+ },
+ { PINMUX_DATA_REG("PORTU255_224DR", 0xe6057000, 32) {
+ PORT255_DATA, PORT254_DATA, PORT253_DATA, PORT252_DATA,
+ PORT251_DATA, PORT250_DATA, PORT249_DATA, PORT248_DATA,
+ PORT247_DATA, PORT246_DATA, PORT245_DATA, PORT244_DATA,
+ PORT243_DATA, PORT242_DATA, PORT241_DATA, PORT240_DATA,
+ PORT239_DATA, PORT238_DATA, PORT237_DATA, PORT236_DATA,
+ PORT235_DATA, PORT234_DATA, PORT233_DATA, PORT232_DATA,
+ PORT231_DATA, PORT230_DATA, PORT229_DATA, PORT228_DATA,
+ PORT227_DATA, PORT226_DATA, PORT225_DATA, PORT224_DATA }
+ },
+ { PINMUX_DATA_REG("PORTU287_256DR", 0xe6057004, 32) {
+ 0, 0, 0, 0,
+ 0, PORT282_DATA, PORT281_DATA, PORT280_DATA,
+ PORT279_DATA, PORT278_DATA, PORT277_DATA, PORT276_DATA,
+ PORT275_DATA, PORT274_DATA, PORT273_DATA, PORT272_DATA,
+ PORT271_DATA, PORT270_DATA, PORT269_DATA, PORT268_DATA,
+ PORT267_DATA, PORT266_DATA, PORT265_DATA, PORT264_DATA,
+ PORT263_DATA, PORT262_DATA, PORT261_DATA, PORT260_DATA,
+ PORT259_DATA, PORT258_DATA, PORT257_DATA, PORT256_DATA }
+ },
+ { PINMUX_DATA_REG("PORTR319_288DR", 0xe6056010, 32) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, PORT309_DATA, PORT308_DATA,
+ PORT307_DATA, PORT306_DATA, PORT305_DATA, PORT304_DATA,
+ PORT303_DATA, PORT302_DATA, PORT301_DATA, PORT300_DATA,
+ PORT299_DATA, PORT298_DATA, PORT297_DATA, PORT296_DATA,
+ PORT295_DATA, PORT294_DATA, PORT293_DATA, PORT292_DATA,
+ PORT291_DATA, PORT290_DATA, PORT289_DATA, PORT288_DATA }
+ },
+ { },
+};
+
+/* IRQ pins through INTCS with IRQ0-15 from 0x200 and IRQ16-31 from 0x3200 */
+#define EXT_IRQ16L(n) intcs_evt2irq(0x200 + ((n) << 5))
+#define EXT_IRQ16H(n) intcs_evt2irq(0x3200 + (((n) - 16) << 5))
+
+static struct pinmux_irq pinmux_irqs[] = {
+ PINMUX_IRQ(EXT_IRQ16H(19), PORT9_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(1), PORT10_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(0), PORT11_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(18), PORT13_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(20), PORT14_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(21), PORT15_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(31), PORT26_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(30), PORT27_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(29), PORT28_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(22), PORT40_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(23), PORT53_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(10), PORT54_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(9), PORT56_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(26), PORT115_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(27), PORT116_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(28), PORT117_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(24), PORT118_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(6), PORT147_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(2), PORT149_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(7), PORT150_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(12), PORT156_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(4), PORT159_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(25), PORT164_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(8), PORT223_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(3), PORT224_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(5), PORT227_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(17), PORT234_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(11), PORT238_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(13), PORT239_FN0),
+ PINMUX_IRQ(EXT_IRQ16H(16), PORT249_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(14), PORT251_FN0),
+ PINMUX_IRQ(EXT_IRQ16L(9), PORT308_FN0),
+};
+
+struct sh_pfc_soc_info sh73a0_pinmux_info = {
+ .name = "sh73a0_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PORT0,
+ .last_gpio = GPIO_FN_FSIAISLD_PU,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+
+ .gpio_irq = pinmux_irqs,
+ .gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7720.c b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
new file mode 100644
index 000000000000..10872ed688a6
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
@@ -0,0 +1,1236 @@
+/*
+ * SH7720 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7720.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTP4_DATA, PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE6_IN, PTE5_IN, PTE4_IN,
+ PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTG6_IN, PTG5_IN, PTG4_IN,
+ PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN,
+ PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTJ6_IN, PTJ5_IN, PTJ4_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN, PTL3_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTP4_IN, PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS4_IN, PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
+ PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTB7_IN_PU, PTB6_IN_PU, PTB5_IN_PU, PTB4_IN_PU,
+ PTB3_IN_PU, PTB2_IN_PU, PTB1_IN_PU, PTB0_IN_PU,
+ PTC7_IN_PU, PTC6_IN_PU, PTC5_IN_PU, PTC4_IN_PU,
+ PTC3_IN_PU, PTC2_IN_PU, PTC1_IN_PU, PTC0_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
+ PTE4_IN_PU, PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
+ PTF0_IN_PU,
+ PTG6_IN_PU, PTG5_IN_PU, PTG4_IN_PU,
+ PTG3_IN_PU, PTG2_IN_PU, PTG1_IN_PU, PTG0_IN_PU,
+ PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
+ PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
+ PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU,
+ PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
+ PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
+ PTL7_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU, PTL3_IN_PU,
+ PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
+ PTM3_IN_PU, PTM2_IN_PU, PTM1_IN_PU, PTM0_IN_PU,
+ PTP4_IN_PU, PTP3_IN_PU, PTP2_IN_PU, PTP1_IN_PU, PTP0_IN_PU,
+ PTR7_IN_PU, PTR6_IN_PU, PTR5_IN_PU, PTR4_IN_PU,
+ PTR3_IN_PU, PTR2_IN_PU, PTR1_IN_PU, PTR0_IN_PU,
+ PTS4_IN_PU, PTS3_IN_PU, PTS2_IN_PU, PTS1_IN_PU, PTS0_IN_PU,
+ PTT4_IN_PU, PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
+ PTU4_IN_PU, PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
+ PTV4_IN_PU, PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF0_OUT,
+ PTG6_OUT, PTG5_OUT, PTG4_OUT,
+ PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTJ6_OUT, PTJ5_OUT, PTJ4_OUT,
+ PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT, PTL3_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTP4_OUT, PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT,
+ PTS4_OUT, PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE6_FN, PTE5_FN, PTE4_FN,
+ PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG6_FN, PTG5_FN, PTG4_FN,
+ PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTJ6_FN, PTJ5_FN, PTJ4_FN,
+ PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN, PTL3_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTP4_FN, PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS4_FN, PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV4_FN, PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+
+ PSELA_1_0_00, PSELA_1_0_01, PSELA_1_0_10,
+ PSELA_3_2_00, PSELA_3_2_01, PSELA_3_2_10, PSELA_3_2_11,
+ PSELA_5_4_00, PSELA_5_4_01, PSELA_5_4_10, PSELA_5_4_11,
+ PSELA_7_6_00, PSELA_7_6_01, PSELA_7_6_10,
+ PSELA_9_8_00, PSELA_9_8_01, PSELA_9_8_10,
+ PSELA_11_10_00, PSELA_11_10_01, PSELA_11_10_10,
+ PSELA_13_12_00, PSELA_13_12_10,
+ PSELA_15_14_00, PSELA_15_14_10,
+ PSELB_9_8_00, PSELB_9_8_11,
+ PSELB_11_10_00, PSELB_11_10_01, PSELB_11_10_10, PSELB_11_10_11,
+ PSELB_13_12_00, PSELB_13_12_01, PSELB_13_12_10, PSELB_13_12_11,
+ PSELB_15_14_00, PSELB_15_14_11,
+ PSELC_9_8_00, PSELC_9_8_10,
+ PSELC_11_10_00, PSELC_11_10_10,
+ PSELC_13_12_00, PSELC_13_12_01, PSELC_13_12_10,
+ PSELC_15_14_00, PSELC_15_14_01, PSELC_15_14_10,
+ PSELD_1_0_00, PSELD_1_0_10,
+ PSELD_11_10_00, PSELD_11_10_01,
+ PSELD_15_14_00, PSELD_15_14_01, PSELD_15_14_10,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+ D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+ D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+ IOIS16_MARK, RAS_MARK, CAS_MARK, CKE_MARK,
+ CS5B_CE1A_MARK, CS6B_CE1B_MARK,
+ A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+ A21_MARK, A20_MARK, A19_MARK, A0_MARK,
+ REFOUT_MARK, IRQOUT_MARK,
+ LCD_DATA15_MARK, LCD_DATA14_MARK,
+ LCD_DATA13_MARK, LCD_DATA12_MARK,
+ LCD_DATA11_MARK, LCD_DATA10_MARK,
+ LCD_DATA9_MARK, LCD_DATA8_MARK,
+ LCD_DATA7_MARK, LCD_DATA6_MARK,
+ LCD_DATA5_MARK, LCD_DATA4_MARK,
+ LCD_DATA3_MARK, LCD_DATA2_MARK,
+ LCD_DATA1_MARK, LCD_DATA0_MARK,
+ LCD_M_DISP_MARK,
+ LCD_CL1_MARK, LCD_CL2_MARK,
+ LCD_DON_MARK, LCD_FLM_MARK,
+ LCD_VEPWC_MARK, LCD_VCPWC_MARK,
+ AFE_RXIN_MARK, AFE_RDET_MARK,
+ AFE_FS_MARK, AFE_TXOUT_MARK,
+ AFE_SCLK_MARK, AFE_RLYCNT_MARK,
+ AFE_HC1_MARK,
+ IIC_SCL_MARK, IIC_SDA_MARK,
+ DA1_MARK, DA0_MARK,
+ AN3_MARK, AN2_MARK, AN1_MARK, AN0_MARK, ADTRG_MARK,
+ USB1D_RCV_MARK, USB1D_TXSE0_MARK,
+ USB1D_TXDPLS_MARK, USB1D_DMNS_MARK,
+ USB1D_DPLS_MARK, USB1D_SPEED_MARK,
+ USB1D_TXENL_MARK,
+ USB2_PWR_EN_MARK, USB1_PWR_EN_USBF_UPLUP_MARK, USB1D_SUSPEND_MARK,
+ IRQ5_MARK, IRQ4_MARK,
+ IRQ3_IRL3_MARK, IRQ2_IRL2_MARK,
+ IRQ1_IRL1_MARK, IRQ0_IRL0_MARK,
+ PCC_REG_MARK, PCC_DRV_MARK,
+ PCC_BVD2_MARK, PCC_BVD1_MARK,
+ PCC_CD2_MARK, PCC_CD1_MARK,
+ PCC_RESET_MARK, PCC_RDY_MARK,
+ PCC_VS2_MARK, PCC_VS1_MARK,
+ AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ AUDCK_MARK, AUDSYNC_MARK, ASEBRKAK_MARK, TRST_MARK,
+ TMS_MARK, TDO_MARK, TDI_MARK, TCK_MARK,
+ DACK1_MARK, DREQ1_MARK, DACK0_MARK, DREQ0_MARK,
+ TEND1_MARK, TEND0_MARK,
+ SIOF0_SYNC_MARK, SIOF0_MCLK_MARK,
+ SIOF0_TXD_MARK, SIOF0_RXD_MARK,
+ SIOF0_SCK_MARK,
+ SIOF1_SYNC_MARK, SIOF1_MCLK_MARK,
+ SIOF1_TXD_MARK, SIOF1_RXD_MARK,
+ SIOF1_SCK_MARK,
+ SCIF0_TXD_MARK, SCIF0_RXD_MARK,
+ SCIF0_RTS_MARK, SCIF0_CTS_MARK, SCIF0_SCK_MARK,
+ SCIF1_TXD_MARK, SCIF1_RXD_MARK,
+ SCIF1_RTS_MARK, SCIF1_CTS_MARK, SCIF1_SCK_MARK,
+ TPU_TO1_MARK, TPU_TO0_MARK,
+ TPU_TI3B_MARK, TPU_TI3A_MARK,
+ TPU_TI2B_MARK, TPU_TI2A_MARK,
+ TPU_TO3_MARK, TPU_TO2_MARK,
+ SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
+ MMC_DAT_MARK, MMC_CMD_MARK,
+ MMC_CLK_MARK, MMC_VDDON_MARK,
+ MMC_ODMOD_MARK,
+ STATUS0_MARK, STATUS1_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT, PTA5_IN_PU),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT, PTB7_IN_PU),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT, PTB6_IN_PU),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT, PTB5_IN_PU),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT, PTB4_IN_PU),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT, PTB3_IN_PU),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT, PTB0_IN_PU),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT, PTC7_IN_PU),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT, PTC6_IN_PU),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT, PTC5_IN_PU),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT, PTC4_IN_PU),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT, PTC3_IN_PU),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT, PTC2_IN_PU),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT, PTC1_IN_PU),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT, PTC0_IN_PU),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT, PTD7_IN_PU),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT, PTD6_IN_PU),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT, PTD5_IN_PU),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT, PTD4_IN_PU),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT, PTD3_IN_PU),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT, PTD2_IN_PU),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT, PTD1_IN_PU),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT, PTD0_IN_PU),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE6_DATA, PTE6_IN),
+ PINMUX_DATA(PTE5_DATA, PTE5_IN),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT, PTE4_IN_PU),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT, PTE3_IN_PU),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT, PTE2_IN_PU),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT, PTE1_IN_PU),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT, PTE0_IN_PU),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF6_DATA, PTF6_IN),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT, PTF0_IN_PU),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT, PTG6_IN_PU),
+ PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT, PTG5_IN_PU),
+ PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT, PTG4_IN_PU),
+ PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT, PTG3_IN_PU),
+ PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT, PTG2_IN_PU),
+ PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT, PTG1_IN_PU),
+ PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT, PTG0_IN_PU),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT, PTH6_IN_PU),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT, PTH5_IN_PU),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT, PTH4_IN_PU),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT, PTH3_IN_PU),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT, PTH2_IN_PU),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT, PTH1_IN_PU),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT, PTH0_IN_PU),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT, PTJ6_IN_PU),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT, PTJ5_IN_PU),
+ PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT, PTJ4_IN_PU),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT, PTJ3_IN_PU),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT, PTJ2_IN_PU),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT, PTJ1_IN_PU),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT, PTJ0_IN_PU),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT, PTK3_IN_PU),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT, PTK2_IN_PU),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT, PTK1_IN_PU),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT, PTK0_IN_PU),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT, PTL7_IN_PU),
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT, PTL6_IN_PU),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT, PTL5_IN_PU),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT, PTL4_IN_PU),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT, PTL3_IN_PU),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT, PTM7_IN_PU),
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT, PTM6_IN_PU),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT, PTM5_IN_PU),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT, PTM4_IN_PU),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT, PTM3_IN_PU),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT, PTM2_IN_PU),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT, PTM1_IN_PU),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT, PTM0_IN_PU),
+
+ /* PTP GPIO */
+ PINMUX_DATA(PTP4_DATA, PTP4_IN, PTP4_OUT, PTP4_IN_PU),
+ PINMUX_DATA(PTP3_DATA, PTP3_IN, PTP3_OUT, PTP3_IN_PU),
+ PINMUX_DATA(PTP2_DATA, PTP2_IN, PTP2_OUT, PTP2_IN_PU),
+ PINMUX_DATA(PTP1_DATA, PTP1_IN, PTP1_OUT, PTP1_IN_PU),
+ PINMUX_DATA(PTP0_DATA, PTP0_IN, PTP0_OUT, PTP0_IN_PU),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT, PTR7_IN_PU),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT, PTR6_IN_PU),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT, PTR5_IN_PU),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT, PTR4_IN_PU),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT, PTR3_IN_PU),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT, PTR2_IN_PU),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT, PTR1_IN_PU),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT, PTR0_IN_PU),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT, PTS4_IN_PU),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT, PTS3_IN_PU),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT, PTS2_IN_PU),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT, PTS1_IN_PU),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT, PTS0_IN_PU),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT, PTT4_IN_PU),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT, PTT3_IN_PU),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT, PTT2_IN_PU),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT, PTT1_IN_PU),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT, PTT0_IN_PU),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT, PTU4_IN_PU),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT, PTU3_IN_PU),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT, PTU2_IN_PU),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT, PTU1_IN_PU),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT, PTU0_IN_PU),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT, PTV4_IN_PU),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT, PTV3_IN_PU),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT, PTV2_IN_PU),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT, PTV1_IN_PU),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT, PTV0_IN_PU),
+
+ /* PTA FN */
+ PINMUX_DATA(D23_MARK, PTA7_FN),
+ PINMUX_DATA(D22_MARK, PTA6_FN),
+ PINMUX_DATA(D21_MARK, PTA5_FN),
+ PINMUX_DATA(D20_MARK, PTA4_FN),
+ PINMUX_DATA(D19_MARK, PTA3_FN),
+ PINMUX_DATA(D18_MARK, PTA2_FN),
+ PINMUX_DATA(D17_MARK, PTA1_FN),
+ PINMUX_DATA(D16_MARK, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(D31_MARK, PTB7_FN),
+ PINMUX_DATA(D30_MARK, PTB6_FN),
+ PINMUX_DATA(D29_MARK, PTB5_FN),
+ PINMUX_DATA(D28_MARK, PTB4_FN),
+ PINMUX_DATA(D27_MARK, PTB3_FN),
+ PINMUX_DATA(D26_MARK, PTB2_FN),
+ PINMUX_DATA(D25_MARK, PTB1_FN),
+ PINMUX_DATA(D24_MARK, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(LCD_DATA7_MARK, PTC7_FN),
+ PINMUX_DATA(LCD_DATA6_MARK, PTC6_FN),
+ PINMUX_DATA(LCD_DATA5_MARK, PTC5_FN),
+ PINMUX_DATA(LCD_DATA4_MARK, PTC4_FN),
+ PINMUX_DATA(LCD_DATA3_MARK, PTC3_FN),
+ PINMUX_DATA(LCD_DATA2_MARK, PTC2_FN),
+ PINMUX_DATA(LCD_DATA1_MARK, PTC1_FN),
+ PINMUX_DATA(LCD_DATA0_MARK, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(LCD_DATA15_MARK, PTD7_FN),
+ PINMUX_DATA(LCD_DATA14_MARK, PTD6_FN),
+ PINMUX_DATA(LCD_DATA13_MARK, PTD5_FN),
+ PINMUX_DATA(LCD_DATA12_MARK, PTD4_FN),
+ PINMUX_DATA(LCD_DATA11_MARK, PTD3_FN),
+ PINMUX_DATA(LCD_DATA10_MARK, PTD2_FN),
+ PINMUX_DATA(LCD_DATA9_MARK, PTD1_FN),
+ PINMUX_DATA(LCD_DATA8_MARK, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(IIC_SCL_MARK, PSELB_9_8_00, PTE6_FN),
+ PINMUX_DATA(AFE_RXIN_MARK, PSELB_9_8_11, PTE6_FN),
+ PINMUX_DATA(IIC_SDA_MARK, PSELB_9_8_00, PTE5_FN),
+ PINMUX_DATA(AFE_RDET_MARK, PSELB_9_8_11, PTE5_FN),
+ PINMUX_DATA(LCD_M_DISP_MARK, PTE4_FN),
+ PINMUX_DATA(LCD_CL1_MARK, PTE3_FN),
+ PINMUX_DATA(LCD_CL2_MARK, PTE2_FN),
+ PINMUX_DATA(LCD_DON_MARK, PTE1_FN),
+ PINMUX_DATA(LCD_FLM_MARK, PTE0_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(DA1_MARK, PTF6_FN),
+ PINMUX_DATA(DA0_MARK, PTF5_FN),
+ PINMUX_DATA(AN3_MARK, PTF4_FN),
+ PINMUX_DATA(AN2_MARK, PTF3_FN),
+ PINMUX_DATA(AN1_MARK, PTF2_FN),
+ PINMUX_DATA(AN0_MARK, PTF1_FN),
+ PINMUX_DATA(ADTRG_MARK, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(USB1D_RCV_MARK, PSELA_3_2_00, PTG6_FN),
+ PINMUX_DATA(AFE_FS_MARK, PSELA_3_2_01, PTG6_FN),
+ PINMUX_DATA(PCC_REG_MARK, PSELA_3_2_10, PTG6_FN),
+ PINMUX_DATA(IRQ5_MARK, PSELA_3_2_11, PTG6_FN),
+ PINMUX_DATA(USB1D_TXSE0_MARK, PSELA_5_4_00, PTG5_FN),
+ PINMUX_DATA(AFE_TXOUT_MARK, PSELA_5_4_01, PTG5_FN),
+ PINMUX_DATA(PCC_DRV_MARK, PSELA_5_4_10, PTG5_FN),
+ PINMUX_DATA(IRQ4_MARK, PSELA_5_4_11, PTG5_FN),
+ PINMUX_DATA(USB1D_TXDPLS_MARK, PSELA_7_6_00, PTG4_FN),
+ PINMUX_DATA(AFE_SCLK_MARK, PSELA_7_6_01, PTG4_FN),
+ PINMUX_DATA(IOIS16_MARK, PSELA_7_6_10, PTG4_FN),
+ PINMUX_DATA(USB1D_DMNS_MARK, PSELA_9_8_00, PTG3_FN),
+ PINMUX_DATA(AFE_RLYCNT_MARK, PSELA_9_8_01, PTG3_FN),
+ PINMUX_DATA(PCC_BVD2_MARK, PSELA_9_8_10, PTG3_FN),
+ PINMUX_DATA(USB1D_DPLS_MARK, PSELA_11_10_00, PTG2_FN),
+ PINMUX_DATA(AFE_HC1_MARK, PSELA_11_10_01, PTG2_FN),
+ PINMUX_DATA(PCC_BVD1_MARK, PSELA_11_10_10, PTG2_FN),
+ PINMUX_DATA(USB1D_SPEED_MARK, PSELA_13_12_00, PTG1_FN),
+ PINMUX_DATA(PCC_CD2_MARK, PSELA_13_12_10, PTG1_FN),
+ PINMUX_DATA(USB1D_TXENL_MARK, PSELA_15_14_00, PTG0_FN),
+ PINMUX_DATA(PCC_CD1_MARK, PSELA_15_14_10, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(RAS_MARK, PTH6_FN),
+ PINMUX_DATA(CAS_MARK, PTH5_FN),
+ PINMUX_DATA(CKE_MARK, PTH4_FN),
+ PINMUX_DATA(STATUS1_MARK, PTH3_FN),
+ PINMUX_DATA(STATUS0_MARK, PTH2_FN),
+ PINMUX_DATA(USB2_PWR_EN_MARK, PTH1_FN),
+ PINMUX_DATA(USB1_PWR_EN_USBF_UPLUP_MARK, PTH0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(AUDCK_MARK, PTJ6_FN),
+ PINMUX_DATA(ASEBRKAK_MARK, PTJ5_FN),
+ PINMUX_DATA(AUDATA3_MARK, PTJ4_FN),
+ PINMUX_DATA(AUDATA2_MARK, PTJ3_FN),
+ PINMUX_DATA(AUDATA1_MARK, PTJ2_FN),
+ PINMUX_DATA(AUDATA0_MARK, PTJ1_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(PCC_RESET_MARK, PTK3_FN),
+ PINMUX_DATA(PCC_RDY_MARK, PTK2_FN),
+ PINMUX_DATA(PCC_VS2_MARK, PTK1_FN),
+ PINMUX_DATA(PCC_VS1_MARK, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(TRST_MARK, PTL7_FN),
+ PINMUX_DATA(TMS_MARK, PTL6_FN),
+ PINMUX_DATA(TDO_MARK, PTL5_FN),
+ PINMUX_DATA(TDI_MARK, PTL4_FN),
+ PINMUX_DATA(TCK_MARK, PTL3_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(DREQ1_MARK, PTM7_FN),
+ PINMUX_DATA(DREQ0_MARK, PTM6_FN),
+ PINMUX_DATA(DACK1_MARK, PTM5_FN),
+ PINMUX_DATA(DACK0_MARK, PTM4_FN),
+ PINMUX_DATA(TEND1_MARK, PTM3_FN),
+ PINMUX_DATA(TEND0_MARK, PTM2_FN),
+ PINMUX_DATA(CS5B_CE1A_MARK, PTM1_FN),
+ PINMUX_DATA(CS6B_CE1B_MARK, PTM0_FN),
+
+ /* PTP FN */
+ PINMUX_DATA(USB1D_SUSPEND_MARK, PSELA_1_0_00, PTP4_FN),
+ PINMUX_DATA(REFOUT_MARK, PSELA_1_0_01, PTP4_FN),
+ PINMUX_DATA(IRQOUT_MARK, PSELA_1_0_10, PTP4_FN),
+ PINMUX_DATA(IRQ3_IRL3_MARK, PTP3_FN),
+ PINMUX_DATA(IRQ2_IRL2_MARK, PTP2_FN),
+ PINMUX_DATA(IRQ1_IRL1_MARK, PTP1_FN),
+ PINMUX_DATA(IRQ0_IRL0_MARK, PTP0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(A25_MARK, PTR7_FN),
+ PINMUX_DATA(A24_MARK, PTR6_FN),
+ PINMUX_DATA(A23_MARK, PTR5_FN),
+ PINMUX_DATA(A22_MARK, PTR4_FN),
+ PINMUX_DATA(A21_MARK, PTR3_FN),
+ PINMUX_DATA(A20_MARK, PTR2_FN),
+ PINMUX_DATA(A19_MARK, PTR1_FN),
+ PINMUX_DATA(A0_MARK, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(SIOF0_SYNC_MARK, PTS4_FN),
+ PINMUX_DATA(SIOF0_MCLK_MARK, PTS3_FN),
+ PINMUX_DATA(SIOF0_TXD_MARK, PTS2_FN),
+ PINMUX_DATA(SIOF0_RXD_MARK, PTS1_FN),
+ PINMUX_DATA(SIOF0_SCK_MARK, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(SCIF0_CTS_MARK, PSELB_15_14_00, PTT4_FN),
+ PINMUX_DATA(TPU_TO1_MARK, PSELB_15_14_11, PTT4_FN),
+ PINMUX_DATA(SCIF0_RTS_MARK, PSELB_15_14_00, PTT3_FN),
+ PINMUX_DATA(TPU_TO0_MARK, PSELB_15_14_11, PTT3_FN),
+ PINMUX_DATA(SCIF0_TXD_MARK, PTT2_FN),
+ PINMUX_DATA(SCIF0_RXD_MARK, PTT1_FN),
+ PINMUX_DATA(SCIF0_SCK_MARK, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(SIOF1_SYNC_MARK, PTU4_FN),
+ PINMUX_DATA(SIOF1_MCLK_MARK, PSELD_11_10_00, PTU3_FN),
+ PINMUX_DATA(TPU_TI3B_MARK, PSELD_11_10_01, PTU3_FN),
+ PINMUX_DATA(SIOF1_TXD_MARK, PSELD_15_14_00, PTU2_FN),
+ PINMUX_DATA(TPU_TI3A_MARK, PSELD_15_14_01, PTU2_FN),
+ PINMUX_DATA(MMC_DAT_MARK, PSELD_15_14_10, PTU2_FN),
+ PINMUX_DATA(SIOF1_RXD_MARK, PSELC_13_12_00, PTU1_FN),
+ PINMUX_DATA(TPU_TI2B_MARK, PSELC_13_12_01, PTU1_FN),
+ PINMUX_DATA(MMC_CMD_MARK, PSELC_13_12_10, PTU1_FN),
+ PINMUX_DATA(SIOF1_SCK_MARK, PSELC_15_14_00, PTU0_FN),
+ PINMUX_DATA(TPU_TI2A_MARK, PSELC_15_14_01, PTU0_FN),
+ PINMUX_DATA(MMC_CLK_MARK, PSELC_15_14_10, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(SCIF1_CTS_MARK, PSELB_11_10_00, PTV4_FN),
+ PINMUX_DATA(TPU_TO3_MARK, PSELB_11_10_01, PTV4_FN),
+ PINMUX_DATA(MMC_VDDON_MARK, PSELB_11_10_10, PTV4_FN),
+ PINMUX_DATA(LCD_VEPWC_MARK, PSELB_11_10_11, PTV4_FN),
+ PINMUX_DATA(SCIF1_RTS_MARK, PSELB_13_12_00, PTV3_FN),
+ PINMUX_DATA(TPU_TO2_MARK, PSELB_13_12_01, PTV3_FN),
+ PINMUX_DATA(MMC_ODMOD_MARK, PSELB_13_12_10, PTV3_FN),
+ PINMUX_DATA(LCD_VCPWC_MARK, PSELB_13_12_11, PTV3_FN),
+ PINMUX_DATA(SCIF1_TXD_MARK, PSELC_9_8_00, PTV2_FN),
+ PINMUX_DATA(SIM_D_MARK, PSELC_9_8_10, PTV2_FN),
+ PINMUX_DATA(SCIF1_RXD_MARK, PSELC_11_10_00, PTV1_FN),
+ PINMUX_DATA(SIM_RST_MARK, PSELC_11_10_10, PTV1_FN),
+ PINMUX_DATA(SCIF1_SCK_MARK, PSELD_1_0_00, PTV0_FN),
+ PINMUX_DATA(SIM_CLK_MARK, PSELD_1_0_10, PTV0_FN),
+};
+
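+/*
+ * GPIO table: PINMUX_GPIO() binds each GPIO_* number to the data enum or
+ * function mark it controls, grouped by port and then by function block.
+ */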
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG6, PTG6_DATA),
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTP */
+ PINMUX_GPIO(GPIO_PTP4, PTP4_DATA),
+ PINMUX_GPIO(GPIO_PTP3, PTP3_DATA),
+ PINMUX_GPIO(GPIO_PTP2, PTP2_DATA),
+ PINMUX_GPIO(GPIO_PTP1, PTP1_DATA),
+ PINMUX_GPIO(GPIO_PTP0, PTP0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_RAS, RAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CAS, CAS_MARK),
+ PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
+ PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+ PINMUX_GPIO(GPIO_FN_REFOUT, REFOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK),
+
+ /* AFEIF */
+ PINMUX_GPIO(GPIO_FN_AFE_RXIN, AFE_RXIN_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_RDET, AFE_RDET_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_FS, AFE_FS_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_TXOUT, AFE_TXOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_SCLK, AFE_SCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_RLYCNT, AFE_RLYCNT_MARK),
+ PINMUX_GPIO(GPIO_FN_AFE_HC1, AFE_HC1_MARK),
+
+ /* IIC */
+ PINMUX_GPIO(GPIO_FN_IIC_SCL, IIC_SCL_MARK),
+ PINMUX_GPIO(GPIO_FN_IIC_SDA, IIC_SDA_MARK),
+
+ /* DAC */
+ PINMUX_GPIO(GPIO_FN_DA1, DA1_MARK),
+ PINMUX_GPIO(GPIO_FN_DA0, DA0_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_AN3, AN3_MARK),
+ PINMUX_GPIO(GPIO_FN_AN2, AN2_MARK),
+ PINMUX_GPIO(GPIO_FN_AN1, AN1_MARK),
+ PINMUX_GPIO(GPIO_FN_AN0, AN0_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+
+ /* USB */
+ PINMUX_GPIO(GPIO_FN_USB1D_RCV, USB1D_RCV_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_TXSE0, USB1D_TXSE0_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_TXDPLS, USB1D_TXDPLS_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_DMNS, USB1D_DMNS_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_DPLS, USB1D_DPLS_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_SPEED, USB1D_SPEED_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_TXENL, USB1D_TXENL_MARK),
+
+ PINMUX_GPIO(GPIO_FN_USB2_PWR_EN, USB2_PWR_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1_PWR_EN_USBF_UPLUP,
+ USB1_PWR_EN_USBF_UPLUP_MARK),
+ PINMUX_GPIO(GPIO_FN_USB1D_SUSPEND, USB1D_SUSPEND_MARK),
+
+ /* INTC */
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3_IRL3, IRQ3_IRL3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2_IRL2, IRQ2_IRL2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1_IRL1, IRQ1_IRL1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0_IRL0, IRQ0_IRL0_MARK),
+
+ /* PCC */
+ PINMUX_GPIO(GPIO_FN_PCC_REG, PCC_REG_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_DRV, PCC_DRV_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_BVD2, PCC_BVD2_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_BVD1, PCC_BVD1_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_CD2, PCC_CD2_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_CD1, PCC_CD1_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_RESET, PCC_RESET_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_RDY, PCC_RDY_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_VS2, PCC_VS2_MARK),
+ PINMUX_GPIO(GPIO_FN_PCC_VS1, PCC_VS1_MARK),
+
+ /* HUDI */
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_ASEBRKAK, ASEBRKAK_MARK),
+ PINMUX_GPIO(GPIO_FN_TRST, TRST_MARK),
+ PINMUX_GPIO(GPIO_FN_TMS, TMS_MARK),
+ PINMUX_GPIO(GPIO_FN_TDO, TDO_MARK),
+ PINMUX_GPIO(GPIO_FN_TDI, TDI_MARK),
+ PINMUX_GPIO(GPIO_FN_TCK, TCK_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+
+ /* SIOF0 */
+ PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, SIOF0_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_MCLK, SIOF0_MCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK),
+
+ /* SIOF1 */
+ PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_MCLK, SIOF1_MCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK),
+
+ /* SCIF0 */
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+
+ /* SCIF1 */
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+
+ /* TPU */
+ PINMUX_GPIO(GPIO_FN_TPU_TO1, TPU_TO1_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TO0, TPU_TO0_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TI3B, TPU_TI3B_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TI3A, TPU_TI3A_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TI2B, TPU_TI2B_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TI2A, TPU_TI2A_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TO3, TPU_TO3_MARK),
+ PINMUX_GPIO(GPIO_FN_TPU_TO2, TPU_TO2_MARK),
+
+ /* SIM */
+ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+
+ /* MMC */
+ PINMUX_GPIO(GPIO_FN_MMC_DAT, MMC_DAT_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_CMD, MMC_CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_CLK, MMC_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_VDDON, MMC_VDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_ODMOD, MMC_ODMOD_MARK),
+
+ /* SYSC */
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+};
+
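+/*
+ * Pin function controller registers.  PINMUX_CFG_REG(name, addr, 16, 2)
+ * describes a 16-bit register at 'addr' made up of 2-bit fields, listed
+ * from the most significant field down; the four enum IDs per field give
+ * the state selected by field values 0..3, and a 0 entry marks a value
+ * (or a whole field) that is not used.
+ */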
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN,
+ PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN,
+ PTA5_FN, PTA5_OUT, PTA5_IN_PU, PTA5_IN,
+ PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
+ PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
+ PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
+ PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
+ PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ PTB7_FN, PTB7_OUT, PTB7_IN_PU, PTB7_IN,
+ PTB6_FN, PTB6_OUT, PTB6_IN_PU, PTB6_IN,
+ PTB5_FN, PTB5_OUT, PTB5_IN_PU, PTB5_IN,
+ PTB4_FN, PTB4_OUT, PTB4_IN_PU, PTB4_IN,
+ PTB3_FN, PTB3_OUT, PTB3_IN_PU, PTB3_IN,
+ PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
+ PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
+ PTB0_FN, PTB0_OUT, PTB0_IN_PU, PTB0_IN }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ PTC7_FN, PTC7_OUT, PTC7_IN_PU, PTC7_IN,
+ PTC6_FN, PTC6_OUT, PTC6_IN_PU, PTC6_IN,
+ PTC5_FN, PTC5_OUT, PTC5_IN_PU, PTC5_IN,
+ PTC4_FN, PTC4_OUT, PTC4_IN_PU, PTC4_IN,
+ PTC3_FN, PTC3_OUT, PTC3_IN_PU, PTC3_IN,
+ PTC2_FN, PTC2_OUT, PTC2_IN_PU, PTC2_IN,
+ PTC1_FN, PTC1_OUT, PTC1_IN_PU, PTC1_IN,
+ PTC0_FN, PTC0_OUT, PTC0_IN_PU, PTC0_IN }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ PTD7_FN, PTD7_OUT, PTD7_IN_PU, PTD7_IN,
+ PTD6_FN, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
+ PTD5_FN, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
+ PTD4_FN, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
+ PTD3_FN, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
+ PTD2_FN, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
+ PTD1_FN, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
+ PTD0_FN, PTD0_OUT, PTD0_IN_PU, PTD0_IN }
+ },
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ 0, 0, 0, 0,
+ PTE6_FN, 0, 0, PTE6_IN,
+ PTE5_FN, 0, 0, PTE5_IN,
+ PTE4_FN, PTE4_OUT, PTE4_IN_PU, PTE4_IN,
+ PTE3_FN, PTE3_OUT, PTE3_IN_PU, PTE3_IN,
+ PTE2_FN, PTE2_OUT, PTE2_IN_PU, PTE2_IN,
+ PTE1_FN, PTE1_OUT, PTE1_IN_PU, PTE1_IN,
+ PTE0_FN, PTE0_OUT, PTE0_IN_PU, PTE0_IN }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ 0, 0, 0, 0,
+ PTF6_FN, 0, 0, PTF6_IN,
+ PTF5_FN, 0, 0, PTF5_IN,
+ PTF4_FN, 0, 0, PTF4_IN,
+ PTF3_FN, 0, 0, PTF3_IN,
+ PTF2_FN, 0, 0, PTF2_IN,
+ PTF1_FN, 0, 0, PTF1_IN,
+ PTF0_FN, 0, 0, PTF0_IN }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ 0, 0, 0, 0,
+ PTG6_FN, PTG6_OUT, PTG6_IN_PU, PTG6_IN,
+ PTG5_FN, PTG5_OUT, PTG5_IN_PU, PTG5_IN,
+ PTG4_FN, PTG4_OUT, PTG4_IN_PU, PTG4_IN,
+ PTG3_FN, PTG3_OUT, PTG3_IN_PU, PTG3_IN,
+ PTG2_FN, PTG2_OUT, PTG2_IN_PU, PTG2_IN,
+ PTG1_FN, PTG1_OUT, PTG1_IN_PU, PTG1_IN,
+ PTG0_FN, PTG0_OUT, PTG0_IN_PU, PTG0_IN }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ 0, 0, 0, 0,
+ PTH6_FN, PTH6_OUT, PTH6_IN_PU, PTH6_IN,
+ PTH5_FN, PTH5_OUT, PTH5_IN_PU, PTH5_IN,
+ PTH4_FN, PTH4_OUT, PTH4_IN_PU, PTH4_IN,
+ PTH3_FN, PTH3_OUT, PTH3_IN_PU, PTH3_IN,
+ PTH2_FN, PTH2_OUT, PTH2_IN_PU, PTH2_IN,
+ PTH1_FN, PTH1_OUT, PTH1_IN_PU, PTH1_IN,
+ PTH0_FN, PTH0_OUT, PTH0_IN_PU, PTH0_IN }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ 0, 0, 0, 0,
+ PTJ6_FN, PTJ6_OUT, PTJ6_IN_PU, PTJ6_IN,
+ PTJ5_FN, PTJ5_OUT, PTJ5_IN_PU, PTJ5_IN,
+ PTJ4_FN, PTJ4_OUT, PTJ4_IN_PU, PTJ4_IN,
+ PTJ3_FN, PTJ3_OUT, PTJ3_IN_PU, PTJ3_IN,
+ PTJ2_FN, PTJ2_OUT, PTJ2_IN_PU, PTJ2_IN,
+ PTJ1_FN, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTK3_FN, PTK3_OUT, PTK3_IN_PU, PTK3_IN,
+ PTK2_FN, PTK2_OUT, PTK2_IN_PU, PTK2_IN,
+ PTK1_FN, PTK1_OUT, PTK1_IN_PU, PTK1_IN,
+ PTK0_FN, PTK0_OUT, PTK0_IN_PU, PTK0_IN }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ PTL7_FN, PTL7_OUT, PTL7_IN_PU, PTL7_IN,
+ PTL6_FN, PTL6_OUT, PTL6_IN_PU, PTL6_IN,
+ PTL5_FN, PTL5_OUT, PTL5_IN_PU, PTL5_IN,
+ PTL4_FN, PTL4_OUT, PTL4_IN_PU, PTL4_IN,
+ PTL3_FN, PTL3_OUT, PTL3_IN_PU, PTL3_IN,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ PTM7_FN, PTM7_OUT, PTM7_IN_PU, PTM7_IN,
+ PTM6_FN, PTM6_OUT, PTM6_IN_PU, PTM6_IN,
+ PTM5_FN, PTM5_OUT, PTM5_IN_PU, PTM5_IN,
+ PTM4_FN, PTM4_OUT, PTM4_IN_PU, PTM4_IN,
+ PTM3_FN, PTM3_OUT, PTM3_IN_PU, PTM3_IN,
+ PTM2_FN, PTM2_OUT, PTM2_IN_PU, PTM2_IN,
+ PTM1_FN, PTM1_OUT, PTM1_IN_PU, PTM1_IN,
+ PTM0_FN, PTM0_OUT, PTM0_IN_PU, PTM0_IN }
+ },
+ { PINMUX_CFG_REG("PPCR", 0xa4050118, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTP4_FN, PTP4_OUT, PTP4_IN_PU, PTP4_IN,
+ PTP3_FN, PTP3_OUT, PTP3_IN_PU, PTP3_IN,
+ PTP2_FN, PTP2_OUT, PTP2_IN_PU, PTP2_IN,
+ PTP1_FN, PTP1_OUT, PTP1_IN_PU, PTP1_IN,
+ PTP0_FN, PTP0_OUT, PTP0_IN_PU, PTP0_IN }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xa405011a, 16, 2) {
+ PTR7_FN, PTR7_OUT, PTR7_IN_PU, PTR7_IN,
+ PTR6_FN, PTR6_OUT, PTR6_IN_PU, PTR6_IN,
+ PTR5_FN, PTR5_OUT, PTR5_IN_PU, PTR5_IN,
+ PTR4_FN, PTR4_OUT, PTR4_IN_PU, PTR4_IN,
+ PTR3_FN, PTR3_OUT, PTR3_IN_PU, PTR3_IN,
+ PTR2_FN, PTR2_OUT, PTR2_IN_PU, PTR2_IN,
+ PTR1_FN, PTR1_OUT, PTR1_IN_PU, PTR1_IN,
+ PTR0_FN, PTR0_OUT, PTR0_IN_PU, PTR0_IN }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xa405011c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTS4_FN, PTS4_OUT, PTS4_IN_PU, PTS4_IN,
+ PTS3_FN, PTS3_OUT, PTS3_IN_PU, PTS3_IN,
+ PTS2_FN, PTS2_OUT, PTS2_IN_PU, PTS2_IN,
+ PTS1_FN, PTS1_OUT, PTS1_IN_PU, PTS1_IN,
+ PTS0_FN, PTS0_OUT, PTS0_IN_PU, PTS0_IN }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xa405011e, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTT4_FN, PTT4_OUT, PTT4_IN_PU, PTT4_IN,
+ PTT3_FN, PTT3_OUT, PTT3_IN_PU, PTT3_IN,
+ PTT2_FN, PTT2_OUT, PTT2_IN_PU, PTT2_IN,
+ PTT1_FN, PTT1_OUT, PTT1_IN_PU, PTT1_IN,
+ PTT0_FN, PTT0_OUT, PTT0_IN_PU, PTT0_IN }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xa4050120, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTU4_FN, PTU4_OUT, PTU4_IN_PU, PTU4_IN,
+ PTU3_FN, PTU3_OUT, PTU3_IN_PU, PTU3_IN,
+ PTU2_FN, PTU2_OUT, PTU2_IN_PU, PTU2_IN,
+ PTU1_FN, PTU1_OUT, PTU1_IN_PU, PTU1_IN,
+ PTU0_FN, PTU0_OUT, PTU0_IN_PU, PTU0_IN }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xa4050122, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTV4_FN, PTV4_OUT, PTV4_IN_PU, PTV4_IN,
+ PTV3_FN, PTV3_OUT, PTV3_IN_PU, PTV3_IN,
+ PTV2_FN, PTV2_OUT, PTV2_IN_PU, PTV2_IN,
+ PTV1_FN, PTV1_OUT, PTV1_IN_PU, PTV1_IN,
+ PTV0_FN, PTV0_OUT, PTV0_IN_PU, PTV0_IN }
+ },
+ {}
+};
+
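+/*
+ * Port data registers.  PINMUX_DATA_REG(name, addr, 8) describes an 8-bit
+ * register at 'addr'; the enum IDs list the pin data bits from bit 7 down
+ * to bit 0, with 0 marking bits that have no pin.
+ */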
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xa4050140, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xa4050142, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xa4050144, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xa4050148, 8) {
+ 0, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xa405014a, 8) {
+ 0, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xa405014c, 8) {
+ 0, PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xa405014e, 8) {
+ 0, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xa4050150, 8) {
+ 0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xa4050152, 8) {
+ 0, 0, 0, 0,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xa4050154, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xa4050156, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PPDR", 0xa4050158, 8) {
+ 0, 0, 0, PTP4_DATA,
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xa405015a, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xa405015c, 8) {
+ 0, 0, 0, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xa405015e, 8) {
+ 0, 0, 0, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xa4050160, 8) {
+ 0, 0, 0, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xa4050162, 8) {
+ 0, 0, 0, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { },
+};
+
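+/*
+ * SoC description handed to the sh-pfc core: the *_BEGIN/*_END pairs
+ * bound the enum ID ranges defined above, and first_gpio/last_gpio give
+ * the GPIO range covered by pinmux_gpios[].
+ */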
+struct sh_pfc_soc_info sh7720_pinmux_info = {
+ .name = "sh7720_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_STATUS1,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7722.c b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
new file mode 100644
index 000000000000..2de0929315e6
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
@@ -0,0 +1,1779 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7722.h>
+
+#include "sh_pfc.h"
+
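+/*
+ * The SH7722 tables below follow broadly the same structure as the
+ * SH7720 ones above: pin state enums and function marks first, then the
+ * mux-select enums, followed by the pinmux_data[], GPIO and register
+ * tables.
+ */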
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE1_DATA, PTE0_DATA,
+ PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC5_IN, PTC4_IN, PTC3_IN, PTC2_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN, PTD3_IN, PTD2_IN, PTD1_IN,
+ PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN, PTE1_IN, PTE0_IN,
+ PTF6_IN, PTF5_IN, PTF4_IN, PTF3_IN, PTF2_IN, PTF1_IN,
+ PTH6_IN, PTH5_IN, PTH1_IN, PTH0_IN,
+ PTJ1_IN, PTJ0_IN,
+ PTK6_IN, PTK5_IN, PTK4_IN, PTK3_IN, PTK2_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTQ5_IN, PTQ4_IN, PTQ3_IN, PTQ2_IN, PTQ0_IN,
+ PTR2_IN,
+ PTS4_IN, PTS2_IN, PTS1_IN,
+ PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN,
+ PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW6_IN, PTW4_IN, PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX6_IN, PTX5_IN, PTX4_IN, PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY5_IN, PTY4_IN, PTY3_IN, PTY2_IN, PTY0_IN,
+ PTZ5_IN, PTZ4_IN, PTZ3_IN, PTZ2_IN, PTZ1_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLDOWN_BEGIN,
+ PTA7_IN_PD, PTA6_IN_PD, PTA5_IN_PD, PTA4_IN_PD,
+ PTA3_IN_PD, PTA2_IN_PD, PTA1_IN_PD, PTA0_IN_PD,
+ PTE7_IN_PD, PTE6_IN_PD, PTE5_IN_PD, PTE4_IN_PD, PTE1_IN_PD, PTE0_IN_PD,
+ PTF6_IN_PD, PTF5_IN_PD, PTF4_IN_PD, PTF3_IN_PD, PTF2_IN_PD, PTF1_IN_PD,
+ PTH6_IN_PD, PTH5_IN_PD, PTH1_IN_PD, PTH0_IN_PD,
+ PTK6_IN_PD, PTK5_IN_PD, PTK4_IN_PD, PTK3_IN_PD, PTK2_IN_PD, PTK0_IN_PD,
+ PTL7_IN_PD, PTL6_IN_PD, PTL5_IN_PD, PTL4_IN_PD,
+ PTL3_IN_PD, PTL2_IN_PD, PTL1_IN_PD, PTL0_IN_PD,
+ PTM7_IN_PD, PTM6_IN_PD, PTM5_IN_PD, PTM4_IN_PD,
+ PTM3_IN_PD, PTM2_IN_PD, PTM1_IN_PD, PTM0_IN_PD,
+ PTQ5_IN_PD, PTQ4_IN_PD, PTQ3_IN_PD, PTQ2_IN_PD,
+ PTS4_IN_PD, PTS2_IN_PD, PTS1_IN_PD,
+ PTT4_IN_PD, PTT3_IN_PD, PTT2_IN_PD, PTT1_IN_PD,
+ PTU4_IN_PD, PTU3_IN_PD, PTU2_IN_PD, PTU1_IN_PD, PTU0_IN_PD,
+ PTV4_IN_PD, PTV3_IN_PD, PTV2_IN_PD, PTV1_IN_PD, PTV0_IN_PD,
+ PTW6_IN_PD, PTW4_IN_PD, PTW3_IN_PD, PTW2_IN_PD, PTW1_IN_PD, PTW0_IN_PD,
+ PTX6_IN_PD, PTX5_IN_PD, PTX4_IN_PD,
+ PTX3_IN_PD, PTX2_IN_PD, PTX1_IN_PD, PTX0_IN_PD,
+ PINMUX_INPUT_PULLDOWN_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTC7_IN_PU, PTC5_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU,
+ PTJ1_IN_PU, PTJ0_IN_PU,
+ PTQ0_IN_PU,
+ PTR2_IN_PU,
+ PTX6_IN_PU,
+ PTY5_IN_PU, PTY4_IN_PU, PTY3_IN_PU, PTY2_IN_PU, PTY0_IN_PU,
+ PTZ5_IN_PU, PTZ4_IN_PU, PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA5_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC4_OUT, PTC3_OUT, PTC2_OUT, PTC0_OUT,
+ PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT, PTE1_OUT, PTE0_OUT,
+ PTF6_OUT, PTF5_OUT, PTF4_OUT, PTF3_OUT, PTF2_OUT, PTF0_OUT,
+ PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTJ7_OUT, PTJ6_OUT, PTJ5_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK6_OUT, PTK5_OUT, PTK4_OUT, PTK3_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
+ PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
+ PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
+ PTR4_OUT, PTR3_OUT, PTR1_OUT, PTR0_OUT,
+ PTS3_OUT, PTS2_OUT, PTS0_OUT,
+ PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT0_OUT,
+ PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU0_OUT,
+ PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW5_OUT, PTW4_OUT, PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX6_OUT, PTX5_OUT, PTX4_OUT, PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY5_OUT, PTY4_OUT, PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_MARK_BEGIN,
+ SCIF0_TXD_MARK, SCIF0_RXD_MARK,
+ SCIF0_RTS_MARK, SCIF0_CTS_MARK, SCIF0_SCK_MARK,
+ SCIF1_TXD_MARK, SCIF1_RXD_MARK,
+ SCIF1_RTS_MARK, SCIF1_CTS_MARK, SCIF1_SCK_MARK,
+ SCIF2_TXD_MARK, SCIF2_RXD_MARK,
+ SCIF2_RTS_MARK, SCIF2_CTS_MARK, SCIF2_SCK_MARK,
+ SIOTXD_MARK, SIORXD_MARK,
+ SIOD_MARK, SIOSTRB0_MARK, SIOSTRB1_MARK,
+ SIOSCK_MARK, SIOMCK_MARK,
+ VIO_D15_MARK, VIO_D14_MARK, VIO_D13_MARK, VIO_D12_MARK,
+ VIO_D11_MARK, VIO_D10_MARK, VIO_D9_MARK, VIO_D8_MARK,
+ VIO_D7_MARK, VIO_D6_MARK, VIO_D5_MARK, VIO_D4_MARK,
+ VIO_D3_MARK, VIO_D2_MARK, VIO_D1_MARK, VIO_D0_MARK,
+ VIO_CLK_MARK, VIO_VD_MARK, VIO_HD_MARK, VIO_FLD_MARK,
+ VIO_CKO_MARK, VIO_STEX_MARK, VIO_STEM_MARK, VIO_VD2_MARK,
+ VIO_HD2_MARK, VIO_CLK2_MARK,
+ LCDD23_MARK, LCDD22_MARK, LCDD21_MARK, LCDD20_MARK,
+ LCDD19_MARK, LCDD18_MARK, LCDD17_MARK, LCDD16_MARK,
+ LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK,
+ LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK,
+ LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK,
+ LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK,
+ LCDLCLK_MARK, LCDDON_MARK, LCDVCPWC_MARK, LCDVEPWC_MARK,
+ LCDVSYN_MARK, LCDDCK_MARK, LCDHSYN_MARK, LCDDISP_MARK,
+ LCDRS_MARK, LCDCS_MARK, LCDWR_MARK, LCDRD_MARK,
+ LCDDON2_MARK, LCDVCPWC2_MARK, LCDVEPWC2_MARK, LCDVSYN2_MARK,
+ LCDCS2_MARK,
+ IOIS16_MARK, A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+ BS_MARK, CS6B_CE1B_MARK, WAIT_MARK, CS6A_CE2B_MARK,
+ HPD63_MARK, HPD62_MARK, HPD61_MARK, HPD60_MARK,
+ HPD59_MARK, HPD58_MARK, HPD57_MARK, HPD56_MARK,
+ HPD55_MARK, HPD54_MARK, HPD53_MARK, HPD52_MARK,
+ HPD51_MARK, HPD50_MARK, HPD49_MARK, HPD48_MARK,
+ HPDQM7_MARK, HPDQM6_MARK, HPDQM5_MARK, HPDQM4_MARK,
+ IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK,
+ IRQ4_MARK, IRQ5_MARK, IRQ6_MARK, IRQ7_MARK,
+ SDHICD_MARK, SDHIWP_MARK, SDHID3_MARK, SDHID2_MARK,
+ SDHID1_MARK, SDHID0_MARK, SDHICMD_MARK, SDHICLK_MARK,
+ SIUAOLR_MARK, SIUAOBT_MARK, SIUAISLD_MARK, SIUAILR_MARK,
+ SIUAIBT_MARK, SIUAOSLD_MARK, SIUMCKA_MARK, SIUFCKA_MARK,
+ SIUBOLR_MARK, SIUBOBT_MARK, SIUBISLD_MARK, SIUBILR_MARK,
+ SIUBIBT_MARK, SIUBOSLD_MARK, SIUMCKB_MARK, SIUFCKB_MARK,
+ AUDSYNC_MARK, AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ DACK_MARK, DREQ0_MARK,
+ DV_CLKI_MARK, DV_CLK_MARK, DV_HSYNC_MARK, DV_VSYNC_MARK,
+ DV_D15_MARK, DV_D14_MARK, DV_D13_MARK, DV_D12_MARK,
+ DV_D11_MARK, DV_D10_MARK, DV_D9_MARK, DV_D8_MARK,
+ DV_D7_MARK, DV_D6_MARK, DV_D5_MARK, DV_D4_MARK,
+ DV_D3_MARK, DV_D2_MARK, DV_D1_MARK, DV_D0_MARK,
+ STATUS0_MARK, PDSTATUS_MARK,
+ SIOF0_MCK_MARK, SIOF0_SCK_MARK,
+ SIOF0_SYNC_MARK, SIOF0_SS1_MARK, SIOF0_SS2_MARK,
+ SIOF0_TXD_MARK, SIOF0_RXD_MARK,
+ SIOF1_MCK_MARK, SIOF1_SCK_MARK,
+ SIOF1_SYNC_MARK, SIOF1_SS1_MARK, SIOF1_SS2_MARK,
+ SIOF1_TXD_MARK, SIOF1_RXD_MARK,
+ SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
+ TS_SDAT_MARK, TS_SCK_MARK, TS_SDEN_MARK, TS_SPSYNC_MARK,
+ IRDA_IN_MARK, IRDA_OUT_MARK,
+ TPUTO_MARK,
+ FCE_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
+ NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FCDE_MARK,
+ FOE_MARK, FSC_MARK, FWE_MARK, FRB_MARK,
+ KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK, KEYIN4_MARK,
+ KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
+ KEYOUT4_IN6_MARK, KEYOUT5_IN5_MARK,
+ PINMUX_MARK_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ VIO_D7_SCIF1_SCK, VIO_D6_SCIF1_RXD, VIO_D5_SCIF1_TXD, VIO_D4,
+ VIO_D3, VIO_D2, VIO_D1, VIO_D0_LCDLCLK,
+ HPD55, HPD54, HPD53, HPD52, HPD51, HPD50, HPD49, HPD48,
+ IOIS16, HPDQM7, HPDQM6, HPDQM5, HPDQM4,
+ SDHICD, SDHIWP, SDHID3, IRQ2_SDHID2, SDHID1, SDHID0, SDHICMD, SDHICLK,
+ A25, A24, A23, A22, IRQ5, IRQ4_BS,
+ PTF6, SIOSCK_SIUBOBT, SIOSTRB1_SIUBOLR,
+ SIOSTRB0_SIUBIBT, SIOD_SIUBILR, SIORXD_SIUBISLD, SIOTXD_SIUBOSLD,
+ AUDSYNC, AUDATA3, AUDATA2, AUDATA1, AUDATA0,
+ LCDVCPWC_LCDVCPWC2, LCDVSYN2_DACK, LCDVSYN, LCDDISP_LCDRS,
+ LCDHSYN_LCDCS, LCDDON_LCDDON2, LCDD17_DV_HSYNC, LCDD16_DV_VSYNC,
+ STATUS0, PDSTATUS, IRQ1, IRQ0,
+ SIUAILR_SIOF1_SS2, SIUAIBT_SIOF1_SS1, SIUAOLR_SIOF1_SYNC,
+ SIUAOBT_SIOF1_SCK, SIUAISLD_SIOF1_RXD, SIUAOSLD_SIOF1_TXD, PTK0,
+ LCDD15_DV_D15, LCDD14_DV_D14, LCDD13_DV_D13, LCDD12_DV_D12,
+ LCDD11_DV_D11, LCDD10_DV_D10, LCDD9_DV_D9, LCDD8_DV_D8,
+ LCDD7_DV_D7, LCDD6_DV_D6, LCDD5_DV_D5, LCDD4_DV_D4,
+ LCDD3_DV_D3, LCDD2_DV_D2, LCDD1_DV_D1, LCDD0_DV_D0,
+ HPD63, HPD62, HPD61, HPD60, HPD59, HPD58, HPD57, HPD56,
+ SIOF0_SS2_SIM_RST, SIOF0_SS1_TS_SPSYNC, SIOF0_SYNC_TS_SDEN,
+ SIOF0_SCK_TS_SCK, PTQ2, PTQ1, PTQ0,
+ LCDRD, CS6B_CE1B_LCDCS2, WAIT, LCDDCK_LCDWR, LCDVEPWC_LCDVEPWC2,
+ SCIF0_CTS_SIUAISPD, SCIF0_RTS_SIUAOSPD,
+ SCIF0_SCK_TPUTO, SCIF0_RXD, SCIF0_TXD,
+ FOE_VIO_VD2, FWE, FSC, DREQ0, FCDE,
+ NAF2_VIO_D10, NAF1_VIO_D9, NAF0_VIO_D8,
+ FRB_VIO_CLK2, FCE_VIO_HD2,
+ NAF7_VIO_D15, NAF6_VIO_D14, NAF5_VIO_D13, NAF4_VIO_D12, NAF3_VIO_D11,
+ VIO_FLD_SCIF2_CTS, VIO_CKO_SCIF2_RTS, VIO_STEX_SCIF2_SCK,
+ VIO_STEM_SCIF2_TXD, VIO_HD_SCIF2_RXD,
+ VIO_VD_SCIF1_CTS, VIO_CLK_SCIF1_RTS,
+ CS6A_CE2B, LCDD23, LCDD22, LCDD21, LCDD20,
+ LCDD19_DV_CLKI, LCDD18_DV_CLK,
+ KEYOUT5_IN5, KEYOUT4_IN6, KEYOUT3, KEYOUT2, KEYOUT1, KEYOUT0,
+ KEYIN4_IRQ7, KEYIN3, KEYIN2, KEYIN1, KEYIN0_IRQ6,
+
+ PSA15_KEYIN0, PSA15_IRQ6, PSA14_KEYIN4, PSA14_IRQ7,
+ PSA9_IRQ4, PSA9_BS, PSA4_IRQ2, PSA4_SDHID2,
+ PSB15_SIOTXD, PSB15_SIUBOSLD, PSB14_SIORXD, PSB14_SIUBISLD,
+ PSB13_SIOD, PSB13_SIUBILR, PSB12_SIOSTRB0, PSB12_SIUBIBT,
+ PSB11_SIOSTRB1, PSB11_SIUBOLR, PSB10_SIOSCK, PSB10_SIUBOBT,
+ PSB9_SIOMCK, PSB9_SIUMCKB, PSB8_SIOF0_MCK, PSB8_IRQ3,
+ PSB7_SIOF0_TXD, PSB7_IRDA_OUT, PSB6_SIOF0_RXD, PSB6_IRDA_IN,
+ PSB5_SIOF0_SCK, PSB5_TS_SCK, PSB4_SIOF0_SYNC, PSB4_TS_SDEN,
+ PSB3_SIOF0_SS1, PSB3_TS_SPSYNC, PSB2_SIOF0_SS2, PSB2_SIM_RST,
+ PSB1_SIUMCKA, PSB1_SIOF1_MCK, PSB0_SIUAOSLD, PSB0_SIOF1_TXD,
+ PSC15_SIUAISLD, PSC15_SIOF1_RXD, PSC14_SIUAOBT, PSC14_SIOF1_SCK,
+ PSC13_SIUAOLR, PSC13_SIOF1_SYNC, PSC12_SIUAIBT, PSC12_SIOF1_SS1,
+ PSC11_SIUAILR, PSC11_SIOF1_SS2, PSC0_NAF, PSC0_VIO,
+ PSD13_VIO, PSD13_SCIF2, PSD12_VIO, PSD12_SCIF1,
+ PSD11_VIO, PSD11_SCIF1, PSD10_VIO_D0, PSD10_LCDLCLK,
+ PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB, PSD8_SCIF0_SCK, PSD8_TPUTO,
+ PSD7_SCIF0_RTS, PSD7_SIUAOSPD, PSD6_SCIF0_CTS, PSD6_SIUAISPD,
+ PSD5_CS6B_CE1B, PSD5_LCDCS2,
+ PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2,
+ PSD2_LCDDON, PSD2_LCDDON2, PSD0_LCDD19_LCDD0, PSD0_DV,
+ PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D,
+ PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK,
+ PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT, PSE12_LCDVSYN2, PSE12_DACK,
+ PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA,
+ PSE3_FLCTL, PSE3_VIO, PSE2_NAF2, PSE2_VIO_D10,
+ PSE1_NAF1, PSE1_VIO_D9, PSE0_NAF0, PSE0_VIO_D8,
+
+ HIZA14_KEYSC, HIZA14_HIZ,
+ HIZA10_NAF, HIZA10_HIZ,
+ HIZA9_VIO, HIZA9_HIZ,
+ HIZA8_LCDC, HIZA8_HIZ,
+ HIZA7_LCDC, HIZA7_HIZ,
+ HIZA6_LCDC, HIZA6_HIZ,
+ HIZB4_SIUA, HIZB4_HIZ,
+ HIZB1_VIO, HIZB1_HIZ,
+ HIZB0_VIO, HIZB0_HIZ,
+ HIZC15_IRQ7, HIZC15_HIZ,
+ HIZC14_IRQ6, HIZC14_HIZ,
+ HIZC13_IRQ5, HIZC13_HIZ,
+ HIZC12_IRQ4, HIZC12_HIZ,
+ HIZC11_IRQ3, HIZC11_HIZ,
+ HIZC10_IRQ2, HIZC10_HIZ,
+ HIZC9_IRQ1, HIZC9_HIZ,
+ HIZC8_IRQ0, HIZC8_HIZ,
+ MSELB9_VIO, MSELB9_VIO2,
+ MSELB8_RGB, MSELB8_SYS,
+ PINMUX_FUNCTION_END,
+};
+
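+/*
+ * As on SH7720, each PINMUX_DATA() entry maps a pin data enum or function
+ * mark to the states/fields that select it.  Many SH7722 marks also
+ * depend on the peripheral select (PS*), hi-Z control (HIZ*) and module
+ * select (MSELB*) fields listed at the end of the enum above.
+ */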
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_IN_PD, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_IN_PD),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_IN_PD, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_IN_PD),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_IN_PD),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_IN_PD),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_IN_PD),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_IN_PD),
+
+ /* PTB */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
+
+ /* PTC */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_IN_PU),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_IN_PU),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
+
+ /* PTD */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_IN_PU),
+ PINMUX_DATA(PTD6_DATA, PTD6_OUT, PTD6_IN, PTD6_IN_PU),
+ PINMUX_DATA(PTD5_DATA, PTD5_OUT, PTD5_IN, PTD5_IN_PU),
+ PINMUX_DATA(PTD4_DATA, PTD4_OUT, PTD4_IN, PTD4_IN_PU),
+ PINMUX_DATA(PTD3_DATA, PTD3_OUT, PTD3_IN, PTD3_IN_PU),
+ PINMUX_DATA(PTD2_DATA, PTD2_OUT, PTD2_IN, PTD2_IN_PU),
+ PINMUX_DATA(PTD1_DATA, PTD1_OUT, PTD1_IN, PTD1_IN_PU),
+ PINMUX_DATA(PTD0_DATA, PTD0_OUT),
+
+ /* PTE */
+ PINMUX_DATA(PTE7_DATA, PTE7_OUT, PTE7_IN, PTE7_IN_PD),
+ PINMUX_DATA(PTE6_DATA, PTE6_OUT, PTE6_IN, PTE6_IN_PD),
+ PINMUX_DATA(PTE5_DATA, PTE5_OUT, PTE5_IN, PTE5_IN_PD),
+ PINMUX_DATA(PTE4_DATA, PTE4_OUT, PTE4_IN, PTE4_IN_PD),
+ PINMUX_DATA(PTE1_DATA, PTE1_OUT, PTE1_IN, PTE1_IN_PD),
+ PINMUX_DATA(PTE0_DATA, PTE0_OUT, PTE0_IN, PTE0_IN_PD),
+
+ /* PTF */
+ PINMUX_DATA(PTF6_DATA, PTF6_OUT, PTF6_IN, PTF6_IN_PD),
+ PINMUX_DATA(PTF5_DATA, PTF5_OUT, PTF5_IN, PTF5_IN_PD),
+ PINMUX_DATA(PTF4_DATA, PTF4_OUT, PTF4_IN, PTF4_IN_PD),
+ PINMUX_DATA(PTF3_DATA, PTF3_OUT, PTF3_IN, PTF3_IN_PD),
+ PINMUX_DATA(PTF2_DATA, PTF2_OUT, PTF2_IN, PTF2_IN_PD),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_IN_PD),
+ PINMUX_DATA(PTF0_DATA, PTF0_OUT),
+
+ /* PTG */
+ PINMUX_DATA(PTG4_DATA, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_OUT),
+
+ /* PTH */
+ PINMUX_DATA(PTH7_DATA, PTH7_OUT),
+ PINMUX_DATA(PTH6_DATA, PTH6_OUT, PTH6_IN, PTH6_IN_PD),
+ PINMUX_DATA(PTH5_DATA, PTH5_OUT, PTH5_IN, PTH5_IN_PD),
+ PINMUX_DATA(PTH4_DATA, PTH4_OUT),
+ PINMUX_DATA(PTH3_DATA, PTH3_OUT),
+ PINMUX_DATA(PTH2_DATA, PTH2_OUT),
+ PINMUX_DATA(PTH1_DATA, PTH1_OUT, PTH1_IN, PTH1_IN_PD),
+ PINMUX_DATA(PTH0_DATA, PTH0_OUT, PTH0_IN, PTH0_IN_PD),
+
+ /* PTJ */
+ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
+ PINMUX_DATA(PTJ6_DATA, PTJ6_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU),
+
+ /* PTK */
+ PINMUX_DATA(PTK6_DATA, PTK6_OUT, PTK6_IN, PTK6_IN_PD),
+ PINMUX_DATA(PTK5_DATA, PTK5_OUT, PTK5_IN, PTK5_IN_PD),
+ PINMUX_DATA(PTK4_DATA, PTK4_OUT, PTK4_IN, PTK4_IN_PD),
+ PINMUX_DATA(PTK3_DATA, PTK3_OUT, PTK3_IN, PTK3_IN_PD),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_IN_PD),
+ PINMUX_DATA(PTK1_DATA, PTK1_OUT),
+ PINMUX_DATA(PTK0_DATA, PTK0_OUT, PTK0_IN, PTK0_IN_PD),
+
+ /* PTL */
+ PINMUX_DATA(PTL7_DATA, PTL7_OUT, PTL7_IN, PTL7_IN_PD),
+ PINMUX_DATA(PTL6_DATA, PTL6_OUT, PTL6_IN, PTL6_IN_PD),
+ PINMUX_DATA(PTL5_DATA, PTL5_OUT, PTL5_IN, PTL5_IN_PD),
+ PINMUX_DATA(PTL4_DATA, PTL4_OUT, PTL4_IN, PTL4_IN_PD),
+ PINMUX_DATA(PTL3_DATA, PTL3_OUT, PTL3_IN, PTL3_IN_PD),
+ PINMUX_DATA(PTL2_DATA, PTL2_OUT, PTL2_IN, PTL2_IN_PD),
+ PINMUX_DATA(PTL1_DATA, PTL1_OUT, PTL1_IN, PTL1_IN_PD),
+ PINMUX_DATA(PTL0_DATA, PTL0_OUT, PTL0_IN, PTL0_IN_PD),
+
+ /* PTM */
+ PINMUX_DATA(PTM7_DATA, PTM7_OUT, PTM7_IN, PTM7_IN_PD),
+ PINMUX_DATA(PTM6_DATA, PTM6_OUT, PTM6_IN, PTM6_IN_PD),
+ PINMUX_DATA(PTM5_DATA, PTM5_OUT, PTM5_IN, PTM5_IN_PD),
+ PINMUX_DATA(PTM4_DATA, PTM4_OUT, PTM4_IN, PTM4_IN_PD),
+ PINMUX_DATA(PTM3_DATA, PTM3_OUT, PTM3_IN, PTM3_IN_PD),
+ PINMUX_DATA(PTM2_DATA, PTM2_OUT, PTM2_IN, PTM2_IN_PD),
+ PINMUX_DATA(PTM1_DATA, PTM1_OUT, PTM1_IN, PTM1_IN_PD),
+ PINMUX_DATA(PTM0_DATA, PTM0_OUT, PTM0_IN, PTM0_IN_PD),
+
+ /* PTN */
+ PINMUX_DATA(PTN7_DATA, PTN7_OUT, PTN7_IN),
+ PINMUX_DATA(PTN6_DATA, PTN6_OUT, PTN6_IN),
+ PINMUX_DATA(PTN5_DATA, PTN5_OUT, PTN5_IN),
+ PINMUX_DATA(PTN4_DATA, PTN4_OUT, PTN4_IN),
+ PINMUX_DATA(PTN3_DATA, PTN3_OUT, PTN3_IN),
+ PINMUX_DATA(PTN2_DATA, PTN2_OUT, PTN2_IN),
+ PINMUX_DATA(PTN1_DATA, PTN1_OUT, PTN1_IN),
+ PINMUX_DATA(PTN0_DATA, PTN0_OUT, PTN0_IN),
+
+ /* PTQ */
+ PINMUX_DATA(PTQ6_DATA, PTQ6_OUT),
+ PINMUX_DATA(PTQ5_DATA, PTQ5_OUT, PTQ5_IN, PTQ5_IN_PD),
+ PINMUX_DATA(PTQ4_DATA, PTQ4_OUT, PTQ4_IN, PTQ4_IN_PD),
+ PINMUX_DATA(PTQ3_DATA, PTQ3_OUT, PTQ3_IN, PTQ3_IN_PD),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_IN_PD),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_OUT),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_OUT, PTQ0_IN, PTQ0_IN_PU),
+
+ /* PTR */
+ PINMUX_DATA(PTR4_DATA, PTR4_OUT),
+ PINMUX_DATA(PTR3_DATA, PTR3_OUT),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+ PINMUX_DATA(PTR1_DATA, PTR1_OUT),
+ PINMUX_DATA(PTR0_DATA, PTR0_OUT),
+
+ /* PTS */
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_IN_PD),
+ PINMUX_DATA(PTS3_DATA, PTS3_OUT),
+ PINMUX_DATA(PTS2_DATA, PTS2_OUT, PTS2_IN, PTS2_IN_PD),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_IN_PD),
+ PINMUX_DATA(PTS0_DATA, PTS0_OUT),
+
+ /* PTT */
+ PINMUX_DATA(PTT4_DATA, PTT4_OUT, PTT4_IN, PTT4_IN_PD),
+ PINMUX_DATA(PTT3_DATA, PTT3_OUT, PTT3_IN, PTT3_IN_PD),
+ PINMUX_DATA(PTT2_DATA, PTT2_OUT, PTT2_IN, PTT2_IN_PD),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_IN_PD),
+ PINMUX_DATA(PTT0_DATA, PTT0_OUT),
+
+ /* PTU */
+ PINMUX_DATA(PTU4_DATA, PTU4_OUT, PTU4_IN, PTU4_IN_PD),
+ PINMUX_DATA(PTU3_DATA, PTU3_OUT, PTU3_IN, PTU3_IN_PD),
+ PINMUX_DATA(PTU2_DATA, PTU2_OUT, PTU2_IN, PTU2_IN_PD),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_IN_PD),
+ PINMUX_DATA(PTU0_DATA, PTU0_OUT, PTU0_IN, PTU0_IN_PD),
+
+ /* PTV */
+ PINMUX_DATA(PTV4_DATA, PTV4_OUT, PTV4_IN, PTV4_IN_PD),
+ PINMUX_DATA(PTV3_DATA, PTV3_OUT, PTV3_IN, PTV3_IN_PD),
+ PINMUX_DATA(PTV2_DATA, PTV2_OUT, PTV2_IN, PTV2_IN_PD),
+ PINMUX_DATA(PTV1_DATA, PTV1_OUT, PTV1_IN, PTV1_IN_PD),
+ PINMUX_DATA(PTV0_DATA, PTV0_OUT, PTV0_IN, PTV0_IN_PD),
+
+ /* PTW */
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_IN_PD),
+ PINMUX_DATA(PTW5_DATA, PTW5_OUT),
+ PINMUX_DATA(PTW4_DATA, PTW4_OUT, PTW4_IN, PTW4_IN_PD),
+ PINMUX_DATA(PTW3_DATA, PTW3_OUT, PTW3_IN, PTW3_IN_PD),
+ PINMUX_DATA(PTW2_DATA, PTW2_OUT, PTW2_IN, PTW2_IN_PD),
+ PINMUX_DATA(PTW1_DATA, PTW1_OUT, PTW1_IN, PTW1_IN_PD),
+ PINMUX_DATA(PTW0_DATA, PTW0_OUT, PTW0_IN, PTW0_IN_PD),
+
+ /* PTX */
+ PINMUX_DATA(PTX6_DATA, PTX6_OUT, PTX6_IN, PTX6_IN_PD),
+ PINMUX_DATA(PTX5_DATA, PTX5_OUT, PTX5_IN, PTX5_IN_PD),
+ PINMUX_DATA(PTX4_DATA, PTX4_OUT, PTX4_IN, PTX4_IN_PD),
+ PINMUX_DATA(PTX3_DATA, PTX3_OUT, PTX3_IN, PTX3_IN_PD),
+ PINMUX_DATA(PTX2_DATA, PTX2_OUT, PTX2_IN, PTX2_IN_PD),
+ PINMUX_DATA(PTX1_DATA, PTX1_OUT, PTX1_IN, PTX1_IN_PD),
+ PINMUX_DATA(PTX0_DATA, PTX0_OUT, PTX0_IN, PTX0_IN_PD),
+
+ /* PTY */
+ PINMUX_DATA(PTY5_DATA, PTY5_OUT, PTY5_IN, PTY5_IN_PU),
+ PINMUX_DATA(PTY4_DATA, PTY4_OUT, PTY4_IN, PTY4_IN_PU),
+ PINMUX_DATA(PTY3_DATA, PTY3_OUT, PTY3_IN, PTY3_IN_PU),
+ PINMUX_DATA(PTY2_DATA, PTY2_OUT, PTY2_IN, PTY2_IN_PU),
+ PINMUX_DATA(PTY1_DATA, PTY1_OUT),
+ PINMUX_DATA(PTY0_DATA, PTY0_OUT, PTY0_IN, PTY0_IN_PU),
+
+ /* PTZ */
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_IN_PU),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_IN_PU),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_IN_PU),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_IN_PU),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_IN_PU),
+
+ /* SCIF0 */
+ PINMUX_DATA(SCIF0_TXD_MARK, SCIF0_TXD),
+ PINMUX_DATA(SCIF0_RXD_MARK, SCIF0_RXD),
+ PINMUX_DATA(SCIF0_RTS_MARK, PSD7_SCIF0_RTS, SCIF0_RTS_SIUAOSPD),
+ PINMUX_DATA(SCIF0_CTS_MARK, PSD6_SCIF0_CTS, SCIF0_CTS_SIUAISPD),
+ PINMUX_DATA(SCIF0_SCK_MARK, PSD8_SCIF0_SCK, SCIF0_SCK_TPUTO),
+
+ /* SCIF1 */
+ PINMUX_DATA(SCIF1_TXD_MARK, PSD11_SCIF1, VIO_D5_SCIF1_TXD),
+ PINMUX_DATA(SCIF1_RXD_MARK, PSD11_SCIF1, VIO_D6_SCIF1_RXD),
+ PINMUX_DATA(SCIF1_RTS_MARK, PSD12_SCIF1, VIO_CLK_SCIF1_RTS),
+ PINMUX_DATA(SCIF1_CTS_MARK, PSD12_SCIF1, VIO_VD_SCIF1_CTS),
+ PINMUX_DATA(SCIF1_SCK_MARK, PSD11_SCIF1, VIO_D7_SCIF1_SCK),
+
+ /* SCIF2 */
+ PINMUX_DATA(SCIF2_TXD_MARK, PSD13_SCIF2, VIO_STEM_SCIF2_TXD),
+ PINMUX_DATA(SCIF2_RXD_MARK, PSD13_SCIF2, VIO_HD_SCIF2_RXD),
+ PINMUX_DATA(SCIF2_RTS_MARK, PSD13_SCIF2, VIO_CKO_SCIF2_RTS),
+ PINMUX_DATA(SCIF2_CTS_MARK, PSD13_SCIF2, VIO_FLD_SCIF2_CTS),
+ PINMUX_DATA(SCIF2_SCK_MARK, PSD13_SCIF2, VIO_STEX_SCIF2_SCK),
+
+ /* SIO */
+ PINMUX_DATA(SIOTXD_MARK, PSB15_SIOTXD, SIOTXD_SIUBOSLD),
+ PINMUX_DATA(SIORXD_MARK, PSB14_SIORXD, SIORXD_SIUBISLD),
+ PINMUX_DATA(SIOD_MARK, PSB13_SIOD, SIOD_SIUBILR),
+ PINMUX_DATA(SIOSTRB0_MARK, PSB12_SIOSTRB0, SIOSTRB0_SIUBIBT),
+ PINMUX_DATA(SIOSTRB1_MARK, PSB11_SIOSTRB1, SIOSTRB1_SIUBOLR),
+ PINMUX_DATA(SIOSCK_MARK, PSB10_SIOSCK, SIOSCK_SIUBOBT),
+ PINMUX_DATA(SIOMCK_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIOMCK, PTF6),
+
+ /* CEU */
+ PINMUX_DATA(VIO_D15_MARK, PSC0_VIO, HIZA10_NAF, NAF7_VIO_D15),
+ PINMUX_DATA(VIO_D14_MARK, PSC0_VIO, HIZA10_NAF, NAF6_VIO_D14),
+ PINMUX_DATA(VIO_D13_MARK, PSC0_VIO, HIZA10_NAF, NAF5_VIO_D13),
+ PINMUX_DATA(VIO_D12_MARK, PSC0_VIO, HIZA10_NAF, NAF4_VIO_D12),
+ PINMUX_DATA(VIO_D11_MARK, PSC0_VIO, HIZA10_NAF, NAF3_VIO_D11),
+ PINMUX_DATA(VIO_D10_MARK, PSE2_VIO_D10, HIZB0_VIO, NAF2_VIO_D10),
+ PINMUX_DATA(VIO_D9_MARK, PSE1_VIO_D9, HIZB0_VIO, NAF1_VIO_D9),
+ PINMUX_DATA(VIO_D8_MARK, PSE0_VIO_D8, HIZB0_VIO, NAF0_VIO_D8),
+ PINMUX_DATA(VIO_D7_MARK, PSD11_VIO, VIO_D7_SCIF1_SCK),
+ PINMUX_DATA(VIO_D6_MARK, PSD11_VIO, VIO_D6_SCIF1_RXD),
+ PINMUX_DATA(VIO_D5_MARK, PSD11_VIO, VIO_D5_SCIF1_TXD),
+ PINMUX_DATA(VIO_D4_MARK, VIO_D4),
+ PINMUX_DATA(VIO_D3_MARK, VIO_D3),
+ PINMUX_DATA(VIO_D2_MARK, VIO_D2),
+ PINMUX_DATA(VIO_D1_MARK, VIO_D1),
+ PINMUX_DATA(VIO_D0_MARK, PSD10_VIO_D0, VIO_D0_LCDLCLK),
+ PINMUX_DATA(VIO_CLK_MARK, PSD12_VIO, MSELB9_VIO, VIO_CLK_SCIF1_RTS),
+ PINMUX_DATA(VIO_VD_MARK, PSD12_VIO, MSELB9_VIO, VIO_VD_SCIF1_CTS),
+ PINMUX_DATA(VIO_HD_MARK, PSD13_VIO, MSELB9_VIO, VIO_HD_SCIF2_RXD),
+ PINMUX_DATA(VIO_FLD_MARK, PSD13_VIO, HIZA9_VIO, VIO_FLD_SCIF2_CTS),
+ PINMUX_DATA(VIO_CKO_MARK, PSD13_VIO, HIZA9_VIO, VIO_CKO_SCIF2_RTS),
+ PINMUX_DATA(VIO_STEX_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEX_SCIF2_SCK),
+ PINMUX_DATA(VIO_STEM_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEM_SCIF2_TXD),
+ PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2,
+ HIZB0_VIO, FOE_VIO_VD2),
+ PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2,
+ HIZB1_VIO, FCE_VIO_HD2),
+ PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2,
+ HIZB1_VIO, FRB_VIO_CLK2),
+
+ /* LCDC */
+ PINMUX_DATA(LCDD23_MARK, HIZA8_LCDC, LCDD23),
+ PINMUX_DATA(LCDD22_MARK, HIZA8_LCDC, LCDD22),
+ PINMUX_DATA(LCDD21_MARK, HIZA8_LCDC, LCDD21),
+ PINMUX_DATA(LCDD20_MARK, HIZA8_LCDC, LCDD20),
+ PINMUX_DATA(LCDD19_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD19_DV_CLKI),
+ PINMUX_DATA(LCDD18_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD18_DV_CLK),
+ PINMUX_DATA(LCDD17_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC,
+ LCDD17_DV_HSYNC),
+ PINMUX_DATA(LCDD16_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC,
+ LCDD16_DV_VSYNC),
+ PINMUX_DATA(LCDD15_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD15_DV_D15),
+ PINMUX_DATA(LCDD14_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD14_DV_D14),
+ PINMUX_DATA(LCDD13_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD13_DV_D13),
+ PINMUX_DATA(LCDD12_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD12_DV_D12),
+ PINMUX_DATA(LCDD11_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD11_DV_D11),
+ PINMUX_DATA(LCDD10_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD10_DV_D10),
+ PINMUX_DATA(LCDD9_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD9_DV_D9),
+ PINMUX_DATA(LCDD8_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD8_DV_D8),
+ PINMUX_DATA(LCDD7_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD7_DV_D7),
+ PINMUX_DATA(LCDD6_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD6_DV_D6),
+ PINMUX_DATA(LCDD5_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD5_DV_D5),
+ PINMUX_DATA(LCDD4_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD4_DV_D4),
+ PINMUX_DATA(LCDD3_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD3_DV_D3),
+ PINMUX_DATA(LCDD2_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD2_DV_D2),
+ PINMUX_DATA(LCDD1_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD1_DV_D1),
+ PINMUX_DATA(LCDD0_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD0_DV_D0),
+ PINMUX_DATA(LCDLCLK_MARK, PSD10_LCDLCLK, VIO_D0_LCDLCLK),
+ /* Main LCD */
+ PINMUX_DATA(LCDDON_MARK, PSD2_LCDDON, HIZA7_LCDC, LCDDON_LCDDON2),
+ PINMUX_DATA(LCDVCPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC,
+ HIZA6_LCDC, LCDVCPWC_LCDVCPWC2),
+ PINMUX_DATA(LCDVEPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC,
+ HIZA6_LCDC, LCDVEPWC_LCDVEPWC2),
+ PINMUX_DATA(LCDVSYN_MARK, HIZA7_LCDC, LCDVSYN),
+ /* Main LCD - RGB Mode */
+ PINMUX_DATA(LCDDCK_MARK, MSELB8_RGB, HIZA8_LCDC, LCDDCK_LCDWR),
+ PINMUX_DATA(LCDHSYN_MARK, MSELB8_RGB, HIZA7_LCDC, LCDHSYN_LCDCS),
+ PINMUX_DATA(LCDDISP_MARK, MSELB8_RGB, HIZA7_LCDC, LCDDISP_LCDRS),
+ /* Main LCD - SYS Mode */
+ PINMUX_DATA(LCDRS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDDISP_LCDRS),
+ PINMUX_DATA(LCDCS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDHSYN_LCDCS),
+ PINMUX_DATA(LCDWR_MARK, MSELB8_SYS, HIZA8_LCDC, LCDDCK_LCDWR),
+ PINMUX_DATA(LCDRD_MARK, HIZA7_LCDC, LCDRD),
+ /* Sub LCD - SYS Mode */
+ PINMUX_DATA(LCDDON2_MARK, PSD2_LCDDON2, HIZA7_LCDC, LCDDON_LCDDON2),
+ PINMUX_DATA(LCDVCPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2,
+ HIZA6_LCDC, LCDVCPWC_LCDVCPWC2),
+ PINMUX_DATA(LCDVEPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2,
+ HIZA6_LCDC, LCDVEPWC_LCDVEPWC2),
+ PINMUX_DATA(LCDVSYN2_MARK, PSE12_LCDVSYN2, HIZA8_LCDC, LCDVSYN2_DACK),
+ PINMUX_DATA(LCDCS2_MARK, PSD5_LCDCS2, CS6B_CE1B_LCDCS2),
+
+ /* BSC */
+ PINMUX_DATA(IOIS16_MARK, IOIS16),
+ PINMUX_DATA(A25_MARK, A25),
+ PINMUX_DATA(A24_MARK, A24),
+ PINMUX_DATA(A23_MARK, A23),
+ PINMUX_DATA(A22_MARK, A22),
+ PINMUX_DATA(BS_MARK, PSA9_BS, IRQ4_BS),
+ PINMUX_DATA(CS6B_CE1B_MARK, PSD5_CS6B_CE1B, CS6B_CE1B_LCDCS2),
+ PINMUX_DATA(WAIT_MARK, WAIT),
+ PINMUX_DATA(CS6A_CE2B_MARK, CS6A_CE2B),
+
+ /* SBSC */
+ PINMUX_DATA(HPD63_MARK, HPD63),
+ PINMUX_DATA(HPD62_MARK, HPD62),
+ PINMUX_DATA(HPD61_MARK, HPD61),
+ PINMUX_DATA(HPD60_MARK, HPD60),
+ PINMUX_DATA(HPD59_MARK, HPD59),
+ PINMUX_DATA(HPD58_MARK, HPD58),
+ PINMUX_DATA(HPD57_MARK, HPD57),
+ PINMUX_DATA(HPD56_MARK, HPD56),
+ PINMUX_DATA(HPD55_MARK, HPD55),
+ PINMUX_DATA(HPD54_MARK, HPD54),
+ PINMUX_DATA(HPD53_MARK, HPD53),
+ PINMUX_DATA(HPD52_MARK, HPD52),
+ PINMUX_DATA(HPD51_MARK, HPD51),
+ PINMUX_DATA(HPD50_MARK, HPD50),
+ PINMUX_DATA(HPD49_MARK, HPD49),
+ PINMUX_DATA(HPD48_MARK, HPD48),
+ PINMUX_DATA(HPDQM7_MARK, HPDQM7),
+ PINMUX_DATA(HPDQM6_MARK, HPDQM6),
+ PINMUX_DATA(HPDQM5_MARK, HPDQM5),
+ PINMUX_DATA(HPDQM4_MARK, HPDQM4),
+
+ /* IRQ */
+ PINMUX_DATA(IRQ0_MARK, HIZC8_IRQ0, IRQ0),
+ PINMUX_DATA(IRQ1_MARK, HIZC9_IRQ1, IRQ1),
+ PINMUX_DATA(IRQ2_MARK, PSA4_IRQ2, HIZC10_IRQ2, IRQ2_SDHID2),
+ PINMUX_DATA(IRQ3_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_IRQ3,
+ HIZC11_IRQ3, PTQ0),
+ PINMUX_DATA(IRQ4_MARK, PSA9_IRQ4, HIZC12_IRQ4, IRQ4_BS),
+ PINMUX_DATA(IRQ5_MARK, HIZC13_IRQ5, IRQ5),
+ PINMUX_DATA(IRQ6_MARK, PSA15_IRQ6, HIZC14_IRQ6, KEYIN0_IRQ6),
+ PINMUX_DATA(IRQ7_MARK, PSA14_IRQ7, HIZC15_IRQ7, KEYIN4_IRQ7),
+
+ /* SDHI */
+ PINMUX_DATA(SDHICD_MARK, SDHICD),
+ PINMUX_DATA(SDHIWP_MARK, SDHIWP),
+ PINMUX_DATA(SDHID3_MARK, SDHID3),
+ PINMUX_DATA(SDHID2_MARK, PSA4_SDHID2, IRQ2_SDHID2),
+ PINMUX_DATA(SDHID1_MARK, SDHID1),
+ PINMUX_DATA(SDHID0_MARK, SDHID0),
+ PINMUX_DATA(SDHICMD_MARK, SDHICMD),
+ PINMUX_DATA(SDHICLK_MARK, SDHICLK),
+
+ /* SIU - Port A */
+ PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, HIZB4_SIUA, SIUAOLR_SIOF1_SYNC),
+ PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, HIZB4_SIUA, SIUAOBT_SIOF1_SCK),
+ PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, HIZB4_SIUA, SIUAISLD_SIOF1_RXD),
+ PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, HIZB4_SIUA, SIUAILR_SIOF1_SS2),
+ PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, HIZB4_SIUA, SIUAIBT_SIOF1_SS1),
+ PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, HIZB4_SIUA, SIUAOSLD_SIOF1_TXD),
+ PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, HIZB4_SIUA, PSB1_SIUMCKA, PTK0),
+ PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, HIZB4_SIUA, PTK0),
+
+ /* SIU - Port B */
+ PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR),
+ PINMUX_DATA(SIUBOBT_MARK, PSB10_SIUBOBT, SIOSCK_SIUBOBT),
+ PINMUX_DATA(SIUBISLD_MARK, PSB14_SIUBISLD, SIORXD_SIUBISLD),
+ PINMUX_DATA(SIUBILR_MARK, PSB13_SIUBILR, SIOD_SIUBILR),
+ PINMUX_DATA(SIUBIBT_MARK, PSB12_SIUBIBT, SIOSTRB0_SIUBIBT),
+ PINMUX_DATA(SIUBOSLD_MARK, PSB15_SIUBOSLD, SIOTXD_SIUBOSLD),
+ PINMUX_DATA(SIUMCKB_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIUMCKB, PTF6),
+ PINMUX_DATA(SIUFCKB_MARK, PSD9_SIUFCKB, PTF6),
+
+ /* AUD */
+ PINMUX_DATA(AUDSYNC_MARK, AUDSYNC),
+ PINMUX_DATA(AUDATA3_MARK, AUDATA3),
+ PINMUX_DATA(AUDATA2_MARK, AUDATA2),
+ PINMUX_DATA(AUDATA1_MARK, AUDATA1),
+ PINMUX_DATA(AUDATA0_MARK, AUDATA0),
+
+ /* DMAC */
+ PINMUX_DATA(DACK_MARK, PSE12_DACK, LCDVSYN2_DACK),
+ PINMUX_DATA(DREQ0_MARK, DREQ0),
+
+ /* VOU */
+ PINMUX_DATA(DV_CLKI_MARK, PSD0_DV, LCDD19_DV_CLKI),
+ PINMUX_DATA(DV_CLK_MARK, PSD0_DV, LCDD18_DV_CLK),
+ PINMUX_DATA(DV_HSYNC_MARK, PSD0_DV, LCDD17_DV_HSYNC),
+ PINMUX_DATA(DV_VSYNC_MARK, PSD0_DV, LCDD16_DV_VSYNC),
+ PINMUX_DATA(DV_D15_MARK, PSD0_DV, LCDD15_DV_D15),
+ PINMUX_DATA(DV_D14_MARK, PSD0_DV, LCDD14_DV_D14),
+ PINMUX_DATA(DV_D13_MARK, PSD0_DV, LCDD13_DV_D13),
+ PINMUX_DATA(DV_D12_MARK, PSD0_DV, LCDD12_DV_D12),
+ PINMUX_DATA(DV_D11_MARK, PSD0_DV, LCDD11_DV_D11),
+ PINMUX_DATA(DV_D10_MARK, PSD0_DV, LCDD10_DV_D10),
+ PINMUX_DATA(DV_D9_MARK, PSD0_DV, LCDD9_DV_D9),
+ PINMUX_DATA(DV_D8_MARK, PSD0_DV, LCDD8_DV_D8),
+ PINMUX_DATA(DV_D7_MARK, PSD0_DV, LCDD7_DV_D7),
+ PINMUX_DATA(DV_D6_MARK, PSD0_DV, LCDD6_DV_D6),
+ PINMUX_DATA(DV_D5_MARK, PSD0_DV, LCDD5_DV_D5),
+ PINMUX_DATA(DV_D4_MARK, PSD0_DV, LCDD4_DV_D4),
+ PINMUX_DATA(DV_D3_MARK, PSD0_DV, LCDD3_DV_D3),
+ PINMUX_DATA(DV_D2_MARK, PSD0_DV, LCDD2_DV_D2),
+ PINMUX_DATA(DV_D1_MARK, PSD0_DV, LCDD1_DV_D1),
+ PINMUX_DATA(DV_D0_MARK, PSD0_DV, LCDD0_DV_D0),
+
+ /* CPG */
+ PINMUX_DATA(STATUS0_MARK, STATUS0),
+ PINMUX_DATA(PDSTATUS_MARK, PDSTATUS),
+
+ /* SIOF0 */
+ PINMUX_DATA(SIOF0_MCK_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_SIOF0_MCK, PTQ0),
+ PINMUX_DATA(SIOF0_SCK_MARK, PSB5_SIOF0_SCK, SIOF0_SCK_TS_SCK),
+ PINMUX_DATA(SIOF0_SYNC_MARK, PSB4_SIOF0_SYNC, SIOF0_SYNC_TS_SDEN),
+ PINMUX_DATA(SIOF0_SS1_MARK, PSB3_SIOF0_SS1, SIOF0_SS1_TS_SPSYNC),
+ PINMUX_DATA(SIOF0_SS2_MARK, PSB2_SIOF0_SS2, SIOF0_SS2_SIM_RST),
+ PINMUX_DATA(SIOF0_TXD_MARK, PSE14_SIOF0_TXD_IRDA_OUT,
+ PSB7_SIOF0_TXD, PTQ1),
+ PINMUX_DATA(SIOF0_RXD_MARK, PSE13_SIOF0_RXD_IRDA_IN,
+ PSB6_SIOF0_RXD, PTQ2),
+
+ /* SIOF1 */
+ PINMUX_DATA(SIOF1_MCK_MARK, PSE11_SIUMCKA_SIOF1_MCK,
+ PSB1_SIOF1_MCK, PTK0),
+ PINMUX_DATA(SIOF1_SCK_MARK, PSC14_SIOF1_SCK, SIUAOBT_SIOF1_SCK),
+ PINMUX_DATA(SIOF1_SYNC_MARK, PSC13_SIOF1_SYNC, SIUAOLR_SIOF1_SYNC),
+ PINMUX_DATA(SIOF1_SS1_MARK, PSC12_SIOF1_SS1, SIUAIBT_SIOF1_SS1),
+ PINMUX_DATA(SIOF1_SS2_MARK, PSC11_SIOF1_SS2, SIUAILR_SIOF1_SS2),
+ PINMUX_DATA(SIOF1_TXD_MARK, PSB0_SIOF1_TXD, SIUAOSLD_SIOF1_TXD),
+ PINMUX_DATA(SIOF1_RXD_MARK, PSC15_SIOF1_RXD, SIUAISLD_SIOF1_RXD),
+
+ /* SIM */
+ PINMUX_DATA(SIM_D_MARK, PSE15_SIM_D, PTQ0),
+ PINMUX_DATA(SIM_CLK_MARK, PSE14_SIM_CLK, PTQ1),
+ PINMUX_DATA(SIM_RST_MARK, PSB2_SIM_RST, SIOF0_SS2_SIM_RST),
+
+ /* TSIF */
+ PINMUX_DATA(TS_SDAT_MARK, PSE13_TS_SDAT, PTQ2),
+ PINMUX_DATA(TS_SCK_MARK, PSB5_TS_SCK, SIOF0_SCK_TS_SCK),
+ PINMUX_DATA(TS_SDEN_MARK, PSB4_TS_SDEN, SIOF0_SYNC_TS_SDEN),
+ PINMUX_DATA(TS_SPSYNC_MARK, PSB3_TS_SPSYNC, SIOF0_SS1_TS_SPSYNC),
+
+ /* IRDA */
+ PINMUX_DATA(IRDA_IN_MARK, PSE13_SIOF0_RXD_IRDA_IN, PSB6_IRDA_IN, PTQ2),
+ PINMUX_DATA(IRDA_OUT_MARK, PSE14_SIOF0_TXD_IRDA_OUT,
+ PSB7_IRDA_OUT, PTQ1),
+
+ /* TPU */
+ PINMUX_DATA(TPUTO_MARK, PSD8_TPUTO, SCIF0_SCK_TPUTO),
+
+ /* FLCTL */
+ PINMUX_DATA(FCE_MARK, PSE3_FLCTL, FCE_VIO_HD2),
+ PINMUX_DATA(NAF7_MARK, PSC0_NAF, HIZA10_NAF, NAF7_VIO_D15),
+ PINMUX_DATA(NAF6_MARK, PSC0_NAF, HIZA10_NAF, NAF6_VIO_D14),
+ PINMUX_DATA(NAF5_MARK, PSC0_NAF, HIZA10_NAF, NAF5_VIO_D13),
+ PINMUX_DATA(NAF4_MARK, PSC0_NAF, HIZA10_NAF, NAF4_VIO_D12),
+ PINMUX_DATA(NAF3_MARK, PSC0_NAF, HIZA10_NAF, NAF3_VIO_D11),
+ PINMUX_DATA(NAF2_MARK, PSE2_NAF2, HIZB0_VIO, NAF2_VIO_D10),
+ PINMUX_DATA(NAF1_MARK, PSE1_NAF1, HIZB0_VIO, NAF1_VIO_D9),
+ PINMUX_DATA(NAF0_MARK, PSE0_NAF0, HIZB0_VIO, NAF0_VIO_D8),
+ PINMUX_DATA(FCDE_MARK, FCDE),
+ PINMUX_DATA(FOE_MARK, PSE3_FLCTL, HIZB0_VIO, FOE_VIO_VD2),
+ PINMUX_DATA(FSC_MARK, FSC),
+ PINMUX_DATA(FWE_MARK, FWE),
+ PINMUX_DATA(FRB_MARK, PSE3_FLCTL, FRB_VIO_CLK2),
+
+ /* KEYSC */
+ PINMUX_DATA(KEYIN0_MARK, PSA15_KEYIN0, HIZC14_IRQ6, KEYIN0_IRQ6),
+ PINMUX_DATA(KEYIN1_MARK, HIZA14_KEYSC, KEYIN1),
+ PINMUX_DATA(KEYIN2_MARK, HIZA14_KEYSC, KEYIN2),
+ PINMUX_DATA(KEYIN3_MARK, HIZA14_KEYSC, KEYIN3),
+ PINMUX_DATA(KEYIN4_MARK, PSA14_KEYIN4, HIZC15_IRQ7, KEYIN4_IRQ7),
+ PINMUX_DATA(KEYOUT0_MARK, HIZA14_KEYSC, KEYOUT0),
+ PINMUX_DATA(KEYOUT1_MARK, HIZA14_KEYSC, KEYOUT1),
+ PINMUX_DATA(KEYOUT2_MARK, HIZA14_KEYSC, KEYOUT2),
+ PINMUX_DATA(KEYOUT3_MARK, HIZA14_KEYSC, KEYOUT3),
+ PINMUX_DATA(KEYOUT4_IN6_MARK, HIZA14_KEYSC, KEYOUT4_IN6),
+ PINMUX_DATA(KEYOUT5_IN5_MARK, HIZA14_KEYSC, KEYOUT5_IN5),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
+ PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
+ PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+
+ /* SCIF0 */
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+
+ /* SCIF1 */
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+
+ /* SCIF2 */
+ PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_RTS, SCIF2_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_CTS, SCIF2_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK),
+
+ /* SIO */
+ PINMUX_GPIO(GPIO_FN_SIOTXD, SIOTXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIORXD, SIORXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOD, SIOD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOSTRB0, SIOSTRB0_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOSTRB1, SIOSTRB1_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOSCK, SIOSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOMCK, SIOMCK_MARK),
+
+ /* CEU */
+ PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CLK, VIO_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_VD, VIO_VD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_HD, VIO_HD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_STEX, VIO_STEX_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_STEM, VIO_STEM_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK),
+ /* Main LCD */
+ PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+ /* Main LCD - RGB Mode */
+ PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+ /* Main LCD - SYS Mode */
+ PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+ /* Sub LCD - SYS Mode */
+ PINMUX_GPIO(GPIO_FN_LCDDON2, LCDDON2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVCPWC2, LCDVCPWC2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVEPWC2, LCDVEPWC2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVSYN2, LCDVSYN2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDCS2, LCDCS2_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
+
+ /* SBSC */
+ PINMUX_GPIO(GPIO_FN_HPD63, HPD63_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD62, HPD62_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD61, HPD61_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD60, HPD60_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD59, HPD59_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD58, HPD58_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD57, HPD57_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD56, HPD56_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD55, HPD55_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD54, HPD54_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD53, HPD53_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD52, HPD52_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD51, HPD51_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD50, HPD50_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD49, HPD49_MARK),
+ PINMUX_GPIO(GPIO_FN_HPD48, HPD48_MARK),
+ PINMUX_GPIO(GPIO_FN_HPDQM7, HPDQM7_MARK),
+ PINMUX_GPIO(GPIO_FN_HPDQM6, HPDQM6_MARK),
+ PINMUX_GPIO(GPIO_FN_HPDQM5, HPDQM5_MARK),
+ PINMUX_GPIO(GPIO_FN_HPDQM4, HPDQM4_MARK),
+
+ /* IRQ */
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+
+ /* SDHI */
+ PINMUX_GPIO(GPIO_FN_SDHICD, SDHICD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHIWP, SDHIWP_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHID3, SDHID3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHID2, SDHID2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHID1, SDHID1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHID0, SDHID0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHICMD, SDHICMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHICLK, SDHICLK_MARK),
+
+ /* SIU - Port A */
+ PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUMCKA, SIUMCKA_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUFCKA, SIUFCKA_MARK),
+
+ /* SIU - Port B */
+ PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUMCKB, SIUMCKB_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUFCKB, SIUFCKB_MARK),
+
+ /* AUD */
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_DACK, DACK_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+
+ /* VOU */
+ PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+
+ /* CPG */
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+
+ /* SIOF0 */
+ PINMUX_GPIO(GPIO_FN_SIOF0_MCK, SIOF0_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, SIOF0_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SS1, SIOF0_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_SS2, SIOF0_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK),
+
+ /* SIOF1 */
+ PINMUX_GPIO(GPIO_FN_SIOF1_MCK, SIOF1_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SS1, SIOF1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_SS2, SIOF1_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK),
+
+ /* SIM */
+ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+
+ /* TSIF */
+ PINMUX_GPIO(GPIO_FN_TS_SDAT, TS_SDAT_MARK),
+ PINMUX_GPIO(GPIO_FN_TS_SCK, TS_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TS_SDEN, TS_SDEN_MARK),
+ PINMUX_GPIO(GPIO_FN_TS_SPSYNC, TS_SPSYNC_MARK),
+
+ /* IRDA */
+ PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
+ PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+
+ /* TPU */
+ PINMUX_GPIO(GPIO_FN_TPUTO, TPUTO_MARK),
+
+ /* FLCTL */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
+ PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
+ PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
+ PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
+ PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+
+ /* KEYSC */
+ PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ VIO_D7_SCIF1_SCK, PTA7_OUT, PTA7_IN_PD, PTA7_IN,
+ VIO_D6_SCIF1_RXD, 0, PTA6_IN_PD, PTA6_IN,
+ VIO_D5_SCIF1_TXD, PTA5_OUT, PTA5_IN_PD, PTA5_IN,
+ VIO_D4, 0, PTA4_IN_PD, PTA4_IN,
+ VIO_D3, 0, PTA3_IN_PD, PTA3_IN,
+ VIO_D2, 0, PTA2_IN_PD, PTA2_IN,
+ VIO_D1, 0, PTA1_IN_PD, PTA1_IN,
+ VIO_D0_LCDLCLK, 0, PTA0_IN_PD, PTA0_IN }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ HPD55, PTB7_OUT, 0, PTB7_IN,
+ HPD54, PTB6_OUT, 0, PTB6_IN,
+ HPD53, PTB5_OUT, 0, PTB5_IN,
+ HPD52, PTB4_OUT, 0, PTB4_IN,
+ HPD51, PTB3_OUT, 0, PTB3_IN,
+ HPD50, PTB2_OUT, 0, PTB2_IN,
+ HPD49, PTB1_OUT, 0, PTB1_IN,
+ HPD48, PTB0_OUT, 0, PTB0_IN }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ 0, 0, PTC7_IN_PU, PTC7_IN,
+ 0, 0, 0, 0,
+ IOIS16, 0, PTC5_IN_PU, PTC5_IN,
+ HPDQM7, PTC4_OUT, 0, PTC4_IN,
+ HPDQM6, PTC3_OUT, 0, PTC3_IN,
+ HPDQM5, PTC2_OUT, 0, PTC2_IN,
+ 0, 0, 0, 0,
+ HPDQM4, PTC0_OUT, 0, PTC0_IN }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ SDHICD, 0, PTD7_IN_PU, PTD7_IN,
+ SDHIWP, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
+ SDHID3, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
+ IRQ2_SDHID2, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
+ SDHID1, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
+ SDHID0, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
+ SDHICMD, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
+ SDHICLK, PTD0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ A25, PTE7_OUT, PTE7_IN_PD, PTE7_IN,
+ A24, PTE6_OUT, PTE6_IN_PD, PTE6_IN,
+ A23, PTE5_OUT, PTE5_IN_PD, PTE5_IN,
+ A22, PTE4_OUT, PTE4_IN_PD, PTE4_IN,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ IRQ5, PTE1_OUT, PTE1_IN_PD, PTE1_IN,
+ IRQ4_BS, PTE0_OUT, PTE0_IN_PD, PTE0_IN }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ 0, 0, 0, 0,
+ PTF6, PTF6_OUT, PTF6_IN_PD, PTF6_IN,
+ SIOSCK_SIUBOBT, PTF5_OUT, PTF5_IN_PD, PTF5_IN,
+ SIOSTRB1_SIUBOLR, PTF4_OUT, PTF4_IN_PD, PTF4_IN,
+ SIOSTRB0_SIUBIBT, PTF3_OUT, PTF3_IN_PD, PTF3_IN,
+ SIOD_SIUBILR, PTF2_OUT, PTF2_IN_PD, PTF2_IN,
+ SIORXD_SIUBISLD, 0, PTF1_IN_PD, PTF1_IN,
+ SIOTXD_SIUBOSLD, PTF0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ AUDSYNC, PTG4_OUT, 0, 0,
+ AUDATA3, PTG3_OUT, 0, 0,
+ AUDATA2, PTG2_OUT, 0, 0,
+ AUDATA1, PTG1_OUT, 0, 0,
+ AUDATA0, PTG0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ LCDVCPWC_LCDVCPWC2, PTH7_OUT, 0, 0,
+ LCDVSYN2_DACK, PTH6_OUT, PTH6_IN_PD, PTH6_IN,
+ LCDVSYN, PTH5_OUT, PTH5_IN_PD, PTH5_IN,
+ LCDDISP_LCDRS, PTH4_OUT, 0, 0,
+ LCDHSYN_LCDCS, PTH3_OUT, 0, 0,
+ LCDDON_LCDDON2, PTH2_OUT, 0, 0,
+ LCDD17_DV_HSYNC, PTH1_OUT, PTH1_IN_PD, PTH1_IN,
+ LCDD16_DV_VSYNC, PTH0_OUT, PTH0_IN_PD, PTH0_IN }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ STATUS0, PTJ7_OUT, 0, 0,
+ 0, PTJ6_OUT, 0, 0,
+ PDSTATUS, PTJ5_OUT, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ IRQ1, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
+ IRQ0, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ 0, 0, 0, 0,
+ SIUAILR_SIOF1_SS2, PTK6_OUT, PTK6_IN_PD, PTK6_IN,
+ SIUAIBT_SIOF1_SS1, PTK5_OUT, PTK5_IN_PD, PTK5_IN,
+ SIUAOLR_SIOF1_SYNC, PTK4_OUT, PTK4_IN_PD, PTK4_IN,
+ SIUAOBT_SIOF1_SCK, PTK3_OUT, PTK3_IN_PD, PTK3_IN,
+ SIUAISLD_SIOF1_RXD, 0, PTK2_IN_PD, PTK2_IN,
+ SIUAOSLD_SIOF1_TXD, PTK1_OUT, 0, 0,
+ PTK0, PTK0_OUT, PTK0_IN_PD, PTK0_IN }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ LCDD15_DV_D15, PTL7_OUT, PTL7_IN_PD, PTL7_IN,
+ LCDD14_DV_D14, PTL6_OUT, PTL6_IN_PD, PTL6_IN,
+ LCDD13_DV_D13, PTL5_OUT, PTL5_IN_PD, PTL5_IN,
+ LCDD12_DV_D12, PTL4_OUT, PTL4_IN_PD, PTL4_IN,
+ LCDD11_DV_D11, PTL3_OUT, PTL3_IN_PD, PTL3_IN,
+ LCDD10_DV_D10, PTL2_OUT, PTL2_IN_PD, PTL2_IN,
+ LCDD9_DV_D9, PTL1_OUT, PTL1_IN_PD, PTL1_IN,
+ LCDD8_DV_D8, PTL0_OUT, PTL0_IN_PD, PTL0_IN }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ LCDD7_DV_D7, PTM7_OUT, PTM7_IN_PD, PTM7_IN,
+ LCDD6_DV_D6, PTM6_OUT, PTM6_IN_PD, PTM6_IN,
+ LCDD5_DV_D5, PTM5_OUT, PTM5_IN_PD, PTM5_IN,
+ LCDD4_DV_D4, PTM4_OUT, PTM4_IN_PD, PTM4_IN,
+ LCDD3_DV_D3, PTM3_OUT, PTM3_IN_PD, PTM3_IN,
+ LCDD2_DV_D2, PTM2_OUT, PTM2_IN_PD, PTM2_IN,
+ LCDD1_DV_D1, PTM1_OUT, PTM1_IN_PD, PTM1_IN,
+ LCDD0_DV_D0, PTM0_OUT, PTM0_IN_PD, PTM0_IN }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+ HPD63, PTN7_OUT, 0, PTN7_IN,
+ HPD62, PTN6_OUT, 0, PTN6_IN,
+ HPD61, PTN5_OUT, 0, PTN5_IN,
+ HPD60, PTN4_OUT, 0, PTN4_IN,
+ HPD59, PTN3_OUT, 0, PTN3_IN,
+ HPD58, PTN2_OUT, 0, PTN2_IN,
+ HPD57, PTN1_OUT, 0, PTN1_IN,
+ HPD56, PTN0_OUT, 0, PTN0_IN }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+ 0, 0, 0, 0,
+ SIOF0_SS2_SIM_RST, PTQ6_OUT, 0, 0,
+ SIOF0_SS1_TS_SPSYNC, PTQ5_OUT, PTQ5_IN_PD, PTQ5_IN,
+ SIOF0_SYNC_TS_SDEN, PTQ4_OUT, PTQ4_IN_PD, PTQ4_IN,
+ SIOF0_SCK_TS_SCK, PTQ3_OUT, PTQ3_IN_PD, PTQ3_IN,
+ PTQ2, 0, PTQ2_IN_PD, PTQ2_IN,
+ PTQ1, PTQ1_OUT, 0, 0,
+ PTQ0, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ LCDRD, PTR4_OUT, 0, 0,
+ CS6B_CE1B_LCDCS2, PTR3_OUT, 0, 0,
+ WAIT, 0, PTR2_IN_PU, PTR2_IN,
+ LCDDCK_LCDWR, PTR1_OUT, 0, 0,
+ LCDVEPWC_LCDVEPWC2, PTR0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ SCIF0_CTS_SIUAISPD, 0, PTS4_IN_PD, PTS4_IN,
+ SCIF0_RTS_SIUAOSPD, PTS3_OUT, 0, 0,
+ SCIF0_SCK_TPUTO, PTS2_OUT, PTS2_IN_PD, PTS2_IN,
+ SCIF0_RXD, 0, PTS1_IN_PD, PTS1_IN,
+ SCIF0_TXD, PTS0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ FOE_VIO_VD2, PTT4_OUT, PTT4_IN_PD, PTT4_IN,
+ FWE, PTT3_OUT, PTT3_IN_PD, PTT3_IN,
+ FSC, PTT2_OUT, PTT2_IN_PD, PTT2_IN,
+ DREQ0, 0, PTT1_IN_PD, PTT1_IN,
+ FCDE, PTT0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ NAF2_VIO_D10, PTU4_OUT, PTU4_IN_PD, PTU4_IN,
+ NAF1_VIO_D9, PTU3_OUT, PTU3_IN_PD, PTU3_IN,
+ NAF0_VIO_D8, PTU2_OUT, PTU2_IN_PD, PTU2_IN,
+ FRB_VIO_CLK2, 0, PTU1_IN_PD, PTU1_IN,
+ FCE_VIO_HD2, PTU0_OUT, PTU0_IN_PD, PTU0_IN }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ NAF7_VIO_D15, PTV4_OUT, PTV4_IN_PD, PTV4_IN,
+ NAF6_VIO_D14, PTV3_OUT, PTV3_IN_PD, PTV3_IN,
+ NAF5_VIO_D13, PTV2_OUT, PTV2_IN_PD, PTV2_IN,
+ NAF4_VIO_D12, PTV1_OUT, PTV1_IN_PD, PTV1_IN,
+ NAF3_VIO_D11, PTV0_OUT, PTV0_IN_PD, PTV0_IN }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+ 0, 0, 0, 0,
+ VIO_FLD_SCIF2_CTS, 0, PTW6_IN_PD, PTW6_IN,
+ VIO_CKO_SCIF2_RTS, PTW5_OUT, 0, 0,
+ VIO_STEX_SCIF2_SCK, PTW4_OUT, PTW4_IN_PD, PTW4_IN,
+ VIO_STEM_SCIF2_TXD, PTW3_OUT, PTW3_IN_PD, PTW3_IN,
+ VIO_HD_SCIF2_RXD, PTW2_OUT, PTW2_IN_PD, PTW2_IN,
+ VIO_VD_SCIF1_CTS, PTW1_OUT, PTW1_IN_PD, PTW1_IN,
+ VIO_CLK_SCIF1_RTS, PTW0_OUT, PTW0_IN_PD, PTW0_IN }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+ 0, 0, 0, 0,
+ CS6A_CE2B, PTX6_OUT, PTX6_IN_PU, PTX6_IN,
+ LCDD23, PTX5_OUT, PTX5_IN_PD, PTX5_IN,
+ LCDD22, PTX4_OUT, PTX4_IN_PD, PTX4_IN,
+ LCDD21, PTX3_OUT, PTX3_IN_PD, PTX3_IN,
+ LCDD20, PTX2_OUT, PTX2_IN_PD, PTX2_IN,
+ LCDD19_DV_CLKI, PTX1_OUT, PTX1_IN_PD, PTX1_IN,
+ LCDD18_DV_CLK, PTX0_OUT, PTX0_IN_PD, PTX0_IN }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ KEYOUT5_IN5, PTY5_OUT, PTY5_IN_PU, PTY5_IN,
+ KEYOUT4_IN6, PTY4_OUT, PTY4_IN_PU, PTY4_IN,
+ KEYOUT3, PTY3_OUT, PTY3_IN_PU, PTY3_IN,
+ KEYOUT2, PTY2_OUT, PTY2_IN_PU, PTY2_IN,
+ KEYOUT1, PTY1_OUT, 0, 0,
+ KEYOUT0, PTY0_OUT, PTY0_IN_PU, PTY0_IN }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ KEYIN4_IRQ7, 0, PTZ5_IN_PU, PTZ5_IN,
+ KEYIN3, 0, PTZ4_IN_PU, PTZ4_IN,
+ KEYIN2, 0, PTZ3_IN_PU, PTZ3_IN,
+ KEYIN1, 0, PTZ2_IN_PU, PTZ2_IN,
+ KEYIN0_IRQ6, 0, PTZ1_IN_PU, PTZ1_IN,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) {
+ PSA15_KEYIN0, PSA15_IRQ6,
+ PSA14_KEYIN4, PSA14_IRQ7,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PSA9_IRQ4, PSA9_BS,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PSA4_IRQ2, PSA4_SDHID2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) {
+ PSB15_SIOTXD, PSB15_SIUBOSLD,
+ PSB14_SIORXD, PSB14_SIUBISLD,
+ PSB13_SIOD, PSB13_SIUBILR,
+ PSB12_SIOSTRB0, PSB12_SIUBIBT,
+ PSB11_SIOSTRB1, PSB11_SIUBOLR,
+ PSB10_SIOSCK, PSB10_SIUBOBT,
+ PSB9_SIOMCK, PSB9_SIUMCKB,
+ PSB8_SIOF0_MCK, PSB8_IRQ3,
+ PSB7_SIOF0_TXD, PSB7_IRDA_OUT,
+ PSB6_SIOF0_RXD, PSB6_IRDA_IN,
+ PSB5_SIOF0_SCK, PSB5_TS_SCK,
+ PSB4_SIOF0_SYNC, PSB4_TS_SDEN,
+ PSB3_SIOF0_SS1, PSB3_TS_SPSYNC,
+ PSB2_SIOF0_SS2, PSB2_SIM_RST,
+ PSB1_SIUMCKA, PSB1_SIOF1_MCK,
+ PSB0_SIUAOSLD, PSB0_SIOF1_TXD }
+ },
+ { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) {
+ PSC15_SIUAISLD, PSC15_SIOF1_RXD,
+ PSC14_SIUAOBT, PSC14_SIOF1_SCK,
+ PSC13_SIUAOLR, PSC13_SIOF1_SYNC,
+ PSC12_SIUAIBT, PSC12_SIOF1_SS1,
+ PSC11_SIUAILR, PSC11_SIOF1_SS2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PSC0_NAF, PSC0_VIO }
+ },
+ { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) {
+ 0, 0,
+ 0, 0,
+ PSD13_VIO, PSD13_SCIF2,
+ PSD12_VIO, PSD12_SCIF1,
+ PSD11_VIO, PSD11_SCIF1,
+ PSD10_VIO_D0, PSD10_LCDLCLK,
+ PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB,
+ PSD8_SCIF0_SCK, PSD8_TPUTO,
+ PSD7_SCIF0_RTS, PSD7_SIUAOSPD,
+ PSD6_SCIF0_CTS, PSD6_SIUAISPD,
+ PSD5_CS6B_CE1B, PSD5_LCDCS2,
+ 0, 0,
+ PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2,
+ PSD2_LCDDON, PSD2_LCDDON2,
+ 0, 0,
+ PSD0_LCDD19_LCDD0, PSD0_DV }
+ },
+ { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) {
+ PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D,
+ PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK,
+ PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT,
+ PSE12_LCDVSYN2, PSE12_DACK,
+ PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PSE3_FLCTL, PSE3_VIO,
+ PSE2_NAF2, PSE2_VIO_D10,
+ PSE1_NAF1, PSE1_VIO_D9,
+ PSE0_NAF0, PSE0_VIO_D8 }
+ },
+ { PINMUX_CFG_REG("HIZCRA", 0xa4050158, 16, 1) {
+ 0, 0,
+ HIZA14_KEYSC, HIZA14_HIZ,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ HIZA10_NAF, HIZA10_HIZ,
+ HIZA9_VIO, HIZA9_HIZ,
+ HIZA8_LCDC, HIZA8_HIZ,
+ HIZA7_LCDC, HIZA7_HIZ,
+ HIZA6_LCDC, HIZA6_HIZ,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ { PINMUX_CFG_REG("HIZCRB", 0xa405015a, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ HIZB4_SIUA, HIZB4_HIZ,
+ 0, 0,
+ 0, 0,
+ HIZB1_VIO, HIZB1_HIZ,
+ HIZB0_VIO, HIZB0_HIZ }
+ },
+ { PINMUX_CFG_REG("HIZCRC", 0xa405015c, 16, 1) {
+ HIZC15_IRQ7, HIZC15_HIZ,
+ HIZC14_IRQ6, HIZC14_HIZ,
+ HIZC13_IRQ5, HIZC13_HIZ,
+ HIZC12_IRQ4, HIZC12_HIZ,
+ HIZC11_IRQ3, HIZC11_HIZ,
+ HIZC10_IRQ2, HIZC10_HIZ,
+ HIZC9_IRQ1, HIZC9_HIZ,
+ HIZC8_IRQ0, HIZC8_HIZ,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ { PINMUX_CFG_REG("MSELCRB", 0xa4050182, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ MSELB9_VIO, MSELB9_VIO2,
+ MSELB8_RGB, MSELB8_SYS,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+ PTC7_DATA, 0, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, 0, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ 0, 0, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+ 0, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+ 0, 0, 0, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0,
+ 0, 0, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+ 0, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+ 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+ 0, 0, 0, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+ 0, 0, 0, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+ 0, 0, 0, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+ 0, 0, 0, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+ 0, 0, 0, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+ 0, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+ 0, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+ 0, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+ 0, 0, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7722_pinmux_info = {
+ .name = "sh7722_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_KEYOUT5_IN5,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7723.c b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
new file mode 100644
index 000000000000..609673d3d70e
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
@@ -0,0 +1,1903 @@
+/*
+ * SH7723 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/sh7723.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTJ7_DATA, PTJ5_DATA, PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT5_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU5_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE5_IN, PTE4_IN, PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN,
+ PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT5_IN, PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU5_IN, PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
+ PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
+ PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
+ PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
+ PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
+ PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
+ PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTA4_IN_PU, PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTB2_IN_PU, PTB1_IN_PU,
+ PTR2_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE5_OUT, PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
+ PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
+ PTG5_OUT, PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTJ7_OUT, PTJ5_OUT, PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR1_OUT, PTR0_OUT,
+ PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT,
+ PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT5_OUT, PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU5_OUT, PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
+ PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
+ PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
+ PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
+ PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
+ PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE5_FN, PTE4_FN, PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG5_FN, PTG4_FN, PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTJ7_FN, PTJ5_FN, PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN,
+ PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN,
+ PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
+ PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN,
+ PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT5_FN, PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU5_FN, PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
+ PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+ PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
+ PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
+ PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
+ PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
+ PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
+ PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
+ PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
+ PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
+
+
+ PSA15_PSA14_FN1, PSA15_PSA14_FN2,
+ PSA13_PSA12_FN1, PSA13_PSA12_FN2,
+ PSA11_PSA10_FN1, PSA11_PSA10_FN2,
+ PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3,
+ PSA3_PSA2_FN1, PSA3_PSA2_FN2,
+ PSB15_PSB14_FN1, PSB15_PSB14_FN2,
+ PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS,
+ PSB9_PSB8_FN1, PSB9_PSB8_FN2, PSB9_PSB8_FN3,
+ PSB7_PSB6_FN1, PSB7_PSB6_FN2,
+ PSB5_PSB4_FN1, PSB5_PSB4_FN2,
+ PSB3_PSB2_FN1, PSB3_PSB2_FN2,
+ PSC15_PSC14_FN1, PSC15_PSC14_FN2,
+ PSC13_PSC12_FN1, PSC13_PSC12_FN2,
+ PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3,
+ PSC9_PSC8_FN1, PSC9_PSC8_FN2,
+ PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3,
+ PSD15_PSD14_FN1, PSD15_PSD14_FN2,
+ PSD13_PSD12_FN1, PSD13_PSD12_FN2,
+ PSD11_PSD10_FN1, PSD11_PSD10_FN2, PSD11_PSD10_FN3,
+ PSD9_PSD8_FN1, PSD9_PSD8_FN2,
+ PSD7_PSD6_FN1, PSD7_PSD6_FN2,
+ PSD5_PSD4_FN1, PSD5_PSD4_FN2,
+ PSD3_PSD2_FN1, PSD3_PSD2_FN2,
+ PSD1_PSD0_FN1, PSD1_PSD0_FN2,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ SCIF0_PTT_TXD_MARK, SCIF0_PTT_RXD_MARK,
+ SCIF0_PTT_SCK_MARK, SCIF0_PTU_TXD_MARK,
+ SCIF0_PTU_RXD_MARK, SCIF0_PTU_SCK_MARK,
+
+ SCIF1_PTS_TXD_MARK, SCIF1_PTS_RXD_MARK,
+ SCIF1_PTS_SCK_MARK, SCIF1_PTV_TXD_MARK,
+ SCIF1_PTV_RXD_MARK, SCIF1_PTV_SCK_MARK,
+
+ SCIF2_PTT_TXD_MARK, SCIF2_PTT_RXD_MARK,
+ SCIF2_PTT_SCK_MARK, SCIF2_PTU_TXD_MARK,
+ SCIF2_PTU_RXD_MARK, SCIF2_PTU_SCK_MARK,
+
+ SCIF3_PTS_TXD_MARK, SCIF3_PTS_RXD_MARK,
+ SCIF3_PTS_SCK_MARK, SCIF3_PTS_RTS_MARK,
+ SCIF3_PTS_CTS_MARK, SCIF3_PTV_TXD_MARK,
+ SCIF3_PTV_RXD_MARK, SCIF3_PTV_SCK_MARK,
+ SCIF3_PTV_RTS_MARK, SCIF3_PTV_CTS_MARK,
+
+ SCIF4_PTE_TXD_MARK, SCIF4_PTE_RXD_MARK,
+ SCIF4_PTE_SCK_MARK, SCIF4_PTN_TXD_MARK,
+ SCIF4_PTN_RXD_MARK, SCIF4_PTN_SCK_MARK,
+
+ SCIF5_PTE_TXD_MARK, SCIF5_PTE_RXD_MARK,
+ SCIF5_PTE_SCK_MARK, SCIF5_PTN_TXD_MARK,
+ SCIF5_PTN_RXD_MARK, SCIF5_PTN_SCK_MARK,
+
+ VIO_D15_MARK, VIO_D14_MARK, VIO_D13_MARK, VIO_D12_MARK,
+ VIO_D11_MARK, VIO_D10_MARK, VIO_D9_MARK, VIO_D8_MARK,
+ VIO_D7_MARK, VIO_D6_MARK, VIO_D5_MARK, VIO_D4_MARK,
+ VIO_D3_MARK, VIO_D2_MARK, VIO_D1_MARK, VIO_D0_MARK,
+ VIO_FLD_MARK, VIO_CKO_MARK,
+ VIO_VD1_MARK, VIO_HD1_MARK, VIO_CLK1_MARK,
+ VIO_HD2_MARK, VIO_VD2_MARK, VIO_CLK2_MARK,
+
+ LCDD23_MARK, LCDD22_MARK, LCDD21_MARK, LCDD20_MARK,
+ LCDD19_MARK, LCDD18_MARK, LCDD17_MARK, LCDD16_MARK,
+ LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK,
+ LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK,
+ LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK,
+ LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK,
+ LCDDON_MARK, LCDVCPWC_MARK, LCDVEPWC_MARK,
+ LCDVSYN_MARK, LCDDCK_MARK, LCDHSYN_MARK, LCDDISP_MARK,
+ LCDRS_MARK, LCDCS_MARK, LCDWR_MARK, LCDRD_MARK,
+ LCDLCLK_PTR_MARK, LCDLCLK_PTW_MARK,
+
+ IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK,
+ IRQ4_MARK, IRQ5_MARK, IRQ6_MARK, IRQ7_MARK,
+
+ AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ AUDCK_MARK, AUDSYNC_MARK,
+
+ SDHI0CD_PTD_MARK, SDHI0WP_PTD_MARK,
+ SDHI0D3_PTD_MARK, SDHI0D2_PTD_MARK,
+ SDHI0D1_PTD_MARK, SDHI0D0_PTD_MARK,
+ SDHI0CMD_PTD_MARK, SDHI0CLK_PTD_MARK,
+
+ SDHI0CD_PTS_MARK, SDHI0WP_PTS_MARK,
+ SDHI0D3_PTS_MARK, SDHI0D2_PTS_MARK,
+ SDHI0D1_PTS_MARK, SDHI0D0_PTS_MARK,
+ SDHI0CMD_PTS_MARK, SDHI0CLK_PTS_MARK,
+
+ SDHI1CD_MARK, SDHI1WP_MARK, SDHI1D3_MARK, SDHI1D2_MARK,
+ SDHI1D1_MARK, SDHI1D0_MARK, SDHI1CMD_MARK, SDHI1CLK_MARK,
+
+ SIUAFCK_MARK, SIUAILR_MARK, SIUAIBT_MARK, SIUAISLD_MARK,
+ SIUAOLR_MARK, SIUAOBT_MARK, SIUAOSLD_MARK, SIUAMCK_MARK,
+ SIUAISPD_MARK, SIUAOSPD_MARK,
+
+ SIUBFCK_MARK, SIUBILR_MARK, SIUBIBT_MARK, SIUBISLD_MARK,
+ SIUBOLR_MARK, SIUBOBT_MARK, SIUBOSLD_MARK, SIUBMCK_MARK,
+
+ IRDA_IN_MARK, IRDA_OUT_MARK,
+
+ DV_CLKI_MARK, DV_CLK_MARK, DV_HSYNC_MARK, DV_VSYNC_MARK,
+ DV_D15_MARK, DV_D14_MARK, DV_D13_MARK, DV_D12_MARK,
+ DV_D11_MARK, DV_D10_MARK, DV_D9_MARK, DV_D8_MARK,
+ DV_D7_MARK, DV_D6_MARK, DV_D5_MARK, DV_D4_MARK,
+ DV_D3_MARK, DV_D2_MARK, DV_D1_MARK, DV_D0_MARK,
+
+ KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK, KEYIN4_MARK,
+ KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
+ KEYOUT4_IN6_MARK, KEYOUT5_IN5_MARK,
+
+ MSIOF0_PTF_TXD_MARK, MSIOF0_PTF_RXD_MARK, MSIOF0_PTF_MCK_MARK,
+ MSIOF0_PTF_TSYNC_MARK, MSIOF0_PTF_TSCK_MARK, MSIOF0_PTF_RSYNC_MARK,
+ MSIOF0_PTF_RSCK_MARK, MSIOF0_PTF_SS1_MARK, MSIOF0_PTF_SS2_MARK,
+
+ MSIOF0_PTT_TXD_MARK, MSIOF0_PTT_RXD_MARK, MSIOF0_PTX_MCK_MARK,
+ MSIOF0_PTT_TSYNC_MARK, MSIOF0_PTT_TSCK_MARK, MSIOF0_PTT_RSYNC_MARK,
+ MSIOF0_PTT_RSCK_MARK, MSIOF0_PTT_SS1_MARK, MSIOF0_PTT_SS2_MARK,
+
+ MSIOF1_TXD_MARK, MSIOF1_RXD_MARK, MSIOF1_MCK_MARK,
+ MSIOF1_TSYNC_MARK, MSIOF1_TSCK_MARK, MSIOF1_RSYNC_MARK,
+ MSIOF1_RSCK_MARK, MSIOF1_SS1_MARK, MSIOF1_SS2_MARK,
+
+ TS0_SDAT_MARK, TS0_SCK_MARK, TS0_SDEN_MARK, TS0_SPSYNC_MARK,
+
+ FCE_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK,
+ NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FCDE_MARK,
+ FOE_MARK, FSC_MARK, FWE_MARK, FRB_MARK,
+
+ DACK1_MARK, DREQ1_MARK, DACK0_MARK, DREQ0_MARK,
+
+ AN3_MARK, AN2_MARK, AN1_MARK, AN0_MARK, ADTRG_MARK,
+
+ STATUS0_MARK, PDSTATUS_MARK,
+
+ TPUTO3_MARK, TPUTO2_MARK, TPUTO1_MARK, TPUTO0_MARK,
+
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK,
+ D27_MARK, D26_MARK, D25_MARK, D24_MARK,
+ D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+ IOIS16_MARK, WAIT_MARK, BS_MARK,
+ A25_MARK, A24_MARK, A23_MARK, A22_MARK,
+ CS6B_CE1B_MARK, CS6A_CE2B_MARK,
+ CS5B_CE1A_MARK, CS5A_CE2A_MARK,
+ WE3_ICIOWR_MARK, WE2_ICIORD_MARK,
+
+ IDED15_MARK, IDED14_MARK, IDED13_MARK, IDED12_MARK,
+ IDED11_MARK, IDED10_MARK, IDED9_MARK, IDED8_MARK,
+ IDED7_MARK, IDED6_MARK, IDED5_MARK, IDED4_MARK,
+ IDED3_MARK, IDED2_MARK, IDED1_MARK, IDED0_MARK,
+ DIRECTION_MARK, EXBUF_ENB_MARK, IDERST_MARK, IODACK_MARK,
+ IODREQ_MARK, IDEIORDY_MARK, IDEINT_MARK, IDEIOWR_MARK,
+ IDEIORD_MARK, IDECS1_MARK, IDECS0_MARK, IDEA2_MARK,
+ IDEA1_MARK, IDEA0_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT),
+ PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG5_DATA, PTG5_OUT),
+ PINMUX_DATA(PTG4_DATA, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_OUT),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT),
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT),
+ PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT),
+ PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT),
+ PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT),
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT),
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
+ PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT),
+ PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT),
+ PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT),
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),
+
+ /* PTN GPIO */
+ PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT),
+ PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
+ PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
+ PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
+ PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT),
+ PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT),
+ PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT),
+ PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT),
+
+ /* PTQ GPIO */
+ PINMUX_DATA(PTQ3_DATA, PTQ3_IN),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_IN),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_IN),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT),
+ PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT),
+ PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT),
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT),
+ PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT),
+ PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT),
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),
+
+ /* PTW GPIO */
+ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT),
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT),
+ PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT),
+ PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT),
+ PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT),
+ PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT),
+ PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT),
+ PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT),
+
+ /* PTX GPIO */
+ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT),
+ PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT),
+ PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT),
+ PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT),
+ PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT),
+ PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT),
+ PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT),
+ PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT),
+
+ /* PTY GPIO */
+ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT),
+ PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT),
+ PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT),
+ PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT),
+ PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT),
+ PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT),
+ PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT),
+ PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT),
+
+ /* PTZ GPIO */
+ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT),
+ PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT),
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT),
+ PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT),
+
+ /* PTA FN */
+ PINMUX_DATA(D23_MARK, PSA15_PSA14_FN1, PTA7_FN),
+ PINMUX_DATA(KEYOUT2_MARK, PSA15_PSA14_FN2, PTA7_FN),
+ PINMUX_DATA(D22_MARK, PSA15_PSA14_FN1, PTA6_FN),
+ PINMUX_DATA(KEYOUT1_MARK, PSA15_PSA14_FN2, PTA6_FN),
+ PINMUX_DATA(D21_MARK, PSA15_PSA14_FN1, PTA5_FN),
+ PINMUX_DATA(KEYOUT0_MARK, PSA15_PSA14_FN2, PTA5_FN),
+ PINMUX_DATA(D20_MARK, PSA15_PSA14_FN1, PTA4_FN),
+ PINMUX_DATA(KEYIN4_MARK, PSA15_PSA14_FN2, PTA4_FN),
+ PINMUX_DATA(D19_MARK, PSA15_PSA14_FN1, PTA3_FN),
+ PINMUX_DATA(KEYIN3_MARK, PSA15_PSA14_FN2, PTA3_FN),
+ PINMUX_DATA(D18_MARK, PSA15_PSA14_FN1, PTA2_FN),
+ PINMUX_DATA(KEYIN2_MARK, PSA15_PSA14_FN2, PTA2_FN),
+ PINMUX_DATA(D17_MARK, PSA15_PSA14_FN1, PTA1_FN),
+ PINMUX_DATA(KEYIN1_MARK, PSA15_PSA14_FN2, PTA1_FN),
+ PINMUX_DATA(D16_MARK, PSA15_PSA14_FN1, PTA0_FN),
+ PINMUX_DATA(KEYIN0_MARK, PSA15_PSA14_FN2, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(D31_MARK, PTB7_FN),
+ PINMUX_DATA(D30_MARK, PTB6_FN),
+ PINMUX_DATA(D29_MARK, PTB5_FN),
+ PINMUX_DATA(D28_MARK, PTB4_FN),
+ PINMUX_DATA(D27_MARK, PTB3_FN),
+ PINMUX_DATA(D26_MARK, PSA15_PSA14_FN1, PTB2_FN),
+ PINMUX_DATA(KEYOUT5_IN5_MARK, PSA15_PSA14_FN2, PTB2_FN),
+ PINMUX_DATA(D25_MARK, PSA15_PSA14_FN1, PTB1_FN),
+ PINMUX_DATA(KEYOUT4_IN6_MARK, PSA15_PSA14_FN2, PTB1_FN),
+ PINMUX_DATA(D24_MARK, PSA15_PSA14_FN1, PTB0_FN),
+ PINMUX_DATA(KEYOUT3_MARK, PSA15_PSA14_FN2, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(IDED15_MARK, PSA11_PSA10_FN1, PTC7_FN),
+ PINMUX_DATA(SDHI1CD_MARK, PSA11_PSA10_FN2, PTC7_FN),
+ PINMUX_DATA(IDED14_MARK, PSA11_PSA10_FN1, PTC6_FN),
+ PINMUX_DATA(SDHI1WP_MARK, PSA11_PSA10_FN2, PTC6_FN),
+ PINMUX_DATA(IDED13_MARK, PSA11_PSA10_FN1, PTC5_FN),
+ PINMUX_DATA(SDHI1D3_MARK, PSA11_PSA10_FN2, PTC5_FN),
+ PINMUX_DATA(IDED12_MARK, PSA11_PSA10_FN1, PTC4_FN),
+ PINMUX_DATA(SDHI1D2_MARK, PSA11_PSA10_FN2, PTC4_FN),
+ PINMUX_DATA(IDED11_MARK, PSA11_PSA10_FN1, PTC3_FN),
+ PINMUX_DATA(SDHI1D1_MARK, PSA11_PSA10_FN2, PTC3_FN),
+ PINMUX_DATA(IDED10_MARK, PSA11_PSA10_FN1, PTC2_FN),
+ PINMUX_DATA(SDHI1D0_MARK, PSA11_PSA10_FN2, PTC2_FN),
+ PINMUX_DATA(IDED9_MARK, PSA11_PSA10_FN1, PTC1_FN),
+ PINMUX_DATA(SDHI1CMD_MARK, PSA11_PSA10_FN2, PTC1_FN),
+ PINMUX_DATA(IDED8_MARK, PSA11_PSA10_FN1, PTC0_FN),
+ PINMUX_DATA(SDHI1CLK_MARK, PSA11_PSA10_FN2, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(IDED7_MARK, PSA11_PSA10_FN1, PTD7_FN),
+ PINMUX_DATA(SDHI0CD_PTD_MARK, PSA11_PSA10_FN2, PTD7_FN),
+ PINMUX_DATA(IDED6_MARK, PSA11_PSA10_FN1, PTD6_FN),
+ PINMUX_DATA(SDHI0WP_PTD_MARK, PSA11_PSA10_FN2, PTD6_FN),
+ PINMUX_DATA(IDED5_MARK, PSA11_PSA10_FN1, PTD5_FN),
+ PINMUX_DATA(SDHI0D3_PTD_MARK, PSA11_PSA10_FN2, PTD5_FN),
+ PINMUX_DATA(IDED4_MARK, PSA11_PSA10_FN1, PTD4_FN),
+ PINMUX_DATA(SDHI0D2_PTD_MARK, PSA11_PSA10_FN2, PTD4_FN),
+ PINMUX_DATA(IDED3_MARK, PSA11_PSA10_FN1, PTD3_FN),
+ PINMUX_DATA(SDHI0D1_PTD_MARK, PSA11_PSA10_FN2, PTD3_FN),
+ PINMUX_DATA(IDED2_MARK, PSA11_PSA10_FN1, PTD2_FN),
+ PINMUX_DATA(SDHI0D0_PTD_MARK, PSA11_PSA10_FN2, PTD2_FN),
+ PINMUX_DATA(IDED1_MARK, PSA11_PSA10_FN1, PTD1_FN),
+ PINMUX_DATA(SDHI0CMD_PTD_MARK, PSA11_PSA10_FN2, PTD1_FN),
+ PINMUX_DATA(IDED0_MARK, PSA11_PSA10_FN1, PTD0_FN),
+ PINMUX_DATA(SDHI0CLK_PTD_MARK, PSA11_PSA10_FN2, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(DIRECTION_MARK, PSA11_PSA10_FN1, PTE5_FN),
+ PINMUX_DATA(SCIF5_PTE_SCK_MARK, PSA11_PSA10_FN2, PTE5_FN),
+ PINMUX_DATA(EXBUF_ENB_MARK, PSA11_PSA10_FN1, PTE4_FN),
+ PINMUX_DATA(SCIF5_PTE_RXD_MARK, PSA11_PSA10_FN2, PTE4_FN),
+ PINMUX_DATA(IDERST_MARK, PSA11_PSA10_FN1, PTE3_FN),
+ PINMUX_DATA(SCIF5_PTE_TXD_MARK, PSA11_PSA10_FN2, PTE3_FN),
+ PINMUX_DATA(IODACK_MARK, PSA11_PSA10_FN1, PTE2_FN),
+ PINMUX_DATA(SCIF4_PTE_SCK_MARK, PSA11_PSA10_FN2, PTE2_FN),
+ PINMUX_DATA(IODREQ_MARK, PSA11_PSA10_FN1, PTE1_FN),
+ PINMUX_DATA(SCIF4_PTE_RXD_MARK, PSA11_PSA10_FN2, PTE1_FN),
+ PINMUX_DATA(IDEIORDY_MARK, PSA11_PSA10_FN1, PTE0_FN),
+ PINMUX_DATA(SCIF4_PTE_TXD_MARK, PSA11_PSA10_FN2, PTE0_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(IDEINT_MARK, PTF7_FN),
+ PINMUX_DATA(IDEIOWR_MARK, PSA5_PSA4_FN1, PTF6_FN),
+ PINMUX_DATA(MSIOF0_PTF_SS2_MARK, PSA5_PSA4_FN2, PTF6_FN),
+ PINMUX_DATA(MSIOF0_PTF_RSYNC_MARK, PSA5_PSA4_FN3, PTF6_FN),
+ PINMUX_DATA(IDEIORD_MARK, PSA5_PSA4_FN1, PTF5_FN),
+ PINMUX_DATA(MSIOF0_PTF_SS1_MARK, PSA5_PSA4_FN2, PTF5_FN),
+ PINMUX_DATA(MSIOF0_PTF_RSCK_MARK, PSA5_PSA4_FN3, PTF5_FN),
+ PINMUX_DATA(IDECS1_MARK, PSA11_PSA10_FN1, PTF4_FN),
+ PINMUX_DATA(MSIOF0_PTF_TSYNC_MARK, PSA11_PSA10_FN2, PTF4_FN),
+ PINMUX_DATA(IDECS0_MARK, PSA11_PSA10_FN1, PTF3_FN),
+ PINMUX_DATA(MSIOF0_PTF_TSCK_MARK, PSA11_PSA10_FN2, PTF3_FN),
+ PINMUX_DATA(IDEA2_MARK, PSA11_PSA10_FN1, PTF2_FN),
+ PINMUX_DATA(MSIOF0_PTF_RXD_MARK, PSA11_PSA10_FN2, PTF2_FN),
+ PINMUX_DATA(IDEA1_MARK, PSA11_PSA10_FN1, PTF1_FN),
+ PINMUX_DATA(MSIOF0_PTF_TXD_MARK, PSA11_PSA10_FN2, PTF1_FN),
+ PINMUX_DATA(IDEA0_MARK, PSA11_PSA10_FN1, PTF0_FN),
+ PINMUX_DATA(MSIOF0_PTF_MCK_MARK, PSA11_PSA10_FN2, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(AUDCK_MARK, PTG5_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PTG4_FN),
+ PINMUX_DATA(AUDATA3_MARK, PSA3_PSA2_FN1, PTG3_FN),
+ PINMUX_DATA(TPUTO3_MARK, PSA3_PSA2_FN2, PTG3_FN),
+ PINMUX_DATA(AUDATA2_MARK, PSA3_PSA2_FN1, PTG2_FN),
+ PINMUX_DATA(TPUTO2_MARK, PSA3_PSA2_FN2, PTG2_FN),
+ PINMUX_DATA(AUDATA1_MARK, PSA3_PSA2_FN1, PTG1_FN),
+ PINMUX_DATA(TPUTO1_MARK, PSA3_PSA2_FN2, PTG1_FN),
+ PINMUX_DATA(AUDATA0_MARK, PSA3_PSA2_FN1, PTG0_FN),
+ PINMUX_DATA(TPUTO0_MARK, PSA3_PSA2_FN2, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(LCDVCPWC_MARK, PTH7_FN),
+ PINMUX_DATA(LCDRD_MARK, PSB15_PSB14_FN1, PTH6_FN),
+ PINMUX_DATA(DV_CLKI_MARK, PSB15_PSB14_FN2, PTH6_FN),
+ PINMUX_DATA(LCDVSYN_MARK, PSB15_PSB14_FN1, PTH5_FN),
+ PINMUX_DATA(DV_CLK_MARK, PSB15_PSB14_FN2, PTH5_FN),
+ PINMUX_DATA(LCDDISP_MARK, PSB13_PSB12_LCDC_RGB, PTH4_FN),
+ PINMUX_DATA(LCDRS_MARK, PSB13_PSB12_LCDC_SYS, PTH4_FN),
+ PINMUX_DATA(LCDHSYN_MARK, PSB13_PSB12_LCDC_RGB, PTH3_FN),
+ PINMUX_DATA(LCDCS_MARK, PSB13_PSB12_LCDC_SYS, PTH3_FN),
+ PINMUX_DATA(LCDDON_MARK, PTH2_FN),
+ PINMUX_DATA(LCDDCK_MARK, PSB13_PSB12_LCDC_RGB, PTH1_FN),
+ PINMUX_DATA(LCDWR_MARK, PSB13_PSB12_LCDC_SYS, PTH1_FN),
+ PINMUX_DATA(LCDVEPWC_MARK, PTH0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(STATUS0_MARK, PTJ7_FN),
+ PINMUX_DATA(PDSTATUS_MARK, PTJ5_FN),
+ PINMUX_DATA(A25_MARK, PTJ3_FN),
+ PINMUX_DATA(A24_MARK, PTJ2_FN),
+ PINMUX_DATA(A23_MARK, PTJ1_FN),
+ PINMUX_DATA(A22_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(SIUAFCK_MARK, PTK7_FN),
+ PINMUX_DATA(SIUAILR_MARK, PSB9_PSB8_FN1, PTK6_FN),
+ PINMUX_DATA(MSIOF1_SS2_MARK, PSB9_PSB8_FN2, PTK6_FN),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PSB9_PSB8_FN3, PTK6_FN),
+ PINMUX_DATA(SIUAIBT_MARK, PSB9_PSB8_FN1, PTK5_FN),
+ PINMUX_DATA(MSIOF1_SS1_MARK, PSB9_PSB8_FN2, PTK5_FN),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PSB9_PSB8_FN3, PTK5_FN),
+ PINMUX_DATA(SIUAISLD_MARK, PSB7_PSB6_FN1, PTK4_FN),
+ PINMUX_DATA(MSIOF1_RXD_MARK, PSB7_PSB6_FN2, PTK4_FN),
+ PINMUX_DATA(SIUAOLR_MARK, PSB7_PSB6_FN1, PTK3_FN),
+ PINMUX_DATA(MSIOF1_TSYNC_MARK, PSB7_PSB6_FN2, PTK3_FN),
+ PINMUX_DATA(SIUAOBT_MARK, PSB7_PSB6_FN1, PTK2_FN),
+ PINMUX_DATA(MSIOF1_TSCK_MARK, PSB7_PSB6_FN2, PTK2_FN),
+ PINMUX_DATA(SIUAOSLD_MARK, PSB7_PSB6_FN1, PTK1_FN),
+ PINMUX_DATA(MSIOF1_TXD_MARK, PSB7_PSB6_FN2, PTK1_FN),
+ PINMUX_DATA(SIUAMCK_MARK, PSB7_PSB6_FN1, PTK0_FN),
+ PINMUX_DATA(MSIOF1_MCK_MARK, PSB7_PSB6_FN2, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(LCDD15_MARK, PSB5_PSB4_FN1, PTL7_FN),
+ PINMUX_DATA(DV_D15_MARK, PSB5_PSB4_FN2, PTL7_FN),
+ PINMUX_DATA(LCDD14_MARK, PSB5_PSB4_FN1, PTL6_FN),
+ PINMUX_DATA(DV_D14_MARK, PSB5_PSB4_FN2, PTL6_FN),
+ PINMUX_DATA(LCDD13_MARK, PSB5_PSB4_FN1, PTL5_FN),
+ PINMUX_DATA(DV_D13_MARK, PSB5_PSB4_FN2, PTL5_FN),
+ PINMUX_DATA(LCDD12_MARK, PSB5_PSB4_FN1, PTL4_FN),
+ PINMUX_DATA(DV_D12_MARK, PSB5_PSB4_FN2, PTL4_FN),
+ PINMUX_DATA(LCDD11_MARK, PSB5_PSB4_FN1, PTL3_FN),
+ PINMUX_DATA(DV_D11_MARK, PSB5_PSB4_FN2, PTL3_FN),
+ PINMUX_DATA(LCDD10_MARK, PSB5_PSB4_FN1, PTL2_FN),
+ PINMUX_DATA(DV_D10_MARK, PSB5_PSB4_FN2, PTL2_FN),
+ PINMUX_DATA(LCDD9_MARK, PSB5_PSB4_FN1, PTL1_FN),
+ PINMUX_DATA(DV_D9_MARK, PSB5_PSB4_FN2, PTL1_FN),
+ PINMUX_DATA(LCDD8_MARK, PSB5_PSB4_FN1, PTL0_FN),
+ PINMUX_DATA(DV_D8_MARK, PSB5_PSB4_FN2, PTL0_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(LCDD7_MARK, PSB5_PSB4_FN1, PTM7_FN),
+ PINMUX_DATA(DV_D7_MARK, PSB5_PSB4_FN2, PTM7_FN),
+ PINMUX_DATA(LCDD6_MARK, PSB5_PSB4_FN1, PTM6_FN),
+ PINMUX_DATA(DV_D6_MARK, PSB5_PSB4_FN2, PTM6_FN),
+ PINMUX_DATA(LCDD5_MARK, PSB5_PSB4_FN1, PTM5_FN),
+ PINMUX_DATA(DV_D5_MARK, PSB5_PSB4_FN2, PTM5_FN),
+ PINMUX_DATA(LCDD4_MARK, PSB5_PSB4_FN1, PTM4_FN),
+ PINMUX_DATA(DV_D4_MARK, PSB5_PSB4_FN2, PTM4_FN),
+ PINMUX_DATA(LCDD3_MARK, PSB5_PSB4_FN1, PTM3_FN),
+ PINMUX_DATA(DV_D3_MARK, PSB5_PSB4_FN2, PTM3_FN),
+ PINMUX_DATA(LCDD2_MARK, PSB5_PSB4_FN1, PTM2_FN),
+ PINMUX_DATA(DV_D2_MARK, PSB5_PSB4_FN2, PTM2_FN),
+ PINMUX_DATA(LCDD1_MARK, PSB5_PSB4_FN1, PTM1_FN),
+ PINMUX_DATA(DV_D1_MARK, PSB5_PSB4_FN2, PTM1_FN),
+ PINMUX_DATA(LCDD0_MARK, PSB5_PSB4_FN1, PTM0_FN),
+ PINMUX_DATA(DV_D0_MARK, PSB5_PSB4_FN2, PTM0_FN),
+
+ /* PTN FN */
+ PINMUX_DATA(LCDD23_MARK, PSB3_PSB2_FN1, PTN7_FN),
+ PINMUX_DATA(SCIF5_PTN_SCK_MARK, PSB3_PSB2_FN2, PTN7_FN),
+ PINMUX_DATA(LCDD22_MARK, PSB3_PSB2_FN1, PTN6_FN),
+ PINMUX_DATA(SCIF5_PTN_RXD_MARK, PSB3_PSB2_FN2, PTN6_FN),
+ PINMUX_DATA(LCDD21_MARK, PSB3_PSB2_FN1, PTN5_FN),
+ PINMUX_DATA(SCIF5_PTN_TXD_MARK, PSB3_PSB2_FN2, PTN5_FN),
+ PINMUX_DATA(LCDD20_MARK, PSB3_PSB2_FN1, PTN4_FN),
+ PINMUX_DATA(SCIF4_PTN_SCK_MARK, PSB3_PSB2_FN2, PTN4_FN),
+ PINMUX_DATA(LCDD19_MARK, PSB3_PSB2_FN1, PTN3_FN),
+ PINMUX_DATA(SCIF4_PTN_RXD_MARK, PSB3_PSB2_FN2, PTN3_FN),
+ PINMUX_DATA(LCDD18_MARK, PSB3_PSB2_FN1, PTN2_FN),
+ PINMUX_DATA(SCIF4_PTN_TXD_MARK, PSB3_PSB2_FN2, PTN2_FN),
+ PINMUX_DATA(LCDD17_MARK, PSB5_PSB4_FN1, PTN1_FN),
+ PINMUX_DATA(DV_VSYNC_MARK, PSB5_PSB4_FN2, PTN1_FN),
+ PINMUX_DATA(LCDD16_MARK, PSB5_PSB4_FN1, PTN0_FN),
+ PINMUX_DATA(DV_HSYNC_MARK, PSB5_PSB4_FN2, PTN0_FN),
+
+ /* PTQ FN */
+ PINMUX_DATA(AN3_MARK, PTQ3_FN),
+ PINMUX_DATA(AN2_MARK, PTQ2_FN),
+ PINMUX_DATA(AN1_MARK, PTQ1_FN),
+ PINMUX_DATA(AN0_MARK, PTQ0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(CS6B_CE1B_MARK, PTR7_FN),
+ PINMUX_DATA(CS6A_CE2B_MARK, PTR6_FN),
+ PINMUX_DATA(CS5B_CE1A_MARK, PTR5_FN),
+ PINMUX_DATA(CS5A_CE2A_MARK, PTR4_FN),
+ PINMUX_DATA(IOIS16_MARK, PSA13_PSA12_FN1, PTR3_FN),
+ PINMUX_DATA(LCDLCLK_PTR_MARK, PSA13_PSA12_FN2, PTR3_FN),
+ PINMUX_DATA(WAIT_MARK, PTR2_FN),
+ PINMUX_DATA(WE3_ICIOWR_MARK, PTR1_FN),
+ PINMUX_DATA(WE2_ICIORD_MARK, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(SCIF1_PTS_SCK_MARK, PSC15_PSC14_FN1, PTS7_FN),
+ PINMUX_DATA(SDHI0CD_PTS_MARK, PSC15_PSC14_FN2, PTS7_FN),
+ PINMUX_DATA(SCIF1_PTS_RXD_MARK, PSC15_PSC14_FN1, PTS6_FN),
+ PINMUX_DATA(SDHI0WP_PTS_MARK, PSC15_PSC14_FN2, PTS6_FN),
+ PINMUX_DATA(SCIF1_PTS_TXD_MARK, PSC15_PSC14_FN1, PTS5_FN),
+ PINMUX_DATA(SDHI0D3_PTS_MARK, PSC15_PSC14_FN2, PTS5_FN),
+ PINMUX_DATA(SCIF3_PTS_CTS_MARK, PSC15_PSC14_FN1, PTS4_FN),
+ PINMUX_DATA(SDHI0D2_PTS_MARK, PSC15_PSC14_FN2, PTS4_FN),
+ PINMUX_DATA(SCIF3_PTS_RTS_MARK, PSC15_PSC14_FN1, PTS3_FN),
+ PINMUX_DATA(SDHI0D1_PTS_MARK, PSC15_PSC14_FN2, PTS3_FN),
+ PINMUX_DATA(SCIF3_PTS_SCK_MARK, PSC15_PSC14_FN1, PTS2_FN),
+ PINMUX_DATA(SDHI0D0_PTS_MARK, PSC15_PSC14_FN2, PTS2_FN),
+ PINMUX_DATA(SCIF3_PTS_RXD_MARK, PSC15_PSC14_FN1, PTS1_FN),
+ PINMUX_DATA(SDHI0CMD_PTS_MARK, PSC15_PSC14_FN2, PTS1_FN),
+ PINMUX_DATA(SCIF3_PTS_TXD_MARK, PSC15_PSC14_FN1, PTS0_FN),
+ PINMUX_DATA(SDHI0CLK_PTS_MARK, PSC15_PSC14_FN2, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(SCIF0_PTT_SCK_MARK, PSC13_PSC12_FN1, PTT5_FN),
+ PINMUX_DATA(MSIOF0_PTT_TSCK_MARK, PSC13_PSC12_FN2, PTT5_FN),
+ PINMUX_DATA(SCIF0_PTT_RXD_MARK, PSC13_PSC12_FN1, PTT4_FN),
+ PINMUX_DATA(MSIOF0_PTT_RXD_MARK, PSC13_PSC12_FN2, PTT4_FN),
+ PINMUX_DATA(SCIF0_PTT_TXD_MARK, PSC13_PSC12_FN1, PTT3_FN),
+ PINMUX_DATA(MSIOF0_PTT_TXD_MARK, PSC13_PSC12_FN2, PTT3_FN),
+ PINMUX_DATA(SCIF2_PTT_SCK_MARK, PSC11_PSC10_FN1, PTT2_FN),
+ PINMUX_DATA(MSIOF0_PTT_TSYNC_MARK, PSC11_PSC10_FN2, PTT2_FN),
+ PINMUX_DATA(SCIF2_PTT_RXD_MARK, PSC11_PSC10_FN1, PTT1_FN),
+ PINMUX_DATA(MSIOF0_PTT_SS1_MARK, PSC11_PSC10_FN2, PTT1_FN),
+ PINMUX_DATA(MSIOF0_PTT_RSCK_MARK, PSC11_PSC10_FN3, PTT1_FN),
+ PINMUX_DATA(SCIF2_PTT_TXD_MARK, PSC11_PSC10_FN1, PTT0_FN),
+ PINMUX_DATA(MSIOF0_PTT_SS2_MARK, PSC11_PSC10_FN2, PTT0_FN),
+ PINMUX_DATA(MSIOF0_PTT_RSYNC_MARK, PSC11_PSC10_FN3, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(FCDE_MARK, PSC9_PSC8_FN1, PTU5_FN),
+ PINMUX_DATA(SCIF0_PTU_SCK_MARK, PSC9_PSC8_FN2, PTU5_FN),
+ PINMUX_DATA(FSC_MARK, PSC9_PSC8_FN1, PTU4_FN),
+ PINMUX_DATA(SCIF0_PTU_RXD_MARK, PSC9_PSC8_FN2, PTU4_FN),
+ PINMUX_DATA(FWE_MARK, PSC9_PSC8_FN1, PTU3_FN),
+ PINMUX_DATA(SCIF0_PTU_TXD_MARK, PSC9_PSC8_FN2, PTU3_FN),
+ PINMUX_DATA(FOE_MARK, PSC7_PSC6_FN1, PTU2_FN),
+ PINMUX_DATA(SCIF2_PTU_SCK_MARK, PSC7_PSC6_FN2, PTU2_FN),
+ PINMUX_DATA(VIO_VD2_MARK, PSC7_PSC6_FN3, PTU2_FN),
+ PINMUX_DATA(FRB_MARK, PSC7_PSC6_FN1, PTU1_FN),
+ PINMUX_DATA(SCIF2_PTU_RXD_MARK, PSC7_PSC6_FN2, PTU1_FN),
+ PINMUX_DATA(VIO_CLK2_MARK, PSC7_PSC6_FN3, PTU1_FN),
+ PINMUX_DATA(FCE_MARK, PSC7_PSC6_FN1, PTU0_FN),
+ PINMUX_DATA(SCIF2_PTU_TXD_MARK, PSC7_PSC6_FN2, PTU0_FN),
+ PINMUX_DATA(VIO_HD2_MARK, PSC7_PSC6_FN3, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(NAF7_MARK, PSC7_PSC6_FN1, PTV7_FN),
+ PINMUX_DATA(SCIF1_PTV_SCK_MARK, PSC7_PSC6_FN2, PTV7_FN),
+ PINMUX_DATA(VIO_D15_MARK, PSC7_PSC6_FN3, PTV7_FN),
+ PINMUX_DATA(NAF6_MARK, PSC7_PSC6_FN1, PTV6_FN),
+ PINMUX_DATA(SCIF1_PTV_RXD_MARK, PSC7_PSC6_FN2, PTV6_FN),
+ PINMUX_DATA(VIO_D14_MARK, PSC7_PSC6_FN3, PTV6_FN),
+ PINMUX_DATA(NAF5_MARK, PSC7_PSC6_FN1, PTV5_FN),
+ PINMUX_DATA(SCIF1_PTV_TXD_MARK, PSC7_PSC6_FN2, PTV5_FN),
+ PINMUX_DATA(VIO_D13_MARK, PSC7_PSC6_FN3, PTV5_FN),
+ PINMUX_DATA(NAF4_MARK, PSC7_PSC6_FN1, PTV4_FN),
+ PINMUX_DATA(SCIF3_PTV_CTS_MARK, PSC7_PSC6_FN2, PTV4_FN),
+ PINMUX_DATA(VIO_D12_MARK, PSC7_PSC6_FN3, PTV4_FN),
+ PINMUX_DATA(NAF3_MARK, PSC7_PSC6_FN1, PTV3_FN),
+ PINMUX_DATA(SCIF3_PTV_RTS_MARK, PSC7_PSC6_FN2, PTV3_FN),
+ PINMUX_DATA(VIO_D11_MARK, PSC7_PSC6_FN3, PTV3_FN),
+ PINMUX_DATA(NAF2_MARK, PSC7_PSC6_FN1, PTV2_FN),
+ PINMUX_DATA(SCIF3_PTV_SCK_MARK, PSC7_PSC6_FN2, PTV2_FN),
+ PINMUX_DATA(VIO_D10_MARK, PSC7_PSC6_FN3, PTV2_FN),
+ PINMUX_DATA(NAF1_MARK, PSC7_PSC6_FN1, PTV1_FN),
+ PINMUX_DATA(SCIF3_PTV_RXD_MARK, PSC7_PSC6_FN2, PTV1_FN),
+ PINMUX_DATA(VIO_D9_MARK, PSC7_PSC6_FN3, PTV1_FN),
+ PINMUX_DATA(NAF0_MARK, PSC7_PSC6_FN1, PTV0_FN),
+ PINMUX_DATA(SCIF3_PTV_TXD_MARK, PSC7_PSC6_FN2, PTV0_FN),
+ PINMUX_DATA(VIO_D8_MARK, PSC7_PSC6_FN3, PTV0_FN),
+
+ /* PTW FN */
+ PINMUX_DATA(IRQ7_MARK, PTW7_FN),
+ PINMUX_DATA(IRQ6_MARK, PTW6_FN),
+ PINMUX_DATA(IRQ5_MARK, PTW5_FN),
+ PINMUX_DATA(IRQ4_MARK, PSD15_PSD14_FN1, PTW4_FN),
+ PINMUX_DATA(LCDLCLK_PTW_MARK, PSD15_PSD14_FN2, PTW4_FN),
+ PINMUX_DATA(IRQ3_MARK, PSD13_PSD12_FN1, PTW3_FN),
+ PINMUX_DATA(ADTRG_MARK, PSD13_PSD12_FN2, PTW3_FN),
+ PINMUX_DATA(IRQ2_MARK, PSD11_PSD10_FN1, PTW2_FN),
+ PINMUX_DATA(BS_MARK, PSD11_PSD10_FN2, PTW2_FN),
+ PINMUX_DATA(VIO_CKO_MARK, PSD11_PSD10_FN3, PTW2_FN),
+ PINMUX_DATA(IRQ1_MARK, PSD9_PSD8_FN1, PTW1_FN),
+ PINMUX_DATA(SIUAISPD_MARK, PSD9_PSD8_FN2, PTW1_FN),
+ PINMUX_DATA(IRQ0_MARK, PSD7_PSD6_FN1, PTW0_FN),
+ PINMUX_DATA(SIUAOSPD_MARK, PSD7_PSD6_FN2, PTW0_FN),
+
+ /* PTX FN */
+ PINMUX_DATA(DACK1_MARK, PTX7_FN),
+ PINMUX_DATA(DREQ1_MARK, PSD3_PSD2_FN1, PTX6_FN),
+ PINMUX_DATA(MSIOF0_PTX_MCK_MARK, PSD3_PSD2_FN2, PTX6_FN),
+ PINMUX_DATA(DACK0_MARK, PTX5_FN),
+ PINMUX_DATA(IRDA_OUT_MARK, PSD5_PSD4_FN2, PTX5_FN),
+ PINMUX_DATA(DREQ0_MARK, PTX4_FN),
+ PINMUX_DATA(IRDA_IN_MARK, PSD5_PSD4_FN2, PTX4_FN),
+ PINMUX_DATA(TS0_SDAT_MARK, PTX3_FN),
+ PINMUX_DATA(TS0_SCK_MARK, PTX2_FN),
+ PINMUX_DATA(TS0_SDEN_MARK, PTX1_FN),
+ PINMUX_DATA(TS0_SPSYNC_MARK, PTX0_FN),
+
+ /* PTY FN */
+ PINMUX_DATA(VIO_D7_MARK, PTY7_FN),
+ PINMUX_DATA(VIO_D6_MARK, PTY6_FN),
+ PINMUX_DATA(VIO_D5_MARK, PTY5_FN),
+ PINMUX_DATA(VIO_D4_MARK, PTY4_FN),
+ PINMUX_DATA(VIO_D3_MARK, PTY3_FN),
+ PINMUX_DATA(VIO_D2_MARK, PTY2_FN),
+ PINMUX_DATA(VIO_D1_MARK, PTY1_FN),
+ PINMUX_DATA(VIO_D0_MARK, PTY0_FN),
+
+ /* PTZ FN */
+ PINMUX_DATA(SIUBOBT_MARK, PTZ7_FN),
+ PINMUX_DATA(SIUBOLR_MARK, PTZ6_FN),
+ PINMUX_DATA(SIUBOSLD_MARK, PTZ5_FN),
+ PINMUX_DATA(SIUBMCK_MARK, PTZ4_FN),
+ PINMUX_DATA(VIO_FLD_MARK, PSD1_PSD0_FN1, PTZ3_FN),
+ PINMUX_DATA(SIUBFCK_MARK, PSD1_PSD0_FN2, PTZ3_FN),
+ PINMUX_DATA(VIO_HD1_MARK, PSD1_PSD0_FN1, PTZ2_FN),
+ PINMUX_DATA(SIUBILR_MARK, PSD1_PSD0_FN2, PTZ2_FN),
+ PINMUX_DATA(VIO_VD1_MARK, PSD1_PSD0_FN1, PTZ1_FN),
+ PINMUX_DATA(SIUBIBT_MARK, PSD1_PSD0_FN2, PTZ1_FN),
+ PINMUX_DATA(VIO_CLK1_MARK, PSD1_PSD0_FN1, PTZ0_FN),
+ PINMUX_DATA(SIUBISLD_MARK, PSD1_PSD0_FN2, PTZ0_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS7, PTS7_DATA),
+ PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
+ PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
+ PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
+ PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
+ PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
+ PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+ PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+
+ /* SCIF0 */
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTT_TXD, SCIF0_PTT_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTT_RXD, SCIF0_PTT_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTT_SCK, SCIF0_PTT_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTU_TXD, SCIF0_PTU_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTU_RXD, SCIF0_PTU_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_PTU_SCK, SCIF0_PTU_SCK_MARK),
+
+ /* SCIF1 */
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTS_TXD, SCIF1_PTS_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTS_RXD, SCIF1_PTS_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTS_SCK, SCIF1_PTS_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTV_TXD, SCIF1_PTV_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTV_RXD, SCIF1_PTV_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_PTV_SCK, SCIF1_PTV_SCK_MARK),
+
+ /* SCIF2 */
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTT_TXD, SCIF2_PTT_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTT_RXD, SCIF2_PTT_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTT_SCK, SCIF2_PTT_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTU_TXD, SCIF2_PTU_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTU_RXD, SCIF2_PTU_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_PTU_SCK, SCIF2_PTU_SCK_MARK),
+
+ /* SCIF3 */
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_TXD, SCIF3_PTS_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_RXD, SCIF3_PTS_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_SCK, SCIF3_PTS_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_RTS, SCIF3_PTS_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTS_CTS, SCIF3_PTS_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_TXD, SCIF3_PTV_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_RXD, SCIF3_PTV_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_SCK, SCIF3_PTV_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_RTS, SCIF3_PTV_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_PTV_CTS, SCIF3_PTV_CTS_MARK),
+
+ /* SCIF4 */
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTE_TXD, SCIF4_PTE_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTE_RXD, SCIF4_PTE_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTE_SCK, SCIF4_PTE_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTN_TXD, SCIF4_PTN_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTN_RXD, SCIF4_PTN_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_PTN_SCK, SCIF4_PTN_SCK_MARK),
+
+ /* SCIF5 */
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTE_TXD, SCIF5_PTE_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTE_RXD, SCIF5_PTE_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTE_SCK, SCIF5_PTE_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTN_TXD, SCIF5_PTN_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTN_RXD, SCIF5_PTN_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_PTN_SCK, SCIF5_PTN_SCK_MARK),
+
+ /* CEU */
+ PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CLK1, VIO_CLK1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_VD1, VIO_VD1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_HD1, VIO_HD1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDLCLK_PTR, LCDLCLK_PTR_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDLCLK_PTW, LCDLCLK_PTW_MARK),
+ /* Main LCD */
+ PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+ /* Main LCD - RGB Mode */
+ PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+ /* Main LCD - SYS Mode */
+ PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+
+ /* IRQ */
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+
+ /* AUD */
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+
+ /* SDHI0 (PTD) */
+ PINMUX_GPIO(GPIO_FN_SDHI0CD_PTD, SDHI0CD_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0WP_PTD, SDHI0WP_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D3_PTD, SDHI0D3_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D2_PTD, SDHI0D2_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D1_PTD, SDHI0D1_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D0_PTD, SDHI0D0_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CMD_PTD, SDHI0CMD_PTD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CLK_PTD, SDHI0CLK_PTD_MARK),
+
+ /* SDHI0 (PTS) */
+ PINMUX_GPIO(GPIO_FN_SDHI0CD_PTS, SDHI0CD_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0WP_PTS, SDHI0WP_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D3_PTS, SDHI0D3_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D2_PTS, SDHI0D2_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D1_PTS, SDHI0D1_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D0_PTS, SDHI0D0_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CMD_PTS, SDHI0CMD_PTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CLK_PTS, SDHI0CLK_PTS_MARK),
+
+ /* SDHI1 */
+ PINMUX_GPIO(GPIO_FN_SDHI1CD, SDHI1CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1WP, SDHI1WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D3, SDHI1D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D2, SDHI1D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D1, SDHI1D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D0, SDHI1D0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1CMD, SDHI1CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1CLK, SDHI1CLK_MARK),
+
+ /* SIUA */
+ PINMUX_GPIO(GPIO_FN_SIUAFCK, SIUAFCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAMCK, SIUAMCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAISPD, SIUAISPD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUAOSPD, SIUAOSPD_MARK),
+
+ /* SIUB */
+ PINMUX_GPIO(GPIO_FN_SIUBFCK, SIUBFCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIUBMCK, SIUBMCK_MARK),
+
+ /* IRDA */
+ PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
+ PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+
+ /* VOU */
+ PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+
+ /* KEYSC */
+ PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+
+ /* MSIOF0 (PTF) */
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TXD, MSIOF0_PTF_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RXD, MSIOF0_PTF_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_MCK, MSIOF0_PTF_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TSYNC, MSIOF0_PTF_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_TSCK, MSIOF0_PTF_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RSYNC, MSIOF0_PTF_RSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_RSCK, MSIOF0_PTF_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_SS1, MSIOF0_PTF_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTF_SS2, MSIOF0_PTF_SS2_MARK),
+
+ /* MSIOF0 (PTT+PTX) */
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TXD, MSIOF0_PTT_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RXD, MSIOF0_PTT_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTX_MCK, MSIOF0_PTX_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TSYNC, MSIOF0_PTT_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_TSCK, MSIOF0_PTT_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RSYNC, MSIOF0_PTT_RSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_RSCK, MSIOF0_PTT_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_SS1, MSIOF0_PTT_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_PTT_SS2, MSIOF0_PTT_SS2_MARK),
+
+ /* MSIOF1 */
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TXD, MSIOF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RXD, MSIOF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_MCK, MSIOF1_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TSYNC, MSIOF1_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TSCK, MSIOF1_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RSYNC, MSIOF1_RSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RSCK, MSIOF1_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_SS1, MSIOF1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_SS2, MSIOF1_SS2_MARK),
+
+ /* TSIF */
+ PINMUX_GPIO(GPIO_FN_TS0_SDAT, TS0_SDAT_MARK),
+ PINMUX_GPIO(GPIO_FN_TS0_SCK, TS0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TS0_SDEN, TS0_SDEN_MARK),
+ PINMUX_GPIO(GPIO_FN_TS0_SPSYNC, TS0_SPSYNC_MARK),
+
+ /* FLCTL */
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK),
+ PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK),
+ PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK),
+ PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK),
+ PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK),
+ PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+
+ /* ADC */
+ PINMUX_GPIO(GPIO_FN_AN3, AN3_MARK),
+ PINMUX_GPIO(GPIO_FN_AN2, AN2_MARK),
+ PINMUX_GPIO(GPIO_FN_AN1, AN1_MARK),
+ PINMUX_GPIO(GPIO_FN_AN0, AN0_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK),
+
+ /* CPG */
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+
+ /* TPU */
+ PINMUX_GPIO(GPIO_FN_TPUTO0, TPUTO0_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO1, TPUTO1_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO2, TPUTO2_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO3, TPUTO3_MARK),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5A_CE2A, CS5A_CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3_ICIOWR, WE3_ICIOWR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2_ICIORD, WE2_ICIORD_MARK),
+
+ /* ATAPI */
+ PINMUX_GPIO(GPIO_FN_IDED15, IDED15_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED14, IDED14_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED13, IDED13_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED12, IDED12_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED11, IDED11_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED10, IDED10_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED9, IDED9_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED8, IDED8_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED7, IDED7_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED6, IDED6_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED5, IDED5_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED4, IDED4_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED3, IDED3_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED2, IDED2_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED1, IDED1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED0, IDED0_MARK),
+ PINMUX_GPIO(GPIO_FN_DIRECTION, DIRECTION_MARK),
+ PINMUX_GPIO(GPIO_FN_EXBUF_ENB, EXBUF_ENB_MARK),
+ PINMUX_GPIO(GPIO_FN_IDERST, IDERST_MARK),
+ PINMUX_GPIO(GPIO_FN_IODACK, IODACK_MARK),
+ PINMUX_GPIO(GPIO_FN_IODREQ, IODREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIORDY, IDEIORDY_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEINT, IDEINT_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIOWR, IDEIOWR_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIORD, IDEIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_IDECS1, IDECS1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDECS0, IDECS0_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA2, IDEA2_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA1, IDEA1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA0, IDEA0_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ PTA7_FN, PTA7_OUT, 0, PTA7_IN,
+ PTA6_FN, PTA6_OUT, 0, PTA6_IN,
+ PTA5_FN, PTA5_OUT, 0, PTA5_IN,
+ PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
+ PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
+ PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
+ PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
+ PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ PTB7_FN, PTB7_OUT, 0, PTB7_IN,
+ PTB6_FN, PTB6_OUT, 0, PTB6_IN,
+ PTB5_FN, PTB5_OUT, 0, PTB5_IN,
+ PTB4_FN, PTB4_OUT, 0, PTB4_IN,
+ PTB3_FN, PTB3_OUT, 0, PTB3_IN,
+ PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
+ PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
+ PTB0_FN, PTB0_OUT, 0, PTB0_IN }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ PTC7_FN, PTC7_OUT, 0, PTC7_IN,
+ PTC6_FN, PTC6_OUT, 0, PTC6_IN,
+ PTC5_FN, PTC5_OUT, 0, PTC5_IN,
+ PTC4_FN, PTC4_OUT, 0, PTC4_IN,
+ PTC3_FN, PTC3_OUT, 0, PTC3_IN,
+ PTC2_FN, PTC2_OUT, 0, PTC2_IN,
+ PTC1_FN, PTC1_OUT, 0, PTC1_IN,
+ PTC0_FN, PTC0_OUT, 0, PTC0_IN }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ PTD7_FN, PTD7_OUT, 0, PTD7_IN,
+ PTD6_FN, PTD6_OUT, 0, PTD6_IN,
+ PTD5_FN, PTD5_OUT, 0, PTD5_IN,
+ PTD4_FN, PTD4_OUT, 0, PTD4_IN,
+ PTD3_FN, PTD3_OUT, 0, PTD3_IN,
+ PTD2_FN, PTD2_OUT, 0, PTD2_IN,
+ PTD1_FN, PTD1_OUT, 0, PTD1_IN,
+ PTD0_FN, PTD0_OUT, 0, PTD0_IN }
+ },
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTE5_FN, PTE5_OUT, 0, PTE5_IN,
+ PTE4_FN, PTE4_OUT, 0, PTE4_IN,
+ PTE3_FN, PTE3_OUT, 0, PTE3_IN,
+ PTE2_FN, PTE2_OUT, 0, PTE2_IN,
+ PTE1_FN, PTE1_OUT, 0, PTE1_IN,
+ PTE0_FN, PTE0_OUT, 0, PTE0_IN }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ PTF7_FN, PTF7_OUT, 0, PTF7_IN,
+ PTF6_FN, PTF6_OUT, 0, PTF6_IN,
+ PTF5_FN, PTF5_OUT, 0, PTF5_IN,
+ PTF4_FN, PTF4_OUT, 0, PTF4_IN,
+ PTF3_FN, PTF3_OUT, 0, PTF3_IN,
+ PTF2_FN, PTF2_OUT, 0, PTF2_IN,
+ PTF1_FN, PTF1_OUT, 0, PTF1_IN,
+ PTF0_FN, PTF0_OUT, 0, PTF0_IN }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTG5_FN, PTG5_OUT, 0, 0,
+ PTG4_FN, PTG4_OUT, 0, 0,
+ PTG3_FN, PTG3_OUT, 0, 0,
+ PTG2_FN, PTG2_OUT, 0, 0,
+ PTG1_FN, PTG1_OUT, 0, 0,
+ PTG0_FN, PTG0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ PTH7_FN, PTH7_OUT, 0, PTH7_IN,
+ PTH6_FN, PTH6_OUT, 0, PTH6_IN,
+ PTH5_FN, PTH5_OUT, 0, PTH5_IN,
+ PTH4_FN, PTH4_OUT, 0, PTH4_IN,
+ PTH3_FN, PTH3_OUT, 0, PTH3_IN,
+ PTH2_FN, PTH2_OUT, 0, PTH2_IN,
+ PTH1_FN, PTH1_OUT, 0, PTH1_IN,
+ PTH0_FN, PTH0_OUT, 0, PTH0_IN }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ PTJ7_FN, PTJ7_OUT, 0, 0,
+ 0, 0, 0, 0,
+ PTJ5_FN, PTJ5_OUT, 0, 0,
+ 0, 0, 0, 0,
+ PTJ3_FN, PTJ3_OUT, 0, PTJ3_IN,
+ PTJ2_FN, PTJ2_OUT, 0, PTJ2_IN,
+ PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN,
+ PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ PTK7_FN, PTK7_OUT, 0, PTK7_IN,
+ PTK6_FN, PTK6_OUT, 0, PTK6_IN,
+ PTK5_FN, PTK5_OUT, 0, PTK5_IN,
+ PTK4_FN, PTK4_OUT, 0, PTK4_IN,
+ PTK3_FN, PTK3_OUT, 0, PTK3_IN,
+ PTK2_FN, PTK2_OUT, 0, PTK2_IN,
+ PTK1_FN, PTK1_OUT, 0, PTK1_IN,
+ PTK0_FN, PTK0_OUT, 0, PTK0_IN }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ PTL7_FN, PTL7_OUT, 0, PTL7_IN,
+ PTL6_FN, PTL6_OUT, 0, PTL6_IN,
+ PTL5_FN, PTL5_OUT, 0, PTL5_IN,
+ PTL4_FN, PTL4_OUT, 0, PTL4_IN,
+ PTL3_FN, PTL3_OUT, 0, PTL3_IN,
+ PTL2_FN, PTL2_OUT, 0, PTL2_IN,
+ PTL1_FN, PTL1_OUT, 0, PTL1_IN,
+ PTL0_FN, PTL0_OUT, 0, PTL0_IN }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ PTM7_FN, PTM7_OUT, 0, PTM7_IN,
+ PTM6_FN, PTM6_OUT, 0, PTM6_IN,
+ PTM5_FN, PTM5_OUT, 0, PTM5_IN,
+ PTM4_FN, PTM4_OUT, 0, PTM4_IN,
+ PTM3_FN, PTM3_OUT, 0, PTM3_IN,
+ PTM2_FN, PTM2_OUT, 0, PTM2_IN,
+ PTM1_FN, PTM1_OUT, 0, PTM1_IN,
+ PTM0_FN, PTM0_OUT, 0, PTM0_IN }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+ PTN7_FN, PTN7_OUT, 0, PTN7_IN,
+ PTN6_FN, PTN6_OUT, 0, PTN6_IN,
+ PTN5_FN, PTN5_OUT, 0, PTN5_IN,
+ PTN4_FN, PTN4_OUT, 0, PTN4_IN,
+ PTN3_FN, PTN3_OUT, 0, PTN3_IN,
+ PTN2_FN, PTN2_OUT, 0, PTN2_IN,
+ PTN1_FN, PTN1_OUT, 0, PTN1_IN,
+ PTN0_FN, PTN0_OUT, 0, PTN0_IN }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTQ3_FN, 0, 0, PTQ3_IN,
+ PTQ2_FN, 0, 0, PTQ2_IN,
+ PTQ1_FN, 0, 0, PTQ1_IN,
+ PTQ0_FN, 0, 0, PTQ0_IN }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+ PTR7_FN, PTR7_OUT, 0, PTR7_IN,
+ PTR6_FN, PTR6_OUT, 0, PTR6_IN,
+ PTR5_FN, PTR5_OUT, 0, PTR5_IN,
+ PTR4_FN, PTR4_OUT, 0, PTR4_IN,
+ PTR3_FN, 0, 0, PTR3_IN,
+ PTR2_FN, 0, PTR2_IN_PU, PTR2_IN,
+ PTR1_FN, PTR1_OUT, 0, PTR1_IN,
+ PTR0_FN, PTR0_OUT, 0, PTR0_IN }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+ PTS7_FN, PTS7_OUT, 0, PTS7_IN,
+ PTS6_FN, PTS6_OUT, 0, PTS6_IN,
+ PTS5_FN, PTS5_OUT, 0, PTS5_IN,
+ PTS4_FN, PTS4_OUT, 0, PTS4_IN,
+ PTS3_FN, PTS3_OUT, 0, PTS3_IN,
+ PTS2_FN, PTS2_OUT, 0, PTS2_IN,
+ PTS1_FN, PTS1_OUT, 0, PTS1_IN,
+ PTS0_FN, PTS0_OUT, 0, PTS0_IN }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTT5_FN, PTT5_OUT, 0, PTT5_IN,
+ PTT4_FN, PTT4_OUT, 0, PTT4_IN,
+ PTT3_FN, PTT3_OUT, 0, PTT3_IN,
+ PTT2_FN, PTT2_OUT, 0, PTT2_IN,
+ PTT1_FN, PTT1_OUT, 0, PTT1_IN,
+ PTT0_FN, PTT0_OUT, 0, PTT0_IN }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTU5_FN, PTU5_OUT, 0, PTU5_IN,
+ PTU4_FN, PTU4_OUT, 0, PTU4_IN,
+ PTU3_FN, PTU3_OUT, 0, PTU3_IN,
+ PTU2_FN, PTU2_OUT, 0, PTU2_IN,
+ PTU1_FN, PTU1_OUT, 0, PTU1_IN,
+ PTU0_FN, PTU0_OUT, 0, PTU0_IN }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+ PTV7_FN, PTV7_OUT, 0, PTV7_IN,
+ PTV6_FN, PTV6_OUT, 0, PTV6_IN,
+ PTV5_FN, PTV5_OUT, 0, PTV5_IN,
+ PTV4_FN, PTV4_OUT, 0, PTV4_IN,
+ PTV3_FN, PTV3_OUT, 0, PTV3_IN,
+ PTV2_FN, PTV2_OUT, 0, PTV2_IN,
+ PTV1_FN, PTV1_OUT, 0, PTV1_IN,
+ PTV0_FN, PTV0_OUT, 0, PTV0_IN }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+ PTW7_FN, PTW7_OUT, 0, PTW7_IN,
+ PTW6_FN, PTW6_OUT, 0, PTW6_IN,
+ PTW5_FN, PTW5_OUT, 0, PTW5_IN,
+ PTW4_FN, PTW4_OUT, 0, PTW4_IN,
+ PTW3_FN, PTW3_OUT, 0, PTW3_IN,
+ PTW2_FN, PTW2_OUT, 0, PTW2_IN,
+ PTW1_FN, PTW1_OUT, 0, PTW1_IN,
+ PTW0_FN, PTW0_OUT, 0, PTW0_IN }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+ PTX7_FN, PTX7_OUT, 0, PTX7_IN,
+ PTX6_FN, PTX6_OUT, 0, PTX6_IN,
+ PTX5_FN, PTX5_OUT, 0, PTX5_IN,
+ PTX4_FN, PTX4_OUT, 0, PTX4_IN,
+ PTX3_FN, PTX3_OUT, 0, PTX3_IN,
+ PTX2_FN, PTX2_OUT, 0, PTX2_IN,
+ PTX1_FN, PTX1_OUT, 0, PTX1_IN,
+ PTX0_FN, PTX0_OUT, 0, PTX0_IN }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+ PTY7_FN, PTY7_OUT, 0, PTY7_IN,
+ PTY6_FN, PTY6_OUT, 0, PTY6_IN,
+ PTY5_FN, PTY5_OUT, 0, PTY5_IN,
+ PTY4_FN, PTY4_OUT, 0, PTY4_IN,
+ PTY3_FN, PTY3_OUT, 0, PTY3_IN,
+ PTY2_FN, PTY2_OUT, 0, PTY2_IN,
+ PTY1_FN, PTY1_OUT, 0, PTY1_IN,
+ PTY0_FN, PTY0_OUT, 0, PTY0_IN }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+ PTZ7_FN, PTZ7_OUT, 0, PTZ7_IN,
+ PTZ6_FN, PTZ6_OUT, 0, PTZ6_IN,
+ PTZ5_FN, PTZ5_OUT, 0, PTZ5_IN,
+ PTZ4_FN, PTZ4_OUT, 0, PTZ4_IN,
+ PTZ3_FN, PTZ3_OUT, 0, PTZ3_IN,
+ PTZ2_FN, PTZ2_OUT, 0, PTZ2_IN,
+ PTZ1_FN, PTZ1_OUT, 0, PTZ1_IN,
+ PTZ0_FN, PTZ0_OUT, 0, PTZ0_IN }
+ },
+ { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 2) {
+ PSA15_PSA14_FN1, PSA15_PSA14_FN2, 0, 0,
+ PSA13_PSA12_FN1, PSA13_PSA12_FN2, 0, 0,
+ PSA11_PSA10_FN1, PSA11_PSA10_FN2, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3, 0,
+ PSA3_PSA2_FN1, PSA3_PSA2_FN2, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 2) {
+ PSB15_PSB14_FN1, PSB15_PSB14_FN2, 0, 0,
+ PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS, 0, 0,
+ 0, 0, 0, 0,
+ PSB9_PSB8_FN1, PSB9_PSB8_FN2, PSB9_PSB8_FN3, 0,
+ PSB7_PSB6_FN1, PSB7_PSB6_FN2, 0, 0,
+ PSB5_PSB4_FN1, PSB5_PSB4_FN2, 0, 0,
+ PSB3_PSB2_FN1, PSB3_PSB2_FN2, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 2) {
+ PSC15_PSC14_FN1, PSC15_PSC14_FN2, 0, 0,
+ PSC13_PSC12_FN1, PSC13_PSC12_FN2, 0, 0,
+ PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3, 0,
+ PSC9_PSC8_FN1, PSC9_PSC8_FN2, 0, 0,
+ PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 2) {
+ PSD15_PSD14_FN1, PSD15_PSD14_FN2, 0, 0,
+ PSD13_PSD12_FN1, PSD13_PSD12_FN2, 0, 0,
+ PSD11_PSD10_FN1, PSD11_PSD10_FN2, PSD11_PSD10_FN3, 0,
+ PSD9_PSD8_FN1, PSD9_PSD8_FN2, 0, 0,
+ PSD7_PSD6_FN1, PSD7_PSD6_FN2, 0, 0,
+ PSD5_PSD4_FN1, PSD5_PSD4_FN2, 0, 0,
+ PSD3_PSD2_FN1, PSD3_PSD2_FN2, 0, 0,
+ PSD1_PSD0_FN1, PSD1_PSD0_FN2, 0, 0 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+ 0, 0, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+ 0, 0, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+ PTJ7_DATA, 0, PTJ5_DATA, 0,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+ 0, 0, 0, 0,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+ 0, 0, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+ 0, 0, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7723_pinmux_info = {
+ .name = "sh7723_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_IDEA0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7724.c b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
new file mode 100644
index 000000000000..233fbf750b39
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
@@ -0,0 +1,2225 @@
+/*
+ * SH7724 Pinmux
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on SH7723 Pinmux
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/sh7724.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTQ7_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN,
+ PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTQ7_IN, PTQ6_IN, PTQ5_IN, PTQ4_IN,
+ PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS6_IN, PTS5_IN, PTS4_IN,
+ PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT7_IN, PTT6_IN, PTT5_IN, PTT4_IN,
+ PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN,
+ PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
+ PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
+ PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
+ PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
+ PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
+ PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
+ PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
+ PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTB7_IN_PU, PTB6_IN_PU, PTB5_IN_PU, PTB4_IN_PU,
+ PTB3_IN_PU, PTB2_IN_PU, PTB1_IN_PU, PTB0_IN_PU,
+ PTC7_IN_PU, PTC6_IN_PU, PTC5_IN_PU, PTC4_IN_PU,
+ PTC3_IN_PU, PTC2_IN_PU, PTC1_IN_PU, PTC0_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
+ PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU,
+ PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
+ PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU,
+ PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU,
+ PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
+ PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
+ PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
+ PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU,
+ PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
+ PTL7_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU,
+ PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU,
+ PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
+ PTM3_IN_PU, PTM2_IN_PU, PTM1_IN_PU, PTM0_IN_PU,
+ PTN7_IN_PU, PTN6_IN_PU, PTN5_IN_PU, PTN4_IN_PU,
+ PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU,
+ PTQ7_IN_PU, PTQ6_IN_PU, PTQ5_IN_PU, PTQ4_IN_PU,
+ PTQ3_IN_PU, PTQ2_IN_PU, PTQ1_IN_PU, PTQ0_IN_PU,
+ PTR7_IN_PU, PTR6_IN_PU, PTR5_IN_PU, PTR4_IN_PU,
+ PTR3_IN_PU, PTR2_IN_PU, PTR1_IN_PU, PTR0_IN_PU,
+ PTS6_IN_PU, PTS5_IN_PU, PTS4_IN_PU,
+ PTS3_IN_PU, PTS2_IN_PU, PTS1_IN_PU, PTS0_IN_PU,
+ PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU,
+ PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
+ PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU,
+ PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
+ PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU,
+ PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU,
+ PTW7_IN_PU, PTW6_IN_PU, PTW5_IN_PU, PTW4_IN_PU,
+ PTW3_IN_PU, PTW2_IN_PU, PTW1_IN_PU, PTW0_IN_PU,
+ PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU,
+ PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU,
+ PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU,
+ PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU,
+ PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU,
+ PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT,
+ PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
+ PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
+ PTG5_OUT, PTG4_OUT,
+ PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTJ7_OUT, PTJ6_OUT, PTJ5_OUT,
+ PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
+ PTQ7_OUT, PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
+ PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR1_OUT, PTR0_OUT,
+ PTS6_OUT, PTS5_OUT, PTS4_OUT,
+ PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT7_OUT, PTT6_OUT, PTT5_OUT, PTT4_OUT,
+ PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT,
+ PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
+ PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
+ PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
+ PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
+ PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
+ PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN,
+ PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG5_FN, PTG4_FN,
+ PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTJ7_FN, PTJ6_FN, PTJ5_FN,
+ PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN,
+ PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN,
+ PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
+ PTQ7_FN, PTQ6_FN, PTQ5_FN, PTQ4_FN,
+ PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS6_FN, PTS5_FN, PTS4_FN,
+ PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT7_FN, PTT6_FN, PTT5_FN, PTT4_FN,
+ PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN,
+ PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
+ PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+ PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
+ PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
+ PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
+ PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
+ PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
+ PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
+ PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
+ PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
+
+
+ PSA15_0, PSA15_1,
+ PSA14_0, PSA14_1,
+ PSA13_0, PSA13_1,
+ PSA12_0, PSA12_1,
+ PSA10_0, PSA10_1,
+ PSA9_0, PSA9_1,
+ PSA8_0, PSA8_1,
+ PSA7_0, PSA7_1,
+ PSA6_0, PSA6_1,
+ PSA5_0, PSA5_1,
+ PSA3_0, PSA3_1,
+ PSA2_0, PSA2_1,
+ PSA1_0, PSA1_1,
+ PSA0_0, PSA0_1,
+
+ PSB14_0, PSB14_1,
+ PSB13_0, PSB13_1,
+ PSB12_0, PSB12_1,
+ PSB11_0, PSB11_1,
+ PSB10_0, PSB10_1,
+ PSB9_0, PSB9_1,
+ PSB8_0, PSB8_1,
+ PSB7_0, PSB7_1,
+ PSB6_0, PSB6_1,
+ PSB5_0, PSB5_1,
+ PSB4_0, PSB4_1,
+ PSB3_0, PSB3_1,
+ PSB2_0, PSB2_1,
+ PSB1_0, PSB1_1,
+ PSB0_0, PSB0_1,
+
+ PSC15_0, PSC15_1,
+ PSC14_0, PSC14_1,
+ PSC13_0, PSC13_1,
+ PSC12_0, PSC12_1,
+ PSC11_0, PSC11_1,
+ PSC10_0, PSC10_1,
+ PSC9_0, PSC9_1,
+ PSC8_0, PSC8_1,
+ PSC7_0, PSC7_1,
+ PSC6_0, PSC6_1,
+ PSC5_0, PSC5_1,
+ PSC4_0, PSC4_1,
+ PSC2_0, PSC2_1,
+ PSC1_0, PSC1_1,
+ PSC0_0, PSC0_1,
+
+ PSD15_0, PSD15_1,
+ PSD14_0, PSD14_1,
+ PSD13_0, PSD13_1,
+ PSD12_0, PSD12_1,
+ PSD11_0, PSD11_1,
+ PSD10_0, PSD10_1,
+ PSD9_0, PSD9_1,
+ PSD8_0, PSD8_1,
+ PSD7_0, PSD7_1,
+ PSD6_0, PSD6_1,
+ PSD5_0, PSD5_1,
+ PSD4_0, PSD4_1,
+ PSD3_0, PSD3_1,
+ PSD2_0, PSD2_1,
+ PSD1_0, PSD1_1,
+ PSD0_0, PSD0_1,
+
+ PSE15_0, PSE15_1,
+ PSE14_0, PSE14_1,
+ PSE13_0, PSE13_1,
+ PSE12_0, PSE12_1,
+ PSE11_0, PSE11_1,
+ PSE10_0, PSE10_1,
+ PSE9_0, PSE9_1,
+ PSE8_0, PSE8_1,
+ PSE7_0, PSE7_1,
+ PSE6_0, PSE6_1,
+ PSE5_0, PSE5_1,
+ PSE4_0, PSE4_1,
+ PSE3_0, PSE3_1,
+ PSE2_0, PSE2_1,
+ PSE1_0, PSE1_1,
+ PSE0_0, PSE0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /*PTA*/
+ D23_MARK, KEYOUT2_MARK, IDED15_MARK,
+ D22_MARK, KEYOUT1_MARK, IDED14_MARK,
+ D21_MARK, KEYOUT0_MARK, IDED13_MARK,
+ D20_MARK, KEYIN4_MARK, IDED12_MARK,
+ D19_MARK, KEYIN3_MARK, IDED11_MARK,
+ D18_MARK, KEYIN2_MARK, IDED10_MARK,
+ D17_MARK, KEYIN1_MARK, IDED9_MARK,
+ D16_MARK, KEYIN0_MARK, IDED8_MARK,
+
+ /*PTB*/
+ D31_MARK, TPUTO1_MARK, IDEA1_MARK,
+ D30_MARK, TPUTO0_MARK, IDEA0_MARK,
+ D29_MARK, IODREQ_MARK,
+ D28_MARK, IDECS0_MARK,
+ D27_MARK, IDECS1_MARK,
+ D26_MARK, KEYOUT5_IN5_MARK, IDEIORD_MARK,
+ D25_MARK, KEYOUT4_IN6_MARK, IDEIOWR_MARK,
+ D24_MARK, KEYOUT3_MARK, IDEINT_MARK,
+
+ /*PTC*/
+ LCDD7_MARK,
+ LCDD6_MARK,
+ LCDD5_MARK,
+ LCDD4_MARK,
+ LCDD3_MARK,
+ LCDD2_MARK,
+ LCDD1_MARK,
+ LCDD0_MARK,
+
+ /*PTD*/
+ LCDD15_MARK,
+ LCDD14_MARK,
+ LCDD13_MARK,
+ LCDD12_MARK,
+ LCDD11_MARK,
+ LCDD10_MARK,
+ LCDD9_MARK,
+ LCDD8_MARK,
+
+ /*PTE*/
+ FSIMCKB_MARK,
+ FSIMCKA_MARK,
+ LCDD21_MARK, SCIF2_L_TXD_MARK,
+ LCDD20_MARK, SCIF4_SCK_MARK,
+ LCDD19_MARK, SCIF4_RXD_MARK,
+ LCDD18_MARK, SCIF4_TXD_MARK,
+ LCDD17_MARK,
+ LCDD16_MARK,
+
+ /*PTF*/
+ LCDVSYN_MARK,
+ LCDDISP_MARK, LCDRS_MARK,
+ LCDHSYN_MARK, LCDCS_MARK,
+ LCDDON_MARK,
+ LCDDCK_MARK, LCDWR_MARK,
+ LCDVEPWC_MARK, SCIF0_TXD_MARK,
+ LCDD23_MARK, SCIF2_L_SCK_MARK,
+ LCDD22_MARK, SCIF2_L_RXD_MARK,
+
+ /*PTG*/
+ AUDCK_MARK,
+ AUDSYNC_MARK,
+ AUDATA3_MARK,
+ AUDATA2_MARK,
+ AUDATA1_MARK,
+ AUDATA0_MARK,
+
+ /*PTH*/
+ VIO0_VD_MARK,
+ VIO0_CLK_MARK,
+ VIO0_D7_MARK,
+ VIO0_D6_MARK,
+ VIO0_D5_MARK,
+ VIO0_D4_MARK,
+ VIO0_D3_MARK,
+ VIO0_D2_MARK,
+
+ /*PTJ*/
+ PDSTATUS_MARK,
+ STATUS2_MARK,
+ STATUS0_MARK,
+ A25_MARK, BS_MARK,
+ A24_MARK,
+ A23_MARK,
+ A22_MARK,
+
+ /*PTK*/
+ VIO1_D5_MARK, VIO0_D13_MARK, IDED5_MARK,
+ VIO1_D4_MARK, VIO0_D12_MARK, IDED4_MARK,
+ VIO1_D3_MARK, VIO0_D11_MARK, IDED3_MARK,
+ VIO1_D2_MARK, VIO0_D10_MARK, IDED2_MARK,
+ VIO1_D1_MARK, VIO0_D9_MARK, IDED1_MARK,
+ VIO1_D0_MARK, VIO0_D8_MARK, IDED0_MARK,
+ VIO0_FLD_MARK,
+ VIO0_HD_MARK,
+
+ /*PTL*/
+ DV_D5_MARK, SCIF3_V_SCK_MARK, RMII_RXD0_MARK,
+ DV_D4_MARK, SCIF3_V_RXD_MARK, RMII_RXD1_MARK,
+ DV_D3_MARK, SCIF3_V_TXD_MARK, RMII_REF_CLK_MARK,
+ DV_D2_MARK, SCIF1_SCK_MARK, RMII_TX_EN_MARK,
+ DV_D1_MARK, SCIF1_RXD_MARK, RMII_TXD0_MARK,
+ DV_D0_MARK, SCIF1_TXD_MARK, RMII_TXD1_MARK,
+ DV_D15_MARK,
+ DV_D14_MARK, MSIOF0_MCK_MARK,
+
+ /*PTM*/
+ DV_D13_MARK, MSIOF0_TSCK_MARK,
+ DV_D12_MARK, MSIOF0_RXD_MARK,
+ DV_D11_MARK, MSIOF0_TXD_MARK,
+ DV_D10_MARK, MSIOF0_TSYNC_MARK,
+ DV_D9_MARK, MSIOF0_SS1_MARK, MSIOF0_RSCK_MARK,
+ DV_D8_MARK, MSIOF0_SS2_MARK, MSIOF0_RSYNC_MARK,
+ LCDVCPWC_MARK, SCIF0_RXD_MARK,
+ LCDRD_MARK, SCIF0_SCK_MARK,
+
+ /*PTN*/
+ VIO0_D1_MARK,
+ VIO0_D0_MARK,
+ DV_CLKI_MARK,
+ DV_CLK_MARK, SCIF2_V_SCK_MARK,
+ DV_VSYNC_MARK, SCIF2_V_RXD_MARK,
+ DV_HSYNC_MARK, SCIF2_V_TXD_MARK,
+ DV_D7_MARK, SCIF3_V_CTS_MARK, RMII_RX_ER_MARK,
+ DV_D6_MARK, SCIF3_V_RTS_MARK, RMII_CRS_DV_MARK,
+
+ /*PTQ*/
+ D7_MARK,
+ D6_MARK,
+ D5_MARK,
+ D4_MARK,
+ D3_MARK,
+ D2_MARK,
+ D1_MARK,
+ D0_MARK,
+
+ /*PTR*/
+ CS6B_CE1B_MARK,
+ CS6A_CE2B_MARK,
+ CS5B_CE1A_MARK,
+ CS5A_CE2A_MARK,
+ IOIS16_MARK, LCDLCLK_MARK,
+ WAIT_MARK,
+ WE3_ICIOWR_MARK, TPUTO3_MARK, TPUTI3_MARK,
+ WE2_ICIORD_MARK, TPUTO2_MARK, IDEA2_MARK,
+
+ /*PTS*/
+ VIO_CKO_MARK,
+ VIO1_FLD_MARK, TPUTI2_MARK, IDEIORDY_MARK,
+ VIO1_HD_MARK, SCIF5_SCK_MARK,
+ VIO1_VD_MARK, SCIF5_RXD_MARK,
+ VIO1_CLK_MARK, SCIF5_TXD_MARK,
+ VIO1_D7_MARK, VIO0_D15_MARK, IDED7_MARK,
+ VIO1_D6_MARK, VIO0_D14_MARK, IDED6_MARK,
+
+ /*PTT*/
+ D15_MARK,
+ D14_MARK,
+ D13_MARK,
+ D12_MARK,
+ D11_MARK,
+ D10_MARK,
+ D9_MARK,
+ D8_MARK,
+
+ /*PTU*/
+ DMAC_DACK0_MARK,
+ DMAC_DREQ0_MARK,
+ FSIOASD_MARK,
+ FSIIABCK_MARK,
+ FSIIALRCK_MARK,
+ FSIOABCK_MARK,
+ FSIOALRCK_MARK,
+ CLKAUDIOAO_MARK,
+
+ /*PTV*/
+ FSIIBSD_MARK, MSIOF1_SS2_MARK, MSIOF1_RSYNC_MARK,
+ FSIOBSD_MARK, MSIOF1_SS1_MARK, MSIOF1_RSCK_MARK,
+ FSIIBBCK_MARK, MSIOF1_RXD_MARK,
+ FSIIBLRCK_MARK, MSIOF1_TSYNC_MARK,
+ FSIOBBCK_MARK, MSIOF1_TSCK_MARK,
+ FSIOBLRCK_MARK, MSIOF1_TXD_MARK,
+ CLKAUDIOBO_MARK, MSIOF1_MCK_MARK,
+ FSIIASD_MARK,
+
+ /*PTW*/
+ MMC_D7_MARK, SDHI1CD_MARK, IODACK_MARK,
+ MMC_D6_MARK, SDHI1WP_MARK, IDERST_MARK,
+ MMC_D5_MARK, SDHI1D3_MARK, EXBUF_ENB_MARK,
+ MMC_D4_MARK, SDHI1D2_MARK, DIRECTION_MARK,
+ MMC_D3_MARK, SDHI1D1_MARK,
+ MMC_D2_MARK, SDHI1D0_MARK,
+ MMC_D1_MARK, SDHI1CMD_MARK,
+ MMC_D0_MARK, SDHI1CLK_MARK,
+
+ /*PTX*/
+ DMAC_DACK1_MARK, IRDA_OUT_MARK,
+ DMAC_DREQ1_MARK, IRDA_IN_MARK,
+ TSIF_TS0_SDAT_MARK, LNKSTA_MARK,
+ TSIF_TS0_SCK_MARK, MDIO_MARK,
+ TSIF_TS0_SDEN_MARK, MDC_MARK,
+ TSIF_TS0_SPSYNC_MARK,
+ MMC_CLK_MARK,
+ MMC_CMD_MARK,
+
+ /*PTY*/
+ SDHI0CD_MARK,
+ SDHI0WP_MARK,
+ SDHI0D3_MARK,
+ SDHI0D2_MARK,
+ SDHI0D1_MARK,
+ SDHI0D0_MARK,
+ SDHI0CMD_MARK,
+ SDHI0CLK_MARK,
+
+ /*PTZ*/
+ INTC_IRQ7_MARK, SCIF3_I_CTS_MARK,
+ INTC_IRQ6_MARK, SCIF3_I_RTS_MARK,
+ INTC_IRQ5_MARK, SCIF3_I_SCK_MARK,
+ INTC_IRQ4_MARK, SCIF3_I_RXD_MARK,
+ INTC_IRQ3_MARK, SCIF3_I_TXD_MARK,
+ INTC_IRQ2_MARK,
+ INTC_IRQ1_MARK,
+ INTC_IRQ0_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT, PTA5_IN_PU),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT, PTB7_IN_PU),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT, PTB6_IN_PU),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT, PTB5_IN_PU),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT, PTB4_IN_PU),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT, PTB3_IN_PU),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT, PTB0_IN_PU),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT, PTC7_IN_PU),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT, PTC6_IN_PU),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT, PTC5_IN_PU),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT, PTC4_IN_PU),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT, PTC3_IN_PU),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT, PTC2_IN_PU),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT, PTC1_IN_PU),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT, PTC0_IN_PU),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT, PTD7_IN_PU),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT, PTD6_IN_PU),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT, PTD5_IN_PU),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT, PTD4_IN_PU),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT, PTD3_IN_PU),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT, PTD2_IN_PU),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT, PTD1_IN_PU),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT, PTD0_IN_PU),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT, PTE7_IN_PU),
+ PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT, PTE6_IN_PU),
+ PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT, PTE5_IN_PU),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT, PTE4_IN_PU),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT, PTE3_IN_PU),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT, PTE2_IN_PU),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT, PTE1_IN_PU),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT, PTE0_IN_PU),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT, PTF7_IN_PU),
+ PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT, PTF6_IN_PU),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT, PTF5_IN_PU),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT, PTF4_IN_PU),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT, PTF3_IN_PU),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT, PTF2_IN_PU),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT, PTF1_IN_PU),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT, PTF0_IN_PU),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG5_DATA, PTG5_OUT),
+ PINMUX_DATA(PTG4_DATA, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_OUT),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT, PTH7_IN_PU),
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT, PTH6_IN_PU),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT, PTH5_IN_PU),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT, PTH4_IN_PU),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT, PTH3_IN_PU),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT, PTH2_IN_PU),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT, PTH1_IN_PU),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT, PTH0_IN_PU),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
+ PINMUX_DATA(PTJ6_DATA, PTJ6_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT, PTJ3_IN_PU),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT, PTJ2_IN_PU),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT, PTJ1_IN_PU),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT, PTJ0_IN_PU),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT, PTK7_IN_PU),
+ PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT, PTK6_IN_PU),
+ PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT, PTK5_IN_PU),
+ PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT, PTK4_IN_PU),
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT, PTK3_IN_PU),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT, PTK2_IN_PU),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT, PTK1_IN_PU),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT, PTK0_IN_PU),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT, PTL7_IN_PU),
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT, PTL6_IN_PU),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT, PTL5_IN_PU),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT, PTL4_IN_PU),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT, PTL3_IN_PU),
+ PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT, PTL2_IN_PU),
+ PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT, PTL1_IN_PU),
+ PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT, PTL0_IN_PU),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT, PTM7_IN_PU),
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT, PTM6_IN_PU),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT, PTM5_IN_PU),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT, PTM4_IN_PU),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT, PTM3_IN_PU),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT, PTM2_IN_PU),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT, PTM1_IN_PU),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT, PTM0_IN_PU),
+
+ /* PTN GPIO */
+ PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT, PTN7_IN_PU),
+ PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT, PTN6_IN_PU),
+ PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT, PTN5_IN_PU),
+ PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT, PTN4_IN_PU),
+ PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT, PTN3_IN_PU),
+ PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT, PTN2_IN_PU),
+ PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT, PTN1_IN_PU),
+ PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT, PTN0_IN_PU),
+
+ /* PTQ GPIO */
+ PINMUX_DATA(PTQ7_DATA, PTQ7_IN, PTQ7_OUT, PTQ7_IN_PU),
+ PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT, PTQ6_IN_PU),
+ PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT, PTQ5_IN_PU),
+ PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT, PTQ4_IN_PU),
+ PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT, PTQ3_IN_PU),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT, PTQ2_IN_PU),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT, PTQ1_IN_PU),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT, PTQ0_IN_PU),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT, PTR7_IN_PU),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT, PTR6_IN_PU),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT, PTR5_IN_PU),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT, PTR4_IN_PU),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_IN_PU),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT, PTR1_IN_PU),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT, PTR0_IN_PU),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT, PTS6_IN_PU),
+ PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT, PTS5_IN_PU),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT, PTS4_IN_PU),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT, PTS3_IN_PU),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT, PTS2_IN_PU),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT, PTS1_IN_PU),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT, PTS0_IN_PU),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT, PTT7_IN_PU),
+ PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT, PTT6_IN_PU),
+ PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT, PTT5_IN_PU),
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT, PTT4_IN_PU),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT, PTT3_IN_PU),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT, PTT2_IN_PU),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT, PTT1_IN_PU),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT, PTT0_IN_PU),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT, PTU7_IN_PU),
+ PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT, PTU6_IN_PU),
+ PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT, PTU5_IN_PU),
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT, PTU4_IN_PU),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT, PTU3_IN_PU),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT, PTU2_IN_PU),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT, PTU1_IN_PU),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT, PTU0_IN_PU),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT, PTV7_IN_PU),
+ PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT, PTV6_IN_PU),
+ PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT, PTV5_IN_PU),
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT, PTV4_IN_PU),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT, PTV3_IN_PU),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT, PTV2_IN_PU),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT, PTV1_IN_PU),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT, PTV0_IN_PU),
+
+ /* PTW GPIO */
+ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT, PTW7_IN_PU),
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT, PTW6_IN_PU),
+ PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT, PTW5_IN_PU),
+ PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT, PTW4_IN_PU),
+ PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT, PTW3_IN_PU),
+ PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT, PTW2_IN_PU),
+ PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT, PTW1_IN_PU),
+ PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT, PTW0_IN_PU),
+
+ /* PTX GPIO */
+ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT, PTX7_IN_PU),
+ PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT, PTX6_IN_PU),
+ PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT, PTX5_IN_PU),
+ PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT, PTX4_IN_PU),
+ PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT, PTX3_IN_PU),
+ PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT, PTX2_IN_PU),
+ PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT, PTX1_IN_PU),
+ PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT, PTX0_IN_PU),
+
+ /* PTY GPIO */
+ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT, PTY7_IN_PU),
+ PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT, PTY6_IN_PU),
+ PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT, PTY5_IN_PU),
+ PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT, PTY4_IN_PU),
+ PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT, PTY3_IN_PU),
+ PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT, PTY2_IN_PU),
+ PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT, PTY1_IN_PU),
+ PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT, PTY0_IN_PU),
+
+ /* PTZ GPIO */
+ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT, PTZ7_IN_PU),
+ PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT, PTZ6_IN_PU),
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT, PTZ5_IN_PU),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT, PTZ4_IN_PU),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT, PTZ3_IN_PU),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT, PTZ2_IN_PU),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT, PTZ1_IN_PU),
+ PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT, PTZ0_IN_PU),
+
+ /* PTA FN */
+ PINMUX_DATA(D23_MARK, PSA15_0, PSA14_0, PTA7_FN),
+ PINMUX_DATA(D22_MARK, PSA15_0, PSA14_0, PTA6_FN),
+ PINMUX_DATA(D21_MARK, PSA15_0, PSA14_0, PTA5_FN),
+ PINMUX_DATA(D20_MARK, PSA15_0, PSA14_0, PTA4_FN),
+ PINMUX_DATA(D19_MARK, PSA15_0, PSA14_0, PTA3_FN),
+ PINMUX_DATA(D18_MARK, PSA15_0, PSA14_0, PTA2_FN),
+ PINMUX_DATA(D17_MARK, PSA15_0, PSA14_0, PTA1_FN),
+ PINMUX_DATA(D16_MARK, PSA15_0, PSA14_0, PTA0_FN),
+
+ PINMUX_DATA(KEYOUT2_MARK, PSA15_0, PSA14_1, PTA7_FN),
+ PINMUX_DATA(KEYOUT1_MARK, PSA15_0, PSA14_1, PTA6_FN),
+ PINMUX_DATA(KEYOUT0_MARK, PSA15_0, PSA14_1, PTA5_FN),
+ PINMUX_DATA(KEYIN4_MARK, PSA15_0, PSA14_1, PTA4_FN),
+ PINMUX_DATA(KEYIN3_MARK, PSA15_0, PSA14_1, PTA3_FN),
+ PINMUX_DATA(KEYIN2_MARK, PSA15_0, PSA14_1, PTA2_FN),
+ PINMUX_DATA(KEYIN1_MARK, PSA15_0, PSA14_1, PTA1_FN),
+ PINMUX_DATA(KEYIN0_MARK, PSA15_0, PSA14_1, PTA0_FN),
+
+ PINMUX_DATA(IDED15_MARK, PSA15_1, PSA14_0, PTA7_FN),
+ PINMUX_DATA(IDED14_MARK, PSA15_1, PSA14_0, PTA6_FN),
+ PINMUX_DATA(IDED13_MARK, PSA15_1, PSA14_0, PTA5_FN),
+ PINMUX_DATA(IDED12_MARK, PSA15_1, PSA14_0, PTA4_FN),
+ PINMUX_DATA(IDED11_MARK, PSA15_1, PSA14_0, PTA3_FN),
+ PINMUX_DATA(IDED10_MARK, PSA15_1, PSA14_0, PTA2_FN),
+ PINMUX_DATA(IDED9_MARK, PSA15_1, PSA14_0, PTA1_FN),
+ PINMUX_DATA(IDED8_MARK, PSA15_1, PSA14_0, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(D31_MARK, PSE15_0, PSE14_0, PTB7_FN),
+ PINMUX_DATA(D30_MARK, PSE15_0, PSE14_0, PTB6_FN),
+ PINMUX_DATA(D29_MARK, PSE11_0, PTB5_FN),
+ PINMUX_DATA(D28_MARK, PSE11_0, PTB4_FN),
+ PINMUX_DATA(D27_MARK, PSE11_0, PTB3_FN),
+ PINMUX_DATA(D26_MARK, PSA15_0, PSA14_0, PTB2_FN),
+ PINMUX_DATA(D25_MARK, PSA15_0, PSA14_0, PTB1_FN),
+ PINMUX_DATA(D24_MARK, PSA15_0, PSA14_0, PTB0_FN),
+
+ PINMUX_DATA(IDEA1_MARK, PSE15_1, PSE14_0, PTB7_FN),
+ PINMUX_DATA(IDEA0_MARK, PSE15_1, PSE14_0, PTB6_FN),
+ PINMUX_DATA(IODREQ_MARK, PSE11_1, PTB5_FN),
+ PINMUX_DATA(IDECS0_MARK, PSE11_1, PTB4_FN),
+ PINMUX_DATA(IDECS1_MARK, PSE11_1, PTB3_FN),
+ PINMUX_DATA(IDEIORD_MARK, PSA15_1, PSA14_0, PTB2_FN),
+ PINMUX_DATA(IDEIOWR_MARK, PSA15_1, PSA14_0, PTB1_FN),
+ PINMUX_DATA(IDEINT_MARK, PSA15_1, PSA14_0, PTB0_FN),
+
+ PINMUX_DATA(TPUTO1_MARK, PSE15_0, PSE14_1, PTB7_FN),
+ PINMUX_DATA(TPUTO0_MARK, PSE15_0, PSE14_1, PTB6_FN),
+
+ PINMUX_DATA(KEYOUT5_IN5_MARK, PSA15_0, PSA14_1, PTB2_FN),
+ PINMUX_DATA(KEYOUT4_IN6_MARK, PSA15_0, PSA14_1, PTB1_FN),
+ PINMUX_DATA(KEYOUT3_MARK, PSA15_0, PSA14_1, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(LCDD7_MARK, PSD5_0, PTC7_FN),
+ PINMUX_DATA(LCDD6_MARK, PSD5_0, PTC6_FN),
+ PINMUX_DATA(LCDD5_MARK, PSD5_0, PTC5_FN),
+ PINMUX_DATA(LCDD4_MARK, PSD5_0, PTC4_FN),
+ PINMUX_DATA(LCDD3_MARK, PSD5_0, PTC3_FN),
+ PINMUX_DATA(LCDD2_MARK, PSD5_0, PTC2_FN),
+ PINMUX_DATA(LCDD1_MARK, PSD5_0, PTC1_FN),
+ PINMUX_DATA(LCDD0_MARK, PSD5_0, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(LCDD15_MARK, PSD5_0, PTD7_FN),
+ PINMUX_DATA(LCDD14_MARK, PSD5_0, PTD6_FN),
+ PINMUX_DATA(LCDD13_MARK, PSD5_0, PTD5_FN),
+ PINMUX_DATA(LCDD12_MARK, PSD5_0, PTD4_FN),
+ PINMUX_DATA(LCDD11_MARK, PSD5_0, PTD3_FN),
+ PINMUX_DATA(LCDD10_MARK, PSD5_0, PTD2_FN),
+ PINMUX_DATA(LCDD9_MARK, PSD5_0, PTD1_FN),
+ PINMUX_DATA(LCDD8_MARK, PSD5_0, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(FSIMCKB_MARK, PTE7_FN),
+ PINMUX_DATA(FSIMCKA_MARK, PTE6_FN),
+
+ PINMUX_DATA(LCDD21_MARK, PSC5_0, PSC4_0, PTE5_FN),
+ PINMUX_DATA(LCDD20_MARK, PSD3_0, PSD2_0, PTE4_FN),
+ PINMUX_DATA(LCDD19_MARK, PSA3_0, PSA2_0, PTE3_FN),
+ PINMUX_DATA(LCDD18_MARK, PSA3_0, PSA2_0, PTE2_FN),
+ PINMUX_DATA(LCDD17_MARK, PSD5_0, PTE1_FN),
+ PINMUX_DATA(LCDD16_MARK, PSD5_0, PTE0_FN),
+
+ PINMUX_DATA(SCIF2_L_TXD_MARK, PSC5_0, PSC4_1, PTE5_FN),
+ PINMUX_DATA(SCIF4_SCK_MARK, PSD3_0, PSD2_1, PTE4_FN),
+ PINMUX_DATA(SCIF4_RXD_MARK, PSA3_0, PSA2_1, PTE3_FN),
+ PINMUX_DATA(SCIF4_TXD_MARK, PSA3_0, PSA2_1, PTE2_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(LCDVSYN_MARK, PSD8_0, PTF7_FN),
+ PINMUX_DATA(LCDDISP_MARK, PSD10_0, PSD9_0, PTF6_FN),
+ PINMUX_DATA(LCDHSYN_MARK, PSD10_0, PSD9_0, PTF5_FN),
+ PINMUX_DATA(LCDDON_MARK, PSD8_0, PTF4_FN),
+ PINMUX_DATA(LCDDCK_MARK, PSD10_0, PSD9_0, PTF3_FN),
+ PINMUX_DATA(LCDVEPWC_MARK, PSA6_0, PTF2_FN),
+ PINMUX_DATA(LCDD23_MARK, PSC7_0, PSC6_0, PTF1_FN),
+ PINMUX_DATA(LCDD22_MARK, PSC5_0, PSC4_0, PTF0_FN),
+
+ PINMUX_DATA(LCDRS_MARK, PSD10_0, PSD9_1, PTF6_FN),
+ PINMUX_DATA(LCDCS_MARK, PSD10_0, PSD9_1, PTF5_FN),
+ PINMUX_DATA(LCDWR_MARK, PSD10_0, PSD9_1, PTF3_FN),
+
+ PINMUX_DATA(SCIF0_TXD_MARK, PSA6_1, PTF2_FN),
+ PINMUX_DATA(SCIF2_L_SCK_MARK, PSC7_0, PSC6_1, PTF1_FN),
+ PINMUX_DATA(SCIF2_L_RXD_MARK, PSC5_0, PSC4_1, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(AUDCK_MARK, PTG5_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PTG4_FN),
+ PINMUX_DATA(AUDATA3_MARK, PTG3_FN),
+ PINMUX_DATA(AUDATA2_MARK, PTG2_FN),
+ PINMUX_DATA(AUDATA1_MARK, PTG1_FN),
+ PINMUX_DATA(AUDATA0_MARK, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(VIO0_VD_MARK, PTH7_FN),
+ PINMUX_DATA(VIO0_CLK_MARK, PTH6_FN),
+ PINMUX_DATA(VIO0_D7_MARK, PTH5_FN),
+ PINMUX_DATA(VIO0_D6_MARK, PTH4_FN),
+ PINMUX_DATA(VIO0_D5_MARK, PTH3_FN),
+ PINMUX_DATA(VIO0_D4_MARK, PTH2_FN),
+ PINMUX_DATA(VIO0_D3_MARK, PTH1_FN),
+ PINMUX_DATA(VIO0_D2_MARK, PTH0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(PDSTATUS_MARK, PTJ7_FN),
+ PINMUX_DATA(STATUS2_MARK, PTJ6_FN),
+ PINMUX_DATA(STATUS0_MARK, PTJ5_FN),
+ PINMUX_DATA(A25_MARK, PSA8_0, PTJ3_FN),
+ PINMUX_DATA(BS_MARK, PSA8_1, PTJ3_FN),
+ PINMUX_DATA(A24_MARK, PTJ2_FN),
+ PINMUX_DATA(A23_MARK, PTJ1_FN),
+ PINMUX_DATA(A22_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(VIO1_D5_MARK, PSB7_0, PSB6_0, PTK7_FN),
+ PINMUX_DATA(VIO1_D4_MARK, PSB7_0, PSB6_0, PTK6_FN),
+ PINMUX_DATA(VIO1_D3_MARK, PSB7_0, PSB6_0, PTK5_FN),
+ PINMUX_DATA(VIO1_D2_MARK, PSB7_0, PSB6_0, PTK4_FN),
+ PINMUX_DATA(VIO1_D1_MARK, PSB7_0, PSB6_0, PTK3_FN),
+ PINMUX_DATA(VIO1_D0_MARK, PSB7_0, PSB6_0, PTK2_FN),
+
+ PINMUX_DATA(VIO0_D13_MARK, PSB7_0, PSB6_1, PTK7_FN),
+ PINMUX_DATA(VIO0_D12_MARK, PSB7_0, PSB6_1, PTK6_FN),
+ PINMUX_DATA(VIO0_D11_MARK, PSB7_0, PSB6_1, PTK5_FN),
+ PINMUX_DATA(VIO0_D10_MARK, PSB7_0, PSB6_1, PTK4_FN),
+ PINMUX_DATA(VIO0_D9_MARK, PSB7_0, PSB6_1, PTK3_FN),
+ PINMUX_DATA(VIO0_D8_MARK, PSB7_0, PSB6_1, PTK2_FN),
+
+ PINMUX_DATA(IDED5_MARK, PSB7_1, PSB6_0, PTK7_FN),
+ PINMUX_DATA(IDED4_MARK, PSB7_1, PSB6_0, PTK6_FN),
+ PINMUX_DATA(IDED3_MARK, PSB7_1, PSB6_0, PTK5_FN),
+ PINMUX_DATA(IDED2_MARK, PSB7_1, PSB6_0, PTK4_FN),
+ PINMUX_DATA(IDED1_MARK, PSB7_1, PSB6_0, PTK3_FN),
+ PINMUX_DATA(IDED0_MARK, PSB7_1, PSB6_0, PTK2_FN),
+
+ PINMUX_DATA(VIO0_FLD_MARK, PTK1_FN),
+ PINMUX_DATA(VIO0_HD_MARK, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(DV_D5_MARK, PSB9_0, PSB8_0, PTL7_FN),
+ PINMUX_DATA(DV_D4_MARK, PSB9_0, PSB8_0, PTL6_FN),
+ PINMUX_DATA(DV_D3_MARK, PSE7_0, PSE6_0, PTL5_FN),
+ PINMUX_DATA(DV_D2_MARK, PSC9_0, PSC8_0, PTL4_FN),
+ PINMUX_DATA(DV_D1_MARK, PSC9_0, PSC8_0, PTL3_FN),
+ PINMUX_DATA(DV_D0_MARK, PSC9_0, PSC8_0, PTL2_FN),
+ PINMUX_DATA(DV_D15_MARK, PSD4_0, PTL1_FN),
+ PINMUX_DATA(DV_D14_MARK, PSE5_0, PSE4_0, PTL0_FN),
+
+ PINMUX_DATA(SCIF3_V_SCK_MARK, PSB9_0, PSB8_1, PTL7_FN),
+ PINMUX_DATA(SCIF3_V_RXD_MARK, PSB9_0, PSB8_1, PTL6_FN),
+ PINMUX_DATA(SCIF3_V_TXD_MARK, PSE7_0, PSE6_1, PTL5_FN),
+ PINMUX_DATA(SCIF1_SCK_MARK, PSC9_0, PSC8_1, PTL4_FN),
+ PINMUX_DATA(SCIF1_RXD_MARK, PSC9_0, PSC8_1, PTL3_FN),
+ PINMUX_DATA(SCIF1_TXD_MARK, PSC9_0, PSC8_1, PTL2_FN),
+
+ PINMUX_DATA(RMII_RXD0_MARK, PSB9_1, PSB8_0, PTL7_FN),
+ PINMUX_DATA(RMII_RXD1_MARK, PSB9_1, PSB8_0, PTL6_FN),
+ PINMUX_DATA(RMII_REF_CLK_MARK, PSE7_1, PSE6_0, PTL5_FN),
+ PINMUX_DATA(RMII_TX_EN_MARK, PSC9_1, PSC8_0, PTL4_FN),
+ PINMUX_DATA(RMII_TXD0_MARK, PSC9_1, PSC8_0, PTL3_FN),
+ PINMUX_DATA(RMII_TXD1_MARK, PSC9_1, PSC8_0, PTL2_FN),
+
+ PINMUX_DATA(MSIOF0_MCK_MARK, PSE5_0, PSE4_1, PTL0_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(DV_D13_MARK, PSC13_0, PSC12_0, PTM7_FN),
+ PINMUX_DATA(DV_D12_MARK, PSC13_0, PSC12_0, PTM6_FN),
+ PINMUX_DATA(DV_D11_MARK, PSC13_0, PSC12_0, PTM5_FN),
+ PINMUX_DATA(DV_D10_MARK, PSC13_0, PSC12_0, PTM4_FN),
+ PINMUX_DATA(DV_D9_MARK, PSC11_0, PSC10_0, PTM3_FN),
+ PINMUX_DATA(DV_D8_MARK, PSC11_0, PSC10_0, PTM2_FN),
+
+ PINMUX_DATA(MSIOF0_TSCK_MARK, PSC13_0, PSC12_1, PTM7_FN),
+ PINMUX_DATA(MSIOF0_RXD_MARK, PSC13_0, PSC12_1, PTM6_FN),
+ PINMUX_DATA(MSIOF0_TXD_MARK, PSC13_0, PSC12_1, PTM5_FN),
+ PINMUX_DATA(MSIOF0_TSYNC_MARK, PSC13_0, PSC12_1, PTM4_FN),
+ PINMUX_DATA(MSIOF0_SS1_MARK, PSC11_0, PSC10_1, PTM3_FN),
+ PINMUX_DATA(MSIOF0_RSCK_MARK, PSC11_1, PSC10_0, PTM3_FN),
+ PINMUX_DATA(MSIOF0_SS2_MARK, PSC11_0, PSC10_1, PTM2_FN),
+ PINMUX_DATA(MSIOF0_RSYNC_MARK, PSC11_1, PSC10_0, PTM2_FN),
+
+ PINMUX_DATA(LCDVCPWC_MARK, PSA6_0, PTM1_FN),
+ PINMUX_DATA(LCDRD_MARK, PSA7_0, PTM0_FN),
+
+ PINMUX_DATA(SCIF0_RXD_MARK, PSA6_1, PTM1_FN),
+ PINMUX_DATA(SCIF0_SCK_MARK, PSA7_1, PTM0_FN),
+
+ /* PTN FN */
+ PINMUX_DATA(VIO0_D1_MARK, PTN7_FN),
+ PINMUX_DATA(VIO0_D0_MARK, PTN6_FN),
+
+ PINMUX_DATA(DV_CLKI_MARK, PSD11_0, PTN5_FN),
+ PINMUX_DATA(DV_CLK_MARK, PSD13_0, PSD12_0, PTN4_FN),
+ PINMUX_DATA(DV_VSYNC_MARK, PSD15_0, PSD14_0, PTN3_FN),
+ PINMUX_DATA(DV_HSYNC_MARK, PSB5_0, PSB4_0, PTN2_FN),
+ PINMUX_DATA(DV_D7_MARK, PSB3_0, PSB2_0, PTN1_FN),
+ PINMUX_DATA(DV_D6_MARK, PSB1_0, PSB0_0, PTN0_FN),
+
+ PINMUX_DATA(SCIF2_V_SCK_MARK, PSD13_0, PSD12_1, PTN4_FN),
+ PINMUX_DATA(SCIF2_V_RXD_MARK, PSD15_0, PSD14_1, PTN3_FN),
+ PINMUX_DATA(SCIF2_V_TXD_MARK, PSB5_0, PSB4_1, PTN2_FN),
+ PINMUX_DATA(SCIF3_V_CTS_MARK, PSB3_0, PSB2_1, PTN1_FN),
+ PINMUX_DATA(SCIF3_V_RTS_MARK, PSB1_0, PSB0_1, PTN0_FN),
+
+ PINMUX_DATA(RMII_RX_ER_MARK, PSB3_1, PSB2_0, PTN1_FN),
+ PINMUX_DATA(RMII_CRS_DV_MARK, PSB1_1, PSB0_0, PTN0_FN),
+
+ /* PTQ FN */
+ PINMUX_DATA(D7_MARK, PTQ7_FN),
+ PINMUX_DATA(D6_MARK, PTQ6_FN),
+ PINMUX_DATA(D5_MARK, PTQ5_FN),
+ PINMUX_DATA(D4_MARK, PTQ4_FN),
+ PINMUX_DATA(D3_MARK, PTQ3_FN),
+ PINMUX_DATA(D2_MARK, PTQ2_FN),
+ PINMUX_DATA(D1_MARK, PTQ1_FN),
+ PINMUX_DATA(D0_MARK, PTQ0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(CS6B_CE1B_MARK, PTR7_FN),
+ PINMUX_DATA(CS6A_CE2B_MARK, PTR6_FN),
+ PINMUX_DATA(CS5B_CE1A_MARK, PTR5_FN),
+ PINMUX_DATA(CS5A_CE2A_MARK, PTR4_FN),
+ PINMUX_DATA(IOIS16_MARK, PSA5_0, PTR3_FN),
+ PINMUX_DATA(WAIT_MARK, PTR2_FN),
+ PINMUX_DATA(WE3_ICIOWR_MARK, PSA1_0, PSA0_0, PTR1_FN),
+ PINMUX_DATA(WE2_ICIORD_MARK, PSD1_0, PSD0_0, PTR0_FN),
+
+ PINMUX_DATA(LCDLCLK_MARK, PSA5_1, PTR3_FN),
+
+ PINMUX_DATA(IDEA2_MARK, PSD1_1, PSD0_0, PTR0_FN),
+
+ PINMUX_DATA(TPUTO3_MARK, PSA1_0, PSA0_1, PTR1_FN),
+ PINMUX_DATA(TPUTI3_MARK, PSA1_1, PSA0_0, PTR1_FN),
+ PINMUX_DATA(TPUTO2_MARK, PSD1_0, PSD0_1, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(VIO_CKO_MARK, PTS6_FN),
+
+ PINMUX_DATA(TPUTI2_MARK, PSE9_0, PSE8_1, PTS5_FN),
+
+ PINMUX_DATA(IDEIORDY_MARK, PSE9_1, PSE8_0, PTS5_FN),
+
+ PINMUX_DATA(VIO1_FLD_MARK, PSE9_0, PSE8_0, PTS5_FN),
+ PINMUX_DATA(VIO1_HD_MARK, PSA10_0, PTS4_FN),
+ PINMUX_DATA(VIO1_VD_MARK, PSA9_0, PTS3_FN),
+ PINMUX_DATA(VIO1_CLK_MARK, PSA9_0, PTS2_FN),
+ PINMUX_DATA(VIO1_D7_MARK, PSB7_0, PSB6_0, PTS1_FN),
+ PINMUX_DATA(VIO1_D6_MARK, PSB7_0, PSB6_0, PTS0_FN),
+
+ PINMUX_DATA(SCIF5_SCK_MARK, PSA10_1, PTS4_FN),
+ PINMUX_DATA(SCIF5_RXD_MARK, PSA9_1, PTS3_FN),
+ PINMUX_DATA(SCIF5_TXD_MARK, PSA9_1, PTS2_FN),
+
+ PINMUX_DATA(VIO0_D15_MARK, PSB7_0, PSB6_1, PTS1_FN),
+ PINMUX_DATA(VIO0_D14_MARK, PSB7_0, PSB6_1, PTS0_FN),
+
+ PINMUX_DATA(IDED7_MARK, PSB7_1, PSB6_0, PTS1_FN),
+ PINMUX_DATA(IDED6_MARK, PSB7_1, PSB6_0, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(D15_MARK, PTT7_FN),
+ PINMUX_DATA(D14_MARK, PTT6_FN),
+ PINMUX_DATA(D13_MARK, PTT5_FN),
+ PINMUX_DATA(D12_MARK, PTT4_FN),
+ PINMUX_DATA(D11_MARK, PTT3_FN),
+ PINMUX_DATA(D10_MARK, PTT2_FN),
+ PINMUX_DATA(D9_MARK, PTT1_FN),
+ PINMUX_DATA(D8_MARK, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(DMAC_DACK0_MARK, PTU7_FN),
+ PINMUX_DATA(DMAC_DREQ0_MARK, PTU6_FN),
+
+ PINMUX_DATA(FSIOASD_MARK, PSE1_0, PTU5_FN),
+ PINMUX_DATA(FSIIABCK_MARK, PSE1_0, PTU4_FN),
+ PINMUX_DATA(FSIIALRCK_MARK, PSE1_0, PTU3_FN),
+ PINMUX_DATA(FSIOABCK_MARK, PSE1_0, PTU2_FN),
+ PINMUX_DATA(FSIOALRCK_MARK, PSE1_0, PTU1_FN),
+ PINMUX_DATA(CLKAUDIOAO_MARK, PSE0_0, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(FSIIBSD_MARK, PSD7_0, PSD6_0, PTV7_FN),
+ PINMUX_DATA(FSIOBSD_MARK, PSD7_0, PSD6_0, PTV6_FN),
+ PINMUX_DATA(FSIIBBCK_MARK, PSC15_0, PSC14_0, PTV5_FN),
+ PINMUX_DATA(FSIIBLRCK_MARK, PSC15_0, PSC14_0, PTV4_FN),
+ PINMUX_DATA(FSIOBBCK_MARK, PSC15_0, PSC14_0, PTV3_FN),
+ PINMUX_DATA(FSIOBLRCK_MARK, PSC15_0, PSC14_0, PTV2_FN),
+ PINMUX_DATA(CLKAUDIOBO_MARK, PSE3_0, PSE2_0, PTV1_FN),
+ PINMUX_DATA(FSIIASD_MARK, PSE10_0, PTV0_FN),
+
+ PINMUX_DATA(MSIOF1_SS2_MARK, PSD7_0, PSD6_1, PTV7_FN),
+ PINMUX_DATA(MSIOF1_RSYNC_MARK, PSD7_1, PSD6_0, PTV7_FN),
+ PINMUX_DATA(MSIOF1_SS1_MARK, PSD7_0, PSD6_1, PTV6_FN),
+ PINMUX_DATA(MSIOF1_RSCK_MARK, PSD7_1, PSD6_0, PTV6_FN),
+ PINMUX_DATA(MSIOF1_RXD_MARK, PSC15_0, PSC14_1, PTV5_FN),
+ PINMUX_DATA(MSIOF1_TSYNC_MARK, PSC15_0, PSC14_1, PTV4_FN),
+ PINMUX_DATA(MSIOF1_TSCK_MARK, PSC15_0, PSC14_1, PTV3_FN),
+ PINMUX_DATA(MSIOF1_TXD_MARK, PSC15_0, PSC14_1, PTV2_FN),
+ PINMUX_DATA(MSIOF1_MCK_MARK, PSE3_0, PSE2_1, PTV1_FN),
+
+ /* PTW FN */
+ PINMUX_DATA(MMC_D7_MARK, PSE13_0, PSE12_0, PTW7_FN),
+ PINMUX_DATA(MMC_D6_MARK, PSE13_0, PSE12_0, PTW6_FN),
+ PINMUX_DATA(MMC_D5_MARK, PSE13_0, PSE12_0, PTW5_FN),
+ PINMUX_DATA(MMC_D4_MARK, PSE13_0, PSE12_0, PTW4_FN),
+ PINMUX_DATA(MMC_D3_MARK, PSA13_0, PTW3_FN),
+ PINMUX_DATA(MMC_D2_MARK, PSA13_0, PTW2_FN),
+ PINMUX_DATA(MMC_D1_MARK, PSA13_0, PTW1_FN),
+ PINMUX_DATA(MMC_D0_MARK, PSA13_0, PTW0_FN),
+
+ PINMUX_DATA(SDHI1CD_MARK, PSE13_0, PSE12_1, PTW7_FN),
+ PINMUX_DATA(SDHI1WP_MARK, PSE13_0, PSE12_1, PTW6_FN),
+ PINMUX_DATA(SDHI1D3_MARK, PSE13_0, PSE12_1, PTW5_FN),
+ PINMUX_DATA(SDHI1D2_MARK, PSE13_0, PSE12_1, PTW4_FN),
+ PINMUX_DATA(SDHI1D1_MARK, PSA13_1, PTW3_FN),
+ PINMUX_DATA(SDHI1D0_MARK, PSA13_1, PTW2_FN),
+ PINMUX_DATA(SDHI1CMD_MARK, PSA13_1, PTW1_FN),
+ PINMUX_DATA(SDHI1CLK_MARK, PSA13_1, PTW0_FN),
+
+ PINMUX_DATA(IODACK_MARK, PSE13_1, PSE12_0, PTW7_FN),
+ PINMUX_DATA(IDERST_MARK, PSE13_1, PSE12_0, PTW6_FN),
+ PINMUX_DATA(EXBUF_ENB_MARK, PSE13_1, PSE12_0, PTW5_FN),
+ PINMUX_DATA(DIRECTION_MARK, PSE13_1, PSE12_0, PTW4_FN),
+
+ /* PTX FN */
+ PINMUX_DATA(DMAC_DACK1_MARK, PSA12_0, PTX7_FN),
+ PINMUX_DATA(DMAC_DREQ1_MARK, PSA12_0, PTX6_FN),
+
+ PINMUX_DATA(IRDA_OUT_MARK, PSA12_1, PTX7_FN),
+ PINMUX_DATA(IRDA_IN_MARK, PSA12_1, PTX6_FN),
+
+ PINMUX_DATA(TSIF_TS0_SDAT_MARK, PSC0_0, PTX5_FN),
+ PINMUX_DATA(TSIF_TS0_SCK_MARK, PSC1_0, PTX4_FN),
+ PINMUX_DATA(TSIF_TS0_SDEN_MARK, PSC2_0, PTX3_FN),
+ PINMUX_DATA(TSIF_TS0_SPSYNC_MARK, PTX2_FN),
+
+ PINMUX_DATA(LNKSTA_MARK, PSC0_1, PTX5_FN),
+ PINMUX_DATA(MDIO_MARK, PSC1_1, PTX4_FN),
+ PINMUX_DATA(MDC_MARK, PSC2_1, PTX3_FN),
+
+ PINMUX_DATA(MMC_CLK_MARK, PTX1_FN),
+ PINMUX_DATA(MMC_CMD_MARK, PTX0_FN),
+
+ /* PTY FN */
+ PINMUX_DATA(SDHI0CD_MARK, PTY7_FN),
+ PINMUX_DATA(SDHI0WP_MARK, PTY6_FN),
+ PINMUX_DATA(SDHI0D3_MARK, PTY5_FN),
+ PINMUX_DATA(SDHI0D2_MARK, PTY4_FN),
+ PINMUX_DATA(SDHI0D1_MARK, PTY3_FN),
+ PINMUX_DATA(SDHI0D0_MARK, PTY2_FN),
+ PINMUX_DATA(SDHI0CMD_MARK, PTY1_FN),
+ PINMUX_DATA(SDHI0CLK_MARK, PTY0_FN),
+
+ /* PTZ FN */
+ PINMUX_DATA(INTC_IRQ7_MARK, PSB10_0, PTZ7_FN),
+ PINMUX_DATA(INTC_IRQ6_MARK, PSB11_0, PTZ6_FN),
+ PINMUX_DATA(INTC_IRQ5_MARK, PSB12_0, PTZ5_FN),
+ PINMUX_DATA(INTC_IRQ4_MARK, PSB13_0, PTZ4_FN),
+ PINMUX_DATA(INTC_IRQ3_MARK, PSB14_0, PTZ3_FN),
+ PINMUX_DATA(INTC_IRQ2_MARK, PTZ2_FN),
+ PINMUX_DATA(INTC_IRQ1_MARK, PTZ1_FN),
+ PINMUX_DATA(INTC_IRQ0_MARK, PTZ0_FN),
+
+ PINMUX_DATA(SCIF3_I_CTS_MARK, PSB10_1, PTZ7_FN),
+ PINMUX_DATA(SCIF3_I_RTS_MARK, PSB11_1, PTZ6_FN),
+ PINMUX_DATA(SCIF3_I_SCK_MARK, PSB12_1, PTZ5_FN),
+ PINMUX_DATA(SCIF3_I_RXD_MARK, PSB13_1, PTZ4_FN),
+ PINMUX_DATA(SCIF3_I_TXD_MARK, PSB14_1, PTZ3_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ7, PTQ7_DATA),
+ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
+ PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
+ PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
+ PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT7, PTT7_DATA),
+ PINMUX_GPIO(GPIO_PTT6, PTT6_DATA),
+ PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU7, PTU7_DATA),
+ PINMUX_GPIO(GPIO_PTU6, PTU6_DATA),
+ PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
+ PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
+ PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
+ PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
+ PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+ PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+
+ /* BSC */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
+ PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
+ PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
+ PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
+ PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
+ PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
+ PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
+ PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
+ PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
+ PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
+ PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
+ PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
+ PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
+ PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
+ PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
+ PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5A_CE2A, CS5A_CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3_ICIOWR, WE3_ICIOWR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2_ICIORD, WE2_ICIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK),
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+
+ /* KEYSC */
+ PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK),
+ PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK),
+
+ /* ATAPI */
+ PINMUX_GPIO(GPIO_FN_IDED15, IDED15_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED14, IDED14_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED13, IDED13_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED12, IDED12_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED11, IDED11_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED10, IDED10_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED9, IDED9_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED8, IDED8_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED7, IDED7_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED6, IDED6_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED5, IDED5_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED4, IDED4_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED3, IDED3_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED2, IDED2_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED1, IDED1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDED0, IDED0_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA2, IDEA2_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA1, IDEA1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEA0, IDEA0_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIOWR, IDEIOWR_MARK),
+ PINMUX_GPIO(GPIO_FN_IODREQ, IODREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_IDECS0, IDECS0_MARK),
+ PINMUX_GPIO(GPIO_FN_IDECS1, IDECS1_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIORD, IDEIORD_MARK),
+ PINMUX_GPIO(GPIO_FN_DIRECTION, DIRECTION_MARK),
+ PINMUX_GPIO(GPIO_FN_EXBUF_ENB, EXBUF_ENB_MARK),
+ PINMUX_GPIO(GPIO_FN_IDERST, IDERST_MARK),
+ PINMUX_GPIO(GPIO_FN_IODACK, IODACK_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEINT, IDEINT_MARK),
+ PINMUX_GPIO(GPIO_FN_IDEIORDY, IDEIORDY_MARK),
+
+ /* TPU */
+ PINMUX_GPIO(GPIO_FN_TPUTO3, TPUTO3_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO2, TPUTO2_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO1, TPUTO1_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTO0, TPUTO0_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTI3, TPUTI3_MARK),
+ PINMUX_GPIO(GPIO_FN_TPUTI2, TPUTI2_MARK),
+
+ /* LCDC */
+ PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK),
+ PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK),
+
+ /* SCIF0 */
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+
+ /* SCIF1 */
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+
+ /* SCIF2 */
+ PINMUX_GPIO(GPIO_FN_SCIF2_L_TXD, SCIF2_L_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_L_SCK, SCIF2_L_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_L_RXD, SCIF2_L_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_V_TXD, SCIF2_V_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_V_SCK, SCIF2_V_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_V_RXD, SCIF2_V_RXD_MARK),
+
+ /* SCIF3 */
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_SCK, SCIF3_V_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_RXD, SCIF3_V_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_TXD, SCIF3_V_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_CTS, SCIF3_V_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_V_RTS, SCIF3_V_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_SCK, SCIF3_I_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_RXD, SCIF3_I_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_TXD, SCIF3_I_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_CTS, SCIF3_I_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_I_RTS, SCIF3_I_RTS_MARK),
+
+ /* SCIF4 */
+ PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
+
+ /* SCIF5 */
+ PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
+
+ /* FSI */
+ PINMUX_GPIO(GPIO_FN_FSIMCKB, FSIMCKB_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIMCKA, FSIMCKA_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOASD, FSIOASD_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIABCK, FSIIABCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIALRCK, FSIIALRCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOABCK, FSIOABCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOALRCK, FSIOALRCK_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKAUDIOAO, CLKAUDIOAO_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIBSD, FSIIBSD_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOBSD, FSIOBSD_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIBBCK, FSIIBBCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIBLRCK, FSIIBLRCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOBBCK, FSIOBBCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIOBLRCK, FSIOBLRCK_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKAUDIOBO, CLKAUDIOBO_MARK),
+ PINMUX_GPIO(GPIO_FN_FSIIASD, FSIIASD_MARK),
+
+ /* AUD */
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+
+ /* VIO */
+ PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK),
+
+ /* VIO0 */
+ PINMUX_GPIO(GPIO_FN_VIO0_D15, VIO0_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D14, VIO0_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D13, VIO0_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D12, VIO0_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D11, VIO0_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D10, VIO0_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D9, VIO0_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D8, VIO0_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D7, VIO0_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D6, VIO0_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D5, VIO0_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D4, VIO0_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D3, VIO0_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D2, VIO0_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D1, VIO0_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_D0, VIO0_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_VD, VIO0_VD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_CLK, VIO0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_FLD, VIO0_FLD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO0_HD, VIO0_HD_MARK),
+
+ /* VIO1 */
+ PINMUX_GPIO(GPIO_FN_VIO1_D7, VIO1_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D6, VIO1_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D5, VIO1_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D4, VIO1_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D3, VIO1_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D2, VIO1_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D1, VIO1_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_D0, VIO1_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_FLD, VIO1_FLD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_HD, VIO1_HD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_VD, VIO1_VD_MARK),
+ PINMUX_GPIO(GPIO_FN_VIO1_CLK, VIO1_CLK_MARK),
+
+ /* Eth */
+ PINMUX_GPIO(GPIO_FN_RMII_RXD0, RMII_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_RXD1, RMII_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_TXD0, RMII_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_TXD1, RMII_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_REF_CLK, RMII_REF_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_TX_EN, RMII_TX_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_RX_ER, RMII_RX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII_CRS_DV, RMII_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_LNKSTA, LNKSTA_MARK),
+ PINMUX_GPIO(GPIO_FN_MDIO, MDIO_MARK),
+ PINMUX_GPIO(GPIO_FN_MDC, MDC_MARK),
+
+ /* System */
+ PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS2, STATUS2_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+
+ /* VOU */
+ PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK),
+
+ /* MSIOF0 */
+ PINMUX_GPIO(GPIO_FN_MSIOF0_RXD, MSIOF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_TXD, MSIOF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_MCK, MSIOF0_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_TSCK, MSIOF0_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_SS1, MSIOF0_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_SS2, MSIOF0_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_TSYNC, MSIOF0_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_RSCK, MSIOF0_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF0_RSYNC, MSIOF0_RSYNC_MARK),
+
+ /* MSIOF1 */
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RXD, MSIOF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TXD, MSIOF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_MCK, MSIOF1_MCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TSCK, MSIOF1_TSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_SS1, MSIOF1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_SS2, MSIOF1_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_TSYNC, MSIOF1_TSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RSCK, MSIOF1_RSCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MSIOF1_RSYNC, MSIOF1_RSYNC_MARK),
+
+ /* DMAC */
+ PINMUX_GPIO(GPIO_FN_DMAC_DACK0, DMAC_DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DMAC_DREQ0, DMAC_DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DMAC_DACK1, DMAC_DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DMAC_DREQ1, DMAC_DREQ1_MARK),
+
+ /* SDHI0 */
+ PINMUX_GPIO(GPIO_FN_SDHI0CD, SDHI0CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0WP, SDHI0WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CMD, SDHI0CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0CLK, SDHI0CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D3, SDHI0D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D2, SDHI0D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D1, SDHI0D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI0D0, SDHI0D0_MARK),
+
+ /* SDHI1 */
+ PINMUX_GPIO(GPIO_FN_SDHI1CD, SDHI1CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1WP, SDHI1WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1CMD, SDHI1CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1CLK, SDHI1CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D3, SDHI1D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D2, SDHI1D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D1, SDHI1D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDHI1D0, SDHI1D0_MARK),
+
+ /* MMC */
+ PINMUX_GPIO(GPIO_FN_MMC_D7, MMC_D7_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D6, MMC_D6_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D5, MMC_D5_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D4, MMC_D4_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D3, MMC_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D2, MMC_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D1, MMC_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_D0, MMC_D0_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_CLK, MMC_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMC_CMD, MMC_CMD_MARK),
+
+ /* IrDA */
+ PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK),
+ PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK),
+
+ /* TSIF */
+ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SDAT, TSIF_TS0_SDAT_MARK),
+ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SCK, TSIF_TS0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SDEN, TSIF_TS0_SDEN_MARK),
+ PINMUX_GPIO(GPIO_FN_TSIF_TS0_SPSYNC, TSIF_TS0_SPSYNC_MARK),
+
+ /* IRQ */
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ7, INTC_IRQ7_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ6, INTC_IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ5, INTC_IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ4, INTC_IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ3, INTC_IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ2, INTC_IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ1, INTC_IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC_IRQ0, INTC_IRQ0_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
+ PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN,
+ PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN,
+ PTA5_FN, PTA5_OUT, PTA5_IN_PU, PTA5_IN,
+ PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
+ PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
+ PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
+ PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
+ PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
+ PTB7_FN, PTB7_OUT, PTB7_IN_PU, PTB7_IN,
+ PTB6_FN, PTB6_OUT, PTB6_IN_PU, PTB6_IN,
+ PTB5_FN, PTB5_OUT, PTB5_IN_PU, PTB5_IN,
+ PTB4_FN, PTB4_OUT, PTB4_IN_PU, PTB4_IN,
+ PTB3_FN, PTB3_OUT, PTB3_IN_PU, PTB3_IN,
+ PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
+ PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
+ PTB0_FN, PTB0_OUT, PTB0_IN_PU, PTB0_IN }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
+ PTC7_FN, PTC7_OUT, PTC7_IN_PU, PTC7_IN,
+ PTC6_FN, PTC6_OUT, PTC6_IN_PU, PTC6_IN,
+ PTC5_FN, PTC5_OUT, PTC5_IN_PU, PTC5_IN,
+ PTC4_FN, PTC4_OUT, PTC4_IN_PU, PTC4_IN,
+ PTC3_FN, PTC3_OUT, PTC3_IN_PU, PTC3_IN,
+ PTC2_FN, PTC2_OUT, PTC2_IN_PU, PTC2_IN,
+ PTC1_FN, PTC1_OUT, PTC1_IN_PU, PTC1_IN,
+ PTC0_FN, PTC0_OUT, PTC0_IN_PU, PTC0_IN }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
+ PTD7_FN, PTD7_OUT, PTD7_IN_PU, PTD7_IN,
+ PTD6_FN, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
+ PTD5_FN, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
+ PTD4_FN, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
+ PTD3_FN, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
+ PTD2_FN, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
+ PTD1_FN, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
+ PTD0_FN, PTD0_OUT, PTD0_IN_PU, PTD0_IN }
+ },
+ { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
+ PTE7_FN, PTE7_OUT, PTE7_IN_PU, PTE7_IN,
+ PTE6_FN, PTE6_OUT, PTE6_IN_PU, PTE6_IN,
+ PTE5_FN, PTE5_OUT, PTE5_IN_PU, PTE5_IN,
+ PTE4_FN, PTE4_OUT, PTE4_IN_PU, PTE4_IN,
+ PTE3_FN, PTE3_OUT, PTE3_IN_PU, PTE3_IN,
+ PTE2_FN, PTE2_OUT, PTE2_IN_PU, PTE2_IN,
+ PTE1_FN, PTE1_OUT, PTE1_IN_PU, PTE1_IN,
+ PTE0_FN, PTE0_OUT, PTE0_IN_PU, PTE0_IN }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
+ PTF7_FN, PTF7_OUT, PTF7_IN_PU, PTF7_IN,
+ PTF6_FN, PTF6_OUT, PTF6_IN_PU, PTF6_IN,
+ PTF5_FN, PTF5_OUT, PTF5_IN_PU, PTF5_IN,
+ PTF4_FN, PTF4_OUT, PTF4_IN_PU, PTF4_IN,
+ PTF3_FN, PTF3_OUT, PTF3_IN_PU, PTF3_IN,
+ PTF2_FN, PTF2_OUT, PTF2_IN_PU, PTF2_IN,
+ PTF1_FN, PTF1_OUT, PTF1_IN_PU, PTF1_IN,
+ PTF0_FN, PTF0_OUT, PTF0_IN_PU, PTF0_IN }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PTG5_FN, PTG5_OUT, 0, 0,
+ PTG4_FN, PTG4_OUT, 0, 0,
+ PTG3_FN, PTG3_OUT, 0, 0,
+ PTG2_FN, PTG2_OUT, 0, 0,
+ PTG1_FN, PTG1_OUT, 0, 0,
+ PTG0_FN, PTG0_OUT, 0, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
+ PTH7_FN, PTH7_OUT, PTH7_IN_PU, PTH7_IN,
+ PTH6_FN, PTH6_OUT, PTH6_IN_PU, PTH6_IN,
+ PTH5_FN, PTH5_OUT, PTH5_IN_PU, PTH5_IN,
+ PTH4_FN, PTH4_OUT, PTH4_IN_PU, PTH4_IN,
+ PTH3_FN, PTH3_OUT, PTH3_IN_PU, PTH3_IN,
+ PTH2_FN, PTH2_OUT, PTH2_IN_PU, PTH2_IN,
+ PTH1_FN, PTH1_OUT, PTH1_IN_PU, PTH1_IN,
+ PTH0_FN, PTH0_OUT, PTH0_IN_PU, PTH0_IN }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
+ PTJ7_FN, PTJ7_OUT, 0, 0,
+ PTJ6_FN, PTJ6_OUT, 0, 0,
+ PTJ5_FN, PTJ5_OUT, 0, 0,
+ 0, 0, 0, 0,
+ PTJ3_FN, PTJ3_OUT, PTJ3_IN_PU, PTJ3_IN,
+ PTJ2_FN, PTJ2_OUT, PTJ2_IN_PU, PTJ2_IN,
+ PTJ1_FN, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
+ PTK7_FN, PTK7_OUT, PTK7_IN_PU, PTK7_IN,
+ PTK6_FN, PTK6_OUT, PTK6_IN_PU, PTK6_IN,
+ PTK5_FN, PTK5_OUT, PTK5_IN_PU, PTK5_IN,
+ PTK4_FN, PTK4_OUT, PTK4_IN_PU, PTK4_IN,
+ PTK3_FN, PTK3_OUT, PTK3_IN_PU, PTK3_IN,
+ PTK2_FN, PTK2_OUT, PTK2_IN_PU, PTK2_IN,
+ PTK1_FN, PTK1_OUT, PTK1_IN_PU, PTK1_IN,
+ PTK0_FN, PTK0_OUT, PTK0_IN_PU, PTK0_IN }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
+ PTL7_FN, PTL7_OUT, PTL7_IN_PU, PTL7_IN,
+ PTL6_FN, PTL6_OUT, PTL6_IN_PU, PTL6_IN,
+ PTL5_FN, PTL5_OUT, PTL5_IN_PU, PTL5_IN,
+ PTL4_FN, PTL4_OUT, PTL4_IN_PU, PTL4_IN,
+ PTL3_FN, PTL3_OUT, PTL3_IN_PU, PTL3_IN,
+ PTL2_FN, PTL2_OUT, PTL2_IN_PU, PTL2_IN,
+ PTL1_FN, PTL1_OUT, PTL1_IN_PU, PTL1_IN,
+ PTL0_FN, PTL0_OUT, PTL0_IN_PU, PTL0_IN }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
+ PTM7_FN, PTM7_OUT, PTM7_IN_PU, PTM7_IN,
+ PTM6_FN, PTM6_OUT, PTM6_IN_PU, PTM6_IN,
+ PTM5_FN, PTM5_OUT, PTM5_IN_PU, PTM5_IN,
+ PTM4_FN, PTM4_OUT, PTM4_IN_PU, PTM4_IN,
+ PTM3_FN, PTM3_OUT, PTM3_IN_PU, PTM3_IN,
+ PTM2_FN, PTM2_OUT, PTM2_IN_PU, PTM2_IN,
+ PTM1_FN, PTM1_OUT, PTM1_IN_PU, PTM1_IN,
+ PTM0_FN, PTM0_OUT, PTM0_IN_PU, PTM0_IN }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
+ PTN7_FN, PTN7_OUT, PTN7_IN_PU, PTN7_IN,
+ PTN6_FN, PTN6_OUT, PTN6_IN_PU, PTN6_IN,
+ PTN5_FN, PTN5_OUT, PTN5_IN_PU, PTN5_IN,
+ PTN4_FN, PTN4_OUT, PTN4_IN_PU, PTN4_IN,
+ PTN3_FN, PTN3_OUT, PTN3_IN_PU, PTN3_IN,
+ PTN2_FN, PTN2_OUT, PTN2_IN_PU, PTN2_IN,
+ PTN1_FN, PTN1_OUT, PTN1_IN_PU, PTN1_IN,
+ PTN0_FN, PTN0_OUT, PTN0_IN_PU, PTN0_IN }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
+ PTQ7_FN, PTQ7_OUT, PTQ7_IN_PU, PTQ7_IN,
+ PTQ6_FN, PTQ6_OUT, PTQ6_IN_PU, PTQ6_IN,
+ PTQ5_FN, PTQ5_OUT, PTQ5_IN_PU, PTQ5_IN,
+ PTQ4_FN, PTQ4_OUT, PTQ4_IN_PU, PTQ4_IN,
+ PTQ3_FN, PTQ3_OUT, PTQ3_IN_PU, PTQ3_IN,
+ PTQ2_FN, PTQ2_OUT, PTQ2_IN_PU, PTQ2_IN,
+ PTQ1_FN, PTQ1_OUT, PTQ1_IN_PU, PTQ1_IN,
+ PTQ0_FN, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
+ PTR7_FN, PTR7_OUT, PTR7_IN_PU, PTR7_IN,
+ PTR6_FN, PTR6_OUT, PTR6_IN_PU, PTR6_IN,
+ PTR5_FN, PTR5_OUT, PTR5_IN_PU, PTR5_IN,
+ PTR4_FN, PTR4_OUT, PTR4_IN_PU, PTR4_IN,
+ PTR3_FN, 0, PTR3_IN_PU, PTR3_IN,
+ PTR2_FN, 0, PTR2_IN_PU, PTR2_IN,
+ PTR1_FN, PTR1_OUT, PTR1_IN_PU, PTR1_IN,
+ PTR0_FN, PTR0_OUT, PTR0_IN_PU, PTR0_IN }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
+ 0, 0, 0, 0,
+ PTS6_FN, PTS6_OUT, PTS6_IN_PU, PTS6_IN,
+ PTS5_FN, PTS5_OUT, PTS5_IN_PU, PTS5_IN,
+ PTS4_FN, PTS4_OUT, PTS4_IN_PU, PTS4_IN,
+ PTS3_FN, PTS3_OUT, PTS3_IN_PU, PTS3_IN,
+ PTS2_FN, PTS2_OUT, PTS2_IN_PU, PTS2_IN,
+ PTS1_FN, PTS1_OUT, PTS1_IN_PU, PTS1_IN,
+ PTS0_FN, PTS0_OUT, PTS0_IN_PU, PTS0_IN }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
+ PTT7_FN, PTT7_OUT, PTT7_IN_PU, PTT7_IN,
+ PTT6_FN, PTT6_OUT, PTT6_IN_PU, PTT6_IN,
+ PTT5_FN, PTT5_OUT, PTT5_IN_PU, PTT5_IN,
+ PTT4_FN, PTT4_OUT, PTT4_IN_PU, PTT4_IN,
+ PTT3_FN, PTT3_OUT, PTT3_IN_PU, PTT3_IN,
+ PTT2_FN, PTT2_OUT, PTT2_IN_PU, PTT2_IN,
+ PTT1_FN, PTT1_OUT, PTT1_IN_PU, PTT1_IN,
+ PTT0_FN, PTT0_OUT, PTT0_IN_PU, PTT0_IN }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
+ PTU7_FN, PTU7_OUT, PTU7_IN_PU, PTU7_IN,
+ PTU6_FN, PTU6_OUT, PTU6_IN_PU, PTU6_IN,
+ PTU5_FN, PTU5_OUT, PTU5_IN_PU, PTU5_IN,
+ PTU4_FN, PTU4_OUT, PTU4_IN_PU, PTU4_IN,
+ PTU3_FN, PTU3_OUT, PTU3_IN_PU, PTU3_IN,
+ PTU2_FN, PTU2_OUT, PTU2_IN_PU, PTU2_IN,
+ PTU1_FN, PTU1_OUT, PTU1_IN_PU, PTU1_IN,
+ PTU0_FN, PTU0_OUT, PTU0_IN_PU, PTU0_IN }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
+ PTV7_FN, PTV7_OUT, PTV7_IN_PU, PTV7_IN,
+ PTV6_FN, PTV6_OUT, PTV6_IN_PU, PTV6_IN,
+ PTV5_FN, PTV5_OUT, PTV5_IN_PU, PTV5_IN,
+ PTV4_FN, PTV4_OUT, PTV4_IN_PU, PTV4_IN,
+ PTV3_FN, PTV3_OUT, PTV3_IN_PU, PTV3_IN,
+ PTV2_FN, PTV2_OUT, PTV2_IN_PU, PTV2_IN,
+ PTV1_FN, PTV1_OUT, PTV1_IN_PU, PTV1_IN,
+ PTV0_FN, PTV0_OUT, PTV0_IN_PU, PTV0_IN }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
+ PTW7_FN, PTW7_OUT, PTW7_IN_PU, PTW7_IN,
+ PTW6_FN, PTW6_OUT, PTW6_IN_PU, PTW6_IN,
+ PTW5_FN, PTW5_OUT, PTW5_IN_PU, PTW5_IN,
+ PTW4_FN, PTW4_OUT, PTW4_IN_PU, PTW4_IN,
+ PTW3_FN, PTW3_OUT, PTW3_IN_PU, PTW3_IN,
+ PTW2_FN, PTW2_OUT, PTW2_IN_PU, PTW2_IN,
+ PTW1_FN, PTW1_OUT, PTW1_IN_PU, PTW1_IN,
+ PTW0_FN, PTW0_OUT, PTW0_IN_PU, PTW0_IN }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
+ PTX7_FN, PTX7_OUT, PTX7_IN_PU, PTX7_IN,
+ PTX6_FN, PTX6_OUT, PTX6_IN_PU, PTX6_IN,
+ PTX5_FN, PTX5_OUT, PTX5_IN_PU, PTX5_IN,
+ PTX4_FN, PTX4_OUT, PTX4_IN_PU, PTX4_IN,
+ PTX3_FN, PTX3_OUT, PTX3_IN_PU, PTX3_IN,
+ PTX2_FN, PTX2_OUT, PTX2_IN_PU, PTX2_IN,
+ PTX1_FN, PTX1_OUT, PTX1_IN_PU, PTX1_IN,
+ PTX0_FN, PTX0_OUT, PTX0_IN_PU, PTX0_IN }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
+ PTY7_FN, PTY7_OUT, PTY7_IN_PU, PTY7_IN,
+ PTY6_FN, PTY6_OUT, PTY6_IN_PU, PTY6_IN,
+ PTY5_FN, PTY5_OUT, PTY5_IN_PU, PTY5_IN,
+ PTY4_FN, PTY4_OUT, PTY4_IN_PU, PTY4_IN,
+ PTY3_FN, PTY3_OUT, PTY3_IN_PU, PTY3_IN,
+ PTY2_FN, PTY2_OUT, PTY2_IN_PU, PTY2_IN,
+ PTY1_FN, PTY1_OUT, PTY1_IN_PU, PTY1_IN,
+ PTY0_FN, PTY0_OUT, PTY0_IN_PU, PTY0_IN }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
+ PTZ7_FN, PTZ7_OUT, PTZ7_IN_PU, PTZ7_IN,
+ PTZ6_FN, PTZ6_OUT, PTZ6_IN_PU, PTZ6_IN,
+ PTZ5_FN, PTZ5_OUT, PTZ5_IN_PU, PTZ5_IN,
+ PTZ4_FN, PTZ4_OUT, PTZ4_IN_PU, PTZ4_IN,
+ PTZ3_FN, PTZ3_OUT, PTZ3_IN_PU, PTZ3_IN,
+ PTZ2_FN, PTZ2_OUT, PTZ2_IN_PU, PTZ2_IN,
+ PTZ1_FN, PTZ1_OUT, PTZ1_IN_PU, PTZ1_IN,
+ PTZ0_FN, PTZ0_OUT, PTZ0_IN_PU, PTZ0_IN }
+ },
+ { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) {
+ PSA15_0, PSA15_1,
+ PSA14_0, PSA14_1,
+ PSA13_0, PSA13_1,
+ PSA12_0, PSA12_1,
+ 0, 0,
+ PSA10_0, PSA10_1,
+ PSA9_0, PSA9_1,
+ PSA8_0, PSA8_1,
+ PSA7_0, PSA7_1,
+ PSA6_0, PSA6_1,
+ PSA5_0, PSA5_1,
+ 0, 0,
+ PSA3_0, PSA3_1,
+ PSA2_0, PSA2_1,
+ PSA1_0, PSA1_1,
+ PSA0_0, PSA0_1}
+ },
+ { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) {
+ 0, 0,
+ PSB14_0, PSB14_1,
+ PSB13_0, PSB13_1,
+ PSB12_0, PSB12_1,
+ PSB11_0, PSB11_1,
+ PSB10_0, PSB10_1,
+ PSB9_0, PSB9_1,
+ PSB8_0, PSB8_1,
+ PSB7_0, PSB7_1,
+ PSB6_0, PSB6_1,
+ PSB5_0, PSB5_1,
+ PSB4_0, PSB4_1,
+ PSB3_0, PSB3_1,
+ PSB2_0, PSB2_1,
+ PSB1_0, PSB1_1,
+ PSB0_0, PSB0_1}
+ },
+ { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) {
+ PSC15_0, PSC15_1,
+ PSC14_0, PSC14_1,
+ PSC13_0, PSC13_1,
+ PSC12_0, PSC12_1,
+ PSC11_0, PSC11_1,
+ PSC10_0, PSC10_1,
+ PSC9_0, PSC9_1,
+ PSC8_0, PSC8_1,
+ PSC7_0, PSC7_1,
+ PSC6_0, PSC6_1,
+ PSC5_0, PSC5_1,
+ PSC4_0, PSC4_1,
+ 0, 0,
+ PSC2_0, PSC2_1,
+ PSC1_0, PSC1_1,
+ PSC0_0, PSC0_1}
+ },
+ { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) {
+ PSD15_0, PSD15_1,
+ PSD14_0, PSD14_1,
+ PSD13_0, PSD13_1,
+ PSD12_0, PSD12_1,
+ PSD11_0, PSD11_1,
+ PSD10_0, PSD10_1,
+ PSD9_0, PSD9_1,
+ PSD8_0, PSD8_1,
+ PSD7_0, PSD7_1,
+ PSD6_0, PSD6_1,
+ PSD5_0, PSD5_1,
+ PSD4_0, PSD4_1,
+ PSD3_0, PSD3_1,
+ PSD2_0, PSD2_1,
+ PSD1_0, PSD1_1,
+ PSD0_0, PSD0_1}
+ },
+ { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) {
+ PSE15_0, PSE15_1,
+ PSE14_0, PSE14_1,
+ PSE13_0, PSE13_1,
+ PSE12_0, PSE12_1,
+ PSE11_0, PSE11_1,
+ PSE10_0, PSE10_1,
+ PSE9_0, PSE9_1,
+ PSE8_0, PSE8_1,
+ PSE7_0, PSE7_1,
+ PSE6_0, PSE6_1,
+ PSE5_0, PSE5_1,
+ PSE4_0, PSE4_1,
+ PSE3_0, PSE3_1,
+ PSE2_0, PSE2_1,
+ PSE1_0, PSE1_1,
+ PSE0_0, PSE0_1}
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xa4050120, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) {
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) {
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) {
+ 0, 0, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) {
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) {
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) {
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) {
+ PTQ7_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xa405013e, 8) {
+ 0, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) {
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) {
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) {
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) {
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) {
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) {
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) {
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7724_pinmux_info = {
+ .name = "sh7724_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_INTC_IRQ0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
new file mode 100644
index 000000000000..23d76d262c32
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -0,0 +1,2475 @@
+/*
+ * SH7734 processor support - PFC hardware block
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/sh7734.h>
+
+#include "sh_pfc.h"
+
+#define CPU_32_PORT(fn, pfx, sfx) \
+ PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
+ PORT_10(fn, pfx##2, sfx), PORT_1(fn, pfx##30, sfx), \
+ PORT_1(fn, pfx##31, sfx)
+
+#define CPU_32_PORT5(fn, pfx, sfx) \
+ PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \
+ PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \
+ PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx), \
+ PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \
+ PORT_1(fn, pfx##8, sfx), PORT_1(fn, pfx##9, sfx), \
+ PORT_1(fn, pfx##10, sfx), PORT_1(fn, pfx##11, sfx)
+
+/* GPSR0 - GPSR5 */
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ CPU_32_PORT(fn, pfx##_0_, sfx), \
+ CPU_32_PORT(fn, pfx##_1_, sfx), \
+ CPU_32_PORT(fn, pfx##_2_, sfx), \
+ CPU_32_PORT(fn, pfx##_3_, sfx), \
+ CPU_32_PORT(fn, pfx##_4_, sfx), \
+ CPU_32_PORT5(fn, pfx##_5_, sfx)
+
+#define _GP_GPIO(pfx, sfx) PINMUX_GPIO(GPIO_GP##pfx, GP##pfx##_DATA)
+#define _GP_DATA(pfx, sfx) PINMUX_DATA(GP##pfx##_DATA, GP##pfx##_FN, \
+ GP##pfx##_IN, GP##pfx##_OUT)
+
+#define _GP_INOUTSEL(pfx, sfx) GP##pfx##_IN, GP##pfx##_OUT
+#define _GP_INDT(pfx, sfx) GP##pfx##_DATA
+
+#define GP_ALL(str) CPU_ALL_PORT(_PORT_ALL, GP, str)
+#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, , unused)
+#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, , unused)
+
+#define PORT_10_REV(fn, pfx, sfx) \
+ PORT_1(fn, pfx##9, sfx), PORT_1(fn, pfx##8, sfx), \
+ PORT_1(fn, pfx##7, sfx), PORT_1(fn, pfx##6, sfx), \
+ PORT_1(fn, pfx##5, sfx), PORT_1(fn, pfx##4, sfx), \
+ PORT_1(fn, pfx##3, sfx), PORT_1(fn, pfx##2, sfx), \
+ PORT_1(fn, pfx##1, sfx), PORT_1(fn, pfx##0, sfx)
+
+#define CPU_32_PORT_REV(fn, pfx, sfx) \
+ PORT_1(fn, pfx##31, sfx), PORT_1(fn, pfx##30, sfx), \
+ PORT_10_REV(fn, pfx##2, sfx), PORT_10_REV(fn, pfx##1, sfx), \
+ PORT_10_REV(fn, pfx, sfx)
+
+#define GP_INOUTSEL(bank) CPU_32_PORT_REV(_GP_INOUTSEL, _##bank##_, unused)
+#define GP_INDT(bank) CPU_32_PORT_REV(_GP_INDT, _##bank##_, unused)
+
+#define PINMUX_IPSR_DATA(ipsr, fn) PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##fn)
+#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms) PINMUX_DATA(fn##_MARK, FN_##ms, \
+ FN_##ipsr, FN_##fn)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA), /* GP_0_0_DATA -> GP_5_11_DATA */
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ GP_ALL(IN), /* GP_0_0_IN -> GP_5_11_IN */
+ PINMUX_INPUT_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ GP_ALL(OUT), /* GP_0_0_OUT -> GP_5_11_OUT */
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN), /* GP_0_0_FN -> GP_5_11_FN */
+
+ /* GPSR0 */
+ FN_IP1_9_8, FN_IP1_11_10, FN_IP1_13_12, FN_IP1_15_14,
+ FN_IP0_7_6, FN_IP0_9_8, FN_IP0_11_10, FN_IP0_13_12,
+ FN_IP0_15_14, FN_IP0_17_16, FN_IP0_19_18, FN_IP0_21_20,
+ FN_IP0_23_22, FN_IP0_25_24, FN_IP0_27_26, FN_IP0_29_28,
+ FN_IP0_31_30, FN_IP1_1_0, FN_IP1_3_2, FN_IP1_5_4,
+ FN_IP1_7_6, FN_IP11_28, FN_IP0_1_0, FN_IP0_3_2,
+ FN_IP0_5_4, FN_IP1_17_16, FN_IP1_19_18, FN_IP1_22_20,
+ FN_IP1_25_23, FN_IP1_28_26, FN_IP1_31_29, FN_IP2_2_0,
+
+ /* GPSR1 */
+ FN_IP3_20, FN_IP3_29_27, FN_IP11_20_19, FN_IP11_22_21,
+ FN_IP2_16_14, FN_IP2_19_17, FN_IP2_22_20, FN_IP2_24_23,
+ FN_IP2_27_25, FN_IP2_30_28, FN_IP3_1_0, FN_CLKOUT,
+ FN_BS, FN_CS0, FN_IP3_2, FN_EX_CS0,
+ FN_IP3_5_3, FN_IP3_8_6, FN_IP3_11_9, FN_IP3_14_12,
+ FN_IP3_17_15, FN_RD, FN_IP3_19_18, FN_WE0,
+ FN_WE1, FN_IP2_4_3, FN_IP3_23_21, FN_IP3_26_24,
+ FN_IP2_7_5, FN_IP2_10_8, FN_IP2_13_11, FN_IP11_25_23,
+
+ /* GPSR2 */
+ FN_IP11_6_4, FN_IP11_9_7, FN_IP11_11_10, FN_IP4_2_0,
+ FN_IP8_29_28, FN_IP11_27_26, FN_IP8_22_20, FN_IP8_25_23,
+ FN_IP11_12, FN_IP8_27_26, FN_IP4_5_3, FN_IP4_8_6,
+ FN_IP4_11_9, FN_IP4_14_12, FN_IP4_17_15, FN_IP4_19_18,
+ FN_IP4_21_20, FN_IP4_23_22, FN_IP4_25_24, FN_IP4_27_26,
+ FN_IP4_29_28, FN_IP4_31_30, FN_IP5_2_0, FN_IP5_5_3,
+ FN_IP5_8_6, FN_IP5_11_9, FN_IP5_14_12, FN_IP5_17_15,
+ FN_IP5_20_18, FN_IP5_22_21, FN_IP5_24_23, FN_IP5_26_25,
+
+ /* GPSR3 */
+ FN_IP6_2_0, FN_IP6_5_3, FN_IP6_7_6, FN_IP6_9_8,
+ FN_IP6_11_10, FN_IP6_13_12, FN_IP6_15_14, FN_IP6_17_16,
+ FN_IP6_20_18, FN_IP6_23_21, FN_IP7_2_0, FN_IP7_5_3,
+ FN_IP7_8_6, FN_IP7_11_9, FN_IP7_14_12, FN_IP7_17_15,
+ FN_IP7_20_18, FN_IP7_23_21, FN_IP7_26_24, FN_IP7_28_27,
+ FN_IP7_30_29, FN_IP8_1_0, FN_IP8_3_2, FN_IP8_5_4,
+ FN_IP8_7_6, FN_IP8_9_8, FN_IP8_11_10, FN_IP8_13_12,
+ FN_IP8_15_14, FN_IP8_17_16, FN_IP8_19_18, FN_IP9_1_0,
+
+ /* GPSR4 */
+ FN_IP9_19_18, FN_IP9_21_20, FN_IP9_23_22, FN_IP9_25_24,
+ FN_IP9_11_10, FN_IP9_13_12, FN_IP9_15_14, FN_IP9_17_16,
+ FN_IP9_3_2, FN_IP9_5_4, FN_IP9_7_6, FN_IP9_9_8,
+ FN_IP9_27_26, FN_IP9_29_28, FN_IP10_2_0, FN_IP10_5_3,
+ FN_IP10_8_6, FN_IP10_11_9, FN_IP10_14_12, FN_IP10_15,
+ FN_IP10_18_16, FN_IP10_21_19, FN_IP11_0, FN_IP11_1,
+ FN_SCL0, FN_IP11_2, FN_PENC0, FN_IP11_15_13, /* Need check*/
+ FN_USB_OVC0, FN_IP11_18_16,
+ FN_IP10_22, FN_IP10_24_23,
+
+ /* GPSR5 */
+ FN_IP10_25, FN_IP11_3, FN_IRQ2_B, FN_IRQ3_B,
+ FN_IP10_27_26, /* 10 */
+ FN_IP10_29_28, /* 11 */
+
+ /* IPSR0 */
+ FN_A15, FN_ST0_VCO_CLKIN, FN_LCD_DATA15_A, FN_TIOC3D_C,
+ FN_A14, FN_LCD_DATA14_A, FN_TIOC3C_C,
+ FN_A13, FN_LCD_DATA13_A, FN_TIOC3B_C,
+ FN_A12, FN_LCD_DATA12_A, FN_TIOC3A_C,
+ FN_A11, FN_ST0_D7, FN_LCD_DATA11_A, FN_TIOC2B_C,
+ FN_A10, FN_ST0_D6, FN_LCD_DATA10_A, FN_TIOC2A_C,
+ FN_A9, FN_ST0_D5, FN_LCD_DATA9_A, FN_TIOC1B_C,
+ FN_A8, FN_ST0_D4, FN_LCD_DATA8_A, FN_TIOC1A_C,
+ FN_A7, FN_ST0_D3, FN_LCD_DATA7_A, FN_TIOC0D_C,
+ FN_A6, FN_ST0_D2, FN_LCD_DATA6_A, FN_TIOC0C_C,
+ FN_A5, FN_ST0_D1, FN_LCD_DATA5_A, FN_TIOC0B_C,
+ FN_A4, FN_ST0_D0, FN_LCD_DATA4_A, FN_TIOC0A_C,
+ FN_A3, FN_ST0_VLD, FN_LCD_DATA3_A, FN_TCLKD_C,
+ FN_A2, FN_ST0_SYC, FN_LCD_DATA2_A, FN_TCLKC_C,
+ FN_A1, FN_ST0_REQ, FN_LCD_DATA1_A, FN_TCLKB_C,
+ FN_A0, FN_ST0_CLKIN, FN_LCD_DATA0_A, FN_TCLKA_C,
+
+ /* IPSR1 */
+ FN_D3, FN_SD0_DAT3_A, FN_MMC_D3_A, FN_ST1_D6, FN_FD3_A,
+ FN_D2, FN_SD0_DAT2_A, FN_MMC_D2_A, FN_ST1_D5, FN_FD2_A,
+ FN_D1, FN_SD0_DAT1_A, FN_MMC_D1_A, FN_ST1_D4, FN_FD1_A,
+ FN_D0, FN_SD0_DAT0_A, FN_MMC_D0_A, FN_ST1_D3, FN_FD0_A,
+ FN_A25, FN_TX2_D, FN_ST1_D2,
+ FN_A24, FN_RX2_D, FN_ST1_D1,
+ FN_A23, FN_ST1_D0, FN_LCD_M_DISP_A,
+ FN_A22, FN_ST1_VLD, FN_LCD_VEPWC_A,
+ FN_A21, FN_ST1_SYC, FN_LCD_VCPWC_A,
+ FN_A20, FN_ST1_REQ, FN_LCD_FLM_A,
+ FN_A19, FN_ST1_CLKIN, FN_LCD_CLK_A, FN_TIOC4D_C,
+ FN_A18, FN_ST1_PWM, FN_LCD_CL2_A, FN_TIOC4C_C,
+ FN_A17, FN_ST1_VCO_CLKIN, FN_LCD_CL1_A, FN_TIOC4B_C,
+ FN_A16, FN_ST0_PWM, FN_LCD_DON_A, FN_TIOC4A_C,
+
+ /* IPSR2 */
+ FN_D14, FN_TX2_B, FN_FSE_A, FN_ET0_TX_CLK_B,
+ FN_D13, FN_RX2_B, FN_FRB_A, FN_ET0_ETXD6_B,
+ FN_D12, FN_FWE_A, FN_ET0_ETXD5_B,
+ FN_D11, FN_RSPI_MISO_A, FN_QMI_QIO1_A, FN_FRE_A,
+ FN_ET0_ETXD3_B,
+ FN_D10, FN_RSPI_MOSI_A, FN_QMO_QIO0_A, FN_FALE_A,
+ FN_ET0_ETXD2_B,
+ FN_D9, FN_SD0_CMD_A, FN_MMC_CMD_A, FN_QIO3_A, FN_FCLE_A,
+ FN_ET0_ETXD1_B,
+ FN_D8, FN_SD0_CLK_A, FN_MMC_CLK_A, FN_QIO2_A, FN_FCE_A,
+ FN_ET0_GTX_CLK_B,
+ FN_D7, FN_RSPI_SSL_A, FN_MMC_D7_A, FN_QSSL_A, FN_FD7_A,
+ FN_D6, FN_RSPI_RSPCK_A, FN_MMC_D6_A, FN_QSPCLK_A, FN_FD6_A,
+ FN_D5, FN_SD0_WP_A, FN_MMC_D5_A, FN_FD5_A,
+ FN_D4, FN_SD0_CD_A, FN_MMC_D4_A, FN_ST1_D7, FN_FD4_A,
+
+ /* IPSR3 */
+ FN_DRACK0, FN_SD1_DAT2_A, FN_ATAG, FN_TCLK1_A, FN_ET0_ETXD7,
+ FN_EX_WAIT2, FN_SD1_DAT1_A, FN_DACK2, FN_CAN1_RX_C,
+ FN_ET0_MAGIC_C, FN_ET0_ETXD6_A,
+ FN_EX_WAIT1, FN_SD1_DAT0_A, FN_DREQ2, FN_CAN1_TX_C,
+ FN_ET0_LINK_C, FN_ET0_ETXD5_A,
+ FN_EX_WAIT0, FN_TCLK1_B,
+ FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4,
+ FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B, FN_ET0_ETXD3_A,
+ FN_EX_CS4, FN_SD1_WP_A, FN_ATAWR, FN_QMI_QIO1_B, FN_ET0_ETXD2_A,
+ FN_EX_CS3, FN_SD1_CD_A, FN_ATARD, FN_QMO_QIO0_B, FN_ET0_ETXD1_A,
+ FN_EX_CS2, FN_TX3_B, FN_ATACS1, FN_QSPCLK_B, FN_ET0_GTX_CLK_A,
+ FN_EX_CS1, FN_RX3_B, FN_ATACS0, FN_QIO2_B, FN_ET0_ETXD0,
+ FN_CS1_A26, FN_QIO3_B,
+ FN_D15, FN_SCK2_B,
+
+ /* IPSR4 */
+ FN_SCK2_A, FN_VI0_G3,
+ FN_RTS1_B, FN_VI0_G2,
+ FN_CTS1_B, FN_VI0_DATA7_VI0_G1,
+ FN_TX1_B, FN_VI0_DATA6_VI0_G0, FN_ET0_PHY_INT_A,
+ FN_RX1_B, FN_VI0_DATA5_VI0_B5, FN_ET0_MAGIC_A,
+ FN_SCK1_B, FN_VI0_DATA4_VI0_B4, FN_ET0_LINK_A,
+ FN_RTS0_B, FN_VI0_DATA3_VI0_B3, FN_ET0_MDIO_A,
+ FN_CTS0_B, FN_VI0_DATA2_VI0_B2, FN_RMII0_MDIO_A, FN_ET0_MDC,
+ FN_HTX0_A, FN_TX1_A, FN_VI0_DATA1_VI0_B1, FN_RMII0_MDC_A, FN_ET0_COL,
+ FN_HRX0_A, FN_RX1_A, FN_VI0_DATA0_VI0_B0, FN_RMII0_CRS_DV_A, FN_ET0_CRS,
+ FN_HSCK0_A, FN_SCK1_A, FN_VI0_VSYNC, FN_RMII0_RX_ER_A, FN_ET0_RX_ER,
+ FN_HRTS0_A, FN_RTS1_A, FN_VI0_HSYNC, FN_RMII0_TXD_EN_A, FN_ET0_RX_DV,
+ FN_HCTS0_A, FN_CTS1_A, FN_VI0_FIELD, FN_RMII0_RXD1_A, FN_ET0_ERXD7,
+
+ /* IPSR5 */
+ FN_SD2_CLK_A, FN_RX2_A, FN_VI0_G4, FN_ET0_RX_CLK_B,
+ FN_SD2_CMD_A, FN_TX2_A, FN_VI0_G5, FN_ET0_ERXD2_B,
+ FN_SD2_DAT0_A, FN_RX3_A, FN_VI0_R0, FN_ET0_ERXD3_B,
+ FN_SD2_DAT1_A, FN_TX3_A, FN_VI0_R1, FN_ET0_MDIO_B,
+ FN_SD2_DAT2_A, FN_RX4_A, FN_VI0_R2, FN_ET0_LINK_B,
+ FN_SD2_DAT3_A, FN_TX4_A, FN_VI0_R3, FN_ET0_MAGIC_B,
+ FN_SD2_CD_A, FN_RX5_A, FN_VI0_R4, FN_ET0_PHY_INT_B,
+ FN_SD2_WP_A, FN_TX5_A, FN_VI0_R5,
+ FN_REF125CK, FN_ADTRG, FN_RX5_C,
+ FN_REF50CK, FN_CTS1_E, FN_HCTS0_D,
+
+ /* IPSR6 */
+ FN_DU0_DR0, FN_SCIF_CLK_B, FN_HRX0_D, FN_IETX_A, FN_TCLKA_A, FN_HIFD00,
+ FN_DU0_DR1, FN_SCK0_B, FN_HTX0_D, FN_IERX_A, FN_TCLKB_A, FN_HIFD01,
+ FN_DU0_DR2, FN_RX0_B, FN_TCLKC_A, FN_HIFD02,
+ FN_DU0_DR3, FN_TX0_B, FN_TCLKD_A, FN_HIFD03,
+ FN_DU0_DR4, FN_CTS0_C, FN_TIOC0A_A, FN_HIFD04,
+ FN_DU0_DR5, FN_RTS0_C, FN_TIOC0B_A, FN_HIFD05,
+ FN_DU0_DR6, FN_SCK1_C, FN_TIOC0C_A, FN_HIFD06,
+ FN_DU0_DR7, FN_RX1_C, FN_TIOC0D_A, FN_HIFD07,
+ FN_DU0_DG0, FN_TX1_C, FN_HSCK0_D, FN_IECLK_A, FN_TIOC1A_A, FN_HIFD08,
+ FN_DU0_DG1, FN_CTS1_C, FN_HRTS0_D, FN_TIOC1B_A, FN_HIFD09,
+
+ /* IPSR7 */
+ FN_DU0_DG2, FN_RTS1_C, FN_RMII0_MDC_B, FN_TIOC2A_A, FN_HIFD10,
+ FN_DU0_DG3, FN_SCK2_C, FN_RMII0_MDIO_B, FN_TIOC2B_A, FN_HIFD11,
+ FN_DU0_DG4, FN_RX2_C, FN_RMII0_CRS_DV_B, FN_TIOC3A_A, FN_HIFD12,
+ FN_DU0_DG5, FN_TX2_C, FN_RMII0_RX_ER_B, FN_TIOC3B_A, FN_HIFD13,
+ FN_DU0_DG6, FN_RX3_C, FN_RMII0_RXD0_B, FN_TIOC3C_A, FN_HIFD14,
+ FN_DU0_DG7, FN_TX3_C, FN_RMII0_RXD1_B, FN_TIOC3D_A, FN_HIFD15,
+ FN_DU0_DB0, FN_RX4_C, FN_RMII0_TXD_EN_B, FN_TIOC4A_A, FN_HIFCS,
+ FN_DU0_DB1, FN_TX4_C, FN_RMII0_TXD0_B, FN_TIOC4B_A, FN_HIFRS,
+ FN_DU0_DB2, FN_RX5_B, FN_RMII0_TXD1_B, FN_TIOC4C_A, FN_HIFWR,
+ FN_DU0_DB3, FN_TX5_B, FN_TIOC4D_A, FN_HIFRD,
+ FN_DU0_DB4, FN_HIFINT,
+
+ /* IPSR8 */
+ FN_DU0_DB5, FN_HIFDREQ,
+ FN_DU0_DB6, FN_HIFRDY,
+ FN_DU0_DB7, FN_SSI_SCK0_B, FN_HIFEBL_B,
+ FN_DU0_DOTCLKIN, FN_HSPI_CS0_C, FN_SSI_WS0_B,
+ FN_DU0_DOTCLKOUT, FN_HSPI_CLK0_C, FN_SSI_SDATA0_B,
+ FN_DU0_EXHSYNC_DU0_HSYNC, FN_HSPI_TX0_C, FN_SSI_SCK1_B,
+ FN_DU0_EXVSYNC_DU0_VSYNC, FN_HSPI_RX0_C, FN_SSI_WS1_B,
+ FN_DU0_EXODDF_DU0_ODDF, FN_CAN0_RX_B, FN_HSCK0_B, FN_SSI_SDATA1_B,
+ FN_DU0_DISP, FN_CAN0_TX_B, FN_HRX0_B, FN_AUDIO_CLKA_B,
+ FN_DU0_CDE, FN_HTX0_B, FN_AUDIO_CLKB_B, FN_LCD_VCPWC_B,
+ FN_IRQ0_A, FN_HSPI_TX_B, FN_RX3_E, FN_ET0_ERXD0,
+ FN_IRQ1_A, FN_HSPI_RX_B, FN_TX3_E, FN_ET0_ERXD1,
+ FN_IRQ2_A, FN_CTS0_A, FN_HCTS0_B, FN_ET0_ERXD2_A,
+ FN_IRQ3_A, FN_RTS0_A, FN_HRTS0_B, FN_ET0_ERXD3_A,
+
+ /* IPSR9 */
+ FN_VI1_CLK_A, FN_FD0_B, FN_LCD_DATA0_B,
+ FN_VI1_0_A, FN_FD1_B, FN_LCD_DATA1_B,
+ FN_VI1_1_A, FN_FD2_B, FN_LCD_DATA2_B,
+ FN_VI1_2_A, FN_FD3_B, FN_LCD_DATA3_B,
+ FN_VI1_3_A, FN_FD4_B, FN_LCD_DATA4_B,
+ FN_VI1_4_A, FN_FD5_B, FN_LCD_DATA5_B,
+ FN_VI1_5_A, FN_FD6_B, FN_LCD_DATA6_B,
+ FN_VI1_6_A, FN_FD7_B, FN_LCD_DATA7_B,
+ FN_VI1_7_A, FN_FCE_B, FN_LCD_DATA8_B,
+ FN_SSI_SCK0_A, FN_TIOC1A_B, FN_LCD_DATA9_B,
+ FN_SSI_WS0_A, FN_TIOC1B_B, FN_LCD_DATA10_B,
+ FN_SSI_SDATA0_A, FN_VI1_0_B, FN_TIOC2A_B, FN_LCD_DATA11_B,
+ FN_SSI_SCK1_A, FN_VI1_1_B, FN_TIOC2B_B, FN_LCD_DATA12_B,
+ FN_SSI_WS1_A, FN_VI1_2_B, FN_LCD_DATA13_B,
+ FN_SSI_SDATA1_A, FN_VI1_3_B, FN_LCD_DATA14_B,
+
+ /* IPSR10 */
+ FN_SSI_SCK23, FN_VI1_4_B, FN_RX1_D, FN_FCLE_B, FN_LCD_DATA15_B,
+ FN_SSI_WS23, FN_VI1_5_B, FN_TX1_D, FN_HSCK0_C, FN_FALE_B, FN_LCD_DON_B,
+ FN_SSI_SDATA2, FN_VI1_6_B, FN_HRX0_C, FN_FRE_B, FN_LCD_CL1_B,
+ FN_SSI_SDATA3, FN_VI1_7_B, FN_HTX0_C, FN_FWE_B, FN_LCD_CL2_B,
+ FN_AUDIO_CLKA_A, FN_VI1_CLK_B, FN_SCK1_D, FN_IECLK_B, FN_LCD_FLM_B,
+ FN_AUDIO_CLKB_A, FN_LCD_CLK_B,
+ FN_AUDIO_CLKC, FN_SCK1_E, FN_HCTS0_C, FN_FRB_B, FN_LCD_VEPWC_B,
+ FN_AUDIO_CLKOUT, FN_TX1_E, FN_HRTS0_C, FN_FSE_B, FN_LCD_M_DISP_B,
+ FN_CAN_CLK_A, FN_RX4_D,
+ FN_CAN0_TX_A, FN_TX4_D, FN_MLB_CLK,
+ FN_CAN1_RX_A, FN_IRQ1_B,
+ FN_CAN0_RX_A, FN_IRQ0_B, FN_MLB_SIG,
+ FN_CAN1_TX_A, FN_TX5_C, FN_MLB_DAT,
+
+ /* IPSR11 */
+ FN_SCL1, FN_SCIF_CLK_C,
+ FN_SDA1, FN_RX1_E,
+ FN_SDA0, FN_HIFEBL_A,
+ FN_SDSELF, FN_RTS1_E,
+ FN_SCIF_CLK_A, FN_HSPI_CLK_A, FN_VI0_CLK, FN_RMII0_TXD0_A, FN_ET0_ERXD4,
+ FN_SCK0_A, FN_HSPI_CS_A, FN_VI0_CLKENB, FN_RMII0_TXD1_A, FN_ET0_ERXD5,
+ FN_RX0_A, FN_HSPI_RX_A, FN_RMII0_RXD0_A, FN_ET0_ERXD6,
+ FN_TX0_A, FN_HSPI_TX_A,
+ FN_PENC1, FN_TX3_D, FN_CAN1_TX_B, FN_TX5_D, FN_IETX_B,
+ FN_USB_OVC1, FN_RX3_D, FN_CAN1_RX_B, FN_RX5_D, FN_IERX_B,
+ FN_DREQ0, FN_SD1_CLK_A, FN_ET0_TX_EN,
+ FN_DACK0, FN_SD1_DAT3_A, FN_ET0_TX_ER,
+ FN_DREQ1, FN_HSPI_CLK_B, FN_RX4_B, FN_ET0_PHY_INT_C, FN_ET0_TX_CLK_A,
+ FN_DACK1, FN_HSPI_CS_B, FN_TX4_B, FN_ET0_RX_CLK_A,
+ FN_PRESETOUT, FN_ST_CLKOUT,
+
+ /* MOD_SEL1 */
+ FN_SEL_IEBUS_0, FN_SEL_IEBUS_1,
+ FN_SEL_RQSPI_0, FN_SEL_RQSPI_1,
+ FN_SEL_VIN1_0, FN_SEL_VIN1_1,
+ FN_SEL_HIF_0, FN_SEL_HIF_1,
+ FN_SEL_RSPI_0, FN_SEL_RSPI_1,
+ FN_SEL_LCDC_0, FN_SEL_LCDC_1,
+ FN_SEL_ET0_CTL_0, FN_SEL_ET0_CTL_1, FN_SEL_ET0_CTL_2,
+ FN_SEL_ET0_0, FN_SEL_ET0_1,
+ FN_SEL_RMII_0, FN_SEL_RMII_1,
+ FN_SEL_TMU_0, FN_SEL_TMU_1,
+ FN_SEL_HSPI_0, FN_SEL_HSPI_1, FN_SEL_HSPI_2,
+ FN_SEL_HSCIF_0, FN_SEL_HSCIF_1, FN_SEL_HSCIF_2, FN_SEL_HSCIF_3,
+ FN_SEL_RCAN_CLK_0, FN_SEL_RCAN_CLK_1,
+ FN_SEL_RCAN1_0, FN_SEL_RCAN1_1, FN_SEL_RCAN1_2,
+ FN_SEL_RCAN0_0, FN_SEL_RCAN0_1,
+ FN_SEL_SDHI2_0, FN_SEL_SDHI2_1,
+ FN_SEL_SDHI1_0, FN_SEL_SDHI1_1,
+ FN_SEL_SDHI0_0, FN_SEL_SDHI0_1,
+ FN_SEL_SSI1_0, FN_SEL_SSI1_1,
+ FN_SEL_SSI0_0, FN_SEL_SSI0_1,
+ FN_SEL_AUDIO_CLKB_0, FN_SEL_AUDIO_CLKB_1,
+ FN_SEL_AUDIO_CLKA_0, FN_SEL_AUDIO_CLKA_1,
+ FN_SEL_FLCTL_0, FN_SEL_FLCTL_1,
+ FN_SEL_MMC_0, FN_SEL_MMC_1,
+ FN_SEL_INTC_0, FN_SEL_INTC_1,
+
+ /* MOD_SEL2 */
+ FN_SEL_MTU2_CLK_0, FN_SEL_MTU2_CLK_1,
+ FN_SEL_MTU2_CH4_0, FN_SEL_MTU2_CH4_1,
+ FN_SEL_MTU2_CH3_0, FN_SEL_MTU2_CH3_1,
+ FN_SEL_MTU2_CH2_0, FN_SEL_MTU2_CH2_1, FN_SEL_MTU2_CH2_2,
+ FN_SEL_MTU2_CH1_0, FN_SEL_MTU2_CH1_1, FN_SEL_MTU2_CH1_2,
+ FN_SEL_MTU2_CH0_0, FN_SEL_MTU2_CH0_1,
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1,
+ FN_SEL_SCIF5_2, FN_SEL_SCIF5_3,
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1,
+ FN_SEL_SCIF4_2, FN_SEL_SCIF4_3,
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2,
+ FN_SEL_SCIF3_3, FN_SEL_SCIF3_4,
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2,
+ FN_SEL_SCIF2_3,
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2,
+ FN_SEL_SCIF1_3, FN_SEL_SCIF1_4,
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2,
+ FN_SEL_SCIF_CLK_0, FN_SEL_SCIF_CLK_1, FN_SEL_SCIF_CLK_2,
+
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ CLKOUT_MARK, BS_MARK, CS0_MARK, EX_CS0_MARK, RD_MARK,
+ WE0_MARK, WE1_MARK,
+
+ SCL0_MARK, PENC0_MARK, USB_OVC0_MARK,
+
+ IRQ2_B_MARK, IRQ3_B_MARK,
+
+ /* IPSR0 */
+ A15_MARK, ST0_VCO_CLKIN_MARK, LCD_DATA15_A_MARK, TIOC3D_C_MARK,
+ A14_MARK, LCD_DATA14_A_MARK, TIOC3C_C_MARK,
+ A13_MARK, LCD_DATA13_A_MARK, TIOC3B_C_MARK,
+ A12_MARK, LCD_DATA12_A_MARK, TIOC3A_C_MARK,
+ A11_MARK, ST0_D7_MARK, LCD_DATA11_A_MARK, TIOC2B_C_MARK,
+ A10_MARK, ST0_D6_MARK, LCD_DATA10_A_MARK, TIOC2A_C_MARK,
+ A9_MARK, ST0_D5_MARK, LCD_DATA9_A_MARK, TIOC1B_C_MARK,
+ A8_MARK, ST0_D4_MARK, LCD_DATA8_A_MARK, TIOC1A_C_MARK,
+ A7_MARK, ST0_D3_MARK, LCD_DATA7_A_MARK, TIOC0D_C_MARK,
+ A6_MARK, ST0_D2_MARK, LCD_DATA6_A_MARK, TIOC0C_C_MARK,
+ A5_MARK, ST0_D1_MARK, LCD_DATA5_A_MARK, TIOC0B_C_MARK,
+ A4_MARK, ST0_D0_MARK, LCD_DATA4_A_MARK, TIOC0A_C_MARK,
+ A3_MARK, ST0_VLD_MARK, LCD_DATA3_A_MARK, TCLKD_C_MARK,
+ A2_MARK, ST0_SYC_MARK, LCD_DATA2_A_MARK, TCLKC_C_MARK,
+ A1_MARK, ST0_REQ_MARK, LCD_DATA1_A_MARK, TCLKB_C_MARK,
+ A0_MARK, ST0_CLKIN_MARK, LCD_DATA0_A_MARK, TCLKA_C_MARK,
+
+ /* IPSR1 */
+ D3_MARK, SD0_DAT3_A_MARK, MMC_D3_A_MARK, ST1_D6_MARK, FD3_A_MARK,
+ D2_MARK, SD0_DAT2_A_MARK, MMC_D2_A_MARK, ST1_D5_MARK, FD2_A_MARK,
+ D1_MARK, SD0_DAT1_A_MARK, MMC_D1_A_MARK, ST1_D4_MARK, FD1_A_MARK,
+ D0_MARK, SD0_DAT0_A_MARK, MMC_D0_A_MARK, ST1_D3_MARK, FD0_A_MARK,
+ A25_MARK, TX2_D_MARK, ST1_D2_MARK,
+ A24_MARK, RX2_D_MARK, ST1_D1_MARK,
+ A23_MARK, ST1_D0_MARK, LCD_M_DISP_A_MARK,
+ A22_MARK, ST1_VLD_MARK, LCD_VEPWC_A_MARK,
+ A21_MARK, ST1_SYC_MARK, LCD_VCPWC_A_MARK,
+ A20_MARK, ST1_REQ_MARK, LCD_FLM_A_MARK,
+ A19_MARK, ST1_CLKIN_MARK, LCD_CLK_A_MARK, TIOC4D_C_MARK,
+ A18_MARK, ST1_PWM_MARK, LCD_CL2_A_MARK, TIOC4C_C_MARK,
+ A17_MARK, ST1_VCO_CLKIN_MARK, LCD_CL1_A_MARK, TIOC4B_C_MARK,
+ A16_MARK, ST0_PWM_MARK, LCD_DON_A_MARK, TIOC4A_C_MARK,
+
+ /* IPSR2 */
+ D14_MARK, TX2_B_MARK, FSE_A_MARK, ET0_TX_CLK_B_MARK,
+ D13_MARK, RX2_B_MARK, FRB_A_MARK, ET0_ETXD6_B_MARK,
+ D12_MARK, FWE_A_MARK, ET0_ETXD5_B_MARK,
+ D11_MARK, RSPI_MISO_A_MARK, QMI_QIO1_A_MARK, FRE_A_MARK,
+ ET0_ETXD3_B_MARK,
+ D10_MARK, RSPI_MOSI_A_MARK, QMO_QIO0_A_MARK, FALE_A_MARK,
+ ET0_ETXD2_B_MARK,
+ D9_MARK, SD0_CMD_A_MARK, MMC_CMD_A_MARK, QIO3_A_MARK,
+ FCLE_A_MARK, ET0_ETXD1_B_MARK,
+ D8_MARK, SD0_CLK_A_MARK, MMC_CLK_A_MARK, QIO2_A_MARK,
+ FCE_A_MARK, ET0_GTX_CLK_B_MARK,
+ D7_MARK, RSPI_SSL_A_MARK, MMC_D7_A_MARK, QSSL_A_MARK,
+ FD7_A_MARK,
+ D6_MARK, RSPI_RSPCK_A_MARK, MMC_D6_A_MARK, QSPCLK_A_MARK,
+ FD6_A_MARK,
+ D5_MARK, SD0_WP_A_MARK, MMC_D5_A_MARK, FD5_A_MARK,
+ D4_MARK, SD0_CD_A_MARK, MMC_D4_A_MARK, ST1_D7_MARK,
+ FD4_A_MARK,
+
+ /* IPSR3 */
+ DRACK0_MARK, SD1_DAT2_A_MARK, ATAG_MARK, TCLK1_A_MARK, ET0_ETXD7_MARK,
+ EX_WAIT2_MARK, SD1_DAT1_A_MARK, DACK2_MARK, CAN1_RX_C_MARK,
+ ET0_MAGIC_C_MARK, ET0_ETXD6_A_MARK,
+ EX_WAIT1_MARK, SD1_DAT0_A_MARK, DREQ2_MARK, CAN1_TX_C_MARK,
+ ET0_LINK_C_MARK, ET0_ETXD5_A_MARK,
+ EX_WAIT0_MARK, TCLK1_B_MARK,
+ RD_WR_MARK, TCLK0_MARK, CAN_CLK_B_MARK, ET0_ETXD4_MARK,
+ EX_CS5_MARK, SD1_CMD_A_MARK, ATADIR_MARK, QSSL_B_MARK,
+ ET0_ETXD3_A_MARK,
+ EX_CS4_MARK, SD1_WP_A_MARK, ATAWR_MARK, QMI_QIO1_B_MARK,
+ ET0_ETXD2_A_MARK,
+ EX_CS3_MARK, SD1_CD_A_MARK, ATARD_MARK, QMO_QIO0_B_MARK,
+ ET0_ETXD1_A_MARK,
+ EX_CS2_MARK, TX3_B_MARK, ATACS1_MARK, QSPCLK_B_MARK,
+ ET0_GTX_CLK_A_MARK,
+ EX_CS1_MARK, RX3_B_MARK, ATACS0_MARK, QIO2_B_MARK,
+ ET0_ETXD0_MARK,
+ CS1_A26_MARK, QIO3_B_MARK,
+ D15_MARK, SCK2_B_MARK,
+
+ /* IPSR4 */
+ SCK2_A_MARK, VI0_G3_MARK,
+ RTS1_B_MARK, VI0_G2_MARK,
+ CTS1_B_MARK, VI0_DATA7_VI0_G1_MARK,
+ TX1_B_MARK, VI0_DATA6_VI0_G0_MARK, ET0_PHY_INT_A_MARK,
+ RX1_B_MARK, VI0_DATA5_VI0_B5_MARK, ET0_MAGIC_A_MARK,
+ SCK1_B_MARK, VI0_DATA4_VI0_B4_MARK, ET0_LINK_A_MARK,
+ RTS0_B_MARK, VI0_DATA3_VI0_B3_MARK, ET0_MDIO_A_MARK,
+ CTS0_B_MARK, VI0_DATA2_VI0_B2_MARK, RMII0_MDIO_A_MARK,
+ ET0_MDC_MARK,
+ HTX0_A_MARK, TX1_A_MARK, VI0_DATA1_VI0_B1_MARK,
+ RMII0_MDC_A_MARK, ET0_COL_MARK,
+ HRX0_A_MARK, RX1_A_MARK, VI0_DATA0_VI0_B0_MARK,
+ RMII0_CRS_DV_A_MARK, ET0_CRS_MARK,
+ HSCK0_A_MARK, SCK1_A_MARK, VI0_VSYNC_MARK,
+ RMII0_RX_ER_A_MARK, ET0_RX_ER_MARK,
+ HRTS0_A_MARK, RTS1_A_MARK, VI0_HSYNC_MARK,
+ RMII0_TXD_EN_A_MARK, ET0_RX_DV_MARK,
+ HCTS0_A_MARK, CTS1_A_MARK, VI0_FIELD_MARK,
+ RMII0_RXD1_A_MARK, ET0_ERXD7_MARK,
+
+ /* IPSR5 */
+ SD2_CLK_A_MARK, RX2_A_MARK, VI0_G4_MARK, ET0_RX_CLK_B_MARK,
+ SD2_CMD_A_MARK, TX2_A_MARK, VI0_G5_MARK, ET0_ERXD2_B_MARK,
+ SD2_DAT0_A_MARK, RX3_A_MARK, VI0_R0_MARK, ET0_ERXD3_B_MARK,
+ SD2_DAT1_A_MARK, TX3_A_MARK, VI0_R1_MARK, ET0_MDIO_B_MARK,
+ SD2_DAT2_A_MARK, RX4_A_MARK, VI0_R2_MARK, ET0_LINK_B_MARK,
+ SD2_DAT3_A_MARK, TX4_A_MARK, VI0_R3_MARK, ET0_MAGIC_B_MARK,
+ SD2_CD_A_MARK, RX5_A_MARK, VI0_R4_MARK, ET0_PHY_INT_B_MARK,
+ SD2_WP_A_MARK, TX5_A_MARK, VI0_R5_MARK,
+ REF125CK_MARK, ADTRG_MARK, RX5_C_MARK,
+ REF50CK_MARK, CTS1_E_MARK, HCTS0_D_MARK,
+
+ /* IPSR6 */
+ DU0_DR0_MARK, SCIF_CLK_B_MARK, HRX0_D_MARK, IETX_A_MARK,
+ TCLKA_A_MARK, HIFD00_MARK,
+ DU0_DR1_MARK, SCK0_B_MARK, HTX0_D_MARK, IERX_A_MARK,
+ TCLKB_A_MARK, HIFD01_MARK,
+ DU0_DR2_MARK, RX0_B_MARK, TCLKC_A_MARK, HIFD02_MARK,
+ DU0_DR3_MARK, TX0_B_MARK, TCLKD_A_MARK, HIFD03_MARK,
+ DU0_DR4_MARK, CTS0_C_MARK, TIOC0A_A_MARK, HIFD04_MARK,
+ DU0_DR5_MARK, RTS0_C_MARK, TIOC0B_A_MARK, HIFD05_MARK,
+ DU0_DR6_MARK, SCK1_C_MARK, TIOC0C_A_MARK, HIFD06_MARK,
+ DU0_DR7_MARK, RX1_C_MARK, TIOC0D_A_MARK, HIFD07_MARK,
+ DU0_DG0_MARK, TX1_C_MARK, HSCK0_D_MARK, IECLK_A_MARK,
+ TIOC1A_A_MARK, HIFD08_MARK,
+ DU0_DG1_MARK, CTS1_C_MARK, HRTS0_D_MARK, TIOC1B_A_MARK,
+ HIFD09_MARK,
+
+ /* IPSR7 */
+ DU0_DG2_MARK, RTS1_C_MARK, RMII0_MDC_B_MARK, TIOC2A_A_MARK,
+ HIFD10_MARK,
+ DU0_DG3_MARK, SCK2_C_MARK, RMII0_MDIO_B_MARK, TIOC2B_A_MARK,
+ HIFD11_MARK,
+ DU0_DG4_MARK, RX2_C_MARK, RMII0_CRS_DV_B_MARK, TIOC3A_A_MARK,
+ HIFD12_MARK,
+ DU0_DG5_MARK, TX2_C_MARK, RMII0_RX_ER_B_MARK, TIOC3B_A_MARK,
+ HIFD13_MARK,
+ DU0_DG6_MARK, RX3_C_MARK, RMII0_RXD0_B_MARK, TIOC3C_A_MARK,
+ HIFD14_MARK,
+ DU0_DG7_MARK, TX3_C_MARK, RMII0_RXD1_B_MARK, TIOC3D_A_MARK,
+ HIFD15_MARK,
+ DU0_DB0_MARK, RX4_C_MARK, RMII0_TXD_EN_B_MARK, TIOC4A_A_MARK,
+ HIFCS_MARK,
+ DU0_DB1_MARK, TX4_C_MARK, RMII0_TXD0_B_MARK, TIOC4B_A_MARK,
+ HIFRS_MARK,
+ DU0_DB2_MARK, RX5_B_MARK, RMII0_TXD1_B_MARK, TIOC4C_A_MARK,
+ HIFWR_MARK,
+ DU0_DB3_MARK, TX5_B_MARK, TIOC4D_A_MARK, HIFRD_MARK,
+ DU0_DB4_MARK, HIFINT_MARK,
+
+ /* IPSR8 */
+ DU0_DB5_MARK, HIFDREQ_MARK,
+ DU0_DB6_MARK, HIFRDY_MARK,
+ DU0_DB7_MARK, SSI_SCK0_B_MARK, HIFEBL_B_MARK,
+ DU0_DOTCLKIN_MARK, HSPI_CS0_C_MARK, SSI_WS0_B_MARK,
+ DU0_DOTCLKOUT_MARK, HSPI_CLK0_C_MARK, SSI_SDATA0_B_MARK,
+ DU0_EXHSYNC_DU0_HSYNC_MARK, HSPI_TX0_C_MARK, SSI_SCK1_B_MARK,
+ DU0_EXVSYNC_DU0_VSYNC_MARK, HSPI_RX0_C_MARK, SSI_WS1_B_MARK,
+ DU0_EXODDF_DU0_ODDF_MARK, CAN0_RX_B_MARK, HSCK0_B_MARK,
+ SSI_SDATA1_B_MARK,
+ DU0_DISP_MARK, CAN0_TX_B_MARK, HRX0_B_MARK, AUDIO_CLKA_B_MARK,
+ DU0_CDE_MARK, HTX0_B_MARK, AUDIO_CLKB_B_MARK, LCD_VCPWC_B_MARK,
+ IRQ0_A_MARK, HSPI_TX_B_MARK, RX3_E_MARK, ET0_ERXD0_MARK,
+ IRQ1_A_MARK, HSPI_RX_B_MARK, TX3_E_MARK, ET0_ERXD1_MARK,
+ IRQ2_A_MARK, CTS0_A_MARK, HCTS0_B_MARK, ET0_ERXD2_A_MARK,
+ IRQ3_A_MARK, RTS0_A_MARK, HRTS0_B_MARK, ET0_ERXD3_A_MARK,
+
+ /* IPSR9 */
+ VI1_CLK_A_MARK, FD0_B_MARK, LCD_DATA0_B_MARK,
+ VI1_0_A_MARK, FD1_B_MARK, LCD_DATA1_B_MARK,
+ VI1_1_A_MARK, FD2_B_MARK, LCD_DATA2_B_MARK,
+ VI1_2_A_MARK, FD3_B_MARK, LCD_DATA3_B_MARK,
+ VI1_3_A_MARK, FD4_B_MARK, LCD_DATA4_B_MARK,
+ VI1_4_A_MARK, FD5_B_MARK, LCD_DATA5_B_MARK,
+ VI1_5_A_MARK, FD6_B_MARK, LCD_DATA6_B_MARK,
+ VI1_6_A_MARK, FD7_B_MARK, LCD_DATA7_B_MARK,
+ VI1_7_A_MARK, FCE_B_MARK, LCD_DATA8_B_MARK,
+ SSI_SCK0_A_MARK, TIOC1A_B_MARK, LCD_DATA9_B_MARK,
+ SSI_WS0_A_MARK, TIOC1B_B_MARK, LCD_DATA10_B_MARK,
+ SSI_SDATA0_A_MARK, VI1_0_B_MARK, TIOC2A_B_MARK, LCD_DATA11_B_MARK,
+ SSI_SCK1_A_MARK, VI1_1_B_MARK, TIOC2B_B_MARK, LCD_DATA12_B_MARK,
+ SSI_WS1_A_MARK, VI1_2_B_MARK, LCD_DATA13_B_MARK,
+ SSI_SDATA1_A_MARK, VI1_3_B_MARK, LCD_DATA14_B_MARK,
+
+ /* IPSR10 */
+ SSI_SCK23_MARK, VI1_4_B_MARK, RX1_D_MARK, FCLE_B_MARK,
+ LCD_DATA15_B_MARK,
+ SSI_WS23_MARK, VI1_5_B_MARK, TX1_D_MARK, HSCK0_C_MARK,
+ FALE_B_MARK, LCD_DON_B_MARK,
+ SSI_SDATA2_MARK, VI1_6_B_MARK, HRX0_C_MARK, FRE_B_MARK,
+ LCD_CL1_B_MARK,
+ SSI_SDATA3_MARK, VI1_7_B_MARK, HTX0_C_MARK, FWE_B_MARK,
+ LCD_CL2_B_MARK,
+ AUDIO_CLKA_A_MARK, VI1_CLK_B_MARK, SCK1_D_MARK, IECLK_B_MARK,
+ LCD_FLM_B_MARK,
+ AUDIO_CLKB_A_MARK, LCD_CLK_B_MARK,
+ AUDIO_CLKC_MARK, SCK1_E_MARK, HCTS0_C_MARK, FRB_B_MARK,
+ LCD_VEPWC_B_MARK,
+ AUDIO_CLKOUT_MARK, TX1_E_MARK, HRTS0_C_MARK, FSE_B_MARK,
+ LCD_M_DISP_B_MARK,
+ CAN_CLK_A_MARK, RX4_D_MARK,
+ CAN0_TX_A_MARK, TX4_D_MARK, MLB_CLK_MARK,
+ CAN1_RX_A_MARK, IRQ1_B_MARK,
+ CAN0_RX_A_MARK, IRQ0_B_MARK, MLB_SIG_MARK,
+ CAN1_TX_A_MARK, TX5_C_MARK, MLB_DAT_MARK,
+
+ /* IPSR11 */
+ SCL1_MARK, SCIF_CLK_C_MARK,
+ SDA1_MARK, RX1_E_MARK,
+ SDA0_MARK, HIFEBL_A_MARK,
+ SDSELF_MARK, RTS1_E_MARK,
+ SCIF_CLK_A_MARK, HSPI_CLK_A_MARK, VI0_CLK_MARK, RMII0_TXD0_A_MARK,
+ ET0_ERXD4_MARK,
+ SCK0_A_MARK, HSPI_CS_A_MARK, VI0_CLKENB_MARK, RMII0_TXD1_A_MARK,
+ ET0_ERXD5_MARK,
+ RX0_A_MARK, HSPI_RX_A_MARK, RMII0_RXD0_A_MARK, ET0_ERXD6_MARK,
+ TX0_A_MARK, HSPI_TX_A_MARK,
+ PENC1_MARK, TX3_D_MARK, CAN1_TX_B_MARK, TX5_D_MARK,
+ IETX_B_MARK,
+ USB_OVC1_MARK, RX3_D_MARK, CAN1_RX_B_MARK, RX5_D_MARK,
+ IERX_B_MARK,
+ DREQ0_MARK, SD1_CLK_A_MARK, ET0_TX_EN_MARK,
+ DACK0_MARK, SD1_DAT3_A_MARK, ET0_TX_ER_MARK,
+ DREQ1_MARK, HSPI_CLK_B_MARK, RX4_B_MARK, ET0_PHY_INT_C_MARK,
+ ET0_TX_CLK_A_MARK,
+ DACK1_MARK, HSPI_CS_B_MARK, TX4_B_MARK, ET0_RX_CLK_A_MARK,
+ PRESETOUT_MARK, ST_CLKOUT_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
+
+ PINMUX_DATA(CLKOUT_MARK, FN_CLKOUT),
+ PINMUX_DATA(BS_MARK, FN_BS), PINMUX_DATA(CS0_MARK, FN_CS0),
+ PINMUX_DATA(EX_CS0_MARK, FN_EX_CS0),
+ PINMUX_DATA(RD_MARK, FN_RD), PINMUX_DATA(WE0_MARK, FN_WE0),
+ PINMUX_DATA(WE1_MARK, FN_WE1),
+ PINMUX_DATA(SCL0_MARK, FN_SCL0), PINMUX_DATA(PENC0_MARK, FN_PENC0),
+ PINMUX_DATA(USB_OVC0_MARK, FN_USB_OVC0),
+ PINMUX_DATA(IRQ2_B_MARK, FN_IRQ2_B),
+ PINMUX_DATA(IRQ3_B_MARK, FN_IRQ3_B),
+
+ /* IPSR0 */
+ PINMUX_IPSR_DATA(IP0_1_0, A0),
+ PINMUX_IPSR_DATA(IP0_1_0, ST0_CLKIN),
+ PINMUX_IPSR_MODSEL_DATA(IP0_1_0, LCD_DATA0_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_1_0, TCLKA_C, SEL_MTU2_CLK_1),
+
+ PINMUX_IPSR_DATA(IP0_3_2, A1),
+ PINMUX_IPSR_DATA(IP0_3_2, ST0_REQ),
+ PINMUX_IPSR_MODSEL_DATA(IP0_3_2, LCD_DATA1_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_3_2, TCLKB_C, SEL_MTU2_CLK_1),
+
+ PINMUX_IPSR_DATA(IP0_5_4, A2),
+ PINMUX_IPSR_DATA(IP0_5_4, ST0_SYC),
+ PINMUX_IPSR_MODSEL_DATA(IP0_5_4, LCD_DATA2_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_5_4, TCLKC_C, SEL_MTU2_CLK_1),
+
+ PINMUX_IPSR_DATA(IP0_7_6, A3),
+ PINMUX_IPSR_DATA(IP0_7_6, ST0_VLD),
+ PINMUX_IPSR_MODSEL_DATA(IP0_7_6, LCD_DATA3_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_7_6, TCLKD_C, SEL_MTU2_CLK_1),
+
+ PINMUX_IPSR_DATA(IP0_9_8, A4),
+ PINMUX_IPSR_DATA(IP0_9_8, ST0_D0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_9_8, LCD_DATA4_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_9_8, TIOC0A_C, SEL_MTU2_CH0_1),
+
+ PINMUX_IPSR_DATA(IP0_11_10, A5),
+ PINMUX_IPSR_DATA(IP0_11_10, ST0_D1),
+ PINMUX_IPSR_MODSEL_DATA(IP0_11_10, LCD_DATA5_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_11_10, TIOC0B_C, SEL_MTU2_CH0_1),
+
+ PINMUX_IPSR_DATA(IP0_13_12, A6),
+ PINMUX_IPSR_DATA(IP0_13_12, ST0_D2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_13_12, LCD_DATA6_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_13_12, TIOC0C_C, SEL_MTU2_CH0_1),
+
+ PINMUX_IPSR_DATA(IP0_15_14, A7),
+ PINMUX_IPSR_DATA(IP0_15_14, ST0_D3),
+ PINMUX_IPSR_MODSEL_DATA(IP0_15_14, LCD_DATA7_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_15_14, TIOC0D_C, SEL_MTU2_CH0_1),
+
+ PINMUX_IPSR_DATA(IP0_17_16, A8),
+ PINMUX_IPSR_DATA(IP0_17_16, ST0_D4),
+ PINMUX_IPSR_MODSEL_DATA(IP0_17_16, LCD_DATA8_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_17_16, TIOC1A_C, SEL_MTU2_CH1_2),
+
+ PINMUX_IPSR_DATA(IP0_19_18, A9),
+ PINMUX_IPSR_DATA(IP0_19_18, ST0_D5),
+ PINMUX_IPSR_MODSEL_DATA(IP0_19_18, LCD_DATA9_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_19_18, TIOC1B_C, SEL_MTU2_CH1_2),
+
+ PINMUX_IPSR_DATA(IP0_21_20, A10),
+ PINMUX_IPSR_DATA(IP0_21_20, ST0_D6),
+ PINMUX_IPSR_MODSEL_DATA(IP0_21_20, LCD_DATA10_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_21_20, TIOC2A_C, SEL_MTU2_CH2_2),
+
+ PINMUX_IPSR_DATA(IP0_23_22, A11),
+ PINMUX_IPSR_DATA(IP0_23_22, ST0_D7),
+ PINMUX_IPSR_MODSEL_DATA(IP0_23_22, LCD_DATA11_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_23_22, TIOC2B_C, SEL_MTU2_CH2_2),
+
+ PINMUX_IPSR_DATA(IP0_25_24, A12),
+ PINMUX_IPSR_MODSEL_DATA(IP0_25_24, LCD_DATA12_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_25_24, TIOC3A_C, SEL_MTU2_CH3_1),
+
+ PINMUX_IPSR_DATA(IP0_27_26, A13),
+ PINMUX_IPSR_MODSEL_DATA(IP0_27_26, LCD_DATA13_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_27_26, TIOC3B_C, SEL_MTU2_CH3_1),
+
+ PINMUX_IPSR_DATA(IP0_29_28, A14),
+ PINMUX_IPSR_MODSEL_DATA(IP0_29_28, LCD_DATA14_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_29_28, TIOC3C_C, SEL_MTU2_CH3_1),
+
+ PINMUX_IPSR_DATA(IP0_31_30, A15),
+ PINMUX_IPSR_DATA(IP0_31_30, ST0_VCO_CLKIN),
+ PINMUX_IPSR_MODSEL_DATA(IP0_31_30, LCD_DATA15_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP0_31_30, TIOC3D_C, SEL_MTU2_CH3_1),
+
+
+ /* IPSR1 */
+ PINMUX_IPSR_DATA(IP1_1_0, A16),
+ PINMUX_IPSR_DATA(IP1_1_0, ST0_PWM),
+ PINMUX_IPSR_MODSEL_DATA(IP1_1_0, LCD_DON_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_1_0, TIOC4A_C, SEL_MTU2_CH4_1),
+
+ PINMUX_IPSR_DATA(IP1_3_2, A17),
+ PINMUX_IPSR_DATA(IP1_3_2, ST1_VCO_CLKIN),
+ PINMUX_IPSR_MODSEL_DATA(IP1_3_2, LCD_CL1_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_3_2, TIOC4B_C, SEL_MTU2_CH4_1),
+
+ PINMUX_IPSR_DATA(IP1_5_4, A18),
+ PINMUX_IPSR_DATA(IP1_5_4, ST1_PWM),
+ PINMUX_IPSR_MODSEL_DATA(IP1_5_4, LCD_CL2_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_5_4, TIOC4C_C, SEL_MTU2_CH4_1),
+
+ PINMUX_IPSR_DATA(IP1_7_6, A19),
+ PINMUX_IPSR_DATA(IP1_7_6, ST1_CLKIN),
+ PINMUX_IPSR_MODSEL_DATA(IP1_7_6, LCD_CLK_A, SEL_LCDC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_7_6, TIOC4D_C, SEL_MTU2_CH4_1),
+
+ PINMUX_IPSR_DATA(IP1_9_8, A20),
+ PINMUX_IPSR_DATA(IP1_9_8, ST1_REQ),
+ PINMUX_IPSR_MODSEL_DATA(IP1_9_8, LCD_FLM_A, SEL_LCDC_0),
+
+ PINMUX_IPSR_DATA(IP1_11_10, A21),
+ PINMUX_IPSR_DATA(IP1_11_10, ST1_SYC),
+ PINMUX_IPSR_MODSEL_DATA(IP1_11_10, LCD_VCPWC_A, SEL_LCDC_0),
+
+ PINMUX_IPSR_DATA(IP1_13_12, A22),
+ PINMUX_IPSR_DATA(IP1_13_12, ST1_VLD),
+ PINMUX_IPSR_MODSEL_DATA(IP1_13_12, LCD_VEPWC_A, SEL_LCDC_0),
+
+ PINMUX_IPSR_DATA(IP1_15_14, A23),
+ PINMUX_IPSR_DATA(IP1_15_14, ST1_D0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_15_14, LCD_M_DISP_A, SEL_LCDC_0),
+
+ PINMUX_IPSR_DATA(IP1_17_16, A24),
+ PINMUX_IPSR_MODSEL_DATA(IP1_17_16, RX2_D, SEL_SCIF2_3),
+ PINMUX_IPSR_DATA(IP1_17_16, ST1_D1),
+
+ PINMUX_IPSR_DATA(IP1_19_18, A25),
+ PINMUX_IPSR_MODSEL_DATA(IP1_19_18, TX2_D, SEL_SCIF2_3),
+ PINMUX_IPSR_DATA(IP1_19_18, ST1_D2),
+
+ PINMUX_IPSR_DATA(IP1_22_20, D0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_22_20, SD0_DAT0_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_22_20, MMC_D0_A, SEL_MMC_0),
+ PINMUX_IPSR_DATA(IP1_22_20, ST1_D3),
+ PINMUX_IPSR_MODSEL_DATA(IP1_22_20, FD0_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP1_25_23, D1),
+ PINMUX_IPSR_MODSEL_DATA(IP1_25_23, SD0_DAT1_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_25_23, MMC_D1_A, SEL_MMC_0),
+ PINMUX_IPSR_DATA(IP1_25_23, ST1_D4),
+ PINMUX_IPSR_MODSEL_DATA(IP1_25_23, FD1_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP1_28_26, D2),
+ PINMUX_IPSR_MODSEL_DATA(IP1_28_26, SD0_DAT2_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_28_26, MMC_D2_A, SEL_MMC_0),
+ PINMUX_IPSR_DATA(IP1_28_26, ST1_D5),
+ PINMUX_IPSR_MODSEL_DATA(IP1_28_26, FD2_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP1_31_29, D3),
+ PINMUX_IPSR_MODSEL_DATA(IP1_31_29, SD0_DAT3_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP1_31_29, MMC_D3_A, SEL_MMC_0),
+ PINMUX_IPSR_DATA(IP1_31_29, ST1_D6),
+ PINMUX_IPSR_MODSEL_DATA(IP1_31_29, FD3_A, SEL_FLCTL_0),
+
+ /* IPSR2 */
+ PINMUX_IPSR_DATA(IP2_2_0, D4),
+ PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SD0_CD_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_2_0, MMC_D4_A, SEL_MMC_0),
+ PINMUX_IPSR_DATA(IP2_2_0, ST1_D7),
+ PINMUX_IPSR_MODSEL_DATA(IP2_2_0, FD4_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP2_4_3, D5),
+ PINMUX_IPSR_MODSEL_DATA(IP2_4_3, SD0_WP_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_4_3, MMC_D5_A, SEL_MMC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_4_3, FD5_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP2_7_5, D6),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_5, RSPI_RSPCK_A, SEL_RSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_5, MMC_D6_A, SEL_MMC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_5, QSPCLK_A, SEL_RQSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_7_5, FD6_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP2_10_8, D7),
+ PINMUX_IPSR_MODSEL_DATA(IP2_10_8, RSPI_SSL_A, SEL_RSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_10_8, MMC_D7_A, SEL_MMC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_10_8, QSSL_A, SEL_RQSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_10_8, FD7_A, SEL_FLCTL_0),
+
+ PINMUX_IPSR_DATA(IP2_13_11, D8),
+ PINMUX_IPSR_MODSEL_DATA(IP2_13_11, SD0_CLK_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_13_11, MMC_CLK_A, SEL_MMC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_13_11, QIO2_A, SEL_RQSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_13_11, FCE_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_13_11, ET0_GTX_CLK_B, SEL_ET0_1),
+
+ PINMUX_IPSR_DATA(IP2_16_14, D9),
+ PINMUX_IPSR_MODSEL_DATA(IP2_16_14, SD0_CMD_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_16_14, MMC_CMD_A, SEL_MMC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_16_14, QIO3_A, SEL_RQSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_16_14, FCLE_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_16_14, ET0_ETXD1_B, SEL_ET0_1),
+
+ PINMUX_IPSR_DATA(IP2_19_17, D10),
+ PINMUX_IPSR_MODSEL_DATA(IP2_19_17, RSPI_MOSI_A, SEL_RSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_19_17, QMO_QIO0_A, SEL_RQSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_19_17, FALE_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_19_17, ET0_ETXD2_B, SEL_ET0_1),
+
+ PINMUX_IPSR_DATA(IP2_22_20, D11),
+ PINMUX_IPSR_MODSEL_DATA(IP2_22_20, RSPI_MISO_A, SEL_RSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_22_20, QMI_QIO1_A, SEL_RQSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_22_20, FRE_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_22_20, ET0_ETXD3_B, SEL_ET0_1),
+
+ PINMUX_IPSR_DATA(IP2_24_23, D12),
+ PINMUX_IPSR_MODSEL_DATA(IP2_24_23, FWE_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_24_23, ET0_ETXD5_B, SEL_ET0_1),
+
+ PINMUX_IPSR_DATA(IP2_27_25, D13),
+ PINMUX_IPSR_MODSEL_DATA(IP2_27_25, RX2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP2_27_25, FRB_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_27_25, ET0_ETXD6_B, SEL_ET0_1),
+
+ PINMUX_IPSR_DATA(IP2_30_28, D14),
+ PINMUX_IPSR_MODSEL_DATA(IP2_30_28, TX2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP2_30_28, FSE_A, SEL_FLCTL_0),
+ PINMUX_IPSR_MODSEL_DATA(IP2_30_28, ET0_TX_CLK_B, SEL_ET0_1),
+
+ /* IPSR3 */
+ PINMUX_IPSR_DATA(IP3_1_0, D15),
+ PINMUX_IPSR_MODSEL_DATA(IP3_1_0, SCK2_B, SEL_SCIF2_1),
+
+ PINMUX_IPSR_DATA(IP3_2, CS1_A26),
+ PINMUX_IPSR_MODSEL_DATA(IP3_2, QIO3_B, SEL_RQSPI_1),
+
+ PINMUX_IPSR_DATA(IP3_5_3, EX_CS1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_5_3, RX3_B, SEL_SCIF3_1),
+ PINMUX_IPSR_DATA(IP3_5_3, ATACS0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_5_3, QIO2_B, SEL_RQSPI_1),
+ PINMUX_IPSR_DATA(IP3_5_3, ET0_ETXD0),
+
+ PINMUX_IPSR_DATA(IP3_8_6, EX_CS2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_8_6, TX3_B, SEL_SCIF3_1),
+ PINMUX_IPSR_DATA(IP3_8_6, ATACS1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_8_6, QSPCLK_B, SEL_RQSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_8_6, ET0_GTX_CLK_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP3_11_9, EX_CS3),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, SD1_CD_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP3_11_9, ATARD),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, QMO_QIO0_B, SEL_RQSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_9, ET0_ETXD1_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP3_14_12, EX_CS4),
+ PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SD1_WP_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP3_14_12, ATAWR),
+ PINMUX_IPSR_MODSEL_DATA(IP3_14_12, QMI_QIO1_B, SEL_RQSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_14_12, ET0_ETXD2_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP3_17_15, EX_CS5),
+ PINMUX_IPSR_MODSEL_DATA(IP3_17_15, SD1_CMD_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP3_17_15, ATADIR),
+ PINMUX_IPSR_MODSEL_DATA(IP3_17_15, QSSL_B, SEL_RQSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_17_15, ET0_ETXD3_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP3_19_18, RD_WR),
+ PINMUX_IPSR_DATA(IP3_19_18, TCLK0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_19_18, CAN_CLK_B, SEL_RCAN_CLK_1),
+ PINMUX_IPSR_DATA(IP3_19_18, ET0_ETXD4),
+
+ PINMUX_IPSR_DATA(IP3_20, EX_WAIT0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_20, TCLK1_B, SEL_TMU_1),
+
+ PINMUX_IPSR_DATA(IP3_23_21, EX_WAIT1),
+ PINMUX_IPSR_MODSEL_DATA(IP3_23_21, SD1_DAT0_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP3_23_21, DREQ2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_23_21, CAN1_TX_C, SEL_RCAN1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_23_21, ET0_LINK_C, SEL_ET0_CTL_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_23_21, ET0_ETXD5_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP3_26_24, EX_WAIT2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SD1_DAT1_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP3_26_24, DACK2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, CAN1_RX_C, SEL_RCAN1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, ET0_MAGIC_C, SEL_ET0_CTL_2),
+ PINMUX_IPSR_MODSEL_DATA(IP3_26_24, ET0_ETXD6_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP3_29_27, DRACK0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_29_27, SD1_DAT2_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP3_29_27, ATAG),
+ PINMUX_IPSR_MODSEL_DATA(IP3_29_27, TCLK1_A, SEL_TMU_0),
+ PINMUX_IPSR_DATA(IP3_29_27, ET0_ETXD7),
+
+ /* IPSR4 */
+ PINMUX_IPSR_MODSEL_DATA(IP4_2_0, HCTS0_A, SEL_HSCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_2_0, CTS1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP4_2_0, VI0_FIELD),
+ PINMUX_IPSR_MODSEL_DATA(IP4_2_0, RMII0_RXD1_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP4_2_0, ET0_ERXD7),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_5_3, HRTS0_A, SEL_HSCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_5_3, RTS1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP4_5_3, VI0_HSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP4_5_3, RMII0_TXD_EN_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP4_5_3, ET0_RX_DV),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_8_6, HSCK0_A, SEL_HSCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_8_6, SCK1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP4_8_6, VI0_VSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP4_8_6, RMII0_RX_ER_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP4_8_6, ET0_RX_ER),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_11_9, HRX0_A, SEL_HSCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_11_9, RX1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP4_11_9, VI0_DATA0_VI0_B0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_11_9, RMII0_CRS_DV_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP4_11_9, ET0_CRS),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_14_12, HTX0_A, SEL_HSCIF_0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_14_12, TX1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_DATA(IP4_14_12, VI0_DATA1_VI0_B1),
+ PINMUX_IPSR_MODSEL_DATA(IP4_14_12, RMII0_MDC_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP4_14_12, ET0_COL),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_17_15, CTS0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_DATA(IP4_17_15, VI0_DATA2_VI0_B2),
+ PINMUX_IPSR_MODSEL_DATA(IP4_17_15, RMII0_MDIO_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP4_17_15, ET0_MDC),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_19_18, RTS0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_DATA(IP4_19_18, VI0_DATA3_VI0_B3),
+ PINMUX_IPSR_MODSEL_DATA(IP4_19_18, ET0_MDIO_A, SEL_ET0_0),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_21_20, SCK1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP4_21_20, VI0_DATA4_VI0_B4),
+ PINMUX_IPSR_MODSEL_DATA(IP4_21_20, ET0_LINK_A, SEL_ET0_CTL_0),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_23_22, RX1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP4_23_22, VI0_DATA5_VI0_B5),
+ PINMUX_IPSR_MODSEL_DATA(IP4_23_22, ET0_MAGIC_A, SEL_ET0_CTL_0),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_25_24, TX1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP4_25_24, VI0_DATA6_VI0_G0),
+ PINMUX_IPSR_MODSEL_DATA(IP4_25_24, ET0_PHY_INT_A, SEL_ET0_CTL_0),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_27_26, CTS1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP4_27_26, VI0_DATA7_VI0_G1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_29_28, RTS1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_DATA(IP4_29_28, VI0_G2),
+
+ PINMUX_IPSR_MODSEL_DATA(IP4_31_30, SCK2_A, SEL_SCIF2_0),
+ PINMUX_IPSR_DATA(IP4_31_30, VI0_G3),
+
+ /* IPSR5 */
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, SD2_CLK_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, RX2_A, SEL_SCIF2_0),
+ PINMUX_IPSR_DATA(IP5_2_0, VI0_G4),
+ PINMUX_IPSR_MODSEL_DATA(IP5_2_0, ET0_RX_CLK_B, SEL_ET0_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_5_3, SD2_CMD_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_5_3, TX2_A, SEL_SCIF2_0),
+ PINMUX_IPSR_DATA(IP5_5_3, VI0_G5),
+ PINMUX_IPSR_MODSEL_DATA(IP5_5_3, ET0_ERXD2_B, SEL_ET0_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_8_6, SD2_DAT0_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_8_6, RX3_A, SEL_SCIF3_0),
+ PINMUX_IPSR_DATA(IP5_8_6, VI0_R0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_8_6, ET0_ERXD3_B, SEL_ET0_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_11_9, SD2_DAT1_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_11_9, TX3_A, SEL_SCIF3_0),
+ PINMUX_IPSR_DATA(IP5_11_9, VI0_R1),
+ PINMUX_IPSR_MODSEL_DATA(IP5_11_9, ET0_MDIO_B, SEL_ET0_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_14_12, SD2_DAT2_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_14_12, RX4_A, SEL_SCIF4_0),
+ PINMUX_IPSR_DATA(IP5_14_12, VI0_R2),
+ PINMUX_IPSR_MODSEL_DATA(IP5_14_12, ET0_LINK_B, SEL_ET0_CTL_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_17_15, SD2_DAT3_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_17_15, TX4_A, SEL_SCIF4_0),
+ PINMUX_IPSR_DATA(IP5_17_15, VI0_R3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_17_15, ET0_MAGIC_B, SEL_ET0_CTL_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_20_18, SD2_CD_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_20_18, RX5_A, SEL_SCIF5_0),
+ PINMUX_IPSR_DATA(IP5_20_18, VI0_R4),
+ PINMUX_IPSR_MODSEL_DATA(IP5_20_18, ET0_PHY_INT_B, SEL_ET0_CTL_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP5_22_21, SD2_WP_A, SEL_SDHI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_22_21, TX5_A, SEL_SCIF5_0),
+ PINMUX_IPSR_DATA(IP5_22_21, VI0_R5),
+
+ PINMUX_IPSR_DATA(IP5_24_23, REF125CK),
+ PINMUX_IPSR_DATA(IP5_24_23, ADTRG),
+ PINMUX_IPSR_MODSEL_DATA(IP5_24_23, RX5_C, SEL_SCIF5_2),
+
+ PINMUX_IPSR_DATA(IP5_26_25, REF50CK),
+ PINMUX_IPSR_MODSEL_DATA(IP5_26_25, CTS1_E, SEL_SCIF1_3),
+ PINMUX_IPSR_MODSEL_DATA(IP5_26_25, HCTS0_D, SEL_HSCIF_3),
+
+ /* IPSR6 */
+ PINMUX_IPSR_DATA(IP6_2_0, DU0_DR0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, SCIF_CLK_B, SEL_SCIF_CLK_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, HRX0_D, SEL_HSCIF_3),
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, IETX_A, SEL_IEBUS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_2_0, TCLKA_A, SEL_MTU2_CLK_0),
+ PINMUX_IPSR_DATA(IP6_2_0, HIFD00),
+
+ PINMUX_IPSR_DATA(IP6_5_3, DU0_DR1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SCK0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, HTX0_D, SEL_HSCIF_3),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, IERX_A, SEL_IEBUS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_5_3, TCLKB_A, SEL_MTU2_CLK_0),
+ PINMUX_IPSR_DATA(IP6_5_3, HIFD01),
+
+ PINMUX_IPSR_DATA(IP6_7_6, DU0_DR2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_7_6, RX0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_7_6, TCLKC_A, SEL_MTU2_CLK_0),
+ PINMUX_IPSR_DATA(IP6_7_6, HIFD02),
+
+ PINMUX_IPSR_DATA(IP6_9_8, DU0_DR3),
+ PINMUX_IPSR_MODSEL_DATA(IP6_9_8, TX0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_9_8, TCLKD_A, SEL_MTU2_CLK_0),
+ PINMUX_IPSR_DATA(IP6_9_8, HIFD03),
+
+ PINMUX_IPSR_DATA(IP6_11_10, DU0_DR4),
+ PINMUX_IPSR_MODSEL_DATA(IP6_11_10, CTS0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_11_10, TIOC0A_A, SEL_MTU2_CH0_0),
+ PINMUX_IPSR_DATA(IP6_11_10, HIFD04),
+
+ PINMUX_IPSR_DATA(IP6_13_12, DU0_DR5),
+ PINMUX_IPSR_MODSEL_DATA(IP6_13_12, RTS0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_13_12, TIOC0B_A, SEL_MTU2_CH0_0),
+ PINMUX_IPSR_DATA(IP6_13_12, HIFD05),
+
+ PINMUX_IPSR_DATA(IP6_15_14, DU0_DR6),
+ PINMUX_IPSR_MODSEL_DATA(IP6_15_14, SCK1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_15_14, TIOC0C_A, SEL_MTU2_CH0_0),
+ PINMUX_IPSR_DATA(IP6_15_14, HIFD06),
+
+ PINMUX_IPSR_DATA(IP6_17_16, DU0_DR7),
+ PINMUX_IPSR_MODSEL_DATA(IP6_17_16, RX1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_17_16, TIOC0D_A, SEL_MTU2_CH0_0),
+ PINMUX_IPSR_DATA(IP6_17_16, HIFD07),
+
+ PINMUX_IPSR_DATA(IP6_20_18, DU0_DG0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_20_18, TX1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_20_18, HSCK0_D, SEL_HSCIF_3),
+ PINMUX_IPSR_MODSEL_DATA(IP6_20_18, IECLK_A, SEL_IEBUS_0),
+ PINMUX_IPSR_MODSEL_DATA(IP6_20_18, TIOC1A_A, SEL_MTU2_CH1_0),
+ PINMUX_IPSR_DATA(IP6_20_18, HIFD08),
+
+ PINMUX_IPSR_DATA(IP6_23_21, DU0_DG1),
+ PINMUX_IPSR_MODSEL_DATA(IP6_23_21, CTS1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP6_23_21, HRTS0_D, SEL_HSCIF_3),
+ PINMUX_IPSR_MODSEL_DATA(IP6_23_21, TIOC1B_A, SEL_MTU2_CH1_0),
+ PINMUX_IPSR_DATA(IP6_23_21, HIFD09),
+
+ /* IPSR7 */
+ PINMUX_IPSR_DATA(IP7_2_0, DU0_DG2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_2_0, RTS1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_2_0, RMII0_MDC_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_2_0, TIOC2A_A, SEL_MTU2_CH2_0),
+ PINMUX_IPSR_DATA(IP7_2_0, HIFD10),
+
+ PINMUX_IPSR_DATA(IP7_5_3, DU0_DG3),
+ PINMUX_IPSR_MODSEL_DATA(IP7_5_3, SCK2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_5_3, RMII0_MDIO_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_5_3, TIOC2B_A, SEL_MTU2_CH2_0),
+ PINMUX_IPSR_DATA(IP7_5_3, HIFD11),
+
+ PINMUX_IPSR_DATA(IP7_8_6, DU0_DG4),
+ PINMUX_IPSR_MODSEL_DATA(IP7_8_6, RX2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_8_6, RMII0_CRS_DV_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_8_6, TIOC3A_A, SEL_MTU2_CH3_0),
+ PINMUX_IPSR_DATA(IP7_8_6, HIFD12),
+
+ PINMUX_IPSR_DATA(IP7_11_9, DU0_DG5),
+ PINMUX_IPSR_MODSEL_DATA(IP7_11_9, TX2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_11_9, RMII0_RX_ER_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_11_9, TIOC3B_A, SEL_MTU2_CH3_0),
+ PINMUX_IPSR_DATA(IP7_11_9, HIFD13),
+
+ PINMUX_IPSR_DATA(IP7_14_12, DU0_DG6),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_12, RX3_C, SEL_SCIF3_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_12, RMII0_RXD0_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_14_12, TIOC3C_A, SEL_MTU2_CH3_0),
+ PINMUX_IPSR_DATA(IP7_14_12, HIFD14),
+
+ PINMUX_IPSR_DATA(IP7_17_15, DU0_DG7),
+ PINMUX_IPSR_MODSEL_DATA(IP7_17_15, TX3_C, SEL_SCIF3_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_17_15, RMII0_RXD1_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_17_15, TIOC3D_A, SEL_MTU2_CH3_0),
+ PINMUX_IPSR_DATA(IP7_17_15, HIFD15),
+
+ PINMUX_IPSR_DATA(IP7_20_18, DU0_DB0),
+ PINMUX_IPSR_MODSEL_DATA(IP7_20_18, RX4_C, SEL_SCIF4_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_20_18, RMII0_TXD_EN_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_20_18, TIOC4A_A, SEL_MTU2_CH4_0),
+ PINMUX_IPSR_DATA(IP7_20_18, HIFCS),
+
+ PINMUX_IPSR_DATA(IP7_23_21, DU0_DB1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_23_21, TX4_C, SEL_SCIF4_2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_23_21, RMII0_TXD0_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_23_21, TIOC4B_A, SEL_MTU2_CH4_0),
+ PINMUX_IPSR_DATA(IP7_23_21, HIFRS),
+
+ PINMUX_IPSR_DATA(IP7_26_24, DU0_DB2),
+ PINMUX_IPSR_MODSEL_DATA(IP7_26_24, RX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_26_24, RMII0_TXD1_B, SEL_RMII_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_26_24, TIOC4C_A, SEL_MTU2_CH4_0),
+ PINMUX_IPSR_DATA(IP7_26_24, HIFWR),
+
+ PINMUX_IPSR_DATA(IP7_28_27, DU0_DB3),
+ PINMUX_IPSR_MODSEL_DATA(IP7_28_27, TX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_MODSEL_DATA(IP7_28_27, TIOC4D_A, SEL_MTU2_CH4_0),
+ PINMUX_IPSR_DATA(IP7_28_27, HIFRD),
+
+ PINMUX_IPSR_DATA(IP7_30_29, DU0_DB4),
+ PINMUX_IPSR_DATA(IP7_30_29, HIFINT),
+
+ /* IPSR8 */
+ PINMUX_IPSR_DATA(IP8_1_0, DU0_DB5),
+ PINMUX_IPSR_DATA(IP8_1_0, HIFDREQ),
+
+ PINMUX_IPSR_DATA(IP8_3_2, DU0_DB6),
+ PINMUX_IPSR_DATA(IP8_3_2, HIFRDY),
+
+ PINMUX_IPSR_DATA(IP8_5_4, DU0_DB7),
+ PINMUX_IPSR_MODSEL_DATA(IP8_5_4, SSI_SCK0_B, SEL_SSI0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_5_4, HIFEBL_B, SEL_HIF_1),
+
+ PINMUX_IPSR_DATA(IP8_7_6, DU0_DOTCLKIN),
+ PINMUX_IPSR_MODSEL_DATA(IP8_7_6, HSPI_CS0_C, SEL_HSPI_2),
+ PINMUX_IPSR_MODSEL_DATA(IP8_7_6, SSI_WS0_B, SEL_SSI0_1),
+
+ PINMUX_IPSR_DATA(IP8_9_8, DU0_DOTCLKOUT),
+ PINMUX_IPSR_MODSEL_DATA(IP8_9_8, HSPI_CLK0_C, SEL_HSPI_2),
+ PINMUX_IPSR_MODSEL_DATA(IP8_9_8, SSI_SDATA0_B, SEL_SSI0_1),
+
+ PINMUX_IPSR_DATA(IP8_11_10, DU0_EXHSYNC_DU0_HSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP8_11_10, HSPI_TX0_C, SEL_HSPI_2),
+ PINMUX_IPSR_MODSEL_DATA(IP8_11_10, SSI_SCK1_B, SEL_SSI1_1),
+
+ PINMUX_IPSR_DATA(IP8_13_12, DU0_EXVSYNC_DU0_VSYNC),
+ PINMUX_IPSR_MODSEL_DATA(IP8_13_12, HSPI_RX0_C, SEL_HSPI_2),
+ PINMUX_IPSR_MODSEL_DATA(IP8_13_12, SSI_WS1_B, SEL_SSI1_1),
+
+ PINMUX_IPSR_DATA(IP8_15_14, DU0_EXODDF_DU0_ODDF),
+ PINMUX_IPSR_MODSEL_DATA(IP8_15_14, CAN0_RX_B, SEL_RCAN0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_15_14, HSCK0_B, SEL_HSCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_15_14, SSI_SDATA1_B, SEL_SSI1_1),
+
+ PINMUX_IPSR_DATA(IP8_17_16, DU0_DISP),
+ PINMUX_IPSR_MODSEL_DATA(IP8_17_16, CAN0_TX_B, SEL_RCAN0_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_17_16, HRX0_B, SEL_HSCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_17_16, AUDIO_CLKA_B, SEL_AUDIO_CLKA_1),
+
+ PINMUX_IPSR_DATA(IP8_19_18, DU0_CDE),
+ PINMUX_IPSR_MODSEL_DATA(IP8_19_18, HTX0_B, SEL_HSCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_19_18, AUDIO_CLKB_B, SEL_AUDIO_CLKB_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_19_18, LCD_VCPWC_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP8_22_20, IRQ0_A, SEL_INTC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_22_20, HSPI_TX_B, SEL_HSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_22_20, RX3_E, SEL_SCIF3_4),
+ PINMUX_IPSR_DATA(IP8_22_20, ET0_ERXD0),
+
+ PINMUX_IPSR_MODSEL_DATA(IP8_25_23, IRQ1_A, SEL_INTC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_25_23, HSPI_RX_B, SEL_HSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_25_23, TX3_E, SEL_SCIF3_4),
+ PINMUX_IPSR_DATA(IP8_25_23, ET0_ERXD1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_26, IRQ2_A, SEL_INTC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_26, CTS0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_26, HCTS0_B, SEL_HSCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_27_26, ET0_ERXD2_A, SEL_ET0_0),
+
+ PINMUX_IPSR_MODSEL_DATA(IP8_29_28, IRQ3_A, SEL_INTC_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_29_28, RTS0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP8_29_28, HRTS0_B, SEL_HSCIF_1),
+ PINMUX_IPSR_MODSEL_DATA(IP8_29_28, ET0_ERXD3_A, SEL_ET0_0),
+
+ /* IPSR9 */
+ PINMUX_IPSR_MODSEL_DATA(IP9_1_0, VI1_CLK_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_1_0, FD0_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_1_0, LCD_DATA0_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_3_2, VI1_0_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_3_2, FD1_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_3_2, LCD_DATA1_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_5_4, VI1_1_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_5_4, FD2_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_5_4, LCD_DATA2_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_7_6, VI1_2_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_7_6, FD3_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_7_6, LCD_DATA3_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_9_8, VI1_3_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_9_8, FD4_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_9_8, LCD_DATA4_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_11_10, VI1_4_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_11_10, FD5_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_11_10, LCD_DATA5_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_13_12, VI1_5_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_13_12, FD6_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_13_12, LCD_DATA6_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_14, VI1_6_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_14, FD7_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_14, LCD_DATA7_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_17_16, VI1_7_A, SEL_VIN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_17_16, FCE_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_17_16, LCD_DATA8_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_19_18, SSI_SCK0_A, SEL_SSI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_19_18, TIOC1A_B, SEL_MTU2_CH1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_19_18, LCD_DATA9_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_21_20, SSI_WS0_A, SEL_SSI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_21_20, TIOC1B_B, SEL_MTU2_CH1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_21_20, LCD_DATA10_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_23_22, SSI_SDATA0_A, SEL_SSI0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_23_22, VI1_0_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_23_22, TIOC2A_B, SEL_MTU2_CH2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_23_22, LCD_DATA11_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_25_24, SSI_SCK1_A, SEL_SSI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_25_24, VI1_1_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_25_24, TIOC2B_B, SEL_MTU2_CH2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_25_24, LCD_DATA12_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_27_26, SSI_WS1_A, SEL_SSI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_27_26, VI1_2_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_27_26, LCD_DATA13_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP9_29_28, SSI_SDATA1_A, SEL_SSI1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP9_29_28, VI1_3_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_29_28, LCD_DATA14_B, SEL_LCDC_1),
+
+ /* IPSR10 */
+ PINMUX_IPSR_DATA(IP10_2_0, SSI_SCK23),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, VI1_4_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, RX1_D, SEL_SCIF1_3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, FCLE_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_2_0, LCD_DATA15_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_DATA(IP10_5_3, SSI_WS23),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, VI1_5_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, TX1_D, SEL_SCIF1_3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, HSCK0_C, SEL_HSCIF_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, FALE_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_5_3, LCD_DON_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_DATA(IP10_8_6, SSI_SDATA2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, VI1_6_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, HRX0_C, SEL_HSCIF_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, FRE_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_8_6, LCD_CL1_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_DATA(IP10_11_9, SSI_SDATA3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, VI1_7_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, HTX0_C, SEL_HSCIF_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, FWE_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_11_9, LCD_CL2_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, AUDIO_CLKA_A, SEL_AUDIO_CLKA_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, VI1_CLK_B, SEL_VIN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SCK1_D, SEL_SCIF1_3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, IECLK_B, SEL_IEBUS_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_14_12, LCD_FLM_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_15, AUDIO_CLKB_A, SEL_AUDIO_CLKB_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_15, LCD_CLK_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_DATA(IP10_18_16, AUDIO_CLKC),
+ PINMUX_IPSR_MODSEL_DATA(IP10_18_16, SCK1_E, SEL_SCIF1_4),
+ PINMUX_IPSR_MODSEL_DATA(IP10_18_16, HCTS0_C, SEL_HSCIF_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_18_16, FRB_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_18_16, LCD_VEPWC_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_DATA(IP10_21_19, AUDIO_CLKOUT),
+ PINMUX_IPSR_MODSEL_DATA(IP10_21_19, TX1_E, SEL_SCIF1_4),
+ PINMUX_IPSR_MODSEL_DATA(IP10_21_19, HRTS0_C, SEL_HSCIF_2),
+ PINMUX_IPSR_MODSEL_DATA(IP10_21_19, FSE_B, SEL_FLCTL_1),
+ PINMUX_IPSR_MODSEL_DATA(IP10_21_19, LCD_M_DISP_B, SEL_LCDC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_22, CAN_CLK_A, SEL_RCAN_CLK_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_22, RX4_D, SEL_SCIF4_3),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_24_23, CAN0_TX_A, SEL_RCAN0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_24_23, TX4_D, SEL_SCIF4_3),
+ PINMUX_IPSR_DATA(IP10_24_23, MLB_CLK),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_25, CAN1_RX_A, SEL_RCAN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_25, IRQ1_B, SEL_INTC_1),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_27_26, CAN0_RX_A, SEL_RCAN0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_27_26, IRQ0_B, SEL_INTC_1),
+ PINMUX_IPSR_DATA(IP10_27_26, MLB_SIG),
+
+ PINMUX_IPSR_MODSEL_DATA(IP10_29_28, CAN1_TX_A, SEL_RCAN1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP10_29_28, TX5_C, SEL_SCIF5_2),
+ PINMUX_IPSR_DATA(IP10_29_28, MLB_DAT),
+
+ /* IPSR11 */
+ PINMUX_IPSR_DATA(IP11_0, SCL1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_0, SCIF_CLK_C, SEL_SCIF_CLK_2),
+
+ PINMUX_IPSR_DATA(IP11_1, SDA1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_1, RX1_E, SEL_SCIF1_4),
+
+ PINMUX_IPSR_DATA(IP11_2, SDA0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_2, HIFEBL_A, SEL_HIF_0),
+
+ PINMUX_IPSR_DATA(IP11_3, SDSELF),
+ PINMUX_IPSR_MODSEL_DATA(IP11_3, RTS1_E, SEL_SCIF1_3),
+
+ PINMUX_IPSR_MODSEL_DATA(IP11_6_4, SCIF_CLK_A, SEL_SCIF_CLK_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_6_4, HSPI_CLK_A, SEL_HSPI_0),
+ PINMUX_IPSR_DATA(IP11_6_4, VI0_CLK),
+ PINMUX_IPSR_MODSEL_DATA(IP11_6_4, RMII0_TXD0_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP11_6_4, ET0_ERXD4),
+
+ PINMUX_IPSR_MODSEL_DATA(IP11_9_7, SCK0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_9_7, HSPI_CS_A, SEL_HSPI_0),
+ PINMUX_IPSR_DATA(IP11_9_7, VI0_CLKENB),
+ PINMUX_IPSR_MODSEL_DATA(IP11_9_7, RMII0_TXD1_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP11_9_7, ET0_ERXD5),
+
+ PINMUX_IPSR_MODSEL_DATA(IP11_11_10, RX0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_11_10, HSPI_RX_A, SEL_HSPI_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_11_10, RMII0_RXD0_A, SEL_RMII_0),
+ PINMUX_IPSR_DATA(IP11_11_10, ET0_ERXD6),
+
+ PINMUX_IPSR_MODSEL_DATA(IP11_12, TX0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_12, HSPI_TX_A, SEL_HSPI_0),
+
+ PINMUX_IPSR_DATA(IP11_15_13, PENC1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_15_13, TX3_D, SEL_SCIF3_3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_15_13, CAN1_TX_B, SEL_RCAN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_15_13, TX5_D, SEL_SCIF5_3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_15_13, IETX_B, SEL_IEBUS_1),
+
+ PINMUX_IPSR_DATA(IP11_18_16, USB_OVC1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_18_16, RX3_D, SEL_SCIF3_3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_18_16, CAN1_RX_B, SEL_RCAN1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_18_16, RX5_D, SEL_SCIF5_3),
+ PINMUX_IPSR_MODSEL_DATA(IP11_18_16, IERX_B, SEL_IEBUS_1),
+
+ PINMUX_IPSR_DATA(IP11_20_19, DREQ0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_20_19, SD1_CLK_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP11_20_19, ET0_TX_EN),
+
+ PINMUX_IPSR_DATA(IP11_22_21, DACK0),
+ PINMUX_IPSR_MODSEL_DATA(IP11_22_21, SD1_DAT3_A, SEL_SDHI1_0),
+ PINMUX_IPSR_DATA(IP11_22_21, ET0_TX_ER),
+
+ PINMUX_IPSR_DATA(IP11_25_23, DREQ1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_25_23, HSPI_CLK_B, SEL_HSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_25_23, RX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_25_23, ET0_PHY_INT_C, SEL_ET0_CTL_2),
+ PINMUX_IPSR_MODSEL_DATA(IP11_25_23, ET0_TX_CLK_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP11_27_26, DACK1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_27_26, HSPI_CS_B, SEL_HSPI_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_27_26, TX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_27_26, ET0_RX_CLK_A, SEL_ET0_0),
+
+ PINMUX_IPSR_DATA(IP11_28, PRESETOUT),
+ PINMUX_IPSR_DATA(IP11_28, ST_CLKOUT),
+};
+
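+/*
+ * GPIO table: PINMUX_GPIO_GP_ALL() enumerates the plain GP_x_y port pins,
+ * while the GPIO_FN() entries expose each peripheral function listed in the
+ * mark table above as a function "GPIO".
+ */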
+static struct pinmux_gpio pinmux_gpios[] = {
+ PINMUX_GPIO_GP_ALL(),
+
+ GPIO_FN(CLKOUT), GPIO_FN(BS), GPIO_FN(CS0), GPIO_FN(EX_CS0),
+ GPIO_FN(RD), GPIO_FN(WE0), GPIO_FN(WE1),
+ GPIO_FN(SCL0), GPIO_FN(PENC0), GPIO_FN(USB_OVC0),
+ GPIO_FN(IRQ2_B), GPIO_FN(IRQ3_B),
+
+ /* IPSR0 */
+ GPIO_FN(A0), GPIO_FN(ST0_CLKIN), GPIO_FN(LCD_DATA0_A),
+ GPIO_FN(TCLKA_C),
+ GPIO_FN(A1), GPIO_FN(ST0_REQ), GPIO_FN(LCD_DATA1_A),
+ GPIO_FN(TCLKB_C),
+ GPIO_FN(A2), GPIO_FN(ST0_SYC), GPIO_FN(LCD_DATA2_A),
+ GPIO_FN(TCLKC_C),
+ GPIO_FN(A3), GPIO_FN(ST0_VLD), GPIO_FN(LCD_DATA3_A),
+ GPIO_FN(TCLKD_C),
+ GPIO_FN(A4), GPIO_FN(ST0_D0), GPIO_FN(LCD_DATA4_A),
+ GPIO_FN(TIOC0A_C),
+ GPIO_FN(A5), GPIO_FN(ST0_D1), GPIO_FN(LCD_DATA5_A),
+ GPIO_FN(TIOC0B_C),
+ GPIO_FN(A6), GPIO_FN(ST0_D2), GPIO_FN(LCD_DATA6_A),
+ GPIO_FN(TIOC0C_C),
+ GPIO_FN(A7), GPIO_FN(ST0_D3), GPIO_FN(LCD_DATA7_A),
+ GPIO_FN(TIOC0D_C),
+ GPIO_FN(A8), GPIO_FN(ST0_D4), GPIO_FN(LCD_DATA8_A),
+ GPIO_FN(TIOC1A_C),
+ GPIO_FN(A9), GPIO_FN(ST0_D5), GPIO_FN(LCD_DATA9_A),
+ GPIO_FN(TIOC1B_C),
+ GPIO_FN(A10), GPIO_FN(ST0_D6), GPIO_FN(LCD_DATA10_A),
+ GPIO_FN(TIOC2A_C),
+ GPIO_FN(A11), GPIO_FN(ST0_D7), GPIO_FN(LCD_DATA11_A),
+ GPIO_FN(TIOC2B_C),
+ GPIO_FN(A12), GPIO_FN(LCD_DATA12_A), GPIO_FN(TIOC3A_C),
+ GPIO_FN(A13), GPIO_FN(LCD_DATA13_A), GPIO_FN(TIOC3B_C),
+ GPIO_FN(A14), GPIO_FN(LCD_DATA14_A), GPIO_FN(TIOC3C_C),
+ GPIO_FN(A15), GPIO_FN(ST0_VCO_CLKIN), GPIO_FN(LCD_DATA15_A),
+ GPIO_FN(TIOC3D_C),
+
+ /* IPSR1 */
+ GPIO_FN(A16), GPIO_FN(ST0_PWM), GPIO_FN(LCD_DON_A),
+ GPIO_FN(TIOC4A_C),
+ GPIO_FN(A17), GPIO_FN(ST1_VCO_CLKIN), GPIO_FN(LCD_CL1_A),
+ GPIO_FN(TIOC4B_C),
+ GPIO_FN(A18), GPIO_FN(ST1_PWM), GPIO_FN(LCD_CL2_A),
+ GPIO_FN(TIOC4C_C),
+ GPIO_FN(A19), GPIO_FN(ST1_CLKIN), GPIO_FN(LCD_CLK_A),
+ GPIO_FN(TIOC4D_C),
+ GPIO_FN(A20), GPIO_FN(ST1_REQ), GPIO_FN(LCD_FLM_A),
+ GPIO_FN(A21), GPIO_FN(ST1_SYC), GPIO_FN(LCD_VCPWC_A),
+ GPIO_FN(A22), GPIO_FN(ST1_VLD), GPIO_FN(LCD_VEPWC_A),
+ GPIO_FN(A23), GPIO_FN(ST1_D0), GPIO_FN(LCD_M_DISP_A),
+ GPIO_FN(A24), GPIO_FN(RX2_D), GPIO_FN(ST1_D1),
+ GPIO_FN(A25), GPIO_FN(TX2_D), GPIO_FN(ST1_D2),
+ GPIO_FN(D0), GPIO_FN(SD0_DAT0_A), GPIO_FN(MMC_D0_A),
+ GPIO_FN(ST1_D3), GPIO_FN(FD0_A),
+ GPIO_FN(D1), GPIO_FN(SD0_DAT1_A), GPIO_FN(MMC_D1_A),
+ GPIO_FN(ST1_D4), GPIO_FN(FD1_A),
+ GPIO_FN(D2), GPIO_FN(SD0_DAT2_A), GPIO_FN(MMC_D2_A),
+ GPIO_FN(ST1_D5), GPIO_FN(FD2_A),
+ GPIO_FN(D3), GPIO_FN(SD0_DAT3_A), GPIO_FN(MMC_D3_A),
+ GPIO_FN(ST1_D6), GPIO_FN(FD3_A),
+
+ /* IPSR2 */
+ GPIO_FN(D4), GPIO_FN(SD0_CD_A), GPIO_FN(MMC_D4_A), GPIO_FN(ST1_D7),
+ GPIO_FN(FD4_A),
+ GPIO_FN(D5), GPIO_FN(SD0_WP_A), GPIO_FN(MMC_D5_A), GPIO_FN(FD5_A),
+ GPIO_FN(D6), GPIO_FN(RSPI_RSPCK_A), GPIO_FN(MMC_D6_A),
+ GPIO_FN(QSPCLK_A),
+ GPIO_FN(FD6_A),
+ GPIO_FN(D7), GPIO_FN(RSPI_SSL_A), GPIO_FN(MMC_D7_A), GPIO_FN(QSSL_A),
+ GPIO_FN(FD7_A),
+ GPIO_FN(D8), GPIO_FN(SD0_CLK_A), GPIO_FN(MMC_CLK_A), GPIO_FN(QIO2_A),
+ GPIO_FN(FCE_A), GPIO_FN(ET0_GTX_CLK_B),
+ GPIO_FN(D9), GPIO_FN(SD0_CMD_A), GPIO_FN(MMC_CMD_A), GPIO_FN(QIO3_A),
+ GPIO_FN(FCLE_A), GPIO_FN(ET0_ETXD1_B),
+ GPIO_FN(D10), GPIO_FN(RSPI_MOSI_A), GPIO_FN(QMO_QIO0_A),
+ GPIO_FN(FALE_A), GPIO_FN(ET0_ETXD2_B),
+ GPIO_FN(D11), GPIO_FN(RSPI_MISO_A), GPIO_FN(QMI_QIO1_A), GPIO_FN(FRE_A),
+ GPIO_FN(ET0_ETXD3_B),
+ GPIO_FN(D12), GPIO_FN(FWE_A), GPIO_FN(ET0_ETXD5_B),
+ GPIO_FN(D13), GPIO_FN(RX2_B), GPIO_FN(FRB_A), GPIO_FN(ET0_ETXD6_B),
+ GPIO_FN(D14), GPIO_FN(TX2_B), GPIO_FN(FSE_A), GPIO_FN(ET0_TX_CLK_B),
+
+ /* IPSR3 */
+ GPIO_FN(D15), GPIO_FN(SCK2_B),
+ GPIO_FN(CS1_A26), GPIO_FN(QIO3_B),
+ GPIO_FN(EX_CS1), GPIO_FN(RX3_B), GPIO_FN(ATACS0), GPIO_FN(QIO2_B),
+ GPIO_FN(ET0_ETXD0),
+ GPIO_FN(EX_CS2), GPIO_FN(TX3_B), GPIO_FN(ATACS1), GPIO_FN(QSPCLK_B),
+ GPIO_FN(ET0_GTX_CLK_A),
+ GPIO_FN(EX_CS3), GPIO_FN(SD1_CD_A), GPIO_FN(ATARD), GPIO_FN(QMO_QIO0_B),
+ GPIO_FN(ET0_ETXD1_A),
+ GPIO_FN(EX_CS4), GPIO_FN(SD1_WP_A), GPIO_FN(ATAWR), GPIO_FN(QMI_QIO1_B),
+ GPIO_FN(ET0_ETXD2_A),
+ GPIO_FN(EX_CS5), GPIO_FN(SD1_CMD_A), GPIO_FN(ATADIR), GPIO_FN(QSSL_B),
+ GPIO_FN(ET0_ETXD3_A),
+ GPIO_FN(RD_WR), GPIO_FN(TCLK0), GPIO_FN(CAN_CLK_B), GPIO_FN(ET0_ETXD4),
+ GPIO_FN(EX_WAIT0), GPIO_FN(TCLK1_B),
+ GPIO_FN(EX_WAIT1), GPIO_FN(SD1_DAT0_A), GPIO_FN(DREQ2),
+ GPIO_FN(CAN1_TX_C), GPIO_FN(ET0_LINK_C), GPIO_FN(ET0_ETXD5_A),
+ GPIO_FN(EX_WAIT2), GPIO_FN(SD1_DAT1_A), GPIO_FN(DACK2),
+ GPIO_FN(CAN1_RX_C), GPIO_FN(ET0_MAGIC_C), GPIO_FN(ET0_ETXD6_A),
+ GPIO_FN(DRACK0), GPIO_FN(SD1_DAT2_A), GPIO_FN(ATAG), GPIO_FN(TCLK1_A),
+ GPIO_FN(ET0_ETXD7),
+
+ /* IPSR4 */
+ GPIO_FN(HCTS0_A), GPIO_FN(CTS1_A), GPIO_FN(VI0_FIELD),
+ GPIO_FN(RMII0_RXD1_A), GPIO_FN(ET0_ERXD7),
+ GPIO_FN(HRTS0_A), GPIO_FN(RTS1_A), GPIO_FN(VI0_HSYNC),
+ GPIO_FN(RMII0_TXD_EN_A), GPIO_FN(ET0_RX_DV),
+ GPIO_FN(HSCK0_A), GPIO_FN(SCK1_A), GPIO_FN(VI0_VSYNC),
+ GPIO_FN(RMII0_RX_ER_A), GPIO_FN(ET0_RX_ER),
+ GPIO_FN(HRX0_A), GPIO_FN(RX1_A), GPIO_FN(VI0_DATA0_VI0_B0),
+ GPIO_FN(RMII0_CRS_DV_A), GPIO_FN(ET0_CRS),
+ GPIO_FN(HTX0_A), GPIO_FN(TX1_A), GPIO_FN(VI0_DATA1_VI0_B1),
+ GPIO_FN(RMII0_MDC_A), GPIO_FN(ET0_COL),
+ GPIO_FN(CTS0_B), GPIO_FN(VI0_DATA2_VI0_B2), GPIO_FN(RMII0_MDIO_A),
+ GPIO_FN(ET0_MDC),
+ GPIO_FN(RTS0_B), GPIO_FN(VI0_DATA3_VI0_B3), GPIO_FN(ET0_MDIO_A),
+ GPIO_FN(SCK1_B), GPIO_FN(VI0_DATA4_VI0_B4), GPIO_FN(ET0_LINK_A),
+ GPIO_FN(RX1_B), GPIO_FN(VI0_DATA5_VI0_B5), GPIO_FN(ET0_MAGIC_A),
+ GPIO_FN(TX1_B), GPIO_FN(VI0_DATA6_VI0_G0), GPIO_FN(ET0_PHY_INT_A),
+ GPIO_FN(CTS1_B), GPIO_FN(VI0_DATA7_VI0_G1),
+ GPIO_FN(RTS1_B), GPIO_FN(VI0_G2),
+ GPIO_FN(SCK2_A), GPIO_FN(VI0_G3),
+
+ /* IPSR5 */
+ GPIO_FN(REF50CK), GPIO_FN(CTS1_E), GPIO_FN(HCTS0_D),
+ GPIO_FN(REF125CK), GPIO_FN(ADTRG), GPIO_FN(RX5_C),
+ GPIO_FN(SD2_WP_A), GPIO_FN(TX5_A), GPIO_FN(VI0_R5),
+ GPIO_FN(SD2_CD_A), GPIO_FN(RX5_A), GPIO_FN(VI0_R4),
+ GPIO_FN(ET0_PHY_INT_B),
+ GPIO_FN(SD2_DAT3_A), GPIO_FN(TX4_A), GPIO_FN(VI0_R3),
+ GPIO_FN(ET0_MAGIC_B),
+ GPIO_FN(SD2_DAT2_A), GPIO_FN(RX4_A), GPIO_FN(VI0_R2),
+ GPIO_FN(ET0_LINK_B),
+ GPIO_FN(SD2_DAT1_A), GPIO_FN(TX3_A), GPIO_FN(VI0_R1),
+ GPIO_FN(ET0_MDIO_B),
+ GPIO_FN(SD2_DAT0_A), GPIO_FN(RX3_A), GPIO_FN(VI0_R0),
+ GPIO_FN(ET0_ERXD3_B),
+ GPIO_FN(SD2_CMD_A), GPIO_FN(TX2_A), GPIO_FN(VI0_G5),
+ GPIO_FN(ET0_ERXD2_B),
+ GPIO_FN(SD2_CLK_A), GPIO_FN(RX2_A), GPIO_FN(VI0_G4),
+ GPIO_FN(ET0_RX_CLK_B),
+
+ /* IPSR6 */
+ GPIO_FN(DU0_DG1), GPIO_FN(CTS1_C), GPIO_FN(HRTS0_D),
+ GPIO_FN(TIOC1B_A), GPIO_FN(HIFD09),
+ GPIO_FN(DU0_DG0), GPIO_FN(TX1_C), GPIO_FN(HSCK0_D),
+ GPIO_FN(IECLK_A), GPIO_FN(TIOC1A_A), GPIO_FN(HIFD08),
+ GPIO_FN(DU0_DR7), GPIO_FN(RX1_C), GPIO_FN(TIOC0D_A),
+ GPIO_FN(HIFD07),
+ GPIO_FN(DU0_DR6), GPIO_FN(SCK1_C), GPIO_FN(TIOC0C_A),
+ GPIO_FN(HIFD06),
+ GPIO_FN(DU0_DR5), GPIO_FN(RTS0_C), GPIO_FN(TIOC0B_A),
+ GPIO_FN(HIFD05),
+ GPIO_FN(DU0_DR4), GPIO_FN(CTS0_C), GPIO_FN(TIOC0A_A),
+ GPIO_FN(HIFD04),
+ GPIO_FN(DU0_DR3), GPIO_FN(TX0_B), GPIO_FN(TCLKD_A), GPIO_FN(HIFD03),
+ GPIO_FN(DU0_DR2), GPIO_FN(RX0_B), GPIO_FN(TCLKC_A), GPIO_FN(HIFD02),
+ GPIO_FN(DU0_DR1), GPIO_FN(SCK0_B), GPIO_FN(HTX0_D),
+ GPIO_FN(IERX_A), GPIO_FN(TCLKB_A), GPIO_FN(HIFD01),
+ GPIO_FN(DU0_DR0), GPIO_FN(SCIF_CLK_B), GPIO_FN(HRX0_D),
+ GPIO_FN(IETX_A), GPIO_FN(TCLKA_A), GPIO_FN(HIFD00),
+
+ /* IPSR7 */
+ GPIO_FN(DU0_DB4), GPIO_FN(HIFINT),
+ GPIO_FN(DU0_DB3), GPIO_FN(TX5_B), GPIO_FN(TIOC4D_A), GPIO_FN(HIFRD),
+ GPIO_FN(DU0_DB2), GPIO_FN(RX5_B), GPIO_FN(RMII0_TXD1_B),
+ GPIO_FN(TIOC4C_A), GPIO_FN(HIFWR),
+ GPIO_FN(DU0_DB1), GPIO_FN(TX4_C), GPIO_FN(RMII0_TXD0_B),
+ GPIO_FN(TIOC4B_A), GPIO_FN(HIFRS),
+ GPIO_FN(DU0_DB0), GPIO_FN(RX4_C), GPIO_FN(RMII0_TXD_EN_B),
+ GPIO_FN(TIOC4A_A), GPIO_FN(HIFCS),
+ GPIO_FN(DU0_DG7), GPIO_FN(TX3_C), GPIO_FN(RMII0_RXD1_B),
+ GPIO_FN(TIOC3D_A), GPIO_FN(HIFD15),
+ GPIO_FN(DU0_DG6), GPIO_FN(RX3_C), GPIO_FN(RMII0_RXD0_B),
+ GPIO_FN(TIOC3C_A), GPIO_FN(HIFD14),
+ GPIO_FN(DU0_DG5), GPIO_FN(TX2_C), GPIO_FN(RMII0_RX_ER_B),
+ GPIO_FN(TIOC3B_A), GPIO_FN(HIFD13),
+ GPIO_FN(DU0_DG4), GPIO_FN(RX2_C), GPIO_FN(RMII0_CRS_DV_B),
+ GPIO_FN(TIOC3A_A), GPIO_FN(HIFD12),
+ GPIO_FN(DU0_DG3), GPIO_FN(SCK2_C), GPIO_FN(RMII0_MDIO_B),
+ GPIO_FN(TIOC2B_A), GPIO_FN(HIFD11),
+ GPIO_FN(DU0_DG2), GPIO_FN(RTS1_C), GPIO_FN(RMII0_MDC_B),
+ GPIO_FN(TIOC2A_A), GPIO_FN(HIFD10),
+
+ /* IPSR8 */
+ GPIO_FN(IRQ3_A), GPIO_FN(RTS0_A), GPIO_FN(HRTS0_B),
+ GPIO_FN(ET0_ERXD3_A),
+ GPIO_FN(IRQ2_A), GPIO_FN(CTS0_A), GPIO_FN(HCTS0_B),
+ GPIO_FN(ET0_ERXD2_A),
+ GPIO_FN(IRQ1_A), GPIO_FN(HSPI_RX_B), GPIO_FN(TX3_E),
+ GPIO_FN(ET0_ERXD1),
+ GPIO_FN(IRQ0_A), GPIO_FN(HSPI_TX_B), GPIO_FN(RX3_E),
+ GPIO_FN(ET0_ERXD0),
+ GPIO_FN(DU0_CDE), GPIO_FN(HTX0_B), GPIO_FN(AUDIO_CLKB_B),
+ GPIO_FN(LCD_VCPWC_B),
+ GPIO_FN(DU0_DISP), GPIO_FN(CAN0_TX_B), GPIO_FN(HRX0_B),
+ GPIO_FN(AUDIO_CLKA_B),
+ GPIO_FN(DU0_EXODDF_DU0_ODDF), GPIO_FN(CAN0_RX_B), GPIO_FN(HSCK0_B),
+ GPIO_FN(SSI_SDATA1_B),
+ GPIO_FN(DU0_EXVSYNC_DU0_VSYNC), GPIO_FN(HSPI_RX0_C),
+ GPIO_FN(SSI_WS1_B),
+ GPIO_FN(DU0_EXHSYNC_DU0_HSYNC), GPIO_FN(HSPI_TX0_C),
+ GPIO_FN(SSI_SCK1_B),
+ GPIO_FN(DU0_DOTCLKOUT), GPIO_FN(HSPI_CLK0_C),
+ GPIO_FN(SSI_SDATA0_B),
+ GPIO_FN(DU0_DOTCLKIN), GPIO_FN(HSPI_CS0_C),
+ GPIO_FN(SSI_WS0_B),
+ GPIO_FN(DU0_DB7), GPIO_FN(SSI_SCK0_B), GPIO_FN(HIFEBL_B),
+ GPIO_FN(DU0_DB6), GPIO_FN(HIFRDY),
+ GPIO_FN(DU0_DB5), GPIO_FN(HIFDREQ),
+
+ /* IPSR9 */
+ GPIO_FN(SSI_SDATA1_A), GPIO_FN(VI1_3_B), GPIO_FN(LCD_DATA14_B),
+ GPIO_FN(SSI_WS1_A), GPIO_FN(VI1_2_B), GPIO_FN(LCD_DATA13_B),
+ GPIO_FN(SSI_SCK1_A), GPIO_FN(VI1_1_B), GPIO_FN(TIOC2B_B),
+ GPIO_FN(LCD_DATA12_B),
+ GPIO_FN(SSI_SDATA0_A), GPIO_FN(VI1_0_B), GPIO_FN(TIOC2A_B),
+ GPIO_FN(LCD_DATA11_B),
+ GPIO_FN(SSI_WS0_A), GPIO_FN(TIOC1B_B), GPIO_FN(LCD_DATA10_B),
+ GPIO_FN(SSI_SCK0_A), GPIO_FN(TIOC1A_B), GPIO_FN(LCD_DATA9_B),
+ GPIO_FN(VI1_7_A), GPIO_FN(FCE_B), GPIO_FN(LCD_DATA8_B),
+ GPIO_FN(VI1_6_A), GPIO_FN(FD7_B), GPIO_FN(LCD_DATA7_B),
+ GPIO_FN(VI1_5_A), GPIO_FN(FD6_B), GPIO_FN(LCD_DATA6_B),
+ GPIO_FN(VI1_4_A), GPIO_FN(FD5_B), GPIO_FN(LCD_DATA5_B),
+ GPIO_FN(VI1_3_A), GPIO_FN(FD4_B), GPIO_FN(LCD_DATA4_B),
+ GPIO_FN(VI1_2_A), GPIO_FN(FD3_B), GPIO_FN(LCD_DATA3_B),
+ GPIO_FN(VI1_1_A), GPIO_FN(FD2_B), GPIO_FN(LCD_DATA2_B),
+ GPIO_FN(VI1_0_A), GPIO_FN(FD1_B), GPIO_FN(LCD_DATA1_B),
+ GPIO_FN(VI1_CLK_A), GPIO_FN(FD0_B), GPIO_FN(LCD_DATA0_B),
+
+ /* IPSR10 */
+ GPIO_FN(CAN1_TX_A), GPIO_FN(TX5_C), GPIO_FN(MLB_DAT),
+ GPIO_FN(CAN0_RX_A), GPIO_FN(IRQ0_B), GPIO_FN(MLB_SIG),
+ GPIO_FN(CAN1_RX_A), GPIO_FN(IRQ1_B),
+ GPIO_FN(CAN0_TX_A), GPIO_FN(TX4_D), GPIO_FN(MLB_CLK),
+ GPIO_FN(CAN_CLK_A), GPIO_FN(RX4_D),
+ GPIO_FN(AUDIO_CLKOUT), GPIO_FN(TX1_E), GPIO_FN(HRTS0_C),
+ GPIO_FN(FSE_B), GPIO_FN(LCD_M_DISP_B),
+ GPIO_FN(AUDIO_CLKC), GPIO_FN(SCK1_E), GPIO_FN(HCTS0_C),
+ GPIO_FN(FRB_B), GPIO_FN(LCD_VEPWC_B),
+ GPIO_FN(AUDIO_CLKB_A), GPIO_FN(LCD_CLK_B),
+ GPIO_FN(AUDIO_CLKA_A), GPIO_FN(VI1_CLK_B), GPIO_FN(SCK1_D),
+ GPIO_FN(IECLK_B), GPIO_FN(LCD_FLM_B),
+ GPIO_FN(SSI_SDATA3), GPIO_FN(VI1_7_B), GPIO_FN(HTX0_C),
+ GPIO_FN(FWE_B), GPIO_FN(LCD_CL2_B),
+ GPIO_FN(SSI_SDATA2), GPIO_FN(VI1_6_B), GPIO_FN(HRX0_C),
+ GPIO_FN(FRE_B), GPIO_FN(LCD_CL1_B),
+ GPIO_FN(SSI_WS23), GPIO_FN(VI1_5_B), GPIO_FN(TX1_D),
+ GPIO_FN(HSCK0_C), GPIO_FN(FALE_B), GPIO_FN(LCD_DON_B),
+ GPIO_FN(SSI_SCK23), GPIO_FN(VI1_4_B), GPIO_FN(RX1_D),
+ GPIO_FN(FCLE_B), GPIO_FN(LCD_DATA15_B),
+
+ /* IPSR11 */
+ GPIO_FN(PRESETOUT), GPIO_FN(ST_CLKOUT),
+ GPIO_FN(DACK1), GPIO_FN(HSPI_CS_B), GPIO_FN(TX4_B),
+ GPIO_FN(ET0_RX_CLK_A),
+ GPIO_FN(DREQ1), GPIO_FN(HSPI_CLK_B), GPIO_FN(RX4_B),
+ GPIO_FN(ET0_PHY_INT_C), GPIO_FN(ET0_TX_CLK_A),
+ GPIO_FN(DACK0), GPIO_FN(SD1_DAT3_A), GPIO_FN(ET0_TX_ER),
+ GPIO_FN(DREQ0), GPIO_FN(SD1_CLK_A), GPIO_FN(ET0_TX_EN),
+ GPIO_FN(USB_OVC1), GPIO_FN(RX3_D), GPIO_FN(CAN1_RX_B),
+ GPIO_FN(RX5_D), GPIO_FN(IERX_B),
+ GPIO_FN(PENC1), GPIO_FN(TX3_D), GPIO_FN(CAN1_TX_B),
+ GPIO_FN(TX5_D), GPIO_FN(IETX_B),
+ GPIO_FN(TX0_A), GPIO_FN(HSPI_TX_A),
+ GPIO_FN(RX0_A), GPIO_FN(HSPI_RX_A), GPIO_FN(RMII0_RXD0_A),
+ GPIO_FN(ET0_ERXD6),
+ GPIO_FN(SCK0_A), GPIO_FN(HSPI_CS_A), GPIO_FN(VI0_CLKENB),
+ GPIO_FN(RMII0_TXD1_A), GPIO_FN(ET0_ERXD5),
+ GPIO_FN(SCIF_CLK_A), GPIO_FN(HSPI_CLK_A), GPIO_FN(VI0_CLK),
+ GPIO_FN(RMII0_TXD0_A), GPIO_FN(ET0_ERXD4),
+ GPIO_FN(SDSELF), GPIO_FN(RTS1_E),
+ GPIO_FN(SDA0), GPIO_FN(HIFEBL_A),
+ GPIO_FN(SDA1), GPIO_FN(RX1_E),
+ GPIO_FN(SCL1), GPIO_FN(SCIF_CLK_C),
+};
+
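+/*
+ * Configuration registers: each 1-bit GPSR0-5 field chooses, per pin, between
+ * the GPIO function (GP_x_y_FN) and the peripheral function encoded in the
+ * corresponding IPSRn field; IPSR0-11 pick the peripheral function itself,
+ * and MOD_SEL1/2 switch between the _A/_B/_C/... pin-out variants referenced
+ * by the SEL_* arguments in the mark table above.
+ */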
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("GPSR0", 0xFFFC0004, 32, 1) {
+ GP_0_31_FN, FN_IP2_2_0,
+ GP_0_30_FN, FN_IP1_31_29,
+ GP_0_29_FN, FN_IP1_28_26,
+ GP_0_28_FN, FN_IP1_25_23,
+ GP_0_27_FN, FN_IP1_22_20,
+ GP_0_26_FN, FN_IP1_19_18,
+ GP_0_25_FN, FN_IP1_17_16,
+ GP_0_24_FN, FN_IP0_5_4,
+ GP_0_23_FN, FN_IP0_3_2,
+ GP_0_22_FN, FN_IP0_1_0,
+ GP_0_21_FN, FN_IP11_28,
+ GP_0_20_FN, FN_IP1_7_6,
+ GP_0_19_FN, FN_IP1_5_4,
+ GP_0_18_FN, FN_IP1_3_2,
+ GP_0_17_FN, FN_IP1_1_0,
+ GP_0_16_FN, FN_IP0_31_30,
+ GP_0_15_FN, FN_IP0_29_28,
+ GP_0_14_FN, FN_IP0_27_26,
+ GP_0_13_FN, FN_IP0_25_24,
+ GP_0_12_FN, FN_IP0_23_22,
+ GP_0_11_FN, FN_IP0_21_20,
+ GP_0_10_FN, FN_IP0_19_18,
+ GP_0_9_FN, FN_IP0_17_16,
+ GP_0_8_FN, FN_IP0_15_14,
+ GP_0_7_FN, FN_IP0_13_12,
+ GP_0_6_FN, FN_IP0_11_10,
+ GP_0_5_FN, FN_IP0_9_8,
+ GP_0_4_FN, FN_IP0_7_6,
+ GP_0_3_FN, FN_IP1_15_14,
+ GP_0_2_FN, FN_IP1_13_12,
+ GP_0_1_FN, FN_IP1_11_10,
+ GP_0_0_FN, FN_IP1_9_8 }
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xFFFC0008, 32, 1) {
+ GP_1_31_FN, FN_IP11_25_23,
+ GP_1_30_FN, FN_IP2_13_11,
+ GP_1_29_FN, FN_IP2_10_8,
+ GP_1_28_FN, FN_IP2_7_5,
+ GP_1_27_FN, FN_IP3_26_24,
+ GP_1_26_FN, FN_IP3_23_21,
+ GP_1_25_FN, FN_IP2_4_3,
+ GP_1_24_FN, FN_WE1,
+ GP_1_23_FN, FN_WE0,
+ GP_1_22_FN, FN_IP3_19_18,
+ GP_1_21_FN, FN_RD,
+ GP_1_20_FN, FN_IP3_17_15,
+ GP_1_19_FN, FN_IP3_14_12,
+ GP_1_18_FN, FN_IP3_11_9,
+ GP_1_17_FN, FN_IP3_8_6,
+ GP_1_16_FN, FN_IP3_5_3,
+ GP_1_15_FN, FN_EX_CS0,
+ GP_1_14_FN, FN_IP3_2,
+ GP_1_13_FN, FN_CS0,
+ GP_1_12_FN, FN_BS,
+ GP_1_11_FN, FN_CLKOUT,
+ GP_1_10_FN, FN_IP3_1_0,
+ GP_1_9_FN, FN_IP2_30_28,
+ GP_1_8_FN, FN_IP2_27_25,
+ GP_1_7_FN, FN_IP2_24_23,
+ GP_1_6_FN, FN_IP2_22_20,
+ GP_1_5_FN, FN_IP2_19_17,
+ GP_1_4_FN, FN_IP2_16_14,
+ GP_1_3_FN, FN_IP11_22_21,
+ GP_1_2_FN, FN_IP11_20_19,
+ GP_1_1_FN, FN_IP3_29_27,
+ GP_1_0_FN, FN_IP3_20 }
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xFFFC000C, 32, 1) {
+ GP_2_31_FN, FN_IP4_31_30,
+ GP_2_30_FN, FN_IP5_2_0,
+ GP_2_29_FN, FN_IP5_5_3,
+ GP_2_28_FN, FN_IP5_8_6,
+ GP_2_27_FN, FN_IP5_11_9,
+ GP_2_26_FN, FN_IP5_14_12,
+ GP_2_25_FN, FN_IP5_17_15,
+ GP_2_24_FN, FN_IP5_20_18,
+ GP_2_23_FN, FN_IP5_22_21,
+ GP_2_22_FN, FN_IP5_24_23,
+ GP_2_21_FN, FN_IP5_26_25,
+ GP_2_20_FN, FN_IP4_29_28,
+ GP_2_19_FN, FN_IP4_27_26,
+ GP_2_18_FN, FN_IP4_25_24,
+ GP_2_17_FN, FN_IP4_23_22,
+ GP_2_16_FN, FN_IP4_21_20,
+ GP_2_15_FN, FN_IP4_19_18,
+ GP_2_14_FN, FN_IP4_17_15,
+ GP_2_13_FN, FN_IP4_14_12,
+ GP_2_12_FN, FN_IP4_11_9,
+ GP_2_11_FN, FN_IP4_8_6,
+ GP_2_10_FN, FN_IP4_5_3,
+ GP_2_9_FN, FN_IP8_27_26,
+ GP_2_8_FN, FN_IP11_12,
+ GP_2_7_FN, FN_IP8_25_23,
+ GP_2_6_FN, FN_IP8_22_20,
+ GP_2_5_FN, FN_IP11_27_26,
+ GP_2_4_FN, FN_IP8_29_28,
+ GP_2_3_FN, FN_IP4_2_0,
+ GP_2_2_FN, FN_IP11_11_10,
+ GP_2_1_FN, FN_IP11_9_7,
+ GP_2_0_FN, FN_IP11_6_4 }
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xFFFC0010, 32, 1) {
+ GP_3_31_FN, FN_IP9_1_0,
+ GP_3_30_FN, FN_IP8_19_18,
+ GP_3_29_FN, FN_IP8_17_16,
+ GP_3_28_FN, FN_IP8_15_14,
+ GP_3_27_FN, FN_IP8_13_12,
+ GP_3_26_FN, FN_IP8_11_10,
+ GP_3_25_FN, FN_IP8_9_8,
+ GP_3_24_FN, FN_IP8_7_6,
+ GP_3_23_FN, FN_IP8_5_4,
+ GP_3_22_FN, FN_IP8_3_2,
+ GP_3_21_FN, FN_IP8_1_0,
+ GP_3_20_FN, FN_IP7_30_29,
+ GP_3_19_FN, FN_IP7_28_27,
+ GP_3_18_FN, FN_IP7_26_24,
+ GP_3_17_FN, FN_IP7_23_21,
+ GP_3_16_FN, FN_IP7_20_18,
+ GP_3_15_FN, FN_IP7_17_15,
+ GP_3_14_FN, FN_IP7_14_12,
+ GP_3_13_FN, FN_IP7_11_9,
+ GP_3_12_FN, FN_IP7_8_6,
+ GP_3_11_FN, FN_IP7_5_3,
+ GP_3_10_FN, FN_IP7_2_0,
+ GP_3_9_FN, FN_IP6_23_21,
+ GP_3_8_FN, FN_IP6_20_18,
+ GP_3_7_FN, FN_IP6_17_16,
+ GP_3_6_FN, FN_IP6_15_14,
+ GP_3_5_FN, FN_IP6_13_12,
+ GP_3_4_FN, FN_IP6_11_10,
+ GP_3_3_FN, FN_IP6_9_8,
+ GP_3_2_FN, FN_IP6_7_6,
+ GP_3_1_FN, FN_IP6_5_3,
+ GP_3_0_FN, FN_IP6_2_0 }
+ },
+
+ { PINMUX_CFG_REG("GPSR4", 0xFFFC0014, 32, 1) {
+ GP_4_31_FN, FN_IP10_24_23,
+ GP_4_30_FN, FN_IP10_22,
+ GP_4_29_FN, FN_IP11_18_16,
+ GP_4_28_FN, FN_USB_OVC0,
+ GP_4_27_FN, FN_IP11_15_13,
+ GP_4_26_FN, FN_PENC0,
+ GP_4_25_FN, FN_IP11_2,
+ GP_4_24_FN, FN_SCL0,
+ GP_4_23_FN, FN_IP11_1,
+ GP_4_22_FN, FN_IP11_0,
+ GP_4_21_FN, FN_IP10_21_19,
+ GP_4_20_FN, FN_IP10_18_16,
+ GP_4_19_FN, FN_IP10_15,
+ GP_4_18_FN, FN_IP10_14_12,
+ GP_4_17_FN, FN_IP10_11_9,
+ GP_4_16_FN, FN_IP10_8_6,
+ GP_4_15_FN, FN_IP10_5_3,
+ GP_4_14_FN, FN_IP10_2_0,
+ GP_4_13_FN, FN_IP9_29_28,
+ GP_4_12_FN, FN_IP9_27_26,
+ GP_4_11_FN, FN_IP9_9_8,
+ GP_4_10_FN, FN_IP9_7_6,
+ GP_4_9_FN, FN_IP9_5_4,
+ GP_4_8_FN, FN_IP9_3_2,
+ GP_4_7_FN, FN_IP9_17_16,
+ GP_4_6_FN, FN_IP9_15_14,
+ GP_4_5_FN, FN_IP9_13_12,
+ GP_4_4_FN, FN_IP9_11_10,
+ GP_4_3_FN, FN_IP9_25_24,
+ GP_4_2_FN, FN_IP9_23_22,
+ GP_4_1_FN, FN_IP9_21_20,
+ GP_4_0_FN, FN_IP9_19_18 }
+ },
+ { PINMUX_CFG_REG("GPSR5", 0xFFFC0018, 32, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 31 - 28 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 27 - 24 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 23 - 20 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 19 - 16 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 15 - 12 */
+ GP_5_11_FN, FN_IP10_29_28,
+ GP_5_10_FN, FN_IP10_27_26,
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 9 - 6 */
+ 0, 0, 0, 0, /* 5, 4 */
+ GP_5_3_FN, FN_IRQ3_B,
+ GP_5_2_FN, FN_IRQ2_B,
+ GP_5_1_FN, FN_IP11_3,
+ GP_5_0_FN, FN_IP10_25 }
+ },
+
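+ /*
+ * The PINMUX_CFG_REG_VAR() entries below use variable-width fields: the
+ * width list after the register size runs from the most significant field
+ * down, and each field is followed by 2^width enum IDs, with 0 marking a
+ * reserved value.
+ */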
+ { PINMUX_CFG_REG_VAR("IPSR0", 0xFFFC001C, 32,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2) {
+ /* IP0_31_30 [2] */
+ FN_A15, FN_ST0_VCO_CLKIN, FN_LCD_DATA15_A,
+ FN_TIOC3D_C,
+ /* IP0_29_28 [2] */
+ FN_A14, FN_LCD_DATA14_A, FN_TIOC3C_C, 0,
+ /* IP0_27_26 [2] */
+ FN_A13, FN_LCD_DATA13_A, FN_TIOC3B_C, 0,
+ /* IP0_25_24 [2] */
+ FN_A12, FN_LCD_DATA12_A, FN_TIOC3A_C, 0,
+ /* IP0_23_22 [2] */
+ FN_A11, FN_ST0_D7, FN_LCD_DATA11_A, FN_TIOC2B_C,
+ /* IP0_21_20 [2] */
+ FN_A10, FN_ST0_D6, FN_LCD_DATA10_A, FN_TIOC2A_C,
+ /* IP0_19_18 [2] */
+ FN_A9, FN_ST0_D5, FN_LCD_DATA9_A, FN_TIOC1B_C,
+ /* IP0_17_16 [2] */
+ FN_A8, FN_ST0_D4, FN_LCD_DATA8_A, FN_TIOC1A_C,
+ /* IP0_15_14 [2] */
+ FN_A7, FN_ST0_D3, FN_LCD_DATA7_A, FN_TIOC0D_C,
+ /* IP0_13_12 [2] */
+ FN_A6, FN_ST0_D2, FN_LCD_DATA6_A, FN_TIOC0C_C,
+ /* IP0_11_10 [2] */
+ FN_A5, FN_ST0_D1, FN_LCD_DATA5_A, FN_TIOC0B_C,
+ /* IP0_9_8 [2] */
+ FN_A4, FN_ST0_D0, FN_LCD_DATA4_A, FN_TIOC0A_C,
+ /* IP0_7_6 [2] */
+ FN_A3, FN_ST0_VLD, FN_LCD_DATA3_A, FN_TCLKD_C,
+ /* IP0_5_4 [2] */
+ FN_A2, FN_ST0_SYC, FN_LCD_DATA2_A, FN_TCLKC_C,
+ /* IP0_3_2 [2] */
+ FN_A1, FN_ST0_REQ, FN_LCD_DATA1_A, FN_TCLKB_C,
+ /* IP0_1_0 [2] */
+ FN_A0, FN_ST0_CLKIN, FN_LCD_DATA0_A, FN_TCLKA_C }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR1", 0xFFFC0020, 32,
+ 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2) {
+ /* IP1_31_29 [3] */
+ FN_D3, FN_SD0_DAT3_A, FN_MMC_D3_A, FN_ST1_D6,
+ FN_FD3_A, 0, 0, 0,
+ /* IP1_28_26 [3] */
+ FN_D2, FN_SD0_DAT2_A, FN_MMC_D2_A, FN_ST1_D5,
+ FN_FD2_A, 0, 0, 0,
+ /* IP1_25_23 [3] */
+ FN_D1, FN_SD0_DAT1_A, FN_MMC_D1_A, FN_ST1_D4,
+ FN_FD1_A, 0, 0, 0,
+ /* IP1_22_20 [3] */
+ FN_D0, FN_SD0_DAT0_A, FN_MMC_D0_A, FN_ST1_D3,
+ FN_FD0_A, 0, 0, 0,
+ /* IP1_19_18 [2] */
+ FN_A25, FN_TX2_D, FN_ST1_D2, 0,
+ /* IP1_17_16 [2] */
+ FN_A24, FN_RX2_D, FN_ST1_D1, 0,
+ /* IP1_15_14 [2] */
+ FN_A23, FN_ST1_D0, FN_LCD_M_DISP_A, 0,
+ /* IP1_13_12 [2] */
+ FN_A22, FN_ST1_VLD, FN_LCD_VEPWC_A, 0,
+ /* IP1_11_10 [2] */
+ FN_A21, FN_ST1_SYC, FN_LCD_VCPWC_A, 0,
+ /* IP1_9_8 [2] */
+ FN_A20, FN_ST1_REQ, FN_LCD_FLM_A, 0,
+ /* IP1_7_6 [2] */
+ FN_A19, FN_ST1_CLKIN, FN_LCD_CLK_A, FN_TIOC4D_C,
+ /* IP1_5_4 [2] */
+ FN_A18, FN_ST1_PWM, FN_LCD_CL2_A, FN_TIOC4C_C,
+ /* IP1_3_2 [2] */
+ FN_A17, FN_ST1_VCO_CLKIN, FN_LCD_CL1_A, FN_TIOC4B_C,
+ /* IP1_1_0 [2] */
+ FN_A16, FN_ST0_PWM, FN_LCD_DON_A, FN_TIOC4A_C }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR2", 0xFFFC0024, 32,
+ 1, 3, 3, 2, 3, 3, 3, 3, 3, 3, 2, 3) {
+ /* IP2_31 [1] */
+ 0, 0,
+ /* IP2_30_28 [3] */
+ FN_D14, FN_TX2_B, 0, FN_FSE_A,
+ FN_ET0_TX_CLK_B, 0, 0, 0,
+ /* IP2_27_25 [3] */
+ FN_D13, FN_RX2_B, 0, FN_FRB_A,
+ FN_ET0_ETXD6_B, 0, 0, 0,
+ /* IP2_24_23 [2] */
+ FN_D12, 0, FN_FWE_A, FN_ET0_ETXD5_B,
+ /* IP2_22_20 [3] */
+ FN_D11, FN_RSPI_MISO_A, 0, FN_QMI_QIO1_A,
+ FN_FRE_A, FN_ET0_ETXD3_B, 0, 0,
+ /* IP2_19_17 [3] */
+ FN_D10, FN_RSPI_MOSI_A, 0, FN_QMO_QIO0_A,
+ FN_FALE_A, FN_ET0_ETXD2_B, 0, 0,
+ /* IP2_16_14 [3] */
+ FN_D9, FN_SD0_CMD_A, FN_MMC_CMD_A, FN_QIO3_A,
+ FN_FCLE_A, FN_ET0_ETXD1_B, 0, 0,
+ /* IP2_13_11 [3] */
+ FN_D8, FN_SD0_CLK_A, FN_MMC_CLK_A, FN_QIO2_A,
+ FN_FCE_A, FN_ET0_GTX_CLK_B, 0, 0,
+ /* IP2_10_8 [3] */
+ FN_D7, FN_RSPI_SSL_A, FN_MMC_D7_A, FN_QSSL_A,
+ FN_FD7_A, 0, 0, 0,
+ /* IP2_7_5 [3] */
+ FN_D6, FN_RSPI_RSPCK_A, FN_MMC_D6_A, FN_QSPCLK_A,
+ FN_FD6_A, 0, 0, 0,
+ /* IP2_4_3 [2] */
+ FN_D5, FN_SD0_WP_A, FN_MMC_D5_A, FN_FD5_A,
+ /* IP2_2_0 [3] */
+ FN_D4, FN_SD0_CD_A, FN_MMC_D4_A, FN_ST1_D7,
+ FN_FD4_A, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR3", 0xFFFC0028, 32,
+ 2, 3, 3, 3, 1, 2, 3, 3, 3, 3, 3, 1, 2) {
+ /* IP3_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP3_29_27 [3] */
+ FN_DRACK0, FN_SD1_DAT2_A, FN_ATAG, FN_TCLK1_A,
+ FN_ET0_ETXD7, 0, 0, 0,
+ /* IP3_26_24 [3] */
+ FN_EX_WAIT2, FN_SD1_DAT1_A, FN_DACK2, FN_CAN1_RX_C,
+ FN_ET0_MAGIC_C, FN_ET0_ETXD6_A, 0, 0,
+ /* IP3_23_21 [3] */
+ FN_EX_WAIT1, FN_SD1_DAT0_A, FN_DREQ2, FN_CAN1_TX_C,
+ FN_ET0_LINK_C, FN_ET0_ETXD5_A, 0, 0,
+ /* IP3_20 [1] */
+ FN_EX_WAIT0, FN_TCLK1_B,
+ /* IP3_19_18 [2] */
+ FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4,
+ /* IP3_17_15 [3] */
+ FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B,
+ FN_ET0_ETXD3_A, 0, 0, 0,
+ /* IP3_14_12 [3] */
+ FN_EX_CS4, FN_SD1_WP_A, FN_ATAWR, FN_QMI_QIO1_B,
+ FN_ET0_ETXD2_A, 0, 0, 0,
+ /* IP3_11_9 [3] */
+ FN_EX_CS3, FN_SD1_CD_A, FN_ATARD, FN_QMO_QIO0_B,
+ FN_ET0_ETXD1_A, 0, 0, 0,
+ /* IP3_8_6 [3] */
+ FN_EX_CS2, FN_TX3_B, FN_ATACS1, FN_QSPCLK_B,
+ FN_ET0_GTX_CLK_A, 0, 0, 0,
+ /* IP3_5_3 [3] */
+ FN_EX_CS1, FN_RX3_B, FN_ATACS0, FN_QIO2_B,
+ FN_ET0_ETXD0, 0, 0, 0,
+ /* IP3_2 [1] */
+ FN_CS1_A26, FN_QIO3_B,
+ /* IP3_1_0 [2] */
+ FN_D15, FN_SCK2_B, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR4", 0xFFFC002C, 32,
+ 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3) {
+ /* IP4_31_30 [2] */
+ 0, FN_SCK2_A, FN_VI0_G3, 0,
+ /* IP4_29_28 [2] */
+ 0, FN_RTS1_B, FN_VI0_G2, 0,
+ /* IP4_27_26 [2] */
+ 0, FN_CTS1_B, FN_VI0_DATA7_VI0_G1, 0,
+ /* IP4_25_24 [2] */
+ 0, FN_TX1_B, FN_VI0_DATA6_VI0_G0, FN_ET0_PHY_INT_A,
+ /* IP4_23_22 [2] */
+ 0, FN_RX1_B, FN_VI0_DATA5_VI0_B5, FN_ET0_MAGIC_A,
+ /* IP4_21_20 [2] */
+ 0, FN_SCK1_B, FN_VI0_DATA4_VI0_B4, FN_ET0_LINK_A,
+ /* IP4_19_18 [2] */
+ 0, FN_RTS0_B, FN_VI0_DATA3_VI0_B3, FN_ET0_MDIO_A,
+ /* IP4_17_15 [3] */
+ 0, FN_CTS0_B, FN_VI0_DATA2_VI0_B2, FN_RMII0_MDIO_A,
+ FN_ET0_MDC, 0, 0, 0,
+ /* IP4_14_12 [3] */
+ FN_HTX0_A, FN_TX1_A, FN_VI0_DATA1_VI0_B1, FN_RMII0_MDC_A,
+ FN_ET0_COL, 0, 0, 0,
+ /* IP4_11_9 [3] */
+ FN_HRX0_A, FN_RX1_A, FN_VI0_DATA0_VI0_B0, FN_RMII0_CRS_DV_A,
+ FN_ET0_CRS, 0, 0, 0,
+ /* IP4_8_6 [3] */
+ FN_HSCK0_A, FN_SCK1_A, FN_VI0_VSYNC, FN_RMII0_RX_ER_A,
+ FN_ET0_RX_ER, 0, 0, 0,
+ /* IP4_5_3 [3] */
+ FN_HRTS0_A, FN_RTS1_A, FN_VI0_HSYNC, FN_RMII0_TXD_EN_A,
+ FN_ET0_RX_DV, 0, 0, 0,
+ /* IP4_2_0 [3] */
+ FN_HCTS0_A, FN_CTS1_A, FN_VI0_FIELD, FN_RMII0_RXD1_A,
+ FN_ET0_ERXD7, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR5", 0xFFFC0030, 32,
+ 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3) {
+ /* IP5_31 [1] */
+ 0, 0,
+ /* IP5_30 [1] */
+ 0, 0,
+ /* IP5_29 [1] */
+ 0, 0,
+ /* IP5_28 [1] */
+ 0, 0,
+ /* IP5_27 [1] */
+ 0, 0,
+ /* IP5_26_25 [2] */
+ FN_REF50CK, FN_CTS1_E, FN_HCTS0_D, 0,
+ /* IP5_24_23 [2] */
+ FN_REF125CK, FN_ADTRG, FN_RX5_C, 0,
+ /* IP5_22_21 [2] */
+ FN_SD2_WP_A, FN_TX5_A, FN_VI0_R5, 0,
+ /* IP5_20_18 [3] */
+ FN_SD2_CD_A, FN_RX5_A, FN_VI0_R4, 0,
+ 0, 0, 0, FN_ET0_PHY_INT_B,
+ /* IP5_17_15 [3] */
+ FN_SD2_DAT3_A, FN_TX4_A, FN_VI0_R3, 0,
+ 0, 0, 0, FN_ET0_MAGIC_B,
+ /* IP5_14_12 [3] */
+ FN_SD2_DAT2_A, FN_RX4_A, FN_VI0_R2, 0,
+ 0, 0, 0, FN_ET0_LINK_B,
+ /* IP5_11_9 [3] */
+ FN_SD2_DAT1_A, FN_TX3_A, FN_VI0_R1, 0,
+ 0, 0, 0, FN_ET0_MDIO_B,
+ /* IP5_8_6 [3] */
+ FN_SD2_DAT0_A, FN_RX3_A, FN_VI0_R0, 0,
+ 0, 0, 0, FN_ET0_ERXD3_B,
+ /* IP5_5_3 [3] */
+ FN_SD2_CMD_A, FN_TX2_A, FN_VI0_G5, 0,
+ 0, 0, 0, FN_ET0_ERXD2_B,
+ /* IP5_2_0 [3] */
+ FN_SD2_CLK_A, FN_RX2_A, FN_VI0_G4, 0,
+ FN_ET0_RX_CLK_B, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR6", 0xFFFC0034, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 3, 3, 2, 2, 2, 2, 2, 2, 3, 3) {
+ /* IP6_31 [1] */
+ 0, 0,
+ /* IP6_30 [1] */
+ 0, 0,
+ /* IP6_29 [1] */
+ 0, 0,
+ /* IP6_28 [1] */
+ 0, 0,
+ /* IP6_27 [1] */
+ 0, 0,
+ /* IP6_26 [1] */
+ 0, 0,
+ /* IP6_25 [1] */
+ 0, 0,
+ /* IP6_24 [1] */
+ 0, 0,
+ /* IP6_23_21 [3] */
+ FN_DU0_DG1, FN_CTS1_C, FN_HRTS0_D, FN_TIOC1B_A,
+ FN_HIFD09, 0, 0, 0,
+ /* IP6_20_18 [3] */
+ FN_DU0_DG0, FN_TX1_C, FN_HSCK0_D, FN_IECLK_A,
+ FN_TIOC1A_A, FN_HIFD08, 0, 0,
+ /* IP6_17_16 [2] */
+ FN_DU0_DR7, FN_RX1_C, FN_TIOC0D_A, FN_HIFD07,
+ /* IP6_15_14 [2] */
+ FN_DU0_DR6, FN_SCK1_C, FN_TIOC0C_A, FN_HIFD06,
+ /* IP6_13_12 [2] */
+ FN_DU0_DR5, FN_RTS0_C, FN_TIOC0B_A, FN_HIFD05,
+ /* IP6_11_10 [2] */
+ FN_DU0_DR4, FN_CTS0_C, FN_TIOC0A_A, FN_HIFD04,
+ /* IP6_9_8 [2] */
+ FN_DU0_DR3, FN_TX0_B, FN_TCLKD_A, FN_HIFD03,
+ /* IP6_7_6 [2] */
+ FN_DU0_DR2, FN_RX0_B, FN_TCLKC_A, FN_HIFD02,
+ /* IP6_5_3 [3] */
+ FN_DU0_DR1, FN_SCK0_B, FN_HTX0_D, FN_IERX_A,
+ FN_TCLKB_A, FN_HIFD01, 0, 0,
+ /* IP6_2_0 [3] */
+ FN_DU0_DR0, FN_SCIF_CLK_B, FN_HRX0_D, FN_IETX_A,
+ FN_TCLKA_A, FN_HIFD00, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR7", 0xFFFC0038, 32,
+ 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
+ /* IP7_31 [1] */
+ 0, 0,
+ /* IP7_30_29 [2] */
+ FN_DU0_DB4, 0, FN_HIFINT, 0,
+ /* IP7_28_27 [2] */
+ FN_DU0_DB3, FN_TX5_B, FN_TIOC4D_A, FN_HIFRD,
+ /* IP7_26_24 [3] */
+ FN_DU0_DB2, FN_RX5_B, FN_RMII0_TXD1_B, FN_TIOC4C_A,
+ FN_HIFWR, 0, 0, 0,
+ /* IP7_23_21 [3] */
+ FN_DU0_DB1, FN_TX4_C, FN_RMII0_TXD0_B, FN_TIOC4B_A,
+ FN_HIFRS, 0, 0, 0,
+ /* IP7_20_18 [3] */
+ FN_DU0_DB0, FN_RX4_C, FN_RMII0_TXD_EN_B, FN_TIOC4A_A,
+ FN_HIFCS, 0, 0, 0,
+ /* IP7_17_15 [3] */
+ FN_DU0_DG7, FN_TX3_C, FN_RMII0_RXD1_B, FN_TIOC3D_A,
+ FN_HIFD15, 0, 0, 0,
+ /* IP7_14_12 [3] */
+ FN_DU0_DG6, FN_RX3_C, FN_RMII0_RXD0_B, FN_TIOC3C_A,
+ FN_HIFD14, 0, 0, 0,
+ /* IP7_11_9 [3] */
+ FN_DU0_DG5, FN_TX2_C, FN_RMII0_RX_ER_B, FN_TIOC3B_A,
+ FN_HIFD13, 0, 0, 0,
+ /* IP7_8_6 [3] */
+ FN_DU0_DG4, FN_RX2_C, FN_RMII0_CRS_DV_B, FN_TIOC3A_A,
+ FN_HIFD12, 0, 0, 0,
+ /* IP7_5_3 [3] */
+ FN_DU0_DG3, FN_SCK2_C, FN_RMII0_MDIO_B, FN_TIOC2B_A,
+ FN_HIFD11, 0, 0, 0,
+ /* IP7_2_0 [3] */
+ FN_DU0_DG2, FN_RTS1_C, FN_RMII0_MDC_B, FN_TIOC2A_A,
+ FN_HIFD10, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR8", 0xFFFC003C, 32,
+ 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2) {
+ /* IP8_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP8_29_28 [2] */
+ FN_IRQ3_A, FN_RTS0_A, FN_HRTS0_B, FN_ET0_ERXD3_A,
+ /* IP8_27_26 [2] */
+ FN_IRQ2_A, FN_CTS0_A, FN_HCTS0_B, FN_ET0_ERXD2_A,
+ /* IP8_25_23 [3] */
+ FN_IRQ1_A, 0, FN_HSPI_RX_B, FN_TX3_E,
+ FN_ET0_ERXD1, 0, 0, 0,
+ /* IP8_22_20 [3] */
+ FN_IRQ0_A, 0, FN_HSPI_TX_B, FN_RX3_E,
+ FN_ET0_ERXD0, 0, 0, 0,
+ /* IP8_19_18 [2] */
+ FN_DU0_CDE, FN_HTX0_B, FN_AUDIO_CLKB_B, FN_LCD_VCPWC_B,
+ /* IP8_17_16 [2] */
+ FN_DU0_DISP, FN_CAN0_TX_B, FN_HRX0_B, FN_AUDIO_CLKA_B,
+ /* IP8_15_14 [2] */
+ FN_DU0_EXODDF_DU0_ODDF, FN_CAN0_RX_B, FN_HSCK0_B,
+ FN_SSI_SDATA1_B,
+ /* IP8_13_12 [2] */
+ FN_DU0_EXVSYNC_DU0_VSYNC, 0, FN_HSPI_RX0_C, FN_SSI_WS1_B,
+ /* IP8_11_10 [2] */
+ FN_DU0_EXHSYNC_DU0_HSYNC, 0, FN_HSPI_TX0_C, FN_SSI_SCK1_B,
+ /* IP8_9_8 [2] */
+ FN_DU0_DOTCLKOUT, 0, FN_HSPI_CLK0_C, FN_SSI_SDATA0_B,
+ /* IP8_7_6 [2] */
+ FN_DU0_DOTCLKIN, 0, FN_HSPI_CS0_C, FN_SSI_WS0_B,
+ /* IP8_5_4 [2] */
+ FN_DU0_DB7, 0, FN_SSI_SCK0_B, FN_HIFEBL_B,
+ /* IP8_3_2 [2] */
+ FN_DU0_DB6, 0, FN_HIFRDY, 0,
+ /* IP8_1_0 [2] */
+ FN_DU0_DB5, 0, FN_HIFDREQ, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR9", 0xFFFC0040, 32,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2) {
+ /* IP9_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP9_29_28 [2] */
+ FN_SSI_SDATA1_A, FN_VI1_3_B, FN_LCD_DATA14_B, 0,
+ /* IP9_27_26 [2] */
+ FN_SSI_WS1_A, FN_VI1_2_B, FN_LCD_DATA13_B, 0,
+ /* IP9_25_24 [2] */
+ FN_SSI_SCK1_A, FN_VI1_1_B, FN_TIOC2B_B, FN_LCD_DATA12_B,
+ /* IP9_23_22 [2] */
+ FN_SSI_SDATA0_A, FN_VI1_0_B, FN_TIOC2A_B, FN_LCD_DATA11_B,
+ /* IP9_21_20 [2] */
+ FN_SSI_WS0_A, FN_TIOC1B_B, FN_LCD_DATA10_B, 0,
+ /* IP9_19_18 [2] */
+ FN_SSI_SCK0_A, FN_TIOC1A_B, FN_LCD_DATA9_B, 0,
+ /* IP9_17_16 [2] */
+ FN_VI1_7_A, FN_FCE_B, FN_LCD_DATA8_B, 0,
+ /* IP9_15_14 [2] */
+ FN_VI1_6_A, 0, FN_FD7_B, FN_LCD_DATA7_B,
+ /* IP9_13_12 [2] */
+ FN_VI1_5_A, 0, FN_FD6_B, FN_LCD_DATA6_B,
+ /* IP9_11_10 [2] */
+ FN_VI1_4_A, 0, FN_FD5_B, FN_LCD_DATA5_B,
+ /* IP9_9_8 [2] */
+ FN_VI1_3_A, 0, FN_FD4_B, FN_LCD_DATA4_B,
+ /* IP9_7_6 [2] */
+ FN_VI1_2_A, 0, FN_FD3_B, FN_LCD_DATA3_B,
+ /* IP9_5_4 [2] */
+ FN_VI1_1_A, 0, FN_FD2_B, FN_LCD_DATA2_B,
+ /* IP9_3_2 [2] */
+ FN_VI1_0_A, 0, FN_FD1_B, FN_LCD_DATA1_B,
+ /* IP9_1_0 [2] */
+ FN_VI1_CLK_A, 0, FN_FD0_B, FN_LCD_DATA0_B }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR10", 0xFFFC0044, 32,
+ 2, 2, 2, 1, 2, 1, 3,
+ 3, 1, 3, 3, 3, 3, 3) {
+ /* IP10_31_30 [2] */
+ 0, 0, 0, 0,
+ /* IP10_29_28 [2] */
+ FN_CAN1_TX_A, FN_TX5_C, FN_MLB_DAT, 0,
+ /* IP10_27_26 [2] */
+ FN_CAN0_RX_A, FN_IRQ0_B, FN_MLB_SIG, 0,
+ /* IP10_25 [1] */
+ FN_CAN1_RX_A, FN_IRQ1_B,
+ /* IP10_24_23 [2] */
+ FN_CAN0_TX_A, FN_TX4_D, FN_MLB_CLK, 0,
+ /* IP10_22 [1] */
+ FN_CAN_CLK_A, FN_RX4_D,
+ /* IP10_21_19 [3] */
+ FN_AUDIO_CLKOUT, FN_TX1_E, FN_HRTS0_C, FN_FSE_B,
+ FN_LCD_M_DISP_B, 0, 0, 0,
+ /* IP10_18_16 [3] */
+ FN_AUDIO_CLKC, FN_SCK1_E, FN_HCTS0_C, FN_FRB_B,
+ FN_LCD_VEPWC_B, 0, 0, 0,
+ /* IP10_15 [1] */
+ FN_AUDIO_CLKB_A, FN_LCD_CLK_B,
+ /* IP10_14_12 [3] */
+ FN_AUDIO_CLKA_A, FN_VI1_CLK_B, FN_SCK1_D, FN_IECLK_B,
+ FN_LCD_FLM_B, 0, 0, 0,
+ /* IP10_11_9 [3] */
+ FN_SSI_SDATA3, FN_VI1_7_B, FN_HTX0_C, FN_FWE_B,
+ FN_LCD_CL2_B, 0, 0, 0,
+ /* IP10_8_6 [3] */
+ FN_SSI_SDATA2, FN_VI1_6_B, FN_HRX0_C, FN_FRE_B,
+ FN_LCD_CL1_B, 0, 0, 0,
+ /* IP10_5_3 [3] */
+ FN_SSI_WS23, FN_VI1_5_B, FN_TX1_D, FN_HSCK0_C, FN_FALE_B,
+ FN_LCD_DON_B, 0, 0, 0,
+ /* IP10_2_0 [3] */
+ FN_SSI_SCK23, FN_VI1_4_B, FN_RX1_D, FN_FCLE_B,
+ FN_LCD_DATA15_B, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR11", 0xFFFC0048, 32,
+ 3, 1, 2, 3, 2, 2, 3, 3, 1, 2, 3, 3, 1, 1, 1, 1) {
+ /* IP11_31_29 [3] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_28 [1] */
+ FN_PRESETOUT, FN_ST_CLKOUT,
+ /* IP11_27_26 [2] */
+ FN_DACK1, FN_HSPI_CS_B, FN_TX4_B, FN_ET0_RX_CLK_A,
+ /* IP11_25_23 [3] */
+ FN_DREQ1, FN_HSPI_CLK_B, FN_RX4_B, FN_ET0_PHY_INT_C,
+ FN_ET0_TX_CLK_A, 0, 0, 0,
+ /* IP11_22_21 [2] */
+ FN_DACK0, FN_SD1_DAT3_A, FN_ET0_TX_ER, 0,
+ /* IP11_20_19 [2] */
+ FN_DREQ0, FN_SD1_CLK_A, FN_ET0_TX_EN, 0,
+ /* IP11_18_16 [3] */
+ FN_USB_OVC1, FN_RX3_D, FN_CAN1_RX_B, FN_RX5_D,
+ FN_IERX_B, 0, 0, 0,
+ /* IP11_15_13 [3] */
+ FN_PENC1, FN_TX3_D, FN_CAN1_TX_B, FN_TX5_D,
+ FN_IETX_B, 0, 0, 0,
+ /* IP11_12 [1] */
+ FN_TX0_A, FN_HSPI_TX_A,
+ /* IP11_11_10 [2] */
+ FN_RX0_A, FN_HSPI_RX_A, FN_RMII0_RXD0_A, FN_ET0_ERXD6,
+ /* IP11_9_7 [3] */
+ FN_SCK0_A, FN_HSPI_CS_A, FN_VI0_CLKENB, FN_RMII0_TXD1_A,
+ FN_ET0_ERXD5, 0, 0, 0,
+ /* IP11_6_4 [3] */
+ FN_SCIF_CLK_A, FN_HSPI_CLK_A, FN_VI0_CLK, FN_RMII0_TXD0_A,
+ FN_ET0_ERXD4, 0, 0, 0,
+ /* IP11_3 [1] */
+ FN_SDSELF, FN_RTS1_E,
+ /* IP11_2 [1] */
+ FN_SDA0, FN_HIFEBL_A,
+ /* IP11_1 [1] */
+ FN_SDA1, FN_RX1_E,
+ /* IP11_0 [1] */
+ FN_SCL1, FN_SCIF_CLK_C }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xFFFC004C, 32,
+ 3, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2,
+ 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) {
+ /* SEL1_31_29 [3] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* SEL1_28 [1] */
+ FN_SEL_IEBUS_0, FN_SEL_IEBUS_1,
+ /* SEL1_27 [1] */
+ FN_SEL_RQSPI_0, FN_SEL_RQSPI_1,
+ /* SEL1_26 [1] */
+ FN_SEL_VIN1_0, FN_SEL_VIN1_1,
+ /* SEL1_25 [1] */
+ FN_SEL_HIF_0, FN_SEL_HIF_1,
+ /* SEL1_24 [1] */
+ FN_SEL_RSPI_0, FN_SEL_RSPI_1,
+ /* SEL1_23 [1] */
+ FN_SEL_LCDC_0, FN_SEL_LCDC_1,
+ /* SEL1_22_21 [2] */
+ FN_SEL_ET0_CTL_0, FN_SEL_ET0_CTL_1, FN_SEL_ET0_CTL_2, 0,
+ /* SEL1_20 [1] */
+ FN_SEL_ET0_0, FN_SEL_ET0_1,
+ /* SEL1_19 [1] */
+ FN_SEL_RMII_0, FN_SEL_RMII_1,
+ /* SEL1_18 [1] */
+ FN_SEL_TMU_0, FN_SEL_TMU_1,
+ /* SEL1_17_16 [2] */
+ FN_SEL_HSPI_0, FN_SEL_HSPI_1, FN_SEL_HSPI_2, 0,
+ /* SEL1_15_14 [2] */
+ FN_SEL_HSCIF_0, FN_SEL_HSCIF_1, FN_SEL_HSCIF_2, FN_SEL_HSCIF_3,
+ /* SEL1_13 [1] */
+ FN_SEL_RCAN_CLK_0, FN_SEL_RCAN_CLK_1,
+ /* SEL1_12_11 [2] */
+ FN_SEL_RCAN1_0, FN_SEL_RCAN1_1, FN_SEL_RCAN1_2, 0,
+ /* SEL1_10 [1] */
+ FN_SEL_RCAN0_0, FN_SEL_RCAN0_1,
+ /* SEL1_9 [1] */
+ FN_SEL_SDHI2_0, FN_SEL_SDHI2_1,
+ /* SEL1_8 [1] */
+ FN_SEL_SDHI1_0, FN_SEL_SDHI1_1,
+ /* SEL1_7 [1] */
+ FN_SEL_SDHI0_0, FN_SEL_SDHI0_1,
+ /* SEL1_6 [1] */
+ FN_SEL_SSI1_0, FN_SEL_SSI1_1,
+ /* SEL1_5 [1] */
+ FN_SEL_SSI0_0, FN_SEL_SSI0_1,
+ /* SEL1_4 [1] */
+ FN_SEL_AUDIO_CLKB_0, FN_SEL_AUDIO_CLKB_1,
+ /* SEL1_3 [1] */
+ FN_SEL_AUDIO_CLKA_0, FN_SEL_AUDIO_CLKA_1,
+ /* SEL1_2 [1] */
+ FN_SEL_FLCTL_0, FN_SEL_FLCTL_1,
+ /* SEL1_1 [1] */
+ FN_SEL_MMC_0, FN_SEL_MMC_1,
+ /* SEL1_0 [1] */
+ FN_SEL_INTC_0, FN_SEL_INTC_1 }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xFFFC0050, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 1, 2, 2, 3, 2, 3, 2, 2) {
+ /* SEL2_31 [1] */
+ 0, 0,
+ /* SEL2_30 [1] */
+ 0, 0,
+ /* SEL2_29 [1] */
+ 0, 0,
+ /* SEL2_28 [1] */
+ 0, 0,
+ /* SEL2_27 [1] */
+ 0, 0,
+ /* SEL2_26 [1] */
+ 0, 0,
+ /* SEL2_25 [1] */
+ 0, 0,
+ /* SEL2_24 [1] */
+ 0, 0,
+ /* SEL2_23 [1] */
+ FN_SEL_MTU2_CLK_0, FN_SEL_MTU2_CLK_1,
+ /* SEL2_22 [1] */
+ FN_SEL_MTU2_CH4_0, FN_SEL_MTU2_CH4_1,
+ /* SEL2_21 [1] */
+ FN_SEL_MTU2_CH3_0, FN_SEL_MTU2_CH3_1,
+ /* SEL2_20_19 [2] */
+ FN_SEL_MTU2_CH2_0, FN_SEL_MTU2_CH2_1, FN_SEL_MTU2_CH2_2, 0,
+ /* SEL2_18_17 [2] */
+ FN_SEL_MTU2_CH1_0, FN_SEL_MTU2_CH1_1, FN_SEL_MTU2_CH1_2, 0,
+ /* SEL2_16 [1] */
+ FN_SEL_MTU2_CH0_0, FN_SEL_MTU2_CH0_1,
+ /* SEL2_15_14 [2] */
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1, FN_SEL_SCIF5_2, FN_SEL_SCIF5_3,
+ /* SEL2_13_12 [2] */
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, FN_SEL_SCIF4_3,
+ /* SEL2_11_9 [3] */
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
+ FN_SEL_SCIF3_4, 0, 0, 0,
+ /* SEL2_8_7 [2] */
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2, FN_SEL_SCIF2_3,
+ /* SEL2_6_4 [3] */
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3,
+ FN_SEL_SCIF1_4, 0, 0, 0,
+ /* SEL2_3_2 [2] */
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, 0,
+ /* SEL2_1_0 [2] */
+ FN_SEL_SCIF_CLK_0, FN_SEL_SCIF_CLK_1, FN_SEL_SCIF_CLK_2, 0 }
+ },
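+ /* GPIO direction (INOUTSEL) registers: each 1-bit field selects the pin's _IN or _OUT state. */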
+ /* GPIO 0 - 5 */
+ { PINMUX_CFG_REG("INOUTSEL0", 0xFFC40004, 32, 1) { GP_INOUTSEL(0) } },
+ { PINMUX_CFG_REG("INOUTSEL1", 0xFFC41004, 32, 1) { GP_INOUTSEL(1) } },
+ { PINMUX_CFG_REG("INOUTSEL2", 0xFFC42004, 32, 1) { GP_INOUTSEL(2) } },
+ { PINMUX_CFG_REG("INOUTSEL3", 0xFFC43004, 32, 1) { GP_INOUTSEL(3) } },
+ { PINMUX_CFG_REG("INOUTSEL4", 0xFFC44004, 32, 1) { GP_INOUTSEL(4) } },
+ { PINMUX_CFG_REG("INOUTSEL5", 0xffc45004, 32, 1) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 31 - 24 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 23 - 16 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 15 - 12 */
+ GP_5_11_IN, GP_5_11_OUT,
+ GP_5_10_IN, GP_5_10_OUT,
+ GP_5_9_IN, GP_5_9_OUT,
+ GP_5_8_IN, GP_5_8_OUT,
+ GP_5_7_IN, GP_5_7_OUT,
+ GP_5_6_IN, GP_5_6_OUT,
+ GP_5_5_IN, GP_5_5_OUT,
+ GP_5_4_IN, GP_5_4_OUT,
+ GP_5_3_IN, GP_5_3_OUT,
+ GP_5_2_IN, GP_5_2_OUT,
+ GP_5_1_IN, GP_5_1_OUT,
+ GP_5_0_IN, GP_5_0_OUT }
+ },
+ { },
+};
+
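+/* Pin data (INDT) registers: one 32-bit register per GPIO bank, one bit per pin. */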
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ /* GPIO 0 - 5 */
+ { PINMUX_DATA_REG("INDT0", 0xFFC4000C, 32) { GP_INDT(0) } },
+ { PINMUX_DATA_REG("INDT1", 0xFFC4100C, 32) { GP_INDT(1) } },
+ { PINMUX_DATA_REG("INDT2", 0xFFC4200C, 32) { GP_INDT(2) } },
+ { PINMUX_DATA_REG("INDT3", 0xFFC4300C, 32) { GP_INDT(3) } },
+ { PINMUX_DATA_REG("INDT4", 0xFFC4400C, 32) { GP_INDT(4) } },
+ { PINMUX_DATA_REG("INDT5", 0xFFC4500C, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ GP_5_11_DATA, GP_5_10_DATA, GP_5_9_DATA, GP_5_8_DATA,
+ GP_5_7_DATA, GP_5_6_DATA, GP_5_5_DATA, GP_5_4_DATA,
+ GP_5_3_DATA, GP_5_2_DATA, GP_5_1_DATA, GP_5_0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7734_pinmux_info = {
+ .name = "sh7734_pfc",
+
+ .unlock_reg = 0xFFFC0000,
+
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_GP_0_0,
+ .last_gpio = GPIO_FN_ST_CLKOUT,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7757.c b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
new file mode 100644
index 000000000000..5ed74cd0ba99
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
@@ -0,0 +1,2282 @@
+/*
+ * SH7757 (B0 step) Pinmux
+ *
+ * Copyright (C) 2009-2010 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * Based on SH7723 Pinmux
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/sh7757.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA,
+ PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA,
+ PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA,
+ PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA,
+ PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA,
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA,
+ PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN,
+ PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTG7_IN, PTG6_IN, PTG5_IN, PTG4_IN,
+ PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN,
+ PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTI7_IN, PTI6_IN, PTI5_IN, PTI4_IN,
+ PTI3_IN, PTI2_IN, PTI1_IN, PTI0_IN,
+ PTJ6_IN, PTJ5_IN, PTJ4_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTO7_IN, PTO6_IN, PTO5_IN, PTO4_IN,
+ PTO3_IN, PTO2_IN, PTO1_IN, PTO0_IN,
+ PTP7_IN, PTP6_IN, PTP5_IN, PTP4_IN,
+ PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN,
+ PTQ6_IN, PTQ5_IN, PTQ4_IN,
+ PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN,
+ PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT7_IN, PTT6_IN, PTT5_IN, PTT4_IN,
+ PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN,
+ PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
+ PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
+ PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
+ PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
+ PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
+ PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
+ PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
+ PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
+ PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
+ PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
+ PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU,
+ PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
+ PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU,
+ PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU,
+ PTG7_IN_PU, PTG6_IN_PU, PTG4_IN_PU,
+ PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
+ PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
+ PTI7_IN_PU, PTI6_IN_PU, PTI4_IN_PU,
+ PTI3_IN_PU, PTI2_IN_PU, PTI1_IN_PU, PTI0_IN_PU,
+ PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU,
+ PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
+ PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU,
+ PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
+ PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU,
+ PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU,
+ PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
+ PTN4_IN_PU,
+ PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU,
+ PTO7_IN_PU, PTO6_IN_PU, PTO5_IN_PU, PTO4_IN_PU,
+ PTO3_IN_PU, PTO2_IN_PU, PTO1_IN_PU, PTO0_IN_PU,
+ PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU,
+ PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
+ PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU,
+ PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
+ PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU,
+ PTV3_IN_PU, PTV2_IN_PU,
+ PTW1_IN_PU, PTW0_IN_PU,
+ PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU,
+ PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU,
+ PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU,
+ PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU,
+ PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU,
+ PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT,
+ PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
+ PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
+ PTG7_OUT, PTG6_OUT, PTG5_OUT, PTG4_OUT,
+ PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTI7_OUT, PTI6_OUT, PTI5_OUT, PTI4_OUT,
+ PTI3_OUT, PTI2_OUT, PTI1_OUT, PTI0_OUT,
+ PTJ6_OUT, PTJ5_OUT, PTJ4_OUT,
+ PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
+ PTO7_OUT, PTO6_OUT, PTO5_OUT, PTO4_OUT,
+ PTO3_OUT, PTO2_OUT, PTO1_OUT, PTO0_OUT,
+ PTP7_OUT, PTP6_OUT, PTP5_OUT, PTP4_OUT,
+ PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT,
+ PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
+ PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT,
+ PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT,
+ PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT7_OUT, PTT6_OUT, PTT5_OUT, PTT4_OUT,
+ PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT,
+ PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
+ PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
+ PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
+ PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
+ PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
+ PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN,
+ PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG7_FN, PTG6_FN, PTG5_FN, PTG4_FN,
+ PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTI7_FN, PTI6_FN, PTI5_FN, PTI4_FN,
+ PTI3_FN, PTI2_FN, PTI1_FN, PTI0_FN,
+ PTJ6_FN, PTJ5_FN, PTJ4_FN,
+ PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL6_FN, PTL5_FN, PTL4_FN,
+ PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
+ PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTN6_FN, PTN5_FN, PTN4_FN,
+ PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
+ PTO7_FN, PTO6_FN, PTO5_FN, PTO4_FN,
+ PTO3_FN, PTO2_FN, PTO1_FN, PTO0_FN,
+ PTP7_FN, PTP6_FN, PTP5_FN, PTP4_FN,
+ PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN,
+ PTQ6_FN, PTQ5_FN, PTQ4_FN,
+ PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN,
+ PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT7_FN, PTT6_FN, PTT5_FN, PTT4_FN,
+ PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN,
+ PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
+ PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+ PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
+ PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
+ PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
+ PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
+ PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
+ PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
+ PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
+ PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
+
+ PS0_15_FN1, PS0_15_FN2,
+ PS0_14_FN1, PS0_14_FN2,
+ PS0_13_FN1, PS0_13_FN2,
+ PS0_12_FN1, PS0_12_FN2,
+ PS0_11_FN1, PS0_11_FN2,
+ PS0_10_FN1, PS0_10_FN2,
+ PS0_9_FN1, PS0_9_FN2,
+ PS0_8_FN1, PS0_8_FN2,
+ PS0_7_FN1, PS0_7_FN2,
+ PS0_6_FN1, PS0_6_FN2,
+ PS0_5_FN1, PS0_5_FN2,
+ PS0_4_FN1, PS0_4_FN2,
+ PS0_3_FN1, PS0_3_FN2,
+ PS0_2_FN1, PS0_2_FN2,
+
+ PS1_10_FN1, PS1_10_FN2,
+ PS1_9_FN1, PS1_9_FN2,
+ PS1_8_FN1, PS1_8_FN2,
+ PS1_2_FN1, PS1_2_FN2,
+
+ PS2_13_FN1, PS2_13_FN2,
+ PS2_12_FN1, PS2_12_FN2,
+ PS2_7_FN1, PS2_7_FN2,
+ PS2_6_FN1, PS2_6_FN2,
+ PS2_5_FN1, PS2_5_FN2,
+ PS2_4_FN1, PS2_4_FN2,
+ PS2_2_FN1, PS2_2_FN2,
+
+ PS3_15_FN1, PS3_15_FN2,
+ PS3_14_FN1, PS3_14_FN2,
+ PS3_13_FN1, PS3_13_FN2,
+ PS3_12_FN1, PS3_12_FN2,
+ PS3_11_FN1, PS3_11_FN2,
+ PS3_10_FN1, PS3_10_FN2,
+ PS3_9_FN1, PS3_9_FN2,
+ PS3_8_FN1, PS3_8_FN2,
+ PS3_7_FN1, PS3_7_FN2,
+ PS3_2_FN1, PS3_2_FN2,
+ PS3_1_FN1, PS3_1_FN2,
+
+ PS4_14_FN1, PS4_14_FN2,
+ PS4_13_FN1, PS4_13_FN2,
+ PS4_12_FN1, PS4_12_FN2,
+ PS4_10_FN1, PS4_10_FN2,
+ PS4_9_FN1, PS4_9_FN2,
+ PS4_8_FN1, PS4_8_FN2,
+ PS4_4_FN1, PS4_4_FN2,
+ PS4_3_FN1, PS4_3_FN2,
+ PS4_2_FN1, PS4_2_FN2,
+ PS4_1_FN1, PS4_1_FN2,
+ PS4_0_FN1, PS4_0_FN2,
+
+ PS5_11_FN1, PS5_11_FN2,
+ PS5_10_FN1, PS5_10_FN2,
+ PS5_9_FN1, PS5_9_FN2,
+ PS5_8_FN1, PS5_8_FN2,
+ PS5_7_FN1, PS5_7_FN2,
+ PS5_6_FN1, PS5_6_FN2,
+ PS5_5_FN1, PS5_5_FN2,
+ PS5_4_FN1, PS5_4_FN2,
+ PS5_3_FN1, PS5_3_FN2,
+ PS5_2_FN1, PS5_2_FN2,
+
+ PS6_15_FN1, PS6_15_FN2,
+ PS6_14_FN1, PS6_14_FN2,
+ PS6_13_FN1, PS6_13_FN2,
+ PS6_12_FN1, PS6_12_FN2,
+ PS6_11_FN1, PS6_11_FN2,
+ PS6_10_FN1, PS6_10_FN2,
+ PS6_9_FN1, PS6_9_FN2,
+ PS6_8_FN1, PS6_8_FN2,
+ PS6_7_FN1, PS6_7_FN2,
+ PS6_6_FN1, PS6_6_FN2,
+ PS6_5_FN1, PS6_5_FN2,
+ PS6_4_FN1, PS6_4_FN2,
+ PS6_3_FN1, PS6_3_FN2,
+ PS6_2_FN1, PS6_2_FN2,
+ PS6_1_FN1, PS6_1_FN2,
+ PS6_0_FN1, PS6_0_FN2,
+
+ PS7_15_FN1, PS7_15_FN2,
+ PS7_14_FN1, PS7_14_FN2,
+ PS7_13_FN1, PS7_13_FN2,
+ PS7_12_FN1, PS7_12_FN2,
+ PS7_11_FN1, PS7_11_FN2,
+ PS7_10_FN1, PS7_10_FN2,
+ PS7_9_FN1, PS7_9_FN2,
+ PS7_8_FN1, PS7_8_FN2,
+ PS7_7_FN1, PS7_7_FN2,
+ PS7_6_FN1, PS7_6_FN2,
+ PS7_5_FN1, PS7_5_FN2,
+ PS7_4_FN1, PS7_4_FN2,
+
+ PS8_15_FN1, PS8_15_FN2,
+ PS8_14_FN1, PS8_14_FN2,
+ PS8_13_FN1, PS8_13_FN2,
+ PS8_12_FN1, PS8_12_FN2,
+ PS8_11_FN1, PS8_11_FN2,
+ PS8_10_FN1, PS8_10_FN2,
+ PS8_9_FN1, PS8_9_FN2,
+ PS8_8_FN1, PS8_8_FN2,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /* PTA (module: LBSC, RGMII) */
+ BS_MARK, RDWR_MARK, WE1_MARK, RDY_MARK,
+ ET0_MDC_MARK, ET0_MDIO_MARK, ET1_MDC_MARK, ET1_MDIO_MARK,
+
+ /* PTB (module: INTC, ONFI, TMU) */
+ IRQ15_MARK, IRQ14_MARK, IRQ13_MARK, IRQ12_MARK,
+ IRQ11_MARK, IRQ10_MARK, IRQ9_MARK, IRQ8_MARK,
+ ON_NRE_MARK, ON_NWE_MARK, ON_NWP_MARK, ON_NCE0_MARK,
+ ON_R_B0_MARK, ON_ALE_MARK, ON_CLE_MARK, TCLK_MARK,
+
+ /* PTC (module: IRQ, PWMU) */
+ IRQ7_MARK, IRQ6_MARK, IRQ5_MARK, IRQ4_MARK,
+ IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK,
+ PWMU0_MARK, PWMU1_MARK, PWMU2_MARK, PWMU3_MARK,
+ PWMU4_MARK, PWMU5_MARK,
+
+ /* PTD (module: SPI0, DMAC) */
+ SP0_MOSI_MARK, SP0_MISO_MARK, SP0_SCK_MARK, SP0_SCK_FB_MARK,
+ SP0_SS0_MARK, SP0_SS1_MARK, SP0_SS2_MARK, SP0_SS3_MARK,
+ DREQ0_MARK, DACK0_MARK, TEND0_MARK,
+
+ /* PTE (module: RMII) */
+ RMII0_CRS_DV_MARK, RMII0_TXD1_MARK,
+ RMII0_TXD0_MARK, RMII0_TXEN_MARK,
+ RMII0_REFCLK_MARK, RMII0_RXD1_MARK,
+ RMII0_RXD0_MARK, RMII0_RX_ER_MARK,
+
+ /* PTF (module: RMII, SerMux) */
+ RMII1_CRS_DV_MARK, RMII1_TXD1_MARK,
+ RMII1_TXD0_MARK, RMII1_TXEN_MARK,
+ RMII1_REFCLK_MARK, RMII1_RXD1_MARK,
+ RMII1_RXD0_MARK, RMII1_RX_ER_MARK,
+ RAC_RI_MARK,
+
+ /* PTG (module: system, LBSC, LPC, WDT, eMMC) */
+ BOOTFMS_MARK, BOOTWP_MARK, A25_MARK, A24_MARK,
+ SERIRQ_MARK, WDTOVF_MARK, LPCPD_MARK, LDRQ_MARK,
+ MMCCLK_MARK, MMCCMD_MARK,
+
+ /* PTH (module: SPI1, LPC, DMAC, ADC) */
+ SP1_MOSI_MARK, SP1_MISO_MARK, SP1_SCK_MARK, SP1_SCK_FB_MARK,
+ SP1_SS0_MARK, SP1_SS1_MARK, WP_MARK, FMS0_MARK,
+ TEND1_MARK, DREQ1_MARK, DACK1_MARK, ADTRG1_MARK,
+ ADTRG0_MARK,
+
+ /* PTI (module: LBSC, SDHI) */
+ D15_MARK, D14_MARK, D13_MARK, D12_MARK,
+ D11_MARK, D10_MARK, D9_MARK, D8_MARK,
+ SD_WP_MARK, SD_CD_MARK, SD_CLK_MARK, SD_CMD_MARK,
+ SD_D3_MARK, SD_D2_MARK, SD_D1_MARK, SD_D0_MARK,
+
+ /* PTJ (module: SCIF234) */
+ RTS3_MARK, CTS3_MARK, TXD3_MARK, RXD3_MARK,
+ RTS4_MARK, RXD4_MARK, TXD4_MARK,
+
+ /* PTK (module: SERMUX, LBSC, SCIF) */
+ COM2_TXD_MARK, COM2_RXD_MARK, COM2_RTS_MARK, COM2_CTS_MARK,
+ COM2_DTR_MARK, COM2_DSR_MARK, COM2_DCD_MARK, CLKOUT_MARK,
+ SCK2_MARK, SCK4_MARK, SCK3_MARK,
+
+ /* PTL (module: SERMUX, SCIF, LBSC, AUD) */
+ RAC_RXD_MARK, RAC_RTS_MARK, RAC_CTS_MARK, RAC_DTR_MARK,
+ RAC_DSR_MARK, RAC_DCD_MARK, RAC_TXD_MARK, RXD2_MARK,
+ CS5_MARK, CS6_MARK, AUDSYNC_MARK, AUDCK_MARK,
+ TXD2_MARK,
+
+ /* PTM (module: LBSC, IIC) */
+ CS4_MARK, RD_MARK, WE0_MARK, CS0_MARK,
+ SDA6_MARK, SCL6_MARK, SDA7_MARK, SCL7_MARK,
+
+ /* PTN (module: USB, JMC, SGPIO, WDT) */
+ VBUS_EN_MARK, VBUS_OC_MARK, JMCTCK_MARK, JMCTMS_MARK,
+ JMCTDO_MARK, JMCTDI_MARK, JMCTRST_MARK,
+ SGPIO1_CLK_MARK, SGPIO1_LOAD_MARK, SGPIO1_DI_MARK,
+ SGPIO1_DO_MARK, SUB_CLKIN_MARK,
+
+ /* PTO (module: SGPIO, SerMux) */
+ SGPIO0_CLK_MARK, SGPIO0_LOAD_MARK, SGPIO0_DI_MARK,
+ SGPIO0_DO_MARK, SGPIO2_CLK_MARK, SGPIO2_LOAD_MARK,
+ SGPIO2_DI_MARK, SGPIO2_DO_MARK,
+ COM1_TXD_MARK, COM1_RXD_MARK, COM1_RTS_MARK, COM1_CTS_MARK,
+
+ /* PTQ (module: LPC) */
+ LAD3_MARK, LAD2_MARK, LAD1_MARK, LAD0_MARK,
+ LFRAME_MARK, LRESET_MARK, LCLK_MARK,
+
+ /* PTR (module: GRA, IIC) */
+ DDC3_MARK, DDC2_MARK, SDA2_MARK, SCL2_MARK,
+ SDA1_MARK, SCL1_MARK, SDA0_MARK, SCL0_MARK,
+ SDA8_MARK, SCL8_MARK,
+
+ /* PTS (module: GRA, IIC) */
+ DDC1_MARK, DDC0_MARK, SDA5_MARK, SCL5_MARK,
+ SDA4_MARK, SCL4_MARK, SDA3_MARK, SCL3_MARK,
+ SDA9_MARK, SCL9_MARK,
+
+ /* PTT (module: PWMX, AUD) */
+ PWMX7_MARK, PWMX6_MARK, PWMX5_MARK, PWMX4_MARK,
+ PWMX3_MARK, PWMX2_MARK, PWMX1_MARK, PWMX0_MARK,
+ AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK,
+ STATUS1_MARK, STATUS0_MARK,
+
+ /* PTU (module: LPC, APM) */
+ LGPIO7_MARK, LGPIO6_MARK, LGPIO5_MARK, LGPIO4_MARK,
+ LGPIO3_MARK, LGPIO2_MARK, LGPIO1_MARK, LGPIO0_MARK,
+ APMONCTL_O_MARK, APMPWBTOUT_O_MARK, APMSCI_O_MARK,
+ APMVDDON_MARK, APMSLPBTN_MARK, APMPWRBTN_MARK, APMS5N_MARK,
+ APMS3N_MARK,
+
+ /* PTV (module: LBSC, SerMux, R-SPI, EVC, GRA) */
+ A23_MARK, A22_MARK, A21_MARK, A20_MARK,
+ A19_MARK, A18_MARK, A17_MARK, A16_MARK,
+ COM2_RI_MARK, R_SPI_MOSI_MARK, R_SPI_MISO_MARK,
+ R_SPI_RSPCK_MARK, R_SPI_SSL0_MARK, R_SPI_SSL1_MARK,
+ EVENT7_MARK, EVENT6_MARK, VBIOS_DI_MARK, VBIOS_DO_MARK,
+ VBIOS_CLK_MARK, VBIOS_CS_MARK,
+
+ /* PTW (module: LBSC, EVC, SCIF) */
+ A15_MARK, A14_MARK, A13_MARK, A12_MARK,
+ A11_MARK, A10_MARK, A9_MARK, A8_MARK,
+ EVENT5_MARK, EVENT4_MARK, EVENT3_MARK, EVENT2_MARK,
+ EVENT1_MARK, EVENT0_MARK, CTS4_MARK, CTS2_MARK,
+
+ /* PTX (module: LBSC, SCIF, SIM) */
+ A7_MARK, A6_MARK, A5_MARK, A4_MARK,
+ A3_MARK, A2_MARK, A1_MARK, A0_MARK,
+ RTS2_MARK, SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
+
+ /* PTY (module: LBSC) */
+ D7_MARK, D6_MARK, D5_MARK, D4_MARK,
+ D3_MARK, D2_MARK, D1_MARK, D0_MARK,
+
+ /* PTZ (module: eMMC, ONFI) */
+ MMCDAT7_MARK, MMCDAT6_MARK, MMCDAT5_MARK, MMCDAT4_MARK,
+ MMCDAT3_MARK, MMCDAT2_MARK, MMCDAT1_MARK, MMCDAT0_MARK,
+ ON_DQ7_MARK, ON_DQ6_MARK, ON_DQ5_MARK, ON_DQ4_MARK,
+ ON_DQ3_MARK, ON_DQ2_MARK, ON_DQ1_MARK, ON_DQ0_MARK,
+
+ PINMUX_MARK_END,
+};
+
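+/*
+ * Each PINMUX_DATA() entry lists a pin state or function mark followed by
+ * the enum IDs that must be selected to activate it: the _IN/_OUT states
+ * for plain GPIO, or the mode-select (PSn_x_FNy) and port-function (PTx_FN)
+ * bits for peripheral functions.
+ */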
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT),
+ PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT),
+ PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT),
+ PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG7_DATA, PTG7_IN, PTG7_OUT),
+ PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT),
+ PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT),
+ PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT),
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),
+
+ /* PTI GPIO */
+ PINMUX_DATA(PTI7_DATA, PTI7_IN, PTI7_OUT),
+ PINMUX_DATA(PTI6_DATA, PTI6_IN, PTI6_OUT),
+ PINMUX_DATA(PTI5_DATA, PTI5_IN, PTI5_OUT),
+ PINMUX_DATA(PTI4_DATA, PTI4_IN, PTI4_OUT),
+ PINMUX_DATA(PTI3_DATA, PTI3_IN, PTI3_OUT),
+ PINMUX_DATA(PTI2_DATA, PTI2_IN, PTI2_OUT),
+ PINMUX_DATA(PTI1_DATA, PTI1_IN, PTI1_OUT),
+ PINMUX_DATA(PTI0_DATA, PTI0_IN, PTI0_OUT),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT),
+ PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT),
+ PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT),
+ PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT),
+ PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT),
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
+ PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT),
+ PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT),
+ PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT),
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),
+
+ /* PTN GPIO */
+ PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
+ PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
+ PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
+ PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT),
+ PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT),
+ PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT),
+ PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT),
+
+ /* PTO GPIO */
+ PINMUX_DATA(PTO7_DATA, PTO7_IN, PTO7_OUT),
+ PINMUX_DATA(PTO6_DATA, PTO6_IN, PTO6_OUT),
+ PINMUX_DATA(PTO5_DATA, PTO5_IN, PTO5_OUT),
+ PINMUX_DATA(PTO4_DATA, PTO4_IN, PTO4_OUT),
+ PINMUX_DATA(PTO3_DATA, PTO3_IN, PTO3_OUT),
+ PINMUX_DATA(PTO2_DATA, PTO2_IN, PTO2_OUT),
+ PINMUX_DATA(PTO1_DATA, PTO1_IN, PTO1_OUT),
+ PINMUX_DATA(PTO0_DATA, PTO0_IN, PTO0_OUT),
+
+ /* PTP GPIO */
+ PINMUX_DATA(PTP7_DATA, PTP7_IN, PTP7_OUT),
+ PINMUX_DATA(PTP6_DATA, PTP6_IN, PTP6_OUT),
+ PINMUX_DATA(PTP5_DATA, PTP5_IN, PTP5_OUT),
+ PINMUX_DATA(PTP4_DATA, PTP4_IN, PTP4_OUT),
+ PINMUX_DATA(PTP3_DATA, PTP3_IN, PTP3_OUT),
+ PINMUX_DATA(PTP2_DATA, PTP2_IN, PTP2_OUT),
+ PINMUX_DATA(PTP1_DATA, PTP1_IN, PTP1_OUT),
+ PINMUX_DATA(PTP0_DATA, PTP0_IN, PTP0_OUT),
+
+ /* PTQ GPIO */
+ PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT),
+ PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT),
+ PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT),
+ PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT),
+ PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT),
+ PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT),
+ PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT),
+ PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT),
+ PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT),
+ PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT),
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT),
+ PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT),
+ PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT),
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),
+
+ /* PTW GPIO */
+ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT),
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT),
+ PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT),
+ PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT),
+ PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT),
+ PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT),
+ PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT),
+ PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT),
+
+ /* PTX GPIO */
+ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT),
+ PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT),
+ PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT),
+ PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT),
+ PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT),
+ PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT),
+ PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT),
+ PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT),
+
+ /* PTY GPIO */
+ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT),
+ PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT),
+ PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT),
+ PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT),
+ PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT),
+ PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT),
+ PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT),
+ PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT),
+
+ /* PTZ GPIO */
+ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT),
+ PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT),
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT),
+ PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT),
+
+ /* PTA FN */
+ PINMUX_DATA(BS_MARK, PTA7_FN),
+ PINMUX_DATA(RDWR_MARK, PTA6_FN),
+ PINMUX_DATA(WE1_MARK, PTA5_FN),
+ PINMUX_DATA(RDY_MARK, PTA4_FN),
+ PINMUX_DATA(ET0_MDC_MARK, PTA3_FN),
+ PINMUX_DATA(ET0_MDIO_MARK, PTA2_FN),
+ PINMUX_DATA(ET1_MDC_MARK, PTA1_FN),
+ PINMUX_DATA(ET1_MDIO_MARK, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(IRQ15_MARK, PS0_15_FN1, PTB7_FN),
+ PINMUX_DATA(ON_NRE_MARK, PS0_15_FN2, PTB7_FN),
+ PINMUX_DATA(IRQ14_MARK, PS0_14_FN1, PTB6_FN),
+ PINMUX_DATA(ON_NWE_MARK, PS0_14_FN2, PTB6_FN),
+ PINMUX_DATA(IRQ13_MARK, PS0_13_FN1, PTB5_FN),
+ PINMUX_DATA(ON_NWP_MARK, PS0_13_FN2, PTB5_FN),
+ PINMUX_DATA(IRQ12_MARK, PS0_12_FN1, PTB4_FN),
+ PINMUX_DATA(ON_NCE0_MARK, PS0_12_FN2, PTB4_FN),
+ PINMUX_DATA(IRQ11_MARK, PS0_11_FN1, PTB3_FN),
+ PINMUX_DATA(ON_R_B0_MARK, PS0_11_FN2, PTB3_FN),
+ PINMUX_DATA(IRQ10_MARK, PS0_10_FN1, PTB2_FN),
+ PINMUX_DATA(ON_ALE_MARK, PS0_10_FN2, PTB2_FN),
+ PINMUX_DATA(IRQ9_MARK, PS0_9_FN1, PTB1_FN),
+ PINMUX_DATA(ON_CLE_MARK, PS0_9_FN2, PTB1_FN),
+ PINMUX_DATA(IRQ8_MARK, PS0_8_FN1, PTB0_FN),
+ PINMUX_DATA(TCLK_MARK, PS0_8_FN2, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(IRQ7_MARK, PS0_7_FN1, PTC7_FN),
+ PINMUX_DATA(PWMU0_MARK, PS0_7_FN2, PTC7_FN),
+ PINMUX_DATA(IRQ6_MARK, PS0_6_FN1, PTC6_FN),
+ PINMUX_DATA(PWMU1_MARK, PS0_6_FN2, PTC6_FN),
+ PINMUX_DATA(IRQ5_MARK, PS0_5_FN1, PTC5_FN),
+ PINMUX_DATA(PWMU2_MARK, PS0_5_FN2, PTC5_FN),
+ PINMUX_DATA(IRQ4_MARK, PS0_4_FN1, PTC4_FN),
+ PINMUX_DATA(PWMU3_MARK, PS0_4_FN2, PTC4_FN),
+ PINMUX_DATA(IRQ3_MARK, PS0_3_FN1, PTC3_FN),
+ PINMUX_DATA(PWMU4_MARK, PS0_3_FN2, PTC3_FN),
+ PINMUX_DATA(IRQ2_MARK, PS0_2_FN1, PTC2_FN),
+ PINMUX_DATA(PWMU5_MARK, PS0_2_FN2, PTC2_FN),
+ PINMUX_DATA(IRQ1_MARK, PTC1_FN),
+ PINMUX_DATA(IRQ0_MARK, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(SP0_MOSI_MARK, PTD7_FN),
+ PINMUX_DATA(SP0_MISO_MARK, PTD6_FN),
+ PINMUX_DATA(SP0_SCK_MARK, PTD5_FN),
+ PINMUX_DATA(SP0_SCK_FB_MARK, PTD4_FN),
+ PINMUX_DATA(SP0_SS0_MARK, PTD3_FN),
+ PINMUX_DATA(SP0_SS1_MARK, PS1_10_FN1, PTD2_FN),
+ PINMUX_DATA(DREQ0_MARK, PS1_10_FN2, PTD2_FN),
+ PINMUX_DATA(SP0_SS2_MARK, PS1_9_FN1, PTD1_FN),
+ PINMUX_DATA(DACK0_MARK, PS1_9_FN2, PTD1_FN),
+ PINMUX_DATA(SP0_SS3_MARK, PS1_8_FN1, PTD0_FN),
+ PINMUX_DATA(TEND0_MARK, PS1_8_FN2, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(RMII0_CRS_DV_MARK, PTE7_FN),
+ PINMUX_DATA(RMII0_TXD1_MARK, PTE6_FN),
+ PINMUX_DATA(RMII0_TXD0_MARK, PTE5_FN),
+ PINMUX_DATA(RMII0_TXEN_MARK, PTE4_FN),
+ PINMUX_DATA(RMII0_REFCLK_MARK, PTE3_FN),
+ PINMUX_DATA(RMII0_RXD1_MARK, PTE2_FN),
+ PINMUX_DATA(RMII0_RXD0_MARK, PTE1_FN),
+ PINMUX_DATA(RMII0_RX_ER_MARK, PTE0_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(RMII1_CRS_DV_MARK, PTF7_FN),
+ PINMUX_DATA(RMII1_TXD1_MARK, PTF6_FN),
+ PINMUX_DATA(RMII1_TXD0_MARK, PTF5_FN),
+ PINMUX_DATA(RMII1_TXEN_MARK, PTF4_FN),
+ PINMUX_DATA(RMII1_REFCLK_MARK, PTF3_FN),
+ PINMUX_DATA(RMII1_RXD1_MARK, PS1_2_FN1, PTF2_FN),
+ PINMUX_DATA(RAC_RI_MARK, PS1_2_FN2, PTF2_FN),
+ PINMUX_DATA(RMII1_RXD0_MARK, PTF1_FN),
+ PINMUX_DATA(RMII1_RX_ER_MARK, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(BOOTFMS_MARK, PTG7_FN),
+ PINMUX_DATA(BOOTWP_MARK, PTG6_FN),
+ PINMUX_DATA(A25_MARK, PS2_13_FN1, PTG5_FN),
+ PINMUX_DATA(MMCCLK_MARK, PS2_13_FN2, PTG5_FN),
+ PINMUX_DATA(A24_MARK, PS2_12_FN1, PTG4_FN),
+ PINMUX_DATA(MMCCMD_MARK, PS2_12_FN2, PTG4_FN),
+ PINMUX_DATA(SERIRQ_MARK, PTG3_FN),
+ PINMUX_DATA(WDTOVF_MARK, PTG2_FN),
+ PINMUX_DATA(LPCPD_MARK, PTG1_FN),
+ PINMUX_DATA(LDRQ_MARK, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(SP1_MOSI_MARK, PS2_7_FN1, PTH7_FN),
+ PINMUX_DATA(TEND1_MARK, PS2_7_FN2, PTH7_FN),
+ PINMUX_DATA(SP1_MISO_MARK, PS2_6_FN1, PTH6_FN),
+ PINMUX_DATA(DREQ1_MARK, PS2_6_FN2, PTH6_FN),
+ PINMUX_DATA(SP1_SCK_MARK, PS2_5_FN1, PTH5_FN),
+ PINMUX_DATA(DACK1_MARK, PS2_5_FN2, PTH5_FN),
+ PINMUX_DATA(SP1_SCK_FB_MARK, PS2_4_FN1, PTH4_FN),
+ PINMUX_DATA(ADTRG1_MARK, PS2_4_FN2, PTH4_FN),
+ PINMUX_DATA(SP1_SS0_MARK, PTH3_FN),
+ PINMUX_DATA(SP1_SS1_MARK, PS2_2_FN1, PTH2_FN),
+ PINMUX_DATA(ADTRG0_MARK, PS2_2_FN2, PTH2_FN),
+ PINMUX_DATA(WP_MARK, PTH1_FN),
+ PINMUX_DATA(FMS0_MARK, PTH0_FN),
+
+ /* PTI FN */
+ PINMUX_DATA(D15_MARK, PS3_15_FN1, PTI7_FN),
+ PINMUX_DATA(SD_WP_MARK, PS3_15_FN2, PTI7_FN),
+ PINMUX_DATA(D14_MARK, PS3_14_FN1, PTI6_FN),
+ PINMUX_DATA(SD_CD_MARK, PS3_14_FN2, PTI6_FN),
+ PINMUX_DATA(D13_MARK, PS3_13_FN1, PTI5_FN),
+ PINMUX_DATA(SD_CLK_MARK, PS3_13_FN2, PTI5_FN),
+ PINMUX_DATA(D12_MARK, PS3_12_FN1, PTI4_FN),
+ PINMUX_DATA(SD_CMD_MARK, PS3_12_FN2, PTI4_FN),
+ PINMUX_DATA(D11_MARK, PS3_11_FN1, PTI3_FN),
+ PINMUX_DATA(SD_D3_MARK, PS3_11_FN2, PTI3_FN),
+ PINMUX_DATA(D10_MARK, PS3_10_FN1, PTI2_FN),
+ PINMUX_DATA(SD_D2_MARK, PS3_10_FN2, PTI2_FN),
+ PINMUX_DATA(D9_MARK, PS3_9_FN1, PTI1_FN),
+ PINMUX_DATA(SD_D1_MARK, PS3_9_FN2, PTI1_FN),
+ PINMUX_DATA(D8_MARK, PS3_8_FN1, PTI0_FN),
+ PINMUX_DATA(SD_D0_MARK, PS3_8_FN2, PTI0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(RTS3_MARK, PTJ6_FN),
+ PINMUX_DATA(CTS3_MARK, PTJ5_FN),
+ PINMUX_DATA(TXD3_MARK, PTJ4_FN),
+ PINMUX_DATA(RXD3_MARK, PTJ3_FN),
+ PINMUX_DATA(RTS4_MARK, PTJ2_FN),
+ PINMUX_DATA(RXD4_MARK, PTJ1_FN),
+ PINMUX_DATA(TXD4_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(COM2_TXD_MARK, PS3_7_FN1, PTK7_FN),
+ PINMUX_DATA(SCK2_MARK, PS3_7_FN2, PTK7_FN),
+ PINMUX_DATA(COM2_RXD_MARK, PTK6_FN),
+ PINMUX_DATA(COM2_RTS_MARK, PTK5_FN),
+ PINMUX_DATA(COM2_CTS_MARK, PTK4_FN),
+ PINMUX_DATA(COM2_DTR_MARK, PTK3_FN),
+ PINMUX_DATA(COM2_DSR_MARK, PS3_2_FN1, PTK2_FN),
+ PINMUX_DATA(SCK4_MARK, PS3_2_FN2, PTK2_FN),
+ PINMUX_DATA(COM2_DCD_MARK, PS3_1_FN1, PTK1_FN),
+ PINMUX_DATA(SCK3_MARK, PS3_1_FN2, PTK1_FN),
+ PINMUX_DATA(CLKOUT_MARK, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(RAC_RXD_MARK, PS4_14_FN1, PTL6_FN),
+ PINMUX_DATA(RXD2_MARK, PS4_14_FN2, PTL6_FN),
+ PINMUX_DATA(RAC_RTS_MARK, PS4_13_FN1, PTL5_FN),
+ PINMUX_DATA(CS5_MARK, PS4_13_FN2, PTL5_FN),
+ PINMUX_DATA(RAC_CTS_MARK, PS4_12_FN1, PTL4_FN),
+ PINMUX_DATA(CS6_MARK, PS4_12_FN2, PTL4_FN),
+ PINMUX_DATA(RAC_DTR_MARK, PTL3_FN),
+ PINMUX_DATA(RAC_DSR_MARK, PS4_10_FN1, PTL2_FN),
+ PINMUX_DATA(AUDSYNC_MARK, PS4_10_FN2, PTL2_FN),
+ PINMUX_DATA(RAC_DCD_MARK, PS4_9_FN1, PTL1_FN),
+ PINMUX_DATA(AUDCK_MARK, PS4_9_FN2, PTL1_FN),
+ PINMUX_DATA(RAC_TXD_MARK, PS4_8_FN1, PTL0_FN),
+ PINMUX_DATA(TXD2_MARK, PS4_8_FN2, PTL0_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(CS4_MARK, PTM7_FN),
+ PINMUX_DATA(RD_MARK, PTM6_FN),
+ PINMUX_DATA(WE0_MARK, PTM5_FN),
+ PINMUX_DATA(CS0_MARK, PTM4_FN),
+ PINMUX_DATA(SDA6_MARK, PTM3_FN),
+ PINMUX_DATA(SCL6_MARK, PTM2_FN),
+ PINMUX_DATA(SDA7_MARK, PTM1_FN),
+ PINMUX_DATA(SCL7_MARK, PTM0_FN),
+
+ /* PTN FN */
+ PINMUX_DATA(VBUS_EN_MARK, PTN6_FN),
+ PINMUX_DATA(VBUS_OC_MARK, PTN5_FN),
+ PINMUX_DATA(JMCTCK_MARK, PS4_4_FN1, PTN4_FN),
+ PINMUX_DATA(SGPIO1_CLK_MARK, PS4_4_FN2, PTN4_FN),
+ PINMUX_DATA(JMCTMS_MARK, PS4_3_FN1, PTN3_FN),
+ PINMUX_DATA(SGPIO1_LOAD_MARK, PS4_3_FN2, PTN3_FN),
+ PINMUX_DATA(JMCTDO_MARK, PS4_2_FN1, PTN2_FN),
+ PINMUX_DATA(SGPIO1_DO_MARK, PS4_2_FN2, PTN2_FN),
+ PINMUX_DATA(JMCTDI_MARK, PS4_1_FN1, PTN1_FN),
+ PINMUX_DATA(SGPIO1_DI_MARK, PS4_1_FN2, PTN1_FN),
+ PINMUX_DATA(JMCTRST_MARK, PS4_0_FN1, PTN0_FN),
+ PINMUX_DATA(SUB_CLKIN_MARK, PS4_0_FN2, PTN0_FN),
+
+ /* PTO FN */
+ PINMUX_DATA(SGPIO0_CLK_MARK, PTO7_FN),
+ PINMUX_DATA(SGPIO0_LOAD_MARK, PTO6_FN),
+ PINMUX_DATA(SGPIO0_DI_MARK, PTO5_FN),
+ PINMUX_DATA(SGPIO0_DO_MARK, PTO4_FN),
+ PINMUX_DATA(SGPIO2_CLK_MARK, PS5_11_FN1, PTO3_FN),
+ PINMUX_DATA(COM1_TXD_MARK, PS5_11_FN2, PTO3_FN),
+ PINMUX_DATA(SGPIO2_LOAD_MARK, PS5_10_FN1, PTO2_FN),
+ PINMUX_DATA(COM1_RXD_MARK, PS5_10_FN2, PTO2_FN),
+ PINMUX_DATA(SGPIO2_DI_MARK, PS5_9_FN1, PTO1_FN),
+ PINMUX_DATA(COM1_RTS_MARK, PS5_9_FN2, PTO1_FN),
+ PINMUX_DATA(SGPIO2_DO_MARK, PS5_8_FN1, PTO0_FN),
+ PINMUX_DATA(COM1_CTS_MARK, PS5_8_FN2, PTO0_FN),
+
+ /* PTP FN */
+
+ /* PTQ FN */
+ PINMUX_DATA(LAD3_MARK, PTQ6_FN),
+ PINMUX_DATA(LAD2_MARK, PTQ5_FN),
+ PINMUX_DATA(LAD1_MARK, PTQ4_FN),
+ PINMUX_DATA(LAD0_MARK, PTQ3_FN),
+ PINMUX_DATA(LFRAME_MARK, PTQ2_FN),
+ PINMUX_DATA(LRESET_MARK, PTQ1_FN),
+ PINMUX_DATA(LCLK_MARK, PTQ0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(SDA8_MARK, PTR7_FN), /* DDC3? */
+ PINMUX_DATA(SCL8_MARK, PTR6_FN), /* DDC2? */
+ PINMUX_DATA(SDA2_MARK, PTR5_FN),
+ PINMUX_DATA(SCL2_MARK, PTR4_FN),
+ PINMUX_DATA(SDA1_MARK, PTR3_FN),
+ PINMUX_DATA(SCL1_MARK, PTR2_FN),
+ PINMUX_DATA(SDA0_MARK, PTR1_FN),
+ PINMUX_DATA(SCL0_MARK, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(SDA9_MARK, PTS7_FN), /* DDC1? */
+ PINMUX_DATA(SCL9_MARK, PTS6_FN), /* DDC0? */
+ PINMUX_DATA(SDA5_MARK, PTS5_FN),
+ PINMUX_DATA(SCL5_MARK, PTS4_FN),
+ PINMUX_DATA(SDA4_MARK, PTS3_FN),
+ PINMUX_DATA(SCL4_MARK, PTS2_FN),
+ PINMUX_DATA(SDA3_MARK, PTS1_FN),
+ PINMUX_DATA(SCL3_MARK, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(PWMX7_MARK, PS5_7_FN1, PTT7_FN),
+ PINMUX_DATA(AUDATA3_MARK, PS5_7_FN2, PTT7_FN),
+ PINMUX_DATA(PWMX6_MARK, PS5_6_FN1, PTT6_FN),
+ PINMUX_DATA(AUDATA2_MARK, PS5_6_FN2, PTT6_FN),
+ PINMUX_DATA(PWMX5_MARK, PS5_5_FN1, PTT5_FN),
+ PINMUX_DATA(AUDATA1_MARK, PS5_5_FN2, PTT5_FN),
+ PINMUX_DATA(PWMX4_MARK, PS5_4_FN1, PTT4_FN),
+ PINMUX_DATA(AUDATA0_MARK, PS5_4_FN2, PTT4_FN),
+ PINMUX_DATA(PWMX3_MARK, PS5_3_FN1, PTT3_FN),
+ PINMUX_DATA(STATUS1_MARK, PS5_3_FN2, PTT3_FN),
+ PINMUX_DATA(PWMX2_MARK, PS5_2_FN1, PTT2_FN),
+ PINMUX_DATA(STATUS0_MARK, PS5_2_FN2, PTT2_FN),
+ PINMUX_DATA(PWMX1_MARK, PTT1_FN),
+ PINMUX_DATA(PWMX0_MARK, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(LGPIO7_MARK, PS6_15_FN1, PTU7_FN),
+ PINMUX_DATA(APMONCTL_O_MARK, PS6_15_FN2, PTU7_FN),
+ PINMUX_DATA(LGPIO6_MARK, PS6_14_FN1, PTU6_FN),
+ PINMUX_DATA(APMPWBTOUT_O_MARK, PS6_14_FN2, PTU6_FN),
+ PINMUX_DATA(LGPIO5_MARK, PS6_13_FN1, PTU5_FN),
+ PINMUX_DATA(APMSCI_O_MARK, PS6_13_FN2, PTU5_FN),
+ PINMUX_DATA(LGPIO4_MARK, PS6_12_FN1, PTU4_FN),
+ PINMUX_DATA(APMVDDON_MARK, PS6_12_FN2, PTU4_FN),
+ PINMUX_DATA(LGPIO3_MARK, PS6_11_FN1, PTU3_FN),
+ PINMUX_DATA(APMSLPBTN_MARK, PS6_11_FN2, PTU3_FN),
+ PINMUX_DATA(LGPIO2_MARK, PS6_10_FN1, PTU2_FN),
+ PINMUX_DATA(APMPWRBTN_MARK, PS6_10_FN2, PTU2_FN),
+ PINMUX_DATA(LGPIO1_MARK, PS6_9_FN1, PTU1_FN),
+ PINMUX_DATA(APMS5N_MARK, PS6_9_FN2, PTU1_FN),
+ PINMUX_DATA(LGPIO0_MARK, PS6_8_FN1, PTU0_FN),
+ PINMUX_DATA(APMS3N_MARK, PS6_8_FN2, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(A23_MARK, PS6_7_FN1, PTV7_FN),
+ PINMUX_DATA(COM2_RI_MARK, PS6_7_FN2, PTV7_FN),
+ PINMUX_DATA(A22_MARK, PS6_6_FN1, PTV6_FN),
+ PINMUX_DATA(R_SPI_MOSI_MARK, PS6_6_FN2, PTV6_FN),
+ PINMUX_DATA(A21_MARK, PS6_5_FN1, PTV5_FN),
+ PINMUX_DATA(R_SPI_MISO_MARK, PS6_5_FN2, PTV5_FN),
+ PINMUX_DATA(A20_MARK, PS6_4_FN1, PTV4_FN),
+ PINMUX_DATA(R_SPI_RSPCK_MARK, PS6_4_FN2, PTV4_FN),
+ PINMUX_DATA(A19_MARK, PS6_3_FN1, PTV3_FN),
+ PINMUX_DATA(R_SPI_SSL0_MARK, PS6_3_FN2, PTV3_FN),
+ PINMUX_DATA(A18_MARK, PS6_2_FN1, PTV2_FN),
+ PINMUX_DATA(R_SPI_SSL1_MARK, PS6_2_FN2, PTV2_FN),
+ PINMUX_DATA(A17_MARK, PS6_1_FN1, PTV1_FN),
+ PINMUX_DATA(EVENT7_MARK, PS6_1_FN2, PTV1_FN),
+ PINMUX_DATA(A16_MARK, PS6_0_FN1, PTV0_FN),
+ PINMUX_DATA(EVENT6_MARK, PS6_0_FN2, PTV0_FN),
+
+ /* PTW FN */
+ PINMUX_DATA(A15_MARK, PS7_15_FN1, PTW7_FN),
+ PINMUX_DATA(EVENT5_MARK, PS7_15_FN2, PTW7_FN),
+ PINMUX_DATA(A14_MARK, PS7_14_FN1, PTW6_FN),
+ PINMUX_DATA(EVENT4_MARK, PS7_14_FN2, PTW6_FN),
+ PINMUX_DATA(A13_MARK, PS7_13_FN1, PTW5_FN),
+ PINMUX_DATA(EVENT3_MARK, PS7_13_FN2, PTW5_FN),
+ PINMUX_DATA(A12_MARK, PS7_12_FN1, PTW4_FN),
+ PINMUX_DATA(EVENT2_MARK, PS7_12_FN2, PTW4_FN),
+ PINMUX_DATA(A11_MARK, PS7_11_FN1, PTW3_FN),
+ PINMUX_DATA(EVENT1_MARK, PS7_11_FN2, PTW3_FN),
+ PINMUX_DATA(A10_MARK, PS7_10_FN1, PTW2_FN),
+ PINMUX_DATA(EVENT0_MARK, PS7_10_FN2, PTW2_FN),
+ PINMUX_DATA(A9_MARK, PS7_9_FN1, PTW1_FN),
+ PINMUX_DATA(CTS4_MARK, PS7_9_FN2, PTW1_FN),
+ PINMUX_DATA(A8_MARK, PS7_8_FN1, PTW0_FN),
+ PINMUX_DATA(CTS2_MARK, PS7_8_FN2, PTW0_FN),
+
+ /* PTX FN */
+ PINMUX_DATA(A7_MARK, PS7_7_FN1, PTX7_FN),
+ PINMUX_DATA(RTS2_MARK, PS7_7_FN2, PTX7_FN),
+ PINMUX_DATA(A6_MARK, PS7_6_FN1, PTX6_FN),
+ PINMUX_DATA(SIM_D_MARK, PS7_6_FN2, PTX6_FN),
+ PINMUX_DATA(A5_MARK, PS7_5_FN1, PTX5_FN),
+ PINMUX_DATA(SIM_CLK_MARK, PS7_5_FN2, PTX5_FN),
+ PINMUX_DATA(A4_MARK, PS7_4_FN1, PTX4_FN),
+ PINMUX_DATA(SIM_RST_MARK, PS7_4_FN2, PTX4_FN),
+ PINMUX_DATA(A3_MARK, PTX3_FN),
+ PINMUX_DATA(A2_MARK, PTX2_FN),
+ PINMUX_DATA(A1_MARK, PTX1_FN),
+ PINMUX_DATA(A0_MARK, PTX0_FN),
+
+ /* PTY FN */
+ PINMUX_DATA(D7_MARK, PTY7_FN),
+ PINMUX_DATA(D6_MARK, PTY6_FN),
+ PINMUX_DATA(D5_MARK, PTY5_FN),
+ PINMUX_DATA(D4_MARK, PTY4_FN),
+ PINMUX_DATA(D3_MARK, PTY3_FN),
+ PINMUX_DATA(D2_MARK, PTY2_FN),
+ PINMUX_DATA(D1_MARK, PTY1_FN),
+ PINMUX_DATA(D0_MARK, PTY0_FN),
+
+ /* PTZ FN */
+ PINMUX_DATA(MMCDAT7_MARK, PS8_15_FN1, PTZ7_FN),
+ PINMUX_DATA(ON_DQ7_MARK, PS8_15_FN2, PTZ7_FN),
+ PINMUX_DATA(MMCDAT6_MARK, PS8_14_FN1, PTZ6_FN),
+ PINMUX_DATA(ON_DQ6_MARK, PS8_14_FN2, PTZ6_FN),
+ PINMUX_DATA(MMCDAT5_MARK, PS8_13_FN1, PTZ5_FN),
+ PINMUX_DATA(ON_DQ5_MARK, PS8_13_FN2, PTZ5_FN),
+ PINMUX_DATA(MMCDAT4_MARK, PS8_12_FN1, PTZ4_FN),
+ PINMUX_DATA(ON_DQ4_MARK, PS8_12_FN2, PTZ4_FN),
+ PINMUX_DATA(MMCDAT3_MARK, PS8_11_FN1, PTZ3_FN),
+ PINMUX_DATA(ON_DQ3_MARK, PS8_11_FN2, PTZ3_FN),
+ PINMUX_DATA(MMCDAT2_MARK, PS8_10_FN1, PTZ2_FN),
+ PINMUX_DATA(ON_DQ2_MARK, PS8_10_FN2, PTZ2_FN),
+ PINMUX_DATA(MMCDAT1_MARK, PS8_9_FN1, PTZ1_FN),
+ PINMUX_DATA(ON_DQ1_MARK, PS8_9_FN2, PTZ1_FN),
+ PINMUX_DATA(MMCDAT0_MARK, PS8_8_FN1, PTZ0_FN),
+ PINMUX_DATA(ON_DQ0_MARK, PS8_8_FN2, PTZ0_FN),
+};
+
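+/* Map each GPIO_* number to its pin data or function mark enum value. */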
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG7, PTG7_DATA),
+ PINMUX_GPIO(GPIO_PTG6, PTG6_DATA),
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTI */
+ PINMUX_GPIO(GPIO_PTI7, PTI7_DATA),
+ PINMUX_GPIO(GPIO_PTI6, PTI6_DATA),
+ PINMUX_GPIO(GPIO_PTI5, PTI5_DATA),
+ PINMUX_GPIO(GPIO_PTI4, PTI4_DATA),
+ PINMUX_GPIO(GPIO_PTI3, PTI3_DATA),
+ PINMUX_GPIO(GPIO_PTI2, PTI2_DATA),
+ PINMUX_GPIO(GPIO_PTI1, PTI1_DATA),
+ PINMUX_GPIO(GPIO_PTI0, PTI0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTO */
+ PINMUX_GPIO(GPIO_PTO7, PTO7_DATA),
+ PINMUX_GPIO(GPIO_PTO6, PTO6_DATA),
+ PINMUX_GPIO(GPIO_PTO5, PTO5_DATA),
+ PINMUX_GPIO(GPIO_PTO4, PTO4_DATA),
+ PINMUX_GPIO(GPIO_PTO3, PTO3_DATA),
+ PINMUX_GPIO(GPIO_PTO2, PTO2_DATA),
+ PINMUX_GPIO(GPIO_PTO1, PTO1_DATA),
+ PINMUX_GPIO(GPIO_PTO0, PTO0_DATA),
+
+ /* PTP */
+ PINMUX_GPIO(GPIO_PTP7, PTP7_DATA),
+ PINMUX_GPIO(GPIO_PTP6, PTP6_DATA),
+ PINMUX_GPIO(GPIO_PTP5, PTP5_DATA),
+ PINMUX_GPIO(GPIO_PTP4, PTP4_DATA),
+ PINMUX_GPIO(GPIO_PTP3, PTP3_DATA),
+ PINMUX_GPIO(GPIO_PTP2, PTP2_DATA),
+ PINMUX_GPIO(GPIO_PTP1, PTP1_DATA),
+ PINMUX_GPIO(GPIO_PTP0, PTP0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
+ PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
+ PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS7, PTS7_DATA),
+ PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
+ PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT7, PTT7_DATA),
+ PINMUX_GPIO(GPIO_PTT6, PTT6_DATA),
+ PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU7, PTU7_DATA),
+ PINMUX_GPIO(GPIO_PTU6, PTU6_DATA),
+ PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
+ PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
+ PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
+ PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
+ PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+ PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+
+ /* PTA (module: LBSC, RGMII) */
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE1, WE1_MARK),
+ PINMUX_GPIO(GPIO_FN_RDY, RDY_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_MDC, ET0_MDC_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_MDIO, ET0_MDIO_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_MDC, ET1_MDC_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_MDIO, ET1_MDIO_MARK),
+
+ /* PTB (module: INTC, ONFI, TMU) */
+ PINMUX_GPIO(GPIO_FN_IRQ15, IRQ15_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ14, IRQ14_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ13, IRQ13_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ12, IRQ12_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ11, IRQ11_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ10, IRQ10_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ9, IRQ9_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ8, IRQ8_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NRE, ON_NRE_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NWE, ON_NWE_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NWP, ON_NWP_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_NCE0, ON_NCE0_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_R_B0, ON_R_B0_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_ALE, ON_ALE_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_CLE, ON_CLE_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
+
+ /* PTC (module: IRQ, PWMU) */
+ PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU0, PWMU0_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU1, PWMU1_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU2, PWMU2_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU3, PWMU3_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU4, PWMU4_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMU5, PWMU5_MARK),
+
+ /* PTD (module: SPI0, DMAC) */
+ PINMUX_GPIO(GPIO_FN_SP0_MOSI, SP0_MOSI_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_MISO, SP0_MISO_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SCK, SP0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SCK_FB, SP0_SCK_FB_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS0, SP0_SS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS1, SP0_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS2, SP0_SS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS3, SP0_SS3_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+
+ /* PTE (module: RMII) */
+ PINMUX_GPIO(GPIO_FN_RMII0_CRS_DV, RMII0_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_TXD1, RMII0_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_TXD0, RMII0_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_TXEN, RMII0_TXEN_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_REFCLK, RMII0_REFCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_RXD1, RMII0_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_RXD0, RMII0_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII0_RX_ER, RMII0_RX_ER_MARK),
+
+ /* PTF (module: RMII, SerMux) */
+ PINMUX_GPIO(GPIO_FN_RMII1_CRS_DV, RMII1_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_TXD1, RMII1_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_TXD0, RMII1_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_TXEN, RMII1_TXEN_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_REFCLK, RMII1_REFCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_RXD1, RMII1_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_RXD0, RMII1_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RMII1_RX_ER, RMII1_RX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_RI, RAC_RI_MARK),
+
+ /* PTG (module: system, LBSC, LPC, WDT, eMMC) */
+ PINMUX_GPIO(GPIO_FN_BOOTFMS, BOOTFMS_MARK),
+ PINMUX_GPIO(GPIO_FN_BOOTWP, BOOTWP_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_SERIRQ, SERIRQ_MARK),
+ PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK),
+ PINMUX_GPIO(GPIO_FN_LPCPD, LPCPD_MARK),
+ PINMUX_GPIO(GPIO_FN_LDRQ, LDRQ_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK),
+
+ /* PTH (module: SPI1, LPC, DMAC, ADC) */
+ PINMUX_GPIO(GPIO_FN_SP1_MOSI, SP1_MOSI_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_MISO, SP1_MISO_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SCK, SP1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SCK_FB, SP1_SCK_FB_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SS0, SP1_SS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SS1, SP1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_WP, WP_MARK),
+ PINMUX_GPIO(GPIO_FN_FMS0, FMS0_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG1, ADTRG1_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG0, ADTRG0_MARK),
+
+ /* PTI (module: LBSC, SDHI) */
+ PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
+ PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
+ PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
+ PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
+ PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
+ PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
+ PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
+ PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_WP, SD_WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CD, SD_CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CLK, SD_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CMD, SD_CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D3, SD_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D2, SD_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D1, SD_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D0, SD_D0_MARK),
+
+ /* PTJ (module: SCIF234, SERMUX) */
+ PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS4, RTS4_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
+
+ /* PTK (module: SERMUX, LBSC, SCIF) */
+ PINMUX_GPIO(GPIO_FN_COM2_TXD, COM2_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RXD, COM2_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RTS, COM2_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_CTS, COM2_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_DTR, COM2_DTR_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_DSR, COM2_DSR_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_DCD, COM2_DCD_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKOUT, CLKOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+
+ /* PTL (module: SERMUX, SCIF, LBSC, AUD) */
+ PINMUX_GPIO(GPIO_FN_RAC_RXD, RAC_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_RTS, RAC_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_CTS, RAC_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_DTR, RAC_DTR_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_DSR, RAC_DSR_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_DCD, RAC_DCD_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_TXD, RAC_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+
+ /* PTM (module: LBSC, IIC) */
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
+ PINMUX_GPIO(GPIO_FN_WE0, WE0_MARK),
+ PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA6, SDA6_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL6, SCL6_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA7, SDA7_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL7, SCL7_MARK),
+
+ /* PTN (module: USB, JMC, SGPIO, WDT) */
+ PINMUX_GPIO(GPIO_FN_VBUS_EN, VBUS_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_VBUS_OC, VBUS_OC_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTCK, JMCTCK_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTMS, JMCTMS_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTDO, JMCTDO_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTDI, JMCTDI_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTRST, JMCTRST_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_CLK, SGPIO1_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_LOAD, SGPIO1_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_DI, SGPIO1_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_DO, SGPIO1_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_SUB_CLKIN, SUB_CLKIN_MARK),
+
+ /* PTO (module: SGPIO, SerMux) */
+ PINMUX_GPIO(GPIO_FN_SGPIO0_CLK, SGPIO0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO0_LOAD, SGPIO0_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO0_DI, SGPIO0_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO0_DO, SGPIO0_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_CLK, SGPIO2_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_LOAD, SGPIO2_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_DI, SGPIO2_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO2_DO, SGPIO2_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_TXD, COM1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_RXD, COM1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_RTS, COM1_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_CTS, COM1_CTS_MARK),
+
+ /* PTP (module: EVC, ADC) */
+
+ /* PTQ (module: LPC) */
+ PINMUX_GPIO(GPIO_FN_LAD3, LAD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LAD2, LAD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LAD1, LAD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LAD0, LAD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LFRAME, LFRAME_MARK),
+ PINMUX_GPIO(GPIO_FN_LRESET, LRESET_MARK),
+ PINMUX_GPIO(GPIO_FN_LCLK, LCLK_MARK),
+
+ /* PTR (module: GRA, IIC) */
+ PINMUX_GPIO(GPIO_FN_DDC3, DDC3_MARK),
+ PINMUX_GPIO(GPIO_FN_DDC2, DDC2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA8, SDA8_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL8, SCL8_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+
+ /* PTS (module: GRA, IIC) */
+ PINMUX_GPIO(GPIO_FN_DDC1, DDC1_MARK),
+ PINMUX_GPIO(GPIO_FN_DDC0, DDC0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA9, SDA9_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL9, SCL9_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA5, SDA5_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL5, SCL5_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA4, SDA4_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL4, SCL4_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK),
+
+ /* PTT (module: PWMX, AUD) */
+ PINMUX_GPIO(GPIO_FN_PWMX7, PWMX7_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX6, PWMX6_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX5, PWMX5_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX4, PWMX4_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX3, PWMX3_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX2, PWMX2_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX1, PWMX1_MARK),
+ PINMUX_GPIO(GPIO_FN_PWMX0, PWMX0_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+
+ /* PTU (module: LPC, APM) */
+ PINMUX_GPIO(GPIO_FN_LGPIO7, LGPIO7_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO6, LGPIO6_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO5, LGPIO5_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO4, LGPIO4_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO3, LGPIO3_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO2, LGPIO2_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO1, LGPIO1_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO0, LGPIO0_MARK),
+ PINMUX_GPIO(GPIO_FN_APMONCTL_O, APMONCTL_O_MARK),
+ PINMUX_GPIO(GPIO_FN_APMPWBTOUT_O, APMPWBTOUT_O_MARK),
+ PINMUX_GPIO(GPIO_FN_APMSCI_O, APMSCI_O_MARK),
+ PINMUX_GPIO(GPIO_FN_APMVDDON, APMVDDON_MARK),
+ PINMUX_GPIO(GPIO_FN_APMSLPBTN, APMSLPBTN_MARK),
+ PINMUX_GPIO(GPIO_FN_APMPWRBTN, APMPWRBTN_MARK),
+ PINMUX_GPIO(GPIO_FN_APMS5N, APMS5N_MARK),
+ PINMUX_GPIO(GPIO_FN_APMS3N, APMS3N_MARK),
+
+ /* PTV (module: LBSC, SerMux, R-SPI, EVC, GRA) */
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
+ PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
+ PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
+ PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
+ PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RI, COM2_RI_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_MOSI, R_SPI_MOSI_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_MISO, R_SPI_MISO_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_RSPCK, R_SPI_RSPCK_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_SSL0, R_SPI_SSL0_MARK),
+ PINMUX_GPIO(GPIO_FN_R_SPI_SSL1, R_SPI_SSL1_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT7, EVENT7_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT6, EVENT6_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_DI, VBIOS_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_DO, VBIOS_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_CLK, VBIOS_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_VBIOS_CS, VBIOS_CS_MARK),
+
+ /* PTW (module: LBSC, EVC, SCIF) */
+ PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
+ PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
+ PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
+ PINMUX_GPIO(GPIO_FN_A13, A13_MARK),
+ PINMUX_GPIO(GPIO_FN_A12, A12_MARK),
+ PINMUX_GPIO(GPIO_FN_A11, A11_MARK),
+ PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
+ PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
+ PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT5, EVENT5_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT4, EVENT4_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT3, EVENT3_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT2, EVENT2_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT1, EVENT1_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT0, EVENT0_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS4, CTS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS2, CTS2_MARK),
+
+ /* PTX (module: LBSC) */
+ PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
+ PINMUX_GPIO(GPIO_FN_A6, A6_MARK),
+ PINMUX_GPIO(GPIO_FN_A5, A5_MARK),
+ PINMUX_GPIO(GPIO_FN_A4, A4_MARK),
+ PINMUX_GPIO(GPIO_FN_A3, A3_MARK),
+ PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
+ PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS2, RTS2_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK),
+
+ /* PTY (module: LBSC) */
+ PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
+ PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
+ PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
+ PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
+ PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
+ PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
+ PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
+ PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+
+ /* PTZ (module: eMMC, ONFI) */
+ PINMUX_GPIO(GPIO_FN_MMCDAT7, MMCDAT7_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT6, MMCDAT6_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT5, MMCDAT5_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT4, MMCDAT4_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT3, MMCDAT3_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT2, MMCDAT2_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT1, MMCDAT1_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT0, MMCDAT0_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ7, ON_DQ7_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ6, ON_DQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ5, ON_DQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ4, ON_DQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ3, ON_DQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ2, ON_DQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ1, ON_DQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_ON_DQ0, ON_DQ0_MARK),
+};
+
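+/*
+ * Pin function control registers.  Each PINMUX_CFG_REG(name, address,
+ * register width, bits per field) entry is followed by the mux states of
+ * every field, MSB first; with 2-bit fields each pin selects between
+ * function, output, input and input with pull-up (0 where a state is
+ * not available).
+ */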
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2) {
+ PTA7_FN, PTA7_OUT, PTA7_IN, PTA7_IN_PU,
+ PTA6_FN, PTA6_OUT, PTA6_IN, PTA6_IN_PU,
+ PTA5_FN, PTA5_OUT, PTA5_IN, PTA5_IN_PU,
+ PTA4_FN, PTA4_OUT, PTA4_IN, PTA4_IN_PU,
+ PTA3_FN, PTA3_OUT, PTA3_IN, PTA3_IN_PU,
+ PTA2_FN, PTA2_OUT, PTA2_IN, PTA2_IN_PU,
+ PTA1_FN, PTA1_OUT, PTA1_IN, PTA1_IN_PU,
+ PTA0_FN, PTA0_OUT, PTA0_IN, PTA0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xffec0002, 16, 2) {
+ PTB7_FN, PTB7_OUT, PTB7_IN, 0,
+ PTB6_FN, PTB6_OUT, PTB6_IN, 0,
+ PTB5_FN, PTB5_OUT, PTB5_IN, 0,
+ PTB4_FN, PTB4_OUT, PTB4_IN, 0,
+ PTB3_FN, PTB3_OUT, PTB3_IN, 0,
+ PTB2_FN, PTB2_OUT, PTB2_IN, 0,
+ PTB1_FN, PTB1_OUT, PTB1_IN, 0,
+ PTB0_FN, PTB0_OUT, PTB0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xffec0004, 16, 2) {
+ PTC7_FN, PTC7_OUT, PTC7_IN, 0,
+ PTC6_FN, PTC6_OUT, PTC6_IN, 0,
+ PTC5_FN, PTC5_OUT, PTC5_IN, 0,
+ PTC4_FN, PTC4_OUT, PTC4_IN, 0,
+ PTC3_FN, PTC3_OUT, PTC3_IN, 0,
+ PTC2_FN, PTC2_OUT, PTC2_IN, 0,
+ PTC1_FN, PTC1_OUT, PTC1_IN, 0,
+ PTC0_FN, PTC0_OUT, PTC0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xffec0006, 16, 2) {
+ PTD7_FN, PTD7_OUT, PTD7_IN, PTD7_IN_PU,
+ PTD6_FN, PTD6_OUT, PTD6_IN, PTD6_IN_PU,
+ PTD5_FN, PTD5_OUT, PTD5_IN, PTD5_IN_PU,
+ PTD4_FN, PTD4_OUT, PTD4_IN, PTD4_IN_PU,
+ PTD3_FN, PTD3_OUT, PTD3_IN, PTD3_IN_PU,
+ PTD2_FN, PTD2_OUT, PTD2_IN, PTD2_IN_PU,
+ PTD1_FN, PTD1_OUT, PTD1_IN, PTD1_IN_PU,
+ PTD0_FN, PTD0_OUT, PTD0_IN, PTD0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PECR", 0xffec0008, 16, 2) {
+ PTE7_FN, PTE7_OUT, PTE7_IN, PTE7_IN_PU,
+ PTE6_FN, PTE6_OUT, PTE6_IN, PTE6_IN_PU,
+ PTE5_FN, PTE5_OUT, PTE5_IN, PTE5_IN_PU,
+ PTE4_FN, PTE4_OUT, PTE4_IN, PTE4_IN_PU,
+ PTE3_FN, PTE3_OUT, PTE3_IN, PTE3_IN_PU,
+ PTE2_FN, PTE2_OUT, PTE2_IN, PTE2_IN_PU,
+ PTE1_FN, PTE1_OUT, PTE1_IN, PTE1_IN_PU,
+ PTE0_FN, PTE0_OUT, PTE0_IN, PTE0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xffec000a, 16, 2) {
+ PTF7_FN, PTF7_OUT, PTF7_IN, PTF7_IN_PU,
+ PTF6_FN, PTF6_OUT, PTF6_IN, PTF6_IN_PU,
+ PTF5_FN, PTF5_OUT, PTF5_IN, PTF5_IN_PU,
+ PTF4_FN, PTF4_OUT, PTF4_IN, PTF4_IN_PU,
+ PTF3_FN, PTF3_OUT, PTF3_IN, PTF3_IN_PU,
+ PTF2_FN, PTF2_OUT, PTF2_IN, PTF2_IN_PU,
+ PTF1_FN, PTF1_OUT, PTF1_IN, PTF1_IN_PU,
+ PTF0_FN, PTF0_OUT, PTF0_IN, PTF0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xffec000c, 16, 2) {
+ PTG7_FN, PTG7_OUT, PTG7_IN, PTG7_IN_PU,
+ PTG6_FN, PTG6_OUT, PTG6_IN, PTG6_IN_PU,
+ PTG5_FN, PTG5_OUT, PTG5_IN, 0,
+ PTG4_FN, PTG4_OUT, PTG4_IN, PTG4_IN_PU,
+ PTG3_FN, PTG3_OUT, PTG3_IN, 0,
+ PTG2_FN, PTG2_OUT, PTG2_IN, 0,
+ PTG1_FN, PTG1_OUT, PTG1_IN, 0,
+ PTG0_FN, PTG0_OUT, PTG0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xffec000e, 16, 2) {
+ PTH7_FN, PTH7_OUT, PTH7_IN, PTH7_IN_PU,
+ PTH6_FN, PTH6_OUT, PTH6_IN, PTH6_IN_PU,
+ PTH5_FN, PTH5_OUT, PTH5_IN, PTH5_IN_PU,
+ PTH4_FN, PTH4_OUT, PTH4_IN, PTH4_IN_PU,
+ PTH3_FN, PTH3_OUT, PTH3_IN, PTH3_IN_PU,
+ PTH2_FN, PTH2_OUT, PTH2_IN, PTH2_IN_PU,
+ PTH1_FN, PTH1_OUT, PTH1_IN, PTH1_IN_PU,
+ PTH0_FN, PTH0_OUT, PTH0_IN, PTH0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PICR", 0xffec0010, 16, 2) {
+ PTI7_FN, PTI7_OUT, PTI7_IN, PTI7_IN_PU,
+ PTI6_FN, PTI6_OUT, PTI6_IN, PTI6_IN_PU,
+ PTI5_FN, PTI5_OUT, PTI5_IN, 0,
+ PTI4_FN, PTI4_OUT, PTI4_IN, PTI4_IN_PU,
+ PTI3_FN, PTI3_OUT, PTI3_IN, PTI3_IN_PU,
+ PTI2_FN, PTI2_OUT, PTI2_IN, PTI2_IN_PU,
+ PTI1_FN, PTI1_OUT, PTI1_IN, PTI1_IN_PU,
+ PTI0_FN, PTI0_OUT, PTI0_IN, PTI0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xffec0012, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTJ6_FN, PTJ6_OUT, PTJ6_IN, PTJ6_IN_PU,
+ PTJ5_FN, PTJ5_OUT, PTJ5_IN, PTJ5_IN_PU,
+ PTJ4_FN, PTJ4_OUT, PTJ4_IN, PTJ4_IN_PU,
+ PTJ3_FN, PTJ3_OUT, PTJ3_IN, PTJ3_IN_PU,
+ PTJ2_FN, PTJ2_OUT, PTJ2_IN, PTJ2_IN_PU,
+ PTJ1_FN, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU,
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xffec0014, 16, 2) {
+ PTK7_FN, PTK7_OUT, PTK7_IN, PTK7_IN_PU,
+ PTK6_FN, PTK6_OUT, PTK6_IN, PTK6_IN_PU,
+ PTK5_FN, PTK5_OUT, PTK5_IN, PTK5_IN_PU,
+ PTK4_FN, PTK4_OUT, PTK4_IN, PTK4_IN_PU,
+ PTK3_FN, PTK3_OUT, PTK3_IN, PTK3_IN_PU,
+ PTK2_FN, PTK2_OUT, PTK2_IN, PTK2_IN_PU,
+ PTK1_FN, PTK1_OUT, PTK1_IN, PTK1_IN_PU,
+ PTK0_FN, PTK0_OUT, PTK0_IN, PTK0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xffec0016, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTL6_FN, PTL6_OUT, PTL6_IN, PTL6_IN_PU,
+ PTL5_FN, PTL5_OUT, PTL5_IN, PTL5_IN_PU,
+ PTL4_FN, PTL4_OUT, PTL4_IN, PTL4_IN_PU,
+ PTL3_FN, PTL3_OUT, PTL3_IN, PTL3_IN_PU,
+ PTL2_FN, PTL2_OUT, PTL2_IN, PTL2_IN_PU,
+ PTL1_FN, PTL1_OUT, PTL1_IN, PTL1_IN_PU,
+ PTL0_FN, PTL0_OUT, PTL0_IN, PTL0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xffec0018, 16, 2) {
+ PTM7_FN, PTM7_OUT, PTM7_IN, PTM7_IN_PU,
+ PTM6_FN, PTM6_OUT, PTM6_IN, PTM6_IN_PU,
+ PTM5_FN, PTM5_OUT, PTM5_IN, PTM5_IN_PU,
+ PTM4_FN, PTM4_OUT, PTM4_IN, PTM4_IN_PU,
+ PTM3_FN, PTM3_OUT, PTM3_IN, 0,
+ PTM2_FN, PTM2_OUT, PTM2_IN, 0,
+ PTM1_FN, PTM1_OUT, PTM1_IN, 0,
+ PTM0_FN, PTM0_OUT, PTM0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xffec001a, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTN6_FN, PTN6_OUT, PTN6_IN, 0,
+ PTN5_FN, PTN5_OUT, PTN5_IN, 0,
+ PTN4_FN, PTN4_OUT, PTN4_IN, PTN4_IN_PU,
+ PTN3_FN, PTN3_OUT, PTN3_IN, PTN3_IN_PU,
+ PTN2_FN, PTN2_OUT, PTN2_IN, PTN2_IN_PU,
+ PTN1_FN, PTN1_OUT, PTN1_IN, PTN1_IN_PU,
+ PTN0_FN, PTN0_OUT, PTN0_IN, PTN0_IN_PU }
+ },
+ { PINMUX_CFG_REG("POCR", 0xffec001c, 16, 2) {
+ PTO7_FN, PTO7_OUT, PTO7_IN, PTO7_IN_PU,
+ PTO6_FN, PTO6_OUT, PTO6_IN, PTO6_IN_PU,
+ PTO5_FN, PTO5_OUT, PTO5_IN, PTO5_IN_PU,
+ PTO4_FN, PTO4_OUT, PTO4_IN, PTO4_IN_PU,
+ PTO3_FN, PTO3_OUT, PTO3_IN, PTO3_IN_PU,
+ PTO2_FN, PTO2_OUT, PTO2_IN, PTO2_IN_PU,
+ PTO1_FN, PTO1_OUT, PTO1_IN, PTO1_IN_PU,
+ PTO0_FN, PTO0_OUT, PTO0_IN, PTO0_IN_PU }
+ },
+#if 0 /* FIXME: Remove it? */
+ { PINMUX_CFG_REG("PPCR", 0xffec001e, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTP6_FN, PTP6_OUT, PTP6_IN, 0,
+ PTP5_FN, PTP5_OUT, PTP5_IN, 0,
+ PTP4_FN, PTP4_OUT, PTP4_IN, 0,
+ PTP3_FN, PTP3_OUT, PTP3_IN, 0,
+ PTP2_FN, PTP2_OUT, PTP2_IN, 0,
+ PTP1_FN, PTP1_OUT, PTP1_IN, 0,
+ PTP0_FN, PTP0_OUT, PTP0_IN, 0 }
+ },
+#endif
+ { PINMUX_CFG_REG("PQCR", 0xffec0020, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTQ6_FN, PTQ6_OUT, PTQ6_IN, 0,
+ PTQ5_FN, PTQ5_OUT, PTQ5_IN, 0,
+ PTQ4_FN, PTQ4_OUT, PTQ4_IN, 0,
+ PTQ3_FN, PTQ3_OUT, PTQ3_IN, 0,
+ PTQ2_FN, PTQ2_OUT, PTQ2_IN, 0,
+ PTQ1_FN, PTQ1_OUT, PTQ1_IN, 0,
+ PTQ0_FN, PTQ0_OUT, PTQ0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xffec0022, 16, 2) {
+ PTR7_FN, PTR7_OUT, PTR7_IN, 0,
+ PTR6_FN, PTR6_OUT, PTR6_IN, 0,
+ PTR5_FN, PTR5_OUT, PTR5_IN, 0,
+ PTR4_FN, PTR4_OUT, PTR4_IN, 0,
+ PTR3_FN, PTR3_OUT, PTR3_IN, 0,
+ PTR2_FN, PTR2_OUT, PTR2_IN, 0,
+ PTR1_FN, PTR1_OUT, PTR1_IN, 0,
+ PTR0_FN, PTR0_OUT, PTR0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xffec0024, 16, 2) {
+ PTS7_FN, PTS7_OUT, PTS7_IN, 0,
+ PTS6_FN, PTS6_OUT, PTS6_IN, 0,
+ PTS5_FN, PTS5_OUT, PTS5_IN, 0,
+ PTS4_FN, PTS4_OUT, PTS4_IN, 0,
+ PTS3_FN, PTS3_OUT, PTS3_IN, 0,
+ PTS2_FN, PTS2_OUT, PTS2_IN, 0,
+ PTS1_FN, PTS1_OUT, PTS1_IN, 0,
+ PTS0_FN, PTS0_OUT, PTS0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xffec0026, 16, 2) {
+ PTT7_FN, PTT7_OUT, PTT7_IN, PTO7_IN_PU,
+ PTT6_FN, PTT6_OUT, PTT6_IN, PTO6_IN_PU,
+ PTT5_FN, PTT5_OUT, PTT5_IN, PTO5_IN_PU,
+ PTT4_FN, PTT4_OUT, PTT4_IN, PTO4_IN_PU,
+ PTT3_FN, PTT3_OUT, PTT3_IN, PTO3_IN_PU,
+ PTT2_FN, PTT2_OUT, PTT2_IN, PTO2_IN_PU,
+ PTT1_FN, PTT1_OUT, PTT1_IN, PTO1_IN_PU,
+ PTT0_FN, PTT0_OUT, PTT0_IN, PTO0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xffec0028, 16, 2) {
+ PTU7_FN, PTU7_OUT, PTU7_IN, PTU7_IN_PU,
+ PTU6_FN, PTU6_OUT, PTU6_IN, PTU6_IN_PU,
+ PTU5_FN, PTU5_OUT, PTU5_IN, PTU5_IN_PU,
+ PTU4_FN, PTU4_OUT, PTU4_IN, PTU4_IN_PU,
+ PTU3_FN, PTU3_OUT, PTU3_IN, PTU3_IN_PU,
+ PTU2_FN, PTU2_OUT, PTU2_IN, PTU2_IN_PU,
+ PTU1_FN, PTU1_OUT, PTU1_IN, PTU1_IN_PU,
+ PTU0_FN, PTU0_OUT, PTU0_IN, PTU0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xffec002a, 16, 2) {
+ PTV7_FN, PTV7_OUT, PTV7_IN, PTV7_IN_PU,
+ PTV6_FN, PTV6_OUT, PTV6_IN, PTV6_IN_PU,
+ PTV5_FN, PTV5_OUT, PTV5_IN, PTV5_IN_PU,
+ PTV4_FN, PTV4_OUT, PTV4_IN, PTV4_IN_PU,
+ PTV3_FN, PTV3_OUT, PTV3_IN, PTV3_IN_PU,
+ PTV2_FN, PTV2_OUT, PTV2_IN, PTV2_IN_PU,
+ PTV1_FN, PTV1_OUT, PTV1_IN, 0,
+ PTV0_FN, PTV0_OUT, PTV0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xffec002c, 16, 2) {
+ PTW7_FN, PTW7_OUT, PTW7_IN, 0,
+ PTW6_FN, PTW6_OUT, PTW6_IN, 0,
+ PTW5_FN, PTW5_OUT, PTW5_IN, 0,
+ PTW4_FN, PTW4_OUT, PTW4_IN, 0,
+ PTW3_FN, PTW3_OUT, PTW3_IN, 0,
+ PTW2_FN, PTW2_OUT, PTW2_IN, 0,
+ PTW1_FN, PTW1_OUT, PTW1_IN, PTW1_IN_PU,
+ PTW0_FN, PTW0_OUT, PTW0_IN, PTW0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xffec002e, 16, 2) {
+ PTX7_FN, PTX7_OUT, PTX7_IN, PTX7_IN_PU,
+ PTX6_FN, PTX6_OUT, PTX6_IN, PTX6_IN_PU,
+ PTX5_FN, PTX5_OUT, PTX5_IN, PTX5_IN_PU,
+ PTX4_FN, PTX4_OUT, PTX4_IN, PTX4_IN_PU,
+ PTX3_FN, PTX3_OUT, PTX3_IN, PTX3_IN_PU,
+ PTX2_FN, PTX2_OUT, PTX2_IN, PTX2_IN_PU,
+ PTX1_FN, PTX1_OUT, PTX1_IN, PTX1_IN_PU,
+ PTX0_FN, PTX0_OUT, PTX0_IN, PTX0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xffec0030, 16, 2) {
+ PTY7_FN, PTY7_OUT, PTY7_IN, PTY7_IN_PU,
+ PTY6_FN, PTY6_OUT, PTY6_IN, PTY6_IN_PU,
+ PTY5_FN, PTY5_OUT, PTY5_IN, PTY5_IN_PU,
+ PTY4_FN, PTY4_OUT, PTY4_IN, PTY4_IN_PU,
+ PTY3_FN, PTY3_OUT, PTY3_IN, PTY3_IN_PU,
+ PTY2_FN, PTY2_OUT, PTY2_IN, PTY2_IN_PU,
+ PTY1_FN, PTY1_OUT, PTY1_IN, PTY1_IN_PU,
+ PTY0_FN, PTY0_OUT, PTY0_IN, PTY0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xffec0032, 16, 2) {
+ PTZ7_FN, PTZ7_OUT, PTZ7_IN, 0,
+ PTZ6_FN, PTZ6_OUT, PTZ6_IN, 0,
+ PTZ5_FN, PTZ5_OUT, PTZ5_IN, 0,
+ PTZ4_FN, PTZ4_OUT, PTZ4_IN, 0,
+ PTZ3_FN, PTZ3_OUT, PTZ3_IN, 0,
+ PTZ2_FN, PTZ2_OUT, PTZ2_IN, 0,
+ PTZ1_FN, PTZ1_OUT, PTZ1_IN, 0,
+ PTZ0_FN, PTZ0_OUT, PTZ0_IN, 0 }
+ },
+
+ { PINMUX_CFG_REG("PSEL0", 0xffec0070, 16, 1) {
+ PS0_15_FN1, PS0_15_FN2,
+ PS0_14_FN1, PS0_14_FN2,
+ PS0_13_FN1, PS0_13_FN2,
+ PS0_12_FN1, PS0_12_FN2,
+ PS0_11_FN1, PS0_11_FN2,
+ PS0_10_FN1, PS0_10_FN2,
+ PS0_9_FN1, PS0_9_FN2,
+ PS0_8_FN1, PS0_8_FN2,
+ PS0_7_FN1, PS0_7_FN2,
+ PS0_6_FN1, PS0_6_FN2,
+ PS0_5_FN1, PS0_5_FN2,
+ PS0_4_FN1, PS0_4_FN2,
+ PS0_3_FN1, PS0_3_FN2,
+ PS0_2_FN1, PS0_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL1", 0xffec0072, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS1_10_FN1, PS1_10_FN2,
+ PS1_9_FN1, PS1_9_FN2,
+ PS1_8_FN1, PS1_8_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS1_2_FN1, PS1_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL2", 0xffec0074, 16, 1) {
+ 0, 0,
+ 0, 0,
+ PS2_13_FN1, PS2_13_FN2,
+ PS2_12_FN1, PS2_12_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS2_7_FN1, PS2_7_FN2,
+ PS2_6_FN1, PS2_6_FN2,
+ PS2_5_FN1, PS2_5_FN2,
+ PS2_4_FN1, PS2_4_FN2,
+ 0, 0,
+ PS2_2_FN1, PS2_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL3", 0xffec0076, 16, 1) {
+ PS3_15_FN1, PS3_15_FN2,
+ PS3_14_FN1, PS3_14_FN2,
+ PS3_13_FN1, PS3_13_FN2,
+ PS3_12_FN1, PS3_12_FN2,
+ PS3_11_FN1, PS3_11_FN2,
+ PS3_10_FN1, PS3_10_FN2,
+ PS3_9_FN1, PS3_9_FN2,
+ PS3_8_FN1, PS3_8_FN2,
+ PS3_7_FN1, PS3_7_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS3_2_FN1, PS3_2_FN2,
+ PS3_1_FN1, PS3_1_FN2,
+ 0, 0, }
+ },
+
+ { PINMUX_CFG_REG("PSEL4", 0xffec0078, 16, 1) {
+ 0, 0,
+ PS4_14_FN1, PS4_14_FN2,
+ PS4_13_FN1, PS4_13_FN2,
+ PS4_12_FN1, PS4_12_FN2,
+ 0, 0,
+ PS4_10_FN1, PS4_10_FN2,
+ PS4_9_FN1, PS4_9_FN2,
+ PS4_8_FN1, PS4_8_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS4_4_FN1, PS4_4_FN2,
+ PS4_3_FN1, PS4_3_FN2,
+ PS4_2_FN1, PS4_2_FN2,
+ PS4_1_FN1, PS4_1_FN2,
+ PS4_0_FN1, PS4_0_FN2, }
+ },
+ { PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS5_11_FN1, PS5_11_FN2,
+ PS5_10_FN1, PS5_10_FN2,
+ PS5_9_FN1, PS5_9_FN2,
+ PS5_8_FN1, PS5_8_FN2,
+ PS5_7_FN1, PS5_7_FN2,
+ PS5_6_FN1, PS5_6_FN2,
+ PS5_5_FN1, PS5_5_FN2,
+ PS5_4_FN1, PS5_4_FN2,
+ PS5_3_FN1, PS5_3_FN2,
+ PS5_2_FN1, PS5_2_FN2,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1) {
+ PS6_15_FN1, PS6_15_FN2,
+ PS6_14_FN1, PS6_14_FN2,
+ PS6_13_FN1, PS6_13_FN2,
+ PS6_12_FN1, PS6_12_FN2,
+ PS6_11_FN1, PS6_11_FN2,
+ PS6_10_FN1, PS6_10_FN2,
+ PS6_9_FN1, PS6_9_FN2,
+ PS6_8_FN1, PS6_8_FN2,
+ PS6_7_FN1, PS6_7_FN2,
+ PS6_6_FN1, PS6_6_FN2,
+ PS6_5_FN1, PS6_5_FN2,
+ PS6_4_FN1, PS6_4_FN2,
+ PS6_3_FN1, PS6_3_FN2,
+ PS6_2_FN1, PS6_2_FN2,
+ PS6_1_FN1, PS6_1_FN2,
+ PS6_0_FN1, PS6_0_FN2, }
+ },
+ { PINMUX_CFG_REG("PSEL7", 0xffec0082, 16, 1) {
+ PS7_15_FN1, PS7_15_FN2,
+ PS7_14_FN1, PS7_14_FN2,
+ PS7_13_FN1, PS7_13_FN2,
+ PS7_12_FN1, PS7_12_FN2,
+ PS7_11_FN1, PS7_11_FN2,
+ PS7_10_FN1, PS7_10_FN2,
+ PS7_9_FN1, PS7_9_FN2,
+ PS7_8_FN1, PS7_8_FN2,
+ PS7_7_FN1, PS7_7_FN2,
+ PS7_6_FN1, PS7_6_FN2,
+ PS7_5_FN1, PS7_5_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL8", 0xffec0084, 16, 1) {
+ PS8_15_FN1, PS8_15_FN2,
+ PS8_14_FN1, PS8_14_FN2,
+ PS8_13_FN1, PS8_13_FN2,
+ PS8_12_FN1, PS8_12_FN2,
+ PS8_11_FN1, PS8_11_FN2,
+ PS8_10_FN1, PS8_10_FN2,
+ PS8_9_FN1, PS8_9_FN2,
+ PS8_8_FN1, PS8_8_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0, }
+ },
+ {}
+};
+
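+/*
+ * Port data registers.  PINMUX_DATA_REG(name, address, width) is followed
+ * by the data enum ID of each register bit, MSB first (0 for unused bits).
+ */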
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xffec0034, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xffec0036, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xffec0038, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xffec003a, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xffec003c, 8) {
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xffec003e, 8) {
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xffec0040, 8) {
+ PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xffec0042, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PIDR", 0xffec0044, 8) {
+ PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA,
+ PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xffec0046, 8) {
+ 0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xffec0048, 8) {
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xffec004a, 8) {
+ 0, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xffec004c, 8) {
+ PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xffec004e, 8) {
+ 0, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PODR", 0xffec0050, 8) {
+ PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA,
+ PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA }
+ },
+ { PINMUX_DATA_REG("PPDR", 0xffec0052, 8) {
+ PTP7_DATA, PTP6_DATA, PTP5_DATA, PTP4_DATA,
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xffec0054, 8) {
+ 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xffec0056, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xffec0058, 8) {
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xffec005a, 8) {
+ PTT7_DATA, PTT6_DATA, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xffec005c, 8) {
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xffec005e, 8) {
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xffec0060, 8) {
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xffec0062, 8) {
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xffec0064, 8) {
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xffec0066, 8) {
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
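+/*
+ * SoC-wide pinmux description handed to the common sh-pfc core: the enum
+ * ID ranges, the GPIO/function table and the configuration and data
+ * registers defined above.
+ */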
+struct sh_pfc_soc_info sh7757_pinmux_info = {
+ .name = "sh7757_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA0,
+ .last_gpio = GPIO_FN_ON_DQ0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7785.c b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
new file mode 100644
index 000000000000..3b1825d925bb
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
@@ -0,0 +1,1304 @@
+/*
+ * SH7785 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/sh7785.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA,
+ PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA,
+ PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA,
+ PL7_DATA, PL6_DATA, PL5_DATA, PL4_DATA,
+ PL3_DATA, PL2_DATA, PL1_DATA, PL0_DATA,
+ PM1_DATA, PM0_DATA,
+ PN7_DATA, PN6_DATA, PN5_DATA, PN4_DATA,
+ PN3_DATA, PN2_DATA, PN1_DATA, PN0_DATA,
+ PP5_DATA, PP4_DATA, PP3_DATA, PP2_DATA, PP1_DATA, PP0_DATA,
+ PQ4_DATA, PQ3_DATA, PQ2_DATA, PQ1_DATA, PQ0_DATA,
+ PR3_DATA, PR2_DATA, PR1_DATA, PR0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN, PB0_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE5_IN, PE4_IN, PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PG7_IN, PG6_IN, PG5_IN, PG4_IN,
+ PG3_IN, PG2_IN, PG1_IN, PG0_IN,
+ PH7_IN, PH6_IN, PH5_IN, PH4_IN,
+ PH3_IN, PH2_IN, PH1_IN, PH0_IN,
+ PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN,
+ PJ3_IN, PJ2_IN, PJ1_IN, PJ0_IN,
+ PK7_IN, PK6_IN, PK5_IN, PK4_IN,
+ PK3_IN, PK2_IN, PK1_IN, PK0_IN,
+ PL7_IN, PL6_IN, PL5_IN, PL4_IN,
+ PL3_IN, PL2_IN, PL1_IN, PL0_IN,
+ PM1_IN, PM0_IN,
+ PN7_IN, PN6_IN, PN5_IN, PN4_IN,
+ PN3_IN, PN2_IN, PN1_IN, PN0_IN,
+ PP5_IN, PP4_IN, PP3_IN, PP2_IN, PP1_IN, PP0_IN,
+ PQ4_IN, PQ3_IN, PQ2_IN, PQ1_IN, PQ0_IN,
+ PR3_IN, PR2_IN, PR1_IN, PR0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
+ PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
+ PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
+ PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
+ PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
+ PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
+ PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
+ PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
+ PE5_IN_PU, PE4_IN_PU, PE3_IN_PU, PE2_IN_PU, PE1_IN_PU, PE0_IN_PU,
+ PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
+ PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
+ PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU,
+ PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU,
+ PH7_IN_PU, PH6_IN_PU, PH5_IN_PU, PH4_IN_PU,
+ PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
+ PJ7_IN_PU, PJ6_IN_PU, PJ5_IN_PU, PJ4_IN_PU,
+ PJ3_IN_PU, PJ2_IN_PU, PJ1_IN_PU, PJ0_IN_PU,
+ PK7_IN_PU, PK6_IN_PU, PK5_IN_PU, PK4_IN_PU,
+ PK3_IN_PU, PK2_IN_PU, PK1_IN_PU, PK0_IN_PU,
+ PL7_IN_PU, PL6_IN_PU, PL5_IN_PU, PL4_IN_PU,
+ PL3_IN_PU, PL2_IN_PU, PL1_IN_PU, PL0_IN_PU,
+ PM1_IN_PU, PM0_IN_PU,
+ PN7_IN_PU, PN6_IN_PU, PN5_IN_PU, PN4_IN_PU,
+ PN3_IN_PU, PN2_IN_PU, PN1_IN_PU, PN0_IN_PU,
+ PP5_IN_PU, PP4_IN_PU, PP3_IN_PU, PP2_IN_PU, PP1_IN_PU, PP0_IN_PU,
+ PQ4_IN_PU, PQ3_IN_PU, PQ2_IN_PU, PQ1_IN_PU, PQ0_IN_PU,
+ PR3_IN_PU, PR2_IN_PU, PR1_IN_PU, PR0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
+ PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE5_OUT, PE4_OUT, PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT,
+ PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT,
+ PH7_OUT, PH6_OUT, PH5_OUT, PH4_OUT,
+ PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT,
+ PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT,
+ PJ3_OUT, PJ2_OUT, PJ1_OUT, PJ0_OUT,
+ PK7_OUT, PK6_OUT, PK5_OUT, PK4_OUT,
+ PK3_OUT, PK2_OUT, PK1_OUT, PK0_OUT,
+ PL7_OUT, PL6_OUT, PL5_OUT, PL4_OUT,
+ PL3_OUT, PL2_OUT, PL1_OUT, PL0_OUT,
+ PM1_OUT, PM0_OUT,
+ PN7_OUT, PN6_OUT, PN5_OUT, PN4_OUT,
+ PN3_OUT, PN2_OUT, PN1_OUT, PN0_OUT,
+ PP5_OUT, PP4_OUT, PP3_OUT, PP2_OUT, PP1_OUT, PP0_OUT,
+ PQ4_OUT, PQ3_OUT, PQ2_OUT, PQ1_OUT, PQ0_OUT,
+ PR3_OUT, PR2_OUT, PR1_OUT, PR0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PA7_FN, PA6_FN, PA5_FN, PA4_FN,
+ PA3_FN, PA2_FN, PA1_FN, PA0_FN,
+ PB7_FN, PB6_FN, PB5_FN, PB4_FN,
+ PB3_FN, PB2_FN, PB1_FN, PB0_FN,
+ PC7_FN, PC6_FN, PC5_FN, PC4_FN,
+ PC3_FN, PC2_FN, PC1_FN, PC0_FN,
+ PD7_FN, PD6_FN, PD5_FN, PD4_FN,
+ PD3_FN, PD2_FN, PD1_FN, PD0_FN,
+ PE5_FN, PE4_FN, PE3_FN, PE2_FN, PE1_FN, PE0_FN,
+ PF7_FN, PF6_FN, PF5_FN, PF4_FN,
+ PF3_FN, PF2_FN, PF1_FN, PF0_FN,
+ PG7_FN, PG6_FN, PG5_FN, PG4_FN,
+ PG3_FN, PG2_FN, PG1_FN, PG0_FN,
+ PH7_FN, PH6_FN, PH5_FN, PH4_FN,
+ PH3_FN, PH2_FN, PH1_FN, PH0_FN,
+ PJ7_FN, PJ6_FN, PJ5_FN, PJ4_FN,
+ PJ3_FN, PJ2_FN, PJ1_FN, PJ0_FN,
+ PK7_FN, PK6_FN, PK5_FN, PK4_FN,
+ PK3_FN, PK2_FN, PK1_FN, PK0_FN,
+ PL7_FN, PL6_FN, PL5_FN, PL4_FN,
+ PL3_FN, PL2_FN, PL1_FN, PL0_FN,
+ PM1_FN, PM0_FN,
+ PN7_FN, PN6_FN, PN5_FN, PN4_FN,
+ PN3_FN, PN2_FN, PN1_FN, PN0_FN,
+ PP5_FN, PP4_FN, PP3_FN, PP2_FN, PP1_FN, PP0_FN,
+ PQ4_FN, PQ3_FN, PQ2_FN, PQ1_FN, PQ0_FN,
+ PR3_FN, PR2_FN, PR1_FN, PR0_FN,
+ P1MSEL15_0, P1MSEL15_1,
+ P1MSEL14_0, P1MSEL14_1,
+ P1MSEL13_0, P1MSEL13_1,
+ P1MSEL12_0, P1MSEL12_1,
+ P1MSEL11_0, P1MSEL11_1,
+ P1MSEL10_0, P1MSEL10_1,
+ P1MSEL9_0, P1MSEL9_1,
+ P1MSEL8_0, P1MSEL8_1,
+ P1MSEL7_0, P1MSEL7_1,
+ P1MSEL6_0, P1MSEL6_1,
+ P1MSEL5_0,
+ P1MSEL4_0, P1MSEL4_1,
+ P1MSEL3_0, P1MSEL3_1,
+ P1MSEL2_0, P1MSEL2_1,
+ P1MSEL1_0, P1MSEL1_1,
+ P1MSEL0_0, P1MSEL0_1,
+ P2MSEL2_0, P2MSEL2_1,
+ P2MSEL1_0, P2MSEL1_1,
+ P2MSEL0_0, P2MSEL0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ D63_AD31_MARK,
+ D62_AD30_MARK,
+ D61_AD29_MARK,
+ D60_AD28_MARK,
+ D59_AD27_MARK,
+ D58_AD26_MARK,
+ D57_AD25_MARK,
+ D56_AD24_MARK,
+ D55_AD23_MARK,
+ D54_AD22_MARK,
+ D53_AD21_MARK,
+ D52_AD20_MARK,
+ D51_AD19_MARK,
+ D50_AD18_MARK,
+ D49_AD17_DB5_MARK,
+ D48_AD16_DB4_MARK,
+ D47_AD15_DB3_MARK,
+ D46_AD14_DB2_MARK,
+ D45_AD13_DB1_MARK,
+ D44_AD12_DB0_MARK,
+ D43_AD11_DG5_MARK,
+ D42_AD10_DG4_MARK,
+ D41_AD9_DG3_MARK,
+ D40_AD8_DG2_MARK,
+ D39_AD7_DG1_MARK,
+ D38_AD6_DG0_MARK,
+ D37_AD5_DR5_MARK,
+ D36_AD4_DR4_MARK,
+ D35_AD3_DR3_MARK,
+ D34_AD2_DR2_MARK,
+ D33_AD1_DR1_MARK,
+ D32_AD0_DR0_MARK,
+ REQ1_MARK,
+ REQ2_MARK,
+ REQ3_MARK,
+ GNT1_MARK,
+ GNT2_MARK,
+ GNT3_MARK,
+ MMCCLK_MARK,
+ D31_MARK,
+ D30_MARK,
+ D29_MARK,
+ D28_MARK,
+ D27_MARK,
+ D26_MARK,
+ D25_MARK,
+ D24_MARK,
+ D23_MARK,
+ D22_MARK,
+ D21_MARK,
+ D20_MARK,
+ D19_MARK,
+ D18_MARK,
+ D17_MARK,
+ D16_MARK,
+ SCIF1_SCK_MARK,
+ SCIF1_RXD_MARK,
+ SCIF1_TXD_MARK,
+ SCIF0_CTS_MARK,
+ INTD_MARK,
+ FCE_MARK,
+ SCIF0_RTS_MARK,
+ HSPI_CS_MARK,
+ FSE_MARK,
+ SCIF0_SCK_MARK,
+ HSPI_CLK_MARK,
+ FRE_MARK,
+ SCIF0_RXD_MARK,
+ HSPI_RX_MARK,
+ FRB_MARK,
+ SCIF0_TXD_MARK,
+ HSPI_TX_MARK,
+ FWE_MARK,
+ SCIF5_TXD_MARK,
+ HAC1_SYNC_MARK,
+ SSI1_WS_MARK,
+ SIOF_TXD_PJ_MARK,
+ HAC0_SDOUT_MARK,
+ SSI0_SDATA_MARK,
+ SIOF_RXD_PJ_MARK,
+ HAC0_SDIN_MARK,
+ SSI0_SCK_MARK,
+ SIOF_SYNC_PJ_MARK,
+ HAC0_SYNC_MARK,
+ SSI0_WS_MARK,
+ SIOF_MCLK_PJ_MARK,
+ HAC_RES_MARK,
+ SIOF_SCK_PJ_MARK,
+ HAC0_BITCLK_MARK,
+ SSI0_CLK_MARK,
+ HAC1_BITCLK_MARK,
+ SSI1_CLK_MARK,
+ TCLK_MARK,
+ IOIS16_MARK,
+ STATUS0_MARK,
+ DRAK0_PK3_MARK,
+ STATUS1_MARK,
+ DRAK1_PK2_MARK,
+ DACK2_MARK,
+ SCIF2_TXD_MARK,
+ MMCCMD_MARK,
+ SIOF_TXD_PK_MARK,
+ DACK3_MARK,
+ SCIF2_SCK_MARK,
+ MMCDAT_MARK,
+ SIOF_SCK_PK_MARK,
+ DREQ0_MARK,
+ DREQ1_MARK,
+ DRAK0_PK1_MARK,
+ DRAK1_PK0_MARK,
+ DREQ2_MARK,
+ INTB_MARK,
+ DREQ3_MARK,
+ INTC_MARK,
+ DRAK2_MARK,
+ CE2A_MARK,
+ IRL4_MARK,
+ FD4_MARK,
+ IRL5_MARK,
+ FD5_MARK,
+ IRL6_MARK,
+ FD6_MARK,
+ IRL7_MARK,
+ FD7_MARK,
+ DRAK3_MARK,
+ CE2B_MARK,
+ BREQ_BSACK_MARK,
+ BACK_BSREQ_MARK,
+ SCIF5_RXD_MARK,
+ HAC1_SDIN_MARK,
+ SSI1_SCK_MARK,
+ SCIF5_SCK_MARK,
+ HAC1_SDOUT_MARK,
+ SSI1_SDATA_MARK,
+ SCIF3_TXD_MARK,
+ FCLE_MARK,
+ SCIF3_RXD_MARK,
+ FALE_MARK,
+ SCIF3_SCK_MARK,
+ FD0_MARK,
+ SCIF4_TXD_MARK,
+ FD1_MARK,
+ SCIF4_RXD_MARK,
+ FD2_MARK,
+ SCIF4_SCK_MARK,
+ FD3_MARK,
+ DEVSEL_DCLKOUT_MARK,
+ STOP_CDE_MARK,
+ LOCK_ODDF_MARK,
+ TRDY_DISPL_MARK,
+ IRDY_HSYNC_MARK,
+ PCIFRAME_VSYNC_MARK,
+ INTA_MARK,
+ GNT0_GNTIN_MARK,
+ REQ0_REQOUT_MARK,
+ PERR_MARK,
+ SERR_MARK,
+ WE7_CBE3_MARK,
+ WE6_CBE2_MARK,
+ WE5_CBE1_MARK,
+ WE4_CBE0_MARK,
+ SCIF2_RXD_MARK,
+ SIOF_RXD_MARK,
+ MRESETOUT_MARK,
+ IRQOUT_MARK,
+ PINMUX_MARK_END,
+};
+
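+/*
+ * Mux data: each PINMUX_DATA(id, ...) entry names a pin state or function
+ * mark followed by the register field values that must be set to select it.
+ */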
+static pinmux_enum_t pinmux_data[] = {
+
+ /* PA GPIO */
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+
+ /* PB GPIO */
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+
+ /* PC GPIO */
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+
+ /* PD GPIO */
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+
+ /* PE GPIO */
+ PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU),
+ PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU),
+ PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU),
+ PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU),
+ PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU),
+ PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU),
+
+ /* PF GPIO */
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+
+ /* PG GPIO */
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
+ PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU),
+ PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU),
+ PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU),
+ PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU),
+ PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU),
+
+ /* PH GPIO */
+ PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT, PH7_IN_PU),
+ PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT, PH6_IN_PU),
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+
+ /* PJ GPIO */
+ PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT, PJ7_IN_PU),
+ PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT, PJ6_IN_PU),
+ PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT, PJ5_IN_PU),
+ PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT, PJ4_IN_PU),
+ PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT, PJ3_IN_PU),
+ PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT, PJ2_IN_PU),
+ PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT, PJ1_IN_PU),
+ PINMUX_DATA(PJ0_DATA, PJ0_IN, PJ0_OUT, PJ0_IN_PU),
+
+ /* PK GPIO */
+ PINMUX_DATA(PK7_DATA, PK7_IN, PK7_OUT, PK7_IN_PU),
+ PINMUX_DATA(PK6_DATA, PK6_IN, PK6_OUT, PK6_IN_PU),
+ PINMUX_DATA(PK5_DATA, PK5_IN, PK5_OUT, PK5_IN_PU),
+ PINMUX_DATA(PK4_DATA, PK4_IN, PK4_OUT, PK4_IN_PU),
+ PINMUX_DATA(PK3_DATA, PK3_IN, PK3_OUT, PK3_IN_PU),
+ PINMUX_DATA(PK2_DATA, PK2_IN, PK2_OUT, PK2_IN_PU),
+ PINMUX_DATA(PK1_DATA, PK1_IN, PK1_OUT, PK1_IN_PU),
+ PINMUX_DATA(PK0_DATA, PK0_IN, PK0_OUT, PK0_IN_PU),
+
+ /* PL GPIO */
+ PINMUX_DATA(PL7_DATA, PL7_IN, PL7_OUT, PL7_IN_PU),
+ PINMUX_DATA(PL6_DATA, PL6_IN, PL6_OUT, PL6_IN_PU),
+ PINMUX_DATA(PL5_DATA, PL5_IN, PL5_OUT, PL5_IN_PU),
+ PINMUX_DATA(PL4_DATA, PL4_IN, PL4_OUT, PL4_IN_PU),
+ PINMUX_DATA(PL3_DATA, PL3_IN, PL3_OUT, PL3_IN_PU),
+ PINMUX_DATA(PL2_DATA, PL2_IN, PL2_OUT, PL2_IN_PU),
+ PINMUX_DATA(PL1_DATA, PL1_IN, PL1_OUT, PL1_IN_PU),
+ PINMUX_DATA(PL0_DATA, PL0_IN, PL0_OUT, PL0_IN_PU),
+
+ /* PM GPIO */
+ PINMUX_DATA(PM1_DATA, PM1_IN, PM1_OUT, PM1_IN_PU),
+ PINMUX_DATA(PM0_DATA, PM0_IN, PM0_OUT, PM0_IN_PU),
+
+ /* PN GPIO */
+ PINMUX_DATA(PN7_DATA, PN7_IN, PN7_OUT, PN7_IN_PU),
+ PINMUX_DATA(PN6_DATA, PN6_IN, PN6_OUT, PN6_IN_PU),
+ PINMUX_DATA(PN5_DATA, PN5_IN, PN5_OUT, PN5_IN_PU),
+ PINMUX_DATA(PN4_DATA, PN4_IN, PN4_OUT, PN4_IN_PU),
+ PINMUX_DATA(PN3_DATA, PN3_IN, PN3_OUT, PN3_IN_PU),
+ PINMUX_DATA(PN2_DATA, PN2_IN, PN2_OUT, PN2_IN_PU),
+ PINMUX_DATA(PN1_DATA, PN1_IN, PN1_OUT, PN1_IN_PU),
+ PINMUX_DATA(PN0_DATA, PN0_IN, PN0_OUT, PN0_IN_PU),
+
+ /* PP GPIO */
+ PINMUX_DATA(PP5_DATA, PP5_IN, PP5_OUT, PP5_IN_PU),
+ PINMUX_DATA(PP4_DATA, PP4_IN, PP4_OUT, PP4_IN_PU),
+ PINMUX_DATA(PP3_DATA, PP3_IN, PP3_OUT, PP3_IN_PU),
+ PINMUX_DATA(PP2_DATA, PP2_IN, PP2_OUT, PP2_IN_PU),
+ PINMUX_DATA(PP1_DATA, PP1_IN, PP1_OUT, PP1_IN_PU),
+ PINMUX_DATA(PP0_DATA, PP0_IN, PP0_OUT, PP0_IN_PU),
+
+ /* PQ GPIO */
+ PINMUX_DATA(PQ4_DATA, PQ4_IN, PQ4_OUT, PQ4_IN_PU),
+ PINMUX_DATA(PQ3_DATA, PQ3_IN, PQ3_OUT, PQ3_IN_PU),
+ PINMUX_DATA(PQ2_DATA, PQ2_IN, PQ2_OUT, PQ2_IN_PU),
+ PINMUX_DATA(PQ1_DATA, PQ1_IN, PQ1_OUT, PQ1_IN_PU),
+ PINMUX_DATA(PQ0_DATA, PQ0_IN, PQ0_OUT, PQ0_IN_PU),
+
+ /* PR GPIO */
+ PINMUX_DATA(PR3_DATA, PR3_IN, PR3_OUT, PR3_IN_PU),
+ PINMUX_DATA(PR2_DATA, PR2_IN, PR2_OUT, PR2_IN_PU),
+ PINMUX_DATA(PR1_DATA, PR1_IN, PR1_OUT, PR1_IN_PU),
+ PINMUX_DATA(PR0_DATA, PR0_IN, PR0_OUT, PR0_IN_PU),
+
+ /* PA FN */
+ PINMUX_DATA(D63_AD31_MARK, PA7_FN),
+ PINMUX_DATA(D62_AD30_MARK, PA6_FN),
+ PINMUX_DATA(D61_AD29_MARK, PA5_FN),
+ PINMUX_DATA(D60_AD28_MARK, PA4_FN),
+ PINMUX_DATA(D59_AD27_MARK, PA3_FN),
+ PINMUX_DATA(D58_AD26_MARK, PA2_FN),
+ PINMUX_DATA(D57_AD25_MARK, PA1_FN),
+ PINMUX_DATA(D56_AD24_MARK, PA0_FN),
+
+ /* PB FN */
+ PINMUX_DATA(D55_AD23_MARK, PB7_FN),
+ PINMUX_DATA(D54_AD22_MARK, PB6_FN),
+ PINMUX_DATA(D53_AD21_MARK, PB5_FN),
+ PINMUX_DATA(D52_AD20_MARK, PB4_FN),
+ PINMUX_DATA(D51_AD19_MARK, PB3_FN),
+ PINMUX_DATA(D50_AD18_MARK, PB2_FN),
+ PINMUX_DATA(D49_AD17_DB5_MARK, PB1_FN),
+ PINMUX_DATA(D48_AD16_DB4_MARK, PB0_FN),
+
+ /* PC FN */
+ PINMUX_DATA(D47_AD15_DB3_MARK, PC7_FN),
+ PINMUX_DATA(D46_AD14_DB2_MARK, PC6_FN),
+ PINMUX_DATA(D45_AD13_DB1_MARK, PC5_FN),
+ PINMUX_DATA(D44_AD12_DB0_MARK, PC4_FN),
+ PINMUX_DATA(D43_AD11_DG5_MARK, PC3_FN),
+ PINMUX_DATA(D42_AD10_DG4_MARK, PC2_FN),
+ PINMUX_DATA(D41_AD9_DG3_MARK, PC1_FN),
+ PINMUX_DATA(D40_AD8_DG2_MARK, PC0_FN),
+
+ /* PD FN */
+ PINMUX_DATA(D39_AD7_DG1_MARK, PD7_FN),
+ PINMUX_DATA(D38_AD6_DG0_MARK, PD6_FN),
+ PINMUX_DATA(D37_AD5_DR5_MARK, PD5_FN),
+ PINMUX_DATA(D36_AD4_DR4_MARK, PD4_FN),
+ PINMUX_DATA(D35_AD3_DR3_MARK, PD3_FN),
+ PINMUX_DATA(D34_AD2_DR2_MARK, PD2_FN),
+ PINMUX_DATA(D33_AD1_DR1_MARK, PD1_FN),
+ PINMUX_DATA(D32_AD0_DR0_MARK, PD0_FN),
+
+ /* PE FN */
+ PINMUX_DATA(REQ1_MARK, PE5_FN),
+ PINMUX_DATA(REQ2_MARK, PE4_FN),
+ PINMUX_DATA(REQ3_MARK, P2MSEL0_0, PE3_FN),
+ PINMUX_DATA(GNT1_MARK, PE2_FN),
+ PINMUX_DATA(GNT2_MARK, PE1_FN),
+ PINMUX_DATA(GNT3_MARK, P2MSEL0_0, PE0_FN),
+ PINMUX_DATA(MMCCLK_MARK, P2MSEL0_1, PE0_FN),
+
+ /* PF FN */
+ PINMUX_DATA(D31_MARK, PF7_FN),
+ PINMUX_DATA(D30_MARK, PF6_FN),
+ PINMUX_DATA(D29_MARK, PF5_FN),
+ PINMUX_DATA(D28_MARK, PF4_FN),
+ PINMUX_DATA(D27_MARK, PF3_FN),
+ PINMUX_DATA(D26_MARK, PF2_FN),
+ PINMUX_DATA(D25_MARK, PF1_FN),
+ PINMUX_DATA(D24_MARK, PF0_FN),
+
+ /* PG FN */
+ PINMUX_DATA(D23_MARK, PG7_FN),
+ PINMUX_DATA(D22_MARK, PG6_FN),
+ PINMUX_DATA(D21_MARK, PG5_FN),
+ PINMUX_DATA(D20_MARK, PG4_FN),
+ PINMUX_DATA(D19_MARK, PG3_FN),
+ PINMUX_DATA(D18_MARK, PG2_FN),
+ PINMUX_DATA(D17_MARK, PG1_FN),
+ PINMUX_DATA(D16_MARK, PG0_FN),
+
+ /* PH FN */
+ PINMUX_DATA(SCIF1_SCK_MARK, PH7_FN),
+ PINMUX_DATA(SCIF1_RXD_MARK, PH6_FN),
+ PINMUX_DATA(SCIF1_TXD_MARK, PH5_FN),
+ PINMUX_DATA(SCIF0_CTS_MARK, PH4_FN),
+ PINMUX_DATA(INTD_MARK, P1MSEL7_1, PH4_FN),
+ PINMUX_DATA(FCE_MARK, P1MSEL8_1, P1MSEL7_0, PH4_FN),
+ PINMUX_DATA(SCIF0_RTS_MARK, P1MSEL8_0, P1MSEL7_0, PH3_FN),
+ PINMUX_DATA(HSPI_CS_MARK, P1MSEL8_0, P1MSEL7_1, PH3_FN),
+ PINMUX_DATA(FSE_MARK, P1MSEL8_1, P1MSEL7_0, PH3_FN),
+ PINMUX_DATA(SCIF0_SCK_MARK, P1MSEL8_0, P1MSEL7_0, PH2_FN),
+ PINMUX_DATA(HSPI_CLK_MARK, P1MSEL8_0, P1MSEL7_1, PH2_FN),
+ PINMUX_DATA(FRE_MARK, P1MSEL8_1, P1MSEL7_0, PH2_FN),
+ PINMUX_DATA(SCIF0_RXD_MARK, P1MSEL8_0, P1MSEL7_0, PH1_FN),
+ PINMUX_DATA(HSPI_RX_MARK, P1MSEL8_0, P1MSEL7_1, PH1_FN),
+ PINMUX_DATA(FRB_MARK, P1MSEL8_1, P1MSEL7_0, PH1_FN),
+ PINMUX_DATA(SCIF0_TXD_MARK, P1MSEL8_0, P1MSEL7_0, PH0_FN),
+ PINMUX_DATA(HSPI_TX_MARK, P1MSEL8_0, P1MSEL7_1, PH0_FN),
+ PINMUX_DATA(FWE_MARK, P1MSEL8_1, P1MSEL7_0, PH0_FN),
+
+ /* PJ FN */
+ PINMUX_DATA(SCIF5_TXD_MARK, P1MSEL2_0, P1MSEL1_0, PJ7_FN),
+ PINMUX_DATA(HAC1_SYNC_MARK, P1MSEL2_0, P1MSEL1_1, PJ7_FN),
+ PINMUX_DATA(SSI1_WS_MARK, P1MSEL2_1, P1MSEL1_0, PJ7_FN),
+ PINMUX_DATA(SIOF_TXD_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ6_FN),
+ PINMUX_DATA(HAC0_SDOUT_MARK, P1MSEL4_0, P1MSEL3_1, PJ6_FN),
+ PINMUX_DATA(SSI0_SDATA_MARK, P1MSEL4_1, P1MSEL3_0, PJ6_FN),
+ PINMUX_DATA(SIOF_RXD_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ5_FN),
+ PINMUX_DATA(HAC0_SDIN_MARK, P1MSEL4_0, P1MSEL3_1, PJ5_FN),
+ PINMUX_DATA(SSI0_SCK_MARK, P1MSEL4_1, P1MSEL3_0, PJ5_FN),
+ PINMUX_DATA(SIOF_SYNC_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ4_FN),
+ PINMUX_DATA(HAC0_SYNC_MARK, P1MSEL4_0, P1MSEL3_1, PJ4_FN),
+ PINMUX_DATA(SSI0_WS_MARK, P1MSEL4_1, P1MSEL3_0, PJ4_FN),
+ PINMUX_DATA(SIOF_MCLK_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ3_FN),
+ PINMUX_DATA(HAC_RES_MARK, P1MSEL4_0, P1MSEL3_1, PJ3_FN),
+ PINMUX_DATA(SIOF_SCK_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ2_FN),
+ PINMUX_DATA(HAC0_BITCLK_MARK, P1MSEL4_0, P1MSEL3_1, PJ2_FN),
+ PINMUX_DATA(SSI0_CLK_MARK, P1MSEL4_1, P1MSEL3_0, PJ2_FN),
+ PINMUX_DATA(HAC1_BITCLK_MARK, P1MSEL2_0, PJ1_FN),
+ PINMUX_DATA(SSI1_CLK_MARK, P1MSEL2_1, P1MSEL1_0, PJ1_FN),
+ PINMUX_DATA(TCLK_MARK, P1MSEL9_0, PJ0_FN),
+ PINMUX_DATA(IOIS16_MARK, P1MSEL9_1, PJ0_FN),
+
+ /* PK FN */
+ PINMUX_DATA(STATUS0_MARK, P1MSEL15_0, PK7_FN),
+ PINMUX_DATA(DRAK0_PK3_MARK, P1MSEL15_1, PK7_FN),
+ PINMUX_DATA(STATUS1_MARK, P1MSEL15_0, PK6_FN),
+ PINMUX_DATA(DRAK1_PK2_MARK, P1MSEL15_1, PK6_FN),
+ PINMUX_DATA(DACK2_MARK, P1MSEL12_0, P1MSEL11_0, PK5_FN),
+ PINMUX_DATA(SCIF2_TXD_MARK, P1MSEL12_1, P1MSEL11_0, PK5_FN),
+ PINMUX_DATA(MMCCMD_MARK, P1MSEL12_1, P1MSEL11_1, PK5_FN),
+ PINMUX_DATA(SIOF_TXD_PK_MARK, P2MSEL1_1,
+ P1MSEL12_0, P1MSEL11_1, PK5_FN),
+ PINMUX_DATA(DACK3_MARK, P1MSEL12_0, P1MSEL11_0, PK4_FN),
+ PINMUX_DATA(SCIF2_SCK_MARK, P1MSEL12_1, P1MSEL11_0, PK4_FN),
+ PINMUX_DATA(MMCDAT_MARK, P1MSEL12_1, P1MSEL11_1, PK4_FN),
+ PINMUX_DATA(SIOF_SCK_PK_MARK, P2MSEL1_1,
+ P1MSEL12_0, P1MSEL11_1, PK4_FN),
+ PINMUX_DATA(DREQ0_MARK, PK3_FN),
+ PINMUX_DATA(DREQ1_MARK, PK2_FN),
+ PINMUX_DATA(DRAK0_PK1_MARK, PK1_FN),
+ PINMUX_DATA(DRAK1_PK0_MARK, PK0_FN),
+
+ /* PL FN */
+ PINMUX_DATA(DREQ2_MARK, P1MSEL13_0, PL7_FN),
+ PINMUX_DATA(INTB_MARK, P1MSEL13_1, PL7_FN),
+ PINMUX_DATA(DREQ3_MARK, P1MSEL13_0, PL6_FN),
+ PINMUX_DATA(INTC_MARK, P1MSEL13_1, PL6_FN),
+ PINMUX_DATA(DRAK2_MARK, P1MSEL10_0, PL5_FN),
+ PINMUX_DATA(CE2A_MARK, P1MSEL10_1, PL5_FN),
+ PINMUX_DATA(IRL4_MARK, P1MSEL14_0, PL4_FN),
+ PINMUX_DATA(FD4_MARK, P1MSEL14_1, PL4_FN),
+ PINMUX_DATA(IRL5_MARK, P1MSEL14_0, PL3_FN),
+ PINMUX_DATA(FD5_MARK, P1MSEL14_1, PL3_FN),
+ PINMUX_DATA(IRL6_MARK, P1MSEL14_0, PL2_FN),
+ PINMUX_DATA(FD6_MARK, P1MSEL14_1, PL2_FN),
+ PINMUX_DATA(IRL7_MARK, P1MSEL14_0, PL1_FN),
+ PINMUX_DATA(FD7_MARK, P1MSEL14_1, PL1_FN),
+ PINMUX_DATA(DRAK3_MARK, P1MSEL10_0, PL0_FN),
+ PINMUX_DATA(CE2B_MARK, P1MSEL10_1, PL0_FN),
+
+ /* PM FN */
+ PINMUX_DATA(BREQ_BSACK_MARK, PM1_FN),
+ PINMUX_DATA(BACK_BSREQ_MARK, PM0_FN),
+
+ /* PN FN */
+ PINMUX_DATA(SCIF5_RXD_MARK, P1MSEL2_0, P1MSEL1_0, PN7_FN),
+ PINMUX_DATA(HAC1_SDIN_MARK, P1MSEL2_0, P1MSEL1_1, PN7_FN),
+ PINMUX_DATA(SSI1_SCK_MARK, P1MSEL2_1, P1MSEL1_0, PN7_FN),
+ PINMUX_DATA(SCIF5_SCK_MARK, P1MSEL2_0, P1MSEL1_0, PN6_FN),
+ PINMUX_DATA(HAC1_SDOUT_MARK, P1MSEL2_0, P1MSEL1_1, PN6_FN),
+ PINMUX_DATA(SSI1_SDATA_MARK, P1MSEL2_1, P1MSEL1_0, PN6_FN),
+ PINMUX_DATA(SCIF3_TXD_MARK, P1MSEL0_0, PN5_FN),
+ PINMUX_DATA(FCLE_MARK, P1MSEL0_1, PN5_FN),
+ PINMUX_DATA(SCIF3_RXD_MARK, P1MSEL0_0, PN4_FN),
+ PINMUX_DATA(FALE_MARK, P1MSEL0_1, PN4_FN),
+ PINMUX_DATA(SCIF3_SCK_MARK, P1MSEL0_0, PN3_FN),
+ PINMUX_DATA(FD0_MARK, P1MSEL0_1, PN3_FN),
+ PINMUX_DATA(SCIF4_TXD_MARK, P1MSEL0_0, PN2_FN),
+ PINMUX_DATA(FD1_MARK, P1MSEL0_1, PN2_FN),
+ PINMUX_DATA(SCIF4_RXD_MARK, P1MSEL0_0, PN1_FN),
+ PINMUX_DATA(FD2_MARK, P1MSEL0_1, PN1_FN),
+ PINMUX_DATA(SCIF4_SCK_MARK, P1MSEL0_0, PN0_FN),
+ PINMUX_DATA(FD3_MARK, P1MSEL0_1, PN0_FN),
+
+ /* PP FN */
+ PINMUX_DATA(DEVSEL_DCLKOUT_MARK, PP5_FN),
+ PINMUX_DATA(STOP_CDE_MARK, PP4_FN),
+ PINMUX_DATA(LOCK_ODDF_MARK, PP3_FN),
+ PINMUX_DATA(TRDY_DISPL_MARK, PP2_FN),
+ PINMUX_DATA(IRDY_HSYNC_MARK, PP1_FN),
+ PINMUX_DATA(PCIFRAME_VSYNC_MARK, PP0_FN),
+
+ /* PQ FN */
+ PINMUX_DATA(INTA_MARK, PQ4_FN),
+ PINMUX_DATA(GNT0_GNTIN_MARK, PQ3_FN),
+ PINMUX_DATA(REQ0_REQOUT_MARK, PQ2_FN),
+ PINMUX_DATA(PERR_MARK, PQ1_FN),
+ PINMUX_DATA(SERR_MARK, PQ0_FN),
+
+ /* PR FN */
+ PINMUX_DATA(WE7_CBE3_MARK, PR3_FN),
+ PINMUX_DATA(WE6_CBE2_MARK, PR2_FN),
+ PINMUX_DATA(WE5_CBE1_MARK, PR1_FN),
+ PINMUX_DATA(WE4_CBE0_MARK, PR0_FN),
+
+ /* MISC FN */
+ PINMUX_DATA(SCIF2_RXD_MARK, P1MSEL6_0, P1MSEL5_0),
+ PINMUX_DATA(SIOF_RXD_MARK, P2MSEL1_1, P1MSEL6_1, P1MSEL5_0),
+ PINMUX_DATA(MRESETOUT_MARK, P2MSEL2_0),
+ PINMUX_DATA(IRQOUT_MARK, P2MSEL2_1),
+};
+
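+/*
+ * GPIO table: PINMUX_GPIO(gpio, id) maps every GPIO number to its pin
+ * data or function mark enum.
+ */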
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* PG */
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+ PINMUX_GPIO(GPIO_PG4, PG4_DATA),
+ PINMUX_GPIO(GPIO_PG3, PG3_DATA),
+ PINMUX_GPIO(GPIO_PG2, PG2_DATA),
+ PINMUX_GPIO(GPIO_PG1, PG1_DATA),
+ PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+
+ /* PH */
+ PINMUX_GPIO(GPIO_PH7, PH7_DATA),
+ PINMUX_GPIO(GPIO_PH6, PH6_DATA),
+ PINMUX_GPIO(GPIO_PH5, PH5_DATA),
+ PINMUX_GPIO(GPIO_PH4, PH4_DATA),
+ PINMUX_GPIO(GPIO_PH3, PH3_DATA),
+ PINMUX_GPIO(GPIO_PH2, PH2_DATA),
+ PINMUX_GPIO(GPIO_PH1, PH1_DATA),
+ PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+
+ /* PJ */
+ PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
+ PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
+ PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
+ PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
+ PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
+ PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
+ PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
+ PINMUX_GPIO(GPIO_PJ0, PJ0_DATA),
+
+ /* PK */
+ PINMUX_GPIO(GPIO_PK7, PK7_DATA),
+ PINMUX_GPIO(GPIO_PK6, PK6_DATA),
+ PINMUX_GPIO(GPIO_PK5, PK5_DATA),
+ PINMUX_GPIO(GPIO_PK4, PK4_DATA),
+ PINMUX_GPIO(GPIO_PK3, PK3_DATA),
+ PINMUX_GPIO(GPIO_PK2, PK2_DATA),
+ PINMUX_GPIO(GPIO_PK1, PK1_DATA),
+ PINMUX_GPIO(GPIO_PK0, PK0_DATA),
+
+ /* PL */
+ PINMUX_GPIO(GPIO_PL7, PL7_DATA),
+ PINMUX_GPIO(GPIO_PL6, PL6_DATA),
+ PINMUX_GPIO(GPIO_PL5, PL5_DATA),
+ PINMUX_GPIO(GPIO_PL4, PL4_DATA),
+ PINMUX_GPIO(GPIO_PL3, PL3_DATA),
+ PINMUX_GPIO(GPIO_PL2, PL2_DATA),
+ PINMUX_GPIO(GPIO_PL1, PL1_DATA),
+ PINMUX_GPIO(GPIO_PL0, PL0_DATA),
+
+ /* PM */
+ PINMUX_GPIO(GPIO_PM1, PM1_DATA),
+ PINMUX_GPIO(GPIO_PM0, PM0_DATA),
+
+ /* PN */
+ PINMUX_GPIO(GPIO_PN7, PN7_DATA),
+ PINMUX_GPIO(GPIO_PN6, PN6_DATA),
+ PINMUX_GPIO(GPIO_PN5, PN5_DATA),
+ PINMUX_GPIO(GPIO_PN4, PN4_DATA),
+ PINMUX_GPIO(GPIO_PN3, PN3_DATA),
+ PINMUX_GPIO(GPIO_PN2, PN2_DATA),
+ PINMUX_GPIO(GPIO_PN1, PN1_DATA),
+ PINMUX_GPIO(GPIO_PN0, PN0_DATA),
+
+ /* PP */
+ PINMUX_GPIO(GPIO_PP5, PP5_DATA),
+ PINMUX_GPIO(GPIO_PP4, PP4_DATA),
+ PINMUX_GPIO(GPIO_PP3, PP3_DATA),
+ PINMUX_GPIO(GPIO_PP2, PP2_DATA),
+ PINMUX_GPIO(GPIO_PP1, PP1_DATA),
+ PINMUX_GPIO(GPIO_PP0, PP0_DATA),
+
+ /* PQ */
+ PINMUX_GPIO(GPIO_PQ4, PQ4_DATA),
+ PINMUX_GPIO(GPIO_PQ3, PQ3_DATA),
+ PINMUX_GPIO(GPIO_PQ2, PQ2_DATA),
+ PINMUX_GPIO(GPIO_PQ1, PQ1_DATA),
+ PINMUX_GPIO(GPIO_PQ0, PQ0_DATA),
+
+ /* PR */
+ PINMUX_GPIO(GPIO_PR3, PR3_DATA),
+ PINMUX_GPIO(GPIO_PR2, PR2_DATA),
+ PINMUX_GPIO(GPIO_PR1, PR1_DATA),
+ PINMUX_GPIO(GPIO_PR0, PR0_DATA),
+
+ /* FN */
+ PINMUX_GPIO(GPIO_FN_D63_AD31, D63_AD31_MARK),
+ PINMUX_GPIO(GPIO_FN_D62_AD30, D62_AD30_MARK),
+ PINMUX_GPIO(GPIO_FN_D61_AD29, D61_AD29_MARK),
+ PINMUX_GPIO(GPIO_FN_D60_AD28, D60_AD28_MARK),
+ PINMUX_GPIO(GPIO_FN_D59_AD27, D59_AD27_MARK),
+ PINMUX_GPIO(GPIO_FN_D58_AD26, D58_AD26_MARK),
+ PINMUX_GPIO(GPIO_FN_D57_AD25, D57_AD25_MARK),
+ PINMUX_GPIO(GPIO_FN_D56_AD24, D56_AD24_MARK),
+ PINMUX_GPIO(GPIO_FN_D55_AD23, D55_AD23_MARK),
+ PINMUX_GPIO(GPIO_FN_D54_AD22, D54_AD22_MARK),
+ PINMUX_GPIO(GPIO_FN_D53_AD21, D53_AD21_MARK),
+ PINMUX_GPIO(GPIO_FN_D52_AD20, D52_AD20_MARK),
+ PINMUX_GPIO(GPIO_FN_D51_AD19, D51_AD19_MARK),
+ PINMUX_GPIO(GPIO_FN_D50_AD18, D50_AD18_MARK),
+ PINMUX_GPIO(GPIO_FN_D49_AD17_DB5, D49_AD17_DB5_MARK),
+ PINMUX_GPIO(GPIO_FN_D48_AD16_DB4, D48_AD16_DB4_MARK),
+ PINMUX_GPIO(GPIO_FN_D47_AD15_DB3, D47_AD15_DB3_MARK),
+ PINMUX_GPIO(GPIO_FN_D46_AD14_DB2, D46_AD14_DB2_MARK),
+ PINMUX_GPIO(GPIO_FN_D45_AD13_DB1, D45_AD13_DB1_MARK),
+ PINMUX_GPIO(GPIO_FN_D44_AD12_DB0, D44_AD12_DB0_MARK),
+ PINMUX_GPIO(GPIO_FN_D43_AD11_DG5, D43_AD11_DG5_MARK),
+ PINMUX_GPIO(GPIO_FN_D42_AD10_DG4, D42_AD10_DG4_MARK),
+ PINMUX_GPIO(GPIO_FN_D41_AD9_DG3, D41_AD9_DG3_MARK),
+ PINMUX_GPIO(GPIO_FN_D40_AD8_DG2, D40_AD8_DG2_MARK),
+ PINMUX_GPIO(GPIO_FN_D39_AD7_DG1, D39_AD7_DG1_MARK),
+ PINMUX_GPIO(GPIO_FN_D38_AD6_DG0, D38_AD6_DG0_MARK),
+ PINMUX_GPIO(GPIO_FN_D37_AD5_DR5, D37_AD5_DR5_MARK),
+ PINMUX_GPIO(GPIO_FN_D36_AD4_DR4, D36_AD4_DR4_MARK),
+ PINMUX_GPIO(GPIO_FN_D35_AD3_DR3, D35_AD3_DR3_MARK),
+ PINMUX_GPIO(GPIO_FN_D34_AD2_DR2, D34_AD2_DR2_MARK),
+ PINMUX_GPIO(GPIO_FN_D33_AD1_DR1, D33_AD1_DR1_MARK),
+ PINMUX_GPIO(GPIO_FN_D32_AD0_DR0, D32_AD0_DR0_MARK),
+ PINMUX_GPIO(GPIO_FN_REQ1, REQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_REQ2, REQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_REQ3, REQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_GNT1, GNT1_MARK),
+ PINMUX_GPIO(GPIO_FN_GNT2, GNT2_MARK),
+ PINMUX_GPIO(GPIO_FN_GNT3, GNT3_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_INTD, INTD_MARK),
+ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_CS, HSPI_CS_MARK),
+ PINMUX_GPIO(GPIO_FN_FSE, FSE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_CLK, HSPI_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_FRE, FRE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_RX, HSPI_RX_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_TX, HSPI_TX_MARK),
+ PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SYNC, HAC1_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_WS, SSI1_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_TXD_PJ, SIOF_TXD_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SDOUT, HAC0_SDOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_SDATA, SSI0_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_RXD_PJ, SIOF_RXD_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SDIN, HAC0_SDIN_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_SCK, SSI0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_SYNC_PJ, SIOF_SYNC_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SYNC, HAC0_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_WS, SSI0_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_MCLK_PJ, SIOF_MCLK_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC_RES, HAC_RES_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_SCK_PJ, SIOF_SCK_PJ_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_BITCLK, HAC0_BITCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_CLK, SSI0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_BITCLK, HAC1_BITCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_CLK, SSI1_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0_PK3, DRAK0_PK3_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1_PK2, DRAK1_PK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_TXD_PK, SIOF_TXD_PK_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_MMCDAT, MMCDAT_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_SCK_PK, SIOF_SCK_PK_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0_PK1, DRAK0_PK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1_PK0, DRAK1_PK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_INTB, INTB_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_INTC, INTC_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL4, IRL4_MARK),
+ PINMUX_GPIO(GPIO_FN_FD4, FD4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL5, IRL5_MARK),
+ PINMUX_GPIO(GPIO_FN_FD5, FD5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL6, IRL6_MARK),
+ PINMUX_GPIO(GPIO_FN_FD6, FD6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL7, IRL7_MARK),
+ PINMUX_GPIO(GPIO_FN_FD7, FD7_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ_BSACK, BREQ_BSACK_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK_BSREQ, BACK_BSREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SDIN, HAC1_SDIN_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_SCK, SSI1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SDOUT, HAC1_SDOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_SDATA, SSI1_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_TXD, SCIF3_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_FCLE, FCLE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_RXD, SCIF3_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_FALE, FALE_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_SCK, SCIF3_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FD0, FD0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_FD1, FD1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_FD2, FD2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FD3, FD3_MARK),
+ PINMUX_GPIO(GPIO_FN_DEVSEL_DCLKOUT, DEVSEL_DCLKOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_STOP_CDE, STOP_CDE_MARK),
+ PINMUX_GPIO(GPIO_FN_LOCK_ODDF, LOCK_ODDF_MARK),
+ PINMUX_GPIO(GPIO_FN_TRDY_DISPL, TRDY_DISPL_MARK),
+ PINMUX_GPIO(GPIO_FN_IRDY_HSYNC, IRDY_HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_PCIFRAME_VSYNC, PCIFRAME_VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_INTA, INTA_MARK),
+ PINMUX_GPIO(GPIO_FN_GNT0_GNTIN, GNT0_GNTIN_MARK),
+ PINMUX_GPIO(GPIO_FN_REQ0_REQOUT, REQ0_REQOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_PERR, PERR_MARK),
+ PINMUX_GPIO(GPIO_FN_SERR, SERR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE7_CBE3, WE7_CBE3_MARK),
+ PINMUX_GPIO(GPIO_FN_WE6_CBE2, WE6_CBE2_MARK),
+ PINMUX_GPIO(GPIO_FN_WE5_CBE1, WE5_CBE1_MARK),
+ PINMUX_GPIO(GPIO_FN_WE4_CBE0, WE4_CBE0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SIOF_RXD, SIOF_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_MRESETOUT, MRESETOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xffe70000, 16, 2) {
+ PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
+ PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
+ PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
+ PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
+ PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
+ PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
+ PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
+ PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xffe70002, 16, 2) {
+ PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
+ PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
+ PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
+ PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
+ PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
+ PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
+ PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
+ PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xffe70004, 16, 2) {
+ PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
+ PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
+ PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
+ PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
+ PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
+ PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
+ PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
+ PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xffe70006, 16, 2) {
+ PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
+ PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
+ PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
+ PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
+ PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
+ PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
+ PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
+ PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PECR", 0xffe70008, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU,
+ PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU,
+ PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU,
+ PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU,
+ PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU,
+ PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xffe7000a, 16, 2) {
+ PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
+ PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
+ PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
+ PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
+ PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
+ PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
+ PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
+ PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xffe7000c, 16, 2) {
+ PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
+ PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
+ PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
+ PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU,
+ PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU,
+ PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU,
+ PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU,
+ PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xffe7000e, 16, 2) {
+ PH7_FN, PH7_OUT, PH7_IN, PH7_IN_PU,
+ PH6_FN, PH6_OUT, PH6_IN, PH6_IN_PU,
+ PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
+ PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
+ PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
+ PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
+ PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
+ PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xffe70010, 16, 2) {
+ PJ7_FN, PJ7_OUT, PJ7_IN, PJ7_IN_PU,
+ PJ6_FN, PJ6_OUT, PJ6_IN, PJ6_IN_PU,
+ PJ5_FN, PJ5_OUT, PJ5_IN, PJ5_IN_PU,
+ PJ4_FN, PJ4_OUT, PJ4_IN, PJ4_IN_PU,
+ PJ3_FN, PJ3_OUT, PJ3_IN, PJ3_IN_PU,
+ PJ2_FN, PJ2_OUT, PJ2_IN, PJ2_IN_PU,
+ PJ1_FN, PJ1_OUT, PJ1_IN, PJ1_IN_PU,
+ PJ0_FN, PJ0_OUT, PJ0_IN, PJ0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xffe70012, 16, 2) {
+ PK7_FN, PK7_OUT, PK7_IN, PK7_IN_PU,
+ PK6_FN, PK6_OUT, PK6_IN, PK6_IN_PU,
+ PK5_FN, PK5_OUT, PK5_IN, PK5_IN_PU,
+ PK4_FN, PK4_OUT, PK4_IN, PK4_IN_PU,
+ PK3_FN, PK3_OUT, PK3_IN, PK3_IN_PU,
+ PK2_FN, PK2_OUT, PK2_IN, PK2_IN_PU,
+ PK1_FN, PK1_OUT, PK1_IN, PK1_IN_PU,
+ PK0_FN, PK0_OUT, PK0_IN, PK0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xffe70014, 16, 2) {
+ PL7_FN, PL7_OUT, PL7_IN, PL7_IN_PU,
+ PL6_FN, PL6_OUT, PL6_IN, PL6_IN_PU,
+ PL5_FN, PL5_OUT, PL5_IN, PL5_IN_PU,
+ PL4_FN, PL4_OUT, PL4_IN, PL4_IN_PU,
+ PL3_FN, PL3_OUT, PL3_IN, PL3_IN_PU,
+ PL2_FN, PL2_OUT, PL2_IN, PL2_IN_PU,
+ PL1_FN, PL1_OUT, PL1_IN, PL1_IN_PU,
+ PL0_FN, PL0_OUT, PL0_IN, PL0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xffe70016, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PM1_FN, PM1_OUT, PM1_IN, PM1_IN_PU,
+ PM0_FN, PM0_OUT, PM0_IN, PM0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xffe70018, 16, 2) {
+ PN7_FN, PN7_OUT, PN7_IN, PN7_IN_PU,
+ PN6_FN, PN6_OUT, PN6_IN, PN6_IN_PU,
+ PN5_FN, PN5_OUT, PN5_IN, PN5_IN_PU,
+ PN4_FN, PN4_OUT, PN4_IN, PN4_IN_PU,
+ PN3_FN, PN3_OUT, PN3_IN, PN3_IN_PU,
+ PN2_FN, PN2_OUT, PN2_IN, PN2_IN_PU,
+ PN1_FN, PN1_OUT, PN1_IN, PN1_IN_PU,
+ PN0_FN, PN0_OUT, PN0_IN, PN0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PPCR", 0xffe7001a, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PP5_FN, PP5_OUT, PP5_IN, PP5_IN_PU,
+ PP4_FN, PP4_OUT, PP4_IN, PP4_IN_PU,
+ PP3_FN, PP3_OUT, PP3_IN, PP3_IN_PU,
+ PP2_FN, PP2_OUT, PP2_IN, PP2_IN_PU,
+ PP1_FN, PP1_OUT, PP1_IN, PP1_IN_PU,
+ PP0_FN, PP0_OUT, PP0_IN, PP0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xffe7001c, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PQ4_FN, PQ4_OUT, PQ4_IN, PQ4_IN_PU,
+ PQ3_FN, PQ3_OUT, PQ3_IN, PQ3_IN_PU,
+ PQ2_FN, PQ2_OUT, PQ2_IN, PQ2_IN_PU,
+ PQ1_FN, PQ1_OUT, PQ1_IN, PQ1_IN_PU,
+ PQ0_FN, PQ0_OUT, PQ0_IN, PQ0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xffe7001e, 16, 2) {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PR3_FN, PR3_OUT, PR3_IN, PR3_IN_PU,
+ PR2_FN, PR2_OUT, PR2_IN, PR2_IN_PU,
+ PR1_FN, PR1_OUT, PR1_IN, PR1_IN_PU,
+ PR0_FN, PR0_OUT, PR0_IN, PR0_IN_PU }
+ },
+ { PINMUX_CFG_REG("P1MSELR", 0xffe70080, 16, 1) {
+ P1MSEL15_0, P1MSEL15_1,
+ P1MSEL14_0, P1MSEL14_1,
+ P1MSEL13_0, P1MSEL13_1,
+ P1MSEL12_0, P1MSEL12_1,
+ P1MSEL11_0, P1MSEL11_1,
+ P1MSEL10_0, P1MSEL10_1,
+ P1MSEL9_0, P1MSEL9_1,
+ P1MSEL8_0, P1MSEL8_1,
+ P1MSEL7_0, P1MSEL7_1,
+ P1MSEL6_0, P1MSEL6_1,
+ P1MSEL5_0, 0,
+ P1MSEL4_0, P1MSEL4_1,
+ P1MSEL3_0, P1MSEL3_1,
+ P1MSEL2_0, P1MSEL2_1,
+ P1MSEL1_0, P1MSEL1_1,
+ P1MSEL0_0, P1MSEL0_1 }
+ },
+ { PINMUX_CFG_REG("P2MSELR", 0xffe70082, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ P2MSEL2_0, P2MSEL2_1,
+ P2MSEL1_0, P2MSEL1_1,
+ P2MSEL0_0, P2MSEL0_1 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xffe70020, 8) {
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xffe70022, 8) {
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xffe70024, 8) {
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xffe70026, 8) {
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xffe70028, 8) {
+ 0, 0, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xffe7002a, 8) {
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xffe7002c, 8) {
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xffe7002e, 8) {
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xffe70030, 8) {
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xffe70032, 8) {
+ PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA,
+ PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xffe70034, 8) {
+ PL7_DATA, PL6_DATA, PL5_DATA, PL4_DATA,
+ PL3_DATA, PL2_DATA, PL1_DATA, PL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xffe70036, 8) {
+ 0, 0, 0, 0,
+ 0, 0, PM1_DATA, PM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xffe70038, 8) {
+ PN7_DATA, PN6_DATA, PN5_DATA, PN4_DATA,
+ PN3_DATA, PN2_DATA, PN1_DATA, PN0_DATA }
+ },
+ { PINMUX_DATA_REG("PPDR", 0xffe7003a, 8) {
+ 0, 0, PP5_DATA, PP4_DATA,
+ PP3_DATA, PP2_DATA, PP1_DATA, PP0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xffe7003c, 8) {
+ 0, 0, 0, PQ4_DATA,
+ PQ3_DATA, PQ2_DATA, PQ1_DATA, PQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xffe7003e, 8) {
+ 0, 0, 0, 0,
+ PR3_DATA, PR2_DATA, PR1_DATA, PR0_DATA }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7785_pinmux_info = {
+ .name = "sh7785_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_IRQOUT,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7786.c b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
new file mode 100644
index 000000000000..1e18b58f9e5f
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
@@ -0,0 +1,837 @@
+/*
+ * SH7786 Pinmux
+ *
+ * Copyright (C) 2008, 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on SH7785 pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/sh7786.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE7_DATA, PE6_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA,
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN, PB0_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE7_IN, PE6_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PG7_IN, PG6_IN, PG5_IN,
+ PH7_IN, PH6_IN, PH5_IN, PH4_IN,
+ PH3_IN, PH2_IN, PH1_IN, PH0_IN,
+ PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN,
+ PJ3_IN, PJ2_IN, PJ1_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
+ PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
+ PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
+ PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
+ PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
+ PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
+ PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
+ PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
+ PE7_IN_PU, PE6_IN_PU,
+ PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
+ PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
+ PG7_IN_PU, PG6_IN_PU, PG5_IN_PU,
+ PH7_IN_PU, PH6_IN_PU, PH5_IN_PU, PH4_IN_PU,
+ PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
+ PJ7_IN_PU, PJ6_IN_PU, PJ5_IN_PU, PJ4_IN_PU,
+ PJ3_IN_PU, PJ2_IN_PU, PJ1_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
+ PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE7_OUT, PE6_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT,
+ PH7_OUT, PH6_OUT, PH5_OUT, PH4_OUT,
+ PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT,
+ PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT,
+ PJ3_OUT, PJ2_OUT, PJ1_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PA7_FN, PA6_FN, PA5_FN, PA4_FN,
+ PA3_FN, PA2_FN, PA1_FN, PA0_FN,
+ PB7_FN, PB6_FN, PB5_FN, PB4_FN,
+ PB3_FN, PB2_FN, PB1_FN, PB0_FN,
+ PC7_FN, PC6_FN, PC5_FN, PC4_FN,
+ PC3_FN, PC2_FN, PC1_FN, PC0_FN,
+ PD7_FN, PD6_FN, PD5_FN, PD4_FN,
+ PD3_FN, PD2_FN, PD1_FN, PD0_FN,
+ PE7_FN, PE6_FN,
+ PF7_FN, PF6_FN, PF5_FN, PF4_FN,
+ PF3_FN, PF2_FN, PF1_FN, PF0_FN,
+ PG7_FN, PG6_FN, PG5_FN,
+ PH7_FN, PH6_FN, PH5_FN, PH4_FN,
+ PH3_FN, PH2_FN, PH1_FN, PH0_FN,
+ PJ7_FN, PJ6_FN, PJ5_FN, PJ4_FN,
+ PJ3_FN, PJ2_FN, PJ1_FN,
+ P1MSEL14_0, P1MSEL14_1,
+ P1MSEL13_0, P1MSEL13_1,
+ P1MSEL12_0, P1MSEL12_1,
+ P1MSEL11_0, P1MSEL11_1,
+ P1MSEL10_0, P1MSEL10_1,
+ P1MSEL9_0, P1MSEL9_1,
+ P1MSEL8_0, P1MSEL8_1,
+ P1MSEL7_0, P1MSEL7_1,
+ P1MSEL6_0, P1MSEL6_1,
+ P1MSEL5_0, P1MSEL5_1,
+ P1MSEL4_0, P1MSEL4_1,
+ P1MSEL3_0, P1MSEL3_1,
+ P1MSEL2_0, P1MSEL2_1,
+ P1MSEL1_0, P1MSEL1_1,
+ P1MSEL0_0, P1MSEL0_1,
+
+ P2MSEL15_0, P2MSEL15_1,
+ P2MSEL14_0, P2MSEL14_1,
+ P2MSEL13_0, P2MSEL13_1,
+ P2MSEL12_0, P2MSEL12_1,
+ P2MSEL11_0, P2MSEL11_1,
+ P2MSEL10_0, P2MSEL10_1,
+ P2MSEL9_0, P2MSEL9_1,
+ P2MSEL8_0, P2MSEL8_1,
+ P2MSEL7_0, P2MSEL7_1,
+ P2MSEL6_0, P2MSEL6_1,
+ P2MSEL5_0, P2MSEL5_1,
+ P2MSEL4_0, P2MSEL4_1,
+ P2MSEL3_0, P2MSEL3_1,
+ P2MSEL2_0, P2MSEL2_1,
+ P2MSEL1_0, P2MSEL1_1,
+ P2MSEL0_0, P2MSEL0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ DCLKIN_MARK, DCLKOUT_MARK, ODDF_MARK,
+ VSYNC_MARK, HSYNC_MARK, CDE_MARK, DISP_MARK,
+ DR0_MARK, DR1_MARK, DR2_MARK, DR3_MARK, DR4_MARK, DR5_MARK,
+ DG0_MARK, DG1_MARK, DG2_MARK, DG3_MARK, DG4_MARK, DG5_MARK,
+ DB0_MARK, DB1_MARK, DB2_MARK, DB3_MARK, DB4_MARK, DB5_MARK,
+ ETH_MAGIC_MARK, ETH_LINK_MARK, ETH_TX_ER_MARK, ETH_TX_EN_MARK,
+ ETH_MDIO_MARK, ETH_RX_CLK_MARK, ETH_MDC_MARK, ETH_COL_MARK,
+ ETH_TX_CLK_MARK, ETH_CRS_MARK, ETH_RX_DV_MARK, ETH_RX_ER_MARK,
+ ETH_TXD3_MARK, ETH_TXD2_MARK, ETH_TXD1_MARK, ETH_TXD0_MARK,
+ ETH_RXD3_MARK, ETH_RXD2_MARK, ETH_RXD1_MARK, ETH_RXD0_MARK,
+ HSPI_CLK_MARK, HSPI_CS_MARK, HSPI_RX_MARK, HSPI_TX_MARK,
+ SCIF0_CTS_MARK, SCIF0_RTS_MARK,
+ SCIF0_SCK_MARK, SCIF0_RXD_MARK, SCIF0_TXD_MARK,
+ SCIF1_SCK_MARK, SCIF1_RXD_MARK, SCIF1_TXD_MARK,
+ SCIF3_SCK_MARK, SCIF3_RXD_MARK, SCIF3_TXD_MARK,
+ SCIF4_SCK_MARK, SCIF4_RXD_MARK, SCIF4_TXD_MARK,
+ SCIF5_SCK_MARK, SCIF5_RXD_MARK, SCIF5_TXD_MARK,
+ BREQ_MARK, IOIS16_MARK, CE2B_MARK, CE2A_MARK, BACK_MARK,
+ FALE_MARK, FRB_MARK, FSTATUS_MARK,
+ FSE_MARK, FCLE_MARK,
+ DACK0_MARK, DACK1_MARK, DACK2_MARK, DACK3_MARK,
+ DREQ0_MARK, DREQ1_MARK, DREQ2_MARK, DREQ3_MARK,
+ DRAK0_MARK, DRAK1_MARK, DRAK2_MARK, DRAK3_MARK,
+ USB_OVC1_MARK, USB_OVC0_MARK,
+ USB_PENC1_MARK, USB_PENC0_MARK,
+ HAC_RES_MARK,
+ HAC1_SDOUT_MARK, HAC1_SDIN_MARK, HAC1_SYNC_MARK, HAC1_BITCLK_MARK,
+ HAC0_SDOUT_MARK, HAC0_SDIN_MARK, HAC0_SYNC_MARK, HAC0_BITCLK_MARK,
+ SSI0_SDATA_MARK, SSI0_SCK_MARK, SSI0_WS_MARK, SSI0_CLK_MARK,
+ SSI1_SDATA_MARK, SSI1_SCK_MARK, SSI1_WS_MARK, SSI1_CLK_MARK,
+ SSI2_SDATA_MARK, SSI2_SCK_MARK, SSI2_WS_MARK,
+ SSI3_SDATA_MARK, SSI3_SCK_MARK, SSI3_WS_MARK,
+ SDIF1CMD_MARK, SDIF1CD_MARK, SDIF1WP_MARK, SDIF1CLK_MARK,
+ SDIF1D3_MARK, SDIF1D2_MARK, SDIF1D1_MARK, SDIF1D0_MARK,
+ SDIF0CMD_MARK, SDIF0CD_MARK, SDIF0WP_MARK, SDIF0CLK_MARK,
+ SDIF0D3_MARK, SDIF0D2_MARK, SDIF0D1_MARK, SDIF0D0_MARK,
+ TCLK_MARK,
+ IRL7_MARK, IRL6_MARK, IRL5_MARK, IRL4_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+
+ /* PA GPIO */
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+
+ /* PB GPIO */
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+
+ /* PC GPIO */
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+
+ /* PD GPIO */
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+
+ /* PE GPIO */
+ PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT, PE7_IN_PU),
+ PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT, PE6_IN_PU),
+
+ /* PF GPIO */
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+
+ /* PG GPIO */
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
+
+ /* PH GPIO */
+ PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT, PH7_IN_PU),
+ PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT, PH6_IN_PU),
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+
+ /* PJ GPIO */
+ PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT, PJ7_IN_PU),
+ PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT, PJ6_IN_PU),
+ PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT, PJ5_IN_PU),
+ PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT, PJ4_IN_PU),
+ PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT, PJ3_IN_PU),
+ PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT, PJ2_IN_PU),
+ PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT, PJ1_IN_PU),
+
+ /* PA FN */
+ PINMUX_DATA(CDE_MARK, P1MSEL2_0, PA7_FN),
+ PINMUX_DATA(DISP_MARK, P1MSEL2_0, PA6_FN),
+ PINMUX_DATA(DR5_MARK, P1MSEL2_0, PA5_FN),
+ PINMUX_DATA(DR4_MARK, P1MSEL2_0, PA4_FN),
+ PINMUX_DATA(DR3_MARK, P1MSEL2_0, PA3_FN),
+ PINMUX_DATA(DR2_MARK, P1MSEL2_0, PA2_FN),
+ PINMUX_DATA(DR1_MARK, P1MSEL2_0, PA1_FN),
+ PINMUX_DATA(DR0_MARK, P1MSEL2_0, PA0_FN),
+ PINMUX_DATA(ETH_MAGIC_MARK, P1MSEL2_1, PA7_FN),
+ PINMUX_DATA(ETH_LINK_MARK, P1MSEL2_1, PA6_FN),
+ PINMUX_DATA(ETH_TX_ER_MARK, P1MSEL2_1, PA5_FN),
+ PINMUX_DATA(ETH_TX_EN_MARK, P1MSEL2_1, PA4_FN),
+ PINMUX_DATA(ETH_TXD3_MARK, P1MSEL2_1, PA3_FN),
+ PINMUX_DATA(ETH_TXD2_MARK, P1MSEL2_1, PA2_FN),
+ PINMUX_DATA(ETH_TXD1_MARK, P1MSEL2_1, PA1_FN),
+ PINMUX_DATA(ETH_TXD0_MARK, P1MSEL2_1, PA0_FN),
+
+ /* PB FN */
+ PINMUX_DATA(VSYNC_MARK, P1MSEL3_0, PB7_FN),
+ PINMUX_DATA(ODDF_MARK, P1MSEL3_0, PB6_FN),
+ PINMUX_DATA(DG5_MARK, P1MSEL2_0, PB5_FN),
+ PINMUX_DATA(DG4_MARK, P1MSEL2_0, PB4_FN),
+ PINMUX_DATA(DG3_MARK, P1MSEL2_0, PB3_FN),
+ PINMUX_DATA(DG2_MARK, P1MSEL2_0, PB2_FN),
+ PINMUX_DATA(DG1_MARK, P1MSEL2_0, PB1_FN),
+ PINMUX_DATA(DG0_MARK, P1MSEL2_0, PB0_FN),
+ PINMUX_DATA(HSPI_CLK_MARK, P1MSEL3_1, PB7_FN),
+ PINMUX_DATA(HSPI_CS_MARK, P1MSEL3_1, PB6_FN),
+ PINMUX_DATA(ETH_MDIO_MARK, P1MSEL2_1, PB5_FN),
+ PINMUX_DATA(ETH_RX_CLK_MARK, P1MSEL2_1, PB4_FN),
+ PINMUX_DATA(ETH_MDC_MARK, P1MSEL2_1, PB3_FN),
+ PINMUX_DATA(ETH_COL_MARK, P1MSEL2_1, PB2_FN),
+ PINMUX_DATA(ETH_TX_CLK_MARK, P1MSEL2_1, PB1_FN),
+ PINMUX_DATA(ETH_CRS_MARK, P1MSEL2_1, PB0_FN),
+
+ /* PC FN */
+ PINMUX_DATA(DCLKIN_MARK, P1MSEL3_0, PC7_FN),
+ PINMUX_DATA(HSYNC_MARK, P1MSEL3_0, PC6_FN),
+ PINMUX_DATA(DB5_MARK, P1MSEL2_0, PC5_FN),
+ PINMUX_DATA(DB4_MARK, P1MSEL2_0, PC4_FN),
+ PINMUX_DATA(DB3_MARK, P1MSEL2_0, PC3_FN),
+ PINMUX_DATA(DB2_MARK, P1MSEL2_0, PC2_FN),
+ PINMUX_DATA(DB1_MARK, P1MSEL2_0, PC1_FN),
+ PINMUX_DATA(DB0_MARK, P1MSEL2_0, PC0_FN),
+
+ PINMUX_DATA(HSPI_RX_MARK, P1MSEL3_1, PC7_FN),
+ PINMUX_DATA(HSPI_TX_MARK, P1MSEL3_1, PC6_FN),
+ PINMUX_DATA(ETH_RXD3_MARK, P1MSEL2_1, PC5_FN),
+ PINMUX_DATA(ETH_RXD2_MARK, P1MSEL2_1, PC4_FN),
+ PINMUX_DATA(ETH_RXD1_MARK, P1MSEL2_1, PC3_FN),
+ PINMUX_DATA(ETH_RXD0_MARK, P1MSEL2_1, PC2_FN),
+ PINMUX_DATA(ETH_RX_DV_MARK, P1MSEL2_1, PC1_FN),
+ PINMUX_DATA(ETH_RX_ER_MARK, P1MSEL2_1, PC0_FN),
+
+ /* PD FN */
+ PINMUX_DATA(DCLKOUT_MARK, PD7_FN),
+ PINMUX_DATA(SCIF1_SCK_MARK, PD6_FN),
+ PINMUX_DATA(SCIF1_RXD_MARK, PD5_FN),
+ PINMUX_DATA(SCIF1_TXD_MARK, PD4_FN),
+ PINMUX_DATA(DACK1_MARK, P1MSEL13_1, P1MSEL12_0, PD3_FN),
+ PINMUX_DATA(BACK_MARK, P1MSEL13_0, P1MSEL12_1, PD3_FN),
+ PINMUX_DATA(FALE_MARK, P1MSEL13_0, P1MSEL12_0, PD3_FN),
+ PINMUX_DATA(DACK0_MARK, P1MSEL14_1, PD2_FN),
+ PINMUX_DATA(FCLE_MARK, P1MSEL14_0, PD2_FN),
+ PINMUX_DATA(DREQ1_MARK, P1MSEL10_0, P1MSEL9_1, PD1_FN),
+ PINMUX_DATA(BREQ_MARK, P1MSEL10_1, P1MSEL9_0, PD1_FN),
+ PINMUX_DATA(USB_OVC1_MARK, P1MSEL10_0, P1MSEL9_0, PD1_FN),
+ PINMUX_DATA(DREQ0_MARK, P1MSEL11_1, PD0_FN),
+ PINMUX_DATA(USB_OVC0_MARK, P1MSEL11_0, PD0_FN),
+
+ /* PE FN */
+ PINMUX_DATA(USB_PENC1_MARK, PE7_FN),
+ PINMUX_DATA(USB_PENC0_MARK, PE6_FN),
+
+ /* PF FN */
+ PINMUX_DATA(HAC1_SDOUT_MARK, P2MSEL15_0, P2MSEL14_0, PF7_FN),
+ PINMUX_DATA(HAC1_SDIN_MARK, P2MSEL15_0, P2MSEL14_0, PF6_FN),
+ PINMUX_DATA(HAC1_SYNC_MARK, P2MSEL15_0, P2MSEL14_0, PF5_FN),
+ PINMUX_DATA(HAC1_BITCLK_MARK, P2MSEL15_0, P2MSEL14_0, PF4_FN),
+ PINMUX_DATA(HAC0_SDOUT_MARK, P2MSEL13_0, P2MSEL12_0, PF3_FN),
+ PINMUX_DATA(HAC0_SDIN_MARK, P2MSEL13_0, P2MSEL12_0, PF2_FN),
+ PINMUX_DATA(HAC0_SYNC_MARK, P2MSEL13_0, P2MSEL12_0, PF1_FN),
+ PINMUX_DATA(HAC0_BITCLK_MARK, P2MSEL13_0, P2MSEL12_0, PF0_FN),
+ PINMUX_DATA(SSI1_SDATA_MARK, P2MSEL15_0, P2MSEL14_1, PF7_FN),
+ PINMUX_DATA(SSI1_SCK_MARK, P2MSEL15_0, P2MSEL14_1, PF6_FN),
+ PINMUX_DATA(SSI1_WS_MARK, P2MSEL15_0, P2MSEL14_1, PF5_FN),
+ PINMUX_DATA(SSI1_CLK_MARK, P2MSEL15_0, P2MSEL14_1, PF4_FN),
+ PINMUX_DATA(SSI0_SDATA_MARK, P2MSEL13_0, P2MSEL12_1, PF3_FN),
+ PINMUX_DATA(SSI0_SCK_MARK, P2MSEL13_0, P2MSEL12_1, PF2_FN),
+ PINMUX_DATA(SSI0_WS_MARK, P2MSEL13_0, P2MSEL12_1, PF1_FN),
+ PINMUX_DATA(SSI0_CLK_MARK, P2MSEL13_0, P2MSEL12_1, PF0_FN),
+ PINMUX_DATA(SDIF1CMD_MARK, P2MSEL15_1, P2MSEL14_0, PF7_FN),
+ PINMUX_DATA(SDIF1CD_MARK, P2MSEL15_1, P2MSEL14_0, PF6_FN),
+ PINMUX_DATA(SDIF1WP_MARK, P2MSEL15_1, P2MSEL14_0, PF5_FN),
+ PINMUX_DATA(SDIF1CLK_MARK, P2MSEL15_1, P2MSEL14_0, PF4_FN),
+ PINMUX_DATA(SDIF1D3_MARK, P2MSEL13_1, P2MSEL12_0, PF3_FN),
+ PINMUX_DATA(SDIF1D2_MARK, P2MSEL13_1, P2MSEL12_0, PF2_FN),
+ PINMUX_DATA(SDIF1D1_MARK, P2MSEL13_1, P2MSEL12_0, PF1_FN),
+ PINMUX_DATA(SDIF1D0_MARK, P2MSEL13_1, P2MSEL12_0, PF0_FN),
+
+ /* PG FN */
+ PINMUX_DATA(SCIF3_SCK_MARK, P1MSEL8_0, PG7_FN),
+ PINMUX_DATA(SSI2_SDATA_MARK, P1MSEL8_1, PG7_FN),
+ PINMUX_DATA(SCIF3_RXD_MARK, P1MSEL7_0, P1MSEL6_0, PG6_FN),
+ PINMUX_DATA(SSI2_SCK_MARK, P1MSEL7_1, P1MSEL6_0, PG6_FN),
+ PINMUX_DATA(TCLK_MARK, P1MSEL7_0, P1MSEL6_1, PG6_FN),
+ PINMUX_DATA(SCIF3_TXD_MARK, P1MSEL5_0, P1MSEL4_0, PG5_FN),
+ PINMUX_DATA(SSI2_WS_MARK, P1MSEL5_1, P1MSEL4_0, PG5_FN),
+ PINMUX_DATA(HAC_RES_MARK, P1MSEL5_0, P1MSEL4_1, PG5_FN),
+
+ /* PH FN */
+ PINMUX_DATA(DACK3_MARK, P2MSEL4_0, PH7_FN),
+ PINMUX_DATA(SDIF0CMD_MARK, P2MSEL4_1, PH7_FN),
+ PINMUX_DATA(DACK2_MARK, P2MSEL4_0, PH6_FN),
+ PINMUX_DATA(SDIF0CD_MARK, P2MSEL4_1, PH6_FN),
+ PINMUX_DATA(DREQ3_MARK, P2MSEL4_0, PH5_FN),
+ PINMUX_DATA(SDIF0WP_MARK, P2MSEL4_1, PH5_FN),
+ PINMUX_DATA(DREQ2_MARK, P2MSEL3_0, P2MSEL2_1, PH4_FN),
+ PINMUX_DATA(SDIF0CLK_MARK, P2MSEL3_1, P2MSEL2_0, PH4_FN),
+ PINMUX_DATA(SCIF0_CTS_MARK, P2MSEL3_0, P2MSEL2_0, PH4_FN),
+ PINMUX_DATA(SDIF0D3_MARK, P2MSEL1_1, P2MSEL0_0, PH3_FN),
+ PINMUX_DATA(SCIF0_RTS_MARK, P2MSEL1_0, P2MSEL0_0, PH3_FN),
+ PINMUX_DATA(IRL7_MARK, P2MSEL1_0, P2MSEL0_1, PH3_FN),
+ PINMUX_DATA(SDIF0D2_MARK, P2MSEL1_1, P2MSEL0_0, PH2_FN),
+ PINMUX_DATA(SCIF0_SCK_MARK, P2MSEL1_0, P2MSEL0_0, PH2_FN),
+ PINMUX_DATA(IRL6_MARK, P2MSEL1_0, P2MSEL0_1, PH2_FN),
+ PINMUX_DATA(SDIF0D1_MARK, P2MSEL1_1, P2MSEL0_0, PH1_FN),
+ PINMUX_DATA(SCIF0_RXD_MARK, P2MSEL1_0, P2MSEL0_0, PH1_FN),
+ PINMUX_DATA(IRL5_MARK, P2MSEL1_0, P2MSEL0_1, PH1_FN),
+ PINMUX_DATA(SDIF0D0_MARK, P2MSEL1_1, P2MSEL0_0, PH0_FN),
+ PINMUX_DATA(SCIF0_TXD_MARK, P2MSEL1_0, P2MSEL0_0, PH0_FN),
+ PINMUX_DATA(IRL4_MARK, P2MSEL1_0, P2MSEL0_1, PH0_FN),
+
+ /* PJ FN */
+ PINMUX_DATA(SCIF5_SCK_MARK, P2MSEL11_1, PJ7_FN),
+ PINMUX_DATA(FRB_MARK, P2MSEL11_0, PJ7_FN),
+ PINMUX_DATA(SCIF5_RXD_MARK, P2MSEL10_0, PJ6_FN),
+ PINMUX_DATA(IOIS16_MARK, P2MSEL10_1, PJ6_FN),
+ PINMUX_DATA(SCIF5_TXD_MARK, P2MSEL10_0, PJ5_FN),
+ PINMUX_DATA(CE2B_MARK, P2MSEL10_1, PJ5_FN),
+ PINMUX_DATA(DRAK3_MARK, P2MSEL7_0, PJ4_FN),
+ PINMUX_DATA(CE2A_MARK, P2MSEL7_1, PJ4_FN),
+ PINMUX_DATA(SCIF4_SCK_MARK, P2MSEL9_0, P2MSEL8_0, PJ3_FN),
+ PINMUX_DATA(DRAK2_MARK, P2MSEL9_0, P2MSEL8_1, PJ3_FN),
+ PINMUX_DATA(SSI3_WS_MARK, P2MSEL9_1, P2MSEL8_0, PJ3_FN),
+ PINMUX_DATA(SCIF4_RXD_MARK, P2MSEL6_1, P2MSEL5_0, PJ2_FN),
+ PINMUX_DATA(DRAK1_MARK, P2MSEL6_0, P2MSEL5_1, PJ2_FN),
+ PINMUX_DATA(FSTATUS_MARK, P2MSEL6_0, P2MSEL5_0, PJ2_FN),
+ PINMUX_DATA(SSI3_SDATA_MARK, P2MSEL6_1, P2MSEL5_1, PJ2_FN),
+ PINMUX_DATA(SCIF4_TXD_MARK, P2MSEL6_1, P2MSEL5_0, PJ1_FN),
+ PINMUX_DATA(DRAK0_MARK, P2MSEL6_0, P2MSEL5_1, PJ1_FN),
+ PINMUX_DATA(FSE_MARK, P2MSEL6_0, P2MSEL5_0, PJ1_FN),
+ PINMUX_DATA(SSI3_SCK_MARK, P2MSEL6_1, P2MSEL5_1, PJ1_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE7, PE7_DATA),
+ PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* PG */
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+
+ /* PH */
+ PINMUX_GPIO(GPIO_PH7, PH7_DATA),
+ PINMUX_GPIO(GPIO_PH6, PH6_DATA),
+ PINMUX_GPIO(GPIO_PH5, PH5_DATA),
+ PINMUX_GPIO(GPIO_PH4, PH4_DATA),
+ PINMUX_GPIO(GPIO_PH3, PH3_DATA),
+ PINMUX_GPIO(GPIO_PH2, PH2_DATA),
+ PINMUX_GPIO(GPIO_PH1, PH1_DATA),
+ PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+
+ /* PJ */
+ PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
+ PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
+ PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
+ PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
+ PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
+ PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
+ PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
+
+ /* FN */
+ PINMUX_GPIO(GPIO_FN_CDE, CDE_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_MAGIC, ETH_MAGIC_MARK),
+ PINMUX_GPIO(GPIO_FN_DISP, DISP_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_LINK, ETH_LINK_MARK),
+ PINMUX_GPIO(GPIO_FN_DR5, DR5_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TX_ER, ETH_TX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_DR4, DR4_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TX_EN, ETH_TX_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_DR3, DR3_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TXD3, ETH_TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_DR2, DR2_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TXD2, ETH_TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_DR1, DR1_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TXD1, ETH_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_DR0, DR0_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TXD0, ETH_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_VSYNC, VSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_CLK, HSPI_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_ODDF, ODDF_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_CS, HSPI_CS_MARK),
+ PINMUX_GPIO(GPIO_FN_DG5, DG5_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_MDIO, ETH_MDIO_MARK),
+ PINMUX_GPIO(GPIO_FN_DG4, DG4_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RX_CLK, ETH_RX_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DG3, DG3_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_MDC, ETH_MDC_MARK),
+ PINMUX_GPIO(GPIO_FN_DG2, DG2_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_COL, ETH_COL_MARK),
+ PINMUX_GPIO(GPIO_FN_DG1, DG1_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_TX_CLK, ETH_TX_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_DG0, DG0_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_CRS, ETH_CRS_MARK),
+ PINMUX_GPIO(GPIO_FN_DCLKIN, DCLKIN_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_RX, HSPI_RX_MARK),
+ PINMUX_GPIO(GPIO_FN_HSYNC, HSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_HSPI_TX, HSPI_TX_MARK),
+ PINMUX_GPIO(GPIO_FN_DB5, DB5_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RXD3, ETH_RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_DB4, DB4_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RXD2, ETH_RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_DB3, DB3_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RXD1, ETH_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_DB2, DB2_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RXD0, ETH_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_DB1, DB1_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RX_DV, ETH_RX_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_DB0, DB0_MARK),
+ PINMUX_GPIO(GPIO_FN_ETH_RX_ER, ETH_RX_ER_MARK),
+ PINMUX_GPIO(GPIO_FN_DCLKOUT, DCLKOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_FALE, FALE_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_FCLE, FCLE_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_USB_OVC1, USB_OVC1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_USB_OVC0, USB_OVC0_MARK),
+ PINMUX_GPIO(GPIO_FN_USB_PENC1, USB_PENC1_MARK),
+ PINMUX_GPIO(GPIO_FN_USB_PENC0, USB_PENC0_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SDOUT, HAC1_SDOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_SDATA, SSI1_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1CMD, SDIF1CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SDIN, HAC1_SDIN_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_SCK, SSI1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1CD, SDIF1CD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_SYNC, HAC1_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_WS, SSI1_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1WP, SDIF1WP_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC1_BITCLK, HAC1_BITCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI1_CLK, SSI1_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1CLK, SDIF1CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SDOUT, HAC0_SDOUT_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_SDATA, SSI0_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1D3, SDIF1D3_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SDIN, HAC0_SDIN_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_SCK, SSI0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1D2, SDIF1D2_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_SYNC, HAC0_SYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_WS, SSI0_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1D1, SDIF1D1_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC0_BITCLK, HAC0_BITCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI0_CLK, SSI0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF1D0, SDIF1D0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_SCK, SCIF3_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI2_SDATA, SSI2_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_RXD, SCIF3_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI2_SCK, SSI2_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF3_TXD, SCIF3_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_HAC_RES, HAC_RES_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI2_WS, SSI2_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0CMD, SDIF0CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0CD, SDIF0CD_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0WP, SDIF0WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0CLK, SDIF0CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL7, IRL7_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0D3, SDIF0D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL6, IRL6_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0D2, SDIF0D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL5, IRL5_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0D1, SDIF0D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL4, IRL4_MARK),
+ PINMUX_GPIO(GPIO_FN_SDIF0D0, SDIF0D0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI3_WS, SSI3_WS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1, DRAK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI3_SDATA, SSI3_SDATA_MARK),
+ PINMUX_GPIO(GPIO_FN_FSTATUS, FSTATUS_MARK),
+ PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0, DRAK0_MARK),
+ PINMUX_GPIO(GPIO_FN_SSI3_SCK, SSI3_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_FSE, FSE_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xffcc0000, 16, 2) {
+ PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
+ PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
+ PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
+ PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
+ PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
+ PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
+ PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
+ PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xffcc0002, 16, 2) {
+ PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
+ PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
+ PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
+ PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
+ PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
+ PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
+ PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
+ PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xffcc0004, 16, 2) {
+ PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
+ PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
+ PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
+ PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
+ PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
+ PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
+ PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
+ PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xffcc0006, 16, 2) {
+ PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
+ PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
+ PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
+ PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
+ PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
+ PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
+ PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
+ PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PECR", 0xffcc0008, 16, 2) {
+ PE7_FN, PE7_OUT, PE7_IN, PE7_IN_PU,
+ PE6_FN, PE6_OUT, PE6_IN, PE6_IN_PU,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xffcc000a, 16, 2) {
+ PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
+ PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
+ PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
+ PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
+ PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
+ PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
+ PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
+ PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xffcc000c, 16, 2) {
+ PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
+ PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
+ PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xffcc000e, 16, 2) {
+ PH7_FN, PH7_OUT, PH7_IN, PH7_IN_PU,
+ PH6_FN, PH6_OUT, PH6_IN, PH6_IN_PU,
+ PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
+ PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
+ PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
+ PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
+ PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
+ PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xffcc0010, 16, 2) {
+ PJ7_FN, PJ7_OUT, PJ7_IN, PJ7_IN_PU,
+ PJ6_FN, PJ6_OUT, PJ6_IN, PJ6_IN_PU,
+ PJ5_FN, PJ5_OUT, PJ5_IN, PJ5_IN_PU,
+ PJ4_FN, PJ4_OUT, PJ4_IN, PJ4_IN_PU,
+ PJ3_FN, PJ3_OUT, PJ3_IN, PJ3_IN_PU,
+ PJ2_FN, PJ2_OUT, PJ2_IN, PJ2_IN_PU,
+ PJ1_FN, PJ1_OUT, PJ1_IN, PJ1_IN_PU,
+ 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG("P1MSELR", 0xffcc0080, 16, 1) {
+ 0, 0,
+ P1MSEL14_0, P1MSEL14_1,
+ P1MSEL13_0, P1MSEL13_1,
+ P1MSEL12_0, P1MSEL12_1,
+ P1MSEL11_0, P1MSEL11_1,
+ P1MSEL10_0, P1MSEL10_1,
+ P1MSEL9_0, P1MSEL9_1,
+ P1MSEL8_0, P1MSEL8_1,
+ P1MSEL7_0, P1MSEL7_1,
+ P1MSEL6_0, P1MSEL6_1,
+ P1MSEL5_0, P1MSEL5_1,
+ P1MSEL4_0, P1MSEL4_1,
+ P1MSEL3_0, P1MSEL3_1,
+ P1MSEL2_0, P1MSEL2_1,
+ P1MSEL1_0, P1MSEL1_1,
+ P1MSEL0_0, P1MSEL0_1 }
+ },
+ { PINMUX_CFG_REG("P2MSELR", 0xffcc0082, 16, 1) {
+ P2MSEL15_0, P2MSEL15_1,
+ P2MSEL14_0, P2MSEL14_1,
+ P2MSEL13_0, P2MSEL13_1,
+ P2MSEL12_0, P2MSEL12_1,
+ P2MSEL11_0, P2MSEL11_1,
+ P2MSEL10_0, P2MSEL10_1,
+ P2MSEL9_0, P2MSEL9_1,
+ P2MSEL8_0, P2MSEL8_1,
+ P2MSEL7_0, P2MSEL7_1,
+ P2MSEL6_0, P2MSEL6_1,
+ P2MSEL5_0, P2MSEL5_1,
+ P2MSEL4_0, P2MSEL4_1,
+ P2MSEL3_0, P2MSEL3_1,
+ P2MSEL2_0, P2MSEL2_1,
+ P2MSEL1_0, P2MSEL1_1,
+ P2MSEL0_0, P2MSEL0_1 }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xffcc0020, 8) {
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xffcc0022, 8) {
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xffcc0024, 8) {
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xffcc0026, 8) {
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xffcc0028, 8) {
+ PE7_DATA, PE6_DATA,
+ 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xffcc002a, 8) {
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xffcc002c, 8) {
+ PG7_DATA, PG6_DATA, PG5_DATA, 0,
+ 0, 0, 0, 0 }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xffcc002e, 8) {
+ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xffcc0030, 8) {
+ PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA,
+ PJ3_DATA, PJ2_DATA, PJ1_DATA, 0 }
+ },
+ { },
+};
+
+struct sh_pfc_soc_info sh7786_pinmux_info = {
+ .name = "sh7786_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_IRL4,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pfc-shx3.c b/drivers/pinctrl/sh-pfc/pfc-shx3.c
new file mode 100644
index 000000000000..ccf6918b03c6
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-shx3.c
@@ -0,0 +1,582 @@
+/*
+ * SH-X3 prototype CPU pinmux
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <cpu/shx3.h>
+
+#include "sh_pfc.h"
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+
+ PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PA7_IN, PA6_IN, PA5_IN, PA4_IN,
+ PA3_IN, PA2_IN, PA1_IN, PA0_IN,
+ PB7_IN, PB6_IN, PB5_IN, PB4_IN,
+ PB3_IN, PB2_IN, PB1_IN, PB0_IN,
+ PC7_IN, PC6_IN, PC5_IN, PC4_IN,
+ PC3_IN, PC2_IN, PC1_IN, PC0_IN,
+ PD7_IN, PD6_IN, PD5_IN, PD4_IN,
+ PD3_IN, PD2_IN, PD1_IN, PD0_IN,
+ PE7_IN, PE6_IN, PE5_IN, PE4_IN,
+ PE3_IN, PE2_IN, PE1_IN, PE0_IN,
+ PF7_IN, PF6_IN, PF5_IN, PF4_IN,
+ PF3_IN, PF2_IN, PF1_IN, PF0_IN,
+ PG7_IN, PG6_IN, PG5_IN, PG4_IN,
+ PG3_IN, PG2_IN, PG1_IN, PG0_IN,
+
+ PH5_IN, PH4_IN,
+ PH3_IN, PH2_IN, PH1_IN, PH0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
+ PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
+ PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
+ PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
+ PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
+ PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
+ PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
+ PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
+ PE7_IN_PU, PE6_IN_PU, PE5_IN_PU, PE4_IN_PU,
+ PE3_IN_PU, PE2_IN_PU, PE1_IN_PU, PE0_IN_PU,
+ PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
+ PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
+ PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU,
+ PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU,
+
+ PH5_IN_PU, PH4_IN_PU,
+ PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
+ PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
+ PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT,
+ PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT,
+ PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT,
+ PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT,
+ PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT,
+ PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT,
+ PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT,
+ PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT,
+ PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT,
+ PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT,
+ PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT,
+ PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT,
+
+ PH5_OUT, PH4_OUT,
+ PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PA7_FN, PA6_FN, PA5_FN, PA4_FN,
+ PA3_FN, PA2_FN, PA1_FN, PA0_FN,
+ PB7_FN, PB6_FN, PB5_FN, PB4_FN,
+ PB3_FN, PB2_FN, PB1_FN, PB0_FN,
+ PC7_FN, PC6_FN, PC5_FN, PC4_FN,
+ PC3_FN, PC2_FN, PC1_FN, PC0_FN,
+ PD7_FN, PD6_FN, PD5_FN, PD4_FN,
+ PD3_FN, PD2_FN, PD1_FN, PD0_FN,
+ PE7_FN, PE6_FN, PE5_FN, PE4_FN,
+ PE3_FN, PE2_FN, PE1_FN, PE0_FN,
+ PF7_FN, PF6_FN, PF5_FN, PF4_FN,
+ PF3_FN, PF2_FN, PF1_FN, PF0_FN,
+ PG7_FN, PG6_FN, PG5_FN, PG4_FN,
+ PG3_FN, PG2_FN, PG1_FN, PG0_FN,
+
+ PH5_FN, PH4_FN,
+ PH3_FN, PH2_FN, PH1_FN, PH0_FN,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ D31_MARK, D30_MARK, D29_MARK, D28_MARK, D27_MARK, D26_MARK,
+ D25_MARK, D24_MARK, D23_MARK, D22_MARK, D21_MARK, D20_MARK,
+ D19_MARK, D18_MARK, D17_MARK, D16_MARK,
+
+ BACK_MARK, BREQ_MARK,
+ WE3_MARK, WE2_MARK,
+ CS6_MARK, CS5_MARK, CS4_MARK,
+ CLKOUTENB_MARK,
+
+ DACK3_MARK, DACK2_MARK, DACK1_MARK, DACK0_MARK,
+ DREQ3_MARK, DREQ2_MARK, DREQ1_MARK, DREQ0_MARK,
+
+ IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK,
+
+ DRAK3_MARK, DRAK2_MARK, DRAK1_MARK, DRAK0_MARK,
+
+ SCK3_MARK, SCK2_MARK, SCK1_MARK, SCK0_MARK,
+ IRL3_MARK, IRL2_MARK, IRL1_MARK, IRL0_MARK,
+ TXD3_MARK, TXD2_MARK, TXD1_MARK, TXD0_MARK,
+ RXD3_MARK, RXD2_MARK, RXD1_MARK, RXD0_MARK,
+
+ CE2B_MARK, CE2A_MARK, IOIS16_MARK,
+ STATUS1_MARK, STATUS0_MARK,
+
+ IRQOUT_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t shx3_pinmux_data[] = {
+
+ /* PA GPIO */
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+
+ /* PB GPIO */
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+
+ /* PC GPIO */
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+
+ /* PD GPIO */
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+
+ /* PE GPIO */
+ PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT, PE7_IN_PU),
+ PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT, PE6_IN_PU),
+ PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU),
+ PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU),
+ PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU),
+ PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU),
+ PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU),
+ PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU),
+
+ /* PF GPIO */
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+
+ /* PG GPIO */
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
+ PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU),
+ PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU),
+ PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU),
+ PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU),
+ PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU),
+
+ /* PH GPIO */
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+
+ /* PA FN */
+ PINMUX_DATA(D31_MARK, PA7_FN),
+ PINMUX_DATA(D30_MARK, PA6_FN),
+ PINMUX_DATA(D29_MARK, PA5_FN),
+ PINMUX_DATA(D28_MARK, PA4_FN),
+ PINMUX_DATA(D27_MARK, PA3_FN),
+ PINMUX_DATA(D26_MARK, PA2_FN),
+ PINMUX_DATA(D25_MARK, PA1_FN),
+ PINMUX_DATA(D24_MARK, PA0_FN),
+
+ /* PB FN */
+ PINMUX_DATA(D23_MARK, PB7_FN),
+ PINMUX_DATA(D22_MARK, PB6_FN),
+ PINMUX_DATA(D21_MARK, PB5_FN),
+ PINMUX_DATA(D20_MARK, PB4_FN),
+ PINMUX_DATA(D19_MARK, PB3_FN),
+ PINMUX_DATA(D18_MARK, PB2_FN),
+ PINMUX_DATA(D17_MARK, PB1_FN),
+ PINMUX_DATA(D16_MARK, PB0_FN),
+
+ /* PC FN */
+ PINMUX_DATA(BACK_MARK, PC7_FN),
+ PINMUX_DATA(BREQ_MARK, PC6_FN),
+ PINMUX_DATA(WE3_MARK, PC5_FN),
+ PINMUX_DATA(WE2_MARK, PC4_FN),
+ PINMUX_DATA(CS6_MARK, PC3_FN),
+ PINMUX_DATA(CS5_MARK, PC2_FN),
+ PINMUX_DATA(CS4_MARK, PC1_FN),
+ PINMUX_DATA(CLKOUTENB_MARK, PC0_FN),
+
+ /* PD FN */
+ PINMUX_DATA(DACK3_MARK, PD7_FN),
+ PINMUX_DATA(DACK2_MARK, PD6_FN),
+ PINMUX_DATA(DACK1_MARK, PD5_FN),
+ PINMUX_DATA(DACK0_MARK, PD4_FN),
+ PINMUX_DATA(DREQ3_MARK, PD3_FN),
+ PINMUX_DATA(DREQ2_MARK, PD2_FN),
+ PINMUX_DATA(DREQ1_MARK, PD1_FN),
+ PINMUX_DATA(DREQ0_MARK, PD0_FN),
+
+ /* PE FN */
+ PINMUX_DATA(IRQ3_MARK, PE7_FN),
+ PINMUX_DATA(IRQ2_MARK, PE6_FN),
+ PINMUX_DATA(IRQ1_MARK, PE5_FN),
+ PINMUX_DATA(IRQ0_MARK, PE4_FN),
+ PINMUX_DATA(DRAK3_MARK, PE3_FN),
+ PINMUX_DATA(DRAK2_MARK, PE2_FN),
+ PINMUX_DATA(DRAK1_MARK, PE1_FN),
+ PINMUX_DATA(DRAK0_MARK, PE0_FN),
+
+ /* PF FN */
+ PINMUX_DATA(SCK3_MARK, PF7_FN),
+ PINMUX_DATA(SCK2_MARK, PF6_FN),
+ PINMUX_DATA(SCK1_MARK, PF5_FN),
+ PINMUX_DATA(SCK0_MARK, PF4_FN),
+ PINMUX_DATA(IRL3_MARK, PF3_FN),
+ PINMUX_DATA(IRL2_MARK, PF2_FN),
+ PINMUX_DATA(IRL1_MARK, PF1_FN),
+ PINMUX_DATA(IRL0_MARK, PF0_FN),
+
+ /* PG FN */
+ PINMUX_DATA(TXD3_MARK, PG7_FN),
+ PINMUX_DATA(TXD2_MARK, PG6_FN),
+ PINMUX_DATA(TXD1_MARK, PG5_FN),
+ PINMUX_DATA(TXD0_MARK, PG4_FN),
+ PINMUX_DATA(RXD3_MARK, PG3_FN),
+ PINMUX_DATA(RXD2_MARK, PG2_FN),
+ PINMUX_DATA(RXD1_MARK, PG1_FN),
+ PINMUX_DATA(RXD0_MARK, PG0_FN),
+
+ /* PH FN */
+ PINMUX_DATA(CE2B_MARK, PH5_FN),
+ PINMUX_DATA(CE2A_MARK, PH4_FN),
+ PINMUX_DATA(IOIS16_MARK, PH3_FN),
+ PINMUX_DATA(STATUS1_MARK, PH2_FN),
+ PINMUX_DATA(STATUS0_MARK, PH1_FN),
+ PINMUX_DATA(IRQOUT_MARK, PH0_FN),
+};
+
+static struct pinmux_gpio shx3_pinmux_gpios[] = {
+ /* PA */
+ PINMUX_GPIO(GPIO_PA7, PA7_DATA),
+ PINMUX_GPIO(GPIO_PA6, PA6_DATA),
+ PINMUX_GPIO(GPIO_PA5, PA5_DATA),
+ PINMUX_GPIO(GPIO_PA4, PA4_DATA),
+ PINMUX_GPIO(GPIO_PA3, PA3_DATA),
+ PINMUX_GPIO(GPIO_PA2, PA2_DATA),
+ PINMUX_GPIO(GPIO_PA1, PA1_DATA),
+ PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+
+ /* PB */
+ PINMUX_GPIO(GPIO_PB7, PB7_DATA),
+ PINMUX_GPIO(GPIO_PB6, PB6_DATA),
+ PINMUX_GPIO(GPIO_PB5, PB5_DATA),
+ PINMUX_GPIO(GPIO_PB4, PB4_DATA),
+ PINMUX_GPIO(GPIO_PB3, PB3_DATA),
+ PINMUX_GPIO(GPIO_PB2, PB2_DATA),
+ PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+
+ /* PC */
+ PINMUX_GPIO(GPIO_PC7, PC7_DATA),
+ PINMUX_GPIO(GPIO_PC6, PC6_DATA),
+ PINMUX_GPIO(GPIO_PC5, PC5_DATA),
+ PINMUX_GPIO(GPIO_PC4, PC4_DATA),
+ PINMUX_GPIO(GPIO_PC3, PC3_DATA),
+ PINMUX_GPIO(GPIO_PC2, PC2_DATA),
+ PINMUX_GPIO(GPIO_PC1, PC1_DATA),
+ PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+
+ /* PD */
+ PINMUX_GPIO(GPIO_PD7, PD7_DATA),
+ PINMUX_GPIO(GPIO_PD6, PD6_DATA),
+ PINMUX_GPIO(GPIO_PD5, PD5_DATA),
+ PINMUX_GPIO(GPIO_PD4, PD4_DATA),
+ PINMUX_GPIO(GPIO_PD3, PD3_DATA),
+ PINMUX_GPIO(GPIO_PD2, PD2_DATA),
+ PINMUX_GPIO(GPIO_PD1, PD1_DATA),
+ PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+
+ /* PE */
+ PINMUX_GPIO(GPIO_PE7, PE7_DATA),
+ PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+ PINMUX_GPIO(GPIO_PE5, PE5_DATA),
+ PINMUX_GPIO(GPIO_PE4, PE4_DATA),
+ PINMUX_GPIO(GPIO_PE3, PE3_DATA),
+ PINMUX_GPIO(GPIO_PE2, PE2_DATA),
+ PINMUX_GPIO(GPIO_PE1, PE1_DATA),
+ PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+
+ /* PF */
+ PINMUX_GPIO(GPIO_PF7, PF7_DATA),
+ PINMUX_GPIO(GPIO_PF6, PF6_DATA),
+ PINMUX_GPIO(GPIO_PF5, PF5_DATA),
+ PINMUX_GPIO(GPIO_PF4, PF4_DATA),
+ PINMUX_GPIO(GPIO_PF3, PF3_DATA),
+ PINMUX_GPIO(GPIO_PF2, PF2_DATA),
+ PINMUX_GPIO(GPIO_PF1, PF1_DATA),
+ PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+
+ /* PG */
+ PINMUX_GPIO(GPIO_PG7, PG7_DATA),
+ PINMUX_GPIO(GPIO_PG6, PG6_DATA),
+ PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+ PINMUX_GPIO(GPIO_PG4, PG4_DATA),
+ PINMUX_GPIO(GPIO_PG3, PG3_DATA),
+ PINMUX_GPIO(GPIO_PG2, PG2_DATA),
+ PINMUX_GPIO(GPIO_PG1, PG1_DATA),
+ PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+
+ /* PH */
+ PINMUX_GPIO(GPIO_PH5, PH5_DATA),
+ PINMUX_GPIO(GPIO_PH4, PH4_DATA),
+ PINMUX_GPIO(GPIO_PH3, PH3_DATA),
+ PINMUX_GPIO(GPIO_PH2, PH2_DATA),
+ PINMUX_GPIO(GPIO_PH1, PH1_DATA),
+ PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+
+ /* FN */
+ PINMUX_GPIO(GPIO_FN_D31, D31_MARK),
+ PINMUX_GPIO(GPIO_FN_D30, D30_MARK),
+ PINMUX_GPIO(GPIO_FN_D29, D29_MARK),
+ PINMUX_GPIO(GPIO_FN_D28, D28_MARK),
+ PINMUX_GPIO(GPIO_FN_D27, D27_MARK),
+ PINMUX_GPIO(GPIO_FN_D26, D26_MARK),
+ PINMUX_GPIO(GPIO_FN_D25, D25_MARK),
+ PINMUX_GPIO(GPIO_FN_D24, D24_MARK),
+ PINMUX_GPIO(GPIO_FN_D23, D23_MARK),
+ PINMUX_GPIO(GPIO_FN_D22, D22_MARK),
+ PINMUX_GPIO(GPIO_FN_D21, D21_MARK),
+ PINMUX_GPIO(GPIO_FN_D20, D20_MARK),
+ PINMUX_GPIO(GPIO_FN_D19, D19_MARK),
+ PINMUX_GPIO(GPIO_FN_D18, D18_MARK),
+ PINMUX_GPIO(GPIO_FN_D17, D17_MARK),
+ PINMUX_GPIO(GPIO_FN_D16, D16_MARK),
+ PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK),
+ PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK),
+ PINMUX_GPIO(GPIO_FN_WE3, WE3_MARK),
+ PINMUX_GPIO(GPIO_FN_WE2, WE2_MARK),
+ PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKOUTENB, CLKOUTENB_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK1, DRAK1_MARK),
+ PINMUX_GPIO(GPIO_FN_DRAK0, DRAK0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL3, IRL3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL2, IRL2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL1, IRL1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRL0, IRL0_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK),
+ PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK),
+ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK),
+};
+
+static struct pinmux_cfg_reg shx3_pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PABCR", 0xffc70000, 32, 2) {
+ PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
+ PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
+ PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
+ PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
+ PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
+ PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
+ PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
+ PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU,
+ PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
+ PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
+ PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
+ PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
+ PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
+ PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
+ PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
+ PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU, },
+ },
+ { PINMUX_CFG_REG("PCDCR", 0xffc70004, 32, 2) {
+ PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
+ PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
+ PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
+ PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
+ PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
+ PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
+ PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
+ PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU,
+ PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
+ PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
+ PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
+ PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
+ PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
+ PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
+ PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
+ PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU, },
+ },
+ { PINMUX_CFG_REG("PEFCR", 0xffc70008, 32, 2) {
+ PE7_FN, PE7_OUT, PE7_IN, PE7_IN_PU,
+ PE6_FN, PE6_OUT, PE6_IN, PE6_IN_PU,
+ PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU,
+ PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU,
+ PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU,
+ PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU,
+ PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU,
+ PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU,
+ PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
+ PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
+ PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
+ PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
+ PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
+ PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
+ PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
+ PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU, },
+ },
+ { PINMUX_CFG_REG("PGHCR", 0xffc7000c, 32, 2) {
+ PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
+ PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
+ PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
+ PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU,
+ PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU,
+ PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU,
+ PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU,
+ PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
+ PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
+ PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
+ PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
+ PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
+ PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU, },
+ },
+ { },
+};
+
+static struct pinmux_data_reg shx3_pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PABDR", 0xffc70010, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
+ PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA,
+ PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, },
+ },
+ { PINMUX_DATA_REG("PCDDR", 0xffc70014, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA,
+ PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA,
+ PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, },
+ },
+ { PINMUX_DATA_REG("PEFDR", 0xffc70018, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA,
+ PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA,
+ PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, },
+ },
+ { PINMUX_DATA_REG("PGHDR", 0xffc7001c, 32) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA,
+ PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, PH5_DATA, PH4_DATA,
+ PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA, },
+ },
+ { },
+};
+
+struct sh_pfc_soc_info shx3_pinmux_info = {
+ .name = "shx3_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN,
+ PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+ .first_gpio = GPIO_PA7,
+ .last_gpio = GPIO_FN_STATUS0,
+ .gpios = shx3_pinmux_gpios,
+ .gpio_data = shx3_pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(shx3_pinmux_data),
+ .cfg_regs = shx3_pinmux_config_regs,
+ .data_regs = shx3_pinmux_data_regs,
+};
diff --git a/drivers/sh/pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 4109b769eac0..11e0e1374d65 100644
--- a/drivers/sh/pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -7,22 +7,23 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-#define DRV_NAME "pinctrl-sh_pfc"
-#define pr_fmt(fmt) DRV_NAME " " KBUILD_MODNAME ": " fmt
+#define DRV_NAME "sh-pfc"
+#define pr_fmt(fmt) KBUILD_MODNAME " pinctrl: " fmt
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/sh_pfc.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "core.h"
struct sh_pfc_pinctrl {
struct pinctrl_dev *pctl;
@@ -37,8 +38,6 @@ struct sh_pfc_pinctrl {
spinlock_t lock;
};
-static struct sh_pfc_pinctrl *sh_pfc_pmx;
-
static int sh_pfc_get_groups_count(struct pinctrl_dev *pctldev)
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
@@ -116,7 +115,7 @@ static void sh_pfc_noop_disable(struct pinctrl_dev *pctldev, unsigned func,
{
}
-static inline int sh_pfc_config_function(struct sh_pfc *pfc, unsigned offset)
+static int sh_pfc_config_function(struct sh_pfc *pfc, unsigned offset)
{
if (sh_pfc_config_gpio(pfc, offset,
PINMUX_TYPE_FUNCTION,
@@ -140,7 +139,7 @@ static int sh_pfc_reconfig_pin(struct sh_pfc *pfc, unsigned offset,
spin_lock_irqsave(&pfc->lock, flags);
- pinmux_type = pfc->gpios[offset].flags & PINMUX_FLAG_TYPE;
+ pinmux_type = pfc->info->gpios[offset].flags & PINMUX_FLAG_TYPE;
/*
* See if the present config needs to first be de-configured.
@@ -172,8 +171,8 @@ static int sh_pfc_reconfig_pin(struct sh_pfc *pfc, unsigned offset,
GPIO_CFG_REQ) != 0)
goto err;
- pfc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
- pfc->gpios[offset].flags |= new_type;
+ pfc->info->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
+ pfc->info->gpios[offset].flags |= new_type;
ret = 0;
@@ -195,7 +194,7 @@ static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev,
spin_lock_irqsave(&pfc->lock, flags);
- pinmux_type = pfc->gpios[offset].flags & PINMUX_FLAG_TYPE;
+ pinmux_type = pfc->info->gpios[offset].flags & PINMUX_FLAG_TYPE;
switch (pinmux_type) {
case PINMUX_TYPE_FUNCTION:
@@ -236,7 +235,7 @@ static void sh_pfc_gpio_disable_free(struct pinctrl_dev *pctldev,
spin_lock_irqsave(&pfc->lock, flags);
- pinmux_type = pfc->gpios[offset].flags & PINMUX_FLAG_TYPE;
+ pinmux_type = pfc->info->gpios[offset].flags & PINMUX_FLAG_TYPE;
sh_pfc_config_gpio(pfc, offset, pinmux_type, GPIO_CFG_FREE);
@@ -270,7 +269,7 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
struct sh_pfc *pfc = pmx->pfc;
- *config = pfc->gpios[pin].flags & PINMUX_FLAG_TYPE;
+ *config = pfc->info->gpios[pin].flags & PINMUX_FLAG_TYPE;
return 0;
}
@@ -328,10 +327,8 @@ static struct pinctrl_desc sh_pfc_pinctrl_desc = {
.confops = &sh_pfc_pinconf_ops,
};
-static inline void sh_pfc_map_one_gpio(struct sh_pfc *pfc,
- struct sh_pfc_pinctrl *pmx,
- struct pinmux_gpio *gpio,
- unsigned offset)
+static void sh_pfc_map_one_gpio(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx,
+ struct pinmux_gpio *gpio, unsigned offset)
{
struct pinmux_data_reg *dummy;
unsigned long flags;
@@ -356,10 +353,10 @@ static int sh_pfc_map_gpios(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx)
unsigned long flags;
int i;
- pmx->nr_pads = pfc->last_gpio - pfc->first_gpio + 1;
+ pmx->nr_pads = pfc->info->last_gpio - pfc->info->first_gpio + 1;
- pmx->pads = kmalloc(sizeof(struct pinctrl_pin_desc) * pmx->nr_pads,
- GFP_KERNEL);
+ pmx->pads = devm_kzalloc(pfc->dev, sizeof(*pmx->pads) * pmx->nr_pads,
+ GFP_KERNEL);
if (unlikely(!pmx->pads)) {
pmx->nr_pads = 0;
return -ENOMEM;
@@ -375,9 +372,9 @@ static int sh_pfc_map_gpios(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx)
*/
for (i = 0; i < pmx->nr_pads; i++) {
struct pinctrl_pin_desc *pin = pmx->pads + i;
- struct pinmux_gpio *gpio = pfc->gpios + i;
+ struct pinmux_gpio *gpio = pfc->info->gpios + i;
- pin->number = pfc->first_gpio + i;
+ pin->number = pfc->info->first_gpio + i;
pin->name = gpio->name;
/* XXX */
@@ -400,15 +397,15 @@ static int sh_pfc_map_functions(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx)
unsigned long flags;
int i, fn;
- pmx->functions = kzalloc(pmx->nr_functions * sizeof(void *),
- GFP_KERNEL);
+ pmx->functions = devm_kzalloc(pfc->dev, pmx->nr_functions *
+ sizeof(*pmx->functions), GFP_KERNEL);
if (unlikely(!pmx->functions))
return -ENOMEM;
spin_lock_irqsave(&pmx->lock, flags);
for (i = fn = 0; i < pmx->nr_pads; i++) {
- struct pinmux_gpio *gpio = pfc->gpios + i;
+ struct pinmux_gpio *gpio = pfc->info->gpios + i;
if ((gpio->flags & PINMUX_FLAG_TYPE) == PINMUX_TYPE_FUNCTION)
pmx->functions[fn++] = gpio;
@@ -419,109 +416,48 @@ static int sh_pfc_map_functions(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx)
return 0;
}
-static int sh_pfc_pinctrl_probe(struct platform_device *pdev)
+int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
{
- struct sh_pfc *pfc;
+ struct sh_pfc_pinctrl *pmx;
int ret;
- if (unlikely(!sh_pfc_pmx))
- return -ENODEV;
+ pmx = devm_kzalloc(pfc->dev, sizeof(*pmx), GFP_KERNEL);
+ if (unlikely(!pmx))
+ return -ENOMEM;
+
+ spin_lock_init(&pmx->lock);
- pfc = sh_pfc_pmx->pfc;
+ pmx->pfc = pfc;
+ pfc->pinctrl = pmx;
- ret = sh_pfc_map_gpios(pfc, sh_pfc_pmx);
+ ret = sh_pfc_map_gpios(pfc, pmx);
if (unlikely(ret != 0))
return ret;
- ret = sh_pfc_map_functions(pfc, sh_pfc_pmx);
+ ret = sh_pfc_map_functions(pfc, pmx);
if (unlikely(ret != 0))
- goto free_pads;
-
- sh_pfc_pmx->pctl = pinctrl_register(&sh_pfc_pinctrl_desc, &pdev->dev,
- sh_pfc_pmx);
- if (IS_ERR(sh_pfc_pmx->pctl)) {
- ret = PTR_ERR(sh_pfc_pmx->pctl);
- goto free_functions;
- }
+ return ret;
- sh_pfc_gpio_range.npins = pfc->last_gpio - pfc->first_gpio + 1;
- sh_pfc_gpio_range.base = pfc->first_gpio;
- sh_pfc_gpio_range.pin_base = pfc->first_gpio;
+ pmx->pctl = pinctrl_register(&sh_pfc_pinctrl_desc, pfc->dev, pmx);
+ if (IS_ERR(pmx->pctl))
+ return PTR_ERR(pmx->pctl);
- pinctrl_add_gpio_range(sh_pfc_pmx->pctl, &sh_pfc_gpio_range);
+ sh_pfc_gpio_range.npins = pfc->info->last_gpio
+ - pfc->info->first_gpio + 1;
+ sh_pfc_gpio_range.base = pfc->info->first_gpio;
+ sh_pfc_gpio_range.pin_base = pfc->info->first_gpio;
- platform_set_drvdata(pdev, sh_pfc_pmx);
+ pinctrl_add_gpio_range(pmx->pctl, &sh_pfc_gpio_range);
return 0;
-
-free_functions:
- kfree(sh_pfc_pmx->functions);
-free_pads:
- kfree(sh_pfc_pmx->pads);
- kfree(sh_pfc_pmx);
-
- return ret;
}
-static int sh_pfc_pinctrl_remove(struct platform_device *pdev)
+int sh_pfc_unregister_pinctrl(struct sh_pfc *pfc)
{
- struct sh_pfc_pinctrl *pmx = platform_get_drvdata(pdev);
+ struct sh_pfc_pinctrl *pmx = pfc->pinctrl;
pinctrl_unregister(pmx->pctl);
- platform_set_drvdata(pdev, NULL);
-
- kfree(sh_pfc_pmx->functions);
- kfree(sh_pfc_pmx->pads);
- kfree(sh_pfc_pmx);
-
+ pfc->pinctrl = NULL;
return 0;
}
-
-static struct platform_driver sh_pfc_pinctrl_driver = {
- .probe = sh_pfc_pinctrl_probe,
- .remove = sh_pfc_pinctrl_remove,
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-static struct platform_device sh_pfc_pinctrl_device = {
- .name = DRV_NAME,
- .id = -1,
-};
-
-static int sh_pfc_pinctrl_init(void)
-{
- int rc;
-
- rc = platform_driver_register(&sh_pfc_pinctrl_driver);
- if (likely(!rc)) {
- rc = platform_device_register(&sh_pfc_pinctrl_device);
- if (unlikely(rc))
- platform_driver_unregister(&sh_pfc_pinctrl_driver);
- }
-
- return rc;
-}
-
-int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
-{
- sh_pfc_pmx = kzalloc(sizeof(struct sh_pfc_pinctrl), GFP_KERNEL);
- if (unlikely(!sh_pfc_pmx))
- return -ENOMEM;
-
- spin_lock_init(&sh_pfc_pmx->lock);
-
- sh_pfc_pmx->pfc = pfc;
-
- return sh_pfc_pinctrl_init();
-}
-EXPORT_SYMBOL_GPL(sh_pfc_register_pinctrl);
-
-static void __exit sh_pfc_pinctrl_exit(void)
-{
- platform_driver_unregister(&sh_pfc_pinctrl_driver);
-}
-module_exit(sh_pfc_pinctrl_exit);
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
new file mode 100644
index 000000000000..13049c4c8d30
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -0,0 +1,195 @@
+/*
+ * SuperH Pin Function Controller Support
+ *
+ * Copyright (c) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __SH_PFC_H
+#define __SH_PFC_H
+
+#include <linux/stringify.h>
+#include <asm-generic/gpio.h>
+
+typedef unsigned short pinmux_enum_t;
+typedef unsigned short pinmux_flag_t;
+
+enum {
+ PINMUX_TYPE_NONE,
+
+ PINMUX_TYPE_FUNCTION,
+ PINMUX_TYPE_GPIO,
+ PINMUX_TYPE_OUTPUT,
+ PINMUX_TYPE_INPUT,
+ PINMUX_TYPE_INPUT_PULLUP,
+ PINMUX_TYPE_INPUT_PULLDOWN,
+
+ PINMUX_FLAG_TYPE, /* must be last */
+};
+
+#define PINMUX_FLAG_DBIT_SHIFT 5
+#define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT)
+#define PINMUX_FLAG_DREG_SHIFT 10
+#define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT)
+
+struct pinmux_gpio {
+ pinmux_enum_t enum_id;
+ pinmux_flag_t flags;
+ const char *name;
+};
+
+#define PINMUX_GPIO(gpio, data_or_mark) \
+ [gpio] = { .name = __stringify(gpio), .enum_id = data_or_mark, .flags = PINMUX_TYPE_NONE }
+
+#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
+
+struct pinmux_cfg_reg {
+ unsigned long reg, reg_width, field_width;
+ unsigned long *cnt;
+ pinmux_enum_t *enum_ids;
+ unsigned long *var_field_width;
+};
+
+#define PINMUX_CFG_REG(name, r, r_width, f_width) \
+ .reg = r, .reg_width = r_width, .field_width = f_width, \
+ .cnt = (unsigned long [r_width / f_width]) {}, \
+ .enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)])
+
+#define PINMUX_CFG_REG_VAR(name, r, r_width, var_fw0, var_fwn...) \
+ .reg = r, .reg_width = r_width, \
+ .cnt = (unsigned long [r_width]) {}, \
+ .var_field_width = (unsigned long [r_width]) { var_fw0, var_fwn, 0 }, \
+ .enum_ids = (pinmux_enum_t [])
+
+struct pinmux_data_reg {
+ unsigned long reg, reg_width, reg_shadow;
+ pinmux_enum_t *enum_ids;
+ void __iomem *mapped_reg;
+};
+
+#define PINMUX_DATA_REG(name, r, r_width) \
+ .reg = r, .reg_width = r_width, \
+ .enum_ids = (pinmux_enum_t [r_width]) \
+
+struct pinmux_irq {
+ int irq;
+ pinmux_enum_t *enum_ids;
+};
+
+#define PINMUX_IRQ(irq_nr, ids...) \
+ { .irq = irq_nr, .enum_ids = (pinmux_enum_t []) { ids, 0 } } \
+
+struct pinmux_range {
+ pinmux_enum_t begin;
+ pinmux_enum_t end;
+ pinmux_enum_t force;
+};
+
+struct sh_pfc_soc_info {
+ char *name;
+ pinmux_enum_t reserved_id;
+ struct pinmux_range data;
+ struct pinmux_range input;
+ struct pinmux_range input_pd;
+ struct pinmux_range input_pu;
+ struct pinmux_range output;
+ struct pinmux_range mark;
+ struct pinmux_range function;
+
+ unsigned first_gpio, last_gpio;
+
+ struct pinmux_gpio *gpios;
+ struct pinmux_cfg_reg *cfg_regs;
+ struct pinmux_data_reg *data_regs;
+
+ pinmux_enum_t *gpio_data;
+ unsigned int gpio_data_size;
+
+ struct pinmux_irq *gpio_irq;
+ unsigned int gpio_irq_size;
+
+ unsigned long unlock_reg;
+};
+
+enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
+
+/* helper macro for port */
+#define PORT_1(fn, pfx, sfx) fn(pfx, sfx)
+
+#define PORT_10(fn, pfx, sfx) \
+ PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \
+ PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \
+ PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx), \
+ PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \
+ PORT_1(fn, pfx##8, sfx), PORT_1(fn, pfx##9, sfx)
+
+#define PORT_90(fn, pfx, sfx) \
+ PORT_10(fn, pfx##1, sfx), PORT_10(fn, pfx##2, sfx), \
+ PORT_10(fn, pfx##3, sfx), PORT_10(fn, pfx##4, sfx), \
+ PORT_10(fn, pfx##5, sfx), PORT_10(fn, pfx##6, sfx), \
+ PORT_10(fn, pfx##7, sfx), PORT_10(fn, pfx##8, sfx), \
+ PORT_10(fn, pfx##9, sfx)
+
+#define _PORT_ALL(pfx, sfx) pfx##_##sfx
+#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
+#define PORT_ALL(str) CPU_ALL_PORT(_PORT_ALL, PORT, str)
+#define GPIO_PORT_ALL() CPU_ALL_PORT(_GPIO_PORT, , unused)
+#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
+
+/* helper macro for pinmux_enum_t */
+#define PORT_DATA_I(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
+
+#define PORT_DATA_I_PD(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_IN, PORT##nr##_IN_PD)
+
+#define PORT_DATA_I_PU(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_IN, PORT##nr##_IN_PU)
+
+#define PORT_DATA_I_PU_PD(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
+ PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
+
+#define PORT_DATA_O(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT)
+
+#define PORT_DATA_IO(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
+ PORT##nr##_IN)
+
+#define PORT_DATA_IO_PD(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
+ PORT##nr##_IN, PORT##nr##_IN_PD)
+
+#define PORT_DATA_IO_PU(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
+ PORT##nr##_IN, PORT##nr##_IN_PU)
+
+#define PORT_DATA_IO_PU_PD(nr) \
+ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
+ PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
+
+/* helper macro for top 4 bits in PORTnCR */
+#define _PCRH(in, in_pd, in_pu, out) \
+ 0, (out), (in), 0, \
+ 0, 0, 0, 0, \
+ 0, 0, (in_pd), 0, \
+ 0, 0, (in_pu), 0
+
+#define PORTCR(nr, reg) \
+ { \
+ PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
+ _PCRH(PORT##nr##_IN, PORT##nr##_IN_PD, \
+ PORT##nr##_IN_PU, PORT##nr##_OUT), \
+ PORT##nr##_FN0, PORT##nr##_FN1, \
+ PORT##nr##_FN2, PORT##nr##_FN3, \
+ PORT##nr##_FN4, PORT##nr##_FN5, \
+ PORT##nr##_FN6, PORT##nr##_FN7 } \
+ }
+
+#endif /* __SH_PFC_H */
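
As an illustrative sketch only (not part of the patch above), an SoC-specific pinmux table might use the PORT_DATA_* and PORTCR helpers from sh_pfc.h along the following lines; the PORT0/PORT1 enum identifiers and the register addresses here are hypothetical:

	/* Hypothetical SoC table built from the sh_pfc.h helpers above. */
	static pinmux_enum_t example_pinmux_data[] = {
		PORT_DATA_IO_PU(0),	/* PORT0: bidirectional, pull-up capable input */
		PORT_DATA_O(1),		/* PORT1: output only */
	};

	static struct pinmux_cfg_reg example_cfg_regs[] = {
		PORTCR(0, 0xe6050000),	/* PORT0CR: 8-bit register, 4-bit fields */
		PORTCR(1, 0xe6050001),	/* PORT1CR */
		{ },
	};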
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 7ab0b2fba503..3338437b559b 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -79,6 +79,17 @@ config ASUS_LAPTOP
If you have an ACPI-compatible ASUS laptop, say Y or M here.
+config CHROMEOS_LAPTOP
+ tristate "Chrome OS Laptop"
+ depends on I2C
+ depends on DMI
+ ---help---
+ This driver instantiates i2c and smbus devices such as
+ light sensors and touchpads.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called chromeos_laptop.
+
config DELL_LAPTOP
tristate "Dell Laptop Extras"
depends on X86
@@ -288,9 +299,11 @@ config IDEAPAD_LAPTOP
depends on ACPI
depends on RFKILL && INPUT
depends on SERIO_I8042
+ depends on BACKLIGHT_CLASS_DEVICE
select INPUT_SPARSEKMAP
help
- This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
+	  This is a driver for Lenovo IdeaPad netbooks. It includes support for
+	  the rfkill switch, hotkeys, fan control and backlight control.
config THINKPAD_ACPI
tristate "ThinkPad ACPI Laptop Extras"
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index bf7e4f935b17..ace2b38942fe 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -50,3 +50,4 @@ obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
+obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index afed7018a2b5..c9076bdaf2c1 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -511,6 +511,24 @@ static struct dmi_system_id acer_quirks[] = {
},
.driver_data = &quirk_fujitsu_amilo_li_1718,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo Ideapad S205-10382JG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "10382JG"),
+ },
+ .driver_data = &quirk_lenovo_ideapad_s205,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo Ideapad S205-1038DPG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1038DPG"),
+ },
+ .driver_data = &quirk_lenovo_ideapad_s205,
+ },
{}
};
@@ -1204,6 +1222,9 @@ static acpi_status WMID_set_capabilities(void)
devices = *((u32 *) obj->buffer.pointer);
} else if (obj->type == ACPI_TYPE_INTEGER) {
devices = (u32) obj->integer.value;
+ } else {
+ kfree(out.pointer);
+ return AE_ERROR;
}
} else {
kfree(out.pointer);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index d9f9a0dbc6f3..0eea09c1c134 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -128,10 +128,12 @@ MODULE_PARM_DESC(als_status, "Set the ALS status on boot "
/*
* Some events we use, same for all Asus
*/
-#define ATKD_BR_UP 0x10 /* (event & ~ATKD_BR_UP) = brightness level */
-#define ATKD_BR_DOWN 0x20 /* (event & ~ATKD_BR_DOWN) = britghness level */
-#define ATKD_BR_MIN ATKD_BR_UP
-#define ATKD_BR_MAX (ATKD_BR_DOWN | 0xF) /* 0x2f */
+#define ATKD_BRNUP_MIN 0x10
+#define ATKD_BRNUP_MAX 0x1f
+#define ATKD_BRNDOWN_MIN 0x20
+#define ATKD_BRNDOWN_MAX 0x2f
+#define ATKD_BRNDOWN 0x20
+#define ATKD_BRNUP 0x2f
#define ATKD_LCD_ON 0x33
#define ATKD_LCD_OFF 0x34
@@ -301,40 +303,65 @@ static const struct key_entry asus_keymap[] = {
{KE_KEY, 0x17, { KEY_ZOOM } },
{KE_KEY, 0x1f, { KEY_BATTERY } },
/* End of Lenovo SL Specific keycodes */
+ {KE_KEY, ATKD_BRNDOWN, { KEY_BRIGHTNESSDOWN } },
+ {KE_KEY, ATKD_BRNUP, { KEY_BRIGHTNESSUP } },
{KE_KEY, 0x30, { KEY_VOLUMEUP } },
{KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{KE_KEY, 0x32, { KEY_MUTE } },
- {KE_KEY, 0x33, { KEY_SWITCHVIDEOMODE } },
- {KE_KEY, 0x34, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
+ {KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
{KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{KE_KEY, 0x41, { KEY_NEXTSONG } },
- {KE_KEY, 0x43, { KEY_STOPCD } },
+ {KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
{KE_KEY, 0x45, { KEY_PLAYPAUSE } },
- {KE_KEY, 0x4c, { KEY_MEDIA } },
+ {KE_KEY, 0x4c, { KEY_MEDIA } }, /* WMP Key */
{KE_KEY, 0x50, { KEY_EMAIL } },
{KE_KEY, 0x51, { KEY_WWW } },
{KE_KEY, 0x55, { KEY_CALC } },
+ {KE_IGNORE, 0x57, }, /* Battery mode */
+ {KE_IGNORE, 0x58, }, /* AC mode */
{KE_KEY, 0x5C, { KEY_SCREENLOCK } }, /* Screenlock */
- {KE_KEY, 0x5D, { KEY_WLAN } },
- {KE_KEY, 0x5E, { KEY_WLAN } },
- {KE_KEY, 0x5F, { KEY_WLAN } },
- {KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
- {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
- {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
- {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
- {KE_KEY, 0x6B, { KEY_F13 } }, /* Lock Touchpad */
+ {KE_KEY, 0x5D, { KEY_WLAN } }, /* WLAN Toggle */
+ {KE_KEY, 0x5E, { KEY_WLAN } }, /* WLAN Enable */
+ {KE_KEY, 0x5F, { KEY_WLAN } }, /* WLAN Disable */
+ {KE_KEY, 0x60, { KEY_TOUCHPAD_ON } },
+ {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD only */
+ {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT only */
+ {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT */
+ {KE_KEY, 0x64, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV */
+ {KE_KEY, 0x65, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV */
+ {KE_KEY, 0x66, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV */
+ {KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
+ {KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, /* Lock Touchpad */
{KE_KEY, 0x6C, { KEY_SLEEP } }, /* Suspend */
{KE_KEY, 0x6D, { KEY_SLEEP } }, /* Hibernate */
- {KE_KEY, 0x7E, { KEY_BLUETOOTH } },
- {KE_KEY, 0x7D, { KEY_BLUETOOTH } },
+ {KE_IGNORE, 0x6E, }, /* Low Battery notification */
+ {KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
+ {KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
{KE_KEY, 0x82, { KEY_CAMERA } },
- {KE_KEY, 0x88, { KEY_WLAN } },
- {KE_KEY, 0x8A, { KEY_PROG1 } },
+ {KE_KEY, 0x88, { KEY_RFKILL } }, /* Radio Toggle Key */
+ {KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
+ {KE_KEY, 0x8C, { KEY_SWITCHVIDEOMODE } }, /* SDSP DVI only */
+ {KE_KEY, 0x8D, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + DVI */
+ {KE_KEY, 0x8E, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + DVI */
+ {KE_KEY, 0x8F, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + DVI */
+ {KE_KEY, 0x90, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + DVI */
+ {KE_KEY, 0x91, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + DVI */
+ {KE_KEY, 0x92, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + DVI */
+ {KE_KEY, 0x93, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + DVI */
{KE_KEY, 0x95, { KEY_MEDIA } },
{KE_KEY, 0x99, { KEY_PHONE } },
- {KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
- {KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
- {KE_KEY, 0xb5, { KEY_CALC } },
+ {KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
+ {KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
+ {KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
+ {KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
+ {KE_KEY, 0xA4, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + HDMI */
+ {KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
+ {KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
+ {KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+ {KE_KEY, 0xB5, { KEY_CALC } },
+ {KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
+ {KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
{KE_END, 0},
};
@@ -1521,15 +1548,19 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
dev_name(&asus->device->dev), event,
count);
- /* Brightness events are special */
- if (event >= ATKD_BR_MIN && event <= ATKD_BR_MAX) {
+ if (event >= ATKD_BRNUP_MIN && event <= ATKD_BRNUP_MAX)
+ event = ATKD_BRNUP;
+ else if (event >= ATKD_BRNDOWN_MIN &&
+ event <= ATKD_BRNDOWN_MAX)
+ event = ATKD_BRNDOWN;
- /* Ignore them completely if the acpi video driver is used */
+ /* Brightness events are special */
+ if (event == ATKD_BRNDOWN || event == ATKD_BRNUP) {
if (asus->backlight_device != NULL) {
/* Update the backlight device. */
asus_backlight_notify(asus);
+ return ;
}
- return ;
}
/* Accelerometer "coarse orientation change" event */
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index be790402e0f1..210b5b872125 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -59,6 +59,17 @@ static struct quirk_entry quirk_asus_unknown = {
.wapf = 0,
};
+/*
+ * For machines that need software control of the bt/wifi status, cannot
+ * adjust brightness through the ACPI interface, and generate duplicate
+ * events (ACPI and WMI) for the display toggle
+ */
+static struct quirk_entry quirk_asus_x55u = {
+ .wapf = 4,
+ .wmi_backlight_power = true,
+ .no_display_toggle = true,
+};
+
static struct quirk_entry quirk_asus_x401u = {
.wapf = 4,
};
@@ -77,6 +88,15 @@ static struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X401U"),
},
+ .driver_data = &quirk_asus_x55u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X401A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X401A"),
+ },
.driver_data = &quirk_asus_x401u,
},
{
@@ -95,6 +115,15 @@ static struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X501U"),
},
+ .driver_data = &quirk_asus_x55u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X501A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X501A"),
+ },
.driver_data = &quirk_asus_x401u,
},
{
@@ -131,7 +160,7 @@ static struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X55U"),
},
- .driver_data = &quirk_asus_x401u,
+ .driver_data = &quirk_asus_x55u,
},
{
.callback = dmi_matched,
@@ -161,6 +190,8 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
}
static const struct key_entry asus_nb_wmi_keymap[] = {
+ { KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
@@ -168,9 +199,9 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{ KE_KEY, 0x41, { KEY_NEXTSONG } },
- { KE_KEY, 0x43, { KEY_STOPCD } },
+ { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
{ KE_KEY, 0x45, { KEY_PLAYPAUSE } },
- { KE_KEY, 0x4c, { KEY_MEDIA } },
+ { KE_KEY, 0x4c, { KEY_MEDIA } }, /* WMP Key */
{ KE_KEY, 0x50, { KEY_EMAIL } },
{ KE_KEY, 0x51, { KEY_WWW } },
{ KE_KEY, 0x55, { KEY_CALC } },
@@ -180,25 +211,42 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x5D, { KEY_WLAN } }, /* Wireless console Toggle */
{ KE_KEY, 0x5E, { KEY_WLAN } }, /* Wireless console Enable */
{ KE_KEY, 0x5F, { KEY_WLAN } }, /* Wireless console Disable */
- { KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
- { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
- { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
- { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x60, { KEY_TOUCHPAD_ON } },
+ { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD only */
+ { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT only */
+ { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT */
+ { KE_KEY, 0x64, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV */
+ { KE_KEY, 0x65, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV */
+ { KE_KEY, 0x66, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV */
+ { KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
- { KE_KEY, 0x7D, { KEY_BLUETOOTH } },
- { KE_KEY, 0x7E, { KEY_BLUETOOTH } },
+ { KE_IGNORE, 0x6E, }, /* Low Battery notification */
+ { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
+ { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
{ KE_KEY, 0x82, { KEY_CAMERA } },
- { KE_KEY, 0x88, { KEY_RFKILL } },
- { KE_KEY, 0x8A, { KEY_PROG1 } },
+ { KE_KEY, 0x88, { KEY_RFKILL } }, /* Radio Toggle Key */
+ { KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
+ { KE_KEY, 0x8C, { KEY_SWITCHVIDEOMODE } }, /* SDSP DVI only */
+ { KE_KEY, 0x8D, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + DVI */
+ { KE_KEY, 0x8E, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + DVI */
+ { KE_KEY, 0x8F, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + DVI */
+ { KE_KEY, 0x90, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + DVI */
+ { KE_KEY, 0x91, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + DVI */
+ { KE_KEY, 0x92, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + DVI */
+ { KE_KEY, 0x93, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + DVI */
{ KE_KEY, 0x95, { KEY_MEDIA } },
{ KE_KEY, 0x99, { KEY_PHONE } },
{ KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
{ KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
{ KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
{ KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
- { KE_KEY, 0xb5, { KEY_CALC } },
- { KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
- { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
+ { KE_KEY, 0xA4, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + HDMI */
+ { KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
+ { KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
+ { KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+ { KE_KEY, 0xB5, { KEY_CALC } },
+ { KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
+ { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
{ KE_END, 0},
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index f80ae4d10f68..c11b2426dac1 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -187,6 +187,8 @@ struct asus_wmi {
struct device *hwmon_device;
struct platform_device *platform_device;
+ struct led_classdev wlan_led;
+ int wlan_led_wk;
struct led_classdev tpd_led;
int tpd_led_wk;
struct led_classdev kbd_led;
@@ -194,6 +196,7 @@ struct asus_wmi {
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
struct work_struct kbd_led_work;
+ struct work_struct wlan_led_work;
struct asus_rfkill wlan;
struct asus_rfkill bluetooth;
@@ -456,12 +459,65 @@ static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
return value;
}
+static int wlan_led_unknown_state(struct asus_wmi *asus)
+{
+ u32 result;
+
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
+
+ return result & ASUS_WMI_DSTS_UNKNOWN_BIT;
+}
+
+static int wlan_led_presence(struct asus_wmi *asus)
+{
+ u32 result;
+
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
+
+ return result & ASUS_WMI_DSTS_PRESENCE_BIT;
+}
+
+static void wlan_led_update(struct work_struct *work)
+{
+ int ctrl_param;
+ struct asus_wmi *asus;
+
+ asus = container_of(work, struct asus_wmi, wlan_led_work);
+
+ ctrl_param = asus->wlan_led_wk;
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_WIRELESS_LED, ctrl_param, NULL);
+}
+
+static void wlan_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_wmi *asus;
+
+ asus = container_of(led_cdev, struct asus_wmi, wlan_led);
+
+ asus->wlan_led_wk = !!value;
+ queue_work(asus->led_workqueue, &asus->wlan_led_work);
+}
+
+static enum led_brightness wlan_led_get(struct led_classdev *led_cdev)
+{
+ struct asus_wmi *asus;
+ u32 result;
+
+ asus = container_of(led_cdev, struct asus_wmi, wlan_led);
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
+
+ return result & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
+}
+
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
led_classdev_unregister(&asus->kbd_led);
if (!IS_ERR_OR_NULL(asus->tpd_led.dev))
led_classdev_unregister(&asus->tpd_led);
+ if (!IS_ERR_OR_NULL(asus->wlan_led.dev))
+ led_classdev_unregister(&asus->wlan_led);
if (asus->led_workqueue)
destroy_workqueue(asus->led_workqueue);
}
@@ -498,6 +554,23 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
rv = led_classdev_register(&asus->platform_device->dev,
&asus->kbd_led);
+ if (rv)
+ goto error;
+ }
+
+ if (wlan_led_presence(asus)) {
+ INIT_WORK(&asus->wlan_led_work, wlan_led_update);
+
+ asus->wlan_led.name = "asus::wlan";
+ asus->wlan_led.brightness_set = wlan_led_set;
+ if (!wlan_led_unknown_state(asus))
+ asus->wlan_led.brightness_get = wlan_led_get;
+ asus->wlan_led.flags = LED_CORE_SUSPENDRESUME;
+ asus->wlan_led.max_brightness = 1;
+ asus->wlan_led.default_trigger = "asus-wlan";
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->wlan_led);
}
error:
@@ -813,6 +886,9 @@ static int asus_new_rfkill(struct asus_wmi *asus,
if (!*rfkill)
return -EINVAL;
+ if (dev_id == ASUS_WMI_DEVID_WLAN)
+ rfkill_set_led_trigger_name(*rfkill, "asus-wlan");
+
rfkill_init_sw_state(*rfkill, !result);
result = rfkill_register(*rfkill);
if (result) {
@@ -1265,6 +1341,18 @@ static void asus_wmi_backlight_exit(struct asus_wmi *asus)
asus->backlight_device = NULL;
}
+static int is_display_toggle(int code)
+{
+ /* display toggle keys */
+ if ((code >= 0x61 && code <= 0x67) ||
+ (code >= 0x8c && code <= 0x93) ||
+ (code >= 0xa0 && code <= 0xa7) ||
+ (code >= 0xd0 && code <= 0xd5))
+ return 1;
+
+ return 0;
+}
+
static void asus_wmi_notify(u32 value, void *context)
{
struct asus_wmi *asus = context;
@@ -1298,16 +1386,24 @@ static void asus_wmi_notify(u32 value, void *context)
}
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
- code = NOTIFY_BRNUP_MIN;
+ code = ASUS_WMI_BRN_UP;
else if (code >= NOTIFY_BRNDOWN_MIN &&
code <= NOTIFY_BRNDOWN_MAX)
- code = NOTIFY_BRNDOWN_MIN;
+ code = ASUS_WMI_BRN_DOWN;
- if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
- if (!acpi_video_backlight_support())
+ if (code == ASUS_WMI_BRN_DOWN || code == ASUS_WMI_BRN_UP) {
+ if (!acpi_video_backlight_support()) {
asus_wmi_backlight_notify(asus, orig_code);
- } else if (!sparse_keymap_report_event(asus->inputdev, code,
- key_value, autorelease))
+ goto exit;
+ }
+ }
+
+ if (is_display_toggle(code) &&
+ asus->driver->quirks->no_display_toggle)
+ goto exit;
+
+ if (!sparse_keymap_report_event(asus->inputdev, code,
+ key_value, autorelease))
pr_info("Unknown key %x pressed\n", code);
exit:
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 4c9bd38bb0a2..4da4c8bafe70 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -30,6 +30,8 @@
#include <linux/platform_device.h>
#define ASUS_WMI_KEY_IGNORE (-1)
+#define ASUS_WMI_BRN_DOWN 0x20
+#define ASUS_WMI_BRN_UP 0x2f
struct module;
struct key_entry;
@@ -41,6 +43,13 @@ struct quirk_entry {
bool store_backlight_power;
bool wmi_backlight_power;
int wapf;
+ /*
+	 * For machines with AMD graphics chips, the firmware sends out a WMI
+	 * event and an ACPI interrupt at the same time when the hotkey is
+	 * pressed. To simplify handling, the WMI event is ignored and the
+	 * ACPI interrupt is left to deliver the key event.
+ */
+ int no_display_toggle;
};
struct asus_wmi_driver {
diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
new file mode 100644
index 000000000000..93d66809355a
--- /dev/null
+++ b/drivers/platform/x86/chromeos_laptop.c
@@ -0,0 +1,371 @@
+/*
+ * chromeos_laptop.c - Driver to instantiate Chromebook i2c/smbus devices.
+ *
+ * Author : Benson Leung <bleung@chromium.org>
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#define ATMEL_TP_I2C_ADDR 0x4b
+#define ATMEL_TP_I2C_BL_ADDR 0x25
+#define ATMEL_TS_I2C_ADDR 0x4a
+#define ATMEL_TS_I2C_BL_ADDR 0x26
+#define CYAPA_TP_I2C_ADDR 0x67
+#define ISL_ALS_I2C_ADDR 0x44
+#define TAOS_ALS_I2C_ADDR 0x29
+
+static struct i2c_client *als;
+static struct i2c_client *tp;
+static struct i2c_client *ts;
+
+static const char *i2c_adapter_names[] = {
+ "SMBus I801 adapter",
+ "i915 gmbus vga",
+ "i915 gmbus panel",
+};
+
+/* Keep this enum consistent with i2c_adapter_names */
+enum i2c_adapter_type {
+ I2C_ADAPTER_SMBUS = 0,
+ I2C_ADAPTER_VGADDC,
+ I2C_ADAPTER_PANEL,
+};
+
+static struct i2c_board_info __initdata cyapa_device = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+};
+
+static struct i2c_board_info __initdata isl_als_device = {
+ I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
+};
+
+static struct i2c_board_info __initdata tsl2583_als_device = {
+ I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
+};
+
+static struct i2c_board_info __initdata tsl2563_als_device = {
+ I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
+};
+
+static struct i2c_board_info __initdata atmel_224s_tp_device = {
+ I2C_BOARD_INFO("atmel_mxt_tp", ATMEL_TP_I2C_ADDR),
+ .platform_data = NULL,
+ .flags = I2C_CLIENT_WAKE,
+};
+
+static struct i2c_board_info __initdata atmel_1664s_device = {
+ I2C_BOARD_INFO("atmel_mxt_ts", ATMEL_TS_I2C_ADDR),
+ .platform_data = NULL,
+ .flags = I2C_CLIENT_WAKE,
+};
+
+static struct i2c_client __init *__add_probed_i2c_device(
+ const char *name,
+ int bus,
+ struct i2c_board_info *info,
+ const unsigned short *addrs)
+{
+ const struct dmi_device *dmi_dev;
+ const struct dmi_dev_onboard *dev_data;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+
+ if (bus < 0)
+ return NULL;
+ /*
+ * If a name is specified, look for irq platform information stashed
+ * in DMI_DEV_TYPE_DEV_ONBOARD by the Chrome OS custom system firmware.
+ */
+ if (name) {
+ dmi_dev = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, name, NULL);
+ if (!dmi_dev) {
+ pr_err("%s failed to dmi find device %s.\n",
+ __func__,
+ name);
+ return NULL;
+ }
+ dev_data = (struct dmi_dev_onboard *)dmi_dev->device_data;
+ if (!dev_data) {
+ pr_err("%s failed to get data from dmi for %s.\n",
+ __func__, name);
+ return NULL;
+ }
+ info->irq = dev_data->instance;
+ }
+
+ adapter = i2c_get_adapter(bus);
+ if (!adapter) {
+ pr_err("%s failed to get i2c adapter %d.\n", __func__, bus);
+ return NULL;
+ }
+
+ /* add the i2c device */
+ client = i2c_new_probed_device(adapter, info, addrs, NULL);
+ if (!client)
+ pr_err("%s failed to register device %d-%02x\n",
+ __func__, bus, info->addr);
+ else
+ pr_debug("%s added i2c device %d-%02x\n",
+ __func__, bus, info->addr);
+
+ i2c_put_adapter(adapter);
+ return client;
+}
+
+static int __init __find_i2c_adap(struct device *dev, void *data)
+{
+ const char *name = data;
+ static const char *prefix = "i2c-";
+ struct i2c_adapter *adapter;
+ if (strncmp(dev_name(dev), prefix, strlen(prefix)) != 0)
+ return 0;
+ adapter = to_i2c_adapter(dev);
+ return (strncmp(adapter->name, name, strlen(name)) == 0);
+}
+
+static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
+{
+ struct device *dev = NULL;
+ struct i2c_adapter *adapter;
+ const char *name = i2c_adapter_names[type];
+ /* find the adapter by name */
+ dev = bus_find_device(&i2c_bus_type, NULL, (void *)name,
+ __find_i2c_adap);
+ if (!dev) {
+ pr_err("%s: i2c adapter %s not found on system.\n", __func__,
+ name);
+ return -ENODEV;
+ }
+ adapter = to_i2c_adapter(dev);
+ return adapter->nr;
+}
+
+/*
+ * Takes a list of addresses in addrs as follows:
+ * { addr1, ... , addrn, I2C_CLIENT_END };
+ * add_probed_i2c_device will use i2c_new_probed_device
+ * and probe for devices at all of the addresses listed.
+ * Returns NULL if no devices found.
+ * See Documentation/i2c/instantiating-devices for more information.
+ */
+static __init struct i2c_client *add_probed_i2c_device(
+ const char *name,
+ enum i2c_adapter_type type,
+ struct i2c_board_info *info,
+ const unsigned short *addrs)
+{
+ return __add_probed_i2c_device(name,
+ find_i2c_adapter_num(type),
+ info,
+ addrs);
+}
+
+/*
+ * Probes for a device at a single address, the one provided by
+ * info->addr.
+ * Returns NULL if no device found.
+ */
+static __init struct i2c_client *add_i2c_device(const char *name,
+ enum i2c_adapter_type type,
+ struct i2c_board_info *info)
+{
+ const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END };
+ return __add_probed_i2c_device(name,
+ find_i2c_adapter_num(type),
+ info,
+ addr_list);
+}
+
+
+static struct i2c_client __init *add_smbus_device(const char *name,
+ struct i2c_board_info *info)
+{
+ return add_i2c_device(name, I2C_ADAPTER_SMBUS, info);
+}
+
+static int __init setup_cyapa_smbus_tp(const struct dmi_system_id *id)
+{
+ /* add cyapa touchpad on smbus */
+ tp = add_smbus_device("trackpad", &cyapa_device);
+ return 0;
+}
+
+static int __init setup_atmel_224s_tp(const struct dmi_system_id *id)
+{
+ const unsigned short addr_list[] = { ATMEL_TP_I2C_BL_ADDR,
+ ATMEL_TP_I2C_ADDR,
+ I2C_CLIENT_END };
+
+ /* add atmel mxt touchpad on VGA DDC GMBus */
+ tp = add_probed_i2c_device("trackpad", I2C_ADAPTER_VGADDC,
+ &atmel_224s_tp_device, addr_list);
+ return 0;
+}
+
+static int __init setup_atmel_1664s_ts(const struct dmi_system_id *id)
+{
+ const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
+ ATMEL_TS_I2C_ADDR,
+ I2C_CLIENT_END };
+
+ /* add atmel mxt touch device on PANEL GMBus */
+ ts = add_probed_i2c_device("touchscreen", I2C_ADAPTER_PANEL,
+ &atmel_1664s_device, addr_list);
+ return 0;
+}
+
+
+static int __init setup_isl29018_als(const struct dmi_system_id *id)
+{
+ /* add isl29018 light sensor */
+ als = add_smbus_device("lightsensor", &isl_als_device);
+ return 0;
+}
+
+static int __init setup_isl29023_als(const struct dmi_system_id *id)
+{
+ /* add isl29023 light sensor on Panel GMBus */
+ als = add_i2c_device("lightsensor", I2C_ADAPTER_PANEL,
+ &isl_als_device);
+ return 0;
+}
+
+static int __init setup_tsl2583_als(const struct dmi_system_id *id)
+{
+ /* add tsl2583 light sensor on smbus */
+ als = add_smbus_device(NULL, &tsl2583_als_device);
+ return 0;
+}
+
+static int __init setup_tsl2563_als(const struct dmi_system_id *id)
+{
+ /* add tsl2563 light sensor on smbus */
+ als = add_smbus_device(NULL, &tsl2563_als_device);
+ return 0;
+}
+
+static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
+ {
+ .ident = "Samsung Series 5 550 - Touchpad",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
+ },
+ .callback = setup_cyapa_smbus_tp,
+ },
+ {
+ .ident = "Chromebook Pixel - Touchscreen",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ },
+ .callback = setup_atmel_1664s_ts,
+ },
+ {
+ .ident = "Chromebook Pixel - Touchpad",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ },
+ .callback = setup_atmel_224s_tp,
+ },
+ {
+ .ident = "Samsung Series 5 550 - Light Sensor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
+ },
+ .callback = setup_isl29018_als,
+ },
+ {
+ .ident = "Chromebook Pixel - Light Sensor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ },
+ .callback = setup_isl29023_als,
+ },
+ {
+ .ident = "Acer C7 Chromebook - Touchpad",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
+ },
+ .callback = setup_cyapa_smbus_tp,
+ },
+ {
+ .ident = "HP Pavilion 14 Chromebook - Touchpad",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
+ },
+ .callback = setup_cyapa_smbus_tp,
+ },
+ {
+ .ident = "Samsung Series 5 - Light Sensor",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
+ },
+ .callback = setup_tsl2583_als,
+ },
+ {
+ .ident = "Cr-48 - Light Sensor",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
+ },
+ .callback = setup_tsl2563_als,
+ },
+ {
+ .ident = "Acer AC700 - Light Sensor",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ .callback = setup_tsl2563_als,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, chromeos_laptop_dmi_table);
+
+static int __init chromeos_laptop_init(void)
+{
+ if (!dmi_check_system(chromeos_laptop_dmi_table)) {
+ pr_debug("%s unsupported system.\n", __func__);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void __exit chromeos_laptop_exit(void)
+{
+ if (als)
+ i2c_unregister_device(als);
+ if (tp)
+ i2c_unregister_device(tp);
+ if (ts)
+ i2c_unregister_device(ts);
+}
+
+module_init(chromeos_laptop_init);
+module_exit(chromeos_laptop_exit);
+
+MODULE_DESCRIPTION("Chrome OS Laptop driver");
+MODULE_AUTHOR("Benson Leung <bleung@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 60cb76a5b513..af67e6e56ebb 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -63,6 +63,8 @@ MODULE_PARM_DESC(hotplug_wireless,
#define HOME_RELEASE 0xe5
static const struct key_entry eeepc_wmi_keymap[] = {
+ { KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
/* Sleep already handled via generic ACPI code */
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1dde7accf27c..45cacf79f3a7 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -60,6 +60,7 @@ enum hp_wmi_radio {
HPWMI_WIFI = 0,
HPWMI_BLUETOOTH = 1,
HPWMI_WWAN = 2,
+ HPWMI_GPS = 3,
};
enum hp_wmi_event_ids {
@@ -72,10 +73,6 @@ enum hp_wmi_event_ids {
HPWMI_LOCK_SWITCH = 7,
};
-static int hp_wmi_bios_setup(struct platform_device *device);
-static int __exit hp_wmi_bios_remove(struct platform_device *device);
-static int hp_wmi_resume_handler(struct device *device);
-
struct bios_args {
u32 signature;
u32 command;
@@ -137,6 +134,7 @@ static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x2142, { KEY_MEDIA } },
{ KE_KEY, 0x213b, { KEY_INFO } },
{ KE_KEY, 0x2169, { KEY_DIRECTION } },
+ { KE_KEY, 0x216a, { KEY_SETUP } },
{ KE_KEY, 0x231b, { KEY_HELP } },
{ KE_END, 0 }
};
@@ -147,6 +145,7 @@ static struct platform_device *hp_wmi_platform_dev;
static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
static struct rfkill *wwan_rfkill;
+static struct rfkill *gps_rfkill;
struct rfkill2_device {
u8 id;
@@ -157,21 +156,6 @@ struct rfkill2_device {
static int rfkill2_count;
static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
-static const struct dev_pm_ops hp_wmi_pm_ops = {
- .resume = hp_wmi_resume_handler,
- .restore = hp_wmi_resume_handler,
-};
-
-static struct platform_driver hp_wmi_driver = {
- .driver = {
- .name = "hp-wmi",
- .owner = THIS_MODULE,
- .pm = &hp_wmi_pm_ops,
- },
- .probe = hp_wmi_bios_setup,
- .remove = hp_wmi_bios_remove,
-};
-
/*
* hp_wmi_perform_query
*
@@ -543,6 +527,10 @@ static void hp_wmi_notify(u32 value, void *context)
rfkill_set_states(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN),
hp_wmi_get_hw_state(HPWMI_WWAN));
+ if (gps_rfkill)
+ rfkill_set_states(gps_rfkill,
+ hp_wmi_get_sw_state(HPWMI_GPS),
+ hp_wmi_get_hw_state(HPWMI_GPS));
break;
case HPWMI_CPU_BATTERY_THROTTLE:
pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
@@ -670,7 +658,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
(void *) HPWMI_WWAN);
if (!wwan_rfkill) {
err = -ENOMEM;
- goto register_bluetooth_error;
+ goto register_gps_error;
}
rfkill_init_sw_state(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN));
@@ -681,10 +669,33 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
goto register_wwan_err;
}
+ if (wireless & 0x8) {
+ gps_rfkill = rfkill_alloc("hp-gps", &device->dev,
+ RFKILL_TYPE_GPS,
+ &hp_wmi_rfkill_ops,
+ (void *) HPWMI_GPS);
+ if (!gps_rfkill) {
+ err = -ENOMEM;
+ goto register_bluetooth_error;
+ }
+ rfkill_init_sw_state(gps_rfkill,
+ hp_wmi_get_sw_state(HPWMI_GPS));
+		rfkill_set_hw_state(gps_rfkill,
+ hp_wmi_get_hw_state(HPWMI_GPS));
+ err = rfkill_register(gps_rfkill);
+ if (err)
+ goto register_gps_error;
+ }
+
return 0;
register_wwan_err:
rfkill_destroy(wwan_rfkill);
wwan_rfkill = NULL;
+ if (gps_rfkill)
+ rfkill_unregister(gps_rfkill);
+register_gps_error:
+ rfkill_destroy(gps_rfkill);
+ gps_rfkill = NULL;
if (bluetooth_rfkill)
rfkill_unregister(bluetooth_rfkill);
register_bluetooth_error:
@@ -729,6 +740,10 @@ static int hp_wmi_rfkill2_setup(struct platform_device *device)
type = RFKILL_TYPE_WWAN;
name = "hp-wwan";
break;
+ case HPWMI_GPS:
+ type = RFKILL_TYPE_GPS;
+ name = "hp-gps";
+ break;
default:
pr_warn("unknown device type 0x%x\n",
state.device[i].radio_type);
@@ -778,7 +793,7 @@ fail:
return err;
}
-static int hp_wmi_bios_setup(struct platform_device *device)
+static int __init hp_wmi_bios_setup(struct platform_device *device)
{
int err;
@@ -786,6 +801,7 @@ static int hp_wmi_bios_setup(struct platform_device *device)
wifi_rfkill = NULL;
bluetooth_rfkill = NULL;
wwan_rfkill = NULL;
+ gps_rfkill = NULL;
rfkill2_count = 0;
if (hp_wmi_rfkill_setup(device))
@@ -835,6 +851,10 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
rfkill_unregister(wwan_rfkill);
rfkill_destroy(wwan_rfkill);
}
+ if (gps_rfkill) {
+ rfkill_unregister(gps_rfkill);
+ rfkill_destroy(gps_rfkill);
+ }
return 0;
}
@@ -870,51 +890,70 @@ static int hp_wmi_resume_handler(struct device *device)
rfkill_set_states(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN),
hp_wmi_get_hw_state(HPWMI_WWAN));
+ if (gps_rfkill)
+ rfkill_set_states(gps_rfkill,
+ hp_wmi_get_sw_state(HPWMI_GPS),
+ hp_wmi_get_hw_state(HPWMI_GPS));
return 0;
}
+static const struct dev_pm_ops hp_wmi_pm_ops = {
+ .resume = hp_wmi_resume_handler,
+ .restore = hp_wmi_resume_handler,
+};
+
+static struct platform_driver hp_wmi_driver = {
+ .driver = {
+ .name = "hp-wmi",
+ .owner = THIS_MODULE,
+ .pm = &hp_wmi_pm_ops,
+ },
+ .remove = __exit_p(hp_wmi_bios_remove),
+};
+
static int __init hp_wmi_init(void)
{
int err;
int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
+ if (!bios_capable && !event_capable)
+ return -ENODEV;
+
if (event_capable) {
err = hp_wmi_input_setup();
if (err)
return err;
+
+ /* Enable magic for hotkeys that run on the SMBus */
+ ec_write(0xe6, 0x6e);
}
if (bios_capable) {
- err = platform_driver_register(&hp_wmi_driver);
- if (err)
- goto err_driver_reg;
- hp_wmi_platform_dev = platform_device_alloc("hp-wmi", -1);
- if (!hp_wmi_platform_dev) {
- err = -ENOMEM;
- goto err_device_alloc;
+ hp_wmi_platform_dev =
+ platform_device_register_simple("hp-wmi", -1, NULL, 0);
+ if (IS_ERR(hp_wmi_platform_dev)) {
+ err = PTR_ERR(hp_wmi_platform_dev);
+ goto err_destroy_input;
}
- err = platform_device_add(hp_wmi_platform_dev);
+
+ err = platform_driver_probe(&hp_wmi_driver, hp_wmi_bios_setup);
if (err)
- goto err_device_add;
+ goto err_unregister_device;
}
- if (!bios_capable && !event_capable)
- return -ENODEV;
-
return 0;
-err_device_add:
- platform_device_put(hp_wmi_platform_dev);
-err_device_alloc:
- platform_driver_unregister(&hp_wmi_driver);
-err_driver_reg:
+err_unregister_device:
+ platform_device_unregister(hp_wmi_platform_dev);
+err_destroy_input:
if (event_capable)
hp_wmi_input_destroy();
return err;
}
+module_init(hp_wmi_init);
static void __exit hp_wmi_exit(void)
{
@@ -926,6 +965,4 @@ static void __exit hp_wmi_exit(void)
platform_driver_unregister(&hp_wmi_driver);
}
}
-
-module_init(hp_wmi_init);
module_exit(hp_wmi_exit);
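
The init rework above drops the open-coded platform_device_alloc()/platform_device_add() sequence in favour of platform_device_register_simple(), and binds the driver with platform_driver_probe() so that hp_wmi_bios_setup() can stay in __init. A minimal sketch of that pattern, with hypothetical foo_* names (not part of the driver):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_device *foo_pdev;

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
	/* no .probe here -- it is passed to platform_driver_probe() below */
};

/* probe may be __init because the only device is created right before it */
static int __init foo_probe(struct platform_device *pdev)
{
	return 0;
}

static int __init foo_init(void)
{
	int err;

	foo_pdev = platform_device_register_simple("foo", -1, NULL, 0);
	if (IS_ERR(foo_pdev))
		return PTR_ERR(foo_pdev);

	err = platform_driver_probe(&foo_driver, foo_probe);
	if (err)
		platform_device_unregister(foo_pdev);
	return err;
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_driver);
	platform_device_unregister(foo_pdev);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");
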
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 2111dbb7e1e3..6b2293875672 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -82,8 +82,19 @@
#define MSI_STANDARD_EC_SCM_LOAD_ADDRESS 0x2d
#define MSI_STANDARD_EC_SCM_LOAD_MASK (1 << 0)
-#define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4
+#define MSI_STANDARD_EC_FUNCTIONS_ADDRESS 0xe4
+/* Power LED is orange - Turbo mode */
+#define MSI_STANDARD_EC_TURBO_MASK (1 << 1)
+/* Power LED is green - ECO mode */
+#define MSI_STANDARD_EC_ECO_MASK (1 << 3)
+/* Touchpad is turned on */
#define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4)
+/* If this bit != bit 1, turbo mode can't be toggled */
+#define MSI_STANDARD_EC_TURBO_COOLDOWN_MASK (1 << 7)
+
+#define MSI_STANDARD_EC_FAN_ADDRESS 0x33
+/* If zero, fan rotates at maximal speed */
+#define MSI_STANDARD_EC_AUTOFAN_MASK (1 << 0)
#ifdef CONFIG_PM_SLEEP
static int msi_laptop_resume(struct device *device);
@@ -108,23 +119,38 @@ static const struct key_entry msi_laptop_keymap[] = {
static struct input_dev *msi_laptop_input_dev;
-static bool old_ec_model;
static int wlan_s, bluetooth_s, threeg_s;
static int threeg_exists;
-
-/* Some MSI 3G netbook only have one fn key to control Wlan/Bluetooth/3G,
- * those netbook will load the SCM (windows app) to disable the original
- * Wlan/Bluetooth control by BIOS when user press fn key, then control
- * Wlan/Bluetooth/3G by SCM (software control by OS). Without SCM, user
- * cann't on/off 3G module on those 3G netbook.
- * On Linux, msi-laptop driver will do the same thing to disable the
- * original BIOS control, then might need use HAL or other userland
- * application to do the software control that simulate with SCM.
- * e.g. MSI N034 netbook
- */
-static bool load_scm_model;
static struct rfkill *rfk_wlan, *rfk_bluetooth, *rfk_threeg;
+/* MSI laptop quirks */
+struct quirk_entry {
+ bool old_ec_model;
+
+ /* Some MSI 3G netbooks have only one Fn key to control
+ * Wlan/Bluetooth/3G. Those netbooks load the SCM (a Windows app) to
+ * take over the BIOS Wlan/Bluetooth control when the user presses the
+ * Fn key, and then switch Wlan/Bluetooth/3G purely in software.
+ * Without SCM the user can't turn the 3G module on or off on those
+ * netbooks. On Linux, the msi-laptop driver disables the BIOS control
+ * in the same way, so HAL or another userland application may be
+ * needed to emulate the SCM's software control.
+ * e.g. MSI N034 netbook
+ */
+ bool load_scm_model;
+
+ /* Some MSI laptops need delay before reading from EC */
+ bool ec_delay;
+
+ /* Some MSI Wind netbooks (e.g. MSI Wind U100) need loading SCM to get
+ * some features working (e.g. ECO mode), but we cannot change
+ * Wlan/Bluetooth state in software and we can only read its state.
+ */
+ bool ec_read_only;
+};
+
+static struct quirk_entry *quirks;
+
/* Hardware access */
static int set_lcd_level(int level)
@@ -195,10 +221,13 @@ static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
if (sscanf(buf, "%i", &status) != 1 || (status < 0 || status > 1))
return -EINVAL;
+ if (quirks->ec_read_only)
+ return -EOPNOTSUPP;
+
/* read current device state */
result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
if (result < 0)
- return -EINVAL;
+ return result;
if (!!(rdata & mask) != status) {
/* reverse device bit */
@@ -209,7 +238,7 @@ static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
result = ec_write(MSI_STANDARD_EC_COMMAND_ADDRESS, wdata);
if (result < 0)
- return -EINVAL;
+ return result;
}
return count;
@@ -222,7 +251,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
if (result < 0)
- return -1;
+ return result;
if (wlan)
*wlan = !!(rdata & 8);
@@ -240,7 +269,7 @@ static int get_wireless_state_ec_standard(void)
result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
if (result < 0)
- return -1;
+ return result;
wlan_s = !!(rdata & MSI_STANDARD_EC_WLAN_MASK);
@@ -258,7 +287,7 @@ static int get_threeg_exists(void)
result = ec_read(MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS, &rdata);
if (result < 0)
- return -1;
+ return result;
threeg_exists = !!(rdata & MSI_STANDARD_EC_3G_MASK);
@@ -291,9 +320,9 @@ static ssize_t show_wlan(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int ret, enabled;
+ int ret, enabled = 0;
- if (old_ec_model) {
+ if (quirks->old_ec_model) {
ret = get_wireless_state(&enabled, NULL);
} else {
ret = get_wireless_state_ec_standard();
@@ -315,9 +344,9 @@ static ssize_t show_bluetooth(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int ret, enabled;
+ int ret, enabled = 0;
- if (old_ec_model) {
+ if (quirks->old_ec_model) {
ret = get_wireless_state(NULL, &enabled);
} else {
ret = get_wireless_state_ec_standard();
@@ -342,8 +371,8 @@ static ssize_t show_threeg(struct device *dev,
int ret;
/* old msi ec not support 3G */
- if (old_ec_model)
- return -1;
+ if (quirks->old_ec_model)
+ return -ENODEV;
ret = get_wireless_state_ec_standard();
if (ret < 0)
@@ -417,18 +446,119 @@ static ssize_t store_auto_brightness(struct device *dev,
return count;
}
+static ssize_t show_touchpad(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TOUCHPAD_MASK));
+}
+
+static ssize_t show_turbo(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TURBO_MASK));
+}
+
+static ssize_t show_eco(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_ECO_MASK));
+}
+
+static ssize_t show_turbo_cooldown(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", (!!(rdata & MSI_STANDARD_EC_TURBO_MASK)) |
+ (!!(rdata & MSI_STANDARD_EC_TURBO_COOLDOWN_MASK) << 1));
+}
+
+static ssize_t show_auto_fan(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FAN_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_AUTOFAN_MASK));
+}
+
+static ssize_t store_auto_fan(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+
+ int enable, result;
+
+ if (sscanf(buf, "%i", &enable) != 1 || (enable != (enable & 1)))
+ return -EINVAL;
+
+ result = ec_write(MSI_STANDARD_EC_FAN_ADDRESS, enable);
+ if (result < 0)
+ return result;
+
+ return count;
+}
+
static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
static DEVICE_ATTR(auto_brightness, 0644, show_auto_brightness,
store_auto_brightness);
static DEVICE_ATTR(bluetooth, 0444, show_bluetooth, NULL);
static DEVICE_ATTR(wlan, 0444, show_wlan, NULL);
static DEVICE_ATTR(threeg, 0444, show_threeg, NULL);
+static DEVICE_ATTR(touchpad, 0444, show_touchpad, NULL);
+static DEVICE_ATTR(turbo_mode, 0444, show_turbo, NULL);
+static DEVICE_ATTR(eco_mode, 0444, show_eco, NULL);
+static DEVICE_ATTR(turbo_cooldown, 0444, show_turbo_cooldown, NULL);
+static DEVICE_ATTR(auto_fan, 0644, show_auto_fan, store_auto_fan);
static struct attribute *msipf_attributes[] = {
- &dev_attr_lcd_level.attr,
- &dev_attr_auto_brightness.attr,
&dev_attr_bluetooth.attr,
&dev_attr_wlan.attr,
+ &dev_attr_touchpad.attr,
+ &dev_attr_turbo_mode.attr,
+ &dev_attr_eco_mode.attr,
+ &dev_attr_turbo_cooldown.attr,
+ &dev_attr_auto_fan.attr,
+ NULL
+};
+
+static struct attribute *msipf_old_attributes[] = {
+ &dev_attr_lcd_level.attr,
+ &dev_attr_auto_brightness.attr,
NULL
};
@@ -436,6 +566,10 @@ static struct attribute_group msipf_attribute_group = {
.attrs = msipf_attributes
};
+static struct attribute_group msipf_old_attribute_group = {
+ .attrs = msipf_old_attributes
+};
+
static struct platform_driver msipf_driver = {
.driver = {
.name = "msi-laptop-pf",
@@ -448,9 +582,26 @@ static struct platform_device *msipf_device;
/* Initialization */
-static int dmi_check_cb(const struct dmi_system_id *id)
+static struct quirk_entry quirk_old_ec_model = {
+ .old_ec_model = true,
+};
+
+static struct quirk_entry quirk_load_scm_model = {
+ .load_scm_model = true,
+ .ec_delay = true,
+};
+
+static struct quirk_entry quirk_load_scm_ro_model = {
+ .load_scm_model = true,
+ .ec_read_only = true,
+};
+
+static int dmi_check_cb(const struct dmi_system_id *dmi)
{
- pr_info("Identified laptop model '%s'\n", id->ident);
+ pr_info("Identified laptop model '%s'\n", dmi->ident);
+
+ quirks = dmi->driver_data;
+
return 1;
}
@@ -464,6 +615,7 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
DMI_MATCH(DMI_CHASSIS_VENDOR,
"MICRO-STAR INT'L CO.,LTD")
},
+ .driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
},
{
@@ -474,6 +626,7 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "0581"),
DMI_MATCH(DMI_BOARD_NAME, "MS-1058")
},
+ .driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
},
{
@@ -484,6 +637,7 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
DMI_MATCH(DMI_BOARD_NAME, "MS-1412")
},
+ .driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
},
{
@@ -495,12 +649,9 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
DMI_MATCH(DMI_CHASSIS_VENDOR,
"MICRO-STAR INT'L CO.,LTD")
},
+ .driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
},
- { }
-};
-
-static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
{
.ident = "MSI N034",
.matches = {
@@ -510,6 +661,7 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
DMI_MATCH(DMI_CHASSIS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD")
},
+ .driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
@@ -521,6 +673,7 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
DMI_MATCH(DMI_CHASSIS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD")
},
+ .driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
@@ -530,6 +683,7 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
"MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-N014"),
},
+ .driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
@@ -539,6 +693,7 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
"Micro-Star International"),
DMI_MATCH(DMI_PRODUCT_NAME, "CR620"),
},
+ .driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
@@ -548,6 +703,17 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
"Micro-Star International Co., Ltd."),
DMI_MATCH(DMI_PRODUCT_NAME, "U270 series"),
},
+ .driver_data = &quirk_load_scm_model,
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI U90/U100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U90/U100"),
+ },
+ .driver_data = &quirk_load_scm_ro_model,
.callback = dmi_check_cb
},
{ }
@@ -560,32 +726,26 @@ static int rfkill_bluetooth_set(void *data, bool blocked)
* blocked == false is on
* blocked == true is off
*/
- if (blocked)
- set_device_state("0", 0, MSI_STANDARD_EC_BLUETOOTH_MASK);
- else
- set_device_state("1", 0, MSI_STANDARD_EC_BLUETOOTH_MASK);
+ int result = set_device_state(blocked ? "0" : "1", 0,
+ MSI_STANDARD_EC_BLUETOOTH_MASK);
- return 0;
+ return min(result, 0);
}
static int rfkill_wlan_set(void *data, bool blocked)
{
- if (blocked)
- set_device_state("0", 0, MSI_STANDARD_EC_WLAN_MASK);
- else
- set_device_state("1", 0, MSI_STANDARD_EC_WLAN_MASK);
+ int result = set_device_state(blocked ? "0" : "1", 0,
+ MSI_STANDARD_EC_WLAN_MASK);
- return 0;
+ return min(result, 0);
}
static int rfkill_threeg_set(void *data, bool blocked)
{
- if (blocked)
- set_device_state("0", 0, MSI_STANDARD_EC_3G_MASK);
- else
- set_device_state("1", 0, MSI_STANDARD_EC_3G_MASK);
+ int result = set_device_state(blocked ? "0" : "1", 0,
+ MSI_STANDARD_EC_3G_MASK);
- return 0;
+ return min(result, 0);
}
static const struct rfkill_ops rfkill_bluetooth_ops = {
@@ -618,25 +778,34 @@ static void rfkill_cleanup(void)
}
}
+static bool msi_rfkill_set_state(struct rfkill *rfkill, bool blocked)
+{
+ if (quirks->ec_read_only)
+ return rfkill_set_hw_state(rfkill, blocked);
+ else
+ return rfkill_set_sw_state(rfkill, blocked);
+}
+
static void msi_update_rfkill(struct work_struct *ignored)
{
get_wireless_state_ec_standard();
if (rfk_wlan)
- rfkill_set_sw_state(rfk_wlan, !wlan_s);
+ msi_rfkill_set_state(rfk_wlan, !wlan_s);
if (rfk_bluetooth)
- rfkill_set_sw_state(rfk_bluetooth, !bluetooth_s);
+ msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s);
if (rfk_threeg)
- rfkill_set_sw_state(rfk_threeg, !threeg_s);
+ msi_rfkill_set_state(rfk_threeg, !threeg_s);
}
-static DECLARE_DELAYED_WORK(msi_rfkill_work, msi_update_rfkill);
+static DECLARE_DELAYED_WORK(msi_rfkill_dwork, msi_update_rfkill);
+static DECLARE_WORK(msi_rfkill_work, msi_update_rfkill);
static void msi_send_touchpad_key(struct work_struct *ignored)
{
u8 rdata;
int result;
- result = ec_read(MSI_STANDARD_EC_TOUCHPAD_ADDRESS, &rdata);
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
if (result < 0)
return;
@@ -644,7 +813,8 @@ static void msi_send_touchpad_key(struct work_struct *ignored)
(rdata & MSI_STANDARD_EC_TOUCHPAD_MASK) ?
KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF, 1, true);
}
-static DECLARE_DELAYED_WORK(msi_touchpad_work, msi_send_touchpad_key);
+static DECLARE_DELAYED_WORK(msi_touchpad_dwork, msi_send_touchpad_key);
+static DECLARE_WORK(msi_touchpad_work, msi_send_touchpad_key);
static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
@@ -662,14 +832,20 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
extended = false;
switch (data) {
case 0xE4:
- schedule_delayed_work(&msi_touchpad_work,
- round_jiffies_relative(0.5 * HZ));
+ if (quirks->ec_delay) {
+ schedule_delayed_work(&msi_touchpad_dwork,
+ round_jiffies_relative(0.5 * HZ));
+ } else
+ schedule_work(&msi_touchpad_work);
break;
case 0x54:
case 0x62:
case 0x76:
- schedule_delayed_work(&msi_rfkill_work,
- round_jiffies_relative(0.5 * HZ));
+ if (quirks->ec_delay) {
+ schedule_delayed_work(&msi_rfkill_dwork,
+ round_jiffies_relative(0.5 * HZ));
+ } else
+ schedule_work(&msi_rfkill_work);
break;
}
}
@@ -736,8 +912,11 @@ static int rfkill_init(struct platform_device *sdev)
}
/* schedule to run rfkill state initial */
- schedule_delayed_work(&msi_rfkill_init,
- round_jiffies_relative(1 * HZ));
+ if (quirks->ec_delay) {
+ schedule_delayed_work(&msi_rfkill_init,
+ round_jiffies_relative(1 * HZ));
+ } else
+ schedule_work(&msi_rfkill_work);
return 0;
@@ -761,7 +940,7 @@ static int msi_laptop_resume(struct device *device)
u8 data;
int result;
- if (!load_scm_model)
+ if (!quirks->load_scm_model)
return 0;
/* set load SCM to disable hardware control by fn key */
@@ -819,13 +998,15 @@ static int __init load_scm_model_init(struct platform_device *sdev)
u8 data;
int result;
- /* allow userland write sysfs file */
- dev_attr_bluetooth.store = store_bluetooth;
- dev_attr_wlan.store = store_wlan;
- dev_attr_threeg.store = store_threeg;
- dev_attr_bluetooth.attr.mode |= S_IWUSR;
- dev_attr_wlan.attr.mode |= S_IWUSR;
- dev_attr_threeg.attr.mode |= S_IWUSR;
+ if (!quirks->ec_read_only) {
+ /* allow userland write sysfs file */
+ dev_attr_bluetooth.store = store_bluetooth;
+ dev_attr_wlan.store = store_wlan;
+ dev_attr_threeg.store = store_threeg;
+ dev_attr_bluetooth.attr.mode |= S_IWUSR;
+ dev_attr_wlan.attr.mode |= S_IWUSR;
+ dev_attr_threeg.attr.mode |= S_IWUSR;
+ }
/* disable hardware control by fn key */
result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
@@ -874,21 +1055,22 @@ static int __init msi_init(void)
if (acpi_disabled)
return -ENODEV;
- if (force || dmi_check_system(msi_dmi_table))
- old_ec_model = 1;
+ dmi_check_system(msi_dmi_table);
+ if (!quirks)
+ /* quirks may be NULL if no match in DMI table */
+ quirks = &quirk_load_scm_model;
+ if (force)
+ quirks = &quirk_old_ec_model;
- if (!old_ec_model)
+ if (!quirks->old_ec_model)
get_threeg_exists();
- if (!old_ec_model && dmi_check_system(msi_load_scm_models_dmi_table))
- load_scm_model = 1;
-
if (auto_brightness < 0 || auto_brightness > 2)
return -EINVAL;
/* Register backlight stuff */
- if (acpi_video_backlight_support()) {
+ if (!quirks->old_ec_model || acpi_video_backlight_support()) {
pr_info("Brightness ignored, must be controlled by ACPI video driver\n");
} else {
struct backlight_properties props;
@@ -918,7 +1100,7 @@ static int __init msi_init(void)
if (ret)
goto fail_platform_device1;
- if (load_scm_model && (load_scm_model_init(msipf_device) < 0)) {
+ if (quirks->load_scm_model && (load_scm_model_init(msipf_device) < 0)) {
ret = -EINVAL;
goto fail_platform_device1;
}
@@ -928,20 +1110,25 @@ static int __init msi_init(void)
if (ret)
goto fail_platform_device2;
- if (!old_ec_model) {
+ if (!quirks->old_ec_model) {
if (threeg_exists)
ret = device_create_file(&msipf_device->dev,
&dev_attr_threeg);
if (ret)
goto fail_platform_device2;
- }
+ } else {
+ ret = sysfs_create_group(&msipf_device->dev.kobj,
+ &msipf_old_attribute_group);
+ if (ret)
+ goto fail_platform_device2;
- /* Disable automatic brightness control by default because
- * this module was probably loaded to do brightness control in
- * software. */
+ /* Disable automatic brightness control by default because
+ * this module was probably loaded to do brightness control in
+ * software. */
- if (auto_brightness != 2)
- set_auto_brightness(auto_brightness);
+ if (auto_brightness != 2)
+ set_auto_brightness(auto_brightness);
+ }
pr_info("driver " MSI_DRIVER_VERSION " successfully loaded\n");
@@ -949,9 +1136,10 @@ static int __init msi_init(void)
fail_platform_device2:
- if (load_scm_model) {
+ if (quirks->load_scm_model) {
i8042_remove_filter(msi_laptop_i8042_filter);
- cancel_delayed_work_sync(&msi_rfkill_work);
+ cancel_delayed_work_sync(&msi_rfkill_dwork);
+ cancel_work_sync(&msi_rfkill_work);
rfkill_cleanup();
}
platform_device_del(msipf_device);
@@ -973,23 +1161,26 @@ fail_backlight:
static void __exit msi_cleanup(void)
{
- if (load_scm_model) {
+ if (quirks->load_scm_model) {
i8042_remove_filter(msi_laptop_i8042_filter);
msi_laptop_input_destroy();
- cancel_delayed_work_sync(&msi_rfkill_work);
+ cancel_delayed_work_sync(&msi_rfkill_dwork);
+ cancel_work_sync(&msi_rfkill_work);
rfkill_cleanup();
}
sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
- if (!old_ec_model && threeg_exists)
+ if (!quirks->old_ec_model && threeg_exists)
device_remove_file(&msipf_device->dev, &dev_attr_threeg);
platform_device_unregister(msipf_device);
platform_driver_unregister(&msipf_driver);
backlight_device_unregister(msibl_device);
- /* Enable automatic brightness control again */
- if (auto_brightness != 2)
- set_auto_brightness(1);
+ if (quirks->old_ec_model) {
+ /* Enable automatic brightness control again */
+ if (auto_brightness != 2)
+ set_auto_brightness(1);
+ }
pr_info("driver unloaded\n");
}
@@ -1011,3 +1202,4 @@ MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*");
MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*");
MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*");
MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnU270series:*");
+MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnU90/U100:*");
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 2264331bd48e..70222f265f68 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -34,29 +34,65 @@ MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
MODULE_DESCRIPTION("MSI laptop WMI hotkeys driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("wmi:551A1F84-FBDD-4125-91DB-3EA8F44F1D45");
-MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2");
-
#define DRV_NAME "msi-wmi"
#define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45"
-#define MSIWMI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
-
-#define SCANCODE_BASE 0xD0
-#define MSI_WMI_BRIGHTNESSUP SCANCODE_BASE
-#define MSI_WMI_BRIGHTNESSDOWN (SCANCODE_BASE + 1)
-#define MSI_WMI_VOLUMEUP (SCANCODE_BASE + 2)
-#define MSI_WMI_VOLUMEDOWN (SCANCODE_BASE + 3)
-#define MSI_WMI_MUTE (SCANCODE_BASE + 4)
+#define MSIWMI_MSI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
+#define MSIWMI_WIND_EVENT_GUID "5B3CC38A-40D9-7245-8AE6-1145B751BE3F"
+
+MODULE_ALIAS("wmi:" MSIWMI_BIOS_GUID);
+MODULE_ALIAS("wmi:" MSIWMI_MSI_EVENT_GUID);
+MODULE_ALIAS("wmi:" MSIWMI_WIND_EVENT_GUID);
+
+enum msi_scancodes {
+ /* Generic MSI keys (not present on MSI Wind) */
+ MSI_KEY_BRIGHTNESSUP = 0xD0,
+ MSI_KEY_BRIGHTNESSDOWN,
+ MSI_KEY_VOLUMEUP,
+ MSI_KEY_VOLUMEDOWN,
+ MSI_KEY_MUTE,
+ /* MSI Wind keys */
+ WIND_KEY_TOUCHPAD = 0x08, /* Fn+F3 touchpad toggle */
+ WIND_KEY_BLUETOOTH = 0x56, /* Fn+F11 Bluetooth toggle */
+ WIND_KEY_CAMERA, /* Fn+F6 webcam toggle */
+ WIND_KEY_WLAN = 0x5f, /* Fn+F11 Wi-Fi toggle */
+ WIND_KEY_TURBO, /* Fn+F10 turbo mode toggle */
+ WIND_KEY_ECO = 0x69, /* Fn+F10 ECO mode toggle */
+};
static struct key_entry msi_wmi_keymap[] = {
- { KE_KEY, MSI_WMI_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} },
- { KE_KEY, MSI_WMI_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} },
- { KE_KEY, MSI_WMI_VOLUMEUP, {KEY_VOLUMEUP} },
- { KE_KEY, MSI_WMI_VOLUMEDOWN, {KEY_VOLUMEDOWN} },
- { KE_KEY, MSI_WMI_MUTE, {KEY_MUTE} },
- { KE_END, 0}
+ { KE_KEY, MSI_KEY_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} },
+ { KE_KEY, MSI_KEY_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} },
+ { KE_KEY, MSI_KEY_VOLUMEUP, {KEY_VOLUMEUP} },
+ { KE_KEY, MSI_KEY_VOLUMEDOWN, {KEY_VOLUMEDOWN} },
+ { KE_KEY, MSI_KEY_MUTE, {KEY_MUTE} },
+
+ /* These keys work without WMI. Ignore them to avoid double keycodes */
+ { KE_IGNORE, WIND_KEY_TOUCHPAD, {KEY_TOUCHPAD_TOGGLE} },
+ { KE_IGNORE, WIND_KEY_BLUETOOTH, {KEY_BLUETOOTH} },
+ { KE_IGNORE, WIND_KEY_CAMERA, {KEY_CAMERA} },
+ { KE_IGNORE, WIND_KEY_WLAN, {KEY_WLAN} },
+
+ /* These are unknown WMI events found on MSI Wind */
+ { KE_IGNORE, 0x00 },
+ { KE_IGNORE, 0x62 },
+ { KE_IGNORE, 0x63 },
+
+ /* These are MSI Wind keys that should be handled via WMI */
+ { KE_KEY, WIND_KEY_TURBO, {KEY_PROG1} },
+ { KE_KEY, WIND_KEY_ECO, {KEY_PROG2} },
+
+ { KE_END, 0 }
+};
+
+static ktime_t last_pressed;
+
+static const struct {
+ const char *guid;
+ bool quirk_last_pressed;
+} *event_wmi, event_wmis[] = {
+ { MSIWMI_MSI_EVENT_GUID, true },
+ { MSIWMI_WIND_EVENT_GUID, false },
};
-static ktime_t last_pressed[ARRAY_SIZE(msi_wmi_keymap) - 1];
static struct backlight_device *backlight;
@@ -149,7 +185,6 @@ static void msi_wmi_notify(u32 value, void *context)
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
static struct key_entry *key;
union acpi_object *obj;
- ktime_t cur;
acpi_status status;
status = wmi_get_event_data(value, &response);
@@ -165,39 +200,67 @@ static void msi_wmi_notify(u32 value, void *context)
pr_debug("Eventcode: 0x%x\n", eventcode);
key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev,
eventcode);
- if (key) {
- ktime_t diff;
- cur = ktime_get_real();
- diff = ktime_sub(cur, last_pressed[key->code -
- SCANCODE_BASE]);
- /* Ignore event if the same event happened in a 50 ms
+ if (!key) {
+ pr_info("Unknown key pressed - %x\n", eventcode);
+ goto msi_wmi_notify_exit;
+ }
+
+ if (event_wmi->quirk_last_pressed) {
+ ktime_t cur = ktime_get_real();
+ ktime_t diff = ktime_sub(cur, last_pressed);
+ /* Ignore event if any event happened in a 50 ms
timeframe -> Key press may result in 10-20 GPEs */
if (ktime_to_us(diff) < 1000 * 50) {
pr_debug("Suppressed key event 0x%X - "
"Last press was %lld us ago\n",
key->code, ktime_to_us(diff));
- return;
- }
- last_pressed[key->code - SCANCODE_BASE] = cur;
-
- if (key->type == KE_KEY &&
- /* Brightness is served via acpi video driver */
- (!acpi_video_backlight_support() ||
- (key->code != MSI_WMI_BRIGHTNESSUP &&
- key->code != MSI_WMI_BRIGHTNESSDOWN))) {
- pr_debug("Send key: 0x%X - "
- "Input layer keycode: %d\n",
- key->code, key->keycode);
- sparse_keymap_report_entry(msi_wmi_input_dev,
- key, 1, true);
+ goto msi_wmi_notify_exit;
}
- } else
- pr_info("Unknown key pressed - %x\n", eventcode);
+ last_pressed = cur;
+ }
+
+ if (key->type == KE_KEY &&
+ /* Brightness is served via acpi video driver */
+ (backlight ||
+ (key->code != MSI_KEY_BRIGHTNESSUP &&
+ key->code != MSI_KEY_BRIGHTNESSDOWN))) {
+ pr_debug("Send key: 0x%X - Input layer keycode: %d\n",
+ key->code, key->keycode);
+ sparse_keymap_report_entry(msi_wmi_input_dev, key, 1,
+ true);
+ }
} else
pr_info("Unknown event received\n");
+
+msi_wmi_notify_exit:
kfree(response.pointer);
}
+static int __init msi_wmi_backlight_setup(void)
+{
+ int err;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
+ backlight = backlight_device_register(DRV_NAME, NULL, NULL,
+ &msi_backlight_ops,
+ &props);
+ if (IS_ERR(backlight))
+ return PTR_ERR(backlight);
+
+ err = bl_get(NULL);
+ if (err < 0) {
+ backlight_device_unregister(backlight);
+ return err;
+ }
+
+ backlight->props.brightness = err;
+
+ return 0;
+}
+
static int __init msi_wmi_input_setup(void)
{
int err;
@@ -219,7 +282,7 @@ static int __init msi_wmi_input_setup(void)
if (err)
goto err_free_keymap;
- memset(last_pressed, 0, sizeof(last_pressed));
+ last_pressed = ktime_set(0, 0);
return 0;
@@ -233,61 +296,66 @@ err_free_dev:
static int __init msi_wmi_init(void)
{
int err;
+ int i;
- if (!wmi_has_guid(MSIWMI_EVENT_GUID)) {
- pr_err("This machine doesn't have MSI-hotkeys through WMI\n");
- return -ENODEV;
- }
- err = wmi_install_notify_handler(MSIWMI_EVENT_GUID,
- msi_wmi_notify, NULL);
- if (ACPI_FAILURE(err))
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(event_wmis); i++) {
+ if (!wmi_has_guid(event_wmis[i].guid))
+ continue;
- err = msi_wmi_input_setup();
- if (err)
- goto err_uninstall_notifier;
-
- if (!acpi_video_backlight_support()) {
- struct backlight_properties props;
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
- backlight = backlight_device_register(DRV_NAME, NULL, NULL,
- &msi_backlight_ops,
- &props);
- if (IS_ERR(backlight)) {
- err = PTR_ERR(backlight);
+ err = msi_wmi_input_setup();
+ if (err) {
+ pr_err("Unable to setup input device\n");
+ return err;
+ }
+
+ err = wmi_install_notify_handler(event_wmis[i].guid,
+ msi_wmi_notify, NULL);
+ if (ACPI_FAILURE(err)) {
+ pr_err("Unable to setup WMI notify handler\n");
goto err_free_input;
}
- err = bl_get(NULL);
- if (err < 0)
- goto err_free_backlight;
+ pr_debug("Event handler installed\n");
+ event_wmi = &event_wmis[i];
+ break;
+ }
- backlight->props.brightness = err;
+ if (wmi_has_guid(MSIWMI_BIOS_GUID) && !acpi_video_backlight_support()) {
+ err = msi_wmi_backlight_setup();
+ if (err) {
+ pr_err("Unable to setup backlight device\n");
+ goto err_uninstall_handler;
+ }
+ pr_debug("Backlight device created\n");
+ }
+
+ if (!event_wmi && !backlight) {
+ pr_err("This machine doesn't have neither MSI-hotkeys nor backlight through WMI\n");
+ return -ENODEV;
}
- pr_debug("Event handler installed\n");
return 0;
-err_free_backlight:
- backlight_device_unregister(backlight);
+err_uninstall_handler:
+ if (event_wmi)
+ wmi_remove_notify_handler(event_wmi->guid);
err_free_input:
- sparse_keymap_free(msi_wmi_input_dev);
- input_unregister_device(msi_wmi_input_dev);
-err_uninstall_notifier:
- wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
+ if (event_wmi) {
+ sparse_keymap_free(msi_wmi_input_dev);
+ input_unregister_device(msi_wmi_input_dev);
+ }
return err;
}
static void __exit msi_wmi_exit(void)
{
- if (wmi_has_guid(MSIWMI_EVENT_GUID)) {
- wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
+ if (event_wmi) {
+ wmi_remove_notify_handler(event_wmi->guid);
sparse_keymap_free(msi_wmi_input_dev);
input_unregister_device(msi_wmi_input_dev);
- backlight_device_unregister(backlight);
}
+ if (backlight)
+ backlight_device_unregister(backlight);
}
module_init(msi_wmi_init);
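
On the original MSI event GUID a single key press can show up as 10-20 GPEs, which is why the notify handler above (quirk_last_pressed) drops anything that arrives within 50 ms of the previous report. A minimal sketch of that debounce using the same ktime helpers; the foo_* names are hypothetical:

#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/types.h>

static ktime_t foo_last_pressed;

/* Return true if this event arrived within 50 ms of the previous one
 * and should therefore be dropped. */
static bool foo_debounce(void)
{
	ktime_t now = ktime_get_real();
	s64 delta_us = ktime_to_us(ktime_sub(now, foo_last_pressed));

	if (delta_us < 50 * USEC_PER_MSEC)
		return true;

	foo_last_pressed = now;
	return false;
}
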
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index ceb41eff4230..14d4dced1def 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -158,6 +158,11 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd);
static int sony_nc_lid_resume_setup(struct platform_device *pd);
static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
+static int sony_nc_gfx_switch_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_gfx_switch_cleanup(struct platform_device *pd);
+static int __sony_nc_gfx_switch_status_get(void);
+
static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
@@ -1241,17 +1246,13 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
/* Hybrid GFX switching */
sony_call_snc_handle(handle, 0x0000, &result);
dprintk("GFX switch event received (reason: %s)\n",
- (result & 0x01) ?
- "switch change" : "unknown");
-
- /* verify the switch state
- * 1: discrete GFX
- * 0: integrated GFX
- */
- sony_call_snc_handle(handle, 0x0100, &result);
+ (result == 0x1) ? "switch change" :
+ (result == 0x2) ? "output switch" :
+ (result == 0x3) ? "output switch" :
+ "");
ev_type = GFX_SWITCH;
- real_ev = result & 0xff;
+ real_ev = __sony_nc_gfx_switch_status_get();
break;
default:
@@ -1350,6 +1351,13 @@ static void sony_nc_function_setup(struct acpi_device *device,
pr_err("couldn't set up thermal profile function (%d)\n",
result);
break;
+ case 0x0128:
+ case 0x0146:
+ result = sony_nc_gfx_switch_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up GFX Switch status (%d)\n",
+ result);
+ break;
case 0x0131:
result = sony_nc_highspeed_charging_setup(pf_device);
if (result)
@@ -1365,6 +1373,8 @@ static void sony_nc_function_setup(struct acpi_device *device,
break;
case 0x0137:
case 0x0143:
+ case 0x014b:
+ case 0x014c:
result = sony_nc_kbd_backlight_setup(pf_device, handle);
if (result)
pr_err("couldn't set up keyboard backlight function (%d)\n",
@@ -1414,6 +1424,10 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
case 0x0122:
sony_nc_thermal_cleanup(pd);
break;
+ case 0x0128:
+ case 0x0146:
+ sony_nc_gfx_switch_cleanup(pd);
+ break;
case 0x0131:
sony_nc_highspeed_charging_cleanup(pd);
break;
@@ -1423,6 +1437,8 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
break;
case 0x0137:
case 0x0143:
+ case 0x014b:
+ case 0x014c:
sony_nc_kbd_backlight_cleanup(pd);
break;
default:
@@ -1467,6 +1483,8 @@ static void sony_nc_function_resume(void)
break;
case 0x0137:
case 0x0143:
+ case 0x014b:
+ case 0x014c:
sony_nc_kbd_backlight_resume();
break;
default:
@@ -1534,7 +1552,7 @@ static int sony_nc_rfkill_set(void *data, bool blocked)
int argument = sony_rfkill_address[(long) data] + 0x100;
if (!blocked)
- argument |= 0x030000;
+ argument |= 0x070000;
return sony_call_snc_handle(sony_rfkill_handle, argument, &result);
}
@@ -2333,7 +2351,7 @@ static int sony_nc_lid_resume_setup(struct platform_device *pd)
return 0;
liderror:
- for (; i > 0; i--)
+ for (i--; i >= 0; i--)
device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
kfree(lid_ctl);
@@ -2355,6 +2373,97 @@ static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
}
}
+/* GFX Switch position */
+enum gfx_switch {
+ SPEED,
+ STAMINA,
+ AUTO
+};
+struct snc_gfx_switch_control {
+ struct device_attribute attr;
+ unsigned int handle;
+};
+static struct snc_gfx_switch_control *gfxs_ctl;
+
+/* returns 0 for speed, 1 for stamina, 2 for auto, or a negative errno */
+static int __sony_nc_gfx_switch_status_get(void)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(gfxs_ctl->handle, 0x0100, &result))
+ return -EIO;
+
+ switch (gfxs_ctl->handle) {
+ case 0x0146:
+ /* 1: discrete GFX (speed)
+ * 0: integrated GFX (stamina)
+ */
+ return result & 0x1 ? SPEED : STAMINA;
+ break;
+ case 0x0128:
+ /* a more elaborate bitmask, for now:
+ * bit 7: automatic switching
+ * bit 1: integrated GFX (stamina)
+ * otherwise: discrete GFX (speed)
+ */
+ dprintk("GFX Status: 0x%x\n", result);
+ return result & 0x80 ? AUTO :
+ result & 0x02 ? STAMINA : SPEED;
+ break;
+ }
+ return -EINVAL;
+}
+
+static ssize_t sony_nc_gfx_switch_status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer)
+{
+ int pos = __sony_nc_gfx_switch_status_get();
+
+ if (pos < 0)
+ return pos;
+
+ return snprintf(buffer, PAGE_SIZE, "%s\n",
+ (pos == SPEED) ? "speed" :
+ (pos == STAMINA) ? "stamina" : "auto");
+}
+
+static int sony_nc_gfx_switch_setup(struct platform_device *pd,
+ unsigned int handle)
+{
+ unsigned int result;
+
+ gfxs_ctl = kzalloc(sizeof(struct snc_gfx_switch_control), GFP_KERNEL);
+ if (!gfxs_ctl)
+ return -ENOMEM;
+
+ gfxs_ctl->handle = handle;
+
+ sysfs_attr_init(&gfxs_ctl->attr.attr);
+ gfxs_ctl->attr.attr.name = "gfx_switch_status";
+ gfxs_ctl->attr.attr.mode = S_IRUGO;
+ gfxs_ctl->attr.show = sony_nc_gfx_switch_status_show;
+
+ result = device_create_file(&pd->dev, &gfxs_ctl->attr);
+ if (result)
+ goto gfxerror;
+
+ return 0;
+
+gfxerror:
+ kfree(gfxs_ctl);
+ gfxs_ctl = NULL;
+
+ return result;
+}
+
+static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
+{
+ if (gfxs_ctl) {
+ device_remove_file(&pd->dev, &gfxs_ctl->attr);
+
+ kfree(gfxs_ctl);
+ gfxs_ctl = NULL;
+ }
+}
+
/* High speed charging function */
static struct device_attribute *hsc_handle;
@@ -2533,6 +2642,8 @@ static void sony_nc_backlight_ng_read_limits(int handle,
lvl_table_len = 9;
break;
case 0x143:
+ case 0x14b:
+ case 0x14c:
lvl_table_len = 16;
break;
}
@@ -2584,6 +2695,18 @@ static void sony_nc_backlight_setup(void)
sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+ } else if (sony_find_snc_handle(0x14b) >= 0) {
+ ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x3000;
+ sony_nc_backlight_ng_read_limits(0x14b, &sony_bl_props);
+ max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
+ } else if (sony_find_snc_handle(0x14c) >= 0) {
+ ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x3000;
+ sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props);
+ max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
&unused))) {
ops = &sony_backlight_ops;
@@ -3566,7 +3689,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
}
if (ret > 0) {
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
inode->i_atime = current_fs_time(inode->i_sb);
}
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index ebcb461bb2b0..9a907567f41e 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -209,9 +209,8 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */
TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* thermal table changed */
- TP_HKEY_EV_UNK_6040 = 0x6040, /* Related to AC change?
- some sort of APM hint,
- W520 */
+ /* AC-related events */
+ TP_HKEY_EV_AC_CHANGED = 0x6040, /* AC status changed */
/* Misc */
TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */
@@ -852,7 +851,7 @@ static ssize_t dispatch_proc_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *pos)
{
- struct ibm_struct *ibm = PDE(file->f_path.dentry->d_inode)->data;
+ struct ibm_struct *ibm = PDE(file_inode(file))->data;
char *kernbuf;
int ret;
@@ -3629,6 +3628,12 @@ static bool hotkey_notify_6xxx(const u32 hkey,
"a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
+ case TP_HKEY_EV_AC_CHANGED:
+ /* X120e, X121e, X220, X220i, X220t, X230, T420, T420s, W520:
+ * AC status changed; can be triggered by plugging or
+ * unplugging AC adapter, docking or undocking. */
+
+ /* fallthrough */
case TP_HKEY_EV_KEY_NUMLOCK:
case TP_HKEY_EV_KEY_FN:
@@ -8574,7 +8579,8 @@ static bool __pure __init tpacpi_is_valid_fw_id(const char* const s,
return s && strlen(s) >= 8 &&
tpacpi_is_fw_digit(s[0]) &&
tpacpi_is_fw_digit(s[1]) &&
- s[2] == t && s[3] == 'T' &&
+ s[2] == t &&
+ (s[3] == 'T' || s[3] == 'N') &&
tpacpi_is_fw_digit(s[4]) &&
tpacpi_is_fw_digit(s[5]);
}
@@ -8607,7 +8613,8 @@ static int __must_check __init get_thinkpad_model_data(
return -ENOMEM;
/* Really ancient ThinkPad 240X will fail this, which is fine */
- if (!tpacpi_is_valid_fw_id(tp->bios_version_str, 'E'))
+ if (!(tpacpi_is_valid_fw_id(tp->bios_version_str, 'E') ||
+ tpacpi_is_valid_fw_id(tp->bios_version_str, 'C')))
return 0;
tp->bios_model = tp->bios_version_str[0]
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 904476b2fa8f..242abac62d8b 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -583,7 +583,7 @@ static int set_lcd_status(struct backlight_device *bd)
static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
+ struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
char cmd[42];
size_t len;
int value;
@@ -650,7 +650,7 @@ static int video_proc_open(struct inode *inode, struct file *file)
static ssize_t video_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
+ struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
char *cmd, *buffer;
int ret;
int value;
@@ -750,7 +750,7 @@ static int fan_proc_open(struct inode *inode, struct file *file)
static ssize_t fan_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
+ struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
char cmd[42];
size_t len;
int value;
@@ -822,7 +822,7 @@ static int keys_proc_open(struct inode *inode, struct file *file)
static ssize_t keys_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
+ struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
char cmd[42];
size_t len;
int value;
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 315b3112aca8..65f735ac6b3b 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -30,7 +30,7 @@ static struct proc_dir_entry *isapnp_proc_bus_dir = NULL;
static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence)
{
loff_t new = -1;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
mutex_lock(&inode->i_mutex);
switch (whence) {
@@ -55,7 +55,7 @@ static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence)
static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
size_t nbytes, loff_t * ppos)
{
- struct inode *ino = file->f_path.dentry->d_inode;
+ struct inode *ino = file_inode(file);
struct proc_dir_entry *dp = PDE(ino);
struct pnp_dev *dev = dp->data;
int pos = *ppos;
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index bc89f392a629..63ddb0173456 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -244,7 +244,7 @@ static int pnpbios_proc_open(struct inode *inode, struct file *file)
static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- void *data = PDE(file->f_path.dentry->d_inode)->data;
+ void *data = PDE(file_inode(file))->data;
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
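
The proc handler changes above all replace the open-coded file->f_path.dentry->d_inode chain with the file_inode() accessor. A minimal sketch of how such a handler looks after the conversion; foo_proc_write is a hypothetical example, not one of the drivers above:

#include <linux/fs.h>
#include <linux/proc_fs.h>

static ssize_t foo_proc_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *pos)
{
	/* old: PDE(file->f_path.dentry->d_inode)->data */
	void *data = PDE(file_inode(file))->data;

	/* this sketch just accepts and ignores the input */
	(void)data;
	return count;
}
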
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index ca91396fc48e..0727f9256138 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -1515,16 +1515,11 @@ static int bq2415x_probe(struct i2c_client *client,
}
/* Get new ID for the new device */
- ret = idr_pre_get(&bq2415x_id, GFP_KERNEL);
- if (ret == 0)
- return -ENOMEM;
-
mutex_lock(&bq2415x_id_mutex);
- ret = idr_get_new(&bq2415x_id, client, &num);
+ num = idr_alloc(&bq2415x_id, client, 0, 0, GFP_KERNEL);
mutex_unlock(&bq2415x_id_mutex);
-
- if (ret < 0)
- return ret;
+ if (num < 0)
+ return num;
name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
if (!name) {
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 8ccf5d7d0add..26037ca7efb4 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -791,14 +791,11 @@ static int bq27x00_battery_probe(struct i2c_client *client,
int retval = 0;
/* Get new ID for the new battery device */
- retval = idr_pre_get(&battery_id, GFP_KERNEL);
- if (retval == 0)
- return -ENOMEM;
mutex_lock(&battery_mutex);
- retval = idr_get_new(&battery_id, client, &num);
+ num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
mutex_unlock(&battery_mutex);
- if (retval < 0)
- return retval;
+ if (num < 0)
+ return num;
name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
if (!name) {
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index e7301b3ed623..c09e7726c96c 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -395,17 +395,12 @@ static int ds278x_battery_probe(struct i2c_client *client,
}
/* Get an ID for this battery */
- ret = idr_pre_get(&battery_id, GFP_KERNEL);
- if (ret == 0) {
- ret = -ENOMEM;
- goto fail_id;
- }
-
mutex_lock(&battery_lock);
- ret = idr_get_new(&battery_id, client, &num);
+ ret = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
mutex_unlock(&battery_lock);
if (ret < 0)
goto fail_id;
+ num = ret;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
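
The battery drivers above migrate from the two-step idr_pre_get()/idr_get_new() interface to idr_alloc(), which allocates and returns the new ID in one call (a negative return is an errno, and end == 0 means no upper bound). A minimal sketch under the same locking scheme these drivers use; the foo_* names are placeholders:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(foo_idr);
static DEFINE_MUTEX(foo_idr_mutex);

/* Allocate an ID for @ptr; returns the new ID or a negative errno. */
static int foo_get_id(void *ptr)
{
	int id;

	mutex_lock(&foo_idr_mutex);
	id = idr_alloc(&foo_idr, ptr, 0, 0, GFP_KERNEL);
	mutex_unlock(&foo_idr_mutex);

	return id;
}
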
diff --git a/drivers/pps/clients/Kconfig b/drivers/pps/clients/Kconfig
index 445197d4a8c4..6efd9b60d8ff 100644
--- a/drivers/pps/clients/Kconfig
+++ b/drivers/pps/clients/Kconfig
@@ -17,7 +17,7 @@ config PPS_CLIENT_KTIMER
config PPS_CLIENT_LDISC
tristate "PPS line discipline"
- depends on PPS
+ depends on PPS && TTY
help
If you say yes here you get support for a PPS source connected
with the CD (Carrier Detect) pin of your serial port.
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index 2bf0c1b608dd..d3db26e46489 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -128,7 +128,8 @@ static int pps_gpio_probe(struct platform_device *pdev)
}
/* allocate space for device info */
- data = kzalloc(sizeof(struct pps_gpio_device_data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct pps_gpio_device_data),
+ GFP_KERNEL);
if (data == NULL) {
err = -ENOMEM;
goto return_error;
@@ -150,7 +151,6 @@ static int pps_gpio_probe(struct platform_device *pdev)
pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR;
data->pps = pps_register_source(&data->info, pps_default_params);
if (data->pps == NULL) {
- kfree(data);
pr_err("failed to register IRQ %d as PPS source\n", irq);
err = -EINVAL;
goto return_error;
@@ -164,7 +164,6 @@ static int pps_gpio_probe(struct platform_device *pdev)
get_irqf_trigger_flags(pdata), data->info.name, data);
if (ret) {
pps_unregister_source(data->pps);
- kfree(data);
pr_err("failed to acquire IRQ %d\n", irq);
err = -EINVAL;
goto return_error;
@@ -190,7 +189,6 @@ static int pps_gpio_remove(struct platform_device *pdev)
gpio_free(pdata->gpio_pin);
pps_unregister_source(data->pps);
pr_info("removed IRQ %d as PPS source\n", data->irq);
- kfree(data);
return 0;
}
diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
index 79451f2dea6a..73bd3bb4d93b 100644
--- a/drivers/pps/clients/pps-ldisc.c
+++ b/drivers/pps/clients/pps-ldisc.c
@@ -25,18 +25,27 @@
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/pps_kernel.h>
+#include <linux/bug.h>
#define PPS_TTY_MAGIC 0x0001
-static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status,
- struct pps_event_time *ts)
+static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status)
{
- struct pps_device *pps = (struct pps_device *)tty->disc_data;
+ struct pps_device *pps;
+ struct pps_event_time ts;
+
+ pps_get_ts(&ts);
- BUG_ON(pps == NULL);
+ pps = pps_lookup_dev(tty);
+ /*
+ * This should never fail, but the ldisc locking is very
+ * convoluted, so don't crash just in case.
+ */
+ if (WARN_ON_ONCE(pps == NULL))
+ return;
/* Now do the PPS event report */
- pps_event(pps, ts, status ? PPS_CAPTUREASSERT :
+ pps_event(pps, &ts, status ? PPS_CAPTUREASSERT :
PPS_CAPTURECLEAR, NULL);
dev_dbg(pps->dev, "PPS %s at %lu\n",
@@ -67,9 +76,9 @@ static int pps_tty_open(struct tty_struct *tty)
pr_err("cannot register PPS source \"%s\"\n", info.path);
return -ENOMEM;
}
- tty->disc_data = pps;
+ pps->lookup_cookie = tty;
- /* Should open N_TTY ldisc too */
+ /* Now open the base class N_TTY ldisc */
ret = alias_n_tty_open(tty);
if (ret < 0) {
pr_err("cannot open tty ldisc \"%s\"\n", info.path);
@@ -81,7 +90,6 @@ static int pps_tty_open(struct tty_struct *tty)
return 0;
err_unregister:
- tty->disc_data = NULL;
pps_unregister_source(pps);
return ret;
}
@@ -90,11 +98,13 @@ static void (*alias_n_tty_close)(struct tty_struct *tty);
static void pps_tty_close(struct tty_struct *tty)
{
- struct pps_device *pps = (struct pps_device *)tty->disc_data;
+ struct pps_device *pps = pps_lookup_dev(tty);
alias_n_tty_close(tty);
- tty->disc_data = NULL;
+ if (WARN_ON(!pps))
+ return;
+
dev_info(pps->dev, "removed\n");
pps_unregister_source(pps);
}
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index f197e8ea185c..cdad4d95b20e 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -102,7 +102,7 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
goto pps_register_source_exit;
}
- /* These initializations must be done before calling idr_get_new()
+ /* These initializations must be done before calling idr_alloc()
* in order to avoid races with pps_event().
*/
pps->params.api_version = PPS_API_VERS;
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 2420d5af0583..7173e3ad475d 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -247,12 +247,15 @@ static int pps_cdev_open(struct inode *inode, struct file *file)
struct pps_device *pps = container_of(inode->i_cdev,
struct pps_device, cdev);
file->private_data = pps;
-
+ kobject_get(&pps->dev->kobj);
return 0;
}
static int pps_cdev_release(struct inode *inode, struct file *file)
{
+ struct pps_device *pps = container_of(inode->i_cdev,
+ struct pps_device, cdev);
+ kobject_put(&pps->dev->kobj);
return 0;
}
@@ -274,8 +277,10 @@ static void pps_device_destruct(struct device *dev)
{
struct pps_device *pps = dev_get_drvdata(dev);
- /* release id here to protect others from using it while it's
- * still in use */
+ cdev_del(&pps->cdev);
+
+ /* Now we can release the ID for re-use */
+ pr_debug("deallocating pps%d\n", pps->id);
mutex_lock(&pps_idr_lock);
idr_remove(&pps_idr, pps->id);
mutex_unlock(&pps_idr_lock);
@@ -290,29 +295,21 @@ int pps_register_cdev(struct pps_device *pps)
dev_t devt;
mutex_lock(&pps_idr_lock);
- /* Get new ID for the new PPS source */
- if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
- mutex_unlock(&pps_idr_lock);
- return -ENOMEM;
- }
-
- /* Now really allocate the PPS source.
- * After idr_get_new() calling the new source will be freely available
- * into the kernel.
+ /*
+ * Get new ID for the new PPS source. After idr_alloc() calling
+ * the new source will be freely available into the kernel.
*/
- err = idr_get_new(&pps_idr, pps, &pps->id);
- mutex_unlock(&pps_idr_lock);
-
- if (err < 0)
- return err;
-
- pps->id &= MAX_IDR_MASK;
- if (pps->id >= PPS_MAX_SOURCES) {
- pr_err("%s: too many PPS sources in the system\n",
- pps->info.name);
- err = -EBUSY;
- goto free_idr;
+ err = idr_alloc(&pps_idr, pps, 0, PPS_MAX_SOURCES, GFP_KERNEL);
+ if (err < 0) {
+ if (err == -ENOSPC) {
+ pr_err("%s: too many PPS sources in the system\n",
+ pps->info.name);
+ err = -EBUSY;
+ }
+ goto out_unlock;
}
+ pps->id = err;
+ mutex_unlock(&pps_idr_lock);
devt = MKDEV(MAJOR(pps_devt), pps->id);
@@ -332,6 +329,7 @@ int pps_register_cdev(struct pps_device *pps)
goto del_cdev;
}
+ /* Override the release function with our own */
pps->dev->release = pps_device_destruct;
pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
@@ -345,18 +343,51 @@ del_cdev:
free_idr:
mutex_lock(&pps_idr_lock);
idr_remove(&pps_idr, pps->id);
+out_unlock:
mutex_unlock(&pps_idr_lock);
-
return err;
}
void pps_unregister_cdev(struct pps_device *pps)
{
+ pr_debug("unregistering pps%d\n", pps->id);
+ pps->lookup_cookie = NULL;
device_destroy(pps_class, pps->dev->devt);
- cdev_del(&pps->cdev);
}
/*
+ * Look up a pps device by magic cookie.
+ * The cookie is usually a pointer to some enclosing device, but this
+ * code doesn't care; you should never be dereferencing it.
+ *
+ * This is a bit of a kludge that is currently used only by the PPS
+ * serial line discipline. It may need to be tweaked when a second user
+ * is found.
+ *
+ * There is no function interface for setting the lookup_cookie field.
+ * It's initialized to NULL when the pps device is created, and if a
+ * client wants to use it, just fill it in afterward.
+ *
+ * The cookie is automatically set to NULL in pps_unregister_source()
+ * so that it will not be used again, even if the pps device cannot
+ * be removed from the idr due to pending references holding the minor
+ * number in use.
+ */
+struct pps_device *pps_lookup_dev(void const *cookie)
+{
+ struct pps_device *pps;
+ unsigned id;
+
+ rcu_read_lock();
+ idr_for_each_entry(&pps_idr, pps, id)
+ if (cookie == pps->lookup_cookie)
+ break;
+ rcu_read_unlock();
+ return pps;
+}
+EXPORT_SYMBOL(pps_lookup_dev);
+
+/*
* Module stuff
*/
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index e513cd998170..0e0bfa035083 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -37,6 +37,18 @@ config PWM_AB8500
To compile this driver as a module, choose M here: the module
will be called pwm-ab8500.
+config PWM_ATMEL_TCB
+ tristate "Atmel TC Block PWM support"
+ depends on ATMEL_TCLIB && OF
+ help
+ Generic PWM framework driver for Atmel Timer Counter Block.
+
+ A Timer Counter Block provides 6 PWM devices grouped in pairs.
+ Devices in the same pair must share the same period.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-atmel-tcb.
+
config PWM_BFIN
tristate "Blackfin PWM support"
depends on BFIN_GPTIMERS
@@ -47,7 +59,7 @@ config PWM_BFIN
will be called pwm-bfin.
config PWM_IMX
- tristate "i.MX pwm support"
+ tristate "i.MX PWM support"
depends on ARCH_MXC
help
Generic PWM framework driver for i.MX.
@@ -104,7 +116,7 @@ config PWM_PXA
will be called pwm-pxa.
config PWM_SAMSUNG
- tristate "Samsung pwm support"
+ tristate "Samsung PWM support"
depends on PLAT_SAMSUNG
help
Generic PWM framework driver for Samsung.
@@ -183,7 +195,7 @@ config PWM_TWL_LED
will be called pwm-twl-led.
config PWM_VT8500
- tristate "vt8500 pwm support"
+ tristate "vt8500 PWM support"
depends on ARCH_VT8500
help
Generic PWM framework driver for vt8500.
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 62a2963cfe58..94ba21e24bd6 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_PWM) += core.o
obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
+obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
obj-$(CONFIG_PWM_IMX) += pwm-imx.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 903138b18842..32221cb0cbe7 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -211,6 +211,7 @@ int pwm_set_chip_data(struct pwm_device *pwm, void *data)
return 0;
}
+EXPORT_SYMBOL_GPL(pwm_set_chip_data);
/**
* pwm_get_chip_data() - get private chip data for a PWM
@@ -220,6 +221,7 @@ void *pwm_get_chip_data(struct pwm_device *pwm)
{
return pwm ? pwm->chip_data : NULL;
}
+EXPORT_SYMBOL_GPL(pwm_get_chip_data);
/**
* pwmchip_add() - register a new PWM chip
@@ -471,7 +473,7 @@ static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
}
/**
- * of_pwm_request() - request a PWM via the PWM framework
+ * of_pwm_get() - request a PWM via the PWM framework
* @np: device node to get the PWM from
* @con_id: consumer name
*
@@ -486,8 +488,7 @@ static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
* becomes mandatory for devices that look up the PWM device via the con_id
* parameter.
*/
-static struct pwm_device *of_pwm_request(struct device_node *np,
- const char *con_id)
+struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id)
{
struct pwm_device *pwm = NULL;
struct of_phandle_args args;
@@ -545,6 +546,7 @@ put:
return pwm;
}
+EXPORT_SYMBOL_GPL(of_pwm_get);
/**
* pwm_add_table() - register PWM device consumers
@@ -587,7 +589,7 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
/* look up via DT first */
if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
- return of_pwm_request(dev->of_node, con_id);
+ return of_pwm_get(dev->of_node, con_id);
/*
* We look up the provider in the static table typically provided by
@@ -708,6 +710,36 @@ struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id)
}
EXPORT_SYMBOL_GPL(devm_pwm_get);
+/**
+ * devm_of_pwm_get() - resource managed of_pwm_get()
+ * @dev: device for PWM consumer
+ * @np: device node to get the PWM from
+ * @con_id: consumer name
+ *
+ * This function performs like of_pwm_get() but the acquired PWM device will
+ * automatically be released on driver detach.
+ */
+struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
+ const char *con_id)
+{
+ struct pwm_device **ptr, *pwm;
+
+ ptr = devres_alloc(devm_pwm_release, sizeof(**ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ pwm = of_pwm_get(np, con_id);
+ if (!IS_ERR(pwm)) {
+ *ptr = pwm;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(devm_of_pwm_get);
+
static int devm_pwm_match(struct device *dev, void *res, void *data)
{
struct pwm_device **p = res;
@@ -733,6 +765,18 @@ void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
}
EXPORT_SYMBOL_GPL(devm_pwm_put);
+/**
+ * pwm_can_sleep() - report whether PWM access will sleep
+ * @pwm: PWM device
+ *
+ * Returns true if accessing the PWM can sleep, false otherwise.
+ */
+bool pwm_can_sleep(struct pwm_device *pwm)
+{
+ return pwm->chip->can_sleep;
+}
+EXPORT_SYMBOL_GPL(pwm_can_sleep);
+
#ifdef CONFIG_DEBUG_FS
static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
{
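
The new consumer entry points exported above (of_pwm_get(), devm_of_pwm_get() and pwm_can_sleep()) are meant to be called from leaf drivers. A minimal consumer sketch, not taken from this patch: the probe function name and the "backlight" con_id are hypothetical, and error handling is reduced to the essentials.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

static int example_bl_probe(struct platform_device *pdev)
{
	struct pwm_device *pwm;

	/* Acquired against the device's own DT node; released on detach. */
	pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, "backlight");
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/*
	 * Chips that set ->can_sleep (e.g. the TWL PWMs later in this
	 * series) must not be programmed from atomic context.
	 */
	if (pwm_can_sleep(pwm))
		dev_dbg(&pdev->dev, "PWM accesses may sleep\n");

	return pwm_config(pwm, 500000, 1000000); /* 50% duty, 1 ms period */
}

With the devm_ variant nothing needs to be released by hand; devm_pwm_put() remains available for early release.
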
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
new file mode 100644
index 000000000000..16cb53092857
--- /dev/null
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) Overkiz SAS 2012
+ *
+ * Author: Boris BREZILLON <b.brezillon@overkiz.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/atmel_tc.h>
+#include <linux/pwm.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#define NPWM 6
+
+#define ATMEL_TC_ACMR_MASK (ATMEL_TC_ACPA | ATMEL_TC_ACPC | \
+ ATMEL_TC_AEEVT | ATMEL_TC_ASWTRG)
+
+#define ATMEL_TC_BCMR_MASK (ATMEL_TC_BCPB | ATMEL_TC_BCPC | \
+ ATMEL_TC_BEEVT | ATMEL_TC_BSWTRG)
+
+struct atmel_tcb_pwm_device {
+ enum pwm_polarity polarity; /* PWM polarity */
+ unsigned div; /* PWM clock divider */
+ unsigned duty; /* PWM duty expressed in clk cycles */
+ unsigned period; /* PWM period expressed in clk cycles */
+};
+
+struct atmel_tcb_pwm_chip {
+ struct pwm_chip chip;
+ spinlock_t lock;
+ struct atmel_tc *tc;
+ struct atmel_tcb_pwm_device *pwms[NPWM];
+};
+
+static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct atmel_tcb_pwm_chip, chip);
+}
+
+static int atmel_tcb_pwm_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+
+ tcbpwm->polarity = polarity;
+
+ return 0;
+}
+
+static int atmel_tcb_pwm_request(struct pwm_chip *chip,
+ struct pwm_device *pwm)
+{
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_pwm_device *tcbpwm;
+ struct atmel_tc *tc = tcbpwmc->tc;
+ void __iomem *regs = tc->regs;
+ unsigned group = pwm->hwpwm / 2;
+ unsigned index = pwm->hwpwm % 2;
+ unsigned cmr;
+ int ret;
+
+ tcbpwm = devm_kzalloc(chip->dev, sizeof(*tcbpwm), GFP_KERNEL);
+ if (!tcbpwm)
+ return -ENOMEM;
+
+ ret = clk_enable(tc->clk[group]);
+ if (ret) {
+ devm_kfree(chip->dev, tcbpwm);
+ return ret;
+ }
+
+ pwm_set_chip_data(pwm, tcbpwm);
+ tcbpwm->polarity = PWM_POLARITY_NORMAL;
+ tcbpwm->duty = 0;
+ tcbpwm->period = 0;
+ tcbpwm->div = 0;
+
+ spin_lock(&tcbpwmc->lock);
+ cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR));
+ /*
+ * Get init config from Timer Counter registers if
+ * Timer Counter is already configured as a PWM generator.
+ */
+ if (cmr & ATMEL_TC_WAVE) {
+ if (index == 0)
+ tcbpwm->duty =
+ __raw_readl(regs + ATMEL_TC_REG(group, RA));
+ else
+ tcbpwm->duty =
+ __raw_readl(regs + ATMEL_TC_REG(group, RB));
+
+ tcbpwm->div = cmr & ATMEL_TC_TCCLKS;
+ tcbpwm->period = __raw_readl(regs + ATMEL_TC_REG(group, RC));
+ cmr &= (ATMEL_TC_TCCLKS | ATMEL_TC_ACMR_MASK |
+ ATMEL_TC_BCMR_MASK);
+ } else
+ cmr = 0;
+
+ cmr |= ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO | ATMEL_TC_EEVT_XC0;
+ __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
+ spin_unlock(&tcbpwmc->lock);
+
+ tcbpwmc->pwms[pwm->hwpwm] = tcbpwm;
+
+ return 0;
+}
+
+static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ struct atmel_tc *tc = tcbpwmc->tc;
+
+ clk_disable(tc->clk[pwm->hwpwm / 2]);
+ tcbpwmc->pwms[pwm->hwpwm] = NULL;
+ devm_kfree(chip->dev, tcbpwm);
+}
+
+static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ struct atmel_tc *tc = tcbpwmc->tc;
+ void __iomem *regs = tc->regs;
+ unsigned group = pwm->hwpwm / 2;
+ unsigned index = pwm->hwpwm % 2;
+ unsigned cmr;
+ enum pwm_polarity polarity = tcbpwm->polarity;
+
+ /*
+ * If duty is 0 the timer will be stopped and we have to
+ * configure the output correctly on software trigger:
+ * - set output to high if PWM_POLARITY_INVERSED
+ * - set output to low if PWM_POLARITY_NORMAL
+ *
+ * This is why we invert the polarity in this case.
+ */
+ if (tcbpwm->duty == 0)
+ polarity = !polarity;
+
+ spin_lock(&tcbpwmc->lock);
+ cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR));
+
+ /* flush old setting and set the new one */
+ if (index == 0) {
+ cmr &= ~ATMEL_TC_ACMR_MASK;
+ if (polarity == PWM_POLARITY_INVERSED)
+ cmr |= ATMEL_TC_ASWTRG_CLEAR;
+ else
+ cmr |= ATMEL_TC_ASWTRG_SET;
+ } else {
+ cmr &= ~ATMEL_TC_BCMR_MASK;
+ if (polarity == PWM_POLARITY_INVERSED)
+ cmr |= ATMEL_TC_BSWTRG_CLEAR;
+ else
+ cmr |= ATMEL_TC_BSWTRG_SET;
+ }
+
+ __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
+
+ /*
+ * Use software trigger to apply the new setting.
+ * If both PWM devices in this group are disabled we stop the clock.
+ */
+ if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC)))
+ __raw_writel(ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS,
+ regs + ATMEL_TC_REG(group, CCR));
+ else
+ __raw_writel(ATMEL_TC_SWTRG, regs +
+ ATMEL_TC_REG(group, CCR));
+
+ spin_unlock(&tcbpwmc->lock);
+}
+
+static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ struct atmel_tc *tc = tcbpwmc->tc;
+ void __iomem *regs = tc->regs;
+ unsigned group = pwm->hwpwm / 2;
+ unsigned index = pwm->hwpwm % 2;
+ u32 cmr;
+ enum pwm_polarity polarity = tcbpwm->polarity;
+
+ /*
+ * If duty is 0 the timer will be stopped and we have to
+ * configure the output correctly on software trigger:
+ * - set output to high if PWM_POLARITY_INVERSED
+ * - set output to low if PWM_POLARITY_NORMAL
+ *
+ * This is why we invert the polarity in this case.
+ */
+ if (tcbpwm->duty == 0)
+ polarity = !polarity;
+
+ spin_lock(&tcbpwmc->lock);
+ cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR));
+
+ /* flush old setting and set the new one */
+ cmr &= ~ATMEL_TC_TCCLKS;
+
+ if (index == 0) {
+ cmr &= ~ATMEL_TC_ACMR_MASK;
+
+ /* Set CMR flags according to given polarity */
+ if (polarity == PWM_POLARITY_INVERSED)
+ cmr |= ATMEL_TC_ASWTRG_CLEAR;
+ else
+ cmr |= ATMEL_TC_ASWTRG_SET;
+ } else {
+ cmr &= ~ATMEL_TC_BCMR_MASK;
+ if (polarity == PWM_POLARITY_INVERSED)
+ cmr |= ATMEL_TC_BSWTRG_CLEAR;
+ else
+ cmr |= ATMEL_TC_BSWTRG_SET;
+ }
+
+ /*
+ * If duty is 0 or equal to the period, there is no need to register
+ * a specific action on RA/RB and RC compare.
+ * The output will be configured on software trigger and will keep
+ * that configuration until the next config call.
+ */
+ if (tcbpwm->duty != tcbpwm->period && tcbpwm->duty > 0) {
+ if (index == 0) {
+ if (polarity == PWM_POLARITY_INVERSED)
+ cmr |= ATMEL_TC_ACPA_SET | ATMEL_TC_ACPC_CLEAR;
+ else
+ cmr |= ATMEL_TC_ACPA_CLEAR | ATMEL_TC_ACPC_SET;
+ } else {
+ if (polarity == PWM_POLARITY_INVERSED)
+ cmr |= ATMEL_TC_BCPB_SET | ATMEL_TC_BCPC_CLEAR;
+ else
+ cmr |= ATMEL_TC_BCPB_CLEAR | ATMEL_TC_BCPC_SET;
+ }
+ }
+
+ __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
+
+ if (index == 0)
+ __raw_writel(tcbpwm->duty, regs + ATMEL_TC_REG(group, RA));
+ else
+ __raw_writel(tcbpwm->duty, regs + ATMEL_TC_REG(group, RB));
+
+ __raw_writel(tcbpwm->period, regs + ATMEL_TC_REG(group, RC));
+
+ /* Use software trigger to apply the new setting */
+ __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
+ regs + ATMEL_TC_REG(group, CCR));
+ spin_unlock(&tcbpwmc->lock);
+ return 0;
+}
+
+static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ unsigned group = pwm->hwpwm / 2;
+ unsigned index = pwm->hwpwm % 2;
+ struct atmel_tcb_pwm_device *atcbpwm = NULL;
+ struct atmel_tc *tc = tcbpwmc->tc;
+ int i;
+ int slowclk = 0;
+ unsigned period;
+ unsigned duty;
+ unsigned rate = clk_get_rate(tc->clk[group]);
+ unsigned long long min;
+ unsigned long long max;
+
+ /*
+ * Find best clk divisor:
+ * the smallest divisor which can fulfill the period_ns requirements.
+ */
+ for (i = 0; i < 5; ++i) {
+ if (atmel_tc_divisors[i] == 0) {
+ slowclk = i;
+ continue;
+ }
+ min = div_u64((u64)NSEC_PER_SEC * atmel_tc_divisors[i], rate);
+ max = min << tc->tcb_config->counter_width;
+ if (max >= period_ns)
+ break;
+ }
+
+ /*
+ * If none of the divisors is small enough to represent period_ns,
+ * fall back to the slow clock (32 kHz).
+ */
+ if (i == 5) {
+ i = slowclk;
+ rate = 32768;
+ min = div_u64(NSEC_PER_SEC, rate);
+ max = min << 16;
+
+ /* If the period is still too large, return -ERANGE */
+ if (max < period_ns)
+ return -ERANGE;
+ }
+
+ duty = div_u64(duty_ns, min);
+ period = div_u64(period_ns, min);
+
+ if (index == 0)
+ atcbpwm = tcbpwmc->pwms[pwm->hwpwm + 1];
+ else
+ atcbpwm = tcbpwmc->pwms[pwm->hwpwm - 1];
+
+ /*
+ * PWM devices provided by the TCB driver are grouped in pairs:
+ * - group 0: PWM 0 & 1
+ * - group 1: PWM 2 & 3
+ * - group 2: PWM 4 & 5
+ *
+ * PWM devices in a given group must be configured with the
+ * same period_ns.
+ *
+ * We're checking the period value of the second PWM device
+ * in this group before applying the new config.
+ */
+ if ((atcbpwm && atcbpwm->duty > 0 &&
+ atcbpwm->duty != atcbpwm->period) &&
+ (atcbpwm->div != i || atcbpwm->period != period)) {
+ dev_err(chip->dev,
+ "failed to configure period_ns: PWM group already configured with a different value\n");
+ return -EINVAL;
+ }
+
+ tcbpwm->period = period;
+ tcbpwm->div = i;
+ tcbpwm->duty = duty;
+
+ /* If the PWM is enabled, call enable to apply the new conf */
+ if (test_bit(PWMF_ENABLED, &pwm->flags))
+ atmel_tcb_pwm_enable(chip, pwm);
+
+ return 0;
+}
+
+static const struct pwm_ops atmel_tcb_pwm_ops = {
+ .request = atmel_tcb_pwm_request,
+ .free = atmel_tcb_pwm_free,
+ .config = atmel_tcb_pwm_config,
+ .set_polarity = atmel_tcb_pwm_set_polarity,
+ .enable = atmel_tcb_pwm_enable,
+ .disable = atmel_tcb_pwm_disable,
+};
+
+static int atmel_tcb_pwm_probe(struct platform_device *pdev)
+{
+ struct atmel_tcb_pwm_chip *tcbpwm;
+ struct device_node *np = pdev->dev.of_node;
+ struct atmel_tc *tc;
+ int err;
+ int tcblock;
+
+ err = of_property_read_u32(np, "tc-block", &tcblock);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to get Timer Counter Block number from device tree (error: %d)\n",
+ err);
+ return err;
+ }
+
+ tc = atmel_tc_alloc(tcblock, "tcb-pwm");
+ if (tc == NULL) {
+ dev_err(&pdev->dev, "failed to allocate Timer Counter Block\n");
+ return -ENOMEM;
+ }
+
+ tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
+ if (tcbpwm == NULL) {
+ atmel_tc_free(tc);
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ tcbpwm->chip.dev = &pdev->dev;
+ tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
+ tcbpwm->chip.of_xlate = of_pwm_xlate_with_flags;
+ tcbpwm->chip.of_pwm_n_cells = 3;
+ tcbpwm->chip.base = -1;
+ tcbpwm->chip.npwm = NPWM;
+ tcbpwm->tc = tc;
+
+ spin_lock_init(&tcbpwm->lock);
+
+ err = pwmchip_add(&tcbpwm->chip);
+ if (err < 0) {
+ atmel_tc_free(tc);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, tcbpwm);
+
+ return 0;
+}
+
+static int atmel_tcb_pwm_remove(struct platform_device *pdev)
+{
+ struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ int err;
+
+ err = pwmchip_remove(&tcbpwm->chip);
+ if (err < 0)
+ return err;
+
+ atmel_tc_free(tcbpwm->tc);
+
+ return 0;
+}
+
+static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
+ { .compatible = "atmel,tcb-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
+
+static struct platform_driver atmel_tcb_pwm_driver = {
+ .driver = {
+ .name = "atmel-tcb-pwm",
+ .of_match_table = atmel_tcb_pwm_dt_ids,
+ },
+ .probe = atmel_tcb_pwm_probe,
+ .remove = atmel_tcb_pwm_remove,
+};
+module_platform_driver(atmel_tcb_pwm_driver);
+
+MODULE_AUTHOR("Boris BREZILLON <b.brezillon@overkiz.com>");
+MODULE_DESCRIPTION("Atmel Timer Counter Pulse Width Modulation Driver");
+MODULE_LICENSE("GPL v2");
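
The divisor search in atmel_tcb_pwm_config() picks the smallest prescaler whose full counter range still covers the requested period: for each divisor, one counter tick lasts NSEC_PER_SEC * div / rate nanoseconds and the counter holds 2^counter_width ticks. A stand-alone sketch of the same arithmetic, with an assumed prescaler table and an example clock rate; the real driver takes both from atmel_tc_divisors[] and tc->tcb_config:

#include <stdio.h>

/* Assumed prescaler table (MCK/2, /8, /32, /128); illustration only. */
static const unsigned int divisors[] = { 2, 8, 32, 128 };

static int pick_divisor(unsigned long rate_hz, unsigned int counter_width,
			unsigned long long period_ns)
{
	int i;

	for (i = 0; i < 4; i++) {
		/* shortest representable period: one counter tick */
		unsigned long long min = 1000000000ULL * divisors[i] / rate_hz;
		/* longest: a full counter's worth of ticks */
		unsigned long long max = min << counter_width;

		if (max >= period_ns)
			return i;
	}
	return -1; /* too long: the driver falls back to the 32 kHz slow clock */
}

int main(void)
{
	/* 66 MHz timer clock, 16-bit counter, 1 ms period -> divisor index 0 */
	printf("%d\n", pick_divisor(66000000, 16, 1000000));
	return 0;
}
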
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 71900e8cd3d1..af3ab48cb7ef 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -233,7 +233,6 @@ static int tegra_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
-#ifdef CONFIG_OF
static struct of_device_id tegra_pwm_of_match[] = {
{ .compatible = "nvidia,tegra20-pwm" },
{ .compatible = "nvidia,tegra30-pwm" },
@@ -241,12 +240,11 @@ static struct of_device_id tegra_pwm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_pwm_of_match);
-#endif
static struct platform_driver tegra_pwm_driver = {
.driver = {
.name = "tegra-pwm",
- .of_match_table = of_match_ptr(tegra_pwm_of_match),
+ .of_match_table = tegra_pwm_of_match,
},
.probe = tegra_pwm_probe,
.remove = tegra_pwm_remove,
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 27a67d6b27c1..22e96e2bffd3 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -41,10 +41,17 @@
#define ECCTL2_SYNC_SEL_DISA (BIT(7) | BIT(6))
#define ECCTL2_TSCTR_FREERUN BIT(4)
+struct ecap_context {
+ u32 cap3;
+ u32 cap4;
+ u16 ecctl2;
+};
+
struct ecap_pwm_chip {
struct pwm_chip chip;
unsigned int clk_rate;
void __iomem *mmio_base;
+ struct ecap_context ctx;
};
static inline struct ecap_pwm_chip *to_ecap_pwm_chip(struct pwm_chip *chip)
@@ -288,11 +295,57 @@ static int ecap_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
+void ecap_pwm_save_context(struct ecap_pwm_chip *pc)
+{
+ pm_runtime_get_sync(pc->chip.dev);
+ pc->ctx.ecctl2 = readw(pc->mmio_base + ECCTL2);
+ pc->ctx.cap4 = readl(pc->mmio_base + CAP4);
+ pc->ctx.cap3 = readl(pc->mmio_base + CAP3);
+ pm_runtime_put_sync(pc->chip.dev);
+}
+
+void ecap_pwm_restore_context(struct ecap_pwm_chip *pc)
+{
+ writel(pc->ctx.cap3, pc->mmio_base + CAP3);
+ writel(pc->ctx.cap4, pc->mmio_base + CAP4);
+ writew(pc->ctx.ecctl2, pc->mmio_base + ECCTL2);
+}
+
+static int ecap_pwm_suspend(struct device *dev)
+{
+ struct ecap_pwm_chip *pc = dev_get_drvdata(dev);
+ struct pwm_device *pwm = pc->chip.pwms;
+
+ ecap_pwm_save_context(pc);
+
+ /* Disable explicitly if PWM is running */
+ if (test_bit(PWMF_ENABLED, &pwm->flags))
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int ecap_pwm_resume(struct device *dev)
+{
+ struct ecap_pwm_chip *pc = dev_get_drvdata(dev);
+ struct pwm_device *pwm = pc->chip.pwms;
+
+ /* Enable explicitly if PWM was running */
+ if (test_bit(PWMF_ENABLED, &pwm->flags))
+ pm_runtime_get_sync(dev);
+
+ ecap_pwm_restore_context(pc);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ecap_pwm_pm_ops, ecap_pwm_suspend, ecap_pwm_resume);
+
static struct platform_driver ecap_pwm_driver = {
.driver = {
.name = "ecap",
.owner = THIS_MODULE,
.of_match_table = ecap_of_match,
+ .pm = &ecap_pwm_pm_ops,
},
.probe = ecap_pwm_probe,
.remove = ecap_pwm_remove,
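
SIMPLE_DEV_PM_OPS(), used above to hook the new suspend/resume callbacks into the driver, stands in for an open-coded dev_pm_ops. Roughly, and only when CONFIG_PM_SLEEP is enabled, it amounts to the sketch below, which also routes the freeze/thaw/poweroff/restore slots to the same pair of functions:

/* Approximate expansion of SIMPLE_DEV_PM_OPS(ecap_pwm_pm_ops, ...). */
static const struct dev_pm_ops ecap_pwm_pm_ops = {
	.suspend  = ecap_pwm_suspend,
	.resume   = ecap_pwm_resume,
	.freeze   = ecap_pwm_suspend,
	.thaw     = ecap_pwm_resume,
	.poweroff = ecap_pwm_suspend,
	.restore  = ecap_pwm_resume,
};
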
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 5a1399580533..8b4c86fa99c8 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -113,6 +113,17 @@
#define NUM_PWM_CHANNEL 2 /* EHRPWM channels */
+struct ehrpwm_context {
+ u16 tbctl;
+ u16 tbprd;
+ u16 cmpa;
+ u16 cmpb;
+ u16 aqctla;
+ u16 aqctlb;
+ u16 aqsfrc;
+ u16 aqcsfrc;
+};
+
struct ehrpwm_pwm_chip {
struct pwm_chip chip;
unsigned int clk_rate;
@@ -120,6 +131,7 @@ struct ehrpwm_pwm_chip {
unsigned long period_cycles[NUM_PWM_CHANNEL];
enum pwm_polarity polarity[NUM_PWM_CHANNEL];
struct clk *tbclk;
+ struct ehrpwm_context ctx;
};
static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
@@ -127,6 +139,11 @@ static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
return container_of(chip, struct ehrpwm_pwm_chip, chip);
}
+static u16 ehrpwm_read(void *base, int offset)
+{
+ return readw(base + offset);
+}
+
static void ehrpwm_write(void *base, int offset, unsigned int val)
{
writew(val & 0xFFFF, base + offset);
@@ -318,6 +335,7 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
unsigned short aqcsfrc_val, aqcsfrc_mask;
+ int ret;
/* Leave clock enabled on enabling PWM */
pm_runtime_get_sync(chip->dev);
@@ -341,7 +359,12 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
configure_polarity(pc, pwm->hwpwm);
/* Enable TBCLK before enabling PWM device */
- clk_enable(pc->tbclk);
+ ret = clk_prepare_enable(pc->tbclk);
+ if (ret) {
+ pr_err("Failed to enable TBCLK for %s\n",
+ dev_name(pc->chip.dev));
+ return ret;
+ }
/* Enable time counter for free_run */
ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN);
@@ -372,7 +395,7 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
/* Disabling TBCLK on PWM disable */
- clk_disable(pc->tbclk);
+ clk_disable_unprepare(pc->tbclk);
/* Stop Time base counter */
ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT);
@@ -510,11 +533,77 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
+void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc)
+{
+ pm_runtime_get_sync(pc->chip.dev);
+ pc->ctx.tbctl = ehrpwm_read(pc->mmio_base, TBCTL);
+ pc->ctx.tbprd = ehrpwm_read(pc->mmio_base, TBPRD);
+ pc->ctx.cmpa = ehrpwm_read(pc->mmio_base, CMPA);
+ pc->ctx.cmpb = ehrpwm_read(pc->mmio_base, CMPB);
+ pc->ctx.aqctla = ehrpwm_read(pc->mmio_base, AQCTLA);
+ pc->ctx.aqctlb = ehrpwm_read(pc->mmio_base, AQCTLB);
+ pc->ctx.aqsfrc = ehrpwm_read(pc->mmio_base, AQSFRC);
+ pc->ctx.aqcsfrc = ehrpwm_read(pc->mmio_base, AQCSFRC);
+ pm_runtime_put_sync(pc->chip.dev);
+}
+
+void ehrpwm_pwm_restore_context(struct ehrpwm_pwm_chip *pc)
+{
+ ehrpwm_write(pc->mmio_base, TBPRD, pc->ctx.tbprd);
+ ehrpwm_write(pc->mmio_base, CMPA, pc->ctx.cmpa);
+ ehrpwm_write(pc->mmio_base, CMPB, pc->ctx.cmpb);
+ ehrpwm_write(pc->mmio_base, AQCTLA, pc->ctx.aqctla);
+ ehrpwm_write(pc->mmio_base, AQCTLB, pc->ctx.aqctlb);
+ ehrpwm_write(pc->mmio_base, AQSFRC, pc->ctx.aqsfrc);
+ ehrpwm_write(pc->mmio_base, AQCSFRC, pc->ctx.aqcsfrc);
+ ehrpwm_write(pc->mmio_base, TBCTL, pc->ctx.tbctl);
+}
+
+static int ehrpwm_pwm_suspend(struct device *dev)
+{
+ struct ehrpwm_pwm_chip *pc = dev_get_drvdata(dev);
+ int i;
+
+ ehrpwm_pwm_save_context(pc);
+ for (i = 0; i < pc->chip.npwm; i++) {
+ struct pwm_device *pwm = &pc->chip.pwms[i];
+
+ if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ continue;
+
+ /* Disable explicitly if PWM is running */
+ pm_runtime_put_sync(dev);
+ }
+ return 0;
+}
+
+static int ehrpwm_pwm_resume(struct device *dev)
+{
+ struct ehrpwm_pwm_chip *pc = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < pc->chip.npwm; i++) {
+ struct pwm_device *pwm = &pc->chip.pwms[i];
+
+ if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ continue;
+
+ /* Enable explicitly if PWM was running */
+ pm_runtime_get_sync(dev);
+ }
+ ehrpwm_pwm_restore_context(pc);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ehrpwm_pwm_pm_ops, ehrpwm_pwm_suspend,
+ ehrpwm_pwm_resume);
+
static struct platform_driver ehrpwm_pwm_driver = {
.driver = {
.name = "ehrpwm",
.owner = THIS_MODULE,
.of_match_table = ehrpwm_of_match,
+ .pm = &ehrpwm_pwm_pm_ops,
},
.probe = ehrpwm_pwm_probe,
.remove = ehrpwm_pwm_remove,
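
The clk_enable() to clk_prepare_enable() switch above matters because clk_prepare() may sleep while clk_enable() must not; the combined helpers keep the two phases balanced. A small sketch of the pairing the patch adopts:

#include <linux/clk.h>

/* Illustration only: prepare+enable on the way up, disable+unprepare down. */
static int example_clk_on(struct clk *clk)
{
	return clk_prepare_enable(clk);	/* clk_prepare() + clk_enable() */
}

static void example_clk_off(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* clk_disable() + clk_unprepare() */
}
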
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 9dfa0f3eca30..83e25d45d640 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -300,6 +300,7 @@ static int twl_pwmled_probe(struct platform_device *pdev)
twl->chip.dev = &pdev->dev;
twl->chip.base = -1;
+ twl->chip.can_sleep = true;
mutex_init(&twl->mutex);
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index e65db95d5e59..bf3fda294223 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -200,8 +200,7 @@ out:
static void twl4030_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
- chip);
+ struct twl_pwm_chip *twl = to_twl(chip);
int ret;
u8 val, mask;
@@ -231,8 +230,7 @@ out:
static int twl6030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
- chip);
+ struct twl_pwm_chip *twl = to_twl(chip);
int ret;
u8 val;
@@ -255,8 +253,7 @@ out:
static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
- chip);
+ struct twl_pwm_chip *twl = to_twl(chip);
int ret;
u8 val;
@@ -315,6 +312,7 @@ static int twl_pwm_probe(struct platform_device *pdev)
twl->chip.dev = &pdev->dev;
twl->chip.base = -1;
twl->chip.npwm = 2;
+ twl->chip.can_sleep = true;
mutex_init(&twl->mutex);
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index f9de9b28e46e..69effd19afc7 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -36,6 +36,25 @@
*/
#define VT8500_NR_PWMS 2
+#define REG_CTRL(pwm) (((pwm) << 4) + 0x00)
+#define REG_SCALAR(pwm) (((pwm) << 4) + 0x04)
+#define REG_PERIOD(pwm) (((pwm) << 4) + 0x08)
+#define REG_DUTY(pwm) (((pwm) << 4) + 0x0C)
+#define REG_STATUS 0x40
+
+#define CTRL_ENABLE BIT(0)
+#define CTRL_INVERT BIT(1)
+#define CTRL_AUTOLOAD BIT(2)
+#define CTRL_STOP_IMM BIT(3)
+#define CTRL_LOAD_PRESCALE BIT(4)
+#define CTRL_LOAD_PERIOD BIT(5)
+
+#define STATUS_CTRL_UPDATE BIT(0)
+#define STATUS_SCALAR_UPDATE BIT(1)
+#define STATUS_PERIOD_UPDATE BIT(2)
+#define STATUS_DUTY_UPDATE BIT(3)
+#define STATUS_ALL_UPDATE 0x0F
+
struct vt8500_chip {
struct pwm_chip chip;
void __iomem *base;
@@ -45,15 +64,17 @@ struct vt8500_chip {
#define to_vt8500_chip(chip) container_of(chip, struct vt8500_chip, chip)
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
-static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask)
+static inline void pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 bitmask)
{
int loops = msecs_to_loops(10);
- while ((readb(reg) & bitmask) && --loops)
+ u32 mask = bitmask << (nr << 8);
+
+ while ((readl(vt8500->base + REG_STATUS) & mask) && --loops)
cpu_relax();
if (unlikely(!loops))
- pr_warn("Waiting for status bits 0x%x to clear timed out\n",
- bitmask);
+ dev_warn(vt8500->chip.dev, "Waiting for status bits 0x%x to clear timed out\n",
+ mask);
}
static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -63,6 +84,7 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long long c;
unsigned long period_cycles, prescale, pv, dc;
int err;
+ u32 val;
err = clk_enable(vt8500->clk);
if (err < 0) {
@@ -91,14 +113,19 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
do_div(c, period_ns);
dc = c;
- pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 1));
- writel(prescale, vt8500->base + 0x4 + (pwm->hwpwm << 4));
+ writel(prescale, vt8500->base + REG_SCALAR(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_SCALAR_UPDATE);
- pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 2));
- writel(pv, vt8500->base + 0x8 + (pwm->hwpwm << 4));
+ writel(pv, vt8500->base + REG_PERIOD(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_PERIOD_UPDATE);
- pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 3));
- writel(dc, vt8500->base + 0xc + (pwm->hwpwm << 4));
+ writel(dc, vt8500->base + REG_DUTY(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_DUTY_UPDATE);
+
+ val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
+ val |= CTRL_AUTOLOAD;
+ writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
clk_disable(vt8500->clk);
return 0;
@@ -106,8 +133,9 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- int err;
struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
+ int err;
+ u32 val;
err = clk_enable(vt8500->clk);
if (err < 0) {
@@ -115,25 +143,52 @@ static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return err;
}
- pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0));
- writel(5, vt8500->base + (pwm->hwpwm << 4));
+ val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
+ val |= CTRL_ENABLE;
+ writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+
return 0;
}
static void vt8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
+ u32 val;
- pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0));
- writel(0, vt8500->base + (pwm->hwpwm << 4));
+ val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
+ val &= ~CTRL_ENABLE;
+ writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
clk_disable(vt8500->clk);
}
+static int vt8500_pwm_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
+ u32 val;
+
+ val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
+
+ if (polarity == PWM_POLARITY_INVERSED)
+ val |= CTRL_INVERT;
+ else
+ val &= ~CTRL_INVERT;
+
+ writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+
+ return 0;
+}
+
static struct pwm_ops vt8500_pwm_ops = {
.enable = vt8500_pwm_enable,
.disable = vt8500_pwm_disable,
.config = vt8500_pwm_config,
+ .set_polarity = vt8500_pwm_set_polarity,
.owner = THIS_MODULE,
};
@@ -163,6 +218,8 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
chip->chip.dev = &pdev->dev;
chip->chip.ops = &vt8500_pwm_ops;
+ chip->chip.of_xlate = of_pwm_xlate_with_flags;
+ chip->chip.of_pwm_n_cells = 3;
chip->chip.base = -1;
chip->chip.npwm = VT8500_NR_PWMS;
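
With the register map spelled out above, each vt8500 PWM owns a 16-byte window (REG_CTRL/SCALAR/PERIOD/DUTY at offsets 0x0/0x4/0x8/0xC) and every write is followed by a poll of the matching STATUS_*_UPDATE bit until the hardware has latched the value. A condensed sketch of that write-then-wait pattern as a hypothetical helper; the driver open-codes it at each call site:

/* Hypothetical helper; mirrors the writel() + pwm_busy_wait() pairs above. */
static void vt8500_pwm_write(struct vt8500_chip *vt8500, unsigned int hwpwm,
			     unsigned long offset, u32 val, u8 update_bit)
{
	writel(val, vt8500->base + offset);
	pwm_busy_wait(vt8500, hwpwm, update_bit);
}

/* usage, e.g.:
 *	vt8500_pwm_write(vt8500, pwm->hwpwm, REG_PERIOD(pwm->hwpwm), pv,
 *			 STATUS_PERIOD_UPDATE);
 */
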
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 0b24108d1e1b..cc1f7bf53fd0 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -12,8 +12,8 @@ config OMAP_REMOTEPROC
depends on HAS_DMA
depends on ARCH_OMAP4
depends on OMAP_IOMMU
+ depends on OMAP_MBOX_FWK
select REMOTEPROC
- select OMAP_MBOX_FWK
select RPMSG
help
Say y here to support OMAP's remote processors (dual M3
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index dd3bfaf1ad40..29387df4bfc9 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -199,11 +199,6 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
/* actual size of vring (in bytes) */
size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
- if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) {
- dev_err(dev, "idr_pre_get failed\n");
- return -ENOMEM;
- }
-
/*
* Allocate non-cacheable memory for the vring. In the future
* this call will also configure the IOMMU for us
@@ -221,12 +216,13 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
* TODO: let the rproc know the notifyid of this vring
* TODO: support predefined notifyids (via resource table)
*/
- ret = idr_get_new(&rproc->notifyids, rvring, &notifyid);
+ ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
if (ret) {
- dev_err(dev, "idr_get_new failed: %d\n", ret);
+ dev_err(dev, "idr_alloc failed: %d\n", ret);
dma_free_coherent(dev->parent, size, va, dma);
return ret;
}
+ notifyid = ret;
/* Store largest notifyid */
rproc->max_notifyid = max(rproc->max_notifyid, notifyid);
@@ -1180,7 +1176,6 @@ static void rproc_type_release(struct device *dev)
rproc_delete_debug_dir(rproc);
- idr_remove_all(&rproc->notifyids);
idr_destroy(&rproc->notifyids);
if (rproc->index >= 0)
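
The conversion above replaces the two-step idr_pre_get()/idr_get_new() protocol with a single idr_alloc() call that takes the GFP flags directly and returns either the allocated ID or a negative errno; the separate preallocation step goes away and, as the second hunk shows, idr_destroy() alone is enough for teardown. A minimal sketch of the new pattern, outside any particular driver:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(example_ids);

static int example_track(void *obj)
{
	/* start = 0, end = 0: any non-negative ID, no upper bound */
	int id = idr_alloc(&example_ids, obj, 0, 0, GFP_KERNEL);

	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */

	return id;		/* e.g. stored as the vring's notifyid */
}
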
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 9e198e590675..afed9b7731c4 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -222,7 +222,7 @@ static void rproc_virtio_finalize_features(struct virtio_device *vdev)
rvdev->gfeatures = vdev->features[0];
}
-static struct virtio_config_ops rproc_virtio_config_ops = {
+static const struct virtio_config_ops rproc_virtio_config_ops = {
.get_features = rproc_virtio_get_features,
.finalize_features = rproc_virtio_finalize_features,
.find_vqs = rproc_virtio_find_vqs,
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index f1e323924f12..a59684b5fc68 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -213,13 +213,10 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
void *priv, u32 addr)
{
- int err, tmpaddr, request;
+ int id_min, id_max, id;
struct rpmsg_endpoint *ept;
struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;
- if (!idr_pre_get(&vrp->endpoints, GFP_KERNEL))
- return NULL;
-
ept = kzalloc(sizeof(*ept), GFP_KERNEL);
if (!ept) {
dev_err(dev, "failed to kzalloc a new ept\n");
@@ -234,31 +231,28 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
ept->priv = priv;
/* do we need to allocate a local address ? */
- request = addr == RPMSG_ADDR_ANY ? RPMSG_RESERVED_ADDRESSES : addr;
+ if (addr == RPMSG_ADDR_ANY) {
+ id_min = RPMSG_RESERVED_ADDRESSES;
+ id_max = 0;
+ } else {
+ id_min = addr;
+ id_max = addr + 1;
+ }
mutex_lock(&vrp->endpoints_lock);
/* bind the endpoint to an rpmsg address (and allocate one if needed) */
- err = idr_get_new_above(&vrp->endpoints, ept, request, &tmpaddr);
- if (err) {
- dev_err(dev, "idr_get_new_above failed: %d\n", err);
+ id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(dev, "idr_alloc failed: %d\n", id);
goto free_ept;
}
-
- /* make sure the user's address request is fulfilled, if relevant */
- if (addr != RPMSG_ADDR_ANY && tmpaddr != addr) {
- dev_err(dev, "address 0x%x already in use\n", addr);
- goto rem_idr;
- }
-
- ept->addr = tmpaddr;
+ ept->addr = id;
mutex_unlock(&vrp->endpoints_lock);
return ept;
-rem_idr:
- idr_remove(&vrp->endpoints, request);
free_ept:
mutex_unlock(&vrp->endpoints_lock);
kref_put(&ept->refcount, __ept_release);
@@ -839,7 +833,7 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
/* farewell, ept, we don't need you anymore */
kref_put(&ept->refcount, __ept_release);
} else
- dev_warn(dev, "msg received with no recepient\n");
+ dev_warn(dev, "msg received with no recipient\n");
/* publish the real size of the buffer */
sg_init_one(&sg, msg, RPMSG_BUF_SIZE);
@@ -1036,7 +1030,6 @@ static void rpmsg_remove(struct virtio_device *vdev)
if (vrp->ns_ept)
__rpmsg_destroy_ept(vrp, vrp->ns_ept);
- idr_remove_all(&vrp->endpoints);
idr_destroy(&vrp->endpoints);
vdev->config->del_vqs(vrp->vdev);
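
The same idr_alloc() API also absorbs the old "allocate above a floor, then verify" logic for rpmsg endpoint addresses: the [id_min, id_max) pair either pins the allocation to exactly the requested address or, with id_max == 0, hands out any free address at or above RPMSG_RESERVED_ADDRESSES. A small illustration of those range semantics with a hypothetical helper:

#include <linux/gfp.h>
#include <linux/idr.h>

/* Illustration only: 'requested' < 0 means "any address above 'floor'". */
static int example_bind_addr(struct idr *idr, void *ept, int requested,
			     int floor)
{
	int start = requested < 0 ? floor : requested;
	int end = requested < 0 ? 0 : requested + 1;	/* end is exclusive */

	/* returns 'requested' itself, a dynamic ID >= floor, or -ENOSPC */
	return idr_alloc(idr, ept, start, end, GFP_KERNEL);
}
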
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index aa64d5d3fb20..79fbe3832dfc 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -204,6 +204,12 @@ config RTC_DRV_DS3232
This driver can also be built as a module. If so, the module
will be called rtc-ds3232.
+config RTC_DRV_LP8788
+ tristate "TI LP8788 RTC driver"
+ depends on MFD_LP8788
+ help
+ Say Y to enable support for the LP8788 RTC/ALARM driver.
+
config RTC_DRV_MAX6900
tristate "Maxim MAX6900"
help
@@ -243,6 +249,26 @@ config RTC_DRV_MAX8998
This driver can also be built as a module. If so, the module
will be called rtc-max8998.
+config RTC_DRV_MAX8997
+ tristate "Maxim MAX8997"
+ depends on MFD_MAX8997
+ help
+ If you say yes here you will get support for the
+ RTC of Maxim MAX8997 PMIC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-max8997.
+
+config RTC_DRV_MAX77686
+ tristate "Maxim MAX77686"
+ depends on MFD_MAX77686
+ help
+ If you say yes here you will get support for the
+ RTC of Maxim MAX77686 PMIC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-max77686.
+
config RTC_DRV_RS5C372
tristate "Ricoh R2025S/D, RS5C372A/B, RV5C386, RV5C387A"
help
@@ -279,6 +305,16 @@ config RTC_DRV_X1205
This driver can also be built as a module. If so, the module
will be called rtc-x1205.
+config RTC_DRV_PALMAS
+ tristate "TI Palmas RTC driver"
+ depends on MFD_PALMAS
+ help
+ If you say yes here you get support for the RTC of TI Palmas series PMIC
+ chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-palmas.
+
config RTC_DRV_PCF8523
tristate "NXP PCF8523"
help
@@ -380,6 +416,14 @@ config RTC_DRV_TPS65910
This driver can also be built as a module. If so, the module
will be called rtc-tps65910.
+config RTC_DRV_TPS80031
+ tristate "TI TPS80031/TPS80032 RTC driver"
+ depends on MFD_TPS80031
+ help
+ TI Power Management ICs TPS80031/TPS80032 provide RTC functionality
+ along with an alarm. This driver adds support for the RTC module
+ of the TPS80031/TPS80032.
+
config RTC_DRV_RC5T583
tristate "RICOH 5T583 RTC driver"
depends on MFD_RC5T583
@@ -537,6 +581,14 @@ config RTC_DRV_PCF2123
This driver can also be built as a module. If so, the module
will be called rtc-pcf2123.
+config RTC_DRV_RX4581
+ tristate "Epson RX-4581"
+ help
+ If you say yes here you will get support for the Epson RX-4581.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rx4581.
+
endif # SPI_MASTER
comment "Platform RTC drivers"
@@ -1033,7 +1085,7 @@ config RTC_DRV_TX4939
config RTC_DRV_MV
tristate "Marvell SoC RTC"
- depends on ARCH_KIRKWOOD || ARCH_DOVE
+ depends on ARCH_KIRKWOOD || ARCH_DOVE || ARCH_MVEBU
help
If you say yes here you will get support for the in-chip RTC
that can be found in some of Marvell's SoC devices, such as
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 60c0414d67fa..c33f86f1a69b 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
+obj-$(CONFIG_RTC_DRV_LP8788) += rtc-lp8788.o
obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o
obj-$(CONFIG_RTC_DRV_LOONGSON1) += rtc-ls1x.o
obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
@@ -71,13 +72,16 @@ obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX8907) += rtc-max8907.o
obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o
obj-$(CONFIG_RTC_DRV_MAX8998) += rtc-max8998.o
+obj-$(CONFIG_RTC_DRV_MAX8997) += rtc-max8997.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
+obj-$(CONFIG_RTC_DRV_MAX77686) += rtc-max77686.o
obj-$(CONFIG_RTC_DRV_MC13XXX) += rtc-mc13xxx.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
+obj-$(CONFIG_RTC_DRV_PALMAS) += rtc-palmas.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
@@ -97,6 +101,7 @@ obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o
+obj-$(CONFIG_RTC_DRV_RX4581) += rtc-rx4581.o
obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o
obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
@@ -115,6 +120,7 @@ obj-$(CONFIG_RTC_DRV_TILE) += rtc-tile.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o
+obj-$(CONFIG_RTC_DRV_TPS80031) += rtc-tps80031.o
obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 26388f182594..9b742d3ffb94 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/kdev_t.h>
@@ -261,7 +263,7 @@ static int __init rtc_init(void)
{
rtc_class = class_create(THIS_MODULE, "rtc");
if (IS_ERR(rtc_class)) {
- printk(KERN_ERR "%s: couldn't create class\n", __FILE__);
+ pr_err("couldn't create class\n");
return PTR_ERR(rtc_class);
}
rtc_class->suspend = rtc_suspend;
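
The pr_fmt() definition added above is what lets the __FILE__ argument be dropped from the error message: every pr_*() call in the file is expanded with the module name already prefixed. A minimal sketch of the mechanism:

/* Must be defined before the first include that pulls in <linux/printk.h>. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static void example(void)
{
	/* expands to printk(KERN_ERR KBUILD_MODNAME ": couldn't create class\n") */
	pr_err("couldn't create class\n");
}
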
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index b6469e2cae89..434ebc3a99dc 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -86,7 +86,7 @@ static int at91_rtc_readtime(struct device *dev, struct rtc_time *tm)
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year = tm->tm_year - 1900;
- pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
+ dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -100,7 +100,7 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
{
unsigned long cr;
- pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
+ dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -145,7 +145,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
? 1 : 0;
- pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
+ dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -183,7 +183,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
}
- pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
+ dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec);
@@ -192,7 +192,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- pr_debug("%s(): cmd=%08x\n", __func__, enabled);
+ dev_dbg(dev, "%s(): cmd=%08x\n", __func__, enabled);
if (enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
@@ -240,7 +240,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
rtc_update_irq(rtc, 1, events);
- pr_debug("%s(): num=%ld, events=0x%02lx\n", __func__,
+ dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", __func__,
events >> 8, events & 0x000000FF);
return IRQ_HANDLED;
@@ -296,8 +296,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
IRQF_SHARED,
"at91_rtc", pdev);
if (ret) {
- printk(KERN_ERR "at91_rtc: IRQ %d already in use.\n",
- irq);
+ dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
return ret;
}
@@ -315,7 +314,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rtc);
- printk(KERN_INFO "AT91 Real Time Clock driver.\n");
+ dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
return 0;
}
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 16630aa87f45..af97c94e8a3a 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -706,7 +706,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
rtc_cmos_int_handler = hpet_rtc_interrupt;
err = hpet_register_irq_handler(cmos_interrupt);
if (err != 0) {
- printk(KERN_WARNING "hpet_register_irq_handler "
+ dev_warn(dev, "hpet_register_irq_handler "
" failed in rtc_init().");
goto cleanup1;
}
@@ -731,8 +731,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup2;
}
- pr_info("%s: %s%s, %zd bytes nvram%s\n",
- dev_name(&cmos_rtc.rtc->dev),
+ dev_info(dev, "%s%s, %zd bytes nvram%s\n",
!is_valid_irq(rtc_irq) ? "no alarms" :
cmos_rtc.mon_alrm ? "alarms up to one year" :
cmos_rtc.day_alrm ? "alarms up to one month" :
@@ -820,8 +819,7 @@ static int cmos_suspend(struct device *dev)
enable_irq_wake(cmos->irq);
}
- pr_debug("%s: suspend%s, ctrl %02x\n",
- dev_name(&cmos_rtc.rtc->dev),
+ dev_dbg(dev, "suspend%s, ctrl %02x\n",
(tmp & RTC_AIE) ? ", alarm may wake" : "",
tmp);
@@ -876,9 +874,7 @@ static int cmos_resume(struct device *dev)
spin_unlock_irq(&rtc_lock);
}
- pr_debug("%s: resume, ctrl %02x\n",
- dev_name(&cmos_rtc.rtc->dev),
- tmp);
+ dev_dbg(dev, "resume, ctrl %02x\n", tmp);
return 0;
}
@@ -1098,7 +1094,6 @@ static __init void cmos_of_init(struct platform_device *pdev)
}
#else
static inline void cmos_of_init(struct platform_device *pdev) {}
-#define of_cmos_match NULL
#endif
/*----------------------------------------------------------------*/
@@ -1140,7 +1135,7 @@ static struct platform_driver cmos_platform_driver = {
#ifdef CONFIG_PM
.pm = &cmos_pm_ops,
#endif
- .of_match_table = of_cmos_match,
+ .of_match_table = of_match_ptr(of_cmos_match),
}
};
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index c8115b83e5ab..2d28ec1aa1cd 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -157,7 +157,6 @@ static int __exit coh901331_remove(struct platform_device *pdev)
if (rtap) {
rtc_device_unregister(rtap->rtc);
clk_unprepare(rtap->clk);
- clk_put(rtap->clk);
platform_set_drvdata(pdev, NULL);
}
@@ -196,7 +195,7 @@ static int __init coh901331_probe(struct platform_device *pdev)
"RTC COH 901 331 Alarm", rtap))
return -EIO;
- rtap->clk = clk_get(&pdev->dev, NULL);
+ rtap->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rtap->clk)) {
ret = PTR_ERR(rtap->clk);
dev_err(&pdev->dev, "could not get clock\n");
@@ -207,7 +206,7 @@ static int __init coh901331_probe(struct platform_device *pdev)
ret = clk_prepare_enable(rtap->clk);
if (ret) {
dev_err(&pdev->dev, "could not enable clock\n");
- goto out_no_clk_prepenable;
+ return ret;
}
clk_disable(rtap->clk);
@@ -224,8 +223,6 @@ static int __init coh901331_probe(struct platform_device *pdev)
out_no_rtc:
platform_set_drvdata(pdev, NULL);
clk_unprepare(rtap->clk);
- out_no_clk_prepenable:
- clk_put(rtap->clk);
return ret;
}
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 60b826e520e2..0dde688ca09b 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -240,9 +240,10 @@ static int da9052_rtc_probe(struct platform_device *pdev)
rtc->da9052 = dev_get_drvdata(pdev->dev.parent);
platform_set_drvdata(pdev, rtc);
rtc->irq = platform_get_irq_byname(pdev, "ALM");
- ret = request_threaded_irq(rtc->irq, NULL, da9052_rtc_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "ALM", rtc);
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ da9052_rtc_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "ALM", rtc);
if (ret != 0) {
rtc_err(rtc->da9052, "irq registration failed: %d\n", ret);
return ret;
@@ -250,16 +251,10 @@ static int da9052_rtc_probe(struct platform_device *pdev)
rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
&da9052_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc->rtc)) {
- ret = PTR_ERR(rtc->rtc);
- goto err_free_irq;
- }
+ if (IS_ERR(rtc->rtc))
+ return PTR_ERR(rtc->rtc);
return 0;
-
-err_free_irq:
- free_irq(rtc->irq, rtc);
- return ret;
}
static int da9052_rtc_remove(struct platform_device *pdev)
@@ -267,7 +262,6 @@ static int da9052_rtc_remove(struct platform_device *pdev)
struct da9052_rtc *rtc = pdev->dev.platform_data;
rtc_device_unregister(rtc->rtc);
- free_irq(rtc->irq, rtc);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index 5f7982f7c1b5..56b73089bb29 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -506,19 +506,19 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
davinci_rtc->pbase = res->start;
davinci_rtc->base_size = resource_size(res);
- mem = request_mem_region(davinci_rtc->pbase, davinci_rtc->base_size,
- pdev->name);
+ mem = devm_request_mem_region(dev, davinci_rtc->pbase,
+ davinci_rtc->base_size, pdev->name);
if (!mem) {
dev_err(dev, "RTC registers at %08x are not free\n",
davinci_rtc->pbase);
return -EBUSY;
}
- davinci_rtc->base = ioremap(davinci_rtc->pbase, davinci_rtc->base_size);
+ davinci_rtc->base = devm_ioremap(dev, davinci_rtc->pbase,
+ davinci_rtc->base_size);
if (!davinci_rtc->base) {
dev_err(dev, "unable to ioremap MEM resource\n");
- ret = -ENOMEM;
- goto fail2;
+ return -ENOMEM;
}
platform_set_drvdata(pdev, davinci_rtc);
@@ -529,7 +529,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
ret = PTR_ERR(davinci_rtc->rtc);
dev_err(dev, "unable to register RTC device, err %d\n",
ret);
- goto fail3;
+ goto fail1;
}
rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG);
@@ -539,11 +539,11 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CTRL);
rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CCTRL);
- ret = request_irq(davinci_rtc->irq, davinci_rtc_interrupt,
+ ret = devm_request_irq(dev, davinci_rtc->irq, davinci_rtc_interrupt,
0, "davinci_rtc", davinci_rtc);
if (ret < 0) {
dev_err(dev, "unable to register davinci RTC interrupt\n");
- goto fail4;
+ goto fail2;
}
/* Enable interrupts */
@@ -557,13 +557,10 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
return 0;
-fail4:
+fail2:
rtc_device_unregister(davinci_rtc->rtc);
-fail3:
+fail1:
platform_set_drvdata(pdev, NULL);
- iounmap(davinci_rtc->base);
-fail2:
- release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
return ret;
}
@@ -575,13 +572,8 @@ static int davinci_rtc_remove(struct platform_device *pdev)
rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);
- free_irq(davinci_rtc->irq, davinci_rtc);
-
rtc_device_unregister(davinci_rtc->rtc);
- iounmap(davinci_rtc->base);
- release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
-
platform_set_drvdata(pdev, NULL);
return 0;
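
The rtc-coh901331, rtc-da9052, rtc-davinci and rtc-imxdi changes above are all the same conversion: resources taken through the devm_* helpers are tied to the device and released automatically on probe failure or driver unbind, which is why the matching release_mem_region()/iounmap()/free_irq()/clk_put() calls and most error-path labels can be deleted. A compressed sketch of the resulting probe shape, with hypothetical names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);
	void __iomem *base;
	struct clk *clk;

	if (!res || irq < 0)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), pdev->name))
		return -EBUSY;

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* no iounmap()/release_mem_region()/free_irq()/clk_put() needed */
	return devm_request_irq(&pdev->dev, irq, example_irq, 0,
				pdev->name, NULL);
}
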
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 9a86b4bd8699..d04939369251 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched.h>
@@ -462,7 +464,7 @@ void rtc_dev_prepare(struct rtc_device *rtc)
return;
if (rtc->id >= RTC_DEV_MAX) {
- pr_debug("%s: too many RTC devices\n", rtc->name);
+ dev_dbg(&rtc->dev, "%s: too many RTC devices\n", rtc->name);
return;
}
@@ -480,10 +482,10 @@ void rtc_dev_prepare(struct rtc_device *rtc)
void rtc_dev_add_device(struct rtc_device *rtc)
{
if (cdev_add(&rtc->char_dev, rtc->dev.devt, 1))
- printk(KERN_WARNING "%s: failed to add char device %d:%d\n",
+ dev_warn(&rtc->dev, "%s: failed to add char device %d:%d\n",
rtc->name, MAJOR(rtc_devt), rtc->id);
else
- pr_debug("%s: dev (%d:%d)\n", rtc->name,
+ dev_dbg(&rtc->dev, "%s: dev (%d:%d)\n", rtc->name,
MAJOR(rtc_devt), rtc->id);
}
@@ -499,8 +501,7 @@ void __init rtc_dev_init(void)
err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
if (err < 0)
- printk(KERN_ERR "%s: failed to allocate char dev region\n",
- __FILE__);
+ pr_err("failed to allocate char dev region\n");
}
void __exit rtc_dev_exit(void)
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index d578773f5ce2..b05a6dc96405 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -635,9 +635,7 @@ static int ds1305_probe(struct spi_device *spi)
goto fail0;
}
- dev_dbg(&spi->dev, "ctrl %s: %02x %02x %02x\n",
- "read", ds1305->ctrl[0],
- ds1305->ctrl[1], ds1305->ctrl[2]);
+ dev_dbg(&spi->dev, "ctrl %s: %3ph\n", "read", ds1305->ctrl);
/* Sanity check register values ... partially compensating for the
* fact that SPI has no device handshake. A pullup on MISO would
@@ -723,9 +721,7 @@ static int ds1305_probe(struct spi_device *spi)
goto fail0;
}
- dev_dbg(&spi->dev, "ctrl %s: %02x %02x %02x\n",
- "write", ds1305->ctrl[0],
- ds1305->ctrl[1], ds1305->ctrl[2]);
+ dev_dbg(&spi->dev, "ctrl %s: %3ph\n", "write", ds1305->ctrl);
}
/* see if non-Linux software set up AM/PM mode */
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index e0d0ba4de03f..970a236b147a 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -322,12 +322,7 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
return -EIO;
}
- dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
- "read",
- ds1307->regs[0], ds1307->regs[1],
- ds1307->regs[2], ds1307->regs[3],
- ds1307->regs[4], ds1307->regs[5],
- ds1307->regs[6]);
+ dev_dbg(dev, "%s: %7ph\n", "read", ds1307->regs);
t->tm_sec = bcd2bin(ds1307->regs[DS1307_REG_SECS] & 0x7f);
t->tm_min = bcd2bin(ds1307->regs[DS1307_REG_MIN] & 0x7f);
@@ -398,9 +393,7 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
break;
}
- dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
- "write", buf[0], buf[1], buf[2], buf[3],
- buf[4], buf[5], buf[6]);
+ dev_dbg(dev, "%s: %7ph\n", "write", buf);
result = ds1307->write_block_data(ds1307->client,
ds1307->offset, 7, buf);
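
The dev_dbg() rewrites in rtc-ds1305 and rtc-ds1307 (and rtc-fm3130 further down) rely on the kernel's %ph printk extension: the pointer argument is dumped as space-separated hex bytes, and the field width (%3ph, %7ph, %15ph here) gives the number of bytes to print. A one-function sketch:

#include <linux/device.h>

static void example_dump(struct device *dev, const u8 regs[7])
{
	/* prints something like "read: 12 34 56 78 9a bc de" */
	dev_dbg(dev, "%s: %7ph\n", "read", regs);
}
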
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index 5ea9df7c8c31..b04fc4272fb3 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -70,7 +70,7 @@ static int ds2404_gpio_map(struct ds2404 *chip, struct platform_device *pdev,
for (i = 0; i < ARRAY_SIZE(ds2404_gpio); i++) {
err = gpio_request(ds2404_gpio[i].gpio, ds2404_gpio[i].name);
if (err) {
- printk(KERN_ERR "error mapping gpio %s: %d\n",
+ dev_err(&pdev->dev, "error mapping gpio %s: %d\n",
ds2404_gpio[i].name, err);
goto err_request;
}
@@ -177,7 +177,7 @@ static void ds2404_write_memory(struct device *dev, u16 offset,
for (i = 0; i < length; i++) {
if (out[i] != ds2404_read_byte(dev)) {
- printk(KERN_ERR "read invalid data\n");
+ dev_err(dev, "read invalid data\n");
return;
}
}
@@ -283,19 +283,7 @@ static struct platform_driver rtc_device_driver = {
.owner = THIS_MODULE,
},
};
-
-static __init int ds2404_init(void)
-{
- return platform_driver_register(&rtc_device_driver);
-}
-
-static __exit void ds2404_exit(void)
-{
- platform_driver_unregister(&rtc_device_driver);
-}
-
-module_init(ds2404_init);
-module_exit(ds2404_exit);
+module_platform_driver(rtc_device_driver);
MODULE_DESCRIPTION("DS2404 RTC");
MODULE_AUTHOR("Sven Schnelle");
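
module_platform_driver(), which replaces the hand-rolled init/exit pair above, generates exactly that boilerplate. Roughly, the macro stands in for:

static int __init rtc_device_driver_init(void)
{
	return platform_driver_register(&rtc_device_driver);
}
module_init(rtc_device_driver_init);

static void __exit rtc_device_driver_exit(void)
{
	platform_driver_unregister(&rtc_device_driver);
}
module_exit(rtc_device_driver_exit);
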
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index c9f890b088da..1a0c37c9152b 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -13,6 +13,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/time.h>
@@ -47,7 +49,7 @@ compute_wday(efi_time_t *eft)
int ndays = 0;
if (eft->year < 1998) {
- printk(KERN_ERR "efirtc: EFI year < 1998, invalid date\n");
+ pr_err("EFI year < 1998, invalid date\n");
return -1;
}
@@ -70,7 +72,7 @@ convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft)
eft->day = wtime->tm_mday;
eft->hour = wtime->tm_hour;
eft->minute = wtime->tm_min;
- eft->second = wtime->tm_sec;
+ eft->second = wtime->tm_sec;
eft->nanosecond = 0;
eft->daylight = wtime->tm_isdst ? EFI_ISDST : 0;
eft->timezone = EFI_UNSPECIFIED_TIMEZONE;
@@ -142,7 +144,7 @@ static int efi_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
*/
status = efi.set_wakeup_time((efi_bool_t)wkalrm->enabled, &eft);
- printk(KERN_WARNING "write status is %d\n", (int)status);
+ dev_warn(dev, "write status is %d\n", (int)status);
return status == EFI_SUCCESS ? 0 : -EINVAL;
}
@@ -157,7 +159,7 @@ static int efi_read_time(struct device *dev, struct rtc_time *tm)
if (status != EFI_SUCCESS) {
/* should never happen */
- printk(KERN_ERR "efitime: can't read time\n");
+ dev_err(dev, "can't read time\n");
return -EINVAL;
}
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index 04e93c6597f8..bff3cdc5140e 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -116,17 +116,7 @@ static int fm3130_get_time(struct device *dev, struct rtc_time *t)
fm3130_rtc_mode(dev, FM3130_MODE_NORMAL);
- dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x %02x"
- "%02x %02x %02x %02x %02x %02x %02x\n",
- "read",
- fm3130->regs[0], fm3130->regs[1],
- fm3130->regs[2], fm3130->regs[3],
- fm3130->regs[4], fm3130->regs[5],
- fm3130->regs[6], fm3130->regs[7],
- fm3130->regs[8], fm3130->regs[9],
- fm3130->regs[0xa], fm3130->regs[0xb],
- fm3130->regs[0xc], fm3130->regs[0xd],
- fm3130->regs[0xe]);
+ dev_dbg(dev, "%s: %15ph\n", "read", fm3130->regs);
t->tm_sec = bcd2bin(fm3130->regs[FM3130_RTC_SECONDS] & 0x7f);
t->tm_min = bcd2bin(fm3130->regs[FM3130_RTC_MINUTES] & 0x7f);
@@ -175,12 +165,7 @@ static int fm3130_set_time(struct device *dev, struct rtc_time *t)
tmp = t->tm_year - 100;
buf[FM3130_RTC_YEARS] = bin2bcd(tmp);
- dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x"
- "%02x %02x %02x %02x %02x %02x %02x %02x\n",
- "write", buf[0], buf[1], buf[2], buf[3],
- buf[4], buf[5], buf[6], buf[7],
- buf[8], buf[9], buf[0xa], buf[0xb],
- buf[0xc], buf[0xd], buf[0xe]);
+ dev_dbg(dev, "%s: %15ph\n", "write", buf);
fm3130_rtc_mode(dev, FM3130_MODE_WRITE);
@@ -517,18 +502,8 @@ bad_alarm:
bad_clock:
if (!fm3130->data_valid || !fm3130->alarm_valid)
- dev_dbg(&client->dev,
- "%s: %02x %02x %02x %02x %02x %02x %02x %02x"
- "%02x %02x %02x %02x %02x %02x %02x\n",
- "bogus registers",
- fm3130->regs[0], fm3130->regs[1],
- fm3130->regs[2], fm3130->regs[3],
- fm3130->regs[4], fm3130->regs[5],
- fm3130->regs[6], fm3130->regs[7],
- fm3130->regs[8], fm3130->regs[9],
- fm3130->regs[0xa], fm3130->regs[0xb],
- fm3130->regs[0xc], fm3130->regs[0xd],
- fm3130->regs[0xe]);
+ dev_dbg(&client->dev, "%s: %15ph\n", "bogus registers",
+ fm3130->regs);
/* We won't bail out here because we just got invalid data.
Time setting from u-boot doesn't work anyway */
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 75d307ab37f4..82aad695979e 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -406,7 +406,7 @@ static int dryice_rtc_probe(struct platform_device *pdev)
mutex_init(&imxdi->write_mutex);
- imxdi->clk = clk_get(&pdev->dev, NULL);
+ imxdi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(imxdi->clk))
return PTR_ERR(imxdi->clk);
clk_prepare_enable(imxdi->clk);
@@ -475,7 +475,6 @@ static int dryice_rtc_probe(struct platform_device *pdev)
err:
clk_disable_unprepare(imxdi->clk);
- clk_put(imxdi->clk);
return rc;
}
@@ -492,7 +491,6 @@ static int dryice_rtc_remove(struct platform_device *pdev)
rtc_device_unregister(imxdi->rtc);
clk_disable_unprepare(imxdi->clk);
- clk_put(imxdi->clk);
return 0;
}
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 1850104705c0..6b4298ea683d 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -227,7 +227,7 @@ static int isl12022_set_datetime(struct i2c_client *client, struct rtc_time *tm)
buf[ISL12022_REG_SC + i]);
if (ret)
return -EIO;
- };
+ }
return 0;
}
diff --git a/drivers/rtc/rtc-lp8788.c b/drivers/rtc/rtc-lp8788.c
new file mode 100644
index 000000000000..9a4631218f41
--- /dev/null
+++ b/drivers/rtc/rtc-lp8788.c
@@ -0,0 +1,338 @@
+/*
+ * TI LP8788 MFD - RTC driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/lp8788.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+/* register address */
+#define LP8788_INTEN_3 0x05
+#define LP8788_RTC_UNLOCK 0x64
+#define LP8788_RTC_SEC 0x70
+#define LP8788_ALM1_SEC 0x77
+#define LP8788_ALM1_EN 0x7D
+#define LP8788_ALM2_SEC 0x7E
+#define LP8788_ALM2_EN 0x84
+
+/* mask/shift bits */
+#define LP8788_INT_RTC_ALM1_M BIT(1) /* Addr 05h */
+#define LP8788_INT_RTC_ALM1_S 1
+#define LP8788_INT_RTC_ALM2_M BIT(2) /* Addr 05h */
+#define LP8788_INT_RTC_ALM2_S 2
+#define LP8788_ALM_EN_M BIT(7) /* Addr 7Dh or 84h */
+#define LP8788_ALM_EN_S 7
+
+#define DEFAULT_ALARM_SEL LP8788_ALARM_1
+#define LP8788_MONTH_OFFSET 1
+#define LP8788_BASE_YEAR 2000
+#define MAX_WDAY_BITS 7
+#define LP8788_WDAY_SET 1
+#define RTC_UNLOCK 0x1
+#define RTC_LATCH 0x2
+#define ALARM_IRQ_FLAG (RTC_IRQF | RTC_AF)
+
+enum lp8788_time {
+ LPTIME_SEC,
+ LPTIME_MIN,
+ LPTIME_HOUR,
+ LPTIME_MDAY,
+ LPTIME_MON,
+ LPTIME_YEAR,
+ LPTIME_WDAY,
+ LPTIME_MAX,
+};
+
+struct lp8788_rtc {
+ struct lp8788 *lp;
+ struct rtc_device *rdev;
+ enum lp8788_alarm_sel alarm;
+ int irq;
+};
+
+static const u8 addr_alarm_sec[LP8788_ALARM_MAX] = {
+ LP8788_ALM1_SEC,
+ LP8788_ALM2_SEC,
+};
+
+static const u8 addr_alarm_en[LP8788_ALARM_MAX] = {
+ LP8788_ALM1_EN,
+ LP8788_ALM2_EN,
+};
+
+static const u8 mask_alarm_en[LP8788_ALARM_MAX] = {
+ LP8788_INT_RTC_ALM1_M,
+ LP8788_INT_RTC_ALM2_M,
+};
+
+static const u8 shift_alarm_en[LP8788_ALARM_MAX] = {
+ LP8788_INT_RTC_ALM1_S,
+ LP8788_INT_RTC_ALM2_S,
+};
+
+static int _to_tm_wday(u8 lp8788_wday)
+{
+ int i;
+
+ if (lp8788_wday == 0)
+ return 0;
+
+ /* lookup defined weekday from read register value */
+ for (i = 0; i < MAX_WDAY_BITS; i++) {
+ if ((lp8788_wday >> i) == LP8788_WDAY_SET)
+ break;
+ }
+
+ return i + 1;
+}
+
+static inline int _to_lp8788_wday(int tm_wday)
+{
+ return LP8788_WDAY_SET << (tm_wday - 1);
+}
+
+static void lp8788_rtc_unlock(struct lp8788 *lp)
+{
+ lp8788_write_byte(lp, LP8788_RTC_UNLOCK, RTC_UNLOCK);
+ lp8788_write_byte(lp, LP8788_RTC_UNLOCK, RTC_LATCH);
+}
+
+static int lp8788_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct lp8788_rtc *rtc = dev_get_drvdata(dev);
+ struct lp8788 *lp = rtc->lp;
+ u8 data[LPTIME_MAX];
+ int ret;
+
+ lp8788_rtc_unlock(lp);
+
+ ret = lp8788_read_multi_bytes(lp, LP8788_RTC_SEC, data, LPTIME_MAX);
+ if (ret)
+ return ret;
+
+ tm->tm_sec = data[LPTIME_SEC];
+ tm->tm_min = data[LPTIME_MIN];
+ tm->tm_hour = data[LPTIME_HOUR];
+ tm->tm_mday = data[LPTIME_MDAY];
+ tm->tm_mon = data[LPTIME_MON] - LP8788_MONTH_OFFSET;
+ tm->tm_year = data[LPTIME_YEAR] + LP8788_BASE_YEAR - 1900;
+ tm->tm_wday = _to_tm_wday(data[LPTIME_WDAY]);
+
+ return 0;
+}
+
+static int lp8788_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct lp8788_rtc *rtc = dev_get_drvdata(dev);
+ struct lp8788 *lp = rtc->lp;
+ u8 data[LPTIME_MAX - 1];
+ int ret, i, year;
+
+ year = tm->tm_year + 1900 - LP8788_BASE_YEAR;
+ if (year < 0) {
+ dev_err(lp->dev, "invalid year: %d\n", year);
+ return -EINVAL;
+ }
+
+ /* the RTC weekday register is read-only, so do not update it */
+ data[LPTIME_SEC] = tm->tm_sec;
+ data[LPTIME_MIN] = tm->tm_min;
+ data[LPTIME_HOUR] = tm->tm_hour;
+ data[LPTIME_MDAY] = tm->tm_mday;
+ data[LPTIME_MON] = tm->tm_mon + LP8788_MONTH_OFFSET;
+ data[LPTIME_YEAR] = year;
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ ret = lp8788_write_byte(lp, LP8788_RTC_SEC + i, data[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lp8788_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct lp8788_rtc *rtc = dev_get_drvdata(dev);
+ struct lp8788 *lp = rtc->lp;
+ struct rtc_time *tm = &alarm->time;
+ u8 addr, data[LPTIME_MAX];
+ int ret;
+
+ addr = addr_alarm_sec[rtc->alarm];
+ ret = lp8788_read_multi_bytes(lp, addr, data, LPTIME_MAX);
+ if (ret)
+ return ret;
+
+ tm->tm_sec = data[LPTIME_SEC];
+ tm->tm_min = data[LPTIME_MIN];
+ tm->tm_hour = data[LPTIME_HOUR];
+ tm->tm_mday = data[LPTIME_MDAY];
+ tm->tm_mon = data[LPTIME_MON] - LP8788_MONTH_OFFSET;
+ tm->tm_year = data[LPTIME_YEAR] + LP8788_BASE_YEAR - 1900;
+ tm->tm_wday = _to_tm_wday(data[LPTIME_WDAY]);
+ alarm->enabled = data[LPTIME_WDAY] & LP8788_ALM_EN_M;
+
+ return 0;
+}
+
+static int lp8788_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct lp8788_rtc *rtc = dev_get_drvdata(dev);
+ struct lp8788 *lp = rtc->lp;
+ struct rtc_time *tm = &alarm->time;
+ u8 addr, data[LPTIME_MAX];
+ int ret, i, year;
+
+ year = tm->tm_year + 1900 - LP8788_BASE_YEAR;
+ if (year < 0) {
+ dev_err(lp->dev, "invalid year: %d\n", year);
+ return -EINVAL;
+ }
+
+ data[LPTIME_SEC] = tm->tm_sec;
+ data[LPTIME_MIN] = tm->tm_min;
+ data[LPTIME_HOUR] = tm->tm_hour;
+ data[LPTIME_MDAY] = tm->tm_mday;
+ data[LPTIME_MON] = tm->tm_mon + LP8788_MONTH_OFFSET;
+ data[LPTIME_YEAR] = year;
+ data[LPTIME_WDAY] = _to_lp8788_wday(tm->tm_wday);
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ addr = addr_alarm_sec[rtc->alarm] + i;
+ ret = lp8788_write_byte(lp, addr, data[i]);
+ if (ret)
+ return ret;
+ }
+
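+ /* setting the alarm time also enables it right away via the ALM_EN bit */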
+ alarm->enabled = 1;
+ addr = addr_alarm_en[rtc->alarm];
+
+ return lp8788_update_bits(lp, addr, LP8788_ALM_EN_M,
+ alarm->enabled << LP8788_ALM_EN_S);
+}
+
+static int lp8788_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+ struct lp8788_rtc *rtc = dev_get_drvdata(dev);
+ struct lp8788 *lp = rtc->lp;
+ u8 mask, shift;
+
+ if (!rtc->irq)
+ return -EIO;
+
+ mask = mask_alarm_en[rtc->alarm];
+ shift = shift_alarm_en[rtc->alarm];
+
+ return lp8788_update_bits(lp, LP8788_INTEN_3, mask, enable << shift);
+}
+
+static const struct rtc_class_ops lp8788_rtc_ops = {
+ .read_time = lp8788_rtc_read_time,
+ .set_time = lp8788_rtc_set_time,
+ .read_alarm = lp8788_read_alarm,
+ .set_alarm = lp8788_set_alarm,
+ .alarm_irq_enable = lp8788_alarm_irq_enable,
+};
+
+static irqreturn_t lp8788_alarm_irq_handler(int irq, void *ptr)
+{
+ struct lp8788_rtc *rtc = ptr;
+
+ rtc_update_irq(rtc->rdev, 1, ALARM_IRQ_FLAG);
+ return IRQ_HANDLED;
+}
+
+static int lp8788_alarm_irq_register(struct platform_device *pdev,
+ struct lp8788_rtc *rtc)
+{
+ struct resource *r;
+ struct lp8788 *lp = rtc->lp;
+ struct irq_domain *irqdm = lp->irqdm;
+ int irq;
+
+ rtc->irq = 0;
+
+ /* even if the alarm IRQ number is not specified, RTC time should still work */
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, LP8788_ALM_IRQ);
+ if (!r)
+ return 0;
+
+ if (rtc->alarm == LP8788_ALARM_1)
+ irq = r->start;
+ else
+ irq = r->end;
+
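+ /* map the hardware irq from the MFD irq domain to a Linux virq */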
+ rtc->irq = irq_create_mapping(irqdm, irq);
+
+ return devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ lp8788_alarm_irq_handler,
+ 0, LP8788_ALM_IRQ, rtc);
+}
+
+static int lp8788_rtc_probe(struct platform_device *pdev)
+{
+ struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+ struct lp8788_rtc *rtc;
+ struct device *dev = &pdev->dev;
+
+ rtc = devm_kzalloc(dev, sizeof(struct lp8788_rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->lp = lp;
+ rtc->alarm = lp->pdata ? lp->pdata->alarm_sel : DEFAULT_ALARM_SEL;
+ platform_set_drvdata(pdev, rtc);
+
+ device_init_wakeup(dev, 1);
+
+ rtc->rdev = rtc_device_register("lp8788_rtc", dev,
+ &lp8788_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rdev)) {
+ dev_err(dev, "can not register rtc device\n");
+ return PTR_ERR(rtc->rdev);
+ }
+
+ if (lp8788_alarm_irq_register(pdev, rtc))
+ dev_warn(lp->dev, "no rtc irq handler\n");
+
+ return 0;
+}
+
+static int lp8788_rtc_remove(struct platform_device *pdev)
+{
+ struct lp8788_rtc *rtc = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(rtc->rdev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver lp8788_rtc_driver = {
+ .probe = lp8788_rtc_probe,
+ .remove = lp8788_rtc_remove,
+ .driver = {
+ .name = LP8788_DEV_RTC,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(lp8788_rtc_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP8788 RTC Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lp8788-rtc");
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
new file mode 100644
index 000000000000..6b1337f9baf4
--- /dev/null
+++ b/drivers/rtc/rtc-max77686.c
@@ -0,0 +1,641 @@
+/*
+ * RTC driver for Maxim MAX77686
+ *
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ *
+ * based on rtc-max8997.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/max77686-private.h>
+#include <linux/irqdomain.h>
+#include <linux/regmap.h>
+
+/* RTC Control Register */
+#define BCD_EN_SHIFT 0
+#define BCD_EN_MASK (1 << BCD_EN_SHIFT)
+#define MODEL24_SHIFT 1
+#define MODEL24_MASK (1 << MODEL24_SHIFT)
+/* RTC Update Register1 */
+#define RTC_UDR_SHIFT 0
+#define RTC_UDR_MASK (1 << RTC_UDR_SHIFT)
+#define RTC_RBUDR_SHIFT 4
+#define RTC_RBUDR_MASK (1 << RTC_RBUDR_SHIFT)
+/* WTSR and SMPL Register */
+#define WTSRT_SHIFT 0
+#define SMPLT_SHIFT 2
+#define WTSR_EN_SHIFT 6
+#define SMPL_EN_SHIFT 7
+#define WTSRT_MASK (3 << WTSRT_SHIFT)
+#define SMPLT_MASK (3 << SMPLT_SHIFT)
+#define WTSR_EN_MASK (1 << WTSR_EN_SHIFT)
+#define SMPL_EN_MASK (1 << SMPL_EN_SHIFT)
+/* RTC Hour register */
+#define HOUR_PM_SHIFT 6
+#define HOUR_PM_MASK (1 << HOUR_PM_SHIFT)
+/* RTC Alarm Enable */
+#define ALARM_ENABLE_SHIFT 7
+#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT)
+
+#define MAX77686_RTC_UPDATE_DELAY 16
+#undef MAX77686_RTC_WTSR_SMPL
+
+enum {
+ RTC_SEC = 0,
+ RTC_MIN,
+ RTC_HOUR,
+ RTC_WEEKDAY,
+ RTC_MONTH,
+ RTC_YEAR,
+ RTC_DATE,
+ RTC_NR_TIME
+};
+
+struct max77686_rtc_info {
+ struct device *dev;
+ struct max77686_dev *max77686;
+ struct i2c_client *rtc;
+ struct rtc_device *rtc_dev;
+ struct mutex lock;
+
+ struct regmap *regmap;
+
+ int virq;
+ int rtc_24hr_mode;
+};
+
+enum MAX77686_RTC_OP {
+ MAX77686_RTC_WRITE,
+ MAX77686_RTC_READ,
+};
+
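+/* the weekday register is one-hot encoded; return the bit index of the MSB (fls() - 1) */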
+static inline int max77686_rtc_calculate_wday(u8 shifted)
+{
+ int counter = -1;
+ while (shifted) {
+ shifted >>= 1;
+ counter++;
+ }
+ return counter;
+}
+
+static void max77686_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
+ int rtc_24hr_mode)
+{
+ tm->tm_sec = data[RTC_SEC] & 0x7f;
+ tm->tm_min = data[RTC_MIN] & 0x7f;
+ if (rtc_24hr_mode)
+ tm->tm_hour = data[RTC_HOUR] & 0x1f;
+ else {
+ tm->tm_hour = data[RTC_HOUR] & 0x0f;
+ if (data[RTC_HOUR] & HOUR_PM_MASK)
+ tm->tm_hour += 12;
+ }
+
+ tm->tm_wday = max77686_rtc_calculate_wday(data[RTC_WEEKDAY] & 0x7f);
+ tm->tm_mday = data[RTC_DATE] & 0x1f;
+ tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
+ tm->tm_year = (data[RTC_YEAR] & 0x7f) + 100;
+ tm->tm_yday = 0;
+ tm->tm_isdst = 0;
+}
+
+static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
+{
+ data[RTC_SEC] = tm->tm_sec;
+ data[RTC_MIN] = tm->tm_min;
+ data[RTC_HOUR] = tm->tm_hour;
+ data[RTC_WEEKDAY] = 1 << tm->tm_wday;
+ data[RTC_DATE] = tm->tm_mday;
+ data[RTC_MONTH] = tm->tm_mon + 1;
+ data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
+
+ if (tm->tm_year < 100) {
+ pr_warn("%s: MAX77686 RTC cannot handle the year %d. "
+ "Assume it's 2000.\n", __func__, 1900 + tm->tm_year);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int max77686_rtc_update(struct max77686_rtc_info *info,
+ enum MAX77686_RTC_OP op)
+{
+ int ret;
+ unsigned int data;
+
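+ /* UDR flushes buffered writes into the RTC; RBUDR latches the current time into the read buffer */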
+ if (op == MAX77686_RTC_WRITE)
+ data = 1 << RTC_UDR_SHIFT;
+ else
+ data = 1 << RTC_RBUDR_SHIFT;
+
+ ret = regmap_update_bits(info->max77686->rtc_regmap,
+ MAX77686_RTC_UPDATE0, data, data);
+ if (ret < 0)
+ dev_err(info->dev, "%s: fail to write update reg(ret=%d, data=0x%x)\n",
+ __func__, ret, data);
+ else {
+ /* Minimum 16ms delay required before RTC update. */
+ msleep(MAX77686_RTC_UPDATE_DELAY);
+ }
+
+ return ret;
+}
+
+static int max77686_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max77686_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ mutex_lock(&info->lock);
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_READ);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_bulk_read(info->max77686->rtc_regmap,
+ MAX77686_RTC_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read time reg(%d)\n", __func__, ret);
+ goto out;
+ }
+
+ max77686_rtc_data_to_tm(data, tm, info->rtc_24hr_mode);
+
+ ret = rtc_valid_tm(tm);
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int max77686_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max77686_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ ret = max77686_rtc_tm_to_data(tm, data);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&info->lock);
+
+ ret = regmap_bulk_write(info->max77686->rtc_regmap,
+ MAX77686_RTC_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write time reg(%d)\n", __func__,
+ ret);
+ goto out;
+ }
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_WRITE);
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max77686_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ unsigned int val;
+ int i, ret;
+
+ mutex_lock(&info->lock);
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_READ);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_bulk_read(info->max77686->rtc_regmap,
+ MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s:%d fail to read alarm reg(%d)\n",
+ __func__, __LINE__, ret);
+ goto out;
+ }
+
+ max77686_rtc_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
+
+ alrm->enabled = 0;
+ for (i = 0; i < RTC_NR_TIME; i++) {
+ if (data[i] & ALARM_ENABLE_MASK) {
+ alrm->enabled = 1;
+ break;
+ }
+ }
+
+ alrm->pending = 0;
+ ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS1, &val);
+ if (ret < 0) {
+ dev_err(info->dev, "%s:%d fail to read status1 reg(%d)\n",
+ __func__, __LINE__, ret);
+ goto out;
+ }
+
+ if (val & (1 << 4)) /* RTCA1 */
+ alrm->pending = 1;
+
+out:
+ mutex_unlock(&info->lock);
+ return 0;
+}
+
+static int max77686_rtc_stop_alarm(struct max77686_rtc_info *info)
+{
+ u8 data[RTC_NR_TIME];
+ int ret, i;
+ struct rtc_time tm;
+
+ if (!mutex_is_locked(&info->lock))
+ dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_READ);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_bulk_read(info->max77686->rtc_regmap,
+ MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ max77686_rtc_data_to_tm(data, &tm, info->rtc_24hr_mode);
+
+ for (i = 0; i < RTC_NR_TIME; i++)
+ data[i] &= ~ALARM_ENABLE_MASK;
+
+ ret = regmap_bulk_write(info->max77686->rtc_regmap,
+ MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_WRITE);
+out:
+ return ret;
+}
+
+static int max77686_rtc_start_alarm(struct max77686_rtc_info *info)
+{
+ u8 data[RTC_NR_TIME];
+ int ret;
+ struct rtc_time tm;
+
+ if (!mutex_is_locked(&info->lock))
+ dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_READ);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_bulk_read(info->max77686->rtc_regmap,
+ MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ max77686_rtc_data_to_tm(data, &tm, info->rtc_24hr_mode);
+
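+ /* always match on sec/min/hour; match month/year/date only when non-zero; never match on weekday */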
+ data[RTC_SEC] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_MIN] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_HOUR] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
+ if (data[RTC_MONTH] & 0xf)
+ data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT);
+ if (data[RTC_YEAR] & 0x7f)
+ data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT);
+ if (data[RTC_DATE] & 0x1f)
+ data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT);
+
+ ret = regmap_bulk_write(info->max77686->rtc_regmap,
+ MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_WRITE);
+out:
+ return ret;
+}
+
+static int max77686_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max77686_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ ret = max77686_rtc_tm_to_data(&alrm->time, data);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&info->lock);
+
+ ret = max77686_rtc_stop_alarm(info);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_bulk_write(info->max77686->rtc_regmap,
+ MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_WRITE);
+ if (ret < 0)
+ goto out;
+
+ if (alrm->enabled)
+ ret = max77686_rtc_start_alarm(info);
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int max77686_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct max77686_rtc_info *info = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&info->lock);
+ if (enabled)
+ ret = max77686_rtc_start_alarm(info);
+ else
+ ret = max77686_rtc_stop_alarm(info);
+ mutex_unlock(&info->lock);
+
+ return ret;
+}
+
+static irqreturn_t max77686_rtc_alarm_irq(int irq, void *data)
+{
+ struct max77686_rtc_info *info = data;
+
+ dev_info(info->dev, "%s:irq(%d)\n", __func__, irq);
+
+ rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops max77686_rtc_ops = {
+ .read_time = max77686_rtc_read_time,
+ .set_time = max77686_rtc_set_time,
+ .read_alarm = max77686_rtc_read_alarm,
+ .set_alarm = max77686_rtc_set_alarm,
+ .alarm_irq_enable = max77686_rtc_alarm_irq_enable,
+};
+
+#ifdef MAX77686_RTC_WTSR_SMPL
+static void max77686_rtc_enable_wtsr(struct max77686_rtc_info *info, bool enable)
+{
+ int ret;
+ unsigned int val, mask;
+
+ if (enable)
+ val = (1 << WTSR_EN_SHIFT) | (3 << WTSRT_SHIFT);
+ else
+ val = 0;
+
+ mask = WTSR_EN_MASK | WTSRT_MASK;
+
+ dev_info(info->dev, "%s: %s WTSR\n", __func__,
+ enable ? "enable" : "disable");
+
+ ret = regmap_update_bits(info->max77686->rtc_regmap,
+ MAX77686_WTSR_SMPL_CNTL, mask, val);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to update WTSR reg(%d)\n",
+ __func__, ret);
+ return;
+ }
+
+ max77686_rtc_update(info, MAX77686_RTC_WRITE);
+}
+
+static void max77686_rtc_enable_smpl(struct max77686_rtc_info *info, bool enable)
+{
+ int ret;
+ unsigned int val, mask;
+
+ if (enable)
+ val = (1 << SMPL_EN_SHIFT) | (0 << SMPLT_SHIFT);
+ else
+ val = 0;
+
+ mask = SMPL_EN_MASK | SMPLT_MASK;
+
+ dev_info(info->dev, "%s: %s SMPL\n", __func__,
+ enable ? "enable" : "disable");
+
+ ret = regmap_update_bits(info->max77686->rtc_regmap,
+ MAX77686_WTSR_SMPL_CNTL, mask, val);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to update SMPL reg(%d)\n",
+ __func__, ret);
+ return;
+ }
+
+ max77686_rtc_update(info, MAX77686_RTC_WRITE);
+
+ val = 0;
+ regmap_read(info->max77686->rtc_regmap, MAX77686_WTSR_SMPL_CNTL, &val);
+ pr_info("%s: WTSR_SMPL(0x%02x)\n", __func__, val);
+}
+#endif /* MAX77686_RTC_WTSR_SMPL */
+
+static int max77686_rtc_init_reg(struct max77686_rtc_info *info)
+{
+ u8 data[2];
+ int ret;
+
+ /* Set RTC control register: binary mode, 24-hour mode */
+ data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+ data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+
+ info->rtc_24hr_mode = 1;
+
+ ret = regmap_bulk_write(info->max77686->rtc_regmap, MAX77686_RTC_CONTROLM, data, 2);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = max77686_rtc_update(info, MAX77686_RTC_WRITE);
+ return ret;
+}
+
+static struct regmap_config max77686_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int max77686_rtc_probe(struct platform_device *pdev)
+{
+ struct max77686_dev *max77686 = dev_get_drvdata(pdev->dev.parent);
+ struct max77686_rtc_info *info;
+ int ret, virq;
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+
+ info = kzalloc(sizeof(struct max77686_rtc_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ mutex_init(&info->lock);
+ info->dev = &pdev->dev;
+ info->max77686 = max77686;
+ info->rtc = max77686->rtc;
+ info->max77686->rtc_regmap = regmap_init_i2c(info->max77686->rtc,
+ &max77686_rtc_regmap_config);
+ if (IS_ERR(info->max77686->rtc_regmap)) {
+ ret = PTR_ERR(info->max77686->rtc_regmap);
+ dev_err(info->max77686->dev, "Failed to allocate register map: %d\n",
+ ret);
+ kfree(info);
+ return ret;
+ }
+ platform_set_drvdata(pdev, info);
+
+ ret = max77686_rtc_init_reg(info);
+
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize RTC reg:%d\n", ret);
+ goto err_rtc;
+ }
+
+#ifdef MAX77686_RTC_WTSR_SMPL
+ max77686_rtc_enable_wtsr(info, true);
+ max77686_rtc_enable_smpl(info, true);
+#endif
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ info->rtc_dev = rtc_device_register("max77686-rtc", &pdev->dev,
+ &max77686_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(info->rtc_dev)) {
+ dev_info(&pdev->dev, "%s: fail\n", __func__);
+
+ ret = PTR_ERR(info->rtc_dev);
+ dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
+ if (ret == 0)
+ ret = -EINVAL;
+ goto err_rtc;
+ }
+ virq = irq_create_mapping(max77686->irq_domain, MAX77686_RTCIRQ_RTCA1);
+ if (!virq)
+ goto err_rtc;
+ info->virq = virq;
+
+ ret = request_threaded_irq(virq, NULL, max77686_rtc_alarm_irq, 0,
+ "rtc-alarm0", info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+ info->virq, ret);
+ goto err_rtc;
+ }
+
+ goto out;
+err_rtc:
+ kfree(info);
+ return ret;
+out:
+ return ret;
+}
+
+static int max77686_rtc_remove(struct platform_device *pdev)
+{
+ struct max77686_rtc_info *info = platform_get_drvdata(pdev);
+
+ if (info) {
+ free_irq(info->virq, info);
+ rtc_device_unregister(info->rtc_dev);
+ kfree(info);
+ }
+
+ return 0;
+}
+
+static void max77686_rtc_shutdown(struct platform_device *pdev)
+{
+#ifdef MAX77686_RTC_WTSR_SMPL
+ struct max77686_rtc_info *info = platform_get_drvdata(pdev);
+ int i;
+ u8 val = 0;
+
+ for (i = 0; i < 3; i++) {
+ max77686_rtc_enable_wtsr(info, false);
+ regmap_read(info->max77686->rtc_regmap, MAX77686_WTSR_SMPL_CNTL, &val);
+ pr_info("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val);
+ if (val & WTSR_EN_MASK)
+ pr_emerg("%s: fail to disable WTSR\n", __func__);
+ else {
+ pr_info("%s: success to disable WTSR\n", __func__);
+ break;
+ }
+ }
+
+ /* Disable SMPL when power off */
+ max77686_rtc_enable_smpl(info, false);
+#endif /* MAX77686_RTC_WTSR_SMPL */
+}
+
+static const struct platform_device_id rtc_id[] = {
+ { "max77686-rtc", 0 },
+ {},
+};
+
+static struct platform_driver max77686_rtc_driver = {
+ .driver = {
+ .name = "max77686-rtc",
+ .owner = THIS_MODULE,
+ },
+ .probe = max77686_rtc_probe,
+ .remove = max77686_rtc_remove,
+ .shutdown = max77686_rtc_shutdown,
+ .id_table = rtc_id,
+};
+
+static int __init max77686_rtc_init(void)
+{
+ return platform_driver_register(&max77686_rtc_driver);
+}
+module_init(max77686_rtc_init);
+
+static void __exit max77686_rtc_exit(void)
+{
+ platform_driver_unregister(&max77686_rtc_driver);
+}
+module_exit(max77686_rtc_exit);
+
+MODULE_DESCRIPTION("Maxim MAX77686 RTC driver");
+MODULE_AUTHOR("<woong.byun@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-max8907.c b/drivers/rtc/rtc-max8907.c
index 1d049da16c85..31ca8faf9f05 100644
--- a/drivers/rtc/rtc-max8907.c
+++ b/drivers/rtc/rtc-max8907.c
@@ -205,8 +205,9 @@ static int max8907_rtc_probe(struct platform_device *pdev)
goto err_unregister;
}
- ret = request_threaded_irq(rtc->irq, NULL, max8907_irq_handler,
- IRQF_ONESHOT, "max8907-alarm0", rtc);
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ max8907_irq_handler,
+ IRQF_ONESHOT, "max8907-alarm0", rtc);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to request IRQ%d: %d\n",
rtc->irq, ret);
@@ -224,7 +225,6 @@ static int max8907_rtc_remove(struct platform_device *pdev)
{
struct max8907_rtc *rtc = platform_get_drvdata(pdev);
- free_irq(rtc->irq, rtc);
rtc_device_unregister(rtc->rtc_dev);
return 0;
diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c
new file mode 100644
index 000000000000..00e505b6bee3
--- /dev/null
+++ b/drivers/rtc/rtc-max8997.c
@@ -0,0 +1,552 @@
+/*
+ * RTC driver for Maxim MAX8997
+ *
+ * Copyright (C) 2013 Samsung Electronics Co.Ltd
+ *
+ * based on rtc-max8998.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/max8997-private.h>
+#include <linux/irqdomain.h>
+
+/* Module parameter for WTSR function control */
+static int wtsr_en = 1;
+module_param(wtsr_en, int, 0444);
+MODULE_PARM_DESC(wtsr_en, "Watchdog Timeout & Software Reset (default=on)");
+/* Module parameter for SMPL function control */
+static int smpl_en = 1;
+module_param(smpl_en, int, 0444);
+MODULE_PARM_DESC(smpl_en, "Sudden Momentary Power Loss (default=on)");
+
+/* RTC Control Register */
+#define BCD_EN_SHIFT 0
+#define BCD_EN_MASK (1 << BCD_EN_SHIFT)
+#define MODEL24_SHIFT 1
+#define MODEL24_MASK (1 << MODEL24_SHIFT)
+/* RTC Update Register1 */
+#define RTC_UDR_SHIFT 0
+#define RTC_UDR_MASK (1 << RTC_UDR_SHIFT)
+/* WTSR and SMPL Register */
+#define WTSRT_SHIFT 0
+#define SMPLT_SHIFT 2
+#define WTSR_EN_SHIFT 6
+#define SMPL_EN_SHIFT 7
+#define WTSRT_MASK (3 << WTSRT_SHIFT)
+#define SMPLT_MASK (3 << SMPLT_SHIFT)
+#define WTSR_EN_MASK (1 << WTSR_EN_SHIFT)
+#define SMPL_EN_MASK (1 << SMPL_EN_SHIFT)
+/* RTC Hour register */
+#define HOUR_PM_SHIFT 6
+#define HOUR_PM_MASK (1 << HOUR_PM_SHIFT)
+/* RTC Alarm Enable */
+#define ALARM_ENABLE_SHIFT 7
+#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT)
+
+enum {
+ RTC_SEC = 0,
+ RTC_MIN,
+ RTC_HOUR,
+ RTC_WEEKDAY,
+ RTC_MONTH,
+ RTC_YEAR,
+ RTC_DATE,
+ RTC_NR_TIME
+};
+
+struct max8997_rtc_info {
+ struct device *dev;
+ struct max8997_dev *max8997;
+ struct i2c_client *rtc;
+ struct rtc_device *rtc_dev;
+ struct mutex lock;
+ int virq;
+ int rtc_24hr_mode;
+};
+
+static void max8997_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
+ int rtc_24hr_mode)
+{
+ tm->tm_sec = data[RTC_SEC] & 0x7f;
+ tm->tm_min = data[RTC_MIN] & 0x7f;
+ if (rtc_24hr_mode)
+ tm->tm_hour = data[RTC_HOUR] & 0x1f;
+ else {
+ tm->tm_hour = data[RTC_HOUR] & 0x0f;
+ if (data[RTC_HOUR] & HOUR_PM_MASK)
+ tm->tm_hour += 12;
+ }
+
+ tm->tm_wday = fls(data[RTC_WEEKDAY] & 0x7f) - 1;
+ tm->tm_mday = data[RTC_DATE] & 0x1f;
+ tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
+ tm->tm_year = (data[RTC_YEAR] & 0x7f) + 100;
+ tm->tm_yday = 0;
+ tm->tm_isdst = 0;
+}
+
+static int max8997_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
+{
+ data[RTC_SEC] = tm->tm_sec;
+ data[RTC_MIN] = tm->tm_min;
+ data[RTC_HOUR] = tm->tm_hour;
+ data[RTC_WEEKDAY] = 1 << tm->tm_wday;
+ data[RTC_DATE] = tm->tm_mday;
+ data[RTC_MONTH] = tm->tm_mon + 1;
+ data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
+
+ if (tm->tm_year < 100) {
+ pr_warn("%s: MAX8997 RTC cannot handle the year %d. "
+ "Assume it's 2000.\n", __func__, 1900 + tm->tm_year);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int max8997_rtc_set_update_reg(struct max8997_rtc_info *info)
+{
+ int ret;
+
+ ret = max8997_write_reg(info->rtc, MAX8997_RTC_UPDATE1,
+ RTC_UDR_MASK);
+ if (ret < 0)
+ dev_err(info->dev, "%s: fail to write update reg(%d)\n",
+ __func__, ret);
+ else {
+ /* Minimum 16ms delay required before RTC update.
+ * Otherwise, we may read and update based on out-of-date
+ * value */
+ msleep(20);
+ }
+
+ return ret;
+}
+
+static int max8997_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max8997_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ mutex_lock(&info->lock);
+ ret = max8997_bulk_read(info->rtc, MAX8997_RTC_SEC, RTC_NR_TIME, data);
+ mutex_unlock(&info->lock);
+
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read time reg(%d)\n", __func__,
+ ret);
+ return ret;
+ }
+
+ max8997_rtc_data_to_tm(data, tm, info->rtc_24hr_mode);
+
+ return rtc_valid_tm(tm);
+}
+
+static int max8997_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max8997_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ ret = max8997_rtc_tm_to_data(tm, data);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&info->lock);
+
+ ret = max8997_bulk_write(info->rtc, MAX8997_RTC_SEC, RTC_NR_TIME, data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write time reg(%d)\n", __func__,
+ ret);
+ goto out;
+ }
+
+ ret = max8997_rtc_set_update_reg(info);
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int max8997_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max8997_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ u8 val;
+ int i, ret;
+
+ mutex_lock(&info->lock);
+
+ ret = max8997_bulk_read(info->rtc, MAX8997_RTC_ALARM1_SEC, RTC_NR_TIME,
+ data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s:%d fail to read alarm reg(%d)\n",
+ __func__, __LINE__, ret);
+ goto out;
+ }
+
+ max8997_rtc_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
+
+ alrm->enabled = 0;
+ for (i = 0; i < RTC_NR_TIME; i++) {
+ if (data[i] & ALARM_ENABLE_MASK) {
+ alrm->enabled = 1;
+ break;
+ }
+ }
+
+ alrm->pending = 0;
+ ret = max8997_read_reg(info->max8997->i2c, MAX8997_REG_STATUS1, &val);
+ if (ret < 0) {
+ dev_err(info->dev, "%s:%d fail to read status1 reg(%d)\n",
+ __func__, __LINE__, ret);
+ goto out;
+ }
+
+ if (val & (1 << 4)) /* RTCA1 */
+ alrm->pending = 1;
+
+out:
+ mutex_unlock(&info->lock);
+ return 0;
+}
+
+static int max8997_rtc_stop_alarm(struct max8997_rtc_info *info)
+{
+ u8 data[RTC_NR_TIME];
+ int ret, i;
+
+ if (!mutex_is_locked(&info->lock))
+ dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
+
+ ret = max8997_bulk_read(info->rtc, MAX8997_RTC_ALARM1_SEC, RTC_NR_TIME,
+ data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ for (i = 0; i < RTC_NR_TIME; i++)
+ data[i] &= ~ALARM_ENABLE_MASK;
+
+ ret = max8997_bulk_write(info->rtc, MAX8997_RTC_ALARM1_SEC, RTC_NR_TIME,
+ data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = max8997_rtc_set_update_reg(info);
+out:
+ return ret;
+}
+
+static int max8997_rtc_start_alarm(struct max8997_rtc_info *info)
+{
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ if (!mutex_is_locked(&info->lock))
+ dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
+
+ ret = max8997_bulk_read(info->rtc, MAX8997_RTC_ALARM1_SEC, RTC_NR_TIME,
+ data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
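+ /* always match on sec/min/hour; match month/year/date only when non-zero; never match on weekday */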
+ data[RTC_SEC] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_MIN] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_HOUR] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
+ if (data[RTC_MONTH] & 0xf)
+ data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT);
+ if (data[RTC_YEAR] & 0x7f)
+ data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT);
+ if (data[RTC_DATE] & 0x1f)
+ data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT);
+
+ ret = max8997_bulk_write(info->rtc, MAX8997_RTC_ALARM1_SEC, RTC_NR_TIME,
+ data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = max8997_rtc_set_update_reg(info);
+out:
+ return ret;
+}
+static int max8997_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max8997_rtc_info *info = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ ret = max8997_rtc_tm_to_data(&alrm->time, data);
+ if (ret < 0)
+ return ret;
+
+ dev_info(info->dev, "%s: %d-%02d-%02d %02d:%02d:%02d\n", __func__,
+ data[RTC_YEAR] + 2000, data[RTC_MONTH], data[RTC_DATE],
+ data[RTC_HOUR], data[RTC_MIN], data[RTC_SEC]);
+
+ mutex_lock(&info->lock);
+
+ ret = max8997_rtc_stop_alarm(info);
+ if (ret < 0)
+ goto out;
+
+ ret = max8997_bulk_write(info->rtc, MAX8997_RTC_ALARM1_SEC, RTC_NR_TIME,
+ data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = max8997_rtc_set_update_reg(info);
+ if (ret < 0)
+ goto out;
+
+ if (alrm->enabled)
+ ret = max8997_rtc_start_alarm(info);
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int max8997_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct max8997_rtc_info *info = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&info->lock);
+ if (enabled)
+ ret = max8997_rtc_start_alarm(info);
+ else
+ ret = max8997_rtc_stop_alarm(info);
+ mutex_unlock(&info->lock);
+
+ return ret;
+}
+
+static irqreturn_t max8997_rtc_alarm_irq(int irq, void *data)
+{
+ struct max8997_rtc_info *info = data;
+
+ dev_info(info->dev, "%s:irq(%d)\n", __func__, irq);
+
+ rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops max8997_rtc_ops = {
+ .read_time = max8997_rtc_read_time,
+ .set_time = max8997_rtc_set_time,
+ .read_alarm = max8997_rtc_read_alarm,
+ .set_alarm = max8997_rtc_set_alarm,
+ .alarm_irq_enable = max8997_rtc_alarm_irq_enable,
+};
+
+static void max8997_rtc_enable_wtsr(struct max8997_rtc_info *info, bool enable)
+{
+ int ret;
+ u8 val, mask;
+
+ if (!wtsr_en)
+ return;
+
+ if (enable)
+ val = (1 << WTSR_EN_SHIFT) | (3 << WTSRT_SHIFT);
+ else
+ val = 0;
+
+ mask = WTSR_EN_MASK | WTSRT_MASK;
+
+ dev_info(info->dev, "%s: %s WTSR\n", __func__,
+ enable ? "enable" : "disable");
+
+ ret = max8997_update_reg(info->rtc, MAX8997_RTC_WTSR_SMPL, val, mask);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to update WTSR reg(%d)\n",
+ __func__, ret);
+ return;
+ }
+
+ max8997_rtc_set_update_reg(info);
+}
+
+static void max8997_rtc_enable_smpl(struct max8997_rtc_info *info, bool enable)
+{
+ int ret;
+ u8 val, mask;
+
+ if (!smpl_en)
+ return;
+
+ if (enable)
+ val = (1 << SMPL_EN_SHIFT) | (0 << SMPLT_SHIFT);
+ else
+ val = 0;
+
+ mask = SMPL_EN_MASK | SMPLT_MASK;
+
+ dev_info(info->dev, "%s: %s SMPL\n", __func__,
+ enable ? "enable" : "disable");
+
+ ret = max8997_update_reg(info->rtc, MAX8997_RTC_WTSR_SMPL, val, mask);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to update SMPL reg(%d)\n",
+ __func__, ret);
+ return;
+ }
+
+ max8997_rtc_set_update_reg(info);
+
+ val = 0;
+ max8997_read_reg(info->rtc, MAX8997_RTC_WTSR_SMPL, &val);
+ pr_info("%s: WTSR_SMPL(0x%02x)\n", __func__, val);
+}
+
+static int max8997_rtc_init_reg(struct max8997_rtc_info *info)
+{
+ u8 data[2];
+ int ret;
+
+ /* Set RTC control register: binary mode, 24-hour mode */
+ data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+ data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+
+ info->rtc_24hr_mode = 1;
+
+ ret = max8997_bulk_write(info->rtc, MAX8997_RTC_CTRLMASK, 2, data);
+ if (ret < 0) {
+ dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = max8997_rtc_set_update_reg(info);
+ return ret;
+}
+
+static int max8997_rtc_probe(struct platform_device *pdev)
+{
+ struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_rtc_info *info;
+ int ret, virq;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_rtc_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ mutex_init(&info->lock);
+ info->dev = &pdev->dev;
+ info->max8997 = max8997;
+ info->rtc = max8997->rtc;
+
+ platform_set_drvdata(pdev, info);
+
+ ret = max8997_rtc_init_reg(info);
+
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize RTC reg:%d\n", ret);
+ return ret;
+ }
+
+ max8997_rtc_enable_wtsr(info, true);
+ max8997_rtc_enable_smpl(info, true);
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ info->rtc_dev = rtc_device_register("max8997-rtc", &pdev->dev,
+ &max8997_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(info->rtc_dev)) {
+ ret = PTR_ERR(info->rtc_dev);
+ dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
+ return ret;
+ }
+
+ virq = irq_create_mapping(max8997->irq_domain, MAX8997_PMICIRQ_RTCA1);
+ if (!virq) {
+ dev_err(&pdev->dev, "Failed to create mapping alarm IRQ\n");
+ goto err_out;
+ }
+ info->virq = virq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
+ max8997_rtc_alarm_irq, 0,
+ "rtc-alarm0", info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+ info->virq, ret);
+ goto err_out;
+ }
+
+ return ret;
+
+err_out:
+ rtc_device_unregister(info->rtc_dev);
+ return ret;
+}
+
+static int max8997_rtc_remove(struct platform_device *pdev)
+{
+ struct max8997_rtc_info *info = platform_get_drvdata(pdev);
+
+ if (info)
+ rtc_device_unregister(info->rtc_dev);
+
+ return 0;
+}
+
+static void max8997_rtc_shutdown(struct platform_device *pdev)
+{
+ struct max8997_rtc_info *info = platform_get_drvdata(pdev);
+
+ max8997_rtc_enable_wtsr(info, false);
+ max8997_rtc_enable_smpl(info, false);
+}
+
+static const struct platform_device_id rtc_id[] = {
+ { "max8997-rtc", 0 },
+ {},
+};
+
+static struct platform_driver max8997_rtc_driver = {
+ .driver = {
+ .name = "max8997-rtc",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_rtc_probe,
+ .remove = max8997_rtc_remove,
+ .shutdown = max8997_rtc_shutdown,
+ .id_table = rtc_id,
+};
+
+module_platform_driver(max8997_rtc_driver);
+
+MODULE_DESCRIPTION("Maxim MAX8997 RTC driver");
+MODULE_AUTHOR("<ms925.kim@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index bec10be96f84..bdcc60830aec 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rtc.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/io.h>
@@ -403,17 +404,19 @@ static int mpc5121_rtc_remove(struct platform_device *op)
return 0;
}
+#ifdef CONFIG_OF
static struct of_device_id mpc5121_rtc_match[] = {
{ .compatible = "fsl,mpc5121-rtc", },
{ .compatible = "fsl,mpc5200-rtc", },
{},
};
+#endif
static struct platform_driver mpc5121_rtc_driver = {
.driver = {
.name = "mpc5121-rtc",
.owner = THIS_MODULE,
- .of_match_table = mpc5121_rtc_match,
+ .of_match_table = of_match_ptr(mpc5121_rtc_match),
},
.probe = mpc5121_rtc_probe,
.remove = mpc5121_rtc_remove,
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
new file mode 100644
index 000000000000..59c42986254e
--- /dev/null
+++ b/drivers/rtc/rtc-palmas.c
@@ -0,0 +1,339 @@
+/*
+ * rtc-palmas.c -- Palmas Real Time Clock driver.
+
+ * RTC driver for TI Palma series devices like TPS65913,
+ * TPS65914 power management IC.
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/bcd.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/palmas.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+struct palmas_rtc {
+ struct rtc_device *rtc;
+ struct device *dev;
+ unsigned int irq;
+};
+
+/* Total number of RTC registers needed to set time */
+#define PALMAS_NUM_TIME_REGS (PALMAS_YEARS_REG - PALMAS_SECONDS_REG + 1)
+
+static int palmas_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned char rtc_data[PALMAS_NUM_TIME_REGS];
+ struct palmas *palmas = dev_get_drvdata(dev->parent);
+ int ret;
+
+ /* Copy RTC counting registers to static registers or latches */
+ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
+ PALMAS_RTC_CTRL_REG_GET_TIME, PALMAS_RTC_CTRL_REG_GET_TIME);
+ if (ret < 0) {
+ dev_err(dev, "RTC CTRL reg update failed, err: %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_bulk_read(palmas, PALMAS_RTC_BASE, PALMAS_SECONDS_REG,
+ rtc_data, PALMAS_NUM_TIME_REGS);
+ if (ret < 0) {
+ dev_err(dev, "RTC_SECONDS reg read failed, err = %d\n", ret);
+ return ret;
+ }
+
+ tm->tm_sec = bcd2bin(rtc_data[0]);
+ tm->tm_min = bcd2bin(rtc_data[1]);
+ tm->tm_hour = bcd2bin(rtc_data[2]);
+ tm->tm_mday = bcd2bin(rtc_data[3]);
+ tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
+ tm->tm_year = bcd2bin(rtc_data[5]) + 100;
+
+ return ret;
+}
+
+static int palmas_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned char rtc_data[PALMAS_NUM_TIME_REGS];
+ struct palmas *palmas = dev_get_drvdata(dev->parent);
+ int ret;
+
+ rtc_data[0] = bin2bcd(tm->tm_sec);
+ rtc_data[1] = bin2bcd(tm->tm_min);
+ rtc_data[2] = bin2bcd(tm->tm_hour);
+ rtc_data[3] = bin2bcd(tm->tm_mday);
+ rtc_data[4] = bin2bcd(tm->tm_mon + 1);
+ rtc_data[5] = bin2bcd(tm->tm_year - 100);
+
+ /* Stop RTC while updating the RTC time registers */
+ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
+ PALMAS_RTC_CTRL_REG_STOP_RTC, 0);
+ if (ret < 0) {
+ dev_err(dev, "RTC stop failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_bulk_write(palmas, PALMAS_RTC_BASE, PALMAS_SECONDS_REG,
+ rtc_data, PALMAS_NUM_TIME_REGS);
+ if (ret < 0) {
+ dev_err(dev, "RTC_SECONDS reg write failed, err = %d\n", ret);
+ return ret;
+ }
+
+ /* Start back RTC */
+ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
+ PALMAS_RTC_CTRL_REG_STOP_RTC, PALMAS_RTC_CTRL_REG_STOP_RTC);
+ if (ret < 0)
+ dev_err(dev, "RTC start failed, err = %d\n", ret);
+ return ret;
+}
+
+static int palmas_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
+{
+ struct palmas *palmas = dev_get_drvdata(dev->parent);
+ u8 val;
+
+ val = enabled ? PALMAS_RTC_INTERRUPTS_REG_IT_ALARM : 0;
+ return palmas_write(palmas, PALMAS_RTC_BASE,
+ PALMAS_RTC_INTERRUPTS_REG, val);
+}
+
+static int palmas_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ unsigned char alarm_data[PALMAS_NUM_TIME_REGS];
+ u32 int_val;
+ struct palmas *palmas = dev_get_drvdata(dev->parent);
+ int ret;
+
+ ret = palmas_bulk_read(palmas, PALMAS_RTC_BASE,
+ PALMAS_ALARM_SECONDS_REG,
+ alarm_data, PALMAS_NUM_TIME_REGS);
+ if (ret < 0) {
+ dev_err(dev, "RTC_ALARM_SECONDS read failed, err = %d\n", ret);
+ return ret;
+ }
+
+ alm->time.tm_sec = bcd2bin(alarm_data[0]);
+ alm->time.tm_min = bcd2bin(alarm_data[1]);
+ alm->time.tm_hour = bcd2bin(alarm_data[2]);
+ alm->time.tm_mday = bcd2bin(alarm_data[3]);
+ alm->time.tm_mon = bcd2bin(alarm_data[4]) - 1;
+ alm->time.tm_year = bcd2bin(alarm_data[5]) + 100;
+
+ ret = palmas_read(palmas, PALMAS_RTC_BASE, PALMAS_RTC_INTERRUPTS_REG,
+ &int_val);
+ if (ret < 0) {
+ dev_err(dev, "RTC_INTERRUPTS reg read failed, err = %d\n", ret);
+ return ret;
+ }
+
+ if (int_val & PALMAS_RTC_INTERRUPTS_REG_IT_ALARM)
+ alm->enabled = 1;
+ return ret;
+}
+
+static int palmas_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ unsigned char alarm_data[PALMAS_NUM_TIME_REGS];
+ struct palmas *palmas = dev_get_drvdata(dev->parent);
+ int ret;
+
+ ret = palmas_rtc_alarm_irq_enable(dev, 0);
+ if (ret < 0) {
+ dev_err(dev, "Disable RTC alarm failed\n");
+ return ret;
+ }
+
+ alarm_data[0] = bin2bcd(alm->time.tm_sec);
+ alarm_data[1] = bin2bcd(alm->time.tm_min);
+ alarm_data[2] = bin2bcd(alm->time.tm_hour);
+ alarm_data[3] = bin2bcd(alm->time.tm_mday);
+ alarm_data[4] = bin2bcd(alm->time.tm_mon + 1);
+ alarm_data[5] = bin2bcd(alm->time.tm_year - 100);
+
+ ret = palmas_bulk_write(palmas, PALMAS_RTC_BASE,
+ PALMAS_ALARM_SECONDS_REG, alarm_data, PALMAS_NUM_TIME_REGS);
+ if (ret < 0) {
+ dev_err(dev, "ALARM_SECONDS_REG write failed, err = %d\n", ret);
+ return ret;
+ }
+
+ if (alm->enabled)
+ ret = palmas_rtc_alarm_irq_enable(dev, 1);
+ return ret;
+}
+
+static int palmas_clear_interrupts(struct device *dev)
+{
+ struct palmas *palmas = dev_get_drvdata(dev->parent);
+ unsigned int rtc_reg;
+ int ret;
+
+ ret = palmas_read(palmas, PALMAS_RTC_BASE, PALMAS_RTC_STATUS_REG,
+ &rtc_reg);
+ if (ret < 0) {
+ dev_err(dev, "RTC_STATUS read failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_write(palmas, PALMAS_RTC_BASE, PALMAS_RTC_STATUS_REG,
+ rtc_reg);
+ if (ret < 0) {
+ dev_err(dev, "RTC_STATUS write failed, err = %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static irqreturn_t palmas_rtc_interrupt(int irq, void *context)
+{
+ struct palmas_rtc *palmas_rtc = context;
+ struct device *dev = palmas_rtc->dev;
+ int ret;
+
+ ret = palmas_clear_interrupts(dev);
+ if (ret < 0) {
+ dev_err(dev, "RTC interrupt clear failed, err = %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ rtc_update_irq(palmas_rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static struct rtc_class_ops palmas_rtc_ops = {
+ .read_time = palmas_rtc_read_time,
+ .set_time = palmas_rtc_set_time,
+ .read_alarm = palmas_rtc_read_alarm,
+ .set_alarm = palmas_rtc_set_alarm,
+ .alarm_irq_enable = palmas_rtc_alarm_irq_enable,
+};
+
+static int palmas_rtc_probe(struct platform_device *pdev)
+{
+ struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
+ struct palmas_rtc *palmas_rtc = NULL;
+ int ret;
+
+ palmas_rtc = devm_kzalloc(&pdev->dev, sizeof(struct palmas_rtc),
+ GFP_KERNEL);
+ if (!palmas_rtc)
+ return -ENOMEM;
+
+ /* Clear pending interrupts */
+ ret = palmas_clear_interrupts(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clear RTC int failed, err = %d\n", ret);
+ return ret;
+ }
+
+ palmas_rtc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, palmas_rtc);
+
+ /* Start RTC */
+ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
+ PALMAS_RTC_CTRL_REG_STOP_RTC,
+ PALMAS_RTC_CTRL_REG_STOP_RTC);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "RTC_CTRL write failed, err = %d\n", ret);
+ return ret;
+ }
+
+ palmas_rtc->irq = platform_get_irq(pdev, 0);
+
+ palmas_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &palmas_rtc_ops, THIS_MODULE);
+ if (IS_ERR(palmas_rtc->rtc)) {
+ ret = PTR_ERR(palmas_rtc->rtc);
+ dev_err(&pdev->dev, "RTC register failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = request_threaded_irq(palmas_rtc->irq, NULL,
+ palmas_rtc_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT |
+ IRQF_EARLY_RESUME,
+ dev_name(&pdev->dev), palmas_rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "IRQ request failed, err = %d\n", ret);
+ rtc_device_unregister(palmas_rtc->rtc);
+ return ret;
+ }
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+ return 0;
+}
+
+static int palmas_rtc_remove(struct platform_device *pdev)
+{
+ struct palmas_rtc *palmas_rtc = platform_get_drvdata(pdev);
+
+ palmas_rtc_alarm_irq_enable(&pdev->dev, 0);
+ free_irq(palmas_rtc->irq, palmas_rtc);
+ rtc_device_unregister(palmas_rtc->rtc);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int palmas_rtc_suspend(struct device *dev)
+{
+ struct palmas_rtc *palmas_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(palmas_rtc->irq);
+ return 0;
+}
+
+static int palmas_rtc_resume(struct device *dev)
+{
+ struct palmas_rtc *palmas_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(palmas_rtc->irq);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops palmas_rtc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(palmas_rtc_suspend, palmas_rtc_resume)
+};
+
+static struct platform_driver palmas_rtc_driver = {
+ .probe = palmas_rtc_probe,
+ .remove = palmas_rtc_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "palmas-rtc",
+ .pm = &palmas_rtc_pm_ops,
+ },
+};
+
+module_platform_driver(palmas_rtc_driver);
+
+MODULE_ALIAS("platform:palmas_rtc");
+MODULE_DESCRIPTION("TI PALMAS series RTC driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index be05a645f99e..889e3160e701 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -23,6 +23,7 @@
#define REG_CONTROL3_PM_VDD (1 << 6) /* switch-over disabled */
#define REG_CONTROL3_PM_DSM (1 << 5) /* direct switching mode */
#define REG_CONTROL3_PM_MASK 0xe0
+#define REG_CONTROL3_BLF (1 << 2) /* battery low bit, read-only */
#define REG_SECONDS 0x03
#define REG_SECONDS_OS (1 << 7)
@@ -250,9 +251,39 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
return pcf8523_start_rtc(client);
}
+#ifdef CONFIG_RTC_INTF_DEV
+static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 value;
+ int ret = 0, err;
+
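+ /* RTC_VL_READ reports the battery-low (BLF) status from CONTROL3 to userspace */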
+ switch (cmd) {
+ case RTC_VL_READ:
+ err = pcf8523_read(client, REG_CONTROL3, &value);
+ if (err < 0)
+ return err;
+
+ if (value & REG_CONTROL3_BLF)
+ ret = 1;
+
+ if (copy_to_user((void __user *)arg, &ret, sizeof(int)))
+ return -EFAULT;
+
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#else
+#define pcf8523_rtc_ioctl NULL
+#endif
+
static const struct rtc_class_ops pcf8523_rtc_ops = {
.read_time = pcf8523_rtc_read_time,
.set_time = pcf8523_rtc_set_time,
+ .ioctl = pcf8523_rtc_ioctl,
};
static int pcf8523_probe(struct i2c_client *client,
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 7098ee89bd29..f7daf18a112e 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -181,7 +181,7 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
__func__, err, data[0], data[1]);
return -EIO;
}
- };
+ }
return 0;
}
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 3415b8f18555..5f97c61247d5 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -185,8 +185,8 @@ static int pcf8583_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (ctrl & (CTRL_STOP | CTRL_HOLD)) {
unsigned char new_ctrl = ctrl & ~(CTRL_STOP | CTRL_HOLD);
- printk(KERN_WARNING "RTC: resetting control %02x -> %02x\n",
- ctrl, new_ctrl);
+ dev_warn(dev, "resetting control %02x -> %02x\n",
+ ctrl, new_ctrl);
if ((err = pcf8583_set_ctrl(client, &new_ctrl)) < 0)
return err;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 81c5077feff3..8900ea784817 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -384,6 +384,8 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_irq;
}
+ device_init_wakeup(&adev->dev, 1);
+
return 0;
out_no_irq:
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index f771b2ee4b18..03c85ee719a7 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -62,6 +62,10 @@
#define RYxR_MONTH_S 5
#define RYxR_MONTH_MASK (0xf << RYxR_MONTH_S)
#define RYxR_DAY_MASK 0x1f
+#define RDxR_WOM_S 20
+#define RDxR_WOM_MASK (0x7 << RDxR_WOM_S)
+#define RDxR_DOW_S 17
+#define RDxR_DOW_MASK (0x7 << RDxR_DOW_S)
#define RDxR_HOUR_S 12
#define RDxR_HOUR_MASK (0x1f << RDxR_HOUR_S)
#define RDxR_MIN_S 6
@@ -91,6 +95,7 @@ struct pxa_rtc {
spinlock_t lock; /* Protects this structure */
};
+
static u32 ryxr_calc(struct rtc_time *tm)
{
return ((tm->tm_year + 1900) << RYxR_YEAR_S)
@@ -100,7 +105,10 @@ static u32 ryxr_calc(struct rtc_time *tm)
static u32 rdxr_calc(struct rtc_time *tm)
{
- return (tm->tm_hour << RDxR_HOUR_S) | (tm->tm_min << RDxR_MIN_S)
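+ /* encode week-of-month and day-of-week in addition to hour/min/sec */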
+ return ((((tm->tm_mday + 6) / 7) << RDxR_WOM_S) & RDxR_WOM_MASK)
+ | (((tm->tm_wday + 1) << RDxR_DOW_S) & RDxR_DOW_MASK)
+ | (tm->tm_hour << RDxR_HOUR_S)
+ | (tm->tm_min << RDxR_MIN_S)
| tm->tm_sec;
}
@@ -109,6 +117,7 @@ static void tm_calc(u32 rycr, u32 rdcr, struct rtc_time *tm)
tm->tm_year = ((rycr & RYxR_YEAR_MASK) >> RYxR_YEAR_S) - 1900;
tm->tm_mon = (((rycr & RYxR_MONTH_MASK) >> RYxR_MONTH_S)) - 1;
tm->tm_mday = (rycr & RYxR_DAY_MASK);
+ tm->tm_wday = ((rycr & RDxR_DOW_MASK) >> RDxR_DOW_S) - 1;
tm->tm_hour = (rdcr & RDxR_HOUR_MASK) >> RDxR_HOUR_S;
tm->tm_min = (rdcr & RDxR_MIN_MASK) >> RDxR_MIN_S;
tm->tm_sec = rdcr & RDxR_SEC_MASK;
@@ -300,8 +309,6 @@ static int pxa_rtc_proc(struct device *dev, struct seq_file *seq)
}
static const struct rtc_class_ops pxa_rtc_ops = {
- .open = pxa_rtc_open,
- .release = pxa_rtc_release,
.read_time = pxa_rtc_read_time,
.set_time = pxa_rtc_set_time,
.read_alarm = pxa_rtc_read_alarm,
@@ -341,7 +348,7 @@ static int __init pxa_rtc_probe(struct platform_device *pdev)
dev_err(dev, "No alarm IRQ resource defined\n");
goto err_ress;
}
-
+ pxa_rtc_open(dev);
ret = -ENOMEM;
pxa_rtc->base = ioremap(pxa_rtc->ress->start,
resource_size(pxa_rtc->ress));
@@ -387,6 +394,9 @@ static int __exit pxa_rtc_remove(struct platform_device *pdev)
{
struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ pxa_rtc_release(dev);
+
rtc_device_unregister(pxa_rtc->rtc);
spin_lock_irq(&pxa_rtc->lock);
@@ -444,10 +454,7 @@ static struct platform_driver pxa_rtc_driver = {
static int __init pxa_rtc_init(void)
{
- if (cpu_is_pxa27x() || cpu_is_pxa3xx())
- return platform_driver_probe(&pxa_rtc_driver, pxa_rtc_probe);
-
- return -ENODEV;
+ return platform_driver_probe(&pxa_rtc_driver, pxa_rtc_probe);
}
static void __exit pxa_rtc_exit(void)
diff --git a/drivers/rtc/rtc-rs5c313.c b/drivers/rtc/rtc-rs5c313.c
index d1aee793ecc8..d98ea5b759c8 100644
--- a/drivers/rtc/rtc-rs5c313.c
+++ b/drivers/rtc/rtc-rs5c313.c
@@ -39,6 +39,8 @@
* 1.13 Nobuhiro Iwamatsu: Updata driver.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/err.h>
#include <linux/rtc.h>
@@ -352,8 +354,7 @@ static void rs5c313_check_xstp_bit(void)
tm.tm_year = 2000 - 1900;
rs5c313_rtc_set_time(NULL, &tm);
- printk(KERN_ERR "RICHO RS5C313: invalid value, resetting to "
- "1 Jan 2000\n");
+ pr_err("invalid value, resetting to 1 Jan 2000\n");
}
RS5C313_CEDISABLE;
ndelay(700); /* CE:L */
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 76f565ae384d..581739f40097 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -311,8 +311,7 @@ static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
buf &= ~RS5C_CTRL1_AALE;
if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
- printk(KERN_WARNING "%s: can't update alarm\n",
- rs5c->rtc->name);
+ dev_warn(dev, "can't update alarm\n");
status = -EIO;
} else
rs5c->regs[RS5C_REG_CTRL1] = buf;
@@ -381,7 +380,7 @@ static int rs5c_set_alarm(struct device *dev, struct rtc_wkalrm *t)
addr = RS5C_ADDR(RS5C_REG_CTRL1);
buf[0] = rs5c->regs[RS5C_REG_CTRL1] & ~RS5C_CTRL1_AALE;
if (i2c_smbus_write_byte_data(client, addr, buf[0]) < 0) {
- pr_debug("%s: can't disable alarm\n", rs5c->rtc->name);
+ dev_dbg(dev, "can't disable alarm\n");
return -EIO;
}
rs5c->regs[RS5C_REG_CTRL1] = buf[0];
@@ -395,7 +394,7 @@ static int rs5c_set_alarm(struct device *dev, struct rtc_wkalrm *t)
for (i = 0; i < sizeof(buf); i++) {
addr = RS5C_ADDR(RS5C_REG_ALARM_A_MIN + i);
if (i2c_smbus_write_byte_data(client, addr, buf[i]) < 0) {
- pr_debug("%s: can't set alarm time\n", rs5c->rtc->name);
+ dev_dbg(dev, "can't set alarm time\n");
return -EIO;
}
}
@@ -405,8 +404,7 @@ static int rs5c_set_alarm(struct device *dev, struct rtc_wkalrm *t)
addr = RS5C_ADDR(RS5C_REG_CTRL1);
buf[0] = rs5c->regs[RS5C_REG_CTRL1] | RS5C_CTRL1_AALE;
if (i2c_smbus_write_byte_data(client, addr, buf[0]) < 0)
- printk(KERN_WARNING "%s: can't enable alarm\n",
- rs5c->rtc->name);
+ dev_warn(dev, "can't enable alarm\n");
rs5c->regs[RS5C_REG_CTRL1] = buf[0];
}
diff --git a/drivers/rtc/rtc-rx4581.c b/drivers/rtc/rtc-rx4581.c
new file mode 100644
index 000000000000..599ec73ec886
--- /dev/null
+++ b/drivers/rtc/rtc-rx4581.c
@@ -0,0 +1,314 @@
+/* drivers/rtc/rtc-rx4581.c
+ *
+ * written by Torben Hohn <torbenh@linutronix.de>
+ *
+ * Based on:
+ * drivers/rtc/rtc-max6902.c
+ *
+ * Copyright (C) 2006 8D Technologies inc.
+ * Copyright (C) 2004 Compulab Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for MAX6902 spi RTC
+ *
+ * and based on:
+ * drivers/rtc/rtc-rx8581.c
+ *
+ * An I2C driver for the Epson RX8581 RTC
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on: rtc-pcf8563.c (An I2C driver for the Philips PCF8563 RTC)
+ * Copyright 2005-06 Tower Technologies
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/spi/spi.h>
+#include <linux/bcd.h>
+
+#define RX4581_REG_SC 0x00 /* Second in BCD */
+#define RX4581_REG_MN 0x01 /* Minute in BCD */
+#define RX4581_REG_HR 0x02 /* Hour in BCD */
+#define RX4581_REG_DW 0x03 /* Day of Week */
+#define RX4581_REG_DM 0x04 /* Day of Month in BCD */
+#define RX4581_REG_MO 0x05 /* Month in BCD */
+#define RX4581_REG_YR 0x06 /* Year in BCD */
+#define RX4581_REG_RAM 0x07 /* RAM */
+#define RX4581_REG_AMN 0x08 /* Alarm Min in BCD*/
+#define RX4581_REG_AHR 0x09 /* Alarm Hour in BCD */
+#define RX4581_REG_ADM 0x0A
+#define RX4581_REG_ADW 0x0A
+#define RX4581_REG_TMR0 0x0B
+#define RX4581_REG_TMR1 0x0C
+#define RX4581_REG_EXT 0x0D /* Extension Register */
+#define RX4581_REG_FLAG 0x0E /* Flag Register */
+#define RX4581_REG_CTRL 0x0F /* Control Register */
+
+
+/* Flag Register bit definitions */
+#define RX4581_FLAG_UF 0x20 /* Update */
+#define RX4581_FLAG_TF 0x10 /* Timer */
+#define RX4581_FLAG_AF 0x08 /* Alarm */
+#define RX4581_FLAG_VLF 0x02 /* Voltage Low */
+
+/* Control Register bit definitions */
+#define RX4581_CTRL_UIE 0x20 /* Update Interrupt Enable */
+#define RX4581_CTRL_TIE 0x10 /* Timer Interrupt Enable */
+#define RX4581_CTRL_AIE 0x08 /* Alarm Interrupt Enable */
+#define RX4581_CTRL_STOP 0x02 /* STOP bit */
+#define RX4581_CTRL_RESET 0x01 /* RESET bit */
+
+static int rx4581_set_reg(struct device *dev, unsigned char address,
+ unsigned char data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char buf[2];
+
+ /* high nibble must be '0' to write */
+ buf[0] = address & 0x0f;
+ buf[1] = data;
+
+ return spi_write_then_read(spi, buf, 2, NULL, 0);
+}
+
+static int rx4581_get_reg(struct device *dev, unsigned char address,
+ unsigned char *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ /* Set MSB to indicate read */
+ *data = address | 0x80;
+
+ return spi_write_then_read(spi, data, 1, data, 1);
+}
+
+/*
+ * In the routines that deal directly with the rx8581 hardware, we use
+ * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
+ */
+static int rx4581_get_datetime(struct device *dev, struct rtc_time *tm)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char date[7];
+ unsigned char data;
+ int err;
+
+ /* First we ensure that the "update flag" is not set, then we read
+ * the time and date and re-read the "update flag". If the update
+ * flag has been set, the time has changed during the read, so we
+ * repeat the whole process.
+ */
+ err = rx4581_get_reg(dev, RX4581_REG_FLAG, &data);
+ if (err != 0) {
+ dev_err(dev, "Unable to read device flags\n");
+ return -EIO;
+ }
+
+ do {
+ /* If update flag set, clear it */
+ if (data & RX4581_FLAG_UF) {
+ err = rx4581_set_reg(dev,
+ RX4581_REG_FLAG, (data & ~RX4581_FLAG_UF));
+ if (err != 0) {
+ dev_err(dev, "Unable to write device "
+ "flags\n");
+ return -EIO;
+ }
+ }
+
+ /* Now read time and date */
+ date[0] = 0x80;
+ err = spi_write_then_read(spi, date, 1, date, 7);
+ if (err < 0) {
+ dev_err(dev, "Unable to read date\n");
+ return -EIO;
+ }
+
+ /* Check flag register */
+ err = rx4581_get_reg(dev, RX4581_REG_FLAG, &data);
+ if (err != 0) {
+ dev_err(dev, "Unable to read device flags\n");
+ return -EIO;
+ }
+ } while (data & RX4581_FLAG_UF);
+
+ if (data & RX4581_FLAG_VLF)
+ dev_info(dev,
+ "low voltage detected, date/time is not reliable.\n");
+
+ dev_dbg(dev,
+ "%s: raw data is sec=%02x, min=%02x, hr=%02x, "
+ "wday=%02x, mday=%02x, mon=%02x, year=%02x\n",
+ __func__,
+ date[0], date[1], date[2], date[3], date[4], date[5], date[6]);
+
+ tm->tm_sec = bcd2bin(date[RX4581_REG_SC] & 0x7F);
+ tm->tm_min = bcd2bin(date[RX4581_REG_MN] & 0x7F);
+ tm->tm_hour = bcd2bin(date[RX4581_REG_HR] & 0x3F); /* rtc hr 0-23 */
+ tm->tm_wday = ilog2(date[RX4581_REG_DW] & 0x7F);
+ tm->tm_mday = bcd2bin(date[RX4581_REG_DM] & 0x3F);
+ tm->tm_mon = bcd2bin(date[RX4581_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
+ tm->tm_year = bcd2bin(date[RX4581_REG_YR]);
+ if (tm->tm_year < 70)
+ tm->tm_year += 100; /* assume we are in 1970...2069 */
+
+
+ dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
+ "mday=%d, mon=%d, year=%d, wday=%d\n",
+ __func__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+
+ err = rtc_valid_tm(tm);
+ if (err < 0)
+ dev_err(dev, "retrieved date/time is not valid.\n");
+
+ return err;
+}
+
+static int rx4581_set_datetime(struct device *dev, struct rtc_time *tm)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ int err;
+ unsigned char buf[8], data;
+
+ dev_dbg(dev, "%s: secs=%d, mins=%d, hours=%d, "
+ "mday=%d, mon=%d, year=%d, wday=%d\n",
+ __func__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+
+ buf[0] = 0x00;
+ /* hours, minutes and seconds */
+ buf[RX4581_REG_SC+1] = bin2bcd(tm->tm_sec);
+ buf[RX4581_REG_MN+1] = bin2bcd(tm->tm_min);
+ buf[RX4581_REG_HR+1] = bin2bcd(tm->tm_hour);
+
+ buf[RX4581_REG_DM+1] = bin2bcd(tm->tm_mday);
+
+ /* month, 1 - 12 */
+ buf[RX4581_REG_MO+1] = bin2bcd(tm->tm_mon + 1);
+
+ /* year and century */
+ buf[RX4581_REG_YR+1] = bin2bcd(tm->tm_year % 100);
+ buf[RX4581_REG_DW+1] = (0x1 << tm->tm_wday);
+
+ /* Stop the clock */
+ err = rx4581_get_reg(dev, RX4581_REG_CTRL, &data);
+ if (err != 0) {
+ dev_err(dev, "Unable to read control register\n");
+ return -EIO;
+ }
+
+ err = rx4581_set_reg(dev, RX4581_REG_CTRL,
+ (data | RX4581_CTRL_STOP));
+ if (err != 0) {
+ dev_err(dev, "Unable to write control register\n");
+ return -EIO;
+ }
+
+ /* write register's data */
+ err = spi_write_then_read(spi, buf, 8, NULL, 0);
+ if (err != 0) {
+ dev_err(dev, "Unable to write to date registers\n");
+ return -EIO;
+ }
+
+ /* get VLF and clear it */
+ err = rx4581_get_reg(dev, RX4581_REG_FLAG, &data);
+ if (err != 0) {
+ dev_err(dev, "Unable to read flag register\n");
+ return -EIO;
+ }
+
+ err = rx4581_set_reg(dev, RX4581_REG_FLAG,
+ (data & ~(RX4581_FLAG_VLF)));
+ if (err != 0) {
+ dev_err(dev, "Unable to write flag register\n");
+ return -EIO;
+ }
+
+ /* Restart the clock */
+ err = rx4581_get_reg(dev, RX4581_REG_CTRL, &data);
+ if (err != 0) {
+ dev_err(dev, "Unable to read control register\n");
+ return -EIO;
+ }
+
+ err = rx4581_set_reg(dev, RX4581_REG_CTRL,
+ (data & ~(RX4581_CTRL_STOP)));
+ if (err != 0) {
+ dev_err(dev, "Unable to write control register\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct rtc_class_ops rx4581_rtc_ops = {
+ .read_time = rx4581_get_datetime,
+ .set_time = rx4581_set_datetime,
+};
+
+static int rx4581_probe(struct spi_device *spi)
+{
+ struct rtc_device *rtc;
+ unsigned char tmp;
+ int res;
+
+ res = rx4581_get_reg(&spi->dev, RX4581_REG_SC, &tmp);
+ if (res != 0)
+ return res;
+
+ rtc = rtc_device_register("rx4581",
+ &spi->dev, &rx4581_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ dev_set_drvdata(&spi->dev, rtc);
+ return 0;
+}
+
+static int rx4581_remove(struct spi_device *spi)
+{
+ struct rtc_device *rtc = dev_get_drvdata(&spi->dev);
+
+ rtc_device_unregister(rtc);
+ return 0;
+}
+
+static const struct spi_device_id rx4581_id[] = {
+ { "rx4581", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, rx4581_id);
+
+static struct spi_driver rx4581_driver = {
+ .driver = {
+ .name = "rtc-rx4581",
+ .owner = THIS_MODULE,
+ },
+ .probe = rx4581_probe,
+ .remove = rx4581_remove,
+ .id_table = rx4581_id,
+};
+
+module_spi_driver(rx4581_driver);
+
+MODULE_DESCRIPTION("rx4581 spi RTC driver");
+MODULE_AUTHOR("Torben Hohn");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:rtc-rx4581");
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 0c397ac3b132..fb994e9ddc15 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -115,7 +115,7 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
{
unsigned int tmp;
- pr_debug("%s: aie=%d\n", __func__, enabled);
+ dev_dbg(dev, "%s: aie=%d\n", __func__, enabled);
clk_enable(rtc_clk);
tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
@@ -203,7 +203,7 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
rtc_tm->tm_year += 100;
- pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
+ dev_dbg(dev, "read time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
@@ -218,7 +218,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
- pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
+ dev_dbg(dev, "set time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -259,7 +259,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 1 : 0;
- pr_debug("read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n",
+ dev_dbg(dev, "read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alm_en,
1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
@@ -310,7 +310,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned int alrm_en;
clk_enable(rtc_clk);
- pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
+ dev_dbg(dev, "s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alrm->enabled,
1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -333,7 +333,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
writeb(bin2bcd(tm->tm_hour), base + S3C2410_ALMHOUR);
}
- pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en);
+ dev_dbg(dev, "setting S3C2410_RTCALM to %08x\n", alrm_en);
writeb(alrm_en, base + S3C2410_RTCALM);
@@ -459,7 +459,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
int ret;
int tmp;
- pr_debug("%s: probe=%p\n", __func__, pdev);
+ dev_dbg(&pdev->dev, "%s: probe=%p\n", __func__, pdev);
/* find the IRQs */
@@ -475,7 +475,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
return s3c_rtc_alarmno;
}
- pr_debug("s3c2410_rtc: tick irq %d, alarm irq %d\n",
+ dev_dbg(&pdev->dev, "s3c2410_rtc: tick irq %d, alarm irq %d\n",
s3c_rtc_tickno, s3c_rtc_alarmno);
/* get the memory region */
@@ -504,7 +504,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_enable(pdev, 1);
- pr_debug("s3c2410_rtc: RTCCON=%02x\n",
+ dev_dbg(&pdev->dev, "s3c2410_rtc: RTCCON=%02x\n",
readw(s3c_rtc_base + S3C2410_RTCCON));
device_init_wakeup(&pdev->dev, 1);
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 50a5c4adee48..5ec5036df0bc 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -108,9 +108,6 @@ static int sa1100_rtc_open(struct device *dev)
struct rtc_device *rtc = info->rtc;
int ret;
- ret = clk_prepare_enable(info->clk);
- if (ret)
- goto fail_clk;
ret = request_irq(info->irq_1hz, sa1100_rtc_interrupt, 0, "rtc 1Hz", dev);
if (ret) {
dev_err(dev, "IRQ %d already in use.\n", info->irq_1hz);
@@ -130,7 +127,6 @@ static int sa1100_rtc_open(struct device *dev)
free_irq(info->irq_1hz, dev);
fail_ui:
clk_disable_unprepare(info->clk);
- fail_clk:
return ret;
}
@@ -144,7 +140,6 @@ static void sa1100_rtc_release(struct device *dev)
free_irq(info->irq_alarm, dev);
free_irq(info->irq_1hz, dev);
- clk_disable_unprepare(info->clk);
}
static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
@@ -253,6 +248,9 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
spin_lock_init(&info->lock);
platform_set_drvdata(pdev, info);
+ ret = clk_prepare_enable(info->clk);
+ if (ret)
+ goto err_enable_clk;
/*
* According to the manual we should be able to let RTTR be zero
* and then a default divisor for a 32.768 kHz clock is used.
@@ -305,6 +303,8 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
return 0;
err_dev:
+ clk_disable_unprepare(info->clk);
+err_enable_clk:
platform_set_drvdata(pdev, NULL);
clk_put(info->clk);
err_clk:
@@ -318,6 +318,7 @@ static int sa1100_rtc_remove(struct platform_device *pdev)
if (info) {
rtc_device_unregister(info->rtc);
+ clk_disable_unprepare(info->clk);
clk_put(info->clk);
platform_set_drvdata(pdev, NULL);
kfree(info);
@@ -349,12 +350,14 @@ static const struct dev_pm_ops sa1100_rtc_pm_ops = {
};
#endif
+#ifdef CONFIG_OF
static struct of_device_id sa1100_rtc_dt_ids[] = {
{ .compatible = "mrvl,sa1100-rtc", },
{ .compatible = "mrvl,mmp-rtc", },
{}
};
MODULE_DEVICE_TABLE(of, sa1100_rtc_dt_ids);
+#endif
static struct platform_driver sa1100_rtc_driver = {
.probe = sa1100_rtc_probe,
@@ -364,7 +367,7 @@ static struct platform_driver sa1100_rtc_driver = {
#ifdef CONFIG_PM
.pm = &sa1100_rtc_pm_ops,
#endif
- .of_match_table = sa1100_rtc_dt_ids,
+ .of_match_table = of_match_ptr(sa1100_rtc_dt_ids),
},
};
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 40662e9dc0ab..f7d90703db5e 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -338,7 +338,7 @@ static struct platform_driver snvs_rtc_driver = {
.name = "snvs_rtc",
.owner = THIS_MODULE,
.pm = &snvs_rtc_pm_ops,
- .of_match_table = snvs_dt_ids,
+ .of_match_table = of_match_ptr(snvs_dt_ids),
},
.probe = snvs_rtc_probe,
.remove = snvs_rtc_remove,
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 739ef55694f4..98f0d3c30738 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -26,6 +26,9 @@
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/stmp_device.h>
+#include <linux/stmp3xxx_rtc_wdt.h>
#include <mach/common.h>
@@ -35,6 +38,7 @@
#define STMP3XXX_RTC_CTRL_ALARM_IRQ_EN 0x00000001
#define STMP3XXX_RTC_CTRL_ONEMSEC_IRQ_EN 0x00000002
#define STMP3XXX_RTC_CTRL_ALARM_IRQ 0x00000004
+#define STMP3XXX_RTC_CTRL_WATCHDOGEN 0x00000010
#define STMP3XXX_RTC_STAT 0x10
#define STMP3XXX_RTC_STAT_STALE_SHIFT 16
@@ -44,6 +48,8 @@
#define STMP3XXX_RTC_ALARM 0x40
+#define STMP3XXX_RTC_WATCHDOG 0x50
+
#define STMP3XXX_RTC_PERSISTENT0 0x60
#define STMP3XXX_RTC_PERSISTENT0_SET 0x64
#define STMP3XXX_RTC_PERSISTENT0_CLR 0x68
@@ -51,12 +57,70 @@
#define STMP3XXX_RTC_PERSISTENT0_ALARM_EN 0x00000004
#define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE 0x00000080
+#define STMP3XXX_RTC_PERSISTENT1 0x70
+/* missing bitmask in headers */
+#define STMP3XXX_RTC_PERSISTENT1_FORCE_UPDATER 0x80000000
+
struct stmp3xxx_rtc_data {
struct rtc_device *rtc;
void __iomem *io;
int irq_alarm;
};
+#if IS_ENABLED(CONFIG_STMP3XXX_RTC_WATCHDOG)
+/**
+ * stmp3xxx_wdt_set_timeout - configure the watchdog inside the STMP3xxx RTC
+ * @dev: the parent device of the watchdog (= the RTC)
+ * @timeout: the desired value for the timeout register of the watchdog.
+ * 0 disables the watchdog
+ *
+ * The watchdog needs one register and two bits which are in the RTC domain.
+ * To handle the resource conflict, the RTC driver will create another
+ * platform_device for the watchdog driver as a child of the RTC device.
+ * The watchdog driver is passed the below accessor function via platform_data
+ * to configure the watchdog. Locking is not needed because accessing SET/CLR
+ * registers is atomic.
+ */
+
+static void stmp3xxx_wdt_set_timeout(struct device *dev, u32 timeout)
+{
+ struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
+
+ if (timeout) {
+ writel(timeout, rtc_data->io + STMP3XXX_RTC_WATCHDOG);
+ writel(STMP3XXX_RTC_CTRL_WATCHDOGEN,
+ rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_SET);
+ writel(STMP3XXX_RTC_PERSISTENT1_FORCE_UPDATER,
+ rtc_data->io + STMP3XXX_RTC_PERSISTENT1 + STMP_OFFSET_REG_SET);
+ } else {
+ writel(STMP3XXX_RTC_CTRL_WATCHDOGEN,
+ rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_CLR);
+ writel(STMP3XXX_RTC_PERSISTENT1_FORCE_UPDATER,
+ rtc_data->io + STMP3XXX_RTC_PERSISTENT1 + STMP_OFFSET_REG_CLR);
+ }
+}
+
+static struct stmp3xxx_wdt_pdata wdt_pdata = {
+ .wdt_set_timeout = stmp3xxx_wdt_set_timeout,
+};
+
+static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
+{
+ struct platform_device *wdt_pdev =
+ platform_device_alloc("stmp3xxx_rtc_wdt", rtc_pdev->id);
+
+ if (wdt_pdev) {
+ wdt_pdev->dev.parent = &rtc_pdev->dev;
+ wdt_pdev->dev.platform_data = &wdt_pdata;
+ platform_device_add(wdt_pdev);
+ }
+}
+#else
+static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
+{
+}
+#endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */
+
static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
{
/*
@@ -232,6 +296,7 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
goto out_irq_alarm;
}
+ stmp3xxx_wdt_register(pdev);
return 0;
out_irq_alarm:
@@ -280,7 +345,7 @@ static struct platform_driver stmp3xxx_rtcdrv = {
.driver = {
.name = "stmp3xxx-rtc",
.owner = THIS_MODULE,
- .of_match_table = rtc_dt_ids,
+ .of_match_table = of_match_ptr(rtc_dt_ids),
},
};
diff --git a/drivers/rtc/rtc-sun4v.c b/drivers/rtc/rtc-sun4v.c
index 5b2261052a65..59b5c2dcb58c 100644
--- a/drivers/rtc/rtc-sun4v.c
+++ b/drivers/rtc/rtc-sun4v.c
@@ -3,6 +3,8 @@
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -26,10 +28,10 @@ retry:
udelay(100);
goto retry;
}
- printk(KERN_WARNING "SUN4V: tod_get() timed out.\n");
+ pr_warn("tod_get() timed out.\n");
return 0;
}
- printk(KERN_WARNING "SUN4V: tod_get() not supported.\n");
+ pr_warn("tod_get() not supported.\n");
return 0;
}
@@ -53,10 +55,10 @@ retry:
udelay(100);
goto retry;
}
- printk(KERN_WARNING "SUN4V: tod_set() timed out.\n");
+ pr_warn("tod_set() timed out.\n");
return -EAGAIN;
}
- printk(KERN_WARNING "SUN4V: tod_set() not supported.\n");
+ pr_warn("tod_set() not supported.\n");
return -EOPNOTSUPP;
}
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 70f61b8e9e6f..aab4e8c93622 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -282,7 +282,8 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
goto fail_rtc_register;
}
- ret = request_threaded_irq(rtc->irq, NULL, tps6586x_rtc_irq,
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ tps6586x_rtc_irq,
IRQF_ONESHOT | IRQF_EARLY_RESUME,
dev_name(&pdev->dev), rtc);
if (ret < 0) {
@@ -311,7 +312,6 @@ static int tps6586x_rtc_remove(struct platform_device *pdev)
tps6586x_update(tps_dev, RTC_CTRL, 0,
RTC_ENABLE | OSC_SRC_SEL | PRE_BYPASS | CL_SEL_MASK);
rtc_device_unregister(rtc->rtc);
- free_irq(rtc->irq, rtc);
return 0;
}
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index e5fef141a0e2..8bd8115329b5 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -22,13 +22,13 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/mfd/tps65910.h>
struct tps65910_rtc {
struct rtc_device *rtc;
- /* To store the list of enabled interrupts */
- u32 irqstat;
+ int irq;
};
/* Total number of RTC registers needed to set time*/
@@ -267,13 +267,14 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
}
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- tps65910_rtc_interrupt, IRQF_TRIGGER_LOW,
+ tps65910_rtc_interrupt, IRQF_TRIGGER_LOW | IRQF_EARLY_RESUME,
dev_name(&pdev->dev), &pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ is not free.\n");
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ tps_rtc->irq = irq;
+ device_set_wakeup_capable(&pdev->dev, 1);
tps_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
&tps65910_rtc_ops, THIS_MODULE);
@@ -304,49 +305,36 @@ static int tps65910_rtc_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-
static int tps65910_rtc_suspend(struct device *dev)
{
- struct tps65910 *tps = dev_get_drvdata(dev->parent);
- u8 alarm = TPS65910_RTC_INTERRUPTS_IT_ALARM;
- int ret;
-
- /* Store current list of enabled interrupts*/
- ret = regmap_read(tps->regmap, TPS65910_RTC_INTERRUPTS,
- &tps->rtc->irqstat);
- if (ret < 0)
- return ret;
+ struct tps65910_rtc *tps_rtc = dev_get_drvdata(dev);
- /* Enable RTC ALARM interrupt only */
- return regmap_write(tps->regmap, TPS65910_RTC_INTERRUPTS, alarm);
+ if (device_may_wakeup(dev))
+ enable_irq_wake(tps_rtc->irq);
+ return 0;
}
static int tps65910_rtc_resume(struct device *dev)
{
- struct tps65910 *tps = dev_get_drvdata(dev->parent);
+ struct tps65910_rtc *tps_rtc = dev_get_drvdata(dev);
- /* Restore list of enabled interrupts before suspend */
- return regmap_write(tps->regmap, TPS65910_RTC_INTERRUPTS,
- tps->rtc->irqstat);
+ if (device_may_wakeup(dev))
+ disable_irq_wake(tps_rtc->irq);
+ return 0;
}
+#endif
static const struct dev_pm_ops tps65910_rtc_pm_ops = {
- .suspend = tps65910_rtc_suspend,
- .resume = tps65910_rtc_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(tps65910_rtc_suspend, tps65910_rtc_resume)
};
-#define DEV_PM_OPS (&tps65910_rtc_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif
-
static struct platform_driver tps65910_rtc_driver = {
.probe = tps65910_rtc_probe,
.remove = tps65910_rtc_remove,
.driver = {
.owner = THIS_MODULE,
.name = "tps65910-rtc",
- .pm = DEV_PM_OPS,
+ .pm = &tps65910_rtc_pm_ops,
},
};
diff --git a/drivers/rtc/rtc-tps80031.c b/drivers/rtc/rtc-tps80031.c
new file mode 100644
index 000000000000..9aaf8aaebae9
--- /dev/null
+++ b/drivers/rtc/rtc-tps80031.c
@@ -0,0 +1,349 @@
+/*
+ * rtc-tps80031.c -- TI TPS80031/TPS80032 RTC driver
+ *
+ * RTC driver for TI TPS80031/TPS80032 Fully Integrated
+ * Power Management with Power Path and Battery Charger
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/bcd.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/tps80031.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define ENABLE_ALARM_INT 0x08
+#define ALARM_INT_STATUS 0x40
+
+/*
+ * Setting the STOP_RTC bit to 1 runs the RTC and
+ * setting it to 0 freezes the RTC.
+ */
+#define STOP_RTC 0x1
+
+/* Power on reset Values of RTC registers */
+#define TPS80031_RTC_POR_YEAR 0
+#define TPS80031_RTC_POR_MONTH 1
+#define TPS80031_RTC_POR_DAY 1
+
+/* Numbers of registers for time and alarms */
+#define TPS80031_RTC_TIME_NUM_REGS 7
+#define TPS80031_RTC_ALARM_NUM_REGS 6
+
+/*
+ * The PMU RTC has only 2 nibbles to store year information, so use an
+ * offset of 100 to set the base year to 2000 for this driver.
+ */
+#define RTC_YEAR_OFFSET 100
+
+struct tps80031_rtc {
+ struct rtc_device *rtc;
+ int irq;
+};
+
+static int tps80031_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ u8 buff[TPS80031_RTC_TIME_NUM_REGS];
+ int ret;
+
+ ret = tps80031_reads(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_SECONDS_REG, TPS80031_RTC_TIME_NUM_REGS, buff);
+ if (ret < 0) {
+ dev_err(dev, "reading RTC_SECONDS_REG failed, err = %d\n", ret);
+ return ret;
+ }
+
+ tm->tm_sec = bcd2bin(buff[0]);
+ tm->tm_min = bcd2bin(buff[1]);
+ tm->tm_hour = bcd2bin(buff[2]);
+ tm->tm_mday = bcd2bin(buff[3]);
+ tm->tm_mon = bcd2bin(buff[4]) - 1;
+ tm->tm_year = bcd2bin(buff[5]) + RTC_YEAR_OFFSET;
+ tm->tm_wday = bcd2bin(buff[6]);
+ return 0;
+}
+
+static int tps80031_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ u8 buff[7];
+ int ret;
+
+ buff[0] = bin2bcd(tm->tm_sec);
+ buff[1] = bin2bcd(tm->tm_min);
+ buff[2] = bin2bcd(tm->tm_hour);
+ buff[3] = bin2bcd(tm->tm_mday);
+ buff[4] = bin2bcd(tm->tm_mon + 1);
+ buff[5] = bin2bcd(tm->tm_year % RTC_YEAR_OFFSET);
+ buff[6] = bin2bcd(tm->tm_wday);
+
+ /* Stop RTC while updating the RTC time registers */
+ ret = tps80031_clr_bits(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_CTRL_REG, STOP_RTC);
+ if (ret < 0) {
+ dev_err(dev->parent, "Stop RTC failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = tps80031_writes(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_SECONDS_REG,
+ TPS80031_RTC_TIME_NUM_REGS, buff);
+ if (ret < 0) {
+ dev_err(dev, "writing RTC_SECONDS_REG failed, err %d\n", ret);
+ return ret;
+ }
+
+ ret = tps80031_set_bits(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_CTRL_REG, STOP_RTC);
+ if (ret < 0)
+ dev_err(dev->parent, "Start RTC failed, err = %d\n", ret);
+ return ret;
+}
+
+static int tps80031_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enable)
+{
+ int ret;
+
+ if (enable)
+ ret = tps80031_set_bits(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_INTERRUPTS_REG, ENABLE_ALARM_INT);
+ else
+ ret = tps80031_clr_bits(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_INTERRUPTS_REG, ENABLE_ALARM_INT);
+ if (ret < 0) {
+ dev_err(dev, "Update on RTC_INT failed, err = %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int tps80031_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ u8 buff[TPS80031_RTC_ALARM_NUM_REGS];
+ int ret;
+
+ buff[0] = bin2bcd(alrm->time.tm_sec);
+ buff[1] = bin2bcd(alrm->time.tm_min);
+ buff[2] = bin2bcd(alrm->time.tm_hour);
+ buff[3] = bin2bcd(alrm->time.tm_mday);
+ buff[4] = bin2bcd(alrm->time.tm_mon + 1);
+ buff[5] = bin2bcd(alrm->time.tm_year % RTC_YEAR_OFFSET);
+ ret = tps80031_writes(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_ALARM_SECONDS_REG,
+ TPS80031_RTC_ALARM_NUM_REGS, buff);
+ if (ret < 0) {
+ dev_err(dev, "Writing RTC_ALARM failed, err %d\n", ret);
+ return ret;
+ }
+ return tps80031_rtc_alarm_irq_enable(dev, alrm->enabled);
+}
+
+static int tps80031_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ u8 buff[6];
+ int ret;
+
+ ret = tps80031_reads(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_ALARM_SECONDS_REG,
+ TPS80031_RTC_ALARM_NUM_REGS, buff);
+ if (ret < 0) {
+ dev_err(dev->parent,
+ "reading RTC_ALARM failed, err = %d\n", ret);
+ return ret;
+ }
+
+ alrm->time.tm_sec = bcd2bin(buff[0]);
+ alrm->time.tm_min = bcd2bin(buff[1]);
+ alrm->time.tm_hour = bcd2bin(buff[2]);
+ alrm->time.tm_mday = bcd2bin(buff[3]);
+ alrm->time.tm_mon = bcd2bin(buff[4]) - 1;
+ alrm->time.tm_year = bcd2bin(buff[5]) + RTC_YEAR_OFFSET;
+ return 0;
+}
+
+static int clear_alarm_int_status(struct device *dev, struct tps80031_rtc *rtc)
+{
+ int ret;
+ u8 buf;
+
+ /*
+ * As per the datasheet, a dummy read of the RTC_STATUS_REG register
+ * is necessary before each I2C read in order to update the status
+ * register value.
+ */
+ ret = tps80031_read(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_STATUS_REG, &buf);
+ if (ret < 0) {
+ dev_err(dev, "reading RTC_STATUS failed. err = %d\n", ret);
+ return ret;
+ }
+
+ /* Clear alarm status bits. */
+ ret = tps80031_set_bits(dev->parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_STATUS_REG, ALARM_INT_STATUS);
+ if (ret < 0) {
+ dev_err(dev, "clear Alarm INT failed, err = %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static irqreturn_t tps80031_rtc_irq(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tps80031_rtc *rtc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clear_alarm_int_status(dev, rtc);
+ if (ret < 0)
+ return IRQ_NONE;
+
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops tps80031_rtc_ops = {
+ .read_time = tps80031_rtc_read_time,
+ .set_time = tps80031_rtc_set_time,
+ .set_alarm = tps80031_rtc_set_alarm,
+ .read_alarm = tps80031_rtc_read_alarm,
+ .alarm_irq_enable = tps80031_rtc_alarm_irq_enable,
+};
+
+static int tps80031_rtc_probe(struct platform_device *pdev)
+{
+ struct tps80031_rtc *rtc;
+ struct rtc_time tm;
+ int ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->irq = platform_get_irq(pdev, 0);
+ platform_set_drvdata(pdev, rtc);
+
+ /* Start RTC */
+ ret = tps80031_set_bits(pdev->dev.parent, TPS80031_SLAVE_ID1,
+ TPS80031_RTC_CTRL_REG, STOP_RTC);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to start RTC. err = %d\n", ret);
+ return ret;
+ }
+
+ /* If the RTC has POR values, set the time to 1 Jan 2000 */
+ tps80031_rtc_read_time(&pdev->dev, &tm);
+ if ((tm.tm_year == RTC_YEAR_OFFSET + TPS80031_RTC_POR_YEAR) &&
+ (tm.tm_mon == (TPS80031_RTC_POR_MONTH - 1)) &&
+ (tm.tm_mday == TPS80031_RTC_POR_DAY)) {
+ tm.tm_year = RTC_YEAR_OFFSET;
+ tm.tm_mday = 1;
+ tm.tm_mon = 0;
+ ret = tps80031_rtc_set_time(&pdev->dev, &tm);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "RTC set time failed, err = %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* Clear alarm interrupt status if it is there */
+ ret = clear_alarm_int_status(&pdev->dev, rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clear alarm int failed, err = %d\n", ret);
+ return ret;
+ }
+
+ rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &tps80031_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc)) {
+ ret = PTR_ERR(rtc->rtc);
+ dev_err(&pdev->dev, "RTC registration failed, err %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ tps80031_rtc_irq,
+ IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ dev_name(&pdev->dev), rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request IRQ:%d failed, err = %d\n",
+ rtc->irq, ret);
+ rtc_device_unregister(rtc->rtc);
+ return ret;
+ }
+ device_set_wakeup_capable(&pdev->dev, 1);
+ return 0;
+}
+
+static int tps80031_rtc_remove(struct platform_device *pdev)
+{
+ struct tps80031_rtc *rtc = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(rtc->rtc);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tps80031_rtc_suspend(struct device *dev)
+{
+ struct tps80031_rtc *rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(rtc->irq);
+ return 0;
+}
+
+static int tps80031_rtc_resume(struct device *dev)
+{
+ struct tps80031_rtc *rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(rtc->irq);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tps80031_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tps80031_rtc_suspend, tps80031_rtc_resume)
+};
+
+static struct platform_driver tps80031_rtc_driver = {
+ .driver = {
+ .name = "tps80031-rtc",
+ .owner = THIS_MODULE,
+ .pm = &tps80031_pm_ops,
+ },
+ .probe = tps80031_rtc_probe,
+ .remove = tps80031_rtc_remove,
+};
+
+module_platform_driver(tps80031_rtc_driver);
+
+MODULE_ALIAS("platform:tps80031-rtc");
+MODULE_DESCRIPTION("TI TPS80031/TPS80032 RTC driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index ccd4ad370b32..8bc6c80b184c 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -27,6 +27,7 @@
#include <linux/bcd.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/i2c/twl.h>
@@ -588,11 +589,14 @@ static int twl_rtc_resume(struct platform_device *pdev)
#define twl_rtc_resume NULL
#endif
+#ifdef CONFIG_OF
static const struct of_device_id twl_rtc_of_match[] = {
{.compatible = "ti,twl4030-rtc", },
{ },
};
MODULE_DEVICE_TABLE(of, twl_rtc_of_match);
+#endif
+
MODULE_ALIAS("platform:twl_rtc");
static struct platform_driver twl4030rtc_driver = {
@@ -604,7 +608,7 @@ static struct platform_driver twl4030rtc_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "twl_rtc",
- .of_match_table = twl_rtc_of_match,
+ .of_match_table = of_match_ptr(twl_rtc_of_match),
},
};
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 6c3774cf5a24..f91be04b9050 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -352,7 +352,7 @@ static int rtc_probe(struct platform_device *pdev)
disable_irq(aie_irq);
disable_irq(pie_irq);
- printk(KERN_INFO "rtc: Real Time Clock of NEC VR4100 series\n");
+ dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n");
return 0;
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index 2730533e2d2d..a000bc0a8bff 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -231,20 +231,21 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
return -ENXIO;
}
- vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
- resource_size(vt8500_rtc->res),
- "vt8500-rtc");
+ vt8500_rtc->res = devm_request_mem_region(&pdev->dev,
+ vt8500_rtc->res->start,
+ resource_size(vt8500_rtc->res),
+ "vt8500-rtc");
if (vt8500_rtc->res == NULL) {
dev_err(&pdev->dev, "failed to request I/O memory\n");
return -EBUSY;
}
- vt8500_rtc->regbase = ioremap(vt8500_rtc->res->start,
+ vt8500_rtc->regbase = devm_ioremap(&pdev->dev, vt8500_rtc->res->start,
resource_size(vt8500_rtc->res));
if (!vt8500_rtc->regbase) {
dev_err(&pdev->dev, "Unable to map RTC I/O memory\n");
ret = -EBUSY;
- goto err_release;
+ goto err_return;
}
/* Enable RTC and set it to 24-hour mode */
@@ -257,11 +258,11 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
ret = PTR_ERR(vt8500_rtc->rtc);
dev_err(&pdev->dev,
"Failed to register RTC device -> %d\n", ret);
- goto err_unmap;
+ goto err_return;
}
- ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0,
- "rtc alarm", vt8500_rtc);
+ ret = devm_request_irq(&pdev->dev, vt8500_rtc->irq_alarm,
+ vt8500_rtc_irq, 0, "rtc alarm", vt8500_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "can't get irq %i, err %d\n",
vt8500_rtc->irq_alarm, ret);
@@ -272,11 +273,7 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
err_unreg:
rtc_device_unregister(vt8500_rtc->rtc);
-err_unmap:
- iounmap(vt8500_rtc->regbase);
-err_release:
- release_mem_region(vt8500_rtc->res->start,
- resource_size(vt8500_rtc->res));
+err_return:
return ret;
}
@@ -284,15 +281,10 @@ static int vt8500_rtc_remove(struct platform_device *pdev)
{
struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev);
- free_irq(vt8500_rtc->irq_alarm, vt8500_rtc);
-
rtc_device_unregister(vt8500_rtc->rtc);
/* Disable alarm matching */
writel(0, vt8500_rtc->regbase + VT8500_RTC_IS);
- iounmap(vt8500_rtc->regbase);
- release_mem_region(vt8500_rtc->res->start,
- resource_size(vt8500_rtc->res));
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 1b0affbe2659..2f0ac7b30a0c 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -443,9 +443,10 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
goto err;
}
- ret = request_threaded_irq(alm_irq, NULL, wm831x_alm_irq,
- IRQF_TRIGGER_RISING, "RTC alarm",
- wm831x_rtc);
+ ret = devm_request_threaded_irq(&pdev->dev, alm_irq, NULL,
+ wm831x_alm_irq,
+ IRQF_TRIGGER_RISING, "RTC alarm",
+ wm831x_rtc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
alm_irq, ret);
@@ -462,9 +463,7 @@ err:
static int wm831x_rtc_remove(struct platform_device *pdev)
{
struct wm831x_rtc *wm831x_rtc = platform_get_drvdata(pdev);
- int alm_irq = platform_get_irq_byname(pdev, "ALM");
- free_irq(alm_irq, wm831x_rtc);
rtc_device_unregister(wm831x_rtc->rtc);
return 0;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 29225e1c159c..f1b7fdc58a5f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1352,7 +1352,7 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
switch (rc) {
case 0: /* termination successful */
cqr->status = DASD_CQR_CLEAR_PENDING;
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
cqr->starttime = 0;
DBF_DEV_EVENT(DBF_DEBUG, device,
"terminate cqr %p successful",
@@ -1420,7 +1420,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
cqr->status = DASD_CQR_ERROR;
return -EIO;
}
- cqr->startclk = get_clock();
+ cqr->startclk = get_tod_clock();
cqr->starttime = jiffies;
cqr->retries--;
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
@@ -1623,7 +1623,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
- now = get_clock();
+ now = get_tod_clock();
cqr = (struct dasd_ccw_req *) intparm;
/* check for conditions that should be handled immediately */
if (!cqr ||
@@ -1963,7 +1963,7 @@ int dasd_flush_device_queue(struct dasd_device *device)
}
break;
case DASD_CQR_QUEUED:
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
cqr->status = DASD_CQR_CLEARED;
break;
default: /* no need to modify the others */
@@ -2210,7 +2210,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
wait_event(generic_waitq, _wait_for_wakeup(cqr));
}
- maincqr->endclk = get_clock();
+ maincqr->endclk = get_tod_clock();
if ((maincqr->status != DASD_CQR_DONE) &&
(maincqr->intrc != -ERESTARTSYS))
dasd_log_sense(maincqr, &maincqr->irb);
@@ -2340,7 +2340,7 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
"Cancelling request %p failed with rc=%d\n",
cqr, rc);
} else {
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
}
break;
default: /* already finished or clear pending - do nothing */
@@ -2568,7 +2568,7 @@ restart:
}
/* Rechain finished requests to final queue */
- cqr->endclk = get_clock();
+ cqr->endclk = get_tod_clock();
list_move_tail(&cqr->blocklist, final_queue);
}
}
@@ -2711,7 +2711,7 @@ restart_cb:
}
/* call the callback function */
spin_lock_irq(&block->request_queue_lock);
- cqr->endclk = get_clock();
+ cqr->endclk = get_tod_clock();
list_del_init(&cqr->blocklist);
__dasd_cleanup_cqr(cqr);
spin_unlock_irq(&block->request_queue_lock);
@@ -3042,12 +3042,15 @@ void dasd_generic_remove(struct ccw_device *cdev)
cdev->handler = NULL;
device = dasd_device_from_cdev(cdev);
- if (IS_ERR(device))
+ if (IS_ERR(device)) {
+ dasd_remove_sysfs_files(cdev);
return;
+ }
if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/* Already doing offline processing */
dasd_put_device(device);
+ dasd_remove_sysfs_files(cdev);
return;
}
/*
@@ -3504,7 +3507,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
cqr->memdev = device;
cqr->expires = 10*HZ;
cqr->retries = 256;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index f8212d54013a..d26134713682 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -229,7 +229,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
dctl_cqr->expires = 5 * 60 * HZ;
dctl_cqr->retries = 2;
- dctl_cqr->buildclk = get_clock();
+ dctl_cqr->buildclk = get_tod_clock();
dctl_cqr->status = DASD_CQR_FILLED;
@@ -1719,7 +1719,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
erp->magic = default_erp->magic;
erp->expires = default_erp->expires;
erp->retries = 256;
- erp->buildclk = get_clock();
+ erp->buildclk = get_tod_clock();
erp->status = DASD_CQR_FILLED;
/* remove the default erp */
@@ -2322,7 +2322,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
DBF_DEV_EVENT(DBF_ERR, device, "%s",
"Unable to allocate ERP request");
cqr->status = DASD_CQR_FAILED;
- cqr->stopclk = get_clock ();
+ cqr->stopclk = get_tod_clock();
} else {
DBF_DEV_EVENT(DBF_ERR, device,
"Unable to allocate ERP request "
@@ -2364,7 +2364,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
erp->magic = cqr->magic;
erp->expires = cqr->expires;
erp->retries = 256;
- erp->buildclk = get_clock();
+ erp->buildclk = get_tod_clock();
erp->status = DASD_CQR_FILLED;
return erp;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 6b556995bb33..a2597e683e79 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -448,7 +448,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
ccw->count = sizeof(*(lcu->uac));
ccw->cda = (__u32)(addr_t) lcu->uac;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* need to unset flag here to detect race with summary unit check */
@@ -733,7 +733,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
cqr->memdev = device;
cqr->block = NULL;
cqr->expires = 5 * HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 704488d0f819..cc0603358522 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -184,14 +184,14 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
private->iob.bio_list = dreq->bio;
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
- cqr->startclk = get_clock();
+ cqr->startclk = get_tod_clock();
cqr->starttime = jiffies;
cqr->retries--;
rc = dia250(&private->iob, RW_BIO);
switch (rc) {
case 0: /* Synchronous I/O finished successfully */
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
cqr->status = DASD_CQR_SUCCESS;
/* Indicate to calling function that only a dasd_schedule_bh()
and no timer is needed */
@@ -222,7 +222,7 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
mdsk_term_io(device);
mdsk_init_io(device, device->block->bp_block, 0, NULL);
cqr->status = DASD_CQR_CLEAR_PENDING;
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
dasd_schedule_device_bh(device);
return 0;
}
@@ -276,7 +276,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
return;
}
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
expires = 0;
if ((ext_code.subcode & 0xff) == 0) {
@@ -556,7 +556,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
}
}
cqr->retries = DIAG_MAX_RETRIES;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index e37bc1620d14..33f26bfa62f2 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -862,7 +862,7 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
cqr->expires = 10*HZ;
cqr->lpm = lpm;
cqr->retries = 256;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
@@ -1449,7 +1449,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
ccw->count = sizeof(struct dasd_rssd_features);
ccw->cda = (__u32)(addr_t) features;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on(cqr);
if (rc == 0) {
@@ -1501,7 +1501,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10*HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
@@ -1841,7 +1841,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
cqr->startdev = device;
cqr->memdev = device;
cqr->retries = 255;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
@@ -2241,7 +2241,7 @@ dasd_eckd_format_device(struct dasd_device * device,
fcp->startdev = device;
fcp->memdev = device;
fcp->retries = 256;
- fcp->buildclk = get_clock();
+ fcp->buildclk = get_tod_clock();
fcp->status = DASD_CQR_FILLED;
return fcp;
}
@@ -2530,7 +2530,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = startdev->path_data.ppm;
cqr->retries = 256;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
@@ -2705,7 +2705,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = startdev->path_data.ppm;
cqr->retries = 256;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
@@ -2998,7 +2998,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = startdev->path_data.ppm;
cqr->retries = 256;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
out_error:
@@ -3201,7 +3201,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
cqr->expires = startdev->default_expires * HZ;
cqr->lpm = startdev->path_data.ppm;
cqr->retries = 256;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
@@ -3402,7 +3402,7 @@ dasd_eckd_release(struct dasd_device *device)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 2; /* set retry counter to enable basic ERP */
cqr->expires = 2 * HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
@@ -3457,7 +3457,7 @@ dasd_eckd_reserve(struct dasd_device *device)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 2; /* set retry counter to enable basic ERP */
cqr->expires = 2 * HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
@@ -3511,7 +3511,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 2; /* set retry counter to enable basic ERP */
cqr->expires = 2 * HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
@@ -3572,7 +3572,7 @@ static int dasd_eckd_snid(struct dasd_device *device,
set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
cqr->retries = 5;
cqr->expires = 10 * HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
cqr->lpm = usrparm.path_mask;
@@ -3642,7 +3642,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
ccw->cda = (__u32)(addr_t) stats;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on(cqr);
if (rc == 0) {
@@ -3768,7 +3768,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
cqr->memdev = device;
cqr->retries = 3;
cqr->expires = 10 * HZ;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Build the ccws */
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index ff901b5509c1..21ef63cf0960 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -481,7 +481,7 @@ int dasd_eer_enable(struct dasd_device *device)
ccw->flags = 0;
ccw->cda = (__u32)(addr_t) cqr->data;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
cqr->callback = dasd_eer_snss_cb;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index d01ef82f8757..3250cb471f78 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -102,7 +102,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
pr_err("%s: default ERP has run out of retries and failed\n",
dev_name(&device->cdev->dev));
cqr->status = DASD_CQR_FAILED;
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
}
return cqr;
} /* end dasd_default_erp_action */
@@ -146,7 +146,7 @@ struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
cqr->status = DASD_CQR_DONE;
else {
cqr->status = DASD_CQR_FAILED;
- cqr->stopclk = get_clock();
+ cqr->stopclk = get_tod_clock();
}
return cqr;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 414698584344..4dd0e2f6047e 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -370,7 +370,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
cqr->block = block;
cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */
cqr->retries = 32;
- cqr->buildclk = get_clock();
+ cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 7ac6bad919ef..3c1ccf494647 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -68,19 +68,34 @@ void scm_initiate_cluster_request(struct scm_request *);
void scm_cluster_request_irq(struct scm_request *);
bool scm_test_cluster_request(struct scm_request *);
bool scm_cluster_size_valid(void);
-#else
-#define __scm_free_rq_cluster(scmrq) {}
-#define __scm_alloc_rq_cluster(scmrq) 0
-#define scm_request_cluster_init(scmrq) {}
-#define scm_reserve_cluster(scmrq) true
-#define scm_release_cluster(scmrq) {}
-#define scm_blk_dev_cluster_setup(bdev) {}
-#define scm_need_cluster_request(scmrq) false
-#define scm_initiate_cluster_request(scmrq) {}
-#define scm_cluster_request_irq(scmrq) {}
-#define scm_test_cluster_request(scmrq) false
-#define scm_cluster_size_valid() true
-#endif
+#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
+static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
+static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
+{
+ return 0;
+}
+static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
+static inline bool scm_reserve_cluster(struct scm_request *scmrq)
+{
+ return true;
+}
+static inline void scm_release_cluster(struct scm_request *scmrq) {}
+static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
+static inline bool scm_need_cluster_request(struct scm_request *scmrq)
+{
+ return false;
+}
+static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
+static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
+static inline bool scm_test_cluster_request(struct scm_request *scmrq)
+{
+ return false;
+}
+static inline bool scm_cluster_size_valid(void)
+{
+ return true;
+}
+#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
extern debug_info_t *scm_debug;
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 2c9a776bd63c..71bf959732fe 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -11,7 +11,7 @@ config TN3270
config TN3270_TTY
def_tristate y
prompt "Support for tty input/output on 3270 terminals"
- depends on TN3270
+ depends on TN3270 && TTY
help
Include support for using an IBM 3270 terminal as a Linux tty.
@@ -33,7 +33,7 @@ config TN3270_CONSOLE
config TN3215
def_bool y
prompt "Support for 3215 line mode terminal"
- depends on CCW
+ depends on CCW && TTY
help
Include support for IBM 3215 line-mode terminals.
@@ -51,7 +51,7 @@ config CCW_CONSOLE
config SCLP_TTY
def_bool y
prompt "Support for SCLP line mode terminal"
- depends on S390
+ depends on S390 && TTY
help
Include support for IBM SCLP line-mode terminals.
@@ -66,7 +66,7 @@ config SCLP_CONSOLE
config SCLP_VT220_TTY
def_bool y
prompt "Support for SCLP VT220-compatible terminal"
- depends on S390
+ depends on S390 && TTY
help
Include support for an IBM SCLP VT220-compatible terminal.
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 33b7141a182f..7b00fa634d40 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -412,8 +412,9 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
break;
case CTRLCHAR_CTRL:
- tty_insert_flip_char(tty, cchar, TTY_NORMAL);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_char(&raw->port, cchar,
+ TTY_NORMAL);
+ tty_flip_buffer_push(&raw->port);
break;
case CTRLCHAR_NONE:
@@ -425,8 +426,9 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
count++;
} else
count -= 2;
- tty_insert_flip_string(tty, raw->inbuf, count);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&raw->port, raw->inbuf,
+ count);
+ tty_flip_buffer_push(&raw->port);
break;
}
} else if (req->type == RAW3215_WRITE) {
@@ -970,7 +972,7 @@ static int tty3215_open(struct tty_struct *tty, struct file * filp)
tty_port_tty_set(&raw->port, tty);
- tty->low_latency = 0; /* don't use bottom half for pushing chars */
+ raw->port.low_latency = 0; /* don't use bottom half for pushing chars */
/*
* Start up 3215 device
*/
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 911704571b9c..96e52bf75930 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -433,9 +433,9 @@ fs3270_open(struct inode *inode, struct file *filp)
struct idal_buffer *ib;
int minor, rc = 0;
- if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR)
+ if (imajor(file_inode(filp)) != IBM_FS3270_MAJOR)
return -ENODEV;
- minor = iminor(filp->f_path.dentry->d_inode);
+ minor = iminor(file_inode(filp));
/* Check for minor 0 multiplexer. */
if (minor == 0) {
struct tty_struct *tty = get_current_tty();
@@ -443,7 +443,7 @@ fs3270_open(struct inode *inode, struct file *filp)
tty_kref_put(tty);
return -ENODEV;
}
- minor = tty->index + RAW3270_FIRSTMINOR;
+ minor = tty->index;
tty_kref_put(tty);
}
mutex_lock(&fs3270_mutex);
@@ -524,6 +524,25 @@ static const struct file_operations fs3270_fops = {
.llseek = no_llseek,
};
+void fs3270_create_cb(int minor)
+{
+ __register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
+ device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
+ NULL, "3270/tub%d", minor);
+}
+
+void fs3270_destroy_cb(int minor)
+{
+ device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
+ __unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
+}
+
+struct raw3270_notifier fs3270_notifier =
+{
+ .create = fs3270_create_cb,
+ .destroy = fs3270_destroy_cb,
+};
+
/*
* 3270 fullscreen driver initialization.
*/
@@ -532,16 +551,20 @@ fs3270_init(void)
{
int rc;
- rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
+ rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops);
if (rc)
return rc;
+ device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
+ NULL, "3270/tub");
+ raw3270_register_notifier(&fs3270_notifier);
return 0;
}
static void __exit
fs3270_exit(void)
{
- unregister_chrdev(IBM_FS3270_MAJOR, "fs3270");
+ raw3270_unregister_notifier(&fs3270_notifier);
+ __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
}
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
index d0ae2be58191..a31f339211d5 100644
--- a/drivers/s390/char/keyboard.h
+++ b/drivers/s390/char/keyboard.h
@@ -43,22 +43,14 @@ int kbd_ioctl(struct kbd_data *, unsigned int, unsigned long);
static inline void
kbd_put_queue(struct tty_port *port, int ch)
{
- struct tty_struct *tty = tty_port_tty_get(port);
- if (!tty)
- return;
- tty_insert_flip_char(tty, ch, 0);
- tty_schedule_flip(tty);
- tty_kref_put(tty);
+ tty_insert_flip_char(port, ch, 0);
+ tty_schedule_flip(port);
}
static inline void
kbd_puts_queue(struct tty_port *port, char *cp)
{
- struct tty_struct *tty = tty_port_tty_get(port);
- if (!tty)
- return;
while (*cp)
- tty_insert_flip_char(tty, *cp++, 0);
- tty_schedule_flip(tty);
- tty_kref_put(tty);
+ tty_insert_flip_char(port, *cp++, 0);
+ tty_schedule_flip(port);
}
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 9a6c140c5f07..4c9030a5b9f2 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -28,7 +28,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
-static struct class *class3270;
+struct class *class3270;
/* The main 3270 data structure. */
struct raw3270 {
@@ -37,6 +37,7 @@ struct raw3270 {
int minor;
short model, rows, cols;
+ unsigned int state;
unsigned long flags;
struct list_head req_queue; /* Request queue. */
@@ -46,20 +47,26 @@ struct raw3270 {
struct timer_list timer; /* Device timer. */
unsigned char *ascebc; /* ascii -> ebcdic table */
- struct device *clttydev; /* 3270-class tty device ptr */
- struct device *cltubdev; /* 3270-class tub device ptr */
- struct raw3270_request init_request;
+ struct raw3270_view init_view;
+ struct raw3270_request init_reset;
+ struct raw3270_request init_readpart;
+ struct raw3270_request init_readmod;
unsigned char init_data[256];
};
+/* raw3270->state */
+#define RAW3270_STATE_INIT 0 /* Initial state */
+#define RAW3270_STATE_RESET 1 /* Reset command is pending */
+#define RAW3270_STATE_W4ATTN 2 /* Wait for attention interrupt */
+#define RAW3270_STATE_READMOD 3 /* Read partition is pending */
+#define RAW3270_STATE_READY 4 /* Device is usable by views */
+
/* raw3270->flags */
#define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */
#define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */
-#define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */
-#define RAW3270_FLAGS_READY 4 /* Device is useable by views */
-#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */
-#define RAW3270_FLAGS_FROZEN 16 /* set if 3270 is frozen for suspend */
+#define RAW3270_FLAGS_CONSOLE 2 /* Device is the console. */
+#define RAW3270_FLAGS_FROZEN 3 /* set if 3270 is frozen for suspend */
/* Semaphore to protect global data of raw3270 (devices, views, etc). */
static DEFINE_MUTEX(raw3270_mutex);
@@ -97,6 +104,17 @@ static unsigned char raw3270_ebcgraf[64] = {
0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
};
+static inline int raw3270_state_ready(struct raw3270 *rp)
+{
+ return rp->state == RAW3270_STATE_READY;
+}
+
+static inline int raw3270_state_final(struct raw3270 *rp)
+{
+ return rp->state == RAW3270_STATE_INIT ||
+ rp->state == RAW3270_STATE_READY;
+}
+
void
raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
{
@@ -214,7 +232,7 @@ raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
* Stop running ccw.
*/
static int
-raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq)
+__raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
{
int retries;
int rc;
@@ -233,18 +251,6 @@ raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq)
return rc;
}
-static int
-raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
-{
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
- rc = raw3270_halt_io_nolock(rp, rq);
- spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
- return rc;
-}
-
/*
* Add the request to the request queue, try to start it if the
* 3270 device is idle. Return without waiting for end of i/o.
@@ -281,8 +287,8 @@ raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
if (!rp || rp->view != view ||
test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
- else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
- rc = -ENODEV;
+ else if (!raw3270_state_ready(rp))
+ rc = -EBUSY;
else
rc = __raw3270_start(rp, view, rq);
spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
@@ -299,8 +305,8 @@ raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
if (!rp || rp->view != view ||
test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
- else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
- rc = -ENODEV;
+ else if (!raw3270_state_ready(rp))
+ rc = -EBUSY;
else
rc = __raw3270_start(rp, view, rq);
return rc;
@@ -378,7 +384,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
case RAW3270_IO_STOP:
if (!rq)
break;
- raw3270_halt_io_nolock(rp, rq);
+ __raw3270_halt_io(rp, rq);
rq->rc = -EIO;
break;
default:
@@ -413,9 +419,14 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
}
/*
- * Size sensing.
+ * To determine the size of the 3270 device we need to do:
+ * 1) send a 'read partition' data stream to the device
+ * 2) wait for the attn interrupt that precedes the query reply
+ * 3) do a read modified to get the query reply
+ * To make things worse we have to cope with intervention
+ * required (3270 device switched to 'stand-by') and command
+ * rejects (old devices that can't do 'read partition').
*/
-
struct raw3270_ua { /* Query Reply structure for Usable Area */
struct { /* Usable Area Query Reply Base */
short l; /* Length of this structured field */
@@ -451,117 +462,21 @@ struct raw3270_ua { /* Query Reply structure for Usable Area */
} __attribute__ ((packed)) aua;
} __attribute__ ((packed));
-static struct diag210 raw3270_init_diag210;
-static DEFINE_MUTEX(raw3270_init_mutex);
-
-static int
-raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
- struct irb *irb)
-{
- /*
- * Unit-Check Processing:
- * Expect Command Reject or Intervention Required.
- */
- if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
- /* Request finished abnormally. */
- if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
- set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
- return RAW3270_IO_BUSY;
- }
- }
- if (rq) {
- if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
- if (irb->ecw[0] & SNS0_CMD_REJECT)
- rq->rc = -EOPNOTSUPP;
- else
- rq->rc = -EIO;
- } else
- /* Request finished normally. Copy residual count. */
- rq->rescnt = irb->scsw.cmd.count;
- }
- if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
- set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
- wake_up(&raw3270_wait_queue);
- }
- return RAW3270_IO_DONE;
-}
-
-static struct raw3270_fn raw3270_init_fn = {
- .intv = raw3270_init_irq
-};
-
-static struct raw3270_view raw3270_init_view = {
- .fn = &raw3270_init_fn
-};
-
-/*
- * raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup
- * Wait for end of request. The request must have been started
- * with raw3270_start, rc = 0. The device lock may NOT have been
- * released between calling raw3270_start and raw3270_wait.
- */
static void
-raw3270_wake_init(struct raw3270_request *rq, void *data)
-{
- wake_up((wait_queue_head_t *) data);
-}
-
-/*
- * Special wait function that can cope with console initialization.
- */
-static int
-raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
- struct raw3270_request *rq)
-{
- unsigned long flags;
- int rc;
-
-#ifdef CONFIG_TN3270_CONSOLE
- if (raw3270_registered == 0) {
- spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
- rq->callback = NULL;
- rc = __raw3270_start(rp, view, rq);
- if (rc == 0)
- while (!raw3270_request_final(rq)) {
- wait_cons_dev();
- barrier();
- }
- spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
- return rq->rc;
- }
-#endif
- rq->callback = raw3270_wake_init;
- rq->callback_data = &raw3270_wait_queue;
- spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
- rc = __raw3270_start(rp, view, rq);
- spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
- if (rc)
- return rc;
- /* Now wait for the completion. */
- rc = wait_event_interruptible(raw3270_wait_queue,
- raw3270_request_final(rq));
- if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */
- raw3270_halt_io(view->dev, rq);
- /* No wait for the halt to complete. */
- wait_event(raw3270_wait_queue, raw3270_request_final(rq));
- return -ERESTARTSYS;
- }
- return rq->rc;
-}
-
-static int
-__raw3270_size_device_vm(struct raw3270 *rp)
+raw3270_size_device_vm(struct raw3270 *rp)
{
int rc, model;
struct ccw_dev_id dev_id;
+ struct diag210 diag_data;
ccw_device_get_id(rp->cdev, &dev_id);
- raw3270_init_diag210.vrdcdvno = dev_id.devno;
- raw3270_init_diag210.vrdclen = sizeof(struct diag210);
- rc = diag210(&raw3270_init_diag210);
- if (rc)
- return rc;
- model = raw3270_init_diag210.vrdccrmd;
+ diag_data.vrdcdvno = dev_id.devno;
+ diag_data.vrdclen = sizeof(struct diag210);
+ rc = diag210(&diag_data);
+ model = diag_data.vrdccrmd;
+ /* Use default model 2 if the size could not be detected */
+ if (rc || model < 2 || model > 5)
+ model = 2;
switch (model) {
case 2:
rp->model = model;
@@ -583,77 +498,25 @@ __raw3270_size_device_vm(struct raw3270 *rp)
rp->rows = 27;
rp->cols = 132;
break;
- default:
- rc = -EOPNOTSUPP;
- break;
}
- return rc;
}
-static int
-__raw3270_size_device(struct raw3270 *rp)
+static void
+raw3270_size_device(struct raw3270 *rp)
{
- static const unsigned char wbuf[] =
- { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
struct raw3270_ua *uap;
- int rc;
- /*
- * To determine the size of the 3270 device we need to do:
- * 1) send a 'read partition' data stream to the device
- * 2) wait for the attn interrupt that precedes the query reply
- * 3) do a read modified to get the query reply
- * To make things worse we have to cope with intervention
- * required (3270 device switched to 'stand-by') and command
- * rejects (old devices that can't do 'read partition').
- */
- memset(&rp->init_request, 0, sizeof(rp->init_request));
- memset(&rp->init_data, 0, 256);
- /* Store 'read partition' data stream to init_data */
- memcpy(&rp->init_data, wbuf, sizeof(wbuf));
- INIT_LIST_HEAD(&rp->init_request.list);
- rp->init_request.ccw.cmd_code = TC_WRITESF;
- rp->init_request.ccw.flags = CCW_FLAG_SLI;
- rp->init_request.ccw.count = sizeof(wbuf);
- rp->init_request.ccw.cda = (__u32) __pa(&rp->init_data);
-
- rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
- if (rc)
- /* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
- return rc;
-
- /* Wait for attention interrupt. */
-#ifdef CONFIG_TN3270_CONSOLE
- if (raw3270_registered == 0) {
- unsigned long flags;
-
- spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
- while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags))
- wait_cons_dev();
- spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
- } else
-#endif
- rc = wait_event_interruptible(raw3270_wait_queue,
- test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags));
- if (rc)
- return rc;
-
- /*
- * The device accepted the 'read partition' command. Now
- * set up a read ccw and issue it.
- */
- rp->init_request.ccw.cmd_code = TC_READMOD;
- rp->init_request.ccw.flags = CCW_FLAG_SLI;
- rp->init_request.ccw.count = sizeof(rp->init_data);
- rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
- rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
- if (rc)
- return rc;
/* Got a Query Reply */
uap = (struct raw3270_ua *) (rp->init_data + 1);
/* Paranoia check. */
- if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81)
- return -EOPNOTSUPP;
+ if (rp->init_readmod.rc || rp->init_data[0] != 0x88 ||
+ uap->uab.qcode != 0x81) {
+ /* Couldn't detect size. Use default model 2. */
+ rp->model = 2;
+ rp->rows = 24;
+ rp->cols = 80;
+ return;
+ }
/* Copy rows/columns of default Usable Area */
rp->rows = uap->uab.h;
rp->cols = uap->uab.w;
@@ -666,66 +529,131 @@ __raw3270_size_device(struct raw3270 *rp)
rp->rows = uap->aua.hauai;
rp->cols = uap->aua.wauai;
}
- return 0;
+ /* Try to find a model. */
+ rp->model = 0;
+ if (rp->rows == 24 && rp->cols == 80)
+ rp->model = 2;
+ if (rp->rows == 32 && rp->cols == 80)
+ rp->model = 3;
+ if (rp->rows == 43 && rp->cols == 80)
+ rp->model = 4;
+ if (rp->rows == 27 && rp->cols == 132)
+ rp->model = 5;
}
-static int
-raw3270_size_device(struct raw3270 *rp)
+static void
+raw3270_size_device_done(struct raw3270 *rp)
{
- int rc;
+ struct raw3270_view *view;
- mutex_lock(&raw3270_init_mutex);
- rp->view = &raw3270_init_view;
- raw3270_init_view.dev = rp;
- if (MACHINE_IS_VM)
- rc = __raw3270_size_device_vm(rp);
- else
- rc = __raw3270_size_device(rp);
- raw3270_init_view.dev = NULL;
rp->view = NULL;
- mutex_unlock(&raw3270_init_mutex);
- if (rc == 0) { /* Found something. */
- /* Try to find a model. */
- rp->model = 0;
- if (rp->rows == 24 && rp->cols == 80)
- rp->model = 2;
- if (rp->rows == 32 && rp->cols == 80)
- rp->model = 3;
- if (rp->rows == 43 && rp->cols == 80)
- rp->model = 4;
- if (rp->rows == 27 && rp->cols == 132)
- rp->model = 5;
- } else {
- /* Couldn't detect size. Use default model 2. */
- rp->model = 2;
- rp->rows = 24;
- rp->cols = 80;
- return 0;
+ rp->state = RAW3270_STATE_READY;
+ /* Notify views about new size */
+ list_for_each_entry(view, &rp->view_list, list)
+ if (view->fn->resize)
+ view->fn->resize(view, rp->model, rp->rows, rp->cols);
+ /* Setup processing done, now activate a view */
+ list_for_each_entry(view, &rp->view_list, list) {
+ rp->view = view;
+ if (view->fn->activate(view) == 0)
+ break;
+ rp->view = NULL;
}
- return rc;
+}
+
+static void
+raw3270_read_modified_cb(struct raw3270_request *rq, void *data)
+{
+ struct raw3270 *rp = rq->view->dev;
+
+ raw3270_size_device(rp);
+ raw3270_size_device_done(rp);
+}
+
+static void
+raw3270_read_modified(struct raw3270 *rp)
+{
+ if (rp->state != RAW3270_STATE_W4ATTN)
+ return;
+ /* Use 'read modified' to get the result of a read partition. */
+ memset(&rp->init_readmod, 0, sizeof(rp->init_readmod));
+ memset(&rp->init_data, 0, sizeof(rp->init_data));
+ rp->init_readmod.ccw.cmd_code = TC_READMOD;
+ rp->init_readmod.ccw.flags = CCW_FLAG_SLI;
+ rp->init_readmod.ccw.count = sizeof(rp->init_data);
+ rp->init_readmod.ccw.cda = (__u32) __pa(rp->init_data);
+ rp->init_readmod.callback = raw3270_read_modified_cb;
+ rp->state = RAW3270_STATE_READMOD;
+ raw3270_start_irq(&rp->init_view, &rp->init_readmod);
+}
+
+static void
+raw3270_writesf_readpart(struct raw3270 *rp)
+{
+ static const unsigned char wbuf[] =
+ { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
+
+ /* Store 'read partition' data stream to init_data */
+ memset(&rp->init_readpart, 0, sizeof(rp->init_readpart));
+ memset(&rp->init_data, 0, sizeof(rp->init_data));
+ memcpy(&rp->init_data, wbuf, sizeof(wbuf));
+ rp->init_readpart.ccw.cmd_code = TC_WRITESF;
+ rp->init_readpart.ccw.flags = CCW_FLAG_SLI;
+ rp->init_readpart.ccw.count = sizeof(wbuf);
+ rp->init_readpart.ccw.cda = (__u32) __pa(&rp->init_data);
+ rp->state = RAW3270_STATE_W4ATTN;
+ raw3270_start_irq(&rp->init_view, &rp->init_readpart);
+}
+
+/*
+ * Device reset
+ */
+static void
+raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
+{
+ struct raw3270 *rp = rq->view->dev;
+
+ if (rp->state != RAW3270_STATE_RESET)
+ return;
+ if (rq->rc) {
+ /* Reset command failed. */
+ rp->state = RAW3270_STATE_INIT;
+ } else if (MACHINE_IS_VM) {
+ raw3270_size_device_vm(rp);
+ raw3270_size_device_done(rp);
+ } else
+ raw3270_writesf_readpart(rp);
}
static int
-raw3270_reset_device(struct raw3270 *rp)
+__raw3270_reset_device(struct raw3270 *rp)
{
int rc;
- mutex_lock(&raw3270_init_mutex);
- memset(&rp->init_request, 0, sizeof(rp->init_request));
+ /* Store reset data stream to init_data/init_reset */
+ memset(&rp->init_reset, 0, sizeof(rp->init_reset));
memset(&rp->init_data, 0, sizeof(rp->init_data));
- /* Store reset data stream to init_data/init_request */
rp->init_data[0] = TW_KR;
- INIT_LIST_HEAD(&rp->init_request.list);
- rp->init_request.ccw.cmd_code = TC_EWRITEA;
- rp->init_request.ccw.flags = CCW_FLAG_SLI;
- rp->init_request.ccw.count = 1;
- rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
- rp->view = &raw3270_init_view;
- raw3270_init_view.dev = rp;
- rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
- raw3270_init_view.dev = NULL;
- rp->view = NULL;
- mutex_unlock(&raw3270_init_mutex);
+ rp->init_reset.ccw.cmd_code = TC_EWRITEA;
+ rp->init_reset.ccw.flags = CCW_FLAG_SLI;
+ rp->init_reset.ccw.count = 1;
+ rp->init_reset.ccw.cda = (__u32) __pa(rp->init_data);
+ rp->init_reset.callback = raw3270_reset_device_cb;
+ rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset);
+ if (rc == 0 && rp->state == RAW3270_STATE_INIT)
+ rp->state = RAW3270_STATE_RESET;
+ return rc;
+}
+
+static int
+raw3270_reset_device(struct raw3270 *rp)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+ rc = __raw3270_reset_device(rp);
+ spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return rc;
}
@@ -739,13 +667,50 @@ raw3270_reset(struct raw3270_view *view)
if (!rp || rp->view != view ||
test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
- else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
- rc = -ENODEV;
+ else if (!raw3270_state_ready(rp))
+ rc = -EBUSY;
else
rc = raw3270_reset_device(view->dev);
return rc;
}
+static int
+raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
+ struct irb *irb)
+{
+ struct raw3270 *rp;
+
+ /*
+ * Unit-Check Processing:
+ * Expect Command Reject or Intervention Required.
+ */
+ if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+ /* Request finished abnormally. */
+ if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
+ set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
+ return RAW3270_IO_BUSY;
+ }
+ }
+ if (rq) {
+ if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+ if (irb->ecw[0] & SNS0_CMD_REJECT)
+ rq->rc = -EOPNOTSUPP;
+ else
+ rq->rc = -EIO;
+ }
+ }
+ if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+ /* Queue read modified after attention interrupt */
+ rp = view->dev;
+ raw3270_read_modified(rp);
+ }
+ return RAW3270_IO_DONE;
+}
+
+static struct raw3270_fn raw3270_init_fn = {
+ .intv = raw3270_init_irq
+};
+
/*
* Setup new 3270 device.
*/
@@ -774,6 +739,10 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
INIT_LIST_HEAD(&rp->req_queue);
INIT_LIST_HEAD(&rp->view_list);
+ rp->init_view.dev = rp;
+ rp->init_view.fn = &raw3270_init_fn;
+ rp->view = &rp->init_view;
+
/*
* Add device to list and find the smallest unused minor
* number for it. Note: there is no device with minor 0,
@@ -812,6 +781,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
*/
struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
{
+ unsigned long flags;
struct raw3270 *rp;
char *ascebc;
int rc;
@@ -822,16 +792,15 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
if (rc)
return ERR_PTR(rc);
set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
- rc = raw3270_reset_device(rp);
- if (rc)
- return ERR_PTR(rc);
- rc = raw3270_size_device(rp);
- if (rc)
- return ERR_PTR(rc);
- rc = raw3270_reset_device(rp);
- if (rc)
- return ERR_PTR(rc);
- set_bit(RAW3270_FLAGS_READY, &rp->flags);
+ spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+ do {
+ __raw3270_reset_device(rp);
+ while (!raw3270_state_final(rp)) {
+ wait_cons_dev();
+ barrier();
+ }
+ } while (rp->state != RAW3270_STATE_READY);
+ spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return rp;
}
@@ -893,13 +862,13 @@ raw3270_activate_view(struct raw3270_view *view)
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
if (rp->view == view)
rc = 0;
- else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
- rc = -ENODEV;
+ else if (!raw3270_state_ready(rp))
+ rc = -EBUSY;
else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
else {
oldview = NULL;
- if (rp->view) {
+ if (rp->view && rp->view->fn->deactivate) {
oldview = rp->view;
oldview->fn->deactivate(oldview);
}
@@ -944,7 +913,7 @@ raw3270_deactivate_view(struct raw3270_view *view)
list_del_init(&view->list);
list_add_tail(&view->list, &rp->view_list);
/* Try to activate another view. */
- if (test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
+ if (raw3270_state_ready(rp) &&
!test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
list_for_each_entry(view, &rp->view_list, list) {
rp->view = view;
@@ -975,18 +944,16 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
if (rp->minor != minor)
continue;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
- if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
- atomic_set(&view->ref_count, 2);
- view->dev = rp;
- view->fn = fn;
- view->model = rp->model;
- view->rows = rp->rows;
- view->cols = rp->cols;
- view->ascebc = rp->ascebc;
- spin_lock_init(&view->lock);
- list_add(&view->list, &rp->view_list);
- rc = 0;
- }
+ atomic_set(&view->ref_count, 2);
+ view->dev = rp;
+ view->fn = fn;
+ view->model = rp->model;
+ view->rows = rp->rows;
+ view->cols = rp->cols;
+ view->ascebc = rp->ascebc;
+ spin_lock_init(&view->lock);
+ list_add(&view->list, &rp->view_list);
+ rc = 0;
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
break;
}
@@ -1010,14 +977,11 @@ raw3270_find_view(struct raw3270_fn *fn, int minor)
if (rp->minor != minor)
continue;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
- if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
- view = ERR_PTR(-ENOENT);
- list_for_each_entry(tmp, &rp->view_list, list) {
- if (tmp->fn == fn) {
- raw3270_get_view(tmp);
- view = tmp;
- break;
- }
+ list_for_each_entry(tmp, &rp->view_list, list) {
+ if (tmp->fn == fn) {
+ raw3270_get_view(tmp);
+ view = tmp;
+ break;
}
}
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
@@ -1044,7 +1008,7 @@ raw3270_del_view(struct raw3270_view *view)
rp->view = NULL;
}
list_del_init(&view->list);
- if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
+ if (!rp->view && raw3270_state_ready(rp) &&
!test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
/* Try to activate another view. */
list_for_each_entry(nv, &rp->view_list, list) {
@@ -1072,10 +1036,6 @@ raw3270_delete_device(struct raw3270 *rp)
/* Remove from device chain. */
mutex_lock(&raw3270_mutex);
- if (rp->clttydev && !IS_ERR(rp->clttydev))
- device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
- if (rp->cltubdev && !IS_ERR(rp->cltubdev))
- device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor));
list_del_init(&rp->list);
mutex_unlock(&raw3270_mutex);
@@ -1139,75 +1099,34 @@ static struct attribute_group raw3270_attr_group = {
static int raw3270_create_attributes(struct raw3270 *rp)
{
- int rc;
-
- rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
- if (rc)
- goto out;
-
- rp->clttydev = device_create(class3270, &rp->cdev->dev,
- MKDEV(IBM_TTY3270_MAJOR, rp->minor), NULL,
- "tty%s", dev_name(&rp->cdev->dev));
- if (IS_ERR(rp->clttydev)) {
- rc = PTR_ERR(rp->clttydev);
- goto out_ttydev;
- }
-
- rp->cltubdev = device_create(class3270, &rp->cdev->dev,
- MKDEV(IBM_FS3270_MAJOR, rp->minor), NULL,
- "tub%s", dev_name(&rp->cdev->dev));
- if (!IS_ERR(rp->cltubdev))
- goto out;
-
- rc = PTR_ERR(rp->cltubdev);
- device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
-
-out_ttydev:
- sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
-out:
- return rc;
+ return sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
}
/*
* Notifier for device addition/removal
*/
-struct raw3270_notifier {
- struct list_head list;
- void (*notifier)(int, int);
-};
-
static LIST_HEAD(raw3270_notifier);
-int raw3270_register_notifier(void (*notifier)(int, int))
+int raw3270_register_notifier(struct raw3270_notifier *notifier)
{
- struct raw3270_notifier *np;
struct raw3270 *rp;
- np = kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL);
- if (!np)
- return -ENOMEM;
- np->notifier = notifier;
mutex_lock(&raw3270_mutex);
- list_add_tail(&np->list, &raw3270_notifier);
- list_for_each_entry(rp, &raw3270_devices, list) {
- get_device(&rp->cdev->dev);
- notifier(rp->minor, 1);
- }
+ list_add_tail(&notifier->list, &raw3270_notifier);
+ list_for_each_entry(rp, &raw3270_devices, list)
+ notifier->create(rp->minor);
mutex_unlock(&raw3270_mutex);
return 0;
}
-void raw3270_unregister_notifier(void (*notifier)(int, int))
+void raw3270_unregister_notifier(struct raw3270_notifier *notifier)
{
- struct raw3270_notifier *np;
+ struct raw3270 *rp;
mutex_lock(&raw3270_mutex);
- list_for_each_entry(np, &raw3270_notifier, list)
- if (np->notifier == notifier) {
- list_del(&np->list);
- kfree(np);
- break;
- }
+ list_for_each_entry(rp, &raw3270_devices, list)
+ notifier->destroy(rp->minor);
+ list_del(&notifier->list);
mutex_unlock(&raw3270_mutex);
}
@@ -1217,29 +1136,20 @@ void raw3270_unregister_notifier(void (*notifier)(int, int))
static int
raw3270_set_online (struct ccw_device *cdev)
{
- struct raw3270 *rp;
struct raw3270_notifier *np;
+ struct raw3270 *rp;
int rc;
rp = raw3270_create_device(cdev);
if (IS_ERR(rp))
return PTR_ERR(rp);
- rc = raw3270_reset_device(rp);
- if (rc)
- goto failure;
- rc = raw3270_size_device(rp);
- if (rc)
- goto failure;
- rc = raw3270_reset_device(rp);
- if (rc)
- goto failure;
rc = raw3270_create_attributes(rp);
if (rc)
goto failure;
- set_bit(RAW3270_FLAGS_READY, &rp->flags);
+ raw3270_reset_device(rp);
mutex_lock(&raw3270_mutex);
list_for_each_entry(np, &raw3270_notifier, list)
- np->notifier(rp->minor, 1);
+ np->create(rp->minor);
mutex_unlock(&raw3270_mutex);
return 0;
@@ -1268,14 +1178,14 @@ raw3270_remove (struct ccw_device *cdev)
*/
if (rp == NULL)
return;
- clear_bit(RAW3270_FLAGS_READY, &rp->flags);
sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
/* Deactivate current view and remove all views. */
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
if (rp->view) {
- rp->view->fn->deactivate(rp->view);
+ if (rp->view->fn->deactivate)
+ rp->view->fn->deactivate(rp->view);
rp->view = NULL;
}
while (!list_empty(&rp->view_list)) {
@@ -1290,7 +1200,7 @@ raw3270_remove (struct ccw_device *cdev)
mutex_lock(&raw3270_mutex);
list_for_each_entry(np, &raw3270_notifier, list)
- np->notifier(rp->minor, 0);
+ np->destroy(rp->minor);
mutex_unlock(&raw3270_mutex);
/* Reset 3270 device. */
@@ -1324,7 +1234,7 @@ static int raw3270_pm_stop(struct ccw_device *cdev)
if (!rp)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
- if (rp->view)
+ if (rp->view && rp->view->fn->deactivate)
rp->view->fn->deactivate(rp->view);
if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) {
/*
@@ -1351,7 +1261,7 @@ static int raw3270_pm_start(struct ccw_device *cdev)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
- if (rp->view)
+ if (rp->view && rp->view->fn->activate)
rp->view->fn->activate(rp->view);
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return 0;
@@ -1434,6 +1344,7 @@ MODULE_LICENSE("GPL");
module_init(raw3270_init);
module_exit(raw3270_exit);
+EXPORT_SYMBOL(class3270);
EXPORT_SYMBOL(raw3270_request_alloc);
EXPORT_SYMBOL(raw3270_request_free);
EXPORT_SYMBOL(raw3270_request_reset);
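
Taken together, the raw3270.c changes replace the old synchronous size sensing (which had to special-case the console and sleep on a wait queue) with a small per-device state machine driven entirely by request callbacks. A rough summary of the flow, reconstructed from the hunks above:

/*
 *   RAW3270_STATE_INIT
 *        | __raw3270_reset_device() starts a TC_EWRITEA reset
 *        v
 *   RAW3270_STATE_RESET
 *        | raw3270_reset_device_cb(): under z/VM the size can come from
 *        | diag 210, otherwise write the 'read partition' structured field
 *        v
 *   RAW3270_STATE_W4ATTN      -- wait for the attention interrupt
 *        | raw3270_init_irq() sees DEV_STAT_ATTENTION
 *        v
 *   RAW3270_STATE_READMOD     -- TC_READMOD fetches the query reply
 *        | raw3270_read_modified_cb() -> raw3270_size_device()
 *        v
 *   RAW3270_STATE_READY       -- views are resized and one is activated
 */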
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index ed34eb2199cc..7b73ff8c1bd7 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -91,6 +91,7 @@ struct raw3270_iocb {
struct raw3270;
struct raw3270_view;
+extern struct class *class3270;
/* 3270 CCW request */
struct raw3270_request {
@@ -140,6 +141,7 @@ struct raw3270_fn {
struct raw3270_request *, struct irb *);
void (*release)(struct raw3270_view *);
void (*free)(struct raw3270_view *);
+ void (*resize)(struct raw3270_view *, int, int, int);
};
/*
@@ -192,8 +194,14 @@ struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
void raw3270_wait_cons_dev(struct raw3270 *);
/* Notifier for device addition/removal */
-int raw3270_register_notifier(void (*notifier)(int, int));
-void raw3270_unregister_notifier(void (*notifier)(int, int));
+struct raw3270_notifier {
+ struct list_head list;
+ void (*create)(int minor);
+ void (*destroy)(int minor);
+};
+
+int raw3270_register_notifier(struct raw3270_notifier *);
+void raw3270_unregister_notifier(struct raw3270_notifier *);
void raw3270_pm_unfreeze(struct raw3270_view *);
/*
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 12c16a65dd25..bd6871bf545a 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -450,7 +450,7 @@ sclp_sync_wait(void)
timeout = 0;
if (timer_pending(&sclp_request_timer)) {
/* Get timeout TOD value */
- timeout = get_clock() +
+ timeout = get_tod_clock() +
sclp_tod_from_jiffies(sclp_request_timer.expires -
jiffies);
}
@@ -472,7 +472,7 @@ sclp_sync_wait(void)
while (sclp_running_state != sclp_running_state_idle) {
/* Check for expired request timer */
if (timer_pending(&sclp_request_timer) &&
- get_clock() > timeout &&
+ get_tod_clock() > timeout &&
del_timer(&sclp_request_timer))
sclp_request_timer.function(sclp_request_timer.data);
cpu_relax();
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index c44d13f607bc..30a2255389e5 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -56,7 +56,6 @@ static int __initdata early_read_info_sccb_valid;
u64 sclp_facilities;
static u8 sclp_fac84;
-static u8 sclp_fac85;
static unsigned long long rzm;
static unsigned long long rnmax;
@@ -131,7 +130,8 @@ void __init sclp_facilities_detect(void)
sccb = &early_read_info_sccb;
sclp_facilities = sccb->facilities;
sclp_fac84 = sccb->fac84;
- sclp_fac85 = sccb->fac85;
+ if (sccb->fac85 & 0x02)
+ S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
rzm <<= 20;
@@ -171,12 +171,6 @@ unsigned long long sclp_get_rzm(void)
return rzm;
}
-u8 sclp_get_fac85(void)
-{
- return sclp_fac85;
-}
-EXPORT_SYMBOL_GPL(sclp_get_fac85);
-
/*
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. Therefore the sccb should have valid contents.
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 877fbc37c1e7..14b4cb8abcc8 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -65,7 +65,7 @@ sclp_tty_open(struct tty_struct *tty, struct file *filp)
{
tty_port_tty_set(&sclp_port, tty);
tty->driver_data = NULL;
- tty->low_latency = 0;
+ sclp_port.low_latency = 0;
return 0;
}
@@ -342,8 +342,8 @@ sclp_tty_input(unsigned char* buf, unsigned int count)
case CTRLCHAR_SYSRQ:
break;
case CTRLCHAR_CTRL:
- tty_insert_flip_char(tty, cchar, TTY_NORMAL);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_char(&sclp_port, cchar, TTY_NORMAL);
+ tty_flip_buffer_push(&sclp_port);
break;
case CTRLCHAR_NONE:
/* send (normal) input to line discipline */
@@ -351,11 +351,11 @@ sclp_tty_input(unsigned char* buf, unsigned int count)
(strncmp((const char *) buf + count - 2, "^n", 2) &&
strncmp((const char *) buf + count - 2, "\252n", 2))) {
/* add the auto \n */
- tty_insert_flip_string(tty, buf, count);
- tty_insert_flip_char(tty, '\n', TTY_NORMAL);
+ tty_insert_flip_string(&sclp_port, buf, count);
+ tty_insert_flip_char(&sclp_port, '\n', TTY_NORMAL);
} else
- tty_insert_flip_string(tty, buf, count - 2);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&sclp_port, buf, count - 2);
+ tty_flip_buffer_push(&sclp_port);
break;
}
tty_kref_put(tty);
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index effcc8756e0a..6c92f62623be 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -461,14 +461,9 @@ sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
static void
sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
{
- struct tty_struct *tty = tty_port_tty_get(&sclp_vt220_port);
char *buffer;
unsigned int count;
- /* Ignore input if device is not open */
- if (tty == NULL)
- return;
-
buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
count = evbuf->length - sizeof(struct evbuf_header);
@@ -480,11 +475,10 @@ sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
/* Send input to line discipline */
buffer++;
count--;
- tty_insert_flip_string(tty, buffer, count);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&sclp_vt220_port, buffer, count);
+ tty_flip_buffer_push(&sclp_vt220_port);
break;
}
- tty_kref_put(tty);
}
/*
@@ -495,7 +489,7 @@ sclp_vt220_open(struct tty_struct *tty, struct file *filp)
{
if (tty->count == 1) {
tty_port_tty_set(&sclp_vt220_port, tty);
- tty->low_latency = 0;
+ sclp_vt220_port.low_latency = 0;
if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
tty->winsize.ws_row = 24;
tty->winsize.ws_col = 80;
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 2d61db3fc62a..6dc60725de92 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -273,13 +273,13 @@ tapechar_open (struct inode *inode, struct file *filp)
int minor, rc;
DBF_EVENT(6, "TCHAR:open: %i:%i\n",
- imajor(filp->f_path.dentry->d_inode),
- iminor(filp->f_path.dentry->d_inode));
+ imajor(file_inode(filp)),
+ iminor(file_inode(filp)));
- if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
+ if (imajor(file_inode(filp)) != tapechar_major)
return -ENODEV;
- minor = iminor(filp->f_path.dentry->d_inode);
+ minor = iminor(file_inode(filp));
device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
if (IS_ERR(device)) {
DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
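
tapechar_open() now goes through file_inode() instead of chasing filp->f_path.dentry->d_inode by hand; the same substitution appears in vmur.c below. A hedged sketch of how a character-device open handler is expected to use the accessor (foo_open and FOO_MAJOR are made-up names):

#include <linux/fs.h>

#define FOO_MAJOR 42	/* illustrative only */

static int foo_open(struct inode *inode, struct file *filp)
{
	/* file_inode() hides how the inode is reached from the struct file */
	if (imajor(file_inode(filp)) != FOO_MAJOR)
		return -ENODEV;
	return 0;
}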
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 43ea0593bdb0..b907dba24025 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
@@ -80,6 +81,8 @@ struct tty3270 {
unsigned int highlight; /* Blink/reverse/underscore */
unsigned int f_color; /* Foreground color */
struct tty3270_line *screen;
+ unsigned int n_model, n_cols, n_rows; /* New model & size */
+ struct work_struct resize_work;
/* Input stuff. */
struct string *prompt; /* Output string for input area. */
@@ -115,6 +118,7 @@ struct tty3270 {
#define TTY_UPDATE_ALL 16 /* Recreate screen. */
static void tty3270_update(struct tty3270 *);
+static void tty3270_resize_work(struct work_struct *work);
/*
* Setup timeout for a device. On timeout trigger an update.
@@ -683,12 +687,6 @@ tty3270_alloc_view(void)
INIT_LIST_HEAD(&tp->update);
INIT_LIST_HEAD(&tp->rcl_lines);
tp->rcl_max = 20;
- tty_port_init(&tp->port);
- setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
- (unsigned long) tp);
- tasklet_init(&tp->readlet,
- (void (*)(unsigned long)) tty3270_read_tasklet,
- (unsigned long) tp->read);
for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
tp->freemem_pages[pages] = (void *)
@@ -710,6 +708,15 @@ tty3270_alloc_view(void)
tp->kbd = kbd_alloc();
if (!tp->kbd)
goto out_reset;
+
+ tty_port_init(&tp->port);
+ setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
+ (unsigned long) tp);
+ tasklet_init(&tp->readlet,
+ (void (*)(unsigned long)) tty3270_read_tasklet,
+ (unsigned long) tp->read);
+ INIT_WORK(&tp->resize_work, tty3270_resize_work);
+
return tp;
out_reset:
@@ -752,42 +759,96 @@ tty3270_free_view(struct tty3270 *tp)
/*
* Allocate tty3270 screen.
*/
-static int
-tty3270_alloc_screen(struct tty3270 *tp)
+static struct tty3270_line *
+tty3270_alloc_screen(unsigned int rows, unsigned int cols)
{
+ struct tty3270_line *screen;
unsigned long size;
int lines;
- size = sizeof(struct tty3270_line) * (tp->view.rows - 2);
- tp->screen = kzalloc(size, GFP_KERNEL);
- if (!tp->screen)
+ size = sizeof(struct tty3270_line) * (rows - 2);
+ screen = kzalloc(size, GFP_KERNEL);
+ if (!screen)
goto out_err;
- for (lines = 0; lines < tp->view.rows - 2; lines++) {
- size = sizeof(struct tty3270_cell) * tp->view.cols;
- tp->screen[lines].cells = kzalloc(size, GFP_KERNEL);
- if (!tp->screen[lines].cells)
+ for (lines = 0; lines < rows - 2; lines++) {
+ size = sizeof(struct tty3270_cell) * cols;
+ screen[lines].cells = kzalloc(size, GFP_KERNEL);
+ if (!screen[lines].cells)
goto out_screen;
}
- return 0;
+ return screen;
out_screen:
while (lines--)
- kfree(tp->screen[lines].cells);
- kfree(tp->screen);
+ kfree(screen[lines].cells);
+ kfree(screen);
out_err:
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
/*
* Free tty3270 screen.
*/
static void
-tty3270_free_screen(struct tty3270 *tp)
+tty3270_free_screen(struct tty3270_line *screen, unsigned int rows)
{
int lines;
- for (lines = 0; lines < tp->view.rows - 2; lines++)
- kfree(tp->screen[lines].cells);
- kfree(tp->screen);
+ for (lines = 0; lines < rows - 2; lines++)
+ kfree(screen[lines].cells);
+ kfree(screen);
+}
+
+/*
+ * Resize tty3270 screen
+ */
+static void tty3270_resize_work(struct work_struct *work)
+{
+ struct tty3270 *tp = container_of(work, struct tty3270, resize_work);
+ struct tty3270_line *screen, *oscreen;
+ struct tty_struct *tty;
+ unsigned int orows;
+ struct winsize ws;
+
+ screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
+ if (IS_ERR(screen))
+ return;
+ /* Switch to new output size */
+ spin_lock_bh(&tp->view.lock);
+ oscreen = tp->screen;
+ orows = tp->view.rows;
+ tp->view.model = tp->n_model;
+ tp->view.rows = tp->n_rows;
+ tp->view.cols = tp->n_cols;
+ tp->screen = screen;
+ free_string(&tp->freemem, tp->prompt);
+ free_string(&tp->freemem, tp->status);
+ tty3270_create_prompt(tp);
+ tty3270_create_status(tp);
+ tp->nr_up = 0;
+ while (tp->nr_lines < tp->view.rows - 2)
+ tty3270_blank_line(tp);
+ tp->update_flags = TTY_UPDATE_ALL;
+ spin_unlock_bh(&tp->view.lock);
+ tty3270_free_screen(oscreen, orows);
+ tty3270_set_timer(tp, 1);
+ /* Inform the tty layer about the new size */
+ tty = tty_port_tty_get(&tp->port);
+ if (!tty)
+ return;
+ ws.ws_row = tp->view.rows - 2;
+ ws.ws_col = tp->view.cols;
+ tty_do_resize(tty, &ws);
+ tty_kref_put(tty); /* drop the reference taken by tty_port_tty_get() */
+}
+
+static void
+tty3270_resize(struct raw3270_view *view, int model, int rows, int cols)
+{
+ struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+ tp->n_model = model;
+ tp->n_rows = rows;
+ tp->n_cols = cols;
+ schedule_work(&tp->resize_work);
}
/*
@@ -815,7 +876,8 @@ static void
tty3270_free(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
- tty3270_free_screen(tp);
+
+ tty3270_free_screen(tp->screen, tp->view.rows);
tty3270_free_view(tp);
}
@@ -827,9 +889,8 @@ tty3270_del_views(void)
{
int i;
- for (i = 0; i < tty3270_max_index; i++) {
- struct raw3270_view *view =
- raw3270_find_view(&tty3270_fn, i + RAW3270_FIRSTMINOR);
+ for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) {
+ struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i);
if (!IS_ERR(view))
raw3270_del_view(view);
}
@@ -840,7 +901,8 @@ static struct raw3270_fn tty3270_fn = {
.deactivate = tty3270_deactivate,
.intv = (void *) tty3270_irq,
.release = tty3270_release,
- .free = tty3270_free
+ .free = tty3270_free,
+ .resize = tty3270_resize
};
/*
@@ -853,47 +915,43 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
int i, rc;
/* Check if the tty3270 is already there. */
- view = raw3270_find_view(&tty3270_fn,
- tty->index + RAW3270_FIRSTMINOR);
+ view = raw3270_find_view(&tty3270_fn, tty->index);
if (!IS_ERR(view)) {
tp = container_of(view, struct tty3270, view);
tty->driver_data = tp;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
- tty->low_latency = 0;
+ tp->port.low_latency = 0;
/* why to reassign? */
tty_port_tty_set(&tp->port, tty);
tp->inattr = TF_INPUT;
return tty_port_install(&tp->port, driver, tty);
}
- if (tty3270_max_index < tty->index + 1)
- tty3270_max_index = tty->index + 1;
-
- /* Quick exit if there is no device for tty->index. */
- if (PTR_ERR(view) == -ENODEV)
- return -ENODEV;
+ if (tty3270_max_index < tty->index)
+ tty3270_max_index = tty->index;
/* Allocate tty3270 structure on first open. */
tp = tty3270_alloc_view();
if (IS_ERR(tp))
return PTR_ERR(tp);
- rc = raw3270_add_view(&tp->view, &tty3270_fn,
- tty->index + RAW3270_FIRSTMINOR);
+ rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index);
if (rc) {
tty3270_free_view(tp);
return rc;
}
- rc = tty3270_alloc_screen(tp);
- if (rc) {
+ tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
+ if (IS_ERR(tp->screen)) {
+ rc = PTR_ERR(tp->screen);
raw3270_put_view(&tp->view);
raw3270_del_view(&tp->view);
+ tty3270_free_view(tp);
return rc;
}
tty_port_tty_set(&tp->port, tty);
- tty->low_latency = 0;
+ tp->port.low_latency = 0;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
@@ -926,6 +984,20 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
}
/*
+ * This routine is called whenever a 3270 tty is opened.
+ */
+static int
+tty3270_open(struct tty_struct *tty, struct file *filp)
+{
+ struct tty3270 *tp = tty->driver_data;
+ struct tty_port *port = &tp->port;
+
+ port->count++;
+ tty_port_tty_set(port, tty);
+ return 0;
+}
+
+/*
* This routine is called when the 3270 tty is closed. We wait
* for the remaining request to be completed. Then we clean up.
*/
@@ -1753,6 +1825,7 @@ static long tty3270_compat_ioctl(struct tty_struct *tty,
static const struct tty_operations tty3270_ops = {
.install = tty3270_install,
.cleanup = tty3270_cleanup,
+ .open = tty3270_open,
.close = tty3270_close,
.write = tty3270_write,
.put_char = tty3270_put_char,
@@ -1771,6 +1844,22 @@ static const struct tty_operations tty3270_ops = {
.set_termios = tty3270_set_termios
};
+void tty3270_create_cb(int minor)
+{
+ tty_register_device(tty3270_driver, minor, NULL);
+}
+
+void tty3270_destroy_cb(int minor)
+{
+ tty_unregister_device(tty3270_driver, minor);
+}
+
+struct raw3270_notifier tty3270_notifier =
+{
+ .create = tty3270_create_cb,
+ .destroy = tty3270_destroy_cb,
+};
+
/*
* 3270 tty registration code called from tty_init().
* Most kernel services (incl. kmalloc) are available at this point.
@@ -1780,23 +1869,25 @@ static int __init tty3270_init(void)
struct tty_driver *driver;
int ret;
- driver = alloc_tty_driver(RAW3270_MAXDEVS);
- if (!driver)
- return -ENOMEM;
+ driver = tty_alloc_driver(RAW3270_MAXDEVS,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_RESET_TERMIOS);
+ if (IS_ERR(driver))
+ return PTR_ERR(driver);
/*
* Initialize the tty_driver structure
* Entries in tty3270_driver that are NOT initialized:
* proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
*/
- driver->driver_name = "ttyTUB";
- driver->name = "ttyTUB";
+ driver->driver_name = "tty3270";
+ driver->name = "3270/tty";
driver->major = IBM_TTY3270_MAJOR;
- driver->minor_start = RAW3270_FIRSTMINOR;
+ driver->minor_start = 0;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
- driver->flags = TTY_DRIVER_RESET_TERMIOS;
tty_set_operations(driver, &tty3270_ops);
ret = tty_register_driver(driver);
if (ret) {
@@ -1804,6 +1895,7 @@ static int __init tty3270_init(void)
return ret;
}
tty3270_driver = driver;
+ raw3270_register_notifier(&tty3270_notifier);
return 0;
}
@@ -1812,6 +1904,7 @@ tty3270_exit(void)
{
struct tty_driver *driver;
+ raw3270_unregister_notifier(&tty3270_notifier);
driver = tty3270_driver;
tty3270_driver = NULL;
tty_unregister_driver(driver);
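
The new resize path is split in two because the raw3270 ->resize() callback is invoked from the request-callback chain (interrupt context, ccw device lock held): tty3270_resize() only records the new geometry and schedules resize_work, and tty3270_resize_work() then allocates the new screen with GFP_KERNEL, swaps it in under the view lock, frees the old one and reports the size change to the tty core. A condensed sketch of that allocate-outside/swap-inside pattern, assuming the helpers introduced above:

static void resize_sketch(struct tty3270 *tp)
{
	struct tty3270_line *new_screen, *old_screen;
	unsigned int old_rows;
	struct tty_struct *tty;
	struct winsize ws = { };

	new_screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols); /* may sleep */
	if (IS_ERR(new_screen))
		return;
	spin_lock_bh(&tp->view.lock);	/* keep the critical section short */
	old_screen = tp->screen;
	old_rows = tp->view.rows;
	tp->screen = new_screen;
	tp->view.rows = tp->n_rows;
	tp->view.cols = tp->n_cols;
	spin_unlock_bh(&tp->view.lock);
	tty3270_free_screen(old_screen, old_rows);

	tty = tty_port_tty_get(&tp->port);
	if (!tty)
		return;
	ws.ws_row = tp->view.rows - 2;	/* two lines reserved for prompt/status */
	ws.ws_col = tp->view.cols;
	tty_do_resize(tty, &ws);	/* update winsize, signal SIGWINCH */
	tty_kref_put(tty);
}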
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 483f72ba030d..c180e3135b3b 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -703,7 +703,7 @@ static int ur_open(struct inode *inode, struct file *file)
* We treat the minor number as the devno of the ur device
* to find in the driver tree.
*/
- devno = MINOR(file->f_dentry->d_inode->i_rdev);
+ devno = MINOR(file_inode(file)->i_rdev);
urd = urdev_get_from_devno(devno);
if (!urd) {
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index e3b9308b0fe3..1d61a01576d2 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -62,6 +62,7 @@ static struct dentry *zcore_dir;
static struct dentry *zcore_file;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
+static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *ipl_block;
/*
@@ -77,6 +78,8 @@ static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
int offs, blk_num;
static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+ if (!hsa_available)
+ return -ENODATA;
if (count == 0)
return 0;
@@ -278,6 +281,15 @@ next:
}
/*
+ * Release the HSA
+ */
+static void release_hsa(void)
+{
+ diag308(DIAG308_REL_HSA, NULL);
+ hsa_available = 0;
+}
+
+/*
* Read routine for zcore character device
* First 4K are dump header
* Next 32MB are HSA Memory
@@ -363,8 +375,8 @@ static int zcore_open(struct inode *inode, struct file *filp)
static int zcore_release(struct inode *inode, struct file *filep)
{
- diag308(DIAG308_REL_HSA, NULL);
- hsa_available = 0;
+ if (hsa_available)
+ release_hsa();
return 0;
}
@@ -474,6 +486,41 @@ static const struct file_operations zcore_reipl_fops = {
.llseek = no_llseek,
};
+static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ static char str[18];
+
+ if (hsa_available)
+ snprintf(str, sizeof(str), "%lx\n", ZFCPDUMP_HSA_SIZE);
+ else
+ snprintf(str, sizeof(str), "0\n");
+ return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
+}
+
+static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char value;
+
+ if (*ppos != 0)
+ return -EPIPE;
+ if (copy_from_user(&value, buf, 1))
+ return -EFAULT;
+ if (value != '0')
+ return -EINVAL;
+ release_hsa();
+ return count;
+}
+
+static const struct file_operations zcore_hsa_fops = {
+ .owner = THIS_MODULE,
+ .write = zcore_hsa_write,
+ .read = zcore_hsa_read,
+ .open = nonseekable_open,
+ .llseek = no_llseek,
+};
+
#ifdef CONFIG_32BIT
static void __init set_lc_mask(struct save_area *map)
@@ -590,7 +637,7 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr)
hdr->rmem_size = memory;
hdr->mem_end = sys_info.mem_size;
hdr->num_pages = memory / PAGE_SIZE;
- hdr->tod = get_clock();
+ hdr->tod = get_tod_clock();
get_cpu_id(&hdr->cpu_id);
for (i = 0; zfcpdump_save_areas[i]; i++) {
prefix = zfcpdump_save_areas[i]->pref_reg;
@@ -658,6 +705,7 @@ static int __init zcore_init(void)
rc = check_sdias();
if (rc)
goto fail;
+ hsa_available = 1;
rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
if (rc)
@@ -714,9 +762,16 @@ static int __init zcore_init(void)
rc = -ENOMEM;
goto fail_memmap_file;
}
- hsa_available = 1;
+ zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
+ NULL, &zcore_hsa_fops);
+ if (!zcore_hsa_file) {
+ rc = -ENOMEM;
+ goto fail_reipl_file;
+ }
return 0;
+fail_reipl_file:
+ debugfs_remove(zcore_reipl_file);
fail_memmap_file:
debugfs_remove(zcore_memmap_file);
fail_file:
@@ -733,6 +788,7 @@ static void __exit zcore_exit(void)
debug_unregister(zcore_dbf);
sclp_sdias_exit();
free_page((unsigned long) ipl_block);
+ debugfs_remove(zcore_hsa_file);
debugfs_remove(zcore_reipl_file);
debugfs_remove(zcore_memmap_file);
debugfs_remove(zcore_file);
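
The new debugfs file gives dump tools a way to query the HSA size and to hand the HSA memory back early once its 32 MB have been copied out: a read returns the size in hex (or 0 after release), and writing the single character '0' triggers release_hsa(). A userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and that the zcore directory created earlier in this file is named "zcore":

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/zcore/hsa", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* e.g. "2000000\n" for 32 MB */
	if (n > 0) {
		buf[n] = '\0';
		printf("HSA size: %s", buf);
	}
	close(fd);

	fd = open("/sys/kernel/debug/zcore/hsa", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "0", 1) != 1)		/* releases the HSA */
		perror("release");
	close(fd);
	return 0;
}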
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 10729bbceced..31ceef1beb8b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -435,7 +435,6 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
-#ifdef CONFIG_PCI
switch (sei_area->cc) {
case 1:
zpci_event_error(sei_area->ccdf);
@@ -444,11 +443,10 @@ static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
zpci_event_availability(sei_area->ccdf);
break;
default:
- CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
+ CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
sei_area->cc);
break;
}
-#endif
}
static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
@@ -471,13 +469,19 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
chsc_process_sei_scm_change(sei_area);
break;
default: /* other stuff */
- CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
+ CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
sei_area->cc);
break;
}
+
+ /* Check if we might have lost some information. */
+ if (sei_area->flags & 0x40) {
+ CIO_CRW_EVENT(2, "chsc: event overflow\n");
+ css_schedule_eval_all();
+ }
}
-static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
+static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
do {
memset(sei, 0, sizeof(*sei));
@@ -488,40 +492,37 @@ static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
if (chsc(sei))
break;
- if (sei->response.code == 0x0001) {
- CIO_CRW_EVENT(2, "chsc: sei successful\n");
-
- /* Check if we might have lost some information. */
- if (sei->u.nt0_area.flags & 0x40) {
- CIO_CRW_EVENT(2, "chsc: event overflow\n");
- css_schedule_eval_all();
- }
-
- switch (sei->nt) {
- case 0:
- chsc_process_sei_nt0(&sei->u.nt0_area);
- break;
- case 2:
- chsc_process_sei_nt2(&sei->u.nt2_area);
- break;
- default:
- CIO_CRW_EVENT(2, "chsc: unhandled nt=%d\n",
- sei->nt);
- break;
- }
- } else {
+ if (sei->response.code != 0x0001) {
CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
sei->response.code);
break;
}
- } while (sei->u.nt0_area.flags & 0x80);
- return 0;
+ CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
+ switch (sei->nt) {
+ case 0:
+ chsc_process_sei_nt0(&sei->u.nt0_area);
+ break;
+ case 2:
+ chsc_process_sei_nt2(&sei->u.nt2_area);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
+ break;
+ }
+ } while (sei->u.nt0_area.flags & 0x80);
}
+/*
+ * Handle channel subsystem related CRWs.
+ * Use store event information to find out what's going on.
+ *
+ * Note: Access to sei_page is serialized through machine check handler
+ * thread, so no need for locking.
+ */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
- struct chsc_sei *sei;
+ struct chsc_sei *sei = sei_page;
if (overflow) {
css_schedule_eval_all();
@@ -531,14 +532,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
- if (!sei_page)
- return;
- /* Access to sei_page is serialized through machine check handler
- * thread, so no need for locking. */
- sei = sei_page;
CIO_TRACE_EVENT(2, "prcss");
- __chsc_process_crw(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
+ chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
void chsc_chp_online(struct chp_id chpid)
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 662dab4b93e6..227e05f674b3 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -157,7 +157,7 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
#else /* CONFIG_SCM_BUS */
-#define scm_update_information() 0
+static inline int scm_update_information(void) { return 0; }
#endif /* CONFIG_SCM_BUS */
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index c8faf6230b0f..986ef6a92a41 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -962,9 +962,9 @@ static void css_reset(void)
atomic_inc(&chpid_reset_count);
}
/* Wait for machine check for all channel paths. */
- timeout = get_clock() + (RCHP_TIMEOUT << 12);
+ timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
while (atomic_read(&chpid_reset_count) != 0) {
- if (get_clock() > timeout)
+ if (get_tod_clock() > timeout)
break;
cpu_relax();
}
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index c9fc61c0a866..4495e0627a40 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -33,7 +33,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <linux/timex.h> /* get_clock() */
+#include <linux/timex.h> /* get_tod_clock() */
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -326,7 +326,7 @@ static int cmf_copy_block(struct ccw_device *cdev)
memcpy(cmb_data->last_block, hw_block, cmb_data->size);
memcpy(reference_buf, hw_block, cmb_data->size);
} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
- cmb_data->last_update = get_clock();
+ cmb_data->last_update = get_tod_clock();
kfree(reference_buf);
return 0;
}
@@ -428,7 +428,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
cmb_data->last_update = 0;
}
- cdev->private->cmb_start_time = get_clock();
+ cdev->private->cmb_start_time = get_tod_clock();
spin_unlock_irq(cdev->ccwlock);
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fd00afd8b850..a239237d43f3 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -780,7 +780,7 @@ static int __init setup_css(int nr)
css->cssid = nr;
dev_set_name(&css->device, "css%x", nr);
css->device.release = channel_subsystem_release;
- tod_high = (u32) (get_clock() >> 32);
+ tod_high = (u32) (get_tod_clock() >> 32);
css_generate_pgid(css, tod_high);
return 0;
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 7cd5c6812ac7..c6767f5a58b2 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -632,6 +632,14 @@ initiate_logging(struct device *dev, struct device_attribute *attr,
return count;
}
+static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sprintf(buf, "%02x\n", sch->vpm);
+}
+
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
@@ -640,11 +648,13 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
+static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
static struct attribute *io_subchannel_attrs[] = {
&dev_attr_chpids.attr,
&dev_attr_pimpampom.attr,
&dev_attr_logging.attr,
+ &dev_attr_vpm.attr,
NULL,
};
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 1bb1d00095af..c7638c543250 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -47,7 +47,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
cc = stsch_err(sch->schid, &schib);
printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
- "device information:\n", get_clock());
+ "device information:\n", get_tod_clock());
printk(KERN_WARNING "cio: orb:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
orb, sizeof(*orb), 0);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 908d287f66c1..37ada05e82a5 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -23,6 +23,8 @@
#define PGID_RETRIES 256
#define PGID_TIMEOUT (10 * HZ)
+static void verify_start(struct ccw_device *cdev);
+
/*
* Process path verification data and report result.
*/
@@ -70,8 +72,8 @@ static void nop_do(struct ccw_device *cdev)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- /* Adjust lpm. */
- req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
+ ~cdev->private->path_noirq_mask);
if (!req->lpm)
goto out_nopath;
nop_build_cp(cdev);
@@ -102,10 +104,20 @@ static void nop_callback(struct ccw_device *cdev, void *data, int rc)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- if (rc == 0)
+ switch (rc) {
+ case 0:
sch->vpm |= req->lpm;
- else if (rc != -EACCES)
+ break;
+ case -ETIME:
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ default:
goto err;
+ }
+ /* Continue on the next path. */
req->lpm >>= 1;
nop_do(cdev);
return;
@@ -132,6 +144,48 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
req->cp = cp;
}
+static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ if (rc) {
+ /* We don't know the path groups' state. Abort. */
+ verify_done(cdev, rc);
+ return;
+ }
+ /*
+ * Path groups have been reset. Restart path verification but
+ * leave paths in path_noirq_mask out.
+ */
+ cdev->private->flags.pgid_unknown = 0;
+ verify_start(cdev);
+}
+
+/*
+ * Reset pathgroups and restart path verification, leave unusable paths out.
+ */
+static void pgid_wipeout_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
+ id->ssid, id->devno, cdev->private->pgid_valid_mask,
+ cdev->private->path_noirq_mask);
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam;
+ req->callback = pgid_wipeout_callback;
+ fn = SPID_FUNC_DISBAND;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+}
+
/*
* Perform establish/resign SET PGID on a single path.
*/
@@ -157,11 +211,14 @@ static void spid_do(struct ccw_device *cdev)
return;
out_nopath:
+ if (cdev->private->flags.pgid_unknown) {
+ /* At least one SPID could be partially done. */
+ pgid_wipeout_start(cdev);
+ return;
+ }
verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
-static void verify_start(struct ccw_device *cdev);
-
/*
* Process SET PGID request result for a single path.
*/
@@ -174,7 +231,12 @@ static void spid_callback(struct ccw_device *cdev, void *data, int rc)
case 0:
sch->vpm |= req->lpm & sch->opm;
break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
break;
case -EOPNOTSUPP:
if (cdev->private->flags.mpath) {
@@ -330,8 +392,9 @@ static void snid_done(struct ccw_device *cdev, int rc)
else {
donepm = pgid_to_donepm(cdev);
sch->vpm = donepm & sch->opm;
- cdev->private->pgid_todo_mask &= ~donepm;
cdev->private->pgid_reset_mask |= reset;
+ cdev->private->pgid_todo_mask &=
+ ~(donepm | cdev->private->path_noirq_mask);
pgid_fill(cdev, pgid);
}
out:
@@ -341,6 +404,10 @@ out:
cdev->private->pgid_todo_mask, mismatch, reserved, reset);
switch (rc) {
case 0:
+ if (cdev->private->flags.pgid_unknown) {
+ pgid_wipeout_start(cdev);
+ return;
+ }
/* Anything left to do? */
if (cdev->private->pgid_todo_mask == 0) {
verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
@@ -384,9 +451,10 @@ static void snid_do(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
+ int ret;
- /* Adjust lpm if paths are not set in pam. */
- req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
+ ~cdev->private->path_noirq_mask);
if (!req->lpm)
goto out_nopath;
snid_build_cp(cdev);
@@ -394,7 +462,13 @@ static void snid_do(struct ccw_device *cdev)
return;
out_nopath:
- snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+ if (cdev->private->pgid_valid_mask)
+ ret = 0;
+ else if (cdev->private->path_noirq_mask)
+ ret = -ETIME;
+ else
+ ret = -EACCES;
+ snid_done(cdev, ret);
}
/*
@@ -404,10 +478,21 @@ static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
struct ccw_request *req = &cdev->private->req;
- if (rc == 0)
+ switch (rc) {
+ case 0:
cdev->private->pgid_valid_mask |= req->lpm;
- else if (rc != -EACCES)
+ break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ default:
goto err;
+ }
+ /* Continue on the next path. */
req->lpm >>= 1;
snid_do(cdev);
return;
@@ -427,6 +512,13 @@ static void verify_start(struct ccw_device *cdev)
sch->vpm = 0;
sch->lpm = sch->schib.pmcw.pam;
+
+ /* Initialize PGID data. */
+ memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+ cdev->private->pgid_valid_mask = 0;
+ cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+ cdev->private->path_notoper_mask = 0;
+
/* Initialize request data. */
memset(req, 0, sizeof(*req));
req->timeout = PGID_TIMEOUT;
@@ -459,14 +551,8 @@ static void verify_start(struct ccw_device *cdev)
*/
void ccw_device_verify_start(struct ccw_device *cdev)
{
- struct subchannel *sch = to_subchannel(cdev->dev.parent);
-
CIO_TRACE_EVENT(4, "vrfy");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
- /* Initialize PGID data. */
- memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
- cdev->private->pgid_valid_mask = 0;
- cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
/*
* Initialize pathgroup and multipath state with target values.
* They may change in the course of path verification.
@@ -474,6 +560,7 @@ void ccw_device_verify_start(struct ccw_device *cdev)
cdev->private->flags.pgroup = cdev->private->options.pgroup;
cdev->private->flags.mpath = cdev->private->options.mpath;
cdev->private->flags.doverify = 0;
+ cdev->private->path_noirq_mask = 0;
verify_start(cdev);
}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 76253dfcc1be..b108f4a5c7dd 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -126,6 +126,10 @@ struct ccw_device_private {
u8 pgid_valid_mask; /* mask of valid PGIDs */
u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
u8 pgid_reset_mask; /* mask of PGIDs which were reset */
+ u8 path_noirq_mask; /* mask of paths for which no irq was
+ received */
+ u8 path_notoper_mask; /* mask of paths which were found
+ not operable */
u8 path_gone_mask; /* mask of paths, that became unavailable */
u8 path_new_mask; /* mask of paths, that became available */
struct {
@@ -145,6 +149,7 @@ struct ccw_device_private {
unsigned int resuming:1; /* recognition while resume */
unsigned int pgroup:1; /* pathgroup is set up */
unsigned int mpath:1; /* multipathing is set up */
+ unsigned int pgid_unknown:1;/* unknown pgid state */
unsigned int initialized:1; /* set if initial reference held */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e6e0d31c02ac..749b72739c4a 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -128,7 +128,7 @@ static int qstat_show(struct seq_file *m, void *v)
static int qstat_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, qstat_show,
- filp->f_path.dentry->d_inode->i_private);
+ file_inode(filp)->i_private);
}
static const struct file_operations debugfs_fops = {
@@ -221,7 +221,7 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
static int qperf_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, qperf_show,
- filp->f_path.dentry->d_inode->i_private);
+ file_inode(filp)->i_private);
}
static struct file_operations debugfs_perf_fops = {
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 1671d3461f29..abc550e5dd35 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -338,10 +338,10 @@ again:
retries++;
if (!start_time) {
- start_time = get_clock();
+ start_time = get_tod_clock();
goto again;
}
- if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+ if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
int count, stop;
unsigned char state = 0;
- q->timestamp = get_clock();
+ q->timestamp = get_tod_clock();
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -563,7 +563,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
if (bufnr != q->last_move) {
q->last_move = bufnr;
if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
- q->u.in.timestamp = get_clock();
+ q->u.in.timestamp = get_tod_clock();
return 1;
} else
return 0;
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* At this point we know, that inbound first_to_check
* has (probably) not moved (see qdio_inbound_processing).
*/
- if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+ if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
@@ -772,7 +772,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
int count, stop;
unsigned char state = 0;
- q->timestamp = get_clock();
+ q->timestamp = get_tod_clock();
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/kvm/Makefile
index 0815690ac1e0..241891a57caf 100644
--- a/drivers/s390/kvm/Makefile
+++ b/drivers/s390/kvm/Makefile
@@ -6,4 +6,4 @@
# it under the terms of the GNU General Public License (version 2 only)
# as published by the Free Software Foundation.
-obj-$(CONFIG_S390_GUEST) += kvm_virtio.o
+obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 8491111aec12..6711e65764b5 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -275,7 +275,7 @@ static const char *kvm_bus_name(struct virtio_device *vdev)
/*
* The config ops structure as defined by virtio config
*/
-static struct virtio_config_ops kvm_vq_configspace_ops = {
+static const struct virtio_config_ops kvm_vq_configspace_ops = {
.get_features = kvm_get_features,
.finalize_features = kvm_finalize_features,
.get = kvm_get,
@@ -422,6 +422,26 @@ static void kvm_extint_handler(struct ext_code ext_code,
}
/*
+ * For s390-virtio, we expect a page above main storage containing
+ * the virtio configuration. Try to actually load from this area
+ * in order to figure out if the host provides this page.
+ */
+static int __init test_devices_support(unsigned long addr)
+{
+ int ret = -EIO;
+
+ asm volatile(
+ "0: lura 0,%1\n"
+ "1: xgr %0,%0\n"
+ "2:\n"
+ EX_TABLE(0b,2b)
+ EX_TABLE(1b,2b)
+ : "+d" (ret)
+ : "a" (addr)
+ : "0", "cc");
+ return ret;
+}
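(A note on the probe above, inferred from the code itself: lura attempts a real-address load from the page at addr. If the host does not back that page, the access faults and the EX_TABLE entries branch to label 2 with ret still holding -EIO; if the load succeeds, execution reaches the xgr at label 1, which clears ret to 0. kvm_devices_init() below relies on this result before calling vmem_add_mapping() on the config page.)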
+/*
* Init function for virtio
* devices are in a single page above top of "normal" mem
*/
@@ -432,21 +452,23 @@ static int __init kvm_devices_init(void)
if (!MACHINE_IS_KVM)
return -ENODEV;
+ if (test_devices_support(real_memory_size) < 0)
+ return -ENODEV;
+
+ rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
+ if (rc)
+ return rc;
+
+ kvm_devices = (void *) real_memory_size;
+
kvm_root = root_device_register("kvm_s390");
if (IS_ERR(kvm_root)) {
rc = PTR_ERR(kvm_root);
printk(KERN_ERR "Could not register kvm_s390 root device");
+ vmem_remove_mapping(real_memory_size, PAGE_SIZE);
return rc;
}
- rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
- if (rc) {
- root_device_unregister(kvm_root);
- return rc;
- }
-
- kvm_devices = (void *) real_memory_size;
-
INIT_WORK(&hotplug_work, hotplug_devices);
service_subclass_irq_register();
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
new file mode 100644
index 000000000000..2029b6caa595
--- /dev/null
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -0,0 +1,926 @@
+/*
+ * ccw based virtio transport
+ *
+ * Copyright IBM Corp. 2012
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/virtio_ring.h>
+#include <linux/pfn.h>
+#include <linux/async.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/kvm_para.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+
+/*
+ * virtio related functions
+ */
+
+struct vq_config_block {
+ __u16 index;
+ __u16 num;
+} __packed;
+
+#define VIRTIO_CCW_CONFIG_SIZE 0x100
+/* same as PCI config space size, should be enough for all drivers */
+
+struct virtio_ccw_device {
+ struct virtio_device vdev;
+ __u8 *status;
+ __u8 config[VIRTIO_CCW_CONFIG_SIZE];
+ struct ccw_device *cdev;
+ __u32 curr_io;
+ int err;
+ wait_queue_head_t wait_q;
+ spinlock_t lock;
+ struct list_head virtqueues;
+ unsigned long indicators;
+ unsigned long indicators2;
+ struct vq_config_block *config_block;
+};
+
+struct vq_info_block {
+ __u64 queue;
+ __u32 align;
+ __u16 index;
+ __u16 num;
+} __packed;
+
+struct virtio_feature_desc {
+ __u32 features;
+ __u8 index;
+} __packed;
+
+struct virtio_ccw_vq_info {
+ struct virtqueue *vq;
+ int num;
+ void *queue;
+ struct vq_info_block *info_block;
+ struct list_head node;
+};
+
+#define KVM_VIRTIO_CCW_RING_ALIGN 4096
+
+#define KVM_S390_VIRTIO_CCW_NOTIFY 3
+
+#define CCW_CMD_SET_VQ 0x13
+#define CCW_CMD_VDEV_RESET 0x33
+#define CCW_CMD_SET_IND 0x43
+#define CCW_CMD_SET_CONF_IND 0x53
+#define CCW_CMD_READ_FEAT 0x12
+#define CCW_CMD_WRITE_FEAT 0x11
+#define CCW_CMD_READ_CONF 0x22
+#define CCW_CMD_WRITE_CONF 0x21
+#define CCW_CMD_WRITE_STATUS 0x31
+#define CCW_CMD_READ_VQ_CONF 0x32
+
+#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
+#define VIRTIO_CCW_DOING_RESET 0x00040000
+#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
+#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
+#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
+#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
+#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
+#define VIRTIO_CCW_DOING_SET_IND 0x01000000
+#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
+#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
+#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
+
+static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
+{
+ return container_of(vdev, struct virtio_ccw_device, vdev);
+}
+
+static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
+{
+ unsigned long flags;
+ __u32 ret;
+
+ spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
+ if (vcdev->err)
+ ret = 0;
+ else
+ ret = vcdev->curr_io & flag;
+ spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
+ return ret;
+}
+
+static int ccw_io_helper(struct virtio_ccw_device *vcdev,
+ struct ccw1 *ccw, __u32 intparm)
+{
+ int ret;
+ unsigned long flags;
+ int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
+
+ do {
+ spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
+ ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
+ if (!ret)
+ vcdev->curr_io |= flag;
+ spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
+ cpu_relax();
+ } while (ret == -EBUSY);
+ wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
+ return ret ? ret : vcdev->err;
+}
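Reading ccw_io_helper() together with doing_io(): every channel command in this transport is issued synchronously. The channel program is started under the ccw device lock, -EBUSY from ccw_device_start() is retried, and the caller then sleeps on wait_q until the interrupt handler clears the matching VIRTIO_CCW_DOING_* flag from curr_io (or records an error in vcdev->err, which doing_io() also treats as completion).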
+
+static inline long do_kvm_notify(struct subchannel_id schid,
+ unsigned long queue_index)
+{
+ register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
+ register struct subchannel_id __schid asm("2") = schid;
+ register unsigned long __index asm("3") = queue_index;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index)
+ : "memory", "cc");
+ return __rc;
+}
+
+static void virtio_ccw_kvm_notify(struct virtqueue *vq)
+{
+ struct virtio_ccw_vq_info *info = vq->priv;
+ struct virtio_ccw_device *vcdev;
+ struct subchannel_id schid;
+
+ vcdev = to_vc_device(info->vq->vdev);
+ ccw_device_get_schid(vcdev->cdev, &schid);
+ do_kvm_notify(schid, virtqueue_get_queue_index(vq));
+}
+
+static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
+ struct ccw1 *ccw, int index)
+{
+ vcdev->config_block->index = index;
+ ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
+ ccw->flags = 0;
+ ccw->count = sizeof(struct vq_config_block);
+ ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
+ return vcdev->config_block->num;
+}
+
+static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
+ struct virtio_ccw_vq_info *info = vq->priv;
+ unsigned long flags;
+ unsigned long size;
+ int ret;
+ unsigned int index = virtqueue_get_queue_index(vq);
+
+ /* Remove from our list. */
+ spin_lock_irqsave(&vcdev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vcdev->lock, flags);
+
+ /* Release from host. */
+ info->info_block->queue = 0;
+ info->info_block->align = 0;
+ info->info_block->index = index;
+ info->info_block->num = 0;
+ ccw->cmd_code = CCW_CMD_SET_VQ;
+ ccw->flags = 0;
+ ccw->count = sizeof(*info->info_block);
+ ccw->cda = (__u32)(unsigned long)(info->info_block);
+ ret = ccw_io_helper(vcdev, ccw,
+ VIRTIO_CCW_DOING_SET_VQ | index);
+ /*
+ * -ENODEV isn't considered an error: The device is gone anyway.
+ * This may happen on device detach.
+ */
+ if (ret && (ret != -ENODEV))
+ dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
+ ret, index);
+
+ vring_del_virtqueue(vq);
+ size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
+ free_pages_exact(info->queue, size);
+ kfree(info->info_block);
+ kfree(info);
+}
+
+static void virtio_ccw_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+ struct ccw1 *ccw;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return;
+
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+ virtio_ccw_del_vq(vq, ccw);
+
+ kfree(ccw);
+}
+
+static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
+ int i, vq_callback_t *callback,
+ const char *name,
+ struct ccw1 *ccw)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ int err;
+ struct virtqueue *vq = NULL;
+ struct virtio_ccw_vq_info *info;
+ unsigned long size = 0; /* silence the compiler */
+ unsigned long flags;
+
+ /* Allocate queue. */
+ info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
+ if (!info) {
+ dev_warn(&vcdev->cdev->dev, "no info\n");
+ err = -ENOMEM;
+ goto out_err;
+ }
+ info->info_block = kzalloc(sizeof(*info->info_block),
+ GFP_DMA | GFP_KERNEL);
+ if (!info->info_block) {
+ dev_warn(&vcdev->cdev->dev, "no info block\n");
+ err = -ENOMEM;
+ goto out_err;
+ }
+ info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
+ size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
+ info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+ if (info->queue == NULL) {
+ dev_warn(&vcdev->cdev->dev, "no queue\n");
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
+ true, info->queue, virtio_ccw_kvm_notify,
+ callback, name);
+ if (!vq) {
+ /* For now, we fail if we can't get the requested size. */
+ dev_warn(&vcdev->cdev->dev, "no vq\n");
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Register it with the host. */
+ info->info_block->queue = (__u64)info->queue;
+ info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN;
+ info->info_block->index = i;
+ info->info_block->num = info->num;
+ ccw->cmd_code = CCW_CMD_SET_VQ;
+ ccw->flags = 0;
+ ccw->count = sizeof(*info->info_block);
+ ccw->cda = (__u32)(unsigned long)(info->info_block);
+ err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
+ if (err) {
+ dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
+ goto out_err;
+ }
+
+ info->vq = vq;
+ vq->priv = info;
+
+ /* Save it to our list. */
+ spin_lock_irqsave(&vcdev->lock, flags);
+ list_add(&info->node, &vcdev->virtqueues);
+ spin_unlock_irqrestore(&vcdev->lock, flags);
+
+ return vq;
+
+out_err:
+ if (vq)
+ vring_del_virtqueue(vq);
+ if (info) {
+ if (info->queue)
+ free_pages_exact(info->queue, size);
+ kfree(info->info_block);
+ }
+ kfree(info);
+ return ERR_PTR(err);
+}
+
+static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ unsigned long *indicatorp = NULL;
+ int ret, i;
+ struct ccw1 *ccw;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return -ENOMEM;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
+ ccw);
+ if (IS_ERR(vqs[i])) {
+ ret = PTR_ERR(vqs[i]);
+ vqs[i] = NULL;
+ goto out;
+ }
+ }
+ ret = -ENOMEM;
+ /* We need a data area under 2G to communicate. */
+ indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
+ if (!indicatorp)
+ goto out;
+ *indicatorp = (unsigned long) &vcdev->indicators;
+ /* Register queue indicators with host. */
+ vcdev->indicators = 0;
+ ccw->cmd_code = CCW_CMD_SET_IND;
+ ccw->flags = 0;
+ ccw->count = sizeof(vcdev->indicators);
+ ccw->cda = (__u32)(unsigned long) indicatorp;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
+ if (ret)
+ goto out;
+ /* Register indicators2 with host for config changes */
+ *indicatorp = (unsigned long) &vcdev->indicators2;
+ vcdev->indicators2 = 0;
+ ccw->cmd_code = CCW_CMD_SET_CONF_IND;
+ ccw->flags = 0;
+ ccw->count = sizeof(vcdev->indicators2);
+ ccw->cda = (__u32)(unsigned long) indicatorp;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
+ if (ret)
+ goto out;
+
+ kfree(indicatorp);
+ kfree(ccw);
+ return 0;
+out:
+ kfree(indicatorp);
+ kfree(ccw);
+ virtio_ccw_del_vqs(vdev);
+ return ret;
+}
+
+static void virtio_ccw_reset(struct virtio_device *vdev)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct ccw1 *ccw;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return;
+
+ /* Zero status bits. */
+ *vcdev->status = 0;
+
+ /* Send a reset ccw on device. */
+ ccw->cmd_code = CCW_CMD_VDEV_RESET;
+ ccw->flags = 0;
+ ccw->count = 0;
+ ccw->cda = 0;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
+ kfree(ccw);
+}
+
+static u32 virtio_ccw_get_features(struct virtio_device *vdev)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct virtio_feature_desc *features;
+ int ret, rc;
+ struct ccw1 *ccw;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return 0;
+
+ features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+ if (!features) {
+ rc = 0;
+ goto out_free;
+ }
+ /* Read the feature bits from the host. */
+ /* TODO: Features > 32 bits */
+ features->index = 0;
+ ccw->cmd_code = CCW_CMD_READ_FEAT;
+ ccw->flags = 0;
+ ccw->count = sizeof(*features);
+ ccw->cda = (__u32)(unsigned long)features;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
+ if (ret) {
+ rc = 0;
+ goto out_free;
+ }
+
+ rc = le32_to_cpu(features->features);
+
+out_free:
+ kfree(features);
+ kfree(ccw);
+ return rc;
+}
+
+static void virtio_ccw_finalize_features(struct virtio_device *vdev)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct virtio_feature_desc *features;
+ int i;
+ struct ccw1 *ccw;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return;
+
+ features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+ if (!features)
+ goto out_free;
+
+ /* Give virtio_ring a chance to accept features. */
+ vring_transport_features(vdev);
+
+ for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features);
+ i++) {
+ int highbits = i % 2 ? 32 : 0;
+ features->index = i;
+ features->features = cpu_to_le32(vdev->features[i / 2]
+ >> highbits);
+ /* Write the feature bits to the host. */
+ ccw->cmd_code = CCW_CMD_WRITE_FEAT;
+ ccw->flags = 0;
+ ccw->count = sizeof(*features);
+ ccw->cda = (__u32)(unsigned long)features;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
+ }
+out_free:
+ kfree(features);
+ kfree(ccw);
+}
+
+static void virtio_ccw_get_config(struct virtio_device *vdev,
+ unsigned int offset, void *buf, unsigned len)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ int ret;
+ struct ccw1 *ccw;
+ void *config_area;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return;
+
+ config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+ if (!config_area)
+ goto out_free;
+
+ /* Read the config area from the host. */
+ ccw->cmd_code = CCW_CMD_READ_CONF;
+ ccw->flags = 0;
+ ccw->count = offset + len;
+ ccw->cda = (__u32)(unsigned long)config_area;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
+ if (ret)
+ goto out_free;
+
+ memcpy(vcdev->config, config_area, sizeof(vcdev->config));
+ memcpy(buf, &vcdev->config[offset], len);
+
+out_free:
+ kfree(config_area);
+ kfree(ccw);
+}
+
+static void virtio_ccw_set_config(struct virtio_device *vdev,
+ unsigned int offset, const void *buf,
+ unsigned len)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct ccw1 *ccw;
+ void *config_area;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return;
+
+ config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+ if (!config_area)
+ goto out_free;
+
+ memcpy(&vcdev->config[offset], buf, len);
+ /* Write the config area to the host. */
+ memcpy(config_area, vcdev->config, sizeof(vcdev->config));
+ ccw->cmd_code = CCW_CMD_WRITE_CONF;
+ ccw->flags = 0;
+ ccw->count = offset + len;
+ ccw->cda = (__u32)(unsigned long)config_area;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
+
+out_free:
+ kfree(config_area);
+ kfree(ccw);
+}
+
+static u8 virtio_ccw_get_status(struct virtio_device *vdev)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+
+ return *vcdev->status;
+}
+
+static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct ccw1 *ccw;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return;
+
+ /* Write the status to the host. */
+ *vcdev->status = status;
+ ccw->cmd_code = CCW_CMD_WRITE_STATUS;
+ ccw->flags = 0;
+ ccw->count = sizeof(status);
+ ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
+ kfree(ccw);
+}
+
+static struct virtio_config_ops virtio_ccw_config_ops = {
+ .get_features = virtio_ccw_get_features,
+ .finalize_features = virtio_ccw_finalize_features,
+ .get = virtio_ccw_get_config,
+ .set = virtio_ccw_set_config,
+ .get_status = virtio_ccw_get_status,
+ .set_status = virtio_ccw_set_status,
+ .reset = virtio_ccw_reset,
+ .find_vqs = virtio_ccw_find_vqs,
+ .del_vqs = virtio_ccw_del_vqs,
+};
+
+
+/*
+ * ccw bus driver related functions
+ */
+
+static void virtio_ccw_release_dev(struct device *_d)
+{
+ struct virtio_device *dev = container_of(_d, struct virtio_device,
+ dev);
+ struct virtio_ccw_device *vcdev = to_vc_device(dev);
+
+ kfree(vcdev->status);
+ kfree(vcdev->config_block);
+ kfree(vcdev);
+}
+
+static int irb_is_error(struct irb *irb)
+{
+ if (scsw_cstat(&irb->scsw) != 0)
+ return 1;
+ if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+ return 1;
+ if (scsw_cc(&irb->scsw) != 0)
+ return 1;
+ return 0;
+}
+
+static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
+ int index)
+{
+ struct virtio_ccw_vq_info *info;
+ unsigned long flags;
+ struct virtqueue *vq;
+
+ vq = NULL;
+ spin_lock_irqsave(&vcdev->lock, flags);
+ list_for_each_entry(info, &vcdev->virtqueues, node) {
+ if (virtqueue_get_queue_index(info->vq) == index) {
+ vq = info->vq;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&vcdev->lock, flags);
+ return vq;
+}
+
+static void virtio_ccw_int_handler(struct ccw_device *cdev,
+ unsigned long intparm,
+ struct irb *irb)
+{
+ __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
+ struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+ int i;
+ struct virtqueue *vq;
+ struct virtio_driver *drv;
+
+ /* Check if it's a notification from the host. */
+ if ((intparm == 0) &&
+ (scsw_stctl(&irb->scsw) ==
+ (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
+ /* OK */
+ }
+ if (irb_is_error(irb))
+ vcdev->err = -EIO; /* XXX - use real error */
+ if (vcdev->curr_io & activity) {
+ switch (activity) {
+ case VIRTIO_CCW_DOING_READ_FEAT:
+ case VIRTIO_CCW_DOING_WRITE_FEAT:
+ case VIRTIO_CCW_DOING_READ_CONFIG:
+ case VIRTIO_CCW_DOING_WRITE_CONFIG:
+ case VIRTIO_CCW_DOING_WRITE_STATUS:
+ case VIRTIO_CCW_DOING_SET_VQ:
+ case VIRTIO_CCW_DOING_SET_IND:
+ case VIRTIO_CCW_DOING_SET_CONF_IND:
+ case VIRTIO_CCW_DOING_RESET:
+ case VIRTIO_CCW_DOING_READ_VQ_CONF:
+ vcdev->curr_io &= ~activity;
+ wake_up(&vcdev->wait_q);
+ break;
+ default:
+ /* don't know what to do... */
+ dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
+ activity);
+ WARN_ON(1);
+ break;
+ }
+ }
+ for_each_set_bit(i, &vcdev->indicators,
+ sizeof(vcdev->indicators) * BITS_PER_BYTE) {
+ /* The bit clear must happen before the vring kick. */
+ clear_bit(i, &vcdev->indicators);
+ barrier();
+ vq = virtio_ccw_vq_by_ind(vcdev, i);
+ vring_interrupt(0, vq);
+ }
+ if (test_bit(0, &vcdev->indicators2)) {
+ drv = container_of(vcdev->vdev.dev.driver,
+ struct virtio_driver, driver);
+
+ if (drv && drv->config_changed)
+ drv->config_changed(&vcdev->vdev);
+ clear_bit(0, &vcdev->indicators2);
+ }
+}
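The interrupt handler above serves two purposes: it completes whichever synchronous channel command was in flight (clearing its VIRTIO_CCW_DOING_* flag and waking ccw_io_helper() through wait_q), and it then scans the indicators bitmap registered with the host. Each set bit names a virtqueue with pending work; the bit is cleared before vring_interrupt() runs so a notification that arrives concurrently is not lost. Bit 0 of indicators2 reports a configuration change and is forwarded to the driver's config_changed callback.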
+
+/*
+ * We usually want to autoonline all devices, but give the admin
+ * a way to exempt devices from this.
+ */
+#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
+ (8*sizeof(long)))
+static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];
+
+static char *no_auto = "";
+
+module_param(no_auto, charp, 0444);
+MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
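no_auto takes a comma-separated list of bus IDs or bus-ID ranges in cssid.ssid.devno form, parsed by parse_busid() and no_auto_parse() further down (devno must be four hex digits). As an illustrative value only, something like virtio_ccw.no_auto=0.0.1000-0.0.10ff,0.0.2000 on the kernel command line would keep those devices from being set online automatically, leaving them to be brought online by hand.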
+
+static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
+{
+ struct ccw_dev_id id;
+
+ ccw_device_get_id(cdev, &id);
+ if (test_bit(id.devno, devs_no_auto[id.ssid]))
+ return 0;
+ return 1;
+}
+
+static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
+{
+ struct ccw_device *cdev = data;
+ int ret;
+
+ ret = ccw_device_set_online(cdev);
+ if (ret)
+ dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
+}
+
+static int virtio_ccw_probe(struct ccw_device *cdev)
+{
+ cdev->handler = virtio_ccw_int_handler;
+
+ if (virtio_ccw_check_autoonline(cdev))
+ async_schedule(virtio_ccw_auto_online, cdev);
+ return 0;
+}
+
+static void virtio_ccw_remove(struct ccw_device *cdev)
+{
+ struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+
+ if (cdev->online) {
+ unregister_virtio_device(&vcdev->vdev);
+ dev_set_drvdata(&cdev->dev, NULL);
+ }
+ cdev->handler = NULL;
+}
+
+static int virtio_ccw_offline(struct ccw_device *cdev)
+{
+ struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+
+ unregister_virtio_device(&vcdev->vdev);
+ dev_set_drvdata(&cdev->dev, NULL);
+ return 0;
+}
+
+
+static int virtio_ccw_online(struct ccw_device *cdev)
+{
+ int ret;
+ struct virtio_ccw_device *vcdev;
+
+ vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
+ if (!vcdev) {
+ dev_warn(&cdev->dev, "Could not get memory for virtio\n");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
+ GFP_DMA | GFP_KERNEL);
+ if (!vcdev->config_block) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
+ if (!vcdev->status) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ vcdev->vdev.dev.parent = &cdev->dev;
+ vcdev->vdev.dev.release = virtio_ccw_release_dev;
+ vcdev->vdev.config = &virtio_ccw_config_ops;
+ vcdev->cdev = cdev;
+ init_waitqueue_head(&vcdev->wait_q);
+ INIT_LIST_HEAD(&vcdev->virtqueues);
+ spin_lock_init(&vcdev->lock);
+
+ dev_set_drvdata(&cdev->dev, vcdev);
+ vcdev->vdev.id.vendor = cdev->id.cu_type;
+ vcdev->vdev.id.device = cdev->id.cu_model;
+ ret = register_virtio_device(&vcdev->vdev);
+ if (ret) {
+ dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
+ ret);
+ goto out_put;
+ }
+ return 0;
+out_put:
+ dev_set_drvdata(&cdev->dev, NULL);
+ put_device(&vcdev->vdev.dev);
+ return ret;
+out_free:
+ if (vcdev) {
+ kfree(vcdev->status);
+ kfree(vcdev->config_block);
+ }
+ kfree(vcdev);
+ return ret;
+}
+
+static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
+{
+ /* TODO: Check whether we need special handling here. */
+ return 0;
+}
+
+static struct ccw_device_id virtio_ids[] = {
+ { CCW_DEVICE(0x3832, 0) },
+ {},
+};
+MODULE_DEVICE_TABLE(ccw, virtio_ids);
+
+static struct ccw_driver virtio_ccw_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "virtio_ccw",
+ },
+ .ids = virtio_ids,
+ .probe = virtio_ccw_probe,
+ .remove = virtio_ccw_remove,
+ .set_offline = virtio_ccw_offline,
+ .set_online = virtio_ccw_online,
+ .notify = virtio_ccw_cio_notify,
+ .int_class = IRQIO_VIR,
+};
+
+static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
+ int max_digit, int max_val)
+{
+ int diff;
+
+ diff = 0;
+ *val = 0;
+
+ while (diff <= max_digit) {
+ int value = hex_to_bin(**cp);
+
+ if (value < 0)
+ break;
+ *val = *val * 16 + value;
+ (*cp)++;
+ diff++;
+ }
+
+ if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
+ return 1;
+
+ return 0;
+}
+
+static int __init parse_busid(char *str, unsigned int *cssid,
+ unsigned int *ssid, unsigned int *devno)
+{
+ char *str_work;
+ int rc, ret;
+
+ rc = 1;
+
+ if (*str == '\0')
+ goto out;
+
+ str_work = str;
+ ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
+ if (ret || (str_work[0] != '.'))
+ goto out;
+ str_work++;
+ ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
+ if (ret || (str_work[0] != '.'))
+ goto out;
+ str_work++;
+ ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
+ if (ret || (str_work[0] != '\0'))
+ goto out;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static void __init no_auto_parse(void)
+{
+ unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
+ char *parm, *str;
+ int rc;
+
+ str = no_auto;
+ while ((parm = strsep(&str, ","))) {
+ rc = parse_busid(strsep(&parm, "-"), &from_cssid,
+ &from_ssid, &from);
+ if (rc)
+ continue;
+ if (parm != NULL) {
+ rc = parse_busid(parm, &to_cssid,
+ &to_ssid, &to);
+ if ((from_ssid > to_ssid) ||
+ ((from_ssid == to_ssid) && (from > to)))
+ rc = -EINVAL;
+ } else {
+ to_cssid = from_cssid;
+ to_ssid = from_ssid;
+ to = from;
+ }
+ if (rc)
+ continue;
+ while ((from_ssid < to_ssid) ||
+ ((from_ssid == to_ssid) && (from <= to))) {
+ set_bit(from, devs_no_auto[from_ssid]);
+ from++;
+ if (from > __MAX_SUBCHANNEL) {
+ from_ssid++;
+ from = 0;
+ }
+ }
+ }
+}
+
+static int __init virtio_ccw_init(void)
+{
+ /* parse no_auto string before we do anything further */
+ no_auto_parse();
+ return ccw_driver_register(&virtio_ccw_driver);
+}
+module_init(virtio_ccw_init);
+
+static void __exit virtio_ccw_exit(void)
+{
+ ccw_driver_unregister(&virtio_ccw_driver);
+}
+module_exit(virtio_ccw_exit);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d690b33846cc..d87961d4c0de 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -818,7 +818,7 @@ static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
static inline int qeth_get_micros(void)
{
- return (int) (get_clock() >> 12);
+ return (int) (get_tod_clock() >> 12);
}
static inline int qeth_get_ip_version(struct sk_buff *skb)
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c96320d79fbc..c7e148f33b2a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -727,7 +727,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
zfcp_reqlist_add(adapter->req_list, req);
req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
- req->issued = get_clock();
+ req->issued = get_tod_clock();
if (zfcp_qdio_send(qdio, &req->qdio_req)) {
del_timer(&req->timer);
/* lookup request again, list might have changed */
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 50b5615848f6..665e3cfaaf85 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -68,7 +68,7 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
unsigned long long now, span;
int used;
- now = get_clock_monotonic();
+ now = get_tod_clock_monotonic();
span = (now - qdio->req_q_time) >> 12;
used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
qdio->req_q_util += used * span;
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index e85c803b30cd..fc1339cf91ac 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -107,7 +107,7 @@ static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int error = 0;
u8 ireg = 0;
- if (D7S_MINOR != iminor(file->f_path.dentry->d_inode))
+ if (D7S_MINOR != iminor(file_inode(file)))
return -ENODEV;
mutex_lock(&d7s_mutex);
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index d1f0120cdb98..5e1e12c0cf42 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -640,7 +640,7 @@ out:
/* This function handles ioctl for the character device */
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
long timeout;
unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
dma_addr_t dma_handle;
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 52a2f0580d97..c845bdbeb6c0 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -757,7 +757,7 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
dma_addr_t dma_handle;
int request_id = 0;
TW_Ioctl_Driver_Command driver_command;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
TW_Ioctl_Buf_Apache *tw_ioctl;
TW_Command_Full *full_command_packet;
TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 62071d2fc1ce..56662ae03dea 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -889,7 +889,7 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
unsigned long flags;
unsigned int data_buffer_length = 0;
unsigned long data_buffer_length_adjusted = 0;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
unsigned long *cpu_addr;
long timeout;
TW_New_Ioctl *tw_ioctl;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index d56b2534f3eb..db95c547c09d 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -883,7 +883,7 @@ config SCSI_IBMVSCSI
This is the IBM POWER Virtual SCSI Client
To compile this driver as a module, choose M here: the
- module will be called ibmvscsic.
+ module will be called ibmvscsi.
config SCSI_IBMVSCSIS
tristate "IBM Virtual SCSI Server support"
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 742f5d7eb0f5..a6f7190c09a4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,13 +12,13 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 29801
+# define AAC_DRIVER_BUILD 30000
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
#define AAC_NUM_MGT_FIB 8
-#define AAC_NUM_IO_FIB (512 - AAC_NUM_MGT_FIB)
+#define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB)
#define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
#define AAC_MAX_LUN (8)
@@ -36,6 +36,10 @@
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)
+#define PMC_DEVICE_S7 0x28c
+#define PMC_DEVICE_S8 0x28d
+#define PMC_DEVICE_S9 0x28f
+
#define aac_phys_to_logical(x) ((x)+1)
#define aac_logical_to_phys(x) ((x)?(x)-1:0)
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 8e5d3be16127..3f759957f4b4 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -404,7 +404,13 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->max_fib_size = status[1] & 0xFFE0;
host->sg_tablesize = status[2] >> 16;
dev->sg_tablesize = status[2] & 0xFFFF;
- host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+ if (dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9)
+ host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) :
+ (status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB;
+ else
+ host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
dev->max_num_aif = status[4] & 0xFFFF;
/*
* NOTE:
@@ -452,6 +458,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
}
}
+ if (host->can_queue > AAC_NUM_IO_FIB)
+ host->can_queue = AAC_NUM_IO_FIB;
+
/*
* Ok now init the communication subsystem
*/
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 3b021ec63255..e2e349204e7d 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -407,7 +407,7 @@ static int aac_src_deliver_message(struct fib *fib)
fib->hw_fib_va->header.StructType = FIB_MAGIC2;
fib->hw_fib_va->header.SenderFibAddress = (u32)address;
fib->hw_fib_va->header.u.TimeStamp = 0;
- BUG_ON((u32)(address >> 32) != 0L);
+ BUG_ON(upper_32_bits(address) != 0L);
address |= fibsize;
} else {
/* Calculate the amount to the fibsize bits */
@@ -431,7 +431,7 @@ static int aac_src_deliver_message(struct fib *fib)
address |= fibsize;
}
- src_writel(dev, MUnit.IQ_H, (address >> 32) & 0xffffffff);
+ src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
return 0;
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index e6bf12675db8..a5f7690e819e 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1034,7 +1034,7 @@ bfad_start_ops(struct bfad_s *bfad) {
sizeof(driver_info.host_os_patch) - 1);
strncpy(driver_info.os_device_name, bfad->pci_name,
- sizeof(driver_info.os_device_name - 1));
+ sizeof(driver_info.os_device_name) - 1);
/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 8f92732655c7..5864f987f206 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -523,20 +523,13 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
int error = 1;
mutex_lock(&bfad_mutex);
- if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) {
+ error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
+ if (error < 0) {
mutex_unlock(&bfad_mutex);
- printk(KERN_WARNING "idr_pre_get failure\n");
+ printk(KERN_WARNING "idr_alloc failure\n");
goto out;
}
-
- error = idr_get_new(&bfad_im_port_index, im_port,
- &im_port->idr_id);
- if (error) {
- mutex_unlock(&bfad_mutex);
- printk(KERN_WARNING "idr_get_new failure\n");
- goto out;
- }
-
+ im_port->idr_id = error;
mutex_unlock(&bfad_mutex);
im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 3486845ba301..50fcd018d14b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -64,7 +64,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.12"
+#define BNX2FC_VERSION "1.0.13"
#define PFX "bnx2fc: "
@@ -156,6 +156,18 @@
#define BNX2FC_RELOGIN_WAIT_TIME 200
#define BNX2FC_RELOGIN_WAIT_CNT 10
+#define BNX2FC_STATS(hba, stat, cnt) \
+ do { \
+ u32 val; \
+ \
+ val = fw_stats->stat.cnt; \
+ if (hba->prev_stats.stat.cnt <= val) \
+ val -= hba->prev_stats.stat.cnt; \
+ else \
+ val += (0xfffffff - hba->prev_stats.stat.cnt); \
+ hba->bfw_stats.cnt += val; \
+ } while (0)
+
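For reference, the accumulation that BNX2FC_STATS() performs for one counter can be written out as a plain helper; this is a minimal sketch with assumed names, not code from the patch. The firmware counters are free-running, so each read folds in only the increase since the last snapshot in prev_stats, with a correction when the counter has wrapped (the macro uses 0xfffffff as the wrap constant, mirrored here):

static inline u64 bnx2fc_stat_accumulate(u64 total, u32 prev, u32 cur)
{
	u32 val = cur;

	if (prev <= val)
		val -= prev;			/* no wrap since the last snapshot */
	else
		val += 0xfffffff - prev;	/* counter wrapped around */

	return total + val;			/* running total kept in bfw_stats */
}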
/* bnx2fc driver uses only one instance of fcoe_percpu_s */
extern struct fcoe_percpu_s bnx2fc_global;
@@ -167,6 +179,14 @@ struct bnx2fc_percpu_s {
spinlock_t fp_work_lock;
};
+struct bnx2fc_fw_stats {
+ u64 fc_crc_cnt;
+ u64 fcoe_tx_pkt_cnt;
+ u64 fcoe_rx_pkt_cnt;
+ u64 fcoe_tx_byte_cnt;
+ u64 fcoe_rx_byte_cnt;
+};
+
struct bnx2fc_hba {
struct list_head list;
struct cnic_dev *cnic;
@@ -207,6 +227,8 @@ struct bnx2fc_hba {
struct bnx2fc_rport **tgt_ofld_list;
/* statistics */
+ struct bnx2fc_fw_stats bfw_stats;
+ struct fcoe_statistics_params prev_stats;
struct fcoe_statistics_params *stats_buffer;
dma_addr_t stats_buf_dma;
struct completion stat_req_done;
@@ -280,6 +302,7 @@ struct bnx2fc_rport {
#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
#define BNX2FC_FLAG_EXPL_LOGO 0x8
#define BNX2FC_FLAG_DISABLE_FAILED 0x9
+#define BNX2FC_FLAG_ENABLED 0xa
u8 src_addr[ETH_ALEN];
u32 max_sqes;
@@ -468,6 +491,8 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt);
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 70ecd953a579..2daf4b0da434 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Jun 04, 2012"
+#define DRV_MODULE_RELDATE "Dec 21, 2012"
static char version[] =
@@ -62,6 +62,10 @@ static int bnx2fc_destroy(struct net_device *net_device);
static int bnx2fc_enable(struct net_device *netdev);
static int bnx2fc_disable(struct net_device *netdev);
+/* fcoe_sysfs control interface handlers */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev);
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
static void bnx2fc_recv_frame(struct sk_buff *skb);
static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
@@ -89,7 +93,6 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
static void bnx2fc_stop(struct bnx2fc_interface *interface);
static int __init bnx2fc_mod_init(void);
static void __exit bnx2fc_mod_exit(void);
-static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
unsigned int bnx2fc_debug_level;
module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -107,44 +110,6 @@ static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
-/**
- * bnx2fc_get_lesb() - Fill the FCoE Link Error Status Block
- * @lport: the local port
- * @fc_lesb: the link error status block
- */
-static void bnx2fc_get_lesb(struct fc_lport *lport,
- struct fc_els_lesb *fc_lesb)
-{
- struct net_device *netdev = bnx2fc_netdev(lport);
-
- __fcoe_get_lesb(lport, fc_lesb, netdev);
-}
-
-static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
-{
- struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
- struct net_device *netdev = bnx2fc_netdev(fip->lp);
- struct fcoe_fc_els_lesb *fcoe_lesb;
- struct fc_els_lesb fc_lesb;
-
- __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
- fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
-
- ctlr_dev->lesb.lesb_link_fail =
- ntohl(fcoe_lesb->lesb_link_fail);
- ctlr_dev->lesb.lesb_vlink_fail =
- ntohl(fcoe_lesb->lesb_vlink_fail);
- ctlr_dev->lesb.lesb_miss_fka =
- ntohl(fcoe_lesb->lesb_miss_fka);
- ctlr_dev->lesb.lesb_symb_err =
- ntohl(fcoe_lesb->lesb_symb_err);
- ctlr_dev->lesb.lesb_err_block =
- ntohl(fcoe_lesb->lesb_err_block);
- ctlr_dev->lesb.lesb_fcs_error =
- ntohl(fcoe_lesb->lesb_fcs_error);
-}
-EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
-
static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
{
struct fcoe_ctlr_device *ctlr_dev =
@@ -687,11 +652,16 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
return bnx2fc_stats;
}
- bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt;
- bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
- bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
- bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
- bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
+ BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
+ bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
+ BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt);
+ bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt;
+ BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt);
+ bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4);
+ BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt);
+ bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt;
+ BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt);
+ bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4);
bnx2fc_stats->dumped_frames = 0;
bnx2fc_stats->lip_count = 0;
@@ -700,6 +670,8 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
bnx2fc_stats->loss_of_signal_count = 0;
bnx2fc_stats->prim_seq_protocol_err_count = 0;
+ memcpy(&hba->prev_stats, hba->stats_buffer,
+ sizeof(struct fcoe_statistics_params));
return bnx2fc_stats;
}
@@ -734,35 +706,6 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
return 0;
}
-static void bnx2fc_link_speed_update(struct fc_lport *lport)
-{
- struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_interface *interface = port->priv;
- struct net_device *netdev = interface->netdev;
- struct ethtool_cmd ecmd;
-
- if (!__ethtool_get_settings(netdev, &ecmd)) {
- lport->link_supported_speeds &=
- ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
- if (ecmd.supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full))
- lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
- if (ecmd.supported & SUPPORTED_10000baseT_Full)
- lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
-
- switch (ethtool_cmd_speed(&ecmd)) {
- case SPEED_1000:
- lport->link_speed = FC_PORTSPEED_1GBIT;
- break;
- case SPEED_2500:
- lport->link_speed = FC_PORTSPEED_2GBIT;
- break;
- case SPEED_10000:
- lport->link_speed = FC_PORTSPEED_10GBIT;
- break;
- }
- }
-}
static int bnx2fc_link_ok(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
@@ -820,7 +763,7 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
port->fcoe_pending_queue_active = 0;
setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
- bnx2fc_link_speed_update(lport);
+ fcoe_link_speed_update(lport);
if (!lport->vport) {
if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
@@ -864,6 +807,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
u16 vlan_id)
{
struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ struct fcoe_ctlr_device *cdev;
struct fc_lport *lport;
struct fc_lport *vport;
struct bnx2fc_interface *interface, *tmp;
@@ -923,30 +867,47 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
interface->netdev->name, event);
- bnx2fc_link_speed_update(lport);
+ fcoe_link_speed_update(lport);
+
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
if (link_possible && !bnx2fc_link_ok(lport)) {
- /* Reset max recv frame size to default */
- fc_set_mfs(lport, BNX2FC_MFS);
- /*
- * ctlr link up will only be handled during
- * enable to avoid sending discovery solicitation
- * on a stale vlan
- */
- if (interface->enabled)
- fcoe_ctlr_link_up(ctlr);
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link up while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ /* Reset max recv frame size to default */
+ fc_set_mfs(lport, BNX2FC_MFS);
+ /*
+ * ctlr link up will only be handled during
+ * enable to avoid sending discovery
+ * solicitation on a stale vlan
+ */
+ if (interface->enabled)
+ fcoe_ctlr_link_up(ctlr);
+ };
} else if (fcoe_ctlr_link_down(ctlr)) {
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vport, &lport->vports, list)
- fc_host_port_type(vport->host) =
- FC_PORTTYPE_UNKNOWN;
- mutex_unlock(&lport->lp_mutex);
- fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
- per_cpu_ptr(lport->stats,
- get_cpu())->LinkFailureCount++;
- put_cpu();
- fcoe_clean_pending_queue(lport);
- wait_for_upload = 1;
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link down while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ per_cpu_ptr(lport->stats,
+ get_cpu())->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+ wait_for_upload = 1;
+ };
}
}
mutex_unlock(&bnx2fc_dev_lock);
@@ -1477,6 +1438,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
port = lport_priv(lport);
port->lport = lport;
port->priv = interface;
+ port->get_netdev = bnx2fc_netdev;
INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
/* Configure fcoe_port */
@@ -1996,7 +1958,9 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
}
-
+/**
+ * Deprecated: Use bnx2fc_ctlr_enabled()
+ */
static int bnx2fc_disable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
@@ -2022,7 +1986,9 @@ static int bnx2fc_disable(struct net_device *netdev)
return rc;
}
-
+/**
+ * Deprecated: Use bnx2fc_ctlr_enabled()
+ */
static int bnx2fc_enable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
@@ -2048,17 +2014,57 @@ static int bnx2fc_enable(struct net_device *netdev)
}
/**
- * bnx2fc_create - Create bnx2fc FCoE interface
+ * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls bnx2fc_enable or bnx2fc_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+ struct fc_lport *lport = ctlr->lp;
+ struct net_device *netdev = bnx2fc_netdev(lport);
+
+ switch (cdev->enabled) {
+ case FCOE_CTLR_ENABLED:
+ return bnx2fc_enable(netdev);
+ case FCOE_CTLR_DISABLED:
+ return bnx2fc_disable(netdev);
+ case FCOE_CTLR_UNUSED:
+ default:
+ return -ENOTSUPP;
+ };
+}
+
+enum bnx2fc_create_link_state {
+ BNX2FC_CREATE_LINK_DOWN,
+ BNX2FC_CREATE_LINK_UP,
+};
+
+/**
+ * _bnx2fc_create() - Create bnx2fc FCoE interface
+ * @netdev: The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
*
- * @buffer: The name of Ethernet interface to create on
- * @kp: The associated kernel param
+ * Called from either the libfcoe 'create' module parameter
+ * via bnx2fc_create or from fcoe_sysfs's ctlr_create file.
*
- * Called from sysfs.
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
*
* Returns: 0 for success
*/
-static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+static int _bnx2fc_create(struct net_device *netdev,
+ enum fip_state fip_mode,
+ enum bnx2fc_create_link_state link_state)
{
+ struct fcoe_ctlr_device *cdev;
struct fcoe_ctlr *ctlr;
struct bnx2fc_interface *interface;
struct bnx2fc_hba *hba;
@@ -2153,7 +2159,15 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
/* Make this master N_port */
ctlr->lp = lport;
- if (!bnx2fc_link_ok(lport)) {
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+ if (link_state == BNX2FC_CREATE_LINK_UP)
+ cdev->enabled = FCOE_CTLR_ENABLED;
+ else
+ cdev->enabled = FCOE_CTLR_DISABLED;
+
+ if (link_state == BNX2FC_CREATE_LINK_UP &&
+ !bnx2fc_link_ok(lport)) {
fcoe_ctlr_link_up(ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
@@ -2161,7 +2175,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
BNX2FC_HBA_DBG(lport, "create: START DISC\n");
bnx2fc_start_disc(interface);
- interface->enabled = true;
+
+ if (link_state == BNX2FC_CREATE_LINK_UP)
+ interface->enabled = true;
+
/*
* Release from kref_init in bnx2fc_interface_setup, on success
* lport should be holding a reference taken in bnx2fc_if_create
@@ -2187,6 +2204,37 @@ mod_err:
}
/**
+ * bnx2fc_create() - Create a bnx2fc interface
+ * @netdev: The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP);
+}
+
+/**
+ * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
+ *
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. This allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
+ */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev)
+{
+ return _bnx2fc_create(netdev, FIP_MODE_FABRIC,
+ BNX2FC_CREATE_LINK_DOWN);
+}
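Taken together with the .alloc hook added to bnx2fc_transport below, this lets an administrator create the controller in a link-down state from fcoe_sysfs and flip it on later through the controller's 'enabled' attribute; the exact sysfs paths (for instance /sys/bus/fcoe/ctlr_create and the per-controller enabled file) follow the fcoe_sysfs layout and are mentioned here only as an illustration.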
+
+/**
* bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
*
* @cnic: Pointer to cnic device instance
@@ -2311,6 +2359,7 @@ static struct fcoe_transport bnx2fc_transport = {
.name = {"bnx2fc"},
.attached = false,
.list = LIST_HEAD_INIT(bnx2fc_transport.list),
+ .alloc = bnx2fc_ctlr_alloc,
.match = bnx2fc_match,
.create = bnx2fc_create,
.destroy = bnx2fc_destroy,
@@ -2555,13 +2604,13 @@ module_init(bnx2fc_mod_init);
module_exit(bnx2fc_mod_exit);
static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
- .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
- .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+ .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled,
+ .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
.get_fcoe_fcf_selected = fcoe_fcf_get_selected,
.get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
@@ -2660,7 +2709,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.can_queue = BNX2FC_CAN_QUEUE,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
- .max_sectors = 512,
+ .max_sectors = 1024,
};
static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
@@ -2668,7 +2717,7 @@ static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
.elsct_send = bnx2fc_elsct_send,
.fcp_abort_io = bnx2fc_abort_io,
.fcp_cleanup = bnx2fc_cleanup,
- .get_lesb = bnx2fc_get_lesb,
+ .get_lesb = fcoe_get_lesb,
.rport_event_callback = bnx2fc_rport_event_handler,
};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index ef60afa94d0e..85ea98a80f40 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -347,7 +347,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
* @port: port structure pointer
* @tgt: bnx2fc_rport structure pointer
*/
-static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+int bnx2fc_send_session_enable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
struct kwqe *kwqe_arr[2];
@@ -759,8 +759,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
xid);
- memset(&io_req->err_entry, 0,
- sizeof(struct fcoe_err_report_entry));
memcpy(&io_req->err_entry, err_entry,
sizeof(struct fcoe_err_report_entry));
if (!test_bit(BNX2FC_FLAG_SRR_SENT,
@@ -847,8 +845,6 @@ ret_err_rqe:
goto ret_warn_rqe;
}
- memset(&io_req->err_entry, 0,
- sizeof(struct fcoe_err_report_entry));
memcpy(&io_req->err_entry, err_entry,
sizeof(struct fcoe_err_report_entry));
@@ -1124,7 +1120,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
struct bnx2fc_interface *interface;
u32 conn_id;
u32 context_id;
- int rc;
conn_id = ofld_kcqe->fcoe_conn_id;
context_id = ofld_kcqe->fcoe_conn_context_id;
@@ -1153,17 +1148,10 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
"resources\n");
set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
}
- goto ofld_cmpl_err;
} else {
-
- /* now enable the session */
- rc = bnx2fc_send_session_enable_req(port, tgt);
- if (rc) {
- printk(KERN_ERR PFX "enable session failed\n");
- goto ofld_cmpl_err;
- }
+ /* FW offload request successfully completed */
+ set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
}
- return;
ofld_cmpl_err:
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
wake_up_interruptible(&tgt->ofld_wait);
@@ -1210,15 +1198,9 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
goto enbl_cmpl_err;
}
- if (ofld_kcqe->completion_status)
- goto enbl_cmpl_err;
- else {
+ if (!ofld_kcqe->completion_status)
/* enable successful - rport ready for issuing IOs */
- set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
- set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
- wake_up_interruptible(&tgt->ofld_wait);
- }
- return;
+ set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
enbl_cmpl_err:
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
@@ -1251,6 +1233,7 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
/* disable successful */
BNX2FC_TGT_DBG(tgt, "disable successful\n");
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
wake_up_interruptible(&tgt->upld_wait);
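
Taken together with the bnx2fc_tgt.c hunks further down, the offload path becomes a two-step handshake: the offload completion above only marks the session OFFLOADED, the enable request is issued afterwards from bnx2fc_offload_session(), and ENABLED is set by the enable completion. A condensed sketch of the resulting flow (illustration only, not literal driver code):

static void example_offload_then_enable(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	if (bnx2fc_send_session_ofld_req(port, tgt))
		return;
	bnx2fc_ofld_wait(tgt);		/* ofld_cmpl sets BNX2FC_FLAG_OFFLOADED */
	if (!test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))
		return;			/* offload failed or timed out */

	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	if (bnx2fc_send_session_enable_req(port, tgt))
		return;			/* enable request could not be sent */
	bnx2fc_ofld_wait(tgt);		/* enable cmpl sets BNX2FC_FLAG_ENABLED */
}
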
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 8d4626c07a12..60798e829de6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -654,7 +654,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
&mp_req->mp_resp_bd_dma,
GFP_ATOMIC);
- if (!mp_req->mp_req_bd) {
+ if (!mp_req->mp_resp_bd) {
printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
bnx2fc_free_mp_resc(io_req);
return FAILED;
@@ -685,8 +685,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
struct fc_lport *lport;
- struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
struct fcoe_port *port;
struct bnx2fc_interface *interface;
struct bnx2fc_rport *tgt;
@@ -704,6 +704,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
unsigned long start = jiffies;
lport = shost_priv(host);
+ rport = starget_to_rport(scsi_target(sc_cmd->device));
port = lport_priv(lport);
interface = port->priv;
@@ -712,6 +713,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
rc = FAILED;
goto tmf_err;
}
+ rp = rport->dd_data;
rc = fc_block_scsi_eh(sc_cmd);
if (rc)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index b9d0d9cb17f9..c57a3bb8a9fb 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -33,6 +33,7 @@ static void bnx2fc_upld_timer(unsigned long data)
BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
/* fake upload completion */
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
wake_up_interruptible(&tgt->upld_wait);
}
@@ -55,10 +56,25 @@ static void bnx2fc_ofld_timer(unsigned long data)
* resources are freed up in bnx2fc_offload_session
*/
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
wake_up_interruptible(&tgt->ofld_wait);
}
+static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
+{
+ setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ wait_event_interruptible(tgt->ofld_wait,
+ (test_bit(
+ BNX2FC_FLAG_OFLD_REQ_CMPL,
+ &tgt->flags)));
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&tgt->ofld_timer);
+}
+
static void bnx2fc_offload_session(struct fcoe_port *port,
struct bnx2fc_rport *tgt,
struct fc_rport_priv *rdata)
@@ -103,17 +119,7 @@ retry_ofld:
* wait for the session is offloaded and enabled. 3 Secs
* should be ample time for this process to complete.
*/
- setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
- mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
- wait_event_interruptible(tgt->ofld_wait,
- (test_bit(
- BNX2FC_FLAG_OFLD_REQ_CMPL,
- &tgt->flags)));
- if (signal_pending(current))
- flush_signals(current);
-
- del_timer_sync(&tgt->ofld_timer);
+ bnx2fc_ofld_wait(tgt);
if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
@@ -131,14 +137,23 @@ retry_ofld:
}
if (bnx2fc_map_doorbell(tgt)) {
printk(KERN_ERR PFX "map doorbell failed - no mem\n");
- /* upload will take care of cleaning up sess resc */
- lport->tt.rport_logoff(rdata);
+ goto ofld_err;
}
+ clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ rval = bnx2fc_send_session_enable_req(port, tgt);
+ if (rval) {
+ pr_err(PFX "enable session failed\n");
+ goto ofld_err;
+ }
+ bnx2fc_ofld_wait(tgt);
+ if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)))
+ goto ofld_err;
return;
ofld_err:
/* couldn't offload the session. log off from this rport */
BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
/* Free session resources */
bnx2fc_free_session_resc(hba, tgt);
tgt_init_err:
@@ -259,6 +274,19 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
spin_unlock_bh(&tgt->tgt_lock);
}
+static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
+{
+ setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+ wait_event_interruptible(tgt->upld_wait,
+ (test_bit(
+ BNX2FC_FLAG_UPLD_REQ_COMPL,
+ &tgt->flags)));
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&tgt->upld_timer);
+}
+
static void bnx2fc_upload_session(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
@@ -279,19 +307,8 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
* wait for upload to complete. 3 Secs
* should be sufficient time for this process to complete.
*/
- setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
- mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
- wait_event_interruptible(tgt->upld_wait,
- (test_bit(
- BNX2FC_FLAG_UPLD_REQ_COMPL,
- &tgt->flags)));
-
- if (signal_pending(current))
- flush_signals(current);
-
- del_timer_sync(&tgt->upld_timer);
+ bnx2fc_upld_wait(tgt);
/*
* traverse thru the active_q and tmf_q and cleanup
@@ -308,24 +325,13 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
bnx2fc_send_session_destroy_req(hba, tgt);
/* wait for destroy to complete */
- setup_timer(&tgt->upld_timer,
- bnx2fc_upld_timer, (unsigned long)tgt);
- mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
- wait_event_interruptible(tgt->upld_wait,
- (test_bit(
- BNX2FC_FLAG_UPLD_REQ_COMPL,
- &tgt->flags)));
+ bnx2fc_upld_wait(tgt);
if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
tgt->flags);
- if (signal_pending(current))
- flush_signals(current);
-
- del_timer_sync(&tgt->upld_timer);
} else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
@@ -381,7 +387,9 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
tgt->rq_cons_idx = 0;
atomic_set(&tgt->num_active_ios, 0);
- if (rdata->flags & FC_RP_FLAGS_RETRY) {
+ if (rdata->flags & FC_RP_FLAGS_RETRY &&
+ rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
+ !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
tgt->dev_type = TYPE_TAPE;
tgt->io_timeout = 0; /* use default ULP timeout */
} else {
@@ -479,7 +487,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
tgt = (struct bnx2fc_rport *)&rp[1];
/* This can happen when ADISC finds the same target */
- if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+ if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
BNX2FC_TGT_DBG(tgt, "already offloaded\n");
mutex_unlock(&hba->hba_mutex);
return;
@@ -494,11 +502,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
hba->num_ofld_sess);
- if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
- /*
- * Session is offloaded and enabled. Map
- * doorbell register for this target
- */
+ if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
+ /* Session is offloaded and enabled. */
BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
/* This counter is protected with hba mutex */
hba->num_ofld_sess++;
@@ -535,7 +540,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
*/
tgt = (struct bnx2fc_rport *)&rp[1];
- if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+ if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) {
mutex_unlock(&hba->hba_mutex);
break;
}
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 91eec60252ee..a28b03e5a5f6 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1317,7 +1317,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
if (error_mask1) {
iscsi_init2.error_bit_map[0] = error_mask1;
- mask64 &= (u32)(~mask64);
+ mask64 ^= (u32)(mask64);
mask64 |= error_mask1;
} else
iscsi_init2.error_bit_map[0] = (u32) mask64;
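
A note on the hunk above: the intent is to clear the low 32 bits of the 64-bit default error mask so that error_mask1 can replace them; the old '&=' expression cleared the whole value instead. A minimal sketch of the corrected bit manipulation (the helper name is invented for illustration):

static inline u64 replace_low32_mask(u64 mask64, u32 error_mask1)
{
	/* (u32)mask64 is the low word; XOR-ing it back in clears bits 31..0,
	 * e.g. 0x12345678aabbccdd -> 0x1234567800000000 */
	mask64 ^= (u32)mask64;
	return mask64 | error_mask1;
}
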
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index a15474eef5f7..2a323742ce04 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -895,7 +895,7 @@ static int ch_probe(struct device *dev)
{
struct scsi_device *sd = to_scsi_device(dev);
struct device *class_dev;
- int minor, ret = -ENOMEM;
+ int ret;
scsi_changer *ch;
if (sd->type != TYPE_MEDIUM_CHANGER)
@@ -905,22 +905,19 @@ static int ch_probe(struct device *dev)
if (NULL == ch)
return -ENOMEM;
- if (!idr_pre_get(&ch_index_idr, GFP_KERNEL))
- goto free_ch;
-
+ idr_preload(GFP_KERNEL);
spin_lock(&ch_index_lock);
- ret = idr_get_new(&ch_index_idr, ch, &minor);
+ ret = idr_alloc(&ch_index_idr, ch, 0, CH_MAX_DEVS + 1, GFP_NOWAIT);
spin_unlock(&ch_index_lock);
+ idr_preload_end();
- if (ret)
+ if (ret < 0) {
+ if (ret == -ENOSPC)
+ ret = -ENODEV;
goto free_ch;
-
- if (minor > CH_MAX_DEVS) {
- ret = -ENODEV;
- goto remove_idr;
}
- ch->minor = minor;
+ ch->minor = ret;
sprintf(ch->name,"ch%d",ch->minor);
class_dev = device_create(ch_sysfs_class, dev,
@@ -944,7 +941,7 @@ static int ch_probe(struct device *dev)
return 0;
remove_idr:
- idr_remove(&ch_index_idr, minor);
+ idr_remove(&ch_index_idr, ch->minor);
free_ch:
kfree(ch);
return ret;
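
The ch_probe() conversion above follows the idr_preload()/idr_alloc() pattern: preallocate while sleeping is still allowed, then allocate with GFP_NOWAIT under the spinlock. A minimal self-contained sketch of the same pattern, using hypothetical names (example_idr, example_lock) not taken from this patch:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

static int example_alloc_id(void *ptr, int max_id)
{
	int id;

	idr_preload(GFP_KERNEL);		/* may sleep; done unlocked */
	spin_lock(&example_lock);
	/* returns an id in [0, max_id] on success, or a negative errno */
	id = idr_alloc(&example_idr, ptr, 0, max_id + 1, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	return id;	/* callers such as ch_probe() map -ENOSPC to -ENODEV */
}
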
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 8ecdb94a59f4..bdd78fb4fc70 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -2131,13 +2131,16 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
value_to_add = 4 - (cf->size % 4);
cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
- if (cfg_data == NULL)
- return -ENOMEM;
+ if (cfg_data == NULL) {
+ ret = -ENOMEM;
+ goto leave;
+ }
memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
-
- if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0)
- return -EINVAL;
+ if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
+ ret = -EINVAL;
+ goto leave;
+ }
mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
@@ -2149,9 +2152,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64);
}
+leave:
kfree(cfg_data);
release_firmware(cf);
-
return ret;
}
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index b42cbbd3d92d..0604b5ff3638 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -60,18 +60,11 @@ static struct scsi_transport_template *csio_fcoe_transport_vport;
/*
* debugfs support
*/
-static int
-csio_mem_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
loff_t pos = *ppos;
- loff_t avail = file->f_path.dentry->d_inode->i_size;
+ loff_t avail = file_inode(file)->i_size;
unsigned int mem = (uintptr_t)file->private_data & 3;
struct csio_hw *hw = file->private_data - mem;
@@ -110,7 +103,7 @@ csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
static const struct file_operations csio_mem_debugfs_fops = {
.owner = THIS_MODULE,
- .open = csio_mem_open,
+ .open = simple_open,
.read = csio_mem_read,
.llseek = default_llseek,
};
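
For reference, the generic helper that replaces csio_mem_open() above is essentially the same one-liner in fs/libfs.c (sketch, not copied verbatim), which is why the bespoke open handler became redundant:

int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}
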
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index f924b3c3720e..3fecf35ba292 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1564,6 +1564,7 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
break;
case CXGB4_STATE_DETACH:
pr_info("cdev 0x%p, DETACH.\n", cdev);
+ cxgbi_device_unregister(cdev);
break;
default:
pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 865c64fa923c..fed486bfd3f4 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3747,13 +3747,13 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
dcb->max_command = 1;
dcb->target_id = target;
dcb->target_lun = lun;
+ dcb->dev_mode = eeprom->target[target].cfg0;
#ifndef DC395x_NO_DISCONNECT
dcb->identify_msg =
IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
#else
dcb->identify_msg = IDENTIFY(0, lun);
#endif
- dcb->dev_mode = eeprom->target[target].cfg0;
dcb->inquiry7 = 0;
dcb->sync_mode = 0;
dcb->min_nego_period = clock_period[period_index];
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index b4f6c9a84e71..b6e2700ec1c6 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2161,7 +2161,7 @@ static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
struct inode *inode;
long ret;
- inode = file->f_dentry->d_inode;
+ inode = file_inode(file);
mutex_lock(&adpt_mutex);
ret = adpt_ioctl(inode, file, cmd, arg);
@@ -2177,7 +2177,7 @@ static long compat_adpt_ioctl(struct file *file,
struct inode *inode;
long ret;
- inode = file->f_dentry->d_inode;
+ inode = file_inode(file);
mutex_lock(&adpt_mutex);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 666b7ac4475f..b5d92fc93c70 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -82,11 +82,11 @@ static int fcoe_rcv(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *);
static void fcoe_percpu_clean(struct fc_lport *);
-static int fcoe_link_speed_update(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);
+static void fcoe_hostlist_del(const struct fc_lport *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
@@ -117,6 +117,11 @@ static int fcoe_destroy(struct net_device *netdev);
static int fcoe_enable(struct net_device *netdev);
static int fcoe_disable(struct net_device *netdev);
+/* fcoe_sysfs control interface handlers */
+static int fcoe_ctlr_alloc(struct net_device *netdev);
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
+
static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
u32 did, struct fc_frame *,
unsigned int op,
@@ -126,8 +131,6 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
void *, u32 timeout);
static void fcoe_recv_frame(struct sk_buff *skb);
-static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
-
/* notification function for packets from net device */
static struct notifier_block fcoe_notifier = {
.notifier_call = fcoe_device_notification,
@@ -151,11 +154,11 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
-static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
- .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+ .set_fcoe_ctlr_mode = fcoe_ctlr_set_fip_mode,
+ .set_fcoe_ctlr_enabled = fcoe_ctlr_enabled,
.get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
@@ -1112,10 +1115,17 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
port = lport_priv(lport);
port->lport = lport;
port->priv = fcoe;
+ port->get_netdev = fcoe_netdev;
port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
INIT_WORK(&port->destroy_work, fcoe_destroy_work);
+ /*
+ * Need to add the lport to the hostlist
+ * so we catch NETDEV_CHANGE events.
+ */
+ fcoe_hostlist_add(lport);
+
/* configure a fc_lport including the exchange manager */
rc = fcoe_lport_config(lport);
if (rc) {
@@ -1187,6 +1197,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
out_lp_destroy:
fc_exch_mgr_free(lport);
out_host_put:
+ fcoe_hostlist_del(lport);
scsi_host_put(lport->host);
out:
return ERR_PTR(rc);
@@ -1964,6 +1975,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
static int fcoe_device_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
+ struct fcoe_ctlr_device *cdev;
struct fc_lport *lport = NULL;
struct net_device *netdev = ptr;
struct fcoe_ctlr *ctlr;
@@ -2020,13 +2032,29 @@ static int fcoe_device_notification(struct notifier_block *notifier,
fcoe_link_speed_update(lport);
- if (link_possible && !fcoe_link_ok(lport))
- fcoe_ctlr_link_up(ctlr);
- else if (fcoe_ctlr_link_down(ctlr)) {
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->LinkFailureCount++;
- put_cpu();
- fcoe_clean_pending_queue(lport);
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+ if (link_possible && !fcoe_link_ok(lport)) {
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link up while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ fcoe_ctlr_link_up(ctlr);
+ };
+ } else if (fcoe_ctlr_link_down(ctlr)) {
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link down while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+ };
}
out:
return rc;
@@ -2039,6 +2067,8 @@ out:
* Called from fcoe transport.
*
* Returns: 0 for success
+ *
+ * Deprecated: use fcoe_ctlr_enabled()
*/
static int fcoe_disable(struct net_device *netdev)
{
@@ -2098,6 +2128,33 @@ out:
}
/**
+ * fcoe_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls fcoe_enable or fcoe_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+ struct fc_lport *lport = ctlr->lp;
+ struct net_device *netdev = fcoe_netdev(lport);
+
+ switch (cdev->enabled) {
+ case FCOE_CTLR_ENABLED:
+ return fcoe_enable(netdev);
+ case FCOE_CTLR_DISABLED:
+ return fcoe_disable(netdev);
+ case FCOE_CTLR_UNUSED:
+ default:
+ return -ENOTSUPP;
+ };
+}
+
+/**
* fcoe_destroy() - Destroy a FCoE interface
* @netdev : The net_device object the Ethernet interface to create on
*
@@ -2139,8 +2196,31 @@ static void fcoe_destroy_work(struct work_struct *work)
{
struct fcoe_port *port;
struct fcoe_interface *fcoe;
+ struct Scsi_Host *shost;
+ struct fc_host_attrs *fc_host;
+ unsigned long flags;
+ struct fc_vport *vport;
+ struct fc_vport *next_vport;
port = container_of(work, struct fcoe_port, destroy_work);
+ shost = port->lport->host;
+ fc_host = shost_to_fc_host(shost);
+
+ /* Loop through all the vports and mark them for deletion */
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+ continue;
+ } else {
+ vport->flags |= FC_VPORT_DELETING;
+ queue_work(fc_host_work_q(shost),
+ &vport->vport_delete_work);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ flush_workqueue(fc_host_work_q(shost));
+
mutex_lock(&fcoe_config_mutex);
fcoe = port->priv;
@@ -2204,16 +2284,26 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
#endif
}
+enum fcoe_create_link_state {
+ FCOE_CREATE_LINK_DOWN,
+ FCOE_CREATE_LINK_UP,
+};
+
/**
- * fcoe_create() - Create a fcoe interface
- * @netdev : The net_device object the Ethernet interface to create on
- * @fip_mode: The FIP mode for this creation
+ * _fcoe_create() - (internal) Create a fcoe interface
+ * @netdev : The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
*
- * Called from fcoe transport
+ * Called from either the libfcoe 'create' module parameter
+ * via fcoe_create or from fcoe_sysfs's ctlr_create file.
*
- * Returns: 0 for success
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
*/
-static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
+ enum fcoe_create_link_state link_state)
{
int rc = 0;
struct fcoe_ctlr_device *ctlr_dev;
@@ -2254,13 +2344,29 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
/* setup DCB priority attributes. */
fcoe_dcb_create(fcoe);
- /* add to lports list */
- fcoe_hostlist_add(lport);
-
/* start FIP Discovery and FLOGI */
lport->boot_time = jiffies;
fc_fabric_login(lport);
- if (!fcoe_link_ok(lport)) {
+
+ /*
+ * If the fcoe_ctlr_device is to be set to DISABLED
+ * it must be done after the lport is added to the
+ * hostlist, but before the rtnl_lock is released.
+ * This is because the rtnl_lock protects the
+ * hostlist that fcoe_device_notification uses. If
+ * the FCoE Controller is intended to be created
+ * DISABLED then 'enabled' needs to be considered when
+ * handling link events. 'enabled' must be set
+ * before the lport can be found in the hostlist
+ * when a link up event is received.
+ */
+ if (link_state == FCOE_CREATE_LINK_UP)
+ ctlr_dev->enabled = FCOE_CTLR_ENABLED;
+ else
+ ctlr_dev->enabled = FCOE_CTLR_DISABLED;
+
+ if (link_state == FCOE_CREATE_LINK_UP &&
+ !fcoe_link_ok(lport)) {
rtnl_unlock();
fcoe_ctlr_link_up(ctlr);
mutex_unlock(&fcoe_config_mutex);
@@ -2275,37 +2381,34 @@ out_nortnl:
}
/**
- * fcoe_link_speed_update() - Update the supported and actual link speeds
- * @lport: The local port to update speeds for
+ * fcoe_create() - Create a fcoe interface
+ * @netdev : The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP);
+}
+
+/**
+ * fcoe_ctlr_alloc() - Allocate a fcoe interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
*
- * Returns: 0 if the ethtool query was successful
- * -1 if the ethtool query failed
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. This allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
*/
-static int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_ctlr_alloc(struct net_device *netdev)
{
- struct net_device *netdev = fcoe_netdev(lport);
- struct ethtool_cmd ecmd;
-
- if (!__ethtool_get_settings(netdev, &ecmd)) {
- lport->link_supported_speeds &=
- ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
- if (ecmd.supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full))
- lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
- if (ecmd.supported & SUPPORTED_10000baseT_Full)
- lport->link_supported_speeds |=
- FC_PORTSPEED_10GBIT;
- switch (ethtool_cmd_speed(&ecmd)) {
- case SPEED_1000:
- lport->link_speed = FC_PORTSPEED_1GBIT;
- break;
- case SPEED_10000:
- lport->link_speed = FC_PORTSPEED_10GBIT;
- break;
- }
- return 0;
- }
- return -1;
+ return _fcoe_create(netdev, FIP_MODE_FABRIC,
+ FCOE_CREATE_LINK_DOWN);
}
/**
@@ -2375,10 +2478,13 @@ static int fcoe_reset(struct Scsi_Host *shost)
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+ struct fcoe_ctlr_device *cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(ctlr->lp);
- if (!fcoe_link_ok(ctlr->lp))
+
+ if (cdev->enabled != FCOE_CTLR_DISABLED &&
+ !fcoe_link_ok(ctlr->lp))
fcoe_ctlr_link_up(ctlr);
return 0;
}
@@ -2445,12 +2551,31 @@ static int fcoe_hostlist_add(const struct fc_lport *lport)
return 0;
}
+/**
+ * fcoe_hostlist_del() - Remove the FCoE interface identified by a local
+ * port from the hostlist
+ * @lport: The local port that identifies the FCoE interface to be removed
+ *
+ * Locking: must be called with the RTNL mutex held
+ *
+ */
+static void fcoe_hostlist_del(const struct fc_lport *lport)
+{
+ struct fcoe_interface *fcoe;
+ struct fcoe_port *port;
+
+ port = lport_priv(lport);
+ fcoe = port->priv;
+ list_del(&fcoe->list);
+ return;
+}
static struct fcoe_transport fcoe_sw_transport = {
.name = {FCOE_TRANSPORT_DEFAULT},
.attached = false,
.list = LIST_HEAD_INIT(fcoe_sw_transport.list),
.match = fcoe_match,
+ .alloc = fcoe_ctlr_alloc,
.create = fcoe_create,
.destroy = fcoe_destroy,
.enable = fcoe_enable,
@@ -2534,9 +2659,9 @@ static void __exit fcoe_exit(void)
/* releases the associated fcoe hosts */
rtnl_lock();
list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
- list_del(&fcoe->list);
ctlr = fcoe_to_ctlr(fcoe);
port = lport_priv(ctlr->lp);
+ fcoe_hostlist_del(port->lport);
queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
@@ -2776,43 +2901,6 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
NULL, NULL, 3 * lport->r_a_tov);
}
-/**
- * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
- * @lport: the local port
- * @fc_lesb: the link error status block
- */
-static void fcoe_get_lesb(struct fc_lport *lport,
- struct fc_els_lesb *fc_lesb)
-{
- struct net_device *netdev = fcoe_netdev(lport);
-
- __fcoe_get_lesb(lport, fc_lesb, netdev);
-}
-
-static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
-{
- struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
- struct net_device *netdev = fcoe_netdev(fip->lp);
- struct fcoe_fc_els_lesb *fcoe_lesb;
- struct fc_els_lesb fc_lesb;
-
- __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
- fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
-
- ctlr_dev->lesb.lesb_link_fail =
- ntohl(fcoe_lesb->lesb_link_fail);
- ctlr_dev->lesb.lesb_vlink_fail =
- ntohl(fcoe_lesb->lesb_vlink_fail);
- ctlr_dev->lesb.lesb_miss_fka =
- ntohl(fcoe_lesb->lesb_miss_fka);
- ctlr_dev->lesb.lesb_symb_err =
- ntohl(fcoe_lesb->lesb_symb_err);
- ctlr_dev->lesb.lesb_err_block =
- ntohl(fcoe_lesb->lesb_err_block);
- ctlr_dev->lesb.lesb_fcs_error =
- ntohl(fcoe_lesb->lesb_fcs_error);
-}
-
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
{
struct fcoe_ctlr_device *ctlr_dev =
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index b42dc32cb5eb..2b53672bf932 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -55,12 +55,12 @@ do { \
#define FCOE_DBG(fmt, args...) \
FCOE_CHECK_LOGGING(FCOE_LOGGING, \
- printk(KERN_INFO "fcoe: " fmt, ##args);)
+ pr_info("fcoe: " fmt, ##args);)
#define FCOE_NETDEV_DBG(netdev, fmt, args...) \
FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \
- printk(KERN_INFO "fcoe: %s: " fmt, \
- netdev->name, ##args);)
+ pr_info("fcoe: %s: " fmt, \
+ netdev->name, ##args);)
/**
* struct fcoe_interface - A FCoE interface
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 4a909d7cfde1..08c3bc398da2 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1291,8 +1291,16 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
- if (!fcf || !lport->port_id)
+ if (!fcf || !lport->port_id) {
+ /*
+ * We are yet to select best FCF, but we got CVL in the
+ * meantime. reset the ctlr and let it rediscover the FCF
+ */
+ mutex_lock(&fip->ctlr_mutex);
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
return;
+ }
/*
* mask of required descriptors. Validating each one clears its bit.
@@ -1551,15 +1559,6 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
fcf->fabric_name, fcf->vfid, fcf->fcf_mac,
fcf->fc_map, fcoe_ctlr_mtu_valid(fcf),
fcf->flogi_sent, fcf->pri);
- if (fcf->fabric_name != first->fabric_name ||
- fcf->vfid != first->vfid ||
- fcf->fc_map != first->fc_map) {
- LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
- "or FC-MAP\n");
- return NULL;
- }
- if (fcf->flogi_sent)
- continue;
if (!fcoe_ctlr_fcf_usable(fcf)) {
LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
"map %x %svalid %savailable\n",
@@ -1569,6 +1568,15 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
"" : "un");
continue;
}
+ if (fcf->fabric_name != first->fabric_name ||
+ fcf->vfid != first->vfid ||
+ fcf->fc_map != first->fc_map) {
+ LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
+ "or FC-MAP\n");
+ return NULL;
+ }
+ if (fcf->flogi_sent)
+ continue;
if (!best || fcf->pri < best->pri || best->flogi_sent)
best = fcf;
}
@@ -2864,22 +2872,21 @@ void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
}
EXPORT_SYMBOL(fcoe_fcf_get_selected);
-void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
{
struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
mutex_lock(&ctlr->ctlr_mutex);
- switch (ctlr->mode) {
- case FIP_MODE_FABRIC:
- ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
- break;
- case FIP_MODE_VN2VN:
- ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+ switch (ctlr_dev->mode) {
+ case FIP_CONN_TYPE_VN2VN:
+ ctlr->mode = FIP_MODE_VN2VN;
break;
+ case FIP_CONN_TYPE_FABRIC:
default:
- ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+ ctlr->mode = FIP_MODE_FABRIC;
break;
}
+
mutex_unlock(&ctlr->ctlr_mutex);
}
-EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
+EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 5e751689a089..8c05ae017f5b 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -21,8 +21,17 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
+#include <linux/ctype.h>
#include <scsi/fcoe_sysfs.h>
+#include <scsi/libfcoe.h>
+
+/*
+ * OK to include local libfcoe.h for debug_logging, but cannot include
+ * <scsi/libfcoe.h> otherwise non-netdev based fcoe solutions would
+ * have to include more than fcoe_sysfs.h.
+ */
+#include "libfcoe.h"
static atomic_t ctlr_num;
static atomic_t fcf_num;
@@ -71,6 +80,8 @@ MODULE_PARM_DESC(fcf_dev_loss_tmo,
((x)->lesb.lesb_err_block)
#define fcoe_ctlr_fcs_error(x) \
((x)->lesb.lesb_fcs_error)
+#define fcoe_ctlr_enabled(x) \
+ ((x)->enabled)
#define fcoe_fcf_state(x) \
((x)->state)
#define fcoe_fcf_fabric_name(x) \
@@ -210,25 +221,34 @@ static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
#define fcoe_enum_name_search(title, table_type, table) \
static const char *get_fcoe_##title##_name(enum table_type table_key) \
{ \
- int i; \
- char *name = NULL; \
- \
- for (i = 0; i < ARRAY_SIZE(table); i++) { \
- if (table[i].value == table_key) { \
- name = table[i].name; \
- break; \
- } \
- } \
- return name; \
+ if (table_key < 0 || table_key >= ARRAY_SIZE(table)) \
+ return NULL; \
+ return table[table_key]; \
+}
+
+static char *fip_conn_type_names[] = {
+ [ FIP_CONN_TYPE_UNKNOWN ] = "Unknown",
+ [ FIP_CONN_TYPE_FABRIC ] = "Fabric",
+ [ FIP_CONN_TYPE_VN2VN ] = "VN2VN",
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+
+static enum fip_conn_type fcoe_parse_mode(const char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fip_conn_type_names); i++) {
+ if (strcasecmp(buf, fip_conn_type_names[i]) == 0)
+ return i;
+ }
+
+ return FIP_CONN_TYPE_UNKNOWN;
}
-static struct {
- enum fcf_state value;
- char *name;
-} fcf_state_names[] = {
- { FCOE_FCF_STATE_UNKNOWN, "Unknown" },
- { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
- { FCOE_FCF_STATE_CONNECTED, "Connected" },
+static char *fcf_state_names[] = {
+ [ FCOE_FCF_STATE_UNKNOWN ] = "Unknown",
+ [ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected",
+ [ FCOE_FCF_STATE_CONNECTED ] = "Connected",
};
fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
#define FCOE_FCF_STATE_MAX_NAMELEN 50
@@ -246,17 +266,7 @@ static ssize_t show_fcf_state(struct device *dev,
}
static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
-static struct {
- enum fip_conn_type value;
- char *name;
-} fip_conn_type_names[] = {
- { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
- { FIP_CONN_TYPE_FABRIC, "Fabric" },
- { FIP_CONN_TYPE_VN2VN, "VN2VN" },
-};
-fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
-#define FCOE_CTLR_MODE_MAX_NAMELEN 50
-
+#define FCOE_MAX_MODENAME_LEN 20
static ssize_t show_ctlr_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -264,17 +274,116 @@ static ssize_t show_ctlr_mode(struct device *dev,
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
const char *name;
- if (ctlr->f->get_fcoe_ctlr_mode)
- ctlr->f->get_fcoe_ctlr_mode(ctlr);
-
name = get_fcoe_ctlr_mode_name(ctlr->mode);
if (!name)
return -EINVAL;
- return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
+ return snprintf(buf, FCOE_MAX_MODENAME_LEN,
+ "%s\n", name);
+}
+
+static ssize_t store_ctlr_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ char mode[FCOE_MAX_MODENAME_LEN + 1];
+
+ if (count > FCOE_MAX_MODENAME_LEN)
+ return -EINVAL;
+
+ strncpy(mode, buf, count);
+
+ if (mode[count - 1] == '\n')
+ mode[count - 1] = '\0';
+ else
+ mode[count] = '\0';
+
+ switch (ctlr->enabled) {
+ case FCOE_CTLR_ENABLED:
+ LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.");
+ return -EBUSY;
+ case FCOE_CTLR_DISABLED:
+ if (!ctlr->f->set_fcoe_ctlr_mode) {
+ LIBFCOE_SYSFS_DBG(ctlr,
+ "Mode change not supported by LLD.");
+ return -ENOTSUPP;
+ }
+
+ ctlr->mode = fcoe_parse_mode(mode);
+ if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
+ LIBFCOE_SYSFS_DBG(ctlr,
+ "Unknown mode %s provided.", buf);
+ return -EINVAL;
+ }
+
+ ctlr->f->set_fcoe_ctlr_mode(ctlr);
+ LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf);
+
+ return count;
+ case FCOE_CTLR_UNUSED:
+ default:
+ LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.");
+ return -ENOTSUPP;
+ };
+}
+
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR,
+ show_ctlr_mode, store_ctlr_mode);
+
+static ssize_t store_ctlr_enabled(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ int rc;
+
+ switch (ctlr->enabled) {
+ case FCOE_CTLR_ENABLED:
+ if (*buf == '1')
+ return count;
+ ctlr->enabled = FCOE_CTLR_DISABLED;
+ break;
+ case FCOE_CTLR_DISABLED:
+ if (*buf == '0')
+ return count;
+ ctlr->enabled = FCOE_CTLR_ENABLED;
+ break;
+ case FCOE_CTLR_UNUSED:
+ return -ENOTSUPP;
+ };
+
+ rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static char *ctlr_enabled_state_names[] = {
+ [ FCOE_CTLR_ENABLED ] = "1",
+ [ FCOE_CTLR_DISABLED ] = "0",
+};
+fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state,
+ ctlr_enabled_state_names)
+#define FCOE_CTLR_ENABLED_MAX_NAMELEN 50
+
+static ssize_t show_ctlr_enabled_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ const char *name;
+
+ name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN,
"%s\n", name);
}
-static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
- show_ctlr_mode, NULL);
+
+static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR,
+ show_ctlr_enabled_state,
+ store_ctlr_enabled);
static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
@@ -359,6 +468,7 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = {
static struct attribute *fcoe_ctlr_attrs[] = {
&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_ctlr_enabled.attr,
&device_attr_fcoe_ctlr_mode.attr,
NULL,
};
@@ -443,9 +553,16 @@ struct device_type fcoe_fcf_device_type = {
.release = fcoe_fcf_device_release,
};
+struct bus_attribute fcoe_bus_attr_group[] = {
+ __ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store),
+ __ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store),
+ __ATTR_NULL
+};
+
struct bus_type fcoe_bus_type = {
.name = "fcoe",
.match = &fcoe_bus_match,
+ .bus_attrs = fcoe_bus_attr_group,
};
/**
@@ -566,6 +683,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
ctlr->id = atomic_inc_return(&ctlr_num) - 1;
ctlr->f = f;
+ ctlr->mode = FIP_CONN_TYPE_FABRIC;
INIT_LIST_HEAD(&ctlr->fcfs);
mutex_init(&ctlr->lock);
ctlr->dev.parent = parent;
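
The bus_attrs hook added above gives the fcoe bus two write-only control files; each __ATTR() entry expands to a struct bus_attribute roughly like the following (a sketch based on the generic __ATTR macro of this era, using the ctlr_create entry):

static struct bus_attribute example_ctlr_create_attr = {
	.attr  = { .name = "ctlr_create", .mode = S_IWUSR },
	.show  = NULL,				/* write-only file */
	.store = fcoe_ctlr_create_store,	/* /sys/bus/fcoe/ctlr_create */
};
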
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index ac76d8a042d7..f3a5a53e8631 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -83,6 +83,50 @@ static struct notifier_block libfcoe_notifier = {
.notifier_call = libfcoe_device_notification,
};
+/**
+ * fcoe_link_speed_update() - Update the supported and actual link speeds
+ * @lport: The local port to update speeds for
+ *
+ * Returns: 0 if the ethtool query was successful
+ * -1 if the ethtool query failed
+ */
+int fcoe_link_speed_update(struct fc_lport *lport)
+{
+ struct net_device *netdev = fcoe_get_netdev(lport);
+ struct ethtool_cmd ecmd;
+
+ if (!__ethtool_get_settings(netdev, &ecmd)) {
+ lport->link_supported_speeds &=
+ ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+ if (ecmd.supported & SUPPORTED_10000baseT_Full)
+ lport->link_supported_speeds |=
+ FC_PORTSPEED_10GBIT;
+ switch (ethtool_cmd_speed(&ecmd)) {
+ case SPEED_1000:
+ lport->link_speed = FC_PORTSPEED_1GBIT;
+ break;
+ case SPEED_10000:
+ lport->link_speed = FC_PORTSPEED_10GBIT;
+ break;
+ }
+ return 0;
+ }
+ return -1;
+}
+EXPORT_SYMBOL_GPL(fcoe_link_speed_update);
+
+/**
+ * __fcoe_get_lesb() - Get the Link Error Status Block (LESB) for a given lport
+ * @lport: The local port to get the LESB for
+ * @fc_lesb: Pointer to the LESB to be filled up
+ * @netdev: Pointer to the netdev that is associated with the lport
+ *
+ * Note, the Link Error Status Block (LESB) for FCoE is defined in FC-BB-6
+ * Clause 7.11 in v1.04.
+ */
void __fcoe_get_lesb(struct fc_lport *lport,
struct fc_els_lesb *fc_lesb,
struct net_device *netdev)
@@ -112,6 +156,51 @@ void __fcoe_get_lesb(struct fc_lport *lport,
}
EXPORT_SYMBOL_GPL(__fcoe_get_lesb);
+/**
+ * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
+ * @lport: the local port
+ * @fc_lesb: the link error status block
+ */
+void fcoe_get_lesb(struct fc_lport *lport,
+ struct fc_els_lesb *fc_lesb)
+{
+ struct net_device *netdev = fcoe_get_netdev(lport);
+
+ __fcoe_get_lesb(lport, fc_lesb, netdev);
+}
+EXPORT_SYMBOL_GPL(fcoe_get_lesb);
+
+/**
+ * fcoe_ctlr_get_lesb() - Get the Link Error Status Block (LESB) for a given
+ * fcoe controller device
+ * @ctlr_dev: The given fcoe controller device
+ *
+ */
+void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct net_device *netdev = fcoe_get_netdev(fip->lp);
+ struct fcoe_fc_els_lesb *fcoe_lesb;
+ struct fc_els_lesb fc_lesb;
+
+ __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+ fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+ ctlr_dev->lesb.lesb_link_fail =
+ ntohl(fcoe_lesb->lesb_link_fail);
+ ctlr_dev->lesb.lesb_vlink_fail =
+ ntohl(fcoe_lesb->lesb_vlink_fail);
+ ctlr_dev->lesb.lesb_miss_fka =
+ ntohl(fcoe_lesb->lesb_miss_fka);
+ ctlr_dev->lesb.lesb_symb_err =
+ ntohl(fcoe_lesb->lesb_symb_err);
+ ctlr_dev->lesb.lesb_err_block =
+ ntohl(fcoe_lesb->lesb_err_block);
+ ctlr_dev->lesb.lesb_fcs_error =
+ ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_get_lesb);
+
void fcoe_wwn_to_str(u64 wwn, char *buf, int len)
{
u8 wwpn[8];
@@ -627,6 +716,110 @@ static int libfcoe_device_notification(struct notifier_block *notifier,
return NOTIFY_OK;
}
+ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+ struct fcoe_ctlr_device *ctlr_dev = NULL;
+ int rc = 0;
+ int err;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buf);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buf);
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (ft) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+ "FCoE instance on %s.\n",
+ ft->name, netdev->name);
+ rc = -EEXIST;
+ goto out_putdev;
+ }
+
+ ft = fcoe_transport_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ rc = -ENODEV;
+ goto out_putdev;
+ }
+
+ /* pass to transport create */
+ err = ft->alloc ? ft->alloc(netdev) : -ENODEV;
+ if (err) {
+ fcoe_del_netdev_mapping(netdev);
+ rc = -ENOMEM;
+ goto out_putdev;
+ }
+
+ err = fcoe_add_netdev_mapping(netdev, ft);
+ if (err) {
+ LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+ "for FCoE transport %s for %s.\n",
+ ft->name, netdev->name);
+ rc = -ENODEV;
+ goto out_putdev;
+ }
+
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n",
+ ft->name, (ctlr_dev) ? "succeeded" : "failed",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc)
+ return rc;
+ return count;
+}
+
+ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buf);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buf);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport destroy */
+ rc = ft->destroy(netdev);
+ if (rc)
+ goto out_putdev;
+
+ fcoe_del_netdev_mapping(netdev);
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+ rc = count; /* required for successful return */
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(fcoe_ctlr_destroy_store);
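
fcoe_ctlr_create_store() above hands the request to the transport's new alloc hook and falls back to -ENODEV when none is registered, so a transport opts into /sys/bus/fcoe/ctlr_create simply by filling in .alloc, as bnx2fc and fcoe do elsewhere in this series. A hypothetical minimal transport, with invented names, would look like:

static int example_ctlr_alloc(struct net_device *netdev)
{
	/* create the controller link-down and DISABLED so the user can
	 * pick a mode via sysfs before writing 1 to its 'enabled' file */
	return 0;
}

static struct fcoe_transport example_transport = {
	.name     = {"example"},
	.attached = false,
	.list     = LIST_HEAD_INIT(example_transport.list),
	.alloc    = example_ctlr_alloc,
	/* .match, .create, .destroy, .enable, .disable as before */
};
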
/**
* fcoe_transport_create() - Create a fcoe interface
@@ -769,11 +962,7 @@ out_putdev:
dev_put(netdev);
out_nodev:
mutex_unlock(&ft_mutex);
-
- if (rc == -ERESTARTSYS)
- return restart_syscall();
- else
- return rc;
+ return rc;
}
/**
diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h
index 6af5fc3a17d8..d3bb16d11401 100644
--- a/drivers/scsi/fcoe/libfcoe.h
+++ b/drivers/scsi/fcoe/libfcoe.h
@@ -2,9 +2,10 @@
#define _FCOE_LIBFCOE_H_
extern unsigned int libfcoe_debug_logging;
-#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
-#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
-#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+#define LIBFCOE_SYSFS_LOGGING 0x08 /* fcoe_sysfs logging */
#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
do { \
@@ -16,16 +17,19 @@ do { \
#define LIBFCOE_DBG(fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
- printk(KERN_INFO "libfcoe: " fmt, ##args);)
+ pr_info("libfcoe: " fmt, ##args);)
#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
- printk(KERN_INFO "host%d: fip: " fmt, \
- (fip)->lp->host->host_no, ##args);)
+ pr_info("host%d: fip: " fmt, \
+ (fip)->lp->host->host_no, ##args);)
#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \
- printk(KERN_INFO "%s: " fmt, \
- __func__, ##args);)
+ pr_info("%s: " fmt, __func__, ##args);)
+
+#define LIBFCOE_SYSFS_DBG(cdev, fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_SYSFS_LOGGING, \
+ pr_info("ctlr_%d: " fmt, cdev->id, ##args);)
#endif /* _FCOE_LIBFCOE_H_ */
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
index 37c3440bc17c..383598fadf04 100644
--- a/drivers/scsi/fnic/Makefile
+++ b/drivers/scsi/fnic/Makefile
@@ -7,6 +7,8 @@ fnic-y := \
fnic_res.o \
fnic_fcs.o \
fnic_scsi.o \
+ fnic_trace.o \
+ fnic_debugfs.o \
vnic_cq.o \
vnic_dev.o \
vnic_intr.o \
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 95a5ba29320d..98436c363035 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -26,6 +26,7 @@
#include <scsi/libfcoe.h>
#include "fnic_io.h"
#include "fnic_res.h"
+#include "fnic_trace.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
@@ -56,6 +57,34 @@
#define FNIC_NO_TAG -1
/*
+ * Command flags to identify the type of command and for other future
+ * use.
+ */
+#define FNIC_NO_FLAGS 0
+#define FNIC_IO_INITIALIZED BIT(0)
+#define FNIC_IO_ISSUED BIT(1)
+#define FNIC_IO_DONE BIT(2)
+#define FNIC_IO_REQ_NULL BIT(3)
+#define FNIC_IO_ABTS_PENDING BIT(4)
+#define FNIC_IO_ABORTED BIT(5)
+#define FNIC_IO_ABTS_ISSUED BIT(6)
+#define FNIC_IO_TERM_ISSUED BIT(7)
+#define FNIC_IO_INTERNAL_TERM_ISSUED BIT(8)
+#define FNIC_IO_ABT_TERM_DONE BIT(9)
+#define FNIC_IO_ABT_TERM_REQ_NULL BIT(10)
+#define FNIC_IO_ABT_TERM_TIMED_OUT BIT(11)
+#define FNIC_DEVICE_RESET BIT(12) /* Device reset request */
+#define FNIC_DEV_RST_ISSUED BIT(13)
+#define FNIC_DEV_RST_TIMED_OUT BIT(14)
+#define FNIC_DEV_RST_ABTS_ISSUED BIT(15)
+#define FNIC_DEV_RST_TERM_ISSUED BIT(16)
+#define FNIC_DEV_RST_DONE BIT(17)
+#define FNIC_DEV_RST_REQ_NULL BIT(18)
+#define FNIC_DEV_RST_ABTS_DONE BIT(19)
+#define FNIC_DEV_RST_TERM_DONE BIT(20)
+#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
+
+/*
* Usage of the scsi_cmnd scratchpad.
* These fields are locked by the hashed io_req_lock.
*/
@@ -64,6 +93,7 @@
#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
+#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status)
#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
@@ -71,9 +101,28 @@
#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */
#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */
#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */
+#define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */
#define FNIC_MAX_FCP_TARGET 256
+/**
+ * state_flags to identify host state along with fnic's state
+ **/
+#define __FNIC_FLAGS_FWRESET BIT(0) /* fwreset in progress */
+#define __FNIC_FLAGS_BLOCK_IO BIT(1) /* IOs are blocked */
+
+#define FNIC_FLAGS_NONE (0)
+#define FNIC_FLAGS_FWRESET (__FNIC_FLAGS_FWRESET | \
+ __FNIC_FLAGS_BLOCK_IO)
+
+#define FNIC_FLAGS_IO_BLOCKED (__FNIC_FLAGS_BLOCK_IO)
+
+#define fnic_set_state_flags(fnicp, st_flags) \
+ __fnic_set_state_flags(fnicp, st_flags, 0)
+
+#define fnic_clear_state_flags(fnicp, st_flags) \
+ __fnic_set_state_flags(fnicp, st_flags, 1)
+
extern unsigned int fnic_log_level;
#define FNIC_MAIN_LOGGING 0x01
@@ -170,6 +219,9 @@ struct fnic {
struct completion *remove_wait; /* device remove thread blocks */
+ atomic_t in_flight; /* io counter */
+ u32 _reserved; /* fill hole */
+ unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
spinlock_t fnic_lock;
@@ -267,4 +319,12 @@ const char *fnic_state_to_str(unsigned int state);
void fnic_log_q_error(struct fnic *fnic);
void fnic_handle_link_event(struct fnic *fnic);
+int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
+
+static inline int
+fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
+{
+ return ((fnic->state_flags & st_flags) == st_flags);
+}
+void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
#endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
new file mode 100644
index 000000000000..adc1f7f471f5
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include "fnic.h"
+
+static struct dentry *fnic_trace_debugfs_root;
+static struct dentry *fnic_trace_debugfs_file;
+static struct dentry *fnic_trace_enable;
+
+/*
+ * fnic_trace_ctrl_open - Open the trace_enable file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the trace enable/disable flag.
+ *
+ * Description:
+ * This routine opens a debugfs file trace_enable.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+/*
+ * fnic_trace_ctrl_read - Read a trace_enable debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads value of variable fnic_tracing_enabled
+ * and stores into local @buf. It will start reading file at @ppos and
+ * copy up to @cnt of data to @ubuf from @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_trace_ctrl_read(struct file *filp,
+ char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int len;
+ len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_trace_ctrl_write - Write to trace_enable debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine writes data from user buffer @ubuf to buffer @buf and
+ * sets fnic_tracing_enabled value as per user input.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_trace_ctrl_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ fnic_tracing_enabled = val;
+ (*ppos)++;
+
+ return cnt;
+}
+
+/*
+ * fnic_trace_debugfs_open - Open the fnic trace log
+ * @inode: The inode pointer
+ * @file: The file pointer to attach the log output
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation.
+ * It allocates the necessary buffer for the log, fills the buffer from
+ * the in-memory log and then returns a pointer to that log in
+ * the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return
+ * a negative error value.
+ */
+static int fnic_trace_debugfs_open(struct inode *inode,
+ struct file *file)
+{
+ fnic_dbgfs_t *fnic_dbg_prt;
+ fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
+ if (!fnic_dbg_prt)
+ return -ENOMEM;
+
+ fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE)));
+ if (!fnic_dbg_prt->buffer) {
+ kfree(fnic_dbg_prt);
+ return -ENOMEM;
+ }
+ memset((void *)fnic_dbg_prt->buffer, 0,
+ (3*(trace_max_pages * PAGE_SIZE)));
+ fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
+ file->private_data = fnic_dbg_prt;
+ return 0;
+}
+
+/*
+ * fnic_trace_debugfs_lseek - Seek through a debugfs file
+ * @file: The file pointer to seek through.
+ * @offset: The offset to seek to or the amount to seek by.
+ * @howto: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation.
+ * The @howto parameter indicates whether @offset is the offset to directly
+ * seek to, or if it is a value to seek forward or reverse by. This function
+ * figures out what the new offset of the debugfs file will be and assigns
+ * that value to the f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ */
+static loff_t fnic_trace_debugfs_lseek(struct file *file,
+ loff_t offset,
+ int howto)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+ loff_t pos = -1;
+
+ switch (howto) {
+ case 0:
+ pos = offset;
+ break;
+ case 1:
+ pos = file->f_pos + offset;
+ break;
+ case 2:
+ pos = fnic_dbg_prt->buffer_len - offset;
+ }
+ return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
+ -EINVAL : (file->f_pos = pos);
+}
+
+/*
+ * fnic_trace_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_trace_debugfs_read(struct file *file,
+ char __user *ubuf,
+ size_t nbytes,
+ loff_t *pos)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+ int rc = 0;
+ rc = simple_read_from_buffer(ubuf, nbytes, pos,
+ fnic_dbg_prt->buffer,
+ fnic_dbg_prt->buffer_len);
+ return rc;
+}
+
+/*
+ * fnic_trace_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_trace_debugfs_release(struct inode *inode,
+ struct file *file)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+
+ vfree(fnic_dbg_prt->buffer);
+ kfree(fnic_dbg_prt);
+ return 0;
+}
+
+static const struct file_operations fnic_trace_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_ctrl_open,
+ .read = fnic_trace_ctrl_read,
+ .write = fnic_trace_ctrl_write,
+};
+
+static const struct file_operations fnic_trace_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_debugfs_open,
+ .llseek = fnic_trace_debugfs_lseek,
+ .read = fnic_trace_debugfs_read,
+ .release = fnic_trace_debugfs_release,
+};
+
+/*
+ * fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging
+ *
+ * Description:
+ * When debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already created, this routine will create the
+ * fnic directory. It will create the file "trace" to log the fnic trace
+ * buffer output into debugfs, and the file "tracing_enable" to control
+ * enabling/disabling of trace logging into the trace buffer.
+ */
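+/*
+ * Typical usage from userspace, assuming debugfs is mounted at the
+ * conventional /sys/kernel/debug location:
+ *
+ *   echo 0 > /sys/kernel/debug/fnic/tracing_enable   (disable tracing)
+ *   echo 1 > /sys/kernel/debug/fnic/tracing_enable   (enable tracing)
+ *   cat /sys/kernel/debug/fnic/trace                 (dump the trace buffer)
+ */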
+int fnic_trace_debugfs_init(void)
+{
+ int rc = -1;
+ fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+ if (!fnic_trace_debugfs_root) {
+ printk(KERN_DEBUG "Cannot create debugfs root\n");
+ return rc;
+ }
+ fnic_trace_enable = debugfs_create_file("tracing_enable",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ NULL, &fnic_trace_ctrl_fops);
+
+ if (!fnic_trace_enable) {
+ printk(KERN_DEBUG "Cannot create trace_enable file"
+ " under debugfs");
+ return rc;
+ }
+
+ fnic_trace_debugfs_file = debugfs_create_file("trace",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ NULL,
+ &fnic_trace_debugfs_fops);
+
+ if (!fnic_trace_debugfs_file) {
+ printk(KERN_DEBUG "Cannot create trace file under debugfs");
+ return rc;
+ }
+ rc = 0;
+ return rc;
+}
+
+/*
+ * fnic_trace_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic trace logging.
+ */
+void fnic_trace_debugfs_terminate(void)
+{
+ if (fnic_trace_debugfs_file) {
+ debugfs_remove(fnic_trace_debugfs_file);
+ fnic_trace_debugfs_file = NULL;
+ }
+ if (fnic_trace_enable) {
+ debugfs_remove(fnic_trace_enable);
+ fnic_trace_enable = NULL;
+ }
+ if (fnic_trace_debugfs_root) {
+ debugfs_remove(fnic_trace_debugfs_root);
+ fnic_trace_debugfs_root = NULL;
+ }
+}
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 3c53c3478ee7..483eb9dbe663 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -495,7 +495,8 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
}
fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
- fnic->vlan_hw_insert, fnic->vlan_id, 1);
+ 0 /* hw inserts cos value */,
+ fnic->vlan_id, 1);
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}
@@ -563,7 +564,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
}
fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
- fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
+ 0 /* hw inserts cos value */,
+ fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index f0b896988cd5..c35b8f1889ea 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -21,7 +21,7 @@
#include <scsi/fc/fc_fcp.h>
#define FNIC_DFLT_SG_DESC_CNT 32
-#define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */
+#define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */
#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
struct host_sg_desc {
@@ -45,7 +45,8 @@ enum fnic_sgl_list_type {
};
enum fnic_ioreq_state {
- FNIC_IOREQ_CMD_PENDING = 0,
+ FNIC_IOREQ_NOT_INITED = 0,
+ FNIC_IOREQ_CMD_PENDING,
FNIC_IOREQ_ABTS_PENDING,
FNIC_IOREQ_ABTS_COMPLETE,
FNIC_IOREQ_CMD_COMPLETE,
@@ -60,6 +61,7 @@ struct fnic_io_req {
u8 sgl_type; /* device DMA descriptor list type */
u8 io_completed:1; /* set to 1 when fw completes IO */
u32 port_id; /* remote port DID */
+ unsigned long start_time; /* in jiffies */
struct completion *abts_done; /* completion for abts */
struct completion *dr_done; /* completion for device reset */
};
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index fbf3ac6e0c55..d601ac543c52 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -68,6 +68,10 @@ unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+unsigned int fnic_trace_max_pages = 16;
+module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
+ "for fnic trace buffer");
static struct libfc_function_template fnic_transport_template = {
.frame_send = fnic_send,
@@ -624,6 +628,9 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
fnic->state = FNIC_IN_FC_MODE;
+ atomic_set(&fnic->in_flight, 0);
+ fnic->state_flags = FNIC_FLAGS_NONE;
+
/* Enable hardware stripping of vlan header on ingress */
fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
@@ -858,6 +865,14 @@ static int __init fnic_init_module(void)
printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ /* Allocate memory for trace buffer */
+ err = fnic_trace_buf_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "Trace buffer initialization Failed "
+ "Fnic Tracing utility is disabled\n");
+ fnic_trace_free();
+ }
+
/* Create a cache for allocation of default size sgls */
len = sizeof(struct fnic_dflt_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
@@ -928,6 +943,7 @@ err_create_fnic_ioreq_slab:
err_create_fnic_sgl_slab_max:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
+ fnic_trace_free();
return err;
}
@@ -939,6 +955,7 @@ static void __exit fnic_cleanup_module(void)
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
fc_release_transport(fnic_fc_transport);
+ fnic_trace_free();
}
module_init(fnic_init_module);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index c40ce52ed7c6..be99e7549d89 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -47,6 +47,7 @@ const char *fnic_state_str[] = {
};
static const char *fnic_ioreq_state_str[] = {
+ [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
@@ -165,6 +166,33 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
}
+/**
+ * __fnic_set_state_flags
+ * Sets/Clears bits in fnic's state_flags
+ **/
+void
+__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
+ unsigned long clearbits)
+{
+ struct Scsi_Host *host = fnic->lport->host;
+ int sh_locked = spin_is_locked(host->host_lock);
+ unsigned long flags = 0;
+
+ if (!sh_locked)
+ spin_lock_irqsave(host->host_lock, flags);
+
+ if (clearbits)
+ fnic->state_flags &= ~st_flags;
+ else
+ fnic->state_flags |= st_flags;
+
+ if (!sh_locked)
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ return;
+}
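+/*
+ * Note: the fnic_set_state_flags()/fnic_clear_state_flags() wrappers used
+ * throughout this file (presumably defined in fnic.h as part of this patch)
+ * call this helper with @clearbits of 0 and 1 respectively.
+ */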
+
+
/*
* fnic_fw_reset_handler
* Routine to send reset msg to fw
@@ -175,9 +203,16 @@ int fnic_fw_reset_handler(struct fnic *fnic)
int ret = 0;
unsigned long flags;
+ /* indicate fwreset to io path */
+ fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
skb_queue_purge(&fnic->frame_queue);
skb_queue_purge(&fnic->tx_queue);
+ /* wait for io cmpl */
+ while (atomic_read(&fnic->in_flight))
+ schedule_timeout(msecs_to_jiffies(1));
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -193,9 +228,12 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!ret)
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issued fw reset\n");
- else
+ else {
+ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Failed to issue fw reset\n");
+ }
+
return ret;
}
@@ -312,6 +350,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fnic_queue_wq_copy_desc failure - no descriptors\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -351,16 +391,20 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
*/
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
- struct fc_lport *lp;
+ struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
- struct fnic_io_req *io_req;
- struct fnic *fnic;
+ struct fnic_io_req *io_req = NULL;
+ struct fnic *fnic = lport_priv(lp);
struct vnic_wq_copy *wq;
int ret;
- int sg_count;
+ u64 cmd_trace;
+ int sg_count = 0;
unsigned long flags;
unsigned long ptr;
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
rport = starget_to_rport(scsi_target(sc->device));
ret = fc_remote_port_chkready(rport);
if (ret) {
@@ -369,20 +413,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
return 0;
}
- lp = shost_priv(sc->device->host);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
return SCSI_MLQUEUE_HOST_BUSY;
+ atomic_inc(&fnic->in_flight);
+
/*
* Release host lock, use driver resource specific locks from here.
* Don't re-enable interrupts in case they were disabled prior to the
* caller disabling them.
*/
spin_unlock(lp->host->host_lock);
+ CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
+ CMD_FLAGS(sc) = FNIC_NO_FLAGS;
/* Get a new io_req for this SCSI IO */
- fnic = lport_priv(lp);
-
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
if (!io_req) {
ret = SCSI_MLQUEUE_HOST_BUSY;
@@ -393,6 +438,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
/* Map the data buffer */
sg_count = scsi_dma_map(sc);
if (sg_count < 0) {
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, 0, sc->cmnd[0],
+ sg_count, CMD_STATE(sc));
mempool_free(io_req, fnic->io_req_pool);
goto out;
}
@@ -427,8 +475,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
/* initialize rest of io_req */
io_req->port_id = rport->port_id;
+ io_req->start_time = jiffies;
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
CMD_SP(sc) = (char *)io_req;
+ CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
sc->scsi_done = done;
/* create copy wq desc and enqueue it */
@@ -440,7 +490,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
* refetch the pointer under the lock.
*/
spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
-
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, 0, 0, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
CMD_SP(sc) = NULL;
@@ -450,8 +502,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
+ } else {
+ /* REVISIT: Use per IO lock in the final code */
+ CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
}
out:
+ cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
+ (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
+ sc->cmnd[5]);
+
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, io_req,
+ sg_count, cmd_trace,
+ (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
+ atomic_dec(&fnic->in_flight);
/* acquire host lock before returning to SCSI */
spin_lock(lp->host->host_lock);
return ret;
@@ -529,6 +594,8 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
fnic_flush_tx(fnic);
reset_cmpl_handler_end:
+ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
return ret;
}
@@ -622,6 +689,7 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
struct vnic_wq_copy *wq;
u16 request_out = desc->u.ack.request_out;
unsigned long flags;
+ u64 *ox_id_tag = (u64 *)(void *)desc;
/* mark the ack state */
wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
@@ -632,6 +700,9 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
fnic->fw_ack_recd[0] = 1;
}
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ FNIC_TRACE(fnic_fcpio_ack_handler,
+ fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
+ ox_id_tag[4], ox_id_tag[5]);
}
/*
@@ -651,27 +722,53 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
struct scsi_cmnd *sc;
unsigned long flags;
spinlock_t *io_lock;
+ u64 cmd_trace;
+ unsigned long start_time;
/* Decode the cmpl description to get the io_req id */
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
+ icmnd_cmpl = &desc->u.icmnd_cmpl;
- if (id >= FNIC_MAX_IO_REQ)
+ if (id >= FNIC_MAX_IO_REQ) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range tag %x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
return;
+ }
sc = scsi_host_find_tag(fnic->lport->host, id);
WARN_ON_ONCE(!sc);
- if (!sc)
+ if (!sc) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "icmnd_cmpl sc is null - "
+ "hdr status = %s tag = 0x%x desc = 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, desc);
+ FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+ fnic->lport->host->host_no, id,
+ ((u64)icmnd_cmpl->_resvd0[1] << 16 |
+ (u64)icmnd_cmpl->_resvd0[0]),
+ ((u64)hdr_status << 16 |
+ (u64)icmnd_cmpl->scsi_status << 8 |
+ (u64)icmnd_cmpl->flags), desc,
+ (u64)icmnd_cmpl->residual, 0);
return;
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
+ CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags);
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "icmnd_cmpl io_req is null - "
+ "hdr status = %s tag = 0x%x sc 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, sc);
return;
}
+ start_time = io_req->start_time;
/* firmware completed the io */
io_req->io_completed = 1;
@@ -682,6 +779,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
*/
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+ switch (hdr_status) {
+ case FCPIO_SUCCESS:
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl ABTS pending hdr status = %s "
+ "sc 0x%p scsi_status %x residual %d\n",
+ fnic_fcpio_status_to_str(hdr_status), sc,
+ icmnd_cmpl->scsi_status,
+ icmnd_cmpl->residual);
+ break;
+ case FCPIO_ABORTED:
+ CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+ break;
+ default:
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl abts pending "
+ "hdr status = %s tag = 0x%x sc = 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status),
+ id, sc);
+ break;
+ }
return;
}
@@ -765,6 +884,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
/* Break link with the SCSI command */
CMD_SP(sc) = NULL;
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
spin_unlock_irqrestore(io_lock, flags);
@@ -772,6 +892,20 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
mempool_free(io_req, fnic->io_req_pool);
+ cmd_trace = ((u64)hdr_status << 56) |
+ (u64)icmnd_cmpl->scsi_status << 48 |
+ (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
+
+ FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ ((u64)icmnd_cmpl->_resvd0[1] << 56 |
+ (u64)icmnd_cmpl->_resvd0[0] << 48 |
+ jiffies_to_msecs(jiffies - start_time)),
+ desc, cmd_trace,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
fnic->lport->host_stats.fcp_input_requests++;
fnic->fcp_input_bytes += xfer_len;
@@ -784,7 +918,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
/* Call SCSI completion function to complete the IO */
if (sc->scsi_done)
sc->scsi_done(sc);
-
}
/* fnic_fcpio_itmf_cmpl_handler
@@ -801,28 +934,54 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
struct fnic_io_req *io_req;
unsigned long flags;
spinlock_t *io_lock;
+ unsigned long start_time;
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
- if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
+ if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range tag %x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
return;
+ }
sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
WARN_ON_ONCE(!sc);
- if (!sc)
+ if (!sc) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
+ fnic_fcpio_status_to_str(hdr_status), id);
return;
-
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "itmf_cmpl io_req is null - "
+ "hdr status = %s tag = 0x%x sc 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, sc);
return;
}
+ start_time = io_req->start_time;
- if (id & FNIC_TAG_ABORT) {
+ if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
+ /* Abort and terminate completion of device reset req */
+ /* REVISIT : Add asserts about various flags */
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset abts cmpl recd. id %x status %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ CMD_ABTS_STATUS(sc) = hdr_status;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ } else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
@@ -832,6 +991,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
CMD_ABTS_STATUS(sc) = hdr_status;
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -855,14 +1015,58 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id,
+ sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc,
+ (((u64)hdr_status << 40) |
+ (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) |
+ CMD_STATE(sc)));
sc->scsi_done(sc);
+ }
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
CMD_LR_STATUS(sc) = hdr_status;
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Terminate pending "
+ "dev reset cmpl recd. id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
+ if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+ /* Need to wait for terminate completion */
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset cmpl recd after time out. "
+ "id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -889,7 +1093,6 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
struct fcpio_fw_req *desc)
{
struct fnic *fnic = vnic_dev_priv(vdev);
- int ret = 0;
switch (desc->hdr.type) {
case FCPIO_ACK: /* fw copied copy wq desc to its queue */
@@ -906,11 +1109,11 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
- ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
+ fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
break;
case FCPIO_RESET_CMPL: /* fw completed reset */
- ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
+ fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
break;
default:
@@ -920,7 +1123,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
break;
}
- return ret;
+ return 0;
}
/*
@@ -950,6 +1153,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
unsigned long flags = 0;
struct scsi_cmnd *sc;
spinlock_t *io_lock;
+ unsigned long start_time = 0;
for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
if (i == exclude_id)
@@ -962,6 +1166,23 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ /*
+ * We will be here only when FW completes reset
+ * without sending completions for outstanding ios.
+ */
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req && io_req->dr_done)
+ complete(io_req->dr_done);
+ else if (io_req && io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto cleanup_scsi_cmd;
@@ -975,6 +1196,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
* If there is a scsi_cmnd associated with this io_req, then
* free the corresponding state
*/
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -984,8 +1206,18 @@ cleanup_scsi_cmd:
" DID_TRANSPORT_DISRUPTED\n");
/* Complete the command to SCSI */
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_cleanup_io,
+ sc->device->host->host_no, i, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
sc->scsi_done(sc);
+ }
}
}
@@ -998,6 +1230,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
struct scsi_cmnd *sc;
unsigned long flags;
spinlock_t *io_lock;
+ unsigned long start_time = 0;
/* get the tag reference */
fcpio_tag_id_dec(&desc->hdr.tag, &id);
@@ -1027,6 +1260,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
spin_unlock_irqrestore(io_lock, flags);
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -1035,8 +1269,17 @@ wq_copy_cleanup_scsi_cmd:
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_wq_copy_cleanup_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
sc->scsi_done(sc);
+ }
}
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
@@ -1044,8 +1287,18 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct Scsi_Host *host = fnic->lport->host;
unsigned long flags;
+ spin_lock_irqsave(host->host_lock, flags);
+ if (unlikely(fnic_chk_state_flags_locked(fnic,
+ FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return 1;
+ } else
+ atomic_inc(&fnic->in_flight);
+ spin_unlock_irqrestore(host->host_lock, flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -1053,6 +1306,9 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
if (!vnic_wq_copy_desc_avail(wq)) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ atomic_dec(&fnic->in_flight);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_queue_abort_io_req: failure: no descriptors\n");
return 1;
}
fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
@@ -1060,12 +1316,15 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
fnic->config.ra_tov, fnic->config.ed_tov);
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ atomic_dec(&fnic->in_flight);
+
return 0;
}
-void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
int tag;
+ int abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1075,13 +1334,14 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
- "fnic_rport_reset_exch called portid 0x%06x\n",
+ "fnic_rport_exch_reset called portid 0x%06x\n",
port_id);
if (fnic->in_remove)
return;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ abt_tag = tag;
sc = scsi_host_find_tag(fnic->lport->host, tag);
if (!sc)
continue;
@@ -1096,6 +1356,15 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
continue;
}
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
@@ -1104,9 +1373,29 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
spin_unlock_irqrestore(io_lock, flags);
continue;
}
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_rport_exch_reset: io_req->abts_done is set "
+ "state is %s\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ }
+
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "rport_exch_reset "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ }
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag = (tag | FNIC_TAG_DEV_RST);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst sc 0x%p\n",
+ sc);
+ }
BUG_ON(io_req->abts_done);
@@ -1118,7 +1407,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
@@ -1127,12 +1416,17 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
- io_lock = fnic_io_lock_hash(fnic, sc);
-
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
}
@@ -1141,6 +1435,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
void fnic_terminate_rport_io(struct fc_rport *rport)
{
int tag;
+ int abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1154,14 +1449,15 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host, "fnic_terminate_rport_io called"
- " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n",
- rport->port_name, rport->node_name,
+ " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
+ rport->port_name, rport->node_name, rport,
rport->port_id);
if (fnic->in_remove)
return;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ abt_tag = tag;
sc = scsi_host_find_tag(fnic->lport->host, tag);
if (!sc)
continue;
@@ -1180,6 +1476,14 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
continue;
}
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
@@ -1188,9 +1492,27 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
spin_unlock_irqrestore(io_lock, flags);
continue;
}
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_terminate_rport_io: io_req->abts_done is set "
+ "state is %s\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ }
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fnic_terminate_rport_io "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ }
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag = (tag | FNIC_TAG_DEV_RST);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
+ }
BUG_ON(io_req->abts_done);
@@ -1203,7 +1525,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
@@ -1212,12 +1534,17 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
- io_lock = fnic_io_lock_hash(fnic, sc);
-
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
}
@@ -1232,13 +1559,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
{
struct fc_lport *lp;
struct fnic *fnic;
- struct fnic_io_req *io_req;
+ struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
spinlock_t *io_lock;
unsigned long flags;
+ unsigned long start_time = 0;
int ret = SUCCESS;
- u32 task_req;
+ u32 task_req = 0;
struct scsi_lun fc_lun;
+ int tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
/* Wait for rport to unblock */
@@ -1249,9 +1578,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fnic = lport_priv(lp);
rport = starget_to_rport(scsi_target(sc->device));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
- rport->port_id, sc->device->lun, sc->request->tag);
+ tag = sc->request->tag;
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n",
+ rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+
+ CMD_FLAGS(sc) = FNIC_NO_FLAGS;
if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
ret = FAILED;
@@ -1318,6 +1651,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
ret = FAILED;
goto fnic_abort_cmd_end;
}
+ if (task_req == FCPIO_ITMF_ABT_TASK)
+ CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
/*
* We queued an abort IO, wait for its completion.
@@ -1336,6 +1673,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
goto fnic_abort_cmd_end;
}
@@ -1344,6 +1682,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* fw did not complete abort, timed out */
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
}
@@ -1359,12 +1698,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
spin_unlock_irqrestore(io_lock, flags);
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
fnic_abort_cmd_end:
+ FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
+ sc->request->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Returning from abort cmd %s\n",
+ "Returning from abort cmd type %x %s\n", task_req,
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
return ret;
@@ -1375,16 +1723,28 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct Scsi_Host *host = fnic->lport->host;
struct scsi_lun fc_lun;
int ret = 0;
unsigned long intr_flags;
+ spin_lock_irqsave(host->host_lock, intr_flags);
+ if (unlikely(fnic_chk_state_flags_locked(fnic,
+ FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(host->host_lock, intr_flags);
+ return FAILED;
+ } else
+ atomic_inc(&fnic->in_flight);
+ spin_unlock_irqrestore(host->host_lock, intr_flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
free_wq_copy_descs(fnic, wq);
if (!vnic_wq_copy_desc_avail(wq)) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "queue_dr_io_req failure - no descriptors\n");
ret = -EAGAIN;
goto lr_io_req_end;
}
@@ -1399,6 +1759,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ atomic_dec(&fnic->in_flight);
return ret;
}
@@ -1412,7 +1773,7 @@ lr_io_req_end:
static int fnic_clean_pending_aborts(struct fnic *fnic,
struct scsi_cmnd *lr_sc)
{
- int tag;
+ int tag, abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1421,6 +1782,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
struct scsi_lun fc_lun;
struct scsi_device *lun_dev = lr_sc->device;
DECLARE_COMPLETION_ONSTACK(tm_done);
+ enum fnic_ioreq_state old_ioreq_state;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
sc = scsi_host_find_tag(fnic->lport->host, tag);
@@ -1449,7 +1811,41 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(CMD_STATE(sc)));
- BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s dev rst not pending sc 0x%p\n", __func__,
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ old_ioreq_state = CMD_STATE(sc);
+ /*
+ * Any pending IO issued prior to reset is expected to be
+ * in the abts pending state; if it is not, set
+ * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
+ * When the IO completes, it will be handed over and
+ * handled in this function.
+ */
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+
+ if (io_req->abts_done)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "%s: io_req->abts_done is set state is %s\n",
+ __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ BUG_ON(io_req->abts_done);
+
+ abt_tag = tag;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag |= FNIC_TAG_DEV_RST;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s: dev rst sc 0x%p\n", __func__, sc);
+ }
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
io_req->abts_done = &tm_done;
@@ -1458,17 +1854,25 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (io_req)
io_req->abts_done = NULL;
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
ret = 1;
goto clean_pending_aborts_end;
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies
@@ -1479,8 +1883,8 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
- ret = 1;
- goto clean_pending_aborts_end;
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ continue;
}
io_req->abts_done = NULL;
@@ -1488,6 +1892,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
/* if abort is still pending with fw, fail */
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
ret = 1;
goto clean_pending_aborts_end;
}
@@ -1498,10 +1903,75 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
mempool_free(io_req, fnic->io_req_pool);
}
+ schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
+
+ /* walk again to check if IOs are still pending in fw */
+ if (fnic_is_abts_pending(fnic, lr_sc))
+ ret = FAILED;
+
clean_pending_aborts_end:
return ret;
}
+/**
+ * fnic_scsi_host_start_tag
+ * Allocates tagid from host's tag list
+ **/
+static inline int
+fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+ struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+ int tag, ret = SCSI_NO_TAG;
+
+ BUG_ON(!bqt);
+ if (!bqt) {
+ pr_err("Tags are not supported\n");
+ goto end;
+ }
+
+ do {
+ tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
+ if (tag >= bqt->max_depth) {
+ pr_err("Tag allocation failure\n");
+ goto end;
+ }
+ } while (test_and_set_bit(tag, bqt->tag_map));
+
+ bqt->tag_index[tag] = sc->request;
+ sc->request->tag = tag;
+ sc->tag = tag;
+ if (!sc->request->special)
+ sc->request->special = sc;
+
+ ret = tag;
+
+end:
+ return ret;
+}
+
+/**
+ * fnic_scsi_host_end_tag
+ * frees tag allocated by fnic_scsi_host_start_tag.
+ **/
+static inline void
+fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+ struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+ int tag = sc->request->tag;
+
+ if (tag == SCSI_NO_TAG)
+ return;
+
+ BUG_ON(!bqt || !bqt->tag_index[tag]);
+ if (!bqt)
+ return;
+
+ bqt->tag_index[tag] = NULL;
+ clear_bit(tag, bqt->tag_map);
+
+ return;
+}
+
/*
* SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
* fail to get aborted. It calls driver's eh_device_reset with a SCSI command
@@ -1511,13 +1981,17 @@ int fnic_device_reset(struct scsi_cmnd *sc)
{
struct fc_lport *lp;
struct fnic *fnic;
- struct fnic_io_req *io_req;
+ struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
int status;
int ret = FAILED;
spinlock_t *io_lock;
unsigned long flags;
+ unsigned long start_time = 0;
+ struct scsi_lun fc_lun;
+ int tag = 0;
DECLARE_COMPLETION_ONSTACK(tm_done);
+ int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
@@ -1529,8 +2003,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
rport = starget_to_rport(scsi_target(sc->device));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Device reset called FCID 0x%x, LUN 0x%x\n",
- rport->port_id, sc->device->lun);
+ "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n",
+ rport->port_id, sc->device->lun, sc);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
goto fnic_device_reset_end;
@@ -1539,6 +2013,16 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (fc_remote_port_chkready(rport))
goto fnic_device_reset_end;
+ CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
+ /* Allocate tag if not present */
+
+ tag = sc->request->tag;
+ if (unlikely(tag < 0)) {
+ tag = fnic_scsi_host_start_tag(fnic, sc);
+ if (unlikely(tag == SCSI_NO_TAG))
+ goto fnic_device_reset_end;
+ tag_gen_flag = 1;
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1562,8 +2046,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
- sc->request->tag);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
/*
* issue the device reset, if enqueue failed, clean up the ioreq
@@ -1576,6 +2059,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
io_req->dr_done = NULL;
goto fnic_device_reset_clean;
}
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
/*
* Wait on the local completion for LUN reset. The io_req may be
@@ -1588,12 +2074,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
goto fnic_device_reset_end;
}
io_req->dr_done = NULL;
status = CMD_LR_STATUS(sc);
- spin_unlock_irqrestore(io_lock, flags);
/*
* If lun reset not completed, bail out with failed. io_req
@@ -1602,7 +2089,53 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (status == FCPIO_INVALID_CODE) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n");
- goto fnic_device_reset_end;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+ spin_unlock_irqrestore(io_lock, flags);
+ int_to_scsilun(sc->device->lun, &fc_lun);
+ /*
+ * Issue abort and terminate on the device reset request.
+ * If queuing of the abort fails, retry it after a delay.
+ */
+ while (1) {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+ spin_unlock_irqrestore(io_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(io_lock, flags);
+ if (fnic_queue_abort_io_req(fnic,
+ tag | FNIC_TAG_DEV_RST,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ io_req->abts_done = &tm_done;
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Abort and terminate issued on Device reset "
+ "tag 0x%x sc 0x%p\n", tag, sc);
+ break;
+ }
+ }
+ while (1) {
+ spin_lock_irqsave(io_lock, flags);
+ if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ spin_unlock_irqrestore(io_lock, flags);
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+ break;
+ } else {
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req->abts_done = NULL;
+ goto fnic_device_reset_clean;
+ }
+ }
+ } else {
+ spin_unlock_irqrestore(io_lock, flags);
}
/* Completed, but not successful, clean up the io_req, return fail */
@@ -1645,11 +2178,24 @@ fnic_device_reset_clean:
spin_unlock_irqrestore(io_lock, flags);
if (io_req) {
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
fnic_device_reset_end:
+ FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
+ sc->request->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+ /* free tag if it is allocated */
+ if (unlikely(tag_gen_flag))
+ fnic_scsi_host_end_tag(fnic, sc);
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from device reset %s\n",
(ret == SUCCESS) ?
@@ -1735,7 +2281,15 @@ void fnic_scsi_abort_io(struct fc_lport *lp)
DECLARE_COMPLETION_ONSTACK(remove_wait);
/* Issue firmware reset for fnic, wait for reset to complete */
+retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is in progress, poll for its completion */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_timeout(msecs_to_jiffies(100));
+ goto retry_fw_reset;
+ }
+
fnic->remove_wait = &remove_wait;
old_state = fnic->state;
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
@@ -1776,7 +2330,14 @@ void fnic_scsi_cleanup(struct fc_lport *lp)
struct fnic *fnic = lport_priv(lp);
/* issue fw reset */
+retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is in progress, poll for its completion */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_timeout(msecs_to_jiffies(100));
+ goto retry_fw_reset;
+ }
old_state = fnic->state;
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
@@ -1822,3 +2383,61 @@ call_fc_exch_mgr_reset:
fc_exch_mgr_reset(lp, sid, did);
}
+
+/*
+ * fnic_is_abts_pending() is a helper function that
+ * walks through the tag map to check whether any IOs are pending.
+ * If there is at least one, it returns 1 (true), otherwise 0 (false).
+ * If @lr_sc is non-NULL, it checks only IOs specific to that LUN;
+ * otherwise, it checks all IOs.
+ */
+int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
+{
+ int tag;
+ struct fnic_io_req *io_req;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ int ret = 0;
+ struct scsi_cmnd *sc;
+ struct scsi_device *lun_dev = NULL;
+
+ if (lr_sc)
+ lun_dev = lr_sc->device;
+
+ /* walk again to check if IOs are still pending in fw */
+ for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ sc = scsi_host_find_tag(fnic->lport->host, tag);
+ /*
+ * ignore this lun reset cmd or cmds that do not belong to
+ * this lun
+ */
+ if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
+ continue;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ if (!io_req || sc->device != lun_dev) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to the LUN that we are resetting
+ */
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "Found IO in %s on lun\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = 1;
+ continue;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
new file mode 100644
index 000000000000..23a60e3d8527
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include "fnic_io.h"
+#include "fnic.h"
+
+unsigned int trace_max_pages;
+static int fnic_max_trace_entries;
+
+static unsigned long fnic_trace_buf_p;
+static DEFINE_SPINLOCK(fnic_trace_lock);
+
+static fnic_trace_dbg_t fnic_trace_entries;
+int fnic_tracing_enabled = 1;
+
+/*
+ * fnic_trace_get_buf - Give a buffer pointer to the caller to fill in trace information
+ *
+ * Description:
+ * This routine gets the next available trace buffer entry location at
+ * @wr_idx from the allocated trace buffer pages and gives that memory
+ * location to the caller to store the trace information.
+ *
+ * Return Value:
+ * This routine returns a pointer to the next available trace entry
+ * @fnic_buf_head for the caller to fill in trace information.
+ */
+fnic_trace_data_t *fnic_trace_get_buf(void)
+{
+ unsigned long fnic_buf_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic_trace_lock, flags);
+
+ /*
+ * Get next available memory location for writing trace information
+ * at @wr_idx and increment @wr_idx
+ */
+ fnic_buf_head =
+ fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
+ fnic_trace_entries.wr_idx++;
+
+ /*
+ * If the trace buffer is full, wrap wr_idx around to
+ * start from zero
+ */
+ if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
+ fnic_trace_entries.wr_idx = 0;
+
+ /*
+ * If write index @wr_idx has caught up with read index @rd_idx,
+ * increment @rd_idx to move to the next entry in the trace buffer
+ */
+ if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
+ fnic_trace_entries.rd_idx++;
+ if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
+ fnic_trace_entries.rd_idx = 0;
+ }
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return (fnic_trace_data_t *)fnic_buf_head;
+}
+
+/*
+ * fnic_get_trace_data - Copy trace buffer to a memory file
+ * @fnic_dbgfs_prt: pointer to the debugfs trace buffer
+ *
+ * Description:
+ * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
+ * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
+ * the log and process the log until the end of the buffer. Then it will gather
+ * from the beginning of the log and process until the current entry @wr_idx.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into fnic_dbgfs_t
+ */
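+/*
+ * Each formatted line written below carries, in order: the timestamp as
+ * seconds.nanoseconds, the resolved function name, the host number, the
+ * tag, and the five 64-bit data words of the trace entry.
+ */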
+int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
+{
+ int rd_idx;
+ int wr_idx;
+ int len = 0;
+ unsigned long flags;
+ char str[KSYM_SYMBOL_LEN];
+ struct timespec val;
+ fnic_trace_data_t *tbp;
+
+ spin_lock_irqsave(&fnic_trace_lock, flags);
+ rd_idx = fnic_trace_entries.rd_idx;
+ wr_idx = fnic_trace_entries.wr_idx;
+ if (wr_idx < rd_idx) {
+ while (1) {
+ /* Start from read index @rd_idx */
+ tbp = (fnic_trace_data_t *)
+ fnic_trace_entries.page_offset[rd_idx];
+ if (!tbp) {
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return 0;
+ }
+ /* Convert function pointer to function name */
+ if (sizeof(unsigned long) < 8) {
+ sprint_symbol(str, tbp->fnaddr.low);
+ jiffies_to_timespec(tbp->timestamp.low, &val);
+ } else {
+ sprint_symbol(str, tbp->fnaddr.val);
+ jiffies_to_timespec(tbp->timestamp.val, &val);
+ }
+ /*
+ * Dump trace buffer entry to memory file
+ * and increment read index @rd_idx
+ */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (trace_max_pages * PAGE_SIZE * 3) - len,
+ "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", val.tv_sec,
+ val.tv_nsec, str, tbp->host_no, tbp->tag,
+ tbp->data[0], tbp->data[1], tbp->data[2],
+ tbp->data[3], tbp->data[4]);
+ rd_idx++;
+ /*
+ * If rd_idx has reached the maximum number of trace entries,
+ * wrap rd_idx back to zero
+ */
+ if (rd_idx > (fnic_max_trace_entries-1))
+ rd_idx = 0;
+ /*
+ * Continue dumping trace buffer entries into the
+ * memory file till rd_idx reaches the write index
+ */
+ if (rd_idx == wr_idx)
+ break;
+ }
+ } else if (wr_idx > rd_idx) {
+ while (1) {
+ /* Start from read index @rd_idx */
+ tbp = (fnic_trace_data_t *)
+ fnic_trace_entries.page_offset[rd_idx];
+ if (!tbp) {
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return 0;
+ }
+ /* Convert function pointer to function name */
+ if (sizeof(unsigned long) < 8) {
+ sprint_symbol(str, tbp->fnaddr.low);
+ jiffies_to_timespec(tbp->timestamp.low, &val);
+ } else {
+ sprint_symbol(str, tbp->fnaddr.val);
+ jiffies_to_timespec(tbp->timestamp.val, &val);
+ }
+ /*
+ * Dump trace buffer entry to memory file
+ * and increment read index @rd_idx
+ */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (trace_max_pages * PAGE_SIZE * 3) - len,
+ "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", val.tv_sec,
+ val.tv_nsec, str, tbp->host_no, tbp->tag,
+ tbp->data[0], tbp->data[1], tbp->data[2],
+ tbp->data[3], tbp->data[4]);
+ rd_idx++;
+ /*
+ * Continue dumping trace buffer entries into the
+ * memory file till rd_idx reaches the write index
+ */
+ if (rd_idx == wr_idx)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return len;
+}
+
+/*
+ * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
+ *
+ * Description:
+ * Initialize the trace buffer data structure by allocating the required
+ * memory and setting the page_offset information for every trace entry:
+ * each page_offset is the previous page_offset plus the trace entry length.
+ */
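+/*
+ * Sizing sketch (assuming 4 KB pages): with the default
+ * fnic_trace_max_pages of 16, the trace buffer is 16 * 4096 = 64 KB,
+ * which at FNIC_ENTRY_SIZE_BYTES (64) per entry gives 1024 trace entries.
+ */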
+int fnic_trace_buf_init(void)
+{
+ unsigned long fnic_buf_head;
+ int i;
+ int err = 0;
+
+ trace_max_pages = fnic_trace_max_pages;
+ fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
+ FNIC_ENTRY_SIZE_BYTES;
+
+ fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
+ if (!fnic_trace_buf_p) {
+ printk(KERN_ERR PFX "Failed to allocate memory "
+ "for fnic_trace_buf_p\n");
+ err = -ENOMEM;
+ goto err_fnic_trace_buf_init;
+ }
+ memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
+
+ fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
+ sizeof(unsigned long));
+ if (!fnic_trace_entries.page_offset) {
+ printk(KERN_ERR PFX "Failed to allocate memory for"
+ " page_offset\n");
+ if (fnic_trace_buf_p) {
+ vfree((void *)fnic_trace_buf_p);
+ fnic_trace_buf_p = 0;
+ }
+ err = -ENOMEM;
+ goto err_fnic_trace_buf_init;
+ }
+ memset((void *)fnic_trace_entries.page_offset, 0,
+ (fnic_max_trace_entries * sizeof(unsigned long)));
+ fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
+ fnic_buf_head = fnic_trace_buf_p;
+
+ /*
+ * Set the page_offset field of the fnic_trace_entries struct by
+ * calculating the memory location of every trace entry from the
+ * length of each trace entry
+ */
+ for (i = 0; i < fnic_max_trace_entries; i++) {
+ fnic_trace_entries.page_offset[i] = fnic_buf_head;
+ fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
+ }
+ err = fnic_trace_debugfs_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n");
+ goto err_fnic_trace_debugfs_init;
+ }
+ printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n");
+ return err;
+err_fnic_trace_debugfs_init:
+ fnic_trace_free();
+err_fnic_trace_buf_init:
+ return err;
+}
+
+/*
+ * fnic_trace_free - Free memory of fnic trace data structures.
+ */
+void fnic_trace_free(void)
+{
+ fnic_tracing_enabled = 0;
+ fnic_trace_debugfs_terminate();
+ if (fnic_trace_entries.page_offset) {
+ vfree((void *)fnic_trace_entries.page_offset);
+ fnic_trace_entries.page_offset = NULL;
+ }
+ if (fnic_trace_buf_p) {
+ vfree((void *)fnic_trace_buf_p);
+ fnic_trace_buf_p = 0;
+ }
+ printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
+}
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
new file mode 100644
index 000000000000..cef42b4c4d6c
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __FNIC_TRACE_H__
+#define __FNIC_TRACE_H__
+
+#define FNIC_ENTRY_SIZE_BYTES 64
+
+extern ssize_t simple_read_from_buffer(void __user *to,
+ size_t count,
+ loff_t *ppos,
+ const void *from,
+ size_t available);
+
+extern unsigned int fnic_trace_max_pages;
+extern int fnic_tracing_enabled;
+extern unsigned int trace_max_pages;
+
+typedef struct fnic_trace_dbg {
+ int wr_idx;
+ int rd_idx;
+ unsigned long *page_offset;
+} fnic_trace_dbg_t;
+
+typedef struct fnic_dbgfs {
+ int buffer_len;
+ char *buffer;
+} fnic_dbgfs_t;
+
+struct fnic_trace_data {
+ union {
+ struct {
+ u32 low;
+ u32 high;
+ };
+ u64 val;
+ } timestamp, fnaddr;
+ u32 host_no;
+ u32 tag;
+ u64 data[5];
+} __attribute__((__packed__));
+
+typedef struct fnic_trace_data fnic_trace_data_t;
+
+#define FNIC_TRACE_ENTRY_SIZE \
+ (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
+
+#define FNIC_TRACE(_fn, _hn, _t, _a, _b, _c, _d, _e) \
+ if (unlikely(fnic_tracing_enabled)) { \
+ fnic_trace_data_t *trace_buf = fnic_trace_get_buf(); \
+ if (trace_buf) { \
+ if (sizeof(unsigned long) < 8) { \
+ trace_buf->timestamp.low = jiffies; \
+ trace_buf->fnaddr.low = (u32)(unsigned long)_fn; \
+ } else { \
+ trace_buf->timestamp.val = jiffies; \
+ trace_buf->fnaddr.val = (u64)(unsigned long)_fn; \
+ } \
+ trace_buf->host_no = _hn; \
+ trace_buf->tag = _t; \
+ trace_buf->data[0] = (u64)(unsigned long)_a; \
+ trace_buf->data[1] = (u64)(unsigned long)_b; \
+ trace_buf->data[2] = (u64)(unsigned long)_c; \
+ trace_buf->data[3] = (u64)(unsigned long)_d; \
+ trace_buf->data[4] = (u64)(unsigned long)_e; \
+ } \
+ }
+
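+/*
+ * Illustrative use only (caller and arguments below are hypothetical):
+ * a request path could record an event with up to five data words, e.g.
+ *	FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ *		   sc->request->tag, sc, 0, 0, 0, sc->cmnd[0]);
+ */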
+fnic_trace_data_t *fnic_trace_get_buf(void);
+int fnic_get_trace_data(fnic_dbgfs_t *);
+int fnic_trace_buf_init(void);
+void fnic_trace_free(void);
+int fnic_trace_debugfs_init(void);
+void fnic_trace_debugfs_terminate(void);
+
+#endif
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 599790e41a98..59bceac51a4c 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -1107,14 +1107,8 @@ static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
pci_read_config_word(pdev, PCI_COMMAND, &command);
command |= 6;
pci_write_config_word(pdev, PCI_COMMAND, command);
- if (pci_resource_start(pdev, 8) == 1UL)
- pci_resource_start(pdev, 8) = 0UL;
- i = 0xFEFF0001UL;
- pci_write_config_dword(pdev, PCI_ROM_ADDRESS, i);
- gdth_delay(1);
- pci_write_config_dword(pdev, PCI_ROM_ADDRESS,
- pci_resource_start(pdev, 8));
-
+ gdth_delay(1);
+
dp6m_ptr = ha->brd;
/* Ensure that it is safe to access the non HW portions of DPMEM.
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f338061b5c3..7f4f790a3d71 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -165,7 +165,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type);
@@ -1131,7 +1131,7 @@ clean:
return -ENOMEM;
}
-static void hpsa_map_sg_chain_block(struct ctlr_info *h,
+static int hpsa_map_sg_chain_block(struct ctlr_info *h,
struct CommandList *c)
{
struct SGDescriptor *chain_sg, *chain_block;
@@ -1144,8 +1144,15 @@ static void hpsa_map_sg_chain_block(struct ctlr_info *h,
(c->Header.SGTotal - h->max_cmd_sg_entries);
temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&h->pdev->dev, temp64)) {
+ /* prevent subsequent unmapping */
+ chain_sg->Addr.lower = 0;
+ chain_sg->Addr.upper = 0;
+ return -1;
+ }
chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
+ return 0;
}
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
@@ -1390,7 +1397,7 @@ static void hpsa_pci_unmap(struct pci_dev *pdev,
}
}
-static void hpsa_map_one(struct pci_dev *pdev,
+static int hpsa_map_one(struct pci_dev *pdev,
struct CommandList *cp,
unsigned char *buf,
size_t buflen,
@@ -1401,10 +1408,16 @@ static void hpsa_map_one(struct pci_dev *pdev,
if (buflen == 0 || data_direction == PCI_DMA_NONE) {
cp->Header.SGList = 0;
cp->Header.SGTotal = 0;
- return;
+ return 0;
}
addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
+ if (dma_mapping_error(&pdev->dev, addr64)) {
+ /* Prevent subsequent unmap of something never mapped */
+ cp->Header.SGList = 0;
+ cp->Header.SGTotal = 0;
+ return -1;
+ }
cp->SG[0].Addr.lower =
(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Addr.upper =
@@ -1412,6 +1425,7 @@ static void hpsa_map_one(struct pci_dev *pdev,
cp->SG[0].Len = buflen;
cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
+ return 0;
}
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
@@ -1540,13 +1554,18 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
return -ENOMEM;
}
- fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
+ if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(c);
rc = -1;
}
+out:
cmd_special_free(h, c);
return rc;
}
@@ -1564,7 +1583,9 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
return -ENOMEM;
}
- fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
+ /* fill_cmd can't fail here, no data buffer to map. */
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
+ NULL, 0, 0, scsi3addr, TYPE_MSG);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
@@ -1631,8 +1652,11 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
}
/* address the controller */
memset(scsi3addr, 0, sizeof(scsi3addr));
- fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
- buf, bufsize, 0, scsi3addr, TYPE_CMD);
+ if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
+ buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
if (extended_response)
c->Request.CDB[1] = extended_response;
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
@@ -1642,6 +1666,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
hpsa_scsi_interpret_error(c);
rc = -1;
}
+out:
cmd_special_free(h, c);
return rc;
}
@@ -2105,7 +2130,10 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
if (chained) {
cp->Header.SGList = h->max_cmd_sg_entries;
cp->Header.SGTotal = (u16) (use_sg + 1);
- hpsa_map_sg_chain_block(h, cp);
+ if (hpsa_map_sg_chain_block(h, cp)) {
+ scsi_dma_unmap(cmd);
+ return -1;
+ }
return 0;
}
@@ -2353,8 +2381,9 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
waittime = waittime * 2;
- /* Send the Test Unit Ready */
- fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
+ /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
+ (void) fill_cmd(c, TEST_UNIT_READY, h,
+ NULL, 0, 0, lunaddr, TYPE_CMD);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
@@ -2439,7 +2468,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
return -ENOMEM;
}
- fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG);
+ /* fill_cmd can't fail here, no buffer to map */
+ (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
+ 0, 0, scsi3addr, TYPE_MSG);
if (swizzle)
swizzle_abort_tag(&c->Request.CDB[4]);
hpsa_scsi_do_simple_cmd_core(h, c);
@@ -2928,6 +2959,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
struct CommandList *c;
char *buff = NULL;
union u64bit temp64;
+ int rc = 0;
if (!argp)
return -EINVAL;
@@ -2947,8 +2979,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
/* Copy the data into the buffer we created */
if (copy_from_user(buff, iocommand.buf,
iocommand.buf_size)) {
- kfree(buff);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out_kfree;
}
} else {
memset(buff, 0, iocommand.buf_size);
@@ -2956,8 +2988,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
c = cmd_special_alloc(h);
if (c == NULL) {
- kfree(buff);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_kfree;
}
/* Fill in the command type */
c->cmd_type = CMD_IOCTL_PEND;
@@ -2982,6 +3014,13 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
if (iocommand.buf_size > 0) {
temp64.val = pci_map_single(h->pdev, buff,
iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+ c->SG[0].Addr.lower = 0;
+ c->SG[0].Addr.upper = 0;
+ c->SG[0].Len = 0;
+ rc = -ENOMEM;
+ goto out;
+ }
c->SG[0].Addr.lower = temp64.val32.lower;
c->SG[0].Addr.upper = temp64.val32.upper;
c->SG[0].Len = iocommand.buf_size;
@@ -2996,22 +3035,22 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
memcpy(&iocommand.error_info, c->err_info,
sizeof(iocommand.error_info));
if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
if (iocommand.Request.Type.Direction == XFER_READ &&
iocommand.buf_size > 0) {
/* Copy the data out of the buffer we created */
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
}
- kfree(buff);
+out:
cmd_special_free(h, c);
- return 0;
+out_kfree:
+ kfree(buff);
+ return rc;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
@@ -3103,6 +3142,15 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
for (i = 0; i < sg_used; i++) {
temp64.val = pci_map_single(h->pdev, buff[i],
buff_size[i], PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+ c->SG[i].Addr.lower = 0;
+ c->SG[i].Addr.upper = 0;
+ c->SG[i].Len = 0;
+ hpsa_pci_unmap(h->pdev, c, i,
+ PCI_DMA_BIDIRECTIONAL);
+ status = -ENOMEM;
+ goto cleanup1;
+ }
c->SG[i].Addr.lower = temp64.val32.lower;
c->SG[i].Addr.upper = temp64.val32.upper;
c->SG[i].Len = buff_size[i];
@@ -3190,7 +3238,8 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
c = cmd_alloc(h);
if (!c)
return -ENOMEM;
- fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+ /* fill_cmd can't fail here, no data buffer to map */
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
RAID_CTLR_LUNID, TYPE_MSG);
c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
c->waiting = NULL;
@@ -3202,7 +3251,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
return 0;
}
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type)
{
@@ -3271,7 +3320,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
default:
dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
BUG();
- return;
+ return -1;
}
} else if (cmd_type == TYPE_MSG) {
switch (cmd) {
@@ -3343,10 +3392,9 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
default:
pci_dir = PCI_DMA_BIDIRECTIONAL;
}
-
- hpsa_map_one(h->pdev, c, buff, size, pci_dir);
-
- return;
+ if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+ return -1;
+ return 0;
}
/*
@@ -4882,10 +4930,13 @@ static void hpsa_flush_cache(struct ctlr_info *h)
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
goto out_of_memory;
}
- fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
- RAID_CTLR_LUNID, TYPE_CMD);
+ if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
+ RAID_CTLR_LUNID, TYPE_CMD)) {
+ goto out;
+ }
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
if (c->err_info->CommandStatus != 0)
+out:
dev_warn(&h->pdev->dev,
"error flushing cache on controller\n");
cmd_special_free(h, c);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 1d7da3f41ebb..f328089a1060 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -98,6 +98,7 @@ static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
+static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
@@ -107,6 +108,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
.max_cmds = 100,
.cache_line_size = 0x20,
.clear_isr = 1,
+ .iopoll_weight = 0,
{
.set_interrupt_mask_reg = 0x0022C,
.clr_interrupt_mask_reg = 0x00230,
@@ -131,6 +133,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
.max_cmds = 100,
.cache_line_size = 0x20,
.clear_isr = 1,
+ .iopoll_weight = 0,
{
.set_interrupt_mask_reg = 0x00288,
.clr_interrupt_mask_reg = 0x0028C,
@@ -155,6 +158,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
.max_cmds = 1000,
.cache_line_size = 0x20,
.clear_isr = 0,
+ .iopoll_weight = 64,
{
.set_interrupt_mask_reg = 0x00010,
.clr_interrupt_mask_reg = 0x00018,
@@ -215,6 +219,8 @@ MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to e
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
"[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
+module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
+MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
@@ -549,7 +555,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
struct ipr_trace_entry *trace_entry;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
+ trace_entry = &ioa_cfg->trace[atomic_add_return(1, &ioa_cfg->trace_index) %
+ IPR_NUM_TRACE_ENTRIES];
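+ /* The index is advanced atomically and wrapped in place so concurrent
+ * completion paths do not race on trace_index. */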
trace_entry->time = jiffies;
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
trace_entry->type = type;
@@ -560,6 +567,7 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
trace_entry->u.add_data = add_data;
+ wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
@@ -595,8 +603,11 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
dma_addr_t dma_addr = ipr_cmd->dma_addr;
+ int hrrq_id;
+ hrrq_id = ioarcb->cmd_pkt.hrrq_id;
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
+ ioarcb->cmd_pkt.hrrq_id = hrrq_id;
ioarcb->data_transfer_length = 0;
ioarcb->read_data_transfer_length = 0;
ioarcb->ioadl_len = 0;
@@ -646,12 +657,16 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
* pointer to ipr command struct
**/
static
-struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
- struct ipr_cmnd *ipr_cmd;
+ struct ipr_cmnd *ipr_cmd = NULL;
+
+ if (likely(!list_empty(&hrrq->hrrq_free_q))) {
+ ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
+ struct ipr_cmnd, queue);
+ list_del(&ipr_cmd->queue);
+ }
- ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
- list_del(&ipr_cmd->queue);
return ipr_cmd;
}
@@ -666,7 +681,8 @@ struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
- struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+ struct ipr_cmnd *ipr_cmd =
+ __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
return ipr_cmd;
}
@@ -686,9 +702,15 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
u32 clr_ints)
{
volatile u32 int_reg;
+ int i;
/* Stop new interrupts */
- ioa_cfg->allow_interrupts = 0;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
/* Set interrupt mask to stop all new interrupts */
if (ioa_cfg->sis64)
@@ -761,13 +783,12 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
**/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ata_queued_cmd *qc = ipr_cmd->qc;
struct ipr_sata_port *sata_port = qc->ap->private_data;
qc->err_mask |= AC_ERR_OTHER;
sata_port->ioasa.status |= ATA_BUSY;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ata_qc_complete(qc);
}
@@ -783,14 +804,13 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
**/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
scsi_cmd->result |= (DID_ERROR << 16);
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -805,24 +825,32 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
struct ipr_cmnd *ipr_cmd, *temp;
+ struct ipr_hrr_queue *hrrq;
ENTER;
- list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
- list_del(&ipr_cmd->queue);
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry_safe(ipr_cmd,
+ temp, &hrrq->hrrq_pending_q, queue) {
+ list_del(&ipr_cmd->queue);
- ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
- ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
+ ipr_cmd->s.ioasa.hdr.ioasc =
+ cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
+ ipr_cmd->s.ioasa.hdr.ilid =
+ cpu_to_be32(IPR_DRIVER_ILID);
- if (ipr_cmd->scsi_cmd)
- ipr_cmd->done = ipr_scsi_eh_done;
- else if (ipr_cmd->qc)
- ipr_cmd->done = ipr_sata_eh_done;
+ if (ipr_cmd->scsi_cmd)
+ ipr_cmd->done = ipr_scsi_eh_done;
+ else if (ipr_cmd->qc)
+ ipr_cmd->done = ipr_sata_eh_done;
- ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
- del_timer(&ipr_cmd->timer);
- ipr_cmd->done(ipr_cmd);
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
+ IPR_IOASC_IOA_WAS_RESET);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->done(ipr_cmd);
+ }
+ spin_unlock(&hrrq->_lock);
}
-
LEAVE;
}
@@ -872,9 +900,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
void (*done) (struct ipr_cmnd *),
void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = done;
@@ -975,6 +1001,14 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
spin_lock_irq(ioa_cfg->host->host_lock);
}
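+/**
+ * ipr_get_hrrq_index - Select a host RRQ for a new command
+ * @ioa_cfg: ioa config struct
+ *
+ * HRRQ 0 (IPR_INIT_HRRQ) is kept for internal and initialization commands;
+ * when more than one queue is configured, I/O is spread round-robin over
+ * queues 1..hrrq_num-1.
+ *
+ * Return value:
+ * index of the host RRQ to use
+ **/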
+static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+{
+ if (ioa_cfg->hrrq_num == 1)
+ return 0;
+ else
+ return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+}
+
/**
* ipr_send_hcam - Send an HCAM to the adapter.
* @ioa_cfg: ioa config struct
@@ -994,9 +1028,9 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
- if (ioa_cfg->allow_cmds) {
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
ipr_cmd->u.hostrcb = hostrcb;
@@ -1166,14 +1200,15 @@ static int ipr_is_same_device(struct ipr_resource_entry *res,
}
/**
- * ipr_format_res_path - Format the resource path for printing.
+ * __ipr_format_res_path - Format the resource path for printing.
* @res_path: resource path
* @buf: buffer
+ * @len: length of buffer provided
*
* Return value:
* pointer to buffer
**/
-static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
+static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
int i;
char *p = buffer;
@@ -1187,6 +1222,27 @@ static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
}
/**
+ * ipr_format_res_path - Format the resource path for printing.
+ * @ioa_cfg: ioa config struct
+ * @res_path: resource path
+ * @buf: buffer
+ * @len: length of buffer provided
+ *
+ * Return value:
+ * pointer to buffer
+ **/
+static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
+ u8 *res_path, char *buffer, int len)
+{
+ char *p = buffer;
+
+ *p = '\0';
+ p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
+ __ipr_format_res_path(res_path, p, len - (p - buffer));
+ return buffer;
+}
+
+/**
* ipr_update_res_entry - Update the resource entry.
* @res: resource entry struct
* @cfgtew: config table entry wrapper struct
@@ -1226,8 +1282,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
if (res->sdev && new_path)
sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
- ipr_format_res_path(res->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(res->ioa_cfg,
+ res->res_path, buffer, sizeof(buffer)));
} else {
res->flags = cfgtew->u.cfgte->flags;
if (res->flags & IPR_IS_IOA_RESOURCE)
@@ -1363,7 +1419,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
list_del(&hostrcb->queue);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ioasc) {
if (ioasc != IPR_IOASC_IOA_WAS_RESET)
@@ -1613,8 +1669,8 @@ static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err_separator;
ipr_err("Device %d : %s", i + 1,
- ipr_format_res_path(dev_entry->res_path, buffer,
- sizeof(buffer)));
+ __ipr_format_res_path(dev_entry->res_path,
+ buffer, sizeof(buffer)));
ipr_log_ext_vpd(&dev_entry->vpd);
ipr_err("-----New Device Information-----\n");
@@ -1960,14 +2016,16 @@ static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
path_active_desc[i].desc, path_state_desc[j].desc,
- ipr_format_res_path(fabric->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ fabric->res_path,
+ buffer, sizeof(buffer)));
return;
}
}
ipr_err("Path state=%02X Resource Path=%s\n", path_state,
- ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
+ ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
+ buffer, sizeof(buffer)));
}
static const struct {
@@ -2108,18 +2166,20 @@ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
path_status_desc[j].desc, path_type_desc[i].desc,
- ipr_format_res_path(cfg->res_path, buffer,
- sizeof(buffer)),
- link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
- be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ cfg->res_path, buffer, sizeof(buffer)),
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]),
+ be32_to_cpu(cfg->wwid[1]));
return;
}
}
ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
"WWN=%08X%08X\n", cfg->type_status,
- ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
- link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
- be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ cfg->res_path, buffer, sizeof(buffer)),
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
@@ -2182,7 +2242,8 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err("RAID %s Array Configuration: %s\n",
error->protection_level,
- ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg, error->last_res_path,
+ buffer, sizeof(buffer)));
ipr_err_separator;
@@ -2203,11 +2264,12 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err("Array Member %d:\n", i);
ipr_log_ext_vpd(&array_entry->vpd);
ipr_err("Current Location: %s\n",
- ipr_format_res_path(array_entry->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg, array_entry->res_path,
+ buffer, sizeof(buffer)));
ipr_err("Expected Location: %s\n",
- ipr_format_res_path(array_entry->expected_res_path,
- buffer, sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg,
+ array_entry->expected_res_path,
+ buffer, sizeof(buffer)));
ipr_err_separator;
}
@@ -2409,7 +2471,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
list_del(&hostrcb->queue);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (!ioasc) {
ipr_handle_log_data(ioa_cfg, hostrcb);
@@ -2491,36 +2553,6 @@ static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
}
/**
- * ipr_reset_reload - Reset/Reload the IOA
- * @ioa_cfg: ioa config struct
- * @shutdown_type: shutdown type
- *
- * This function resets the adapter and re-initializes it.
- * This function assumes that all new host commands have been stopped.
- * Return value:
- * SUCCESS / FAILED
- **/
-static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
- enum ipr_shutdown_type shutdown_type)
-{
- if (!ioa_cfg->in_reset_reload)
- ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
-
- spin_unlock_irq(ioa_cfg->host->host_lock);
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- spin_lock_irq(ioa_cfg->host->host_lock);
-
- /* If we got hit with a host reset while we were already resetting
- the adapter for some reason, and the reset failed. */
- if (ioa_cfg->ioa_is_dead) {
- ipr_trace;
- return FAILED;
- }
-
- return SUCCESS;
-}
-
-/**
* ipr_find_ses_entry - Find matching SES in SES table
* @res: resource entry struct of SES
*
@@ -3153,7 +3185,8 @@ static void ipr_worker_thread(struct work_struct *work)
restart:
do {
did_work = 0;
- if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
+ !ioa_cfg->allow_ml_add_del) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return;
}
@@ -3401,7 +3434,7 @@ static ssize_t ipr_show_adapter_state(struct device *dev,
int len;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->ioa_is_dead)
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
len = snprintf(buf, PAGE_SIZE, "offline\n");
else
len = snprintf(buf, PAGE_SIZE, "online\n");
@@ -3427,14 +3460,20 @@ static ssize_t ipr_store_adapter_state(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags;
- int result = count;
+ int result = count, i;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
- ioa_cfg->ioa_is_dead = 0;
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
+ !strncmp(buf, "online", 6)) {
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].ioa_is_dead = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ioa_cfg->reset_retries = 0;
ioa_cfg->in_ioa_bringdown = 0;
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
@@ -3494,6 +3533,95 @@ static struct device_attribute ipr_ioa_reset_attr = {
.store = ipr_store_reset_adapter
};
+static int ipr_iopoll(struct blk_iopoll *iop, int budget);
+ /**
+ * ipr_show_iopoll_weight - Show ipr polling mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_iopoll_weight(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags = 0;
+ int len;
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+ return len;
+}
+
+/**
+ * ipr_store_iopoll_weight - Change the adapter's polling mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes consumed from the buffer
+ **/
+static ssize_t ipr_store_iopoll_weight(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long user_iopoll_weight;
+ unsigned long lock_flags = 0;
+ int i;
+
+ if (!ioa_cfg->sis64) {
+ dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
+ return -EINVAL;
+ }
+ if (kstrtoul(buf, 10, &user_iopoll_weight))
+ return -EINVAL;
+
+ if (user_iopoll_weight > 256) {
+ dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
+ return -EINVAL;
+ }
+
+ if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
+ dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
+ return strlen(buf);
+ }
+
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++)
+ blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+ }
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ ioa_cfg->iopoll_weight = user_iopoll_weight;
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+ blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+ ioa_cfg->iopoll_weight, ipr_iopoll);
+ blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+ return strlen(buf);
+}
+
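+/*
+ * Exposed as a Scsi_Host sysfs attribute; an illustrative (hypothetical)
+ * invocation would be:
+ *	echo 64 > /sys/class/scsi_host/host0/iopoll_weight
+ */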
+static struct device_attribute ipr_iopoll_weight_attr = {
+ .attr = {
+ .name = "iopoll_weight",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = ipr_show_iopoll_weight,
+ .store = ipr_store_iopoll_weight
+};
+
/**
* ipr_alloc_ucode_buffer - Allocates a microcode download buffer
* @buf_len: buffer length
@@ -3862,6 +3990,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
&ipr_ioa_reset_attr,
&ipr_update_fw_attr,
&ipr_ioa_fw_type_attr,
+ &ipr_iopoll_weight_attr,
NULL,
};
@@ -4014,7 +4143,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg->dump = dump;
ioa_cfg->sdt_state = WAIT_FOR_DUMP;
- if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
ioa_cfg->dump_taken = 1;
schedule_work(&ioa_cfg->work_q);
}
@@ -4227,8 +4356,8 @@ static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribut
res = (struct ipr_resource_entry *)sdev->hostdata;
if (res && ioa_cfg->sis64)
len = snprintf(buf, PAGE_SIZE, "%s\n",
- ipr_format_res_path(res->res_path, buffer,
- sizeof(buffer)));
+ __ipr_format_res_path(res->res_path, buffer,
+ sizeof(buffer)));
else if (res)
len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
res->bus, res->target, res->lun);
@@ -4556,8 +4685,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
if (ioa_cfg->sis64)
sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
- ipr_format_res_path(res->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg,
+ res->res_path, buffer, sizeof(buffer)));
return 0;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -4638,22 +4767,18 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
return rc;
}
-/**
- * ipr_eh_host_reset - Reset the host adapter
- * @scsi_cmd: scsi command struct
- *
- * Return value:
- * SUCCESS / FAILED
- **/
-static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
+static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
- int rc;
+ unsigned long lock_flags = 0;
+ int rc = SUCCESS;
ENTER;
- ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
if (!ioa_cfg->in_reset_reload) {
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
dev_err(&ioa_cfg->pdev->dev,
"Adapter being reset as a result of error recovery.\n");
@@ -4661,20 +4786,19 @@ static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
ioa_cfg->sdt_state = GET_DUMP;
}
- rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
-
- LEAVE;
- return rc;
-}
-
-static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
-{
- int rc;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- spin_lock_irq(cmd->device->host->host_lock);
- rc = __ipr_eh_host_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
+ /* If we got hit with a host reset while we were already resetting
+ the adapter for some reason, and the reset failed. */
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
+ ipr_trace;
+ rc = FAILED;
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
return rc;
}
@@ -4723,7 +4847,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
if (ipr_cmd->ioa_cfg->sis64)
memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
@@ -4793,6 +4917,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
struct ipr_resource_entry *res;
struct ata_port *ap;
int rc = 0;
+ struct ipr_hrr_queue *hrrq;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -4808,22 +4933,26 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
*/
if (ioa_cfg->in_reset_reload)
return FAILED;
- if (ioa_cfg->ioa_is_dead)
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return FAILED;
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
- if (ipr_cmd->scsi_cmd)
- ipr_cmd->done = ipr_scsi_eh_done;
- if (ipr_cmd->qc)
- ipr_cmd->done = ipr_sata_eh_done;
- if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
- ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
- ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
+ if (ipr_cmd->scsi_cmd)
+ ipr_cmd->done = ipr_scsi_eh_done;
+ if (ipr_cmd->qc)
+ ipr_cmd->done = ipr_sata_eh_done;
+ if (ipr_cmd->qc &&
+ !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+ ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
+ ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+ }
}
}
+ spin_unlock(&hrrq->_lock);
}
-
res->resetting_device = 1;
scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
@@ -4833,11 +4962,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
ata_std_error_handler(ap);
spin_lock_irq(scsi_cmd->device->host->host_lock);
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
- rc = -EIO;
- break;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd,
+ &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->ioarcb.res_handle ==
+ res->res_handle) {
+ rc = -EIO;
+ break;
+ }
}
+ spin_unlock(&hrrq->_lock);
}
} else
rc = ipr_device_reset(ioa_cfg, res);
@@ -4890,7 +5025,7 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
else
ipr_cmd->sibling->done(ipr_cmd->sibling);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
LEAVE;
}
@@ -4951,6 +5086,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
struct ipr_cmd_pkt *cmd_pkt;
u32 ioasc, int_reg;
int op_found = 0;
+ struct ipr_hrr_queue *hrrq;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
@@ -4960,7 +5096,8 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
* This will force the mid-layer to call ipr_eh_host_reset,
* which will then go to sleep and wait for the reset to complete
*/
- if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
+ if (ioa_cfg->in_reset_reload ||
+ ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return FAILED;
if (!res)
return FAILED;
@@ -4975,12 +5112,16 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
if (!ipr_is_gscsi(res))
return FAILED;
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->scsi_cmd == scsi_cmd) {
- ipr_cmd->done = ipr_scsi_eh_done;
- op_found = 1;
- break;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->scsi_cmd == scsi_cmd) {
+ ipr_cmd->done = ipr_scsi_eh_done;
+ op_found = 1;
+ break;
+ }
}
+ spin_unlock(&hrrq->_lock);
}
if (!op_found)
@@ -5007,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
ipr_trace;
}
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
if (!ipr_is_naca_model(res))
res->needs_sync_complete = 1;
@@ -5099,6 +5240,9 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
} else {
if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
+ else if (int_reg & IPR_PCII_NO_HOST_RRQ)
+ dev_err(&ioa_cfg->pdev->dev,
+ "No Host RRQ. 0x%08X\n", int_reg);
else
dev_err(&ioa_cfg->pdev->dev,
"Permanent IOA failure. 0x%08X\n", int_reg);
@@ -5121,10 +5265,10 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
* Return value:
* none
**/
-static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
+static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
ioa_cfg->errors_logged++;
- dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
+ dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
ioa_cfg->sdt_state = GET_DUMP;
@@ -5132,6 +5276,83 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}
+static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
+ struct list_head *doneq)
+{
+ u32 ioasc;
+ u16 cmd_index;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
+ int num_hrrq = 0;
+
+ /* If interrupts are disabled, ignore the interrupt */
+ if (!hrr_queue->allow_interrupts)
+ return 0;
+
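+ /*
+ * Reap completed responses until the toggle bit shows the queue is
+ * empty or, when @budget is positive, until the budget is consumed
+ * (the hard interrupt path passes -1 for an unbounded pass).
+ */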
+ while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrr_queue->toggle_bit) {
+
+ cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
+ IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
+ IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
+
+ if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
+ cmd_index < hrr_queue->min_cmd_id)) {
+ ipr_isr_eh(ioa_cfg,
+ "Invalid response handle from IOA: ",
+ cmd_index);
+ break;
+ }
+
+ ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
+ ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
+
+ list_move_tail(&ipr_cmd->queue, doneq);
+
+ if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
+ hrr_queue->hrrq_curr++;
+ } else {
+ hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
+ hrr_queue->toggle_bit ^= 1u;
+ }
+ num_hrrq++;
+ if (budget > 0 && num_hrrq >= budget)
+ break;
+ }
+
+ return num_hrrq;
+}
+
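+/**
+ * ipr_iopoll - blk-iopoll callback for a host RRQ
+ * @iop: blk_iopoll struct embedded in the HRRQ
+ * @budget: maximum number of completions to process in one pass
+ *
+ * Return value:
+ * number of completed operations processed
+ **/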
+static int ipr_iopoll(struct blk_iopoll *iop, int budget)
+{
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct ipr_hrr_queue *hrrq;
+ struct ipr_cmnd *ipr_cmd, *temp;
+ unsigned long hrrq_flags;
+ int completed_ops;
+ LIST_HEAD(doneq);
+
+ hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
+ ioa_cfg = hrrq->ioa_cfg;
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
+
+ if (completed_ops < budget)
+ blk_iopoll_complete(iop);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
+
+ return completed_ops;
+}
+
/**
* ipr_isr - Interrupt service routine
* @irq: irq number
@@ -5142,78 +5363,48 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
**/
static irqreturn_t ipr_isr(int irq, void *devp)
{
- struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
- unsigned long lock_flags = 0;
+ struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+ struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+ unsigned long hrrq_flags = 0;
u32 int_reg = 0;
- u32 ioasc;
- u16 cmd_index;
int num_hrrq = 0;
int irq_none = 0;
struct ipr_cmnd *ipr_cmd, *temp;
irqreturn_t rc = IRQ_NONE;
LIST_HEAD(doneq);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
/* If interrupts are disabled, ignore the interrupt */
- if (!ioa_cfg->allow_interrupts) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ if (!hrrq->allow_interrupts) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return IRQ_NONE;
}
while (1) {
- ipr_cmd = NULL;
-
- while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
- ioa_cfg->toggle_bit) {
-
- cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
- IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
-
- if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
- ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
- rc = IRQ_HANDLED;
- goto unlock_out;
- }
-
- ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
-
- ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-
- ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
-
- list_move_tail(&ipr_cmd->queue, &doneq);
-
- rc = IRQ_HANDLED;
-
- if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
- ioa_cfg->hrrq_curr++;
- } else {
- ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
- ioa_cfg->toggle_bit ^= 1u;
- }
- }
+ if (ipr_process_hrrq(hrrq, -1, &doneq)) {
+ rc = IRQ_HANDLED;
- if (ipr_cmd && !ioa_cfg->clear_isr)
- break;
+ if (!ioa_cfg->clear_isr)
+ break;
- if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
num_hrrq = 0;
do {
- writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+ writel(IPR_PCII_HRRQ_UPDATED,
+ ioa_cfg->regs.clr_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
- num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
+ num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
} else if (rc == IRQ_NONE && irq_none == 0) {
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
irq_none++;
} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
int_reg & IPR_PCII_HRRQ_UPDATED) {
- ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+ ipr_isr_eh(ioa_cfg,
+ "Error clearing HRRQ: ", num_hrrq);
rc = IRQ_HANDLED;
- goto unlock_out;
+ break;
} else
break;
}
@@ -5221,14 +5412,64 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (unlikely(rc == IRQ_NONE))
rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
-unlock_out:
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
list_del(&ipr_cmd->queue);
del_timer(&ipr_cmd->timer);
ipr_cmd->fast_done(ipr_cmd);
}
+ return rc;
+}
+
+/**
+ * ipr_isr_mhrrq - Interrupt service routine
+ * @irq: irq number
+ * @devp: pointer to ioa config struct
+ *
+ * Return value:
+ * IRQ_NONE / IRQ_HANDLED
+ **/
+static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
+{
+ struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+ struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+ unsigned long hrrq_flags = 0;
+ struct ipr_cmnd *ipr_cmd, *temp;
+ irqreturn_t rc = IRQ_NONE;
+ LIST_HEAD(doneq);
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+
+ /* If interrupts are disabled, ignore the interrupt */
+ if (!hrrq->allow_interrupts) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return IRQ_NONE;
+ }
+
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrrq->toggle_bit) {
+ if (!blk_iopoll_sched_prep(&hrrq->iopoll))
+ blk_iopoll_sched(&hrrq->iopoll);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return IRQ_HANDLED;
+ }
+ } else {
+ if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrrq->toggle_bit)
+ if (ipr_process_hrrq(hrrq, -1, &doneq))
+ rc = IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
return rc;
}
@@ -5388,7 +5629,6 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
@@ -5406,7 +5646,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->in_erp = 0;
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
@@ -5790,7 +6030,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
@@ -5809,21 +6049,21 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- unsigned long lock_flags;
+ unsigned long hrrq_flags;
scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
scsi_dma_unmap(scsi_cmd);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
} else {
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
ipr_erp_start(ioa_cfg, ipr_cmd);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
}
}
@@ -5846,22 +6086,34 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
struct ipr_resource_entry *res;
struct ipr_ioarcb *ioarcb;
struct ipr_cmnd *ipr_cmd;
- unsigned long lock_flags;
+ unsigned long hrrq_flags, lock_flags;
int rc;
+ struct ipr_hrr_queue *hrrq;
+ int hrrq_id;
ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
- spin_lock_irqsave(shost->host_lock, lock_flags);
scsi_cmd->result = (DID_OK << 16);
res = scsi_cmd->device->hostdata;
+ if (ipr_is_gata(res) && res->sata_port) {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return rc;
+ }
+
+ hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+ hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
/*
* We are currently blocking all devices due to a host reset
* We have told the host to stop giving us new requests, but
* ERP ops don't count. FIXME
*/
- if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -5869,19 +6121,17 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
* FIXME - Create scsi_set_host_offline interface
* and the ioa_is_dead check can be removed
*/
- if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
goto err_nodev;
}
- if (ipr_is_gata(res) && res->sata_port) {
- rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
- return rc;
+ ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+ if (ipr_cmd == NULL) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
}
-
- ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
ioarcb = &ipr_cmd->ioarcb;
@@ -5902,26 +6152,27 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
}
if (scsi_cmd->cmnd[0] >= 0xC0 &&
- (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
+ (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ }
if (ioa_cfg->sis64)
rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
else
rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
- spin_lock_irqsave(shost->host_lock, lock_flags);
- if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
if (!rc)
scsi_dma_unmap(scsi_cmd);
return SCSI_MLQUEUE_HOST_BUSY;
}
- if (unlikely(ioa_cfg->ioa_is_dead)) {
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (unlikely(hrrq->ioa_is_dead)) {
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
scsi_dma_unmap(scsi_cmd);
goto err_nodev;
}
@@ -5931,18 +6182,18 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
res->needs_sync_complete = 0;
}
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
ipr_send_command(ipr_cmd);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return 0;
err_nodev:
- spin_lock_irqsave(shost->host_lock, lock_flags);
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
scsi_cmd->result = (DID_NO_CONNECT << 16);
scsi_cmd->scsi_done(scsi_cmd);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return 0;
}
@@ -6040,7 +6291,7 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
- if (!ioa_cfg->allow_cmds)
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
goto out_unlock;
rc = ipr_device_reset(ioa_cfg, res);
@@ -6071,6 +6322,7 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
struct ipr_sata_port *sata_port = qc->ap->private_data;
struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
struct ipr_cmnd *ipr_cmd;
+ struct ipr_hrr_queue *hrrq;
unsigned long flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@ -6080,11 +6332,15 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->qc == qc) {
- ipr_device_reset(ioa_cfg, sata_port->res);
- break;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->qc == qc) {
+ ipr_device_reset(ioa_cfg, sata_port->res);
+ break;
+ }
}
+ spin_unlock(&hrrq->_lock);
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -6133,6 +6389,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
struct ipr_resource_entry *res = sata_port->res;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+ spin_lock(&ipr_cmd->hrrq->_lock);
if (ipr_cmd->ioa_cfg->sis64)
memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
sizeof(struct ipr_ioasa_gata));
@@ -6148,7 +6405,8 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
else
qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
ata_qc_complete(qc);
}
@@ -6244,6 +6502,48 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
}
/**
+ * ipr_qc_defer - Get a free ipr_cmd
+ * @qc: queued command
+ *
+ * Return value:
+ * 0 if the command may be issued / ATA_DEFER_LINK if it should be deferred
+ **/
+static int ipr_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ipr_sata_port *sata_port = ap->private_data;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_hrr_queue *hrrq;
+ int hrrq_id;
+
+ hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+ hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+ qc->lldd_task = NULL;
+ spin_lock(&hrrq->_lock);
+ if (unlikely(hrrq->ioa_is_dead)) {
+ spin_unlock(&hrrq->_lock);
+ return 0;
+ }
+
+ if (unlikely(!hrrq->allow_cmds)) {
+ spin_unlock(&hrrq->_lock);
+ return ATA_DEFER_LINK;
+ }
+
+ ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+ if (ipr_cmd == NULL) {
+ spin_unlock(&hrrq->_lock);
+ return ATA_DEFER_LINK;
+ }
+
+ qc->lldd_task = ipr_cmd;
+ spin_unlock(&hrrq->_lock);
+ return 0;
+}
+
+/**
* ipr_qc_issue - Issue a SATA qc to a device
* @qc: queued command
*
@@ -6260,10 +6560,23 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
struct ipr_ioarcb *ioarcb;
struct ipr_ioarcb_ata_regs *regs;
- if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
+ if (qc->lldd_task == NULL)
+ ipr_qc_defer(qc);
+
+ ipr_cmd = qc->lldd_task;
+ if (ipr_cmd == NULL)
return AC_ERR_SYSTEM;
- ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ qc->lldd_task = NULL;
+ spin_lock(&ipr_cmd->hrrq->_lock);
+ if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
+ ipr_cmd->hrrq->ioa_is_dead)) {
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
+ return AC_ERR_SYSTEM;
+ }
+
+ ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
ioarcb = &ipr_cmd->ioarcb;
if (ioa_cfg->sis64) {
@@ -6275,7 +6588,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
memset(regs, 0, sizeof(*regs));
ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->qc = qc;
ipr_cmd->done = ipr_sata_done;
ipr_cmd->ioarcb.res_handle = res->res_handle;
@@ -6315,10 +6628,12 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
default:
WARN_ON(1);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
return AC_ERR_INVALID;
}
ipr_send_command(ipr_cmd);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
return 0;
}
@@ -6357,6 +6672,7 @@ static struct ata_port_operations ipr_sata_ops = {
.hardreset = ipr_sata_reset,
.post_internal_cmd = ipr_ata_post_internal,
.qc_prep = ata_noop_qc_prep,
+ .qc_defer = ipr_qc_defer,
.qc_issue = ipr_qc_issue,
.qc_fill_rtf = ipr_qc_fill_rtf,
.port_start = ata_sas_port_start,
@@ -6425,14 +6741,17 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ENTER;
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ ipr_trace;
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+ }
+
ioa_cfg->in_reset_reload = 0;
ioa_cfg->reset_retries = 0;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
-
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
LEAVE;
return IPR_RC_JOB_RETURN;
@@ -6454,11 +6773,16 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_resource_entry *res;
struct ipr_hostrcb *hostrcb, *temp;
- int i = 0;
+ int i = 0, j;
ENTER;
ioa_cfg->in_reset_reload = 0;
- ioa_cfg->allow_cmds = 1;
+ for (j = 0; j < ioa_cfg->hrrq_num; j++) {
+ spin_lock(&ioa_cfg->hrrq[j]._lock);
+ ioa_cfg->hrrq[j].allow_cmds = 1;
+ spin_unlock(&ioa_cfg->hrrq[j]._lock);
+ }
+ wmb();
ioa_cfg->reset_cmd = NULL;
ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
@@ -6482,14 +6806,14 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
ioa_cfg->reset_retries = 0;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
spin_unlock(ioa_cfg->host->host_lock);
scsi_unblock_requests(ioa_cfg->host);
spin_lock(ioa_cfg->host->host_lock);
- if (!ioa_cfg->allow_cmds)
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
scsi_block_requests(ioa_cfg->host);
LEAVE;
@@ -6560,9 +6884,11 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
if (!ioa_cfg->sis64)
ipr_cmd->job_step = ipr_set_supported_devs;
+ LEAVE;
return IPR_RC_JOB_RETURN;
}
+ LEAVE;
return IPR_RC_JOB_CONTINUE;
}
@@ -6820,7 +7146,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
return IPR_RC_JOB_RETURN;
}
@@ -7278,46 +7604,71 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_hrr_queue *hrrq;
ENTER;
+ ipr_cmd->job_step = ipr_ioafp_std_inquiry;
dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
- ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
- ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
+ hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
- ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
- if (ioa_cfg->sis64)
- ioarcb->cmd_pkt.cdb[1] = 0x1;
- ioarcb->cmd_pkt.cdb[2] =
- ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
- ioarcb->cmd_pkt.cdb[3] =
- ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
- ioarcb->cmd_pkt.cdb[4] =
- ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
- ioarcb->cmd_pkt.cdb[5] =
- ((u64) ioa_cfg->host_rrq_dma) & 0xff;
- ioarcb->cmd_pkt.cdb[7] =
- ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
- ioarcb->cmd_pkt.cdb[8] =
- (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
+ ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
+ ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
- if (ioa_cfg->sis64) {
- ioarcb->cmd_pkt.cdb[10] =
- ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
- ioarcb->cmd_pkt.cdb[11] =
- ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
- ioarcb->cmd_pkt.cdb[12] =
- ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
- ioarcb->cmd_pkt.cdb[13] =
- ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
- }
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ if (ioa_cfg->sis64)
+ ioarcb->cmd_pkt.cdb[1] = 0x1;
- ipr_cmd->job_step = ipr_ioafp_std_inquiry;
+ if (ioa_cfg->nvectors == 1)
+ ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
+ else
+ ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
+
+ ioarcb->cmd_pkt.cdb[2] =
+ ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
+ ioarcb->cmd_pkt.cdb[3] =
+ ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
+ ioarcb->cmd_pkt.cdb[4] =
+ ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[5] =
+ ((u64) hrrq->host_rrq_dma) & 0xff;
+ ioarcb->cmd_pkt.cdb[7] =
+ ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[8] =
+ (sizeof(u32) * hrrq->size) & 0xff;
+
+ if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+ ioarcb->cmd_pkt.cdb[9] =
+ ioa_cfg->identify_hrrq_index;
- ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+ if (ioa_cfg->sis64) {
+ ioarcb->cmd_pkt.cdb[10] =
+ ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
+ ioarcb->cmd_pkt.cdb[11] =
+ ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
+ ioarcb->cmd_pkt.cdb[12] =
+ ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
+ ioarcb->cmd_pkt.cdb[13] =
+ ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
+ }
+
+ if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+ ioarcb->cmd_pkt.cdb[14] =
+ ioa_cfg->identify_hrrq_index;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+ IPR_INTERNAL_TIMEOUT);
+
+ if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
+ ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+ }
LEAVE;
- return IPR_RC_JOB_RETURN;
+ return IPR_RC_JOB_CONTINUE;
}
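The IDENTIFY_HRRQ block above scatters the queue's 64-bit DMA address into individual CDB bytes: bits 31..0 into bytes 2..5, the response-queue length into bytes 7..8, and on SIS-64 bits 63..32 into bytes 10..13. A stand-alone sketch of that packing follows; pack_hrrq_addr() is a hypothetical helper, not a driver function.

#include <stdint.h>
#include <stdio.h>

/* Pack a 64-bit DMA address into CDB bytes the way the hunk above does:
 * bytes 2..5 carry bits 31..0 (big-endian), bytes 10..13 carry bits 63..32. */
static void pack_hrrq_addr(uint8_t cdb[16], uint64_t dma, uint32_t len)
{
	cdb[2] = (dma >> 24) & 0xff;
	cdb[3] = (dma >> 16) & 0xff;
	cdb[4] = (dma >> 8) & 0xff;
	cdb[5] = dma & 0xff;
	cdb[7] = (len >> 8) & 0xff;
	cdb[8] = len & 0xff;
	cdb[10] = (dma >> 56) & 0xff;
	cdb[11] = (dma >> 48) & 0xff;
	cdb[12] = (dma >> 40) & 0xff;
	cdb[13] = (dma >> 32) & 0xff;
}

int main(void)
{
	uint8_t cdb[16] = { 0 };

	pack_hrrq_addr(cdb, 0x0000123456789abcULL, 4 * 256);
	for (int i = 0; i < 16; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}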
/**
@@ -7365,7 +7716,9 @@ static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
unsigned long timeout)
{
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+ ENTER;
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = ipr_reset_ioa_job;
ipr_cmd->timer.data = (unsigned long) ipr_cmd;
@@ -7383,13 +7736,26 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
**/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
- memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
+ struct ipr_hrr_queue *hrrq;
- /* Initialize Host RRQ pointers */
- ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
- ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
- ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
- ioa_cfg->toggle_bit = 1;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
+
+ /* Initialize Host RRQ pointers */
+ hrrq->hrrq_start = hrrq->host_rrq;
+ hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
+ hrrq->hrrq_curr = hrrq->hrrq_start;
+ hrrq->toggle_bit = 1;
+ spin_unlock(&hrrq->_lock);
+ }
+ wmb();
+
+ ioa_cfg->identify_hrrq_index = 0;
+ if (ioa_cfg->hrrq_num == 1)
+ atomic_set(&ioa_cfg->hrrq_index, 0);
+ else
+ atomic_set(&ioa_cfg->hrrq_index, 1);
/* Zero out config table */
memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
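Seeding hrrq_index at 1 when more than one queue exists keeps queue 0 reserved for internal commands while normal I/O round-robins over queues 1..hrrq_num-1. ipr_get_hrrq_index() itself is not part of this hunk, so the selection below is only a sketch of that policy with assumed constants.

#include <stdatomic.h>
#include <stdio.h>

#define NUM_HRRQ 4   /* queue 0 is reserved for internal commands */

static atomic_uint hrrq_index = 1;   /* seeded past the internal queue */

/* Round-robin over queues 1..NUM_HRRQ-1; with a single queue, always 0. */
static unsigned int get_hrrq_index(void)
{
	if (NUM_HRRQ == 1)
		return 0;
	unsigned int idx = atomic_fetch_add(&hrrq_index, 1);
	return 1 + idx % (NUM_HRRQ - 1);
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("%u ", get_hrrq_index());
	printf("\n");   /* 2 3 1 2 3 1 2 3 (starting from the seeded index) */
	return 0;
}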
@@ -7446,7 +7812,8 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
return IPR_RC_JOB_RETURN;
}
@@ -7466,12 +7833,18 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
volatile u32 int_reg;
volatile u64 maskval;
+ int i;
ENTER;
ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
ipr_init_ioa_mem(ioa_cfg);
- ioa_cfg->allow_interrupts = 1;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
if (ioa_cfg->sis64) {
/* Set the adapter to the correct endian mode. */
writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
@@ -7511,7 +7884,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
LEAVE;
return IPR_RC_JOB_RETURN;
@@ -8030,7 +8403,8 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
int rc = IPR_RC_JOB_CONTINUE;
ENTER;
- if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
+ if (shutdown_type != IPR_SHUTDOWN_NONE &&
+ !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
@@ -8078,7 +8452,8 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
* We are doing nested adapter resets and this is
* not the current reset job.
*/
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue,
+ &ipr_cmd->hrrq->hrrq_free_q);
return;
}
@@ -8113,10 +8488,17 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
enum ipr_shutdown_type shutdown_type)
{
struct ipr_cmnd *ipr_cmd;
+ int i;
ioa_cfg->in_reset_reload = 1;
- ioa_cfg->allow_cmds = 0;
- scsi_block_requests(ioa_cfg->host);
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_cmds = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
+ scsi_block_requests(ioa_cfg->host);
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg->reset_cmd = ipr_cmd;
@@ -8141,7 +8523,9 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
enum ipr_shutdown_type shutdown_type)
{
- if (ioa_cfg->ioa_is_dead)
+ int i;
+
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return;
if (ioa_cfg->in_reset_reload) {
@@ -8156,7 +8540,12 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
"IOA taken offline - error recovery failed\n");
ioa_cfg->reset_retries = 0;
- ioa_cfg->ioa_is_dead = 1;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].ioa_is_dead = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
if (ioa_cfg->in_ioa_bringdown) {
ioa_cfg->reset_cmd = NULL;
@@ -8164,9 +8553,11 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
ipr_fail_all_ops(ioa_cfg);
wake_up_all(&ioa_cfg->reset_wait_q);
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+ }
return;
} else {
ioa_cfg->in_ioa_bringdown = 1;
@@ -8188,9 +8579,17 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
*/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int i;
+
/* Disallow new interrupts, avoid loop */
- ipr_cmd->ioa_cfg->allow_interrupts = 0;
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = ipr_reset_ioa_job;
return IPR_RC_JOB_RETURN;
}
@@ -8247,13 +8646,19 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
unsigned long flags = 0;
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+ int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
ioa_cfg->sdt_state = ABORT_DUMP;
ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
ioa_cfg->in_ioa_bringdown = 1;
- ioa_cfg->allow_cmds = 0;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_cmds = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -8310,12 +8715,11 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
} else
_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
IPR_SHUTDOWN_NONE);
-
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
- if (ioa_cfg->ioa_is_dead) {
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
rc = -EIO;
} else if (ipr_invalid_adapter(ioa_cfg)) {
if (!ipr_testmode)
@@ -8376,8 +8780,13 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
ipr_free_cmd_blks(ioa_cfg);
- pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
- ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+
+ for (i = 0; i < ioa_cfg->hrrq_num; i++)
+ pci_free_consistent(ioa_cfg->pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+
pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
ioa_cfg->u.cfg_table,
ioa_cfg->cfg_table_dma);
@@ -8408,8 +8817,23 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
- free_irq(pdev->irq, ioa_cfg);
- pci_disable_msi(pdev);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+ ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ int i;
+ for (i = 0; i < ioa_cfg->nvectors; i++)
+ free_irq(ioa_cfg->vectors_info[i].vec,
+ &ioa_cfg->hrrq[i]);
+ } else
+ free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+ pci_disable_msi(pdev);
+ ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+ } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ pci_disable_msix(pdev);
+ ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+ }
+
iounmap(ioa_cfg->hdw_dma_regs);
pci_release_regions(pdev);
ipr_free_mem(ioa_cfg);
@@ -8430,7 +8854,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
dma_addr_t dma_addr;
- int i;
+ int i, entries_each_hrrq, hrrq_id = 0;
ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
sizeof(struct ipr_cmnd), 512, 0);
@@ -8446,6 +8870,41 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
return -ENOMEM;
}
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ if (ioa_cfg->hrrq_num > 1) {
+ if (i == 0) {
+ entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
+ ioa_cfg->hrrq[i].min_cmd_id = 0;
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (entries_each_hrrq - 1);
+ } else {
+ entries_each_hrrq =
+ IPR_NUM_BASE_CMD_BLKS/
+ (ioa_cfg->hrrq_num - 1);
+ ioa_cfg->hrrq[i].min_cmd_id =
+ IPR_NUM_INTERNAL_CMD_BLKS +
+ (i - 1) * entries_each_hrrq;
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (IPR_NUM_INTERNAL_CMD_BLKS +
+ i * entries_each_hrrq - 1);
+ }
+ } else {
+ entries_each_hrrq = IPR_NUM_CMD_BLKS;
+ ioa_cfg->hrrq[i].min_cmd_id = 0;
+ ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
+ }
+ ioa_cfg->hrrq[i].size = entries_each_hrrq;
+ }
+
+ BUG_ON(ioa_cfg->hrrq_num == 0);
+
+ i = IPR_NUM_CMD_BLKS -
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
+ if (i > 0) {
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
+ }
+
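The carving above gives queue 0 IPR_NUM_INTERNAL_CMD_BLKS entries, splits IPR_NUM_BASE_CMD_BLKS evenly across the remaining queues, and folds the integer-division remainder into the last queue. Here is a worked example with hypothetical block counts; the real constants are defined in ipr.h and are not shown in this hunk.

#include <stdio.h>

/* Hypothetical sizes for illustration only. */
#define NUM_INTERNAL_CMD_BLKS 5
#define NUM_BASE_CMD_BLKS 95
#define NUM_CMD_BLKS (NUM_BASE_CMD_BLKS + NUM_INTERNAL_CMD_BLKS)

int main(void)
{
	int hrrq_num = 4;
	int min_id[8], max_id[8], size[8];

	for (int i = 0; i < hrrq_num; i++) {
		if (i == 0) {               /* internal commands only */
			size[i] = NUM_INTERNAL_CMD_BLKS;
			min_id[i] = 0;
		} else {                    /* even split of the rest */
			size[i] = NUM_BASE_CMD_BLKS / (hrrq_num - 1);
			min_id[i] = NUM_INTERNAL_CMD_BLKS + (i - 1) * size[i];
		}
		max_id[i] = min_id[i] + size[i] - 1;
	}

	/* Fold the division remainder into the last queue. */
	int rem = NUM_CMD_BLKS - max_id[hrrq_num - 1] - 1;
	size[hrrq_num - 1] += rem;
	max_id[hrrq_num - 1] += rem;

	for (int i = 0; i < hrrq_num; i++)
		printf("hrrq %d: ids %d..%d (%d blocks)\n",
		       i, min_id[i], max_id[i], size[i]);
	return 0;
}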
for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
@@ -8484,7 +8943,11 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
ipr_cmd->sense_buffer_dma = dma_addr +
offsetof(struct ipr_cmnd, sense_buffer);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
+ ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
+ hrrq_id++;
}
return 0;
@@ -8516,6 +8979,10 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+
+ if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
+ || !ioa_cfg->vset_ids)
+ goto out_free_res_entries;
}
for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
@@ -8530,15 +8997,34 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
if (!ioa_cfg->vpd_cbs)
goto out_free_res_entries;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+ spin_lock_init(&ioa_cfg->hrrq[i]._lock);
+ if (i == 0)
+ ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
+ else
+ ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
+ }
+
if (ipr_alloc_cmd_blks(ioa_cfg))
goto out_free_vpd_cbs;
- ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
- sizeof(u32) * IPR_NUM_CMD_BLKS,
- &ioa_cfg->host_rrq_dma);
-
- if (!ioa_cfg->host_rrq)
- goto out_ipr_free_cmd_blocks;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ &ioa_cfg->hrrq[i].host_rrq_dma);
+
+ if (!ioa_cfg->hrrq[i].host_rrq) {
+ while (--i >= 0)
+ pci_free_consistent(pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+ goto out_ipr_free_cmd_blocks;
+ }
+ ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
+ }
ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
ioa_cfg->cfg_table_size,
@@ -8582,8 +9068,12 @@ out_free_hostrcb_dma:
ioa_cfg->u.cfg_table,
ioa_cfg->cfg_table_dma);
out_free_host_rrq:
- pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
- ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ pci_free_consistent(pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+ }
out_ipr_free_cmd_blocks:
ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
@@ -8591,6 +9081,9 @@ out_free_vpd_cbs:
ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
kfree(ioa_cfg->res_entries);
+ kfree(ioa_cfg->target_ids);
+ kfree(ioa_cfg->array_ids);
+ kfree(ioa_cfg->vset_ids);
goto out;
}
@@ -8638,15 +9131,11 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->doorbell = IPR_DOORBELL;
sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
- sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
- sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
- INIT_LIST_HEAD(&ioa_cfg->free_q);
- INIT_LIST_HEAD(&ioa_cfg->pending_q);
INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
INIT_LIST_HEAD(&ioa_cfg->free_res_q);
@@ -8724,6 +9213,88 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
return NULL;
}
+static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
+ int i, err, vectors;
+
+ for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ entries[i].entry = i;
+
+ vectors = ipr_number_of_msix;
+
+ while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
+ vectors = err;
+
+ if (err < 0) {
+ pci_disable_msix(ioa_cfg->pdev);
+ return err;
+ }
+
+ if (!err) {
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = entries[i].vector;
+ ioa_cfg->nvectors = vectors;
+ }
+
+ return err;
+}
+
+static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i, err, vectors;
+
+ vectors = ipr_number_of_msix;
+
+ while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
+ vectors = err;
+
+ if (err < 0) {
+ pci_disable_msi(ioa_cfg->pdev);
+ return err;
+ }
+
+ if (!err) {
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+ ioa_cfg->nvectors = vectors;
+ }
+
+ return err;
+}
+
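Both ipr_enable_msix() and ipr_enable_msi() rely on the old pci_enable_msix()/pci_enable_msi_block() contract, where a positive return value means "retry with this many vectors". Below is a stand-alone model of that negotiate-down loop, with a stub in place of the PCI call.

#include <stdio.h>

/* Stub standing in for pci_enable_msix()/pci_enable_msi_block():
 * returns 0 on success, a smaller positive count if fewer vectors are
 * available, or a negative errno on hard failure. */
static int enable_vectors_stub(int requested)
{
	const int available = 3;

	if (requested <= available)
		return 0;
	return available;
}

int main(void)
{
	int vectors = 8, err;

	/* Keep retrying with the count the "hardware" says it can grant. */
	while ((err = enable_vectors_stub(vectors)) > 0)
		vectors = err;

	if (err < 0)
		printf("failed: %d\n", err);
	else
		printf("enabled %d vectors\n", vectors);   /* enabled 3 vectors */
	return 0;
}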
+static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
+
+ for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
+ snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
+ "host%d-%d", ioa_cfg->host->host_no, vec_idx);
+ ioa_cfg->vectors_info[vec_idx].
+ desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
+ }
+}
+
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i, rc;
+
+ for (i = 1; i < ioa_cfg->nvectors; i++) {
+ rc = request_irq(ioa_cfg->vectors_info[i].vec,
+ ipr_isr_mhrrq,
+ 0,
+ ioa_cfg->vectors_info[i].desc,
+ &ioa_cfg->hrrq[i]);
+ if (rc) {
+ while (--i >= 0)
+ free_irq(ioa_cfg->vectors_info[i].vec,
+ &ioa_cfg->hrrq[i]);
+ return rc;
+ }
+ }
+ return 0;
+}
+
/**
* ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
* @pdev: PCI device struct
@@ -8740,6 +9311,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
unsigned long lock_flags = 0;
irqreturn_t rc = IRQ_HANDLED;
+ dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg->msi_received = 1;
@@ -8787,9 +9359,9 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
if (!ioa_cfg->msi_received) {
/* MSI test failed */
dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
@@ -8806,8 +9378,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
return rc;
}
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
* @pdev: PCI device struct
* @dev_id: PCI device id struct
*
@@ -8823,6 +9394,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
void __iomem *ipr_regs;
int rc = PCIBIOS_SUCCESSFUL;
volatile u32 mask, uproc, interrupts;
+ unsigned long lock_flags;
ENTER;
@@ -8918,17 +9490,56 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
goto cleanup_nomem;
}
- /* Enable MSI style interrupts if they are supported. */
- if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
+ if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
+ dev_err(&pdev->dev, "The max number of MSIX is %d\n",
+ IPR_MAX_MSIX_VECTORS);
+ ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
+ }
+
+ if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+ ipr_enable_msix(ioa_cfg) == 0)
+ ioa_cfg->intr_flag = IPR_USE_MSIX;
+ else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+ ipr_enable_msi(ioa_cfg) == 0)
+ ioa_cfg->intr_flag = IPR_USE_MSI;
+ else {
+ ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->nvectors = 1;
+ dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ }
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+ ioa_cfg->intr_flag == IPR_USE_MSIX) {
rc = ipr_test_msi(ioa_cfg, pdev);
- if (rc == -EOPNOTSUPP)
- pci_disable_msi(pdev);
+ if (rc == -EOPNOTSUPP) {
+ if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+ ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+ pci_disable_msi(pdev);
+ } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+ pci_disable_msix(pdev);
+ }
+
+ ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->nvectors = 1;
+ }
else if (rc)
goto out_msi_disable;
- else
- dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
- } else if (ipr_debug)
- dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ else {
+ if (ioa_cfg->intr_flag == IPR_USE_MSI)
+ dev_info(&pdev->dev,
+ "Request for %d MSIs succeeded with starting IRQ: %d\n",
+ ioa_cfg->nvectors, pdev->irq);
+ else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+ dev_info(&pdev->dev,
+ "Request for %d MSIXs succeeded.",
+ ioa_cfg->nvectors);
+ }
+ }
+
+ ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
+ (unsigned int)num_online_cpus(),
+ (unsigned int)IPR_MAX_HRRQ_NUM);
/* Save away PCI config space for use following IOA reset */
rc = pci_save_state(pdev);
@@ -8975,11 +9586,24 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
- rc = request_irq(pdev->irq, ipr_isr,
- ioa_cfg->msi_received ? 0 : IRQF_SHARED,
- IPR_NAME, ioa_cfg);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI
+ || ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ name_msi_vectors(ioa_cfg);
+ rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
+ 0,
+ ioa_cfg->vectors_info[0].desc,
+ &ioa_cfg->hrrq[0]);
+ if (!rc)
+ rc = ipr_request_other_msi_irqs(ioa_cfg);
+ } else {
+ rc = request_irq(pdev->irq, ipr_isr,
+ IRQF_SHARED,
+ IPR_NAME, &ioa_cfg->hrrq[0]);
+ }
if (rc) {
dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
pdev->irq, rc);
@@ -9004,7 +9628,10 @@ out:
cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
- pci_disable_msi(pdev);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI)
+ pci_disable_msi(pdev);
+ else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+ pci_disable_msix(pdev);
cleanup_nomem:
iounmap(ipr_regs);
out_release_regions:
@@ -9074,6 +9701,7 @@ static void __ipr_remove(struct pci_dev *pdev)
{
unsigned long host_lock_flags = 0;
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+ int i;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9083,6 +9711,12 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
}
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].removing_ioa = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9138,7 +9772,7 @@ static void ipr_remove(struct pci_dev *pdev)
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct ipr_ioa_cfg *ioa_cfg;
- int rc;
+ int rc, i;
rc = ipr_probe_ioa(pdev, dev_id);
@@ -9185,6 +9819,17 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
ioa_cfg->allow_ml_add_del = 1;
ioa_cfg->host->max_channel = IPR_VSET_BUS;
+ ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
+
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+ blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+ ioa_cfg->iopoll_weight, ipr_iopoll);
+ blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+ }
+ }
+
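blk_iopoll lets the secondary HRRQs be drained from softirq context with a per-round budget (iopoll_weight) instead of doing all completion work in hard-IRQ context. The sketch below is a rough user-space model of such a budgeted poll loop, not the blk-iopoll API itself.

#include <stdio.h>

static int pending = 23;   /* completions waiting on a queue */

/* Process at most "budget" completions; return how many were done. */
static int poll_queue(int budget)
{
	int done = 0;

	while (pending > 0 && done < budget) {
		pending--;
		done++;
	}
	return done;
}

int main(void)
{
	const int weight = 8;   /* models ioa_cfg->iopoll_weight */
	int rounds = 0, done;

	do {
		done = poll_queue(weight);
		rounds++;
		printf("round %d: completed %d, %d left\n", rounds, done, pending);
	} while (done == weight);   /* budget exhausted: poll again later */

	/* done < weight: queue drained, re-enable the interrupt here */
	return 0;
}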
schedule_work(&ioa_cfg->work_q);
return 0;
}
@@ -9203,8 +9848,16 @@ static void ipr_shutdown(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
unsigned long lock_flags = 0;
+ int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ ioa_cfg->iopoll_weight = 0;
+ for (i = 1; i < ioa_cfg->hrrq_num; i++)
+ blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+ }
+
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
@@ -9277,6 +9930,8 @@ static struct pci_device_id ipr_pci_table[] = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
@@ -9290,6 +9945,14 @@ static struct pci_device_id ipr_pci_table[] = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
@@ -9316,9 +9979,7 @@ static struct pci_driver ipr_driver = {
**/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -9340,7 +10001,7 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- if (!ioa_cfg->allow_cmds) {
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
continue;
}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index c8a137f83bb1..21a6ff1ed5c6 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -32,14 +32,15 @@
#include <linux/libata.h>
#include <linux/list.h>
#include <linux/kref.h>
+#include <linux/blk-iopoll.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.5.4"
-#define IPR_DRIVER_DATE "(July 11, 2012)"
+#define IPR_DRIVER_VERSION "2.6.0"
+#define IPR_DRIVER_DATE "(November 16, 2012)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -82,6 +83,7 @@
#define IPR_SUBS_DEV_ID_57B4 0x033B
#define IPR_SUBS_DEV_ID_57B2 0x035F
+#define IPR_SUBS_DEV_ID_57C0 0x0352
#define IPR_SUBS_DEV_ID_57C3 0x0353
#define IPR_SUBS_DEV_ID_57C4 0x0354
#define IPR_SUBS_DEV_ID_57C6 0x0357
@@ -94,6 +96,10 @@
#define IPR_SUBS_DEV_ID_574D 0x0356
#define IPR_SUBS_DEV_ID_57C8 0x035D
+#define IPR_SUBS_DEV_ID_57D5 0x03FB
+#define IPR_SUBS_DEV_ID_57D6 0x03FC
+#define IPR_SUBS_DEV_ID_57D7 0x03FF
+#define IPR_SUBS_DEV_ID_57D8 0x03FE
#define IPR_NAME "ipr"
/*
@@ -298,6 +304,9 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
* Misc literals
*/
#define IPR_NUM_IOADL_ENTRIES IPR_MAX_SGLIST
+#define IPR_MAX_MSIX_VECTORS 0x5
+#define IPR_MAX_HRRQ_NUM 0x10
+#define IPR_INIT_HRRQ 0x0
/*
* Adapter interface types
@@ -404,7 +413,7 @@ struct ipr_config_table_entry64 {
__be64 dev_id;
__be64 lun;
__be64 lun_wwn[2];
-#define IPR_MAX_RES_PATH_LENGTH 24
+#define IPR_MAX_RES_PATH_LENGTH 48
__be64 res_path;
struct ipr_std_inq_data std_inq_data;
u8 reserved2[4];
@@ -459,9 +468,40 @@ struct ipr_supported_device {
u8 reserved2[16];
}__attribute__((packed, aligned (4)));
+struct ipr_hrr_queue {
+ struct ipr_ioa_cfg *ioa_cfg;
+ __be32 *host_rrq;
+ dma_addr_t host_rrq_dma;
+#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc
+#define IPR_HRRQ_RESP_BIT_SET 0x00000002
+#define IPR_HRRQ_TOGGLE_BIT 0x00000001
+#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
+#define IPR_ID_HRRQ_SELE_ENABLE 0x02
+ volatile __be32 *hrrq_start;
+ volatile __be32 *hrrq_end;
+ volatile __be32 *hrrq_curr;
+
+ struct list_head hrrq_free_q;
+ struct list_head hrrq_pending_q;
+ spinlock_t _lock;
+ spinlock_t *lock;
+
+ volatile u32 toggle_bit;
+ u32 size;
+ u32 min_cmd_id;
+ u32 max_cmd_id;
+ u8 allow_interrupts:1;
+ u8 ioa_is_dead:1;
+ u8 allow_cmds:1;
+ u8 removing_ioa:1;
+
+ struct blk_iopoll iopoll;
+};
+
/* Command packet structure */
struct ipr_cmd_pkt {
- __be16 reserved; /* Reserved by IOA */
+ u8 reserved; /* Reserved by IOA */
+ u8 hrrq_id;
u8 request_type;
#define IPR_RQTYPE_SCSICDB 0x00
#define IPR_RQTYPE_IOACMD 0x01
@@ -1022,6 +1062,10 @@ struct ipr_hostrcb64_fabric_desc {
struct ipr_hostrcb64_config_element elem[1];
}__attribute__((packed, aligned (8)));
+#define for_each_hrrq(hrrq, ioa_cfg) \
+ for (hrrq = (ioa_cfg)->hrrq; \
+ hrrq < ((ioa_cfg)->hrrq + (ioa_cfg)->hrrq_num); hrrq++)
+
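A typical caller walks every queue by pointer with this macro, as ipr_init_ioa_mem() does earlier in the patch. The stand-alone sketch below mirrors that shape with a same-style macro and made-up sizes.

#include <stdio.h>

struct hrr_queue { int size; };

struct ioa_cfg {
	struct hrr_queue hrrq[4];
	int hrrq_num;
};

/* Same shape as the for_each_hrrq() macro above: iterate the array slice
 * [hrrq, hrrq + hrrq_num) by pointer. */
#define for_each_queue(q, cfg) \
	for (q = (cfg)->hrrq; q < ((cfg)->hrrq + (cfg)->hrrq_num); q++)

int main(void)
{
	struct ioa_cfg cfg = { { { 5 }, { 31 }, { 31 }, { 33 } }, 4 };
	struct hrr_queue *q;
	int total = 0;

	for_each_queue(q, &cfg)
		total += q->size;

	printf("%d command blocks across %d queues\n", total, cfg.hrrq_num);
	return 0;
}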
#define for_each_fabric_cfg(fabric, cfg) \
for (cfg = (fabric)->elem; \
cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
@@ -1308,6 +1352,7 @@ struct ipr_chip_cfg_t {
u16 max_cmds;
u8 cache_line_size;
u8 clear_isr;
+ u32 iopoll_weight;
struct ipr_interrupt_offsets regs;
};
@@ -1317,6 +1362,7 @@ struct ipr_chip_t {
u16 intr_type;
#define IPR_USE_LSI 0x00
#define IPR_USE_MSI 0x01
+#define IPR_USE_MSIX 0x02
u16 sis_type;
#define IPR_SIS32 0x00
#define IPR_SIS64 0x01
@@ -1375,13 +1421,10 @@ struct ipr_ioa_cfg {
struct list_head queue;
- u8 allow_interrupts:1;
u8 in_reset_reload:1;
u8 in_ioa_bringdown:1;
u8 ioa_unit_checked:1;
- u8 ioa_is_dead:1;
u8 dump_taken:1;
- u8 allow_cmds:1;
u8 allow_ml_add_del:1;
u8 needs_hard_reset:1;
u8 dual_raid:1;
@@ -1413,21 +1456,7 @@ struct ipr_ioa_cfg {
char trace_start[8];
#define IPR_TRACE_START_LABEL "trace"
struct ipr_trace_entry *trace;
- u32 trace_index:IPR_NUM_TRACE_INDEX_BITS;
-
- /*
- * Queue for free command blocks
- */
- char ipr_free_label[8];
-#define IPR_FREEQ_LABEL "free-q"
- struct list_head free_q;
-
- /*
- * Queue for command blocks outstanding to the adapter
- */
- char ipr_pending_label[8];
-#define IPR_PENDQ_LABEL "pend-q"
- struct list_head pending_q;
+ atomic_t trace_index;
char cfg_table_start[8];
#define IPR_CFG_TBL_START "cfg"
@@ -1452,16 +1481,10 @@ struct ipr_ioa_cfg {
struct list_head hostrcb_free_q;
struct list_head hostrcb_pending_q;
- __be32 *host_rrq;
- dma_addr_t host_rrq_dma;
-#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc
-#define IPR_HRRQ_RESP_BIT_SET 0x00000002
-#define IPR_HRRQ_TOGGLE_BIT 0x00000001
-#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
- volatile __be32 *hrrq_start;
- volatile __be32 *hrrq_end;
- volatile __be32 *hrrq_curr;
- volatile u32 toggle_bit;
+ struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
+ u32 hrrq_num;
+ atomic_t hrrq_index;
+ u16 identify_hrrq_index;
struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
@@ -1507,6 +1530,17 @@ struct ipr_ioa_cfg {
u32 max_cmds;
struct ipr_cmnd **ipr_cmnd_list;
dma_addr_t *ipr_cmnd_list_dma;
+
+ u16 intr_flag;
+ unsigned int nvectors;
+
+ struct {
+ unsigned short vec;
+ char desc[22];
+ } vectors_info[IPR_MAX_MSIX_VECTORS];
+
+ u32 iopoll_weight;
+
}; /* struct ipr_ioa_cfg */
struct ipr_cmnd {
@@ -1544,6 +1578,7 @@ struct ipr_cmnd {
struct scsi_device *sdev;
} u;
+ struct ipr_hrr_queue *hrrq;
struct ipr_ioa_cfg *ioa_cfg;
};
@@ -1717,7 +1752,8 @@ struct ipr_ucode_image_header {
if (ipr_is_device(hostrcb)) { \
if ((hostrcb)->ioa_cfg->sis64) { \
printk(KERN_ERR IPR_NAME ": %s: " fmt, \
- ipr_format_res_path(hostrcb->hcam.u.error64.fd_res_path, \
+ ipr_format_res_path(hostrcb->ioa_cfg, \
+ hostrcb->hcam.u.error64.fd_res_path, \
hostrcb->rp_buffer, \
sizeof(hostrcb->rp_buffer)), \
__VA_ARGS__); \
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index fcb9d0b20ee4..09c81b2f2169 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1381,10 +1381,10 @@ static void fc_fcp_timeout(unsigned long data)
fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
- if (fsp->state & FC_SRB_RCV_STATUS)
- fc_fcp_complete_locked(fsp);
- else if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
fc_fcp_rec(fsp);
+ else if (fsp->state & FC_SRB_RCV_STATUS)
+ fc_fcp_complete_locked(fsp);
else
fc_fcp_recovery(fsp, FC_TIMED_OUT);
fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index c2830cc66d6a..b74189d89322 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -41,25 +41,25 @@ extern unsigned int fc_debug_logging;
#define FC_LIBFC_DBG(fmt, args...) \
FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \
- printk(KERN_INFO "libfc: " fmt, ##args))
+ pr_info("libfc: " fmt, ##args))
#define FC_LPORT_DBG(lport, fmt, args...) \
FC_CHECK_LOGGING(FC_LPORT_LOGGING, \
- printk(KERN_INFO "host%u: lport %6.6x: " fmt, \
- (lport)->host->host_no, \
- (lport)->port_id, ##args))
+ pr_info("host%u: lport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (lport)->port_id, ##args))
-#define FC_DISC_DBG(disc, fmt, args...) \
- FC_CHECK_LOGGING(FC_DISC_LOGGING, \
- printk(KERN_INFO "host%u: disc: " fmt, \
- fc_disc_lport(disc)->host->host_no, \
- ##args))
+#define FC_DISC_DBG(disc, fmt, args...) \
+ FC_CHECK_LOGGING(FC_DISC_LOGGING, \
+ pr_info("host%u: disc: " fmt, \
+ fc_disc_lport(disc)->host->host_no, \
+ ##args))
#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \
FC_CHECK_LOGGING(FC_RPORT_LOGGING, \
- printk(KERN_INFO "host%u: rport %6.6x: " fmt, \
- (lport)->host->host_no, \
- (port_id), ##args))
+ pr_info("host%u: rport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (port_id), ##args))
#define FC_RPORT_DBG(rdata, fmt, args...) \
FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args)
@@ -70,13 +70,13 @@ extern unsigned int fc_debug_logging;
if ((pkt)->seq_ptr) { \
struct fc_exch *_ep = NULL; \
_ep = fc_seq_exch((pkt)->seq_ptr); \
- printk(KERN_INFO "host%u: fcp: %6.6x: " \
+ pr_info("host%u: fcp: %6.6x: " \
"xid %04x-%04x: " fmt, \
(pkt)->lp->host->host_no, \
(pkt)->rport->port_id, \
(_ep)->oxid, (_ep)->rxid, ##args); \
} else { \
- printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \
+ pr_info("host%u: fcp: %6.6x: " fmt, \
(pkt)->lp->host->host_no, \
(pkt)->rport->port_id, ##args); \
} \
@@ -84,14 +84,14 @@ extern unsigned int fc_debug_logging;
#define FC_EXCH_DBG(exch, fmt, args...) \
FC_CHECK_LOGGING(FC_EXCH_LOGGING, \
- printk(KERN_INFO "host%u: xid %4x: " fmt, \
- (exch)->lp->host->host_no, \
- exch->xid, ##args))
+ pr_info("host%u: xid %4x: " fmt, \
+ (exch)->lp->host->host_no, \
+ exch->xid, ##args))
#define FC_SCSI_DBG(lport, fmt, args...) \
FC_CHECK_LOGGING(FC_SCSI_LOGGING, \
- printk(KERN_INFO "host%u: scsi: " fmt, \
- (lport)->host->host_no, ##args))
+ pr_info("host%u: scsi: " fmt, \
+ (lport)->host->host_no, ##args))
/*
* FC-4 Providers.
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 83aa1efec875..d518d17e940f 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -582,7 +582,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
static void fc_rport_error_retry(struct fc_rport_priv *rdata,
struct fc_frame *fp)
{
- unsigned long delay = FC_DEF_E_D_TOV;
+ unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (PTR_ERR(fp) == -FC_EX_CLOSED)
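The fc_rport fix above matters because FC_DEF_E_D_TOV is a millisecond value (2 seconds) while timer delays are counted in jiffies, so using the raw number makes the retry delay scale with HZ. Here is a quick arithmetic check with a simplified conversion; the HZ values are illustrative.

#include <stdio.h>

#define FC_DEF_E_D_TOV 2000U   /* milliseconds */

/* Simplified msecs_to_jiffies(): round up to whole ticks. */
static unsigned long ms_to_jiffies(unsigned int ms, unsigned int hz)
{
	return ((unsigned long)ms * hz + 999) / 1000;
}

int main(void)
{
	unsigned int hz_values[] = { 100, 250, 1000 };

	for (int i = 0; i < 3; i++) {
		unsigned int hz = hz_values[i];
		unsigned long right = ms_to_jiffies(FC_DEF_E_D_TOV, hz);
		/* Using the raw ms count as a jiffies delay is HZ-dependent: */
		double wrong_secs = (double)FC_DEF_E_D_TOV / hz;

		printf("HZ=%-4u  correct delay=%lu jiffies (2 s), "
		       "raw value would wait %.1f s\n", hz, right, wrong_secs);
	}
	return 0;
}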
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index df4c13a5534c..7706c99ec8bb 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -466,11 +466,13 @@ enum intr_type_t {
MSIX,
};
+#define LPFC_CT_CTX_MAX 64
struct unsol_rcv_ct_ctx {
uint32_t ctxt_id;
uint32_t SID;
- uint32_t flags;
-#define UNSOL_VALID 0x00000001
+ uint32_t valid;
+#define UNSOL_INVALID 0
+#define UNSOL_VALID 1
uint16_t oxid;
uint16_t rxid;
};
@@ -750,6 +752,15 @@ struct lpfc_hba {
void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
PCI BAR2 */
+ void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for
+ PCI BAR0 with dual-ULP support */
+ void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for
+ PCI BAR2 with dual-ULP support */
+ void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for
+ PCI BAR4 with dual-ULP support */
+#define PCI_64BIT_BAR0 0
+#define PCI_64BIT_BAR2 2
+#define PCI_64BIT_BAR4 4
void __iomem *MBslimaddr; /* virtual address for mbox cmds */
void __iomem *HAregaddr; /* virtual address for host attn reg */
void __iomem *CAregaddr; /* virtual address for chip attn reg */
@@ -938,7 +949,7 @@ struct lpfc_hba {
spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
struct list_head ct_ev_waiters;
- struct unsol_rcv_ct_ctx ct_ctx[64];
+ struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
uint32_t ctx_idx;
uint8_t menlo_flag; /* menlo generic flags */
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f7368eb80415..32d5683e6181 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -955,9 +955,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
if (phba->sli_rev == LPFC_SLI_REV4) {
evt_dat->immed_dat = phba->ctx_idx;
- phba->ctx_idx = (phba->ctx_idx + 1) % 64;
+ phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
/* Provide warning for over-run of the ct_ctx array */
- if (phba->ct_ctx[evt_dat->immed_dat].flags &
+ if (phba->ct_ctx[evt_dat->immed_dat].valid ==
UNSOL_VALID)
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
"2717 CT context array entry "
@@ -973,7 +973,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
piocbq->iocb.unsli3.rcvsli3.ox_id;
phba->ct_ctx[evt_dat->immed_dat].SID =
piocbq->iocb.un.rcvels.remoteID;
- phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
+ phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
} else
evt_dat->immed_dat = piocbq->iocb.ulpContext;
@@ -1013,6 +1013,47 @@ error_ct_unsol_exit:
}
/**
+ * lpfc_bsg_ct_unsol_abort - handle an unsolicited CT abort for the management plane
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function handles an abort of a CT command directed to the management
+ * plane on an SLI4 port.
+ *
+ * If a pending context for a CT command to the management plane exists, it
+ * clears that context and returns 1 (handled); otherwise it returns 0 to
+ * indicate that no such context exists.
+ **/
+int
+lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
+{
+ struct fc_frame_header fc_hdr;
+ struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
+ int ctx_idx, handled = 0;
+ uint16_t oxid, rxid;
+ uint32_t sid;
+
+ memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
+ sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
+ oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
+ rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
+
+ for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
+ if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
+ continue;
+ if (phba->ct_ctx[ctx_idx].rxid != rxid)
+ continue;
+ if (phba->ct_ctx[ctx_idx].oxid != oxid)
+ continue;
+ if (phba->ct_ctx[ctx_idx].SID != sid)
+ continue;
+ phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
+ handled = 1;
+ }
+ return handled;
+}
+
+/**
* lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
* @job: SET_EVENT fc_bsg_job
**/
@@ -1318,7 +1359,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
icmd->ulpClass = CLASS3;
if (phba->sli_rev == LPFC_SLI_REV4) {
/* Do not issue unsol response if oxid not marked as valid */
- if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
+ if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
@@ -1352,7 +1393,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
/* The exchange is done, mark the entry as invalid */
- phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
+ phba->ct_ctx[tag].valid = UNSOL_INVALID;
} else
icmd->ulpContext = (ushort) tag;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 69d66e3662cb..76ca65dae781 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -164,8 +164,7 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
-void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
- struct lpfc_iocbq *);
+int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
@@ -427,6 +426,7 @@ int lpfc_bsg_request(struct fc_bsg_job *);
int lpfc_bsg_timeout(struct fc_bsg_job *);
int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
+int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 65f9fb6862e6..7bff3a19af56 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -164,37 +164,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
/**
- * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort
+ * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
* @phba: Pointer to HBA context object.
- * @pring: Pointer to the driver internal I/O ring.
- * @piocbq: Pointer to the IOCBQ.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
*
- * This function serves as the default handler for the sli4 unsolicited
- * abort event. It shall be invoked when there is no application interface
- * registered unsolicited abort handler. This handler does nothing but
- * just simply releases the dma buffer used by the unsol abort event.
+ * This function serves as the upper level protocol abort handler for the CT
+ * protocol.
+ *
+ * Return 1 if the abort has been handled, 0 otherwise.
**/
-void
-lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
- struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *piocbq)
+int
+lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
- IOCB_t *icmd = &piocbq->iocb;
- struct lpfc_dmabuf *bdeBuf;
- uint32_t size;
+ int handled;
- /* Forward abort event to any process registered to receive ct event */
- if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
- return;
+ /* CT upper level goes through BSG */
+ handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);
- /* If there is no BDE associated with IOCB, there is nothing to do */
- if (icmd->ulpBdeCount == 0)
- return;
- bdeBuf = piocbq->context2;
- piocbq->context2 = NULL;
- size = icmd->un.cont64[0].tus.f.bdeSize;
- lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
- lpfc_in_buf_free(phba, bdeBuf);
+ return handled;
}
static void
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b9440deaad45..08d156a9094f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3122,6 +3122,13 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case IOERR_SEQUENCE_TIMEOUT:
case IOERR_INVALID_RPI:
+ if (cmd == ELS_CMD_PLOGI &&
+ did == NameServer_DID) {
+ /* Continue forever if plogi to */
+ /* the nameserver fails */
+ maxretry = 0;
+ delay = 100;
+ }
retry = 1;
break;
}
@@ -6517,7 +6524,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
uint32_t *payload;
- uint32_t cmd, did, newnode, rjt_err = 0;
+ uint32_t cmd, did, newnode;
+ uint8_t rjt_exp, rjt_err = 0;
IOCB_t *icmd = &elsiocb->iocb;
if (!vport || !(elsiocb->context2))
@@ -6606,12 +6614,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* If Nport discovery is delayed, reject PLOGIs */
if (vport->fc_flag & FC_DISC_DELAYED) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
/* We get here, and drop thru, if we are PT2PT with
@@ -6648,6 +6658,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_send_els_event(vport, ndlp, payload);
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
@@ -6661,6 +6672,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_send_els_event(vport, ndlp, payload);
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
@@ -6680,6 +6692,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvADISC++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb,
@@ -6693,6 +6706,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPDISC++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb,
@@ -6730,6 +6744,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPRLI++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
@@ -6813,6 +6828,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (newnode)
lpfc_nlp_put(ndlp);
break;
+ case ELS_CMD_REC:
+ /* receive this due to exchange closed */
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_INVALID_OX_RX;
+ break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -6820,6 +6840,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Unsupported ELS command, reject */
rjt_err = LSRJT_CMD_UNSUPPORTED;
+ rjt_exp = LSEXP_NOTHING_MORE;
/* Unknown ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -6834,7 +6855,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (rjt_err) {
memset(&stat, 0, sizeof(stat));
stat.un.b.lsRjtRsnCode = rjt_err;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ stat.un.b.lsRjtRsnCodeExp = rjt_exp;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
NULL);
}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7398ca862e97..e8c476031703 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -538,6 +538,7 @@ struct fc_vft_header {
#define ELS_CMD_ECHO 0x10000000
#define ELS_CMD_TEST 0x11000000
#define ELS_CMD_RRQ 0x12000000
+#define ELS_CMD_REC 0x13000000
#define ELS_CMD_PRLI 0x20100014
#define ELS_CMD_PRLO 0x21100014
#define ELS_CMD_PRLO_ACC 0x02100014
@@ -574,6 +575,7 @@ struct fc_vft_header {
#define ELS_CMD_ECHO 0x10
#define ELS_CMD_TEST 0x11
#define ELS_CMD_RRQ 0x12
+#define ELS_CMD_REC 0x13
#define ELS_CMD_PRLI 0x14001020
#define ELS_CMD_PRLO 0x14001021
#define ELS_CMD_PRLO_ACC 0x14001002
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index a47cfbdd05f2..6e93b886cd4d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -106,6 +106,7 @@ struct lpfc_sli_intf {
#define LPFC_SLI4_MB_WORD_COUNT 64
#define LPFC_MAX_MQ_PAGE 8
+#define LPFC_MAX_WQ_PAGE_V0 4
#define LPFC_MAX_WQ_PAGE 8
#define LPFC_MAX_CQ_PAGE 4
#define LPFC_MAX_EQ_PAGE 8
@@ -703,24 +704,41 @@ struct lpfc_register {
* BAR0. The offsets are the same so the driver must account for
* any base address difference.
*/
-#define LPFC_RQ_DOORBELL 0x00A0
-#define lpfc_rq_doorbell_num_posted_SHIFT 16
-#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
-#define lpfc_rq_doorbell_num_posted_WORD word0
-#define lpfc_rq_doorbell_id_SHIFT 0
-#define lpfc_rq_doorbell_id_MASK 0xFFFF
-#define lpfc_rq_doorbell_id_WORD word0
-
-#define LPFC_WQ_DOORBELL 0x0040
-#define lpfc_wq_doorbell_num_posted_SHIFT 24
-#define lpfc_wq_doorbell_num_posted_MASK 0x00FF
-#define lpfc_wq_doorbell_num_posted_WORD word0
-#define lpfc_wq_doorbell_index_SHIFT 16
-#define lpfc_wq_doorbell_index_MASK 0x00FF
-#define lpfc_wq_doorbell_index_WORD word0
-#define lpfc_wq_doorbell_id_SHIFT 0
-#define lpfc_wq_doorbell_id_MASK 0xFFFF
-#define lpfc_wq_doorbell_id_WORD word0
+#define LPFC_ULP0_RQ_DOORBELL 0x00A0
+#define LPFC_ULP1_RQ_DOORBELL 0x00C0
+#define lpfc_rq_db_list_fm_num_posted_SHIFT 24
+#define lpfc_rq_db_list_fm_num_posted_MASK 0x00FF
+#define lpfc_rq_db_list_fm_num_posted_WORD word0
+#define lpfc_rq_db_list_fm_index_SHIFT 16
+#define lpfc_rq_db_list_fm_index_MASK 0x00FF
+#define lpfc_rq_db_list_fm_index_WORD word0
+#define lpfc_rq_db_list_fm_id_SHIFT 0
+#define lpfc_rq_db_list_fm_id_MASK 0xFFFF
+#define lpfc_rq_db_list_fm_id_WORD word0
+#define lpfc_rq_db_ring_fm_num_posted_SHIFT 16
+#define lpfc_rq_db_ring_fm_num_posted_MASK 0x3FFF
+#define lpfc_rq_db_ring_fm_num_posted_WORD word0
+#define lpfc_rq_db_ring_fm_id_SHIFT 0
+#define lpfc_rq_db_ring_fm_id_MASK 0xFFFF
+#define lpfc_rq_db_ring_fm_id_WORD word0
+
+#define LPFC_ULP0_WQ_DOORBELL 0x0040
+#define LPFC_ULP1_WQ_DOORBELL 0x0060
+#define lpfc_wq_db_list_fm_num_posted_SHIFT 24
+#define lpfc_wq_db_list_fm_num_posted_MASK 0x00FF
+#define lpfc_wq_db_list_fm_num_posted_WORD word0
+#define lpfc_wq_db_list_fm_index_SHIFT 16
+#define lpfc_wq_db_list_fm_index_MASK 0x00FF
+#define lpfc_wq_db_list_fm_index_WORD word0
+#define lpfc_wq_db_list_fm_id_SHIFT 0
+#define lpfc_wq_db_list_fm_id_MASK 0xFFFF
+#define lpfc_wq_db_list_fm_id_WORD word0
+#define lpfc_wq_db_ring_fm_num_posted_SHIFT 16
+#define lpfc_wq_db_ring_fm_num_posted_MASK 0x3FFF
+#define lpfc_wq_db_ring_fm_num_posted_WORD word0
+#define lpfc_wq_db_ring_fm_id_SHIFT 0
+#define lpfc_wq_db_ring_fm_id_MASK 0xFFFF
+#define lpfc_wq_db_ring_fm_id_WORD word0
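These _SHIFT/_MASK/_WORD triples are meant to be consumed by lpfc's bf_set()/bf_get() style accessors when composing a doorbell word. The sketch below shows the underlying shift-and-mask packing with illustrative helper macros rather than the lpfc ones.

#include <stdint.h>
#include <stdio.h>

/* Field descriptions in the same style as the lpfc _SHIFT/_MASK pairs. */
#define DB_NUM_POSTED_SHIFT 24
#define DB_NUM_POSTED_MASK  0x00FF
#define DB_INDEX_SHIFT      16
#define DB_INDEX_MASK       0x00FF
#define DB_ID_SHIFT         0
#define DB_ID_MASK          0xFFFF

#define FIELD_SET(word, shift, mask, val) \
	((word) = ((word) & ~((uint32_t)(mask) << (shift))) | \
		  (((uint32_t)(val) & (mask)) << (shift)))
#define FIELD_GET(word, shift, mask) (((word) >> (shift)) & (mask))

int main(void)
{
	uint32_t word0 = 0;

	FIELD_SET(word0, DB_NUM_POSTED_SHIFT, DB_NUM_POSTED_MASK, 1);
	FIELD_SET(word0, DB_INDEX_SHIFT, DB_INDEX_MASK, 7);
	FIELD_SET(word0, DB_ID_SHIFT, DB_ID_MASK, 0x01ab);

	printf("doorbell word0 = 0x%08x (id=%u)\n", word0,
	       FIELD_GET(word0, DB_ID_SHIFT, DB_ID_MASK));
	return 0;
}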
#define LPFC_EQCQ_DOORBELL 0x0120
#define lpfc_eqcq_doorbell_se_SHIFT 31
@@ -1131,12 +1149,22 @@ struct lpfc_mbx_wq_create {
struct { /* Version 0 Request */
uint32_t word0;
#define lpfc_mbx_wq_create_num_pages_SHIFT 0
-#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_num_pages_MASK 0x000000FF
#define lpfc_mbx_wq_create_num_pages_WORD word0
+#define lpfc_mbx_wq_create_dua_SHIFT 8
+#define lpfc_mbx_wq_create_dua_MASK 0x00000001
+#define lpfc_mbx_wq_create_dua_WORD word0
#define lpfc_mbx_wq_create_cq_id_SHIFT 16
#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
#define lpfc_mbx_wq_create_cq_id_WORD word0
- struct dma_address page[LPFC_MAX_WQ_PAGE];
+ struct dma_address page[LPFC_MAX_WQ_PAGE_V0];
+ uint32_t word9;
+#define lpfc_mbx_wq_create_bua_SHIFT 0
+#define lpfc_mbx_wq_create_bua_MASK 0x00000001
+#define lpfc_mbx_wq_create_bua_WORD word9
+#define lpfc_mbx_wq_create_ulp_num_SHIFT 8
+#define lpfc_mbx_wq_create_ulp_num_MASK 0x000000FF
+#define lpfc_mbx_wq_create_ulp_num_WORD word9
} request;
struct { /* Version 1 Request */
uint32_t word0; /* Word 0 is the same as in v0 */
@@ -1160,6 +1188,17 @@ struct lpfc_mbx_wq_create {
#define lpfc_mbx_wq_create_q_id_SHIFT 0
#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
#define lpfc_mbx_wq_create_q_id_WORD word0
+ uint32_t doorbell_offset;
+ uint32_t word2;
+#define lpfc_mbx_wq_create_bar_set_SHIFT 0
+#define lpfc_mbx_wq_create_bar_set_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_bar_set_WORD word2
+#define WQ_PCI_BAR_0_AND_1 0x00
+#define WQ_PCI_BAR_2_AND_3 0x01
+#define WQ_PCI_BAR_4_AND_5 0x02
+#define lpfc_mbx_wq_create_db_format_SHIFT 16
+#define lpfc_mbx_wq_create_db_format_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_db_format_WORD word2
} response;
} u;
};
@@ -1223,14 +1262,31 @@ struct lpfc_mbx_rq_create {
#define lpfc_mbx_rq_create_num_pages_SHIFT 0
#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
#define lpfc_mbx_rq_create_num_pages_WORD word0
+#define lpfc_mbx_rq_create_dua_SHIFT 16
+#define lpfc_mbx_rq_create_dua_MASK 0x00000001
+#define lpfc_mbx_rq_create_dua_WORD word0
+#define lpfc_mbx_rq_create_bqu_SHIFT 17
+#define lpfc_mbx_rq_create_bqu_MASK 0x00000001
+#define lpfc_mbx_rq_create_bqu_WORD word0
+#define lpfc_mbx_rq_create_ulp_num_SHIFT 24
+#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF
+#define lpfc_mbx_rq_create_ulp_num_WORD word0
struct rq_context context;
struct dma_address page[LPFC_MAX_WQ_PAGE];
} request;
struct {
uint32_t word0;
-#define lpfc_mbx_rq_create_q_id_SHIFT 0
-#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
-#define lpfc_mbx_rq_create_q_id_WORD word0
+#define lpfc_mbx_rq_create_q_id_SHIFT 0
+#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD word0
+ uint32_t doorbell_offset;
+ uint32_t word2;
+#define lpfc_mbx_rq_create_bar_set_SHIFT 0
+#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_bar_set_WORD word2
+#define lpfc_mbx_rq_create_db_format_SHIFT 16
+#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_db_format_WORD word2
} response;
} u;
};
@@ -1388,6 +1444,33 @@ struct lpfc_mbx_get_rsrc_extent_info {
} u;
};
+struct lpfc_mbx_query_fw_config {
+ struct mbox_header header;
+ struct {
+ uint32_t config_number;
+#define LPFC_FC_FCOE 0x00000007
+ uint32_t asic_revision;
+ uint32_t physical_port;
+ uint32_t function_mode;
+#define LPFC_FCOE_INI_MODE 0x00000040
+#define LPFC_FCOE_TGT_MODE 0x00000080
+#define LPFC_DUA_MODE 0x00000800
+ uint32_t ulp0_mode;
+#define LPFC_ULP_FCOE_INIT_MODE 0x00000040
+#define LPFC_ULP_FCOE_TGT_MODE 0x00000080
+ uint32_t ulp0_nap_words[12];
+ uint32_t ulp1_mode;
+ uint32_t ulp1_nap_words[12];
+ uint32_t function_capabilities;
+ uint32_t cqid_base;
+ uint32_t cqid_tot;
+ uint32_t eqid_base;
+ uint32_t eqid_tot;
+ uint32_t ulp0_nap2_words[2];
+ uint32_t ulp1_nap2_words[2];
+ } rsp;
+};
+
struct lpfc_id_range {
uint32_t word5;
#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
@@ -1803,51 +1886,6 @@ struct lpfc_mbx_redisc_fcf_tbl {
#define lpfc_mbx_redisc_fcf_index_WORD word12
};
-struct lpfc_mbx_query_fw_cfg {
- struct mbox_header header;
- uint32_t config_number;
- uint32_t asic_rev;
- uint32_t phys_port;
- uint32_t function_mode;
-/* firmware Function Mode */
-#define lpfc_function_mode_toe_SHIFT 0
-#define lpfc_function_mode_toe_MASK 0x00000001
-#define lpfc_function_mode_toe_WORD function_mode
-#define lpfc_function_mode_nic_SHIFT 1
-#define lpfc_function_mode_nic_MASK 0x00000001
-#define lpfc_function_mode_nic_WORD function_mode
-#define lpfc_function_mode_rdma_SHIFT 2
-#define lpfc_function_mode_rdma_MASK 0x00000001
-#define lpfc_function_mode_rdma_WORD function_mode
-#define lpfc_function_mode_vm_SHIFT 3
-#define lpfc_function_mode_vm_MASK 0x00000001
-#define lpfc_function_mode_vm_WORD function_mode
-#define lpfc_function_mode_iscsi_i_SHIFT 4
-#define lpfc_function_mode_iscsi_i_MASK 0x00000001
-#define lpfc_function_mode_iscsi_i_WORD function_mode
-#define lpfc_function_mode_iscsi_t_SHIFT 5
-#define lpfc_function_mode_iscsi_t_MASK 0x00000001
-#define lpfc_function_mode_iscsi_t_WORD function_mode
-#define lpfc_function_mode_fcoe_i_SHIFT 6
-#define lpfc_function_mode_fcoe_i_MASK 0x00000001
-#define lpfc_function_mode_fcoe_i_WORD function_mode
-#define lpfc_function_mode_fcoe_t_SHIFT 7
-#define lpfc_function_mode_fcoe_t_MASK 0x00000001
-#define lpfc_function_mode_fcoe_t_WORD function_mode
-#define lpfc_function_mode_dal_SHIFT 8
-#define lpfc_function_mode_dal_MASK 0x00000001
-#define lpfc_function_mode_dal_WORD function_mode
-#define lpfc_function_mode_lro_SHIFT 9
-#define lpfc_function_mode_lro_MASK 0x00000001
-#define lpfc_function_mode_lro_WORD function_mode
-#define lpfc_function_mode_flex10_SHIFT 10
-#define lpfc_function_mode_flex10_MASK 0x00000001
-#define lpfc_function_mode_flex10_WORD function_mode
-#define lpfc_function_mode_ncsi_SHIFT 11
-#define lpfc_function_mode_ncsi_MASK 0x00000001
-#define lpfc_function_mode_ncsi_WORD function_mode
-};
-
/* Status field for embedded SLI_CONFIG mailbox command */
#define STATUS_SUCCESS 0x0
#define STATUS_FAILED 0x1
@@ -2965,7 +3003,7 @@ struct lpfc_mqe {
struct lpfc_mbx_read_config rd_config;
struct lpfc_mbx_request_features req_ftrs;
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
- struct lpfc_mbx_query_fw_cfg query_fw_cfg;
+ struct lpfc_mbx_query_fw_config query_fw_cfg;
struct lpfc_mbx_supp_pages supp_pages;
struct lpfc_mbx_pc_sli4_params sli4_params;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 89ad55807012..314b4f61b9e3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3165,14 +3165,10 @@ destroy_port(struct lpfc_vport *vport)
int
lpfc_get_instance(void)
{
- int instance = 0;
+ int ret;
- /* Assign an unused number */
- if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
- return -1;
- if (idr_get_new(&lpfc_hba_index, NULL, &instance))
- return -1;
- return instance;
+ ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
+ return ret < 0 ? -1 : ret;
}
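For context on the API swap in this hunk: the two-step idr_pre_get()/idr_get_new() pair was replaced kernel-wide by the single-call idr_alloc(). A minimal stand-alone sketch of the new-style allocation follows; the idr name example_idr and the wrapper are illustrative, not lpfc symbols.

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);

/* Allocate any non-negative id: start = 0, end = 0 means "no upper bound".
 * idr_alloc() returns the new id on success or a negative errno, which the
 * caller here collapses to -1, matching the old lpfc_get_instance() contract.
 */
static int example_get_instance(void)
{
	int ret;

	ret = idr_alloc(&example_idr, NULL, 0, 0, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}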
/**
@@ -6233,9 +6229,11 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PORT_SEM_OFFSET;
phba->sli4_hba.RQDBregaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_ULP0_RQ_DOORBELL;
phba->sli4_hba.WQDBregaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_ULP0_WQ_DOORBELL;
phba->sli4_hba.EQCQDBregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
phba->sli4_hba.MQDBregaddr =
@@ -6289,9 +6287,11 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
return -ENODEV;
phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
- vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
+ vf * LPFC_VFR_PAGE_SIZE +
+ LPFC_ULP0_RQ_DOORBELL);
phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
- vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
+ vf * LPFC_VFR_PAGE_SIZE +
+ LPFC_ULP0_WQ_DOORBELL);
phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
@@ -6987,6 +6987,19 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.fcp_wq = NULL;
}
+ if (phba->pci_bar0_memmap_p) {
+ iounmap(phba->pci_bar0_memmap_p);
+ phba->pci_bar0_memmap_p = NULL;
+ }
+ if (phba->pci_bar2_memmap_p) {
+ iounmap(phba->pci_bar2_memmap_p);
+ phba->pci_bar2_memmap_p = NULL;
+ }
+ if (phba->pci_bar4_memmap_p) {
+ iounmap(phba->pci_bar4_memmap_p);
+ phba->pci_bar4_memmap_p = NULL;
+ }
+
/* Release FCP CQ mapping array */
if (phba->sli4_hba.fcp_cq_map != NULL) {
kfree(phba->sli4_hba.fcp_cq_map);
@@ -7050,6 +7063,53 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
int rc = -ENOMEM;
int fcp_eqidx, fcp_cqidx, fcp_wqidx;
int fcp_cq_index = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ LPFC_MBOXQ_t *mboxq;
+ uint32_t length;
+
+ /* Check for dual-ULP support */
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3249 Unable to allocate memory for "
+ "QUERY_FW_CFG mailbox command\n");
+ return -ENOMEM;
+ }
+ length = (sizeof(struct lpfc_mbx_query_fw_config) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_QUERY_FW_CFG,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3250 QUERY_FW_CFG mailbox failed with status "
+ "x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -ENXIO;
+ goto out_error;
+ }
+
+ phba->sli4_hba.fw_func_mode =
+ mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
+ phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
+ phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
+ "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
+ phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
+
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
/*
* Set up HBA Event Queues (EQs)
@@ -7664,78 +7724,6 @@ out:
}
/**
- * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
- * @phba: pointer to lpfc hba data structure.
- * @cnt: number of nop mailbox commands to send.
- *
- * This routine is invoked to send a number @cnt of NOP mailbox command and
- * wait for each command to complete.
- *
- * Return: the number of NOP mailbox command completed.
- **/
-static int
-lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
-{
- LPFC_MBOXQ_t *mboxq;
- int length, cmdsent;
- uint32_t mbox_tmo;
- uint32_t rc = 0;
- uint32_t shdr_status, shdr_add_status;
- union lpfc_sli4_cfg_shdr *shdr;
-
- if (cnt == 0) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "2518 Requested to send 0 NOP mailbox cmd\n");
- return cnt;
- }
-
- mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2519 Unable to allocate memory for issuing "
- "NOP mailbox command\n");
- return 0;
- }
-
- /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
- length = (sizeof(struct lpfc_mbx_nop) -
- sizeof(struct lpfc_sli4_cfg_mhdr));
-
- for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
- lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
- LPFC_MBOX_OPCODE_NOP, length,
- LPFC_SLI4_MBX_EMBED);
- if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
- }
- if (rc == MBX_TIMEOUT)
- break;
- /* Check return status */
- shdr = (union lpfc_sli4_cfg_shdr *)
- &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
- shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
- &shdr->response);
- if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "2520 NOP mailbox command failed "
- "status x%x add_status x%x mbx "
- "status x%x\n", shdr_status,
- shdr_add_status, rc);
- break;
- }
- }
-
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
-
- return cmdsent;
-}
-
-/**
* lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
* @phba: pointer to lpfc hba data structure.
*
@@ -8503,37 +8491,6 @@ lpfc_unset_hba(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to unset the HBA device initialization steps to
- * a device with SLI-4 interface spec.
- **/
-static void
-lpfc_sli4_unset_hba(struct lpfc_hba *phba)
-{
- struct lpfc_vport *vport = phba->pport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
- spin_lock_irq(shost->host_lock);
- vport->load_flag |= FC_UNLOADING;
- spin_unlock_irq(shost->host_lock);
-
- phba->pport->work_port_events = 0;
-
- /* Stop the SLI4 device port */
- lpfc_stop_port(phba);
-
- lpfc_sli4_disable_intr(phba);
-
- /* Reset SLI4 HBA FCoE function */
- lpfc_pci_function_reset(phba);
- lpfc_sli4_queue_destroy(phba);
-
- return;
-}
-
-/**
* lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
* @phba: Pointer to HBA context object.
*
@@ -9595,7 +9552,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
struct Scsi_Host *shost = NULL;
int error, ret;
uint32_t cfg_mode, intr_mode;
- int mcnt;
int adjusted_fcp_io_channel;
/* Allocate memory for HBA structure */
@@ -9684,58 +9640,35 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
/* Now, trying to enable interrupt and bring up the device */
cfg_mode = phba->cfg_use_msi;
- while (true) {
- /* Put device to a known state before enabling interrupt */
- lpfc_stop_port(phba);
- /* Configure and enable interrupt */
- intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
- if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0426 Failed to enable interrupt.\n");
- error = -ENODEV;
- goto out_free_sysfs_attr;
- }
- /* Default to single EQ for non-MSI-X */
- if (phba->intr_type != MSIX)
- adjusted_fcp_io_channel = 1;
- else
- adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
- phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
- /* Set up SLI-4 HBA */
- if (lpfc_sli4_hba_setup(phba)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1421 Failed to set up hba\n");
- error = -ENODEV;
- goto out_disable_intr;
- }
- /* Send NOP mbx cmds for non-INTx mode active interrupt test */
- if (intr_mode != 0)
- mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
- LPFC_ACT_INTR_CNT);
-
- /* Check active interrupts received only for MSI/MSI-X */
- if (intr_mode == 0 ||
- phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
- /* Log the current active interrupt mode */
- phba->intr_mode = intr_mode;
- lpfc_log_intr_mode(phba, intr_mode);
- break;
- }
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0451 Configure interrupt mode (%d) "
- "failed active interrupt test.\n",
- intr_mode);
- /* Unset the previous SLI-4 HBA setup. */
- /*
- * TODO: Is this operation compatible with IF TYPE 2
- * devices? All port state is deleted and cleared.
- */
- lpfc_sli4_unset_hba(phba);
- /* Try next level of interrupt mode */
- cfg_mode = --intr_mode;
+ /* Put device to a known state before enabling interrupt */
+ lpfc_stop_port(phba);
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0426 Failed to enable interrupt.\n");
+ error = -ENODEV;
+ goto out_free_sysfs_attr;
+ }
+ /* Default to single EQ for non-MSI-X */
+ if (phba->intr_type != MSIX)
+ adjusted_fcp_io_channel = 1;
+ else
+ adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
+ phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
+ /* Set up SLI-4 HBA */
+ if (lpfc_sli4_hba_setup(phba)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1421 Failed to set up hba\n");
+ error = -ENODEV;
+ goto out_disable_intr;
}
+ /* Log the current active interrupt mode */
+ phba->intr_mode = intr_mode;
+ lpfc_log_intr_mode(phba, intr_mode);
+
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d8fadcb2db73..46128c679202 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1115,6 +1115,13 @@ out:
"0261 Cannot Register NameServer login\n");
}
+ /*
+ ** In case the node reference counter does not go to zero, ensure that
+ ** the stale state for the node is not processed.
+ */
+
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DEFER_RM;
spin_unlock_irq(shost->host_lock);
@@ -2159,13 +2166,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DEFER_RM;
+ spin_unlock_irq(shost->host_lock);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 60e5a177644c..98af07c6e300 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -288,6 +288,26 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
}
/**
+ * lpfc_change_queue_type() - Change a device's scsi tag queuing type
+ * @sdev: Pointer to the scsi device whose queue type is to change
+ * @tag_type: Identifier for queue tag type
+ */
+static int
+lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, tag_type);
+ if (tag_type)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ tag_type = 0;
+
+ return tag_type;
+}
+
+/**
* lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
* @phba: The Hba for which this call is being executed.
*
@@ -3972,7 +3992,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
break;
}
} else
- fcp_cmnd->fcpCntl1 = 0;
+ fcp_cmnd->fcpCntl1 = SIMPLE_Q;
sli4 = (phba->sli_rev == LPFC_SLI_REV4);
@@ -5150,6 +5170,7 @@ struct scsi_host_template lpfc_template = {
.max_sectors = 0xFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = lpfc_change_queue_depth,
+ .change_queue_type = lpfc_change_queue_type,
};
struct scsi_host_template lpfc_vport_template = {
@@ -5172,4 +5193,5 @@ struct scsi_host_template lpfc_vport_template = {
.shost_attrs = lpfc_vport_attrs,
.max_sectors = 0xFFFF,
.change_queue_depth = lpfc_change_queue_depth,
+ .change_queue_type = lpfc_change_queue_type,
};
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 624eab370396..74b67d98e952 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -124,10 +124,17 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
/* Ring Doorbell */
doorbell.word0 = 0;
- bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
- bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
- bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
- writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
+ if (q->db_format == LPFC_DB_LIST_FORMAT) {
+ bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
+ bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
+ bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
+ } else if (q->db_format == LPFC_DB_RING_FORMAT) {
+ bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
+ bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
+ } else {
+ return -EINVAL;
+ }
+ writel(doorbell.word0, q->db_regaddr);
return 0;
}
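The *_SHIFT/*_MASK/*_WORD triplets added in the lpfc_hw4.h hunk earlier in this patch are consumed by the driver's generic bf_set()/bf_get() helpers. The sketch below paraphrases that machinery (it is not copied verbatim from lpfc_hw4.h) and builds a list-format WQ doorbell word the way the code above does; example_wq_doorbell and the helper function are hypothetical names.

#include <stdint.h>

/* Each field is described by <field>_SHIFT, a right-justified <field>_MASK,
 * and <field>_WORD naming the struct member that holds it. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
		((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* Two of the list-format WQ doorbell fields defined in this patch. */
#define lpfc_wq_db_list_fm_num_posted_SHIFT 24
#define lpfc_wq_db_list_fm_num_posted_MASK 0x00FF
#define lpfc_wq_db_list_fm_num_posted_WORD word0
#define lpfc_wq_db_list_fm_id_SHIFT 0
#define lpfc_wq_db_list_fm_id_MASK 0xFFFF
#define lpfc_wq_db_list_fm_id_WORD word0

struct example_wq_doorbell { /* stand-in for the driver's doorbell union */
	uint32_t word0;
};

static uint32_t example_wq_doorbell_word0(uint16_t queue_id)
{
	struct example_wq_doorbell db = { 0 };

	bf_set(lpfc_wq_db_list_fm_num_posted, &db, 1);
	bf_set(lpfc_wq_db_list_fm_id, &db, queue_id);
	return db.word0; /* the value later written with writel() */
}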
@@ -456,10 +463,20 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
/* Ring The Header Receive Queue Doorbell */
if (!(hq->host_index % hq->entry_repost)) {
doorbell.word0 = 0;
- bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
- hq->entry_repost);
- bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
- writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
+ if (hq->db_format == LPFC_DB_RING_FORMAT) {
+ bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
+ hq->entry_repost);
+ bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
+ } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
+ bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
+ hq->entry_repost);
+ bf_set(lpfc_rq_db_list_fm_index, &doorbell,
+ hq->host_index);
+ bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
+ } else {
+ return -EINVAL;
+ }
+ writel(doorbell.word0, hq->db_regaddr);
}
return put_index;
}
@@ -4939,7 +4956,7 @@ out_free_mboxq:
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
- uint8_t fcp_eqidx;
+ int fcp_eqidx;
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
@@ -5622,6 +5639,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
}
/* RPIs. */
count = phba->sli4_hba.max_cfg_param.max_rpi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3279 Invalid provisioning of "
+ "rpi:%d\n", count);
+ rc = -EINVAL;
+ goto err_exit;
+ }
base = phba->sli4_hba.max_cfg_param.rpi_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->sli4_hba.rpi_bmask = kzalloc(longs *
@@ -5644,6 +5668,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* VPIs. */
count = phba->sli4_hba.max_cfg_param.max_vpi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3280 Invalid provisioning of "
+ "vpi:%d\n", count);
+ rc = -EINVAL;
+ goto free_rpi_ids;
+ }
base = phba->sli4_hba.max_cfg_param.vpi_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->vpi_bmask = kzalloc(longs *
@@ -5666,6 +5697,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* XRIs. */
count = phba->sli4_hba.max_cfg_param.max_xri;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3281 Invalid provisioning of "
+ "xri:%d\n", count);
+ rc = -EINVAL;
+ goto free_vpi_ids;
+ }
base = phba->sli4_hba.max_cfg_param.xri_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->sli4_hba.xri_bmask = kzalloc(longs *
@@ -5689,6 +5727,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* VFIs. */
count = phba->sli4_hba.max_cfg_param.max_vfi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3282 Invalid provisioning of "
+ "vfi:%d\n", count);
+ rc = -EINVAL;
+ goto free_xri_ids;
+ }
base = phba->sli4_hba.max_cfg_param.vfi_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->sli4_hba.vfi_bmask = kzalloc(longs *
@@ -6599,7 +6644,7 @@ static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
uint32_t flag)
{
- MAILBOX_t *mb;
+ MAILBOX_t *mbx;
struct lpfc_sli *psli = &phba->sli;
uint32_t status, evtctr;
uint32_t ha_copy, hc_copy;
@@ -6653,7 +6698,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
psli = &phba->sli;
- mb = &pmbox->u.mb;
+ mbx = &pmbox->u.mb;
status = MBX_SUCCESS;
if (phba->link_state == LPFC_HBA_ERROR) {
@@ -6668,7 +6713,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
- if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+ if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
!(hc_copy & HC_MBINT_ENA)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -6722,7 +6767,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
- mb->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
psli->slistat.mbox_busy++;
@@ -6732,15 +6777,15 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Bsy vport: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Bsy: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
return MBX_BUSY;
@@ -6751,7 +6796,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
/* If we are not polling, we MUST be in SLI2 mode */
if (flag != MBX_POLL) {
if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
- (mb->mbxCommand != MBX_KILL_BOARD)) {
+ (mbx->mbxCommand != MBX_KILL_BOARD)) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
@@ -6773,23 +6818,23 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
- mb->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
- if (mb->mbxCommand != MBX_HEARTBEAT) {
+ if (mbx->mbxCommand != MBX_HEARTBEAT) {
if (pmbox->vport) {
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Send vport: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Send: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
}
@@ -6797,12 +6842,12 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
evtctr = psli->slistat.mbox_event;
/* next set own bit for the adapter and copy over command word */
- mb->mbxOwner = OWN_CHIP;
+ mbx->mbxOwner = OWN_CHIP;
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* Populate mbox extension offset word. */
if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
- *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
= (uint8_t *)phba->mbox_ext
- (uint8_t *)phba->mbox;
}
@@ -6814,11 +6859,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
pmbox->in_ext_byte_len);
}
/* Copy command data to host SLIM area */
- lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
} else {
/* Populate mbox extension offset word. */
if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
- *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
= MAILBOX_HBA_EXT_OFFSET;
/* Copy the mailbox extension data */
@@ -6828,24 +6873,24 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
pmbox->context2, pmbox->in_ext_byte_len);
}
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
- lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
}
/* First copy mbox command data to HBA SLIM, skip past first
word */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
- lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
+ lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
MAILBOX_CMD_SIZE - sizeof (uint32_t));
/* Next copy over first word, with mbxOwner set */
- ldata = *((uint32_t *)mb);
+ ldata = *((uint32_t *)mbx);
to_slim = phba->MBslimaddr;
writel(ldata, to_slim);
readl(to_slim); /* flush */
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
/* switch over to host mailbox */
psli->sli_flag |= LPFC_SLI_ACTIVE;
}
@@ -6920,7 +6965,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
/* First copy command data */
word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
MAILBOX_t *slimmb;
uint32_t slimword0;
/* Check real SLIM for any errors */
@@ -6947,7 +6992,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
- lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
lpfc_sli_pcimem_bcopy(phba->mbox_ext,
@@ -6956,7 +7001,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
}
} else {
/* First copy command data */
- lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
+ lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
@@ -6971,7 +7016,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
readl(phba->HAregaddr); /* flush */
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- status = mb->mbxStatus;
+ status = mbx->mbxStatus;
}
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -8370,7 +8415,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
* This is a continuation of a command (CX), so this
* sglq is on the active list
*/
- sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
+ sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
if (!sglq)
return IOCB_ERROR;
}
@@ -8855,12 +8900,6 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->prt[3].type = FC_TYPE_CT;
pring->prt[3].lpfc_sli_rcv_unsol_event =
lpfc_ct_unsol_event;
- /* abort unsolicited sequence */
- pring->prt[4].profile = 0; /* Mask 4 */
- pring->prt[4].rctl = FC_RCTL_BA_ABTS;
- pring->prt[4].type = FC_TYPE_BLS;
- pring->prt[4].lpfc_sli_rcv_unsol_event =
- lpfc_sli4_ct_abort_unsol_event;
break;
}
totiocbsize += (pring->sli.sli3.numCiocb *
@@ -11873,7 +11912,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
- uint32_t fcp_eqidx;
+ int fcp_eqidx;
/* Get the driver's phba structure from the dev_id */
fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
@@ -11975,7 +12014,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
struct lpfc_hba *phba;
irqreturn_t hba_irq_rc;
bool hba_handled = false;
- uint32_t fcp_eqidx;
+ int fcp_eqidx;
/* Get the driver's phba structure from the dev_id */
phba = (struct lpfc_hba *)dev_id;
@@ -12097,6 +12136,54 @@ out_fail:
}
/**
+ * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @pci_barset: PCI BAR set flag.
+ *
+ * This function ioremaps the specified PCI BAR address into host memory, if
+ * not already done, and returns the mapped address. The returned host
+ * memory address can be NULL.
+ */
+static void __iomem *
+lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+{
+ struct pci_dev *pdev;
+ unsigned long bar_map, bar_map_len;
+
+ if (!phba->pcidev)
+ return NULL;
+ else
+ pdev = phba->pcidev;
+
+ switch (pci_barset) {
+ case WQ_PCI_BAR_0_AND_1:
+ if (!phba->pci_bar0_memmap_p) {
+ bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+ bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
+ phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
+ }
+ return phba->pci_bar0_memmap_p;
+ case WQ_PCI_BAR_2_AND_3:
+ if (!phba->pci_bar2_memmap_p) {
+ bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
+ bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+ phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
+ }
+ return phba->pci_bar2_memmap_p;
+ case WQ_PCI_BAR_4_AND_5:
+ if (!phba->pci_bar4_memmap_p) {
+ bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
+ bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+ phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
+ }
+ return phba->pci_bar4_memmap_p;
+ default:
+ break;
+ }
+ return NULL;
+}
+
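Tying the pieces of this patch together: when the firmware reports dual-ULP (DUA) mode, each WQ/RQ CREATE response carries a PCI BAR selector and a doorbell_offset, and the queue's db_regaddr becomes the mapped BAR plus that offset. A minimal sketch of the map-once-and-cache pattern with the generic PCI API follows; cached_bar and example_queue_doorbell() are hypothetical names, not lpfc symbols.

#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *cached_bar; /* mapped lazily, reused for later queues */

static void __iomem *example_queue_doorbell(struct pci_dev *pdev, int bar,
					    u32 db_offset)
{
	if (!cached_bar)
		cached_bar = ioremap(pci_resource_start(pdev, bar),
				     pci_resource_len(pdev, bar));
	if (!cached_bar)
		return NULL;
	/* Per-queue doorbell register; the mapping is torn down once with
	 * iounmap() when the queues are destroyed. */
	return cached_bar + db_offset;
}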
+/**
* lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
* @phba: HBA structure that indicates port to create a queue on.
* @startq: The starting FCP EQ to modify
@@ -12673,6 +12760,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
struct dma_address *page;
+ void __iomem *bar_memmap_p;
+ uint32_t db_offset;
+ uint16_t pci_barset;
/* sanity check on queue memory */
if (!wq || !cq)
@@ -12696,6 +12786,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
cq->queue_id);
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
+
if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
wq->entry_count);
@@ -12723,6 +12814,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
}
+
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12740,6 +12835,47 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
status = -ENXIO;
goto out;
}
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+ wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
+ &wq_create->u.response);
+ if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
+ (wq->db_format != LPFC_DB_RING_FORMAT)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3265 WQ[%d] doorbell format not "
+ "supported: x%x\n", wq->queue_id,
+ wq->db_format);
+ status = -EINVAL;
+ goto out;
+ }
+ pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
+ &wq_create->u.response);
+ bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3263 WQ[%d] failed to memmap pci "
+ "barset:x%x\n", wq->queue_id,
+ pci_barset);
+ status = -ENOMEM;
+ goto out;
+ }
+ db_offset = wq_create->u.response.doorbell_offset;
+ if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
+ (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3252 WQ[%d] doorbell offset not "
+ "supported: x%x\n", wq->queue_id,
+ db_offset);
+ status = -EINVAL;
+ goto out;
+ }
+ wq->db_regaddr = bar_memmap_p + db_offset;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3264 WQ[%d]: barset:x%x, offset:x%x\n",
+ wq->queue_id, pci_barset, db_offset);
+ } else {
+ wq->db_format = LPFC_DB_LIST_FORMAT;
+ wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
+ }
wq->type = LPFC_WQ;
wq->assoc_qid = cq->queue_id;
wq->subtype = subtype;
@@ -12816,6 +12952,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ void __iomem *bar_memmap_p;
+ uint32_t db_offset;
+ uint16_t pci_barset;
/* sanity check on queue memory */
if (!hrq || !drq || !cq)
@@ -12894,6 +13033,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12911,6 +13053,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
status = -ENXIO;
goto out;
}
+
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+ hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
+ &rq_create->u.response);
+ if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
+ (hrq->db_format != LPFC_DB_RING_FORMAT)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3262 RQ [%d] doorbell format not "
+ "supported: x%x\n", hrq->queue_id,
+ hrq->db_format);
+ status = -EINVAL;
+ goto out;
+ }
+
+ pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
+ &rq_create->u.response);
+ bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3269 RQ[%d] failed to memmap pci "
+ "barset:x%x\n", hrq->queue_id,
+ pci_barset);
+ status = -ENOMEM;
+ goto out;
+ }
+
+ db_offset = rq_create->u.response.doorbell_offset;
+ if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
+ (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3270 RQ[%d] doorbell offset not "
+ "supported: x%x\n", hrq->queue_id,
+ db_offset);
+ status = -EINVAL;
+ goto out;
+ }
+ hrq->db_regaddr = bar_memmap_p + db_offset;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
+ hrq->queue_id, pci_barset, db_offset);
+ } else {
+ hrq->db_format = LPFC_DB_RING_FORMAT;
+ hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
+ }
hrq->type = LPFC_HRQ;
hrq->assoc_qid = cq->queue_id;
hrq->subtype = subtype;
@@ -12976,6 +13162,8 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
@@ -14063,6 +14251,40 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
}
/**
+ * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
+ * @vport: pointer to a virtual port
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function tries to abort the sequence assembled at the upper level
+ * protocol, described by the information in the basic abort @dmabuf. It
+ * checks whether such a pending context exists at the upper level protocol
+ * and, if so, cleans up the pending context.
+ *
+ * Return
+ * true -- if a matching pending context of the sequence was found and
+ * cleaned up at the ulp;
+ * false -- if no matching pending context of the sequence is present
+ * at the ulp.
+ **/
+static bool
+lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+ struct lpfc_hba *phba = vport->phba;
+ int handled;
+
+ /* Accepting abort at ulp with SLI4 only */
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return false;
+
+ /* Give interested upper level protocols a chance to handle the abort */
+ handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
+ if (handled)
+ return true;
+
+ return false;
+}
+
+/**
* lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
* @phba: Pointer to HBA context object.
* @cmd_iocbq: pointer to the command iocbq structure.
@@ -14077,8 +14299,14 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmd_iocbq,
struct lpfc_iocbq *rsp_iocbq)
{
- if (cmd_iocbq)
+ struct lpfc_nodelist *ndlp;
+
+ if (cmd_iocbq) {
+ ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
+ lpfc_nlp_put(ndlp);
+ lpfc_nlp_not_used(ndlp);
lpfc_sli_release_iocbq(phba, cmd_iocbq);
+ }
/* Failure means BLS ABORT RSP did not get delivered to remote node*/
if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
@@ -14118,9 +14346,10 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
* event after aborting the sequence handling.
**/
static void
-lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
- struct fc_frame_header *fc_hdr)
+lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
+ struct fc_frame_header *fc_hdr, bool aborted)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *ctiocb = NULL;
struct lpfc_nodelist *ndlp;
uint16_t oxid, rxid, xri, lxri;
@@ -14135,12 +14364,27 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
rxid = be16_to_cpu(fc_hdr->fh_rx_id);
- ndlp = lpfc_findnode_did(phba->pport, sid);
+ ndlp = lpfc_findnode_did(vport, sid);
if (!ndlp) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
- "1268 Find ndlp returned NULL for oxid:x%x "
- "SID:x%x\n", oxid, sid);
- return;
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "1268 Failed to allocate ndlp for "
+ "oxid:x%x SID:x%x\n", oxid, sid);
+ return;
+ }
+ lpfc_nlp_init(vport, ndlp, sid);
+ /* Put ndlp onto pport node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "3275 Failed to active ndlp found "
+ "for oxid:x%x SID:x%x\n", oxid, sid);
+ return;
+ }
}
/* Allocate buffer for rsp iocb */
@@ -14164,7 +14408,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- ctiocb->context1 = ndlp;
+ ctiocb->context1 = lpfc_nlp_get(ndlp);
ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
@@ -14183,14 +14427,24 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
if (lxri != NO_XRI)
lpfc_set_rrq_active(phba, ndlp, lxri,
(xri == oxid) ? rxid : oxid, 0);
- /* If the oxid maps to the FCP XRI range or if it is out of range,
- * send a BLS_RJT. The driver no longer has that exchange.
- * Override the IOCB for a BA_RJT.
+ /* For BA_ABTS from exchange responder, if the logical xri with
+ * the oxid maps to the FCP XRI range, the port no longer has
+ * that exchange context, so send a BLS_RJT. Override the IOCB for
+ * a BA_RJT.
*/
- if (xri > (phba->sli4_hba.max_cfg_param.max_xri +
- phba->sli4_hba.max_cfg_param.xri_base) ||
- xri > (lpfc_sli4_get_els_iocb_cnt(phba) +
- phba->sli4_hba.max_cfg_param.xri_base)) {
+ if ((fctl & FC_FC_EX_CTX) &&
+ (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
+ icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+ bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+ bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+ bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ }
+
+ /* If BA_ABTS failed to abort a partially assembled receive sequence,
+ * the driver no longer has that exchange, so send a BLS_RJT. Override
+ * the IOCB for a BA_RJT.
+ */
+ if (aborted == false) {
icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
@@ -14214,17 +14468,19 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
/* Xmit CT abts response on exchange <xid> */
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "2925 Failed to issue CT ABTS RSP x%x on "
- "xri x%x, Data x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid,
- phba->link_state);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2925 Failed to issue CT ABTS RSP x%x on "
+ "xri x%x, Data x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+ phba->link_state);
+ lpfc_nlp_put(ndlp);
+ ctiocb->context1 = NULL;
lpfc_sli_release_iocbq(phba, ctiocb);
}
}
@@ -14249,32 +14505,25 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
struct lpfc_hba *phba = vport->phba;
struct fc_frame_header fc_hdr;
uint32_t fctl;
- bool abts_par;
+ bool aborted;
/* Make a copy of fc_hdr before the dmabuf being released */
memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
if (fctl & FC_FC_EX_CTX) {
- /*
- * ABTS sent by responder to exchange, just free the buffer
- */
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ /* ABTS by responder to exchange, no cleanup needed */
+ aborted = true;
} else {
- /*
- * ABTS sent by initiator to exchange, need to do cleanup
- */
- /* Try to abort partially assembled seq */
- abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
-
- /* Send abort to ULP if partially seq abort failed */
- if (abts_par == false)
- lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
- else
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ /* ABTS by initiator to exchange, need to do cleanup */
+ aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
+ if (aborted == false)
+ aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
}
- /* Send basic accept (BA_ACC) to the abort requester */
- lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+
+ /* Respond with BA_ACC or BA_RJT accordingly */
+ lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
/**
@@ -15307,10 +15556,13 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
uint16_t next_fcf_index;
+initial_priority:
/* Search start from next bit of currently registered FCF index */
+ next_fcf_index = phba->fcf.current_rec.fcf_indx;
+
next_priority:
- next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
- LPFC_SLI4_FCF_TBL_INDX_MAX;
+ /* Determine the next fcf index to check */
+ next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
LPFC_SLI4_FCF_TBL_INDX_MAX,
next_fcf_index);
@@ -15337,7 +15589,7 @@ next_priority:
* at that level and continue the selection process.
*/
if (lpfc_check_next_fcf_pri_level(phba))
- goto next_priority;
+ goto initial_priority;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2844 No roundrobin failover FCF available\n");
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 44c427a45d66..be02b59ea279 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -139,6 +139,10 @@ struct lpfc_queue {
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
+ uint16_t db_format;
+#define LPFC_DB_RING_FORMAT 0x01
+#define LPFC_DB_LIST_FORMAT 0x02
+ void __iomem *db_regaddr;
/* For q stats */
uint32_t q_cnt_1;
uint32_t q_cnt_2;
@@ -508,6 +512,10 @@ struct lpfc_sli4_hba {
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
+ uint8_t fw_func_mode; /* FW function protocol mode */
+ uint32_t ulp0_mode; /* ULP0 protocol mode */
+ uint32_t ulp1_mode; /* ULP1 protocol mode */
+
/* Setup information for various queue parameters */
int eq_esize;
int eq_ecount;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ba596e854bbc..f3b7795a296b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.36"
+#define LPFC_DRIVER_VERSION "8.3.37"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3b2365c8eab2..408d2548a748 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.504.01.00-rc1"
-#define MEGASAS_RELDATE "Oct. 1, 2012"
-#define MEGASAS_EXT_VERSION "Mon. Oct. 1 17:00:00 PDT 2012"
+#define MEGASAS_VERSION "06.506.00.00-rc1"
+#define MEGASAS_RELDATE "Feb. 9, 2013"
+#define MEGASAS_EXT_VERSION "Sat. Feb. 9 17:00:00 PDT 2013"
/*
* Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 66a0fec0437b..9d53540207ec 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v06.504.01.00-rc1
+ * Version : v06.506.00.00-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 74030aff69ad..a7d56687bfca 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1206,7 +1206,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
}
io_request->Control |= (0x4 << 26);
- io_request->EEDPBlockSize = MEGASAS_EEDPBLOCKSIZE;
+ io_request->EEDPBlockSize = scp->device->sector_size;
} else {
/* Some drives don't support 16/12 byte CDB's, convert to 10 */
if (((cdb_len == 12) || (cdb_len == 16)) &&
@@ -1511,7 +1511,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
io_request->Function = 0;
- io_request->DevHandle =
+ if (fusion->fast_path_io)
+ io_request->DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
io_request->RaidContext.timeoutValue =
local_map_ptr->raidMap.fpPdIoTimeoutSec;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index a7c64f051996..f68a3cd11d5d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -61,7 +61,6 @@
#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60
-#define MEGASAS_EEDPBLOCKSIZE 512
/*
* Raid context flags
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index ffd85c511c8e..bcb23d28b3e8 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -155,7 +155,7 @@ _base_fault_reset_work(struct work_struct *work)
struct task_struct *p;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if (ioc->shost_recovery)
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
goto rearm_timer;
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
@@ -164,6 +164,20 @@ _base_fault_reset_work(struct work_struct *work)
printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
ioc->name, __func__);
+ /* It may be possible that EEH recovery can resolve some of the
+ * pci bus failure issues rather than removing the dead ioc
+ * function by treating the controller as non-operational. So
+ * priority is given to EEH recovery here. If it does not
+ * resolve the issue, the mpt2sas driver will consider the
+ * controller non-operational and remove the dead ioc
+ * function.
+ */
+ if (ioc->non_operational_loop++ < 5) {
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
+ flags);
+ goto rearm_timer;
+ }
+
/*
* Call _scsih_flush_pending_cmds callback so that we flush all
* pending commands back to OS. This call is required to avoid
@@ -193,6 +207,8 @@ _base_fault_reset_work(struct work_struct *work)
return; /* don't rearm timer */
}
+ ioc->non_operational_loop = 0;
+
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
@@ -2007,6 +2023,14 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RMS25KB040_BRANDING);
break;
+ case MPT2SAS_INTEL_RMS25LB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25LB040_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25LB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25LB080_BRANDING);
+ break;
default:
break;
}
@@ -4386,6 +4410,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
if (missing_delay[0] != -1 && missing_delay[1] != -1)
_base_update_missing_delay(ioc, missing_delay[0],
missing_delay[1]);
+ ioc->non_operational_loop = 0;
return 0;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 543d8d637479..4caaac13682f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -165,6 +165,10 @@
"Intel(R) Integrated RAID Module RMS25KB080"
#define MPT2SAS_INTEL_RMS25KB040_BRANDING \
"Intel(R) Integrated RAID Module RMS25KB040"
+#define MPT2SAS_INTEL_RMS25LB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25LB040"
+#define MPT2SAS_INTEL_RMS25LB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25LB080"
#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
"Intel Integrated RAID Module RMS2LL080"
#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
@@ -180,6 +184,8 @@
#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517
#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518
#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519
+#define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A
+#define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B
#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
@@ -835,6 +841,7 @@ struct MPT2SAS_ADAPTER {
u16 cpu_msix_table_sz;
u32 ioc_reset_count;
MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
+ u32 non_operational_loop;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 04f8010f0770..18360032a520 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -42,7 +42,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -1310,7 +1309,6 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
void *sg_local, *chain;
u32 chain_offset;
u32 chain_length;
- u32 chain_flags;
int sges_left;
u32 sges_in_segment;
u8 simple_sgl_flags;
@@ -1356,8 +1354,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
- /* initializing the chain flags and pointers */
- chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
+ /* initializing the pointers */
chain_req = _base_get_chain_buffer_tracker(ioc, smid);
if (!chain_req)
return -1;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index ce7e59b2fc08..1df9ed4f371d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -41,7 +41,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 8af944d7d13d..054d5231c974 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -42,7 +42,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -3136,7 +3135,7 @@ _ctl_diag_trigger_mpi_store(struct device *cdev,
spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
memset(&ioc->diag_trigger_mpi, 0,
- sizeof(struct SL_WH_EVENT_TRIGGERS_T));
+ sizeof(ioc->diag_trigger_mpi));
memcpy(&ioc->diag_trigger_mpi, buf, sz);
if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 6421a06c4ce2..dcbf7c880cb2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -41,7 +41,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -2755,13 +2754,11 @@ _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
int i;
u16 handle;
u16 reason_code;
- u8 phy_number;
for (i = 0; i < event_data->NumEntries; i++) {
handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
if (!handle)
continue;
- phy_number = event_data->StartPhyNum + i;
reason_code = event_data->PHY[i].PhyStatus &
MPI2_EVENT_SAS_TOPO_RC_MASK;
if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index da6c5f25749c..6f8d6213040b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -42,7 +42,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 078c63913b55..532110f4562a 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -316,10 +316,13 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
struct mvs_task_exec_info *tei)
{
int elem, rc, i;
+ struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct domain_device *dev = task->dev;
struct asd_sas_port *sas_port = dev->port;
+ struct sas_phy *sphy = dev->phy;
+ struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct scatterlist *sg_req, *sg_resp;
u32 req_len, resp_len, tag = tei->tag;
void *buf_tmp;
@@ -392,7 +395,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
slot->tx = mvi->tx_prod;
mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
TXQ_MODE_I | tag |
- (sas_port->phy_mask << TXQ_PHY_SHIFT));
+ (MVS_PHY_ID << TXQ_PHY_SHIFT));
hdr->flags |= flags;
hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
@@ -438,11 +441,14 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
static int mvs_task_prep_ata(struct mvs_info *mvi,
struct mvs_task_exec_info *tei)
{
+ struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = dev->lldd_dev;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct asd_sas_port *sas_port = dev->port;
+ struct sas_phy *sphy = dev->phy;
+ struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct mvs_slot_info *slot;
void *buf_prd;
u32 tag = tei->tag, hdr_tag;
@@ -462,7 +468,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
slot->tx = mvi->tx_prod;
del_q = TXQ_MODE_I | tag |
(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
- (sas_port->phy_mask << TXQ_PHY_SHIFT) |
+ (MVS_PHY_ID << TXQ_PHY_SHIFT) |
(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 2ae77a0394b2..9f3cc13a5ce7 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -76,6 +76,7 @@ extern struct kmem_cache *mvs_task_list_cache;
(__mc) != 0 ; \
(++__lseq), (__mc) >>= 1)
+#define MVS_PHY_ID (1U << sas_phy->id)
#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
#define UNASSOC_D2H_FIS(id) \
((void *) mvi->rx_fis + 0x100 * id)
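The new MVS_PHY_ID macro expands to (1U << sas_phy->id), so it quietly requires a local variable named sas_phy at every expansion site, which is why both mvs_task_prep_smp() and mvs_task_prep_ata() now declare one. A tiny standalone sketch of this "macro captures a local" pattern; the names here are illustrative, not the driver's:

    #include <stdio.h>

    struct phy { unsigned int id; };

    /* Like MVS_PHY_ID: the macro silently uses whatever 'phy' is in scope. */
    #define PHY_BIT (1U << phy->id)

    unsigned int queue_word_for(const struct phy *phy)
    {
        /* Compiles only because a variable literally named 'phy' exists here. */
        return PHY_BIT;
    }

    int main(void)
    {
        struct phy p = { .id = 3 };
        printf("0x%x\n", queue_word_for(&p));   /* prints 0x8 */
        return 0;
    }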
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index c06b8e5aa2cf..d8293f25ca33 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -144,6 +144,10 @@ static int _osd_get_print_system_info(struct osd_dev *od,
odi->osdname_len = get_attrs[a].len;
/* Avoid NULL for memcmp optimization 0-length is good enough */
odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
+ if (!odi->osdname) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (odi->osdname_len)
memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
OSD_INFO("OSD_NAME [%s]\n", odi->osdname);
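The osd_initiator hunk above adds the missing allocation-failure check after kzalloc() and unwinds through the function's existing out label. A minimal userspace sketch of the same allocate, check, copy, single-exit shape; the struct and names are illustrative, not taken from the driver:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct dev_info {
        char  *name;
        size_t name_len;
    };

    /* Duplicate a (possibly empty) name; always NUL-terminated; -ENOMEM on failure. */
    int set_dev_name(struct dev_info *di, const char *src, size_t len)
    {
        int ret = 0;

        di->name_len = len;
        di->name = calloc(1, len + 1);   /* +1 keeps a NUL even when len == 0 */
        if (!di->name) {
            ret = -ENOMEM;
            goto out;
        }
        if (len)
            memcpy(di->name, src, len);
    out:
        return ret;
    }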
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 4c9fe733fe88..3d5e522e00fc 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -140,7 +140,8 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
pci_free_consistent(pm8001_ha->pdev,
- pm8001_ha->memoryMap.region[i].element_size,
+ (pm8001_ha->memoryMap.region[i].total_len +
+ pm8001_ha->memoryMap.region[i].alignment),
pm8001_ha->memoryMap.region[i].virt_ptr,
pm8001_ha->memoryMap.region[i].phys_addr);
}
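The pm8001 fix frees each DMA region with the same size expression used at allocation time (total_len plus alignment) rather than the per-element size. A small sketch of keeping that size next to the pointer so the alloc and free calls cannot drift apart; plain malloc/free stand in for the pci_*_consistent pair, which additionally takes the size on free, and the struct is illustrative:

    #include <stdlib.h>

    struct mem_region {
        void  *virt_ptr;
        size_t total_len;   /* payload size requested by the caller          */
        size_t alignment;   /* extra padding allocated alongside the payload */
    };

    /* Both helpers compute the size from the same stored fields. */
    size_t region_size(const struct mem_region *r)
    {
        return r->total_len + r->alignment;
    }

    int region_alloc(struct mem_region *r, size_t total_len, size_t alignment)
    {
        r->total_len = total_len;
        r->alignment = alignment;
        r->virt_ptr  = malloc(region_size(r));
        return r->virt_ptr ? 0 : -1;
    }

    void region_free(struct mem_region *r)
    {
        if (r->virt_ptr) {
            /* A DMA-style free would be passed region_size(r) here. */
            free(r->virt_ptr);
            r->virt_ptr = NULL;
        }
    }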
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 83d798428c10..1d82eef4e1eb 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -1272,22 +1272,29 @@ qla2x00_thermal_temp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- int rval = QLA_FUNCTION_FAILED;
- uint16_t temp, frac;
+ uint16_t temp = 0;
- if (!vha->hw->flags.thermal_supported)
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (!vha->hw->thermal_support) {
+ ql_log(ql_log_warn, vha, 0x70db,
+ "Thermal not supported by this card.\n");
+ goto done;
+ }
- temp = frac = 0;
- if (qla2x00_reset_active(vha))
- ql_log(ql_log_warn, vha, 0x707b,
- "ISP reset active.\n");
- else if (!vha->hw->flags.eeh_busy)
- rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
- if (rval != QLA_SUCCESS)
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (qla2x00_reset_active(vha)) {
+ ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
+ goto done;
+ }
+
+ if (vha->hw->flags.eeh_busy) {
+ ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
+ goto done;
+ }
+
+ if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
- return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
+done:
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t
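The reworked qla2x00_thermal_temp_show() turns the chained conditionals into a series of guard checks that each log a reason and jump to a single done label, which prints an empty line. A compact userspace sketch of that guard-then-single-exit shape for a show-style function; the predicates are stand-ins, not driver calls:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* Stand-in predicates for the real hardware checks. */
    bool thermal_supported(void) { return true;  }
    bool reset_active(void)      { return false; }
    bool eeh_busy(void)          { return false; }
    int  read_temp(unsigned *t)  { *t = 42; return 0; }

    int thermal_temp_show(char *buf)
    {
        unsigned int temp = 0;

        if (!thermal_supported()) {
            fprintf(stderr, "Thermal not supported by this card.\n");
            goto done;
        }
        if (reset_active()) {
            fprintf(stderr, "ISP reset active.\n");
            goto done;
        }
        if (eeh_busy()) {
            fprintf(stderr, "PCI EEH busy.\n");
            goto done;
        }
        if (read_temp(&temp) == 0)
            return snprintf(buf, PAGE_SIZE, "%d\n", temp);
    done:
        return snprintf(buf, PAGE_SIZE, "\n");
    }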
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 9f34dedcdad7..ad54099cb805 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -27,7 +27,7 @@ void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
srb_t *sp = (srb_t *)ptr;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ struct scsi_qla_host *vha = sp->fcport->vha;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
struct qla_hw_data *ha = vha->hw;
@@ -40,7 +40,7 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_ELS_CMD_HST)
kfree(sp->fcport);
- mempool_free(sp, vha->hw->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
}
int
@@ -368,7 +368,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
rval = -EIO;
goto done_unmap_sg;
}
@@ -515,7 +515,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7017,
"qla2x00_start_sp failed=%d.\n", rval);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
rval = -EIO;
goto done_free_fcport;
}
@@ -531,6 +531,75 @@ done_unmap_sg:
done:
return rval;
}
+
+/* Disable loopback mode */
+static inline int
+qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+ int wait, int wait2)
+{
+ int ret = 0;
+ int rval = 0;
+ uint16_t new_config[4];
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ goto done_reset_internal;
+
+ memset(new_config, 0, sizeof(new_config));
+ if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_INTERNAL_LOOPBACK ||
+ (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_EXTERNAL_LOOPBACK) {
+ new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
+ ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
+ (new_config[0] & INTERNAL_LOOPBACK_MASK));
+ memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
+
+ ha->notify_dcbx_comp = wait;
+ ha->notify_lb_portup_comp = wait2;
+
+ ret = qla81xx_set_port_config(vha, new_config);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7025,
+ "Set port config failed.\n");
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ }
+
+ /* Wait for DCBX complete event */
+ if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
+ (DCBX_COMP_TIMEOUT * HZ))) {
+ ql_dbg(ql_dbg_user, vha, 0x7026,
+ "DCBX completion not received.\n");
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x7027,
+ "DCBX completion received.\n");
+
+ if (wait2 &&
+ !wait_for_completion_timeout(&ha->lb_portup_comp,
+ (LB_PORTUP_COMP_TIMEOUT * HZ))) {
+ ql_dbg(ql_dbg_user, vha, 0x70c5,
+ "Port up completion not received.\n");
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x70c6,
+ "Port up completion received.\n");
+
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ }
+done_reset_internal:
+ return rval;
+}
+
/*
* Set the port configuration to enable the internal or external loopback
* depending on the loopback mode.
@@ -566,9 +635,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
}
/* Wait for DCBX complete event */
- if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
+ if (!wait_for_completion_timeout(&ha->dcbx_comp,
+ (DCBX_COMP_TIMEOUT * HZ))) {
ql_dbg(ql_dbg_user, vha, 0x7022,
- "State change notification not received.\n");
+ "DCBX completion not received.\n");
+ ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
+ /*
+ * If the reset of the loopback mode doesn't work take a FCoE
+ * dump and reset the chip.
+ */
+ if (ret) {
+ ha->isp_ops->fw_dump(vha, 0);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
rval = -EINVAL;
} else {
if (ha->flags.idc_compl_status) {
@@ -578,7 +657,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
ha->flags.idc_compl_status = 0;
} else
ql_dbg(ql_dbg_user, vha, 0x7023,
- "State change received.\n");
+ "DCBX completion received.\n");
}
ha->notify_dcbx_comp = 0;
@@ -587,57 +666,6 @@ done_set_internal:
return rval;
}
-/* Disable loopback mode */
-static inline int
-qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
- int wait)
-{
- int ret = 0;
- int rval = 0;
- uint16_t new_config[4];
- struct qla_hw_data *ha = vha->hw;
-
- if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
- goto done_reset_internal;
-
- memset(new_config, 0 , sizeof(new_config));
- if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
- ENABLE_INTERNAL_LOOPBACK ||
- (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
- ENABLE_EXTERNAL_LOOPBACK) {
- new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
- ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
- (new_config[0] & INTERNAL_LOOPBACK_MASK));
- memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
-
- ha->notify_dcbx_comp = wait;
- ret = qla81xx_set_port_config(vha, new_config);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x7025,
- "Set port config failed.\n");
- ha->notify_dcbx_comp = 0;
- rval = -EINVAL;
- goto done_reset_internal;
- }
-
- /* Wait for DCBX complete event */
- if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
- (20 * HZ))) {
- ql_dbg(ql_dbg_user, vha, 0x7026,
- "State change notification not received.\n");
- ha->notify_dcbx_comp = 0;
- rval = -EINVAL;
- goto done_reset_internal;
- } else
- ql_dbg(ql_dbg_user, vha, 0x7027,
- "State change received.\n");
-
- ha->notify_dcbx_comp = 0;
- }
-done_reset_internal:
- return rval;
-}
-
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
@@ -739,6 +767,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
+
if (qla81xx_get_port_config(vha, config)) {
ql_log(ql_log_warn, vha, 0x701f,
"Get port config failed.\n");
@@ -746,6 +775,14 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
goto done_free_dma_rsp;
}
+ if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
+ ql_dbg(ql_dbg_user, vha, 0x70c4,
+ "Loopback operation already in "
+ "progress.\n");
+ rval = -EAGAIN;
+ goto done_free_dma_rsp;
+ }
+
ql_dbg(ql_dbg_user, vha, 0x70c0,
"elreq.options=%04x\n", elreq.options);
@@ -755,7 +792,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
config, new_config, elreq.options);
else
rval = qla81xx_reset_loopback_mode(vha,
- config, 1);
+ config, 1, 0);
else
rval = qla81xx_set_loopback_mode(vha, config,
new_config, elreq.options);
@@ -772,14 +809,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
command_sent = INT_DEF_LB_LOOPBACK_CMD;
rval = qla2x00_loopback_test(vha, &elreq, response);
- if (new_config[0]) {
- /* Revert back to original port config
- * Also clear internal loopback
- */
- qla81xx_reset_loopback_mode(vha,
- new_config, 0);
- }
-
if (response[0] == MBS_COMMAND_ERROR &&
response[1] == MBS_LB_RESET) {
ql_log(ql_log_warn, vha, 0x7029,
@@ -788,15 +817,39 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_chip_reset(vha);
/* Also reset the MPI */
- if (qla81xx_restart_mpi_firmware(vha) !=
- QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x702a,
- "MPI reset failed.\n");
+ if (IS_QLA81XX(ha)) {
+ if (qla81xx_restart_mpi_firmware(vha) !=
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x702a,
+ "MPI reset failed.\n");
+ }
}
rval = -EIO;
goto done_free_dma_rsp;
}
+
+ if (new_config[0]) {
+ int ret;
+
+ /* Revert back to original port config
+ * Also clear internal loopback
+ */
+ ret = qla81xx_reset_loopback_mode(vha,
+ new_config, 0, 1);
+ if (ret) {
+ /*
+ * If the reset of the loopback mode
+ * doesn't work take FCoE dump and then
+ * reset the chip.
+ */
+ ha->isp_ops->fw_dump(vha, 0);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+
+ }
+
} else {
type = "FC_BSG_HST_VENDOR_LOOPBACK";
ql_dbg(ql_dbg_user, vha, 0x702b,
@@ -1950,7 +2003,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
if (((sp->type == SRB_CT_CMD) ||
@@ -1985,6 +2038,6 @@ done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (bsg_job->request->msgcode == FC_BSG_HST_CT)
kfree(sp->fcport);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
return 0;
}
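In the loopback path above, a failed qla81xx_reset_loopback_mode() is no longer ignored: the code escalates to a firmware dump and schedules a full chip reset. A minimal sketch of that "try the targeted recovery first, escalate when it fails" shape; the recovery functions are stubs, not driver APIs:

    #include <stdio.h>

    /* Stubs standing in for the targeted and the heavyweight recovery paths. */
    int  reset_loopback(void)     { return -1; }   /* pretend the cheap fix fails */
    void capture_fw_dump(void)    { puts("dumping firmware state"); }
    void request_chip_reset(void) { puts("scheduling full chip reset"); }

    void recover_from_timeout(void)
    {
        /* First try the cheap, targeted fix. */
        if (reset_loopback() == 0)
            return;

        /* It failed: preserve state for debugging, then escalate. */
        capture_fw_dump();
        request_chip_reset();
    }

    int main(void)
    {
        recover_from_timeout();
        return 0;
    }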
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 37b8b7ba7421..e9f6b9bbf29a 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 53f9e492f9dc..1626de52e32a 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -11,20 +11,21 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0125 | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x114f | 0x111a-0x111b |
+ * | Module Init and Probe | 0x0126 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x115b | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
* | Device Discovery | 0x2087 | 0x2020-0x2022, |
* | | | 0x2016 |
- * | Queue Command and IO tracing | 0x3030 | 0x3006-0x300b |
+ * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x302d-0x302e |
* | DPC Thread | 0x401d | 0x4002,0x4013 |
* | Async Events | 0x5071 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
* | Timer Routines | 0x6011 | |
- * | User Space Interactions | 0x70c3 | 0x7018,0x702e, |
+ * | User Space Interactions | 0x70c4 | 0x7018,0x702e, |
+ * | | | 0x7020,0x7024, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
* | | | 0x708c, |
@@ -35,11 +36,11 @@
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb084 | 0xb002,0xb024 |
+ * | ISP82XX Specific | 0xb086 | 0xb002,0xb024 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd010 | |
- * | Target Mode | 0xe06f | |
- * | Target Mode Management | 0xf071 | |
+ * | Target Mode | 0xe070 | |
+ * | Target Mode Management | 0xf072 | |
* | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
*/
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 8f911c0b1e74..35e20b4f8b6c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6e7727f46d43..c6509911772b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -37,6 +37,7 @@
#include "qla_nx.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
+#define QLA2XXX_MANUFACTURER "QLogic Corporation"
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
@@ -253,8 +254,8 @@
#define LOOP_DOWN_TIME 255 /* 240 */
#define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30)
-/* Maximum outstanding commands in ISP queues (1-65535) */
-#define MAX_OUTSTANDING_COMMANDS 1024
+#define DEFAULT_OUTSTANDING_COMMANDS 1024
+#define MIN_OUTSTANDING_COMMANDS 128
/* ISP request and response entry counts (37-65535) */
#define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */
@@ -537,6 +538,8 @@ struct device_reg_25xxmq {
uint32_t req_q_out;
uint32_t rsp_q_in;
uint32_t rsp_q_out;
+ uint32_t atio_q_in;
+ uint32_t atio_q_out;
};
typedef union {
@@ -563,6 +566,9 @@ typedef union {
&(reg)->u.isp2100.mailbox5 : \
&(reg)->u.isp2300.rsp_q_out)
+#define ISP_ATIO_Q_IN(vha) (vha->hw->tgt.atio_q_in)
+#define ISP_ATIO_Q_OUT(vha) (vha->hw->tgt.atio_q_out)
+
#define MAILBOX_REG(ha, reg, num) \
(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
(num < 8 ? \
@@ -762,8 +768,8 @@ typedef struct {
#define MBC_PORT_LOGOUT 0x56 /* Port Logout request */
#define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */
#define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */
-#define MBC_GET_RNID_PARAMS 0x5a /* Data Rate */
-#define MBC_DATA_RATE 0x5d /* Get RNID parameters */
+#define MBC_GET_RNID_PARAMS 0x5a /* Get RNID parameters */
+#define MBC_DATA_RATE 0x5d /* Data Rate */
#define MBC_INITIALIZE_FIRMWARE 0x60 /* Initialize firmware */
#define MBC_INITIATE_LIP 0x62 /* Initiate Loop */
/* Initialization Procedure */
@@ -809,6 +815,7 @@ typedef struct {
#define MBC_HOST_MEMORY_COPY 0x53 /* Host Memory Copy. */
#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */
#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
+#define MBC_LINK_INITIALIZATION 0x72 /* Do link initialization. */
#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
#define MBC_PORT_RESET 0x120 /* Port Reset */
#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */
@@ -856,6 +863,9 @@ typedef struct {
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_SET_VERSION 0x9
+#define RNID_TYPE_ASIC_TEMP 0xC
+
/*
* Firmware state codes from get firmware state mailbox command
*/
@@ -1841,9 +1851,6 @@ typedef struct fc_port {
uint8_t scan_state;
} fc_port_t;
-#define QLA_FCPORT_SCAN_NONE 0
-#define QLA_FCPORT_SCAN_FOUND 1
-
/*
* Fibre channel port/lun states.
*/
@@ -2533,8 +2540,10 @@ struct req_que {
uint16_t qos;
uint16_t vp_idx;
struct rsp_que *rsp;
- srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
+ srb_t **outstanding_cmds;
uint32_t current_outstanding_cmd;
+ uint16_t num_outstanding_cmds;
+#define MAX_Q_DEPTH 32
int max_q_depth;
};
@@ -2557,11 +2566,13 @@ struct qlt_hw_data {
struct atio *atio_ring_ptr; /* Current address. */
uint16_t atio_ring_index; /* Current index. */
uint16_t atio_q_length;
+ uint32_t __iomem *atio_q_in;
+ uint32_t __iomem *atio_q_out;
void *target_lport_ptr;
struct qla_tgt_func_tmpl *tgt_ops;
struct qla_tgt *qla_tgt;
- struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+ struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
uint16_t current_handle;
struct qla_tgt_vp_map *tgt_vp_map;
@@ -2618,7 +2629,6 @@ struct qla_hw_data {
uint32_t nic_core_hung:1;
uint32_t quiesce_owner:1;
- uint32_t thermal_supported:1;
uint32_t nic_core_reset_hdlr_active:1;
uint32_t nic_core_reset_owner:1;
uint32_t isp82xx_no_md_cap:1;
@@ -2788,6 +2798,8 @@ struct qla_hw_data {
#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha))
#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
(((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
/* HBA serial number */
uint8_t serial0;
@@ -2870,7 +2882,13 @@ struct qla_hw_data {
struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
+ struct completion lb_portup_comp; /* Used to wait for link up during
+ * loopback */
+#define DCBX_COMP_TIMEOUT 20
+#define LB_PORTUP_COMP_TIMEOUT 10
+
int notify_dcbx_comp;
+ int notify_lb_portup_comp;
struct mutex selflogin_lock;
/* Basic firmware related information. */
@@ -2887,6 +2905,7 @@ struct qla_hw_data {
#define RISC_START_ADDRESS_2300 0x800
#define RISC_START_ADDRESS_2400 0x100000
uint16_t fw_xcb_count;
+ uint16_t fw_iocb_count;
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
@@ -3056,7 +3075,16 @@ struct qla_hw_data {
struct work_struct idc_state_handler;
struct work_struct nic_core_unrecoverable;
+#define HOST_QUEUE_RAMPDOWN_INTERVAL (60 * HZ)
+#define HOST_QUEUE_RAMPUP_INTERVAL (30 * HZ)
+ unsigned long host_last_rampdown_time;
+ unsigned long host_last_rampup_time;
+ int cfg_lun_q_depth;
+
struct qlt_hw_data tgt;
+ uint16_t thermal_support;
+#define THERMAL_SUPPORT_I2C BIT_0
+#define THERMAL_SUPPORT_ISP BIT_1
};
/*
@@ -3115,6 +3143,8 @@ typedef struct scsi_qla_host {
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
#define SCR_PENDING 21 /* SCR in target mode */
+#define HOST_RAMP_DOWN_QUEUE_DEPTH 22
+#define HOST_RAMP_UP_QUEUE_DEPTH 23
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -3248,8 +3278,6 @@ struct qla_tgt_vp_map {
#define NVRAM_DELAY() udelay(10)
-#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS+1)
-
/*
* Flash support definitions
*/
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 706c4f7bc7c9..792a29294b62 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index be6d61a89edc..1ac2b0e3a0e1 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -300,7 +300,8 @@ struct init_cb_24xx {
uint32_t prio_request_q_address[2];
uint16_t msix;
- uint8_t reserved_2[6];
+ uint16_t msix_atio;
+ uint8_t reserved_2[4];
uint16_t atio_q_inpointer;
uint16_t atio_q_length;
@@ -1387,9 +1388,7 @@ struct qla_flt_header {
#define FLT_REG_FCP_PRIO_0 0x87
#define FLT_REG_FCP_PRIO_1 0x88
#define FLT_REG_FCOE_FW 0xA4
-#define FLT_REG_FCOE_VPD_0 0xA9
#define FLT_REG_FCOE_NVRAM_0 0xAA
-#define FLT_REG_FCOE_VPD_1 0xAB
#define FLT_REG_FCOE_NVRAM_1 0xAC
struct qla_flt_region {
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2411d1a12b26..eb3ca21a7f17 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -55,7 +55,7 @@ extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
-extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *, uint16_t *);
+extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *);
extern void qla84xx_put_chip(struct scsi_qla_host *);
@@ -84,6 +84,9 @@ extern int qla83xx_nic_core_reset(scsi_qla_host_t *);
extern void qla83xx_reset_ownership(scsi_qla_host_t *);
extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
+extern int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -94,6 +97,7 @@ extern int qlport_down_retry;
extern int ql2xplogiabsentdevice;
extern int ql2xloginretrycount;
extern int ql2xfdmienable;
+extern int ql2xmaxqdepth;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xiidmaenable;
@@ -278,6 +282,9 @@ extern int
qla2x00_get_port_name(scsi_qla_host_t *, uint16_t, uint8_t *, uint8_t);
extern int
+qla24xx_link_initialize(scsi_qla_host_t *);
+
+extern int
qla2x00_lip_reset(scsi_qla_host_t *);
extern int
@@ -351,6 +358,9 @@ extern int
qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
extern int
+qla2x00_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
@@ -436,6 +446,7 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
+extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
extern int qla2x00_beacon_on(struct scsi_qla_host *);
extern int qla2x00_beacon_off(struct scsi_qla_host *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 01efc0e9cc36..9b455250c101 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -1328,8 +1328,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Manufacturer. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
- strcpy(eiter->a.manufacturer, "QLogic Corporation");
- alen = strlen(eiter->a.manufacturer);
+ alen = strlen(QLA2XXX_MANUFACTURER);
+ strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1649,8 +1649,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
/* OS device name. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
- strcpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME);
- alen = strlen(eiter->a.os_dev_name);
+ alen = strlen(QLA2XXX_DRIVER_NAME);
+ strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 563eee3fa924..edf4d14a1335 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -70,9 +70,7 @@ qla2x00_sp_free(void *data, void *ptr)
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
del_timer(&iocb->timer);
- mempool_free(sp, vha->hw->srb_mempool);
-
- QLA_VHA_MARK_NOT_BUSY(vha);
+ qla2x00_rel_sp(vha, sp);
}
/* Asynchronous Login/Logout Routines -------------------------------------- */
@@ -525,7 +523,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
vha->flags.reset_active = 0;
ha->flags.pci_channel_io_perm_failure = 0;
ha->flags.eeh_busy = 0;
- ha->flags.thermal_supported = 1;
+ ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->device_flags = DFLG_NO_CABLE;
@@ -621,6 +619,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
qla24xx_read_fcp_prio_cfg(vha);
+ qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
+
return (rval);
}
@@ -1559,6 +1559,47 @@ done:
return rval;
}
+int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
+{
+ /* Don't try to reallocate the array */
+ if (req->outstanding_cmds)
+ return QLA_SUCCESS;
+
+ if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
+ (ql2xmultique_tag || ql2xmaxqueues > 1)))
+ req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
+ else {
+ if (ha->fw_xcb_count <= ha->fw_iocb_count)
+ req->num_outstanding_cmds = ha->fw_xcb_count;
+ else
+ req->num_outstanding_cmds = ha->fw_iocb_count;
+ }
+
+ req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+ req->num_outstanding_cmds, GFP_KERNEL);
+
+ if (!req->outstanding_cmds) {
+ /*
+ * Try to allocate a minimal size just so we can get through
+ * initialization.
+ */
+ req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
+ req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+ req->num_outstanding_cmds, GFP_KERNEL);
+
+ if (!req->outstanding_cmds) {
+ ql_log(ql_log_fatal, NULL, 0x0126,
+ "Failed to allocate memory for "
+ "outstanding_cmds for req_que %p.\n", req);
+ req->num_outstanding_cmds = 0;
+ return QLA_FUNCTION_FAILED;
+ }
+ }
+
+ return QLA_SUCCESS;
+}
+
/**
* qla2x00_setup_chip() - Load and start RISC firmware.
* @ha: HA context
@@ -1628,9 +1669,18 @@ enable_82xx_npiv:
MIN_MULTI_ID_FABRIC - 1;
}
qla2x00_get_resource_cnts(vha, NULL,
- &ha->fw_xcb_count, NULL, NULL,
+ &ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
&ha->max_npiv_vports, NULL);
+ /*
+ * Allocate the array of outstanding commands
+ * now that we know the firmware resources.
+ */
+ rval = qla2x00_alloc_outstanding_cmds(ha,
+ vha->req);
+ if (rval != QLA_SUCCESS)
+ goto failed;
+
if (!fw_major_version && ql2xallocfwdump
&& !IS_QLA82XX(ha))
qla2x00_alloc_fw_dump(vha);
@@ -1914,7 +1964,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
}
- qlt_24xx_config_rings(vha, reg);
+ qlt_24xx_config_rings(vha);
/* PCI posting */
RD_REG_DWORD(&ioreg->hccr);
@@ -1948,7 +1998,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
@@ -2157,6 +2207,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
char connect_type[22];
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -2170,6 +2221,13 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
} else {
ql_log(ql_log_warn, vha, 0x2009,
"Unable to get host loop ID.\n");
+ if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
+ (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
+ ql_log(ql_log_warn, vha, 0x1151,
+ "Doing link init.\n");
+ if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
+ return rval;
+ }
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
return (rval);
@@ -2690,7 +2748,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport->loop_id = FC_NO_LOOP_ID;
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
- fcport->scan_state = QLA_FCPORT_SCAN_NONE;
return fcport;
}
@@ -3103,7 +3160,7 @@ static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
int rval;
- fc_port_t *fcport;
+ fc_port_t *fcport, *fcptemp;
uint16_t next_loopid;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
@@ -3141,7 +3198,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
0xfc, mb, BIT_1|BIT_0);
if (rval != QLA_SUCCESS) {
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- break;
+ return rval;
}
if (mb[0] != MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -3173,16 +3230,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
+#define QLA_FCPORT_SCAN 1
+#define QLA_FCPORT_FOUND 2
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
- /* Add new ports to existing port list */
- list_splice_tail_init(&new_fcports, &vha->vp_fcports);
-
- /* Starting free loop ID. */
- next_loopid = ha->min_external_loopid;
-
+ /*
+ * Logout all previous fabric devices marked lost, except
+ * FCP2 devices.
+ */
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
@@ -3190,8 +3252,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
- /* Logout lost/gone fabric devices (non-FCP2) */
- if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
+ if (fcport->scan_state == QLA_FCPORT_SCAN &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
@@ -3204,30 +3265,74 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
fcport->d_id.b.domain,
fcport->d_id.b.area,
fcport->d_id.b.al_pa);
+ fcport->loop_id = FC_NO_LOOP_ID;
}
- continue;
}
- fcport->scan_state = QLA_FCPORT_SCAN_NONE;
-
- /* Login fabric devices that need a login */
- if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
- atomic_read(&vha->loop_down_timer) == 0) {
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(
- base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- continue;
- }
+ }
+
+ /* Starting free loop ID. */
+ next_loopid = ha->min_external_loopid;
+
+ /*
+ * Scan through our port list and login entries that need to be
+ * logged in.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
+ (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+ continue;
+
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(
+ base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
}
}
+ /* Login and update database */
+ qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+ }
+
+ /* Exit if out of loop IDs. */
+ if (rval != QLA_SUCCESS) {
+ break;
+ }
+
+ /*
+ * Login and add the new devices to our port list.
+ */
+ list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ /* Find a new loop ID to use. */
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
+ }
/* Login and update database */
qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+
+ list_move_tail(&fcport->list, &vha->vp_fcports);
}
} while (0);
+ /* Free all new device structures not processed. */
+ list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+ list_del(&fcport->list);
+ kfree(fcport);
+ }
+
if (rval) {
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
@@ -3263,8 +3368,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
int first_dev, last_dev;
port_id_t wrap = {}, nxt_d_id;
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
- struct scsi_qla_host *tvp;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
rval = QLA_SUCCESS;
@@ -3377,22 +3481,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
continue;
/* Bypass virtual ports of the same host. */
- found = 0;
- if (ha->num_vhosts) {
- unsigned long flags;
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (new_fcport->d_id.b24 == vp->d_id.b24) {
- found = 1;
- break;
- }
- }
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-
- if (found)
- continue;
- }
+ if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
+ continue;
/* Bypass if same domain and area of adapter. */
if (((new_fcport->d_id.b24 & 0xffff00) ==
@@ -3417,7 +3507,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
WWN_SIZE))
continue;
- fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
+ fcport->scan_state = QLA_FCPORT_FOUND;
found++;
@@ -5004,7 +5094,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
return rval;
}
-#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
+#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
@@ -5529,6 +5619,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (IS_T10_PI_CAPABLE(ha))
nv->frame_payload_size &= ~7;
+ qlt_81xx_config_nvram_stage1(vha, nv);
+
/* Reset Initialization control block */
memset(icb, 0, ha->init_cb_size);
@@ -5569,6 +5661,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
"QLE8XXX");
+ qlt_81xx_config_nvram_stage2(vha, icb);
+
/* Use alternate WWN? */
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
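qla2x00_alloc_outstanding_cmds() above sizes the per-queue command array from the firmware-reported counts and, if that allocation fails, retries with a guaranteed minimum so initialization can still complete. A standalone sketch of the size-then-fallback allocation; the DEFAULT/MIN values and names are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    #define DEFAULT_CMDS 1024
    #define MIN_CMDS      128

    struct cmd_queue {
        void   **slots;
        unsigned num_slots;
    };

    int alloc_cmd_slots(struct cmd_queue *q, unsigned fw_reported)
    {
        if (q->slots)
            return 0;                            /* never reallocate */

        q->num_slots = fw_reported ? fw_reported : DEFAULT_CMDS;
        q->slots = calloc(q->num_slots, sizeof(*q->slots));
        if (!q->slots) {
            /* Fall back to a minimal array so init can still proceed. */
            q->num_slots = MIN_CMDS;
            q->slots = calloc(q->num_slots, sizeof(*q->slots));
            if (!q->slots) {
                q->num_slots = 0;
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        struct cmd_queue q = { 0 };
        if (alloc_cmd_slots(&q, 2048) == 0)
            printf("allocated %u command slots\n", q.num_slots);
        free(q.slots);
        return 0;
    }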
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index c0462c04c885..68e2c4afc134 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -198,6 +198,13 @@ done:
}
static inline void
+qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
+{
+ mempool_free(sp, vha->hw->srb_mempool);
+ QLA_VHA_MARK_NOT_BUSY(vha);
+}
+
+static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
init_timer(&sp->u.iocb_cmd.timer);
@@ -213,3 +220,22 @@ qla2x00_gid_list_size(struct qla_hw_data *ha)
{
return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}
+
+static inline void
+qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
+{
+ if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
+ return;
+
+ /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
+ if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
+ HOST_QUEUE_RAMPDOWN_INTERVAL)))
+ return;
+
+ /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
+ if (time_before(jiffies, (vha->hw->host_last_rampup_time +
+ HOST_QUEUE_RAMPUP_INTERVAL)))
+ return;
+
+ set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
+}
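qla2x00_do_host_ramp_up() rate-limits queue-depth increases with two time_before() checks against jiffies timestamps. A userspace sketch of the same pattern using a monotonic clock; the interval values and names are illustrative:

    #include <stdbool.h>
    #include <time.h>

    #define RAMPDOWN_INTERVAL 60   /* seconds of quiet after a ramp-down      */
    #define RAMPUP_INTERVAL   30   /* seconds between successive ramp-ups     */

    time_t last_rampdown, last_rampup;

    time_t now_sec(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec;
    }

    /* Returns true when enough quiet time has passed to raise the queue depth. */
    bool may_ramp_up(void)
    {
        time_t now = now_sec();

        if (now < last_rampdown + RAMPDOWN_INTERVAL)
            return false;   /* too soon after backing off */
        if (now < last_rampup + RAMPUP_INTERVAL)
            return false;   /* too soon after the previous increase */

        last_rampup = now;
        return true;
    }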
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a481684479c1..d2630317cce8 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -349,14 +349,14 @@ qla2x00_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -1467,16 +1467,15 @@ qla24xx_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS) {
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
- }
/* Map the sg table so we have an accurate count of sg entries needed */
if (scsi_sg_count(cmd)) {
@@ -1641,15 +1640,15 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
/* Compute number of required data segments */
@@ -1822,14 +1821,14 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS) {
+ if (index == req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x700b,
"No room on outstanding cmd array.\n");
goto queuing_error;
@@ -2263,14 +2262,14 @@ qla82xx_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -2767,15 +2766,15 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS) {
+ if (index == req->num_outstanding_cmds) {
rval = EXT_STATUS_BUSY;
goto queuing_error;
}
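Every start path above now bounds its circular search for a free command handle by the per-queue num_outstanding_cmds instead of the old compile-time maximum. A standalone sketch of that circular free-slot scan, which resumes just past the most recently used handle; the array size and names are illustrative:

    #include <stdio.h>

    #define NUM_SLOTS 8

    void *outstanding[NUM_SLOTS];     /* slot 0 is reserved, as in the driver */
    unsigned current_handle = 1;

    /* Returns a free handle in [1, NUM_SLOTS) or 0 if every slot is busy. */
    unsigned find_free_handle(void)
    {
        unsigned handle = current_handle;
        unsigned index;

        for (index = 1; index < NUM_SLOTS; index++) {
            handle++;
            if (handle == NUM_SLOTS)
                handle = 1;           /* wrap around, skipping slot 0 */
            if (!outstanding[handle])
                break;
        }
        if (index == NUM_SLOTS)
            return 0;                 /* queue full */

        current_handle = handle;
        return handle;
    }

    int main(void)
    {
        outstanding[2] = (void *)1;   /* pretend handle 2 is in flight */
        printf("next free handle: %u\n", find_free_handle());
        return 0;
    }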
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 873c82014b16..e9dbd74c20d3 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -13,6 +13,8 @@
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
+#include "qla_target.h"
+
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
struct req_que *, uint32_t);
@@ -489,10 +491,37 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
ql_log(ql_log_info, vha, 0x506a,
"IDC Device-State changed = 0x%x.\n", mb[4]);
+ if (ha->flags.nic_core_reset_owner)
+ return;
qla83xx_schedule_work(vha, MBA_IDC_AEN);
}
}
+int
+qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
+{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
+ uint32_t vp_did;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ha->num_vhosts)
+ return ret;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ vp_did = vp->d_id.b24;
+ if (vp_did == rscn_entry) {
+ ret = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return ret;
+}
+
/**
 * qla2x00_async_event() - Process asynchronous events.
* @ha: SCSI driver HA context
@@ -899,6 +928,10 @@ skip_rio:
/* Ignore reserved bits from RSCN-payload. */
rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
+ /* Skip RSCNs for virtual ports on the same physical port */
+ if (qla2x00_is_a_vp_did(vha, rscn_entry))
+ break;
+
atomic_set(&vha->loop_down_timer, 0);
vha->flags.management_server_logged_in = 0;
@@ -983,14 +1016,25 @@ skip_rio:
mb[1], mb[2], mb[3]);
break;
case MBA_IDC_NOTIFY:
- /* See if we need to quiesce any I/O */
- if (IS_QLA8031(vha->hw))
- if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
- (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
+ if (IS_QLA8031(vha->hw)) {
+ mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
+ (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
+ (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ /*
+ * Extend loop down timer since port is active.
+ */
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
qla2xxx_wake_dpc(vha);
}
+ }
case MBA_IDC_COMPLETE:
+ if (ha->notify_lb_portup_comp)
+ complete(&ha->lb_portup_comp);
+ /* Fallthru */
case MBA_IDC_TIME_EXT:
if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
qla81xx_idc_event(vha, mb[0], mb[1]);
@@ -1029,7 +1073,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
/* Validate handle. */
- if (index >= MAX_OUTSTANDING_COMMANDS) {
+ if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x3014,
"Invalid SCSI command index (%x).\n", index);
@@ -1067,7 +1111,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
uint16_t index;
index = LSW(pkt->handle);
- if (index >= MAX_OUTSTANDING_COMMANDS) {
+ if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
"Invalid command index (%x).\n", index);
if (IS_QLA82XX(ha))
@@ -1740,7 +1784,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
sts24 = (struct sts_entry_24xx *) pkt;
/* Validate handle. */
- if (index >= MAX_OUTSTANDING_COMMANDS) {
+ if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x70af,
"Invalid SCSI completion handle 0x%x.\n", index);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1910,9 +1954,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
req = ha->req_q_map[que];
/* Validate handle. */
- if (handle < MAX_OUTSTANDING_COMMANDS) {
+ if (handle < req->num_outstanding_cmds)
sp = req->outstanding_cmds[handle];
- } else
+ else
sp = NULL;
if (sp == NULL) {
@@ -1934,6 +1978,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
+ qla2x00_do_host_ramp_up(vha);
qla2x00_process_completed_request(vha, req, handle);
return;
@@ -2193,6 +2238,9 @@ out:
cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
resid_len, fw_resid_len);
+ if (!res)
+ qla2x00_do_host_ramp_up(vha);
+
if (rsp->status_srb == NULL)
sp->done(ha, sp, res);
}
@@ -2747,6 +2795,12 @@ static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
+static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
+ { "qla2xxx (default)", qla24xx_msix_default },
+ { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+ { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+};
+
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
@@ -2827,9 +2881,13 @@ msix_failed:
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
- if (IS_QLA82XX(ha)) {
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ ret = request_irq(qentry->vector,
+ qla83xx_msix_entries[i].handler,
+ 0, qla83xx_msix_entries[i].name, rsp);
+ } else if (IS_QLA82XX(ha)) {
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
0, qla82xx_msix_entries[i].name, rsp);
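The new helper qla2x00_is_a_vp_did() walks the vport list under the lock and reports through an explicit return value whether a D_ID belongs to one of this host's virtual ports, so callers no longer open-code the search. A small sketch of the same list-walk-with-result shape on a plain singly linked list; the lock is omitted and the types and names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct vport {
        uint32_t d_id;
        struct vport *next;
    };

    /* Returns true when 'd_id' matches one of the ports on the list. */
    bool is_a_vp_did(const struct vport *head, uint32_t d_id)
    {
        const struct vport *vp;

        for (vp = head; vp; vp = vp->next)
            if (vp->d_id == d_id)
                return true;
        return false;
    }

    int main(void)
    {
        struct vport b = { 0x010200, NULL };
        struct vport a = { 0x010100, &b };

        printf("%d\n", is_a_vp_did(&a, 0x010200));   /* 1 */
        printf("%d\n", is_a_vp_did(&a, 0x030303));   /* 0 */
        return 0;
    }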
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 68c55eaa318c..186dd59ce4fa 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -900,13 +900,13 @@ qla2x00_abort_command(srb_t *sp)
"Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
- for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (handle == MAX_OUTSTANDING_COMMANDS) {
+ if (handle == req->num_outstanding_cmds) {
/* command not found */
return QLA_FUNCTION_FAILED;
}
@@ -1633,6 +1633,54 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
}
/*
+ * qla24xx_link_initialization
+ * Issue link initialization mailbox command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_link_initialize(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_LINK_INITIALIZATION;
+ mcp->mb[1] = BIT_6|BIT_4;
+ mcp->mb[2] = 0;
+ mcp->mb[3] = 0;
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
* qla2x00_lip_reset
* Issue LIP reset mailbox command.
*
@@ -2535,12 +2583,12 @@ qla24xx_abort_command(srb_t *sp)
"Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
- for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (handle == MAX_OUTSTANDING_COMMANDS) {
+ if (handle == req->num_outstanding_cmds) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
}
@@ -3093,6 +3141,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
unsigned long flags;
+ int found;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3128,13 +3177,17 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
return;
}
+ found = 0;
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list)
- if (vp_idx == vp->vp_idx)
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp_idx == vp->vp_idx) {
+ found = 1;
break;
+ }
+ }
spin_unlock_irqrestore(&ha->vport_slock, flags);
- if (!vp)
+ if (!found)
return;
vp->d_id.b.domain = rptid_entry->port_id[2];
@@ -3814,6 +3867,97 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
}
int
+qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int len;
+ uint16_t dwlen;
+ uint8_t *str;
+ dma_addr_t str_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
+ "Entered %s.\n", __func__);
+
+ str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+ if (!str) {
+ ql_log(ql_log_warn, vha, 0x1156,
+ "Failed to allocate driver version param.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memcpy(str, "\x7\x3\x11\x0", 4);
+ dwlen = str[0];
+ len = dwlen * sizeof(uint32_t) - 4;
+ memset(str + 4, 0, len);
+ if (len > strlen(version))
+ len = strlen(version);
+ memcpy(str + 4, version, len);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+ mcp->mb[2] = MSW(LSD(str_dma));
+ mcp->mb[3] = LSW(LSD(str_dma));
+ mcp->mb[6] = MSW(MSD(str_dma));
+ mcp->mb[7] = LSW(MSD(str_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1157,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+ return rval;
+}
+
+static int
+qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ *temp = mcp->mb[1];
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x115a,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
{
@@ -4415,38 +4559,45 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
}
int
-qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
+qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
{
- int rval;
- uint8_t byte;
+ int rval = QLA_FUNCTION_FAILED;
struct qla_hw_data *ha = vha->hw;
+ uint8_t byte;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
"Entered %s.\n", __func__);
- /* Integer part */
- rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1,
- BIT_13|BIT_12|BIT_0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
- ha->flags.thermal_supported = 0;
- goto fail;
+ if (ha->thermal_support & THERMAL_SUPPORT_I2C) {
+ rval = qla2x00_read_sfp(vha, 0, &byte,
+ 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0);
+ *temp = byte;
+ if (rval == QLA_SUCCESS)
+ goto done;
+
+ ql_log(ql_log_warn, vha, 0x10c9,
+ "Thermal not supported by I2C.\n");
+ ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
}
- *temp = byte;
- /* Fraction part */
- rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1,
- BIT_13|BIT_12|BIT_0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
- ha->flags.thermal_supported = 0;
- goto fail;
+ if (ha->thermal_support & THERMAL_SUPPORT_ISP) {
+ rval = qla2x00_read_asic_temperature(vha, temp);
+ if (rval == QLA_SUCCESS)
+ goto done;
+
+ ql_log(ql_log_warn, vha, 0x1019,
+ "Thermal not supported by ISP.\n");
+ ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
}
- *frac = (byte >> 6) * 25;
+ ql_log(ql_log_warn, vha, 0x1150,
+ "Thermal not supported by this card "
+ "(ignoring further requests).\n");
+ return rval;
+
+done:
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
"Done %s.\n", __func__);
-fail:
return rval;
}
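The reworked qla2x00_get_thermal_temp() tries the I2C sensor first and, on failure, clears that capability bit and falls back to the ASIC reading, so a method that fails once is never retried. A minimal sketch of that capability-flag fallback; the readers are stubs and the flag names mirror, but are not, the driver's:

    #include <stdio.h>

    #define SUPPORT_I2C (1u << 0)
    #define SUPPORT_ISP (1u << 1)

    unsigned int thermal_support = SUPPORT_I2C | SUPPORT_ISP;

    /* Stub sensor readers: pretend I2C is broken and the ASIC path works. */
    int read_temp_i2c(unsigned int *t) { (void)t; return -1; }
    int read_temp_isp(unsigned int *t) { *t = 47; return 0; }

    int get_thermal_temp(unsigned int *temp)
    {
        if (thermal_support & SUPPORT_I2C) {
            if (read_temp_i2c(temp) == 0)
                return 0;
            /* Remember the failure so the next call skips I2C entirely. */
            thermal_support &= ~SUPPORT_I2C;
        }
        if (thermal_support & SUPPORT_ISP) {
            if (read_temp_isp(temp) == 0)
                return 0;
            thermal_support &= ~SUPPORT_ISP;
        }
        return -1;   /* neither method works; callers see "not supported" */
    }

    int main(void)
    {
        unsigned int t;
        if (get_thermal_temp(&t) == 0)
            printf("temperature: %u C\n", t);
        return 0;
    }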
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 20fd974f903a..f868a9f98afe 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -523,6 +523,7 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
clear_bit(que_id, ha->req_qid_map);
mutex_unlock(&ha->vport_lock);
}
+ kfree(req->outstanding_cmds);
kfree(req);
req = NULL;
}
@@ -649,6 +650,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
goto que_failed;
}
+ ret = qla2x00_alloc_outstanding_cmds(ha, req);
+ if (ret != QLA_SUCCESS)
+ goto que_failed;
+
mutex_lock(&ha->vport_lock);
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
@@ -685,7 +690,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
"options=0x%x.\n", req->options);
ql_dbg(ql_dbg_init, base_vha, 0x00dd,
"options=0x%x.\n", req->options);
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 3e3f593bada3..10754f518303 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -847,14 +847,21 @@ static int
qla82xx_rom_lock(struct qla_hw_data *ha)
{
int done = 0, timeout = 0;
+ uint32_t lock_owner = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while (!done) {
/* acquire semaphore2 from PCI HW block */
done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
if (done == 1)
break;
- if (timeout >= qla82xx_rom_lock_timeout)
+ if (timeout >= qla82xx_rom_lock_timeout) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+ ql_dbg(ql_dbg_p3p, vha, 0xb085,
+ "Failed to acquire rom lock, acquired by %d.\n",
+ lock_owner);
return -1;
+ }
timeout++;
}
qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
@@ -3629,7 +3636,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
if (!sp->u.scmd.ctx ||
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6c953e8c08f0..d268e8406fdb 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -897,7 +897,7 @@ struct ct6_dsd {
#define FLT_REG_BOOT_CODE_82XX 0x78
#define FLT_REG_FW_82XX 0x74
#define FLT_REG_GOLD_FW_82XX 0x75
-#define FLT_REG_VPD_82XX 0x81
+#define FLT_REG_VPD_8XXX 0x81
#define FA_VPD_SIZE_82XX 0x400
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 10d23f8b7036..2c6dd3dfe0f4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -111,8 +111,7 @@ MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registrations. "
"0 - no FDMI. Default is 1 - perform FDMI.");
-#define MAX_Q_DEPTH 32
-static int ql2xmaxqdepth = MAX_Q_DEPTH;
+int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
@@ -360,6 +359,9 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
(req->length + 1) * sizeof(request_t),
req->ring, req->dma);
+ if (req)
+ kfree(req->outstanding_cmds);
+
kfree(req);
req = NULL;
}
@@ -628,7 +630,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
}
CMD_SP(cmd) = NULL;
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(sp->fcport->vha, sp);
}
static void
@@ -716,9 +718,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_target_busy;
}
- sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
- if (!sp)
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp) {
+ set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
goto qc24_host_busy;
+ }
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
@@ -731,6 +735,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+ set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
goto qc24_host_busy_free_sp;
}
@@ -1010,7 +1015,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
spin_lock_irqsave(&ha->hardware_lock, flags);
req = vha->req;
for (cnt = 1; status == QLA_SUCCESS &&
- cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
@@ -1300,14 +1305,14 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ qla2x00_mark_all_devices_lost(vha, 0);
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802d,
"full_login_lip=%d.\n", ret);
}
- atomic_set(&vha->loop_state, LOOP_DOWN);
- atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 0);
}
if (ha->flags.enable_lip_reset) {
@@ -1337,7 +1342,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ if (!req->outstanding_cmds)
+ continue;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
@@ -1453,6 +1460,81 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
return tag_type;
}
+static void
+qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha)
+{
+ scsi_qla_host_t *vp;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ha->host_last_rampdown_time = jiffies;
+
+ if (ha->cfg_lun_q_depth <= vha->host->cmd_per_lun)
+ return;
+
+ if ((ha->cfg_lun_q_depth / 2) < vha->host->cmd_per_lun)
+ ha->cfg_lun_q_depth = vha->host->cmd_per_lun;
+ else
+ ha->cfg_lun_q_depth = ha->cfg_lun_q_depth / 2;
+
+ /*
+ * Geometrically ramp down the queue depth for all devices on this
+ * adapter
+ */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ shost = vp->host;
+ shost_for_each_device(sdev, shost) {
+ if (sdev->queue_depth > shost->cmd_per_lun) {
+ if (sdev->queue_depth < ha->cfg_lun_q_depth)
+ continue;
+ ql_log(ql_log_warn, vp, 0x3031,
+ "%ld:%d:%d: Ramping down queue depth to %d",
+ vp->host_no, sdev->id, sdev->lun,
+ ha->cfg_lun_q_depth);
+ qla2x00_change_queue_depth(sdev,
+ ha->cfg_lun_q_depth, SCSI_QDEPTH_DEFAULT);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return;
+}
+
+static void
+qla2x00_host_ramp_up_queuedepth(scsi_qla_host_t *vha)
+{
+ scsi_qla_host_t *vp;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ha->host_last_rampup_time = jiffies;
+ ha->cfg_lun_q_depth++;
+
+ /*
+ * Linearly ramp up the queue depth for all devices on this
+ * adapter
+ */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ shost = vp->host;
+ shost_for_each_device(sdev, shost) {
+ if (sdev->queue_depth > ha->cfg_lun_q_depth)
+ continue;
+ qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth,
+ SCSI_QDEPTH_RAMP_UP);
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return;
+}
+
/**
* qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
* @ha: HA context
@@ -1730,6 +1812,9 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
mqiobase_exit:
ha->msix_count = ha->max_rsp_queues + 1;
+
+ qlt_83xx_iospace_config(ha);
+
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
"MSIX Count:%d.\n", ha->msix_count);
return 0;
@@ -2230,6 +2315,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
+ ha->cfg_lun_q_depth = ql2xmaxqdepth;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -2307,6 +2393,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
ha->gid_list_info_size = 8;
@@ -2338,6 +2425,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
ha->gid_list_info_size = 8;
@@ -2377,6 +2465,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
complete(&ha->mbx_cmd_comp);
init_completion(&ha->mbx_intr_comp);
init_completion(&ha->dcbx_comp);
+ init_completion(&ha->lb_portup_comp);
set_bit(0, (unsigned long *) ha->vp_idx_map);
@@ -2720,6 +2809,9 @@ qla2x00_shutdown(struct pci_dev *pdev)
scsi_qla_host_t *vha;
struct qla_hw_data *ha;
+ if (!atomic_read(&pdev->enable_cnt))
+ return;
+
vha = pci_get_drvdata(pdev);
ha = vha->hw;
@@ -3974,6 +4066,8 @@ qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
uint32_t idc_lck_rcvry_stage_mask = 0x3;
uint32_t idc_lck_rcvry_owner_mask = 0x3c;
struct qla_hw_data *ha = base_vha->hw;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
+ "Trying force recovery of the IDC lock.\n");
rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
if (rval)
@@ -4065,6 +4159,7 @@ qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
uint16_t options = (requester_id << 15) | BIT_6;
uint32_t data;
+ uint32_t lock_owner;
struct qla_hw_data *ha = base_vha->hw;
/* IDC-lock implementation using driver-lock/lock-id remote registers */
@@ -4076,8 +4171,11 @@ retry_lock:
qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
ha->portnum);
} else {
+ qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+ &lock_owner);
ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
- "Failed to acquire IDC lock. retrying...\n");
+ "Failed to acquire IDC lock, acquired by %d, "
+ "retrying...\n", lock_owner);
/* Retry/Perform IDC-Lock recovery */
if (qla83xx_idc_lock_recovery(base_vha)
@@ -4605,6 +4703,18 @@ qla2x00_do_dpc(void *data)
qla2xxx_flash_npiv_conf(base_vha);
}
+ if (test_and_clear_bit(HOST_RAMP_DOWN_QUEUE_DEPTH,
+ &base_vha->dpc_flags)) {
+ /* Prevents simultaneous ramp up and down */
+ clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
+ &base_vha->dpc_flags);
+ qla2x00_host_ramp_down_queuedepth(base_vha);
+ }
+
+ if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
+ &base_vha->dpc_flags))
+ qla2x00_host_ramp_up_queuedepth(base_vha);
+
if (!ha->interrupts_on)
ha->isp_ops->enable_intrs(ha);
@@ -4733,7 +4843,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
cpu_flags);
req = ha->req_q_map[0];
for (index = 1;
- index < MAX_OUTSTANDING_COMMANDS;
+ index < req->num_outstanding_cmds;
index++) {
fc_port_t *sfcp;
@@ -4802,7 +4912,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
+ test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags) ||
+ test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags))) {
ql_dbg(ql_dbg_timer, vha, 0x600b,
"isp_abort_needed=%d loop_resync_needed=%d "
"fcport_update_needed=%d start_dpc=%d "
@@ -4815,12 +4927,15 @@ qla2x00_timer(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_timer, vha, 0x600c,
"beacon_blink_needed=%d isp_unrecoverable=%d "
"fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
- "relogin_needed=%d.\n",
+ "relogin_needed=%d, host_ramp_down_needed=%d "
+ "host_ramp_up_needed=%d.\n",
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
+ test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags),
+ test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags));
qla2xxx_wake_dpc(vha);
}
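
Note: the HOST_RAMP_DOWN_QUEUE_DEPTH / HOST_RAMP_UP_QUEUE_DEPTH handling
added to qla_os.c above halves the per-LUN queue depth when resources run
short (never below cmd_per_lun) and raises it by one step on ramp-up. A
stand-alone sketch of that geometric-down / linear-up policy (hypothetical
names, not driver code):

/*
 * Hedged sketch: halve the depth on ramp-down, clamped to a minimum,
 * and add one on ramp-up, mirroring the policy above.
 */
#include <stdio.h>

static int ramp_down(int depth, int min_depth)
{
	if (depth <= min_depth)
		return min_depth;
	return (depth / 2 < min_depth) ? min_depth : depth / 2;
}

static int ramp_up(int depth)
{
	return depth + 1;
}

int main(void)
{
	int depth = 32, min_depth = 3;

	depth = ramp_down(depth, min_depth);	/* 16 */
	depth = ramp_down(depth, min_depth);	/* 8 */
	depth = ramp_up(depth);			/* 9 */
	printf("depth=%d\n", depth);
	return 0;
}
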
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index 892a81e457bc..46ef0ac48f44 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 32fdc2a66dd1..3bef6736d885 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -798,20 +798,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_BOOTLOAD_82XX:
ha->flt_region_bootload = start;
break;
- case FLT_REG_VPD_82XX:
- ha->flt_region_vpd = start;
- break;
- case FLT_REG_FCOE_VPD_0:
- if (!IS_QLA8031(ha))
- break;
- ha->flt_region_vpd_nvram = start;
- if (ha->flags.port0)
- ha->flt_region_vpd = start;
- break;
- case FLT_REG_FCOE_VPD_1:
- if (!IS_QLA8031(ha))
- break;
- if (!ha->flags.port0)
+ case FLT_REG_VPD_8XXX:
+ if (IS_CNA_CAPABLE(ha))
ha->flt_region_vpd = start;
break;
case FLT_REG_FCOE_NVRAM_0:
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 80f4b849e2b0..61b5d8c2b5da 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(qlini_mode,
"\"disabled\" - initiator mode will never be enabled; "
"\"enabled\" (default) - initiator mode will always stay enabled.");
-static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
/*
* From scsi/fc/fc_fcp.h
@@ -1119,6 +1119,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
@@ -1570,7 +1571,7 @@ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
/* always increment cmd handle */
do {
++h;
- if (h > MAX_OUTSTANDING_COMMANDS)
+ if (h > DEFAULT_OUTSTANDING_COMMANDS)
h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
if (h == ha->tgt.current_handle) {
ql_dbg(ql_dbg_tgt, vha, 0xe04e,
@@ -2441,7 +2442,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return NULL;
}
/* handle-1 is actually used */
- if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
+ if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
ql_dbg(ql_dbg_tgt, vha, 0xe052,
"qla_target(%d): Wrong handle %x received\n",
vha->vp_idx, handle);
@@ -4305,6 +4306,12 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
if (!QLA_TGT_MODE_ENABLED())
return 0;
+ if (!IS_TGT_MODE_CAPABLE(ha)) {
+ ql_log(ql_log_warn, base_vha, 0xe070,
+ "This adapter does not support target mode.\n");
+ return 0;
+ }
+
ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
"Registering target for host %ld(%p)", base_vha->host_no, ha);
@@ -4666,7 +4673,6 @@ void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
struct atio_from_isp *pkt;
int cnt, i;
@@ -4694,26 +4700,28 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
}
/* Adjust ring index */
- WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
+ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
void
-qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
+qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
-/* FIXME: atio_q in/out for ha->mqenable=1..? */
- if (ha->mqenable) {
-#if 0
- WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
- WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
- RD_REG_DWORD(&reg->isp25mq.atio_q_out);
-#endif
- } else {
- /* Setup APTIO registers for target mode */
- WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
- WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
- RD_REG_DWORD(&reg->isp24.atio_q_out);
+ WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
+ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
+ RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+
+ if (IS_ATIO_MSIX_CAPABLE(ha)) {
+ struct qla_msix_entry *msix = &ha->msix_entries[2];
+ struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "Registering ICB vector 0x%x for atio que.\n",
+ msix->entry);
}
}
@@ -4796,6 +4804,101 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
}
}
+void
+qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (qla_tgt_mode_enabled(vha)) {
+ if (!ha->tgt.saved_set) {
+ /* We save only once */
+ ha->tgt.saved_exchange_count = nv->exchange_count;
+ ha->tgt.saved_firmware_options_1 =
+ nv->firmware_options_1;
+ ha->tgt.saved_firmware_options_2 =
+ nv->firmware_options_2;
+ ha->tgt.saved_firmware_options_3 =
+ nv->firmware_options_3;
+ ha->tgt.saved_set = 1;
+ }
+
+ nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+ /* Enable target mode */
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ nv->firmware_options_1 |=
+ __constant_cpu_to_le32(BIT_5);
+
+ /* Disable Full Login after LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Enable initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ /* Enable FC tapes support */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ /* Disable Full Login after LIP */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ /* Enable target PRLI control */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ } else {
+ if (ha->tgt.saved_set) {
+ nv->exchange_count = ha->tgt.saved_exchange_count;
+ nv->firmware_options_1 =
+ ha->tgt.saved_firmware_options_1;
+ nv->firmware_options_2 =
+ ha->tgt.saved_firmware_options_2;
+ nv->firmware_options_3 =
+ ha->tgt.saved_firmware_options_3;
+ }
+ return;
+ }
+
+ /* out-of-order frames reassembly */
+ nv->firmware_options_3 |= BIT_6|BIT_9;
+
+ if (ha->tgt.enable_class_2) {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) =
+ FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ } else {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+ nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ }
+}
+
+void
+qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
+ struct init_cb_81xx *icb)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ha->tgt.node_name_set) {
+ memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+ icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ }
+}
+
+void
+qlt_83xx_iospace_config(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ ha->msix_count += 1; /* For ATIO Q */
+}
+
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
struct sts_entry_24xx *pkt)
@@ -4828,11 +4931,41 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return;
+ if (ha->mqenable || IS_QLA83XX(ha)) {
+ ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
+ ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
+ } else {
+ ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
+ ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
+ }
+
mutex_init(&ha->tgt.tgt_mutex);
mutex_init(&ha->tgt.tgt_host_action_mutex);
qlt_clear_mode(base_vha);
}
+irqreturn_t
+qla83xx_msix_atio_q(int irq, void *dev_id)
+{
+ struct rsp_que *rsp;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ ha = rsp->hw;
+ vha = pci_get_drvdata(ha->pdev);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index bad749561ec2..ff9ccb9fd036 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -60,8 +60,9 @@
* multi-complete should come to the tgt driver or be handled there by qla2xxx
*/
#define CTIO_COMPLETION_HANDLE_MARK BIT_29
-#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
-#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
+#if (CTIO_COMPLETION_HANDLE_MARK <= DEFAULT_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than "
+ "DEFAULT_OUTSTANDING_COMMANDS"
#endif
#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
@@ -161,7 +162,7 @@ struct imm_ntfy_from_isp {
uint16_t srr_rx_id;
uint16_t status;
uint8_t status_subcode;
- uint8_t reserved_3;
+ uint8_t fw_handle;
uint32_t exchange_address;
uint32_t srr_rel_offs;
uint16_t srr_ui;
@@ -217,7 +218,7 @@ struct nack_to_isp {
uint16_t srr_rx_id;
uint16_t status;
uint8_t status_subcode;
- uint8_t reserved_3;
+ uint8_t fw_handle;
uint32_t exchange_address;
uint32_t srr_rel_offs;
uint16_t srr_ui;
@@ -948,6 +949,7 @@ extern void qlt_update_vp_map(struct scsi_qla_host *, int);
* is not set. Right now, ha value is ignored.
*/
#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+extern int ql2x_ini_mode;
static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
{
@@ -985,12 +987,15 @@ extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
-extern void qlt_24xx_config_rings(struct scsi_qla_host *,
- device_reg_t __iomem *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *);
extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_24xx *);
extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
struct init_cb_24xx *);
+extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
+ struct init_cb_81xx *);
+extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
+ struct nvram_81xx *);
extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
struct sts_entry_24xx *);
extern void qlt_modify_vp_config(struct scsi_qla_host *,
@@ -1000,5 +1005,7 @@ extern int qlt_mem_alloc(struct qla_hw_data *);
extern void qlt_mem_free(struct qla_hw_data *);
extern void qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
+extern irqreturn_t qla83xx_msix_atio_q(int, void *);
+extern void qlt_83xx_iospace_config(struct qla_hw_data *);
#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 49697ca41e78..2b6e478d9e33 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 6e9af20be12f..5d8fe4f75650 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -538,7 +538,7 @@ struct device_info {
int port_num;
};
-static int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
+int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
{
uint32_t drv_active;
uint32_t dev_part, dev_part1, dev_part2;
@@ -1351,31 +1351,58 @@ exit_start_fw:
/*----------------------Interrupt Related functions ---------------------*/
-void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
+static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
+{
+ if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
+ qla4_8xxx_intr_disable(ha);
+}
+
+static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
{
uint32_t mb_int, ret;
- if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
- qla4_8xxx_mbx_intr_disable(ha);
+ if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
+ ret = readl(&ha->qla4_83xx_reg->mbox_int);
+ mb_int = ret & ~INT_ENABLE_FW_MB;
+ writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+ writel(1, &ha->qla4_83xx_reg->leg_int_mask);
+ }
+}
- ret = readl(&ha->qla4_83xx_reg->mbox_int);
- mb_int = ret & ~INT_ENABLE_FW_MB;
- writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
- writel(1, &ha->qla4_83xx_reg->leg_int_mask);
+void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
+{
+ qla4_83xx_disable_mbox_intrs(ha);
+ qla4_83xx_disable_iocb_intrs(ha);
}
-void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
+static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
+{
+ if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
+ qla4_8xxx_intr_enable(ha);
+ set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
+ }
+}
+
+void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
{
uint32_t mb_int;
- qla4_8xxx_mbx_intr_enable(ha);
- mb_int = INT_ENABLE_FW_MB;
- writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
- writel(0, &ha->qla4_83xx_reg->leg_int_mask);
+ if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
+ mb_int = INT_ENABLE_FW_MB;
+ writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+ writel(0, &ha->qla4_83xx_reg->leg_int_mask);
+ set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
+ }
+}
- set_bit(AF_INTERRUPTS_ON, &ha->flags);
+
+void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
+{
+ qla4_83xx_enable_mbox_intrs(ha);
+ qla4_83xx_enable_iocb_intrs(ha);
}
+
void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
int incount)
{
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 76819b71ada7..19ee55a6226c 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -74,16 +74,22 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
}
break;
case 2:
- /* Reset HBA */
+ /* Reset HBA and collect FW dump */
ha->isp_ops->idc_lock(ha);
dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
if (dev_state == QLA8XXX_DEV_READY) {
- ql4_printk(KERN_INFO, ha,
- "%s: Setting Need reset, reset_owner is 0x%x.\n",
- __func__, ha->func_num);
+ ql4_printk(KERN_INFO, ha, "%s: Setting Need reset\n",
+ __func__);
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
QLA8XXX_DEV_NEED_RESET);
- set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+ if (is_qla8022(ha) ||
+ (is_qla8032(ha) &&
+ qla4_83xx_can_perform_reset(ha))) {
+ set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+ set_bit(AF_FW_RECOVERY, &ha->flags);
+ ql4_printk(KERN_INFO, ha, "%s: Reset owner is 0x%x\n",
+ __func__, ha->func_num);
+ }
} else
ql4_printk(KERN_INFO, ha,
"%s: Reset not performed as device state is 0x%x\n",
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 329d553eae94..129f5dd02822 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -136,6 +136,7 @@
#define RESPONSE_QUEUE_DEPTH 64
#define QUEUE_SIZE 64
#define DMA_BUFFER_SIZE 512
+#define IOCB_HIWAT_CUSHION 4
/*
* Misc
@@ -180,6 +181,7 @@
#define DISABLE_ACB_TOV 30
#define IP_CONFIG_TOV 30
#define LOGIN_TOV 12
+#define BOOT_LOGIN_RESP_TOV 60
#define MAX_RESET_HA_RETRIES 2
#define FW_ALIVE_WAIT_TOV 3
@@ -314,6 +316,7 @@ struct ql4_tuple_ddb {
* DDB flags.
*/
#define DF_RELOGIN 0 /* Relogin to device */
+#define DF_BOOT_TGT 1 /* Boot target entry */
#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
#define DF_FO_MASKED 3
@@ -501,6 +504,7 @@ struct scsi_qla_host {
#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */
+#define AF_LOOPBACK 9 /* 0x00000200 */
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
#define AF_HA_REMOVAL 12 /* 0x00001000 */
@@ -516,6 +520,8 @@ struct scsi_qla_host {
#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */
#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */
+#define AF_83XX_IOCB_INTR_ON 28 /* 0x10000000 */
+#define AF_83XX_MBOX_INTR_ON 29 /* 0x20000000 */
unsigned long dpc_flags;
@@ -537,6 +543,7 @@ struct scsi_qla_host {
uint32_t tot_ddbs;
uint16_t iocb_cnt;
+ uint16_t iocb_hiwat;
/* SRB cache. */
#define SRB_MIN_REQ 128
@@ -838,7 +845,8 @@ static inline int is_aer_supported(struct scsi_qla_host *ha)
static inline int adapter_up(struct scsi_qla_host *ha)
{
return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
- (test_bit(AF_LINK_UP, &ha->flags) != 0);
+ (test_bit(AF_LINK_UP, &ha->flags) != 0) &&
+ (!test_bit(AF_LOOPBACK, &ha->flags));
}
static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 1c4795020357..ad9d2e2d370f 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -495,7 +495,7 @@ struct qla_flt_region {
#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
#define MBOX_ASTS_IDC_COMPLETE 0x8100
-#define MBOX_ASTS_IDC_NOTIFY 0x8101
+#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101
#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
@@ -522,6 +522,10 @@ struct qla_flt_region {
#define FLASH_OPT_COMMIT 2
#define FLASH_OPT_RMW_COMMIT 3
+/* Loopback type */
+#define ENABLE_INTERNAL_LOOPBACK 0x04
+#define ENABLE_EXTERNAL_LOOPBACK 0x08
+
/*************************************************************************/
/* Host Adapter Initialization Control Block (from host) */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 57a5a3cf5770..982293edf02c 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -253,12 +253,14 @@ void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
-int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha);
-int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha);
+int qla4_8xxx_intr_disable(struct scsi_qla_host *ha);
+int qla4_8xxx_intr_enable(struct scsi_qla_host *ha);
int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
+void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
+int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1aca1b4f70b8..8fc8548ba4ba 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -195,12 +195,10 @@ exit_get_sys_info_no_free:
* @ha: pointer to host adapter structure.
*
**/
-static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
+static void qla4xxx_init_local_data(struct scsi_qla_host *ha)
{
/* Initialize aen queue */
ha->aen_q_count = MAX_AEN_ENTRIES;
-
- return qla4xxx_get_firmware_status(ha);
}
static uint8_t
@@ -935,14 +933,23 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
+ /*
+ * For ISP83XX, mailbox and IOCB interrupts are enabled separately.
+ * Mailbox interrupts must be enabled prior to issuing any mailbox
+ * command in order to prevent the possibility of losing interrupts
+ * while switching from polling to interrupt mode. IOCB interrupts are
+ * enabled via isp_ops->enable_intrs.
+ */
+ if (is_qla8032(ha))
+ qla4_83xx_enable_mbox_intrs(ha);
+
if (qla4xxx_about_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
goto exit_init_hba;
- if (qla4xxx_init_local_data(ha) == QLA_ERROR)
- goto exit_init_hba;
+ qla4xxx_init_local_data(ha);
status = qla4xxx_init_firmware(ha);
if (status == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index f48f37a281d1..14fec976f634 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -316,7 +316,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
goto queuing_error;
/* total iocbs active */
- if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
+ if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
goto queuing_error;
/* Build command packet */
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 15ea81465ce4..1b83dc283d2e 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -582,6 +582,33 @@ exit_prq_error:
}
/**
+ * qla4_83xx_loopback_in_progress: Is loopback in progress?
+ * @ha: Pointer to host adapter structure.
+ * @ret: 1 = loopback in progress, 0 = loopback not in progress
+ **/
+static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
+{
+ int rval = 1;
+
+ if (is_qla8032(ha)) {
+ if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
+ (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Loopback diagnostics in progress\n",
+ __func__));
+ rval = 1;
+ } else {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Loopback diagnostics not in progress\n",
+ __func__));
+ rval = 0;
+ }
+ }
+
+ return rval;
+}
+
+/**
* qla4xxx_isr_decode_mailbox - decodes mailbox status
* @ha: Pointer to host adapter structure.
* @mailbox_status: Mailbox status.
@@ -676,8 +703,10 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_LINK_DOWN:
clear_bit(AF_LINK_UP, &ha->flags);
- if (test_bit(AF_INIT_DONE, &ha->flags))
+ if (test_bit(AF_INIT_DONE, &ha->flags)) {
set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+ qla4xxx_wake_dpc(ha);
+ }
ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
@@ -806,7 +835,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
" removed\n", ha->host_no, mbox_sts[0]));
break;
- case MBOX_ASTS_IDC_NOTIFY:
+ case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
{
uint32_t opcode;
if (is_qla8032(ha)) {
@@ -840,6 +869,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi:%ld: AEN %04x IDC Complete notification\n",
ha->host_no, mbox_sts[0]));
+
+ if (qla4_83xx_loopback_in_progress(ha))
+ set_bit(AF_LOOPBACK, &ha->flags);
+ else
+ clear_bit(AF_LOOPBACK, &ha->flags);
}
break;
@@ -1124,17 +1158,18 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
- ql4_printk(KERN_ERR, ha,
- "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
- __func__);
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
+ __func__));
return IRQ_NONE;
}
/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
- ql4_printk(KERN_ERR, ha,
- "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
- __func__, (leg_int_ptr & PF_BITS_MASK), ha->pf_bit);
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
+ __func__, (leg_int_ptr & PF_BITS_MASK),
+ ha->pf_bit));
return IRQ_NONE;
}
@@ -1437,11 +1472,14 @@ irq_not_attached:
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
- if (test_bit(AF_MSIX_ENABLED, &ha->flags))
- qla4_8xxx_disable_msix(ha);
- else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
- free_irq(ha->pdev->irq, ha);
- pci_disable_msi(ha->pdev);
- } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
- free_irq(ha->pdev->irq, ha);
+ if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+ if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
+ qla4_8xxx_disable_msix(ha);
+ } else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
+ free_irq(ha->pdev->irq, ha);
+ pci_disable_msi(ha->pdev);
+ } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
+ free_irq(ha->pdev->irq, ha);
+ }
+ }
}
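
Note: the new AF_LOOPBACK flag is set or cleared from the IDC Complete AEN
above and, together with the adapter_up() change in ql4_def.h, holds off
normal I/O while loopback diagnostics are running. A stand-alone sketch of
that gating (hypothetical names, not driver code):

/*
 * Hedged sketch: admit I/O only when the adapter is online, the link is
 * up, and loopback diagnostics are not in progress.
 */
#include <stdio.h>

#define F_ONLINE	(1u << 0)
#define F_LINK_UP	(1u << 1)
#define F_LOOPBACK	(1u << 2)

static int adapter_up(unsigned int flags)
{
	return (flags & F_ONLINE) && (flags & F_LINK_UP) &&
	       !(flags & F_LOOPBACK);
}

int main(void)
{
	printf("%d\n", adapter_up(F_ONLINE | F_LINK_UP));		/* 1 */
	printf("%d\n", adapter_up(F_ONLINE | F_LINK_UP | F_LOOPBACK));	/* 0 */
	return 0;
}
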
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 3d41034191f0..160d33697216 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -44,6 +44,30 @@ void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
}
/**
+ * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts?
+ * @ha: Pointer to host adapter structure.
+ * @ret: 1=polling mode, 0=non-polling mode
+ **/
+static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
+{
+ int rval = 1;
+
+ if (is_qla8032(ha)) {
+ if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
+ test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
+ rval = 0;
+ } else {
+ if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
+ test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
+ test_bit(AF_ONLINE, &ha->flags) &&
+ !test_bit(AF_HA_REMOVAL, &ha->flags))
+ rval = 0;
+ }
+
+ return rval;
+}
+
+/**
* qla4xxx_mailbox_command - issues mailbox commands
* @ha: Pointer to host adapter structure.
* @inCount: number of mailbox registers to load.
@@ -153,33 +177,28 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
/*
* Wait for completion: Poll or completion queue
*/
- if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
- test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
- test_bit(AF_ONLINE, &ha->flags) &&
- !test_bit(AF_HA_REMOVAL, &ha->flags)) {
- /* Do not poll for completion. Use completion queue */
- set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
- wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
- clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
- } else {
+ if (qla4xxx_is_intr_poll_mode(ha)) {
/* Poll for command to complete */
wait_count = jiffies + MBOX_TOV * HZ;
while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
if (time_after_eq(jiffies, wait_count))
break;
-
/*
* Service the interrupt.
* The ISR will save the mailbox status registers
* to a temporary storage location in the adapter
* structure.
*/
-
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->isp_ops->process_mailbox_interrupt(ha, outCount);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
msleep(10);
}
+ } else {
+ /* Do not poll for completion. Use completion queue */
+ set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
+ wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
+ clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
}
/* Check for mailbox timeout. */
@@ -678,8 +697,24 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
return QLA_ERROR;
}
- ql4_printk(KERN_INFO, ha, "%ld firmware IOCBs available (%d).\n",
- ha->host_no, mbox_sts[2]);
+ /* High-water mark of IOCBs */
+ ha->iocb_hiwat = mbox_sts[2];
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: firmware IOCBs available = %d\n", __func__,
+ ha->iocb_hiwat));
+
+ if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
+ ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
+
+ /* Ideally, we should not enter this code, as the # of firmware
+ * IOCBs is hard-coded in the firmware. We set a default
+ * iocb_hiwat here just in case */
+ if (ha->iocb_hiwat == 0) {
+ ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4;
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "%s: Setting IOCB's to = %d\n", __func__,
+ ha->iocb_hiwat));
+ }
return QLA_SUCCESS;
}
@@ -1385,10 +1420,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
dma_addr_t chap_dma;
chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
- if (chap_table == NULL) {
- ret = -ENOMEM;
- goto exit_get_chap;
- }
+ if (chap_table == NULL)
+ return -ENOMEM;
chap_size = sizeof(struct ql4_chap_table);
memset(chap_table, 0, chap_size);
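
Note: qla4xxx_get_firmware_status() above now caches the firmware-reported
IOCB count as iocb_hiwat, keeps a cushion of IOCB_HIWAT_CUSHION below it,
and falls back to REQUEST_QUEUE_DEPTH / 4 if the firmware reports zero;
the ql4_iocb.c hunk above then refuses to queue once iocb_cnt + req_cnt
reaches the mark. A stand-alone sketch of that accounting (hypothetical
constants and names, not driver code):

/*
 * Hedged sketch: reserve a small cushion below the reported IOCB limit
 * and reject a request once outstanding + requested would cross it.
 */
#include <stdio.h>

#define CUSHION		4
#define FALLBACK_HIWAT	(64 / 4)	/* like REQUEST_QUEUE_DEPTH / 4 */

static unsigned int init_hiwat(unsigned int fw_reported)
{
	unsigned int hiwat = fw_reported;

	if (hiwat > CUSHION)
		hiwat -= CUSHION;
	if (hiwat == 0)
		hiwat = FALLBACK_HIWAT;	/* defensive default */
	return hiwat;
}

static int can_queue(unsigned int outstanding, unsigned int req_cnt,
		     unsigned int hiwat)
{
	return (outstanding + req_cnt) < hiwat;
}

int main(void)
{
	unsigned int hiwat = init_hiwat(512);

	printf("hiwat=%u queue-ok=%d\n", hiwat, can_queue(500, 16, hiwat));
	return 0;
}
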
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 499a92db1cf6..71d3d234f526 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -2986,7 +2986,7 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
retval = qla4_8xxx_device_state_handler(ha);
- if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
+ if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
retval = qla4xxx_request_irqs(ha);
return retval;
@@ -3427,11 +3427,11 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
}
/* Make sure we receive the minimum required data to cache internally */
- if (mbox_sts[4] < offsetof(struct mbx_sys_info, reserved)) {
+ if ((is_qla8032(ha) ? mbox_sts[3] : mbox_sts[4]) <
+ offsetof(struct mbx_sys_info, reserved)) {
DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
" error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
goto exit_validate_mac82;
-
}
/* Save M.A.C. address & serial_number */
@@ -3463,7 +3463,7 @@ exit_validate_mac82:
/* Interrupt handling helpers. */
-int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
+int qla4_8xxx_intr_enable(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3484,7 +3484,7 @@ int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
return QLA_SUCCESS;
}
-int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
+int qla4_8xxx_intr_disable(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3509,7 +3509,7 @@ int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
void
qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
{
- qla4_8xxx_mbx_intr_enable(ha);
+ qla4_8xxx_intr_enable(ha);
spin_lock_irq(&ha->hardware_lock);
/* BIT 10 - reset */
@@ -3522,7 +3522,7 @@ void
qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
{
if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
- qla4_8xxx_mbx_intr_disable(ha);
+ qla4_8xxx_intr_disable(ha);
spin_lock_irq(&ha->hardware_lock);
/* BIT 10 - set */
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 4cec123a6a6a..6142729167f4 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1337,18 +1337,18 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
sess->password_in, BIDI_CHAP,
&idx);
if (rval)
- return -EINVAL;
-
- len = sprintf(buf, "%hu\n", idx);
+ len = sprintf(buf, "\n");
+ else
+ len = sprintf(buf, "%hu\n", idx);
break;
case ISCSI_PARAM_CHAP_OUT_IDX:
rval = qla4xxx_get_chap_index(ha, sess->username,
sess->password, LOCAL_CHAP,
&idx);
if (rval)
- return -EINVAL;
-
- len = sprintf(buf, "%hu\n", idx);
+ len = sprintf(buf, "\n");
+ else
+ len = sprintf(buf, "%hu\n", idx);
break;
default:
return iscsi_session_get_param(cls_sess, param, buf);
@@ -2242,6 +2242,7 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
!test_bit(AF_ONLINE, &ha->flags) ||
!test_bit(AF_LINK_UP, &ha->flags) ||
+ test_bit(AF_LOOPBACK, &ha->flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
goto qc_host_busy;
@@ -2978,6 +2979,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
if (status == QLA_SUCCESS) {
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);
+
ha->isp_ops->disable_intrs(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@@ -3479,7 +3481,8 @@ dpc_post_reset_ha:
}
/* ---- link change? --- */
- if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
+ if (!test_bit(AF_LOOPBACK, &ha->flags) &&
+ test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LINK_UP, &ha->flags)) {
/* ---- link down? --- */
qla4xxx_mark_all_devices_missing(ha);
@@ -3508,10 +3511,8 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
{
qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
- if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
- /* Turn-off interrupts on the card. */
- ha->isp_ops->disable_intrs(ha);
- }
+ /* Turn-off interrupts on the card. */
+ ha->isp_ops->disable_intrs(ha);
if (is_qla40XX(ha)) {
writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
@@ -3547,8 +3548,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
}
/* Detach interrupts */
- if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
- qla4xxx_free_irqs(ha);
+ qla4xxx_free_irqs(ha);
/* free extra memory */
qla4xxx_mem_free(ha);
@@ -4687,7 +4687,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
struct iscsi_endpoint *ep;
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
- struct sockaddr *dst_addr;
+ struct sockaddr *t_addr;
+ struct sockaddr_storage *dst_addr;
char *ip;
/* TODO: need to destroy on unload iscsi_endpoint*/
@@ -4696,21 +4697,23 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
return NULL;
if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
- dst_addr->sa_family = AF_INET6;
+ t_addr = (struct sockaddr *)dst_addr;
+ t_addr->sa_family = AF_INET6;
addr6 = (struct sockaddr_in6 *)dst_addr;
ip = (char *)&addr6->sin6_addr;
memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
} else {
- dst_addr->sa_family = AF_INET;
+ t_addr = (struct sockaddr *)dst_addr;
+ t_addr->sa_family = AF_INET;
addr = (struct sockaddr_in *)dst_addr;
ip = (char *)&addr->sin_addr;
memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
}
- ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+ ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
vfree(dst_addr);
return ep;
}
@@ -4725,7 +4728,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
}
static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry)
+ struct ddb_entry *ddb_entry,
+ uint16_t idx)
{
uint16_t def_timeout;
@@ -4745,6 +4749,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
def_timeout : LOGIN_TOV;
ddb_entry->default_time2wait =
le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+
+ if (ql4xdisablesysfsboot &&
+ (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
+ set_bit(DF_BOOT_TGT, &ddb_entry->flags);
}
static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
@@ -4881,7 +4889,7 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
- int is_reset)
+ int is_reset, uint16_t idx)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_session *sess;
@@ -4919,7 +4927,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
sizeof(struct dev_db_entry));
- qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
@@ -5036,7 +5044,7 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
goto continue_next_nt;
}
- ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
+ ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
if (ret == QLA_ERROR)
goto exit_nt_list;
@@ -5116,6 +5124,78 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
}
/**
+ * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
+ * response.
+ * @ha: pointer to adapter structure
+ *
+ * When the boot entry is a normal iSCSI target, the DF_BOOT_TGT flag is
+ * set in the DDB and we wait for the login response of the boot targets
+ * during probe.
+ **/
+static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
+{
+ struct ddb_entry *ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ unsigned long wtime;
+ uint32_t ddb_state;
+ int max_ddbs, idx, ret;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto exit_login_resp;
+ }
+
+ wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB index [%d]\n", __func__,
+ ddb_entry->fw_ddb_index));
+ do {
+ ret = qla4xxx_get_fwddb_entry(ha,
+ ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto exit_login_resp;
+
+ if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ break;
+
+ schedule_timeout_uninterruptible(HZ);
+
+ } while ((time_after(wtime, jiffies)));
+
+ if (!time_after(wtime, jiffies)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Login response wait timer expired\n",
+ __func__));
+ goto exit_login_resp;
+ }
+ }
+ }
+
+exit_login_resp:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
+/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
* @pci_device_id: pointer to pci_device entry
@@ -5270,7 +5350,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
if (is_qla80XX(ha)) {
ha->isp_ops->idc_lock(ha);
dev_state = qla4_8xxx_rd_direct(ha,
- QLA82XX_CRB_DEV_STATE);
+ QLA8XXX_CRB_DEV_STATE);
ha->isp_ops->idc_unlock(ha);
if (dev_state == QLA8XXX_DEV_FAILED) {
ql4_printk(KERN_WARNING, ha, "%s: don't retry "
@@ -5368,6 +5448,7 @@ skip_retry_init:
/* Perform the build ddb list and login to each */
qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+ qla4xxx_wait_login_resp_boot_tgt(ha);
qla4xxx_create_chap_list(ha);
@@ -6008,14 +6089,6 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
goto exit_host_reset;
}
- rval = qla4xxx_wait_for_hba_online(ha);
- if (rval != QLA_SUCCESS) {
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
- "adapter\n", __func__));
- rval = -EIO;
- goto exit_host_reset;
- }
-
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
goto recover_adapter;
@@ -6115,7 +6188,6 @@ qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
uint32_t rval = QLA_ERROR;
- uint32_t ret = 0;
int fn;
struct pci_dev *other_pdev = NULL;
@@ -6201,16 +6273,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
qla4_8xxx_set_drv_active(ha);
ha->isp_ops->idc_unlock(ha);
- ret = qla4xxx_request_irqs(ha);
- if (ret) {
- ql4_printk(KERN_WARNING, ha, "Failed to "
- "reserve interrupt %d already in use.\n",
- ha->pdev->irq);
- rval = QLA_ERROR;
- } else {
- ha->isp_ops->enable_intrs(ha);
- rval = QLA_SUCCESS;
- }
+ ha->isp_ops->enable_intrs(ha);
}
} else {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
@@ -6220,18 +6283,9 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
QLA8XXX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
- if (rval == QLA_SUCCESS) {
- ret = qla4xxx_request_irqs(ha);
- if (ret) {
- ql4_printk(KERN_WARNING, ha, "Failed to"
- " reserve interrupt %d already in"
- " use.\n", ha->pdev->irq);
- rval = QLA_ERROR;
- } else {
- ha->isp_ops->enable_intrs(ha);
- rval = QLA_SUCCESS;
- }
- }
+ if (rval == QLA_SUCCESS)
+ ha->isp_ops->enable_intrs(ha);
+
ha->isp_ops->idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
ha->isp_ops->idc_unlock(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index f6df2ea91ab5..6775a45af315 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.03.00-k1"
+#define QLA4XXX_DRIVER_VERSION "5.03.00-k4"
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f1bf5aff68ed..765398c063c7 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2617,3 +2617,17 @@ void scsi_kunmap_atomic_sg(void *virt)
kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
+
+void sdev_disable_disk_events(struct scsi_device *sdev)
+{
+ atomic_inc(&sdev->disk_events_disable_depth);
+}
+EXPORT_SYMBOL(sdev_disable_disk_events);
+
+void sdev_enable_disk_events(struct scsi_device *sdev)
+{
+ if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
+ return;
+ atomic_dec(&sdev->disk_events_disable_depth);
+}
+EXPORT_SYMBOL(sdev_enable_disk_events);
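
Note: sdev_disable_disk_events() / sdev_enable_disk_events() added above
form a nestable counter; the sr.c change below only polls the drive while
the disable depth is zero. A stand-alone sketch of such a counter
(hypothetical names, not the kernel helpers):

/*
 * Hedged sketch: events are delivered only while the disable depth is
 * zero; disable/enable calls nest.
 */
#include <stdio.h>

struct dev { int disable_depth; };

static void disable_events(struct dev *d)
{
	d->disable_depth++;
}

static void enable_events(struct dev *d)
{
	if (d->disable_depth <= 0)	/* unbalanced enable: ignore */
		return;
	d->disable_depth--;
}

static int events_allowed(const struct dev *d)
{
	return d->disable_depth == 0;
}

int main(void)
{
	struct dev d = { 0 };

	disable_events(&d);
	disable_events(&d);
	enable_events(&d);
	printf("allowed=%d\n", events_allowed(&d));	/* 0: still disabled */
	enable_events(&d);
	printf("allowed=%d\n", events_allowed(&d));	/* 1 */
	return 0;
}
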
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 59d427bf08e2..0a74b975efdf 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2503,6 +2503,15 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr,
}
static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
NULL);
+static ssize_t
+show_priv_session_target_id(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+ return sprintf(buf, "%d\n", session->target_id);
+}
+static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO,
+ show_priv_session_target_id, NULL);
#define iscsi_priv_session_attr_show(field, format) \
static ssize_t \
@@ -2575,6 +2584,7 @@ static struct attribute *iscsi_session_attrs[] = {
&dev_attr_priv_sess_creator.attr,
&dev_attr_sess_chap_out_idx.attr,
&dev_attr_sess_chap_in_idx.attr,
+ &dev_attr_priv_sess_target_id.attr,
NULL,
};
@@ -2638,6 +2648,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
return S_IRUGO;
else if (attr == &dev_attr_priv_sess_creator.attr)
return S_IRUGO;
+ else if (attr == &dev_attr_priv_sess_target_id.attr)
+ return S_IRUGO;
else {
WARN_ONCE(1, "Invalid session attr");
return 0;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index be2c9a6561ff..9f0c46547459 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1391,24 +1391,23 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
return ERR_PTR(-ENOMEM);
}
- if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
- printk(KERN_WARNING "idr expansion Sg_device failure\n");
- error = -ENOMEM;
- goto out;
- }
-
+ idr_preload(GFP_KERNEL);
write_lock_irqsave(&sg_index_lock, iflags);
- error = idr_get_new(&sg_index_idr, sdp, &k);
- if (error) {
- write_unlock_irqrestore(&sg_index_lock, iflags);
- printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
- error);
- goto out;
+ error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
+ if (error < 0) {
+ if (error == -ENOSPC) {
+ sdev_printk(KERN_WARNING, scsidp,
+ "Unable to attach sg device type=%d, minor number exceeds %d\n",
+ scsidp->type, SG_MAX_DEVS - 1);
+ error = -ENODEV;
+ } else {
+ printk(KERN_WARNING
+ "idr allocation Sg_device failure: %d\n", error);
+ }
+ goto out_unlock;
}
-
- if (unlikely(k >= SG_MAX_DEVS))
- goto overflow;
+ k = error;
SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
sprintf(disk->disk_name, "sg%d", k);
@@ -1420,25 +1419,17 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k;
kref_init(&sdp->d_ref);
+ error = 0;
+out_unlock:
write_unlock_irqrestore(&sg_index_lock, iflags);
+ idr_preload_end();
- error = 0;
- out:
if (error) {
kfree(sdp);
return ERR_PTR(error);
}
return sdp;
-
- overflow:
- idr_remove(&sg_index_idr, k);
- write_unlock_irqrestore(&sg_index_lock, iflags);
- sdev_printk(KERN_WARNING, scsidp,
- "Unable to attach sg device type=%d, minor "
- "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
- error = -ENODEV;
- goto out;
}
static int
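
Note: the sg.c hunk above (and the st.c hunk below) convert from the old
idr_pre_get()/idr_get_new() pair to idr_preload()/idr_alloc(), which takes
an upper bound directly and returns the new id or a negative error
(-ENOSPC once the range is exhausted). A kernel-style sketch of the idiom,
assuming the 3.9+ idr API; my_idr, my_lock and my_register are made-up
names, not part of this patch:

/*
 * Hedged sketch: preallocate outside the lock, allocate the id under the
 * lock without sleeping, then end the preload section.
 */
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int my_register(void *my_object, int max_ids)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep: preallocate here */
	spin_lock(&my_lock);
	/* GFP_NOWAIT: the lock is held, rely on the preloaded node */
	id = idr_alloc(&my_idr, my_object, 0, max_ids, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();

	return id;	/* >= 0 on success, -ENOSPC when the range is full */
}
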
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 5fc97d2ba2fd..f2884ee90710 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -45,6 +45,7 @@
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
#include <scsi/scsi.h>
@@ -79,6 +80,11 @@ static DEFINE_MUTEX(sr_mutex);
static int sr_probe(struct device *);
static int sr_remove(struct device *);
static int sr_done(struct scsi_cmnd *);
+static int sr_runtime_suspend(struct device *dev);
+
+static struct dev_pm_ops sr_pm_ops = {
+ .runtime_suspend = sr_runtime_suspend,
+};
static struct scsi_driver sr_template = {
.owner = THIS_MODULE,
@@ -86,6 +92,7 @@ static struct scsi_driver sr_template = {
.name = "sr",
.probe = sr_probe,
.remove = sr_remove,
+ .pm = &sr_pm_ops,
},
.done = sr_done,
};
@@ -131,6 +138,16 @@ static inline struct scsi_cd *scsi_cd(struct gendisk *disk)
return container_of(disk->private_data, struct scsi_cd, driver);
}
+static int sr_runtime_suspend(struct device *dev)
+{
+ struct scsi_cd *cd = dev_get_drvdata(dev);
+
+ if (cd->media_present)
+ return -EBUSY;
+ else
+ return 0;
+}
+
/*
* The get and put routines for the struct scsi_cd. Note this entity
* has a scsi_device pointer and owns a reference to this.
@@ -146,7 +163,8 @@ static inline struct scsi_cd *scsi_cd_get(struct gendisk *disk)
kref_get(&cd->kref);
if (scsi_device_get(cd->device))
goto out_put;
- goto out;
+ if (!scsi_autopm_get_device(cd->device))
+ goto out;
out_put:
kref_put(&cd->kref, sr_kref_release);
@@ -162,6 +180,7 @@ static void scsi_cd_put(struct scsi_cd *cd)
mutex_lock(&sr_ref_mutex);
kref_put(&cd->kref, sr_kref_release);
+ scsi_autopm_put_device(sdev);
scsi_device_put(sdev);
mutex_unlock(&sr_ref_mutex);
}
@@ -540,6 +559,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
void __user *argp = (void __user *)arg;
int ret;
+ scsi_autopm_get_device(cd->device);
+
mutex_lock(&sr_mutex);
/*
@@ -571,6 +592,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
out:
mutex_unlock(&sr_mutex);
+ scsi_autopm_put_device(cd->device);
return ret;
}
@@ -578,7 +600,17 @@ static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
{
struct scsi_cd *cd = scsi_cd(disk);
- return cdrom_check_events(&cd->cdi, clearing);
+ unsigned int ret;
+
+ if (atomic_read(&cd->device->disk_events_disable_depth) == 0) {
+ scsi_autopm_get_device(cd->device);
+ ret = cdrom_check_events(&cd->cdi, clearing);
+ scsi_autopm_put_device(cd->device);
+ } else {
+ ret = 0;
+ }
+
+ return ret;
}
static int sr_block_revalidate_disk(struct gendisk *disk)
@@ -586,12 +618,16 @@ static int sr_block_revalidate_disk(struct gendisk *disk)
struct scsi_cd *cd = scsi_cd(disk);
struct scsi_sense_hdr sshdr;
+ scsi_autopm_get_device(cd->device);
+
/* if the unit is not ready, nothing more to do */
if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
- return 0;
+ goto out;
sr_cd_check(&cd->cdi);
get_sectorsize(cd);
+out:
+ scsi_autopm_put_device(cd->device);
return 0;
}
@@ -718,6 +754,8 @@ static int sr_probe(struct device *dev)
sdev_printk(KERN_DEBUG, sdev,
"Attached scsi CD-ROM %s\n", cd->cdi.name);
+ scsi_autopm_put_device(cd->device);
+
return 0;
fail_put:
@@ -965,6 +1003,8 @@ static int sr_remove(struct device *dev)
{
struct scsi_cd *cd = dev_get_drvdata(dev);
+ scsi_autopm_get_device(cd->device);
+
blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn);
del_gendisk(cd->disk);
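
Taken together, the sr hunks above apply one runtime-PM pattern: every block-layer entry point (open, ioctl, check_events, revalidate, probe/remove) brackets its work with scsi_autopm_get_device()/scsi_autopm_put_device() so the drive is resumed on demand, while the new runtime_suspend callback vetoes suspend as long as media is present. A hedged sketch of that pairing, with a hypothetical my_drive structure standing in for struct scsi_cd:

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <scsi/scsi_device.h>

struct my_drive {
	struct scsi_device *sdev;
	bool media_present;
};

static int my_runtime_suspend(struct device *dev)
{
	struct my_drive *drv = dev_get_drvdata(dev);

	/* refuse runtime suspend while media is loaded */
	return drv->media_present ? -EBUSY : 0;
}

static int my_block_op(struct my_drive *drv)
{
	int ret;

	scsi_autopm_get_device(drv->sdev);	/* resume the device if suspended */
	ret = 0;				/* ... perform the actual operation ... */
	scsi_autopm_put_device(drv->sdev);	/* drop the PM usage count again */
	return ret;
}
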
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 98156a97c472..86974471af68 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -977,7 +977,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
struct st_modedef *STm;
struct st_partstat *STps;
char *name = tape_name(STp);
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int mode = TAPE_MODE(inode);
STp->ready = ST_READY;
@@ -4076,7 +4076,7 @@ static int st_probe(struct device *dev)
struct st_modedef *STm;
struct st_partstat *STps;
struct st_buffer *buffer;
- int i, dev_num, error;
+ int i, error;
char *stp;
if (SDp->type != TYPE_TAPE)
@@ -4178,27 +4178,17 @@ static int st_probe(struct device *dev)
tpnt->blksize_changed = 0;
mutex_init(&tpnt->lock);
- if (!idr_pre_get(&st_index_idr, GFP_KERNEL)) {
- pr_warn("st: idr expansion failed\n");
- error = -ENOMEM;
- goto out_put_disk;
- }
-
+ idr_preload(GFP_KERNEL);
spin_lock(&st_index_lock);
- error = idr_get_new(&st_index_idr, tpnt, &dev_num);
+ error = idr_alloc(&st_index_idr, tpnt, 0, ST_MAX_TAPES + 1, GFP_NOWAIT);
spin_unlock(&st_index_lock);
- if (error) {
+ idr_preload_end();
+ if (error < 0) {
pr_warn("st: idr allocation failed: %d\n", error);
goto out_put_disk;
}
-
- if (dev_num > ST_MAX_TAPES) {
- pr_err("st: Too many tape devices (max. %d).\n", ST_MAX_TAPES);
- goto out_put_index;
- }
-
- tpnt->index = dev_num;
- sprintf(disk->disk_name, "st%d", dev_num);
+ tpnt->index = error;
+ sprintf(disk->disk_name, "st%d", tpnt->index);
dev_set_drvdata(dev, tpnt);
@@ -4218,9 +4208,8 @@ static int st_probe(struct device *dev)
out_remove_devs:
remove_cdevs(tpnt);
-out_put_index:
spin_lock(&st_index_lock);
- idr_remove(&st_index_idr, dev_num);
+ idr_remove(&st_index_idr, tpnt->index);
spin_unlock(&st_index_lock);
out_put_disk:
put_disk(disk);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 01440782feb2..16a3a0cc9672 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -201,6 +201,7 @@ enum storvsc_request_type {
#define SRB_STATUS_AUTOSENSE_VALID 0x80
#define SRB_STATUS_INVALID_LUN 0x20
#define SRB_STATUS_SUCCESS 0x01
+#define SRB_STATUS_ABORTED 0x02
#define SRB_STATUS_ERROR 0x04
/*
@@ -295,6 +296,25 @@ struct storvsc_scan_work {
uint lun;
};
+static void storvsc_device_scan(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ uint lun;
+ struct scsi_device *sdev;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ lun = wrk->lun;
+
+ sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
+ if (!sdev)
+ goto done;
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+
+done:
+ kfree(wrk);
+}
+
static void storvsc_bus_scan(struct work_struct *work)
{
struct storvsc_scan_work *wrk;
@@ -467,6 +487,7 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
if (!bounce_sgl)
return NULL;
+ sg_init_table(bounce_sgl, num_pages);
for (i = 0; i < num_pages; i++) {
page_buf = alloc_page(GFP_ATOMIC);
if (!page_buf)
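
The sg_init_table() call added above is what makes the bounce scatterlist safe to iterate: a table obtained from kcalloc() carries no end marker until it is initialized, so walking or mapping it beforehand is undefined. A small sketch of the intended allocation sequence, with hypothetical names and illustrative GFP flags:

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *my_alloc_bounce_sgl(unsigned int num_pages)
{
	struct scatterlist *sgl;
	unsigned int i;

	sgl = kcalloc(num_pages, sizeof(*sgl), GFP_ATOMIC);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, num_pages);	/* clear entries and set the end marker */

	for (i = 0; i < num_pages; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto cleanup;
		sg_set_page(&sgl[i], page, PAGE_SIZE, 0);
	}
	return sgl;

cleanup:
	while (i--)
		__free_page(sg_page(&sgl[i]));
	kfree(sgl);
	return NULL;
}
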
@@ -760,6 +781,66 @@ cleanup:
return ret;
}
+static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ struct scsi_cmnd *scmnd,
+ struct Scsi_Host *host,
+ u8 asc, u8 ascq)
+{
+ struct storvsc_scan_work *wrk;
+ void (*process_err_fn)(struct work_struct *work);
+ bool do_work = false;
+
+ switch (vm_srb->srb_status) {
+ case SRB_STATUS_ERROR:
+ /*
+ * If there is an error, offline the device, since all
+ * error recovery strategies would have already been
+ * deployed on the host side. However, if the command
+ * is a pass-through command, deal with it appropriately.
+ */
+ switch (scmnd->cmnd[0]) {
+ case ATA_16:
+ case ATA_12:
+ set_host_byte(scmnd, DID_PASSTHROUGH);
+ break;
+ default:
+ set_host_byte(scmnd, DID_TARGET_FAILURE);
+ }
+ break;
+ case SRB_STATUS_INVALID_LUN:
+ do_work = true;
+ process_err_fn = storvsc_remove_lun;
+ break;
+ case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
+ if ((asc == 0x2a) && (ascq == 0x9)) {
+ do_work = true;
+ process_err_fn = storvsc_device_scan;
+ /*
+ * Retry the I/O that triggered this.
+ */
+ set_host_byte(scmnd, DID_REQUEUE);
+ }
+ break;
+ }
+
+ if (!do_work)
+ return;
+
+ /*
+ * Schedule work to process this error.
+ */
+ wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
+ if (!wrk) {
+ set_host_byte(scmnd, DID_TARGET_FAILURE);
+ return;
+ }
+
+ wrk->host = host;
+ wrk->lun = vm_srb->lun;
+ INIT_WORK(&wrk->work, process_err_fn);
+ schedule_work(&wrk->work);
+}
+
static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
{
@@ -768,8 +849,13 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
void (*scsi_done_fn)(struct scsi_cmnd *);
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
- struct storvsc_scan_work *wrk;
struct stor_mem_pools *memp = scmnd->device->hostdata;
+ struct Scsi_Host *host;
+ struct storvsc_device *stor_dev;
+ struct hv_device *dev = host_dev->dev;
+
+ stor_dev = get_in_stor_device(dev);
+ host = stor_dev->host;
vm_srb = &cmd_request->vstor_packet.vm_srb;
if (cmd_request->bounce_sgl_count) {
@@ -782,55 +868,18 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
cmd_request->bounce_sgl_count);
}
- /*
- * If there is an error; offline the device since all
- * error recovery strategies would have already been
- * deployed on the host side. However, if the command
- * were a pass-through command deal with it appropriately.
- */
scmnd->result = vm_srb->scsi_status;
- if (vm_srb->srb_status == SRB_STATUS_ERROR) {
- switch (scmnd->cmnd[0]) {
- case ATA_16:
- case ATA_12:
- set_host_byte(scmnd, DID_PASSTHROUGH);
- break;
- default:
- set_host_byte(scmnd, DID_TARGET_FAILURE);
- }
- }
-
-
- /*
- * If the LUN is invalid; remove the device.
- */
- if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
- struct storvsc_device *stor_dev;
- struct hv_device *dev = host_dev->dev;
- struct Scsi_Host *host;
-
- stor_dev = get_in_stor_device(dev);
- host = stor_dev->host;
-
- wrk = kmalloc(sizeof(struct storvsc_scan_work),
- GFP_ATOMIC);
- if (!wrk) {
- scmnd->result = DID_TARGET_FAILURE << 16;
- } else {
- wrk->host = host;
- wrk->lun = vm_srb->lun;
- INIT_WORK(&wrk->work, storvsc_remove_lun);
- schedule_work(&wrk->work);
- }
- }
-
if (scmnd->result) {
if (scsi_normalize_sense(scmnd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr))
scsi_print_sense_hdr("storvsc", &sense_hdr);
}
+ if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
+ storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
+ sense_hdr.ascq);
+
scsi_set_resid(scmnd,
cmd_request->data_buffer.len -
vm_srb->data_transfer_length);
@@ -1155,6 +1204,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
+ sdevice->no_write_same = 1;
+
return 0;
}
@@ -1237,6 +1288,8 @@ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
u8 scsi_op = scmnd->cmnd[0];
switch (scsi_op) {
+ /* The host does not handle WRITE_SAME, so log any accidental use. */
+ case WRITE_SAME:
/*
* smartd sends this command and the host does not handle
* this. So, don't send it.
@@ -1410,13 +1463,13 @@ enum {
static const struct hv_vmbus_device_id id_table[] = {
/* SCSI guid */
- { VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
- 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
- .driver_data = SCSI_GUID },
+ { HV_SCSI_GUID,
+ .driver_data = SCSI_GUID
+ },
/* IDE guid */
- { VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
- 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
- .driver_data = IDE_GUID },
+ { HV_IDE_GUID,
+ .driver_data = IDE_GUID
+ },
{ },
};
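
For reference, the 0x2a/0x09 ASC/ASCQ pair tested in storvsc_handle_error() above is the T10 "capacity data has changed" unit attention, which is why the handler requeues the I/O (DID_REQUEUE) and schedules a rescan rather than failing the command outright. A minimal sketch of that sense-code check, using a hypothetical helper name:

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

/* true if the sense data reports "capacity data has changed" (0x2a/0x09) */
static bool my_capacity_changed(struct scsi_cmnd *scmnd)
{
	struct scsi_sense_hdr sshdr;

	if (!scsi_normalize_sense(scmnd->sense_buffer,
				  SCSI_SENSE_BUFFERSIZE, &sshdr))
		return false;

	return sshdr.asc == 0x2a && sshdr.ascq == 0x09;
}
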
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 8f27f9d6f91d..0371047c5922 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -2,48 +2,58 @@
# Kernel configuration file for the UFS Host Controller
#
# This code is based on drivers/scsi/ufs/Kconfig
-# Copyright (C) 2011 Samsung Samsung India Software Operations
+# Copyright (C) 2011-2013 Samsung India Software Operations
+#
+# Authors:
+# Santosh Yaraganavi <santosh.sy@samsung.com>
+# Vinayak Holikatti <h.vinayak@samsung.com>
#
-# Santosh Yaraganavi <santosh.sy@samsung.com>
-# Vinayak Holikatti <h.vinayak@samsung.com>
-
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
-
+# See the COPYING file in the top-level directory or visit
+# <http://www.gnu.org/licenses/gpl-2.0.html>
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
+#
+# This program is provided "AS IS" and "WITH ALL FAULTS" and
+# without warranty of any kind. You are solely responsible for
+# determining the appropriateness of using and distributing
+# the program and assume all risks associated with your exercise
+# of rights with respect to the program, including but not limited
+# to infringement of third party rights, the risks and costs of
+# program errors, damage to or loss of data, programs or equipment,
+# and unavailability or interruption of operations. Under no
+# circumstances will the contributor of this Program be liable for
+# any damages of any kind arising from your use or distribution of
+# this program.
-# NO WARRANTY
-# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
-# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
-# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
-# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
-# solely responsible for determining the appropriateness of using and
-# distributing the Program and assumes all risks associated with its
-# exercise of rights under this Agreement, including but not limited to
-# the risks and costs of program errors, damage to or loss of data,
-# programs or equipment, and unavailability or interruption of operations.
-
-# DISCLAIMER OF LIABILITY
-# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
-# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+config SCSI_UFSHCD
+ tristate "Universal Flash Storage Controller Driver Core"
+ depends on SCSI
+ ---help---
+ This selects the support for UFS devices in Linux, say Y and make
+ sure that you know the name of your UFS host adapter (the card
+ inside your computer that "speaks" the UFS protocol, also
+ called UFS Host Controller), because you will be asked for it.
+ The module will be called ufshcd.
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
-# USA.
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/ufs.txt>.
+ However, do not compile this as a module if your root file system
+ (the one containing the directory /) is located on a UFS device.
-config SCSI_UFSHCD
- tristate "Universal Flash Storage host controller driver"
- depends on PCI && SCSI
+config SCSI_UFSHCD_PCI
+ tristate "PCI bus based UFS Controller support"
+ depends on SCSI_UFSHCD && PCI
---help---
- This is a generic driver which supports PCIe UFS Host controllers.
+ This selects the PCI UFS Host Controller Interface. Select this if
+ you have UFS Host Controller with PCI Interface.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index adf7895a6a91..9eda0dfbd6df 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,2 +1,3 @@
# UFSHCD makefile
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
+obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index b207529f8d54..139bc0647b41 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -2,45 +2,35 @@
* Universal Flash Storage Host controller driver
*
* This code is based on drivers/scsi/ufs/ufs.h
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
*
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
*/
#ifndef _UFS_H
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
new file mode 100644
index 000000000000..5cb1d75f5868
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -0,0 +1,211 @@
+/*
+ * Universal Flash Storage Host controller PCI glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pci.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/pci.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pci_suspend - suspend power management function
+ * @pdev: pointer to PCI device handle
+ * @state: power state
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ /*
+ * TODO:
+ * 1. Call ufshcd_suspend
+ * 2. Do bus specific power management
+ */
+
+ return -ENOSYS;
+}
+
+/**
+ * ufshcd_pci_resume - resume power management function
+ * @pdev: pointer to PCI device handle
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_pci_resume(struct pci_dev *pdev)
+{
+ /*
+ * TODO:
+ * 1. Call ufshcd_resume.
+ * 2. Do bus specific wake up
+ */
+
+ return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * ufshcd_pci_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_pci_shutdown(struct pci_dev *pdev)
+{
+ ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+/**
+ * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space
+ * data structure memory
+ * @pdev - pointer to PCI handle
+ */
+static void ufshcd_pci_remove(struct pci_dev *pdev)
+{
+ struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+ disable_irq(pdev->irq);
+ free_irq(pdev->irq, hba);
+ ufshcd_remove(hba);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ * addressing capability
+ * @pdev: PCI device structure
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct pci_dev *pdev)
+{
+ int err;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ return 0;
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ return err;
+}
+
+/**
+ * ufshcd_pci_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int
+ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ufs_hba *hba;
+ void __iomem *mmio_base;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto out_error;
+ }
+
+ pci_set_master(pdev);
+
+
+ err = pci_request_regions(pdev, UFSHCD);
+ if (err < 0) {
+ dev_err(&pdev->dev, "request regions failed\n");
+ goto out_disable;
+ }
+
+ mmio_base = pci_ioremap_bar(pdev, 0);
+ if (!mmio_base) {
+ dev_err(&pdev->dev, "memory map failed\n");
+ err = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ err = ufshcd_set_dma_mask(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "set dma mask failed\n");
+ goto out_iounmap;
+ }
+
+ err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
+ if (err) {
+ dev_err(&pdev->dev, "Initialization failed\n");
+ goto out_iounmap;
+ }
+
+ pci_set_drvdata(pdev, hba);
+
+ return 0;
+
+out_iounmap:
+ iounmap(mmio_base);
+out_release_regions:
+ pci_release_regions(pdev);
+out_disable:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+out_error:
+ return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
+ { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+ .name = UFSHCD,
+ .id_table = ufshcd_pci_tbl,
+ .probe = ufshcd_pci_probe,
+ .remove = ufshcd_pci_remove,
+ .shutdown = ufshcd_pci_shutdown,
+#ifdef CONFIG_PM
+ .suspend = ufshcd_pci_suspend,
+ .resume = ufshcd_pci_resume,
+#endif
+};
+
+module_pci_driver(ufshcd_pci_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller PCI glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
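
With the core/glue split above, a bus glue only has to map its registers, obtain an interrupt line and hand both to the exported ufshcd_init(); the PCI file above is one such glue. As a hedged illustration of the same API used from the platform bus (a driver that is not part of this series; names and resource handling are illustrative):

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "ufshcd.h"

static int my_ufs_pltfrm_probe(struct platform_device *pdev)
{
	struct ufs_hba *hba;
	struct resource *res;
	void __iomem *mmio_base;
	int irq, err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmio_base))
		return PTR_ERR(mmio_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* the core allocates the SCSI host and requests the IRQ itself */
	err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq);
	if (err)
		return err;

	platform_set_drvdata(pdev, hba);
	return 0;
}

static int my_ufs_pltfrm_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	/* as in the PCI glue, the bus glue owns IRQ teardown */
	disable_irq(hba->irq);
	free_irq(hba->irq, hba);
	ufshcd_remove(hba);
	return 0;
}

static struct platform_driver my_ufs_pltfrm_driver = {
	.probe	= my_ufs_pltfrm_probe,
	.remove	= my_ufs_pltfrm_remove,
	.driver	= {
		.name	= "my-ufshcd",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(my_ufs_pltfrm_driver);

MODULE_LICENSE("GPL");
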
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 91a4046ca9ba..60fd40c4e4c2 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1,77 +1,39 @@
/*
- * Universal Flash Storage Host controller driver
+ * Universal Flash Storage Host controller driver Core
*
* This code is based on drivers/scsi/ufs/ufshcd.c
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
*
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/bitops.h>
-
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_eh.h>
-
-#include "ufs.h"
-#include "ufshci.h"
-
-#define UFSHCD "ufshcd"
-#define UFSHCD_DRIVER_VERSION "0.1"
+#include "ufshcd.h"
enum {
UFSHCD_MAX_CHANNEL = 0,
@@ -102,121 +64,6 @@ enum {
};
/**
- * struct uic_command - UIC command structure
- * @command: UIC command
- * @argument1: UIC command argument 1
- * @argument2: UIC command argument 2
- * @argument3: UIC command argument 3
- * @cmd_active: Indicate if UIC command is outstanding
- * @result: UIC command result
- */
-struct uic_command {
- u32 command;
- u32 argument1;
- u32 argument2;
- u32 argument3;
- int cmd_active;
- int result;
-};
-
-/**
- * struct ufs_hba - per adapter private structure
- * @mmio_base: UFSHCI base register address
- * @ucdl_base_addr: UFS Command Descriptor base address
- * @utrdl_base_addr: UTP Transfer Request Descriptor base address
- * @utmrdl_base_addr: UTP Task Management Descriptor base address
- * @ucdl_dma_addr: UFS Command Descriptor DMA address
- * @utrdl_dma_addr: UTRDL DMA address
- * @utmrdl_dma_addr: UTMRDL DMA address
- * @host: Scsi_Host instance of the driver
- * @pdev: PCI device handle
- * @lrb: local reference block
- * @outstanding_tasks: Bits representing outstanding task requests
- * @outstanding_reqs: Bits representing outstanding transfer requests
- * @capabilities: UFS Controller Capabilities
- * @nutrs: Transfer Request Queue depth supported by controller
- * @nutmrs: Task Management Queue depth supported by controller
- * @active_uic_cmd: handle of active UIC command
- * @ufshcd_tm_wait_queue: wait queue for task management
- * @tm_condition: condition variable for task management
- * @ufshcd_state: UFSHCD states
- * @int_enable_mask: Interrupt Mask Bits
- * @uic_workq: Work queue for UIC completion handling
- * @feh_workq: Work queue for fatal controller error handling
- * @errors: HBA errors
- */
-struct ufs_hba {
- void __iomem *mmio_base;
-
- /* Virtual memory reference */
- struct utp_transfer_cmd_desc *ucdl_base_addr;
- struct utp_transfer_req_desc *utrdl_base_addr;
- struct utp_task_req_desc *utmrdl_base_addr;
-
- /* DMA memory reference */
- dma_addr_t ucdl_dma_addr;
- dma_addr_t utrdl_dma_addr;
- dma_addr_t utmrdl_dma_addr;
-
- struct Scsi_Host *host;
- struct pci_dev *pdev;
-
- struct ufshcd_lrb *lrb;
-
- unsigned long outstanding_tasks;
- unsigned long outstanding_reqs;
-
- u32 capabilities;
- int nutrs;
- int nutmrs;
- u32 ufs_version;
-
- struct uic_command active_uic_cmd;
- wait_queue_head_t ufshcd_tm_wait_queue;
- unsigned long tm_condition;
-
- u32 ufshcd_state;
- u32 int_enable_mask;
-
- /* Work Queues */
- struct work_struct uic_workq;
- struct work_struct feh_workq;
-
- /* HBA Errors */
- u32 errors;
-};
-
-/**
- * struct ufshcd_lrb - local reference block
- * @utr_descriptor_ptr: UTRD address of the command
- * @ucd_cmd_ptr: UCD address of the command
- * @ucd_rsp_ptr: Response UPIU address for this command
- * @ucd_prdt_ptr: PRDT address of the command
- * @cmd: pointer to SCSI command
- * @sense_buffer: pointer to sense buffer address of the SCSI command
- * @sense_bufflen: Length of the sense buffer
- * @scsi_status: SCSI status of the command
- * @command_type: SCSI, UFS, Query.
- * @task_tag: Task tag of the command
- * @lun: LUN of the command
- */
-struct ufshcd_lrb {
- struct utp_transfer_req_desc *utr_descriptor_ptr;
- struct utp_upiu_cmd *ucd_cmd_ptr;
- struct utp_upiu_rsp *ucd_rsp_ptr;
- struct ufshcd_sg_entry *ucd_prdt_ptr;
-
- struct scsi_cmnd *cmd;
- u8 *sense_buffer;
- unsigned int sense_bufflen;
- int scsi_status;
-
- int command_type;
- int task_tag;
- unsigned int lun;
-};
-
-/**
* ufshcd_get_ufs_version - Get the UFS version supported by the HBA
* @hba - Pointer to adapter instance
*
@@ -335,21 +182,21 @@ static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
if (hba->utmrdl_base_addr) {
utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
- dma_free_coherent(&hba->pdev->dev, utmrdl_size,
+ dma_free_coherent(hba->dev, utmrdl_size,
hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
}
if (hba->utrdl_base_addr) {
utrdl_size =
(sizeof(struct utp_transfer_req_desc) * hba->nutrs);
- dma_free_coherent(&hba->pdev->dev, utrdl_size,
+ dma_free_coherent(hba->dev, utrdl_size,
hba->utrdl_base_addr, hba->utrdl_dma_addr);
}
if (hba->ucdl_base_addr) {
ucdl_size =
(sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
- dma_free_coherent(&hba->pdev->dev, ucdl_size,
+ dma_free_coherent(hba->dev, ucdl_size,
hba->ucdl_base_addr, hba->ucdl_dma_addr);
}
}
@@ -429,15 +276,6 @@ static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
}
/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
-{
- writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
-}
-
-/**
* ufshcd_hba_start - Start controller initialization sequence
* @hba: per adapter instance
*/
@@ -724,7 +562,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/* Allocate memory for UTP command descriptors */
ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
- hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ hba->ucdl_base_addr = dma_alloc_coherent(hba->dev,
ucdl_size,
&hba->ucdl_dma_addr,
GFP_KERNEL);
@@ -737,7 +575,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
*/
if (!hba->ucdl_base_addr ||
WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Command Descriptor Memory allocation failed\n");
goto out;
}
@@ -747,13 +585,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
* UFSHCI requires 1024 byte alignment of UTRD
*/
utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
- hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ hba->utrdl_base_addr = dma_alloc_coherent(hba->dev,
utrdl_size,
&hba->utrdl_dma_addr,
GFP_KERNEL);
if (!hba->utrdl_base_addr ||
WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Transfer Descriptor Memory allocation failed\n");
goto out;
}
@@ -763,13 +601,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
* UFSHCI requires 1024 byte alignment of UTMRD
*/
utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
- hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ hba->utmrdl_base_addr = dma_alloc_coherent(hba->dev,
utmrdl_size,
&hba->utmrdl_dma_addr,
GFP_KERNEL);
if (!hba->utmrdl_base_addr ||
WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Task Management Descriptor Memory allocation failed\n");
goto out;
}
@@ -777,7 +615,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/* Allocate memory for local reference block */
hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
if (!hba->lrb) {
- dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n");
+ dev_err(hba->dev, "LRB Memory allocation failed\n");
goto out;
}
return 0;
@@ -867,7 +705,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
/* check if controller is ready to accept UIC commands */
if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
UIC_COMMAND_READY) == 0x0) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Controller not ready"
" to accept UIC commands\n");
return -EIO;
@@ -912,7 +750,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
/* check if device present */
reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
if (!ufshcd_is_device_present(reg)) {
- dev_err(&hba->pdev->dev, "cc: Device not present\n");
+ dev_err(hba->dev, "cc: Device not present\n");
err = -ENXIO;
goto out;
}
@@ -924,7 +762,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
if (!(ufshcd_get_lists_status(reg))) {
ufshcd_enable_run_stop_reg(hba);
} else {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Host controller not ready to process requests");
err = -EIO;
goto out;
@@ -1005,7 +843,7 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
if (retry) {
retry--;
} else {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Controller enable failed\n");
return -EIO;
}
@@ -1084,7 +922,7 @@ static int ufshcd_do_reset(struct ufs_hba *hba)
/* start the initialization process */
if (ufshcd_initialize_hba(hba)) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Reset: Controller initialization failed\n");
return FAILED;
}
@@ -1167,7 +1005,7 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
task_result = SUCCESS;
} else {
task_result = FAILED;
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"trc: Invalid ocs = %x\n", ocs_value);
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1281,7 +1119,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
/* check if the returned transfer response is valid */
result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
if (result) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Invalid response = %x\n", result);
break;
}
@@ -1310,7 +1148,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
case OCS_FATAL_ERROR:
default:
result |= DID_ERROR << 16;
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"OCS error from controller = %x\n", ocs);
break;
} /* end of switch */
@@ -1374,7 +1212,7 @@ static void ufshcd_uic_cc_handler (struct work_struct *work)
!(ufshcd_get_uic_cmd_result(hba))) {
if (ufshcd_make_hba_operational(hba))
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"cc: hba not operational state\n");
return;
}
@@ -1509,7 +1347,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
free_slot = ufshcd_get_tm_free_slot(hba);
if (free_slot >= hba->nutmrs) {
spin_unlock_irqrestore(host->host_lock, flags);
- dev_err(&hba->pdev->dev, "Task management queue full\n");
+ dev_err(hba->dev, "Task management queue full\n");
err = FAILED;
goto out;
}
@@ -1552,7 +1390,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
&hba->tm_condition) != 0),
60 * HZ);
if (!err) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Task management command timed-out\n");
err = FAILED;
goto out;
@@ -1688,23 +1526,13 @@ static struct scsi_host_template ufshcd_driver_template = {
};
/**
- * ufshcd_shutdown - main function to put the controller in reset state
- * @pdev: pointer to PCI device handle
- */
-static void ufshcd_shutdown(struct pci_dev *pdev)
-{
- ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
-}
-
-#ifdef CONFIG_PM
-/**
* ufshcd_suspend - suspend power management function
- * @pdev: pointer to PCI device handle
+ * @hba: per adapter instance
* @state: power state
*
* Returns -ENOSYS
*/
-static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
+int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
{
/*
* TODO:
@@ -1717,14 +1545,15 @@ static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
return -ENOSYS;
}
+EXPORT_SYMBOL_GPL(ufshcd_suspend);
/**
* ufshcd_resume - resume power management function
- * @pdev: pointer to PCI device handle
+ * @hba: per adapter instance
*
* Returns -ENOSYS
*/
-static int ufshcd_resume(struct pci_dev *pdev)
+int ufshcd_resume(struct ufs_hba *hba)
{
/*
* TODO:
@@ -1737,7 +1566,7 @@ static int ufshcd_resume(struct pci_dev *pdev)
return -ENOSYS;
}
-#endif /* CONFIG_PM */
+EXPORT_SYMBOL_GPL(ufshcd_resume);
/**
* ufshcd_hba_free - free allocated memory for
@@ -1748,107 +1577,67 @@ static void ufshcd_hba_free(struct ufs_hba *hba)
{
iounmap(hba->mmio_base);
ufshcd_free_hba_memory(hba);
- pci_release_regions(hba->pdev);
}
/**
- * ufshcd_remove - de-allocate PCI/SCSI host and host memory space
+ * ufshcd_remove - de-allocate SCSI host and host memory space
* data structure memory
- * @pdev - pointer to PCI handle
+ * @hba - per adapter instance
*/
-static void ufshcd_remove(struct pci_dev *pdev)
+void ufshcd_remove(struct ufs_hba *hba)
{
- struct ufs_hba *hba = pci_get_drvdata(pdev);
-
/* disable interrupts */
ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
- free_irq(pdev->irq, hba);
ufshcd_hba_stop(hba);
ufshcd_hba_free(hba);
scsi_remove_host(hba->host);
scsi_host_put(hba->host);
- pci_set_drvdata(pdev, NULL);
- pci_clear_master(pdev);
- pci_disable_device(pdev);
-}
-
-/**
- * ufshcd_set_dma_mask - Set dma mask based on the controller
- * addressing capability
- * @pdev: PCI device structure
- *
- * Returns 0 for success, non-zero for failure
- */
-static int ufshcd_set_dma_mask(struct ufs_hba *hba)
-{
- int err;
- u64 dma_mask;
-
- /*
- * If controller supports 64 bit addressing mode, then set the DMA
- * mask to 64-bit, else set the DMA mask to 32-bit
- */
- if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT)
- dma_mask = DMA_BIT_MASK(64);
- else
- dma_mask = DMA_BIT_MASK(32);
-
- err = pci_set_dma_mask(hba->pdev, dma_mask);
- if (err)
- return err;
-
- err = pci_set_consistent_dma_mask(hba->pdev, dma_mask);
-
- return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_remove);
/**
- * ufshcd_probe - probe routine of the driver
- * @pdev: pointer to PCI device handle
- * @id: PCI device id
- *
+ * ufshcd_init - Driver initialization routine
+ * @dev: pointer to device handle
+ * @hba_handle: driver private handle
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
+ void __iomem *mmio_base, unsigned int irq)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
int err;
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_device failed\n");
+ if (!dev) {
+ dev_err(dev,
+ "Invalid memory reference for dev is NULL\n");
+ err = -ENODEV;
goto out_error;
}
- pci_set_master(pdev);
+ if (!mmio_base) {
+ dev_err(dev,
+ "Invalid memory reference for mmio_base is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
host = scsi_host_alloc(&ufshcd_driver_template,
sizeof(struct ufs_hba));
if (!host) {
- dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+ dev_err(dev, "scsi_host_alloc failed\n");
err = -ENOMEM;
- goto out_disable;
+ goto out_error;
}
hba = shost_priv(host);
-
- err = pci_request_regions(pdev, UFSHCD);
- if (err < 0) {
- dev_err(&pdev->dev, "request regions failed\n");
- goto out_host_put;
- }
-
- hba->mmio_base = pci_ioremap_bar(pdev, 0);
- if (!hba->mmio_base) {
- dev_err(&pdev->dev, "memory map failed\n");
- err = -ENOMEM;
- goto out_release_regions;
- }
-
hba->host = host;
- hba->pdev = pdev;
+ hba->dev = dev;
+ hba->mmio_base = mmio_base;
+ hba->irq = irq;
/* Read capabilities registers */
ufshcd_hba_capabilities(hba);
@@ -1856,17 +1645,11 @@ static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Get UFS version supported by the controller */
hba->ufs_version = ufshcd_get_ufs_version(hba);
- err = ufshcd_set_dma_mask(hba);
- if (err) {
- dev_err(&pdev->dev, "set dma mask failed\n");
- goto out_iounmap;
- }
-
/* Allocate memory for host memory space */
err = ufshcd_memory_alloc(hba);
if (err) {
- dev_err(&pdev->dev, "Memory allocation failed\n");
- goto out_iounmap;
+ dev_err(hba->dev, "Memory allocation failed\n");
+ goto out_disable;
}
/* Configure LRB */
@@ -1888,76 +1671,50 @@ static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
/* IRQ registration */
- err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ err = request_irq(irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
- dev_err(&pdev->dev, "request irq failed\n");
+ dev_err(hba->dev, "request irq failed\n");
goto out_lrb_free;
}
/* Enable SCSI tag mapping */
err = scsi_init_shared_tag_map(host, host->can_queue);
if (err) {
- dev_err(&pdev->dev, "init shared queue failed\n");
+ dev_err(hba->dev, "init shared queue failed\n");
goto out_free_irq;
}
- pci_set_drvdata(pdev, hba);
-
- err = scsi_add_host(host, &pdev->dev);
+ err = scsi_add_host(host, hba->dev);
if (err) {
- dev_err(&pdev->dev, "scsi_add_host failed\n");
+ dev_err(hba->dev, "scsi_add_host failed\n");
goto out_free_irq;
}
/* Initialization routine */
err = ufshcd_initialize_hba(hba);
if (err) {
- dev_err(&pdev->dev, "Initialization failed\n");
- goto out_free_irq;
+ dev_err(hba->dev, "Initialization failed\n");
+ goto out_remove_scsi_host;
}
+ *hba_handle = hba;
return 0;
+out_remove_scsi_host:
+ scsi_remove_host(hba->host);
out_free_irq:
- free_irq(pdev->irq, hba);
+ free_irq(irq, hba);
out_lrb_free:
ufshcd_free_hba_memory(hba);
-out_iounmap:
- iounmap(hba->mmio_base);
-out_release_regions:
- pci_release_regions(pdev);
-out_host_put:
- scsi_host_put(host);
out_disable:
- pci_clear_master(pdev);
- pci_disable_device(pdev);
+ scsi_host_put(host);
out_error:
return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_init);
-static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
- { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { } /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
-
-static struct pci_driver ufshcd_pci_driver = {
- .name = UFSHCD,
- .id_table = ufshcd_pci_tbl,
- .probe = ufshcd_probe,
- .remove = ufshcd_remove,
- .shutdown = ufshcd_shutdown,
-#ifdef CONFIG_PM
- .suspend = ufshcd_suspend,
- .resume = ufshcd_resume,
-#endif
-};
-
-module_pci_driver(ufshcd_pci_driver);
-
-MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
- "Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("Generic UFS host controller driver");
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
new file mode 100644
index 000000000000..6b99a42f5819
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -0,0 +1,202 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.h
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#ifndef _UFSHCD_H
+#define _UFSHCD_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "ufs.h"
+#include "ufshci.h"
+
+#define UFSHCD "ufshcd"
+#define UFSHCD_DRIVER_VERSION "0.2"
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @result: UIC command result
+ */
+struct uic_command {
+ u32 command;
+ u32 argument1;
+ u32 argument2;
+ u32 argument3;
+ int cmd_active;
+ int result;
+};
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @cmd: pointer to SCSI command
+ * @sense_buffer: pointer to sense buffer address of the SCSI command
+ * @sense_bufflen: Length of the sense buffer
+ * @scsi_status: SCSI status of the command
+ * @command_type: SCSI, UFS, Query.
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ */
+struct ufshcd_lrb {
+ struct utp_transfer_req_desc *utr_descriptor_ptr;
+ struct utp_upiu_cmd *ucd_cmd_ptr;
+ struct utp_upiu_rsp *ucd_rsp_ptr;
+ struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+ struct scsi_cmnd *cmd;
+ u8 *sense_buffer;
+ unsigned int sense_bufflen;
+ int scsi_status;
+
+ int command_type;
+ int task_tag;
+ unsigned int lun;
+};
+
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @dev: device handle
+ * @lrb: local reference block
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @ufs_version: UFS Version to which controller complies
+ * @irq: Irq number of the controller
+ * @active_uic_cmd: handle of active UIC command
+ * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_condition: condition variable for task management
+ * @ufshcd_state: UFSHCD states
+ * @int_enable_mask: Interrupt Mask Bits
+ * @uic_workq: Work queue for UIC completion handling
+ * @feh_workq: Work queue for fatal controller error handling
+ * @errors: HBA errors
+ */
+struct ufs_hba {
+ void __iomem *mmio_base;
+
+ /* Virtual memory reference */
+ struct utp_transfer_cmd_desc *ucdl_base_addr;
+ struct utp_transfer_req_desc *utrdl_base_addr;
+ struct utp_task_req_desc *utmrdl_base_addr;
+
+ /* DMA memory reference */
+ dma_addr_t ucdl_dma_addr;
+ dma_addr_t utrdl_dma_addr;
+ dma_addr_t utmrdl_dma_addr;
+
+ struct Scsi_Host *host;
+ struct device *dev;
+
+ struct ufshcd_lrb *lrb;
+
+ unsigned long outstanding_tasks;
+ unsigned long outstanding_reqs;
+
+ u32 capabilities;
+ int nutrs;
+ int nutmrs;
+ u32 ufs_version;
+ unsigned int irq;
+
+ struct uic_command active_uic_cmd;
+ wait_queue_head_t ufshcd_tm_wait_queue;
+ unsigned long tm_condition;
+
+ u32 ufshcd_state;
+ u32 int_enable_mask;
+
+ /* Work Queues */
+ struct work_struct uic_workq;
+ struct work_struct feh_workq;
+
+ /* HBA Errors */
+ u32 errors;
+};
+
+int ufshcd_init(struct device *, struct ufs_hba **, void __iomem *,
+ unsigned int);
+void ufshcd_remove(struct ufs_hba *);
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+{
+ writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 6e3510f71167..0c164847a3ef 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -2,45 +2,35 @@
* Universal Flash Storage Host controller driver
*
* This code is based on drivers/scsi/ufs/ufshci.h
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
*
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
*/
#ifndef _UFSHCI_H
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig
index d860ef743568..f168a6159961 100644
--- a/drivers/sh/Kconfig
+++ b/drivers/sh/Kconfig
@@ -1,6 +1,5 @@
menu "SuperH / SH-Mobile Driver Options"
source "drivers/sh/intc/Kconfig"
-source "drivers/sh/pfc/Kconfig"
endmenu
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index e57895b1a425..fc67f564f02c 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -5,7 +5,6 @@ obj-y := intc/
obj-$(CONFIG_HAVE_CLK) += clk/
obj-$(CONFIG_MAPLE) += maple/
-obj-$(CONFIG_SH_PFC) += pfc/
obj-$(CONFIG_SUPERHYWAY) += superhyway/
obj-y += pm_runtime.o
diff --git a/drivers/sh/pfc/Kconfig b/drivers/sh/pfc/Kconfig
deleted file mode 100644
index 804f9ad1bf4a..000000000000
--- a/drivers/sh/pfc/Kconfig
+++ /dev/null
@@ -1,26 +0,0 @@
-comment "Pin function controller options"
-
-config SH_PFC
- # XXX move off the gpio dependency
- depends on GENERIC_GPIO
- select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB
- select PINCTRL_SH_PFC
- def_bool y
-
-#
-# Placeholder for now, rehome to drivers/pinctrl once the PFC APIs
-# have settled.
-#
-config PINCTRL_SH_PFC
- tristate "SuperH PFC pin controller driver"
- depends on SH_PFC
- select PINCTRL
- select PINMUX
- select PINCONF
-
-config GPIO_SH_PFC
- tristate "SuperH PFC GPIO support"
- depends on SH_PFC && GPIOLIB
- help
- This enables support for GPIOs within the SoC's pin function
- controller.
diff --git a/drivers/sh/pfc/Makefile b/drivers/sh/pfc/Makefile
deleted file mode 100644
index 7916027cce37..000000000000
--- a/drivers/sh/pfc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-y += core.o
-obj-$(CONFIG_PINCTRL_SH_PFC) += pinctrl.o
-obj-$(CONFIG_GPIO_SH_PFC) += gpio.o
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 8a61b27a9f2d..3d6a12b2af04 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -34,7 +34,7 @@
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-tegra.h>
-#include <mach/clk.h>
+#include <linux/clk/tegra.h>
#define SPI_COMMAND 0x000
#define SPI_GO BIT(30)
@@ -531,7 +531,7 @@ static int tegra_sflash_probe(struct platform_device *pdev)
goto exit_free_master;
}
- tsd->clk = devm_clk_get(&pdev->dev, "spi");
+ tsd->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(tsd->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tsd->clk);
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 8458c4bf7172..b8698b389ef3 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -35,7 +35,7 @@
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-tegra.h>
-#include <mach/clk.h>
+#include <linux/clk/tegra.h>
#define SLINK_COMMAND 0x000
#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
@@ -1186,7 +1186,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
goto exit_free_master;
}
- tspi->clk = devm_clk_get(&pdev->dev, "slink");
+ tspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(tspi->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tspi->clk);
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index a43415a7fbed..4c0f6d883dd3 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#ifdef CONFIG_BCM47XX
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
#endif
#include "ssb_private.h"
@@ -322,7 +322,7 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
if (bus->bustype == SSB_BUSTYPE_SSB) {
#ifdef CONFIG_BCM47XX
char buf[20];
- if (nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0)
+ if (bcm47xx_nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0)
crystalfreq = simple_strtoul(buf, NULL, 0);
#endif
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 3a7965d6ac28..093f10c88cce 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -112,8 +112,6 @@ source "drivers/staging/media/Kconfig"
source "drivers/staging/net/Kconfig"
-source "drivers/staging/omapdrm/Kconfig"
-
source "drivers/staging/android/Kconfig"
source "drivers/staging/ozwpan/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 5971865d0c61..fa41b04cf4cb 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -48,7 +48,6 @@ obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
obj-$(CONFIG_MFD_NVEC) += nvec/
-obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
obj-$(CONFIG_USB_G_CCG) += ccg/
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 538ebe213129..24456a0de6b2 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -2880,7 +2880,6 @@ static int binder_release(struct inode *nodp, struct file *filp)
static void binder_deferred_release(struct binder_proc *proc)
{
- struct hlist_node *pos;
struct binder_transaction *t;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
@@ -2924,7 +2923,7 @@ static void binder_deferred_release(struct binder_proc *proc)
node->local_weak_refs = 0;
hlist_add_head(&node->dead_node, &binder_dead_nodes);
- hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+ hlist_for_each_entry(ref, &node->refs, node_entry) {
incoming_refs++;
if (ref->death) {
death++;
@@ -3156,12 +3155,11 @@ static void print_binder_thread(struct seq_file *m,
static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
struct binder_ref *ref;
- struct hlist_node *pos;
struct binder_work *w;
int count;
count = 0;
- hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
@@ -3171,7 +3169,7 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
node->internal_strong_refs, count);
if (count) {
seq_puts(m, " proc");
- hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ hlist_for_each_entry(ref, &node->refs, node_entry)
seq_printf(m, " %d", ref->proc->pid);
}
seq_puts(m, "\n");
@@ -3369,7 +3367,6 @@ static void print_binder_proc_stats(struct seq_file *m,
static int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- struct hlist_node *pos;
struct binder_node *node;
int do_lock = !binder_debug_no_lock;
@@ -3380,10 +3377,10 @@ static int binder_state_show(struct seq_file *m, void *unused)
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
print_binder_node(m, node);
- hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
if (do_lock)
binder_unlock(__func__);
@@ -3393,7 +3390,6 @@ static int binder_state_show(struct seq_file *m, void *unused)
static int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- struct hlist_node *pos;
int do_lock = !binder_debug_no_lock;
if (do_lock)
@@ -3403,7 +3399,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)
print_binder_stats(m, "", &binder_stats);
- hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc_stats(m, proc);
if (do_lock)
binder_unlock(__func__);
@@ -3413,14 +3409,13 @@ static int binder_stats_show(struct seq_file *m, void *unused)
static int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- struct hlist_node *pos;
int do_lock = !binder_debug_no_lock;
if (do_lock)
binder_lock(__func__);
seq_puts(m, "binder transactions:\n");
- hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 0);
if (do_lock)
binder_unlock(__func__);
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
index 76ead8dac265..f240464effde 100644
--- a/drivers/staging/android/binder.h
+++ b/drivers/staging/android/binder.h
@@ -224,7 +224,7 @@ enum binder_driver_return_protocol {
BR_SPAWN_LOOPER = _IO('r', 13),
/*
* No parameters. The driver has determined that a process has no
- * threads waiting to service incomming transactions. When a process
+ * threads waiting to service incoming transactions. When a process
* receives this command, it must spawn a new service thread and
* register it via bcENTER_LOOPER.
*/
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index b5c2c4c15f92..d23eeeb95064 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -185,7 +185,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path);
return -ENOENT;
}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)flp->f_dentry->d_inode->i_size, loc);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)file_inode(flp)->i_size, loc);
do_gettimeofday(&tv);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "download start %lx", ((tv.tv_sec * 1000) + (tv.tv_usec / 1000)));
diff --git a/drivers/staging/ccg/Kconfig b/drivers/staging/ccg/Kconfig
index 8997a8c757aa..7ed5bc6caadb 100644
--- a/drivers/staging/ccg/Kconfig
+++ b/drivers/staging/ccg/Kconfig
@@ -2,7 +2,7 @@ if USB_GADGET
config USB_G_CCG
tristate "Configurable Composite Gadget (STAGING)"
- depends on STAGING && BLOCK && NET && !USB_ZERO && !USB_ZERO_HNPTEST && !USB_AUDIO && !GADGET_UAC1 && !USB_ETH && !USB_ETH_RNDIS && !USB_ETH_EEM && !USB_G_NCM && !USB_GADGETFS && !USB_FUNCTIONFS && !USB_FUNCTIONFS_ETH && !USB_FUNCTIONFS_RNDIS && !USB_FUNCTIONFS_GENERIC && !USB_FILE_STORAGE && !USB_FILE_STORAGE_TEST && !USB_MASS_STORAGE && !USB_G_SERIAL && !USB_MIDI_GADGET && !USB_G_PRINTER && !USB_CDC_COMPOSITE && !USB_G_NOKIA && !USB_G_ACM_MS && !USB_G_MULTI && !USB_G_MULTI_RNDIS && !USB_G_MULTI_CDC && !USB_G_HID && !USB_G_DBGP && !USB_G_WEBCAM
+ depends on STAGING && BLOCK && NET && !USB_ZERO && !USB_ZERO_HNPTEST && !USB_AUDIO && !GADGET_UAC1 && !USB_ETH && !USB_ETH_RNDIS && !USB_ETH_EEM && !USB_G_NCM && !USB_GADGETFS && !USB_FUNCTIONFS && !USB_FUNCTIONFS_ETH && !USB_FUNCTIONFS_RNDIS && !USB_FUNCTIONFS_GENERIC && !USB_FILE_STORAGE && !USB_FILE_STORAGE_TEST && !USB_MASS_STORAGE && !USB_G_SERIAL && !USB_MIDI_GADGET && !USB_G_PRINTER && !USB_CDC_COMPOSITE && !USB_G_NOKIA && !USB_G_ACM_MS && !USB_G_MULTI && !USB_G_MULTI_RNDIS && !USB_G_MULTI_CDC && !USB_G_HID && !USB_G_DBGP && !USB_G_WEBCAM && TTY
help
The Configurable Composite Gadget supports multiple USB
functions: acm, mass storage, rndis and FunctionFS.
diff --git a/drivers/staging/ccg/f_mass_storage.c b/drivers/staging/ccg/f_mass_storage.c
index 4f1142efa6d1..20bc2b454ac2 100644
--- a/drivers/staging/ccg/f_mass_storage.c
+++ b/drivers/staging/ccg/f_mass_storage.c
@@ -998,7 +998,7 @@ static int do_synchronize_cache(struct fsg_common *common)
static void invalidate_sub(struct fsg_lun *curlun)
{
struct file *filp = curlun->filp;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
unsigned long rc;
rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
diff --git a/drivers/staging/ccg/rndis.c b/drivers/staging/ccg/rndis.c
index e4192b887de9..d9297eebbf73 100644
--- a/drivers/staging/ccg/rndis.c
+++ b/drivers/staging/ccg/rndis.c
@@ -1065,7 +1065,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- rndis_params *p = PDE(file->f_path.dentry->d_inode)->data;
+ rndis_params *p = PDE(file_inode(file))->data;
u32 speed = 0;
int i, fl_speed = 0;
diff --git a/drivers/staging/ccg/storage_common.c b/drivers/staging/ccg/storage_common.c
index 8d9bcd8207c8..abb01ac74cec 100644
--- a/drivers/staging/ccg/storage_common.c
+++ b/drivers/staging/ccg/storage_common.c
@@ -656,7 +656,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
if (!(filp->f_mode & FMODE_WRITE))
ro = 1;
- inode = filp->f_path.dentry->d_inode;
+ inode = file_inode(filp);
if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
LINFO(curlun, "invalid file type: %s\n", filename);
goto out;
diff --git a/drivers/staging/ccg/u_serial.c b/drivers/staging/ccg/u_serial.c
index 373c40656b52..b10947ae0ac5 100644
--- a/drivers/staging/ccg/u_serial.c
+++ b/drivers/staging/ccg/u_serial.c
@@ -491,12 +491,8 @@ static void gs_rx_push(unsigned long _port)
req = list_first_entry(queue, struct usb_request, list);
- /* discard data if tty was closed */
- if (!tty)
- goto recycle;
-
/* leave data queued if tty was rx throttled */
- if (test_bit(TTY_THROTTLED, &tty->flags))
+ if (tty && test_bit(TTY_THROTTLED, &tty->flags))
break;
switch (req->status) {
@@ -529,7 +525,7 @@ static void gs_rx_push(unsigned long _port)
size -= n;
}
- count = tty_insert_flip_string(tty, packet, size);
+ count = tty_insert_flip_string(&port->port, packet, size);
if (count)
do_push = true;
if (count != size) {
@@ -542,7 +538,6 @@ static void gs_rx_push(unsigned long _port)
}
port->n_read = 0;
}
-recycle:
list_move(&req->list, &port->read_pool);
port->read_started--;
}
@@ -550,8 +545,8 @@ recycle:
/* Push from tty to ldisc; without low_latency set this is handled by
* a workqueue, so we won't get callbacks and can hold port_lock
*/
- if (tty && do_push)
- tty_flip_buffer_push(tty);
+ if (do_push)
+ tty_flip_buffer_push(&port->port);
/* We want our data queue to become empty ASAP, keeping data
diff --git a/drivers/staging/dgrp/Kconfig b/drivers/staging/dgrp/Kconfig
index 39f4bb65ec83..e4c41552923a 100644
--- a/drivers/staging/dgrp/Kconfig
+++ b/drivers/staging/dgrp/Kconfig
@@ -1,7 +1,7 @@
config DGRP
tristate "Digi Realport driver"
default n
- depends on SYSFS
+ depends on SYSFS && TTY
---help---
Support for Digi Realport devices. These devices allow you to
access remote serial ports as if they are local tty devices. This
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c
index 2d1bbfd5b67c..e6018823b9de 100644
--- a/drivers/staging/dgrp/dgrp_net_ops.c
+++ b/drivers/staging/dgrp/dgrp_net_ops.c
@@ -37,6 +37,7 @@
#include <linux/proc_fs.h>
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/spinlock.h>
@@ -211,7 +212,7 @@ static void dgrp_input(struct ch_struct *ch)
data_len = (ch->ch_rin - ch->ch_rout) & RBUF_MASK;
/* len is the amount of data we are going to transfer here */
- len = tty_buffer_request_room(tty, data_len);
+ len = tty_buffer_request_room(&ch->port, data_len);
/* Check DPA flow control */
if ((nd->nd_dpa_debug) &&
@@ -232,9 +233,9 @@ static void dgrp_input(struct ch_struct *ch)
(nd->nd_dpa_port == PORT_NUM(MINOR(tty_devnum(tty)))))
dgrp_dpa_data(nd, 1, myflipbuf, len);
- tty_insert_flip_string_flags(tty, myflipbuf,
+ tty_insert_flip_string_flags(&ch->port, myflipbuf,
myflipflagbuf, len);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&ch->port);
ch->ch_rxcount += len;
}
@@ -2956,9 +2957,9 @@ check_query:
I_BRKINT(ch->ch_tun.un_tty) &&
!(I_IGNBRK(ch->ch_tun.un_tty))) {
- tty_buffer_request_room(ch->ch_tun.un_tty, 1);
- tty_insert_flip_char(ch->ch_tun.un_tty, 0, TTY_BREAK);
- tty_flip_buffer_push(ch->ch_tun.un_tty);
+ tty_buffer_request_room(&ch->port, 1);
+ tty_insert_flip_char(&ch->port, 0, TTY_BREAK);
+ tty_flip_buffer_push(&ch->port);
}
diff --git a/drivers/staging/dgrp/dgrp_specproc.c b/drivers/staging/dgrp/dgrp_specproc.c
index 13c7ccf163c5..73f287f96604 100644
--- a/drivers/staging/dgrp/dgrp_specproc.c
+++ b/drivers/staging/dgrp/dgrp_specproc.c
@@ -357,7 +357,7 @@ static int dgrp_gen_proc_open(struct inode *inode, struct file *file)
struct dgrp_proc_entry *entry;
int ret = 0;
- de = (struct proc_dir_entry *) PDE(file->f_dentry->d_inode);
+ de = (struct proc_dir_entry *) PDE(file_inode(file));
if (!de || !de->data) {
ret = -ENXIO;
goto done;
@@ -387,7 +387,7 @@ static int dgrp_gen_proc_close(struct inode *inode, struct file *file)
struct proc_dir_entry *de;
struct dgrp_proc_entry *entry;
- de = (struct proc_dir_entry *) PDE(file->f_dentry->d_inode);
+ de = (struct proc_dir_entry *) PDE(file_inode(file));
if (!de || !de->data)
goto done;
diff --git a/drivers/staging/dgrp/dgrp_tty.c b/drivers/staging/dgrp/dgrp_tty.c
index 51d3ed3dca27..654f6010b473 100644
--- a/drivers/staging/dgrp/dgrp_tty.c
+++ b/drivers/staging/dgrp/dgrp_tty.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/device.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig
index b2f8331e4acf..a0812d99136f 100644
--- a/drivers/staging/fwserial/Kconfig
+++ b/drivers/staging/fwserial/Kconfig
@@ -1,6 +1,6 @@
config FIREWIRE_SERIAL
tristate "TTY over Firewire"
- depends on FIREWIRE
+ depends on FIREWIRE && TTY
help
This enables TTY over IEEE 1394, providing high-speed serial
connectivity to cabled peers. This driver implements a
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 8859c75f16fa..5a6fb44f38a8 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -500,16 +500,11 @@ static void fwtty_do_hangup(struct work_struct *work)
static void fwtty_emit_breaks(struct work_struct *work)
{
struct fwtty_port *port = to_port(to_delayed_work(work), emit_breaks);
- struct tty_struct *tty;
static const char buf[16];
unsigned long now = jiffies;
unsigned long elapsed = now - port->break_last;
int n, t, c, brk = 0;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
/* generate breaks at the line rate (but at least 1) */
n = (elapsed * port->cps) / HZ + 1;
port->break_last = now;
@@ -518,15 +513,14 @@ static void fwtty_emit_breaks(struct work_struct *work)
while (n) {
t = min(n, 16);
- c = tty_insert_flip_string_fixed_flag(tty, buf, TTY_BREAK, t);
+ c = tty_insert_flip_string_fixed_flag(&port->port, buf,
+ TTY_BREAK, t);
n -= c;
brk += c;
if (c < t)
break;
}
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
if (port->mstatus & (UART_LSR_BI << 24))
schedule_delayed_work(&port->emit_breaks, FREQ_BREAKS);
@@ -540,13 +534,9 @@ static void fwtty_pushrx(struct work_struct *work)
struct buffered_rx *buf, *next;
int n, c = 0;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
spin_lock_bh(&port->lock);
list_for_each_entry_safe(buf, next, &port->buf_list, list) {
- n = tty_insert_flip_string_fixed_flag(tty, buf->data,
+ n = tty_insert_flip_string_fixed_flag(&port->port, buf->data,
TTY_NORMAL, buf->n);
c += n;
port->buffered -= n;
@@ -555,7 +545,11 @@ static void fwtty_pushrx(struct work_struct *work)
memmove(buf->data, buf->data + n, buf->n - n);
buf->n -= n;
}
- __fwtty_throttle(port, tty);
+ tty = tty_port_tty_get(&port->port);
+ if (tty) {
+ __fwtty_throttle(port, tty);
+ tty_kref_put(tty);
+ }
break;
} else {
list_del(&buf->list);
@@ -563,13 +557,11 @@ static void fwtty_pushrx(struct work_struct *work)
}
}
if (c > 0)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
if (list_empty(&port->buf_list))
clear_bit(BUFFERING_RX, &port->flags);
spin_unlock_bh(&port->lock);
-
- tty_kref_put(tty);
}
static int fwtty_buffer_rx(struct fwtty_port *port, unsigned char *d, size_t n)
@@ -607,10 +599,6 @@ static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
unsigned lsr;
int err = 0;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return -ENOENT;
-
fwtty_dbg(port, "%d", n);
profile_size_distrib(port->stats.reads, n);
@@ -630,7 +618,7 @@ static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
lsr &= port->status_mask;
if (lsr & ~port->ignore_mask & UART_LSR_OE) {
- if (!tty_insert_flip_char(tty, 0, TTY_OVERRUN)) {
+ if (!tty_insert_flip_char(&port->port, 0, TTY_OVERRUN)) {
err = -EIO;
goto out;
}
@@ -644,18 +632,23 @@ static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
}
if (!test_bit(BUFFERING_RX, &port->flags)) {
- c = tty_insert_flip_string_fixed_flag(tty, data, TTY_NORMAL, n);
+ c = tty_insert_flip_string_fixed_flag(&port->port, data,
+ TTY_NORMAL, n);
if (c > 0)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
n -= c;
if (n) {
/* start buffering and throttling */
n -= fwtty_buffer_rx(port, &data[c], n);
- spin_lock_bh(&port->lock);
- __fwtty_throttle(port, tty);
- spin_unlock_bh(&port->lock);
+ tty = tty_port_tty_get(&port->port);
+ if (tty) {
+ spin_lock_bh(&port->lock);
+ __fwtty_throttle(port, tty);
+ spin_unlock_bh(&port->lock);
+ tty_kref_put(tty);
+ }
}
} else
n -= fwtty_buffer_rx(port, data, n);
@@ -666,8 +659,6 @@ static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
}
out:
- tty_kref_put(tty);
-
port->icount.rx += len;
port->stats.lost += n;
return err;
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 427218b8b10f..ae0abc350e34 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -23,6 +23,8 @@ source "drivers/staging/media/as102/Kconfig"
source "drivers/staging/media/cxd2099/Kconfig"
+source "drivers/staging/media/davinci_vpfe/Kconfig"
+
source "drivers/staging/media/dt3155v4l/Kconfig"
source "drivers/staging/media/go7007/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index aec6eb963940..2b97cae99499 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_SOLO6X10) += solo6x10/
obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
+obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
diff --git a/drivers/staging/media/as102/as102_usb_drv.c b/drivers/staging/media/as102/as102_usb_drv.c
index aaf1bc2ad1b2..9f275f020150 100644
--- a/drivers/staging/media/as102/as102_usb_drv.c
+++ b/drivers/staging/media/as102/as102_usb_drv.c
@@ -374,10 +374,8 @@ static int as102_usb_probe(struct usb_interface *intf,
}
as102_dev = kzalloc(sizeof(struct as102_dev_t), GFP_KERNEL);
- if (as102_dev == NULL) {
- dev_err(&intf->dev, "%s: kzalloc failed\n", __func__);
+ if (as102_dev == NULL)
return -ENOMEM;
- }
/* Assign the user-friendly device name */
for (i = 0; i < ARRAY_SIZE(as102_usb_id_table); i++) {
diff --git a/drivers/staging/media/as102/as10x_cmd_cfg.c b/drivers/staging/media/as102/as10x_cmd_cfg.c
index d2a4bce89623..4a2bbd766655 100644
--- a/drivers/staging/media/as102/as10x_cmd_cfg.c
+++ b/drivers/staging/media/as102/as10x_cmd_cfg.c
@@ -197,7 +197,7 @@ out:
* @prsp: pointer to AS10x command response buffer
* @proc_id: id of the command
*
- * Since the contex command reponse does not follow the common
+ * Since the context command response does not follow the common
* response, a specific parse function is required.
* Return 0 on success or negative value in case of error.
*/
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 0ff19724992f..822c487592a4 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -66,8 +66,9 @@ static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr,
struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = m, .len = 2};
if (i2c_transfer(adapter, &msg, 1) != 1) {
- printk(KERN_ERR "Failed to write to I2C register %02x@%02x!\n",
- reg, adr);
+ dev_err(&adapter->dev,
+ "Failed to write to I2C register %02x@%02x!\n",
+ reg, adr);
return -1;
}
return 0;
@@ -79,7 +80,7 @@ static int i2c_write(struct i2c_adapter *adapter, u8 adr,
struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = data, .len = len};
if (i2c_transfer(adapter, &msg, 1) != 1) {
- printk(KERN_ERR "Failed to write to I2C!\n");
+ dev_err(&adapter->dev, "Failed to write to I2C!\n");
return -1;
}
return 0;
@@ -94,7 +95,7 @@ static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr,
.buf = val, .len = 1} };
if (i2c_transfer(adapter, msgs, 2) != 2) {
- printk(KERN_ERR "error in i2c_read_reg\n");
+ dev_err(&adapter->dev, "error in i2c_read_reg\n");
return -1;
}
return 0;
@@ -109,7 +110,7 @@ static int i2c_read(struct i2c_adapter *adapter, u8 adr,
.buf = data, .len = n} };
if (i2c_transfer(adapter, msgs, 2) != 2) {
- printk(KERN_ERR "error in i2c_read\n");
+ dev_err(&adapter->dev, "error in i2c_read\n");
return -1;
}
return 0;
@@ -277,7 +278,7 @@ static void cam_mode(struct cxd *ci, int mode)
#ifdef BUFFER_MODE
if (!ci->en.read_data)
return;
- printk(KERN_INFO "enable cam buffer mode\n");
+ dev_info(&ci->i2c->dev, "enable cam buffer mode\n");
/* write_reg(ci, 0x0d, 0x00); */
/* write_reg(ci, 0x0e, 0x01); */
write_regm(ci, 0x08, 0x40, 0x40);
@@ -524,7 +525,7 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
msleep(10);
#if 0
read_reg(ci, 0x06, &val);
- printk(KERN_INFO "%d:%02x\n", i, val);
+ dev_info(&ci->i2c->dev, "%d:%02x\n", i, val);
if (!(val&0x10))
break;
#else
@@ -542,7 +543,7 @@ static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
{
struct cxd *ci = ca->data;
- printk(KERN_INFO "slot_shutdown\n");
+ dev_info(&ci->i2c->dev, "slot_shutdown\n");
mutex_lock(&ci->lock);
write_regm(ci, 0x09, 0x08, 0x08);
write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */
@@ -578,10 +579,10 @@ static int campoll(struct cxd *ci)
if (istat&0x40) {
ci->dr = 1;
- printk(KERN_INFO "DR\n");
+ dev_info(&ci->i2c->dev, "DR\n");
}
if (istat&0x20)
- printk(KERN_INFO "WC\n");
+ dev_info(&ci->i2c->dev, "WC\n");
if (istat&2) {
u8 slotstat;
@@ -597,7 +598,7 @@ static int campoll(struct cxd *ci)
if (ci->slot_stat) {
ci->slot_stat = 0;
write_regm(ci, 0x03, 0x00, 0x08);
- printk(KERN_INFO "NO CAM\n");
+ dev_info(&ci->i2c->dev, "NO CAM\n");
ci->ready = 0;
}
}
@@ -634,7 +635,7 @@ static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
campoll(ci);
mutex_unlock(&ci->lock);
- printk(KERN_INFO "read_data\n");
+ dev_info(&ci->i2c->dev, "read_data\n");
if (!ci->dr)
return 0;
@@ -687,7 +688,7 @@ struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
u8 val;
if (i2c_read_reg(i2c, cfg->adr, 0, &val) < 0) {
- printk(KERN_INFO "No CXD2099 detected at %02x\n", cfg->adr);
+ dev_info(&i2c->dev, "No CXD2099 detected at %02x\n", cfg->adr);
return NULL;
}
@@ -705,7 +706,7 @@ struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
ci->en = en_templ;
ci->en.data = ci;
init(ci);
- printk(KERN_INFO "Attached CXD2099AR at %02x\n", ci->cfg.adr);
+ dev_info(&i2c->dev, "Attached CXD2099AR at %02x\n", ci->cfg.adr);
return &ci->en;
}
EXPORT_SYMBOL(cxd2099_attach);
diff --git a/drivers/staging/media/cxd2099/cxd2099.h b/drivers/staging/media/cxd2099/cxd2099.h
index 19c588a59588..0eb607c5b423 100644
--- a/drivers/staging/media/cxd2099/cxd2099.h
+++ b/drivers/staging/media/cxd2099/cxd2099.h
@@ -43,7 +43,7 @@ struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
static inline struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
void *priv, struct i2c_adapter *i2c)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
#endif
diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
new file mode 100644
index 000000000000..2e4a28b018e8
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/Kconfig
@@ -0,0 +1,9 @@
+config VIDEO_DM365_VPFE
+ tristate "DM365 VPFE Media Controller Capture Driver"
+ depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_VPFE_CAPTURE
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Support for DM365 VPFE based Media Controller Capture driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vpfe-mc-capture.
diff --git a/drivers/staging/media/davinci_vpfe/Makefile b/drivers/staging/media/davinci_vpfe/Makefile
new file mode 100644
index 000000000000..c64515c644cd
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VIDEO_DM365_VPFE) += \
+ dm365_isif.o dm365_ipipe_hw.o dm365_ipipe.o \
+ dm365_resizer.o dm365_ipipeif.o vpfe_mc_capture.o vpfe_video.o
diff --git a/drivers/staging/media/davinci_vpfe/TODO b/drivers/staging/media/davinci_vpfe/TODO
new file mode 100644
index 000000000000..7015ab35ded5
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/TODO
@@ -0,0 +1,37 @@
+TODO (general):
+==================================
+
+- User space interface refinement
+ - Controls should be used when possible rather than private ioctl
+ - No enums should be used
+ - Use of MC and V4L2 subdev APIs when applicable
+ - Single interface header might suffice
+ - The current interface forces everything to be configured at once
+- Get rid of the dm365_ipipe_hw.[ch] layer
+- Active external sub-devices defined by link configuration; no strcmp
+ needed
+- More generic platform data (i2c adapters)
+- The driver should have no knowledge of possible external subdevs; see
+ struct vpfe_subdev_id
+- Some of the hardware control should be refactored
+- Check proper serialisation (through mutexes and spinlocks)
+- Names that are visible in kernel global namespace should have a common
+ prefix (or a few)
+- While replacing the older driver in media folder, provide a compatibility
+ layer and compatibility tests that warrant (using libv4l's LD_PRELOAD
+ approach) that there is no regression for users of the older driver.
+
+Building of uImage and Applications:
+==================================
+
+For now, since the interface is still expected to change, all the include
+files are kept in staging itself. To build for DM365, follow the steps below:
+
+- copy vpfe.h from drivers/staging/media/davinci_vpfe/ to
+ include/media/davinci/ folder for building the uImage.
+- copy davinci_vpfe_user.h from drivers/staging/media/davinci_vpfe/ to
+ include/uapi/linux/davinci_vpfe.h, and add an entry in Kbuild (required
+ for building applications).
+- copy dm365_ipipeif_user.h from drivers/staging/media/davinci_vpfe/ to
+ include/uapi/linux/dm365_ipipeif.h, and add an entry in Kbuild (required
+ for building applications).
diff --git a/drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt b/drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt
new file mode 100644
index 000000000000..1dbd56418704
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt
@@ -0,0 +1,154 @@
+Davinci Video processing Front End (VPFE) driver
+
+Copyright (C) 2012 Texas Instruments Inc
+
+Contacts: Manjunath Hadli <manjunath.hadli@ti.com>
+ Prabhakar Lad <prabhakar.lad@ti.com>
+
+
+Introduction
+============
+
+This file documents the Texas Instruments Davinci Video processing Front End
+(VPFE) driver located under drivers/media/platform/davinci. The original driver
+exists for the Davinci VPFE and is now being converted to the Media Controller
+framework.
+
+Currently the driver has been successfully used on the following
+version of Davinci:
+
+ DM365/DM368
+
+The driver implements V4L2, Media controller and v4l2_subdev interfaces. Sensor,
+lens and flash drivers using the v4l2_subdev interface in the kernel are
+supported.
+
+
+Split to subdevs
+================
+
+The Davinci VPFE is split into V4L2 subdevs, each of the blocks inside the VPFE
+having one subdev to represent it. Each of the subdevs provides a V4L2 subdev
+interface to userspace.
+
+ DAVINCI ISIF
+ DAVINCI IPIPEIF
+ DAVINCI IPIPE
+ DAVINCI CROP RESIZER
+ DAVINCI RESIZER A
+ DAVINCI RESIZER B
+
+Each possible link in the VPFE is modeled by a link in the Media controller
+interface. For an example program see [1].
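+
+As an illustration only (not part of this driver), the sketch below enumerates
+the entities exposed through the media controller device so the VPFE subdevs
+listed above can be located. The media device node name is an assumption and
+may differ on a given board:
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <unistd.h>
+    #include <linux/media.h>
+
+    /* sketch: list all media entities, e.g. "DAVINCI ISIF" */
+    int list_vpfe_entities(void)
+    {
+            struct media_entity_desc entity;
+            int fd = open("/dev/media0", O_RDWR);   /* assumed node name */
+
+            if (fd < 0)
+                    return -1;
+            memset(&entity, 0, sizeof(entity));
+            entity.id = MEDIA_ENT_ID_FLAG_NEXT;
+            while (ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &entity) == 0) {
+                    printf("entity %u: %s\n", entity.id, entity.name);
+                    entity.id |= MEDIA_ENT_ID_FLAG_NEXT;
+            }
+            close(fd);
+            return 0;
+    }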
+
+
+ISIF, IPIPE, and RESIZER block IOCTLs
+======================================
+
+The Davinci Video processing Front End (VPFE) driver supports standard V4L2
+IOCTLs and controls where possible and practical. Much of the functions provided
+by the VPFE, however, does not fall under the standard IOCTL's.
+
+In general, there is a private ioctl for configuring each of the blocks
+containing hardware-dependent functions.
+
+The following private IOCTLs are supported:
+
+ VIDIOC_VPFE_ISIF_[S/G]_RAW_PARAMS
+ VIDIOC_VPFE_IPIPE_[S/G]_CONFIG
+ VIDIOC_VPFE_RSZ_[S/G]_CONFIG
+
+The parameter structures used by these ioctls are described in
+include/uapi/linux/davinci_vpfe.h.
+
+The VIDIOC_VPFE_ISIF_S_RAW_PARAMS, VIDIOC_VPFE_IPIPE_S_CONFIG and
+VIDIOC_VPFE_RSZ_S_CONFIG are used to configure, enable and disable functions in
+the isif, ipipe and resizer blocks respectively. Each of these IOCTLs controls
+several functions in its block. The VIDIOC_VPFE_ISIF_S_RAW_PARAMS IOCTL
+accepts a pointer to struct vpfe_isif_raw_config as its argument,
+VIDIOC_VPFE_IPIPE_S_CONFIG accepts a pointer to struct vpfe_ipipe_config, and
+VIDIOC_VPFE_RSZ_S_CONFIG accepts a pointer to struct vpfe_rsz_config.
+Similarly, VIDIOC_VPFE_ISIF_G_RAW_PARAMS, VIDIOC_VPFE_IPIPE_G_CONFIG
+and VIDIOC_VPFE_RSZ_G_CONFIG are used to get the current configuration set in
+the isif, ipipe and resizer blocks respectively.
+
+The detailed functions of the VPFE related to a given VPFE block are described
+in the Technical Reference Manuals (TRMs) --- see the end of the
+document for those.
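+
+As an illustration only (again, not part of the driver), a minimal userspace
+sketch for applying a resizer configuration could look like the following.
+The subdev node name and the uapi header location are assumptions; they depend
+on the board and on how the headers from the TODO file were installed:
+
+    #include <fcntl.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <unistd.h>
+    #include <linux/davinci_vpfe.h>  /* assumed install location */
+
+    /* sketch: zero-fill and apply a resizer configuration */
+    int rsz_set_config(void)
+    {
+            struct vpfe_rsz_config cfg;
+            int fd = open("/dev/v4l-subdev5", O_RDWR);  /* node is board specific */
+            int ret = 0;
+
+            if (fd < 0)
+                    return -1;
+            memset(&cfg, 0, sizeof(cfg));
+            /* fill in the resizer parameters here before applying them */
+            if (ioctl(fd, VIDIOC_VPFE_RSZ_S_CONFIG, &cfg) < 0)
+                    ret = -1;
+            close(fd);
+            return ret;
+    }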
+
+
+IPIPEIF block IOCTLs
+======================================
+
+The following private IOCTLs are supported:
+
+ VIDIOC_VPFE_IPIPEIF_[S/G]_CONFIG
+
+The parameter structures used by these ioctls are described in
+include/uapi/linux/dm365_ipipeif.h
+
+The VIDIOC_VPFE_IPIPEIF_S_CONFIG is used to configure the ipipeif
+hardware block. Both VIDIOC_VPFE_IPIPEIF_S_CONFIG and
+VIDIOC_VPFE_IPIPEIF_G_CONFIG accept a pointer to struct ipipeif_params
+as their argument.
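+
+A get/modify/set sequence is one natural way to use this pair of ioctls. The
+fragment below is only a sketch of the plumbing; fd is assumed to be an open
+file descriptor for the ipipeif subdev node, and the header location follows
+the TODO file:
+
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/dm365_ipipeif.h>  /* assumed install location */
+
+    /* sketch: read back the current ipipeif setup, tweak it, write it out */
+    int ipipeif_update_config(int fd)
+    {
+            struct ipipeif_params params;
+
+            memset(&params, 0, sizeof(params));
+            if (ioctl(fd, VIDIOC_VPFE_IPIPEIF_G_CONFIG, &params) < 0)
+                    return -1;
+            /* adjust the fields of interest in params here */
+            return ioctl(fd, VIDIOC_VPFE_IPIPEIF_S_CONFIG, &params);
+    }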
+
+
+VPFE Operating Modes
+==========================================
+
+a: Continuous Modes
+------------------------
+
+1: tvp514x/tvp7002/mt9p031---> DAVINCI ISIF---> SDRAM
+
+2: tvp514x/tvp7002/mt9p031---> DAVINCI ISIF---> DAVINCI IPIPEIF--->|
+ |
+ <--------------------<----------------<---------------------<---|
+ |
+ V
+ DAVINCI CROP RESIZER--->DAVINCI RESIZER [A/B]---> SDRAM
+
+3: tvp514x/tvp7002/mt9p031---> DAVINCI ISIF---> DAVINCI IPIPEIF--->|
+ |
+ <--------------------<----------------<---------------------<---|
+ |
+ V
+ DAVINCI IPIPE---> DAVINCI CROP RESIZER--->DAVINCI RESIZER [A/B]---> SDRAM
+
+b: Single Shot Modes
+------------------------
+
+1: SDRAM---> DAVINCI IPIPEIF---> DAVINCI IPIPE---> DAVINCI CROP RESIZER--->|
+ |
+ <----------------<----------------<------------------<---------------<--|
+ |
+ V
+DAVINCI RESIZER [A/B]---> SDRAM
+
+2: SDRAM---> DAVINCI IPIPEIF---> DAVINCI CROP RESIZER--->|
+ |
+ <----------------<----------------<---------------<---|
+ |
+ V
+DAVINCI RESIZER [A/B]---> SDRAM
+
+
+Technical reference manuals (TRMs) and other documentation
+==========================================================
+
+Davinci DM365 TRM:
+<URL:http://www.ti.com/lit/ds/sprs457e/sprs457e.pdf>
+Referenced MARCH 2009-REVISED JUNE 2011
+
+Davinci DM368 TRM:
+<URL:http://www.ti.com/lit/ds/sprs668c/sprs668c.pdf>
+Referenced APRIL 2010-REVISED JUNE 2011
+
+Davinci Video Processing Front End (VPFE) DM36x
+<URL:http://www.ti.com/lit/ug/sprufg8c/sprufg8c.pdf>
+
+
+References
+==========
+
+[1] http://git.ideasonboard.org/?p=media-ctl.git;a=summary
diff --git a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
new file mode 100644
index 000000000000..7b7e7b26c1e8
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
@@ -0,0 +1,1290 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_USER_H
+#define _DAVINCI_VPFE_USER_H
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+/*
+ * Private IOCTL
+ *
+ * VIDIOC_VPFE_ISIF_S_RAW_PARAMS: Set raw params in isif
+ * VIDIOC_VPFE_ISIF_G_RAW_PARAMS: Get raw params from isif
+ * VIDIOC_VPFE_IPIPE_S_CONFIG: Set ipipe engine configuration
+ * VIDIOC_VPFE_IPIPE_G_CONFIG: Get ipipe engine configuration
+ * VIDIOC_VPFE_RSZ_S_CONFIG: Set resizer engine configuration
+ * VIDIOC_VPFE_RSZ_G_CONFIG: Get resizer engine configuration
+ */
+
+#define VIDIOC_VPFE_ISIF_S_RAW_PARAMS \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 1, struct vpfe_isif_raw_config)
+#define VIDIOC_VPFE_ISIF_G_RAW_PARAMS \
+ _IOR('V', BASE_VIDIOC_PRIVATE + 2, struct vpfe_isif_raw_config)
+#define VIDIOC_VPFE_IPIPE_S_CONFIG \
+ _IOWR('P', BASE_VIDIOC_PRIVATE + 3, struct vpfe_ipipe_config)
+#define VIDIOC_VPFE_IPIPE_G_CONFIG \
+ _IOWR('P', BASE_VIDIOC_PRIVATE + 4, struct vpfe_ipipe_config)
+#define VIDIOC_VPFE_RSZ_S_CONFIG \
+ _IOWR('R', BASE_VIDIOC_PRIVATE + 5, struct vpfe_rsz_config)
+#define VIDIOC_VPFE_RSZ_G_CONFIG \
+ _IOWR('R', BASE_VIDIOC_PRIVATE + 6, struct vpfe_rsz_config)
+
+/*
+ * Private Controls for ISIF
+ */
+#define VPFE_ISIF_CID_CRGAIN (V4L2_CID_USER_BASE | 0xa001)
+#define VPFE_ISIF_CID_CGRGAIN (V4L2_CID_USER_BASE | 0xa002)
+#define VPFE_ISIF_CID_CGBGAIN (V4L2_CID_USER_BASE | 0xa003)
+#define VPFE_ISIF_CID_CBGAIN (V4L2_CID_USER_BASE | 0xa004)
+#define VPFE_ISIF_CID_GAIN_OFFSET (V4L2_CID_USER_BASE | 0xa005)
+
+/*
+ * Private Controls for ISIF and IPIPEIF
+ */
+#define VPFE_CID_DPCM_PREDICTOR (V4L2_CID_USER_BASE | 0xa006)
+
+/************************************************************************
+ * Vertical Defect Correction parameters
+ ***********************************************************************/
+
+/**
+ * vertical defect correction methods
+ */
+enum vpfe_isif_vdfc_corr_mode {
+ /* Defect level subtraction. Just fed through if saturating */
+ VPFE_ISIF_VDFC_NORMAL,
+ /**
+ * Defect level subtraction. Horizontal interpolation ((i-2)+(i+2))/2
+ * if data saturating
+ */
+ VPFE_ISIF_VDFC_HORZ_INTERPOL_IF_SAT,
+ /* Horizontal interpolation (((i-2)+(i+2))/2) */
+ VPFE_ISIF_VDFC_HORZ_INTERPOL
+};
+
+/**
+ * Max Size of the Vertical Defect Correction table
+ */
+#define VPFE_ISIF_VDFC_TABLE_SIZE 8
+
+/**
+ * Values used for shifting up the vdfc defect level
+ */
+enum vpfe_isif_vdfc_shift {
+ /* No Shift */
+ VPFE_ISIF_VDFC_NO_SHIFT,
+ /* Shift by 1 bit */
+ VPFE_ISIF_VDFC_SHIFT_1,
+ /* Shift by 2 bit */
+ VPFE_ISIF_VDFC_SHIFT_2,
+ /* Shift by 3 bit */
+ VPFE_ISIF_VDFC_SHIFT_3,
+ /* Shift by 4 bit */
+ VPFE_ISIF_VDFC_SHIFT_4
+};
+
+/**
+ * Defect Correction (DFC) table entry
+ */
+struct vpfe_isif_vdfc_entry {
+ /* vertical position of defect */
+ unsigned short pos_vert;
+ /* horizontal position of defect */
+ unsigned short pos_horz;
+ /**
+ * Defect level of Vertical line defect position. This is subtracted
+ * from the data at the defect position
+ */
+ unsigned char level_at_pos;
+ /**
+ * Defect level of the pixels above the vertical line defect.
+ * This is subtracted from the data
+ */
+ unsigned char level_up_pixels;
+ /**
+ * Defect level of the pixels below the vertical line defect.
+ * This is subtracted from the data
+ */
+ unsigned char level_low_pixels;
+};
+
+/**
+ * Structure for Defect Correction (DFC) parameter
+ */
+struct vpfe_isif_dfc {
+ /* enable vertical defect correction */
+ unsigned char en;
+ /* Correction methods */
+ enum vpfe_isif_vdfc_corr_mode corr_mode;
+ /**
+ * 0 - whole line corrected, 1 - not
+ * pixels upper than the defect
+ */
+ unsigned char corr_whole_line;
+ /**
+ * defect level shift value. level_at_pos, level_upper_pos,
+ * and level_lower_pos can be shifted up by this value
+ */
+ enum vpfe_isif_vdfc_shift def_level_shift;
+ /* defect saturation level */
+ unsigned short def_sat_level;
+ /* number of vertical defects. Max is VPFE_ISIF_VDFC_TABLE_SIZE */
+ short num_vdefects;
+ /* VDFC table ptr */
+ struct vpfe_isif_vdfc_entry table[VPFE_ISIF_VDFC_TABLE_SIZE];
+};
+
+/************************************************************************
+* Digital/Black clamp or DC Subtract parameters
+************************************************************************/
+/**
+ * Horizontal Black Clamp modes
+ */
+enum vpfe_isif_horz_bc_mode {
+ /**
+ * Horizontal clamp disabled. Only vertical clamp
+ * value is subtracted
+ */
+ VPFE_ISIF_HORZ_BC_DISABLE,
+ /**
+ * Horizontal clamp value is calculated and subtracted
+ * from image data along with vertical clamp value
+ */
+ VPFE_ISIF_HORZ_BC_CLAMP_CALC_ENABLED,
+ /**
+ * Horizontal clamp value calculated from previous image
+ * is subtracted from image data along with vertical clamp
+ * value. How the horizontal clamp value for the first image
+ * is calculated in this case ???
+ */
+ VPFE_ISIF_HORZ_BC_CLAMP_NOT_UPDATED
+};
+
+/**
+ * Base window selection for Horizontal Black Clamp calculations
+ */
+enum vpfe_isif_horz_bc_base_win_sel {
+ /* Select Most left window for bc calculation */
+ VPFE_ISIF_SEL_MOST_LEFT_WIN,
+
+ /* Select Most right window for bc calculation */
+ VPFE_ISIF_SEL_MOST_RIGHT_WIN,
+};
+
+/* Size of window in horizontal direction for horizontal bc */
+enum vpfe_isif_horz_bc_sz_h {
+ VPFE_ISIF_HORZ_BC_SZ_H_2PIXELS,
+ VPFE_ISIF_HORZ_BC_SZ_H_4PIXELS,
+ VPFE_ISIF_HORZ_BC_SZ_H_8PIXELS,
+ VPFE_ISIF_HORZ_BC_SZ_H_16PIXELS
+};
+
+/* Size of window in vertical direction for vertical bc */
+enum vpfe_isif_horz_bc_sz_v {
+ VPFE_ISIF_HORZ_BC_SZ_H_32PIXELS,
+ VPFE_ISIF_HORZ_BC_SZ_H_64PIXELS,
+ VPFE_ISIF_HORZ_BC_SZ_H_128PIXELS,
+ VPFE_ISIF_HORZ_BC_SZ_H_256PIXELS
+};
+
+/**
+ * Structure for Horizontal Black Clamp config params
+ */
+struct vpfe_isif_horz_bclamp {
+ /* horizontal clamp mode */
+ enum vpfe_isif_horz_bc_mode mode;
+ /**
+ * pixel value limit enable.
+ * 0 - limit disabled
+ * 1 - pixel value limited to 1023
+ */
+ unsigned char clamp_pix_limit;
+ /**
+ * Select most left or right window for clamp val
+ * calculation
+ */
+ enum vpfe_isif_horz_bc_base_win_sel base_win_sel_calc;
+ /* Window count per color for calculation. range 1-32 */
+ unsigned char win_count_calc;
+ /* Window start position - horizontal for calculation. 0 - 8191 */
+ unsigned short win_start_h_calc;
+ /* Window start position - vertical for calculation 0 - 8191 */
+ unsigned short win_start_v_calc;
+ /* Width of the sample window in pixels for calculation */
+ enum vpfe_isif_horz_bc_sz_h win_h_sz_calc;
+ /* Height of the sample window in pixels for calculation */
+ enum vpfe_isif_horz_bc_sz_v win_v_sz_calc;
+};
+
+/**
+ * Black Clamp vertical reset values
+ */
+enum vpfe_isif_vert_bc_reset_val_sel {
+ /* Reset value used is the clamp value calculated */
+ VPFE_ISIF_VERT_BC_USE_HORZ_CLAMP_VAL,
+ /* Reset value used is reset_clamp_val configured */
+ VPFE_ISIF_VERT_BC_USE_CONFIG_CLAMP_VAL,
+ /* No update, previous image value is used */
+ VPFE_ISIF_VERT_BC_NO_UPDATE
+};
+
+enum vpfe_isif_vert_bc_sz_h {
+ VPFE_ISIF_VERT_BC_SZ_H_2PIXELS,
+ VPFE_ISIF_VERT_BC_SZ_H_4PIXELS,
+ VPFE_ISIF_VERT_BC_SZ_H_8PIXELS,
+ VPFE_ISIF_VERT_BC_SZ_H_16PIXELS,
+ VPFE_ISIF_VERT_BC_SZ_H_32PIXELS,
+ VPFE_ISIF_VERT_BC_SZ_H_64PIXELS
+};
+
+/**
+ * Structure for Vertical Black Clamp configuration params
+ */
+struct vpfe_isif_vert_bclamp {
+ /* Reset value selection for vertical clamp calculation */
+ enum vpfe_isif_vert_bc_reset_val_sel reset_val_sel;
+ /* U12 value if reset_sel = ISIF_BC_VERT_USE_CONFIG_CLAMP_VAL */
+ unsigned short reset_clamp_val;
+ /**
+ * U8Q8. Line average coefficient used in vertical clamp
+ * calculation
+ */
+ unsigned char line_ave_coef;
+ /* Width in pixels of the optical black region used for calculation. */
+ enum vpfe_isif_vert_bc_sz_h ob_h_sz_calc;
+ /* Height of the optical black region for calculation */
+ unsigned short ob_v_sz_calc;
+ /* Optical black region start position - horizontal. 0 - 8191 */
+ unsigned short ob_start_h;
+ /* Optical black region start position - vertical 0 - 8191 */
+ unsigned short ob_start_v;
+};
+
+/**
+ * Structure for Black Clamp configuration params
+ */
+struct vpfe_isif_black_clamp {
+ /**
+ * this offset value is added irrespective of the clamp
+ * enable status. S13
+ */
+ unsigned short dc_offset;
+ /**
+ * Enable black/digital clamp value to be subtracted
+ * from the image data
+ */
+ unsigned char en;
+ /**
+ * black clamp mode. same/separate clamp for 4 colors
+ * 0 - disable - same clamp value for all colors
+ * 1 - clamp value calculated separately for all colors
+ */
+ unsigned char bc_mode_color;
+ /* Vertical start position for bc subtraction */
+ unsigned short vert_start_sub;
+ /* Black clamp for horizontal direction */
+ struct vpfe_isif_horz_bclamp horz;
+ /* Black clamp for vertical direction */
+ struct vpfe_isif_vert_bclamp vert;
+};
+
+/*************************************************************************
+** Color Space Conversion (CSC)
+*************************************************************************/
+/**
+ * Number of Coefficient values used for CSC
+ */
+#define VPFE_ISIF_CSC_NUM_COEFF 16
+
+struct float_8_bit {
+ /* 8 bit integer part */
+ __u8 integer;
+ /* 8 bit decimal part */
+ __u8 decimal;
+};
+
+struct float_16_bit {
+ /* 16 bit integer part */
+ __u16 integer;
+ /* 16 bit decimal part */
+ __u16 decimal;
+};
+
+/*************************************************************************
+** Color Space Conversion parameters
+*************************************************************************/
+/**
+ * Structure used for CSC config params
+ */
+struct vpfe_isif_color_space_conv {
+ /* Enable color space conversion */
+ unsigned char en;
+ /**
+ * csc coefficient table. S8Q5, M00 at index 0, M01 at index 1, and
+ * so forth
+ */
+ struct float_8_bit coeff[VPFE_ISIF_CSC_NUM_COEFF];
+};
+
+enum vpfe_isif_datasft {
+ /* No Shift */
+ VPFE_ISIF_NO_SHIFT,
+ /* 1 bit Shift */
+ VPFE_ISIF_1BIT_SHIFT,
+ /* 2 bit Shift */
+ VPFE_ISIF_2BIT_SHIFT,
+ /* 3 bit Shift */
+ VPFE_ISIF_3BIT_SHIFT,
+ /* 4 bit Shift */
+ VPFE_ISIF_4BIT_SHIFT,
+ /* 5 bit Shift */
+ VPFE_ISIF_5BIT_SHIFT,
+ /* 6 bit Shift */
+ VPFE_ISIF_6BIT_SHIFT
+};
+
+#define VPFE_ISIF_LINEAR_TAB_SIZE 192
+/*************************************************************************
+** Linearization parameters
+*************************************************************************/
+/**
+ * Structure for Sensor data linearization
+ */
+struct vpfe_isif_linearize {
+ /* Enable or Disable linearization of data */
+ unsigned char en;
+ /* Shift value applied */
+ enum vpfe_isif_datasft corr_shft;
+ /* scale factor applied U11Q10 */
+ struct float_16_bit scale_fact;
+ /* Linearization table */
+ unsigned short table[VPFE_ISIF_LINEAR_TAB_SIZE];
+};
+
+/*************************************************************************
+** ISIF Raw configuration parameters
+*************************************************************************/
+enum vpfe_isif_fmt_mode {
+ VPFE_ISIF_SPLIT,
+ VPFE_ISIF_COMBINE
+};
+
+enum vpfe_isif_lnum {
+ VPFE_ISIF_1LINE,
+ VPFE_ISIF_2LINES,
+ VPFE_ISIF_3LINES,
+ VPFE_ISIF_4LINES
+};
+
+enum vpfe_isif_line {
+ VPFE_ISIF_1STLINE,
+ VPFE_ISIF_2NDLINE,
+ VPFE_ISIF_3RDLINE,
+ VPFE_ISIF_4THLINE
+};
+
+struct vpfe_isif_fmtplen {
+ /**
+ * number of program entries for SET0, range 1 - 16
+ * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
+ * ISIF_COMBINE
+ */
+ unsigned short plen0;
+ /**
+ * number of program entries for SET1, range 1 - 16
+ * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
+ * ISIF_COMBINE
+ */
+ unsigned short plen1;
+ /**
+ * number of program entries for SET2, range 1 - 16
+ * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
+ * ISIF_COMBINE
+ */
+ unsigned short plen2;
+ /**
+ * number of program entries for SET3, range 1 - 16
+ * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
+ * ISIF_COMBINE
+ */
+ unsigned short plen3;
+};
+
+struct vpfe_isif_fmt_cfg {
+ /* Split or combine or line alternate */
+ enum vpfe_isif_fmt_mode fmtmode;
+ /* enable or disable line alternating mode */
+ unsigned char ln_alter_en;
+ /* Split/combine line number */
+ enum vpfe_isif_lnum lnum;
+ /* Address increment Range 1 - 16 */
+ unsigned int addrinc;
+};
+
+struct vpfe_isif_fmt_addr_ptr {
+ /* Initial address */
+ unsigned int init_addr;
+ /* output line number */
+ enum vpfe_isif_line out_line;
+};
+
+struct vpfe_isif_fmtpgm_ap {
+ /* program address pointer */
+ unsigned char pgm_aptr;
+ /* program address increment or decrement */
+ unsigned char pgmupdt;
+};
+
+struct vpfe_isif_data_formatter {
+ /* Enable/Disable data formatter */
+ unsigned char en;
+ /* data formatter configuration */
+ struct vpfe_isif_fmt_cfg cfg;
+ /* Formatter program entries length */
+ struct vpfe_isif_fmtplen plen;
+ /* first pixel in a line fed to formatter */
+ unsigned short fmtrlen;
+ /* HD interval for output line. Only valid when split line */
+ unsigned short fmthcnt;
+ /* formatter address pointers */
+ struct vpfe_isif_fmt_addr_ptr fmtaddr_ptr[16];
+ /* program enable/disable */
+ unsigned char pgm_en[32];
+ /* program address pointers */
+ struct vpfe_isif_fmtpgm_ap fmtpgm_ap[32];
+};
+
+struct vpfe_isif_df_csc {
+ /* Color Space Conversion configuration, 0 - csc, 1 - df */
+ unsigned int df_or_csc;
+ /* csc configuration valid if df_or_csc is 0 */
+ struct vpfe_isif_color_space_conv csc;
+ /* data formatter configuration valid if df_or_csc is 1 */
+ struct vpfe_isif_data_formatter df;
+ /* start pixel in a line at the input */
+ unsigned int start_pix;
+ /* number of pixels in input line */
+ unsigned int num_pixels;
+ /* start line at the input */
+ unsigned int start_line;
+ /* number of lines at the input */
+ unsigned int num_lines;
+};
+
+struct vpfe_isif_gain_offsets_adj {
+ /* Enable or Disable Gain adjustment for SDRAM data */
+ unsigned char gain_sdram_en;
+ /* Enable or Disable Gain adjustment for IPIPE data */
+ unsigned char gain_ipipe_en;
+ /* Enable or Disable Gain adjustment for H3A data */
+ unsigned char gain_h3a_en;
+ /* Enable or Disable Gain adjustment for SDRAM data */
+ unsigned char offset_sdram_en;
+ /* Enable or Disable Gain adjustment for IPIPE data */
+ unsigned char offset_ipipe_en;
+ /* Enable or Disable Gain adjustment for H3A data */
+ unsigned char offset_h3a_en;
+};
+
+struct vpfe_isif_cul {
+ /* Horizontal Cull pattern for odd lines */
+ unsigned char hcpat_odd;
+ /* Horizontal Cull pattern for even lines */
+ unsigned char hcpat_even;
+ /* Vertical Cull pattern */
+ unsigned char vcpat;
+ /* Enable or disable lpf. Apply when cull is enabled */
+ unsigned char en_lpf;
+};
+
+/* all the stuff in this struct will be provided by userland */
+struct vpfe_isif_raw_config {
+ /* Linearization parameters for image sensor data input */
+ struct vpfe_isif_linearize linearize;
+ /* Data formatter or CSC */
+ struct vpfe_isif_df_csc df_csc;
+ /* Defect Pixel Correction (DFC) configuration */
+ struct vpfe_isif_dfc dfc;
+ /* Black/Digital Clamp configuration */
+ struct vpfe_isif_black_clamp bclamp;
+ /* Gain, offset adjustments */
+ struct vpfe_isif_gain_offsets_adj gain_offset;
+ /* Culling */
+ struct vpfe_isif_cul culling;
+ /* horizontal offset for Gain/LSC/DFC */
+ unsigned short horz_offset;
+ /* vertical offset for Gain/LSC/DFC */
+ unsigned short vert_offset;
+};
+
+/**********************************************************************
+ IPIPE API Structures
+**********************************************************************/
+
+/* IPIPE module configurations */
+
+/* IPIPE input configuration */
+#define VPFE_IPIPE_INPUT_CONFIG (1 << 0)
+/* LUT based Defect Pixel Correction */
+#define VPFE_IPIPE_LUTDPC (1 << 1)
+/* On the fly (OTF) Defect Pixel Correction */
+#define VPFE_IPIPE_OTFDPC (1 << 2)
+/* Noise Filter - 1 */
+#define VPFE_IPIPE_NF1 (1 << 3)
+/* Noise Filter - 2 */
+#define VPFE_IPIPE_NF2 (1 << 4)
+/* White Balance. Also a control ID */
+#define VPFE_IPIPE_WB (1 << 5)
+/* 1st RGB to RGB Blend module */
+#define VPFE_IPIPE_RGB2RGB_1 (1 << 6)
+/* 2nd RGB to RGB Blend module */
+#define VPFE_IPIPE_RGB2RGB_2 (1 << 7)
+/* Gamma Correction */
+#define VPFE_IPIPE_GAMMA (1 << 8)
+/* 3D LUT color conversion */
+#define VPFE_IPIPE_3D_LUT (1 << 9)
+/* RGB to YCbCr module */
+#define VPFE_IPIPE_RGB2YUV (1 << 10)
+/* YUV 422 conversion module */
+#define VPFE_IPIPE_YUV422_CONV (1 << 11)
+/* Edge Enhancement */
+#define VPFE_IPIPE_YEE (1 << 12)
+/* Green Imbalance Correction */
+#define VPFE_IPIPE_GIC (1 << 13)
+/* CFA Interpolation */
+#define VPFE_IPIPE_CFA (1 << 14)
+/* Chroma Artifact Reduction */
+#define VPFE_IPIPE_CAR (1 << 15)
+/* Chroma Gain Suppression */
+#define VPFE_IPIPE_CGS (1 << 16)
+/* Global brightness and contrast control */
+#define VPFE_IPIPE_GBCE (1 << 17)
+
+#define VPFE_IPIPE_MAX_MODULES 18
+
+struct ipipe_float_u16 {
+ unsigned short integer;
+ unsigned short decimal;
+};
+
+struct ipipe_float_s16 {
+ short integer;
+ unsigned short decimal;
+};
+
+struct ipipe_float_u8 {
+ unsigned char integer;
+ unsigned char decimal;
+};
+
+/* Copy method selection for vertical correction
+ * Used when ipipe_dfc_corr_meth is IPIPE_DPC_CTORB_AFTER_HINT
+ */
+enum vpfe_ipipe_dpc_corr_meth {
+ /* replace by black or white dot specified by repl_white */
+ VPFE_IPIPE_DPC_REPL_BY_DOT = 0,
+ /* Copy from left */
+ VPFE_IPIPE_DPC_CL = 1,
+ /* Copy from right */
+ VPFE_IPIPE_DPC_CR = 2,
+ /* Horizontal interpolation */
+ VPFE_IPIPE_DPC_H_INTP = 3,
+ /* Vertical interpolation */
+ VPFE_IPIPE_DPC_V_INTP = 4,
+ /* Copy from top */
+ VPFE_IPIPE_DPC_CT = 5,
+ /* Copy from bottom */
+ VPFE_IPIPE_DPC_CB = 6,
+ /* 2D interpolation */
+ VPFE_IPIPE_DPC_2D_INTP = 7,
+};
+
+struct vpfe_ipipe_lutdpc_entry {
+ /* Horizontal position */
+ unsigned short horz_pos;
+ /* vertical position */
+ unsigned short vert_pos;
+ enum vpfe_ipipe_dpc_corr_meth method;
+};
+
+#define VPFE_IPIPE_MAX_SIZE_DPC 256
+
+/* Structure for configuring DPC module */
+struct vpfe_ipipe_lutdpc {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* 0 - replace with black dot, 1 - white dot when correction
+ * method is IPIPE_DFC_REPL_BY_DOT=0,
+ */
+ unsigned char repl_white;
+ /* number of entries in the correction table. Currently only
+ * up to 256 entries are supported; infinite mode is not supported
+ */
+ unsigned short dpc_size;
+ struct vpfe_ipipe_lutdpc_entry table[VPFE_IPIPE_MAX_SIZE_DPC];
+};
+
+enum vpfe_ipipe_otfdpc_det_meth {
+ VPFE_IPIPE_DPC_OTF_MIN_MAX,
+ VPFE_IPIPE_DPC_OTF_MIN_MAX2
+};
+
+struct vpfe_ipipe_otfdpc_thr {
+ unsigned short r;
+ unsigned short gr;
+ unsigned short gb;
+ unsigned short b;
+};
+
+enum vpfe_ipipe_otfdpc_alg {
+ VPFE_IPIPE_OTFDPC_2_0,
+ VPFE_IPIPE_OTFDPC_3_0
+};
+
+struct vpfe_ipipe_otfdpc_2_0_cfg {
+ /* defect detection threshold for MIN_MAX2 method (DPC 2.0 alg) */
+ struct vpfe_ipipe_otfdpc_thr det_thr;
+ /* defect correction threshold for MIN_MAX2 method (DPC 2.0 alg) or
+ * maximum value for MIN_MAX method
+ */
+ struct vpfe_ipipe_otfdpc_thr corr_thr;
+};
+
+struct vpfe_ipipe_otfdpc_3_0_cfg {
+ /* DPC3.0 activity adj shf. activity = (max2-min2) >> (6 -shf)
+ */
+ unsigned char act_adj_shf;
+ /* DPC3.0 detection threshold, THR */
+ unsigned short det_thr;
+ /* DPC3.0 detection threshold slope, SLP */
+ unsigned short det_slp;
+ /* DPC3.0 detection threshold min, MIN */
+ unsigned short det_thr_min;
+ /* DPC3.0 detection threshold max, MAX */
+ unsigned short det_thr_max;
+ /* DPC3.0 correction threshold, THR */
+ unsigned short corr_thr;
+ /* DPC3.0 correction threshold slope, SLP */
+ unsigned short corr_slp;
+ /* DPC3.0 correction threshold min, MIN */
+ unsigned short corr_thr_min;
+ /* DPC3.0 correction threshold max, MAX */
+ unsigned short corr_thr_max;
+};
+
+struct vpfe_ipipe_otfdpc {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* defect detection method */
+ enum vpfe_ipipe_otfdpc_det_meth det_method;
+ /* Algorithm used. Applicable only when IPIPE_DPC_OTF_MIN_MAX2 is
+ * used
+ */
+ enum vpfe_ipipe_otfdpc_alg alg;
+ union {
+ /* if alg is IPIPE_OTFDPC_2_0 */
+ struct vpfe_ipipe_otfdpc_2_0_cfg dpc_2_0;
+ /* if alg is IPIPE_OTFDPC_3_0 */
+ struct vpfe_ipipe_otfdpc_3_0_cfg dpc_3_0;
+ } alg_cfg;
+};
+
+/* Threshold values table size */
+#define VPFE_IPIPE_NF_THR_TABLE_SIZE 8
+/* Intensity values table size */
+#define VPFE_IPIPE_NF_STR_TABLE_SIZE 8
+
+/* NF, sampling method for green pixels */
+enum vpfe_ipipe_nf_sampl_meth {
+ /* Same as R or B */
+ VPFE_IPIPE_NF_BOX,
+ /* Diamond mode */
+ VPFE_IPIPE_NF_DIAMOND
+};
+
+/* Structure for configuring NF module */
+struct vpfe_ipipe_nf {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* Sampling method for green pixels */
+ enum vpfe_ipipe_nf_sampl_meth gr_sample_meth;
+ /* Down shift value in LUT reference address
+ */
+ unsigned char shft_val;
+ /* Spread value in NF algorithm
+ */
+ unsigned char spread_val;
+ /* Apply LSC gain to threshold. Enable this only if
+ * LSC is enabled in ISIF
+ */
+ unsigned char apply_lsc_gain;
+ /* Threshold values table */
+ unsigned short thr[VPFE_IPIPE_NF_THR_TABLE_SIZE];
+ /* intensity values table */
+ unsigned char str[VPFE_IPIPE_NF_STR_TABLE_SIZE];
+ /* Edge detection minimum threshold */
+ unsigned short edge_det_min_thr;
+ /* Edge detection maximum threshold */
+ unsigned short edge_det_max_thr;
+};
+
+enum vpfe_ipipe_gic_alg {
+ VPFE_IPIPE_GIC_ALG_CONST_GAIN,
+ VPFE_IPIPE_GIC_ALG_ADAPT_GAIN
+};
+
+enum vpfe_ipipe_gic_thr_sel {
+ VPFE_IPIPE_GIC_THR_REG,
+ VPFE_IPIPE_GIC_THR_NF
+};
+
+enum vpfe_ipipe_gic_wt_fn_type {
+ /* Use difference as index */
+ VPFE_IPIPE_GIC_WT_FN_TYP_DIF,
+ /* Use weight function as index */
+ VPFE_IPIPE_GIC_WT_FN_TYP_HP_VAL
+};
+
+/* structure for Green Imbalance Correction */
+struct vpfe_ipipe_gic {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* 0 - Constant gain , 1 - Adaptive gain algorithm */
+ enum vpfe_ipipe_gic_alg gic_alg;
+ /* GIC gain or weight. Used for Constant gain and Adaptive algorithms
+ */
+ unsigned short gain;
+ /* Threshold selection. GIC register values or NF2 thr table */
+ enum vpfe_ipipe_gic_thr_sel thr_sel;
+ /* thr1. Used when thr_sel is IPIPE_GIC_THR_REG */
+ unsigned short thr;
+ /* this value is used for thr2-thr1, thr3-thr2 or
+ * thr4-thr3 when wt_fn_type is index. Otherwise it
+ * is the
+ */
+ unsigned short slope;
+ /* Apply LSC gain to threshold. Enable this only if
+ * LSC is enabled in ISIF & thr_sel is IPIPE_GIC_THR_REG
+ */
+ unsigned char apply_lsc_gain;
+ /* Multiply Nf2 threshold by this gain. Use this when thr_sel
+ * is IPIPE_GIC_THR_NF
+ */
+ struct ipipe_float_u8 nf2_thr_gain;
+ /* Weight function uses difference as index or high pass value.
+ * Used for adaptive gain algorithm
+ */
+ enum vpfe_ipipe_gic_wt_fn_type wt_fn_type;
+};
+
+/* Structure for configuring WB module */
+struct vpfe_ipipe_wb {
+ /* Offset (S12) for R */
+ short ofst_r;
+ /* Offset (S12) for Gr */
+ short ofst_gr;
+ /* Offset (S12) for Gb */
+ short ofst_gb;
+ /* Offset (S12) for B */
+ short ofst_b;
+ /* Gain (U13Q9) for Red */
+ struct ipipe_float_u16 gain_r;
+ /* Gain (U13Q9) for Gr */
+ struct ipipe_float_u16 gain_gr;
+ /* Gain (U13Q9) for Gb */
+ struct ipipe_float_u16 gain_gb;
+ /* Gain (U13Q9) for Blue */
+ struct ipipe_float_u16 gain_b;
+};
+
+enum vpfe_ipipe_cfa_alg {
+ /* Algorithm is 2DirAC */
+ VPFE_IPIPE_CFA_ALG_2DIRAC,
+ /* Algorithm is 2DirAC + Digital Antialiasing (DAA) */
+ VPFE_IPIPE_CFA_ALG_2DIRAC_DAA,
+ /* Algorithm is DAA */
+ VPFE_IPIPE_CFA_ALG_DAA
+};
+
+/* Structure for CFA Interpolation */
+struct vpfe_ipipe_cfa {
+ /* 2DirAC or 2DirAC + DAA */
+ enum vpfe_ipipe_cfa_alg alg;
+ /* 2Dir CFA HP value Low Threshold */
+ unsigned short hpf_thr_2dir;
+ /* 2Dir CFA HP value slope */
+ unsigned short hpf_slp_2dir;
+ /* 2Dir CFA HP mix threshold */
+ unsigned short hp_mix_thr_2dir;
+ /* 2Dir CFA HP mix slope */
+ unsigned short hp_mix_slope_2dir;
+ /* 2Dir Direction threshold */
+ unsigned short dir_thr_2dir;
+ /* 2Dir Direction slope */
+ unsigned short dir_slope_2dir;
+ /* 2Dir Non Directional Weight */
+ unsigned short nd_wt_2dir;
+ /* DAA Mono Hue Fraction */
+ unsigned short hue_fract_daa;
+ /* DAA Mono Edge threshold */
+ unsigned short edge_thr_daa;
+ /* DAA Mono threshold minimum */
+ unsigned short thr_min_daa;
+ /* DAA Mono threshold slope */
+ unsigned short thr_slope_daa;
+ /* DAA Mono slope minimum */
+ unsigned short slope_min_daa;
+ /* DAA Mono slope slope */
+ unsigned short slope_slope_daa;
+ /* DAA Mono LP weight */
+ unsigned short lp_wt_daa;
+};
+
+/* Struct for configuring RGB2RGB blending module */
+struct vpfe_ipipe_rgb2rgb {
+ /* Matrix coefficient for RR S12Q8 for ID = 1 and S11Q8 for ID = 2 */
+ struct ipipe_float_s16 coef_rr;
+ /* Matrix coefficient for GR S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_gr;
+ /* Matrix coefficient for BR S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_br;
+ /* Matrix coefficient for RG S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_rg;
+ /* Matrix coefficient for GG S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_gg;
+ /* Matrix coefficient for BG S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_bg;
+ /* Matrix coefficient for RB S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_rb;
+ /* Matrix coefficient for GB S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_gb;
+ /* Matrix coefficient for BB S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_bb;
+ /* Output offset for R S13/S11 */
+ int out_ofst_r;
+ /* Output offset for G S13/S11 */
+ int out_ofst_g;
+ /* Output offset for B S13/S11 */
+ int out_ofst_b;
+};
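The comments above give only the fixed-point formats of the coefficients; the blend itself is a 3x3 matrix multiply plus a per-channel output offset. The helper below is a hedged illustration of that arithmetic, not driver code: the name, the reading of ipipe_float_s16 as integer + decimal/256, and the assumption that the integer part carries the sign are the editor's.

static int rgb2rgb_red_channel(const struct vpfe_ipipe_rgb2rgb *m,
                               int r, int g, int b)
{
        /* R' = coef_rr * R + coef_gr * G + coef_br * B + out_ofst_r,
         * with each coefficient taken as integer + decimal/256 (Q8).
         */
        int rr = m->coef_rr.integer * 256 + m->coef_rr.decimal;
        int gr = m->coef_gr.integer * 256 + m->coef_gr.decimal;
        int br = m->coef_br.integer * 256 + m->coef_br.decimal;

        return ((rr * r + gr * g + br * b) >> 8) + m->out_ofst_r;
}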
+
+#define VPFE_IPIPE_MAX_SIZE_GAMMA 512
+
+enum vpfe_ipipe_gamma_tbl_size {
+ VPFE_IPIPE_GAMMA_TBL_SZ_64 = 64,
+ VPFE_IPIPE_GAMMA_TBL_SZ_128 = 128,
+ VPFE_IPIPE_GAMMA_TBL_SZ_256 = 256,
+ VPFE_IPIPE_GAMMA_TBL_SZ_512 = 512,
+};
+
+enum vpfe_ipipe_gamma_tbl_sel {
+ VPFE_IPIPE_GAMMA_TBL_RAM = 0,
+ VPFE_IPIPE_GAMMA_TBL_ROM = 1,
+};
+
+struct vpfe_ipipe_gamma_entry {
+ /* 10 bit slope */
+ short slope;
+ /* 10 bit offset */
+ unsigned short offset;
+};
+
+/* Structure for configuring Gamma correction module */
+struct vpfe_ipipe_gamma {
+ /* 0 - Enable Gamma correction for Red
+ * 1 - bypass Gamma correction. Data is divided by 16
+ */
+ unsigned char bypass_r;
+ /* 0 - Enable Gamma correction for Blue
+ * 1 - bypass Gamma correction. Data is divided by 16
+ */
+ unsigned char bypass_b;
+ /* 0 - Enable Gamma correction for Green
+ * 1 - bypass Gamma correction. Data is divided by 16
+ */
+ unsigned char bypass_g;
+ /* IPIPE_GAMMA_TBL_RAM or IPIPE_GAMMA_TBL_ROM */
+ enum vpfe_ipipe_gamma_tbl_sel tbl_sel;
+ /* Table size for RAM gamma table.
+ */
+ enum vpfe_ipipe_gamma_tbl_size tbl_size;
+ /* R table */
+ struct vpfe_ipipe_gamma_entry table_r[VPFE_IPIPE_MAX_SIZE_GAMMA];
+ /* Blue table */
+ struct vpfe_ipipe_gamma_entry table_b[VPFE_IPIPE_MAX_SIZE_GAMMA];
+ /* Green table */
+ struct vpfe_ipipe_gamma_entry table_g[VPFE_IPIPE_MAX_SIZE_GAMMA];
+};
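Each RAM table entry describes one piecewise-linear segment: a 10-bit starting offset plus a 10-bit slope. As a hedged sketch of how an application might fill such a table (the 1/2.2 power curve, the equal-width segments and the helper name are assumptions for illustration, not requirements of this header):

#include <math.h>

/* Illustrative only: build a 1/2.2 power-law ramp, assuming each of the
 * 'size' entries covers an equal slice of the 10-bit range and that the
 * hardware interpolates offset + slope linearly within a segment.
 */
static void fill_gamma_ramp(struct vpfe_ipipe_gamma_entry *tbl, int size)
{
        int i;

        for (i = 0; i < size; i++) {
                unsigned short y0 = 1023 * pow((double)i / size, 1.0 / 2.2);
                unsigned short y1 = 1023 * pow((double)(i + 1) / size, 1.0 / 2.2);

                tbl[i].offset = y0;        /* segment start, 10 bit */
                tbl[i].slope = y1 - y0;    /* per-segment increment, 10 bit */
        }
}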
+
+#define VPFE_IPIPE_MAX_SIZE_3D_LUT 729
+
+struct vpfe_ipipe_3d_lut_entry {
+ /* 10 bit entry for red */
+ unsigned short r;
+ /* 10 bit entry for green */
+ unsigned short g;
+ /* 10 bit entry for blue */
+ unsigned short b;
+};
+
+/* structure for 3D-LUT */
+struct vpfe_ipipe_3d_lut {
+ /* enable/disable 3D lut */
+ unsigned char en;
+ /* 3D - LUT table entry */
+ struct vpfe_ipipe_3d_lut_entry table[VPFE_IPIPE_MAX_SIZE_3D_LUT];
+};
+
+/* Struct for configuring rgb2ycbcr module */
+struct vpfe_ipipe_rgb2yuv {
+ /* Matrix coefficient for RY S12Q8 */
+ struct ipipe_float_s16 coef_ry;
+ /* Matrix coefficient for GY S12Q8 */
+ struct ipipe_float_s16 coef_gy;
+ /* Matrix coefficient for BY S12Q8 */
+ struct ipipe_float_s16 coef_by;
+ /* Matrix coefficient for RCb S12Q8 */
+ struct ipipe_float_s16 coef_rcb;
+ /* Matrix coefficient for GCb S12Q8 */
+ struct ipipe_float_s16 coef_gcb;
+ /* Matrix coefficient for BCb S12Q8 */
+ struct ipipe_float_s16 coef_bcb;
+ /* Matrix coefficient for RCr S12Q8 */
+ struct ipipe_float_s16 coef_rcr;
+ /* Matrix coefficient for GCr S12Q8 */
+ struct ipipe_float_s16 coef_gcr;
+ /* Matrix coefficient for BCr S12Q8 */
+ struct ipipe_float_s16 coef_bcr;
+ /* Output offset for R S11 */
+ int out_ofst_y;
+ /* Output offset for Cb S11 */
+ int out_ofst_cb;
+ /* Output offset for Cr S11 */
+ int out_ofst_cr;
+};
+
+enum vpfe_ipipe_gbce_type {
+ VPFE_IPIPE_GBCE_Y_VAL_TBL = 0,
+ VPFE_IPIPE_GBCE_GAIN_TBL = 1,
+};
+
+#define VPFE_IPIPE_MAX_SIZE_GBCE_LUT 1024
+
+/* structure for Global brightness and Contrast */
+struct vpfe_ipipe_gbce {
+ /* enable/disable GBCE */
+ unsigned char en;
+ /* Y - value table or Gain table */
+ enum vpfe_ipipe_gbce_type type;
+ /* LUT for GBCE with 1024 entries */
+ unsigned short table[VPFE_IPIPE_MAX_SIZE_GBCE_LUT];
+};
+
+/* Chrominance position. Applicable only for YCbCr input
+ * Applied after edge enhancement
+ */
+enum vpfe_chr_pos {
+ /* Co-sited, same position as luminance */
+ VPFE_IPIPE_YUV422_CHR_POS_COSITE = 0,
+ /* Centered, in the middle between luminance samples */
+ VPFE_IPIPE_YUV422_CHR_POS_CENTRE = 1,
+};
+
+/* Structure for configuring yuv422 conversion module */
+struct vpfe_ipipe_yuv422_conv {
+ /* 1 - enable LPF for chrominance, 0 - disable */
+ unsigned char en_chrom_lpf;
+ /* Chrominance position: co-sited or centered (enum vpfe_chr_pos) */
+ enum vpfe_chr_pos chrom_pos;
+};
+
+#define VPFE_IPIPE_MAX_SIZE_YEE_LUT 1024
+
+enum vpfe_ipipe_yee_merge_meth {
+ VPFE_IPIPE_YEE_ABS_MAX = 0,
+ VPFE_IPIPE_YEE_EE_ES = 1,
+};
+
+/* Structure for configuring YUV Edge Enhancement module */
+struct vpfe_ipipe_yee {
+ /* 1 - enable enhancement, 0 - disable */
+ unsigned char en;
+ /* enable/disable halo reduction in edge sharpener */
+ unsigned char en_halo_red;
+ /* Merge method between Edge Enhancer and Edge Sharpener */
+ enum vpfe_ipipe_yee_merge_meth merge_meth;
+ /* HPF Shift length */
+ unsigned char hpf_shft;
+ /* HPF Coefficient 00, S10 */
+ short hpf_coef_00;
+ /* HPF Coefficient 01, S10 */
+ short hpf_coef_01;
+ /* HPF Coefficient 02, S10 */
+ short hpf_coef_02;
+ /* HPF Coefficient 10, S10 */
+ short hpf_coef_10;
+ /* HPF Coefficient 11, S10 */
+ short hpf_coef_11;
+ /* HPF Coefficient 12, S10 */
+ short hpf_coef_12;
+ /* HPF Coefficient 20, S10 */
+ short hpf_coef_20;
+ /* HPF Coefficient 21, S10 */
+ short hpf_coef_21;
+ /* HPF Coefficient 22, S10 */
+ short hpf_coef_22;
+ /* Lower threshold before referring to LUT */
+ unsigned short yee_thr;
+ /* Edge sharpener Gain */
+ unsigned short es_gain;
+ /* Edge sharpener lower threshold */
+ unsigned short es_thr1;
+ /* Edge sharpener upper threshold */
+ unsigned short es_thr2;
+ /* Edge sharpener gain on gradient */
+ unsigned short es_gain_grad;
+ /* Edge sharpener offset on gradient */
+ unsigned short es_ofst_grad;
+ /* EE table with 1024 entries */
+ short table[VPFE_IPIPE_MAX_SIZE_YEE_LUT];
+};
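The nine hpf_coef fields form a 3x3 high-pass kernel centred on hpf_coef_11. The initializer below is purely illustrative (a zero-sum Laplacian-style kernel; real tunings are sensor dependent and none of these numbers come from a TI reference):

static const struct vpfe_ipipe_yee yee_sharpen_example = {
        .en = 1,
        .merge_meth = VPFE_IPIPE_YEE_ABS_MAX,
        /* zero-sum kernel: strong centre tap, -1 on every neighbour */
        .hpf_coef_00 = -1, .hpf_coef_01 = -1, .hpf_coef_02 = -1,
        .hpf_coef_10 = -1, .hpf_coef_11 =  8, .hpf_coef_12 = -1,
        .hpf_coef_20 = -1, .hpf_coef_21 = -1, .hpf_coef_22 = -1,
};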
+
+enum vpfe_ipipe_car_meth {
+ /* Chromatic Gain Control */
+ VPFE_IPIPE_CAR_CHR_GAIN_CTRL = 0,
+ /* Dynamic switching between CHR_GAIN_CTRL
+ * and MED_FLTR
+ */
+ VPFE_IPIPE_CAR_DYN_SWITCH = 1,
+ /* Median Filter */
+ VPFE_IPIPE_CAR_MED_FLTR = 2,
+};
+
+enum vpfe_ipipe_car_hpf_type {
+ VPFE_IPIPE_CAR_HPF_Y = 0,
+ VPFE_IPIPE_CAR_HPF_H = 1,
+ VPFE_IPIPE_CAR_HPF_V = 2,
+ VPFE_IPIPE_CAR_HPF_2D = 3,
+ /* 2D HPF from YUV Edge Enhancement */
+ VPFE_IPIPE_CAR_HPF_2D_YEE = 4,
+};
+
+struct vpfe_ipipe_car_gain {
+ /* csup_gain */
+ unsigned char gain;
+ /* csup_shf. */
+ unsigned char shft;
+ /* gain minimum */
+ unsigned short gain_min;
+};
+
+/* Structure for Chromatic Artifact Reduction */
+struct vpfe_ipipe_car {
+ /* enable/disable */
+ unsigned char en;
+ /* Gain control or Dynamic switching */
+ enum vpfe_ipipe_car_meth meth;
+ /* Gain1 function configuration for Gain control */
+ struct vpfe_ipipe_car_gain gain1;
+ /* Gain2 function configuration for Gain control */
+ struct vpfe_ipipe_car_gain gain2;
+ /* HPF type used for CAR */
+ enum vpfe_ipipe_car_hpf_type hpf;
+ /* csup_thr: HPF threshold for Gain control */
+ unsigned char hpf_thr;
+ /* Down shift value for hpf. 2 bits */
+ unsigned char hpf_shft;
+ /* switch limit for median filter */
+ unsigned char sw0;
+ /* switch coefficient for Gain control */
+ unsigned char sw1;
+};
+
+/* structure for Chromatic Gain Suppression */
+struct vpfe_ipipe_cgs {
+ /* enable/disable */
+ unsigned char en;
+ /* gain1 bright side threshold */
+ unsigned char h_thr;
+ /* gain1 bright side slope */
+ unsigned char h_slope;
+ /* gain1 down shift value for bright side */
+ unsigned char h_shft;
+ /* gain1 bright side minimum gain */
+ unsigned char h_min;
+};
+
+/* Max pixels allowed in the input. If the input is wider than this,
+ * either decimation or frame division mode must be enabled
+ */
+#define VPFE_IPIPE_MAX_INPUT_WIDTH 2600
+
+struct vpfe_ipipe_input_config {
+ unsigned int vst;
+ unsigned int hst;
+};
+
+/**
+ * struct vpfe_ipipe_config - IPIPE engine configuration (user)
+ * @flag: Bitmask specifying which IPIPE modules should be configured.
+ * @input_config: Pointer to structure for IPIPE input configuration.
+ * @lutdpc: Pointer to structure for LUT-based defect pixel correction.
+ * @otfdpc: Pointer to structure for on-the-fly defect pixel correction.
+ * @nf1: Pointer to structure for the first Noise Filter.
+ * @nf2: Pointer to structure for the second Noise Filter.
+ * @gic: Pointer to structure for Green Imbalance Correction.
+ * @wbal: Pointer to structure for White Balance.
+ * @cfa: Pointer to structure for CFA interpolation.
+ * @rgb2rgb1: Pointer to structure for the first RGB to RGB blending.
+ * @rgb2rgb2: Pointer to structure for the second RGB to RGB blending.
+ * @gamma: Pointer to structure for Gamma correction.
+ * @lut: Pointer to structure for 3D LUT.
+ * @rgb2yuv: Pointer to structure for RGB to YCbCr conversion.
+ * @gbce: Pointer to structure for Global Brightness and Contrast Enhancement.
+ * @yuv422_conv: Pointer to structure for YUV 422 conversion.
+ * @yee: Pointer to structure for Edge Enhancement.
+ * @car: Pointer to structure for Chromatic Artifact Reduction.
+ * @cgs: Pointer to structure for Chromatic Gain Suppression.
+ */
+struct vpfe_ipipe_config {
+ __u32 flag;
+ struct vpfe_ipipe_input_config __user *input_config;
+ struct vpfe_ipipe_lutdpc __user *lutdpc;
+ struct vpfe_ipipe_otfdpc __user *otfdpc;
+ struct vpfe_ipipe_nf __user *nf1;
+ struct vpfe_ipipe_nf __user *nf2;
+ struct vpfe_ipipe_gic __user *gic;
+ struct vpfe_ipipe_wb __user *wbal;
+ struct vpfe_ipipe_cfa __user *cfa;
+ struct vpfe_ipipe_rgb2rgb __user *rgb2rgb1;
+ struct vpfe_ipipe_rgb2rgb __user *rgb2rgb2;
+ struct vpfe_ipipe_gamma __user *gamma;
+ struct vpfe_ipipe_3d_lut __user *lut;
+ struct vpfe_ipipe_rgb2yuv __user *rgb2yuv;
+ struct vpfe_ipipe_gbce __user *gbce;
+ struct vpfe_ipipe_yuv422_conv __user *yuv422_conv;
+ struct vpfe_ipipe_yee __user *yee;
+ struct vpfe_ipipe_car __user *car;
+ struct vpfe_ipipe_cgs __user *cgs;
+};
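Every member except flag is a user-space pointer, and only the structures whose bits are set in flag are copied in (bit n selects module n of the IPIPE module table in dm365_ipipe.c). The sketch below is a hedged user-space example, not reference code: the device node path is a placeholder, and it assumes VPFE_IPIPE_WB and VIDIOC_VPFE_IPIPE_S_CONFIG are the enum value and ioctl defined earlier in this header.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int example_set_wb(void)
{
        struct vpfe_ipipe_wb wb;
        struct vpfe_ipipe_config cfg;
        int fd = open("/dev/v4l-subdev2", O_RDWR);  /* placeholder node */
        int ret;

        if (fd < 0)
                return -1;

        memset(&wb, 0, sizeof(wb));
        wb.gain_r.integer = 2;          /* U13Q9 gain of 2.0, offsets left 0 */
        wb.gain_gr.integer = 2;
        wb.gain_gb.integer = 2;
        wb.gain_b.integer = 2;

        memset(&cfg, 0, sizeof(cfg));
        cfg.flag = 1 << VPFE_IPIPE_WB;  /* configure only the WB module */
        cfg.wbal = &wb;

        ret = ioctl(fd, VIDIOC_VPFE_IPIPE_S_CONFIG, &cfg);
        close(fd);
        return ret;
}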
+
+/*******************************************************************
+** Resizer API structures
+*******************************************************************/
+/* Interpolation types used for horizontal rescale */
+enum vpfe_rsz_intp_t {
+ VPFE_RSZ_INTP_CUBIC,
+ VPFE_RSZ_INTP_LINEAR
+};
+
+/* Horizontal LPF intensity selection */
+enum vpfe_rsz_h_lpf_lse_t {
+ VPFE_RSZ_H_LPF_LSE_INTERN,
+ VPFE_RSZ_H_LPF_LSE_USER_VAL
+};
+
+enum vpfe_rsz_down_scale_ave_sz {
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_4,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_8,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_16,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_32,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_64,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_128,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_256
+};
+
+struct vpfe_rsz_output_spec {
+ /* enable horizontal flip */
+ unsigned char h_flip;
+ /* enable vertical flip */
+ unsigned char v_flip;
+ /* line start offset for y. */
+ unsigned int vst_y;
+ /* line start offset for c. Only for 420 */
+ unsigned int vst_c;
+ /* vertical rescale interpolation type, YCbCr or Luminance */
+ enum vpfe_rsz_intp_t v_typ_y;
+ /* vertical rescale interpolation type for Chrominance */
+ enum vpfe_rsz_intp_t v_typ_c;
+ /* vertical lpf intensity - Luminance */
+ unsigned char v_lpf_int_y;
+ /* vertical lpf intensity - Chrominance */
+ unsigned char v_lpf_int_c;
+ /* horizontal rescale interpolation types, YCbCr or Luminance */
+ enum vpfe_rsz_intp_t h_typ_y;
+ /* horizontal rescale interpolation types, Chrominance */
+ enum vpfe_rsz_intp_t h_typ_c;
+ /* horizontal lpf intensity - Luminance */
+ unsigned char h_lpf_int_y;
+ /* horizontal lpf intensity - Chrominance */
+ unsigned char h_lpf_int_c;
+ /* Use down scale mode for scale down */
+ unsigned char en_down_scale;
+ /* if downscaling, set the downscale mode average size for the
+ * horizontal direction. Used only if output width and height are
+ * less than the input sizes
+ */
+ enum vpfe_rsz_down_scale_ave_sz h_dscale_ave_sz;
+ /* if downscaling, set the downscale mode average size for the
+ * vertical direction. Used only if output width and height are
+ * less than the input sizes
+ */
+ enum vpfe_rsz_down_scale_ave_sz v_dscale_ave_sz;
+ /* Y offset. If set, the offset would be added to the base address
+ */
+ unsigned int user_y_ofst;
+ /* C offset. If set, the offset would be added to the base address
+ */
+ unsigned int user_c_ofst;
+};
+
+struct vpfe_rsz_config_params {
+ unsigned int vst;
+ /* horizontal start position of the image
+ * data to IPIPE
+ */
+ unsigned int hst;
+ /* output spec of the image data coming out of resizer - 0 (UYVY) */
+ struct vpfe_rsz_output_spec output1;
+ /* output spec of the image data coming out of resizer - 1 (UYVY) */
+ struct vpfe_rsz_output_spec output2;
+ /* 0 - chroma sample at odd pixel, 1 - at even pixel */
+ unsigned char chroma_sample_even;
+ unsigned char frame_div_mode_en;
+ unsigned char yuv_y_min;
+ unsigned char yuv_y_max;
+ unsigned char yuv_c_min;
+ unsigned char yuv_c_max;
+ enum vpfe_chr_pos out_chr_pos;
+ unsigned char bypass;
+};
+
+/* Structure for VIDIOC_VPFE_RSZ_[S/G]_CONFIG IOCTLs */
+struct vpfe_rsz_config {
+ struct vpfe_rsz_config_params *config;
+};
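Unlike vpfe_ipipe_config above, this wrapper carries a single pointer to the whole parameter block. A hedged user-space fragment follows (fd is an already-open resizer subdev node, and the VIDIOC_VPFE_RSZ_G_CONFIG/S_CONFIG names are the expansions of the ioctl names the comment above refers to):

struct vpfe_rsz_config_params params;
struct vpfe_rsz_config rsz_cfg = { .config = &params };

/* read-modify-write: fetch the current setup, mirror output 1, write back */
if (ioctl(fd, VIDIOC_VPFE_RSZ_G_CONFIG, &rsz_cfg) == 0) {
        params.output1.h_flip = 1;
        ioctl(fd, VIDIOC_VPFE_RSZ_S_CONFIG, &rsz_cfg);
}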
+
+#endif /* _DAVINCI_VPFE_USER_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
new file mode 100644
index 000000000000..92853539cc0a
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -0,0 +1,1863 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ *
+ *
+ * IPIPE allows fine tuning of the input image using the different
+ * tuning modules in IPIPE, for example the noise filter and defect
+ * pixel correction. It essentially operates on Bayer raw data or
+ * YUV raw data. To do image tuning, applications issue the
+ * VIDIOC_VPFE_IPIPE_S_CONFIG private ioctl handled by this subdev.
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dm365_ipipe.h"
+#include "dm365_ipipe_hw.h"
+#include "vpfe_mc_capture.h"
+
+#define MIN_OUT_WIDTH 32
+#define MIN_OUT_HEIGHT 32
+
+/* ipipe input formats */
+static const unsigned int ipipe_input_fmts[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8,
+};
+
+/* ipipe output formats */
+static const unsigned int ipipe_output_fmts[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+};
+
+static int ipipe_validate_lutdpc_params(struct vpfe_ipipe_lutdpc *lutdpc)
+{
+ int i;
+
+ if (lutdpc->en > 1 || lutdpc->repl_white > 1 ||
+ lutdpc->dpc_size > LUT_DPC_MAX_SIZE)
+ return -EINVAL;
+
+ if (lutdpc->en && !lutdpc->table)
+ return -EINVAL;
+
+ for (i = 0; i < lutdpc->dpc_size; i++)
+ if (lutdpc->table[i].horz_pos > LUT_DPC_H_POS_MASK ||
+ lutdpc->table[i].vert_pos > LUT_DPC_V_POS_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_lutdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_lutdpc *lutdpc = &ipipe->config.lutdpc;
+ struct vpfe_ipipe_lutdpc *dpc_param;
+
+ if (!param) {
+ memset((void *)lutdpc, 0, sizeof(struct vpfe_ipipe_lutdpc));
+ goto success;
+ }
+
+ dpc_param = (struct vpfe_ipipe_lutdpc *)param;
+ if (dpc_param->dpc_size > LUT_DPC_MAX_SIZE)
+ return -EINVAL;
+ lutdpc->en = dpc_param->en;
+ lutdpc->repl_white = dpc_param->repl_white;
+ lutdpc->dpc_size = dpc_param->dpc_size;
+ memcpy(&lutdpc->table, &dpc_param->table,
+ (dpc_param->dpc_size * sizeof(struct vpfe_ipipe_lutdpc_entry)));
+ if (ipipe_validate_lutdpc_params(lutdpc) < 0)
+ return -EINVAL;
+
+success:
+ ipipe_set_lutdpc_regs(ipipe->base_addr, ipipe->isp5_base_addr, lutdpc);
+
+ return 0;
+}
+
+static int ipipe_get_lutdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_lutdpc *lut_param = (struct vpfe_ipipe_lutdpc *)param;
+ struct vpfe_ipipe_lutdpc *lutdpc = &ipipe->config.lutdpc;
+
+ lut_param->en = lutdpc->en;
+ lut_param->repl_white = lutdpc->repl_white;
+ lut_param->dpc_size = lutdpc->dpc_size;
+ memcpy(&lut_param->table, &lutdpc->table,
+ (lutdpc->dpc_size * sizeof(struct vpfe_ipipe_lutdpc_entry)));
+
+ return 0;
+}
+
+static int ipipe_set_input_config(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_input_config *config = &ipipe->config.input_config;
+
+ if (!param)
+ memset(config, 0, sizeof(struct vpfe_ipipe_input_config));
+ else
+ memcpy(config, param, sizeof(struct vpfe_ipipe_input_config));
+ return 0;
+}
+
+static int ipipe_get_input_config(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_input_config *config = &ipipe->config.input_config;
+
+ if (!param)
+ return -EINVAL;
+
+ memcpy(param, config, sizeof(struct vpfe_ipipe_input_config));
+
+ return 0;
+}
+
+static int ipipe_validate_otfdpc_params(struct vpfe_ipipe_otfdpc *dpc_param)
+{
+ struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_2_0;
+ struct vpfe_ipipe_otfdpc_3_0_cfg *dpc_3_0;
+
+ if (dpc_param->en > 1)
+ return -EINVAL;
+
+ if (dpc_param->alg == VPFE_IPIPE_OTFDPC_2_0) {
+ dpc_2_0 = &dpc_param->alg_cfg.dpc_2_0;
+ if (dpc_2_0->det_thr.r > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->det_thr.gr > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->det_thr.gb > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->det_thr.b > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->corr_thr.r > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->corr_thr.gr > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->corr_thr.gb > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->corr_thr.b > OTFDPC_DPC2_THR_MASK)
+ return -EINVAL;
+ return 0;
+ }
+
+ dpc_3_0 = &dpc_param->alg_cfg.dpc_3_0;
+
+ if (dpc_3_0->act_adj_shf > OTF_DPC3_0_SHF_MASK ||
+ dpc_3_0->det_thr > OTF_DPC3_0_DET_MASK ||
+ dpc_3_0->det_slp > OTF_DPC3_0_SLP_MASK ||
+ dpc_3_0->det_thr_min > OTF_DPC3_0_DET_MASK ||
+ dpc_3_0->det_thr_max > OTF_DPC3_0_DET_MASK ||
+ dpc_3_0->corr_thr > OTF_DPC3_0_CORR_MASK ||
+ dpc_3_0->corr_slp > OTF_DPC3_0_SLP_MASK ||
+ dpc_3_0->corr_thr_min > OTF_DPC3_0_CORR_MASK ||
+ dpc_3_0->corr_thr_max > OTF_DPC3_0_CORR_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_otfdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_otfdpc *dpc_param = (struct vpfe_ipipe_otfdpc *)param;
+ struct vpfe_ipipe_otfdpc *otfdpc = &ipipe->config.otfdpc;
+ struct device *dev;
+
+ if (!param) {
+ memset(otfdpc, 0, sizeof(struct vpfe_ipipe_otfdpc));
+ goto success;
+ }
+ dev = ipipe->subdev.v4l2_dev->dev;
+ memcpy(otfdpc, dpc_param, sizeof(struct vpfe_ipipe_otfdpc));
+ if (ipipe_validate_otfdpc_params(otfdpc) < 0) {
+ dev_err(dev, "Invalid otfdpc params\n");
+ return -EINVAL;
+ }
+
+success:
+ ipipe_set_otfdpc_regs(ipipe->base_addr, otfdpc);
+
+ return 0;
+}
+
+static int ipipe_get_otfdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_otfdpc *dpc_param = (struct vpfe_ipipe_otfdpc *)param;
+ struct vpfe_ipipe_otfdpc *otfdpc = &ipipe->config.otfdpc;
+
+ memcpy(dpc_param, otfdpc, sizeof(struct vpfe_ipipe_otfdpc));
+ return 0;
+}
+
+static int ipipe_validate_nf_params(struct vpfe_ipipe_nf *nf_param)
+{
+ int i;
+
+ if (nf_param->en > 1 || nf_param->shft_val > D2F_SHFT_VAL_MASK ||
+ nf_param->spread_val > D2F_SPR_VAL_MASK ||
+ nf_param->apply_lsc_gain > 1 ||
+ nf_param->edge_det_min_thr > D2F_EDGE_DET_THR_MASK ||
+ nf_param->edge_det_max_thr > D2F_EDGE_DET_THR_MASK)
+ return -EINVAL;
+
+ for (i = 0; i < VPFE_IPIPE_NF_THR_TABLE_SIZE; i++)
+ if (nf_param->thr[i] > D2F_THR_VAL_MASK)
+ return -EINVAL;
+
+ for (i = 0; i < VPFE_IPIPE_NF_STR_TABLE_SIZE; i++)
+ if (nf_param->str[i] > D2F_STR_VAL_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_nf_params(struct vpfe_ipipe_device *ipipe,
+ unsigned int id, void *param)
+{
+ struct vpfe_ipipe_nf *nf_param = (struct vpfe_ipipe_nf *)param;
+ struct vpfe_ipipe_nf *nf = &ipipe->config.nf1;
+ struct device *dev;
+
+ if (id == IPIPE_D2F_2ND)
+ nf = &ipipe->config.nf2;
+
+ if (!nf_param) {
+ memset((void *)nf, 0, sizeof(struct vpfe_ipipe_nf));
+ goto success;
+ }
+
+ dev = ipipe->subdev.v4l2_dev->dev;
+ memcpy(nf, nf_param, sizeof(struct vpfe_ipipe_nf));
+ if (ipipe_validate_nf_params(nf) < 0) {
+ dev_err(dev, "Invalid nf params\n");
+ return -EINVAL;
+ }
+
+success:
+ ipipe_set_d2f_regs(ipipe->base_addr, id, nf);
+
+ return 0;
+}
+
+static int ipipe_set_nf1_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_set_nf_params(ipipe, IPIPE_D2F_1ST, param);
+}
+
+static int ipipe_set_nf2_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_set_nf_params(ipipe, IPIPE_D2F_2ND, param);
+}
+
+static int ipipe_get_nf_params(struct vpfe_ipipe_device *ipipe,
+ unsigned int id, void *param)
+{
+ struct vpfe_ipipe_nf *nf_param = (struct vpfe_ipipe_nf *)param;
+ struct vpfe_ipipe_nf *nf = &ipipe->config.nf1;
+
+ if (id == IPIPE_D2F_2ND)
+ nf = &ipipe->config.nf2;
+
+ memcpy(nf_param, nf, sizeof(struct vpfe_ipipe_nf));
+
+ return 0;
+}
+
+static int ipipe_get_nf1_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_get_nf_params(ipipe, IPIPE_D2F_1ST, param);
+}
+
+static int ipipe_get_nf2_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_get_nf_params(ipipe, IPIPE_D2F_2ND, param);
+}
+
+static int ipipe_validate_gic_params(struct vpfe_ipipe_gic *gic)
+{
+ if (gic->en > 1 || gic->gain > GIC_GAIN_MASK ||
+ gic->thr > GIC_THR_MASK || gic->slope > GIC_SLOPE_MASK ||
+ gic->apply_lsc_gain > 1 ||
+ gic->nf2_thr_gain.integer > GIC_NFGAN_INT_MASK ||
+ gic->nf2_thr_gain.decimal > GIC_NFGAN_DECI_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_gic_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_gic *gic_param = (struct vpfe_ipipe_gic *)param;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ struct vpfe_ipipe_gic *gic = &ipipe->config.gic;
+
+ if (!gic_param) {
+ memset((void *)gic, 0, sizeof(struct vpfe_ipipe_gic));
+ goto success;
+ }
+
+ memcpy(gic, gic_param, sizeof(struct vpfe_ipipe_gic));
+ if (ipipe_validate_gic_params(gic) < 0) {
+ dev_err(dev, "Invalid gic params\n");
+ return -EINVAL;
+ }
+
+success:
+ ipipe_set_gic_regs(ipipe->base_addr, gic);
+
+ return 0;
+}
+
+static int ipipe_get_gic_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_gic *gic_param = (struct vpfe_ipipe_gic *)param;
+ struct vpfe_ipipe_gic *gic = &ipipe->config.gic;
+
+ memcpy(gic_param, gic, sizeof(struct vpfe_ipipe_gic));
+
+ return 0;
+}
+
+static int ipipe_validate_wb_params(struct vpfe_ipipe_wb *wbal)
+{
+ if (wbal->ofst_r > WB_OFFSET_MASK ||
+ wbal->ofst_gr > WB_OFFSET_MASK ||
+ wbal->ofst_gb > WB_OFFSET_MASK ||
+ wbal->ofst_b > WB_OFFSET_MASK ||
+ wbal->gain_r.integer > WB_GAIN_INT_MASK ||
+ wbal->gain_r.decimal > WB_GAIN_DECI_MASK ||
+ wbal->gain_gr.integer > WB_GAIN_INT_MASK ||
+ wbal->gain_gr.decimal > WB_GAIN_DECI_MASK ||
+ wbal->gain_gb.integer > WB_GAIN_INT_MASK ||
+ wbal->gain_gb.decimal > WB_GAIN_DECI_MASK ||
+ wbal->gain_b.integer > WB_GAIN_INT_MASK ||
+ wbal->gain_b.decimal > WB_GAIN_DECI_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_wb_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_wb *wb_param = (struct vpfe_ipipe_wb *)param;
+ struct vpfe_ipipe_wb *wbal = &ipipe->config.wbal;
+
+ if (!wb_param) {
+ const struct vpfe_ipipe_wb wb_defaults = {
+ .gain_r = {2, 0x0},
+ .gain_gr = {2, 0x0},
+ .gain_gb = {2, 0x0},
+ .gain_b = {2, 0x0}
+ };
+ memcpy(wbal, &wb_defaults, sizeof(struct vpfe_ipipe_wb));
+ goto success;
+ }
+
+ memcpy(wbal, wb_param, sizeof(struct vpfe_ipipe_wb));
+ if (ipipe_validate_wb_params(wbal) < 0)
+ return -EINVAL;
+
+success:
+ ipipe_set_wb_regs(ipipe->base_addr, wbal);
+
+ return 0;
+}
+
+static int ipipe_get_wb_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_wb *wb_param = (struct vpfe_ipipe_wb *)param;
+ struct vpfe_ipipe_wb *wbal = &ipipe->config.wbal;
+
+ memcpy(wb_param, wbal, sizeof(struct vpfe_ipipe_wb));
+ return 0;
+}
+
+static int ipipe_validate_cfa_params(struct vpfe_ipipe_cfa *cfa)
+{
+ if (cfa->hpf_thr_2dir > CFA_HPF_THR_2DIR_MASK ||
+ cfa->hpf_slp_2dir > CFA_HPF_SLOPE_2DIR_MASK ||
+ cfa->hp_mix_thr_2dir > CFA_HPF_MIX_THR_2DIR_MASK ||
+ cfa->hp_mix_slope_2dir > CFA_HPF_MIX_SLP_2DIR_MASK ||
+ cfa->dir_thr_2dir > CFA_DIR_THR_2DIR_MASK ||
+ cfa->dir_slope_2dir > CFA_DIR_SLP_2DIR_MASK ||
+ cfa->nd_wt_2dir > CFA_ND_WT_2DIR_MASK ||
+ cfa->hue_fract_daa > CFA_DAA_HUE_FRA_MASK ||
+ cfa->edge_thr_daa > CFA_DAA_EDG_THR_MASK ||
+ cfa->thr_min_daa > CFA_DAA_THR_MIN_MASK ||
+ cfa->thr_slope_daa > CFA_DAA_THR_SLP_MASK ||
+ cfa->slope_min_daa > CFA_DAA_SLP_MIN_MASK ||
+ cfa->slope_slope_daa > CFA_DAA_SLP_SLP_MASK ||
+ cfa->lp_wt_daa > CFA_DAA_LP_WT_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_cfa_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_cfa *cfa_param = (struct vpfe_ipipe_cfa *)param;
+ struct vpfe_ipipe_cfa *cfa = &ipipe->config.cfa;
+
+ if (!cfa_param) {
+ memset(cfa, 0, sizeof(struct vpfe_ipipe_cfa));
+ cfa->alg = VPFE_IPIPE_CFA_ALG_2DIRAC;
+ goto success;
+ }
+
+ memcpy(cfa, cfa_param, sizeof(struct vpfe_ipipe_cfa));
+ if (ipipe_validate_cfa_params(cfa) < 0)
+ return -EINVAL;
+
+success:
+ ipipe_set_cfa_regs(ipipe->base_addr, cfa);
+
+ return 0;
+}
+
+static int ipipe_get_cfa_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_cfa *cfa_param = (struct vpfe_ipipe_cfa *)param;
+ struct vpfe_ipipe_cfa *cfa = &ipipe->config.cfa;
+
+ memcpy(cfa_param, cfa, sizeof(struct vpfe_ipipe_cfa));
+ return 0;
+}
+
+static int
+ipipe_validate_rgb2rgb_params(struct vpfe_ipipe_rgb2rgb *rgb2rgb,
+ unsigned int id)
+{
+ u32 gain_int_upper = RGB2RGB_1_GAIN_INT_MASK;
+ u32 offset_upper = RGB2RGB_1_OFST_MASK;
+
+ if (id == IPIPE_RGB2RGB_2) {
+ offset_upper = RGB2RGB_2_OFST_MASK;
+ gain_int_upper = RGB2RGB_2_GAIN_INT_MASK;
+ }
+
+ if (rgb2rgb->coef_rr.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_rr.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_gr.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_gr.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_br.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_br.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_rg.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_rg.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_gg.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_gg.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_bg.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_bg.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_rb.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_rb.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_gb.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_gb.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_bb.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_bb.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->out_ofst_r > offset_upper ||
+ rgb2rgb->out_ofst_g > offset_upper ||
+ rgb2rgb->out_ofst_b > offset_upper)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_rgb2rgb_params(struct vpfe_ipipe_device *ipipe,
+ unsigned int id, void *param)
+{
+ struct vpfe_ipipe_rgb2rgb *rgb2rgb = &ipipe->config.rgb2rgb1;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ struct vpfe_ipipe_rgb2rgb *rgb2rgb_param;
+
+ rgb2rgb_param = (struct vpfe_ipipe_rgb2rgb *)param;
+
+ if (id == IPIPE_RGB2RGB_2)
+ rgb2rgb = &ipipe->config.rgb2rgb2;
+
+ if (!rgb2rgb_param) {
+ const struct vpfe_ipipe_rgb2rgb rgb2rgb_defaults = {
+ .coef_rr = {1, 0}, /* 256 */
+ .coef_gr = {0, 0},
+ .coef_br = {0, 0},
+ .coef_rg = {0, 0},
+ .coef_gg = {1, 0}, /* 256 */
+ .coef_bg = {0, 0},
+ .coef_rb = {0, 0},
+ .coef_gb = {0, 0},
+ .coef_bb = {1, 0}, /* 256 */
+ };
+ /* Copy defaults for rgb2rgb conversion */
+ memcpy(rgb2rgb, &rgb2rgb_defaults,
+ sizeof(struct vpfe_ipipe_rgb2rgb));
+ goto success;
+ }
+
+ memcpy(rgb2rgb, rgb2rgb_param, sizeof(struct vpfe_ipipe_rgb2rgb));
+ if (ipipe_validate_rgb2rgb_params(rgb2rgb, id) < 0) {
+ dev_err(dev, "Invalid rgb2rgb params\n");
+ return -EINVAL;
+ }
+
+success:
+ ipipe_set_rgb2rgb_regs(ipipe->base_addr, id, rgb2rgb);
+
+ return 0;
+}
+
+static int
+ipipe_set_rgb2rgb_1_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_set_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_1, param);
+}
+
+static int
+ipipe_set_rgb2rgb_2_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_set_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_2, param);
+}
+
+static int ipipe_get_rgb2rgb_params(struct vpfe_ipipe_device *ipipe,
+ unsigned int id, void *param)
+{
+ struct vpfe_ipipe_rgb2rgb *rgb2rgb = &ipipe->config.rgb2rgb1;
+ struct vpfe_ipipe_rgb2rgb *rgb2rgb_param;
+
+ rgb2rgb_param = (struct vpfe_ipipe_rgb2rgb *)param;
+
+ if (id == IPIPE_RGB2RGB_2)
+ rgb2rgb = &ipipe->config.rgb2rgb2;
+
+ memcpy(rgb2rgb_param, rgb2rgb, sizeof(struct vpfe_ipipe_rgb2rgb));
+
+ return 0;
+}
+
+static int
+ipipe_get_rgb2rgb_1_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_get_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_1, param);
+}
+
+static int
+ipipe_get_rgb2rgb_2_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_get_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_2, param);
+}
+
+static int
+ipipe_validate_gamma_entry(struct vpfe_ipipe_gamma_entry *table, int size)
+{
+ int i;
+
+ if (!table)
+ return -EINVAL;
+
+ for (i = 0; i < size; i++)
+ if (table[i].slope > GAMMA_MASK ||
+ table[i].offset > GAMMA_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+ipipe_validate_gamma_params(struct vpfe_ipipe_gamma *gamma, struct device *dev)
+{
+ int table_size;
+ int err;
+
+ if (gamma->bypass_r > 1 ||
+ gamma->bypass_b > 1 ||
+ gamma->bypass_g > 1)
+ return -EINVAL;
+
+ if (gamma->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
+ return 0;
+
+ table_size = gamma->tbl_size;
+ if (!gamma->bypass_r) {
+ err = ipipe_validate_gamma_entry(gamma->table_r, table_size);
+ if (err) {
+ dev_err(dev, "GAMMA R - table entry invalid\n");
+ return err;
+ }
+ }
+
+ if (!gamma->bypass_b) {
+ err = ipipe_validate_gamma_entry(gamma->table_b, table_size);
+ if (err) {
+ dev_err(dev, "GAMMA B - table entry invalid\n");
+ return err;
+ }
+ }
+
+ if (!gamma->bypass_g) {
+ err = ipipe_validate_gamma_entry(gamma->table_g, table_size);
+ if (err) {
+ dev_err(dev, "GAMMA G - table entry invalid\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+ipipe_set_gamma_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_gamma *gamma_param = (struct vpfe_ipipe_gamma *)param;
+ struct vpfe_ipipe_gamma *gamma = &ipipe->config.gamma;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ int table_size;
+
+ if (!gamma_param) {
+ memset(gamma, 0, sizeof(struct vpfe_ipipe_gamma));
+ gamma->tbl_sel = VPFE_IPIPE_GAMMA_TBL_ROM;
+ goto success;
+ }
+
+ gamma->bypass_r = gamma_param->bypass_r;
+ gamma->bypass_b = gamma_param->bypass_b;
+ gamma->bypass_g = gamma_param->bypass_g;
+ gamma->tbl_sel = gamma_param->tbl_sel;
+ gamma->tbl_size = gamma_param->tbl_size;
+ if (gamma->tbl_size > VPFE_IPIPE_MAX_SIZE_GAMMA)
+ return -EINVAL;
+
+ if (ipipe_validate_gamma_params(gamma, dev) < 0)
+ return -EINVAL;
+
+ if (gamma_param->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
+ goto success;
+
+ table_size = gamma->tbl_size;
+ if (!gamma_param->bypass_r)
+ memcpy(&gamma->table_r, &gamma_param->table_r,
+ (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
+
+ if (!gamma_param->bypass_b)
+ memcpy(&gamma->table_b, &gamma_param->table_b,
+ (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
+
+ if (!gamma_param->bypass_g)
+ memcpy(&gamma->table_g, &gamma_param->table_g,
+ (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
+
+success:
+ ipipe_set_gamma_regs(ipipe->base_addr, ipipe->isp5_base_addr, gamma);
+
+ return 0;
+}
+
+static int ipipe_get_gamma_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_gamma *gamma_param = (struct vpfe_ipipe_gamma *)param;
+ struct vpfe_ipipe_gamma *gamma = &ipipe->config.gamma;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ int table_size;
+
+ gamma_param->bypass_r = gamma->bypass_r;
+ gamma_param->bypass_g = gamma->bypass_g;
+ gamma_param->bypass_b = gamma->bypass_b;
+ gamma_param->tbl_sel = gamma->tbl_sel;
+ gamma_param->tbl_size = gamma->tbl_size;
+
+ if (gamma->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
+ return 0;
+
+ table_size = gamma->tbl_size;
+
+ if (!gamma->bypass_r && !gamma_param->table_r) {
+ dev_err(dev,
+ "ipipe_get_gamma_params: table ptr empty for R\n");
+ return -EINVAL;
+ }
+ memcpy(gamma_param->table_r, gamma->table_r,
+ (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
+
+ if (!gamma->bypass_g && !gamma_param->table_g) {
+ dev_err(dev, "ipipe_get_gamma_params: table ptr empty for G\n");
+ return -EINVAL;
+ }
+ memcpy(gamma_param->table_g, gamma->table_g,
+ (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
+
+ if (!gamma->bypass_b && !gamma_param->table_b) {
+ dev_err(dev, "ipipe_get_gamma_params: table ptr empty for B\n");
+ return -EINVAL;
+ }
+ memcpy(gamma_param->table_b, gamma->table_b,
+ (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
+
+ return 0;
+}
+
+static int ipipe_validate_3d_lut_params(struct vpfe_ipipe_3d_lut *lut)
+{
+ int i;
+
+ if (!lut->en)
+ return 0;
+
+ for (i = 0; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++)
+ if (lut->table[i].r > D3_LUT_ENTRY_MASK ||
+ lut->table[i].g > D3_LUT_ENTRY_MASK ||
+ lut->table[i].b > D3_LUT_ENTRY_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_get_3d_lut_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_3d_lut *lut_param = (struct vpfe_ipipe_3d_lut *)param;
+ struct vpfe_ipipe_3d_lut *lut = &ipipe->config.lut;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+
+ lut_param->en = lut->en;
+ if (!lut_param->table) {
+ dev_err(dev, "ipipe_get_3d_lut_params: Invalid table ptr\n");
+ return -EINVAL;
+ }
+
+ memcpy(lut_param->table, &lut->table,
+ (VPFE_IPIPE_MAX_SIZE_3D_LUT *
+ sizeof(struct vpfe_ipipe_3d_lut_entry)));
+
+ return 0;
+}
+
+static int
+ipipe_set_3d_lut_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_3d_lut *lut_param = (struct vpfe_ipipe_3d_lut *)param;
+ struct vpfe_ipipe_3d_lut *lut = &ipipe->config.lut;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+
+ if (!lut_param) {
+ memset(lut, 0, sizeof(struct vpfe_ipipe_3d_lut));
+ goto success;
+ }
+
+ memcpy(lut, lut_param, sizeof(struct vpfe_ipipe_3d_lut));
+ if (ipipe_validate_3d_lut_params(lut) < 0) {
+ dev_err(dev, "Invalid 3D-LUT Params\n");
+ return -EINVAL;
+ }
+
+success:
+ ipipe_set_3d_lut_regs(ipipe->base_addr, ipipe->isp5_base_addr, lut);
+
+ return 0;
+}
+
+static int ipipe_validate_rgb2yuv_params(struct vpfe_ipipe_rgb2yuv *rgb2yuv)
+{
+ if (rgb2yuv->coef_ry.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_ry.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_gy.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_gy.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_by.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_by.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_rcb.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_rcb.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_gcb.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_gcb.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_bcb.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_bcb.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_rcr.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_rcr.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_gcr.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_gcr.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_bcr.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_bcr.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->out_ofst_y > RGB2YCBCR_OFST_MASK ||
+ rgb2yuv->out_ofst_cb > RGB2YCBCR_OFST_MASK ||
+ rgb2yuv->out_ofst_cr > RGB2YCBCR_OFST_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+ipipe_set_rgb2yuv_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_rgb2yuv *rgb2yuv = &ipipe->config.rgb2yuv;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ struct vpfe_ipipe_rgb2yuv *rgb2yuv_param;
+
+ rgb2yuv_param = (struct vpfe_ipipe_rgb2yuv *)param;
+ if (!rgb2yuv_param) {
+ /* Defaults for rgb2yuv conversion */
+ const struct vpfe_ipipe_rgb2yuv rgb2yuv_defaults = {
+ .coef_ry = {0, 0x4d},
+ .coef_gy = {0, 0x96},
+ .coef_by = {0, 0x1d},
+ .coef_rcb = {0xf, 0xd5},
+ .coef_gcb = {0xf, 0xab},
+ .coef_bcb = {0, 0x80},
+ .coef_rcr = {0, 0x80},
+ .coef_gcr = {0xf, 0x95},
+ .coef_bcr = {0xf, 0xeb},
+ .out_ofst_cb = 0x80,
+ .out_ofst_cr = 0x80,
+ };
+ /* Copy defaults for rgb2yuv conversion */
+ memcpy(rgb2yuv, &rgb2yuv_defaults,
+ sizeof(struct vpfe_ipipe_rgb2yuv));
+ goto success;
+ }
+
+ memcpy(rgb2yuv, rgb2yuv_param, sizeof(struct vpfe_ipipe_rgb2yuv));
+ if (ipipe_validate_rgb2yuv_params(rgb2yuv) < 0) {
+ dev_err(dev, "Invalid rgb2yuv params\n");
+ return -EINVAL;
+ }
+
+success:
+ ipipe_set_rgb2ycbcr_regs(ipipe->base_addr, rgb2yuv);
+
+ return 0;
+}
+
+static int
+ipipe_get_rgb2yuv_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_rgb2yuv *rgb2yuv = &ipipe->config.rgb2yuv;
+ struct vpfe_ipipe_rgb2yuv *rgb2yuv_param;
+
+ rgb2yuv_param = (struct vpfe_ipipe_rgb2yuv *)param;
+ memcpy(rgb2yuv_param, rgb2yuv, sizeof(struct vpfe_ipipe_rgb2yuv));
+ return 0;
+}
+
+static int ipipe_validate_gbce_params(struct vpfe_ipipe_gbce *gbce)
+{
+ u32 max = GBCE_Y_VAL_MASK;
+ int i;
+
+ if (!gbce->en)
+ return 0;
+
+ if (gbce->type == VPFE_IPIPE_GBCE_GAIN_TBL)
+ max = GBCE_GAIN_VAL_MASK;
+
+ for (i = 0; i < VPFE_IPIPE_MAX_SIZE_GBCE_LUT; i++)
+ if (gbce->table[i] > max)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_gbce_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_gbce *gbce_param = (struct vpfe_ipipe_gbce *)param;
+ struct vpfe_ipipe_gbce *gbce = &ipipe->config.gbce;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+
+ if (!gbce_param) {
+ memset(gbce, 0, sizeof(struct vpfe_ipipe_gbce));
+ } else {
+ memcpy(gbce, gbce_param, sizeof(struct vpfe_ipipe_gbce));
+ if (ipipe_validate_gbce_params(gbce) < 0) {
+ dev_err(dev, "Invalid gbce params\n");
+ return -EINVAL;
+ }
+ }
+
+ ipipe_set_gbce_regs(ipipe->base_addr, ipipe->isp5_base_addr, gbce);
+
+ return 0;
+}
+
+static int ipipe_get_gbce_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_gbce *gbce_param = (struct vpfe_ipipe_gbce *)param;
+ struct vpfe_ipipe_gbce *gbce = &ipipe->config.gbce;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+
+ gbce_param->en = gbce->en;
+ gbce_param->type = gbce->type;
+ if (!gbce_param->table) {
+ dev_err(dev, "ipipe_get_gbce_params: Invalid table ptr\n");
+ return -EINVAL;
+ }
+
+ memcpy(gbce_param->table, gbce->table,
+ (VPFE_IPIPE_MAX_SIZE_GBCE_LUT * sizeof(unsigned short)));
+
+ return 0;
+}
+
+static int
+ipipe_validate_yuv422_conv_params(struct vpfe_ipipe_yuv422_conv *yuv422_conv)
+{
+ if (yuv422_conv->en_chrom_lpf > 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+ipipe_set_yuv422_conv_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_yuv422_conv *yuv422_conv = &ipipe->config.yuv422_conv;
+ struct vpfe_ipipe_yuv422_conv *yuv422_conv_param;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+
+ yuv422_conv_param = (struct vpfe_ipipe_yuv422_conv *)param;
+ if (!yuv422_conv_param) {
+ memset(yuv422_conv, 0, sizeof(struct vpfe_ipipe_yuv422_conv));
+ yuv422_conv->chrom_pos = VPFE_IPIPE_YUV422_CHR_POS_COSITE;
+ } else {
+ memcpy(yuv422_conv, yuv422_conv_param,
+ sizeof(struct vpfe_ipipe_yuv422_conv));
+ if (ipipe_validate_yuv422_conv_params(yuv422_conv) < 0) {
+ dev_err(dev, "Invalid yuv422 params\n");
+ return -EINVAL;
+ }
+ }
+
+ ipipe_set_yuv422_conv_regs(ipipe->base_addr, yuv422_conv);
+
+ return 0;
+}
+
+static int
+ipipe_get_yuv422_conv_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_yuv422_conv *yuv422_conv = &ipipe->config.yuv422_conv;
+ struct vpfe_ipipe_yuv422_conv *yuv422_conv_param;
+
+ yuv422_conv_param = (struct vpfe_ipipe_yuv422_conv *)param;
+ memcpy(yuv422_conv_param, yuv422_conv,
+ sizeof(struct vpfe_ipipe_yuv422_conv));
+
+ return 0;
+}
+
+static int ipipe_validate_yee_params(struct vpfe_ipipe_yee *yee)
+{
+ int i;
+
+ if (yee->en > 1 ||
+ yee->en_halo_red > 1 ||
+ yee->hpf_shft > YEE_HPF_SHIFT_MASK)
+ return -EINVAL;
+
+ if (yee->hpf_coef_00 > YEE_COEF_MASK ||
+ yee->hpf_coef_01 > YEE_COEF_MASK ||
+ yee->hpf_coef_02 > YEE_COEF_MASK ||
+ yee->hpf_coef_10 > YEE_COEF_MASK ||
+ yee->hpf_coef_11 > YEE_COEF_MASK ||
+ yee->hpf_coef_12 > YEE_COEF_MASK ||
+ yee->hpf_coef_20 > YEE_COEF_MASK ||
+ yee->hpf_coef_21 > YEE_COEF_MASK ||
+ yee->hpf_coef_22 > YEE_COEF_MASK)
+ return -EINVAL;
+
+ if (yee->yee_thr > YEE_THR_MASK ||
+ yee->es_gain > YEE_ES_GAIN_MASK ||
+ yee->es_thr1 > YEE_ES_THR1_MASK ||
+ yee->es_thr2 > YEE_THR_MASK ||
+ yee->es_gain_grad > YEE_THR_MASK ||
+ yee->es_ofst_grad > YEE_THR_MASK)
+ return -EINVAL;
+
+ for (i = 0; i < VPFE_IPIPE_MAX_SIZE_YEE_LUT; i++)
+ if (yee->table[i] > YEE_ENTRY_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_yee_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_yee *yee_param = (struct vpfe_ipipe_yee *)param;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ struct vpfe_ipipe_yee *yee = &ipipe->config.yee;
+
+ if (!yee_param) {
+ memset(yee, 0, sizeof(struct vpfe_ipipe_yee));
+ } else {
+ memcpy(yee, yee_param, sizeof(struct vpfe_ipipe_yee));
+ if (ipipe_validate_yee_params(yee) < 0) {
+ dev_err(dev, "Invalid yee params\n");
+ return -EINVAL;
+ }
+ }
+
+ ipipe_set_ee_regs(ipipe->base_addr, ipipe->isp5_base_addr, yee);
+
+ return 0;
+}
+
+static int ipipe_get_yee_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_yee *yee_param = (struct vpfe_ipipe_yee *)param;
+ struct vpfe_ipipe_yee *yee = &ipipe->config.yee;
+
+ yee_param->en = yee->en;
+ yee_param->en_halo_red = yee->en_halo_red;
+ yee_param->merge_meth = yee->merge_meth;
+ yee_param->hpf_shft = yee->hpf_shft;
+ yee_param->hpf_coef_00 = yee->hpf_coef_00;
+ yee_param->hpf_coef_01 = yee->hpf_coef_01;
+ yee_param->hpf_coef_02 = yee->hpf_coef_02;
+ yee_param->hpf_coef_10 = yee->hpf_coef_10;
+ yee_param->hpf_coef_11 = yee->hpf_coef_11;
+ yee_param->hpf_coef_12 = yee->hpf_coef_12;
+ yee_param->hpf_coef_20 = yee->hpf_coef_20;
+ yee_param->hpf_coef_21 = yee->hpf_coef_21;
+ yee_param->hpf_coef_22 = yee->hpf_coef_22;
+ yee_param->yee_thr = yee->yee_thr;
+ yee_param->es_gain = yee->es_gain;
+ yee_param->es_thr1 = yee->es_thr1;
+ yee_param->es_thr2 = yee->es_thr2;
+ yee_param->es_gain_grad = yee->es_gain_grad;
+ yee_param->es_ofst_grad = yee->es_ofst_grad;
+ memcpy(yee_param->table, &yee->table,
+ (VPFE_IPIPE_MAX_SIZE_YEE_LUT * sizeof(short)));
+
+ return 0;
+}
+
+static int ipipe_validate_car_params(struct vpfe_ipipe_car *car)
+{
+ if (car->en > 1 || car->hpf_shft > CAR_HPF_SHIFT_MASK ||
+ car->gain1.shft > CAR_GAIN1_SHFT_MASK ||
+ car->gain1.gain_min > CAR_GAIN_MIN_MASK ||
+ car->gain2.shft > CAR_GAIN2_SHFT_MASK ||
+ car->gain2.gain_min > CAR_GAIN_MIN_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_car_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_car *car_param = (struct vpfe_ipipe_car *)param;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ struct vpfe_ipipe_car *car = &ipipe->config.car;
+
+ if (!car_param) {
+ memset(car, 0, sizeof(struct vpfe_ipipe_car));
+ } else {
+ memcpy(car, car_param, sizeof(struct vpfe_ipipe_car));
+ if (ipipe_validate_car_params(car) < 0) {
+ dev_err(dev, "Invalid car params\n");
+ return -EINVAL;
+ }
+ }
+
+ ipipe_set_car_regs(ipipe->base_addr, car);
+
+ return 0;
+}
+
+static int ipipe_get_car_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_car *car_param = (struct vpfe_ipipe_car *)param;
+ struct vpfe_ipipe_car *car = &ipipe->config.car;
+
+ memcpy(car_param, car, sizeof(struct vpfe_ipipe_car));
+ return 0;
+}
+
+static int ipipe_validate_cgs_params(struct vpfe_ipipe_cgs *cgs)
+{
+ if (cgs->en > 1 || cgs->h_shft > CAR_SHIFT_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_cgs_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_cgs *cgs_param = (struct vpfe_ipipe_cgs *)param;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ struct vpfe_ipipe_cgs *cgs = &ipipe->config.cgs;
+
+ if (!cgs_param) {
+ memset(cgs, 0, sizeof(struct vpfe_ipipe_cgs));
+ } else {
+ memcpy(cgs, cgs_param, sizeof(struct vpfe_ipipe_cgs));
+ if (ipipe_validate_cgs_params(cgs) < 0) {
+ dev_err(dev, "Invalid cgs params\n");
+ return -EINVAL;
+ }
+ }
+
+ ipipe_set_cgs_regs(ipipe->base_addr, cgs);
+
+ return 0;
+}
+
+static int ipipe_get_cgs_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_cgs *cgs_param = (struct vpfe_ipipe_cgs *)param;
+ struct vpfe_ipipe_cgs *cgs = &ipipe->config.cgs;
+
+ memcpy(cgs_param, cgs, sizeof(struct vpfe_ipipe_cgs));
+
+ return 0;
+}
+
+static const struct ipipe_module_if ipipe_modules[VPFE_IPIPE_MAX_MODULES] = {
+ /* VPFE_IPIPE_INPUT_CONFIG */ {
+ offsetof(struct ipipe_module_params, input_config),
+ FIELD_SIZEOF(struct ipipe_module_params, input_config),
+ offsetof(struct vpfe_ipipe_config, input_config),
+ ipipe_set_input_config,
+ ipipe_get_input_config,
+ }, /* VPFE_IPIPE_LUTDPC */ {
+ offsetof(struct ipipe_module_params, lutdpc),
+ FIELD_SIZEOF(struct ipipe_module_params, lutdpc),
+ offsetof(struct vpfe_ipipe_config, lutdpc),
+ ipipe_set_lutdpc_params,
+ ipipe_get_lutdpc_params,
+ }, /* VPFE_IPIPE_OTFDPC */ {
+ offsetof(struct ipipe_module_params, otfdpc),
+ FIELD_SIZEOF(struct ipipe_module_params, otfdpc),
+ offsetof(struct vpfe_ipipe_config, otfdpc),
+ ipipe_set_otfdpc_params,
+ ipipe_get_otfdpc_params,
+ }, /* VPFE_IPIPE_NF1 */ {
+ offsetof(struct ipipe_module_params, nf1),
+ FIELD_SIZEOF(struct ipipe_module_params, nf1),
+ offsetof(struct vpfe_ipipe_config, nf1),
+ ipipe_set_nf1_params,
+ ipipe_get_nf1_params,
+ }, /* VPFE_IPIPE_NF2 */ {
+ offsetof(struct ipipe_module_params, nf2),
+ FIELD_SIZEOF(struct ipipe_module_params, nf2),
+ offsetof(struct vpfe_ipipe_config, nf2),
+ ipipe_set_nf2_params,
+ ipipe_get_nf2_params,
+ }, /* VPFE_IPIPE_WB */ {
+ offsetof(struct ipipe_module_params, wbal),
+ FIELD_SIZEOF(struct ipipe_module_params, wbal),
+ offsetof(struct vpfe_ipipe_config, wbal),
+ ipipe_set_wb_params,
+ ipipe_get_wb_params,
+ }, /* VPFE_IPIPE_RGB2RGB_1 */ {
+ offsetof(struct ipipe_module_params, rgb2rgb1),
+ FIELD_SIZEOF(struct ipipe_module_params, rgb2rgb1),
+ offsetof(struct vpfe_ipipe_config, rgb2rgb1),
+ ipipe_set_rgb2rgb_1_params,
+ ipipe_get_rgb2rgb_1_params,
+ }, /* VPFE_IPIPE_RGB2RGB_2 */ {
+ offsetof(struct ipipe_module_params, rgb2rgb2),
+ FIELD_SIZEOF(struct ipipe_module_params, rgb2rgb2),
+ offsetof(struct vpfe_ipipe_config, rgb2rgb2),
+ ipipe_set_rgb2rgb_2_params,
+ ipipe_get_rgb2rgb_2_params,
+ }, /* VPFE_IPIPE_GAMMA */ {
+ offsetof(struct ipipe_module_params, gamma),
+ FIELD_SIZEOF(struct ipipe_module_params, gamma),
+ offsetof(struct vpfe_ipipe_config, gamma),
+ ipipe_set_gamma_params,
+ ipipe_get_gamma_params,
+ }, /* VPFE_IPIPE_3D_LUT */ {
+ offsetof(struct ipipe_module_params, lut),
+ FIELD_SIZEOF(struct ipipe_module_params, lut),
+ offsetof(struct vpfe_ipipe_config, lut),
+ ipipe_set_3d_lut_params,
+ ipipe_get_3d_lut_params,
+ }, /* VPFE_IPIPE_RGB2YUV */ {
+ offsetof(struct ipipe_module_params, rgb2yuv),
+ FIELD_SIZEOF(struct ipipe_module_params, rgb2yuv),
+ offsetof(struct vpfe_ipipe_config, rgb2yuv),
+ ipipe_set_rgb2yuv_params,
+ ipipe_get_rgb2yuv_params,
+ }, /* VPFE_IPIPE_YUV422_CONV */ {
+ offsetof(struct ipipe_module_params, yuv422_conv),
+ FIELD_SIZEOF(struct ipipe_module_params, yuv422_conv),
+ offsetof(struct vpfe_ipipe_config, yuv422_conv),
+ ipipe_set_yuv422_conv_params,
+ ipipe_get_yuv422_conv_params,
+ }, /* VPFE_IPIPE_YEE */ {
+ offsetof(struct ipipe_module_params, yee),
+ FIELD_SIZEOF(struct ipipe_module_params, yee),
+ offsetof(struct vpfe_ipipe_config, yee),
+ ipipe_set_yee_params,
+ ipipe_get_yee_params,
+ }, /* VPFE_IPIPE_GIC */ {
+ offsetof(struct ipipe_module_params, gic),
+ FIELD_SIZEOF(struct ipipe_module_params, gic),
+ offsetof(struct vpfe_ipipe_config, gic),
+ ipipe_set_gic_params,
+ ipipe_get_gic_params,
+ }, /* VPFE_IPIPE_CFA */ {
+ offsetof(struct ipipe_module_params, cfa),
+ FIELD_SIZEOF(struct ipipe_module_params, cfa),
+ offsetof(struct vpfe_ipipe_config, cfa),
+ ipipe_set_cfa_params,
+ ipipe_get_cfa_params,
+ }, /* VPFE_IPIPE_CAR */ {
+ offsetof(struct ipipe_module_params, car),
+ FIELD_SIZEOF(struct ipipe_module_params, car),
+ offsetof(struct vpfe_ipipe_config, car),
+ ipipe_set_car_params,
+ ipipe_get_car_params,
+ }, /* VPFE_IPIPE_CGS */ {
+ offsetof(struct ipipe_module_params, cgs),
+ FIELD_SIZEOF(struct ipipe_module_params, cgs),
+ offsetof(struct vpfe_ipipe_config, cgs),
+ ipipe_set_cgs_params,
+ ipipe_get_cgs_params,
+ }, /* VPFE_IPIPE_GBCE */ {
+ offsetof(struct ipipe_module_params, gbce),
+ FIELD_SIZEOF(struct ipipe_module_params, gbce),
+ offsetof(struct vpfe_ipipe_config, gbce),
+ ipipe_set_gbce_params,
+ ipipe_get_gbce_params,
+ },
+};
+
+static int ipipe_s_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
+{
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ unsigned int i;
+ int rval = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ipipe_modules); i++) {
+ unsigned int bit = 1 << i;
+ if (cfg->flag & bit) {
+ const struct ipipe_module_if *module_if =
+ &ipipe_modules[i];
+ struct ipipe_module_params *params;
+ void __user *from = *(void * __user *)
+ ((void *)cfg + module_if->config_offset);
+ size_t size;
+ void *to;
+
+ params = kmalloc(sizeof(struct ipipe_module_params),
+ GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+ to = (void *)params + module_if->param_offset;
+ size = module_if->param_size;
+
+ if (to && from && size) {
+ if (copy_from_user(to, from, size)) {
+ rval = -EFAULT;
+ kfree(params);
+ break;
+ }
+ rval = module_if->set(ipipe, to);
+ if (rval) {
+ kfree(params);
+ goto error;
+ }
+ } else if (to && !from && size) {
+ rval = module_if->set(ipipe, NULL);
+ if (rval) {
+ kfree(params);
+ goto error;
+ }
+ }
+ kfree(params);
+ }
+ }
+error:
+ return rval;
+}
+
+static int ipipe_g_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
+{
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ unsigned int i;
+ int rval = 0;
+
+ for (i = 1; i < ARRAY_SIZE(ipipe_modules); i++) {
+ unsigned int bit = 1 << i;
+ if (cfg->flag & bit) {
+ const struct ipipe_module_if *module_if =
+ &ipipe_modules[i];
+ struct ipipe_module_params *params;
+ void __user *to = *(void * __user *)
+ ((void *)cfg + module_if->config_offset);
+ size_t size;
+ void *from;
+
+ params = kmalloc(sizeof(struct ipipe_module_params),
+ GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+ from = (void *)params + module_if->param_offset;
+ size = module_if->param_size;
+
+ if (to && from && size) {
+ rval = module_if->get(ipipe, from);
+ if (rval) {
+ kfree(params);
+ goto error;
+ }
+ if (copy_to_user(to, from, size)) {
+ rval = -EFAULT;
+ kfree(params);
+ break;
+ }
+ }
+ kfree(params);
+ }
+ }
+error:
+ return rval;
+}
+
+/*
+ * ipipe_ioctl() - Handle ipipe module private ioctl's
+ * @sd: pointer to v4l2 subdev structure
+ * @cmd: configuration command
+ * @arg: configuration argument
+ */
+static long ipipe_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case VIDIOC_VPFE_IPIPE_S_CONFIG:
+ ret = ipipe_s_config(sd, arg);
+ break;
+
+ case VIDIOC_VPFE_IPIPE_G_CONFIG:
+ ret = ipipe_g_config(sd, arg);
+ break;
+
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+ return ret;
+}
+
+void vpfe_ipipe_enable(struct vpfe_device *vpfe_dev, int en)
+{
+ struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
+ struct vpfe_ipipe_device *ipipe = &vpfe_dev->vpfe_ipipe;
+ unsigned char val;
+
+ if (ipipe->input == IPIPE_INPUT_NONE)
+ return;
+
+ /* ipipe is set to single shot */
+ if (ipipeif->input == IPIPEIF_INPUT_MEMORY && en) {
+ /* for single-shot mode, need to wait for h/w to
+ * reset many register bits
+ */
+ do {
+ val = regr_ip(vpfe_dev->vpfe_ipipe.base_addr,
+ IPIPE_SRC_EN);
+ } while (val);
+ }
+ regw_ip(vpfe_dev->vpfe_ipipe.base_addr, en, IPIPE_SRC_EN);
+}
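The wait loop above spins for as long as the hardware keeps IPIPE_SRC_EN set, with no upper bound. Purely as a sketch (the 10 ms budget, the pr_warn message and the use of time_after()/cpu_relax() are the editor's suggestion, not part of this patch), a bounded variant could look like:

unsigned long timeout = jiffies + msecs_to_jiffies(10);

/* same wait as above, but give up after ~10 ms instead of spinning forever */
while (regr_ip(vpfe_dev->vpfe_ipipe.base_addr, IPIPE_SRC_EN)) {
        if (time_after(jiffies, timeout)) {
                pr_warn("vpfe ipipe: timeout waiting for IPIPE_SRC_EN to clear\n");
                break;
        }
        cpu_relax();
}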
+
+/*
+ * ipipe_set_stream() - Enable/Disable streaming on the ipipe subdevice
+ * @sd: pointer to v4l2 subdev structure
+ * @enable: 1 == Enable, 0 == Disable
+ */
+static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct vpfe_device *vpfe_dev = to_vpfe_device(ipipe);
+
+ if (enable && ipipe->input != IPIPE_INPUT_NONE &&
+ ipipe->output != IPIPE_OUTPUT_NONE) {
+ if (config_ipipe_hw(ipipe) < 0)
+ return -EINVAL;
+ }
+
+ vpfe_ipipe_enable(vpfe_dev, enable);
+
+ return 0;
+}
+
+/*
+ * __ipipe_get_format() - helper function for getting ipipe format
+ * @ipipe: pointer to ipipe private structure.
+ * @pad: pad number.
+ * @fh: V4L2 subdev file handle.
+ * @which: wanted subdev format.
+ *
+ */
+static struct v4l2_mbus_framefmt *
+__ipipe_get_format(struct vpfe_ipipe_device *ipipe,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+
+ return &ipipe->formats[pad];
+}
+
+/*
+ * ipipe_try_format() - Handle try format by pad subdev method
+ * @ipipe: VPFE ipipe device.
+ * @fh: V4L2 subdev file handle.
+ * @pad: pad num.
+ * @fmt: pointer to v4l2 format structure.
+ * @which : wanted subdev format
+ */
+static void
+ipipe_try_format(struct vpfe_ipipe_device *ipipe,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ unsigned int max_out_height;
+ unsigned int max_out_width;
+ unsigned int i;
+
+ max_out_width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+
+ if (pad == IPIPE_PAD_SINK) {
+ for (i = 0; i < ARRAY_SIZE(ipipe_input_fmts); i++)
+ if (fmt->code == ipipe_input_fmts[i])
+ break;
+
+		/* If not found, use SGRBG12 as default */
+ if (i >= ARRAY_SIZE(ipipe_input_fmts))
+ fmt->code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ } else if (pad == IPIPE_PAD_SOURCE) {
+ for (i = 0; i < ARRAY_SIZE(ipipe_output_fmts); i++)
+ if (fmt->code == ipipe_output_fmts[i])
+ break;
+
+ /* If not found, use UYVY as default */
+ if (i >= ARRAY_SIZE(ipipe_output_fmts))
+ fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ }
+
+	fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH, max_out_width);
+	fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT, max_out_height);
+}
+
+/*
+ * ipipe_set_format() - Handle set format by pads subdev method
+ * @sd: pointer to v4l2 subdev structure
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int
+ipipe_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ipipe_get_format(ipipe, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ipipe_try_format(ipipe, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ if (fmt->pad == IPIPE_PAD_SINK &&
+ (ipipe->input == IPIPE_INPUT_CCDC ||
+ ipipe->input == IPIPE_INPUT_MEMORY))
+ ipipe->formats[fmt->pad] = fmt->format;
+ else if (fmt->pad == IPIPE_PAD_SOURCE &&
+ ipipe->output == IPIPE_OUTPUT_RESIZER)
+ ipipe->formats[fmt->pad] = fmt->format;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * ipipe_get_format() - Handle get format by pads subdev method.
+ * @sd: pointer to v4l2 subdev structure.
+ * @fh: V4L2 subdev file handle.
+ * @fmt: pointer to v4l2 subdev format structure.
+ */
+static int
+ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ fmt->format = ipipe->formats[fmt->pad];
+ else
+ fmt->format = *(v4l2_subdev_get_try_format(fh, fmt->pad));
+
+ return 0;
+}
+
+/*
+ * ipipe_enum_frame_size() - enum frame sizes on pads
+ * @sd: pointer to v4l2 subdev structure.
+ * @fh: V4L2 subdev file handle.
+ * @fse: pointer to v4l2_subdev_frame_size_enum structure.
+ */
+static int
+ipipe_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
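+	/*
+	 * Probe the limits through the try-format logic: a 1x1 request is
+	 * clamped up to the minimum supported size, and a request of -1
+	 * (which wraps to the largest u32 value) is clamped down to the
+	 * maximum supported size.
+	 */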
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ipipe_try_format(ipipe, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ipipe_try_format(ipipe, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * ipipe_enum_mbus_code() - enum mbus codes for pads
+ * @sd: pointer to v4l2 subdev structure.
+ * @fh: V4L2 subdev file handle
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ */
+static int
+ipipe_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ switch (code->pad) {
+ case IPIPE_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(ipipe_input_fmts))
+ return -EINVAL;
+ code->code = ipipe_input_fmts[code->index];
+ break;
+
+ case IPIPE_PAD_SOURCE:
+ if (code->index >= ARRAY_SIZE(ipipe_output_fmts))
+ return -EINVAL;
+ code->code = ipipe_output_fmts[code->index];
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * ipipe_s_ctrl() - Handle set control subdev method
+ * @ctrl: pointer to v4l2 control structure
+ */
+static int ipipe_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpfe_ipipe_device *ipipe =
+ container_of(ctrl->handler, struct vpfe_ipipe_device, ctrls);
+ struct ipipe_lum_adj *lum_adj = &ipipe->config.lum_adj;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ lum_adj->brightness = ctrl->val;
+ ipipe_set_lum_adj_regs(ipipe->base_addr, lum_adj);
+ break;
+
+ case V4L2_CID_CONTRAST:
+ lum_adj->contrast = ctrl->val;
+ ipipe_set_lum_adj_regs(ipipe->base_addr, lum_adj);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * ipipe_init_formats() - Initialize formats on all pads
+ * @sd: pointer to v4l2 subdev structure.
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int
+ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = IPIPE_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+ ipipe_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = IPIPE_PAD_SOURCE;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+ ipipe_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/* subdev core operations */
+static const struct v4l2_subdev_core_ops ipipe_v4l2_core_ops = {
+ .ioctl = ipipe_ioctl,
+};
+
+static const struct v4l2_ctrl_ops ipipe_ctrl_ops = {
+ .s_ctrl = ipipe_s_ctrl,
+};
+
+/* subdev file operations */
+static const struct v4l2_subdev_internal_ops ipipe_v4l2_internal_ops = {
+ .open = ipipe_init_formats,
+};
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops ipipe_v4l2_video_ops = {
+ .s_stream = ipipe_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops ipipe_v4l2_pad_ops = {
+ .enum_mbus_code = ipipe_enum_mbus_code,
+ .enum_frame_size = ipipe_enum_frame_size,
+ .get_fmt = ipipe_get_format,
+ .set_fmt = ipipe_set_format,
+};
+
+/* v4l2 subdev operation */
+static const struct v4l2_subdev_ops ipipe_v4l2_ops = {
+ .core = &ipipe_v4l2_core_ops,
+ .video = &ipipe_v4l2_video_ops,
+ .pad = &ipipe_v4l2_pad_ops,
+};
+
+/*
+ * Media entity operations
+ */
+
+/*
+ * ipipe_link_setup() - Setup ipipe connections
+ * @entity: ipipe media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int
+ipipe_link_setup(struct media_entity *entity, const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct vpfe_device *vpfe_dev = to_vpfe_device(ipipe);
+ u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
+
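+	/*
+	 * The local pad index (0 or 1) is OR-ed with the remote entity type;
+	 * the MEDIA_ENT_T_* values occupy much higher bits than the pad
+	 * indices, so the combination forms an unambiguous switch key.
+	 */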
+ switch (local->index | media_entity_type(remote->entity)) {
+ case IPIPE_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ipipe->input = IPIPE_INPUT_NONE;
+ break;
+ }
+ if (ipipe->input != IPIPE_INPUT_NONE)
+ return -EBUSY;
+ if (ipipeif_sink == IPIPEIF_INPUT_MEMORY)
+ ipipe->input = IPIPE_INPUT_MEMORY;
+ else
+ ipipe->input = IPIPE_INPUT_CCDC;
+ break;
+
+ case IPIPE_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* out to RESIZER */
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ ipipe->output = IPIPE_OUTPUT_RESIZER;
+ else
+ ipipe->output = IPIPE_OUTPUT_NONE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations ipipe_media_ops = {
+ .link_setup = ipipe_link_setup,
+};
+
+/*
+ * vpfe_ipipe_unregister_entities() - ipipe unregister entity
+ * @vpfe_ipipe: pointer to ipipe subdevice structure.
+ */
+void vpfe_ipipe_unregister_entities(struct vpfe_ipipe_device *vpfe_ipipe)
+{
+ /* cleanup entity */
+ media_entity_cleanup(&vpfe_ipipe->subdev.entity);
+ /* unregister subdev */
+ v4l2_device_unregister_subdev(&vpfe_ipipe->subdev);
+}
+
+/*
+ * vpfe_ipipe_register_entities() - ipipe register entity
+ * @ipipe: pointer to ipipe subdevice structure.
+ * @vdev: pointer to v4l2 device structure.
+ */
+int
+vpfe_ipipe_register_entities(struct vpfe_ipipe_device *ipipe,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev */
+ ret = v4l2_device_register_subdev(vdev, &ipipe->subdev);
+ if (ret) {
+ pr_err("Failed to register ipipe as v4l2 subdevice\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+#define IPIPE_CONTRAST_HIGH 0xff
+#define IPIPE_BRIGHT_HIGH 0xff
+
+/*
+ * vpfe_ipipe_init() - ipipe module initialization.
+ * @ipipe: pointer to ipipe subdevice structure.
+ * @pdev: platform device pointer.
+ */
+int
+vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe, struct platform_device *pdev)
+{
+ struct media_pad *pads = &ipipe->pads[0];
+ struct v4l2_subdev *sd = &ipipe->subdev;
+ struct media_entity *me = &sd->entity;
+	resource_size_t res_len;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+ if (!res)
+ return -ENOENT;
+
+ res_len = resource_size(res);
+ res = request_mem_region(res->start, res_len, res->name);
+ if (!res)
+ return -EBUSY;
+ ipipe->base_addr = ioremap_nocache(res->start, res_len);
+ if (!ipipe->base_addr)
+ return -EBUSY;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 6);
+ if (!res)
+ return -ENOENT;
+	ipipe->isp5_base_addr = ioremap_nocache(res->start, resource_size(res));
+ if (!ipipe->isp5_base_addr)
+ return -EBUSY;
+
+ v4l2_subdev_init(sd, &ipipe_v4l2_ops);
+ sd->internal_ops = &ipipe_v4l2_internal_ops;
+ strlcpy(sd->name, "DAVINCI IPIPE", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
+ v4l2_set_subdevdata(sd, ipipe);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[IPIPE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[IPIPE_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ipipe->input = IPIPE_INPUT_NONE;
+ ipipe->output = IPIPE_OUTPUT_NONE;
+
+ me->ops = &ipipe_media_ops;
+ v4l2_ctrl_handler_init(&ipipe->ctrls, 2);
+ v4l2_ctrl_new_std(&ipipe->ctrls, &ipipe_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0,
+ IPIPE_BRIGHT_HIGH, 1, 16);
+ v4l2_ctrl_new_std(&ipipe->ctrls, &ipipe_ctrl_ops,
+ V4L2_CID_CONTRAST, 0,
+ IPIPE_CONTRAST_HIGH, 1, 16);
+
+ v4l2_ctrl_handler_setup(&ipipe->ctrls);
+ sd->ctrl_handler = &ipipe->ctrls;
+
+ return media_entity_init(me, IPIPE_PADS_NUM, pads, 0);
+}
+
+/*
+ * vpfe_ipipe_cleanup() - ipipe subdevice cleanup.
+ * @ipipe: pointer to ipipe subdevice
+ * @pdev: pointer to platform device
+ */
+void vpfe_ipipe_cleanup(struct vpfe_ipipe_device *ipipe,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ v4l2_ctrl_handler_free(&ipipe->ctrls);
+
+ iounmap(ipipe->base_addr);
+ iounmap(ipipe->isp5_base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+ if (res)
+		release_mem_region(res->start, resource_size(res));
+}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
new file mode 100644
index 000000000000..cf4204603eb8
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_IPIPE_H
+#define _DAVINCI_VPFE_DM365_IPIPE_H
+
+#include <linux/platform_device.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "davinci_vpfe_user.h"
+#include "vpfe_video.h"
+
+#define CEIL(a, b)	(((a) + (b) - 1) / (b))
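+/* round-up integer division, e.g. CEIL(10, 4) == 3 */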
+
+enum ipipe_noise_filter {
+ IPIPE_D2F_1ST = 0,
+ IPIPE_D2F_2ND = 1,
+};
+
+/* Used for driver storage */
+struct ipipe_otfdpc_2_0 {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* defect detection method */
+ enum vpfe_ipipe_otfdpc_det_meth det_method;
+ /* Algorithm used. Applicable only when IPIPE_DPC_OTF_MIN_MAX2 is
+ * used
+ */
+ enum vpfe_ipipe_otfdpc_alg alg;
+ struct vpfe_ipipe_otfdpc_2_0_cfg otfdpc_2_0;
+};
+
+struct ipipe_otfdpc_3_0 {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* defect detection method */
+ enum vpfe_ipipe_otfdpc_det_meth det_method;
+ /* Algorithm used. Applicable only when IPIPE_DPC_OTF_MIN_MAX2 is
+ * used
+ */
+ enum vpfe_ipipe_otfdpc_alg alg;
+ struct vpfe_ipipe_otfdpc_3_0_cfg otfdpc_3_0;
+};
+
+/* Structure for configuring Luminance Adjustment module */
+struct ipipe_lum_adj {
+ /* Brightness adjustments */
+ unsigned char brightness;
+ /* contrast adjustments */
+ unsigned char contrast;
+};
+
+enum ipipe_rgb2rgb {
+ IPIPE_RGB2RGB_1 = 0,
+ IPIPE_RGB2RGB_2 = 1,
+};
+
+struct ipipe_module_params {
+ __u32 flag;
+ struct vpfe_ipipe_input_config input_config;
+ struct vpfe_ipipe_lutdpc lutdpc;
+ struct vpfe_ipipe_otfdpc otfdpc;
+ struct vpfe_ipipe_nf nf1;
+ struct vpfe_ipipe_nf nf2;
+ struct vpfe_ipipe_gic gic;
+ struct vpfe_ipipe_wb wbal;
+ struct vpfe_ipipe_cfa cfa;
+ struct vpfe_ipipe_rgb2rgb rgb2rgb1;
+ struct vpfe_ipipe_rgb2rgb rgb2rgb2;
+ struct vpfe_ipipe_gamma gamma;
+ struct vpfe_ipipe_3d_lut lut;
+ struct vpfe_ipipe_rgb2yuv rgb2yuv;
+ struct vpfe_ipipe_gbce gbce;
+ struct vpfe_ipipe_yuv422_conv yuv422_conv;
+ struct vpfe_ipipe_yee yee;
+ struct vpfe_ipipe_car car;
+ struct vpfe_ipipe_cgs cgs;
+ struct ipipe_lum_adj lum_adj;
+};
+
+#define IPIPE_PAD_SINK 0
+#define IPIPE_PAD_SOURCE 1
+
+#define IPIPE_PADS_NUM 2
+
+#define IPIPE_OUTPUT_NONE 0
+#define IPIPE_OUTPUT_RESIZER (1 << 0)
+
+enum ipipe_input_entity {
+ IPIPE_INPUT_NONE = 0,
+ IPIPE_INPUT_MEMORY = 1,
+ IPIPE_INPUT_CCDC = 2,
+};
+
+struct vpfe_ipipe_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[IPIPE_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[IPIPE_PADS_NUM];
+ enum ipipe_input_entity input;
+ unsigned int output;
+ struct v4l2_ctrl_handler ctrls;
+	void __iomem *base_addr;
+	void __iomem *isp5_base_addr;
+ struct ipipe_module_params config;
+};
+
+struct ipipe_module_if {
+ unsigned int param_offset;
+ unsigned int param_size;
+ unsigned int config_offset;
+ int (*set)(struct vpfe_ipipe_device *ipipe, void *param);
+ int (*get)(struct vpfe_ipipe_device *ipipe, void *param);
+};
+
+/* data paths */
+enum ipipe_data_paths {
+	/* Bayer RAW input to YCbCr output */
+	IPIPE_RAW2YUV,
+	/* Bayer RAW input to Bayer RAW output */
+	IPIPE_RAW2RAW,
+	/* Bayer RAW input to Boxcar output */
+	IPIPE_RAW2BOX,
+	/* YCbCr input to YCbCr output */
+	IPIPE_YUV2YUV
+};
+
+#define IPIPE_COLPTN_R_Ye 0x0
+#define IPIPE_COLPTN_Gr_Cy 0x1
+#define IPIPE_COLPTN_Gb_G 0x2
+#define IPIPE_COLPTN_B_Mg 0x3
+
+#define COLPAT_EE_SHIFT 0
+#define COLPAT_EO_SHIFT 2
+#define COLPAT_OE_SHIFT 4
+#define COLPAT_OO_SHIFT 6
+
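+/*
+ * Each 2x2 Bayer position (even/even, even/odd, odd/even, odd/odd line and
+ * pixel, as the EE/EO/OE/OO names suggest) gets a 2-bit color code at the
+ * corresponding COLPAT_*_SHIFT.  For example, the SGRBG pattern below
+ * encodes Gr at even/even, R at even/odd, B at odd/even and Gb at odd/odd.
+ */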
+#define ipipe_sgrbg_pattern \
+ (IPIPE_COLPTN_Gr_Cy << COLPAT_EE_SHIFT | \
+ IPIPE_COLPTN_R_Ye << COLPAT_EO_SHIFT | \
+ IPIPE_COLPTN_B_Mg << COLPAT_OE_SHIFT | \
+ IPIPE_COLPTN_Gb_G << COLPAT_OO_SHIFT)
+
+#define ipipe_srggb_pattern \
+ (IPIPE_COLPTN_R_Ye << COLPAT_EE_SHIFT | \
+ IPIPE_COLPTN_Gr_Cy << COLPAT_EO_SHIFT | \
+ IPIPE_COLPTN_Gb_G << COLPAT_OE_SHIFT | \
+ IPIPE_COLPTN_B_Mg << COLPAT_OO_SHIFT)
+
+int vpfe_ipipe_register_entities(struct vpfe_ipipe_device *ipipe,
+ struct v4l2_device *v4l2_dev);
+int vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe,
+ struct platform_device *pdev);
+void vpfe_ipipe_unregister_entities(struct vpfe_ipipe_device *ipipe);
+void vpfe_ipipe_cleanup(struct vpfe_ipipe_device *ipipe,
+ struct platform_device *pdev);
+void vpfe_ipipe_enable(struct vpfe_device *vpfe_dev, int en);
+
+#endif /* _DAVINCI_VPFE_DM365_IPIPE_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
new file mode 100644
index 000000000000..e027b92b54ef
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -0,0 +1,1048 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#include "dm365_ipipe_hw.h"
+
+#define IPIPE_MODE_CONTINUOUS 0
+#define IPIPE_MODE_SINGLE_SHOT 1
+
+static void ipipe_clock_enable(void *__iomem base_addr)
+{
+ /* enable IPIPE MMR for register write access */
+ regw_ip(base_addr, IPIPE_GCK_MMR_DEFAULT, IPIPE_GCK_MMR);
+
+ /* enable the clock wb,cfa,dfc,d2f,pre modules */
+ regw_ip(base_addr, IPIPE_GCK_PIX_DEFAULT, IPIPE_GCK_PIX);
+}
+
+static void
+rsz_set_common_params(void *__iomem rsz_base, struct resizer_params *params)
+{
+ struct rsz_common_params *rsz_common = &params->rsz_common;
+ u32 val;
+
+ /* Set mode */
+ regw_rsz(rsz_base, params->oper_mode, RSZ_SRC_MODE);
+
+ /* data source selection and bypass */
+ val = (rsz_common->passthrough << RSZ_BYPASS_SHIFT) |
+ rsz_common->source;
+ regw_rsz(rsz_base, val, RSZ_SRC_FMT0);
+
+ /* src image selection */
+ val = (rsz_common->raw_flip & 1) |
+ (rsz_common->src_img_fmt << RSZ_SRC_IMG_FMT_SHIFT) |
+ ((rsz_common->y_c & 1) << RSZ_SRC_Y_C_SEL_SHIFT);
+ regw_rsz(rsz_base, val, RSZ_SRC_FMT1);
+
+ regw_rsz(rsz_base, rsz_common->vps & IPIPE_RSZ_VPS_MASK, RSZ_SRC_VPS);
+ regw_rsz(rsz_base, rsz_common->hps & IPIPE_RSZ_HPS_MASK, RSZ_SRC_HPS);
+ regw_rsz(rsz_base, rsz_common->vsz & IPIPE_RSZ_VSZ_MASK, RSZ_SRC_VSZ);
+ regw_rsz(rsz_base, rsz_common->hsz & IPIPE_RSZ_HSZ_MASK, RSZ_SRC_HSZ);
+ regw_rsz(rsz_base, rsz_common->yuv_y_min, RSZ_YUV_Y_MIN);
+ regw_rsz(rsz_base, rsz_common->yuv_y_max, RSZ_YUV_Y_MAX);
+ regw_rsz(rsz_base, rsz_common->yuv_c_min, RSZ_YUV_C_MIN);
+ regw_rsz(rsz_base, rsz_common->yuv_c_max, RSZ_YUV_C_MAX);
+ /* chromatic position */
+ regw_rsz(rsz_base, rsz_common->out_chr_pos, RSZ_YUV_PHS);
+}
+
+static void
+rsz_set_rsz_regs(void *__iomem rsz_base, unsigned int rsz_id,
+ struct resizer_params *params)
+{
+ struct resizer_scale_param *rsc_params;
+ struct rsz_ext_mem_param *ext_mem;
+ struct resizer_rgb *rgb;
+ u32 reg_base;
+ u32 val;
+
+ rsc_params = &params->rsz_rsc_param[rsz_id];
+ rgb = &params->rsz2rgb[rsz_id];
+ ext_mem = &params->ext_mem_param[rsz_id];
+
+ if (rsz_id == RSZ_A) {
+ val = rsc_params->h_flip << RSZA_H_FLIP_SHIFT;
+ val |= rsc_params->v_flip << RSZA_V_FLIP_SHIFT;
+ reg_base = RSZ_EN_A;
+ } else {
+ val = rsc_params->h_flip << RSZB_H_FLIP_SHIFT;
+ val |= rsc_params->v_flip << RSZB_V_FLIP_SHIFT;
+ reg_base = RSZ_EN_B;
+ }
+ /* update flip settings */
+ regw_rsz(rsz_base, val, RSZ_SEQ);
+
+ regw_rsz(rsz_base, params->oper_mode, reg_base + RSZ_MODE);
+
+ val = (rsc_params->cen << RSZ_CEN_SHIFT) | rsc_params->yen;
+ regw_rsz(rsz_base, val, reg_base + RSZ_420);
+
+ regw_rsz(rsz_base, rsc_params->i_vps & RSZ_VPS_MASK,
+ reg_base + RSZ_I_VPS);
+ regw_rsz(rsz_base, rsc_params->i_hps & RSZ_HPS_MASK,
+ reg_base + RSZ_I_HPS);
+ regw_rsz(rsz_base, rsc_params->o_vsz & RSZ_O_VSZ_MASK,
+ reg_base + RSZ_O_VSZ);
+ regw_rsz(rsz_base, rsc_params->o_hsz & RSZ_O_HSZ_MASK,
+ reg_base + RSZ_O_HSZ);
+ regw_rsz(rsz_base, rsc_params->v_phs_y & RSZ_V_PHS_MASK,
+ reg_base + RSZ_V_PHS_Y);
+ regw_rsz(rsz_base, rsc_params->v_phs_c & RSZ_V_PHS_MASK,
+ reg_base + RSZ_V_PHS_C);
+
+ /* keep this additional adjustment to zero for now */
+ regw_rsz(rsz_base, rsc_params->v_dif & RSZ_V_DIF_MASK,
+ reg_base + RSZ_V_DIF);
+
+ val = (rsc_params->v_typ_y & 1) |
+ ((rsc_params->v_typ_c & 1) << RSZ_TYP_C_SHIFT);
+ regw_rsz(rsz_base, val, reg_base + RSZ_V_TYP);
+
+ val = (rsc_params->v_lpf_int_y & RSZ_LPF_INT_MASK) |
+ ((rsc_params->v_lpf_int_c & RSZ_LPF_INT_MASK) <<
+ RSZ_LPF_INT_C_SHIFT);
+ regw_rsz(rsz_base, val, reg_base + RSZ_V_LPF);
+
+ regw_rsz(rsz_base, rsc_params->h_phs &
+ RSZ_H_PHS_MASK, reg_base + RSZ_H_PHS);
+
+ regw_rsz(rsz_base, 0, reg_base + RSZ_H_PHS_ADJ);
+ regw_rsz(rsz_base, rsc_params->h_dif &
+ RSZ_H_DIF_MASK, reg_base + RSZ_H_DIF);
+
+ val = (rsc_params->h_typ_y & 1) |
+ ((rsc_params->h_typ_c & 1) << RSZ_TYP_C_SHIFT);
+ regw_rsz(rsz_base, val, reg_base + RSZ_H_TYP);
+
+ val = (rsc_params->h_lpf_int_y & RSZ_LPF_INT_MASK) |
+ ((rsc_params->h_lpf_int_c & RSZ_LPF_INT_MASK) <<
+ RSZ_LPF_INT_C_SHIFT);
+ regw_rsz(rsz_base, val, reg_base + RSZ_H_LPF);
+
+ regw_rsz(rsz_base, rsc_params->dscale_en & 1, reg_base + RSZ_DWN_EN);
+
+ val = (rsc_params->h_dscale_ave_sz & RSZ_DWN_SCALE_AV_SZ_MASK) |
+ ((rsc_params->v_dscale_ave_sz & RSZ_DWN_SCALE_AV_SZ_MASK) <<
+ RSZ_DWN_SCALE_AV_SZ_V_SHIFT);
+ regw_rsz(rsz_base, val, reg_base + RSZ_DWN_AV);
+
+ /* setting rgb conversion parameters */
+ regw_rsz(rsz_base, rgb->rgb_en, reg_base + RSZ_RGB_EN);
+
+ val = (rgb->rgb_typ << RSZ_RGB_TYP_SHIFT) |
+ (rgb->rgb_msk0 << RSZ_RGB_MSK0_SHIFT) |
+ (rgb->rgb_msk1 << RSZ_RGB_MSK1_SHIFT);
+ regw_rsz(rsz_base, val, reg_base + RSZ_RGB_TYP);
+
+ regw_rsz(rsz_base, rgb->rgb_alpha_val & RSZ_RGB_ALPHA_MASK,
+ reg_base + RSZ_RGB_BLD);
+
+ /* setting external memory parameters */
+ regw_rsz(rsz_base, ext_mem->rsz_sdr_oft_y, reg_base + RSZ_SDR_Y_OFT);
+ regw_rsz(rsz_base, ext_mem->rsz_sdr_ptr_s_y,
+ reg_base + RSZ_SDR_Y_PTR_S);
+ regw_rsz(rsz_base, ext_mem->rsz_sdr_ptr_e_y,
+ reg_base + RSZ_SDR_Y_PTR_E);
+ regw_rsz(rsz_base, ext_mem->rsz_sdr_oft_c, reg_base + RSZ_SDR_C_OFT);
+ regw_rsz(rsz_base, ext_mem->rsz_sdr_ptr_s_c,
+ reg_base + RSZ_SDR_C_PTR_S);
+ regw_rsz(rsz_base, (ext_mem->rsz_sdr_ptr_e_c >> 1),
+ reg_base + RSZ_SDR_C_PTR_E);
+}
+
+/* set up the resizer: common registers, then RSZ_A and RSZ_B if enabled */
+static void
+ipipe_setup_resizer(void *__iomem rsz_base, struct resizer_params *params)
+{
+ /* enable MMR gate to write to Resizer */
+ regw_rsz(rsz_base, 1, RSZ_GCK_MMR);
+
+ /* Enable resizer if it is not in bypass mode */
+ if (params->rsz_common.passthrough)
+ regw_rsz(rsz_base, 0, RSZ_GCK_SDR);
+ else
+ regw_rsz(rsz_base, 1, RSZ_GCK_SDR);
+
+ rsz_set_common_params(rsz_base, params);
+
+ regw_rsz(rsz_base, params->rsz_en[RSZ_A], RSZ_EN_A);
+
+ if (params->rsz_en[RSZ_A])
+		/* set rescale parameters */
+ rsz_set_rsz_regs(rsz_base, RSZ_A, params);
+
+ regw_rsz(rsz_base, params->rsz_en[RSZ_B], RSZ_EN_B);
+
+ if (params->rsz_en[RSZ_B])
+ rsz_set_rsz_regs(rsz_base, RSZ_B, params);
+}
+
+static u32 ipipe_get_color_pat(enum v4l2_mbus_pixelcode pix)
+{
+ switch (pix) {
+ case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ return ipipe_sgrbg_pattern;
+
+ default:
+ return ipipe_srggb_pattern;
+ }
+}
+
+static int ipipe_get_data_path(struct vpfe_ipipe_device *ipipe)
+{
+ enum v4l2_mbus_pixelcode temp_pix_fmt;
+
+ switch (ipipe->formats[IPIPE_PAD_SINK].code) {
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ temp_pix_fmt = V4L2_MBUS_FMT_SGRBG12_1X12;
+ break;
+
+ default:
+ temp_pix_fmt = V4L2_MBUS_FMT_UYVY8_2X8;
+ }
+
+ if (temp_pix_fmt == V4L2_MBUS_FMT_SGRBG12_1X12) {
+ if (ipipe->formats[IPIPE_PAD_SOURCE].code ==
+ V4L2_MBUS_FMT_SGRBG12_1X12)
+ return IPIPE_RAW2RAW;
+ return IPIPE_RAW2YUV;
+ }
+
+ return IPIPE_YUV2YUV;
+}
+
+static int get_ipipe_mode(struct vpfe_ipipe_device *ipipe)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(ipipe);
+ u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
+
+ if (ipipeif_sink == IPIPEIF_INPUT_MEMORY)
+ return IPIPE_MODE_SINGLE_SHOT;
+ else if (ipipeif_sink == IPIPEIF_INPUT_ISIF)
+ return IPIPE_MODE_CONTINUOUS;
+
+ return -EINVAL;
+}
+
+int config_ipipe_hw(struct vpfe_ipipe_device *ipipe)
+{
+ struct vpfe_ipipe_input_config *config = &ipipe->config.input_config;
+ void __iomem *ipipe_base = ipipe->base_addr;
+ struct v4l2_mbus_framefmt *outformat;
+ u32 color_pat;
+	int ipipe_mode;
+ u32 data_path;
+
+ /* enable clock to IPIPE */
+ vpss_enable_clock(VPSS_IPIPE_CLOCK, 1);
+ ipipe_clock_enable(ipipe_base);
+
+ if (ipipe->input == IPIPE_INPUT_NONE) {
+ regw_ip(ipipe_base, 0, IPIPE_SRC_EN);
+ return 0;
+ }
+
+ ipipe_mode = get_ipipe_mode(ipipe);
+	if (ipipe_mode < 0) {
+		pr_err("Failed to get ipipe mode\n");
+		return -EINVAL;
+	}
+ regw_ip(ipipe_base, ipipe_mode, IPIPE_SRC_MODE);
+
+ data_path = ipipe_get_data_path(ipipe);
+ regw_ip(ipipe_base, data_path, IPIPE_SRC_FMT);
+
+ regw_ip(ipipe_base, config->vst & IPIPE_RSZ_VPS_MASK, IPIPE_SRC_VPS);
+ regw_ip(ipipe_base, config->hst & IPIPE_RSZ_HPS_MASK, IPIPE_SRC_HPS);
+
+ outformat = &ipipe->formats[IPIPE_PAD_SOURCE];
+ regw_ip(ipipe_base, (outformat->height + 1) & IPIPE_RSZ_VSZ_MASK,
+ IPIPE_SRC_VSZ);
+ regw_ip(ipipe_base, (outformat->width + 1) & IPIPE_RSZ_HSZ_MASK,
+ IPIPE_SRC_HSZ);
+
+ if (data_path == IPIPE_RAW2YUV ||
+ data_path == IPIPE_RAW2RAW) {
+ color_pat =
+ ipipe_get_color_pat(ipipe->formats[IPIPE_PAD_SINK].code);
+ regw_ip(ipipe_base, color_pat, IPIPE_SRC_COL);
+ }
+
+ return 0;
+}
+
+/*
+ * config_rsz_hw() - Performs hardware setup of resizer.
+ */
+int config_rsz_hw(struct vpfe_resizer_device *resizer,
+ struct resizer_params *config)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ void *__iomem ipipe_base = vpfe_dev->vpfe_ipipe.base_addr;
+ void *__iomem rsz_base = vpfe_dev->vpfe_resizer.base_addr;
+
+ /* enable VPSS clock */
+ vpss_enable_clock(VPSS_IPIPE_CLOCK, 1);
+ ipipe_clock_enable(ipipe_base);
+
+ ipipe_setup_resizer(rsz_base, config);
+
+ return 0;
+}
+
+static void
+rsz_set_y_address(void *__iomem rsz_base, unsigned int address,
+ unsigned int offset)
+{
+ u32 val;
+
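+	/*
+	 * The DDR buffer address is split into 16-bit halves; the same value
+	 * is programmed into both the BAD (base address) and SAD (start
+	 * address) register pairs.
+	 */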
+ val = address & SET_LOW_ADDR;
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_BAD_L);
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_SAD_L);
+
+ val = (address & SET_HIGH_ADDR) >> 16;
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_BAD_H);
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_SAD_H);
+}
+
+static void
+rsz_set_c_address(void *__iomem rsz_base, unsigned int address,
+ unsigned int offset)
+{
+ u32 val;
+
+ val = address & SET_LOW_ADDR;
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_C_BAD_L);
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_C_SAD_L);
+
+ val = (address & SET_HIGH_ADDR) >> 16;
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_C_BAD_H);
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_C_SAD_H);
+}
+
+/*
+ * resizer_set_outaddr() - set the address for given resize_no
+ * @rsz_base: resizer base address
+ * @params: pointer to ipipe_params structure
+ * @resize_no: 0 - Resizer-A, 1 - Resizer B
+ * @address: the address to set
+ */
+int
+resizer_set_outaddr(void *__iomem rsz_base, struct resizer_params *params,
+ int resize_no, unsigned int address)
+{
+ struct resizer_scale_param *rsc_param;
+ struct rsz_ext_mem_param *mem_param;
+ struct rsz_common_params *rsz_common;
+ unsigned int rsz_start_add;
+ unsigned int val;
+
+ if (resize_no != RSZ_A && resize_no != RSZ_B)
+ return -EINVAL;
+
+ mem_param = &params->ext_mem_param[resize_no];
+ rsc_param = &params->rsz_rsc_param[resize_no];
+ rsz_common = &params->rsz_common;
+
+ if (resize_no == RSZ_A)
+ rsz_start_add = RSZ_EN_A;
+ else
+ rsz_start_add = RSZ_EN_B;
+
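+	/*
+	 * The buffer address is adjusted by the per-plane flip offsets and
+	 * any user-reserved offsets; RSZ_B buffers are placed after RSZ_A's
+	 * user offsets.  For a planar 4:2:0 source only the plane selected
+	 * by y_c is reprogrammed.
+	 */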
+ /* y_c = 0 for y, = 1 for c */
+ if (rsz_common->src_img_fmt == RSZ_IMG_420) {
+ if (rsz_common->y_c) {
+ /* C channel */
+ val = address + mem_param->flip_ofst_c;
+ rsz_set_c_address(rsz_base, val, rsz_start_add);
+ } else {
+ val = address + mem_param->flip_ofst_y;
+ rsz_set_y_address(rsz_base, val, rsz_start_add);
+ }
+ } else {
+ if (rsc_param->cen && rsc_param->yen) {
+ /* 420 */
+ val = address + mem_param->c_offset +
+ mem_param->flip_ofst_c +
+ mem_param->user_y_ofst +
+ mem_param->user_c_ofst;
+ if (resize_no == RSZ_B)
+ val +=
+ params->ext_mem_param[RSZ_A].user_y_ofst +
+ params->ext_mem_param[RSZ_A].user_c_ofst;
+ /* set C address */
+ rsz_set_c_address(rsz_base, val, rsz_start_add);
+ }
+ val = address + mem_param->flip_ofst_y + mem_param->user_y_ofst;
+ if (resize_no == RSZ_B)
+ val += params->ext_mem_param[RSZ_A].user_y_ofst +
+ params->ext_mem_param[RSZ_A].user_c_ofst;
+ /* set Y address */
+ rsz_set_y_address(rsz_base, val, rsz_start_add);
+ }
+ /* resizer must be enabled */
+ regw_rsz(rsz_base, params->rsz_en[resize_no], rsz_start_add);
+
+ return 0;
+}
+
+void
+ipipe_set_lutdpc_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ struct vpfe_ipipe_lutdpc *dpc)
+{
+ u32 max_tbl_size = LUT_DPC_MAX_SIZE >> 1;
+ u32 lut_start_addr = DPC_TB0_START_ADDR;
+ u32 val;
+ u32 count;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, dpc->en, DPC_LUT_EN);
+
+ if (dpc->en != 1)
+ return;
+
+ val = LUTDPC_TBL_256_EN | (dpc->repl_white & 1);
+ regw_ip(base_addr, val, DPC_LUT_SEL);
+ regw_ip(base_addr, LUT_DPC_START_ADDR, DPC_LUT_ADR);
+	regw_ip(base_addr, dpc->dpc_size & LUT_DPC_SIZE_MASK, DPC_LUT_SIZ);
+
+ if (dpc->table == NULL)
+ return;
+
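+	/*
+	 * Each LUT entry packs the defect's horizontal and vertical position
+	 * and its correction method into one word; entries beyond the first
+	 * half of the table (max_tbl_size) continue in the second RAM bank
+	 * at DPC_TB1_START_ADDR.
+	 */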
+ for (count = 0; count < dpc->dpc_size; count++) {
+ if (count >= max_tbl_size)
+ lut_start_addr = DPC_TB1_START_ADDR;
+ val = (dpc->table[count].horz_pos & LUT_DPC_H_POS_MASK) |
+ ((dpc->table[count].vert_pos & LUT_DPC_V_POS_MASK) <<
+ LUT_DPC_V_POS_SHIFT) | (dpc->table[count].method <<
+ LUT_DPC_CORR_METH_SHIFT);
+ w_ip_table(isp5_base_addr, val, (lut_start_addr +
+ ((count % max_tbl_size) << 2)));
+ }
+}
+
+static void
+set_dpc_thresholds(void *__iomem base_addr,
+ struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_thr)
+{
+ regw_ip(base_addr, dpc_thr->corr_thr.r & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2C_THR_R);
+ regw_ip(base_addr, dpc_thr->corr_thr.gr & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2C_THR_GR);
+ regw_ip(base_addr, dpc_thr->corr_thr.gb & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2C_THR_GB);
+ regw_ip(base_addr, dpc_thr->corr_thr.b & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2C_THR_B);
+ regw_ip(base_addr, dpc_thr->det_thr.r & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2D_THR_R);
+ regw_ip(base_addr, dpc_thr->det_thr.gr & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2D_THR_GR);
+ regw_ip(base_addr, dpc_thr->det_thr.gb & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2D_THR_GB);
+ regw_ip(base_addr, dpc_thr->det_thr.b & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2D_THR_B);
+}
+
+void ipipe_set_otfdpc_regs(void *__iomem base_addr,
+ struct vpfe_ipipe_otfdpc *otfdpc)
+{
+ struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_2_0 = &otfdpc->alg_cfg.dpc_2_0;
+ struct vpfe_ipipe_otfdpc_3_0_cfg *dpc_3_0 = &otfdpc->alg_cfg.dpc_3_0;
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+
+ regw_ip(base_addr, (otfdpc->en & 1), DPC_OTF_EN);
+ if (!otfdpc->en)
+ return;
+
+ /* dpc enabled */
+ val = (otfdpc->det_method << OTF_DET_METHOD_SHIFT) | otfdpc->alg;
+ regw_ip(base_addr, val, DPC_OTF_TYP);
+
+ if (otfdpc->det_method == VPFE_IPIPE_DPC_OTF_MIN_MAX) {
+		/* MinMax method: ALG = 0, TYP = 0, DPC_OTF_2D_THR_[x] = 0,
+		 * DPC_OTF_2C_THR_[x] = maximum threshold
+		 */
+ dpc_2_0->det_thr.r = dpc_2_0->det_thr.gb =
+ dpc_2_0->det_thr.gr = dpc_2_0->det_thr.b = 0;
+ set_dpc_thresholds(base_addr, dpc_2_0);
+ return;
+ }
+ /* MinMax2 */
+ if (otfdpc->alg == VPFE_IPIPE_OTFDPC_2_0) {
+ set_dpc_thresholds(base_addr, dpc_2_0);
+ return;
+ }
+ regw_ip(base_addr, dpc_3_0->act_adj_shf &
+ OTF_DPC3_0_SHF_MASK, DPC_OTF_3_SHF);
+ /* Detection thresholds */
+ regw_ip(base_addr, ((dpc_3_0->det_thr & OTF_DPC3_0_THR_MASK) <<
+ OTF_DPC3_0_THR_SHIFT), DPC_OTF_3D_THR);
+ regw_ip(base_addr, dpc_3_0->det_slp &
+ OTF_DPC3_0_SLP_MASK, DPC_OTF_3D_SLP);
+ regw_ip(base_addr, dpc_3_0->det_thr_min &
+ OTF_DPC3_0_DET_MASK, DPC_OTF_3D_MIN);
+ regw_ip(base_addr, dpc_3_0->det_thr_max &
+ OTF_DPC3_0_DET_MASK, DPC_OTF_3D_MAX);
+ /* Correction thresholds */
+ regw_ip(base_addr, ((dpc_3_0->corr_thr & OTF_DPC3_0_THR_MASK) <<
+ OTF_DPC3_0_THR_SHIFT), DPC_OTF_3C_THR);
+ regw_ip(base_addr, dpc_3_0->corr_slp &
+ OTF_DPC3_0_SLP_MASK, DPC_OTF_3C_SLP);
+ regw_ip(base_addr, dpc_3_0->corr_thr_min &
+ OTF_DPC3_0_CORR_MASK, DPC_OTF_3C_MIN);
+ regw_ip(base_addr, dpc_3_0->corr_thr_max &
+ OTF_DPC3_0_CORR_MASK, DPC_OTF_3C_MAX);
+}
+
+/* 2D Noise filter */
+void
+ipipe_set_d2f_regs(void *__iomem base_addr, unsigned int id,
+ struct vpfe_ipipe_nf *noise_filter)
+{
+
+ u32 offset = D2F_1ST;
+ int count;
+ u32 val;
+
+ if (id == IPIPE_D2F_2ND)
+ offset = D2F_2ND;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, noise_filter->en & 1, offset + D2F_EN);
+ if (!noise_filter->en)
+ return;
+
+ /*noise filter enabled */
+ /* Combine all the fields to make D2F_CFG register of IPIPE */
+ val = ((noise_filter->spread_val & D2F_SPR_VAL_MASK) <<
+ D2F_SPR_VAL_SHIFT) | ((noise_filter->shft_val &
+ D2F_SHFT_VAL_MASK) << D2F_SHFT_VAL_SHIFT) |
+ (noise_filter->gr_sample_meth << D2F_SAMPLE_METH_SHIFT) |
+ ((noise_filter->apply_lsc_gain & 1) <<
+ D2F_APPLY_LSC_GAIN_SHIFT) | D2F_USE_SPR_REG_VAL;
+ regw_ip(base_addr, val, offset + D2F_TYP);
+
+ /* edge detection minimum */
+ regw_ip(base_addr, noise_filter->edge_det_min_thr &
+ D2F_EDGE_DET_THR_MASK, offset + D2F_EDG_MIN);
+
+ /* edge detection maximum */
+ regw_ip(base_addr, noise_filter->edge_det_max_thr &
+ D2F_EDGE_DET_THR_MASK, offset + D2F_EDG_MAX);
+
+ for (count = 0; count < VPFE_IPIPE_NF_STR_TABLE_SIZE; count++)
+ regw_ip(base_addr,
+ (noise_filter->str[count] & D2F_STR_VAL_MASK),
+ offset + D2F_STR + count * 4);
+
+ for (count = 0; count < VPFE_IPIPE_NF_THR_TABLE_SIZE; count++)
+ regw_ip(base_addr, noise_filter->thr[count] & D2F_THR_VAL_MASK,
+ offset + D2F_THR + count * 4);
+}
+
+#define IPIPE_U8Q5(decimal, integer) \
+	((((decimal) & 0x1f) | (((integer) & 0x7) << 5)))
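+/*
+ * U8Q5 worked example: a gain of 1.25 is integer = 1,
+ * decimal = 0.25 * 32 = 8, i.e. IPIPE_U8Q5(8, 1) == 0x28.
+ */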
+
+/* Green Imbalance Correction */
+void ipipe_set_gic_regs(void *__iomem base_addr, struct vpfe_ipipe_gic *gic)
+{
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, gic->en & 1, GIC_EN);
+
+ if (!gic->en)
+ return;
+
+ /*gic enabled */
+ val = (gic->wt_fn_type << GIC_TYP_SHIFT) |
+ (gic->thr_sel << GIC_THR_SEL_SHIFT) |
+ ((gic->apply_lsc_gain & 1) << GIC_APPLY_LSC_GAIN_SHIFT);
+ regw_ip(base_addr, val, GIC_TYP);
+
+ regw_ip(base_addr, gic->gain & GIC_GAIN_MASK, GIC_GAN);
+
+ if (gic->gic_alg != VPFE_IPIPE_GIC_ALG_ADAPT_GAIN) {
+ /* Constant Gain. Set threshold to maximum */
+ regw_ip(base_addr, GIC_THR_MASK, GIC_THR);
+ return;
+ }
+
+ if (gic->thr_sel == VPFE_IPIPE_GIC_THR_REG) {
+ regw_ip(base_addr, gic->thr & GIC_THR_MASK, GIC_THR);
+ regw_ip(base_addr, gic->slope & GIC_SLOPE_MASK, GIC_SLP);
+ } else {
+ /* Use NF thresholds */
+ val = IPIPE_U8Q5(gic->nf2_thr_gain.decimal,
+ gic->nf2_thr_gain.integer);
+ regw_ip(base_addr, val, GIC_NFGAN);
+ }
+}
+
+#define IPIPE_U13Q9(decimal, integer) \
+	((((decimal) & 0x1ff) | (((integer) & 0xf) << 9)))
+/* White balance */
+void ipipe_set_wb_regs(void *__iomem base_addr, struct vpfe_ipipe_wb *wb)
+{
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+	/* Offsets. S12 */
+ regw_ip(base_addr, wb->ofst_r & WB_OFFSET_MASK, WB2_OFT_R);
+ regw_ip(base_addr, wb->ofst_gr & WB_OFFSET_MASK, WB2_OFT_GR);
+ regw_ip(base_addr, wb->ofst_gb & WB_OFFSET_MASK, WB2_OFT_GB);
+ regw_ip(base_addr, wb->ofst_b & WB_OFFSET_MASK, WB2_OFT_B);
+
+ /* Gains. U13Q9 */
+ val = IPIPE_U13Q9(wb->gain_r.decimal, wb->gain_r.integer);
+ regw_ip(base_addr, val, WB2_WGN_R);
+
+ val = IPIPE_U13Q9(wb->gain_gr.decimal, wb->gain_gr.integer);
+ regw_ip(base_addr, val, WB2_WGN_GR);
+
+ val = IPIPE_U13Q9(wb->gain_gb.decimal, wb->gain_gb.integer);
+ regw_ip(base_addr, val, WB2_WGN_GB);
+
+ val = IPIPE_U13Q9(wb->gain_b.decimal, wb->gain_b.integer);
+ regw_ip(base_addr, val, WB2_WGN_B);
+}
+
+/* CFA */
+void ipipe_set_cfa_regs(void *__iomem base_addr, struct vpfe_ipipe_cfa *cfa)
+{
+ ipipe_clock_enable(base_addr);
+
+ regw_ip(base_addr, cfa->alg, CFA_MODE);
+ regw_ip(base_addr, cfa->hpf_thr_2dir & CFA_HPF_THR_2DIR_MASK,
+ CFA_2DIR_HPF_THR);
+ regw_ip(base_addr, cfa->hpf_slp_2dir & CFA_HPF_SLOPE_2DIR_MASK,
+ CFA_2DIR_HPF_SLP);
+ regw_ip(base_addr, cfa->hp_mix_thr_2dir & CFA_HPF_MIX_THR_2DIR_MASK,
+ CFA_2DIR_MIX_THR);
+ regw_ip(base_addr, cfa->hp_mix_slope_2dir & CFA_HPF_MIX_SLP_2DIR_MASK,
+ CFA_2DIR_MIX_SLP);
+ regw_ip(base_addr, cfa->dir_thr_2dir & CFA_DIR_THR_2DIR_MASK,
+ CFA_2DIR_DIR_THR);
+ regw_ip(base_addr, cfa->dir_slope_2dir & CFA_DIR_SLP_2DIR_MASK,
+ CFA_2DIR_DIR_SLP);
+ regw_ip(base_addr, cfa->nd_wt_2dir & CFA_ND_WT_2DIR_MASK,
+ CFA_2DIR_NDWT);
+ regw_ip(base_addr, cfa->hue_fract_daa & CFA_DAA_HUE_FRA_MASK,
+ CFA_MONO_HUE_FRA);
+ regw_ip(base_addr, cfa->edge_thr_daa & CFA_DAA_EDG_THR_MASK,
+ CFA_MONO_EDG_THR);
+ regw_ip(base_addr, cfa->thr_min_daa & CFA_DAA_THR_MIN_MASK,
+ CFA_MONO_THR_MIN);
+ regw_ip(base_addr, cfa->thr_slope_daa & CFA_DAA_THR_SLP_MASK,
+ CFA_MONO_THR_SLP);
+ regw_ip(base_addr, cfa->slope_min_daa & CFA_DAA_SLP_MIN_MASK,
+ CFA_MONO_SLP_MIN);
+ regw_ip(base_addr, cfa->slope_slope_daa & CFA_DAA_SLP_SLP_MASK,
+ CFA_MONO_SLP_SLP);
+ regw_ip(base_addr, cfa->lp_wt_daa & CFA_DAA_LP_WT_MASK,
+ CFA_MONO_LPWT);
+}
+
+void
+ipipe_set_rgb2rgb_regs(void *__iomem base_addr, unsigned int id,
+ struct vpfe_ipipe_rgb2rgb *rgb)
+{
+ u32 offset_mask = RGB2RGB_1_OFST_MASK;
+ u32 offset = RGB1_MUL_BASE;
+ u32 integ_mask = 0xf;
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+
+ if (id == IPIPE_RGB2RGB_2) {
+		/* For the second RGB module the gain integer part is 3 bits
+		 * instead of 4 and the offset has 11 bits instead of 13.
+		 */
+ offset = RGB2_MUL_BASE;
+ integ_mask = 0x7;
+ offset_mask = RGB2RGB_2_OFST_MASK;
+ }
+ /* Gains */
+ val = (rgb->coef_rr.decimal & 0xff) |
+ ((rgb->coef_rr.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_RR);
+ val = (rgb->coef_gr.decimal & 0xff) |
+ ((rgb->coef_gr.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_GR);
+ val = (rgb->coef_br.decimal & 0xff) |
+ ((rgb->coef_br.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_BR);
+ val = (rgb->coef_rg.decimal & 0xff) |
+ ((rgb->coef_rg.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_RG);
+ val = (rgb->coef_gg.decimal & 0xff) |
+ ((rgb->coef_gg.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_GG);
+ val = (rgb->coef_bg.decimal & 0xff) |
+ ((rgb->coef_bg.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_BG);
+ val = (rgb->coef_rb.decimal & 0xff) |
+ ((rgb->coef_rb.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_RB);
+ val = (rgb->coef_gb.decimal & 0xff) |
+ ((rgb->coef_gb.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_GB);
+ val = (rgb->coef_bb.decimal & 0xff) |
+ ((rgb->coef_bb.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_BB);
+
+ /* Offsets */
+ regw_ip(base_addr, rgb->out_ofst_r & offset_mask, offset + RGB_OFT_OR);
+ regw_ip(base_addr, rgb->out_ofst_g & offset_mask, offset + RGB_OFT_OG);
+ regw_ip(base_addr, rgb->out_ofst_b & offset_mask, offset + RGB_OFT_OB);
+}
+
+static void
+ipipe_update_gamma_tbl(void *__iomem isp5_base_addr,
+ struct vpfe_ipipe_gamma_entry *table, int size, u32 addr)
+{
+ int count;
+ u32 val;
+
+ for (count = 0; count < size; count++) {
+ val = table[count].slope & GAMMA_MASK;
+ val |= (table[count].offset & GAMMA_MASK) << GAMMA_SHIFT;
+ w_ip_table(isp5_base_addr, val, (addr + (count * 4)));
+ }
+}
+
+void
+ipipe_set_gamma_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ struct vpfe_ipipe_gamma *gamma)
+{
+ int table_size;
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+ val = (gamma->bypass_r << GAMMA_BYPR_SHIFT) |
+		(gamma->bypass_g << GAMMA_BYPG_SHIFT) |
+		(gamma->bypass_b << GAMMA_BYPB_SHIFT) |
+ (gamma->tbl_sel << GAMMA_TBL_SEL_SHIFT) |
+ (gamma->tbl_size << GAMMA_TBL_SIZE_SHIFT);
+
+ regw_ip(base_addr, val, GMM_CFG);
+
+ if (gamma->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
+ return;
+
+ table_size = gamma->tbl_size;
+
+ if (!gamma->bypass_r && gamma->table_r != NULL)
+ ipipe_update_gamma_tbl(isp5_base_addr, gamma->table_r,
+ table_size, GAMMA_R_START_ADDR);
+ if (!gamma->bypass_b && gamma->table_b != NULL)
+ ipipe_update_gamma_tbl(isp5_base_addr, gamma->table_b,
+ table_size, GAMMA_B_START_ADDR);
+ if (!gamma->bypass_g && gamma->table_g != NULL)
+ ipipe_update_gamma_tbl(isp5_base_addr, gamma->table_g,
+ table_size, GAMMA_G_START_ADDR);
+}
+
+void
+ipipe_set_3d_lut_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ struct vpfe_ipipe_3d_lut *lut_3d)
+{
+ struct vpfe_ipipe_3d_lut_entry *tbl;
+ u32 bnk_index;
+ u32 tbl_index;
+ u32 val;
+ u32 i;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, lut_3d->en, D3LUT_EN);
+
+ if (!lut_3d->en)
+ return;
+
+ /* lut_3d enabled */
+ if (!lut_3d->table)
+ return;
+
+	/* valid table */
+ tbl = lut_3d->table;
+ for (i = 0 ; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++) {
+		/* Each 32-bit entry packs B in bits 0-9, G in bits 10-19 and
+		 * R in bits 20-29.
+		 */
+ val = tbl[i].b & D3_LUT_ENTRY_MASK;
+ val |= (tbl[i].g & D3_LUT_ENTRY_MASK) <<
+ D3_LUT_ENTRY_G_SHIFT;
+ val |= (tbl[i].r & D3_LUT_ENTRY_MASK) <<
+ D3_LUT_ENTRY_R_SHIFT;
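+		/*
+		 * The table is interleaved across the four RAM banks: entry i
+		 * goes to bank (i % 4) at word (i / 4), so tbl_index is the
+		 * word index converted to a byte offset.
+		 */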
+ bnk_index = i % 4;
+ tbl_index = i >> 2;
+ tbl_index <<= 2;
+ if (bnk_index == 0)
+ w_ip_table(isp5_base_addr, val,
+ tbl_index + D3L_TB0_START_ADDR);
+ else if (bnk_index == 1)
+ w_ip_table(isp5_base_addr, val,
+ tbl_index + D3L_TB1_START_ADDR);
+ else if (bnk_index == 2)
+ w_ip_table(isp5_base_addr, val,
+ tbl_index + D3L_TB2_START_ADDR);
+ else
+ w_ip_table(isp5_base_addr, val,
+ tbl_index + D3L_TB3_START_ADDR);
+ }
+}
+
+/* Luminance adjustment */
+void
+ipipe_set_lum_adj_regs(void *__iomem base_addr, struct ipipe_lum_adj *lum_adj)
+{
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+
+ /* combine fields of YUV_ADJ to set brightness and contrast */
+ val = lum_adj->contrast << LUM_ADJ_CONTR_SHIFT |
+ lum_adj->brightness << LUM_ADJ_BRIGHT_SHIFT;
+ regw_ip(base_addr, val, YUV_ADJ);
+}
+
+#define IPIPE_S12Q8(decimal, integer) \
+	((((decimal) & 0xff) | (((integer) & 0xf) << 8)))
+
+void ipipe_set_rgb2ycbcr_regs(void *__iomem base_addr,
+ struct vpfe_ipipe_rgb2yuv *yuv)
+{
+ u32 val;
+
+	/* coefficients are S12Q8 */
+ ipipe_clock_enable(base_addr);
+ val = IPIPE_S12Q8(yuv->coef_ry.decimal, yuv->coef_ry.integer);
+ regw_ip(base_addr, val, YUV_MUL_RY);
+ val = IPIPE_S12Q8(yuv->coef_gy.decimal, yuv->coef_gy.integer);
+ regw_ip(base_addr, val, YUV_MUL_GY);
+ val = IPIPE_S12Q8(yuv->coef_by.decimal, yuv->coef_by.integer);
+ regw_ip(base_addr, val, YUV_MUL_BY);
+ val = IPIPE_S12Q8(yuv->coef_rcb.decimal, yuv->coef_rcb.integer);
+ regw_ip(base_addr, val, YUV_MUL_RCB);
+ val = IPIPE_S12Q8(yuv->coef_gcb.decimal, yuv->coef_gcb.integer);
+ regw_ip(base_addr, val, YUV_MUL_GCB);
+ val = IPIPE_S12Q8(yuv->coef_bcb.decimal, yuv->coef_bcb.integer);
+ regw_ip(base_addr, val, YUV_MUL_BCB);
+ val = IPIPE_S12Q8(yuv->coef_rcr.decimal, yuv->coef_rcr.integer);
+ regw_ip(base_addr, val, YUV_MUL_RCR);
+ val = IPIPE_S12Q8(yuv->coef_gcr.decimal, yuv->coef_gcr.integer);
+ regw_ip(base_addr, val, YUV_MUL_GCR);
+ val = IPIPE_S12Q8(yuv->coef_bcr.decimal, yuv->coef_bcr.integer);
+ regw_ip(base_addr, val, YUV_MUL_BCR);
+ regw_ip(base_addr, yuv->out_ofst_y & RGB2YCBCR_OFST_MASK, YUV_OFT_Y);
+ regw_ip(base_addr, yuv->out_ofst_cb & RGB2YCBCR_OFST_MASK, YUV_OFT_CB);
+ regw_ip(base_addr, yuv->out_ofst_cr & RGB2YCBCR_OFST_MASK, YUV_OFT_CR);
+}
+
+/* YUV 422 conversion */
+void
+ipipe_set_yuv422_conv_regs(void *__iomem base_addr,
+ struct vpfe_ipipe_yuv422_conv *conv)
+{
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+
+ /* Combine all the fields to make YUV_PHS register of IPIPE */
+ val = (conv->chrom_pos << 0) | (conv->en_chrom_lpf << 1);
+ regw_ip(base_addr, val, YUV_PHS);
+}
+
+void
+ipipe_set_gbce_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ struct vpfe_ipipe_gbce *gbce)
+{
+ unsigned int count;
+ u32 mask = GBCE_Y_VAL_MASK;
+
+ if (gbce->type == VPFE_IPIPE_GBCE_GAIN_TBL)
+ mask = GBCE_GAIN_VAL_MASK;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, gbce->en & 1, GBCE_EN);
+
+ if (!gbce->en)
+ return;
+
+ regw_ip(base_addr, gbce->type, GBCE_TYP);
+
+ if (!gbce->table)
+ return;
+
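+	/* Two LUT entries are packed per 32-bit word, hence the stride of 2
+	 * and the word address of (count / 2) * 4 below.
+	 */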
+ for (count = 0; count < VPFE_IPIPE_MAX_SIZE_GBCE_LUT ; count += 2)
+ w_ip_table(isp5_base_addr, ((gbce->table[count + 1] & mask) <<
+ GBCE_ENTRY_SHIFT) | (gbce->table[count] & mask),
+ ((count/2) << 2) + GBCE_TB_START_ADDR);
+}
+
+void
+ipipe_set_ee_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ struct vpfe_ipipe_yee *ee)
+{
+ unsigned int count;
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, ee->en, YEE_EN);
+
+ if (!ee->en)
+ return;
+
+ val = ee->en_halo_red & 1;
+ val |= ee->merge_meth << YEE_HALO_RED_EN_SHIFT;
+ regw_ip(base_addr, val, YEE_TYP);
+
+ regw_ip(base_addr, ee->hpf_shft, YEE_SHF);
+ regw_ip(base_addr, ee->hpf_coef_00 & YEE_COEF_MASK, YEE_MUL_00);
+ regw_ip(base_addr, ee->hpf_coef_01 & YEE_COEF_MASK, YEE_MUL_01);
+ regw_ip(base_addr, ee->hpf_coef_02 & YEE_COEF_MASK, YEE_MUL_02);
+ regw_ip(base_addr, ee->hpf_coef_10 & YEE_COEF_MASK, YEE_MUL_10);
+ regw_ip(base_addr, ee->hpf_coef_11 & YEE_COEF_MASK, YEE_MUL_11);
+ regw_ip(base_addr, ee->hpf_coef_12 & YEE_COEF_MASK, YEE_MUL_12);
+ regw_ip(base_addr, ee->hpf_coef_20 & YEE_COEF_MASK, YEE_MUL_20);
+ regw_ip(base_addr, ee->hpf_coef_21 & YEE_COEF_MASK, YEE_MUL_21);
+ regw_ip(base_addr, ee->hpf_coef_22 & YEE_COEF_MASK, YEE_MUL_22);
+ regw_ip(base_addr, ee->yee_thr & YEE_THR_MASK, YEE_THR);
+ regw_ip(base_addr, ee->es_gain & YEE_ES_GAIN_MASK, YEE_E_GAN);
+ regw_ip(base_addr, ee->es_thr1 & YEE_ES_THR1_MASK, YEE_E_THR1);
+ regw_ip(base_addr, ee->es_thr2 & YEE_THR_MASK, YEE_E_THR2);
+ regw_ip(base_addr, ee->es_gain_grad & YEE_THR_MASK, YEE_G_GAN);
+ regw_ip(base_addr, ee->es_ofst_grad & YEE_THR_MASK, YEE_G_OFT);
+
+ if (ee->table == NULL)
+ return;
+
+ for (count = 0; count < VPFE_IPIPE_MAX_SIZE_YEE_LUT; count += 2)
+ w_ip_table(isp5_base_addr, ((ee->table[count + 1] &
+ YEE_ENTRY_MASK) << YEE_ENTRY_SHIFT) |
+ (ee->table[count] & YEE_ENTRY_MASK),
+ ((count/2) << 2) + YEE_TB_START_ADDR);
+}
+
+/* Chromatic Artifact Correction. CAR */
+static void ipipe_set_mf(void *__iomem base_addr)
+{
+ /* typ to dynamic switch */
+ regw_ip(base_addr, VPFE_IPIPE_CAR_DYN_SWITCH, CAR_TYP);
+ /* Set SW0 to maximum */
+ regw_ip(base_addr, CAR_MF_THR, CAR_SW);
+}
+
+static void
+ipipe_set_gain_ctrl(void *__iomem base_addr, struct vpfe_ipipe_car *car)
+{
+ regw_ip(base_addr, VPFE_IPIPE_CAR_CHR_GAIN_CTRL, CAR_TYP);
+ regw_ip(base_addr, car->hpf, CAR_HPF_TYP);
+ regw_ip(base_addr, car->hpf_shft & CAR_HPF_SHIFT_MASK, CAR_HPF_SHF);
+ regw_ip(base_addr, car->hpf_thr, CAR_HPF_THR);
+ regw_ip(base_addr, car->gain1.gain, CAR_GN1_GAN);
+ regw_ip(base_addr, car->gain1.shft & CAR_GAIN1_SHFT_MASK, CAR_GN1_SHF);
+ regw_ip(base_addr, car->gain1.gain_min & CAR_GAIN_MIN_MASK,
+ CAR_GN1_MIN);
+ regw_ip(base_addr, car->gain2.gain, CAR_GN2_GAN);
+ regw_ip(base_addr, car->gain2.shft & CAR_GAIN2_SHFT_MASK, CAR_GN2_SHF);
+ regw_ip(base_addr, car->gain2.gain_min & CAR_GAIN_MIN_MASK,
+ CAR_GN2_MIN);
+}
+
+void ipipe_set_car_regs(void *__iomem base_addr, struct vpfe_ipipe_car *car)
+{
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, car->en, CAR_EN);
+
+ if (!car->en)
+ return;
+
+ switch (car->meth) {
+ case VPFE_IPIPE_CAR_MED_FLTR:
+ ipipe_set_mf(base_addr);
+ break;
+
+ case VPFE_IPIPE_CAR_CHR_GAIN_CTRL:
+ ipipe_set_gain_ctrl(base_addr, car);
+ break;
+
+ default:
+ /* Dynamic switch between MF and Gain Ctrl. */
+ ipipe_set_mf(base_addr);
+ ipipe_set_gain_ctrl(base_addr, car);
+		/* Set the threshold for switching between the two.
+		 * This overwrites the MF SW0 value.
+		 */
+ regw_ip(base_addr, VPFE_IPIPE_CAR_DYN_SWITCH, CAR_TYP);
+ val = car->sw1;
+ val <<= CAR_SW1_SHIFT;
+ val |= car->sw0;
+ regw_ip(base_addr, val, CAR_SW);
+ }
+}
+
+/* Chromatic Gain Suppression */
+void ipipe_set_cgs_regs(void *__iomem base_addr, struct vpfe_ipipe_cgs *cgs)
+{
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, cgs->en, CGS_EN);
+
+ if (!cgs->en)
+ return;
+
+ /* Set the bright side parameters */
+ regw_ip(base_addr, cgs->h_thr, CGS_GN1_H_THR);
+ regw_ip(base_addr, cgs->h_slope, CGS_GN1_H_GAN);
+ regw_ip(base_addr, cgs->h_shft & CAR_SHIFT_MASK, CGS_GN1_H_SHF);
+ regw_ip(base_addr, cgs->h_min, CGS_GN1_H_MIN);
+}
+
+void rsz_src_enable(void *__iomem rsz_base, int enable)
+{
+ regw_rsz(rsz_base, enable, RSZ_SRC_EN);
+}
+
+int rsz_enable(void *__iomem rsz_base, int rsz_id, int enable)
+{
+ if (rsz_id == RSZ_A) {
+ regw_rsz(rsz_base, enable, RSZ_EN_A);
+		/* RSZ_A is always enabled; RSZ_B is enabled only on request
+		 * from the application, so RSZ_SRC_EN is toggled along with
+		 * RSZ_A.
+		 */
+ regw_rsz(rsz_base, enable, RSZ_SRC_EN);
+ } else if (rsz_id == RSZ_B) {
+ regw_rsz(rsz_base, enable, RSZ_EN_B);
+ } else {
+ BUG();
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
new file mode 100644
index 000000000000..010fdb247faf
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_IPIPE_HW_H
+#define _DAVINCI_VPFE_DM365_IPIPE_HW_H
+
+#include "vpfe_mc_capture.h"
+
+#define SET_LOW_ADDR 0x0000ffff
+#define SET_HIGH_ADDR 0xffff0000
+
+/* Below are the internal tables */
+#define DPC_TB0_START_ADDR 0x8000
+#define DPC_TB1_START_ADDR 0x8400
+
+#define GAMMA_R_START_ADDR 0xa800
+#define GAMMA_G_START_ADDR 0xb000
+#define GAMMA_B_START_ADDR 0xb800
+
+/* RAM table addresses for edge enhancement correction*/
+#define YEE_TB_START_ADDR 0x8800
+
+/* RAM table address for GBC LUT */
+#define GBCE_TB_START_ADDR 0x9000
+
+/* RAM table for 3D NF LUT */
+#define D3L_TB0_START_ADDR 0x9800
+#define D3L_TB1_START_ADDR 0x9c00
+#define D3L_TB2_START_ADDR 0xa000
+#define D3L_TB3_START_ADDR 0xa400
+
+/* IPIPE Register Offsets from the base address */
+#define IPIPE_SRC_EN 0x0000
+#define IPIPE_SRC_MODE 0x0004
+#define IPIPE_SRC_FMT 0x0008
+#define IPIPE_SRC_COL 0x000c
+#define IPIPE_SRC_VPS 0x0010
+#define IPIPE_SRC_VSZ 0x0014
+#define IPIPE_SRC_HPS 0x0018
+#define IPIPE_SRC_HSZ 0x001c
+
+#define IPIPE_SEL_SBU 0x0020
+
+#define IPIPE_DMA_STA 0x0024
+#define IPIPE_GCK_MMR 0x0028
+#define IPIPE_GCK_PIX 0x002c
+#define IPIPE_RESERVED0 0x0030
+
+/* Defect Correction */
+#define DPC_LUT_EN 0x0034
+#define DPC_LUT_SEL 0x0038
+#define DPC_LUT_ADR 0x003c
+#define DPC_LUT_SIZ 0x0040
+#define DPC_OTF_EN 0x0044
+#define DPC_OTF_TYP 0x0048
+#define DPC_OTF_2D_THR_R 0x004c
+#define DPC_OTF_2D_THR_GR 0x0050
+#define DPC_OTF_2D_THR_GB 0x0054
+#define DPC_OTF_2D_THR_B 0x0058
+#define DPC_OTF_2C_THR_R 0x005c
+#define DPC_OTF_2C_THR_GR 0x0060
+#define DPC_OTF_2C_THR_GB 0x0064
+#define DPC_OTF_2C_THR_B 0x0068
+#define DPC_OTF_3_SHF 0x006c
+#define DPC_OTF_3D_THR 0x0070
+#define DPC_OTF_3D_SLP 0x0074
+#define DPC_OTF_3D_MIN 0x0078
+#define DPC_OTF_3D_MAX 0x007c
+#define DPC_OTF_3C_THR 0x0080
+#define DPC_OTF_3C_SLP 0x0084
+#define DPC_OTF_3C_MIN 0x0088
+#define DPC_OTF_3C_MAX 0x008c
+
+/* Lens Shading Correction */
+#define LSC_VOFT 0x90
+#define LSC_VA2 0x94
+#define LSC_VA1 0x98
+#define LSC_VS 0x9c
+#define LSC_HOFT 0xa0
+#define LSC_HA2 0xa4
+#define LSC_HA1 0xa8
+#define LSC_HS 0xac
+#define LSC_GAIN_R 0xb0
+#define LSC_GAIN_GR 0xb4
+#define LSC_GAIN_GB 0xb8
+#define LSC_GAIN_B 0xbc
+#define LSC_OFT_R 0xc0
+#define LSC_OFT_GR 0xc4
+#define LSC_OFT_GB 0xc8
+#define LSC_OFT_B 0xcc
+#define LSC_SHF 0xd0
+#define LSC_MAX 0xd4
+
+/* Noise Filter 1. Offsets from the start address given below */
+#define D2F_1ST 0xd8
+#define D2F_EN 0x0
+#define D2F_TYP 0x4
+#define D2F_THR 0x8
+#define D2F_STR 0x28
+#define D2F_SPR 0x48
+#define D2F_EDG_MIN 0x68
+#define D2F_EDG_MAX 0x6c
+
+/* Noise Filter 2 */
+#define D2F_2ND 0x148
+
+/* GIC */
+#define GIC_EN 0x1b8
+#define GIC_TYP 0x1bc
+#define GIC_GAN 0x1c0
+#define GIC_NFGAN 0x1c4
+#define GIC_THR 0x1c8
+#define GIC_SLP 0x1cc
+
+/* White Balance */
+#define WB2_OFT_R 0x1d0
+#define WB2_OFT_GR 0x1d4
+#define WB2_OFT_GB 0x1d8
+#define WB2_OFT_B 0x1dc
+#define WB2_WGN_R 0x1e0
+#define WB2_WGN_GR 0x1e4
+#define WB2_WGN_GB 0x1e8
+#define WB2_WGN_B 0x1ec
+
+/* CFA interpolation */
+#define CFA_MODE 0x1f0
+#define CFA_2DIR_HPF_THR 0x1f4
+#define CFA_2DIR_HPF_SLP 0x1f8
+#define CFA_2DIR_MIX_THR 0x1fc
+#define CFA_2DIR_MIX_SLP 0x200
+#define CFA_2DIR_DIR_THR 0x204
+#define CFA_2DIR_DIR_SLP 0x208
+#define CFA_2DIR_NDWT 0x20c
+#define CFA_MONO_HUE_FRA 0x210
+#define CFA_MONO_EDG_THR 0x214
+#define CFA_MONO_THR_MIN 0x218
+#define CFA_MONO_THR_SLP 0x21c
+#define CFA_MONO_SLP_MIN 0x220
+#define CFA_MONO_SLP_SLP 0x224
+#define CFA_MONO_LPWT 0x228
+
+/* RGB to RGB conversion - 1st */
+#define RGB1_MUL_BASE 0x22c
+/* Offsets from base */
+#define RGB_MUL_RR 0x0
+#define RGB_MUL_GR 0x4
+#define RGB_MUL_BR 0x8
+#define RGB_MUL_RG 0xc
+#define RGB_MUL_GG 0x10
+#define RGB_MUL_BG 0x14
+#define RGB_MUL_RB 0x18
+#define RGB_MUL_GB 0x1c
+#define RGB_MUL_BB 0x20
+#define RGB_OFT_OR 0x24
+#define RGB_OFT_OG 0x28
+#define RGB_OFT_OB 0x2c
+
+/* Gamma */
+#define GMM_CFG 0x25c
+
+/* RGB to RGB conversion - 2nd */
+#define RGB2_MUL_BASE 0x260
+
+/* 3D LUT */
+#define D3LUT_EN 0x290
+
+/* RGB to YUV(YCbCr) conversion */
+#define YUV_ADJ 0x294
+#define YUV_MUL_RY 0x298
+#define YUV_MUL_GY 0x29c
+#define YUV_MUL_BY 0x2a0
+#define YUV_MUL_RCB 0x2a4
+#define YUV_MUL_GCB 0x2a8
+#define YUV_MUL_BCB 0x2ac
+#define YUV_MUL_RCR 0x2b0
+#define YUV_MUL_GCR 0x2b4
+#define YUV_MUL_BCR 0x2b8
+#define YUV_OFT_Y 0x2bc
+#define YUV_OFT_CB 0x2c0
+#define YUV_OFT_CR 0x2c4
+#define YUV_PHS 0x2c8
+
+/* Global Brightness and Contrast */
+#define GBCE_EN 0x2cc
+#define GBCE_TYP 0x2d0
+
+/* Edge Enhancer */
+#define YEE_EN 0x2d4
+#define YEE_TYP 0x2d8
+#define YEE_SHF 0x2dc
+#define YEE_MUL_00 0x2e0
+#define YEE_MUL_01 0x2e4
+#define YEE_MUL_02 0x2e8
+#define YEE_MUL_10 0x2ec
+#define YEE_MUL_11 0x2f0
+#define YEE_MUL_12 0x2f4
+#define YEE_MUL_20 0x2f8
+#define YEE_MUL_21 0x2fc
+#define YEE_MUL_22 0x300
+#define YEE_THR 0x304
+#define YEE_E_GAN 0x308
+#define YEE_E_THR1 0x30c
+#define YEE_E_THR2 0x310
+#define YEE_G_GAN 0x314
+#define YEE_G_OFT 0x318
+
+/* Chroma Artifact Reduction */
+#define CAR_EN 0x31c
+#define CAR_TYP 0x320
+#define CAR_SW 0x324
+#define CAR_HPF_TYP 0x328
+#define CAR_HPF_SHF 0x32c
+#define CAR_HPF_THR 0x330
+#define CAR_GN1_GAN 0x334
+#define CAR_GN1_SHF 0x338
+#define CAR_GN1_MIN 0x33c
+#define CAR_GN2_GAN 0x340
+#define CAR_GN2_SHF 0x344
+#define CAR_GN2_MIN 0x348
+
+/* Chroma Gain Suppression */
+#define CGS_EN 0x34c
+#define CGS_GN1_L_THR 0x350
+#define CGS_GN1_L_GAN 0x354
+#define CGS_GN1_L_SHF 0x358
+#define CGS_GN1_L_MIN 0x35c
+#define CGS_GN1_H_THR 0x360
+#define CGS_GN1_H_GAN 0x364
+#define CGS_GN1_H_SHF 0x368
+#define CGS_GN1_H_MIN 0x36c
+#define CGS_GN2_L_THR 0x370
+#define CGS_GN2_L_GAN 0x374
+#define CGS_GN2_L_SHF 0x378
+#define CGS_GN2_L_MIN 0x37c
+
+/* Resizer */
+#define RSZ_SRC_EN 0x0
+#define RSZ_SRC_MODE 0x4
+#define RSZ_SRC_FMT0 0x8
+#define RSZ_SRC_FMT1 0xc
+#define RSZ_SRC_VPS 0x10
+#define RSZ_SRC_VSZ 0x14
+#define RSZ_SRC_HPS 0x18
+#define RSZ_SRC_HSZ 0x1c
+#define RSZ_DMA_RZA 0x20
+#define RSZ_DMA_RZB 0x24
+#define RSZ_DMA_STA 0x28
+#define RSZ_GCK_MMR 0x2c
+#define RSZ_RESERVED0 0x30
+#define RSZ_GCK_SDR 0x34
+#define RSZ_IRQ_RZA 0x38
+#define RSZ_IRQ_RZB 0x3c
+#define RSZ_YUV_Y_MIN 0x40
+#define RSZ_YUV_Y_MAX 0x44
+#define RSZ_YUV_C_MIN 0x48
+#define RSZ_YUV_C_MAX 0x4c
+#define RSZ_YUV_PHS 0x50
+#define RSZ_SEQ 0x54
+
+/* Resizer Rescale Parameters */
+#define RSZ_EN_A 0x58
+#define RSZ_EN_B 0xe8
+/*
+ * Offsets of the registers below, to be added to the base register of
+ * either RSZ0 or RSZ1
+ */
+#define RSZ_MODE 0x4
+#define RSZ_420 0x8
+#define RSZ_I_VPS 0xc
+#define RSZ_I_HPS 0x10
+#define RSZ_O_VSZ 0x14
+#define RSZ_O_HSZ 0x18
+#define RSZ_V_PHS_Y 0x1c
+#define RSZ_V_PHS_C 0x20
+#define RSZ_V_DIF 0x24
+#define RSZ_V_TYP 0x28
+#define RSZ_V_LPF 0x2c
+#define RSZ_H_PHS 0x30
+#define RSZ_H_PHS_ADJ 0x34
+#define RSZ_H_DIF 0x38
+#define RSZ_H_TYP 0x3c
+#define RSZ_H_LPF 0x40
+#define RSZ_DWN_EN 0x44
+#define RSZ_DWN_AV 0x48
+
+/* Resizer RGB Conversion Parameters */
+#define RSZ_RGB_EN 0x4c
+#define RSZ_RGB_TYP 0x50
+#define RSZ_RGB_BLD 0x54
+
+/* Resizer External Memory Parameters */
+#define RSZ_SDR_Y_BAD_H 0x58
+#define RSZ_SDR_Y_BAD_L 0x5c
+#define RSZ_SDR_Y_SAD_H 0x60
+#define RSZ_SDR_Y_SAD_L 0x64
+#define RSZ_SDR_Y_OFT 0x68
+#define RSZ_SDR_Y_PTR_S 0x6c
+#define RSZ_SDR_Y_PTR_E 0x70
+#define RSZ_SDR_C_BAD_H 0x74
+#define RSZ_SDR_C_BAD_L 0x78
+#define RSZ_SDR_C_SAD_H 0x7c
+#define RSZ_SDR_C_SAD_L 0x80
+#define RSZ_SDR_C_OFT 0x84
+#define RSZ_SDR_C_PTR_S 0x88
+#define RSZ_SDR_C_PTR_E 0x8c
+
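+/*
+ * Illustrative sketch (not functional code): a per-resizer register is
+ * reached by adding its offset above to the base register of resizer A
+ * or resizer B, e.g.
+ *
+ *	regw_rsz(rsz_base, dif, RSZ_EN_A + RSZ_H_DIF);
+ *	regw_rsz(rsz_base, dif, RSZ_EN_B + RSZ_H_DIF);
+ *
+ * This assumes RSZ_EN_A/RSZ_EN_B are the RSZ0/RSZ1 bases referred to in
+ * the comment above the offset list.
+ */
+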
+/* Macro for resizer */
+#define RSZ_YUV_Y_MIN 0x40
+#define RSZ_YUV_Y_MAX 0x44
+#define RSZ_YUV_C_MIN 0x48
+#define RSZ_YUV_C_MAX 0x4c
+
+#define IPIPE_GCK_MMR_DEFAULT 1
+#define IPIPE_GCK_PIX_DEFAULT 0xe
+#define RSZ_GCK_MMR_DEFAULT 1
+#define RSZ_GCK_SDR_DEFAULT 1
+
+/* LUTDPC */
+#define LUTDPC_TBL_256_EN 0
+#define LUTDPC_INF_TBL_EN 1
+#define LUT_DPC_START_ADDR 0
+#define LUT_DPC_H_POS_MASK 0x1fff
+#define LUT_DPC_V_POS_MASK 0x1fff
+#define LUT_DPC_V_POS_SHIFT 13
+#define LUT_DPC_CORR_METH_SHIFT 26
+#define LUT_DPC_MAX_SIZE 256
+#define LUT_DPC_SIZE_MASK 0x3ff
+
+/* OTFDPC */
+#define OTFDPC_DPC2_THR_MASK 0xfff
+#define OTF_DET_METHOD_SHIFT 1
+#define OTF_DPC3_0_SHF_MASK 3
+#define OTF_DPC3_0_THR_SHIFT 6
+#define OTF_DPC3_0_THR_MASK 0x3f
+#define OTF_DPC3_0_SLP_MASK 0x3f
+#define OTF_DPC3_0_DET_MASK 0xfff
+#define OTF_DPC3_0_CORR_MASK 0xfff
+
+/* NF (D2F) */
+#define D2F_SPR_VAL_MASK 0x1f
+#define D2F_SPR_VAL_SHIFT 0
+#define D2F_SHFT_VAL_MASK 3
+#define D2F_SHFT_VAL_SHIFT 5
+#define D2F_SAMPLE_METH_SHIFT 7
+#define D2F_APPLY_LSC_GAIN_SHIFT 8
+#define D2F_USE_SPR_REG_VAL 0
+#define D2F_STR_VAL_MASK 0x1f
+#define D2F_THR_VAL_MASK 0x3ff
+#define D2F_EDGE_DET_THR_MASK 0x7ff
+
+/* Green Imbalance Correction */
+#define GIC_TYP_SHIFT 0
+#define GIC_THR_SEL_SHIFT 1
+#define GIC_APPLY_LSC_GAIN_SHIFT 2
+#define GIC_GAIN_MASK 0xff
+#define GIC_THR_MASK 0xfff
+#define GIC_SLOPE_MASK 0xfff
+#define GIC_NFGAN_INT_MASK 7
+#define GIC_NFGAN_DECI_MASK 0x1f
+
+/* WB */
+#define WB_OFFSET_MASK 0xfff
+#define WB_GAIN_INT_MASK 0xf
+#define WB_GAIN_DECI_MASK 0x1ff
+
+/* CFA */
+#define CFA_HPF_THR_2DIR_MASK 0x1fff
+#define CFA_HPF_SLOPE_2DIR_MASK 0x3ff
+#define CFA_HPF_MIX_THR_2DIR_MASK 0x1fff
+#define CFA_HPF_MIX_SLP_2DIR_MASK 0x3ff
+#define CFA_DIR_THR_2DIR_MASK 0x3ff
+#define CFA_DIR_SLP_2DIR_MASK 0x7f
+#define CFA_ND_WT_2DIR_MASK 0x3f
+#define CFA_DAA_HUE_FRA_MASK 0x3f
+#define CFA_DAA_EDG_THR_MASK 0xff
+#define CFA_DAA_THR_MIN_MASK 0x3ff
+#define CFA_DAA_THR_SLP_MASK 0x3ff
+#define CFA_DAA_SLP_MIN_MASK 0x3ff
+#define CFA_DAA_SLP_SLP_MASK 0x3ff
+#define CFA_DAA_LP_WT_MASK 0x3f
+
+/* RGB2RGB */
+#define RGB2RGB_1_OFST_MASK 0x1fff
+#define RGB2RGB_1_GAIN_INT_MASK 0xf
+#define RGB2RGB_GAIN_DECI_MASK 0xff
+#define RGB2RGB_2_OFST_MASK 0x7ff
+#define RGB2RGB_2_GAIN_INT_MASK 0x7
+
+/* Gamma */
+#define GAMMA_BYPR_SHIFT 0
+#define GAMMA_BYPG_SHIFT 1
+#define GAMMA_BYPB_SHIFT 2
+#define GAMMA_TBL_SEL_SHIFT 4
+#define GAMMA_TBL_SIZE_SHIFT 5
+#define GAMMA_MASK 0x3ff
+#define GAMMA_SHIFT 10
+
+/* 3D LUT */
+#define D3_LUT_ENTRY_MASK 0x3ff
+#define D3_LUT_ENTRY_R_SHIFT 20
+#define D3_LUT_ENTRY_G_SHIFT 10
+#define D3_LUT_ENTRY_B_SHIFT 0
+
+/* Luminance adjustment */
+#define LUM_ADJ_CONTR_SHIFT 0
+#define LUM_ADJ_BRIGHT_SHIFT 8
+
+/* RGB2YCbCr */
+#define RGB2YCBCR_OFST_MASK 0x7ff
+#define RGB2YCBCR_COEF_INT_MASK 0xf
+#define RGB2YCBCR_COEF_DECI_MASK 0xff
+
+/* GBCE */
+#define GBCE_Y_VAL_MASK 0xff
+#define GBCE_GAIN_VAL_MASK 0x3ff
+#define GBCE_ENTRY_SHIFT 10
+
+/* Edge Enhancements */
+#define YEE_HALO_RED_EN_SHIFT 1
+#define YEE_HPF_SHIFT_MASK 0xf
+#define YEE_COEF_MASK 0x3ff
+#define YEE_THR_MASK 0x3f
+#define YEE_ES_GAIN_MASK 0xfff
+#define YEE_ES_THR1_MASK 0xfff
+#define YEE_ENTRY_SHIFT 9
+#define YEE_ENTRY_MASK 0x1ff
+
+/* CAR */
+#define CAR_MF_THR 0xff
+#define CAR_SW1_SHIFT 8
+#define CAR_GAIN1_SHFT_MASK 7
+#define CAR_GAIN_MIN_MASK 0x1ff
+#define CAR_GAIN2_SHFT_MASK 0xf
+#define CAR_HPF_SHIFT_MASK 3
+
+/* CGS */
+#define CAR_SHIFT_MASK 3
+
+/* Resizer */
+#define RSZ_BYPASS_SHIFT 1
+#define RSZ_SRC_IMG_FMT_SHIFT 1
+#define RSZ_SRC_Y_C_SEL_SHIFT 2
+#define IPIPE_RSZ_VPS_MASK 0xffff
+#define IPIPE_RSZ_HPS_MASK 0xffff
+#define IPIPE_RSZ_VSZ_MASK 0x1fff
+#define IPIPE_RSZ_HSZ_MASK 0x1fff
+#define RSZ_HPS_MASK 0x1fff
+#define RSZ_VPS_MASK 0x1fff
+#define RSZ_O_HSZ_MASK 0x1fff
+#define RSZ_O_VSZ_MASK 0x1fff
+#define RSZ_V_PHS_MASK 0x3fff
+#define RSZ_V_DIF_MASK 0x3fff
+
+#define RSZA_H_FLIP_SHIFT 0
+#define RSZA_V_FLIP_SHIFT 1
+#define RSZB_H_FLIP_SHIFT 2
+#define RSZB_V_FLIP_SHIFT 3
+#define RSZ_A 0
+#define RSZ_B 1
+#define RSZ_CEN_SHIFT 1
+#define RSZ_YEN_SHIFT 0
+#define RSZ_TYP_Y_SHIFT 0
+#define RSZ_TYP_C_SHIFT 1
+#define RSZ_LPF_INT_MASK 0x3f
+#define RSZ_LPF_INT_C_SHIFT 6
+#define RSZ_H_PHS_MASK 0x3fff
+#define RSZ_H_DIF_MASK 0x3fff
+#define RSZ_DIFF_DOWN_THR 256
+#define RSZ_DWN_SCALE_AV_SZ_V_SHIFT 3
+#define RSZ_DWN_SCALE_AV_SZ_MASK 7
+#define RSZ_RGB_MSK1_SHIFT 2
+#define RSZ_RGB_MSK0_SHIFT 1
+#define RSZ_RGB_TYP_SHIFT 0
+#define RSZ_RGB_ALPHA_MASK 0xff
+
+static inline u32 regr_ip(void *__iomem addr, u32 offset)
+{
+ return readl(addr + offset);
+}
+
+static inline void regw_ip(void *__iomem addr, u32 val, u32 offset)
+{
+ writel(val, addr + offset);
+}
+
+static inline u32 w_ip_table(void *__iomem addr, u32 val, u32 offset)
+{
+ writel(val, addr + offset);
+
+ return val;
+}
+
+static inline u32 regr_rsz(void *__iomem addr, u32 offset)
+{
+ return readl(addr + offset);
+}
+
+static inline u32 regw_rsz(void *__iomem addr, u32 val, u32 offset)
+{
+ writel(val, addr + offset);
+
+ return val;
+}
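+
+/*
+ * Illustrative sketch only (not referenced by the driver): program the
+ * IPIPE WB2 red gain through the helper above.  Placing the integer
+ * part at bit 9 is an assumption inferred from the 9-bit width of
+ * WB_GAIN_DECI_MASK, not a value taken from hardware documentation.
+ */
+static inline void regw_ip_wb_r_gain_sketch(void *__iomem addr,
+ u32 integer, u32 decimal)
+{
+ u32 gain = ((integer & WB_GAIN_INT_MASK) << 9) |
+ (decimal & WB_GAIN_DECI_MASK);
+
+ regw_ip(addr, gain, WB2_WGN_R);
+}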
+
+int config_ipipe_hw(struct vpfe_ipipe_device *ipipe);
+int resizer_set_outaddr(void *__iomem rsz_base, struct resizer_params *params,
+ int resize_no, unsigned int address);
+int rsz_enable(void *__iomem rsz_base, int rsz_id, int enable);
+void rsz_src_enable(void *__iomem rsz_base, int enable);
+void rsz_set_in_pix_format(unsigned char y_c);
+int config_rsz_hw(struct vpfe_resizer_device *resizer,
+ struct resizer_params *config);
+void ipipe_set_d2f_regs(void *__iomem base_addr, unsigned int id,
+ struct vpfe_ipipe_nf *noise_filter);
+void ipipe_set_rgb2rgb_regs(void *__iomem base_addr, unsigned int id,
+ struct vpfe_ipipe_rgb2rgb *rgb);
+void ipipe_set_yuv422_conv_regs(void *__iomem base_addr,
+ struct vpfe_ipipe_yuv422_conv *conv);
+void ipipe_set_lum_adj_regs(void *__iomem base_addr,
+ struct ipipe_lum_adj *lum_adj);
+void ipipe_set_rgb2ycbcr_regs(void *__iomem base_addr,
+ struct vpfe_ipipe_rgb2yuv *yuv);
+void ipipe_set_lutdpc_regs(void *__iomem base_addr,
+ void *__iomem isp5_base_addr, struct vpfe_ipipe_lutdpc *lutdpc);
+void ipipe_set_otfdpc_regs(void *__iomem base_addr,
+ struct vpfe_ipipe_otfdpc *otfdpc);
+void ipipe_set_3d_lut_regs(void *__iomem base_addr,
+ void *__iomem isp5_base_addr, struct vpfe_ipipe_3d_lut *lut_3d);
+void ipipe_set_gamma_regs(void *__iomem base_addr,
+ void *__iomem isp5_base_addr, struct vpfe_ipipe_gamma *gamma);
+void ipipe_set_ee_regs(void *__iomem base_addr,
+ void *__iomem isp5_base_addr, struct vpfe_ipipe_yee *ee);
+void ipipe_set_gbce_regs(void *__iomem base_addr,
+ void *__iomem isp5_base_addr, struct vpfe_ipipe_gbce *gbce);
+void ipipe_set_gic_regs(void *__iomem base_addr, struct vpfe_ipipe_gic *gic);
+void ipipe_set_cfa_regs(void *__iomem base_addr, struct vpfe_ipipe_cfa *cfa);
+void ipipe_set_car_regs(void *__iomem base_addr, struct vpfe_ipipe_car *car);
+void ipipe_set_cgs_regs(void *__iomem base_addr, struct vpfe_ipipe_cgs *cgs);
+void ipipe_set_wb_regs(void *__iomem base_addr, struct vpfe_ipipe_wb *wb);
+
+#endif /* _DAVINCI_VPFE_DM365_IPIPE_HW_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
new file mode 100644
index 000000000000..c8cae51d3d00
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -0,0 +1,1071 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#include "dm365_ipipeif.h"
+#include "vpfe_mc_capture.h"
+
+static const unsigned int ipipeif_input_fmts[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+ V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_UV8_1X8,
+ V4L2_MBUS_FMT_YDYUYDYV8_1X16,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+};
+
+static const unsigned int ipipeif_output_fmts[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+ V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_UV8_1X8,
+ V4L2_MBUS_FMT_YDYUYDYV8_1X16,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8,
+};
+
+static int
+ipipeif_get_pack_mode(enum v4l2_mbus_pixelcode in_pix_fmt)
+{
+ switch (in_pix_fmt) {
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case V4L2_MBUS_FMT_Y8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_UV8_1X8:
+ return IPIPEIF_5_1_PACK_8_BIT;
+
+ case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ return IPIPEIF_5_1_PACK_8_BIT_A_LAW;
+
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ return IPIPEIF_5_1_PACK_16_BIT;
+
+ case V4L2_MBUS_FMT_SBGGR12_1X12:
+ return IPIPEIF_5_1_PACK_12_BIT;
+
+ default:
+ return IPIPEIF_5_1_PACK_16_BIT;
+ }
+}
+
+static inline u32 ipipeif_read(void *addr, u32 offset)
+{
+ return readl(addr + offset);
+}
+
+static inline void ipipeif_write(u32 val, void *addr, u32 offset)
+{
+ writel(val, addr + offset);
+}
+
+static void ipipeif_config_dpc(void *addr, struct ipipeif_dpc *dpc)
+{
+ u32 val = 0;
+
+ if (dpc->en) {
+ val = (dpc->en & 1) << IPIPEIF_DPC2_EN_SHIFT;
+ val |= dpc->thr & IPIPEIF_DPC2_THR_MASK;
+ }
+ ipipeif_write(val, addr, IPIPEIF_DPC2);
+}
+
+#define IPIPEIF_MODE_CONTINUOUS 0
+#define IPIPEIF_MODE_ONE_SHOT 1
+
+static int get_oneshot_mode(enum ipipeif_input_entity input)
+{
+ if (input == IPIPEIF_INPUT_MEMORY)
+ return IPIPEIF_MODE_ONE_SHOT;
+ else if (input == IPIPEIF_INPUT_ISIF)
+ return IPIPEIF_MODE_CONTINUOUS;
+
+ return -EINVAL;
+}
+
+static int
+ipipeif_get_cfg_src1(struct vpfe_ipipeif_device *ipipeif)
+{
+ struct v4l2_mbus_framefmt *informat;
+
+ informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
+ if (ipipeif->input == IPIPEIF_INPUT_MEMORY &&
+ (informat->code == V4L2_MBUS_FMT_Y8_1X8 ||
+ informat->code == V4L2_MBUS_FMT_UV8_1X8))
+ return IPIPEIF_CCDC;
+
+ return IPIPEIF_SRC1_PARALLEL_PORT;
+}
+
+static int
+ipipeif_get_data_shift(struct vpfe_ipipeif_device *ipipeif)
+{
+ struct v4l2_mbus_framefmt *informat;
+
+ informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
+
+ switch (informat->code) {
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ return IPIPEIF_5_1_BITS11_0;
+
+ case V4L2_MBUS_FMT_Y8_1X8:
+ case V4L2_MBUS_FMT_UV8_1X8:
+ return IPIPEIF_5_1_BITS11_0;
+
+ default:
+ return IPIPEIF_5_1_BITS7_0;
+ }
+}
+
+static enum ipipeif_input_source
+ipipeif_get_source(struct vpfe_ipipeif_device *ipipeif)
+{
+ struct v4l2_mbus_framefmt *informat;
+
+ informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
+ if (ipipeif->input == IPIPEIF_INPUT_ISIF)
+ return IPIPEIF_CCDC;
+
+ if (informat->code == V4L2_MBUS_FMT_UYVY8_2X8)
+ return IPIPEIF_SDRAM_YUV;
+
+ return IPIPEIF_SDRAM_RAW;
+}
+
+void vpfe_ipipeif_ss_buffer_isr(struct vpfe_ipipeif_device *ipipeif)
+{
+ struct vpfe_video_device *video_in = &ipipeif->video_in;
+
+ if (ipipeif->input != IPIPEIF_INPUT_MEMORY)
+ return;
+
+ spin_lock(&video_in->dma_queue_lock);
+ vpfe_video_process_buffer_complete(video_in);
+ video_in->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
+ vpfe_video_schedule_next_buffer(video_in);
+ spin_unlock(&video_in->dma_queue_lock);
+}
+
+int vpfe_ipipeif_decimation_enabled(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
+
+ return ipipeif->config.decimation;
+}
+
+int vpfe_ipipeif_get_rsz(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
+
+ return ipipeif->config.rsz;
+}
+
+#define RD_DATA_15_2 0x7
+
+/*
+ * ipipeif_hw_setup() - This function sets up IPIPEIF
+ * @sd: pointer to v4l2 subdev structure
+ * return -EINVAL or zero on success
+ */
+static int ipipeif_hw_setup(struct v4l2_subdev *sd)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *informat, *outformat;
+ struct ipipeif_params params = ipipeif->config;
+ enum ipipeif_input_source ipipeif_source;
+ enum v4l2_mbus_pixelcode isif_port_if;
+ void *ipipeif_base_addr;
+ unsigned int val;
+ int data_shift;
+ int pack_mode;
+ int source1;
+ int mode;
+
+ ipipeif_base_addr = ipipeif->ipipeif_base_addr;
+
+ /* Enable clock to IPIPEIF and IPIPE */
+ vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
+
+ informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
+ outformat = &ipipeif->formats[IPIPEIF_PAD_SOURCE];
+
+ /* Combine all the fields to make CFG1 register of IPIPEIF */
+ mode = get_oneshot_mode(ipipeif->input);
+ if (mode < 0) {
+ pr_err("ipipeif: links setup required\n");
+ return -EINVAL;
+ }
+ val = mode << ONESHOT_SHIFT;
+
+ ipipeif_source = ipipeif_get_source(ipipeif);
+ val |= ipipeif_source << INPSRC_SHIFT;
+
+ val |= params.clock_select << CLKSEL_SHIFT;
+ val |= params.avg_filter << AVGFILT_SHIFT;
+ val |= params.decimation << DECIM_SHIFT;
+
+ pack_mode = ipipeif_get_pack_mode(informat->code);
+ val |= pack_mode << PACK8IN_SHIFT;
+
+ source1 = ipipeif_get_cfg_src1(ipipeif);
+ val |= source1 << INPSRC1_SHIFT;
+
+ data_shift = ipipeif_get_data_shift(ipipeif);
+ if (ipipeif_source != IPIPEIF_SDRAM_YUV)
+ val |= data_shift << DATASFT_SHIFT;
+ else
+ val &= ~(RD_DATA_15_2 << DATASFT_SHIFT);
+
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG1);
+
+ switch (ipipeif_source) {
+ case IPIPEIF_CCDC:
+ ipipeif_write(ipipeif->gain, ipipeif_base_addr, IPIPEIF_GAIN);
+ break;
+
+ case IPIPEIF_SDRAM_RAW:
+ case IPIPEIF_CCDC_DARKFM:
+ ipipeif_write(ipipeif->gain, ipipeif_base_addr, IPIPEIF_GAIN);
+ /* fall through */
+ case IPIPEIF_SDRAM_YUV:
+ val |= data_shift << DATASFT_SHIFT;
+ ipipeif_write(params.ppln, ipipeif_base_addr, IPIPEIF_PPLN);
+ ipipeif_write(params.lpfr, ipipeif_base_addr, IPIPEIF_LPFR);
+ ipipeif_write(informat->width, ipipeif_base_addr, IPIPEIF_HNUM);
+ ipipeif_write(informat->height,
+ ipipeif_base_addr, IPIPEIF_VNUM);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* check whether decimation is enabled */
+ if (params.decimation)
+ ipipeif_write(params.rsz, ipipeif_base_addr, IPIPEIF_RSZ);
+
+ /* Setup sync alignment and initial rsz position */
+ val = params.if_5_1.align_sync & 1;
+ val <<= IPIPEIF_INIRSZ_ALNSYNC_SHIFT;
+ val |= params.if_5_1.rsz_start & IPIPEIF_INIRSZ_MASK;
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_INIRSZ);
+ isif_port_if = informat->code;
+
+ if (isif_port_if == V4L2_MBUS_FMT_Y8_1X8)
+ isif_port_if = V4L2_MBUS_FMT_YUYV8_1X16;
+ else if (isif_port_if == V4L2_MBUS_FMT_UV8_1X8)
+ isif_port_if = V4L2_MBUS_FMT_SGRBG12_1X12;
+
+ /* Enable DPCM decompression */
+ switch (ipipeif_source) {
+ case IPIPEIF_SDRAM_RAW:
+ val = 0;
+ if (outformat->code == V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8) {
+ val = 1;
+ val |= (IPIPEIF_DPCM_8BIT_10BIT & 1) <<
+ IPIPEIF_DPCM_BITS_SHIFT;
+ val |= (ipipeif->dpcm_predictor & 1) <<
+ IPIPEIF_DPCM_PRED_SHIFT;
+ }
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_DPCM);
+
+ /* set DPC */
+ ipipeif_config_dpc(ipipeif_base_addr, &params.if_5_1.dpc);
+
+ ipipeif_write(params.if_5_1.clip,
+ ipipeif_base_addr, IPIPEIF_OCLIP);
+
+ /* fall through for SDRAM YUV mode */
+ /* configure CFG2 */
+ val = ipipeif_read(ipipeif_base_addr, IPIPEIF_CFG2);
+ switch (isif_port_if) {
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_Y8_1X8:
+ RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
+ SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
+ break;
+
+ default:
+ RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
+ RESETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
+ break;
+ }
+
+ case IPIPEIF_SDRAM_YUV:
+ /* Set clock divider */
+ if (params.clock_select == IPIPEIF_SDRAM_CLK) {
+ val = ipipeif_read(ipipeif_base_addr, IPIPEIF_CLKDIV);
+ val |= (params.if_5_1.clk_div.m - 1) <<
+ IPIPEIF_CLKDIV_M_SHIFT;
+ val |= (params.if_5_1.clk_div.n - 1);
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CLKDIV);
+ }
+ break;
+
+ case IPIPEIF_CCDC:
+ case IPIPEIF_CCDC_DARKFM:
+ /* set DPC */
+ ipipeif_config_dpc(ipipeif_base_addr, &params.if_5_1.dpc);
+
+ /* Set DF gain & threshold control */
+ val = 0;
+ if (params.if_5_1.df_gain_en) {
+ val = params.if_5_1.df_gain_thr &
+ IPIPEIF_DF_GAIN_THR_MASK;
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_DFSGTH);
+ val = (params.if_5_1.df_gain_en & 1) <<
+ IPIPEIF_DF_GAIN_EN_SHIFT;
+ val |= params.if_5_1.df_gain &
+ IPIPEIF_DF_GAIN_MASK;
+ }
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_DFSGVL);
+ /* configure CFG2 */
+ val = VPFE_PINPOL_POSITIVE << IPIPEIF_CFG2_HDPOL_SHIFT;
+ val |= VPFE_PINPOL_POSITIVE << IPIPEIF_CFG2_VDPOL_SHIFT;
+
+ switch (isif_port_if) {
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ case V4L2_MBUS_FMT_YUYV10_1X20:
+ RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
+ SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
+ break;
+
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_Y8_1X8:
+ case V4L2_MBUS_FMT_YUYV10_2X10:
+ SETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
+ SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
+ val |= IPIPEIF_CBCR_Y << IPIPEIF_CFG2_YUV8P_SHIFT;
+ break;
+
+ default:
+ /* Bayer */
+ ipipeif_write(params.if_5_1.clip, ipipeif_base_addr,
+ IPIPEIF_OCLIP);
+ }
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ipipeif_set_config(struct v4l2_subdev *sd, struct ipipeif_params *config)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct device *dev = ipipeif->subdev.v4l2_dev->dev;
+
+ if (!config) {
+ dev_err(dev, "Invalid configuration pointer\n");
+ return -EINVAL;
+ }
+
+ ipipeif->config.clock_select = config->clock_select;
+ ipipeif->config.ppln = config->ppln;
+ ipipeif->config.lpfr = config->lpfr;
+ ipipeif->config.rsz = config->rsz;
+ ipipeif->config.decimation = config->decimation;
+ if (ipipeif->config.decimation &&
+ (ipipeif->config.rsz < IPIPEIF_RSZ_MIN ||
+ ipipeif->config.rsz > IPIPEIF_RSZ_MAX)) {
+ dev_err(dev, "rsz range is %d to %d\n",
+ IPIPEIF_RSZ_MIN, IPIPEIF_RSZ_MAX);
+ return -EINVAL;
+ }
+
+ ipipeif->config.avg_filter = config->avg_filter;
+
+ ipipeif->config.if_5_1.df_gain_thr = config->if_5_1.df_gain_thr;
+ ipipeif->config.if_5_1.df_gain = config->if_5_1.df_gain;
+ ipipeif->config.if_5_1.df_gain_en = config->if_5_1.df_gain_en;
+
+ ipipeif->config.if_5_1.rsz_start = config->if_5_1.rsz_start;
+ ipipeif->config.if_5_1.align_sync = config->if_5_1.align_sync;
+ ipipeif->config.if_5_1.clip = config->if_5_1.clip;
+
+ ipipeif->config.if_5_1.dpc.en = config->if_5_1.dpc.en;
+ ipipeif->config.if_5_1.dpc.thr = config->if_5_1.dpc.thr;
+
+ ipipeif->config.if_5_1.clk_div.m = config->if_5_1.clk_div.m;
+ ipipeif->config.if_5_1.clk_div.n = config->if_5_1.clk_div.n;
+
+ return 0;
+}
+
+static int
+ipipeif_get_config(struct v4l2_subdev *sd, void __user *arg)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct ipipeif_params *config = (struct ipipeif_params *)arg;
+ struct device *dev = ipipeif->subdev.v4l2_dev->dev;
+
+ if (!arg) {
+ dev_err(dev, "Invalid configuration pointer\n");
+ return -EINVAL;
+ }
+
+ config->clock_select = ipipeif->config.clock_select;
+ config->ppln = ipipeif->config.ppln;
+ config->lpfr = ipipeif->config.lpfr;
+ config->rsz = ipipeif->config.rsz;
+ config->decimation = ipipeif->config.decimation;
+ config->avg_filter = ipipeif->config.avg_filter;
+
+ config->if_5_1.df_gain_thr = ipipeif->config.if_5_1.df_gain_thr;
+ config->if_5_1.df_gain = ipipeif->config.if_5_1.df_gain;
+ config->if_5_1.df_gain_en = ipipeif->config.if_5_1.df_gain_en;
+
+ config->if_5_1.rsz_start = ipipeif->config.if_5_1.rsz_start;
+ config->if_5_1.align_sync = ipipeif->config.if_5_1.align_sync;
+ config->if_5_1.clip = ipipeif->config.if_5_1.clip;
+
+ config->if_5_1.dpc.en = ipipeif->config.if_5_1.dpc.en;
+ config->if_5_1.dpc.thr = ipipeif->config.if_5_1.dpc.thr;
+
+ config->if_5_1.clk_div.m = ipipeif->config.if_5_1.clk_div.m;
+ config->if_5_1.clk_div.n = ipipeif->config.if_5_1.clk_div.n;
+
+ return 0;
+}
+
+/*
+ * ipipeif_ioctl() - Handle ipipeif module private ioctl's
+ * @sd: pointer to v4l2 subdev structure
+ * @cmd: configuration command
+ * @arg: configuration argument
+ */
+static long ipipeif_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct ipipeif_params *config = (struct ipipeif_params *)arg;
+ int ret = -ENOIOCTLCMD;
+
+ switch (cmd) {
+ case VIDIOC_VPFE_IPIPEIF_S_CONFIG:
+ ret = ipipeif_set_config(sd, config);
+ break;
+
+ case VIDIOC_VPFE_IPIPEIF_G_CONFIG:
+ ret = ipipeif_get_config(sd, arg);
+ break;
+ }
+ return ret;
+}
+
+/*
+ * ipipeif_s_ctrl() - Handle set control subdev method
+ * @ctrl: pointer to v4l2 control structure
+ */
+static int ipipeif_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpfe_ipipeif_device *ipipeif =
+ container_of(ctrl->handler, struct vpfe_ipipeif_device, ctrls);
+
+ switch (ctrl->id) {
+ case VPFE_CID_DPCM_PREDICTOR:
+ ipipeif->dpcm_predictor = ctrl->val;
+ break;
+
+ case V4L2_CID_GAIN:
+ ipipeif->gain = ctrl->val;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define ENABLE_IPIPEIF 0x1
+
+void vpfe_ipipeif_enable(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
+ void *ipipeif_base_addr = ipipeif->ipipeif_base_addr;
+ unsigned char val;
+
+ if (ipipeif->input != IPIPEIF_INPUT_MEMORY)
+ return;
+
+ do {
+ val = ipipeif_read(ipipeif_base_addr, IPIPEIF_ENABLE);
+ } while (val & 0x1);
+
+ ipipeif_write(ENABLE_IPIPEIF, ipipeif_base_addr, IPIPEIF_ENABLE);
+}
+
+/*
+ * ipipeif_set_stream() - Enable/Disable streaming on ipipeif subdev
+ * @sd: pointer to v4l2 subdev structure
+ * @enable: 1 == Enable, 0 == Disable
+ */
+static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct vpfe_device *vpfe_dev = to_vpfe_device(ipipeif);
+ int ret = 0;
+
+ if (!enable)
+ return ret;
+
+ ret = ipipeif_hw_setup(sd);
+ if (!ret)
+ vpfe_ipipeif_enable(vpfe_dev);
+
+ return ret;
+}
+
+/*
+ * ipipeif_enum_mbus_code() - Handle pixel format enumeration
+ * @sd: pointer to v4l2 subdev structure
+ * @fh: V4L2 subdev file handle
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ switch (code->pad) {
+ case IPIPEIF_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(ipipeif_input_fmts))
+ return -EINVAL;
+
+ code->code = ipipeif_input_fmts[code->index];
+ break;
+
+ case IPIPEIF_PAD_SOURCE:
+ if (code->index >= ARRAY_SIZE(ipipeif_output_fmts))
+ return -EINVAL;
+
+ code->code = ipipeif_output_fmts[code->index];
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * ipipeif_get_format() - Handle get format by pads subdev method
+ * @sd: pointer to v4l2 subdev structure
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ */
+static int
+ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ fmt->format = ipipeif->formats[fmt->pad];
+ else
+ fmt->format = *(v4l2_subdev_get_try_format(fh, fmt->pad));
+
+ return 0;
+}
+
+#define MIN_OUT_WIDTH 32
+#define MIN_OUT_HEIGHT 32
+
+/*
+ * ipipeif_try_format() - Handle try format by pad subdev method
+ * @ipipeif: VPFE ipipeif device.
+ * @fh: V4L2 subdev file handle.
+ * @pad: pad num.
+ * @fmt: pointer to v4l2 format structure.
+ * @which : wanted subdev format
+ */
+static void
+ipipeif_try_format(struct vpfe_ipipeif_device *ipipeif,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ unsigned int max_out_height;
+ unsigned int max_out_width;
+ unsigned int i;
+
+ max_out_width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+
+ if (pad == IPIPEIF_PAD_SINK) {
+ for (i = 0; i < ARRAY_SIZE(ipipeif_input_fmts); i++)
+ if (fmt->code == ipipeif_input_fmts[i])
+ break;
+
+ /* If not found, use SGRBG12 as default */
+ if (i >= ARRAY_SIZE(ipipeif_input_fmts))
+ fmt->code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ } else if (pad == IPIPEIF_PAD_SOURCE) {
+ for (i = 0; i < ARRAY_SIZE(ipipeif_output_fmts); i++)
+ if (fmt->code == ipipeif_output_fmts[i])
+ break;
+
+ /* If not found, use UYVY as default */
+ if (i >= ARRAY_SIZE(ipipeif_output_fmts))
+ fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ }
+
+ fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH, max_out_width);
+ fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT, max_out_height);
+}
+
+static int
+ipipeif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ipipeif_try_format(ipipeif, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ipipeif_try_format(ipipeif, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * __ipipeif_get_format() - helper function for getting ipipeif format
+ * @ipipeif: pointer to ipipeif private structure.
+ * @pad: pad number.
+ * @fh: V4L2 subdev file handle.
+ * @which: wanted subdev format.
+ *
+ */
+static struct v4l2_mbus_framefmt *
+__ipipeif_get_format(struct vpfe_ipipeif_device *ipipeif,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+
+ return &ipipeif->formats[pad];
+}
+
+/*
+ * ipipeif_set_format() - Handle set format by pads subdev method
+ * @sd: pointer to v4l2 subdev structure
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int
+ipipeif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ipipeif_get_format(ipipeif, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ipipeif_try_format(ipipeif, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ if (fmt->pad == IPIPEIF_PAD_SINK &&
+ ipipeif->input != IPIPEIF_INPUT_NONE)
+ ipipeif->formats[fmt->pad] = fmt->format;
+ else if (fmt->pad == IPIPEIF_PAD_SOURCE &&
+ ipipeif->output != IPIPEIF_OUTPUT_NONE)
+ ipipeif->formats[fmt->pad] = fmt->format;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ipipeif_set_default_config(struct vpfe_ipipeif_device *ipipeif)
+{
+#define WIDTH_I 640
+#define HEIGHT_I 480
+
+ const struct ipipeif_params ipipeif_defaults = {
+ .clock_select = IPIPEIF_SDRAM_CLK,
+ .ppln = WIDTH_I + 8,
+ .lpfr = HEIGHT_I + 10,
+ .rsz = 16, /* resize ratio 16/rsz */
+ .decimation = IPIPEIF_DECIMATION_OFF,
+ .avg_filter = IPIPEIF_AVG_OFF,
+ .if_5_1 = {
+ .clk_div = {
+ .m = 1, /* clock = sdram clock * (m/n) */
+ .n = 6
+ },
+ .clip = 4095,
+ },
+ };
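+ /*
+ * A short worked note on these defaults: with m = 1 and n = 6 the
+ * derived clock is sdram_clock * 1/6, and rsz = 16 corresponds to a
+ * resize ratio of 16/16 = 1 (no scaling); decimation is off, so the
+ * rsz value is not applied anyway.
+ */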
+ memcpy(&ipipeif->config, &ipipeif_defaults,
+ sizeof(struct ipipeif_params));
+}
+
+/*
+ * ipipeif_init_formats() - Initialize formats on all pads
+ * @sd: VPFE ipipeif V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int
+ipipeif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = IPIPEIF_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+ ipipeif_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = IPIPEIF_PAD_SOURCE;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+ ipipeif_set_format(sd, fh, &format);
+
+ ipipeif_set_default_config(ipipeif);
+
+ return 0;
+}
+
+/*
+ * ipipeif_video_in_queue() - ipipeif video in queue
+ * @vpfe_dev: vpfe device pointer
+ * @addr: buffer address
+ */
+static int
+ipipeif_video_in_queue(struct vpfe_device *vpfe_dev, unsigned long addr)
+{
+ struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
+ void *ipipeif_base_addr = ipipeif->ipipeif_base_addr;
+ unsigned int adofs;
+ u32 val;
+
+ if (ipipeif->input != IPIPEIF_INPUT_MEMORY)
+ return -EINVAL;
+
+ switch (ipipeif->formats[IPIPEIF_PAD_SINK].code) {
+ case V4L2_MBUS_FMT_Y8_1X8:
+ case V4L2_MBUS_FMT_UV8_1X8:
+ case V4L2_MBUS_FMT_YDYUYDYV8_1X16:
+ adofs = ipipeif->formats[IPIPEIF_PAD_SINK].width;
+ break;
+
+ default:
+ adofs = ipipeif->formats[IPIPEIF_PAD_SINK].width << 1;
+ break;
+ }
+
+ /* adjust the line len to be a multiple of 32 */
+ adofs += 31;
+ adofs &= ~0x1f;
+ val = (adofs >> 5) & IPIPEIF_ADOFS_LSB_MASK;
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_ADOFS);
+
+ /* lower sixteen bits */
+ val = (addr >> IPIPEIF_ADDRL_SHIFT) & IPIPEIF_ADDRL_MASK;
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_ADDRL);
+
+ /* upper eleven bits */
+ val = (addr >> IPIPEIF_ADDRU_SHIFT) & IPIPEIF_ADDRU_MASK;
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_ADDRU);
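+
+ /*
+ * Worked example (illustrative numbers only): for a 32-bit buffer
+ * address addr = 0x82000040 the two writes above program
+ * ADDRL = (addr >> 5) & 0xffff = 0x0002 and
+ * ADDRU = (addr >> 21) & 0x7ff = 0x410, which recombine as
+ * (ADDRU << 21) | (ADDRL << 5) = 0x82000040.
+ */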
+
+ return 0;
+}
+
+/* subdev core operations */
+static const struct v4l2_subdev_core_ops ipipeif_v4l2_core_ops = {
+ .ioctl = ipipeif_ioctl,
+};
+
+static const struct v4l2_ctrl_ops ipipeif_ctrl_ops = {
+ .s_ctrl = ipipeif_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vpfe_ipipeif_dpcm_pred = {
+ .ops = &ipipeif_ctrl_ops,
+ .id = VPFE_CID_DPCM_PREDICTOR,
+ .name = "DPCM Predictor",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+/* subdev file operations */
+static const struct v4l2_subdev_internal_ops ipipeif_v4l2_internal_ops = {
+ .open = ipipeif_init_formats,
+};
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops ipipeif_v4l2_video_ops = {
+ .s_stream = ipipeif_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops ipipeif_v4l2_pad_ops = {
+ .enum_mbus_code = ipipeif_enum_mbus_code,
+ .enum_frame_size = ipipeif_enum_frame_size,
+ .get_fmt = ipipeif_get_format,
+ .set_fmt = ipipeif_set_format,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops ipipeif_v4l2_ops = {
+ .core = &ipipeif_v4l2_core_ops,
+ .video = &ipipeif_v4l2_video_ops,
+ .pad = &ipipeif_v4l2_pad_ops,
+};
+
+static const struct vpfe_video_operations video_in_ops = {
+ .queue = ipipeif_video_in_queue,
+};
+
+static int
+ipipeif_link_setup(struct media_entity *entity, const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct vpfe_device *vpfe = to_vpfe_device(ipipeif);
+
+ switch (local->index | media_entity_type(remote->entity)) {
+ case IPIPEIF_PAD_SINK | MEDIA_ENT_T_DEVNODE:
+ /* Single shot mode */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ipipeif->input = IPIPEIF_INPUT_NONE;
+ break;
+ }
+ ipipeif->input = IPIPEIF_INPUT_MEMORY;
+ break;
+
+ case IPIPEIF_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* read from isif */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ipipeif->input = IPIPEIF_INPUT_NONE;
+ break;
+ }
+ if (ipipeif->input != IPIPEIF_INPUT_NONE)
+ return -EBUSY;
+
+ ipipeif->input = IPIPEIF_INPUT_ISIF;
+ break;
+
+ case IPIPEIF_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ipipeif->output = IPIPEIF_OUTPUT_NONE;
+ break;
+ }
+ if (remote->entity == &vpfe->vpfe_ipipe.subdev.entity)
+ /* connected to ipipe */
+ ipipeif->output = IPIPEIF_OUTPUT_IPIPE;
+ else if (remote->entity == &vpfe->vpfe_resizer.
+ crop_resizer.subdev.entity)
+ /* connected to resizer */
+ ipipeif->output = IPIPEIF_OUTPUT_RESIZER;
+ else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations ipipeif_media_ops = {
+ .link_setup = ipipeif_link_setup,
+};
+
+/*
+ * vpfe_ipipeif_unregister_entities() - Unregister entity
+ * @ipipeif - pointer to ipipeif subdevice structure.
+ */
+void vpfe_ipipeif_unregister_entities(struct vpfe_ipipeif_device *ipipeif)
+{
+ /* unregister video device */
+ vpfe_video_unregister(&ipipeif->video_in);
+
+ /* cleanup entity */
+ media_entity_cleanup(&ipipeif->subdev.entity);
+ /* unregister subdev */
+ v4l2_device_unregister_subdev(&ipipeif->subdev);
+}
+
+int
+vpfe_ipipeif_register_entities(struct vpfe_ipipeif_device *ipipeif,
+ struct v4l2_device *vdev)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(ipipeif);
+ unsigned int flags;
+ int ret;
+
+ /* Register the subdev */
+ ret = v4l2_device_register_subdev(vdev, &ipipeif->subdev);
+ if (ret < 0)
+ return ret;
+
+ ret = vpfe_video_register(&ipipeif->video_in, vdev);
+ if (ret) {
+ pr_err("Failed to register ipipeif video-in device\n");
+ goto fail;
+ }
+ ipipeif->video_in.vpfe_dev = vpfe_dev;
+
+ flags = 0;
+ ret = media_entity_create_link(&ipipeif->video_in.video_dev.entity, 0,
+ &ipipeif->subdev.entity, 0, flags);
+ if (ret < 0)
+ goto fail;
+
+ return 0;
+fail:
+ v4l2_device_unregister_subdev(&ipipeif->subdev);
+
+ return ret;
+}
+
+#define IPIPEIF_GAIN_HIGH 0x3ff
+#define IPIPEIF_DEFAULT_GAIN 0x200
+
+int vpfe_ipipeif_init(struct vpfe_ipipeif_device *ipipeif,
+ struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = &ipipeif->subdev;
+ struct media_pad *pads = &ipipeif->pads[0];
+ struct media_entity *me = &sd->entity;
+ resource_size_t res_len;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (!res)
+ return -ENOENT;
+
+ res_len = resource_size(res);
+ res = request_mem_region(res->start, res_len, res->name);
+ if (!res)
+ return -EBUSY;
+
+ ipipeif->ipipeif_base_addr = ioremap_nocache(res->start, res_len);
+ if (!ipipeif->ipipeif_base_addr) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ v4l2_subdev_init(sd, &ipipeif_v4l2_ops);
+
+ sd->internal_ops = &ipipeif_v4l2_internal_ops;
+ strlcpy(sd->name, "DAVINCI IPIPEIF", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
+
+ v4l2_set_subdevdata(sd, ipipeif);
+
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+ pads[IPIPEIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[IPIPEIF_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ipipeif->input = IPIPEIF_INPUT_NONE;
+ ipipeif->output = IPIPEIF_OUTPUT_NONE;
+ me->ops = &ipipeif_media_ops;
+
+ ret = media_entity_init(me, IPIPEIF_NUM_PADS, pads, 0);
+ if (ret)
+ goto fail;
+
+ v4l2_ctrl_handler_init(&ipipeif->ctrls, 2);
+ v4l2_ctrl_new_std(&ipipeif->ctrls, &ipipeif_ctrl_ops,
+ V4L2_CID_GAIN, 0,
+ IPIPEIF_GAIN_HIGH, 1, IPIPEIF_DEFAULT_GAIN);
+ v4l2_ctrl_new_custom(&ipipeif->ctrls, &vpfe_ipipeif_dpcm_pred, NULL);
+ v4l2_ctrl_handler_setup(&ipipeif->ctrls);
+ sd->ctrl_handler = &ipipeif->ctrls;
+
+ ipipeif->video_in.ops = &video_in_ops;
+ ipipeif->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ret = vpfe_video_init(&ipipeif->video_in, "IPIPEIF");
+ if (ret) {
+ pr_err("Failed to init IPIPEIF video-in device\n");
+ goto fail;
+ }
+ ipipeif_set_default_config(ipipeif);
+ return 0;
+fail:
+ release_mem_region(res->start, res_len);
+ return ret;
+}
+
+void
+vpfe_ipipeif_cleanup(struct vpfe_ipipeif_device *ipipeif,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ v4l2_ctrl_handler_free(&ipipeif->ctrls);
+ iounmap(ipipeif->ipipeif_base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
new file mode 100644
index 000000000000..608701fc5fed
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_IPIPEIF_H
+#define _DAVINCI_VPFE_DM365_IPIPEIF_H
+
+#include <linux/platform_device.h>
+
+#include <media/davinci/vpss.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "dm365_ipipeif_user.h"
+#include "vpfe_video.h"
+
+/* IPIPE base specific types */
+enum ipipeif_data_shift {
+ IPIPEIF_BITS15_2 = 0,
+ IPIPEIF_BITS14_1 = 1,
+ IPIPEIF_BITS13_0 = 2,
+ IPIPEIF_BITS12_0 = 3,
+ IPIPEIF_BITS11_0 = 4,
+ IPIPEIF_BITS10_0 = 5,
+ IPIPEIF_BITS9_0 = 6,
+};
+
+enum ipipeif_clkdiv {
+ IPIPEIF_DIVIDE_HALF = 0,
+ IPIPEIF_DIVIDE_THIRD = 1,
+ IPIPEIF_DIVIDE_FOURTH = 2,
+ IPIPEIF_DIVIDE_FIFTH = 3,
+ IPIPEIF_DIVIDE_SIXTH = 4,
+ IPIPEIF_DIVIDE_EIGHTH = 5,
+ IPIPEIF_DIVIDE_SIXTEENTH = 6,
+ IPIPEIF_DIVIDE_THIRTY = 7,
+};
+
+enum ipipeif_pack_mode {
+ IPIPEIF_PACK_16_BIT = 0,
+ IPIPEIF_PACK_8_BIT = 1,
+};
+
+enum ipipeif_5_1_pack_mode {
+ IPIPEIF_5_1_PACK_16_BIT = 0,
+ IPIPEIF_5_1_PACK_8_BIT = 1,
+ IPIPEIF_5_1_PACK_8_BIT_A_LAW = 2,
+ IPIPEIF_5_1_PACK_12_BIT = 3
+};
+
+enum ipipeif_input_source {
+ IPIPEIF_CCDC = 0,
+ IPIPEIF_SDRAM_RAW = 1,
+ IPIPEIF_CCDC_DARKFM = 2,
+ IPIPEIF_SDRAM_YUV = 3,
+};
+
+enum ipipeif_ialaw {
+ IPIPEIF_ALAW_OFF = 0,
+ IPIPEIF_ALAW_ON = 1,
+};
+
+enum ipipeif_input_src1 {
+ IPIPEIF_SRC1_PARALLEL_PORT = 0,
+ IPIPEIF_SRC1_SDRAM_RAW = 1,
+ IPIPEIF_SRC1_ISIF_DARKFM = 2,
+ IPIPEIF_SRC1_SDRAM_YUV = 3,
+};
+
+enum ipipeif_dfs_dir {
+ IPIPEIF_PORT_MINUS_SDRAM = 0,
+ IPIPEIF_SDRAM_MINUS_PORT = 1,
+};
+
+enum ipipeif_chroma_phase {
+ IPIPEIF_CBCR_Y = 0,
+ IPIPEIF_Y_CBCR = 1,
+};
+
+enum ipipeif_dpcm_type {
+ IPIPEIF_DPCM_8BIT_10BIT = 0,
+ IPIPEIF_DPCM_8BIT_12BIT = 1,
+};
+
+/* data shift for IPIPE 5.1 */
+enum ipipeif_5_1_data_shift {
+ IPIPEIF_5_1_BITS11_0 = 0,
+ IPIPEIF_5_1_BITS10_0 = 1,
+ IPIPEIF_5_1_BITS9_0 = 2,
+ IPIPEIF_5_1_BITS8_0 = 3,
+ IPIPEIF_5_1_BITS7_0 = 4,
+ IPIPEIF_5_1_BITS15_4 = 5,
+};
+
+#define IPIPEIF_PAD_SINK 0
+#define IPIPEIF_PAD_SOURCE 1
+
+#define IPIPEIF_NUM_PADS 2
+
+enum ipipeif_input_entity {
+ IPIPEIF_INPUT_NONE = 0,
+ IPIPEIF_INPUT_ISIF = 1,
+ IPIPEIF_INPUT_MEMORY = 2,
+};
+
+enum ipipeif_output_entity {
+ IPIPEIF_OUTPUT_NONE = 0,
+ IPIPEIF_OUTPUT_IPIPE = 1,
+ IPIPEIF_OUTPUT_RESIZER = 2,
+};
+
+struct vpfe_ipipeif_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[IPIPEIF_NUM_PADS];
+ struct v4l2_mbus_framefmt formats[IPIPEIF_NUM_PADS];
+ enum ipipeif_input_entity input;
+ unsigned int output;
+ struct vpfe_video_device video_in;
+ struct v4l2_ctrl_handler ctrls;
+ void *__iomem ipipeif_base_addr;
+ struct ipipeif_params config;
+ int dpcm_predictor;
+ int gain;
+};
+
+/* IPIPEIF Register Offsets from the base address */
+#define IPIPEIF_ENABLE 0x00
+#define IPIPEIF_CFG1 0x04
+#define IPIPEIF_PPLN 0x08
+#define IPIPEIF_LPFR 0x0c
+#define IPIPEIF_HNUM 0x10
+#define IPIPEIF_VNUM 0x14
+#define IPIPEIF_ADDRU 0x18
+#define IPIPEIF_ADDRL 0x1c
+#define IPIPEIF_ADOFS 0x20
+#define IPIPEIF_RSZ 0x24
+#define IPIPEIF_GAIN 0x28
+
+/* Below registers are available only on IPIPE 5.1 */
+#define IPIPEIF_DPCM 0x2c
+#define IPIPEIF_CFG2 0x30
+#define IPIPEIF_INIRSZ 0x34
+#define IPIPEIF_OCLIP 0x38
+#define IPIPEIF_DTUDF 0x3c
+#define IPIPEIF_CLKDIV 0x40
+#define IPIPEIF_DPC1 0x44
+#define IPIPEIF_DPC2 0x48
+#define IPIPEIF_DFSGVL 0x4c
+#define IPIPEIF_DFSGTH 0x50
+#define IPIPEIF_RSZ3A 0x54
+#define IPIPEIF_INIRSZ3A 0x58
+#define IPIPEIF_RSZ_MIN 16
+#define IPIPEIF_RSZ_MAX 112
+#define IPIPEIF_RSZ_CONST 16
+#define SETBIT(reg, bit) (reg = ((reg) | ((0x00000001)<<(bit))))
+#define RESETBIT(reg, bit) (reg = ((reg) & (~(0x00000001<<(bit)))))
+
+#define IPIPEIF_ADOFS_LSB_MASK 0x1ff
+#define IPIPEIF_ADOFS_LSB_SHIFT 5
+#define IPIPEIF_ADOFS_MSB_MASK 0x200
+#define IPIPEIF_ADDRU_MASK 0x7ff
+#define IPIPEIF_ADDRL_SHIFT 5
+#define IPIPEIF_ADDRL_MASK 0xffff
+#define IPIPEIF_ADDRU_SHIFT 21
+#define IPIPEIF_ADDRMSB_SHIFT 31
+#define IPIPEIF_ADDRMSB_LEFT_SHIFT 10
+
+/* CFG1 Masks and shifts */
+#define ONESHOT_SHIFT 0
+#define DECIM_SHIFT 1
+#define INPSRC_SHIFT 2
+#define CLKDIV_SHIFT 4
+#define AVGFILT_SHIFT 7
+#define PACK8IN_SHIFT 8
+#define IALAW_SHIFT 9
+#define CLKSEL_SHIFT 10
+#define DATASFT_SHIFT 11
+#define INPSRC1_SHIFT 14
+
+/* DPC2 */
+#define IPIPEIF_DPC2_EN_SHIFT 12
+#define IPIPEIF_DPC2_THR_MASK 0xfff
+/* Applicable for IPIPE 5.1 */
+#define IPIPEIF_DF_GAIN_EN_SHIFT 10
+#define IPIPEIF_DF_GAIN_MASK 0x3ff
+#define IPIPEIF_DF_GAIN_THR_MASK 0xfff
+/* DPCM */
+#define IPIPEIF_DPCM_BITS_SHIFT 2
+#define IPIPEIF_DPCM_PRED_SHIFT 1
+/* CFG2 */
+#define IPIPEIF_CFG2_HDPOL_SHIFT 1
+#define IPIPEIF_CFG2_VDPOL_SHIFT 2
+#define IPIPEIF_CFG2_YUV8_SHIFT 6
+#define IPIPEIF_CFG2_YUV16_SHIFT 3
+#define IPIPEIF_CFG2_YUV8P_SHIFT 7
+
+/* INIRSZ */
+#define IPIPEIF_INIRSZ_ALNSYNC_SHIFT 13
+#define IPIPEIF_INIRSZ_MASK 0x1fff
+
+/* CLKDIV */
+#define IPIPEIF_CLKDIV_M_SHIFT 8
+
+void vpfe_ipipeif_enable(struct vpfe_device *vpfe_dev);
+void vpfe_ipipeif_ss_buffer_isr(struct vpfe_ipipeif_device *ipipeif);
+int vpfe_ipipeif_decimation_enabled(struct vpfe_device *vpfe_dev);
+int vpfe_ipipeif_get_rsz(struct vpfe_device *vpfe_dev);
+void vpfe_ipipeif_cleanup(struct vpfe_ipipeif_device *ipipeif,
+ struct platform_device *pdev);
+int vpfe_ipipeif_init(struct vpfe_ipipeif_device *ipipeif,
+ struct platform_device *pdev);
+int vpfe_ipipeif_register_entities(struct vpfe_ipipeif_device *ipipeif,
+ struct v4l2_device *vdev);
+void vpfe_ipipeif_unregister_entities(struct vpfe_ipipeif_device *ipipeif);
+
+#endif /* _DAVINCI_VPFE_DM365_IPIPEIF_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h b/drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h
new file mode 100644
index 000000000000..e2a69b59554a
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_IPIPEIF_USER_H
+#define _DAVINCI_VPFE_DM365_IPIPEIF_USER_H
+
+/* clockdiv for IPIPE 5.1 */
+struct ipipeif_5_1_clkdiv {
+ unsigned char m;
+ unsigned char n;
+};
+
+enum ipipeif_decimation {
+ IPIPEIF_DECIMATION_OFF,
+ IPIPEIF_DECIMATION_ON
+};
+
+/* DPC at the if for IPIPE 5.1 */
+struct ipipeif_dpc {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* threshold */
+ unsigned short thr;
+};
+
+enum ipipeif_clock {
+ IPIPEIF_PIXCEL_CLK,
+ IPIPEIF_SDRAM_CLK
+};
+
+enum ipipeif_avg_filter {
+ IPIPEIF_AVG_OFF,
+ IPIPEIF_AVG_ON
+};
+
+struct ipipeif_5_1 {
+ struct ipipeif_5_1_clkdiv clk_div;
+ /* Defect pixel correction */
+ struct ipipeif_dpc dpc;
+ /* clipped to this value */
+ unsigned short clip;
+ /* Align HSync and VSync to rsz_start */
+ unsigned char align_sync;
+ /* resizer start position */
+ unsigned int rsz_start;
+ /* DF gain enable */
+ unsigned char df_gain_en;
+ /* DF gain value */
+ unsigned short df_gain;
+ /* DF gain threshold value */
+ unsigned short df_gain_thr;
+};
+
+struct ipipeif_params {
+ enum ipipeif_clock clock_select;
+ unsigned int ppln;
+ unsigned int lpfr;
+ unsigned char rsz;
+ enum ipipeif_decimation decimation;
+ enum ipipeif_avg_filter avg_filter;
+ /* IPIPE 5.1 */
+ struct ipipeif_5_1 if_5_1;
+};
+
+/*
+ * Private IOCTL
+ * VIDIOC_VPFE_IPIPEIF_S_CONFIG: Set IPIPEIF configuration
+ * VIDIOC_VPFE_IPIPEIF_G_CONFIG: Get IPIPEIF configuration
+ */
+#define VIDIOC_VPFE_IPIPEIF_S_CONFIG \
+ _IOWR('I', BASE_VIDIOC_PRIVATE + 1, struct ipipeif_params)
+#define VIDIOC_VPFE_IPIPEIF_G_CONFIG \
+ _IOWR('I', BASE_VIDIOC_PRIVATE + 2, struct ipipeif_params)
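+
+/*
+ * Illustrative user-space usage (a sketch; the subdev node path and the
+ * chosen values are examples, not part of this API definition):
+ *
+ *	struct ipipeif_params params;
+ *	int fd = open("/dev/v4l-subdevN", O_RDWR);
+ *
+ *	if (fd >= 0 && !ioctl(fd, VIDIOC_VPFE_IPIPEIF_G_CONFIG, &params)) {
+ *		params.if_5_1.dpc.en = 1;
+ *		params.if_5_1.dpc.thr = 256;
+ *		ioctl(fd, VIDIOC_VPFE_IPIPEIF_S_CONFIG, &params);
+ *	}
+ */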
+
+#endif /* _DAVINCI_VPFE_DM365_IPIPEIF_USER_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
new file mode 100644
index 000000000000..ebeea72e176a
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -0,0 +1,2104 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#include "dm365_isif.h"
+#include "vpfe_mc_capture.h"
+
+#define MAX_WIDTH 4096
+#define MAX_HEIGHT 4096
+
+static const unsigned int isif_fmts[] = {
+ V4L2_MBUS_FMT_YUYV8_2X8,
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_YUYV8_1X16,
+ V4L2_MBUS_FMT_YUYV10_1X20,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+ V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8,
+ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+};
+
+#define ISIF_COLPTN_R_Ye 0x0
+#define ISIF_COLPTN_Gr_Cy 0x1
+#define ISIF_COLPTN_Gb_G 0x2
+#define ISIF_COLPTN_B_Mg 0x3
+
+#define ISIF_CCOLP_CP01_0 0
+#define ISIF_CCOLP_CP03_2 2
+#define ISIF_CCOLP_CP05_4 4
+#define ISIF_CCOLP_CP07_6 6
+#define ISIF_CCOLP_CP11_0 8
+#define ISIF_CCOLP_CP13_2 10
+#define ISIF_CCOLP_CP15_4 12
+#define ISIF_CCOLP_CP17_6 14
+
+static const u32 isif_sgrbg_pattern =
+ ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP01_0 |
+ ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP03_2 |
+ ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP05_4 |
+ ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP07_6 |
+ ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP11_0 |
+ ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP13_2 |
+ ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP15_4 |
+ ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP17_6;
+
+static const u32 isif_srggb_pattern =
+ ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP01_0 |
+ ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP03_2 |
+ ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP05_4 |
+ ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP07_6 |
+ ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP11_0 |
+ ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP13_2 |
+ ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP15_4 |
+ ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP17_6;
+
+static inline u32 isif_read(void *__iomem base_addr, u32 offset)
+{
+ return readl(base_addr + offset);
+}
+
+static inline void isif_write(void *__iomem base_addr, u32 val, u32 offset)
+{
+ writel(val, base_addr + offset);
+}
+
+static inline u32 isif_merge(void *__iomem base_addr, u32 mask, u32 val,
+ u32 offset)
+{
+ u32 new_val = (isif_read(base_addr, offset) & ~mask) | (val & mask);
+
+ isif_write(base_addr, new_val, offset);
+
+ return new_val;
+}
+
+static void isif_enable_output_to_sdram(struct vpfe_isif_device *isif, int en)
+{
+ isif_merge(isif->isif_cfg.base_addr, ISIF_SYNCEN_WEN_MASK,
+ en << ISIF_SYNCEN_WEN_SHIFT, SYNCEN);
+}
+
+static inline void
+isif_regw_lin_tbl(struct vpfe_isif_device *isif, u32 val, u32 offset, int i)
+{
+ if (!i)
+ writel(val, isif->isif_cfg.linear_tbl0_addr + offset);
+ else
+ writel(val, isif->isif_cfg.linear_tbl1_addr + offset);
+}
+
+static void isif_disable_all_modules(struct vpfe_isif_device *isif)
+{
+ /* disable BC */
+ isif_write(isif->isif_cfg.base_addr, 0, CLAMPCFG);
+ /* disable vdfc */
+ isif_write(isif->isif_cfg.base_addr, 0, DFCCTL);
+ /* disable CSC */
+ isif_write(isif->isif_cfg.base_addr, 0, CSCCTL);
+ /* disable linearization */
+ isif_write(isif->isif_cfg.base_addr, 0, LINCFG0);
+}
+
+static void isif_enable(struct vpfe_isif_device *isif, int en)
+{
+ if (!en)
+ /* Before disabling the ISIF, disable all ISIF modules */
+ isif_disable_all_modules(isif);
+
+ /*
+ * Wait for the next VD. The lowest assumed scan rate is 12 Hz,
+ * i.e. a frame period of about 83 msec, so a 100 msec delay is
+ * long enough.
+ */
+ msleep(100);
+ isif_merge(isif->isif_cfg.base_addr, ISIF_SYNCEN_VDHDEN_MASK,
+ en, SYNCEN);
+}
+
+/*
+ * ISIF helper functions
+ */
+
+#define DM365_ISIF_MDFS_OFFSET 15
+#define DM365_ISIF_MDFS_MASK 0x1
+
+/* get field id in isif hardware */
+enum v4l2_field vpfe_isif_get_fid(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_isif_device *isif = &vpfe_dev->vpfe_isif;
+ u32 field_status;
+
+ field_status = isif_read(isif->isif_cfg.base_addr, MODESET);
+ field_status = (field_status >> DM365_ISIF_MDFS_OFFSET) &
+ DM365_ISIF_MDFS_MASK;
+ return field_status;
+}
+
+static int
+isif_set_pixel_format(struct vpfe_isif_device *isif, unsigned int pixfmt)
+{
+ if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12) {
+ if (pixfmt == V4L2_PIX_FMT_SBGGR16)
+ isif->isif_cfg.data_pack = ISIF_PACK_16BIT;
+ else if ((pixfmt == V4L2_PIX_FMT_SGRBG10DPCM8) ||
+ (pixfmt == V4L2_PIX_FMT_SGRBG10ALAW8))
+ isif->isif_cfg.data_pack = ISIF_PACK_8BIT;
+ else
+ return -EINVAL;
+
+ isif->isif_cfg.bayer.pix_fmt = ISIF_PIXFMT_RAW;
+ isif->isif_cfg.bayer.v4l2_pix_fmt = pixfmt;
+ } else {
+ if (pixfmt == V4L2_PIX_FMT_YUYV)
+ isif->isif_cfg.ycbcr.pix_order = ISIF_PIXORDER_YCBYCR;
+ else if (pixfmt == V4L2_PIX_FMT_UYVY)
+ isif->isif_cfg.ycbcr.pix_order = ISIF_PIXORDER_CBYCRY;
+ else
+ return -EINVAL;
+
+ isif->isif_cfg.data_pack = ISIF_PACK_8BIT;
+ isif->isif_cfg.ycbcr.v4l2_pix_fmt = pixfmt;
+ }
+
+ return 0;
+}
+
+static int
+isif_set_frame_format(struct vpfe_isif_device *isif,
+ enum isif_frmfmt frm_fmt)
+{
+ if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ isif->isif_cfg.bayer.frm_fmt = frm_fmt;
+ else
+ isif->isif_cfg.ycbcr.frm_fmt = frm_fmt;
+
+ return 0;
+}
+
+static int isif_set_image_window(struct vpfe_isif_device *isif)
+{
+ struct v4l2_rect *win = &isif->crop;
+
+ if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12) {
+ isif->isif_cfg.bayer.win.top = win->top;
+ isif->isif_cfg.bayer.win.left = win->left;
+ isif->isif_cfg.bayer.win.width = win->width;
+ isif->isif_cfg.bayer.win.height = win->height;
+ return 0;
+ }
+ isif->isif_cfg.ycbcr.win.top = win->top;
+ isif->isif_cfg.ycbcr.win.left = win->left;
+ isif->isif_cfg.ycbcr.win.width = win->width;
+ isif->isif_cfg.ycbcr.win.height = win->height;
+
+ return 0;
+}
+
+static int
+isif_set_buftype(struct vpfe_isif_device *isif, enum isif_buftype buf_type)
+{
+ if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ isif->isif_cfg.bayer.buf_type = buf_type;
+ else
+ isif->isif_cfg.ycbcr.buf_type = buf_type;
+
+ return 0;
+}
+
+/* configure format in isif hardware */
+static int
+isif_config_format(struct vpfe_device *vpfe_dev, unsigned int pad)
+{
+ struct vpfe_isif_device *vpfe_isif = &vpfe_dev->vpfe_isif;
+ enum isif_frmfmt frm_fmt = ISIF_FRMFMT_INTERLACED;
+ struct v4l2_pix_format format;
+ int ret = 0;
+
+ v4l2_fill_pix_format(&format, &vpfe_dev->vpfe_isif.formats[pad]);
+ mbus_to_pix(&vpfe_dev->vpfe_isif.formats[pad], &format);
+
+ if (isif_set_pixel_format(vpfe_isif, format.pixelformat) < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Failed to set pixel format in isif\n");
+ return -EINVAL;
+ }
+
+ /* a call to s_crop will override these values */
+ vpfe_isif->crop.left = 0;
+ vpfe_isif->crop.top = 0;
+ vpfe_isif->crop.width = format.width;
+ vpfe_isif->crop.height = format.height;
+
+ /* configure the image window */
+ isif_set_image_window(vpfe_isif);
+
+ switch (vpfe_dev->vpfe_isif.formats[pad].field) {
+ case V4L2_FIELD_INTERLACED:
+ /* do nothing, since it is default */
+ ret = isif_set_buftype(vpfe_isif, ISIF_BUFTYPE_FLD_INTERLEAVED);
+ break;
+
+ case V4L2_FIELD_NONE:
+ frm_fmt = ISIF_FRMFMT_PROGRESSIVE;
+ /* buffer type only applicable for interlaced scan */
+ break;
+
+ case V4L2_FIELD_SEQ_TB:
+ ret = isif_set_buftype(vpfe_isif, ISIF_BUFTYPE_FLD_SEPARATED);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* set the frame format */
+ if (!ret)
+ ret = isif_set_frame_format(vpfe_isif, frm_fmt);
+
+ return ret;
+}
+
+/*
+ * isif_try_format() - Try video format on a pad
+ * @isif: VPFE isif device
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ */
+static void
+isif_try_format(struct vpfe_isif_device *isif, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ unsigned int width = fmt->format.width;
+ unsigned int height = fmt->format.height;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(isif_fmts); i++) {
+ if (fmt->format.code == isif_fmts[i])
+ break;
+ }
+
+ /* If not found, use YUYV8_2x8 as default */
+ if (i >= ARRAY_SIZE(isif_fmts))
+ fmt->format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+
+ /* Clamp the size. */
+ fmt->format.width = clamp_t(u32, width, 32, MAX_WIDTH);
+ fmt->format.height = clamp_t(u32, height, 32, MAX_HEIGHT);
+
+ /* The data formatter truncates the number of horizontal output
+ * pixels to a multiple of 16. To avoid clipping data, allow
+ * callers to request an output size bigger than the input size
+ * up to the nearest multiple of 16.
+ */
+ if (fmt->pad == ISIF_PAD_SOURCE)
+ fmt->format.width &= ~15;
+}
+
+/*
+ * vpfe_isif_buffer_isr() - isif module non-progressive buffer scheduling isr
+ * @isif: Pointer to isif subdevice.
+ */
+void vpfe_isif_buffer_isr(struct vpfe_isif_device *isif)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
+ struct vpfe_video_device *video = &isif->video_out;
+ enum v4l2_field field;
+ int fid;
+
+ if (!video->started)
+ return;
+
+ field = video->fmt.fmt.pix.field;
+
+ if (field == V4L2_FIELD_NONE) {
+ /* handle progressive frame capture */
+ if (video->cur_frm != video->next_frm)
+ vpfe_video_process_buffer_complete(video);
+ return;
+ }
+
+ /* interlaced or sequential TB capture: check which field
+ * the hardware is currently in
+ */
+ fid = vpfe_isif_get_fid(vpfe_dev);
+
+ /* switch the software maintained field id */
+ video->field_id ^= 1;
+ if (fid == video->field_id) {
+ /* we are in sync here, continue */
+ if (fid == 0) {
+ /*
+ * One frame is just being captured. If the
+ * next frame is available, release the current
+ * frame and move on
+ */
+ if (video->cur_frm != video->next_frm)
+ vpfe_video_process_buffer_complete(video);
+ /*
+ * based on whether the two fields are stored
+ * interleaved or separately in memory,
+ * reconfigure the ISIF memory address
+ */
+ if (field == V4L2_FIELD_SEQ_TB)
+ vpfe_video_schedule_bottom_field(video);
+ return;
+ }
+ /*
+ * one field has just been captured: configure for the
+ * next frame. Get the next buffer from the dma queue;
+ * if none is available, hold on to the current buffer.
+ */
+ spin_lock(&video->dma_queue_lock);
+ if (!list_empty(&video->dma_queue) &&
+ video->cur_frm == video->next_frm)
+ vpfe_video_schedule_next_buffer(video);
+ spin_unlock(&video->dma_queue_lock);
+ } else if (fid == 0) {
+ /*
+ * out of sync. Recover from any hardware out-of-sync;
+ * one frame may be lost.
+ */
+ video->field_id = fid;
+ }
+}
+
+/*
+ * vpfe_isif_vidint1_isr() - ISIF module progressive buffer scheduling isr
+ * @isif: Pointer to isif subdevice.
+ */
+void vpfe_isif_vidint1_isr(struct vpfe_isif_device *isif)
+{
+ struct vpfe_video_device *video = &isif->video_out;
+
+ if (!video->started)
+ return;
+
+ spin_lock(&video->dma_queue_lock);
+ if (video->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
+ !list_empty(&video->dma_queue) && video->cur_frm == video->next_frm)
+ vpfe_video_schedule_next_buffer(video);
+
+ spin_unlock(&video->dma_queue_lock);
+}
+
+/*
+ * VPFE video operations
+ */
+
+static int isif_video_queue(struct vpfe_device *vpfe_dev, unsigned long addr)
+{
+ struct vpfe_isif_device *isif = &vpfe_dev->vpfe_isif;
+
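+ /*
+ * Split the buffer address across CADU (bits 31-21) and CADL
+ * (bits 20-5); the low 5 bits are dropped, so buffers are
+ * expected to be 32-byte aligned.
+ */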
+ isif_write(isif->isif_cfg.base_addr, (addr >> 21) &
+ ISIF_CADU_BITS, CADU);
+ isif_write(isif->isif_cfg.base_addr, (addr >> 5) &
+ ISIF_CADL_BITS, CADL);
+
+ return 0;
+}
+
+static const struct vpfe_video_operations isif_video_ops = {
+ .queue = isif_video_queue,
+};
+
+/*
+ * V4L2 subdev operations
+ */
+
+/* Parameter operations */
+static int isif_get_params(struct v4l2_subdev *sd, void *params)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+
+ /* only raw module parameters can be read through the IOCTL */
+ if (isif->formats[ISIF_PAD_SINK].code != V4L2_MBUS_FMT_SGRBG12_1X12)
+ return -EINVAL;
+ memcpy(params, &isif->isif_cfg.bayer.config_params,
+ sizeof(isif->isif_cfg.bayer.config_params));
+ return 0;
+}
+
+static int isif_validate_df_csc_params(struct vpfe_isif_df_csc *df_csc)
+{
+ struct vpfe_isif_color_space_conv *csc;
+ int err = -EINVAL;
+ int i;
+
+ if (!df_csc->df_or_csc) {
+ /* csc configuration */
+ csc = &df_csc->csc;
+ if (csc->en) {
+ for (i = 0; i < VPFE_ISIF_CSC_NUM_COEFF; i++)
+ if (csc->coeff[i].integer >
+ ISIF_CSC_COEF_INTEG_MASK ||
+ csc->coeff[i].decimal >
+ ISIF_CSC_COEF_DECIMAL_MASK) {
+ pr_err("Invalid CSC coefficients\n");
+ return err;
+ }
+ }
+ }
+ if (df_csc->start_pix > ISIF_DF_CSC_SPH_MASK) {
+ pr_err("Invalid df_csc start pix value\n");
+ return err;
+ }
+
+ if (df_csc->num_pixels > ISIF_DF_NUMPIX) {
+ pr_err("Invalid df_csc num pixels value\n");
+ return err;
+ }
+
+ if (df_csc->start_line > ISIF_DF_CSC_LNH_MASK) {
+ pr_err("Invalid df_csc start_line value\n");
+ return err;
+ }
+
+ if (df_csc->num_lines > ISIF_DF_NUMLINES) {
+ pr_err("Invalid df_csc num_lines value\n");
+ return err;
+ }
+
+ return 0;
+}
+
+#define DM365_ISIF_MAX_VDFLSFT 4
+#define DM365_ISIF_MAX_VDFSLV 4095
+#define DM365_ISIF_MAX_DFCMEM0 0x1fff
+#define DM365_ISIF_MAX_DFCMEM1 0x1fff
+
+static int isif_validate_dfc_params(struct vpfe_isif_dfc *dfc)
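+ /* probe the minimum size: request 1x1 and let isif_try_format()
+ * clamp it up to the smallest supported frame size
+ */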
+{
+ int err = -EINVAL;
+ int i;
+
+ if (!dfc->en)
+ return 0;
+
+ if (dfc->corr_whole_line > 1) {
+ pr_err("Invalid corr_whole_line value\n");
+ return err;
+ }
+
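+ /* probe the maximum size: request an oversized frame (-1) and let
+ * isif_try_format() clamp it down to the largest supported size
+ */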
+ if (dfc->def_level_shift > DM365_ISIF_MAX_VDFLSFT) {
+ pr_err("Invalid def_level_shift value\n");
+ return err;
+ }
+
+ if (dfc->def_sat_level > DM365_ISIF_MAX_VDFSLV) {
+ pr_err("Invalid def_sat_level value\n");
+ return err;
+ }
+
+ if (!dfc->num_vdefects ||
+ dfc->num_vdefects > VPFE_ISIF_VDFC_TABLE_SIZE) {
+ pr_err("Invalid num_vdefects value\n");
+ return err;
+ }
+
+ for (i = 0; i < VPFE_ISIF_VDFC_TABLE_SIZE; i++) {
+ if (dfc->table[i].pos_vert > DM365_ISIF_MAX_DFCMEM0) {
+ pr_err("Invalid pos_vert value\n");
+ return err;
+ }
+ if (dfc->table[i].pos_horz > DM365_ISIF_MAX_DFCMEM1) {
+ pr_err("Invalid pos_horz value\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+#define DM365_ISIF_MAX_CLVRV 0xfff
+#define DM365_ISIF_MAX_CLDC 0x1fff
+#define DM365_ISIF_MAX_CLHSH 0x1fff
+#define DM365_ISIF_MAX_CLHSV 0x1fff
+#define DM365_ISIF_MAX_CLVSH 0x1fff
+#define DM365_ISIF_MAX_CLVSV 0x1fff
+#define DM365_ISIF_MAX_HEIGHT_BLACK_REGION 0x1fff
+
+static int isif_validate_bclamp_params(struct vpfe_isif_black_clamp *bclamp)
+{
+ int err = -EINVAL;
+
+ if (bclamp->dc_offset > DM365_ISIF_MAX_CLDC) {
+ pr_err("Invalid bclamp dc_offset value\n");
+ return err;
+ }
+ if (!bclamp->en)
+ return 0;
+ if (bclamp->horz.clamp_pix_limit > 1) {
+ pr_err("Invalid bclamp horz clamp_pix_limit value\n");
+ return err;
+ }
+ if (bclamp->horz.win_count_calc < 1 ||
+ bclamp->horz.win_count_calc > 32) {
+ pr_err("Invalid bclamp horz win_count_calc value\n");
+ return err;
+ }
+ if (bclamp->horz.win_start_h_calc > DM365_ISIF_MAX_CLHSH) {
+ pr_err("Invalid bclamp win_start_v_calc value\n");
+ return err;
+ }
+
+ if (bclamp->horz.win_start_v_calc > DM365_ISIF_MAX_CLHSV) {
+ pr_err("Invalid bclamp win_start_v_calc value\n");
+ return err;
+ }
+ if (bclamp->vert.reset_clamp_val > DM365_ISIF_MAX_CLVRV) {
+ pr_err("Invalid bclamp reset_clamp_val value\n");
+ return err;
+ }
+ if (bclamp->vert.ob_v_sz_calc > DM365_ISIF_MAX_HEIGHT_BLACK_REGION) {
+ pr_err("Invalid bclamp ob_v_sz_calc value\n");
+ return err;
+ }
+ if (bclamp->vert.ob_start_h > DM365_ISIF_MAX_CLVSH) {
+ pr_err("Invalid bclamp ob_start_h value\n");
+ return err;
+ }
+ if (bclamp->vert.ob_start_v > DM365_ISIF_MAX_CLVSV) {
+ pr_err("Invalid bclamp ob_start_h value\n");
+ return err;
+ }
+ return 0;
+}
+
+static int
+isif_validate_raw_params(struct vpfe_isif_raw_config *params)
+{
+ int ret;
+
+ ret = isif_validate_df_csc_params(&params->df_csc);
+ if (ret)
+ return ret;
+ ret = isif_validate_dfc_params(&params->dfc);
+ if (ret)
+ return ret;
+ ret = isif_validate_bclamp_params(&params->bclamp);
+ return ret;
+}
+
+static int isif_set_params(struct v4l2_subdev *sd, void *params)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ struct vpfe_isif_raw_config isif_raw_params;
+ int ret = -EINVAL;
+
+ /* only raw module parameters can be set through the IOCTL */
+ if (isif->formats[ISIF_PAD_SINK].code != V4L2_MBUS_FMT_SGRBG12_1X12)
+ return ret;
+
+ memcpy(&isif_raw_params, params, sizeof(isif_raw_params));
+ if (!isif_validate_raw_params(&isif_raw_params)) {
+ memcpy(&isif->isif_cfg.bayer.config_params, &isif_raw_params,
+ sizeof(isif_raw_params));
+ ret = 0;
+ }
+ return ret;
+}
+/*
+ * isif_ioctl() - isif module private ioctl's
+ * @sd: VPFE isif V4L2 subdevice
+ * @cmd: ioctl command
+ * @arg: ioctl argument
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static long isif_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ int ret;
+
+ switch (cmd) {
+ case VIDIOC_VPFE_ISIF_S_RAW_PARAMS:
+ ret = isif_set_params(sd, arg);
+ break;
+
+ case VIDIOC_VPFE_ISIF_G_RAW_PARAMS:
+ ret = isif_get_params(sd, arg);
+ break;
+
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+ return ret;
+}
+
+static void isif_config_gain_offset(struct vpfe_isif_device *isif)
+{
+ struct vpfe_isif_gain_offsets_adj *gain_off_ptr =
+ &isif->isif_cfg.bayer.config_params.gain_offset;
+ void __iomem *base = isif->isif_cfg.base_addr;
+ u32 val;
+
+ val = ((gain_off_ptr->gain_sdram_en & 1) << GAIN_SDRAM_EN_SHIFT) |
+ ((gain_off_ptr->gain_ipipe_en & 1) << GAIN_IPIPE_EN_SHIFT) |
+ ((gain_off_ptr->gain_h3a_en & 1) << GAIN_H3A_EN_SHIFT) |
+ ((gain_off_ptr->offset_sdram_en & 1) << OFST_SDRAM_EN_SHIFT) |
+ ((gain_off_ptr->offset_ipipe_en & 1) << OFST_IPIPE_EN_SHIFT) |
+ ((gain_off_ptr->offset_h3a_en & 1) << OFST_H3A_EN_SHIFT);
+ isif_merge(base, GAIN_OFFSET_EN_MASK, val, CGAMMAWD);
+
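+ /* program the per-color gains and the common offset configured
+ * through the V4L2 controls
+ */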
+ isif_write(base, isif->isif_cfg.isif_gain_params.cr_gain, CRGAIN);
+ isif_write(base, isif->isif_cfg.isif_gain_params.cgr_gain, CGRGAIN);
+ isif_write(base, isif->isif_cfg.isif_gain_params.cgb_gain, CGBGAIN);
+ isif_write(base, isif->isif_cfg.isif_gain_params.cb_gain, CBGAIN);
+ isif_write(base, isif->isif_cfg.isif_gain_params.offset & OFFSET_MASK,
+ COFSTA);
+}
+
+static void isif_config_bclamp(struct vpfe_isif_device *isif,
+ struct vpfe_isif_black_clamp *bc)
+{
+ u32 val;
+
+ /*
+ * The DC offset is always added to the image data, irrespective of
+ * whether black clamping is enabled.
+ */
+ val = bc->dc_offset & ISIF_BC_DCOFFSET_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLDCOFST);
+
+ if (!bc->en)
+ return;
+
+ val = (bc->bc_mode_color & ISIF_BC_MODE_COLOR_MASK) <<
+ ISIF_BC_MODE_COLOR_SHIFT;
+
+ /* Enable BC and horizontal clamp calculation parameters */
+ val = val | 1 | ((bc->horz.mode & ISIF_HORZ_BC_MODE_MASK) <<
+ ISIF_HORZ_BC_MODE_SHIFT);
+
+ isif_write(isif->isif_cfg.base_addr, val, CLAMPCFG);
+
+ if (bc->horz.mode != VPFE_ISIF_HORZ_BC_DISABLE) {
+ /*
+ * Window count for calculation
+ * Base window selection
+ * pixel limit
+ * Horizontal size of window
+ * vertical size of the window
+ * Horizontal start position of the window
+ * Vertical start position of the window
+ */
+ val = (bc->horz.win_count_calc & ISIF_HORZ_BC_WIN_COUNT_MASK) |
+ ((bc->horz.base_win_sel_calc & 1) <<
+ ISIF_HORZ_BC_WIN_SEL_SHIFT) |
+ ((bc->horz.clamp_pix_limit & 1) <<
+ ISIF_HORZ_BC_PIX_LIMIT_SHIFT) |
+ ((bc->horz.win_h_sz_calc &
+ ISIF_HORZ_BC_WIN_H_SIZE_MASK) <<
+ ISIF_HORZ_BC_WIN_H_SIZE_SHIFT) |
+ ((bc->horz.win_v_sz_calc &
+ ISIF_HORZ_BC_WIN_V_SIZE_MASK) <<
+ ISIF_HORZ_BC_WIN_V_SIZE_SHIFT);
+
+ isif_write(isif->isif_cfg.base_addr, val, CLHWIN0);
+
+ val = bc->horz.win_start_h_calc & ISIF_HORZ_BC_WIN_START_H_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLHWIN1);
+
+ val = bc->horz.win_start_v_calc & ISIF_HORZ_BC_WIN_START_V_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLHWIN2);
+ }
+
+ /* vertical clamp calculation parameters */
+ /* OB H Valid */
+ val = bc->vert.ob_h_sz_calc & ISIF_VERT_BC_OB_H_SZ_MASK;
+
+ /* Reset clamp value sel for previous line */
+ val |= (bc->vert.reset_val_sel & ISIF_VERT_BC_RST_VAL_SEL_MASK) <<
+ ISIF_VERT_BC_RST_VAL_SEL_SHIFT;
+
+ /* Line average coefficient */
+ val |= bc->vert.line_ave_coef << ISIF_VERT_BC_LINE_AVE_COEF_SHIFT;
+ isif_write(isif->isif_cfg.base_addr, val, CLVWIN0);
+
+ /* Configured reset value */
+ if (bc->vert.reset_val_sel == VPFE_ISIF_VERT_BC_USE_CONFIG_CLAMP_VAL) {
+ val = bc->vert.reset_clamp_val & ISIF_VERT_BC_RST_VAL_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLVRV);
+ }
+
+ /* Optical Black horizontal start position */
+ val = bc->vert.ob_start_h & ISIF_VERT_BC_OB_START_HORZ_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLVWIN1);
+
+ /* Optical Black vertical start position */
+ val = bc->vert.ob_start_v & ISIF_VERT_BC_OB_START_VERT_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLVWIN2);
+
+ val = bc->vert.ob_v_sz_calc & ISIF_VERT_BC_OB_VERT_SZ_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLVWIN3);
+
+ /* Vertical start position for BC subtraction */
+ val = bc->vert_start_sub & ISIF_BC_VERT_START_SUB_V_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLSV);
+}
+
+/* Configure the capture window size in the ISIF registers. */
+static void
+isif_setwin(struct vpfe_isif_device *isif, struct v4l2_rect *image_win,
+ enum isif_frmfmt frm_fmt, int ppc, int mode)
+{
+ int horz_nr_pixels;
+ int vert_nr_lines;
+ int horz_start;
+ int vert_start;
+ int mid_img;
+
+ /*
+ * ppc - per pixel count. Indicates how many pixels per cell are
+ * output to SDRAM. For example, for ycbcr it is one y and one c,
+ * so 2; for raw capture it is 1.
+ */
+ horz_start = image_win->left << (ppc - 1);
+ horz_nr_pixels = (image_win->width << (ppc - 1)) - 1;
+
+ /* Writing the horizontal info into the registers */
+ isif_write(isif->isif_cfg.base_addr,
+ horz_start & START_PX_HOR_MASK, SPH);
+ isif_write(isif->isif_cfg.base_addr,
+ horz_nr_pixels & NUM_PX_HOR_MASK, LNH);
+ vert_start = image_win->top;
+
+ if (frm_fmt == ISIF_FRMFMT_INTERLACED) {
+ vert_nr_lines = (image_win->height >> 1) - 1;
+ vert_start >>= 1;
+ /* To account for VD since line 0 doesn't have any data */
+ vert_start += 1;
+ } else {
+ /* To account for VD since line 0 doesn't have any data */
+ vert_start += 1;
+ vert_nr_lines = image_win->height - 1;
+ /* configure VDINT0 and VDINT1 */
+ mid_img = vert_start + (image_win->height / 2);
+ isif_write(isif->isif_cfg.base_addr, mid_img, VDINT1);
+ }
+
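+ /* program VDINT0 at line 0 when capturing to memory (mode 0),
+ * or at the last line of the field/frame otherwise
+ */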
+ if (!mode)
+ isif_write(isif->isif_cfg.base_addr, 0, VDINT0);
+ else
+ isif_write(isif->isif_cfg.base_addr, vert_nr_lines, VDINT0);
+ isif_write(isif->isif_cfg.base_addr,
+ vert_start & START_VER_ONE_MASK, SLV0);
+ isif_write(isif->isif_cfg.base_addr,
+ vert_start & START_VER_TWO_MASK, SLV1);
+ isif_write(isif->isif_cfg.base_addr,
+ vert_nr_lines & NUM_LINES_VER, LNV);
+}
+
+#define DM365_ISIF_DFCMWR_MEMORY_WRITE 1
+#define DM365_ISIF_DFCMRD_MEMORY_READ 0x2
+
+static void
+isif_config_dfc(struct vpfe_isif_device *isif, struct vpfe_isif_dfc *vdfc)
+{
+#define DFC_WRITE_WAIT_COUNT 1000
+ u32 count = DFC_WRITE_WAIT_COUNT;
+ u32 val;
+ int i;
+
+ if (!vdfc->en)
+ return;
+
+ /* Correction mode */
+ val = (vdfc->corr_mode & ISIF_VDFC_CORR_MOD_MASK) <<
+ ISIF_VDFC_CORR_MOD_SHIFT;
+
+ /* Correct whole line or partial */
+ if (vdfc->corr_whole_line)
+ val |= 1 << ISIF_VDFC_CORR_WHOLE_LN_SHIFT;
+
+ /* level shift value */
+ val |= (vdfc->def_level_shift & ISIF_VDFC_LEVEL_SHFT_MASK) <<
+ ISIF_VDFC_LEVEL_SHFT_SHIFT;
+
+ isif_write(isif->isif_cfg.base_addr, val, DFCCTL);
+
+ /* Defect saturation level */
+ val = vdfc->def_sat_level & ISIF_VDFC_SAT_LEVEL_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, VDFSATLV);
+
+ isif_write(isif->isif_cfg.base_addr, vdfc->table[0].pos_vert &
+ ISIF_VDFC_POS_MASK, DFCMEM0);
+ isif_write(isif->isif_cfg.base_addr, vdfc->table[0].pos_horz &
+ ISIF_VDFC_POS_MASK, DFCMEM1);
+ if (vdfc->corr_mode == VPFE_ISIF_VDFC_NORMAL ||
+ vdfc->corr_mode == VPFE_ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
+ isif_write(isif->isif_cfg.base_addr,
+ vdfc->table[0].level_at_pos, DFCMEM2);
+ isif_write(isif->isif_cfg.base_addr,
+ vdfc->table[0].level_up_pixels, DFCMEM3);
+ isif_write(isif->isif_cfg.base_addr,
+ vdfc->table[0].level_low_pixels, DFCMEM4);
+ }
+
+ val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
+ /* set DFCMARST and set DFCMWR */
+ val |= 1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT;
+ val |= 1;
+ isif_write(isif->isif_cfg.base_addr, val, DFCMEMCTL);
+
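+ /* wait for the hardware to clear the DFCMWR bit, indicating the
+ * defect table entry has been written
+ */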
+ while (count && (isif_read(isif->isif_cfg.base_addr, DFCMEMCTL) & 0x01))
+ count--;
+
+ val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
+ if (!count) {
+ pr_debug("defect table write timeout !!\n");
+ return;
+ }
+
+ for (i = 1; i < vdfc->num_vdefects; i++) {
+ isif_write(isif->isif_cfg.base_addr, vdfc->table[i].pos_vert &
+ ISIF_VDFC_POS_MASK, DFCMEM0);
+
+ isif_write(isif->isif_cfg.base_addr, vdfc->table[i].pos_horz &
+ ISIF_VDFC_POS_MASK, DFCMEM1);
+
+ if (vdfc->corr_mode == VPFE_ISIF_VDFC_NORMAL ||
+ vdfc->corr_mode == VPFE_ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
+ isif_write(isif->isif_cfg.base_addr,
+ vdfc->table[i].level_at_pos, DFCMEM2);
+ isif_write(isif->isif_cfg.base_addr,
+ vdfc->table[i].level_up_pixels, DFCMEM3);
+ isif_write(isif->isif_cfg.base_addr,
+ vdfc->table[i].level_low_pixels, DFCMEM4);
+ }
+ val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
+ /* clear DFCMARST and set DFCMWR */
+ val &= ~(1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT);
+ val |= 1;
+ isif_write(isif->isif_cfg.base_addr, val, DFCMEMCTL);
+
+ count = DFC_WRITE_WAIT_COUNT;
+ while (count && (isif_read(isif->isif_cfg.base_addr,
+ DFCMEMCTL) & 0x01))
+ count--;
+
+ val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
+ if (!count) {
+ pr_debug("defect table write timeout !!\n");
+ return;
+ }
+ }
+ if (vdfc->num_vdefects < VPFE_ISIF_VDFC_TABLE_SIZE) {
+ /* Extra cycle needed */
+ isif_write(isif->isif_cfg.base_addr, 0, DFCMEM0);
+ isif_write(isif->isif_cfg.base_addr,
+ DM365_ISIF_MAX_DFCMEM1, DFCMEM1);
+ isif_write(isif->isif_cfg.base_addr,
+ DM365_ISIF_DFCMWR_MEMORY_WRITE, DFCMEMCTL);
+ }
+ /* enable VDFC */
+ isif_merge(isif->isif_cfg.base_addr, (1 << ISIF_VDFC_EN_SHIFT),
+ (1 << ISIF_VDFC_EN_SHIFT), DFCCTL);
+
+ isif_merge(isif->isif_cfg.base_addr, (1 << ISIF_VDFC_EN_SHIFT),
+ (0 << ISIF_VDFC_EN_SHIFT), DFCCTL);
+
+ isif_write(isif->isif_cfg.base_addr, 0x6, DFCMEMCTL);
+ for (i = 0 ; i < vdfc->num_vdefects; i++) {
+ count = DFC_WRITE_WAIT_COUNT;
+ while (count &&
+ (isif_read(isif->isif_cfg.base_addr, DFCMEMCTL) & 0x2))
+ count--;
+ val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
+ if (!count) {
+ pr_debug("defect table write timeout !!\n");
+ return;
+ }
+ isif_write(isif->isif_cfg.base_addr,
+ DM365_ISIF_DFCMRD_MEMORY_READ, DFCMEMCTL);
+ }
+}
+
+static void
+isif_config_csc(struct vpfe_isif_device *isif, struct vpfe_isif_df_csc *df_csc)
+{
+ u32 val1;
+ u32 val2;
+ u32 i;
+
+ if (!df_csc->csc.en) {
+ isif_write(isif->isif_cfg.base_addr, 0, CSCCTL);
+ return;
+ }
+ /* initialize all bits to 0 */
+ val1 = 0;
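+ /* two CSC coefficients are packed into each CSCM register:
+ * even-indexed coefficients in the LSB half, odd-indexed ones
+ * in the MSB half
+ */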
+ for (i = 0; i < VPFE_ISIF_CSC_NUM_COEFF; i++) {
+ if ((i % 2) == 0) {
+ /* CSCM - LSB */
+ val1 = ((df_csc->csc.coeff[i].integer &
+ ISIF_CSC_COEF_INTEG_MASK) <<
+ ISIF_CSC_COEF_INTEG_SHIFT) |
+ ((df_csc->csc.coeff[i].decimal &
+ ISIF_CSC_COEF_DECIMAL_MASK));
+ } else {
+
+ /* CSCM - MSB */
+ val2 = ((df_csc->csc.coeff[i].integer &
+ ISIF_CSC_COEF_INTEG_MASK) <<
+ ISIF_CSC_COEF_INTEG_SHIFT) |
+ ((df_csc->csc.coeff[i].decimal &
+ ISIF_CSC_COEF_DECIMAL_MASK));
+ val2 <<= ISIF_CSCM_MSB_SHIFT;
+ val2 |= val1;
+ isif_write(isif->isif_cfg.base_addr, val2,
+ (CSCM0 + ((i-1) << 1)));
+ }
+ }
+ /* program the active area */
+ isif_write(isif->isif_cfg.base_addr, df_csc->start_pix &
+ ISIF_DF_CSC_SPH_MASK, FMTSPH);
+ /*
+ * one extra pixel is required for CSC. Normally the number of
+ * pixels minus 1 should be written to this register (FMTLNH),
+ * but we skip the subtraction since CSC needs the extra pixel.
+ */
+ isif_write(isif->isif_cfg.base_addr, df_csc->num_pixels &
+ ISIF_DF_CSC_SPH_MASK, FMTLNH);
+ isif_write(isif->isif_cfg.base_addr, df_csc->start_line &
+ ISIF_DF_CSC_SPH_MASK, FMTSLV);
+ /*
+ * one extra line as required for CSC. See reason documented for
+ * num_pixels
+ */
+ isif_write(isif->isif_cfg.base_addr, df_csc->num_lines &
+ ISIF_DF_CSC_SPH_MASK, FMTLNV);
+ /* Enable CSC */
+ isif_write(isif->isif_cfg.base_addr, 1, CSCCTL);
+}
+
+static void
+isif_config_linearization(struct vpfe_isif_device *isif,
+ struct vpfe_isif_linearize *linearize)
+{
+ u32 val;
+ u32 i;
+
+ if (!linearize->en) {
+ isif_write(isif->isif_cfg.base_addr, 0, LINCFG0);
+ return;
+ }
+ /* shift value for correction */
+ val = (linearize->corr_shft & ISIF_LIN_CORRSFT_MASK) <<
+ ISIF_LIN_CORRSFT_SHIFT;
+ /* enable */
+ val |= 1;
+ isif_write(isif->isif_cfg.base_addr, val, LINCFG0);
+ /* Scale factor */
+ val = (linearize->scale_fact.integer & 1) <<
+ ISIF_LIN_SCALE_FACT_INTEG_SHIFT;
+ val |= linearize->scale_fact.decimal & ISIF_LIN_SCALE_FACT_DECIMAL_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, LINCFG1);
+
+ for (i = 0; i < VPFE_ISIF_LINEAR_TAB_SIZE; i++) {
+ val = linearize->table[i] & ISIF_LIN_ENTRY_MASK;
+ if (i%2)
+ isif_regw_lin_tbl(isif, val, ((i >> 1) << 2), 1);
+ else
+ isif_regw_lin_tbl(isif, val, ((i >> 1) << 2), 0);
+ }
+}
+
+static void
+isif_config_culling(struct vpfe_isif_device *isif, struct vpfe_isif_cul *cul)
+{
+ u32 val;
+
+ /* Horizontal pattern */
+ val = cul->hcpat_even << CULL_PAT_EVEN_LINE_SHIFT;
+ val |= cul->hcpat_odd;
+ isif_write(isif->isif_cfg.base_addr, val, CULH);
+ /* vertical pattern */
+ isif_write(isif->isif_cfg.base_addr, cul->vcpat, CULV);
+ /* LPF */
+ isif_merge(isif->isif_cfg.base_addr, ISIF_LPF_MASK << ISIF_LPF_SHIFT,
+ cul->en_lpf << ISIF_LPF_SHIFT, MODESET);
+}
+
+static int isif_get_pix_fmt(u32 mbus_code)
+{
+ switch (mbus_code) {
+ case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ return ISIF_PIXFMT_RAW;
+
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_YUYV10_2X10:
+ case V4L2_MBUS_FMT_Y8_1X8:
+ return ISIF_PIXFMT_YCBCR_8BIT;
+
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ case V4L2_MBUS_FMT_YUYV10_1X20:
+ return ISIF_PIXFMT_YCBCR_16BIT;
+
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+#define ISIF_INTERLACE_INVERSE_MODE 0x4b6d
+#define ISIF_INTERLACE_NON_INVERSE_MODE 0x0b6d
+#define ISIF_PROGRESSIVE_INVERSE_MODE 0x4000
+#define ISIF_PROGRESSIVE_NON_INVERSE_MODE 0x0000
+
+static int isif_config_raw(struct v4l2_subdev *sd, int mode)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ struct isif_params_raw *params = &isif->isif_cfg.bayer;
+ struct vpfe_isif_raw_config *module_params =
+ &isif->isif_cfg.bayer.config_params;
+ struct v4l2_mbus_framefmt *format;
+ int pix_fmt;
+ u32 val;
+
+ format = &isif->formats[ISIF_PAD_SINK];
+
+ /* If the user has set BT656IF earlier, it should be reset
+ * when configuring for raw input.
+ */
+ isif_write(isif->isif_cfg.base_addr, 0, REC656IF);
+ /* Configure CCDCFG register
+ * Set CCD Not to swap input since input is RAW data
+ * Set FID detection function to Latch at V-Sync
+ * Set WENLOG - isif valid area
+ * Set TRGSEL
+ * Set EXTRG
+ * Packed to 8 or 16 bits
+ */
+ val = ISIF_YCINSWP_RAW | ISIF_CCDCFG_FIDMD_LATCH_VSYNC |
+ ISIF_CCDCFG_WENLOG_AND | ISIF_CCDCFG_TRGSEL_WEN |
+ ISIF_CCDCFG_EXTRG_DISABLE | (isif->isif_cfg.data_pack &
+ ISIF_DATA_PACK_MASK);
+ isif_write(isif->isif_cfg.base_addr, val, CCDCFG);
+
+ pix_fmt = isif_get_pix_fmt(format->code);
+ if (pix_fmt < 0) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ /*
+ * Configure the vertical sync polarity(MODESET.VDPOL)
+ * Configure the horizontal sync polarity (MODESET.HDPOL)
+ * Configure frame id polarity (MODESET.FLDPOL)
+ * Configure data polarity
+ * Configure External WEN Selection
+ * Configure frame format(progressive or interlace)
+ * Configure pixel format (Input mode)
+ * Configure the data shift
+ */
+ val = ISIF_VDHDOUT_INPUT | ((params->vd_pol & ISIF_VD_POL_MASK) <<
+ ISIF_VD_POL_SHIFT) | ((params->hd_pol & ISIF_HD_POL_MASK) <<
+ ISIF_HD_POL_SHIFT) | ((params->fid_pol & ISIF_FID_POL_MASK) <<
+ ISIF_FID_POL_SHIFT) | ((ISIF_DATAPOL_NORMAL &
+ ISIF_DATAPOL_MASK) << ISIF_DATAPOL_SHIFT) | ((ISIF_EXWEN_DISABLE &
+ ISIF_EXWEN_MASK) << ISIF_EXWEN_SHIFT) | ((params->frm_fmt &
+ ISIF_FRM_FMT_MASK) << ISIF_FRM_FMT_SHIFT) | ((pix_fmt &
+ ISIF_INPUT_MASK) << ISIF_INPUT_SHIFT);
+
+ /* currently only V4L2_MBUS_FMT_SGRBG12_1X12 is
+ * supported. Shift appropriately when more media
+ * bus formats are added.
+ */
+ if (format->code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ val |= ((VPFE_ISIF_NO_SHIFT &
+ ISIF_DATASFT_MASK) << ISIF_DATASFT_SHIFT);
+
+ isif_write(isif->isif_cfg.base_addr, val, MODESET);
+ /*
+ * Configure GAMMAWD register
+ * CFA pattern setting
+ */
+ val = (params->cfa_pat & ISIF_GAMMAWD_CFA_MASK) <<
+ ISIF_GAMMAWD_CFA_SHIFT;
+ /* Gamma msb */
+ if (params->v4l2_pix_fmt == V4L2_PIX_FMT_SGRBG10ALAW8)
+ val = val | ISIF_ALAW_ENABLE;
+
+ val = val | ((params->data_msb & ISIF_ALAW_GAMA_WD_MASK) <<
+ ISIF_ALAW_GAMA_WD_SHIFT);
+
+ isif_write(isif->isif_cfg.base_addr, val, CGAMMAWD);
+ /* Configure DPCM compression settings */
+ if (params->v4l2_pix_fmt == V4L2_PIX_FMT_SGRBG10DPCM8) {
+ val = 1 << ISIF_DPCM_EN_SHIFT;
+ val |= (params->dpcm_predictor &
+ ISIF_DPCM_PREDICTOR_MASK) << ISIF_DPCM_PREDICTOR_SHIFT;
+ }
+ isif_write(isif->isif_cfg.base_addr, val, MISC);
+ /* Configure Gain & Offset */
+ isif_config_gain_offset(isif);
+ /* Configure Color pattern */
+ if (format->code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ val = isif_sgrbg_pattern;
+ else
+ /* default set to rggb */
+ val = isif_srggb_pattern;
+
+ isif_write(isif->isif_cfg.base_addr, val, CCOLP);
+
+ /* Configure HSIZE register */
+ val = (params->horz_flip_en & ISIF_HSIZE_FLIP_MASK) <<
+ ISIF_HSIZE_FLIP_SHIFT;
+
+ /* calculate line offset in units of 32 bytes based on pack value */
+ if (isif->isif_cfg.data_pack == ISIF_PACK_8BIT)
+ val |= ((params->win.width + 31) >> 5) & ISIF_LINEOFST_MASK;
+ else if (isif->isif_cfg.data_pack == ISIF_PACK_12BIT)
+ val |= ((((params->win.width + (params->win.width >> 2)) +
+ 31) >> 5) & ISIF_LINEOFST_MASK);
+ else
+ val |= (((params->win.width * 2) + 31) >> 5) &
+ ISIF_LINEOFST_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, HSIZE);
+ /* Configure SDOFST register */
+ if (params->frm_fmt == ISIF_FRMFMT_INTERLACED) {
+ if (params->image_invert_en)
+ /* For interlace inverse mode */
+ isif_write(isif->isif_cfg.base_addr,
+ ISIF_INTERLACE_INVERSE_MODE, SDOFST);
+ else
+ /* For interlace non inverse mode */
+ isif_write(isif->isif_cfg.base_addr,
+ ISIF_INTERLACE_NON_INVERSE_MODE, SDOFST);
+ } else if (params->frm_fmt == ISIF_FRMFMT_PROGRESSIVE) {
+ if (params->image_invert_en)
+ isif_write(isif->isif_cfg.base_addr,
+ ISIF_PROGRESSIVE_INVERSE_MODE, SDOFST);
+ else
+ /* For progressive non inverse mode */
+ isif_write(isif->isif_cfg.base_addr,
+ ISIF_PROGRESSIVE_NON_INVERSE_MODE, SDOFST);
+ }
+ /* Configure video window */
+ isif_setwin(isif, &params->win, params->frm_fmt, 1, mode);
+ /* Configure Black Clamp */
+ isif_config_bclamp(isif, &module_params->bclamp);
+ /* Configure Vertical Defection Pixel Correction */
+ isif_config_dfc(isif, &module_params->dfc);
+ if (!module_params->df_csc.df_or_csc)
+ /* Configure Color Space Conversion */
+ isif_config_csc(isif, &module_params->df_csc);
+
+ isif_config_linearization(isif, &module_params->linearize);
+ /* Configure Culling */
+ isif_config_culling(isif, &module_params->culling);
+ /* Configure Horizontal and vertical offsets(DFC,LSC,Gain) */
+ val = module_params->horz_offset & ISIF_DATA_H_OFFSET_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, DATAHOFST);
+
+ val = module_params->vert_offset & ISIF_DATA_V_OFFSET_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, DATAVOFST);
+
+ return 0;
+}
+
+#define DM365_ISIF_HSIZE_MASK 0xffffffe0
+#define DM365_ISIF_SDOFST_2_LINES 0x00000249
+
+/* This function will configure ISIF for YCbCr parameters. */
+static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ struct isif_ycbcr_config *params = &isif->isif_cfg.ycbcr;
+ struct v4l2_mbus_framefmt *format;
+ int pix_fmt;
+ u32 modeset;
+ u32 ccdcfg;
+
+ format = &isif->formats[ISIF_PAD_SINK];
+ /*
+ * The ISIF is assumed to have been reset first, so that all
+ * registers hold their default values. This is important since
+ * we rely on the defaults in many registers that we do not
+ * touch here.
+ */
+ /* start with all bits zero */
+ ccdcfg = modeset = 0;
+ pix_fmt = isif_get_pix_fmt(format->code);
+ if (pix_fmt < 0) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ /* configure pixel format or input mode */
+ modeset = modeset | ((pix_fmt & ISIF_INPUT_MASK) <<
+ ISIF_INPUT_SHIFT) | ((params->frm_fmt & ISIF_FRM_FMT_MASK) <<
+ ISIF_FRM_FMT_SHIFT) | (((params->fid_pol &
+ ISIF_FID_POL_MASK) << ISIF_FID_POL_SHIFT)) |
+ (((params->hd_pol & ISIF_HD_POL_MASK) << ISIF_HD_POL_SHIFT)) |
+ (((params->vd_pol & ISIF_VD_POL_MASK) << ISIF_VD_POL_SHIFT));
+ /* pack the data to 8-bit CCDCCFG */
+ switch (format->code) {
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ modeset |= ((VPFE_PINPOL_NEGATIVE & ISIF_VD_POL_MASK) <<
+ ISIF_VD_POL_SHIFT);
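+ /* enable BT.656 mode with embedded syncs */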
+ isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
+ ccdcfg = ccdcfg | ISIF_PACK_8BIT | ISIF_YCINSWP_YCBCR;
+ break;
+
+ case V4L2_MBUS_FMT_YUYV10_2X10:
+ if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ /* setup BT.656, embedded sync */
+ isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
+ /* enable 10 bit mode in ccdcfg */
+ ccdcfg = ccdcfg | ISIF_PACK_8BIT | ISIF_YCINSWP_YCBCR |
+ ISIF_BW656_ENABLE;
+ break;
+
+ case V4L2_MBUS_FMT_YUYV10_1X20:
+ if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
+ break;
+
+ case V4L2_MBUS_FMT_Y8_1X8:
+ ccdcfg |= ISIF_PACK_8BIT;
+ ccdcfg |= ISIF_YCINSWP_YCBCR;
+ if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ break;
+
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ /* should never come here */
+ pr_debug("Invalid interface type\n");
+ return -EINVAL;
+ }
+ isif_write(isif->isif_cfg.base_addr, modeset, MODESET);
+ /* Set up pix order */
+ ccdcfg |= (params->pix_order & ISIF_PIX_ORDER_MASK) <<
+ ISIF_PIX_ORDER_SHIFT;
+ isif_write(isif->isif_cfg.base_addr, ccdcfg, CCDCFG);
+ /* configure video window */
+ if (format->code == V4L2_MBUS_FMT_YUYV10_1X20 ||
+ format->code == V4L2_MBUS_FMT_YUYV8_1X16)
+ isif_setwin(isif, &params->win, params->frm_fmt, 1, mode);
+ else
+ isif_setwin(isif, &params->win, params->frm_fmt, 2, mode);
+
+ /*
+ * configure the horizontal line offset
+ * this is done by rounding up width to a multiple of 16 pixels
+ * and multiply by two to account for y:cb:cr 4:2:2 data
+ */
+ isif_write(isif->isif_cfg.base_addr,
+ ((((params->win.width * 2) + 31) &
+ DM365_ISIF_HSIZE_MASK) >> 5), HSIZE);
+
+ /* configure the memory line offset */
+ if (params->frm_fmt == ISIF_FRMFMT_INTERLACED &&
+ params->buf_type == ISIF_BUFTYPE_FLD_INTERLEAVED)
+ /* two fields are interleaved in memory */
+ isif_write(isif->isif_cfg.base_addr,
+ DM365_ISIF_SDOFST_2_LINES, SDOFST);
+ return 0;
+}
+
+static int isif_configure(struct v4l2_subdev *sd, int mode)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = &isif->formats[ISIF_PAD_SINK];
+
+ switch (format->code) {
+ case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ return isif_config_raw(sd, mode);
+
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_YUYV10_2X10:
+ case V4L2_MBUS_FMT_Y8_1X8:
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ case V4L2_MBUS_FMT_YUYV10_1X20:
+ return isif_config_ycbcr(sd, mode);
+
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+/*
+ * isif_set_stream() - Enable/Disable streaming on the ISIF module
+ * @sd: VPFE ISIF V4L2 subdevice
+ * @enable: Enable/disable stream
+ */
+static int isif_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ int ret;
+
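+ /* mode 0: capture to memory, mode 1: data routed on-chip
+ * (e.g. to the IPIPEIF)
+ */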
+ if (enable) {
+ ret = isif_configure(sd,
+ (isif->output == ISIF_OUTPUT_MEMORY) ? 0 : 1);
+ if (ret)
+ return ret;
+ if (isif->output == ISIF_OUTPUT_MEMORY)
+ isif_enable_output_to_sdram(isif, 1);
+ isif_enable(isif, 1);
+ } else {
+ isif_enable(isif, 0);
+ isif_enable_output_to_sdram(isif, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * __isif_get_format() - helper function for getting isif format
+ * @isif: pointer to isif private structure.
+ * @pad: pad number.
+ * @fh: V4L2 subdev file handle.
+ * @which: wanted subdev format.
+ */
+static struct v4l2_mbus_framefmt *
+__isif_get_format(struct vpfe_isif_device *isif, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ return &isif->formats[pad];
+}
+
+/*
+* isif_set_format() - set format on pad
+* @sd : VPFE ISIF device
+* @fh : V4L2 subdev file handle
+* @fmt : pointer to v4l2 subdev format structure
+*
+* Return 0 on success or -EINVAL if format or pad is invalid
+*/
+static int
+isif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __isif_get_format(isif, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ isif_try_format(isif, fh, fmt);
+ memcpy(format, &fmt->format, sizeof(*format));
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ if (fmt->pad == ISIF_PAD_SOURCE)
+ return isif_config_format(vpfe_dev, fmt->pad);
+
+ return 0;
+}
+
+/*
+ * isif_get_format() - Retrieve the video format on a pad
+ * @sd: VPFE ISIF V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int
+isif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __isif_get_format(vpfe_isif, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ memcpy(&fmt->format, format, sizeof(fmt->format));
+
+ return 0;
+}
+
+/*
+ * isif_enum_frame_size() - enum frame sizes on pads
+ * @sd: VPFE isif V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @fse: pointer to v4l2_subdev_frame_size_enum structure
+ */
+static int
+isif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev_format format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.pad = fse->pad;
+ format.format.code = fse->code;
+ format.format.width = 1;
+ format.format.height = 1;
+ format.which = V4L2_SUBDEV_FORMAT_TRY;
+ isif_try_format(isif, fh, &format);
+ fse->min_width = format.format.width;
+ fse->min_height = format.format.height;
+
+ if (format.format.code != fse->code)
+ return -EINVAL;
+
+ format.pad = fse->pad;
+ format.format.code = fse->code;
+ format.format.width = -1;
+ format.format.height = -1;
+ format.which = V4L2_SUBDEV_FORMAT_TRY;
+ isif_try_format(isif, fh, &format);
+ fse->max_width = format.format.width;
+ fse->max_height = format.format.height;
+
+ return 0;
+}
+
+/*
+ * isif_enum_mbus_code() - enum mbus codes for pads
+ * @sd: VPFE isif V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ */
+static int
+isif_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ switch (code->pad) {
+ case ISIF_PAD_SINK:
+ case ISIF_PAD_SOURCE:
+ if (code->index >= ARRAY_SIZE(isif_fmts))
+ return -EINVAL;
+ code->code = isif_fmts[code->index];
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * isif_pad_set_crop() - set crop rectangle on pad
+ * @sd: VPFE isif V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @crop: pointer to v4l2_subdev_crop structure
+ *
+ * Return 0 on success, -EINVAL if pad is invalid
+ */
+static int
+isif_pad_set_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ /* check whether it is a valid pad */
+ if (crop->pad != ISIF_PAD_SINK)
+ return -EINVAL;
+
+ format = __isif_get_format(vpfe_isif, fh, crop->pad, crop->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ /* check whether the crop rectangle is within limits */
+ if (crop->rect.top < 0 || crop->rect.left < 0 ||
+ (crop->rect.left + crop->rect.width >
+ vpfe_isif->formats[ISIF_PAD_SINK].width) ||
+ (crop->rect.top + crop->rect.height >
+ vpfe_isif->formats[ISIF_PAD_SINK].height)) {
+ crop->rect.left = 0;
+ crop->rect.top = 0;
+ crop->rect.width = format->width;
+ crop->rect.height = format->height;
+ }
+ /* align the width to a 16-pixel boundary */
+ crop->rect.width = ((crop->rect.width + 15) & ~0xf);
+ vpfe_isif->crop = crop->rect;
+ if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ isif_set_image_window(vpfe_isif);
+ } else {
+ struct v4l2_rect *rect;
+
+ rect = v4l2_subdev_get_try_crop(fh, ISIF_PAD_SINK);
+ memcpy(rect, &vpfe_isif->crop, sizeof(*rect));
+ }
+ return 0;
+}
+
+/*
+ * isif_pad_get_crop() - get crop rectangle on pad
+ * @sd: VPFE isif V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @crop: pointer to v4l2_subdev_crop structure
+ *
+ * Return 0 on success, -EINVAL if pad is invalid
+ */
+static int
+isif_pad_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
+
+ /* check whether it is a valid pad */
+ if (crop->pad != ISIF_PAD_SINK)
+ return -EINVAL;
+
+ if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_rect *rect;
+ rect = v4l2_subdev_get_try_crop(fh, ISIF_PAD_SINK);
+ memcpy(&crop->rect, rect, sizeof(*rect));
+ } else {
+ crop->rect = vpfe_isif->crop;
+ }
+
+ return 0;
+}
+
+/*
+ * isif_init_formats() - Initialize formats on all pads
+ * @sd: VPFE isif V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int
+isif_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+ struct v4l2_subdev_crop crop;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = ISIF_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format.format.width = MAX_WIDTH;
+ format.format.height = MAX_HEIGHT;
+ isif_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = ISIF_PAD_SOURCE;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format.format.width = MAX_WIDTH;
+ format.format.height = MAX_HEIGHT;
+ isif_set_format(sd, fh, &format);
+
+ memset(&crop, 0, sizeof(crop));
+ crop.pad = ISIF_PAD_SINK;
+ crop.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ crop.rect.width = MAX_WIDTH;
+ crop.rect.height = MAX_HEIGHT;
+ isif_pad_set_crop(sd, fh, &crop);
+
+ return 0;
+}
+
+/* subdev core operations */
+static const struct v4l2_subdev_core_ops isif_v4l2_core_ops = {
+ .ioctl = isif_ioctl,
+};
+
+/* subdev file operations */
+static const struct v4l2_subdev_internal_ops isif_v4l2_internal_ops = {
+ .open = isif_init_formats,
+};
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops isif_v4l2_video_ops = {
+ .s_stream = isif_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops isif_v4l2_pad_ops = {
+ .enum_mbus_code = isif_enum_mbus_code,
+ .enum_frame_size = isif_enum_frame_size,
+ .get_fmt = isif_get_format,
+ .set_fmt = isif_set_format,
+ .set_crop = isif_pad_set_crop,
+ .get_crop = isif_pad_get_crop,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops isif_v4l2_ops = {
+ .core = &isif_v4l2_core_ops,
+ .video = &isif_v4l2_video_ops,
+ .pad = &isif_v4l2_pad_ops,
+};
+
+/*
+ * Media entity operations
+ */
+
+/*
+ * isif_link_setup() - Setup isif connections
+ * @entity: isif media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int
+isif_link_setup(struct media_entity *entity, const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+
+ switch (local->index | media_entity_type(remote->entity)) {
+ case ISIF_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* read from decoder/sensor */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ isif->input = ISIF_INPUT_NONE;
+ break;
+ }
+ if (isif->input != ISIF_INPUT_NONE)
+ return -EBUSY;
+ isif->input = ISIF_INPUT_PARALLEL;
+ break;
+
+ case ISIF_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+ /* write to memory */
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ isif->output = ISIF_OUTPUT_MEMORY;
+ else
+ isif->output = ISIF_OUTPUT_NONE;
+ break;
+
+ case ISIF_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ isif->output = ISIF_OUTPUT_IPIPEIF;
+ else
+ isif->output = ISIF_OUTPUT_NONE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+static const struct media_entity_operations isif_media_ops = {
+ .link_setup = isif_link_setup,
+};
+
+/*
+ * vpfe_isif_unregister_entities() - isif unregister entity
+ * @isif: pointer to isif subdevice structure.
+ */
+void vpfe_isif_unregister_entities(struct vpfe_isif_device *isif)
+{
+ vpfe_video_unregister(&isif->video_out);
+ /* cleanup entity */
+ media_entity_cleanup(&isif->subdev.entity);
+ /* unregister subdev */
+ v4l2_device_unregister_subdev(&isif->subdev);
+}
+
+static void isif_restore_defaults(struct vpfe_isif_device *isif)
+{
+ enum vpss_ccdc_source_sel source = VPSS_CCDCIN;
+ int i;
+
+ memset(&isif->isif_cfg.bayer.config_params, 0,
+ sizeof(struct vpfe_isif_raw_config));
+
+ isif->isif_cfg.bayer.config_params.linearize.corr_shft =
+ VPFE_ISIF_NO_SHIFT;
+ isif->isif_cfg.bayer.config_params.linearize.scale_fact.integer = 1;
+ isif->isif_cfg.bayer.config_params.culling.hcpat_odd =
+ ISIF_CULLING_HCAPT_ODD;
+ isif->isif_cfg.bayer.config_params.culling.hcpat_even =
+ ISIF_CULLING_HCAPT_EVEN;
+ isif->isif_cfg.bayer.config_params.culling.vcpat = ISIF_CULLING_VCAPT;
+ /* Enable clock to ISIF, IPIPEIF and BL */
+ vpss_enable_clock(VPSS_CCDC_CLOCK, 1);
+ vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
+ vpss_enable_clock(VPSS_BL_CLOCK, 1);
+
+ /* set all registers to default value */
+ for (i = 0; i <= 0x1f8; i += 4)
+ isif_write(isif->isif_cfg.base_addr, 0, i);
+ /* no culling support */
+ isif_write(isif->isif_cfg.base_addr, 0xffff, CULH);
+ isif_write(isif->isif_cfg.base_addr, 0xff, CULV);
+
+ /* Set default offset and gain */
+ isif_config_gain_offset(isif);
+ vpss_select_ccdc_source(source);
+}
+
+/*
+ * vpfe_isif_register_entities() - isif register entity
+ * @isif: pointer to isif subdevice structure.
+ * @vdev: pointer to v4l2 device structure.
+ */
+int vpfe_isif_register_entities(struct vpfe_isif_device *isif,
+ struct v4l2_device *vdev)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
+ unsigned int flags;
+ int ret;
+
+ /* Register the subdev */
+ ret = v4l2_device_register_subdev(vdev, &isif->subdev);
+ if (ret < 0)
+ return ret;
+
+ isif_restore_defaults(isif);
+ ret = vpfe_video_register(&isif->video_out, vdev);
+ if (ret) {
+ pr_err("Failed to register isif video out device\n");
+ goto out_video_register;
+ }
+ isif->video_out.vpfe_dev = vpfe_dev;
+ flags = 0;
+ /* connect isif to video node */
+ ret = media_entity_create_link(&isif->subdev.entity, 1,
+ &isif->video_out.video_dev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_create_link;
+ return 0;
+out_create_link:
+ vpfe_video_unregister(&isif->video_out);
+out_video_register:
+ v4l2_device_unregister_subdev(&isif->subdev);
+ return ret;
+}
+
+/* -------------------------------------------------------------------
+ * V4L2 subdev control operations
+ */
+
+static int vpfe_isif_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpfe_isif_device *isif =
+ container_of(ctrl->handler, struct vpfe_isif_device, ctrls);
+ struct isif_oper_config *config = &isif->isif_cfg;
+
+ switch (ctrl->id) {
+ case VPFE_CID_DPCM_PREDICTOR:
+ config->bayer.dpcm_predictor = ctrl->val;
+ break;
+
+ case VPFE_ISIF_CID_CRGAIN:
+ config->isif_gain_params.cr_gain = ctrl->val;
+ break;
+
+ case VPFE_ISIF_CID_CGRGAIN:
+ config->isif_gain_params.cgr_gain = ctrl->val;
+ break;
+
+ case VPFE_ISIF_CID_CGBGAIN:
+ config->isif_gain_params.cgb_gain = ctrl->val;
+ break;
+
+ case VPFE_ISIF_CID_CBGAIN:
+ config->isif_gain_params.cb_gain = ctrl->val;
+ break;
+
+ case VPFE_ISIF_CID_GAIN_OFFSET:
+ config->isif_gain_params.offset = ctrl->val;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vpfe_isif_ctrl_ops = {
+ .s_ctrl = vpfe_isif_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vpfe_isif_dpcm_pred = {
+ .ops = &vpfe_isif_ctrl_ops,
+ .id = VPFE_CID_DPCM_PREDICTOR,
+ .name = "DPCM Predictor",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config vpfe_isif_crgain = {
+ .ops = &vpfe_isif_ctrl_ops,
+ .id = VPFE_ISIF_CID_CRGAIN,
+ .name = "CRGAIN",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config vpfe_isif_cgrgain = {
+ .ops = &vpfe_isif_ctrl_ops,
+ .id = VPFE_ISIF_CID_CGRGAIN,
+ .name = "CGRGAIN",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config vpfe_isif_cgbgain = {
+ .ops = &vpfe_isif_ctrl_ops,
+ .id = VPFE_ISIF_CID_CGBGAIN,
+ .name = "CGBGAIN",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config vpfe_isif_cbgain = {
+ .ops = &vpfe_isif_ctrl_ops,
+ .id = VPFE_ISIF_CID_CBGAIN,
+ .name = "CBGAIN",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config vpfe_isif_gain_offset = {
+ .ops = &vpfe_isif_ctrl_ops,
+ .id = VPFE_ISIF_CID_GAIN_OFFSET,
+ .name = "Gain Offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0,
+};
+
+static void isif_remove(struct vpfe_isif_device *isif,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ int i = 0;
+
+ iounmap(isif->isif_cfg.base_addr);
+ iounmap(isif->isif_cfg.linear_tbl0_addr);
+ iounmap(isif->isif_cfg.linear_tbl1_addr);
+
+ while (i < 3) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (res)
+ release_mem_region(res->start,
+ res->end - res->start + 1);
+ i++;
+ }
+}
+
+static void isif_config_defaults(struct vpfe_isif_device *isif)
+{
+ isif->isif_cfg.ycbcr.v4l2_pix_fmt = V4L2_PIX_FMT_UYVY;
+ isif->isif_cfg.ycbcr.pix_fmt = ISIF_PIXFMT_YCBCR_8BIT;
+ isif->isif_cfg.ycbcr.frm_fmt = ISIF_FRMFMT_INTERLACED;
+ isif->isif_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
+ isif->isif_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
+ isif->isif_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
+ isif->isif_cfg.ycbcr.pix_order = ISIF_PIXORDER_CBYCRY;
+ isif->isif_cfg.ycbcr.buf_type = ISIF_BUFTYPE_FLD_INTERLEAVED;
+
+ isif->isif_cfg.bayer.v4l2_pix_fmt = V4L2_PIX_FMT_SGRBG10ALAW8;
+ isif->isif_cfg.bayer.pix_fmt = ISIF_PIXFMT_RAW;
+ isif->isif_cfg.bayer.frm_fmt = ISIF_FRMFMT_PROGRESSIVE;
+ isif->isif_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
+ isif->isif_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
+ isif->isif_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;
+ isif->isif_cfg.bayer.cfa_pat = ISIF_CFA_PAT_MOSAIC;
+ isif->isif_cfg.bayer.data_msb = ISIF_BIT_MSB_11;
+ isif->isif_cfg.data_pack = ISIF_PACK_8BIT;
+}
+/*
+ * vpfe_isif_init() - Initialize V4L2 subdev and media entity
+ * @isif: VPFE isif module
+ * @pdev: Pointer to platform device structure.
+ * Return 0 on success and a negative error code on failure.
+ */
+int vpfe_isif_init(struct vpfe_isif_device *isif, struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = &isif->subdev;
+ struct media_pad *pads = &isif->pads[0];
+ struct media_entity *me = &sd->entity;
+ static resource_size_t res_len;
+ struct resource *res;
+ void __iomem *addr;
+ int status;
+ int i = 0;
+
+ /* Get the ISIF base address, linearization table0 and table1 addr. */
+ while (i < 3) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res) {
+ status = -ENOENT;
+ goto fail_nobase_res;
+ }
+ res_len = res->end - res->start + 1;
+ res = request_mem_region(res->start, res_len, res->name);
+ if (!res) {
+ status = -EBUSY;
+ goto fail_nobase_res;
+ }
+ addr = ioremap_nocache(res->start, res_len);
+ if (!addr) {
+ status = -EBUSY;
+ goto fail_base_iomap;
+ }
+ switch (i) {
+ case 0:
+ /* ISIF base address */
+ isif->isif_cfg.base_addr = addr;
+ break;
+ case 1:
+ /* ISIF linear tbl0 address */
+ isif->isif_cfg.linear_tbl0_addr = addr;
+ break;
+ default:
+ /* ISIF linear tbl1 address */
+ isif->isif_cfg.linear_tbl1_addr = addr;
+ break;
+ }
+ i++;
+ }
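+ /* set up the DM365 pin multiplexing for the camera interface
+ * (WEN, VD, HD and the YIN data lines)
+ */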
+ davinci_cfg_reg(DM365_VIN_CAM_WEN);
+ davinci_cfg_reg(DM365_VIN_CAM_VD);
+ davinci_cfg_reg(DM365_VIN_CAM_HD);
+ davinci_cfg_reg(DM365_VIN_YIN4_7_EN);
+ davinci_cfg_reg(DM365_VIN_YIN0_3_EN);
+
+ /* queue ops */
+ isif->video_out.ops = &isif_video_ops;
+ v4l2_subdev_init(sd, &isif_v4l2_ops);
+ sd->internal_ops = &isif_v4l2_internal_ops;
+ strlcpy(sd->name, "DAVINCI ISIF", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
+ v4l2_set_subdevdata(sd, isif);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+ pads[ISIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[ISIF_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ isif->input = ISIF_INPUT_NONE;
+ isif->output = ISIF_OUTPUT_NONE;
+ me->ops = &isif_media_ops;
+ status = media_entity_init(me, ISIF_PADS_NUM, pads, 0);
+ if (status)
+ goto isif_fail;
+ isif->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ status = vpfe_video_init(&isif->video_out, "ISIF");
+ if (status) {
+ pr_err("Failed to init isif-out video device\n");
+ goto isif_fail;
+ }
+ v4l2_ctrl_handler_init(&isif->ctrls, 6);
+ v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_crgain, NULL);
+ v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_cgrgain, NULL);
+ v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_cgbgain, NULL);
+ v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_cbgain, NULL);
+ v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_gain_offset, NULL);
+ v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_dpcm_pred, NULL);
+
+ v4l2_ctrl_handler_setup(&isif->ctrls);
+ sd->ctrl_handler = &isif->ctrls;
+ isif_config_defaults(isif);
+ return 0;
+fail_base_iomap:
+ release_mem_region(res->start, res_len);
+ i--;
+fail_nobase_res:
+ if (isif->isif_cfg.base_addr)
+ iounmap(isif->isif_cfg.base_addr);
+ if (isif->isif_cfg.linear_tbl0_addr)
+ iounmap(isif->isif_cfg.linear_tbl0_addr);
+
+ while (i >= 0) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ release_mem_region(res->start, res_len);
+ i--;
+ }
+ return status;
+isif_fail:
+ v4l2_ctrl_handler_free(&isif->ctrls);
+ isif_remove(isif, pdev);
+ return status;
+}
+
+/*
+ * vpfe_isif_cleanup - isif module cleanup
+ * @isif: pointer to isif subdevice
+ * @dev: pointer to platform device structure
+ */
+void
+vpfe_isif_cleanup(struct vpfe_isif_device *isif, struct platform_device *pdev)
+{
+ isif_remove(isif, pdev);
+}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.h b/drivers/staging/media/davinci_vpfe/dm365_isif.h
new file mode 100644
index 000000000000..473fd2cfe350
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_ISIF_H
+#define _DAVINCI_VPFE_DM365_ISIF_H
+
+#include <linux/platform_device.h>
+
+#include <mach/mux.h>
+
+#include <media/davinci/vpfe_types.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#include "davinci_vpfe_user.h"
+#include "dm365_isif_regs.h"
+#include "vpfe_video.h"
+
+#define ISIF_CULLING_HCAPT_ODD 0xff
+#define ISIF_CULLING_HCAPT_EVEN 0xff
+#define ISIF_CULLING_VCAPT 0xff
+
+#define ISIF_CADU_BITS 0x07ff
+#define ISIF_CADL_BITS 0x0ffff
+
+enum isif_pixfmt {
+ ISIF_PIXFMT_RAW = 0,
+ ISIF_PIXFMT_YCBCR_16BIT = 1,
+ ISIF_PIXFMT_YCBCR_8BIT = 2,
+};
+
+enum isif_frmfmt {
+ ISIF_FRMFMT_PROGRESSIVE = 0,
+ ISIF_FRMFMT_INTERLACED = 1,
+};
+
+/* PIXEL ORDER IN MEMORY from LSB to MSB */
+/* only applicable for 8-bit input mode */
+enum isif_pixorder {
+ ISIF_PIXORDER_YCBYCR = 0,
+ ISIF_PIXORDER_CBYCRY = 1,
+};
+
+enum isif_buftype {
+ ISIF_BUFTYPE_FLD_INTERLEAVED = 0,
+ ISIF_BUFTYPE_FLD_SEPARATED = 1,
+};
+
+struct isif_ycbcr_config {
+ /* v4l2 pixel format */
+ unsigned long v4l2_pix_fmt;
+ /* isif pixel format */
+ enum isif_pixfmt pix_fmt;
+ /* isif frame format */
+ enum isif_frmfmt frm_fmt;
+ /* isif crop window */
+ struct v4l2_rect win;
+ /* field polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* interface VD polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* interface HD polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* isif pix order. Only used for ycbcr capture */
+ enum isif_pixorder pix_order;
+ /* isif buffer type. Only used for ycbcr capture */
+ enum isif_buftype buf_type;
+};
+
+enum isif_cfa_pattern {
+ ISIF_CFA_PAT_MOSAIC = 0,
+ ISIF_CFA_PAT_STRIPE = 1,
+};
+
+enum isif_data_msb {
+ /* MSB b15 */
+ ISIF_BIT_MSB_15 = 0,
+ /* MSB b14 */
+ ISIF_BIT_MSB_14 = 1,
+ /* MSB b13 */
+ ISIF_BIT_MSB_13 = 2,
+ /* MSB b12 */
+ ISIF_BIT_MSB_12 = 3,
+ /* MSB b11 */
+ ISIF_BIT_MSB_11 = 4,
+ /* MSB b10 */
+ ISIF_BIT_MSB_10 = 5,
+ /* MSB b9 */
+ ISIF_BIT_MSB_9 = 6,
+ /* MSB b8 */
+ ISIF_BIT_MSB_8 = 7,
+ /* MSB b7 */
+ ISIF_BIT_MSB_7 = 8,
+};
+
+struct isif_params_raw {
+ /* v4l2 pixel format */
+ unsigned long v4l2_pix_fmt;
+ /* isif pixel format */
+ enum isif_pixfmt pix_fmt;
+ /* isif frame format */
+ enum isif_frmfmt frm_fmt;
+ /* video window */
+ struct v4l2_rect win;
+ /* field polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* interface VD polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* interface HD polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* buffer type. Applicable for interlaced mode */
+ enum isif_buftype buf_type;
+ /* cfa pattern */
+ enum isif_cfa_pattern cfa_pat;
+ /* Data MSB position */
+ enum isif_data_msb data_msb;
+ /* Enable horizontal flip */
+ unsigned char horz_flip_en;
+ /* Enable image invert vertically */
+ unsigned char image_invert_en;
+ unsigned char dpcm_predictor;
+ struct vpfe_isif_raw_config config_params;
+};
+
+enum isif_data_pack {
+ ISIF_PACK_16BIT = 0,
+ ISIF_PACK_12BIT = 1,
+ ISIF_PACK_8BIT = 2,
+};
+
+struct isif_gain_values {
+ unsigned int cr_gain;
+ unsigned int cgr_gain;
+ unsigned int cgb_gain;
+ unsigned int cb_gain;
+ unsigned int offset;
+};
+
+struct isif_oper_config {
+ struct isif_ycbcr_config ycbcr;
+ struct isif_params_raw bayer;
+ enum isif_data_pack data_pack;
+ struct isif_gain_values isif_gain_params;
+ void __iomem *base_addr;
+ void __iomem *linear_tbl0_addr;
+ void __iomem *linear_tbl1_addr;
+};
+
+#define ISIF_PAD_SINK 0
+#define ISIF_PAD_SOURCE 1
+
+#define ISIF_PADS_NUM 2
+
+enum isif_input_entity {
+ ISIF_INPUT_NONE = 0,
+ ISIF_INPUT_PARALLEL = 1,
+};
+
+#define ISIF_OUTPUT_NONE (0)
+#define ISIF_OUTPUT_MEMORY (1 << 0)
+#define ISIF_OUTPUT_IPIPEIF (1 << 1)
+
+struct vpfe_isif_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[ISIF_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[ISIF_PADS_NUM];
+ enum isif_input_entity input;
+ unsigned int output;
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_rect crop;
+ struct isif_oper_config isif_cfg;
+ struct vpfe_video_device video_out;
+};
+
+enum v4l2_field vpfe_isif_get_fid(struct vpfe_device *vpfe_dev);
+void vpfe_isif_unregister_entities(struct vpfe_isif_device *isif);
+int vpfe_isif_register_entities(struct vpfe_isif_device *isif,
+ struct v4l2_device *dev);
+int vpfe_isif_init(struct vpfe_isif_device *isif, struct platform_device *pdev);
+void vpfe_isif_cleanup(struct vpfe_isif_device *vpfe_isif,
+ struct platform_device *pdev);
+void vpfe_isif_vidint1_isr(struct vpfe_isif_device *isif);
+void vpfe_isif_buffer_isr(struct vpfe_isif_device *isif);
+
+#endif /* _DAVINCI_VPFE_DM365_ISIF_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h b/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h
new file mode 100644
index 000000000000..8aceabb43f8e
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_ISIF_REGS_H
+#define _DAVINCI_VPFE_DM365_ISIF_REGS_H
+
+/* ISIF registers relative offsets */
+#define SYNCEN 0x00
+#define MODESET 0x04
+#define HDW 0x08
+#define VDW 0x0c
+#define PPLN 0x10
+#define LPFR 0x14
+#define SPH 0x18
+#define LNH 0x1c
+#define SLV0 0x20
+#define SLV1 0x24
+#define LNV 0x28
+#define CULH 0x2c
+#define CULV 0x30
+#define HSIZE 0x34
+#define SDOFST 0x38
+#define CADU 0x3c
+#define CADL 0x40
+#define LINCFG0 0x44
+#define LINCFG1 0x48
+#define CCOLP 0x4c
+#define CRGAIN 0x50
+#define CGRGAIN 0x54
+#define CGBGAIN 0x58
+#define CBGAIN 0x5c
+#define COFSTA 0x60
+#define FLSHCFG0 0x64
+#define FLSHCFG1 0x68
+#define FLSHCFG2 0x6c
+#define VDINT0 0x70
+#define VDINT1 0x74
+#define VDINT2 0x78
+#define MISC 0x7c
+#define CGAMMAWD 0x80
+#define REC656IF 0x84
+#define CCDCFG 0x88
+/*****************************************************
+* Defect Correction registers
+*****************************************************/
+#define DFCCTL 0x8c
+#define VDFSATLV 0x90
+#define DFCMEMCTL 0x94
+#define DFCMEM0 0x98
+#define DFCMEM1 0x9c
+#define DFCMEM2 0xa0
+#define DFCMEM3 0xa4
+#define DFCMEM4 0xa8
+/****************************************************
+* Black Clamp registers
+****************************************************/
+#define CLAMPCFG 0xac
+#define CLDCOFST 0xb0
+#define CLSV 0xb4
+#define CLHWIN0 0xb8
+#define CLHWIN1 0xbc
+#define CLHWIN2 0xc0
+#define CLVRV 0xc4
+#define CLVWIN0 0xc8
+#define CLVWIN1 0xcc
+#define CLVWIN2 0xd0
+#define CLVWIN3 0xd4
+/****************************************************
+* Lens Shading Correction
+****************************************************/
+#define DATAHOFST 0xd8
+#define DATAVOFST 0xdc
+#define LSCHVAL 0xe0
+#define LSCVVAL 0xe4
+#define TWODLSCCFG 0xe8
+#define TWODLSCOFST 0xec
+#define TWODLSCINI 0xf0
+#define TWODLSCGRBU 0xf4
+#define TWODLSCGRBL 0xf8
+#define TWODLSCGROF 0xfc
+#define TWODLSCORBU 0x100
+#define TWODLSCORBL 0x104
+#define TWODLSCOROF 0x108
+#define TWODLSCIRQEN 0x10c
+#define TWODLSCIRQST 0x110
+/****************************************************
+* Data formatter
+****************************************************/
+#define FMTCFG 0x114
+#define FMTPLEN 0x118
+#define FMTSPH 0x11c
+#define FMTLNH 0x120
+#define FMTSLV 0x124
+#define FMTLNV 0x128
+#define FMTRLEN 0x12c
+#define FMTHCNT 0x130
+#define FMTAPTR_BASE 0x134
+/* The macro below generates the addresses of FMTAPTR0 - FMTAPTR15 */
+#define FMTAPTR(i) (FMTAPTR_BASE + (i * 4))
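+/* e.g. FMTAPTR(3) expands to 0x134 + 3 * 4 = 0x140, i.e. the FMTAPTR3 register */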
+#define FMTPGMVF0 0x174
+#define FMTPGMVF1 0x178
+#define FMTPGMAPU0 0x17c
+#define FMTPGMAPU1 0x180
+#define FMTPGMAPS0 0x184
+#define FMTPGMAPS1 0x188
+#define FMTPGMAPS2 0x18c
+#define FMTPGMAPS3 0x190
+#define FMTPGMAPS4 0x194
+#define FMTPGMAPS5 0x198
+#define FMTPGMAPS6 0x19c
+#define FMTPGMAPS7 0x1a0
+/************************************************
+* Color Space Converter
+************************************************/
+#define CSCCTL 0x1a4
+#define CSCM0 0x1a8
+#define CSCM1 0x1ac
+#define CSCM2 0x1b0
+#define CSCM3 0x1b4
+#define CSCM4 0x1b8
+#define CSCM5 0x1bc
+#define CSCM6 0x1c0
+#define CSCM7 0x1c4
+#define OBWIN0 0x1c8
+#define OBWIN1 0x1cc
+#define OBWIN2 0x1d0
+#define OBWIN3 0x1d4
+#define OBVAL0 0x1d8
+#define OBVAL1 0x1dc
+#define OBVAL2 0x1e0
+#define OBVAL3 0x1e4
+#define OBVAL4 0x1e8
+#define OBVAL5 0x1ec
+#define OBVAL6 0x1f0
+#define OBVAL7 0x1f4
+#define CLKCTL 0x1f8
+
+/* Masks & Shifts below */
+#define START_PX_HOR_MASK 0x7fff
+#define NUM_PX_HOR_MASK 0x7fff
+#define START_VER_ONE_MASK 0x7fff
+#define START_VER_TWO_MASK 0x7fff
+#define NUM_LINES_VER 0x7fff
+
+/* gain - offset masks */
+#define OFFSET_MASK 0xfff
+#define GAIN_SDRAM_EN_SHIFT 12
+#define GAIN_IPIPE_EN_SHIFT 13
+#define GAIN_H3A_EN_SHIFT 14
+#define OFST_SDRAM_EN_SHIFT 8
+#define OFST_IPIPE_EN_SHIFT 9
+#define OFST_H3A_EN_SHIFT 10
+#define GAIN_OFFSET_EN_MASK 0x7700
+
+/* Culling */
+#define CULL_PAT_EVEN_LINE_SHIFT 8
+
+/* CCDCFG register */
+#define ISIF_YCINSWP_RAW (0x00 << 4)
+#define ISIF_YCINSWP_YCBCR (0x01 << 4)
+#define ISIF_CCDCFG_FIDMD_LATCH_VSYNC (0x00 << 6)
+#define ISIF_CCDCFG_WENLOG_AND (0x00 << 8)
+#define ISIF_CCDCFG_TRGSEL_WEN (0x00 << 9)
+#define ISIF_CCDCFG_EXTRG_DISABLE (0x00 << 10)
+#define ISIF_LATCH_ON_VSYNC_DISABLE (0x01 << 15)
+#define ISIF_LATCH_ON_VSYNC_ENABLE (0x00 << 15)
+#define ISIF_DATA_PACK_MASK 0x03
+#define ISIF_PIX_ORDER_SHIFT 11
+#define ISIF_PIX_ORDER_MASK 0x01
+#define ISIF_BW656_ENABLE (0x01 << 5)
+
+/* MODESET registers */
+#define ISIF_VDHDOUT_INPUT (0x00 << 0)
+#define ISIF_INPUT_MASK 0x03
+#define ISIF_INPUT_SHIFT 12
+#define ISIF_FID_POL_MASK 0x01
+#define ISIF_FID_POL_SHIFT 4
+#define ISIF_HD_POL_MASK 0x01
+#define ISIF_HD_POL_SHIFT 3
+#define ISIF_VD_POL_MASK 0x01
+#define ISIF_VD_POL_SHIFT 2
+#define ISIF_DATAPOL_NORMAL 0x00
+#define ISIF_DATAPOL_MASK 0x01
+#define ISIF_DATAPOL_SHIFT 6
+#define ISIF_EXWEN_DISABLE 0x00
+#define ISIF_EXWEN_MASK 0x01
+#define ISIF_EXWEN_SHIFT 5
+#define ISIF_FRM_FMT_MASK 0x01
+#define ISIF_FRM_FMT_SHIFT 7
+#define ISIF_DATASFT_MASK 0x07
+#define ISIF_DATASFT_SHIFT 8
+#define ISIF_LPF_SHIFT 14
+#define ISIF_LPF_MASK 0x1
+
+/* GAMMAWD registers */
+#define ISIF_ALAW_GAMA_WD_MASK 0xf
+#define ISIF_ALAW_GAMA_WD_SHIFT 1
+#define ISIF_ALAW_ENABLE 0x01
+#define ISIF_GAMMAWD_CFA_MASK 0x01
+#define ISIF_GAMMAWD_CFA_SHIFT 5
+
+/* HSIZE registers */
+#define ISIF_HSIZE_FLIP_MASK 0x01
+#define ISIF_HSIZE_FLIP_SHIFT 12
+#define ISIF_LINEOFST_MASK 0xfff
+
+/* MISC registers */
+#define ISIF_DPCM_EN_SHIFT 12
+#define ISIF_DPCM_PREDICTOR_SHIFT 13
+#define ISIF_DPCM_PREDICTOR_MASK 1
+
+/* Black clamp related */
+#define ISIF_BC_DCOFFSET_MASK 0x1fff
+#define ISIF_BC_MODE_COLOR_MASK 1
+#define ISIF_BC_MODE_COLOR_SHIFT 4
+#define ISIF_HORZ_BC_MODE_MASK 3
+#define ISIF_HORZ_BC_MODE_SHIFT 1
+#define ISIF_HORZ_BC_WIN_COUNT_MASK 0x1f
+#define ISIF_HORZ_BC_WIN_SEL_SHIFT 5
+#define ISIF_HORZ_BC_PIX_LIMIT_SHIFT 6
+#define ISIF_HORZ_BC_WIN_H_SIZE_MASK 3
+#define ISIF_HORZ_BC_WIN_H_SIZE_SHIFT 8
+#define ISIF_HORZ_BC_WIN_V_SIZE_MASK 3
+#define ISIF_HORZ_BC_WIN_V_SIZE_SHIFT 12
+#define ISIF_HORZ_BC_WIN_START_H_MASK 0x1fff
+#define ISIF_HORZ_BC_WIN_START_V_MASK 0x1fff
+#define ISIF_VERT_BC_OB_H_SZ_MASK 7
+#define ISIF_VERT_BC_RST_VAL_SEL_MASK 3
+#define ISIF_VERT_BC_RST_VAL_SEL_SHIFT 4
+#define ISIF_VERT_BC_LINE_AVE_COEF_SHIFT 8
+#define ISIF_VERT_BC_OB_START_HORZ_MASK 0x1fff
+#define ISIF_VERT_BC_OB_START_VERT_MASK 0x1fff
+#define ISIF_VERT_BC_OB_VERT_SZ_MASK 0x1fff
+#define ISIF_VERT_BC_RST_VAL_MASK 0xfff
+#define ISIF_BC_VERT_START_SUB_V_MASK 0x1fff
+
+/* VDFC registers */
+#define ISIF_VDFC_EN_SHIFT 4
+#define ISIF_VDFC_CORR_MOD_MASK 3
+#define ISIF_VDFC_CORR_MOD_SHIFT 5
+#define ISIF_VDFC_CORR_WHOLE_LN_SHIFT 7
+#define ISIF_VDFC_LEVEL_SHFT_MASK 7
+#define ISIF_VDFC_LEVEL_SHFT_SHIFT 8
+#define ISIF_VDFC_SAT_LEVEL_MASK 0xfff
+#define ISIF_VDFC_POS_MASK 0x1fff
+#define ISIF_DFCMEMCTL_DFCMARST_SHIFT 2
+
+/* CSC registers */
+#define ISIF_CSC_COEF_INTEG_MASK 7
+#define ISIF_CSC_COEF_DECIMAL_MASK 0x1f
+#define ISIF_CSC_COEF_INTEG_SHIFT 5
+#define ISIF_CSCM_MSB_SHIFT 8
+#define ISIF_DF_CSC_SPH_MASK 0x1fff
+#define ISIF_DF_CSC_LNH_MASK 0x1fff
+#define ISIF_DF_CSC_SLV_MASK 0x1fff
+#define ISIF_DF_CSC_LNV_MASK 0x1fff
+#define ISIF_DF_NUMLINES 0x7fff
+#define ISIF_DF_NUMPIX 0x1fff
+
+/* Offsets for LSC/DFC/Gain */
+#define ISIF_DATA_H_OFFSET_MASK 0x1fff
+#define ISIF_DATA_V_OFFSET_MASK 0x1fff
+
+/* Linearization */
+#define ISIF_LIN_CORRSFT_MASK 7
+#define ISIF_LIN_CORRSFT_SHIFT 4
+#define ISIF_LIN_SCALE_FACT_INTEG_SHIFT 10
+#define ISIF_LIN_SCALE_FACT_DECIMAL_MASK 0x3ff
+#define ISIF_LIN_ENTRY_MASK 0x3ff
+
+/* SYNCEN masks and shifts */
+#define ISIF_SYNCEN_VDHDEN_MASK (1 << 0)
+#define ISIF_SYNCEN_WEN_MASK (1 << 1)
+#define ISIF_SYNCEN_WEN_SHIFT 1
+
+#endif /* _DAVINCI_VPFE_DM365_ISIF_REGS_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
new file mode 100644
index 000000000000..9cb0262b9b33
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -0,0 +1,1999 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ *
+ *
+ * Resizer allows upscaling or downscaling an image to a desired
+ * resolution. There are two resizer modules, both operating on the
+ * same input image, but each can have a different output resolution.
+ */
+
+#include "dm365_ipipe_hw.h"
+#include "dm365_resizer.h"
+
+#define MIN_IN_WIDTH 32
+#define MIN_IN_HEIGHT 32
+#define MAX_IN_WIDTH 4095
+#define MAX_IN_HEIGHT 4095
+#define MIN_OUT_WIDTH 16
+#define MIN_OUT_HEIGHT 2
+
+static const unsigned int resizer_input_formats[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_UV8_1X8,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+};
+
+static const unsigned int resizer_output_formats[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_UV8_1X8,
+ V4L2_MBUS_FMT_YDYUYDYV8_1X16,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+};
+
+/* resizer_calculate_line_length() - This function calculates the line length of
+ * various image planes at the input and
+ * output.
+ */
+static void
+resizer_calculate_line_length(enum v4l2_mbus_pixelcode pix, int width,
+ int height, int *line_len, int *line_len_c)
+{
+ *line_len = 0;
+ *line_len_c = 0;
+
+ if (pix == V4L2_MBUS_FMT_UYVY8_2X8 ||
+ pix == V4L2_MBUS_FMT_SGRBG12_1X12) {
+ *line_len = width << 1;
+ } else if (pix == V4L2_MBUS_FMT_Y8_1X8 ||
+ pix == V4L2_MBUS_FMT_UV8_1X8) {
+ *line_len = width;
+ *line_len_c = width;
+ } else {
+ /* YUV 420 */
+ /* round width to upper 32 byte boundary */
+ *line_len = width;
+ *line_len_c = width;
+ }
+ /* adjust the line len to be a multiple of 32 */
+ *line_len += 31;
+ *line_len &= ~0x1f;
+ *line_len_c += 31;
+ *line_len_c &= ~0x1f;
+}
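+/*
+ * Worked example: a 642 pixel wide UYVY (2 bytes/pixel) image has a raw
+ * line length of 642 * 2 = 1284 bytes, which the rounding above turns
+ * into (1284 + 31) & ~0x1f = 1312 bytes, the next multiple of 32.
+ */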
+
+static inline int
+resizer_validate_output_image_format(struct device *dev,
+ struct v4l2_mbus_framefmt *format,
+ int *in_line_len, int *in_line_len_c)
+{
+ if (format->code != V4L2_MBUS_FMT_UYVY8_2X8 &&
+ format->code != V4L2_MBUS_FMT_Y8_1X8 &&
+ format->code != V4L2_MBUS_FMT_UV8_1X8 &&
+ format->code != V4L2_MBUS_FMT_YDYUYDYV8_1X16 &&
+ format->code != V4L2_MBUS_FMT_SGRBG12_1X12) {
+ dev_err(dev, "Invalid Mbus format, %d\n", format->code);
+ return -EINVAL;
+ }
+ if (!format->width || !format->height) {
+ dev_err(dev, "invalid width or height\n");
+ return -EINVAL;
+ }
+ resizer_calculate_line_length(format->code, format->width,
+ format->height, in_line_len, in_line_len_c);
+ return 0;
+}
+
+static void
+resizer_configure_passthru(struct vpfe_resizer_device *resizer, int bypass)
+{
+ struct resizer_params *param = &resizer->config;
+
+ param->rsz_rsc_param[RSZ_A].cen = DISABLE;
+ param->rsz_rsc_param[RSZ_A].yen = DISABLE;
+ param->rsz_rsc_param[RSZ_A].v_phs_y = 0;
+ param->rsz_rsc_param[RSZ_A].v_phs_c = 0;
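+ /* h_dif/v_dif of 256 mean a 1:1 ratio in the 8.8 fixed-point format */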
+ param->rsz_rsc_param[RSZ_A].v_dif = 256;
+ param->rsz_rsc_param[RSZ_A].v_lpf_int_y = 0;
+ param->rsz_rsc_param[RSZ_A].v_lpf_int_c = 0;
+ param->rsz_rsc_param[RSZ_A].h_phs = 0;
+ param->rsz_rsc_param[RSZ_A].h_dif = 256;
+ param->rsz_rsc_param[RSZ_A].h_lpf_int_y = 0;
+ param->rsz_rsc_param[RSZ_A].h_lpf_int_c = 0;
+ param->rsz_rsc_param[RSZ_A].dscale_en = DISABLE;
+ param->rsz2rgb[RSZ_A].rgb_en = DISABLE;
+ param->rsz_en[RSZ_A] = ENABLE;
+ param->rsz_en[RSZ_B] = DISABLE;
+ if (bypass) {
+ param->rsz_rsc_param[RSZ_A].i_vps = 0;
+ param->rsz_rsc_param[RSZ_A].i_hps = 0;
+ /* Raw Bypass */
+ param->rsz_common.passthrough = BYPASS_ON;
+ }
+}
+
+static void
+configure_resizer_out_params(struct vpfe_resizer_device *resizer, int index,
+ void *output_spec, unsigned char partial,
+ unsigned flag)
+{
+ struct resizer_params *param = &resizer->config;
+ struct v4l2_mbus_framefmt *outformat;
+ struct vpfe_rsz_output_spec *output;
+
+ if (index == RSZ_A &&
+ resizer->resizer_a.output == RESIZER_OUTPUT_NONE) {
+ param->rsz_en[index] = DISABLE;
+ return;
+ }
+ if (index == RSZ_B &&
+ resizer->resizer_b.output == RESIZER_OUTPUT_NONE) {
+ param->rsz_en[index] = DISABLE;
+ return;
+ }
+ output = (struct vpfe_rsz_output_spec *)output_spec;
+ param->rsz_en[index] = ENABLE;
+ if (partial) {
+ param->rsz_rsc_param[index].h_flip = output->h_flip;
+ param->rsz_rsc_param[index].v_flip = output->v_flip;
+ param->rsz_rsc_param[index].v_typ_y = output->v_typ_y;
+ param->rsz_rsc_param[index].v_typ_c = output->v_typ_c;
+ param->rsz_rsc_param[index].v_lpf_int_y =
+ output->v_lpf_int_y;
+ param->rsz_rsc_param[index].v_lpf_int_c =
+ output->v_lpf_int_c;
+ param->rsz_rsc_param[index].h_typ_y = output->h_typ_y;
+ param->rsz_rsc_param[index].h_typ_c = output->h_typ_c;
+ param->rsz_rsc_param[index].h_lpf_int_y =
+ output->h_lpf_int_y;
+ param->rsz_rsc_param[index].h_lpf_int_c =
+ output->h_lpf_int_c;
+ param->rsz_rsc_param[index].dscale_en =
+ output->en_down_scale;
+ param->rsz_rsc_param[index].h_dscale_ave_sz =
+ output->h_dscale_ave_sz;
+ param->rsz_rsc_param[index].v_dscale_ave_sz =
+ output->v_dscale_ave_sz;
+ param->ext_mem_param[index].user_y_ofst =
+ (output->user_y_ofst + 31) & ~0x1f;
+ param->ext_mem_param[index].user_c_ofst =
+ (output->user_c_ofst + 31) & ~0x1f;
+ return;
+ }
+
+ if (index == RSZ_A)
+ outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
+ else
+ outformat = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
+ param->rsz_rsc_param[index].o_vsz = outformat->height - 1;
+ param->rsz_rsc_param[index].o_hsz = outformat->width - 1;
+ param->ext_mem_param[index].rsz_sdr_ptr_s_y = output->vst_y;
+ param->ext_mem_param[index].rsz_sdr_ptr_e_y = outformat->height;
+ param->ext_mem_param[index].rsz_sdr_ptr_s_c = output->vst_c;
+ param->ext_mem_param[index].rsz_sdr_ptr_e_c = outformat->height;
+
+ if (!flag)
+ return;
+ /* update common parameters */
+ param->rsz_rsc_param[index].h_flip = output->h_flip;
+ param->rsz_rsc_param[index].v_flip = output->v_flip;
+ param->rsz_rsc_param[index].v_typ_y = output->v_typ_y;
+ param->rsz_rsc_param[index].v_typ_c = output->v_typ_c;
+ param->rsz_rsc_param[index].v_lpf_int_y = output->v_lpf_int_y;
+ param->rsz_rsc_param[index].v_lpf_int_c = output->v_lpf_int_c;
+ param->rsz_rsc_param[index].h_typ_y = output->h_typ_y;
+ param->rsz_rsc_param[index].h_typ_c = output->h_typ_c;
+ param->rsz_rsc_param[index].h_lpf_int_y = output->h_lpf_int_y;
+ param->rsz_rsc_param[index].h_lpf_int_c = output->h_lpf_int_c;
+ param->rsz_rsc_param[index].dscale_en = output->en_down_scale;
+ param->rsz_rsc_param[index].h_dscale_ave_sz = output->h_dscale_ave_sz;
+ param->rsz_rsc_param[index].v_dscale_ave_sz = output->v_dscale_ave_sz;
+ param->ext_mem_param[index].user_y_ofst =
+ (output->user_y_ofst + 31) & ~0x1f;
+ param->ext_mem_param[index].user_c_ofst =
+ (output->user_c_ofst + 31) & ~0x1f;
+}
+
+/*
+ * resizer_calculate_resize_ratios() - Calculates resize ratio for resizer
+ * A or B. This is called after setting
+ * the input size or output size.
+ * @resizer: Pointer to VPFE resizer subdevice.
+ * @index: index RSZ_A-resizer-A RSZ_B-resizer-B.
+ */
+void
+resizer_calculate_resize_ratios(struct vpfe_resizer_device *resizer, int index)
+{
+ struct resizer_params *param = &resizer->config;
+ struct v4l2_mbus_framefmt *informat, *outformat;
+
+ informat = &resizer->crop_resizer.formats[RESIZER_CROP_PAD_SINK];
+
+ if (index == RSZ_A)
+ outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
+ else
+ outformat = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
+
+ if (outformat->field != V4L2_FIELD_INTERLACED)
+ param->rsz_rsc_param[index].v_dif =
+ ((informat->height) * 256) / (outformat->height);
+ else
+ param->rsz_rsc_param[index].v_dif =
+ ((informat->height >> 1) * 256) / (outformat->height);
+ param->rsz_rsc_param[index].h_dif =
+ ((informat->width) * 256) / (outformat->width);
+}
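+/*
+ * The ratios are 8.8 fixed-point values: for example, a 960 line
+ * progressive input scaled to a 480 line output gives
+ * v_dif = (960 * 256) / 480 = 512, i.e. a 2x vertical downscale;
+ * 256 corresponds to no scaling.
+ */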
+
+static void
+resizer_enable_422_420_conversion(struct resizer_params *param,
+ int index, bool en)
+{
+ param->rsz_rsc_param[index].cen = en;
+ param->rsz_rsc_param[index].yen = en;
+}
+
+/* resizer_calculate_sdram_offsets() - This function calculates the offsets from
+ * start of buffer for the C plane when
+ * output format is YUV420SP. It also
+ * calculates the offsets from the start of
+ * the buffer when the image is flipped
+ * vertically or horizontally for ycbcr/y/c
+ * planes.
+ * @resizer: Pointer to resizer subdevice.
+ * @index: index RSZ_A-resizer-A RSZ_B-resizer-B.
+ */
+static int
+resizer_calculate_sdram_offsets(struct vpfe_resizer_device *resizer, int index)
+{
+ struct resizer_params *param = &resizer->config;
+ struct v4l2_mbus_framefmt *outformat;
+ int bytesperpixel = 2;
+ int image_height;
+ int image_width;
+ int yuv_420 = 0;
+ int offset = 0;
+
+ if (index == RSZ_A)
+ outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
+ else
+ outformat = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
+
+ image_height = outformat->height + 1;
+ image_width = outformat->width + 1;
+ param->ext_mem_param[index].c_offset = 0;
+ param->ext_mem_param[index].flip_ofst_y = 0;
+ param->ext_mem_param[index].flip_ofst_c = 0;
+ if (outformat->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16) {
+ /* YUV 420 */
+ yuv_420 = 1;
+ bytesperpixel = 1;
+ }
+
+ if (param->rsz_rsc_param[index].h_flip)
+ /* width * bytesperpixel - 1 */
+ offset = (image_width * bytesperpixel) - 1;
+ if (param->rsz_rsc_param[index].v_flip)
+ offset += (image_height - 1) *
+ param->ext_mem_param[index].rsz_sdr_oft_y;
+ param->ext_mem_param[index].flip_ofst_y = offset;
+ if (!yuv_420)
+ return 0;
+ offset = 0;
+ /* half height for c-plane */
+ if (param->rsz_rsc_param[index].h_flip)
+ /* width * bytesperpixel - 1 */
+ offset = image_width - 1;
+ if (param->rsz_rsc_param[index].v_flip)
+ offset += (((image_height >> 1) - 1) *
+ param->ext_mem_param[index].rsz_sdr_oft_c);
+ param->ext_mem_param[index].flip_ofst_c = offset;
+ param->ext_mem_param[index].c_offset =
+ param->ext_mem_param[index].rsz_sdr_oft_y * image_height;
+ return 0;
+}
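+/*
+ * c_offset is the size of the Y plane (rsz_sdr_oft_y * image height), so
+ * for YUV420SP the C plane simply follows the Y plane in the output
+ * buffer; the flip offsets point the DMA at the last pixel of a line
+ * and/or the last line when horizontal/vertical flipping is enabled.
+ */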
+
+int resizer_configure_output_win(struct vpfe_resizer_device *resizer)
+{
+ struct resizer_params *param = &resizer->config;
+ struct vpfe_rsz_output_spec output_specs;
+ struct v4l2_mbus_framefmt *outformat;
+ int line_len_c;
+ int line_len;
+ int ret;
+
+ outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
+
+ output_specs.vst_y = param->user_config.vst;
+ if (outformat->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ output_specs.vst_c = param->user_config.vst;
+
+ configure_resizer_out_params(resizer, RSZ_A, &output_specs, 0, 0);
+ resizer_calculate_line_length(outformat->code,
+ param->rsz_rsc_param[0].o_hsz + 1,
+ param->rsz_rsc_param[0].o_vsz + 1,
+ &line_len, &line_len_c);
+ param->ext_mem_param[0].rsz_sdr_oft_y = line_len;
+ param->ext_mem_param[0].rsz_sdr_oft_c = line_len_c;
+ resizer_calculate_resize_ratios(resizer, RSZ_A);
+ if (param->rsz_en[RSZ_B])
+ resizer_calculate_resize_ratios(resizer, RSZ_B);
+
+ if (outformat->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ resizer_enable_422_420_conversion(param, RSZ_A, ENABLE);
+ else
+ resizer_enable_422_420_conversion(param, RSZ_A, DISABLE);
+
+ ret = resizer_calculate_sdram_offsets(resizer, RSZ_A);
+ if (!ret && param->rsz_en[RSZ_B])
+ ret = resizer_calculate_sdram_offsets(resizer, RSZ_B);
+
+ if (ret)
+ pr_err("Error in calculating sdram offsets\n");
+ return ret;
+}
+
+static int
+resizer_calculate_down_scale_f_div_param(struct device *dev,
+ int input_width, int output_width,
+ struct resizer_scale_param *param)
+{
+ /* rsz = R, input_width = H, output width = h in the equation */
+ unsigned int two_power;
+ unsigned int upper_h1;
+ unsigned int upper_h2;
+ unsigned int val1;
+ unsigned int val;
+ unsigned int rsz;
+ unsigned int h1;
+ unsigned int h2;
+ unsigned int o;
+ unsigned int n;
+
+ upper_h1 = input_width >> 1;
+ n = param->h_dscale_ave_sz;
+ /* 2 ^ (scale+1) */
+ two_power = 1 << (n + 1);
+ upper_h1 = (upper_h1 >> (n + 1)) << (n + 1);
+ upper_h2 = input_width - upper_h1;
+ if (upper_h2 % two_power) {
+ dev_err(dev, "frame halves must be a multiple of 2^(n+1)\n");
+ return -EINVAL;
+ }
+ two_power = 1 << n;
+ rsz = (input_width << 8) / output_width;
+ val = rsz * two_power;
+ val = ((upper_h1 << 8) / val) + 1;
+ if (!(val % 2)) {
+ h1 = val;
+ } else {
+ val = upper_h1 << 8;
+ val >>= n + 1;
+ val -= rsz >> 1;
+ val /= rsz << 1;
+ val <<= 1;
+ val += 2;
+ h1 = val;
+ }
+ o = 10 + (two_power << 2);
+ if (((input_width << 7) / rsz) % 2)
+ o += (((CEIL(rsz, 1024)) << 1) << n);
+ h2 = output_width - h1;
+ /* phi */
+ val = (h1 * rsz) - (((upper_h1 - (o - 10)) / two_power) << 8);
+ /* skip */
+ val1 = ((val - 1024) >> 9) << 1;
+ param->f_div.num_passes = MAX_PASSES;
+ param->f_div.pass[0].o_hsz = h1 - 1;
+ param->f_div.pass[0].i_hps = 0;
+ param->f_div.pass[0].h_phs = 0;
+ param->f_div.pass[0].src_hps = 0;
+ param->f_div.pass[0].src_hsz = upper_h1 + o;
+ param->f_div.pass[1].o_hsz = h2 - 1;
+ param->f_div.pass[1].i_hps = 10 + (val1 * two_power);
+ param->f_div.pass[1].h_phs = (val - (val1 << 8));
+ param->f_div.pass[1].src_hps = upper_h1 - o;
+ param->f_div.pass[1].src_hsz = upper_h2 + o;
+
+ return 0;
+}
+
+static int
+resizer_configure_common_in_params(struct vpfe_resizer_device *resizer)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ struct resizer_params *param = &resizer->config;
+ struct vpfe_rsz_config_params *user_config;
+ struct v4l2_mbus_framefmt *informat;
+
+ informat = &resizer->crop_resizer.formats[RESIZER_CROP_PAD_SINK];
+ user_config = &resizer->config.user_config;
+ param->rsz_common.vps = param->user_config.vst;
+ param->rsz_common.hps = param->user_config.hst;
+
+ if (vpfe_ipipeif_decimation_enabled(vpfe_dev))
+ param->rsz_common.hsz = (((informat->width - 1) *
+ IPIPEIF_RSZ_CONST) / vpfe_ipipeif_get_rsz(vpfe_dev));
+ else
+ param->rsz_common.hsz = informat->width - 1;
+
+ if (informat->field == V4L2_FIELD_INTERLACED)
+ param->rsz_common.vsz = (informat->height - 1) >> 1;
+ else
+ param->rsz_common.vsz = informat->height - 1;
+
+ param->rsz_common.raw_flip = 0;
+
+ if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_IPIPEIF)
+ param->rsz_common.source = IPIPEIF_DATA;
+ else
+ param->rsz_common.source = IPIPE_DATA;
+
+ switch (informat->code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ param->rsz_common.src_img_fmt = RSZ_IMG_422;
+ param->rsz_common.raw_flip = 0;
+ break;
+
+ case V4L2_MBUS_FMT_Y8_1X8:
+ param->rsz_common.src_img_fmt = RSZ_IMG_420;
+ /* Select y */
+ param->rsz_common.y_c = 0;
+ param->rsz_common.raw_flip = 0;
+ break;
+
+ case V4L2_MBUS_FMT_UV8_1X8:
+ param->rsz_common.src_img_fmt = RSZ_IMG_420;
+ /* Select c */
+ param->rsz_common.y_c = 1;
+ param->rsz_common.raw_flip = 0;
+ break;
+
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ param->rsz_common.raw_flip = 1;
+ break;
+
+ default:
+ param->rsz_common.src_img_fmt = RSZ_IMG_422;
+ param->rsz_common.source = IPIPE_DATA;
+ }
+
+ param->rsz_common.yuv_y_min = user_config->yuv_y_min;
+ param->rsz_common.yuv_y_max = user_config->yuv_y_max;
+ param->rsz_common.yuv_c_min = user_config->yuv_c_min;
+ param->rsz_common.yuv_c_max = user_config->yuv_c_max;
+ param->rsz_common.out_chr_pos = user_config->out_chr_pos;
+ param->rsz_common.rsz_seq_crv = user_config->chroma_sample_even;
+
+ return 0;
+}
+
+static int
+resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
+{
+ struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
+ struct resizer_params *param = &resizer->config;
+ struct vpfe_rsz_config_params *cont_config;
+ int line_len_c;
+ int line_len;
+ int ret;
+
+ if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY) {
+ dev_err(dev, "Resizer-A output must be set to memory\n");
+ return -EINVAL;
+ }
+
+ cont_config = &resizer->config.user_config;
+ param->rsz_en[RSZ_A] = ENABLE;
+ configure_resizer_out_params(resizer, RSZ_A,
+ &cont_config->output1, 1, 0);
+ param->rsz_en[RSZ_B] = DISABLE;
+ param->oper_mode = RESIZER_MODE_CONTINIOUS;
+
+ if (resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) {
+ struct v4l2_mbus_framefmt *outformat2;
+
+ param->rsz_en[RSZ_B] = ENABLE;
+ outformat2 = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
+ ret = resizer_validate_output_image_format(dev, outformat2,
+ &line_len, &line_len_c);
+ if (ret)
+ return ret;
+ param->ext_mem_param[RSZ_B].rsz_sdr_oft_y = line_len;
+ param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c;
+ configure_resizer_out_params(resizer, RSZ_B,
+ &cont_config->output2, 0, 1);
+ if (outformat2->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ resizer_enable_422_420_conversion(param,
+ RSZ_B, ENABLE);
+ else
+ resizer_enable_422_420_conversion(param,
+ RSZ_B, DISABLE);
+ }
+ resizer_configure_common_in_params(resizer);
+ ret = resizer_configure_output_win(resizer);
+ if (ret)
+ return ret;
+
+ param->rsz_common.passthrough = cont_config->bypass;
+ if (cont_config->bypass)
+ resizer_configure_passthru(resizer, 1);
+
+ return 0;
+}
+
+static inline int
+resizer_validate_input_image_format(struct device *dev,
+ enum v4l2_mbus_pixelcode pix,
+ int width, int height, int *line_len)
+{
+ int val;
+
+ if (pix != V4L2_MBUS_FMT_UYVY8_2X8 &&
+ pix != V4L2_MBUS_FMT_Y8_1X8 &&
+ pix != V4L2_MBUS_FMT_UV8_1X8 &&
+ pix != V4L2_MBUS_FMT_SGRBG12_1X12) {
+ dev_err(dev,
+ "resizer validate output: pix format not supported, %d\n", pix);
+ return -EINVAL;
+ }
+
+ if (!width || !height) {
+ dev_err(dev,
+ "resizer validate input: invalid width or height\n");
+ return -EINVAL;
+ }
+
+ if (pix == V4L2_MBUS_FMT_UV8_1X8)
+ resizer_calculate_line_length(pix, width,
+ height, &val, line_len);
+ else
+ resizer_calculate_line_length(pix, width,
+ height, line_len, &val);
+
+ return 0;
+}
+
+static int
+resizer_validate_decimation(struct device *dev, enum ipipeif_decimation dec_en,
+ unsigned char rsz, unsigned char frame_div_mode_en,
+ int width)
+{
+ if (dec_en && frame_div_mode_en) {
+ dev_err(dev,
+ "dec_en & frame_div_mode_en can not enabled simultaneously\n");
+ return -EINVAL;
+ }
+
+ if (frame_div_mode_en) {
+ dev_err(dev, "frame_div_mode mode not supported\n");
+ return -EINVAL;
+ }
+
+ if (!dec_en)
+ return 0;
+
+ if (width <= VPFE_IPIPE_MAX_INPUT_WIDTH) {
+ dev_err(dev,
+ "image width to be more than %d for decimation\n",
+ VPFE_IPIPE_MAX_INPUT_WIDTH);
+ return -EINVAL;
+ }
+
+ if (rsz < IPIPEIF_RSZ_MIN || rsz > IPIPEIF_RSZ_MAX) {
+ dev_err(dev, "rsz range is %d to %d\n",
+ IPIPEIF_RSZ_MIN, IPIPEIF_RSZ_MAX);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* resizer_calculate_normal_f_div_param() - Algorithm to calculate the frame
+ * division parameters for the resizer in normal mode.
+ */
+static int
+resizer_calculate_normal_f_div_param(struct device *dev, int input_width,
+ int output_width, struct resizer_scale_param *param)
+{
+ /* rsz = R, input_width = H, output width = h in the equation */
+ unsigned int val1;
+ unsigned int rsz;
+ unsigned int val;
+ unsigned int h1;
+ unsigned int h2;
+ unsigned int o;
+
+ if (output_width > input_width) {
+ dev_err(dev, "frame div mode is used for scale down only\n");
+ return -EINVAL;
+ }
+
+ rsz = (input_width << 8) / output_width;
+ val = rsz << 1;
+ val = ((input_width << 8) / val) + 1;
+ o = 14;
+ if (!(val % 2)) {
+ h1 = val;
+ } else {
+ val = (input_width << 7);
+ val -= rsz >> 1;
+ val /= rsz << 1;
+ val <<= 1;
+ val += 2;
+ o += ((CEIL(rsz, 1024)) << 1);
+ h1 = val;
+ }
+ h2 = output_width - h1;
+ /* phi */
+ val = (h1 * rsz) - (((input_width >> 1) - o) << 8);
+ /* skip */
+ val1 = ((val - 1024) >> 9) << 1;
+ param->f_div.num_passes = MAX_PASSES;
+ param->f_div.pass[0].o_hsz = h1 - 1;
+ param->f_div.pass[0].i_hps = 0;
+ param->f_div.pass[0].h_phs = 0;
+ param->f_div.pass[0].src_hps = 0;
+ param->f_div.pass[0].src_hsz = (input_width >> 2) + o;
+ param->f_div.pass[1].o_hsz = h2 - 1;
+ param->f_div.pass[1].i_hps = val1;
+ param->f_div.pass[1].h_phs = (val - (val1 << 8));
+ param->f_div.pass[1].src_hps = (input_width >> 2) - o;
+ param->f_div.pass[1].src_hsz = (input_width >> 2) + o;
+
+ return 0;
+}
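+/*
+ * Illustrative numbers: downscaling a 640 pixel wide input to 320 pixels
+ * gives rsz = (640 << 8) / 320 = 512, i.e. a 2.0 ratio in 8.8 fixed
+ * point. The output is then produced in two horizontal passes of h1 and
+ * h2 pixels (h1 + h2 = output_width), with o extra source pixels of
+ * overlap so the second pass has filter context at the seam.
+ */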
+
+static int
+resizer_configure_in_single_shot_mode(struct vpfe_resizer_device *resizer)
+{
+ struct vpfe_rsz_config_params *config = &resizer->config.user_config;
+ struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ struct v4l2_mbus_framefmt *outformat1, *outformat2;
+ struct resizer_params *param = &resizer->config;
+ struct v4l2_mbus_framefmt *informat;
+ int decimation;
+ int line_len_c;
+ int line_len;
+ int rsz;
+ int ret;
+
+ informat = &resizer->crop_resizer.formats[RESIZER_CROP_PAD_SINK];
+ outformat1 = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
+ outformat2 = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
+
+ decimation = vpfe_ipipeif_decimation_enabled(vpfe_dev);
+ rsz = vpfe_ipipeif_get_rsz(vpfe_dev);
+ if (decimation && param->user_config.frame_div_mode_en) {
+ dev_err(dev,
+ "dec_en & frame_div_mode_en cannot enabled simultaneously\n");
+ return -EINVAL;
+ }
+
+ ret = resizer_validate_decimation(dev, decimation, rsz,
+ param->user_config.frame_div_mode_en, informat->width);
+ if (ret)
+ return -EINVAL;
+
+ ret = resizer_validate_input_image_format(dev, informat->code,
+ informat->width, informat->height, &line_len);
+ if (ret)
+ return -EINVAL;
+
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) {
+ param->rsz_en[RSZ_A] = ENABLE;
+ ret = resizer_validate_output_image_format(dev, outformat1,
+ &line_len, &line_len_c);
+ if (ret)
+ return ret;
+ param->ext_mem_param[RSZ_A].rsz_sdr_oft_y = line_len;
+ param->ext_mem_param[RSZ_A].rsz_sdr_oft_c = line_len_c;
+ configure_resizer_out_params(resizer, RSZ_A,
+ &param->user_config.output1, 0, 1);
+
+ if (outformat1->code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ param->rsz_common.raw_flip = 1;
+ else
+ param->rsz_common.raw_flip = 0;
+
+ if (outformat1->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ resizer_enable_422_420_conversion(param,
+ RSZ_A, ENABLE);
+ else
+ resizer_enable_422_420_conversion(param,
+ RSZ_A, DISABLE);
+ }
+
+ if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) {
+ param->rsz_en[RSZ_B] = ENABLE;
+ ret = resizer_validate_output_image_format(dev, outformat2,
+ &line_len, &line_len_c);
+ if (ret)
+ return ret;
+ param->ext_mem_param[RSZ_B].rsz_sdr_oft_y = line_len;
+ param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c;
+ configure_resizer_out_params(resizer, RSZ_B,
+ &param->user_config.output2, 0, 1);
+ if (outformat2->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ resizer_enable_422_420_conversion(param,
+ RSZ_B, ENABLE);
+ else
+ resizer_enable_422_420_conversion(param,
+ RSZ_B, DISABLE);
+ }
+
+ resizer_configure_common_in_params(resizer);
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) {
+ resizer_calculate_resize_ratios(resizer, RSZ_A);
+ resizer_calculate_sdram_offsets(resizer, RSZ_A);
+ /* Override the vertical resize ratio for the half-height chroma (UV) plane */
+ if (informat->code == V4L2_MBUS_FMT_UV8_1X8) {
+ param->rsz_rsc_param[RSZ_A].v_dif =
+ (((informat->height + 1) * 2) * 256) /
+ (param->rsz_rsc_param[RSZ_A].o_vsz + 1);
+ }
+ }
+
+ if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) {
+ resizer_calculate_resize_ratios(resizer, RSZ_B);
+ resizer_calculate_sdram_offsets(resizer, RSZ_B);
+ /* Override the vertical resize ratio for the half-height chroma (UV) plane */
+ if (informat->code == V4L2_MBUS_FMT_UV8_1X8) {
+ param->rsz_rsc_param[RSZ_B].v_dif =
+ (((informat->height + 1) * 2) * 256) /
+ (param->rsz_rsc_param[RSZ_B].o_vsz + 1);
+ }
+ }
+ if (param->user_config.frame_div_mode_en &&
+ param->rsz_en[RSZ_A]) {
+ if (!param->rsz_rsc_param[RSZ_A].dscale_en)
+ ret = resizer_calculate_normal_f_div_param(dev,
+ informat->width,
+ param->rsz_rsc_param[RSZ_A].o_vsz + 1,
+ &param->rsz_rsc_param[RSZ_A]);
+ else
+ ret = resizer_calculate_down_scale_f_div_param(dev,
+ informat->width,
+ param->rsz_rsc_param[RSZ_A].o_vsz + 1,
+ &param->rsz_rsc_param[RSZ_A]);
+ if (ret)
+ return -EINVAL;
+ }
+ if (param->user_config.frame_div_mode_en &&
+ param->rsz_en[RSZ_B]) {
+ if (!param->rsz_rsc_param[RSZ_B].dscale_en)
+ ret = resizer_calculate_normal_f_div_param(dev,
+ informat->width,
+ param->rsz_rsc_param[RSZ_B].o_vsz + 1,
+ &param->rsz_rsc_param[RSZ_B]);
+ else
+ ret = resizer_calculate_down_scale_f_div_param(dev,
+ informat->width,
+ param->rsz_rsc_param[RSZ_B].o_vsz + 1,
+ &param->rsz_rsc_param[RSZ_B]);
+ if (ret)
+ return -EINVAL;
+ }
+ param->rsz_common.passthrough = config->bypass;
+ if (config->bypass)
+ resizer_configure_passthru(resizer, 1);
+ return 0;
+}
+
+static void
+resizer_set_defualt_configuration(struct vpfe_resizer_device *resizer)
+{
+#define WIDTH_I 640
+#define HEIGHT_I 480
+#define WIDTH_O 640
+#define HEIGHT_O 480
+ const struct resizer_params rsz_default_config = {
+ .oper_mode = RESIZER_MODE_ONE_SHOT,
+ .rsz_common = {
+ .vsz = HEIGHT_I - 1,
+ .hsz = WIDTH_I - 1,
+ .src_img_fmt = RSZ_IMG_422,
+ .raw_flip = 1, /* flip preserve Raw format */
+ .source = IPIPE_DATA,
+ .passthrough = BYPASS_OFF,
+ .yuv_y_max = 255,
+ .yuv_c_max = 255,
+ .rsz_seq_crv = DISABLE,
+ .out_chr_pos = VPFE_IPIPE_YUV422_CHR_POS_COSITE,
+ },
+ .rsz_rsc_param = {
+ {
+ .h_flip = DISABLE,
+ .v_flip = DISABLE,
+ .cen = DISABLE,
+ .yen = DISABLE,
+ .o_vsz = HEIGHT_O - 1,
+ .o_hsz = WIDTH_O - 1,
+ .v_dif = 256,
+ .v_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .v_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_dif = 256,
+ .h_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .h_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ .v_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ },
+ {
+ .h_flip = DISABLE,
+ .v_flip = DISABLE,
+ .cen = DISABLE,
+ .yen = DISABLE,
+ .o_vsz = HEIGHT_O - 1,
+ .o_hsz = WIDTH_O - 1,
+ .v_dif = 256,
+ .v_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .v_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_dif = 256,
+ .h_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .h_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ .v_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ },
+ },
+ .rsz2rgb = {
+ {
+ .rgb_en = DISABLE
+ },
+ {
+ .rgb_en = DISABLE
+ }
+ },
+ .ext_mem_param = {
+ {
+ .rsz_sdr_oft_y = WIDTH_O << 1,
+ .rsz_sdr_ptr_e_y = HEIGHT_O,
+ .rsz_sdr_oft_c = WIDTH_O,
+ .rsz_sdr_ptr_e_c = HEIGHT_O >> 1,
+ },
+ {
+ .rsz_sdr_oft_y = WIDTH_O << 1,
+ .rsz_sdr_ptr_e_y = HEIGHT_O,
+ .rsz_sdr_oft_c = WIDTH_O,
+ .rsz_sdr_ptr_e_c = HEIGHT_O,
+ },
+ },
+ .rsz_en[0] = ENABLE,
+ .rsz_en[1] = DISABLE,
+ .user_config = {
+ .output1 = {
+ .v_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .v_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .h_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ .v_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ },
+ .output2 = {
+ .v_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .v_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .h_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ .v_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ },
+ .yuv_y_max = 255,
+ .yuv_c_max = 255,
+ .out_chr_pos = VPFE_IPIPE_YUV422_CHR_POS_COSITE,
+ },
+ };
+ memset(&resizer->config, 0, sizeof(struct resizer_params));
+ memcpy(&resizer->config, &rsz_default_config,
+ sizeof(struct resizer_params));
+}
+
+/*
+ * resizer_set_configuration() - set resizer config
+ * @resizer: vpfe resizer device pointer.
+ * @chan_config: resizer channel configuration.
+ */
+static int
+resizer_set_configuration(struct vpfe_resizer_device *resizer,
+ struct vpfe_rsz_config *chan_config)
+{
+ if (!chan_config->config)
+ resizer_set_defualt_configuration(resizer);
+ else
+ if (copy_from_user(&resizer->config.user_config,
+ chan_config->config, sizeof(struct vpfe_rsz_config_params)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * resizer_get_configuration() - get resizer config
+ * @resizer: vpfe resizer device pointer.
+ * @chan_config: resizer channel configuration.
+ */
+static int
+resizer_get_configuration(struct vpfe_resizer_device *resizer,
+ struct vpfe_rsz_config *chan_config)
+{
+ struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
+
+ if (!chan_config->config) {
+ dev_err(dev, "Resizer channel invalid pointer\n");
+ return -EINVAL;
+ }
+
+ if (copy_to_user((void *)chan_config->config,
+ (void *)&resizer->config.user_config,
+ sizeof(struct vpfe_rsz_config_params))) {
+ dev_err(dev, "resizer_get_configuration: Error in copy to user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * VPFE video operations
+ */
+
+/*
+ * resizer_a_video_out_queue() - RESIZER-A video out queue
+ * @vpfe_dev: vpfe device pointer.
+ * @addr: buffer address.
+ */
+static int resizer_a_video_out_queue(struct vpfe_device *vpfe_dev,
+ unsigned long addr)
+{
+ struct vpfe_resizer_device *resizer = &vpfe_dev->vpfe_resizer;
+
+ return resizer_set_outaddr(resizer->base_addr,
+ &resizer->config, RSZ_A, addr);
+}
+
+/*
+ * resizer_b_video_out_queue() - RESIZER-B video out queue
+ * @vpfe_dev: vpfe device pointer.
+ * @addr: buffer address.
+ */
+static int resizer_b_video_out_queue(struct vpfe_device *vpfe_dev,
+ unsigned long addr)
+{
+ struct vpfe_resizer_device *resizer = &vpfe_dev->vpfe_resizer;
+
+ return resizer_set_outaddr(resizer->base_addr,
+ &resizer->config, RSZ_B, addr);
+}
+
+static const struct vpfe_video_operations resizer_a_video_ops = {
+ .queue = resizer_a_video_out_queue,
+};
+
+static const struct vpfe_video_operations resizer_b_video_ops = {
+ .queue = resizer_b_video_out_queue,
+};
+
+static void resizer_enable(struct vpfe_resizer_device *resizer, int en)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
+ unsigned char val;
+
+ if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_NONE)
+ return;
+
+ if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_IPIPEIF &&
+ ipipeif_sink == IPIPEIF_INPUT_MEMORY) {
+ do {
+ val = regr_rsz(resizer->base_addr, RSZ_SRC_EN);
+ } while (val);
+
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) {
+ do {
+ val = regr_rsz(resizer->base_addr, RSZ_A);
+ } while (val);
+ }
+ if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) {
+ do {
+ val = regr_rsz(resizer->base_addr, RSZ_B);
+ } while (val);
+ }
+ }
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE)
+ rsz_enable(resizer->base_addr, RSZ_A, en);
+
+ if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE)
+ rsz_enable(resizer->base_addr, RSZ_B, en);
+}
+
+
+/*
+ * resizer_ss_isr() - resizer module single-shot buffer scheduling isr
+ * @resizer: vpfe resizer device pointer.
+ */
+static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
+{
+ struct vpfe_video_device *video_out = &resizer->resizer_a.video_out;
+ struct vpfe_video_device *video_out2 = &resizer->resizer_b.video_out;
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ struct vpfe_pipeline *pipe = &video_out->pipe;
+ u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
+ u32 val;
+
+ if (ipipeif_sink != IPIPEIF_INPUT_MEMORY)
+ return;
+
+ if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+ val = vpss_dma_complete_interrupt();
+ if (val != 0 && val != 2)
+ return;
+ }
+
+ if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+ spin_lock(&video_out->dma_queue_lock);
+ vpfe_video_process_buffer_complete(video_out);
+ video_out->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
+ vpfe_video_schedule_next_buffer(video_out);
+ spin_unlock(&video_out->dma_queue_lock);
+ }
+
+ /* If resizer B is enabled */
+ if (pipe->output_num > 1 && resizer->resizer_b.output ==
+ RESIZER_OUPUT_MEMORY) {
+ spin_lock(&video_out->dma_queue_lock);
+ vpfe_video_process_buffer_complete(video_out2);
+ video_out2->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
+ vpfe_video_schedule_next_buffer(video_out2);
+ spin_unlock(&video_out2->dma_queue_lock);
+ }
+
+ /* start HW if buffers are queued */
+ if (vpfe_video_is_pipe_ready(pipe) &&
+ resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+ resizer_enable(resizer, 1);
+ vpfe_ipipe_enable(vpfe_dev, 1);
+ vpfe_ipipeif_enable(vpfe_dev);
+ }
+}
+
+/*
+ * vpfe_resizer_buffer_isr() - resizer module buffer scheduling isr
+ * @resizer: vpfe resizer device pointer.
+ */
+void vpfe_resizer_buffer_isr(struct vpfe_resizer_device *resizer)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ struct vpfe_video_device *video_out = &resizer->resizer_a.video_out;
+ struct vpfe_video_device *video_out2 = &resizer->resizer_b.video_out;
+ struct vpfe_pipeline *pipe = &resizer->resizer_a.video_out.pipe;
+ enum v4l2_field field;
+ int fid;
+
+ if (!video_out->started)
+ return;
+
+ if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_NONE)
+ return;
+
+ field = video_out->fmt.fmt.pix.field;
+ if (field == V4L2_FIELD_NONE) {
+ /* handle progressive frame capture */
+ if (video_out->cur_frm != video_out->next_frm) {
+ vpfe_video_process_buffer_complete(video_out);
+ if (pipe->output_num > 1)
+ vpfe_video_process_buffer_complete(video_out2);
+ }
+
+ video_out->skip_frame_count--;
+ if (!video_out->skip_frame_count) {
+ video_out->skip_frame_count =
+ video_out->skip_frame_count_init;
+ rsz_src_enable(resizer->base_addr, 1);
+ } else {
+ rsz_src_enable(resizer->base_addr, 0);
+ }
+ return;
+ }
+
+ /* handle interlaced frame capture */
+ fid = vpfe_isif_get_fid(vpfe_dev);
+
+ /* switch the software maintained field id */
+ video_out->field_id ^= 1;
+ if (fid == video_out->field_id) {
+ /*
+ * we are in sync here, continue.
+ * One frame is just being captured. If the
+ * next frame is available, release the current
+ * frame and move on
+ */
+ if (fid == 0 && video_out->cur_frm != video_out->next_frm) {
+ vpfe_video_process_buffer_complete(video_out);
+ if (pipe->output_num > 1)
+ vpfe_video_process_buffer_complete(video_out2);
+ }
+ } else if (fid == 0) {
+ /*
+ * out of sync. Recover from any hardware out-of-sync.
+ * May lose one frame.
+ */
+ video_out->field_id = fid;
+ }
+}
+
+/*
+ * vpfe_resizer_dma_isr() - resizer module dma isr
+ * @resizer: vpfe resizer device pointer.
+ */
+void vpfe_resizer_dma_isr(struct vpfe_resizer_device *resizer)
+{
+ struct vpfe_video_device *video_out2 = &resizer->resizer_b.video_out;
+ struct vpfe_video_device *video_out = &resizer->resizer_a.video_out;
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ struct vpfe_pipeline *pipe = &video_out->pipe;
+ int schedule_capture = 0;
+ enum v4l2_field field;
+ int fid;
+
+ if (!video_out->started)
+ return;
+
+ if (pipe->state == VPFE_PIPELINE_STREAM_SINGLESHOT) {
+ resizer_ss_isr(resizer);
+ return;
+ }
+
+ field = video_out->fmt.fmt.pix.field;
+ if (field == V4L2_FIELD_NONE) {
+ if (!list_empty(&video_out->dma_queue) &&
+ video_out->cur_frm == video_out->next_frm)
+ schedule_capture = 1;
+ } else {
+ fid = vpfe_isif_get_fid(vpfe_dev);
+ if (fid == video_out->field_id) {
+ /* we are in sync here, continue */
+ if (fid == 1 && !list_empty(&video_out->dma_queue) &&
+ video_out->cur_frm == video_out->next_frm)
+ schedule_capture = 1;
+ }
+ }
+
+ if (!schedule_capture)
+ return;
+
+ spin_lock(&video_out->dma_queue_lock);
+ vpfe_video_schedule_next_buffer(video_out);
+ spin_unlock(&video_out->dma_queue_lock);
+ if (pipe->output_num > 1) {
+ spin_lock(&video_out2->dma_queue_lock);
+ vpfe_video_schedule_next_buffer(video_out2);
+ spin_unlock(&video_out2->dma_queue_lock);
+ }
+}
+
+/*
+ * V4L2 subdev operations
+ */
+
+/*
+ * resizer_ioctl() - Handle resizer module private ioctl's
+ * @sd: pointer to v4l2 subdev structure
+ * @cmd: configuration command
+ * @arg: configuration argument
+ */
+static long resizer_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
+ struct vpfe_rsz_config *user_config;
+ int ret = -ENOIOCTLCMD;
+
+ if (&resizer->crop_resizer.subdev != sd)
+ return ret;
+
+ switch (cmd) {
+ case VIDIOC_VPFE_RSZ_S_CONFIG:
+ user_config = (struct vpfe_rsz_config *)arg;
+ ret = resizer_set_configuration(resizer, user_config);
+ break;
+
+ case VIDIOC_VPFE_RSZ_G_CONFIG:
+ user_config = (struct vpfe_rsz_config *)arg;
+ if (!user_config->config) {
+ dev_err(dev, "error in VIDIOC_VPFE_RSZ_G_CONFIG\n");
+ return -EINVAL;
+ }
+ ret = resizer_get_configuration(resizer, user_config);
+ break;
+ }
+ return ret;
+}
+
+static int resizer_do_hw_setup(struct vpfe_resizer_device *resizer)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
+ u16 ipipeif_source = vpfe_dev->vpfe_ipipeif.output;
+ struct resizer_params *param = &resizer->config;
+ int ret = 0;
+
+ if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY ||
+ resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) {
+ if (ipipeif_sink == IPIPEIF_INPUT_MEMORY &&
+ ipipeif_source == IPIPEIF_OUTPUT_RESIZER)
+ ret = resizer_configure_in_single_shot_mode(resizer);
+ else
+ ret = resizer_configure_in_continious_mode(resizer);
+ if (ret)
+ return ret;
+ ret = config_rsz_hw(resizer, param);
+ }
+ return ret;
+}
+
+/*
+ * resizer_set_stream() - Enable/Disable streaming on resizer subdev
+ * @sd: pointer to v4l2 subdev structure
+ * @enable: 1 == Enable, 0 == Disable
+ */
+static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+
+ if (&resizer->crop_resizer.subdev != sd)
+ return 0;
+
+ if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY)
+ return 0;
+
+ switch (enable) {
+ case 1:
+ if (resizer_do_hw_setup(resizer) < 0)
+ return -EINVAL;
+ resizer_enable(resizer, enable);
+ break;
+
+ case 0:
+ resizer_enable(resizer, enable);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * __resizer_get_format() - helper function for getting resizer format
+ * @sd: pointer to subdev.
+ * @fh: V4L2 subdev file handle.
+ * @pad: pad number.
+ * @which: wanted subdev format.
+ * Return the wanted mbus frame format.
+ */
+static struct v4l2_mbus_framefmt *
+__resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ if (&resizer->crop_resizer.subdev == sd)
+ return &resizer->crop_resizer.formats[pad];
+ if (&resizer->resizer_a.subdev == sd)
+ return &resizer->resizer_a.formats[pad];
+ if (&resizer->resizer_b.subdev == sd)
+ return &resizer->resizer_b.formats[pad];
+ return NULL;
+}
+
+/*
+ * resizer_try_format() - Handle try format by pad subdev method
+ * @sd: pointer to subdev.
+ * @fh: V4L2 subdev file handle.
+ * @pad: pad num.
+ * @fmt: pointer to v4l2 format structure.
+ * @which: wanted subdev format.
+ */
+static void
+resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ unsigned int max_out_height;
+ unsigned int max_out_width;
+ unsigned int i;
+
+ if ((&resizer->resizer_a.subdev == sd && pad == RESIZER_PAD_SINK) ||
+ (&resizer->resizer_b.subdev == sd && pad == RESIZER_PAD_SINK) ||
+ (&resizer->crop_resizer.subdev == sd &&
+ (pad == RESIZER_CROP_PAD_SOURCE ||
+ pad == RESIZER_CROP_PAD_SOURCE2 || pad == RESIZER_CROP_PAD_SINK))) {
+ for (i = 0; i < ARRAY_SIZE(resizer_input_formats); i++) {
+ if (fmt->code == resizer_input_formats[i])
+ break;
+ }
+ /* If not found, use UYVY as default */
+ if (i >= ARRAY_SIZE(resizer_input_formats))
+ fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+
+ fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH,
+ MAX_IN_WIDTH);
+ fmt->height = clamp_t(u32, fmt->height, MIN_IN_HEIGHT,
+ MAX_IN_HEIGHT);
+ } else if (&resizer->resizer_a.subdev == sd &&
+ pad == RESIZER_PAD_SOURCE) {
+ max_out_width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+
+ for (i = 0; i < ARRAY_SIZE(resizer_output_formats); i++) {
+ if (fmt->code == resizer_output_formats[i])
+ break;
+ }
+ /* If not found, use UYVY as default */
+ if (i >= ARRAY_SIZE(resizer_output_formats))
+ fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+
+ fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH,
+ max_out_width);
+ fmt->width &= ~15;
+ fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT,
+ max_out_height);
+ } else if (&resizer->resizer_b.subdev == sd &&
+ pad == RESIZER_PAD_SOURCE) {
+ max_out_width = IPIPE_MAX_OUTPUT_WIDTH_B;
+ max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_B;
+
+ for (i = 0; i < ARRAY_SIZE(resizer_output_formats); i++) {
+ if (fmt->code == resizer_output_formats[i])
+ break;
+ }
+ /* If not found, use UYVY as default */
+ if (i >= ARRAY_SIZE(resizer_output_formats))
+ fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+
+ fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH,
+ max_out_width);
+ fmt->width &= ~15;
+ fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT,
+ max_out_height);
+ }
+}
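+/*
+ * For example, assuming a 645x483 request is within the IPIPE_MAX_OUTPUT_*
+ * limits for resizer A, the code above rounds the width down to a multiple
+ * of 16 and returns 640x483; the height is only clamped.
+ */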
+
+/*
+ * resizer_set_format() - Handle set format by pads subdev method
+ * @sd: pointer to v4l2 subdev structure
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __resizer_get_format(sd, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ resizer_try_format(sd, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ if (&resizer->crop_resizer.subdev == sd) {
+ if (fmt->pad == RESIZER_CROP_PAD_SINK) {
+ resizer->crop_resizer.formats[fmt->pad] = fmt->format;
+ } else if (fmt->pad == RESIZER_CROP_PAD_SOURCE &&
+ resizer->crop_resizer.output == RESIZER_A) {
+ resizer->crop_resizer.formats[fmt->pad] = fmt->format;
+ resizer->crop_resizer.
+ formats[RESIZER_CROP_PAD_SOURCE2] = fmt->format;
+ } else if (fmt->pad == RESIZER_CROP_PAD_SOURCE2 &&
+ resizer->crop_resizer.output2 == RESIZER_B) {
+ resizer->crop_resizer.formats[fmt->pad] = fmt->format;
+ resizer->crop_resizer.
+ formats[RESIZER_CROP_PAD_SOURCE] = fmt->format;
+ } else {
+ return -EINVAL;
+ }
+ } else if (&resizer->resizer_a.subdev == sd) {
+ if (fmt->pad == RESIZER_PAD_SINK)
+ resizer->resizer_a.formats[fmt->pad] = fmt->format;
+ else if (fmt->pad == RESIZER_PAD_SOURCE)
+ resizer->resizer_a.formats[fmt->pad] = fmt->format;
+ else
+ return -EINVAL;
+ } else if (&resizer->resizer_b.subdev == sd) {
+ if (fmt->pad == RESIZER_PAD_SINK)
+ resizer->resizer_b.formats[fmt->pad] = fmt->format;
+ else if (fmt->pad == RESIZER_PAD_SOURCE)
+ resizer->resizer_b.formats[fmt->pad] = fmt->format;
+ else
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * resizer_get_format() - Retrieve the video format on a pad
+ * @sd: pointer to v4l2 subdev structure.
+ * @fh: V4L2 subdev file handle.
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = __resizer_get_format(sd, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+/*
+ * resizer_enum_frame_size() - enum frame sizes on pads
+ * @sd: Pointer to subdevice.
+ * @fh: V4L2 subdev file handle.
+ * @fse: pointer to v4l2_subdev_frame_size_enum structure.
+ */
+static int resizer_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ resizer_try_format(sd, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ resizer_try_format(sd, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * resizer_enum_mbus_code() - enum mbus codes for pads
+ * @sd: Pointer to subdevice.
+ * @fh: V4L2 subdev file handle
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ */
+static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad == RESIZER_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(resizer_input_formats))
+ return -EINVAL;
+
+ code->code = resizer_input_formats[code->index];
+ } else if (code->pad == RESIZER_PAD_SOURCE) {
+ if (code->index >= ARRAY_SIZE(resizer_output_formats))
+ return -EINVAL;
+
+ code->code = resizer_output_formats[code->index];
+ }
+
+ return 0;
+}
+
+/*
+ * resizer_init_formats() - Initialize formats on all pads
+ * @sd: Pointer to subdevice.
+ * @fh: V4L2 subdev file handle.
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int resizer_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ __u32 which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev_format format;
+
+ if (&resizer->crop_resizer.subdev == sd) {
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_CROP_PAD_SINK;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+ format.format.width = MAX_IN_WIDTH;
+ format.format.height = MAX_IN_HEIGHT;
+ resizer_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_CROP_PAD_SOURCE;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.width = MAX_IN_WIDTH;
+ format.format.height = MAX_IN_HEIGHT;
+ resizer_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_CROP_PAD_SOURCE2;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.width = MAX_IN_WIDTH;
+ format.format.height = MAX_IN_HEIGHT;
+ resizer_set_format(sd, fh, &format);
+ } else if (&resizer->resizer_a.subdev == sd) {
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_PAD_SINK;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+ format.format.width = MAX_IN_WIDTH;
+ format.format.height = MAX_IN_HEIGHT;
+ resizer_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_PAD_SOURCE;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+ resizer_set_format(sd, fh, &format);
+ } else if (&resizer->resizer_b.subdev == sd) {
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_PAD_SINK;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+ format.format.width = MAX_IN_WIDTH;
+ format.format.height = MAX_IN_HEIGHT;
+ resizer_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_PAD_SOURCE;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.width = IPIPE_MAX_OUTPUT_WIDTH_B;
+ format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_B;
+ resizer_set_format(sd, fh, &format);
+ }
+
+ return 0;
+}
+
+/* subdev core operations */
+static const struct v4l2_subdev_core_ops resizer_v4l2_core_ops = {
+ .ioctl = resizer_ioctl,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops resizer_v4l2_internal_ops = {
+ .open = resizer_init_formats,
+};
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = {
+ .s_stream = resizer_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
+ .enum_mbus_code = resizer_enum_mbus_code,
+ .enum_frame_size = resizer_enum_frame_size,
+ .get_fmt = resizer_get_format,
+ .set_fmt = resizer_set_format,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops resizer_v4l2_ops = {
+ .core = &resizer_v4l2_core_ops,
+ .video = &resizer_v4l2_video_ops,
+ .pad = &resizer_v4l2_pad_ops,
+};
+
+/*
+ * Media entity operations
+ */
+
+/*
+ * resizer_link_setup() - Setup resizer connections
+ * @entity: Pointer to media entity structure
+ * @local: Pointer to local pad
+ * @remote: Pointer to remote pad
+ * @flags: Link flags
+ *
+ * Return: zero on success or -EINVAL on error
+ */
+static int resizer_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ u16 ipipeif_source = vpfe_dev->vpfe_ipipeif.output;
+ u16 ipipe_source = vpfe_dev->vpfe_ipipe.output;
+
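+	/*
+	 * Each switch below dispatches on the local pad index OR'ed with the
+	 * remote entity type; the two never overlap because the MEDIA_ENT_T_*
+	 * values of this media controller API live in the upper bits while
+	 * the pad indices stay small.
+	 */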
+ if (&resizer->crop_resizer.subdev == sd) {
+ switch (local->index | media_entity_type(remote->entity)) {
+ case RESIZER_CROP_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->crop_resizer.input =
+ RESIZER_CROP_INPUT_NONE;
+ break;
+ }
+
+ if (resizer->crop_resizer.input !=
+ RESIZER_CROP_INPUT_NONE)
+ return -EBUSY;
+ if (ipipeif_source == IPIPEIF_OUTPUT_RESIZER)
+ resizer->crop_resizer.input =
+ RESIZER_CROP_INPUT_IPIPEIF;
+ else if (ipipe_source == IPIPE_OUTPUT_RESIZER)
+ resizer->crop_resizer.input =
+ RESIZER_CROP_INPUT_IPIPE;
+ else
+ return -EINVAL;
+ break;
+
+ case RESIZER_CROP_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->crop_resizer.output =
+ RESIZER_CROP_OUTPUT_NONE;
+ break;
+ }
+ if (resizer->crop_resizer.output !=
+ RESIZER_CROP_OUTPUT_NONE)
+ return -EBUSY;
+ resizer->crop_resizer.output = RESIZER_A;
+ break;
+
+ case RESIZER_CROP_PAD_SOURCE2 | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->crop_resizer.output2 =
+ RESIZER_CROP_OUTPUT_NONE;
+ break;
+ }
+ if (resizer->crop_resizer.output2 !=
+ RESIZER_CROP_OUTPUT_NONE)
+ return -EBUSY;
+ resizer->crop_resizer.output2 = RESIZER_B;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else if (&resizer->resizer_a.subdev == sd) {
+ switch (local->index | media_entity_type(remote->entity)) {
+ case RESIZER_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->resizer_a.input = RESIZER_INPUT_NONE;
+ break;
+ }
+ if (resizer->resizer_a.input != RESIZER_INPUT_NONE)
+ return -EBUSY;
+ resizer->resizer_a.input = RESIZER_INPUT_CROP_RESIZER;
+ break;
+
+ case RESIZER_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->resizer_a.output = RESIZER_OUTPUT_NONE;
+ break;
+ }
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE)
+ return -EBUSY;
+ resizer->resizer_a.output = RESIZER_OUPUT_MEMORY;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else if (&resizer->resizer_b.subdev == sd) {
+ switch (local->index | media_entity_type(remote->entity)) {
+ case RESIZER_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->resizer_b.input = RESIZER_INPUT_NONE;
+ break;
+ }
+ if (resizer->resizer_b.input != RESIZER_INPUT_NONE)
+ return -EBUSY;
+ resizer->resizer_b.input = RESIZER_INPUT_CROP_RESIZER;
+ break;
+
+ case RESIZER_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->resizer_b.output = RESIZER_OUTPUT_NONE;
+ break;
+ }
+ if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE)
+ return -EBUSY;
+ resizer->resizer_b.output = RESIZER_OUPUT_MEMORY;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations resizer_media_ops = {
+ .link_setup = resizer_link_setup,
+};
+
+/*
+ * vpfe_resizer_unregister_entities() - Unregister entity
+ * @vpfe_rsz - pointer to resizer subdevice structure.
+ */
+void vpfe_resizer_unregister_entities(struct vpfe_resizer_device *vpfe_rsz)
+{
+ /* unregister video devices */
+ vpfe_video_unregister(&vpfe_rsz->resizer_a.video_out);
+ vpfe_video_unregister(&vpfe_rsz->resizer_b.video_out);
+
+ /* cleanup entity */
+ media_entity_cleanup(&vpfe_rsz->crop_resizer.subdev.entity);
+ media_entity_cleanup(&vpfe_rsz->resizer_a.subdev.entity);
+ media_entity_cleanup(&vpfe_rsz->resizer_b.subdev.entity);
+ /* unregister subdev */
+ v4l2_device_unregister_subdev(&vpfe_rsz->crop_resizer.subdev);
+ v4l2_device_unregister_subdev(&vpfe_rsz->resizer_a.subdev);
+ v4l2_device_unregister_subdev(&vpfe_rsz->resizer_b.subdev);
+}
+
+/*
+ * vpfe_resizer_register_entities() - Register entity
+ * @resizer - pointer to the resizer device.
+ * @vdev: pointer to v4l2 device structure.
+ */
+int vpfe_resizer_register_entities(struct vpfe_resizer_device *resizer,
+ struct v4l2_device *vdev)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ unsigned int flags = 0;
+ int ret;
+
+ /* Register the crop resizer subdev */
+ ret = v4l2_device_register_subdev(vdev, &resizer->crop_resizer.subdev);
+ if (ret < 0) {
+ pr_err("Failed to register crop resizer as v4l2-subdev\n");
+ return ret;
+ }
+ /* Register Resizer-A subdev */
+ ret = v4l2_device_register_subdev(vdev, &resizer->resizer_a.subdev);
+ if (ret < 0) {
+ pr_err("Failed to register resizer-a as v4l2-subdev\n");
+ return ret;
+ }
+ /* Register Resizer-B subdev */
+ ret = v4l2_device_register_subdev(vdev, &resizer->resizer_b.subdev);
+ if (ret < 0) {
+ pr_err("Failed to register resizer-b as v4l2-subdev\n");
+ return ret;
+ }
+ /* Register video-out device for resizer-a */
+ ret = vpfe_video_register(&resizer->resizer_a.video_out, vdev);
+ if (ret) {
+ pr_err("Failed to register RSZ-A video-out device\n");
+ goto out_video_out2_register;
+ }
+ resizer->resizer_a.video_out.vpfe_dev = vpfe_dev;
+
+ /* Register video-out device for resizer-b */
+ ret = vpfe_video_register(&resizer->resizer_b.video_out, vdev);
+ if (ret) {
+ pr_err("Failed to register RSZ-B video-out device\n");
+ goto out_video_out2_register;
+ }
+ resizer->resizer_b.video_out.vpfe_dev = vpfe_dev;
+
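+	/*
+	 * Resulting topology (links are created disabled, flags == 0):
+	 *   crop resizer pad 1 -> resizer A pad 0 -> RSZ-A video-out node
+	 *   crop resizer pad 2 -> resizer B pad 0 -> RSZ-B video-out node
+	 */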
+ /* create link between Resizer Crop----> Resizer A*/
+ ret = media_entity_create_link(&resizer->crop_resizer.subdev.entity, 1,
+ &resizer->resizer_a.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_create_link;
+
+ /* create link between Resizer Crop----> Resizer B*/
+ ret = media_entity_create_link(&resizer->crop_resizer.subdev.entity, 2,
+ &resizer->resizer_b.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_create_link;
+
+ /* create link between Resizer A ----> video out */
+ ret = media_entity_create_link(&resizer->resizer_a.subdev.entity, 1,
+ &resizer->resizer_a.video_out.video_dev.entity, 0, flags);
+ if (ret < 0)
+ goto out_create_link;
+
+ /* create link between Resizer B ----> video out */
+ ret = media_entity_create_link(&resizer->resizer_b.subdev.entity, 1,
+ &resizer->resizer_b.video_out.video_dev.entity, 0, flags);
+ if (ret < 0)
+ goto out_create_link;
+
+ return 0;
+
+out_create_link:
+ vpfe_video_unregister(&resizer->resizer_b.video_out);
+out_video_out2_register:
+ vpfe_video_unregister(&resizer->resizer_a.video_out);
+ media_entity_cleanup(&resizer->crop_resizer.subdev.entity);
+ media_entity_cleanup(&resizer->resizer_a.subdev.entity);
+ media_entity_cleanup(&resizer->resizer_b.subdev.entity);
+ v4l2_device_unregister_subdev(&resizer->crop_resizer.subdev);
+ v4l2_device_unregister_subdev(&resizer->resizer_a.subdev);
+ v4l2_device_unregister_subdev(&resizer->resizer_b.subdev);
+ return ret;
+}
+
+/*
+ * vpfe_resizer_init() - resizer device initialization.
+ * @vpfe_rsz - pointer to resizer device
+ * @pdev: platform device pointer.
+ */
+int vpfe_resizer_init(struct vpfe_resizer_device *vpfe_rsz,
+ struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = &vpfe_rsz->crop_resizer.subdev;
+ struct media_pad *pads = &vpfe_rsz->crop_resizer.pads[0];
+ struct media_entity *me = &sd->entity;
+	resource_size_t res_len;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
+ if (!res)
+ return -ENOENT;
+
+ res_len = resource_size(res);
+ res = request_mem_region(res->start, res_len, res->name);
+ if (!res)
+ return -EBUSY;
+
+ vpfe_rsz->base_addr = ioremap_nocache(res->start, res_len);
+ if (!vpfe_rsz->base_addr)
+ return -EBUSY;
+
+ v4l2_subdev_init(sd, &resizer_v4l2_ops);
+ sd->internal_ops = &resizer_v4l2_internal_ops;
+ strlcpy(sd->name, "DAVINCI RESIZER CROP", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
+ v4l2_set_subdevdata(sd, vpfe_rsz);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[RESIZER_CROP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[RESIZER_CROP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ pads[RESIZER_CROP_PAD_SOURCE2].flags = MEDIA_PAD_FL_SOURCE;
+
+ vpfe_rsz->crop_resizer.input = RESIZER_CROP_INPUT_NONE;
+ vpfe_rsz->crop_resizer.output = RESIZER_CROP_OUTPUT_NONE;
+ vpfe_rsz->crop_resizer.output2 = RESIZER_CROP_OUTPUT_NONE;
+ vpfe_rsz->crop_resizer.rsz_device = vpfe_rsz;
+ me->ops = &resizer_media_ops;
+ ret = media_entity_init(me, RESIZER_CROP_PADS_NUM, pads, 0);
+ if (ret)
+ return ret;
+
+ sd = &vpfe_rsz->resizer_a.subdev;
+ pads = &vpfe_rsz->resizer_a.pads[0];
+ me = &sd->entity;
+
+ v4l2_subdev_init(sd, &resizer_v4l2_ops);
+ sd->internal_ops = &resizer_v4l2_internal_ops;
+ strlcpy(sd->name, "DAVINCI RESIZER A", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
+ v4l2_set_subdevdata(sd, vpfe_rsz);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[RESIZER_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ vpfe_rsz->resizer_a.input = RESIZER_INPUT_NONE;
+ vpfe_rsz->resizer_a.output = RESIZER_OUTPUT_NONE;
+ vpfe_rsz->resizer_a.rsz_device = vpfe_rsz;
+ me->ops = &resizer_media_ops;
+ ret = media_entity_init(me, RESIZER_PADS_NUM, pads, 0);
+ if (ret)
+ return ret;
+
+ sd = &vpfe_rsz->resizer_b.subdev;
+ pads = &vpfe_rsz->resizer_b.pads[0];
+ me = &sd->entity;
+
+ v4l2_subdev_init(sd, &resizer_v4l2_ops);
+ sd->internal_ops = &resizer_v4l2_internal_ops;
+ strlcpy(sd->name, "DAVINCI RESIZER B", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
+ v4l2_set_subdevdata(sd, vpfe_rsz);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[RESIZER_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ vpfe_rsz->resizer_b.input = RESIZER_INPUT_NONE;
+ vpfe_rsz->resizer_b.output = RESIZER_OUTPUT_NONE;
+ vpfe_rsz->resizer_b.rsz_device = vpfe_rsz;
+ me->ops = &resizer_media_ops;
+ ret = media_entity_init(me, RESIZER_PADS_NUM, pads, 0);
+ if (ret)
+ return ret;
+
+ vpfe_rsz->resizer_a.video_out.ops = &resizer_a_video_ops;
+ vpfe_rsz->resizer_a.video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ret = vpfe_video_init(&vpfe_rsz->resizer_a.video_out, "RSZ-A");
+ if (ret) {
+ pr_err("Failed to init RSZ video-out device\n");
+ return ret;
+ }
+ vpfe_rsz->resizer_b.video_out.ops = &resizer_b_video_ops;
+ vpfe_rsz->resizer_b.video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ret = vpfe_video_init(&vpfe_rsz->resizer_b.video_out, "RSZ-B");
+ if (ret) {
+ pr_err("Failed to init RSZ video-out2 device\n");
+ return ret;
+ }
+ memset(&vpfe_rsz->config, 0, sizeof(struct resizer_params));
+
+ return 0;
+}
+
+void
+vpfe_resizer_cleanup(struct vpfe_resizer_device *vpfe_rsz,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ iounmap(vpfe_rsz->base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
+ if (res)
+		release_mem_region(res->start, resource_size(res));
+}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.h b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
new file mode 100644
index 000000000000..59a79422b914
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_RESIZER_H
+#define _DAVINCI_VPFE_DM365_RESIZER_H
+
+enum resizer_oper_mode {
+ RESIZER_MODE_CONTINIOUS = 0,
+ RESIZER_MODE_ONE_SHOT = 1,
+};
+
+struct f_div_pass {
+ unsigned int o_hsz;
+ unsigned int i_hps;
+ unsigned int h_phs;
+ unsigned int src_hps;
+ unsigned int src_hsz;
+};
+
+#define MAX_PASSES 2
+
+struct f_div_param {
+ unsigned char en;
+ unsigned int num_passes;
+ struct f_div_pass pass[MAX_PASSES];
+};
+
+/* Resizer Rescale Parameters*/
+struct resizer_scale_param {
+ bool h_flip;
+ bool v_flip;
+ bool cen;
+ bool yen;
+ unsigned short i_vps;
+ unsigned short i_hps;
+ unsigned short o_vsz;
+ unsigned short o_hsz;
+ unsigned short v_phs_y;
+ unsigned short v_phs_c;
+ unsigned short v_dif;
+ /* resize method - Luminance */
+ enum vpfe_rsz_intp_t v_typ_y;
+ /* resize method - Chrominance */
+ enum vpfe_rsz_intp_t v_typ_c;
+ /* vertical lpf intensity - Luminance */
+ unsigned char v_lpf_int_y;
+ /* vertical lpf intensity - Chrominance */
+ unsigned char v_lpf_int_c;
+ unsigned short h_phs;
+ unsigned short h_dif;
+ /* resize method - Luminance */
+ enum vpfe_rsz_intp_t h_typ_y;
+ /* resize method - Chrominance */
+ enum vpfe_rsz_intp_t h_typ_c;
+ /* horizontal lpf intensity - Luminance */
+ unsigned char h_lpf_int_y;
+ /* horizontal lpf intensity - Chrominance */
+ unsigned char h_lpf_int_c;
+ bool dscale_en;
+ enum vpfe_rsz_down_scale_ave_sz h_dscale_ave_sz;
+ enum vpfe_rsz_down_scale_ave_sz v_dscale_ave_sz;
+ /* store the calculated frame division parameter */
+ struct f_div_param f_div;
+};
+
+enum resizer_rgb_t {
+ OUTPUT_32BIT,
+ OUTPUT_16BIT
+};
+
+enum resizer_rgb_msk_t {
+ NOMASK = 0,
+ MASKLAST2 = 1,
+};
+
+/* Resizer RGB Conversion Parameters */
+struct resizer_rgb {
+ bool rgb_en;
+ enum resizer_rgb_t rgb_typ;
+ enum resizer_rgb_msk_t rgb_msk0;
+ enum resizer_rgb_msk_t rgb_msk1;
+ unsigned int rgb_alpha_val;
+};
+
+/* Resizer External Memory Parameters */
+struct rsz_ext_mem_param {
+ unsigned int rsz_sdr_oft_y;
+ unsigned int rsz_sdr_ptr_s_y;
+ unsigned int rsz_sdr_ptr_e_y;
+ unsigned int rsz_sdr_oft_c;
+ unsigned int rsz_sdr_ptr_s_c;
+ unsigned int rsz_sdr_ptr_e_c;
+ /* offset to be added to buffer start when flipping for y/ycbcr */
+ unsigned int flip_ofst_y;
+ /* offset to be added to buffer start when flipping for c */
+ unsigned int flip_ofst_c;
+ /* c offset for YUV 420SP */
+ unsigned int c_offset;
+ /* User Defined Y offset for YUV 420SP or YUV420ILE data */
+ unsigned int user_y_ofst;
+ /* User Defined C offset for YUV 420SP data */
+ unsigned int user_c_ofst;
+};
+
+enum rsz_data_source {
+ IPIPE_DATA,
+ IPIPEIF_DATA
+};
+
+enum rsz_src_img_fmt {
+ RSZ_IMG_422,
+ RSZ_IMG_420
+};
+
+enum rsz_dpaths_bypass_t {
+ BYPASS_OFF = 0,
+ BYPASS_ON = 1,
+};
+
+struct rsz_common_params {
+ unsigned int vps;
+ unsigned int vsz;
+ unsigned int hps;
+ unsigned int hsz;
+ /* 420 or 422 */
+ enum rsz_src_img_fmt src_img_fmt;
+ /* Y or C when src_fmt is 420, 0 - y, 1 - c */
+ unsigned char y_c;
+ /* flip raw or ycbcr */
+ unsigned char raw_flip;
+ /* IPIPE or IPIPEIF data */
+ enum rsz_data_source source;
+ enum rsz_dpaths_bypass_t passthrough;
+ unsigned char yuv_y_min;
+ unsigned char yuv_y_max;
+ unsigned char yuv_c_min;
+ unsigned char yuv_c_max;
+ bool rsz_seq_crv;
+ enum vpfe_chr_pos out_chr_pos;
+};
+
+struct resizer_params {
+ enum resizer_oper_mode oper_mode;
+ struct rsz_common_params rsz_common;
+ struct resizer_scale_param rsz_rsc_param[2];
+ struct resizer_rgb rsz2rgb[2];
+ struct rsz_ext_mem_param ext_mem_param[2];
+ bool rsz_en[2];
+ struct vpfe_rsz_config_params user_config;
+};
+
+#define ENABLE 1
+#define DISABLE (!ENABLE)
+
+#define RESIZER_CROP_PAD_SINK 0
+#define RESIZER_CROP_PAD_SOURCE 1
+#define RESIZER_CROP_PAD_SOURCE2 2
+
+#define RESIZER_CROP_PADS_NUM 3
+
+enum resizer_crop_input_entity {
+ RESIZER_CROP_INPUT_NONE = 0,
+ RESIZER_CROP_INPUT_IPIPEIF = 1,
+ RESIZER_CROP_INPUT_IPIPE = 2,
+};
+
+enum resizer_crop_output_entity {
+ RESIZER_CROP_OUTPUT_NONE,
+ RESIZER_A,
+ RESIZER_B,
+};
+
+struct dm365_crop_resizer_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[RESIZER_CROP_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[RESIZER_CROP_PADS_NUM];
+ enum resizer_crop_input_entity input;
+ enum resizer_crop_output_entity output;
+ enum resizer_crop_output_entity output2;
+ struct vpfe_resizer_device *rsz_device;
+};
+
+#define RESIZER_PAD_SINK 0
+#define RESIZER_PAD_SOURCE 1
+
+#define RESIZER_PADS_NUM 2
+
+enum resizer_input_entity {
+ RESIZER_INPUT_NONE = 0,
+ RESIZER_INPUT_CROP_RESIZER = 1,
+};
+
+enum resizer_output_entity {
+ RESIZER_OUTPUT_NONE = 0,
+ RESIZER_OUPUT_MEMORY = 1,
+};
+
+struct dm365_resizer_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[RESIZER_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[RESIZER_PADS_NUM];
+ enum resizer_input_entity input;
+ enum resizer_output_entity output;
+ struct vpfe_video_device video_out;
+ struct vpfe_resizer_device *rsz_device;
+};
+
+struct vpfe_resizer_device {
+ struct dm365_crop_resizer_device crop_resizer;
+ struct dm365_resizer_device resizer_a;
+ struct dm365_resizer_device resizer_b;
+ struct resizer_params config;
+	void __iomem *base_addr;
+};
+
+int vpfe_resizer_init(struct vpfe_resizer_device *vpfe_rsz,
+ struct platform_device *pdev);
+int vpfe_resizer_register_entities(struct vpfe_resizer_device *vpfe_rsz,
+ struct v4l2_device *v4l2_dev);
+void vpfe_resizer_unregister_entities(struct vpfe_resizer_device *vpfe_rsz);
+void vpfe_resizer_cleanup(struct vpfe_resizer_device *vpfe_rsz,
+ struct platform_device *pdev);
+void vpfe_resizer_buffer_isr(struct vpfe_resizer_device *resizer);
+void vpfe_resizer_dma_isr(struct vpfe_resizer_device *resizer);
+
+#endif /* _DAVINCI_VPFE_DM365_RESIZER_H */
diff --git a/drivers/staging/media/davinci_vpfe/vpfe.h b/drivers/staging/media/davinci_vpfe/vpfe.h
new file mode 100644
index 000000000000..0587bc52a840
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/vpfe.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _VPFE_H
+#define _VPFE_H
+
+#ifdef __KERNEL__
+#include <linux/v4l2-subdev.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+
+#include <media/davinci/vpfe_types.h>
+
+#define CAPTURE_DRV_NAME "vpfe-capture"
+
+struct vpfe_route {
+ __u32 input;
+ __u32 output;
+};
+
+enum vpfe_subdev_id {
+ VPFE_SUBDEV_TVP5146 = 1,
+ VPFE_SUBDEV_MT9T031 = 2,
+ VPFE_SUBDEV_TVP7002 = 3,
+ VPFE_SUBDEV_MT9P031 = 4,
+};
+
+struct vpfe_ext_subdev_info {
+ /* v4l2 subdev */
+ struct v4l2_subdev *subdev;
+ /* Sub device module name */
+ char module_name[32];
+ /* Sub device group id */
+ int grp_id;
+ /* Number of inputs supported */
+ int num_inputs;
+ /* inputs available at the sub device */
+ struct v4l2_input *inputs;
+ /* Sub dev routing information for each input */
+ struct vpfe_route *routes;
+ /* ccdc bus/interface configuration */
+ struct vpfe_hw_if_param ccdc_if_params;
+ /* i2c subdevice board info */
+ struct i2c_board_info board_info;
+ /* Is this a camera sub device ? */
+ unsigned is_camera:1;
+ /* check if sub dev supports routing */
+ unsigned can_route:1;
+ /* registered ? */
+ unsigned registered:1;
+};
+
+struct vpfe_config {
+ /* Number of sub devices connected to vpfe */
+ int num_subdevs;
+ /* information about each subdev */
+ struct vpfe_ext_subdev_info *sub_devs;
+ /* evm card info */
+ char *card_name;
+ /* setup function for the input path */
+ int (*setup_input)(enum vpfe_subdev_id id);
+ /* number of clocks */
+ int num_clocks;
+ /* clocks used for vpfe capture */
+ char *clocks[];
+};
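+
+/*
+ * Illustrative sketch of the platform data a board file might provide.
+ * The field names come from struct vpfe_config above; all identifiers and
+ * values below are hypothetical, not taken from an actual board file:
+ *
+ *	static struct vpfe_config dm365_evm_vpfe_cfg = {
+ *		.num_subdevs	= ARRAY_SIZE(vpfe_sub_devs),
+ *		.sub_devs	= vpfe_sub_devs,
+ *		.card_name	= "DM365 EVM",
+ *		.setup_input	= dm365_evm_setup_video_input,
+ *		.num_clocks	= 1,
+ *		.clocks		= {"vpss_master"},
+ *	};
+ */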
+#endif		/* __KERNEL__ */
+#endif		/* _VPFE_H */
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
new file mode 100644
index 000000000000..7b351717bcbb
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -0,0 +1,740 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ *
+ *
+ * Driver name : VPFE Capture driver
+ * The VPFE Capture driver allows applications to capture and stream video
+ * frames on DaVinci SoCs (DM6446, DM355, etc.) from a YUV source such as
+ * the TVP5146, or Raw Bayer RGB image data from an image sensor
+ * such as Micron's MT9T001 or MT9T031.
+ *
+ * These SoCs have, in common, a Video Processing Subsystem (VPSS) that
+ * consists of a Video Processing Front End (VPFE) for capturing
+ * video/raw image data and a Video Processing Back End (VPBE) for displaying
+ * YUV data through an in-built analog encoder or digital LCD port. This
+ * driver is for capture through the VPFE. A typical EVM using these SoCs has
+ * the following high-level configuration.
+ *
+ * decoder(TVP5146/ YUV/
+ * MT9T001) --> Raw Bayer RGB ---> MUX -> VPFE (CCDC/ISIF)
+ * data input | |
+ * V |
+ * SDRAM |
+ * V
+ * Image Processor
+ * |
+ * V
+ * SDRAM
+ * The data flows from a decoder connected to the VPFE over a
+ * YUV embedded (BT.656/BT.1120), separate sync, or raw bayer rgb interface
+ * into the VPFE input, optionally through a MUX (if more inputs are
+ * to be interfaced on the EVM). The input data is first passed through the
+ * CCDC (CCD Controller, a.k.a. Image Sensor Interface, ISIF). The CCDC
+ * does little or no processing on YUV data, and pre-processes Raw
+ * Bayer RGB data through modules such as Defect Pixel Correction (DFC),
+ * Color Space Conversion (CSC), data gain/offset, etc. After this, the data
+ * can be written to SDRAM or fed to an image processing
+ * block such as the IPIPE (on DM355/DM365 only).
+ *
+ * Features supported
+ * - MMAP IO
+ * - USERPTR IO
+ * - Capture using TVP5146 over BT.656
+ * - Support for interfacing decoders using sub device model
+ * - Work with DM365 or DM355 or DM6446 CCDC to do Raw Bayer
+ * RGB/YUV data capture to SDRAM.
+ * - Chaining of Image Processor
+ * - SINGLE-SHOT mode
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "vpfe.h"
+#include "vpfe_mc_capture.h"
+
+static bool debug;
+static bool interface;
+
+module_param(interface, bool, S_IRUGO);
+module_param(debug, bool, 0644);
+
+/*
+ * VPFE capture can be used for capturing video from sources such as the
+ * TVP5146 or TVP7002, and for capturing raw bayer data from camera sensors
+ * such as the mt9p031. At this point there is a problem with the co-existence
+ * of mt9p031 and tvp5146 due to an i2c address collision, so set the variable
+ * below from bootargs to select either video capture or camera capture.
+ * interface = 0 - video capture (from TVP514x or such),
+ * interface = 1 - camera capture (from mt9p031 or such).
+ * Re-visit this when we fix the co-existence issue.
+ */
+MODULE_PARM_DESC(interface, "interface 0-1 (default:0)");
+MODULE_PARM_DESC(debug, "Debug level 0-1");
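+/*
+ * For example (illustrative only, the exact prefix depends on the module
+ * name chosen by the build), camera capture could be selected from bootargs
+ * with something like "<module>.interface=1". The parameter is exposed
+ * read-only under /sys/module/<module>/parameters/ (S_IRUGO), so it must be
+ * set on the kernel command line or at module load time.
+ */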
+
+MODULE_DESCRIPTION("VPFE Video for Linux Capture Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+
+/* map mbus_fmt to pixelformat */
+void mbus_to_pix(const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_pix_format *pix)
+{
+ switch (mbus->code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ pix->pixelformat = V4L2_PIX_FMT_UYVY;
+ pix->bytesperline = pix->width * 2;
+ break;
+
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+ pix->bytesperline = pix->width * 2;
+ break;
+
+ case V4L2_MBUS_FMT_YUYV10_1X20:
+ pix->pixelformat = V4L2_PIX_FMT_UYVY;
+ pix->bytesperline = pix->width * 2;
+ break;
+
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ pix->pixelformat = V4L2_PIX_FMT_SBGGR16;
+ pix->bytesperline = pix->width * 2;
+ break;
+
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ pix->pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8;
+ pix->bytesperline = pix->width;
+ break;
+
+ case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ pix->pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8;
+ pix->bytesperline = pix->width;
+ break;
+
+ case V4L2_MBUS_FMT_YDYUYDYV8_1X16:
+ pix->pixelformat = V4L2_PIX_FMT_NV12;
+ pix->bytesperline = pix->width;
+ break;
+
+ case V4L2_MBUS_FMT_Y8_1X8:
+ pix->pixelformat = V4L2_PIX_FMT_GREY;
+ pix->bytesperline = pix->width;
+ break;
+
+ case V4L2_MBUS_FMT_UV8_1X8:
+ pix->pixelformat = V4L2_PIX_FMT_UV8;
+ pix->bytesperline = pix->width;
+ break;
+
+ default:
+ pr_err("Invalid mbus code set\n");
+ }
+ /* pitch should be 32 bytes aligned */
+ pix->bytesperline = ALIGN(pix->bytesperline, 32);
+ if (pix->pixelformat == V4L2_PIX_FMT_NV12)
+ pix->sizeimage = pix->bytesperline * pix->height +
+ ((pix->bytesperline * pix->height) >> 1);
+ else
+ pix->sizeimage = pix->bytesperline * pix->height;
+}
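+
+/*
+ * Worked example (illustrative): a 360x240 NV12 frame gives
+ * bytesperline = ALIGN(360, 32) = 384 and
+ * sizeimage = 384 * 240 + (384 * 240) / 2 = 138240 bytes,
+ * i.e. the luma plane plus the half-sized interleaved chroma plane.
+ */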
+
+/* ISR for VINT0*/
+static irqreturn_t vpfe_isr(int irq, void *dev_id)
+{
+ struct vpfe_device *vpfe_dev = dev_id;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_isr\n");
+ vpfe_isif_buffer_isr(&vpfe_dev->vpfe_isif);
+ vpfe_resizer_buffer_isr(&vpfe_dev->vpfe_resizer);
+ return IRQ_HANDLED;
+}
+
+/* vpfe_vdint1_isr() - isr handler for VINT1 interrupt */
+static irqreturn_t vpfe_vdint1_isr(int irq, void *dev_id)
+{
+ struct vpfe_device *vpfe_dev = dev_id;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_vdint1_isr\n");
+ vpfe_isif_vidint1_isr(&vpfe_dev->vpfe_isif);
+ return IRQ_HANDLED;
+}
+
+/* vpfe_imp_dma_isr() - ISR for ipipe dma completion */
+static irqreturn_t vpfe_imp_dma_isr(int irq, void *dev_id)
+{
+ struct vpfe_device *vpfe_dev = dev_id;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_imp_dma_isr\n");
+ vpfe_ipipeif_ss_buffer_isr(&vpfe_dev->vpfe_ipipeif);
+ vpfe_resizer_dma_isr(&vpfe_dev->vpfe_resizer);
+ return IRQ_HANDLED;
+}
+
+/*
+ * vpfe_disable_clock() - Disable clocks for vpfe capture driver
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * Disables clocks defined in vpfe configuration. The function
+ * assumes that at least one clock is defined, which is
+ * true as of now.
+ */
+static void vpfe_disable_clock(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_config *vpfe_cfg = vpfe_dev->cfg;
+ int i;
+
+ for (i = 0; i < vpfe_cfg->num_clocks; i++) {
+ clk_disable_unprepare(vpfe_dev->clks[i]);
+ clk_put(vpfe_dev->clks[i]);
+ }
+ kzfree(vpfe_dev->clks);
+ v4l2_info(vpfe_dev->pdev->driver, "vpfe capture clocks disabled\n");
+}
+
+/*
+ * vpfe_enable_clock() - Enable clocks for vpfe capture driver
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * Enables clocks defined in vpfe configuration. The function
+ * assumes that at least one clock is defined, which is
+ * true as of now.
+ */
+static int vpfe_enable_clock(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_config *vpfe_cfg = vpfe_dev->cfg;
+ int ret = -EFAULT;
+ int i;
+
+ if (!vpfe_cfg->num_clocks)
+ return 0;
+
+ vpfe_dev->clks = kzalloc(vpfe_cfg->num_clocks *
+				sizeof(struct clk *), GFP_KERNEL);
+ if (vpfe_dev->clks == NULL) {
+ v4l2_err(vpfe_dev->pdev->driver, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < vpfe_cfg->num_clocks; i++) {
+ if (vpfe_cfg->clocks[i] == NULL) {
+ v4l2_err(vpfe_dev->pdev->driver,
+ "clock %s is not defined in vpfe config\n",
+ vpfe_cfg->clocks[i]);
+ goto out;
+ }
+
+ vpfe_dev->clks[i] =
+ clk_get(vpfe_dev->pdev, vpfe_cfg->clocks[i]);
+		if (IS_ERR(vpfe_dev->clks[i])) {
+ v4l2_err(vpfe_dev->pdev->driver,
+ "Failed to get clock %s\n",
+ vpfe_cfg->clocks[i]);
+ goto out;
+ }
+
+ if (clk_prepare_enable(vpfe_dev->clks[i])) {
+ v4l2_err(vpfe_dev->pdev->driver,
+ "vpfe clock %s not enabled\n",
+ vpfe_cfg->clocks[i]);
+ goto out;
+ }
+
+ v4l2_info(vpfe_dev->pdev->driver, "vpss clock %s enabled",
+ vpfe_cfg->clocks[i]);
+ }
+
+ return 0;
+out:
+ for (i = 0; i < vpfe_cfg->num_clocks; i++)
+		if (!IS_ERR_OR_NULL(vpfe_dev->clks[i])) {
+ clk_disable_unprepare(vpfe_dev->clks[i]);
+ clk_put(vpfe_dev->clks[i]);
+ }
+
+ v4l2_err(vpfe_dev->pdev->driver, "Failed to enable clocks\n");
+ kzfree(vpfe_dev->clks);
+
+ return ret;
+}
+
+/*
+ * vpfe_detach_irq() - Detach IRQs for vpfe capture driver
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * Detach all IRQs defined in vpfe configuration.
+ */
+static void vpfe_detach_irq(struct vpfe_device *vpfe_dev)
+{
+ free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
+ free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
+ free_irq(vpfe_dev->imp_dma_irq, vpfe_dev);
+}
+
+/*
+ * vpfe_attach_irq() - Attach IRQs for vpfe capture driver
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * Attach all IRQs defined in vpfe configuration.
+ */
+static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
+{
+ int ret = 0;
+
+ ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, IRQF_DISABLED,
+ "vpfe_capture0", vpfe_dev);
+ if (ret < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Error: requesting VINT0 interrupt\n");
+ return ret;
+ }
+
+ ret = request_irq(vpfe_dev->ccdc_irq1, vpfe_vdint1_isr, IRQF_DISABLED,
+ "vpfe_capture1", vpfe_dev);
+ if (ret < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Error: requesting VINT1 interrupt\n");
+ free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
+ return ret;
+ }
+
+ ret = request_irq(vpfe_dev->imp_dma_irq, vpfe_imp_dma_isr,
+ IRQF_DISABLED, "Imp_Sdram_Irq", vpfe_dev);
+ if (ret < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Error: requesting IMP IRQ interrupt\n");
+ free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
+ free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * register_i2c_devices() - register all i2c v4l2 subdevs
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * register all i2c v4l2 subdevs
+ */
+static int register_i2c_devices(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_ext_subdev_info *sdinfo;
+ struct vpfe_config *vpfe_cfg;
+ struct i2c_adapter *i2c_adap;
+ unsigned int num_subdevs;
+ int ret;
+ int i;
+ int k;
+
+ vpfe_cfg = vpfe_dev->cfg;
+ i2c_adap = i2c_get_adapter(1);
+ num_subdevs = vpfe_cfg->num_subdevs;
+ vpfe_dev->sd =
+ kzalloc(sizeof(struct v4l2_subdev *)*num_subdevs, GFP_KERNEL);
+ if (vpfe_dev->sd == NULL) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "unable to allocate memory for subdevice\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0, k = 0; i < num_subdevs; i++) {
+ sdinfo = &vpfe_cfg->sub_devs[i];
+ /*
+ * register subdevices based on interface setting. Currently
+ * tvp5146 and mt9p031 cannot co-exists due to i2c address
+ * conflicts. So only one of them is registered. Re-visit this
+ * once we have support for i2c switch handling in i2c driver
+ * framework
+ */
+ if (interface == sdinfo->is_camera) {
+ /* setup input path */
+ if (vpfe_cfg->setup_input &&
+ vpfe_cfg->setup_input(sdinfo->grp_id) < 0) {
+ ret = -EFAULT;
+ v4l2_info(&vpfe_dev->v4l2_dev,
+ "could not setup input for %s\n",
+ sdinfo->module_name);
+ goto probe_sd_out;
+ }
+ /* Load up the subdevice */
+ vpfe_dev->sd[k] =
+ v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev,
+ i2c_adap, &sdinfo->board_info,
+ NULL);
+ if (vpfe_dev->sd[k]) {
+ v4l2_info(&vpfe_dev->v4l2_dev,
+ "v4l2 sub device %s registered\n",
+ sdinfo->module_name);
+
+ vpfe_dev->sd[k]->grp_id = sdinfo->grp_id;
+ k++;
+
+ sdinfo->registered = 1;
+ }
+ } else {
+ v4l2_info(&vpfe_dev->v4l2_dev,
+ "v4l2 sub device %s is not registered\n",
+ sdinfo->module_name);
+ }
+ }
+ vpfe_dev->num_ext_subdevs = k;
+
+ return 0;
+
+probe_sd_out:
+ kzfree(vpfe_dev->sd);
+
+ return ret;
+}
+
+/*
+ * vpfe_register_entities() - register all v4l2 subdevs and media entities
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * registers all v4l2 subdevs and media entities, and creates links
+ * between the entities
+ */
+static int vpfe_register_entities(struct vpfe_device *vpfe_dev)
+{
+ unsigned int flags = 0;
+ int ret;
+ int i;
+
+ /* register i2c devices first */
+ ret = register_i2c_devices(vpfe_dev);
+ if (ret)
+ return ret;
+
+ /* register rest of the sub-devs */
+ ret = vpfe_isif_register_entities(&vpfe_dev->vpfe_isif,
+ &vpfe_dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ ret = vpfe_ipipeif_register_entities(&vpfe_dev->vpfe_ipipeif,
+ &vpfe_dev->v4l2_dev);
+ if (ret)
+ goto out_isif_register;
+
+ ret = vpfe_ipipe_register_entities(&vpfe_dev->vpfe_ipipe,
+ &vpfe_dev->v4l2_dev);
+ if (ret)
+ goto out_ipipeif_register;
+
+ ret = vpfe_resizer_register_entities(&vpfe_dev->vpfe_resizer,
+ &vpfe_dev->v4l2_dev);
+ if (ret)
+ goto out_ipipe_register;
+
+ /* create links now, starting with external(i2c) entities */
+ for (i = 0; i < vpfe_dev->num_ext_subdevs; i++)
+		/* if the entity has no pads (e.g. an amplifier),
+		   a link can't be established */
+ if (vpfe_dev->sd[i]->entity.num_pads) {
+ ret = media_entity_create_link(&vpfe_dev->sd[i]->entity,
+ 0, &vpfe_dev->vpfe_isif.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_resizer_register;
+ }
+
+ ret = media_entity_create_link(&vpfe_dev->vpfe_isif.subdev.entity, 1,
+ &vpfe_dev->vpfe_ipipeif.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_resizer_register;
+
+ ret = media_entity_create_link(&vpfe_dev->vpfe_ipipeif.subdev.entity, 1,
+ &vpfe_dev->vpfe_ipipe.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_resizer_register;
+
+ ret = media_entity_create_link(&vpfe_dev->vpfe_ipipe.subdev.entity,
+ 1, &vpfe_dev->vpfe_resizer.crop_resizer.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_resizer_register;
+
+ ret = media_entity_create_link(&vpfe_dev->vpfe_ipipeif.subdev.entity, 1,
+ &vpfe_dev->vpfe_resizer.crop_resizer.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_resizer_register;
+
+ ret = v4l2_device_register_subdev_nodes(&vpfe_dev->v4l2_dev);
+ if (ret < 0)
+ goto out_resizer_register;
+
+ return 0;
+
+out_resizer_register:
+ vpfe_resizer_unregister_entities(&vpfe_dev->vpfe_resizer);
+out_ipipe_register:
+ vpfe_ipipe_unregister_entities(&vpfe_dev->vpfe_ipipe);
+out_ipipeif_register:
+ vpfe_ipipeif_unregister_entities(&vpfe_dev->vpfe_ipipeif);
+out_isif_register:
+ vpfe_isif_unregister_entities(&vpfe_dev->vpfe_isif);
+
+ return ret;
+}
+
+/*
+ * vpfe_unregister_entities() - unregister all v4l2 subdevs and media entities
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * unregister all v4l2 subdevs and media entities
+ */
+static void vpfe_unregister_entities(struct vpfe_device *vpfe_dev)
+{
+ vpfe_isif_unregister_entities(&vpfe_dev->vpfe_isif);
+ vpfe_ipipeif_unregister_entities(&vpfe_dev->vpfe_ipipeif);
+ vpfe_ipipe_unregister_entities(&vpfe_dev->vpfe_ipipe);
+ vpfe_resizer_unregister_entities(&vpfe_dev->vpfe_resizer);
+}
+
+/*
+ * vpfe_cleanup_modules() - cleanup all non-i2c v4l2 subdevs
+ * @vpfe_dev - ptr to vpfe capture device
+ * @pdev - pointer to platform device
+ *
+ * cleanup all v4l2 subdevs
+ */
+static void vpfe_cleanup_modules(struct vpfe_device *vpfe_dev,
+ struct platform_device *pdev)
+{
+ vpfe_isif_cleanup(&vpfe_dev->vpfe_isif, pdev);
+ vpfe_ipipeif_cleanup(&vpfe_dev->vpfe_ipipeif, pdev);
+ vpfe_ipipe_cleanup(&vpfe_dev->vpfe_ipipe, pdev);
+ vpfe_resizer_cleanup(&vpfe_dev->vpfe_resizer, pdev);
+}
+
+/*
+ * vpfe_initialize_modules() - initialize all non-i2c v4l2 subdevs
+ * @vpfe_dev - ptr to vpfe capture device
+ * @pdev - pointer to platform device
+ *
+ * initialize all v4l2 subdevs and media entities
+ */
+static int vpfe_initialize_modules(struct vpfe_device *vpfe_dev,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ ret = vpfe_isif_init(&vpfe_dev->vpfe_isif, pdev);
+ if (ret)
+ return ret;
+
+ ret = vpfe_ipipeif_init(&vpfe_dev->vpfe_ipipeif, pdev);
+ if (ret)
+ goto out_isif_init;
+
+ ret = vpfe_ipipe_init(&vpfe_dev->vpfe_ipipe, pdev);
+ if (ret)
+ goto out_ipipeif_init;
+
+ ret = vpfe_resizer_init(&vpfe_dev->vpfe_resizer, pdev);
+ if (ret)
+ goto out_ipipe_init;
+
+ return 0;
+
+out_ipipe_init:
+ vpfe_ipipe_cleanup(&vpfe_dev->vpfe_ipipe, pdev);
+out_ipipeif_init:
+ vpfe_ipipeif_cleanup(&vpfe_dev->vpfe_ipipeif, pdev);
+out_isif_init:
+ vpfe_isif_cleanup(&vpfe_dev->vpfe_isif, pdev);
+
+ return ret;
+}
+
+/*
+ * vpfe_probe() : vpfe probe function
+ * @pdev: platform device pointer
+ *
+ * This function creates device entries by registering itself to the V4L2
+ * driver framework and initializes the fields of each device object.
+ */
+static int vpfe_probe(struct platform_device *pdev)
+{
+ struct vpfe_device *vpfe_dev;
+ struct resource *res1;
+ int ret = -ENOMEM;
+
+ vpfe_dev = kzalloc(sizeof(*vpfe_dev), GFP_KERNEL);
+ if (!vpfe_dev) {
+ v4l2_err(pdev->dev.driver,
+ "Failed to allocate memory for vpfe_dev\n");
+ return ret;
+ }
+
+ if (pdev->dev.platform_data == NULL) {
+ v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n");
+ ret = -ENOENT;
+ goto probe_free_dev_mem;
+ }
+
+ vpfe_dev->cfg = pdev->dev.platform_data;
+ if (vpfe_dev->cfg->card_name == NULL ||
+ vpfe_dev->cfg->sub_devs == NULL) {
+ v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n");
+ ret = -ENOENT;
+ goto probe_free_dev_mem;
+ }
+
+ /* Get VINT0 irq resource */
+ res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res1) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to get interrupt for VINT0\n");
+ ret = -ENOENT;
+ goto probe_free_dev_mem;
+ }
+ vpfe_dev->ccdc_irq0 = res1->start;
+
+ /* Get VINT1 irq resource */
+ res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!res1) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to get interrupt for VINT1\n");
+ ret = -ENOENT;
+ goto probe_free_dev_mem;
+ }
+ vpfe_dev->ccdc_irq1 = res1->start;
+
+ /* Get DMA irq resource */
+ res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
+ if (!res1) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to get interrupt for DMA\n");
+ ret = -ENOENT;
+ goto probe_free_dev_mem;
+ }
+ vpfe_dev->imp_dma_irq = res1->start;
+
+ vpfe_dev->pdev = &pdev->dev;
+
+ /* enable vpss clocks */
+ ret = vpfe_enable_clock(vpfe_dev);
+ if (ret)
+ goto probe_free_dev_mem;
+
+	ret = vpfe_initialize_modules(vpfe_dev, pdev);
+	if (ret)
+		goto probe_disable_clock;
+
+ vpfe_dev->media_dev.dev = vpfe_dev->pdev;
+	strlcpy(vpfe_dev->media_dev.model, "davinci-media",
+		sizeof(vpfe_dev->media_dev.model));
+
+ ret = media_device_register(&vpfe_dev->media_dev);
+ if (ret) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to register media device.\n");
+ goto probe_out_entities_cleanup;
+ }
+
+ vpfe_dev->v4l2_dev.mdev = &vpfe_dev->media_dev;
+ ret = v4l2_device_register(&pdev->dev, &vpfe_dev->v4l2_dev);
+ if (ret) {
+ v4l2_err(pdev->dev.driver, "Unable to register v4l2 device.\n");
+ goto probe_out_media_unregister;
+ }
+
+ v4l2_info(&vpfe_dev->v4l2_dev, "v4l2 device registered\n");
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, vpfe_dev);
+ /* register subdevs/entities */
+	ret = vpfe_register_entities(vpfe_dev);
+	if (ret)
+		goto probe_out_v4l2_unregister;
+
+ ret = vpfe_attach_irq(vpfe_dev);
+ if (ret)
+ goto probe_out_entities_unregister;
+
+ return 0;
+
+probe_out_entities_unregister:
+ vpfe_unregister_entities(vpfe_dev);
+ kzfree(vpfe_dev->sd);
+probe_out_v4l2_unregister:
+ v4l2_device_unregister(&vpfe_dev->v4l2_dev);
+probe_out_media_unregister:
+ media_device_unregister(&vpfe_dev->media_dev);
+probe_out_entities_cleanup:
+ vpfe_cleanup_modules(vpfe_dev, pdev);
+probe_disable_clock:
+ vpfe_disable_clock(vpfe_dev);
+probe_free_dev_mem:
+ kzfree(vpfe_dev);
+
+ return ret;
+}
+
+/*
+ * vpfe_remove() : This function un-registers the device from the V4L2 driver
+ */
+static int vpfe_remove(struct platform_device *pdev)
+{
+ struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
+
+ v4l2_info(pdev->dev.driver, "vpfe_remove\n");
+
+ kzfree(vpfe_dev->sd);
+ vpfe_detach_irq(vpfe_dev);
+ vpfe_unregister_entities(vpfe_dev);
+ vpfe_cleanup_modules(vpfe_dev, pdev);
+ v4l2_device_unregister(&vpfe_dev->v4l2_dev);
+ media_device_unregister(&vpfe_dev->media_dev);
+ vpfe_disable_clock(vpfe_dev);
+ kzfree(vpfe_dev);
+
+ return 0;
+}
+
+static struct platform_driver vpfe_driver = {
+ .driver = {
+ .name = CAPTURE_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = vpfe_probe,
+ .remove = vpfe_remove,
+};
+
+/**
+ * vpfe_init : This function registers device driver
+ */
+static __init int vpfe_init(void)
+{
+ /* Register driver to the kernel */
+ return platform_driver_register(&vpfe_driver);
+}
+
+/**
+ * vpfe_cleanup : This function un-registers device driver
+ */
+static void vpfe_cleanup(void)
+{
+ platform_driver_unregister(&vpfe_driver);
+}
+
+module_init(vpfe_init);
+module_exit(vpfe_cleanup);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h
new file mode 100644
index 000000000000..68f6fe43a5b5
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_MC_CAPTURE_H
+#define _DAVINCI_VPFE_MC_CAPTURE_H
+
+#include "dm365_ipipe.h"
+#include "dm365_ipipeif.h"
+#include "dm365_isif.h"
+#include "dm365_resizer.h"
+#include "vpfe_video.h"
+
+#define VPFE_MAJOR_RELEASE 0
+#define VPFE_MINOR_RELEASE 0
+#define VPFE_BUILD 1
+#define VPFE_CAPTURE_VERSION_CODE ((VPFE_MAJOR_RELEASE << 16) | \
+ (VPFE_MINOR_RELEASE << 8) | \
+ VPFE_BUILD)
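+/*
+ * With the values above this evaluates to (0 << 16) | (0 << 8) | 1 == 1,
+ * i.e. release 0.0.1.
+ */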
+
+/* IPIPE hardware limits */
+#define IPIPE_MAX_OUTPUT_WIDTH_A 2176
+#define IPIPE_MAX_OUTPUT_WIDTH_B 640
+
+/* Based on max resolution supported. QXGA */
+#define IPIPE_MAX_OUTPUT_HEIGHT_A 1536
+/* Based on max resolution supported. VGA */
+#define IPIPE_MAX_OUTPUT_HEIGHT_B 480
+
+#define to_vpfe_device(ptr_module) \
+ container_of(ptr_module, struct vpfe_device, vpfe_##ptr_module)
+#define to_device(ptr_module) \
+ (to_vpfe_device(ptr_module)->dev)
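+/*
+ * Example: with "struct vpfe_resizer_device *resizer" pointing at the
+ * vpfe_resizer member of a struct vpfe_device, to_vpfe_device(resizer)
+ * expands to container_of(resizer, struct vpfe_device, vpfe_resizer).
+ */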
+
+struct vpfe_device {
+ /* external registered sub devices */
+ struct v4l2_subdev **sd;
+ /* number of registered external subdevs */
+ unsigned int num_ext_subdevs;
+ /* vpfe cfg */
+ struct vpfe_config *cfg;
+ /* clock ptrs for vpfe capture */
+ struct clk **clks;
+ /* V4l2 device */
+ struct v4l2_device v4l2_dev;
+ /* parent device */
+ struct device *pdev;
+ /* IRQ number for DMA transfer completion at the image processor */
+ unsigned int imp_dma_irq;
+ /* CCDC IRQs used when CCDC/ISIF output to SDRAM */
+ unsigned int ccdc_irq0;
+ unsigned int ccdc_irq1;
+ /* maximum video memory that is available*/
+ unsigned int video_limit;
+ /* media device */
+ struct media_device media_dev;
+ /* ccdc subdevice */
+ struct vpfe_isif_device vpfe_isif;
+ /* ipipeif subdevice */
+ struct vpfe_ipipeif_device vpfe_ipipeif;
+ /* ipipe subdevice */
+ struct vpfe_ipipe_device vpfe_ipipe;
+ /* resizer subdevice */
+ struct vpfe_resizer_device vpfe_resizer;
+};
+
+/* File handle structure */
+struct vpfe_fh {
+ struct v4l2_fh vfh;
+ struct vpfe_video_device *video;
+ /* Indicates whether this file handle is doing IO */
+ u8 io_allowed;
+ /* Used to keep track priority of this instance */
+ enum v4l2_priority prio;
+};
+
+void mbus_to_pix(const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_pix_format *pix);
+
+#endif /* _DAVINCI_VPFE_MC_CAPTURE_H */
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
new file mode 100644
index 000000000000..99ccbebea598
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -0,0 +1,1620 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-ioctl.h>
+
+#include "vpfe.h"
+#include "vpfe_mc_capture.h"
+
+/* minimum number of buffers needed in cont-mode */
+#define MIN_NUM_BUFFERS 3
+
+static int debug;
+
+/* get the media entity of the currently active external subdev */
+static struct media_entity *vpfe_get_input_entity
+ (struct vpfe_video_device *video)
+{
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct media_pad *remote;
+
+ remote = media_entity_remote_source(&vpfe_dev->vpfe_isif.pads[0]);
+ if (remote == NULL) {
+ pr_err("Invalid media connection to isif/ccdc\n");
+ return NULL;
+ }
+ return remote->entity;
+}
+
+/* updates external subdev(sensor/decoder) which is active */
+static int vpfe_update_current_ext_subdev(struct vpfe_video_device *video)
+{
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_config *vpfe_cfg;
+ struct v4l2_subdev *subdev;
+ struct media_pad *remote;
+ int i;
+
+ remote = media_entity_remote_source(&vpfe_dev->vpfe_isif.pads[0]);
+ if (remote == NULL) {
+ pr_err("Invalid media connection to isif/ccdc\n");
+ return -EINVAL;
+ }
+
+ subdev = media_entity_to_v4l2_subdev(remote->entity);
+ vpfe_cfg = vpfe_dev->pdev->platform_data;
+ for (i = 0; i < vpfe_cfg->num_subdevs; i++) {
+ if (!strcmp(vpfe_cfg->sub_devs[i].module_name, subdev->name)) {
+ video->current_ext_subdev = &vpfe_cfg->sub_devs[i];
+ break;
+ }
+ }
+
+	/* the user has not linked a decoder/sensor to isif/ccdc */
+ if (i == vpfe_cfg->num_subdevs) {
+ pr_err("Invalid media chain connection to isif/ccdc\n");
+ return -EINVAL;
+ }
+ /* find the v4l2 subdev pointer */
+ for (i = 0; i < vpfe_dev->num_ext_subdevs; i++) {
+ if (!strcmp(video->current_ext_subdev->module_name,
+ vpfe_dev->sd[i]->name))
+ video->current_ext_subdev->subdev = vpfe_dev->sd[i];
+ }
+ return 0;
+}
+
+/* get the subdev which is connected to the output video node */
+static struct v4l2_subdev *
+vpfe_video_remote_subdev(struct vpfe_video_device *video, u32 *pad)
+{
+ struct media_pad *remote = media_entity_remote_source(&video->pad);
+
+ if (remote == NULL || remote->entity->type != MEDIA_ENT_T_V4L2_SUBDEV)
+ return NULL;
+ if (pad)
+ *pad = remote->index;
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+/* get the format set at output pad of the adjacent subdev */
+static int
+__vpfe_video_get_format(struct vpfe_video_device *video,
+ struct v4l2_format *format)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ struct media_pad *remote;
+ u32 pad;
+ int ret;
+
+ subdev = vpfe_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ remote = media_entity_remote_source(&video->pad);
+ fmt.pad = remote->index;
+
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret == -ENOIOCTLCMD)
+ return -EINVAL;
+
+ format->type = video->type;
+ /* convert mbus_format to v4l2_format */
+ v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
+ mbus_to_pix(&fmt.format, &format->fmt.pix);
+
+ return 0;
+}
+
+/* make a note of pipeline details */
+static void vpfe_prepare_pipeline(struct vpfe_video_device *video)
+{
+ struct media_entity *entity = &video->video_dev.entity;
+ struct media_device *mdev = entity->parent;
+ struct vpfe_pipeline *pipe = &video->pipe;
+ struct vpfe_video_device *far_end = NULL;
+ struct media_entity_graph graph;
+
+ pipe->input_num = 0;
+ pipe->output_num = 0;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ pipe->inputs[pipe->input_num++] = video;
+ else
+ pipe->outputs[pipe->output_num++] = video;
+
+ mutex_lock(&mdev->graph_mutex);
+ media_entity_graph_walk_start(&graph, entity);
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+ if (entity == &video->video_dev.entity)
+ continue;
+ if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+ continue;
+ far_end = to_vpfe_video(media_entity_to_video_device(entity));
+ if (far_end->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ pipe->inputs[pipe->input_num++] = far_end;
+ else
+ pipe->outputs[pipe->output_num++] = far_end;
+ }
+ mutex_unlock(&mdev->graph_mutex);
+}
+
+/* update pipe state selected by user */
+static int vpfe_update_pipe_state(struct vpfe_video_device *video)
+{
+ struct vpfe_pipeline *pipe = &video->pipe;
+ int ret;
+
+ vpfe_prepare_pipeline(video);
+
+	/*
+	 * Find out if there is any input video node; if yes, the pipeline
+	 * runs in single-shot mode.
+	 */
+ if (pipe->input_num == 0) {
+ pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
+ ret = vpfe_update_current_ext_subdev(video);
+ if (ret) {
+ pr_err("Invalid external subdev\n");
+ return ret;
+ }
+ } else {
+ pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
+ }
+ video->initialized = 1;
+ video->skip_frame_count = 1;
+ video->skip_frame_count_init = 1;
+ return 0;
+}
+
+/* checks whether the pipeline is ready for enabling */
+int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe)
+{
+ int i;
+
+ for (i = 0; i < pipe->input_num; i++)
+ if (!pipe->inputs[i]->started ||
+ pipe->inputs[i]->state != VPFE_VIDEO_BUFFER_QUEUED)
+ return 0;
+ for (i = 0; i < pipe->output_num; i++)
+ if (!pipe->outputs[i]->started ||
+ pipe->outputs[i]->state != VPFE_VIDEO_BUFFER_QUEUED)
+ return 0;
+ return 1;
+}
+
+/**
+ * Validate a pipeline by checking both ends of all links for format
+ * discrepancies.
+ *
+ * Return 0 if all formats match, or -EPIPE if at least one link is found with
+ * different formats on its two ends.
+ */
+static int vpfe_video_validate_pipeline(struct vpfe_pipeline *pipe)
+{
+ struct v4l2_subdev_format fmt_source;
+ struct v4l2_subdev_format fmt_sink;
+ struct v4l2_subdev *subdev;
+ struct media_pad *pad;
+ int ret;
+
+	/*
+	 * It should not matter whether this is outputs[0] or outputs[1]: the
+	 * general idea is to traverse backwards, and the output video node
+	 * always carries the format of the pad it is connected to.
+	 */
+ subdev = vpfe_video_remote_subdev(pipe->outputs[0], NULL);
+ if (subdev == NULL)
+ return -EPIPE;
+
+ while (1) {
+ /* Retrieve the sink format */
+ pad = &subdev->entity.pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt_sink.pad = pad->index;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL,
+ &fmt_sink);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ /* Retrieve the source format */
+ pad = media_entity_remote_source(pad);
+ if (pad == NULL ||
+ pad->entity->type != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ subdev = media_entity_to_v4l2_subdev(pad->entity);
+
+ fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt_source.pad = pad->index;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ /* Check if the two ends match */
+ if (fmt_source.format.code != fmt_sink.format.code ||
+ fmt_source.format.width != fmt_sink.format.width ||
+ fmt_source.format.height != fmt_sink.format.height)
+ return -EPIPE;
+ }
+ return 0;
+}
+
+/*
+ * vpfe_pipeline_enable() - Enable streaming on a pipeline
+ * @vpfe_dev: vpfe device
+ * @pipe: vpfe pipeline
+ *
+ * Walk the entities chain starting at the pipeline output video node and start
+ * all modules in the chain in the given mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int vpfe_pipeline_enable(struct vpfe_pipeline *pipe)
+{
+ struct media_entity_graph graph;
+ struct media_entity *entity;
+ struct v4l2_subdev *subdev;
+ struct media_device *mdev;
+ int ret = 0;
+
+ if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
+ entity = vpfe_get_input_entity(pipe->outputs[0]);
+ else
+ entity = &pipe->inputs[0]->video_dev.entity;
+
+ mdev = entity->parent;
+ mutex_lock(&mdev->graph_mutex);
+ media_entity_graph_walk_start(&graph, entity);
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+
+ if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
+ continue;
+ subdev = media_entity_to_v4l2_subdev(entity);
+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ break;
+ }
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+}
+
+/*
+ * vpfe_pipeline_disable() - Disable streaming on a pipeline
+ * @vpfe_dev: vpfe device
+ * @pipe: VPFE pipeline
+ *
+ * Walk the entities chain starting at the pipeline output video node and stop
+ * all modules in the chain.
+ *
+ * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
+ * can't be stopped.
+ */
+static int vpfe_pipeline_disable(struct vpfe_pipeline *pipe)
+{
+ struct media_entity_graph graph;
+ struct media_entity *entity;
+ struct v4l2_subdev *subdev;
+ struct media_device *mdev;
+ int ret = 0;
+
+ if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
+ entity = vpfe_get_input_entity(pipe->outputs[0]);
+ else
+ entity = &pipe->inputs[0]->video_dev.entity;
+
+ mdev = entity->parent;
+ mutex_lock(&mdev->graph_mutex);
+ media_entity_graph_walk_start(&graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+
+ if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
+ continue;
+ subdev = media_entity_to_v4l2_subdev(entity);
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ break;
+ }
+ mutex_unlock(&mdev->graph_mutex);
+
+	return (ret == 0) ? ret : -ETIMEDOUT;
+}
+
+/*
+ * vpfe_pipeline_set_stream() - Enable/disable streaming on a pipeline
+ * @vpfe_dev: VPFE device
+ * @pipe: VPFE pipeline
+ * @state: Stream state (stopped or active)
+ *
+ * Set the pipeline to the given stream state.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int vpfe_pipeline_set_stream(struct vpfe_pipeline *pipe,
+ enum vpfe_pipeline_stream_state state)
+{
+ if (state == VPFE_PIPELINE_STREAM_STOPPED)
+ return vpfe_pipeline_disable(pipe);
+
+ return vpfe_pipeline_enable(pipe);
+}
+
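+/*
+ * all_videos_stopped() - check whether all video nodes in the pipeline
+ * have stopped streaming
+ * @video: video device whose pipeline is checked
+ *
+ * Return 1 if no input or output video node is still started, 0 otherwise.
+ */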
+static int all_videos_stopped(struct vpfe_video_device *video)
+{
+ struct vpfe_pipeline *pipe = &video->pipe;
+ int i;
+
+ for (i = 0; i < pipe->input_num; i++)
+ if (pipe->inputs[i]->started)
+ return 0;
+ for (i = 0; i < pipe->output_num; i++)
+ if (pipe->outputs[i]->started)
+ return 0;
+ return 1;
+}
+
+/*
+ * vpfe_open() - open video device
+ * @file: file pointer
+ *
+ * initialize media pipeline state, allocate memory for file handle
+ *
+ * Return 0 if successful, -ENOMEM or -ENODEV otherwise.
+ */
+static int vpfe_open(struct file *file)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_fh *handle;
+
+ /* Allocate memory for the file handle object */
+ handle = kzalloc(sizeof(struct vpfe_fh), GFP_KERNEL);
+
+ if (handle == NULL)
+ return -ENOMEM;
+
+ v4l2_fh_init(&handle->vfh, &video->video_dev);
+ v4l2_fh_add(&handle->vfh);
+
+ mutex_lock(&video->lock);
+ /* If the decoder is not initialized, initialize it */
+ if (!video->initialized && vpfe_update_pipe_state(video)) {
+ mutex_unlock(&video->lock);
+ return -ENODEV;
+ }
+ /* Increment device users counter */
+ video->usrs++;
+ /* Set io_allowed member to false */
+ handle->io_allowed = 0;
+ v4l2_prio_open(&video->prio, &handle->prio);
+ handle->video = video;
+ file->private_data = &handle->vfh;
+ mutex_unlock(&video->lock);
+
+ return 0;
+}
+
+/* get the next buffer available from dma queue */
+static unsigned long
+vpfe_video_get_next_buffer(struct vpfe_video_device *video)
+{
+ video->cur_frm = video->next_frm =
+ list_entry(video->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+
+ list_del(&video->next_frm->list);
+ video->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ return vb2_dma_contig_plane_dma_addr(&video->next_frm->vb, 0);
+}
+
+/* schedule the next buffer which is available on dma queue */
+void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video)
+{
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ unsigned long addr;
+
+ if (list_empty(&video->dma_queue))
+ return;
+
+ video->next_frm = list_entry(video->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+
+ if (VPFE_PIPELINE_STREAM_SINGLESHOT == video->pipe.state)
+ video->cur_frm = video->next_frm;
+
+ list_del(&video->next_frm->list);
+ video->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ addr = vb2_dma_contig_plane_dma_addr(&video->next_frm->vb, 0);
+ video->ops->queue(vpfe_dev, addr);
+ video->state = VPFE_VIDEO_BUFFER_QUEUED;
+}
+
+/* schedule the buffer for capturing bottom field */
+void vpfe_video_schedule_bottom_field(struct vpfe_video_device *video)
+{
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ unsigned long addr;
+
+ addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb, 0);
+ addr += video->field_off;
+ video->ops->queue(vpfe_dev, addr);
+}
+
+/* make buffer available for dequeue */
+void vpfe_video_process_buffer_complete(struct vpfe_video_device *video)
+{
+ struct vpfe_pipeline *pipe = &video->pipe;
+
+ do_gettimeofday(&video->cur_frm->vb.v4l2_buf.timestamp);
+ vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_DONE);
+ if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
+ video->cur_frm = video->next_frm;
+}
+
+/* vpfe_stop_capture() - stop streaming */
+static void vpfe_stop_capture(struct vpfe_video_device *video)
+{
+ struct vpfe_pipeline *pipe = &video->pipe;
+
+ video->started = 0;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return;
+ if (all_videos_stopped(video))
+ vpfe_pipeline_set_stream(pipe,
+ VPFE_PIPELINE_STREAM_STOPPED);
+}
+
+/*
+ * vpfe_release() - release video device
+ * @file: file pointer
+ *
+ * deletes buffer queue, frees the buffers and the vpfe file handle
+ *
+ * Return 0
+ */
+static int vpfe_release(struct file *file)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct v4l2_fh *vfh = file->private_data;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_fh *fh = container_of(vfh, struct vpfe_fh, vfh);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n");
+
+ /* Get the device lock */
+ mutex_lock(&video->lock);
+ /* if this instance is doing IO */
+ if (fh->io_allowed) {
+ if (video->started) {
+ vpfe_stop_capture(video);
+ /* mark pipe state as stopped in vpfe_release();
+ the application might call streamon() after streamoff(),
+ in which case the driver has to restart streaming.
+ */
+ video->pipe.state = VPFE_PIPELINE_STREAM_STOPPED;
+ vb2_streamoff(&video->buffer_queue,
+ video->buffer_queue.type);
+ }
+ video->io_usrs = 0;
+ /* Free buffers allocated */
+ vb2_queue_release(&video->buffer_queue);
+ vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
+ }
+ /* Decrement device users counter */
+ video->usrs--;
+ /* Close the priority */
+ v4l2_prio_close(&video->prio, fh->prio);
+ /* If this is the last file handle */
+ if (!video->usrs)
+ video->initialized = 0;
+ mutex_unlock(&video->lock);
+ file->private_data = NULL;
+ /* Free memory allocated to file handle object */
+ v4l2_fh_del(vfh);
+ kzfree(fh);
+ return 0;
+}
+
+/*
+ * vpfe_mmap() - map kernel space buffers into user space
+ */
+static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n");
+ return vb2_mmap(&video->buffer_queue, vma);
+}
+
+/*
+ * vpfe_poll() - handle the select/poll system calls
+ */
+static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n");
+ if (video->started)
+ return vb2_poll(&video->buffer_queue, file, wait);
+ return 0;
+}
+
+/* vpfe capture driver file operations */
+static const struct v4l2_file_operations vpfe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpfe_open,
+ .release = vpfe_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vpfe_mmap,
+ .poll = vpfe_poll
+};
+
+/*
+ * vpfe_querycap() - query capabilities of video device
+ * @file: file pointer
+ * @priv: void pointer
+ * @cap: pointer to v4l2_capability structure
+ *
+ * fills v4l2 capabilities structure
+ *
+ * Return 0
+ */
+static int vpfe_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ else
+ cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->device_caps = cap->capabilities;
+ cap->version = VPFE_CAPTURE_VERSION_CODE;
+ strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
+ strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
+
+ return 0;
+}
+
+/*
+ * vpfe_g_fmt() - get the format which is active on video device
+ * @file: file pointer
+ * @priv: void pointer
+ * @fmt: pointer to v4l2_format structure
+ *
+ * fills v4l2 format structure with active format
+ *
+ * Return 0
+ */
+static int vpfe_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt\n");
+ /* Fill in the information about format */
+ *fmt = video->fmt;
+ return 0;
+}
+
+/*
+ * vpfe_enum_fmt() - enum formats supported on media chain
+ * @file: file pointer
+ * @priv: void pointer
+ * @fmt: pointer to v4l2_fmtdesc structure
+ *
+ * fills v4l2_fmtdesc structure with output format set on adjacent subdev,
+ * only one format is enumerated as subdevs are already configured
+ *
+ * Return 0 if successful, error code otherwise
+ */
+static int vpfe_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt mbus;
+ struct v4l2_subdev *subdev;
+ struct v4l2_format format;
+ struct media_pad *remote;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
+
+ /* since the subdev pad format is already set,
+ only one pixel format is available */
+ if (fmt->index > 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid index\n");
+ return -EINVAL;
+ }
+ /* get the remote pad */
+ remote = media_entity_remote_source(&video->pad);
+ if (remote == NULL) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "invalid remote pad for video node\n");
+ return -EINVAL;
+ }
+ /* get the remote subdev */
+ subdev = vpfe_video_remote_subdev(video, NULL);
+ if (subdev == NULL) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "invalid remote subdev for video node\n");
+ return -EINVAL;
+ }
+ sd_fmt.pad = remote->index;
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ /* get output format of remote subdev */
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+ if (ret) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "invalid remote subdev for video node\n");
+ return ret;
+ }
+ /* convert to pix format */
+ mbus.code = sd_fmt.format.code;
+ mbus_to_pix(&mbus, &format.fmt.pix);
+ /* copy the result */
+ fmt->pixelformat = format.fmt.pix.pixelformat;
+
+ return 0;
+}
+
+/*
+ * vpfe_s_fmt() - set the format on video device
+ * @file: file pointer
+ * @priv: void pointer
+ * @fmt: pointer to v4l2_format structure
+ *
+ * validate and set the format on video device
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct v4l2_format format;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
+ /* If streaming is started, return error */
+ if (video->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+ /* get adjacent subdev's output pad format */
+ ret = __vpfe_video_get_format(video, &format);
+ if (ret)
+ return ret;
+ *fmt = format;
+ video->fmt = *fmt;
+ return 0;
+}
+
+/*
+ * vpfe_try_fmt() - try the format on video device
+ * @file: file pointer
+ * @priv: void pointer
+ * @fmt: pointer to v4l2_format structure
+ *
+ * validate the format, update with correct format
+ * based on output format set on adjacent subdev
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct v4l2_format format;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
+ /* get adjacent subdev's output pad format */
+ ret = __vpfe_video_get_format(video, &format);
+ if (ret)
+ return ret;
+
+ *fmt = format;
+ return 0;
+}
+
+/*
+ * vpfe_enum_input() - enum inputs supported on media chain
+ * @file: file pointer
+ * @priv: void pointer
+ * @fmt: pointer to v4l2_fmtdesc structure
+ *
+ * fills v4l2_input structure with input available on media chain,
+ * only one input is enumerated as the media chain is set up by this time
+ *
+ * Return 0 if successful, -EINVAL if the media chain is invalid
+ */
+static int vpfe_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_ext_subdev_info *sdinfo = video->current_ext_subdev;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n");
+ /* enumerate inputs from the subdev the user has chosen through the media controller */
+ if (inp->index < sdinfo->num_inputs) {
+ memcpy(inp, &sdinfo->inputs[inp->index],
+ sizeof(struct v4l2_input));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * vpfe_g_input() - get index of the input which is active
+ * @file: file pointer
+ * @priv: void pointer
+ * @index: pointer to unsigned int
+ *
+ * set index with input index which is active
+ */
+static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n");
+
+ *index = video->current_input;
+ return 0;
+}
+
+/*
+ * vpfe_s_input() - set input which is pointed by input index
+ * @file: file pointer
+ * @priv: void pointer
+ * @index: pointer to unsigned int
+ *
+ * set input on external subdev
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_ext_subdev_info *sdinfo;
+ struct vpfe_route *route;
+ struct v4l2_input *inps;
+ u32 output;
+ u32 input;
+ int ret;
+ int i;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
+
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
+ /*
+ * If streaming is started return device busy
+ * error
+ */
+ if (video->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is on\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+
+ sdinfo = video->current_ext_subdev;
+ if (!sdinfo->registered) {
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ if (vpfe_dev->cfg->setup_input &&
+ vpfe_dev->cfg->setup_input(sdinfo->grp_id) < 0) {
+ ret = -EFAULT;
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "couldn't setup input for %s\n",
+ sdinfo->module_name);
+ goto unlock_out;
+ }
+ route = &sdinfo->routes[index];
+ if (route && sdinfo->can_route) {
+ input = route->input;
+ output = route->output;
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
+ sdinfo->grp_id, video,
+ s_routing, input, output, 0);
+ if (ret) {
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "s_input:error in setting input in decoder\n");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ }
+ /* set standards set by subdev in video device */
+ for (i = 0; i < sdinfo->num_inputs; i++) {
+ inps = &sdinfo->inputs[i];
+ video->video_dev.tvnorms |= inps->std;
+ }
+ video->current_input = index;
+unlock_out:
+ mutex_unlock(&video->lock);
+ return ret;
+}
+
+/*
+ * vpfe_querystd() - query std which is being input on external subdev
+ * @file: file pointer
+ * @priv: void pointer
+ * @std_id: pointer to v4l2_std_id structure
+ *
+ * call external subdev through v4l2_device_call_until_err to
+ * get the std that is being active.
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_ext_subdev_info *sdinfo;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
+
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
+ sdinfo = video->current_ext_subdev;
+ /* Call querystd function of decoder device */
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+ video, querystd, std_id);
+ mutex_unlock(&video->lock);
+ return ret;
+}
+
+/*
+ * vpfe_s_std() - set std on external subdev
+ * @file: file pointer
+ * @priv: void pointer
+ * @std_id: pointer to v4l2_std_id structure
+ *
+ * set std pointed by std_id on external subdev by calling it using
+ * v4l2_device_call_until_err
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_ext_subdev_info *sdinfo;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
+
+ /* Call decoder driver function to set the standard */
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
+ sdinfo = video->current_ext_subdev;
+ /* If streaming is started, return device busy error */
+ if (video->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "streaming is started\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+ core, s_std, *std_id);
+ if (ret < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Failed to set standard\n");
+ video->stdid = V4L2_STD_UNKNOWN;
+ goto unlock_out;
+ }
+ video->stdid = *std_id;
+unlock_out:
+ mutex_unlock(&video->lock);
+ return ret;
+}
+
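+/*
+ * vpfe_g_std() - get the standard which is currently set
+ * @file: file pointer
+ * @priv: void pointer
+ * @tvnorm: pointer to v4l2_std_id
+ *
+ * Fill tvnorm with the standard stored in video->stdid and return 0.
+ */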
+static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_std\n");
+ *tvnorm = video->stdid;
+ return 0;
+}
+
+/*
+ * vpfe_enum_dv_timings() - enumerate dv_timings which are supported by
+ * the external subdev
+ * @file: file pointer
+ * @priv: void pointer
+ * @timings: pointer to v4l2_enum_dv_timings structure
+ *
+ * enum dv_timings's which are supported by external subdev through
+ * v4l2_subdev_call
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int
+vpfe_enum_dv_timings(struct file *file, void *fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_dv_timings\n");
+ return v4l2_subdev_call(subdev, video, enum_dv_timings, timings);
+}
+
+/*
+ * vpfe_query_dv_timings() - query the dv_timings which is being input
+ * to the external subdev
+ * @file: file pointer
+ * @priv: void pointer
+ * @timings: pointer to v4l2_dv_timings structure
+ *
+ * get dv_timings which is being input on external subdev through
+ * v4l2_subdev_call
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int
+vpfe_query_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_query_dv_timings\n");
+ return v4l2_subdev_call(subdev, video, query_dv_timings, timings);
+}
+
+/*
+ * vpfe_s_dv_timings() - set dv_timings on the external subdev
+ * @file: file pointer
+ * @priv: void pointer
+ * @timings: pointer to v4l2_dv_timings structure
+ *
+ * set dv_timings pointed to by timings on the external subdev through
+ * v4l2_device_call_until_err, this configures amplifier also
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int
+vpfe_s_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_dv_timings\n");
+
+ video->stdid = V4L2_STD_UNKNOWN;
+ return v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
+ video->current_ext_subdev->grp_id,
+ video, s_dv_timings, timings);
+}
+
+/*
+ * vpfe_g_dv_timings() - get dv_timings which are set on the external subdev
+ * @file: file pointer
+ * @priv: void pointer
+ * @timings: pointer to v4l2_dv_timings structure
+ *
+ * get dv_timings which are set on the external subdev through
+ * v4l2_subdev_call
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int
+vpfe_g_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_dv_timings\n");
+ return v4l2_subdev_call(subdev, video, g_dv_timings, timings);
+}
+
+/*
+ * Videobuf operations
+ */
+/*
+ * vpfe_buffer_queue_setup : Callback function for buffer setup.
+ * @vq: vb2_queue ptr
+ * @fmt: v4l2 format
+ * @nbuffers: ptr to number of buffers requested by application
+ * @nplanes: contains the number of distinct video planes needed to hold a frame
+ * @sizes[]: contains the size (in bytes) of each plane.
+ * @alloc_ctxs: ptr to allocation context
+ *
+ * This callback function is called when reqbufs() is called to adjust
+ * the buffer count and buffer size
+ */
+static int
+vpfe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct vpfe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpfe_video_device *video = fh->video;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_pipeline *pipe = &video->pipe;
+ unsigned long size;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_queue_setup\n");
+ size = video->fmt.fmt.pix.sizeimage;
+
+ if (vpfe_dev->video_limit) {
+ while (size * *nbuffers > vpfe_dev->video_limit)
+ (*nbuffers)--;
+ }
+ if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS) {
+ if (*nbuffers < MIN_NUM_BUFFERS)
+ *nbuffers = MIN_NUM_BUFFERS;
+ }
+ *nplanes = 1;
+ sizes[0] = size;
+ alloc_ctxs[0] = video->alloc_ctx;
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "nbuffers=%d, size=%lu\n", *nbuffers, size);
+ return 0;
+}
+
+/*
+ * vpfe_buffer_prepare : callback function for buffer prepare
+ * @vb: ptr to vb2_buffer
+ *
+ * This is the callback function for buffer prepare when vb2_qbuf()
+ * function is called. The buffer is prepared and the user space virtual
+ * address is converted into a physical address
+ */
+static int vpfe_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpfe_video_device *video = fh->video;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ unsigned long addr;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
+
+ if (vb->state != VB2_BUF_STATE_ACTIVE &&
+ vb->state != VB2_BUF_STATE_PREPARED)
+ return 0;
+
+ /* Initialize buffer */
+ vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage);
+ if (vb2_plane_vaddr(vb, 0) &&
+ vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ /* Make sure user addresses are aligned to 32 bytes */
+ if (!IS_ALIGNED(addr, 32))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void vpfe_buffer_queue(struct vb2_buffer *vb)
+{
+ /* Get the file handle object and device object */
+ struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpfe_video_device *video = fh->video;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_pipeline *pipe = &video->pipe;
+ struct vpfe_cap_buffer *buf = container_of(vb,
+ struct vpfe_cap_buffer, vb);
+ unsigned long flags;
+ unsigned long empty;
+ unsigned long addr;
+
+ spin_lock_irqsave(&video->dma_queue_lock, flags);
+ empty = list_empty(&video->dma_queue);
+ /* add the buffer to the DMA queue */
+ list_add_tail(&buf->list, &video->dma_queue);
+ spin_unlock_irqrestore(&video->dma_queue_lock, flags);
+ /* this case happens in single shot mode */
+ if (empty && video->started && pipe->state ==
+ VPFE_PIPELINE_STREAM_SINGLESHOT &&
+ video->state == VPFE_VIDEO_BUFFER_NOT_QUEUED) {
+ spin_lock(&video->dma_queue_lock);
+ addr = vpfe_video_get_next_buffer(video);
+ video->ops->queue(vpfe_dev, addr);
+
+ video->state = VPFE_VIDEO_BUFFER_QUEUED;
+ spin_unlock(&video->dma_queue_lock);
+
+ /* enable h/w each time in single shot */
+ if (vpfe_video_is_pipe_ready(pipe))
+ vpfe_pipeline_set_stream(pipe,
+ VPFE_PIPELINE_STREAM_SINGLESHOT);
+ }
+}
+
+/* vpfe_start_capture() - start streaming on all the subdevs */
+static int vpfe_start_capture(struct vpfe_video_device *video)
+{
+ struct vpfe_pipeline *pipe = &video->pipe;
+ int ret = 0;
+
+ video->started = 1;
+ if (vpfe_video_is_pipe_ready(pipe))
+ ret = vpfe_pipeline_set_stream(pipe, pipe->state);
+
+ return ret;
+}
+
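+/*
+ * vpfe_start_streaming() - vb2 start_streaming callback
+ *
+ * Remove the first buffer from the DMA queue, program its address to the
+ * hardware and start capture on the pipeline.
+ */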
+static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpfe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpfe_video_device *video = fh->video;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ unsigned long addr;
+ int ret;
+
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ goto streamoff;
+
+ /* Get the next frame from the buffer queue */
+ video->cur_frm = video->next_frm =
+ list_entry(video->dma_queue.next, struct vpfe_cap_buffer, list);
+ /* Remove buffer from the buffer queue */
+ list_del(&video->cur_frm->list);
+ /* Mark state of the current frame to active */
+ video->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ /* Initialize the field_id member */
+ video->field_id = 0;
+ addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb, 0);
+ video->ops->queue(vpfe_dev, addr);
+ video->state = VPFE_VIDEO_BUFFER_QUEUED;
+
+ ret = vpfe_start_capture(video);
+ if (ret)
+ goto unlock_out;
+
+ mutex_unlock(&video->lock);
+
+ return ret;
+unlock_out:
+ mutex_unlock(&video->lock);
+streamoff:
+ vb2_streamoff(&video->buffer_queue, video->buffer_queue.type);
+ return 0;
+}
+
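+/* vpfe_buffer_init() - initialize the buffer's DMA queue list entry */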
+static int vpfe_buffer_init(struct vb2_buffer *vb)
+{
+ struct vpfe_cap_buffer *buf = container_of(vb,
+ struct vpfe_cap_buffer, vb);
+
+ INIT_LIST_HEAD(&buf->list);
+ return 0;
+}
+
+/* abort streaming and wait for last buffer */
+static int vpfe_stop_streaming(struct vb2_queue *vq)
+{
+ struct vpfe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpfe_video_device *video = fh->video;
+
+ if (!vb2_is_streaming(vq))
+ return 0;
+ /* release all active buffers */
+ while (!list_empty(&video->dma_queue)) {
+ video->next_frm = list_entry(video->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+ list_del(&video->next_frm->list);
+ vb2_buffer_done(&video->next_frm->vb, VB2_BUF_STATE_ERROR);
+ }
+ return 0;
+}
+
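+/* vpfe_buf_cleanup() - remove an active buffer from the DMA queue */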
+static void vpfe_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpfe_video_device *video = fh->video;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_cap_buffer *buf = container_of(vb,
+ struct vpfe_cap_buffer, vb);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buf_cleanup\n");
+ if (vb->state == VB2_BUF_STATE_ACTIVE)
+ list_del_init(&buf->list);
+}
+
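+/* videobuf2 queue operations for the vpfe video devices */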
+static struct vb2_ops video_qops = {
+ .queue_setup = vpfe_buffer_queue_setup,
+ .buf_init = vpfe_buffer_init,
+ .buf_prepare = vpfe_buffer_prepare,
+ .start_streaming = vpfe_start_streaming,
+ .stop_streaming = vpfe_stop_streaming,
+ .buf_cleanup = vpfe_buf_cleanup,
+ .buf_queue = vpfe_buffer_queue,
+};
+
+/*
+ * vpfe_reqbufs() - REQBUFS is supported only once per open
+ * of the device.
+ */
+static int vpfe_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req_buf)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_fh *fh = file->private_data;
+ struct vb2_queue *q;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type &&
+ V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
+
+ if (video->io_usrs != 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+ video->memory = req_buf->memory;
+
+ /* Initialize videobuf2 queue as per the buffer type */
+ video->alloc_ctx = vb2_dma_contig_init_ctx(vpfe_dev->pdev);
+ if (IS_ERR(video->alloc_ctx)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Failed to get the context\n");
+ ret = PTR_ERR(video->alloc_ctx);
+ goto unlock_out;
+ }
+
+ q = &video->buffer_queue;
+ q->type = req_buf->type;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = fh;
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
+
+ ret = vb2_queue_init(q);
+ if (ret) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
+ vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
+ goto unlock_out;
+ }
+
+ fh->io_allowed = 1;
+ video->io_usrs = 1;
+ INIT_LIST_HEAD(&video->dma_queue);
+ ret = vb2_reqbufs(&video->buffer_queue, req_buf);
+
+unlock_out:
+ mutex_unlock(&video->lock);
+ return ret;
+}
+
+/*
+ * vpfe_querybuf() - query buffers for exchange
+ */
+static int vpfe_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type &&
+ V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ if (video->memory != V4L2_MEMORY_MMAP) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid memory\n");
+ return -EINVAL;
+ }
+
+ /* Call vb2_querybuf to get information */
+ return vb2_querybuf(&video->buffer_queue, buf);
+}
+
+/*
+ * vpfe_qbuf() - queue buffers for capture or processing
+ */
+static int vpfe_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_fh *fh = file->private_data;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type &&
+ V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+ /*
+ * If this file handle is not allowed to do IO,
+ * return error
+ */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+ return -EACCES;
+ }
+
+ return vb2_qbuf(&video->buffer_queue, p);
+}
+
+/*
+ * vpfe_dqbuf() - dequeue a buffer which is done with processing
+ */
+static int vpfe_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type &&
+ V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ return vb2_dqbuf(&video->buffer_queue,
+ buf, (file->f_flags & O_NONBLOCK));
+}
+
+/*
+ * vpfe_streamon() - start streaming on the video device
+ * @file: file pointer
+ * @priv: void pointer
+ * @buf_type: enum v4l2_buf_type
+ *
+ * queue buffer onto hardware for capture/processing and
+ * start all the subdevs which are in media chain
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_pipeline *pipe = &video->pipe;
+ struct vpfe_fh *fh = file->private_data;
+ struct vpfe_ext_subdev_info *sdinfo;
+ int ret = -EINVAL;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type &&
+ V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return ret;
+ }
+ /* If file handle is not allowed IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+ return -EACCES;
+ }
+ sdinfo = video->current_ext_subdev;
+ /* If buffer queue is empty, return error */
+ if (list_empty(&video->buffer_queue.queued_list)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "buffer queue is empty\n");
+ return -EIO;
+ }
+ /* Validate the pipeline */
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE == buf_type) {
+ ret = vpfe_video_validate_pipeline(pipe);
+ if (ret < 0)
+ return ret;
+ }
+ /* Call vb2_streamon to start streaming */
+ return vb2_streamon(&video->buffer_queue, buf_type);
+}
+
+/*
+ * vpfe_streamoff() - stop streaming on the video device
+ * @file: file pointer
+ * @priv: void pointer
+ * @buf_type: enum v4l2_buf_type
+ *
+ * stop all the subdevs which are in media chain
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_fh *fh = file->private_data;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
+
+ if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ buf_type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ /* If io is not allowed for this file handle, return error */
+ if (!fh->io_allowed) {
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+ return -EACCES;
+ }
+
+ /* If streaming is not started, return error */
+ if (!video->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "device is not started\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
+
+ vpfe_stop_capture(video);
+ ret = vb2_streamoff(&video->buffer_queue, buf_type);
+ mutex_unlock(&video->lock);
+
+ return ret;
+}
+
+/* vpfe capture ioctl operations */
+static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
+ .vidioc_querycap = vpfe_querycap,
+ .vidioc_g_fmt_vid_cap = vpfe_g_fmt,
+ .vidioc_s_fmt_vid_cap = vpfe_s_fmt,
+ .vidioc_try_fmt_vid_cap = vpfe_try_fmt,
+ .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
+ .vidioc_g_fmt_vid_out = vpfe_g_fmt,
+ .vidioc_s_fmt_vid_out = vpfe_s_fmt,
+ .vidioc_try_fmt_vid_out = vpfe_try_fmt,
+ .vidioc_enum_fmt_vid_out = vpfe_enum_fmt,
+ .vidioc_enum_input = vpfe_enum_input,
+ .vidioc_g_input = vpfe_g_input,
+ .vidioc_s_input = vpfe_s_input,
+ .vidioc_querystd = vpfe_querystd,
+ .vidioc_s_std = vpfe_s_std,
+ .vidioc_g_std = vpfe_g_std,
+ .vidioc_enum_dv_timings = vpfe_enum_dv_timings,
+ .vidioc_query_dv_timings = vpfe_query_dv_timings,
+ .vidioc_s_dv_timings = vpfe_s_dv_timings,
+ .vidioc_g_dv_timings = vpfe_g_dv_timings,
+ .vidioc_reqbufs = vpfe_reqbufs,
+ .vidioc_querybuf = vpfe_querybuf,
+ .vidioc_qbuf = vpfe_qbuf,
+ .vidioc_dqbuf = vpfe_dqbuf,
+ .vidioc_streamon = vpfe_streamon,
+ .vidioc_streamoff = vpfe_streamoff,
+};
+
+/* VPFE video init function */
+int vpfe_video_init(struct vpfe_video_device *video, const char *name)
+{
+ const char *direction;
+ int ret;
+
+ switch (video->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ direction = "output";
+ video->pad.flags = MEDIA_PAD_FL_SINK;
+ video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ break;
+
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ direction = "input";
+ video->pad.flags = MEDIA_PAD_FL_SOURCE;
+ video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ /* Initialize field of video device */
+ video->video_dev.release = video_device_release;
+ video->video_dev.fops = &vpfe_fops;
+ video->video_dev.ioctl_ops = &vpfe_ioctl_ops;
+ video->video_dev.minor = -1;
+ video->video_dev.tvnorms = 0;
+ snprintf(video->video_dev.name, sizeof(video->video_dev.name),
+ "DAVINCI VIDEO %s %s", name, direction);
+
+ /* Initialize prio member of device object */
+ v4l2_prio_init(&video->prio);
+ spin_lock_init(&video->irqlock);
+ spin_lock_init(&video->dma_queue_lock);
+ mutex_init(&video->lock);
+ ret = media_entity_init(&video->video_dev.entity,
+ 1, &video->pad, 0);
+ if (ret < 0)
+ return ret;
+
+ video_set_drvdata(&video->video_dev, video);
+
+ return 0;
+}
+
+/* vpfe video device register function */
+int vpfe_video_register(struct vpfe_video_device *video,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ video->video_dev.v4l2_dev = vdev;
+
+ ret = video_register_device(&video->video_dev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ pr_err("%s: could not register video device (%d)\n",
+ __func__, ret);
+ return ret;
+}
+
+/* vpfe video device unregister function */
+void vpfe_video_unregister(struct vpfe_video_device *video)
+{
+ if (video_is_registered(&video->video_dev)) {
+ media_entity_cleanup(&video->video_dev.entity);
+ video_unregister_device(&video->video_dev);
+ }
+}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.h b/drivers/staging/media/davinci_vpfe/vpfe_video.h
new file mode 100644
index 000000000000..bf8af01d4a1b
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_VIDEO_H
+#define _DAVINCI_VPFE_VIDEO_H
+
+#include <media/videobuf2-dma-contig.h>
+
+struct vpfe_device;
+
+/*
+ * struct vpfe_video_operations - VPFE video operations
+ * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
+ * if there was no buffer previously queued.
+ */
+struct vpfe_video_operations {
+ int (*queue)(struct vpfe_device *vpfe_dev, unsigned long addr);
+};
+
+enum vpfe_pipeline_stream_state {
+ VPFE_PIPELINE_STREAM_STOPPED = 0,
+ VPFE_PIPELINE_STREAM_CONTINUOUS = 1,
+ VPFE_PIPELINE_STREAM_SINGLESHOT = 2,
+};
+
+enum vpfe_video_state {
+ /* indicates that buffer is not queued */
+ VPFE_VIDEO_BUFFER_NOT_QUEUED = 0,
+ /* indicates that buffer is queued */
+ VPFE_VIDEO_BUFFER_QUEUED = 1,
+};
+
+struct vpfe_pipeline {
+ /* media pipeline */
+ struct media_pipeline *pipe;
+ /* state of the pipeline, continuous,
+ * single-shot or stopped
+ */
+ enum vpfe_pipeline_stream_state state;
+ /* number of active input video entities */
+ unsigned int input_num;
+ /* number of active output video entities */
+ unsigned int output_num;
+ /* input video nodes in case of single-shot mode */
+ struct vpfe_video_device *inputs[10];
+ /* capturing video nodes */
+ struct vpfe_video_device *outputs[10];
+};
+
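+/* helpers to get the enclosing vpfe pipeline / video device structures */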
+#define to_vpfe_pipeline(__e) \
+ container_of((__e)->pipe, struct vpfe_pipeline, pipe)
+
+#define to_vpfe_video(vdev) \
+ container_of(vdev, struct vpfe_video_device, video_dev)
+
+struct vpfe_cap_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+struct vpfe_video_device {
+ /* vpfe device */
+ struct vpfe_device *vpfe_dev;
+ /* video dev */
+ struct video_device video_dev;
+ /* media pad of video entity */
+ struct media_pad pad;
+ /* video operations supported by video device */
+ const struct vpfe_video_operations *ops;
+ /* type of the video buffers used by user */
+ enum v4l2_buf_type type;
+ /* Indicates id of the field which is being captured */
+ u32 field_id;
+ /* pipeline for which video device is part of */
+ struct vpfe_pipeline pipe;
+ /* Indicates whether streaming started */
+ u8 started;
+ /* Indicates state of the stream */
+ unsigned int state;
+ /* current input at the sub device */
+ int current_input;
+ /*
+ * This field keeps track of type of buffer exchange mechanism
+ * user has selected
+ */
+ enum v4l2_memory memory;
+ /* Used to keep track of state of the priority */
+ struct v4l2_prio_state prio;
+ /* number of open instances of the channel */
+ u32 usrs;
+ /* flag to indicate whether decoder is initialized */
+ u8 initialized;
+ /* skip frame count */
+ u8 skip_frame_count;
+ /* skip frame count init value */
+ u8 skip_frame_count_init;
+ /* time per frame for skipping */
+ struct v4l2_fract timeperframe;
+ /* ptr to currently selected sub device */
+ struct vpfe_ext_subdev_info *current_ext_subdev;
+ /* Pointer pointing to current vpfe_cap_buffer */
+ struct vpfe_cap_buffer *cur_frm;
+ /* Pointer pointing to next vpfe_cap_buffer */
+ struct vpfe_cap_buffer *next_frm;
+ /* Used to store pixel format */
+ struct v4l2_format fmt;
+ struct vb2_queue buffer_queue;
+ /* allocator-specific contexts for each plane */
+ struct vb2_alloc_ctx *alloc_ctx;
+ /* Queue of filled frames */
+ struct list_head dma_queue;
+ spinlock_t irqlock;
+ /* IRQ lock for DMA queue */
+ spinlock_t dma_queue_lock;
+ /* lock used to access this structure */
+ struct mutex lock;
+ /* number of users performing IO */
+ u32 io_usrs;
+ /* Currently selected or default standard */
+ v4l2_std_id stdid;
+ /*
+ * offset of the second field from the start of the
+ * buffer for field separated YCbCr formats
+ */
+ u32 field_off;
+};
+
+int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe);
+void vpfe_video_unregister(struct vpfe_video_device *video);
+int vpfe_video_register(struct vpfe_video_device *video,
+ struct v4l2_device *vdev);
+int vpfe_video_init(struct vpfe_video_device *video, const char *name);
+void vpfe_video_process_buffer_complete(struct vpfe_video_device *video);
+void vpfe_video_schedule_bottom_field(struct vpfe_video_device *video);
+void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video);
+
+#endif /* _DAVINCI_VPFE_VIDEO_H */
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 479c643da2f6..e33b7f55d84e 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -785,7 +785,7 @@ dt3155_init_board(struct pci_dev *pdev)
}
write_i2c_reg(pd->regs, CONFIG, pd->config); /* ACQ_MODE_EVEN */
- /* select chanel 1 for input and set sync level */
+ /* select channel 1 for input and set sync level */
write_i2c_reg(pd->regs, AD_ADDR, AD_CMD_REG);
write_i2c_reg(pd->regs, AD_CMD, VIDEO_CNL_1 | SYNC_CNL_1 | SYNC_LVL_3);
diff --git a/drivers/staging/media/go7007/go7007-driver.c b/drivers/staging/media/go7007/go7007-driver.c
index ece2dd146487..66950916df23 100644
--- a/drivers/staging/media/go7007/go7007-driver.c
+++ b/drivers/staging/media/go7007/go7007-driver.c
@@ -108,14 +108,13 @@ static int go7007_load_encoder(struct go7007 *go)
return -1;
}
fw_len = fw_entry->size - 16;
- bounce = kmalloc(fw_len, GFP_KERNEL);
+ bounce = kmemdup(fw_entry->data + 16, fw_len, GFP_KERNEL);
if (bounce == NULL) {
v4l2_err(go, "unable to allocate %d bytes for "
"firmware transfer\n", fw_len);
release_firmware(fw_entry);
return -1;
}
- memcpy(bounce, fw_entry->data + 16, fw_len);
release_firmware(fw_entry);
if (go7007_interface_reset(go) < 0 ||
go7007_send_firmware(go, bounce, fw_len) < 0 ||
@@ -173,6 +172,11 @@ static int go7007_init_encoder(struct go7007 *go)
go7007_write_addr(go, 0x3c82, 0x0001);
go7007_write_addr(go, 0x3c80, 0x00fe);
}
+ if (go->board_id == GO7007_BOARDID_ADLINK_MPG24) {
+ /* set GPIO5 to be an output, currently low */
+ go7007_write_addr(go, 0x3c82, 0x0000);
+ go7007_write_addr(go, 0x3c80, 0x00df);
+ }
return 0;
}
@@ -201,7 +205,8 @@ static int init_i2c_module(struct i2c_adapter *adapter, const char *type,
if (v4l2_i2c_new_subdev(v4l2_dev, adapter, type, addr, NULL))
return 0;
- printk(KERN_INFO "go7007: probing for module i2c:%s failed\n", type);
+ dev_info(&adapter->dev,
+ "go7007: probing for module i2c:%s failed\n", type);
return -1;
}
@@ -217,7 +222,7 @@ int go7007_register_encoder(struct go7007 *go)
{
int i, ret;
- printk(KERN_INFO "go7007: registering new %s\n", go->name);
+ dev_info(go->dev, "go7007: registering new %s\n", go->name);
mutex_lock(&go->hw_lock);
ret = go7007_init_encoder(go);
@@ -571,7 +576,7 @@ struct go7007 *go7007_alloc(struct go7007_board_info *board, struct device *dev)
struct go7007 *go;
int i;
- go = kmalloc(sizeof(struct go7007), GFP_KERNEL);
+ go = kzalloc(sizeof(struct go7007), GFP_KERNEL);
if (go == NULL)
return NULL;
go->dev = dev;
diff --git a/drivers/staging/media/go7007/go7007-fw.c b/drivers/staging/media/go7007/go7007-fw.c
index f99c05b454b0..a5ede1c109d0 100644
--- a/drivers/staging/media/go7007/go7007-fw.c
+++ b/drivers/staging/media/go7007/go7007-fw.c
@@ -381,11 +381,8 @@ static int gen_mjpeghdr_to_package(struct go7007 *go, __le16 *code, int space)
int size = 0, i, off = 0, chunk;
buf = kzalloc(4096, GFP_KERNEL);
- if (buf == NULL) {
- dev_err(go->dev,
- "unable to allocate 4096 bytes for firmware construction\n");
+ if (buf == NULL)
return -1;
- }
for (i = 1; i < 32; ++i) {
mjpeg_frame_header(go, buf + size, i);
@@ -651,11 +648,9 @@ static int gen_mpeg1hdr_to_package(struct go7007 *go,
int i, off = 0, chunk;
buf = kzalloc(5120, GFP_KERNEL);
- if (buf == NULL) {
- dev_err(go->dev,
- "unable to allocate 5120 bytes for firmware construction\n");
+ if (buf == NULL)
return -1;
- }
+
framelen[0] = mpeg1_frame_header(go, buf, 0, 1, PFRAME);
if (go->interlace_coding)
framelen[0] += mpeg1_frame_header(go, buf + framelen[0] / 8,
@@ -838,11 +833,9 @@ static int gen_mpeg4hdr_to_package(struct go7007 *go,
int i, off = 0, chunk;
buf = kzalloc(5120, GFP_KERNEL);
- if (buf == NULL) {
- dev_err(go->dev,
- "unable to allocate 5120 bytes for firmware construction\n");
+ if (buf == NULL)
return -1;
- }
+
framelen[0] = mpeg4_frame_header(go, buf, 0, PFRAME);
i = 368;
framelen[1] = mpeg4_frame_header(go, buf + i, 0, BFRAME_PRE);
@@ -1582,12 +1575,9 @@ int go7007_construct_fw_image(struct go7007 *go, u8 **fw, int *fwlen)
return -1;
}
code = kzalloc(codespace * 2, GFP_KERNEL);
- if (code == NULL) {
- dev_err(go->dev,
- "unable to allocate %d bytes for firmware construction\n",
- codespace * 2);
+ if (code == NULL)
goto fw_failed;
- }
+
src = (__le16 *)fw_entry->data;
srclen = fw_entry->size / 2;
while (srclen >= 2) {
diff --git a/drivers/staging/media/go7007/go7007-i2c.c b/drivers/staging/media/go7007/go7007-i2c.c
index 6bc82aaeef11..39456a36b2c6 100644
--- a/drivers/staging/media/go7007/go7007-i2c.c
+++ b/drivers/staging/media/go7007/go7007-i2c.c
@@ -60,10 +60,10 @@ static int go7007_i2c_xfer(struct go7007 *go, u16 addr, int read,
#ifdef GO7007_I2C_DEBUG
if (read)
- printk(KERN_DEBUG "go7007-i2c: reading 0x%02x on 0x%02x\n",
+ dev_dbg(go->dev, "go7007-i2c: reading 0x%02x on 0x%02x\n",
command, addr);
else
- printk(KERN_DEBUG
+ dev_dbg(go->dev,
"go7007-i2c: writing 0x%02x to 0x%02x on 0x%02x\n",
*data, command, addr);
#endif
@@ -85,7 +85,7 @@ static int go7007_i2c_xfer(struct go7007 *go, u16 addr, int read,
msleep(100);
}
if (i == 10) {
- printk(KERN_ERR "go7007-i2c: I2C adapter is hung\n");
+ dev_err(go->dev, "go7007-i2c: I2C adapter is hung\n");
goto i2c_done;
}
@@ -119,7 +119,7 @@ static int go7007_i2c_xfer(struct go7007 *go, u16 addr, int read,
msleep(100);
}
if (i == 10) {
- printk(KERN_ERR "go7007-i2c: I2C adapter is hung\n");
+ dev_err(go->dev, "go7007-i2c: I2C adapter is hung\n");
goto i2c_done;
}
@@ -216,7 +216,7 @@ int go7007_i2c_init(struct go7007 *go)
go->i2c_adapter.dev.parent = go->dev;
i2c_set_adapdata(&go->i2c_adapter, go);
if (i2c_add_adapter(&go->i2c_adapter) < 0) {
- printk(KERN_ERR
+ dev_err(go->dev,
"go7007-i2c: error: i2c_add_adapter failed\n");
return -1;
}
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index 5443e25086e9..9dbf5ecd05a2 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -1110,9 +1110,6 @@ static int go7007_usb_probe(struct usb_interface *intf,
} else {
u16 channel;
- /* set GPIO5 to be an output, currently low */
- go7007_write_addr(go, 0x3c82, 0x0000);
- go7007_write_addr(go, 0x3c80, 0x00df);
/* read channel number from GPIO[1:0] */
go7007_read_addr(go, 0x3c81, &channel);
channel &= 0x3;
@@ -1245,7 +1242,6 @@ static void go7007_usb_disconnect(struct usb_interface *intf)
struct urb *vurb, *aurb;
int i;
- go->status = STATUS_SHUTDOWN;
usb_kill_urb(usb->intr_urb);
/* Free USB-related structs */
@@ -1269,6 +1265,7 @@ static void go7007_usb_disconnect(struct usb_interface *intf)
kfree(go->hpi_context);
go7007_remove(go);
+ go->status = STATUS_SHUTDOWN;
}
static struct usb_driver go7007_usb_driver = {
diff --git a/drivers/staging/media/go7007/go7007-v4l2.c b/drivers/staging/media/go7007/go7007-v4l2.c
index a78133b67de2..cb9fe33050c7 100644
--- a/drivers/staging/media/go7007/go7007-v4l2.c
+++ b/drivers/staging/media/go7007/go7007-v4l2.c
@@ -98,7 +98,7 @@ static int go7007_open(struct file *file)
if (go->status != STATUS_ONLINE)
return -EBUSY;
- gofh = kmalloc(sizeof(struct go7007_file), GFP_KERNEL);
+ gofh = kzalloc(sizeof(struct go7007_file), GFP_KERNEL);
if (gofh == NULL)
return -ENOMEM;
++go->ref_count;
@@ -953,6 +953,7 @@ static int vidioc_streamon(struct file *file, void *priv,
}
mutex_unlock(&go->hw_lock);
mutex_unlock(&gofh->lock);
+ call_all(&go->v4l2_dev, video, s_stream, 1);
return retval;
}
@@ -968,6 +969,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
mutex_lock(&gofh->lock);
go7007_streamoff(go);
mutex_unlock(&gofh->lock);
+ call_all(&go->v4l2_dev, video, s_stream, 0);
return 0;
}
@@ -1811,8 +1813,8 @@ int go7007_v4l2_init(struct go7007 *go)
}
video_set_drvdata(go->video_dev, go);
++go->ref_count;
- printk(KERN_INFO "%s: registered device %s [v4l2]\n",
- go->video_dev->name, video_device_node_name(go->video_dev));
+ dev_info(go->dev, "registered device %s [v4l2]\n",
+ video_device_node_name(go->video_dev));
return 0;
}
@@ -1832,5 +1834,6 @@ void go7007_v4l2_remove(struct go7007 *go)
mutex_unlock(&go->hw_lock);
if (go->video_dev)
video_unregister_device(go->video_dev);
- v4l2_device_unregister(&go->v4l2_dev);
+ if (go->status != STATUS_SHUTDOWN)
+ v4l2_device_unregister(&go->v4l2_dev);
}
diff --git a/drivers/staging/media/go7007/s2250-board.c b/drivers/staging/media/go7007/s2250-board.c
index b3974100c6cd..37400bfa6ccb 100644
--- a/drivers/staging/media/go7007/s2250-board.c
+++ b/drivers/staging/media/go7007/s2250-board.c
@@ -103,8 +103,7 @@ static u16 vid_regs_fp[] = {
};
/* PAL specific values */
-static u16 vid_regs_fp_pal[] =
-{
+static u16 vid_regs_fp_pal[] = {
0x120, 0x017,
0x121, 0xd22,
0x122, 0x122,
@@ -174,7 +173,7 @@ static int write_reg(struct i2c_client *client, u8 reg, u8 value)
usb = go->hpi_context;
if (mutex_lock_interruptible(&usb->i2c_lock) != 0) {
- printk(KERN_INFO "i2c lock failed\n");
+ dev_info(&client->dev, "i2c lock failed\n");
kfree(buf);
return -EINTR;
}
@@ -213,7 +212,7 @@ static int write_reg_fp(struct i2c_client *client, u16 addr, u16 val)
usb = go->hpi_context;
if (mutex_lock_interruptible(&usb->i2c_lock) != 0) {
- printk(KERN_INFO "i2c lock failed\n");
+ dev_info(&client->dev, "i2c lock failed\n");
kfree(buf);
return -EINTR;
}
@@ -231,13 +230,13 @@ static int write_reg_fp(struct i2c_client *client, u16 addr, u16 val)
val_read = (buf[2] << 8) + buf[3];
kfree(buf);
if (val_read != val) {
- printk(KERN_INFO "invalid fp write %x %x\n",
- val_read, val);
+ dev_info(&client->dev, "invalid fp write %x %x\n",
+ val_read, val);
return -EFAULT;
}
if (subaddr != addr) {
- printk(KERN_INFO "invalid fp write addr %x %x\n",
- subaddr, addr);
+ dev_info(&client->dev, "invalid fp write addr %x %x\n",
+ subaddr, addr);
return -EFAULT;
}
} else {
@@ -275,7 +274,7 @@ static int read_reg_fp(struct i2c_client *client, u16 addr, u16 *val)
memset(buf, 0xcd, 6);
usb = go->hpi_context;
if (mutex_lock_interruptible(&usb->i2c_lock) != 0) {
- printk(KERN_INFO "i2c lock failed\n");
+ dev_info(&client->dev, "i2c lock failed\n");
kfree(buf);
return -EINTR;
}
@@ -299,7 +298,7 @@ static int write_regs(struct i2c_client *client, u8 *regs)
for (i = 0; !((regs[i] == 0x00) && (regs[i+1] == 0x00)); i += 2) {
if (write_reg(client, regs[i], regs[i+1]) < 0) {
- printk(KERN_INFO "s2250: failed\n");
+ dev_info(&client->dev, "failed\n");
return -1;
}
}
@@ -312,7 +311,7 @@ static int write_regs_fp(struct i2c_client *client, u16 *regs)
for (i = 0; !((regs[i] == 0x00) && (regs[i+1] == 0x00)); i += 2) {
if (write_reg_fp(client, regs[i], regs[i+1]) < 0) {
- printk(KERN_INFO "s2250: failed fp\n");
+ dev_info(&client->dev, "failed fp\n");
return -1;
}
}
@@ -535,7 +534,7 @@ static int s2250_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "Brightness: %d\n", state->brightness);
v4l2_info(sd, "Contrast: %d\n", state->contrast);
v4l2_info(sd, "Saturation: %d\n", state->saturation);
- v4l2_info(sd, "Hue: %d\n", state->hue); return 0;
+ v4l2_info(sd, "Hue: %d\n", state->hue);
v4l2_info(sd, "Audio input: %s\n", state->audio_input == 0 ? "Line In" :
state->audio_input == 1 ? "Mic" :
state->audio_input == 2 ? "Mic Boost" :
@@ -606,23 +605,20 @@ static int s2250_probe(struct i2c_client *client,
/* initialize the audio */
if (write_regs(audio, aud_regs) < 0) {
- printk(KERN_ERR
- "s2250: error initializing audio\n");
+ dev_err(&client->dev, "error initializing audio\n");
i2c_unregister_device(audio);
kfree(state);
return 0;
}
if (write_regs(client, vid_regs) < 0) {
- printk(KERN_ERR
- "s2250: error initializing decoder\n");
+ dev_err(&client->dev, "error initializing decoder\n");
i2c_unregister_device(audio);
kfree(state);
return 0;
}
if (write_regs_fp(client, vid_regs_fp) < 0) {
- printk(KERN_ERR
- "s2250: error initializing decoder\n");
+ dev_err(&client->dev, "error initializing decoder\n");
i2c_unregister_device(audio);
kfree(state);
return 0;
diff --git a/drivers/staging/media/go7007/s2250-loader.c b/drivers/staging/media/go7007/s2250-loader.c
index f1bd159ac195..72e5175fe7e3 100644
--- a/drivers/staging/media/go7007/s2250-loader.c
+++ b/drivers/staging/media/go7007/s2250-loader.c
@@ -55,16 +55,16 @@ static int s2250loader_probe(struct usb_interface *interface,
usbdev = usb_get_dev(interface_to_usbdev(interface));
if (!usbdev) {
- printk(KERN_ERR "Enter s2250loader_probe failed\n");
+ dev_err(&interface->dev, "Enter s2250loader_probe failed\n");
return -1;
}
- printk(KERN_INFO "Enter s2250loader_probe 2.6 kernel\n");
- printk(KERN_INFO "vendor id 0x%x, device id 0x%x devnum:%d\n",
- usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
- usbdev->devnum);
+ dev_info(&interface->dev, "Enter s2250loader_probe 2.6 kernel\n");
+ dev_info(&interface->dev, "vendor id 0x%x, device id 0x%x devnum:%d\n",
+ usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
+ usbdev->devnum);
if (usbdev->descriptor.bNumConfigurations != 1) {
- printk(KERN_ERR "can't handle multiple config\n");
+ dev_err(&interface->dev, "can't handle multiple config\n");
return -1;
}
mutex_lock(&s2250_dev_table_mutex);
@@ -75,31 +75,31 @@ static int s2250loader_probe(struct usb_interface *interface,
}
if (minor < 0 || minor >= MAX_DEVICES) {
- printk(KERN_ERR "Invalid minor: %d\n", minor);
+ dev_err(&interface->dev, "Invalid minor: %d\n", minor);
goto failed;
}
/* Allocate dev data structure */
s = kmalloc(sizeof(device_extension_t), GFP_KERNEL);
- if (s == NULL) {
- printk(KERN_ERR "Out of memory\n");
+ if (s == NULL)
goto failed;
- }
+
s2250_dev_table[minor] = s;
- printk(KERN_INFO "s2250loader_probe: Device %d on Bus %d Minor %d\n",
- usbdev->devnum, usbdev->bus->busnum, minor);
+ dev_info(&interface->dev,
+ "s2250loader_probe: Device %d on Bus %d Minor %d\n",
+ usbdev->devnum, usbdev->bus->busnum, minor);
memset(s, 0, sizeof(device_extension_t));
s->usbdev = usbdev;
- printk(KERN_INFO "loading 2250 loader\n");
+ dev_info(&interface->dev, "loading 2250 loader\n");
kref_init(&(s->kref));
mutex_unlock(&s2250_dev_table_mutex);
if (request_firmware(&fw, S2250_LOADER_FIRMWARE, &usbdev->dev)) {
- printk(KERN_ERR
+ dev_err(&interface->dev,
"s2250: unable to load firmware from file \"%s\"\n",
S2250_LOADER_FIRMWARE);
goto failed2;
@@ -107,12 +107,12 @@ static int s2250loader_probe(struct usb_interface *interface,
ret = usb_cypress_load_firmware(usbdev, fw, CYPRESS_FX2);
release_firmware(fw);
if (0 != ret) {
- printk(KERN_ERR "loader download failed\n");
+ dev_err(&interface->dev, "loader download failed\n");
goto failed2;
}
if (request_firmware(&fw, S2250_FIRMWARE, &usbdev->dev)) {
- printk(KERN_ERR
+ dev_err(&interface->dev,
"s2250: unable to load firmware from file \"%s\"\n",
S2250_FIRMWARE);
goto failed2;
@@ -120,7 +120,7 @@ static int s2250loader_probe(struct usb_interface *interface,
ret = usb_cypress_load_firmware(usbdev, fw, CYPRESS_FX2);
release_firmware(fw);
if (0 != ret) {
- printk(KERN_ERR "firmware_s2250 download failed\n");
+ dev_err(&interface->dev, "firmware_s2250 download failed\n");
goto failed2;
}
@@ -133,14 +133,14 @@ failed2:
if (s)
kref_put(&(s->kref), s2250loader_delete);
- printk(KERN_ERR "probe failed\n");
+ dev_err(&interface->dev, "probe failed\n");
return -1;
}
static void s2250loader_disconnect(struct usb_interface *interface)
{
pdevice_extension_t s;
- printk(KERN_INFO "s2250: disconnect\n");
+ dev_info(&interface->dev, "s2250: disconnect\n");
s = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
kref_put(&(s->kref), s2250loader_delete);
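
The s2250-loader hunks above replace bare printk() calls with the dev_err()/dev_info() helpers from <linux/device.h>, which automatically prefix each message with the driver and device name, so hand-rolled "s2250:" tags become unnecessary. A minimal sketch of the idiom follows; the probe function and messages are illustrative only, not part of this driver:

	#include <linux/device.h>
	#include <linux/usb.h>

	static int example_probe(struct usb_interface *interface,
				 const struct usb_device_id *id)
	{
		struct usb_device *usbdev = interface_to_usbdev(interface);

		/* dev_* logging identifies the bound interface automatically */
		dev_info(&interface->dev, "probing %04x:%04x\n",
			 le16_to_cpu(usbdev->descriptor.idVendor),
			 le16_to_cpu(usbdev->descriptor.idProduct));

		if (usbdev->descriptor.bNumConfigurations != 1) {
			dev_err(&interface->dev, "can't handle multiple configs\n");
			return -ENODEV;
		}

		return 0;
	}
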
diff --git a/drivers/staging/media/go7007/wis-saa7113.c b/drivers/staging/media/go7007/wis-saa7113.c
index 8810c1e6e1ed..891cde713a47 100644
--- a/drivers/staging/media/go7007/wis-saa7113.c
+++ b/drivers/staging/media/go7007/wis-saa7113.c
@@ -141,7 +141,7 @@ static int wis_saa7113_command(struct i2c_client *client,
} else if (dec->norm & V4L2_STD_PAL) {
write_reg(client, 0x0e, 0x01);
write_reg(client, 0x10, 0x48);
- } else if (dec->norm * V4L2_STD_SECAM) {
+ } else if (dec->norm & V4L2_STD_SECAM) {
write_reg(client, 0x0e, 0x50);
write_reg(client, 0x10, 0x48);
}
diff --git a/drivers/staging/media/go7007/wis-sony-tuner.c b/drivers/staging/media/go7007/wis-sony-tuner.c
index 1291ab79d2af..5d7ff8c81d6d 100644
--- a/drivers/staging/media/go7007/wis-sony-tuner.c
+++ b/drivers/staging/media/go7007/wis-sony-tuner.c
@@ -95,8 +95,8 @@ static int set_freq(struct i2c_client *client, int freq)
band_name = "UHF";
band_select = tun->UHF;
}
- printk(KERN_DEBUG "wis-sony-tuner: tuning to frequency %d.%04d (%s)\n",
- freq / 16, (freq % 16) * 625, band_name);
+ dev_dbg(&client->dev, "tuning to frequency %d.%04d (%s)\n",
+ freq / 16, (freq % 16) * 625, band_name);
n = freq + tun->IFPCoff;
buffer[0] = n >> 8;
@@ -288,16 +288,16 @@ static int mpx_setup(struct i2c_client *client)
u8 buf1[3], buf2[2];
struct i2c_msg msgs[2];
- printk(KERN_DEBUG "wis-sony-tuner: MPX registers: %04x %04x "
- "%04x %04x %04x %04x %04x %04x\n",
- mpx_audio_modes[t->mpxmode].modus,
- source,
- mpx_audio_modes[t->mpxmode].acb,
- mpx_audio_modes[t->mpxmode].fm_prescale,
- mpx_audio_modes[t->mpxmode].nicam_prescale,
- mpx_audio_modes[t->mpxmode].scart_prescale,
- mpx_audio_modes[t->mpxmode].system,
- mpx_audio_modes[t->mpxmode].volume);
+ dev_dbg(&client->dev,
+ "MPX registers: %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ mpx_audio_modes[t->mpxmode].modus,
+ source,
+ mpx_audio_modes[t->mpxmode].acb,
+ mpx_audio_modes[t->mpxmode].fm_prescale,
+ mpx_audio_modes[t->mpxmode].nicam_prescale,
+ mpx_audio_modes[t->mpxmode].scart_prescale,
+ mpx_audio_modes[t->mpxmode].system,
+ mpx_audio_modes[t->mpxmode].volume);
buf1[0] = 0x11;
buf1[1] = 0x00;
buf1[2] = 0x7e;
@@ -310,14 +310,14 @@ static int mpx_setup(struct i2c_client *client)
msgs[1].len = 2;
msgs[1].buf = buf2;
i2c_transfer(client->adapter, msgs, 2);
- printk(KERN_DEBUG "wis-sony-tuner: MPX system: %02x%02x\n",
- buf2[0], buf2[1]);
+ dev_dbg(&client->dev, "MPX system: %02x%02x\n",
+ buf2[0], buf2[1]);
buf1[0] = 0x11;
buf1[1] = 0x02;
buf1[2] = 0x00;
i2c_transfer(client->adapter, msgs, 2);
- printk(KERN_DEBUG "wis-sony-tuner: MPX status: %02x%02x\n",
- buf2[0], buf2[1]);
+ dev_dbg(&client->dev, "MPX status: %02x%02x\n",
+ buf2[0], buf2[1]);
}
#endif
return 0;
@@ -375,8 +375,7 @@ static int set_if(struct i2c_client *client)
t->mpxmode = force_mpx_mode;
else
t->mpxmode = default_mpx_mode;
- printk(KERN_DEBUG "wis-sony-tuner: setting MPX to mode %d\n",
- t->mpxmode);
+ dev_dbg(&client->dev, "setting MPX to mode %d\n", t->mpxmode);
mpx_setup(client);
return 0;
@@ -401,8 +400,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
if (t->type >= 0) {
if (t->type != *type)
- printk(KERN_ERR "wis-sony-tuner: type already "
- "set to %d, ignoring request for %d\n",
+ dev_err(&client->dev,
+ "type already set to %d, ignoring request for %d\n",
t->type, *type);
break;
}
@@ -414,28 +413,28 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
case 'B':
case 'g':
case 'G':
- printk(KERN_INFO "wis-sony-tuner: forcing "
- "tuner to PAL-B/G bands\n");
+ dev_info(&client->dev,
+ "forcing tuner to PAL-B/G bands\n");
force_band = V4L2_STD_PAL_BG;
break;
case 'i':
case 'I':
- printk(KERN_INFO "wis-sony-tuner: forcing "
- "tuner to PAL-I band\n");
+ dev_info(&client->dev,
+ "forcing tuner to PAL-I band\n");
force_band = V4L2_STD_PAL_I;
break;
case 'd':
case 'D':
case 'k':
case 'K':
- printk(KERN_INFO "wis-sony-tuner: forcing "
- "tuner to PAL-D/K bands\n");
+ dev_info(&client->dev,
+ "forcing tuner to PAL-D/K bands\n");
force_band = V4L2_STD_PAL_I;
break;
case 'l':
case 'L':
- printk(KERN_INFO "wis-sony-tuner: forcing "
- "tuner to SECAM-L band\n");
+ dev_info(&client->dev,
+ "forcing tuner to SECAM-L band\n");
force_band = V4L2_STD_SECAM_L;
break;
default:
@@ -455,14 +454,15 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
t->std = V4L2_STD_NTSC_M;
break;
default:
- printk(KERN_ERR "wis-sony-tuner: tuner type %d is not "
- "supported by this module\n", *type);
+ dev_err(&client->dev,
+ "tuner type %d is not supported by this module\n",
+ *type);
break;
}
if (type >= 0)
- printk(KERN_INFO
- "wis-sony-tuner: type set to %d (%s)\n",
- t->type, sony_tuners[t->type - 200].name);
+ dev_info(&client->dev,
+ "type set to %d (%s)\n",
+ t->type, sony_tuners[t->type - 200].name);
break;
}
#endif
@@ -544,9 +544,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
if (force_band && (*std & force_band) != *std &&
*std != V4L2_STD_PAL &&
*std != V4L2_STD_SECAM) {
- printk(KERN_DEBUG "wis-sony-tuner: ignoring "
- "requested TV standard in "
- "favor of force_band value\n");
+ dev_dbg(&client->dev,
+ "ignoring requested TV standard in favor of force_band value\n");
t->std = force_band;
} else if (*std & V4L2_STD_PAL_BG) { /* default */
t->std = V4L2_STD_PAL_BG;
@@ -557,8 +556,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
} else if (*std & V4L2_STD_SECAM_L) {
t->std = V4L2_STD_SECAM_L;
} else {
- printk(KERN_ERR "wis-sony-tuner: TV standard "
- "not supported\n");
+ dev_err(&client->dev,
+ "TV standard not supported\n");
*std = 0; /* hack to indicate EINVAL */
break;
}
@@ -567,15 +566,15 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
break;
case TUNER_SONY_BTF_PK467Z:
if (!(*std & V4L2_STD_NTSC_M_JP)) {
- printk(KERN_ERR "wis-sony-tuner: TV standard "
- "not supported\n");
+ dev_err(&client->dev,
+ "TV standard not supported\n");
*std = 0; /* hack to indicate EINVAL */
}
break;
case TUNER_SONY_BTF_PB463Z:
if (!(*std & V4L2_STD_NTSC_M)) {
- printk(KERN_ERR "wis-sony-tuner: TV standard "
- "not supported\n");
+ dev_err(&client->dev,
+ "TV standard not supported\n");
*std = 0; /* hack to indicate EINVAL */
}
break;
@@ -673,8 +672,7 @@ static int wis_sony_tuner_probe(struct i2c_client *client,
t->audmode = V4L2_TUNER_MODE_STEREO;
i2c_set_clientdata(client, t);
- printk(KERN_DEBUG
- "wis-sony-tuner: initializing tuner at address %d on %s\n",
+ dev_dbg(&client->dev, "initializing tuner at address %d on %s\n",
client->addr, adapter->name);
return 0;
diff --git a/drivers/staging/media/go7007/wis-tw2804.c b/drivers/staging/media/go7007/wis-tw2804.c
index d6410ee01be8..290fd8c7bfef 100644
--- a/drivers/staging/media/go7007/wis-tw2804.c
+++ b/drivers/staging/media/go7007/wis-tw2804.c
@@ -128,30 +128,32 @@ static int wis_tw2804_command(struct i2c_client *client,
int *input = arg;
if (*input < 0 || *input > 3) {
- printk(KERN_ERR "wis-tw2804: channel %d is not "
- "between 0 and 3!\n", *input);
+ dev_err(&client->dev,
+ "channel %d is not between 0 and 3!\n", *input);
return 0;
}
dec->channel = *input;
- printk(KERN_DEBUG "wis-tw2804: initializing TW2804 "
- "channel %d\n", dec->channel);
+ dev_dbg(&client->dev, "initializing TW2804 channel %d\n",
+ dec->channel);
if (dec->channel == 0 &&
write_regs(client, global_registers, 0) < 0) {
- printk(KERN_ERR "wis-tw2804: error initializing "
- "TW2804 global registers\n");
+ dev_err(&client->dev,
+ "error initializing TW2804 global registers\n");
return 0;
}
if (write_regs(client, channel_registers, dec->channel) < 0) {
- printk(KERN_ERR "wis-tw2804: error initializing "
- "TW2804 channel %d\n", dec->channel);
+ dev_err(&client->dev,
+ "error initializing TW2804 channel %d\n",
+ dec->channel);
return 0;
}
return 0;
}
if (dec->channel < 0) {
- printk(KERN_DEBUG "wis-tw2804: ignoring command %08x until "
- "channel number is set\n", cmd);
+ dev_dbg(&client->dev,
+ "ignoring command %08x until channel number is set\n",
+ cmd);
return 0;
}
@@ -311,7 +313,7 @@ static int wis_tw2804_probe(struct i2c_client *client,
dec->hue = 128;
i2c_set_clientdata(client, dec);
- printk(KERN_DEBUG "wis-tw2804: creating TW2804 at address %d on %s\n",
+ dev_dbg(&client->dev, "creating TW2804 at address %d on %s\n",
client->addr, adapter->name);
return 0;
diff --git a/drivers/staging/media/go7007/wis-tw9903.c b/drivers/staging/media/go7007/wis-tw9903.c
index 94071def3bb4..684ca37f0382 100644
--- a/drivers/staging/media/go7007/wis-tw9903.c
+++ b/drivers/staging/media/go7007/wis-tw9903.c
@@ -31,8 +31,7 @@ struct wis_tw9903 {
int hue;
};
-static u8 initial_registers[] =
-{
+static u8 initial_registers[] = {
0x02, 0x44, /* input 1, composite */
0x03, 0x92, /* correct digital format */
0x04, 0x00,
@@ -128,8 +127,8 @@ static int wis_tw9903_command(struct i2c_client *client,
0x06, 0xc0, /* reset device */
0, 0,
};
- printk(KERN_DEBUG "vscale is %04x, hscale is %04x\n",
- vscale, hscale);
+ dev_dbg(&client->dev, "vscale is %04x, hscale is %04x\n",
+ vscale, hscale);
/*write_regs(client, regs);*/
break;
}
@@ -288,12 +287,11 @@ static int wis_tw9903_probe(struct i2c_client *client,
dec->hue = 0;
i2c_set_clientdata(client, dec);
- printk(KERN_DEBUG
- "wis-tw9903: initializing TW9903 at address %d on %s\n",
+ dev_dbg(&client->dev, "initializing TW9903 at address %d on %s\n",
client->addr, adapter->name);
if (write_regs(client, initial_registers) < 0) {
- printk(KERN_ERR "wis-tw9903: error initializing TW9903\n");
+ dev_err(&client->dev, "error initializing TW9903\n");
kfree(dec);
return -ENODEV;
}
diff --git a/drivers/staging/media/go7007/wis-uda1342.c b/drivers/staging/media/go7007/wis-uda1342.c
index 05ac798f35f7..582ea120a531 100644
--- a/drivers/staging/media/go7007/wis-uda1342.c
+++ b/drivers/staging/media/go7007/wis-uda1342.c
@@ -47,8 +47,8 @@ static int wis_uda1342_command(struct i2c_client *client,
write_reg(client, 0x00, 0x1241); /* select input 1 */
break;
default:
- printk(KERN_ERR "wis-uda1342: input %d not supported\n",
- *inp);
+ dev_err(&client->dev, "input %d not supported\n",
+ *inp);
break;
}
break;
@@ -67,8 +67,7 @@ static int wis_uda1342_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
- printk(KERN_DEBUG
- "wis-uda1342: initializing UDA1342 at address %d on %s\n",
+ dev_dbg(&client->dev, "initializing UDA1342 at address %d on %s\n",
client->addr, adapter->name);
write_reg(client, 0x00, 0x8000); /* reset registers */
diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
index 951007a3fc96..fa31ee7dd6a9 100644
--- a/drivers/staging/media/lirc/lirc_bt829.c
+++ b/drivers/staging/media/lirc/lirc_bt829.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/threads.h>
@@ -72,20 +74,19 @@ static struct pci_dev *do_pci_probe(void)
my_dev = pci_get_device(PCI_VENDOR_ID_ATI,
PCI_DEVICE_ID_ATI_264VT, NULL);
if (my_dev) {
- printk(KERN_ERR DRIVER_NAME ": Using device: %s\n",
- pci_name(my_dev));
+ pr_err("Using device: %s\n", pci_name(my_dev));
pci_addr_phys = 0;
if (my_dev->resource[0].flags & IORESOURCE_MEM) {
pci_addr_phys = my_dev->resource[0].start;
- printk(KERN_INFO DRIVER_NAME ": memory at 0x%08X\n",
+ pr_info("memory at 0x%08X\n",
(unsigned int)pci_addr_phys);
}
if (pci_addr_phys == 0) {
- printk(KERN_ERR DRIVER_NAME ": no memory resource ?\n");
+ pr_err("no memory resource ?\n");
return NULL;
}
} else {
- printk(KERN_ERR DRIVER_NAME ": pci_probe failed\n");
+ pr_err("pci_probe failed\n");
return NULL;
}
return my_dev;
@@ -140,7 +141,7 @@ int init_module(void)
atir_minor = lirc_register_driver(&atir_driver);
if (atir_minor < 0) {
- printk(KERN_ERR DRIVER_NAME ": failed to register driver!\n");
+ pr_err("failed to register driver!\n");
return atir_minor;
}
dprintk("driver is registered on minor %d\n", atir_minor);
@@ -159,7 +160,7 @@ static int atir_init_start(void)
{
pci_addr_lin = ioremap(pci_addr_phys + DATA_PCI_OFF, 0x400);
if (pci_addr_lin == 0) {
- printk(KERN_INFO DRIVER_NAME ": pci mem must be mapped\n");
+ pr_info("pci mem must be mapped\n");
return 0;
}
return 1;
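
lirc_bt829.c gains a pr_fmt() definition so the shorter pr_err()/pr_info() helpers can replace printk(KERN_ERR DRIVER_NAME ": " ...) while keeping the module-name prefix. The define must come before the first include that pulls in printk.h. A minimal, self-contained sketch of the pattern (module and message names are illustrative):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>
	#include <linux/module.h>

	static int __init example_init(void)
	{
		pr_info("driver loaded\n");	/* logs "<modname>: driver loaded" */
		return 0;
	}

	static void __exit example_exit(void)
	{
		pr_info("driver unloaded\n");
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");
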
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index 939a801c23e4..2faa391006db 100644
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
@@ -223,8 +223,8 @@ static int unregister_from_lirc(struct igorplug *ir)
int devnum;
if (!ir) {
- printk(KERN_ERR "%s: called with NULL device struct!\n",
- __func__);
+ pr_err("%s: called with NULL device struct!\n",
+ __func__);
return -EINVAL;
}
@@ -232,8 +232,8 @@ static int unregister_from_lirc(struct igorplug *ir)
d = ir->d;
if (!d) {
- printk(KERN_ERR "%s: called with NULL lirc driver struct!\n",
- __func__);
+ dev_err(&ir->usbdev->dev,
+ "%s: called with NULL lirc driver struct!\n", __func__);
return -EINVAL;
}
@@ -347,8 +347,8 @@ static int igorplugusb_remote_poll(void *data, struct lirc_buffer *buf)
if (ir->buf_in[2] == 0)
send_fragment(ir, buf, DEVICE_HEADERLEN, ret);
else {
- printk(KERN_WARNING DRIVER_NAME
- "[%d]: Device buffer overrun.\n", ir->devnum);
+ dev_warn(&ir->usbdev->dev,
+ "[%d]: Device buffer overrun.\n", ir->devnum);
/* HHHNNNNNNNNNNNOOOOOOOO H = header
<---[2]---> N = newer
<---------ret--------> O = older */
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 2944fde89f44..0a2c45dd4475 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -20,6 +20,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -205,12 +207,12 @@ static void deregister_from_lirc(struct imon_context *context)
retval = lirc_unregister_driver(minor);
if (retval)
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: unable to deregister from lirc(%d)",
- __func__, retval);
+ dev_err(&context->usbdev->dev,
+ ": %s: unable to deregister from lirc(%d)",
+ __func__, retval);
else
- printk(KERN_INFO MOD_NAME ": Deregistered iMON driver "
- "(minor:%d)\n", minor);
+ dev_info(&context->usbdev->dev,
+ "Deregistered iMON driver (minor:%d)\n", minor);
}
@@ -231,8 +233,7 @@ static int display_open(struct inode *inode, struct file *file)
subminor = iminor(inode);
interface = usb_find_interface(&imon_driver, subminor);
if (!interface) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: could not find interface for minor %d\n",
+ pr_err("%s: could not find interface for minor %d\n",
__func__, subminor);
retval = -ENODEV;
goto exit;
@@ -282,8 +283,7 @@ static int display_close(struct inode *inode, struct file *file)
context = file->private_data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- "%s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return -ENODEV;
}
@@ -391,8 +391,7 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
context = file->private_data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- "%s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return -ENODEV;
}
@@ -521,8 +520,7 @@ static void ir_close(void *data)
context = (struct imon_context *)data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- "%s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return;
}
@@ -746,7 +744,6 @@ static int imon_probe(struct usb_interface *interface,
context = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
if (!context) {
- dev_err(dev, "%s: kzalloc failed for context\n", __func__);
alloc_status = 1;
goto alloc_status_switch;
}
@@ -828,13 +825,11 @@ static int imon_probe(struct usb_interface *interface,
driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
if (!driver) {
- dev_err(dev, "%s: kzalloc failed for lirc_driver\n", __func__);
alloc_status = 2;
goto alloc_status_switch;
}
rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
if (!rbuf) {
- dev_err(dev, "%s: kmalloc failed for lirc_buffer\n", __func__);
alloc_status = 3;
goto alloc_status_switch;
}
@@ -1009,8 +1004,8 @@ static void imon_disconnect(struct usb_interface *interface)
mutex_unlock(&driver_lock);
- printk(KERN_INFO "%s: iMON device (intf%d) disconnected\n",
- __func__, ifnum);
+ dev_info(&interface->dev, "%s: iMON device (intf%d) disconnected\n",
+ __func__, ifnum);
}
static int imon_suspend(struct usb_interface *intf, pm_message_t message)
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index ec14bc81851b..41d110f8bc02 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -22,6 +22,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
/*** Includes ***/
#include <linux/module.h>
@@ -115,8 +117,7 @@ static void out(int offset, int value)
parport_write_control(pport, value);
break;
case LIRC_LP_STATUS:
- printk(KERN_INFO "%s: attempt to write to status register\n",
- LIRC_DRIVER_NAME);
+ pr_info("attempt to write to status register\n");
break;
}
}
@@ -166,27 +167,23 @@ static unsigned int init_lirc_timer(void)
if (default_timer == 0) {
/* autodetect timer */
newtimer = (1000000*count)/timeelapsed;
- printk(KERN_INFO "%s: %u Hz timer detected\n",
- LIRC_DRIVER_NAME, newtimer);
+ pr_info("%u Hz timer detected\n", newtimer);
return newtimer;
} else {
newtimer = (1000000*count)/timeelapsed;
if (abs(newtimer - default_timer) > default_timer/10) {
/* bad timer */
- printk(KERN_NOTICE "%s: bad timer: %u Hz\n",
- LIRC_DRIVER_NAME, newtimer);
- printk(KERN_NOTICE "%s: using default timer: "
- "%u Hz\n",
- LIRC_DRIVER_NAME, default_timer);
+ pr_notice("bad timer: %u Hz\n", newtimer);
+ pr_notice("using default timer: %u Hz\n",
+ default_timer);
return default_timer;
} else {
- printk(KERN_INFO "%s: %u Hz timer detected\n",
- LIRC_DRIVER_NAME, newtimer);
+ pr_info("%u Hz timer detected\n", newtimer);
return newtimer; /* use detected value */
}
}
} else {
- printk(KERN_NOTICE "%s: no timer detected\n", LIRC_DRIVER_NAME);
+ pr_notice("no timer detected\n");
return 0;
}
}
@@ -194,13 +191,10 @@ static unsigned int init_lirc_timer(void)
static int lirc_claim(void)
{
if (parport_claim(ppdevice) != 0) {
- printk(KERN_WARNING "%s: could not claim port\n",
- LIRC_DRIVER_NAME);
- printk(KERN_WARNING "%s: waiting for port becoming available"
- "\n", LIRC_DRIVER_NAME);
+ pr_warn("could not claim port\n");
+ pr_warn("waiting for port becoming available\n");
if (parport_claim_or_block(ppdevice) < 0) {
- printk(KERN_NOTICE "%s: could not claim port, giving"
- " up\n", LIRC_DRIVER_NAME);
+ pr_notice("could not claim port, giving up\n");
return 0;
}
}
@@ -219,7 +213,7 @@ static void rbuf_write(int signal)
if (nwptr == rptr) {
/* no new signals will be accepted */
lost_irqs++;
- printk(KERN_NOTICE "%s: buffer overrun\n", LIRC_DRIVER_NAME);
+ pr_notice("buffer overrun\n");
return;
}
rbuf[wptr] = signal;
@@ -290,7 +284,7 @@ static void irq_handler(void *blah)
if (signal > timeout
|| (check_pselecd && (in(1) & LP_PSELECD))) {
signal = 0;
- printk(KERN_NOTICE "%s: timeout\n", LIRC_DRIVER_NAME);
+ pr_notice("timeout\n");
break;
}
} while (lirc_get_signal());
@@ -644,8 +638,7 @@ static int __init lirc_parallel_init(void)
result = platform_driver_register(&lirc_parallel_driver);
if (result) {
- printk(KERN_NOTICE "platform_driver_register"
- " returned %d\n", result);
+ pr_notice("platform_driver_register returned %d\n", result);
return result;
}
@@ -661,8 +654,7 @@ static int __init lirc_parallel_init(void)
pport = parport_find_base(io);
if (pport == NULL) {
- printk(KERN_NOTICE "%s: no port at %x found\n",
- LIRC_DRIVER_NAME, io);
+ pr_notice("no port at %x found\n", io);
result = -ENXIO;
goto exit_device_put;
}
@@ -670,8 +662,7 @@ static int __init lirc_parallel_init(void)
pf, kf, irq_handler, 0, NULL);
parport_put_port(pport);
if (ppdevice == NULL) {
- printk(KERN_NOTICE "%s: parport_register_device() failed\n",
- LIRC_DRIVER_NAME);
+ pr_notice("parport_register_device() failed\n");
result = -ENXIO;
goto exit_device_put;
}
@@ -706,14 +697,12 @@ static int __init lirc_parallel_init(void)
driver.dev = &lirc_parallel_dev->dev;
driver.minor = lirc_register_driver(&driver);
if (driver.minor < 0) {
- printk(KERN_NOTICE "%s: register_chrdev() failed\n",
- LIRC_DRIVER_NAME);
+ pr_notice("register_chrdev() failed\n");
parport_unregister_device(ppdevice);
result = -EIO;
goto exit_device_put;
}
- printk(KERN_INFO "%s: installed using port 0x%04x irq %d\n",
- LIRC_DRIVER_NAME, io, irq);
+ pr_info("installed using port 0x%04x irq %d\n", io, irq);
return 0;
exit_device_put:
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index f4e4d9003f38..68acca74ddb1 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -34,6 +34,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -171,7 +173,7 @@ static void delete_context(struct sasem_context *context)
kfree(context);
if (debug)
- printk(KERN_INFO "%s: context deleted\n", __func__);
+ pr_info("%s: context deleted\n", __func__);
}
static void deregister_from_lirc(struct sasem_context *context)
@@ -181,11 +183,10 @@ static void deregister_from_lirc(struct sasem_context *context)
retval = lirc_unregister_driver(minor);
if (retval)
- printk(KERN_ERR "%s: unable to deregister from lirc (%d)\n",
- __func__, retval);
+ pr_err("%s: unable to deregister from lirc (%d)\n",
+ __func__, retval);
else
- printk(KERN_INFO "Deregistered Sasem driver (minor:%d)\n",
- minor);
+ pr_info("Deregistered Sasem driver (minor:%d)\n", minor);
}
@@ -206,8 +207,7 @@ static int vfd_open(struct inode *inode, struct file *file)
subminor = iminor(inode);
interface = usb_find_interface(&sasem_driver, subminor);
if (!interface) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: could not find interface for minor %d\n",
+ pr_err("%s: could not find interface for minor %d\n",
__func__, subminor);
retval = -ENODEV;
goto exit;
@@ -252,8 +252,7 @@ static long vfd_ioctl(struct file *file, unsigned cmd, unsigned long arg)
context = (struct sasem_context *) file->private_data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return -ENODEV;
}
@@ -266,7 +265,7 @@ static long vfd_ioctl(struct file *file, unsigned cmd, unsigned long arg)
context->vfd_contrast = (unsigned int)arg;
break;
default:
- printk(KERN_INFO "Unknown IOCTL command\n");
+ pr_info("Unknown IOCTL command\n");
mutex_unlock(&context->ctx_lock);
return -ENOIOCTLCMD; /* not supported */
}
@@ -287,8 +286,7 @@ static int vfd_close(struct inode *inode, struct file *file)
context = (struct sasem_context *) file->private_data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return -ENODEV;
}
@@ -299,7 +297,7 @@ static int vfd_close(struct inode *inode, struct file *file)
retval = -EIO;
} else {
context->vfd_isopen = 0;
- printk(KERN_INFO "VFD port closed\n");
+ dev_info(&context->dev->dev, "VFD port closed\n");
if (!context->dev_present && !context->ir_isopen) {
/* Device disconnected before close and IR port is
@@ -373,16 +371,14 @@ static ssize_t vfd_write(struct file *file, const char *buf,
context = (struct sasem_context *) file->private_data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return -ENODEV;
}
mutex_lock(&context->ctx_lock);
if (!context->dev_present) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: no Sasem device present\n", __func__);
+ pr_err("%s: no Sasem device present\n", __func__);
retval = -ENODEV;
goto exit;
}
@@ -519,7 +515,7 @@ static int ir_open(void *data)
__func__, retval);
else {
context->ir_isopen = 1;
- printk(KERN_INFO "IR port opened\n");
+ dev_info(&context->dev->dev, "IR port opened\n");
}
exit:
@@ -538,8 +534,7 @@ static void ir_close(void *data)
context = (struct sasem_context *)data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return;
}
@@ -547,7 +542,7 @@ static void ir_close(void *data)
usb_kill_urb(context->rx_urb);
context->ir_isopen = 0;
- printk(KERN_INFO "IR port closed\n");
+ pr_info("IR port closed\n");
if (!context->dev_present) {
@@ -584,8 +579,9 @@ static void incoming_packet(struct sasem_context *context,
int i;
if (len != 8) {
- printk(KERN_WARNING "%s: invalid incoming packet size (%d)\n",
- __func__, len);
+ dev_warn(&context->dev->dev,
+ "%s: invalid incoming packet size (%d)\n",
+ __func__, len);
return;
}
@@ -663,7 +659,7 @@ static void usb_rx_callback(struct urb *urb)
break;
default:
- printk(KERN_WARNING "%s: status (%d): ignored",
+ dev_warn(&urb->dev->dev, "%s: status (%d): ignored\n",
__func__, urb->status);
break;
}
@@ -763,22 +759,16 @@ static int sasem_probe(struct usb_interface *interface,
context = kzalloc(sizeof(struct sasem_context), GFP_KERNEL);
if (!context) {
- dev_err(&interface->dev,
- "%s: kzalloc failed for context\n", __func__);
alloc_status = 1;
goto alloc_status_switch;
}
driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
if (!driver) {
- dev_err(&interface->dev,
- "%s: kzalloc failed for lirc_driver\n", __func__);
alloc_status = 2;
goto alloc_status_switch;
}
rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
if (!rbuf) {
- dev_err(&interface->dev,
- "%s: kmalloc failed for lirc_buffer\n", __func__);
alloc_status = 3;
goto alloc_status_switch;
}
@@ -830,8 +820,9 @@ static int sasem_probe(struct usb_interface *interface,
retval = lirc_minor;
goto unlock;
} else
- printk(KERN_INFO "%s: Registered Sasem driver (minor:%d)\n",
- __func__, lirc_minor);
+ dev_info(&interface->dev,
+ "%s: Registered Sasem driver (minor:%d)\n",
+ __func__, lirc_minor);
/* Needed while unregistering! */
driver->minor = lirc_minor;
@@ -852,15 +843,18 @@ static int sasem_probe(struct usb_interface *interface,
if (vfd_ep_found) {
if (debug)
- printk(KERN_INFO "Registering VFD with sysfs\n");
+ dev_info(&interface->dev,
+ "Registering VFD with sysfs\n");
if (usb_register_dev(interface, &sasem_class))
/* Not a fatal error, so ignore */
- printk(KERN_INFO "%s: could not get a minor number "
- "for VFD\n", __func__);
+ dev_info(&interface->dev,
+ "%s: could not get a minor number for VFD\n",
+ __func__);
}
- printk(KERN_INFO "%s: Sasem device on usb<%d:%d> initialized\n",
- __func__, dev->bus->busnum, dev->devnum);
+ dev_info(&interface->dev,
+ "%s: Sasem device on usb<%d:%d> initialized\n",
+ __func__, dev->bus->busnum, dev->devnum);
unlock:
mutex_unlock(&context->ctx_lock);
@@ -891,7 +885,7 @@ exit:
}
/**
- * Callback function for USB core API: disonnect
+ * Callback function for USB core API: disconnect
*/
static void sasem_disconnect(struct usb_interface *interface)
{
@@ -903,7 +897,8 @@ static void sasem_disconnect(struct usb_interface *interface)
context = usb_get_intfdata(interface);
mutex_lock(&context->ctx_lock);
- printk(KERN_INFO "%s: Sasem device disconnected\n", __func__);
+ dev_info(&interface->dev, "%s: Sasem device disconnected\n",
+ __func__);
usb_set_intfdata(interface, NULL);
context->dev_present = 0;
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index b5d0088f3102..af08e677b60f 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -48,6 +48,8 @@
* Steve Davies <steve@daviesfam.org> July 2001
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
@@ -667,8 +669,7 @@ static irqreturn_t irq_handler(int i, void *blah)
counter++;
status = sinp(UART_MSR);
if (counter > RS_ISR_PASS_LIMIT) {
- printk(KERN_WARNING LIRC_DRIVER_NAME ": AIEEEE: "
- "We're caught!\n");
+ pr_warn("AIEEEE: We're caught!\n");
break;
}
if ((status & hardware[type].signal_pin_change)
@@ -703,11 +704,10 @@ static irqreturn_t irq_handler(int i, void *blah)
dcd = (status & hardware[type].signal_pin) ? 1 : 0;
if (dcd == last_dcd) {
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": ignoring spike: %d %d %lx %lx %lx %lx\n",
- dcd, sense,
- tv.tv_sec, lasttv.tv_sec,
- tv.tv_usec, lasttv.tv_usec);
+ pr_warn("ignoring spike: %d %d %lx %lx %lx %lx\n",
+ dcd, sense,
+ tv.tv_sec, lasttv.tv_sec,
+ tv.tv_usec, lasttv.tv_usec);
continue;
}
@@ -715,25 +715,20 @@ static irqreturn_t irq_handler(int i, void *blah)
if (tv.tv_sec < lasttv.tv_sec ||
(tv.tv_sec == lasttv.tv_sec &&
tv.tv_usec < lasttv.tv_usec)) {
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": AIEEEE: your clock just jumped "
- "backwards\n");
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": %d %d %lx %lx %lx %lx\n",
- dcd, sense,
- tv.tv_sec, lasttv.tv_sec,
- tv.tv_usec, lasttv.tv_usec);
+ pr_warn("AIEEEE: your clock just jumped backwards\n");
+ pr_warn("%d %d %lx %lx %lx %lx\n",
+ dcd, sense,
+ tv.tv_sec, lasttv.tv_sec,
+ tv.tv_usec, lasttv.tv_usec);
data = PULSE_MASK;
} else if (deltv > 15) {
data = PULSE_MASK; /* really long time */
if (!(dcd^sense)) {
/* sanity check */
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": AIEEEE: "
- "%d %d %lx %lx %lx %lx\n",
- dcd, sense,
- tv.tv_sec, lasttv.tv_sec,
- tv.tv_usec, lasttv.tv_usec);
+ pr_warn("AIEEEE: %d %d %lx %lx %lx %lx\n",
+ dcd, sense,
+ tv.tv_sec, lasttv.tv_sec,
+ tv.tv_usec, lasttv.tv_usec);
/*
* detecting pulse while this
* MUST be a space!
@@ -776,8 +771,7 @@ static int hardware_init_port(void)
soutp(UART_IER, scratch);
if (scratch2 != 0 || scratch3 != 0x0f) {
/* we fail, there's nothing here */
- printk(KERN_ERR LIRC_DRIVER_NAME ": port existence test "
- "failed, cannot continue\n");
+ pr_err("port existence test failed, cannot continue\n");
return -ENODEV;
}
@@ -850,11 +844,9 @@ static int lirc_serial_probe(struct platform_device *dev)
LIRC_DRIVER_NAME, (void *)&hardware);
if (result < 0) {
if (result == -EBUSY)
- printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n",
- irq);
+ dev_err(&dev->dev, "IRQ %d busy\n", irq);
else if (result == -EINVAL)
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": Bad irq number or handler\n");
+ dev_err(&dev->dev, "Bad irq number or handler\n");
return result;
}
@@ -869,14 +861,11 @@ static int lirc_serial_probe(struct platform_device *dev)
LIRC_DRIVER_NAME) == NULL))
|| ((iommap == 0)
&& (request_region(io, 8, LIRC_DRIVER_NAME) == NULL))) {
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": port %04x already in use\n", io);
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": use 'setserial /dev/ttySX uart none'\n");
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": or compile the serial port driver as module and\n");
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": make sure this module is loaded first\n");
+ dev_err(&dev->dev, "port %04x already in use\n", io);
+ dev_warn(&dev->dev, "use 'setserial /dev/ttySX uart none'\n");
+ dev_warn(&dev->dev,
+ "or compile the serial port driver as module and\n");
+ dev_warn(&dev->dev, "make sure this module is loaded first\n");
result = -EBUSY;
goto exit_free_irq;
}
@@ -907,11 +896,11 @@ static int lirc_serial_probe(struct platform_device *dev)
msleep(40);
}
sense = (nlow >= nhigh ? 1 : 0);
- printk(KERN_INFO LIRC_DRIVER_NAME ": auto-detected active "
- "%s receiver\n", sense ? "low" : "high");
+ dev_info(&dev->dev, "auto-detected active %s receiver\n",
+ sense ? "low" : "high");
} else
- printk(KERN_INFO LIRC_DRIVER_NAME ": Manually using active "
- "%s receiver\n", sense ? "low" : "high");
+ dev_info(&dev->dev, "Manually using active %s receiver\n",
+ sense ? "low" : "high");
dprintk("Interrupt %d, port %04x obtained\n", irq, io);
return 0;
@@ -1251,8 +1240,7 @@ static int __init lirc_serial_init_module(void)
driver.dev = &lirc_serial_dev->dev;
driver.minor = lirc_register_driver(&driver);
if (driver.minor < 0) {
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": register_chrdev failed!\n");
+ pr_err("register_chrdev failed!\n");
lirc_serial_exit();
return driver.minor;
}
diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
index a45799874a21..63a554c36f75 100644
--- a/drivers/staging/media/lirc/lirc_sir.c
+++ b/drivers/staging/media/lirc/lirc_sir.c
@@ -33,6 +33,8 @@
* parts cut'n'pasted from sa1100_ir.c (C) 2000 Russell King
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
@@ -495,7 +497,7 @@ static int init_chrdev(void)
driver.dev = &lirc_sir_dev->dev;
driver.minor = lirc_register_driver(&driver);
if (driver.minor < 0) {
- printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n");
+ pr_err("init_chrdev() failed.\n");
return -EIO;
}
return 0;
@@ -604,7 +606,7 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id)
}
if (status & UTSR0_TFS)
- printk(KERN_ERR "transmit fifo not full, shouldn't happen\n");
+ pr_err("transmit fifo not full, shouldn't happen\n");
/* We must clear certain bits. */
status &= (UTSR0_RID | UTSR0_RBB | UTSR0_REB);
@@ -787,7 +789,7 @@ static int init_hardware(void)
#ifdef LIRC_ON_SA1100
#ifdef CONFIG_SA1100_BITSY
if (machine_is_bitsy()) {
- printk(KERN_INFO "Power on IR module\n");
+ pr_info("Power on IR module\n");
set_bitsy_egpio(EGPIO_BITSY_IR_ON);
}
#endif
@@ -885,8 +887,7 @@ static int init_hardware(void)
udelay(1500);
/* read previous control byte */
- printk(KERN_INFO LIRC_DRIVER_NAME
- ": 0x%02x\n", sinp(UART_RX));
+ pr_info("0x%02x\n", sinp(UART_RX));
/* Set DLAB 1. */
soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
@@ -964,8 +965,7 @@ static int init_port(void)
/* get I/O port access and IRQ line */
#ifndef LIRC_ON_SA1100
if (request_region(io, 8, LIRC_DRIVER_NAME) == NULL) {
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": i/o port 0x%.4x already in use.\n", io);
+ pr_err("i/o port 0x%.4x already in use.\n", io);
return -EBUSY;
}
#endif
@@ -975,15 +975,11 @@ static int init_port(void)
# ifndef LIRC_ON_SA1100
release_region(io, 8);
# endif
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": IRQ %d already in use.\n",
- irq);
+ pr_err("IRQ %d already in use.\n", irq);
return retval;
}
#ifndef LIRC_ON_SA1100
- printk(KERN_INFO LIRC_DRIVER_NAME
- ": I/O port 0x%.4x, IRQ %d.\n",
- io, irq);
+ pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
#endif
init_timer(&timerlist);
@@ -1213,8 +1209,7 @@ static int init_lirc_sir(void)
if (retval < 0)
return retval;
init_hardware();
- printk(KERN_INFO LIRC_DRIVER_NAME
- ": Installed.\n");
+ pr_info("Installed.\n");
return 0;
}
@@ -1243,23 +1238,20 @@ static int __init lirc_sir_init(void)
retval = platform_driver_register(&lirc_sir_driver);
if (retval) {
- printk(KERN_ERR LIRC_DRIVER_NAME ": Platform driver register "
- "failed!\n");
+ pr_err("Platform driver register failed!\n");
return -ENODEV;
}
lirc_sir_dev = platform_device_alloc("lirc_dev", 0);
if (!lirc_sir_dev) {
- printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device alloc "
- "failed!\n");
+ pr_err("Platform device alloc failed!\n");
retval = -ENOMEM;
goto pdev_alloc_fail;
}
retval = platform_device_add(lirc_sir_dev);
if (retval) {
- printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device add "
- "failed!\n");
+ pr_err("Platform device add failed!\n");
retval = -ENODEV;
goto pdev_add_fail;
}
@@ -1292,7 +1284,7 @@ static void __exit lirc_sir_exit(void)
drop_port();
platform_device_unregister(lirc_sir_dev);
platform_driver_unregister(&lirc_sir_driver);
- printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n");
+ pr_info("Uninstalled.\n");
}
module_init(lirc_sir_init);
diff --git a/drivers/staging/media/solo6x10/p2m.c b/drivers/staging/media/solo6x10/p2m.c
index 56210f0fc5ec..58ab61b1f1d9 100644
--- a/drivers/staging/media/solo6x10/p2m.c
+++ b/drivers/staging/media/solo6x10/p2m.c
@@ -231,15 +231,15 @@ static void run_p2m_test(struct solo_dev *solo_dev)
u32 size = SOLO_JPEG_EXT_ADDR(solo_dev) + SOLO_JPEG_EXT_SIZE(solo_dev);
int i, d;
- printk(KERN_WARNING "%s: Testing %u bytes of external ram\n",
- SOLO6X10_NAME, size);
+ dev_warn(&solo_dev->pdev->dev, "Testing %u bytes of external ram\n",
+ size);
for (i = 0; i < size; i += TEST_CHUNK_SIZE)
for (d = 0; d < 4; d++)
errs += p2m_test(solo_dev, d, i, TEST_CHUNK_SIZE);
- printk(KERN_WARNING "%s: Found %llu errors during p2m test\n",
- SOLO6X10_NAME, errs);
+ dev_warn(&solo_dev->pdev->dev, "Found %llu errors during p2m test\n",
+ errs);
return;
}
diff --git a/drivers/staging/media/solo6x10/v4l2-enc.c b/drivers/staging/media/solo6x10/v4l2-enc.c
index f8f0da952288..4977e869d5b7 100644
--- a/drivers/staging/media/solo6x10/v4l2-enc.c
+++ b/drivers/staging/media/solo6x10/v4l2-enc.c
@@ -1619,6 +1619,8 @@ static int solo_s_ext_ctrls(struct file *file, void *priv,
solo_enc->osd_text[OSD_TEXT_MAX] = '\0';
if (!err)
err = solo_osd_print(solo_enc);
+ else
+ err = -EFAULT;
}
break;
default:
@@ -1654,6 +1656,8 @@ static int solo_g_ext_ctrls(struct file *file, void *priv,
err = copy_to_user(ctrl->string,
solo_enc->osd_text,
OSD_TEXT_MAX);
+ if (err)
+ err = -EFAULT;
}
break;
default:
diff --git a/drivers/staging/media/solo6x10/v4l2.c b/drivers/staging/media/solo6x10/v4l2.c
index 571c3a348d30..ca774cc57539 100644
--- a/drivers/staging/media/solo6x10/v4l2.c
+++ b/drivers/staging/media/solo6x10/v4l2.c
@@ -415,10 +415,7 @@ static int solo_start_thread(struct solo_filehandle *fh)
{
fh->kthread = kthread_run(solo_thread, fh, SOLO6X10_NAME "_disp");
- if (IS_ERR(fh->kthread))
- return PTR_ERR(fh->kthread);
-
- return 0;
+ return PTR_RET(fh->kthread);
}
static void solo_stop_thread(struct solo_filehandle *fh)
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO
index f950ab890e2e..e5ae42a0b44a 100644
--- a/drivers/staging/nvec/TODO
+++ b/drivers/staging/nvec/TODO
@@ -1,9 +1,5 @@
ToDo list (incomplete, unordered)
- add compile as module support
- - fix clk usage
- should not be using clk_get_sys(), but clk_get(&pdev->dev, conn)
- where conn is either NULL if the device only has one clock, or
- the device specific name if it has multiple clocks.
- move half of the nvec init stuff to i2c-tegra.c
- move event handling to nvec_events
- finish suspend/resume support
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 6a0b6eccf1e6..cf159365b0ee 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -37,8 +37,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-
-#include <mach/clk.h>
+#include <linux/clk/tegra.h>
#include "nvec.h"
@@ -816,7 +815,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
return -ENODEV;
}
- i2c_clk = clk_get_sys("tegra-i2c.2", "div-clk");
+ i2c_clk = clk_get(&pdev->dev, "div-clk");
if (IS_ERR(i2c_clk)) {
dev_err(nvec->dev, "failed to get controller clock\n");
return -ENODEV;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 9a2ec251ae4d..c3a90e7012af 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -72,7 +72,7 @@ int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
"\tPOW group to receive packets from. All ethernet hardware\n"
- "\twill be configured to send incomming packets to this POW\n"
+ "\twill be configured to send incoming packets to this POW\n"
"\tgroup. Also any other software can submit packets to this\n"
"\tgroup for the kernel to process.");
diff --git a/drivers/staging/omapdrm/TODO b/drivers/staging/omapdrm/TODO
deleted file mode 100644
index abeeb00aaa12..000000000000
--- a/drivers/staging/omapdrm/TODO
+++ /dev/null
@@ -1,32 +0,0 @@
-TODO
-. add video decode/encode support (via syslink3 + codec-engine)
- . NOTE: with dmabuf this probably could be split into different driver
- so perhaps this TODO doesn't belong here
-. where should we do eviction (detatch_pages())? We aren't necessarily
- accessing the pages via a GART, so maybe we need some other threshold
- to put a cap on the # of pages that can be pin'd. (It is mostly only
- of interest in case you have a swap partition/file.. which a lot of
- these devices do not.. but it doesn't hurt for the driver to do the
- right thing anyways.)
- . Use mm_shrinker to trigger unpinning pages. Need to figure out how
- to handle next issue first (I think?)
- . Note TTM already has some mm_shrinker stuff.. maybe an argument to
- move to TTM? Or maybe something that could be factored out in common?
-. GEM/shmem backed pages can have existing mappings (kernel linear map,
- etc..), which isn't really ideal.
-. Revisit GEM sync object infrastructure.. TTM has some framework for this
- already. Possibly this could be refactored out and made more common?
- There should be some way to do this with less wheel-reinvention.
-. Solve PM sequencing on resume. DMM/TILER must be reloaded before any
- access is made from any component in the system. Which means on suspend
- CRTC's should be disabled, and on resume the LUT should be reprogrammed
- before CRTC's are re-enabled, to prevent DSS from trying to DMA from a
- buffer mapped in DMM/TILER before LUT is reloaded.
-
-Userspace:
-. git://github.com/robclark/xf86-video-omap.git
-
-Currently tested on
-. OMAP3530 beagleboard
-. OMAP4430 pandaboard
-. OMAP4460 pandaboard
diff --git a/drivers/staging/omapdrm/omap_drm.h b/drivers/staging/omapdrm/omap_drm.h
deleted file mode 100644
index f0ac34a8973e..000000000000
--- a/drivers/staging/omapdrm/omap_drm.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * include/drm/omap_drm.h
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Rob Clark <rob@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __OMAP_DRM_H__
-#define __OMAP_DRM_H__
-
-#include <drm/drm.h>
-
-/* Please note that modifications to all structs defined here are
- * subject to backwards-compatibility constraints.
- */
-
-#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */
-
-struct drm_omap_param {
- uint64_t param; /* in */
- uint64_t value; /* in (set_param), out (get_param) */
-};
-
-#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
-#define OMAP_BO_CACHE_MASK 0x00000006 /* cache type mask, see cache modes */
-#define OMAP_BO_TILED_MASK 0x00000f00 /* tiled mapping mask, see tiled modes */
-
-/* cache modes */
-#define OMAP_BO_CACHED 0x00000000 /* default */
-#define OMAP_BO_WC 0x00000002 /* write-combine */
-#define OMAP_BO_UNCACHED 0x00000004 /* strongly-ordered (uncached) */
-
-/* tiled modes */
-#define OMAP_BO_TILED_8 0x00000100
-#define OMAP_BO_TILED_16 0x00000200
-#define OMAP_BO_TILED_32 0x00000300
-#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
-
-union omap_gem_size {
- uint32_t bytes; /* (for non-tiled formats) */
- struct {
- uint16_t width;
- uint16_t height;
- } tiled; /* (for tiled formats) */
-};
-
-struct drm_omap_gem_new {
- union omap_gem_size size; /* in */
- uint32_t flags; /* in */
- uint32_t handle; /* out */
- uint32_t __pad;
-};
-
-/* mask of operations: */
-enum omap_gem_op {
- OMAP_GEM_READ = 0x01,
- OMAP_GEM_WRITE = 0x02,
-};
-
-struct drm_omap_gem_cpu_prep {
- uint32_t handle; /* buffer handle (in) */
- uint32_t op; /* mask of omap_gem_op (in) */
-};
-
-struct drm_omap_gem_cpu_fini {
- uint32_t handle; /* buffer handle (in) */
- uint32_t op; /* mask of omap_gem_op (in) */
- /* TODO maybe here we pass down info about what regions are touched
- * by sw so we can be clever about cache ops? For now a placeholder,
- * set to zero and we just do full buffer flush..
- */
- uint32_t nregions;
- uint32_t __pad;
-};
-
-struct drm_omap_gem_info {
- uint32_t handle; /* buffer handle (in) */
- uint32_t pad;
- uint64_t offset; /* mmap offset (out) */
- /* note: in case of tiled buffers, the user virtual size can be
- * different from the physical size (ie. how many pages are needed
- * to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
- * This size here is the one that should be used if you want to
- * mmap() the buffer:
- */
- uint32_t size; /* virtual size for mmap'ing (out) */
- uint32_t __pad;
-};
-
-#define DRM_OMAP_GET_PARAM 0x00
-#define DRM_OMAP_SET_PARAM 0x01
-/* placeholder for plugin-api
-#define DRM_OMAP_GET_BASE 0x02
-*/
-#define DRM_OMAP_GEM_NEW 0x03
-#define DRM_OMAP_GEM_CPU_PREP 0x04
-#define DRM_OMAP_GEM_CPU_FINI 0x05
-#define DRM_OMAP_GEM_INFO 0x06
-#define DRM_OMAP_NUM_IOCTLS 0x07
-
-#define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
-#define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
-/* placeholder for plugin-api
-#define DRM_IOCTL_OMAP_GET_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base)
-*/
-#define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
-#define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
-#define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
-#define DRM_IOCTL_OMAP_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info)
-
-#endif /* __OMAP_DRM_H__ */
diff --git a/drivers/staging/sb105x/Kconfig b/drivers/staging/sb105x/Kconfig
index 1facad625554..245e7847a354 100644
--- a/drivers/staging/sb105x/Kconfig
+++ b/drivers/staging/sb105x/Kconfig
@@ -1,8 +1,7 @@
config SB105X
tristate "SystemBase PCI Multiport UART"
select SERIAL_CORE
- depends on PCI
- depends on X86
+ depends on PCI && X86 && TTY && BROKEN
help
A driver for the SystemBase Multi-2/PCI serial card
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 1b3e995d3a27..b1bb1a6abe81 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -255,12 +255,11 @@ static void ProcessModemStatus(struct quatech_port *qt_port,
wake_up_interruptible(&qt_port->wait);
}
-static void ProcessRxChar(struct tty_struct *tty, struct usb_serial_port *port,
- unsigned char data)
+static void ProcessRxChar(struct usb_serial_port *port, unsigned char data)
{
struct urb *urb = port->read_urb;
if (urb->actual_length)
- tty_insert_flip_char(tty, data, TTY_NORMAL);
+ tty_insert_flip_char(&port->port, data, TTY_NORMAL);
}
static void qt_write_bulk_callback(struct urb *urb)
@@ -291,8 +290,7 @@ static void qt_interrupt_callback(struct urb *urb)
/* FIXME */
}
-static void qt_status_change_check(struct tty_struct *tty,
- struct urb *urb,
+static void qt_status_change_check(struct urb *urb,
struct quatech_port *qt_port,
struct usb_serial_port *port)
{
@@ -335,8 +333,8 @@ static void qt_status_change_check(struct tty_struct *tty,
case 0xff:
dev_dbg(&port->dev, "No status sequence.\n");
- ProcessRxChar(tty, port, data[i]);
- ProcessRxChar(tty, port, data[i + 1]);
+ ProcessRxChar(port, data[i]);
+ ProcessRxChar(port, data[i + 1]);
i += 2;
break;
@@ -345,11 +343,11 @@ static void qt_status_change_check(struct tty_struct *tty,
continue;
}
- if (tty && urb->actual_length)
- tty_insert_flip_char(tty, data[i], TTY_NORMAL);
+ if (urb->actual_length)
+ tty_insert_flip_char(&port->port, data[i], TTY_NORMAL);
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
static void qt_read_bulk_callback(struct urb *urb)
@@ -358,7 +356,6 @@ static void qt_read_bulk_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port = qt_get_port_private(port);
- struct tty_struct *tty;
int result;
if (urb->status) {
@@ -369,27 +366,23 @@ static void qt_read_bulk_callback(struct urb *urb)
return;
}
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
dev_dbg(&port->dev,
"%s - port->RxHolding = %d\n", __func__, qt_port->RxHolding);
if (port_paranoia_check(port, __func__) != 0) {
qt_port->ReadBulkStopped = 1;
- goto exit;
+ return;
}
if (!serial)
- goto exit;
+ return;
if (qt_port->closePending == 1) {
/* Were closing , stop reading */
dev_dbg(&port->dev,
"%s - (qt_port->closepending == 1\n", __func__);
qt_port->ReadBulkStopped = 1;
- goto exit;
+ return;
}
/*
@@ -399,7 +392,7 @@ static void qt_read_bulk_callback(struct urb *urb)
*/
if (qt_port->RxHolding == 1) {
qt_port->ReadBulkStopped = 1;
- goto exit;
+ return;
}
if (urb->status) {
@@ -408,11 +401,11 @@ static void qt_read_bulk_callback(struct urb *urb)
dev_dbg(&port->dev,
"%s - nonzero read bulk status received: %d\n",
__func__, urb->status);
- goto exit;
+ return;
}
if (urb->actual_length)
- qt_status_change_check(tty, urb, qt_port, port);
+ qt_status_change_check(urb, qt_port, port);
/* Continue trying to always read */
usb_fill_bulk_urb(port->read_urb, serial->dev,
@@ -428,14 +421,12 @@ static void qt_read_bulk_callback(struct urb *urb)
__func__, result);
else {
if (urb->actual_length) {
- tty_flip_buffer_push(tty);
- tty_schedule_flip(tty);
+ tty_flip_buffer_push(&port->port);
+ tty_schedule_flip(&port->port);
}
}
schedule_work(&port->work);
-exit:
- tty_kref_put(tty);
}
/*
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 822ac3d24788..775af26b9914 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -2,6 +2,7 @@
#include <linux/consolemap.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/device.h> /* for dev_warn */
#include <linux/selection.h>
#include "speakup.h"
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 0dd479f5638d..60848f198b48 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -4,7 +4,7 @@
menuconfig TIDSPBRIDGE
tristate "DSP Bridge driver"
- depends on ARCH_OMAP3
+ depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM
select OMAP_MBOX_FWK
help
DSP/BIOS Bridge is designed for platforms that contain a GPP and
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 75aa5bfcb8dd..539fa5785afe 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -411,7 +411,7 @@ struct socket *sockfd_to_socket(unsigned int sockfd)
return NULL;
}
- inode = file->f_dentry->d_inode;
+ inode = file_inode(file);
if (!inode || !S_ISSOCK(inode->i_mode)) {
fput(file);
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 57474cff51f0..d074b1ecb41a 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -318,7 +318,7 @@ static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
- unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ unsigned int minor = MINOR(file_inode(file)->i_rdev);
ssize_t retval;
size_t image_size;
size_t okcount;
@@ -364,7 +364,7 @@ static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
static ssize_t vme_user_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ unsigned int minor = MINOR(file_inode(file)->i_rdev);
ssize_t retval;
size_t image_size;
size_t okcount;
@@ -410,7 +410,7 @@ static ssize_t vme_user_write(struct file *file, const char __user *buf,
static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
loff_t absolute = -1;
- unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ unsigned int minor = MINOR(file_inode(file)->i_rdev);
size_t image_size;
if (minor == CONTROL_MINOR)
@@ -583,7 +583,7 @@ vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int ret;
mutex_lock(&vme_user_mutex);
- ret = vme_user_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
+ ret = vme_user_ioctl(file_inode(file), file, cmd, arg);
mutex_unlock(&vme_user_mutex);
return ret;
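
The vme_user conversion above swaps open-coded file->f_dentry->d_inode lookups for the file_inode() accessor, which hides the struct file layout from drivers. A minimal sketch of the pattern, using a hypothetical helper name:

	#include <linux/fs.h>
	#include <linux/kdev_t.h>

	/* file_inode() replaces direct file->f_dentry->d_inode dereferences */
	static unsigned int example_minor(struct file *file)
	{
		return MINOR(file_inode(file)->i_rdev);
	}
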
diff --git a/drivers/staging/zcache/zbud.c b/drivers/staging/zcache/zbud.c
index 328c397ea5dc..fdff5c6a0239 100644
--- a/drivers/staging/zcache/zbud.c
+++ b/drivers/staging/zcache/zbud.c
@@ -404,7 +404,7 @@ static inline struct page *zbud_unuse_zbudpage(struct zbudpage *zbudpage,
else
zbud_pers_pageframes--;
zbudpage_spin_unlock(zbudpage);
- reset_page_mapcount(page);
+ page_mapcount_reset(page);
init_page_count(page);
page->index = 0;
return page;
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 06f73a93a44d..e78d262c5249 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -472,7 +472,7 @@ static void reset_page(struct page *page)
set_page_private(page, 0);
page->mapping = NULL;
page->freelist = NULL;
- reset_page_mapcount(page);
+ page_mapcount_reset(page);
}
static void free_zspage(struct page *first_page)
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 339f97f7085b..7ea246a07731 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -144,23 +144,24 @@ struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
spin_lock_init(&tiqn->login_stats.lock);
spin_lock_init(&tiqn->logout_stats.lock);
- if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
- pr_err("idr_pre_get() for tiqn_idr failed\n");
- kfree(tiqn);
- return ERR_PTR(-ENOMEM);
- }
tiqn->tiqn_state = TIQN_STATE_ACTIVE;
+ idr_preload(GFP_KERNEL);
spin_lock(&tiqn_lock);
- ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
+
+ ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
if (ret < 0) {
- pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
+ pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
spin_unlock(&tiqn_lock);
+ idr_preload_end();
kfree(tiqn);
return ERR_PTR(ret);
}
+ tiqn->tiqn_index = ret;
list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
+
spin_unlock(&tiqn_lock);
+ idr_preload_end();
pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
@@ -264,16 +265,50 @@ int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
return 0;
}
-static struct iscsi_np *iscsit_get_np(
+bool iscsit_check_np_match(
struct __kernel_sockaddr_storage *sockaddr,
+ struct iscsi_np *np,
int network_transport)
{
struct sockaddr_in *sock_in, *sock_in_e;
struct sockaddr_in6 *sock_in6, *sock_in6_e;
- struct iscsi_np *np;
- int ip_match = 0;
+ bool ip_match = false;
u16 port;
+ if (sockaddr->ss_family == AF_INET6) {
+ sock_in6 = (struct sockaddr_in6 *)sockaddr;
+ sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+
+ if (!memcmp(&sock_in6->sin6_addr.in6_u,
+ &sock_in6_e->sin6_addr.in6_u,
+ sizeof(struct in6_addr)))
+ ip_match = true;
+
+ port = ntohs(sock_in6->sin6_port);
+ } else {
+ sock_in = (struct sockaddr_in *)sockaddr;
+ sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
+
+ if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
+ ip_match = true;
+
+ port = ntohs(sock_in->sin_port);
+ }
+
+ if ((ip_match == true) && (np->np_port == port) &&
+ (np->np_network_transport == network_transport))
+ return true;
+
+ return false;
+}
+
+static struct iscsi_np *iscsit_get_np(
+ struct __kernel_sockaddr_storage *sockaddr,
+ int network_transport)
+{
+ struct iscsi_np *np;
+ bool match;
+
spin_lock_bh(&np_lock);
list_for_each_entry(np, &g_np_list, np_list) {
spin_lock(&np->np_thread_lock);
@@ -282,29 +317,8 @@ static struct iscsi_np *iscsit_get_np(
continue;
}
- if (sockaddr->ss_family == AF_INET6) {
- sock_in6 = (struct sockaddr_in6 *)sockaddr;
- sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
-
- if (!memcmp(&sock_in6->sin6_addr.in6_u,
- &sock_in6_e->sin6_addr.in6_u,
- sizeof(struct in6_addr)))
- ip_match = 1;
-
- port = ntohs(sock_in6->sin6_port);
- } else {
- sock_in = (struct sockaddr_in *)sockaddr;
- sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
-
- if (sock_in->sin_addr.s_addr ==
- sock_in_e->sin_addr.s_addr)
- ip_match = 1;
-
- port = ntohs(sock_in->sin_port);
- }
-
- if ((ip_match == 1) && (np->np_port == port) &&
- (np->np_network_transport == network_transport)) {
+ match = iscsit_check_np_match(sockaddr, np, network_transport);
+ if (match == true) {
/*
* Increment the np_exports reference count now to
* prevent iscsit_del_np() below from being called
@@ -3570,6 +3584,10 @@ check_rsp_state:
spin_lock_bh(&cmd->istate_lock);
cmd->i_state = ISTATE_SENT_STATUS;
spin_unlock_bh(&cmd->istate_lock);
+
+ if (atomic_read(&conn->check_immediate_queue))
+ return 1;
+
continue;
} else if (ret == 2) {
/* Still must send status,
@@ -3659,7 +3677,7 @@ check_rsp_state:
}
if (atomic_read(&conn->check_immediate_queue))
- break;
+ return 1;
}
return 0;
@@ -3703,12 +3721,15 @@ restart:
signal_pending(current))
goto transport_err;
+get_immediate:
ret = handle_immediate_queue(conn);
if (ret < 0)
goto transport_err;
ret = handle_response_queue(conn);
- if (ret == -EAGAIN)
+ if (ret == 1)
+ goto get_immediate;
+ else if (ret == -EAGAIN)
goto restart;
else if (ret < 0)
goto transport_err;
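
The two idr hunks above (and the matching one in iscsi_target_login.c further down) convert the old idr_pre_get()/idr_get_new() pair to the idr_preload()/idr_alloc() API. A minimal sketch of that pattern follows; the names example_idr, example_lock and example_assign_index are illustrative only, and it assumes the idr_alloc(idr, ptr, start, end, gfp) signature used by this kernel:

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

static int example_assign_index(void *object)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preallocate while sleeping is still allowed */
	spin_lock(&example_lock);
	id = idr_alloc(&example_idr, object, 0, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	return id;			/* new index on success, negative errno on failure */
}

Unlike idr_get_new(), idr_alloc() returns the allocated index itself, which is why the hunks assign tiqn->tiqn_index (and, below, sess->session_index) from ret instead of passing an output pointer.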
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index f1e4f3155bac..b1a1e6350707 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -8,6 +8,8 @@ extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
extern void iscsit_del_tiqn(struct iscsi_tiqn *);
extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
+ struct iscsi_np *, int);
extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
char *, int);
extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index fdb632f0ab85..2535d4d46c0e 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -247,19 +247,16 @@ static int iscsi_login_zero_tsih_s1(
spin_lock_init(&sess->session_usage_lock);
spin_lock_init(&sess->ttt_lock);
- if (!idr_pre_get(&sess_idr, GFP_KERNEL)) {
- pr_err("idr_pre_get() for sess_idr failed\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- kfree(sess);
- return -ENOMEM;
- }
+ idr_preload(GFP_KERNEL);
spin_lock_bh(&sess_idr_lock);
- ret = idr_get_new(&sess_idr, NULL, &sess->session_index);
+ ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ sess->session_index = ret;
spin_unlock_bh(&sess_idr_lock);
+ idr_preload_end();
if (ret < 0) {
- pr_err("idr_get_new() for sess_idr failed\n");
+ pr_err("idr_alloc() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
kfree(sess);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index d89164287d00..ca2be406f141 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1095,11 +1095,11 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
SET_PSTATE_REPLY_OPTIONAL(param);
}
} else if (IS_TYPE_NUMBER(param)) {
- char *tmpptr, buf[10];
+ char *tmpptr, buf[11];
u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
- memset(buf, 0, 10);
+ memset(buf, 0, sizeof(buf));
if (!strcmp(param->name, MAXCONNECTIONS) ||
!strcmp(param->name, MAXBURSTLENGTH) ||
@@ -1503,8 +1503,8 @@ static int iscsi_enforce_integrity_rules(
FirstBurstLength = simple_strtoul(param->value,
&tmpptr, 0);
if (FirstBurstLength > MaxBurstLength) {
- char tmpbuf[10];
- memset(tmpbuf, 0, 10);
+ char tmpbuf[11];
+ memset(tmpbuf, 0, sizeof(tmpbuf));
sprintf(tmpbuf, "%u", MaxBurstLength);
if (iscsi_update_param_value(param, tmpbuf))
return -1;
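
The two buffer-size bumps above exist because a u32 printed in decimal can need ten digits plus a terminating NUL, so a 10-byte buffer is one byte short in the worst case. A tiny illustration (the value is mine, not from the patch):

	char buf[11];	/* "4294967295" = 10 digits + '\0' */

	snprintf(buf, sizeof(buf), "%u", 4294967295U);	/* fits exactly */

Using sizeof(buf) in the memset() calls also keeps the size in one place if the array ever changes again.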
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 421d6947dc64..464b4206a51e 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -410,14 +410,16 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
- unsigned char buf[8];
+ int ret;
spin_lock(&lstat->lock);
- snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_addr != NULL) ?
- "ipv6" : "ipv4");
+ if (lstat->last_intr_fail_ip_family == AF_INET6)
+ ret = snprintf(page, PAGE_SIZE, "ipv6\n");
+ else
+ ret = snprintf(page, PAGE_SIZE, "ipv4\n");
spin_unlock(&lstat->lock);
- return snprintf(page, PAGE_SIZE, "%s\n", buf);
+ return ret;
}
ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
@@ -427,16 +429,19 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
- unsigned char buf[32];
+ int ret;
spin_lock(&lstat->lock);
- if (lstat->last_intr_fail_ip_family == AF_INET6)
- snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr);
- else
- snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr);
+ if (lstat->last_intr_fail_ip_family == AF_INET6) {
+ ret = snprintf(page, PAGE_SIZE, "[%s]\n",
+ lstat->last_intr_fail_ip_addr);
+ } else {
+ ret = snprintf(page, PAGE_SIZE, "%s\n",
+ lstat->last_intr_fail_ip_addr);
+ }
spin_unlock(&lstat->lock);
- return snprintf(page, PAGE_SIZE, "%s\n", buf);
+ return ret;
}
ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index de9ea32b6104..ee8f8c66248d 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -422,6 +422,35 @@ struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
return NULL;
}
+static bool iscsit_tpg_check_network_portal(
+ struct iscsi_tiqn *tiqn,
+ struct __kernel_sockaddr_storage *sockaddr,
+ int network_transport)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
+ struct iscsi_np *np;
+ bool match = false;
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+ np = tpg_np->tpg_np;
+
+ match = iscsit_check_np_match(sockaddr, np,
+ network_transport);
+ if (match == true)
+ break;
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+ }
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return match;
+}
+
struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
struct iscsi_portal_group *tpg,
struct __kernel_sockaddr_storage *sockaddr,
@@ -432,6 +461,16 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
struct iscsi_np *np;
struct iscsi_tpg_np *tpg_np;
+ if (!tpg_np_parent) {
+ if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
+ network_transport) == true) {
+ pr_err("Network Portal: %s already exists on a"
+ " different TPG on %s\n", ip_str,
+ tpg->tpg_tiqn->tiqn);
+ return ERR_PTR(-EEXIST);
+ }
+ }
+
tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
if (!tpg_np) {
pr_err("Unable to allocate memory for"
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 2e8d06f198ae..d3536f57444f 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1719,7 +1719,7 @@ static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
if (!nacl) {
- pr_err("Unable to alocate struct sbp_nacl\n");
+ pr_err("Unable to allocate struct sbp_nacl\n");
return NULL;
}
@@ -2598,7 +2598,7 @@ static int __init sbp_init(void)
return 0;
};
-static void sbp_exit(void)
+static void __exit sbp_exit(void)
{
sbp_deregister_configfs();
};
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 7d4ec02e29a9..ff1c5ee352cb 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -27,6 +27,7 @@
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/export.h>
+#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>
@@ -715,36 +716,18 @@ static int core_alua_write_tpg_metadata(
unsigned char *md_buf,
u32 md_buf_len)
{
- mm_segment_t old_fs;
- struct file *file;
- struct iovec iov[1];
- int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
-
- memset(iov, 0, sizeof(struct iovec));
+ struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
+ int ret;
- file = filp_open(path, flags, 0600);
- if (IS_ERR(file) || !file || !file->f_dentry) {
- pr_err("filp_open(%s) for ALUA metadata failed\n",
- path);
+ if (IS_ERR(file)) {
+ pr_err("filp_open(%s) for ALUA metadata failed\n", path);
return -ENODEV;
}
-
- iov[0].iov_base = &md_buf[0];
- iov[0].iov_len = md_buf_len;
-
- old_fs = get_fs();
- set_fs(get_ds());
- ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
- set_fs(old_fs);
-
- if (ret < 0) {
+ ret = kernel_write(file, md_buf, md_buf_len, 0);
+ if (ret < 0)
pr_err("Error writing ALUA metadata file: %s\n", path);
- filp_close(file, NULL);
- return -EIO;
- }
- filp_close(file, NULL);
-
- return 0;
+ fput(file);
+ return ret ? -EIO : 0;
}
/*
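
This hunk, like the APTPL hunk in target_core_pr.c below, replaces the set_fs()/vfs_writev() sequence with a single kernel_write() call. A hedged sketch of the resulting pattern, using a hypothetical helper name and assuming the kernel_write(file, buf, count, pos) signature of this kernel (offset passed by value):

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/err.h>

static int example_write_metadata(const char *path, const char *buf, size_t len)
{
	struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	ssize_t ret;

	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = kernel_write(file, buf, len, 0);	/* no get_fs()/set_fs() juggling */
	fput(file);				/* drop the reference taken by filp_open() */

	return ret < 0 ? -EIO : 0;		/* treat any write error as -EIO */
}

The cleanup path changes as well: fput() replaces filp_close(file, NULL), and there is no iovec to build for a single contiguous buffer.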
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 4efb61b8d001..43b7ac6c5b1c 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -609,6 +609,9 @@ static struct target_core_dev_attrib_attribute \
__CONFIGFS_EATTR_RO(_name, \
target_core_dev_show_attr_##_name);
+DEF_DEV_ATTRIB(emulate_model_alias);
+SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
+
DEF_DEV_ATTRIB(emulate_dpo);
SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
@@ -681,6 +684,7 @@ SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
+ &target_core_dev_attrib_emulate_model_alias.attr,
&target_core_dev_attrib_emulate_dpo.attr,
&target_core_dev_attrib_emulate_fua_write.attr,
&target_core_dev_attrib_emulate_fua_read.attr,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index f2aa7543d20a..2e4d655471bc 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -713,6 +713,44 @@ int se_dev_set_max_write_same_len(
return 0;
}
+static void dev_set_t10_wwn_model_alias(struct se_device *dev)
+{
+ const char *configname;
+
+ configname = config_item_name(&dev->dev_group.cg_item);
+ if (strlen(configname) >= 16) {
+ pr_warn("dev[%p]: Backstore name '%s' is too long for "
+ "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
+ configname);
+ }
+ snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
+}
+
+int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
+{
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to change model alias"
+ " while export_count is %d\n",
+ dev, dev->export_count);
+ return -EINVAL;
+ }
+
+ if (flag != 0 && flag != 1) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ if (flag) {
+ dev_set_t10_wwn_model_alias(dev);
+ } else {
+ strncpy(&dev->t10_wwn.model[0],
+ dev->transport->inquiry_prod, 16);
+ }
+ dev->dev_attrib.emulate_model_alias = flag;
+
+ return 0;
+}
+
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
if (flag != 0 && flag != 1) {
@@ -772,6 +810,12 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("emulate_write_cache not supported for pSCSI\n");
return -EINVAL;
}
+ if (dev->transport->get_write_cache) {
+ pr_warn("emulate_write_cache cannot be changed when underlying"
+ " HW reports WriteCacheEnabled, ignoring request\n");
+ return 0;
+ }
+
dev->dev_attrib.emulate_write_cache = flag;
pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
dev, dev->dev_attrib.emulate_write_cache);
@@ -1182,24 +1226,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_portal_group *tpg,
+ struct se_node_acl *nacl,
u32 mapped_lun,
- char *initiatorname,
int *ret)
{
struct se_lun_acl *lacl;
- struct se_node_acl *nacl;
- if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
+ if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
pr_err("%s InitiatorName exceeds maximum size.\n",
tpg->se_tpg_tfo->get_fabric_name());
*ret = -EOVERFLOW;
return NULL;
}
- nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if (!nacl) {
- *ret = -EINVAL;
- return NULL;
- }
lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
if (!lacl) {
pr_err("Unable to allocate memory for struct se_lun_acl.\n");
@@ -1210,7 +1248,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
INIT_LIST_HEAD(&lacl->lacl_list);
lacl->mapped_lun = mapped_lun;
lacl->se_lun_nacl = nacl;
- snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+ snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
+ nacl->initiatorname);
return lacl;
}
@@ -1390,6 +1429,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->t10_alua.t10_dev = dev;
dev->dev_attrib.da_dev = dev;
+ dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index c57bbbc7a7d1..04c775cb3e65 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -354,9 +354,17 @@ static struct config_group *target_fabric_make_mappedlun(
ret = -EINVAL;
goto out;
}
+ if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+ pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ "-1: %u for Target Portal Group: %u\n", mapped_lun,
+ TRANSPORT_MAX_LUNS_PER_TPG-1,
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+ ret = -EINVAL;
+ goto out;
+ }
- lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
- config_item_name(acl_ci), &ret);
+ lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
+ mapped_lun, &ret);
if (!lacl) {
ret = -EINVAL;
goto out;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b9c88497e8f0..17a6acbc3ab0 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -190,6 +190,11 @@ static int fd_configure_device(struct se_device *dev)
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
fd_dev->fd_queue_depth = dev->queue_depth;
+ /*
+ * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
+ * based upon struct iovec limit for vfs_writev()
+ */
+ dev->dev_attrib.max_write_same_len = 0x1000;
pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
@@ -265,7 +270,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
* the expected virt_size for struct file w/o a backing struct
* block_device.
*/
- if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+ if (S_ISBLK(file_inode(fd)->i_mode)) {
if (ret < 0 || ret != cmd->data_length) {
pr_err("%s() returned %d, expecting %u for "
"S_ISBLK\n", __func__, ret,
@@ -328,6 +333,114 @@ fd_execute_sync_cache(struct se_cmd *cmd)
return 0;
}
+static unsigned char *
+fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
+ unsigned int len)
+{
+ struct se_device *se_dev = cmd->se_dev;
+ unsigned int block_size = se_dev->dev_attrib.block_size;
+ unsigned int i = 0, end;
+ unsigned char *buf, *p, *kmap_buf;
+
+ buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate fd_execute_write_same buf\n");
+ return NULL;
+ }
+
+ kmap_buf = kmap(sg_page(sg)) + sg->offset;
+ if (!kmap_buf) {
+ pr_err("kmap() failed in fd_setup_write_same\n");
+ kfree(buf);
+ return NULL;
+ }
+ /*
+ * Fill local *buf to contain multiple WRITE_SAME blocks up to
+ * min(len, PAGE_SIZE)
+ */
+ p = buf;
+ end = min_t(unsigned int, len, PAGE_SIZE);
+
+ while (i < end) {
+ memcpy(p, kmap_buf, block_size);
+
+ i += block_size;
+ p += block_size;
+ }
+ kunmap(sg_page(sg));
+
+ return buf;
+}
+
+static sense_reason_t
+fd_execute_write_same(struct se_cmd *cmd)
+{
+ struct se_device *se_dev = cmd->se_dev;
+ struct fd_dev *fd_dev = FD_DEV(se_dev);
+ struct file *f = fd_dev->fd_file;
+ struct scatterlist *sg;
+ struct iovec *iov;
+ mm_segment_t old_fs;
+ sector_t nolb = sbc_get_write_same_sectors(cmd);
+ loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
+ unsigned int len, len_tmp, iov_num;
+ int i, rc;
+ unsigned char *buf;
+
+ if (!nolb) {
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+ }
+ sg = &cmd->t_data_sg[0];
+
+ if (cmd->t_data_nents > 1 ||
+ sg->length != cmd->se_dev->dev_attrib.block_size) {
+ pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+ " block_size: %u\n", cmd->t_data_nents, sg->length,
+ cmd->se_dev->dev_attrib.block_size);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ len = len_tmp = nolb * se_dev->dev_attrib.block_size;
+ iov_num = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ buf = fd_setup_write_same_buf(cmd, sg, len);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ iov = vzalloc(sizeof(struct iovec) * iov_num);
+ if (!iov) {
+ pr_err("Unable to allocate fd_execute_write_same iovecs\n");
+ kfree(buf);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ /*
+ * Map the single fabric received scatterlist block now populated
+ * in *buf into each iovec for I/O submission.
+ */
+ for (i = 0; i < iov_num; i++) {
+ iov[i].iov_base = buf;
+ iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
+ len_tmp -= iov[i].iov_len;
+ }
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ rc = vfs_writev(f, &iov[0], iov_num, &pos);
+ set_fs(old_fs);
+
+ vfree(iov);
+ kfree(buf);
+
+ if (rc < 0 || rc != len) {
+ pr_err("vfs_writev() returned %d for write same\n", rc);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+}
+
static sense_reason_t
fd_execute_rw(struct se_cmd *cmd)
{
@@ -486,6 +599,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
static struct sbc_ops fd_sbc_ops = {
.execute_rw = fd_execute_rw,
.execute_sync_cache = fd_execute_sync_cache,
+ .execute_write_same = fd_execute_write_same,
};
static sense_reason_t
@@ -517,7 +631,7 @@ static int __init fileio_module_init(void)
return transport_subsystem_register(&fileio_template);
}
-static void fileio_module_exit(void)
+static void __exit fileio_module_exit(void)
{
transport_subsystem_release(&fileio_template);
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index b526d23dcd4f..8bcc514ec8b6 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -154,6 +154,7 @@ static int iblock_configure_device(struct se_device *dev)
if (blk_queue_nonrot(q))
dev->dev_attrib.is_nonrot = 1;
+
return 0;
out_free_bioset:
@@ -390,10 +391,19 @@ iblock_execute_unmap(struct se_cmd *cmd)
sense_reason_t ret = 0;
int dl, bd_dl, err;
+ /* We never set ANC_SUP */
+ if (cmd->t_task_cdb[1])
+ return TCM_INVALID_CDB_FIELD;
+
+ if (cmd->data_length == 0) {
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+ }
+
if (cmd->data_length < 8) {
pr_warn("UNMAP parameter list length %u too small\n",
cmd->data_length);
- return TCM_INVALID_PARAMETER_LIST;
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
}
buf = transport_kmap_data_sg(cmd);
@@ -463,7 +473,7 @@ iblock_execute_write_same_unmap(struct se_cmd *cmd)
int rc;
rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
- spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
+ sbc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
if (rc < 0) {
pr_warn("blkdev_issue_discard() failed: %d\n", rc);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -481,7 +491,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
struct bio *bio;
struct bio_list list;
sector_t block_lba = cmd->t_task_lba;
- sector_t sectors = spc_get_write_same_sectors(cmd);
+ sector_t sectors = sbc_get_write_same_sectors(cmd);
sg = &cmd->t_data_sg[0];
@@ -654,20 +664,24 @@ iblock_execute_rw(struct se_cmd *cmd)
u32 sg_num = sgl_nents;
sector_t block_lba;
unsigned bio_cnt;
- int rw;
+ int rw = 0;
int i;
if (data_direction == DMA_TO_DEVICE) {
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
/*
- * Force data to disk if we pretend to not have a volatile
- * write cache, or the initiator set the Force Unit Access bit.
+ * Force writethrough using WRITE_FUA if a volatile write cache
+ * is not enabled, or if initiator set the Force Unit Access bit.
*/
- if (dev->dev_attrib.emulate_write_cache == 0 ||
- (dev->dev_attrib.emulate_fua_write > 0 &&
- (cmd->se_cmd_flags & SCF_FUA)))
- rw = WRITE_FUA;
- else
+ if (q->flush_flags & REQ_FUA) {
+ if (cmd->se_cmd_flags & SCF_FUA)
+ rw = WRITE_FUA;
+ else if (!(q->flush_flags & REQ_FLUSH))
+ rw = WRITE_FUA;
+ } else {
rw = WRITE;
+ }
} else {
rw = READ;
}
@@ -774,6 +788,15 @@ iblock_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}
+bool iblock_get_write_cache(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+ struct request_queue *q = bdev_get_queue(bd);
+
+ return q->flush_flags & REQ_FLUSH;
+}
+
static struct se_subsystem_api iblock_template = {
.name = "iblock",
.inquiry_prod = "IBLOCK",
@@ -790,6 +813,7 @@ static struct se_subsystem_api iblock_template = {
.show_configfs_dev_params = iblock_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = iblock_get_blocks,
+ .get_write_cache = iblock_get_write_cache,
};
static int __init iblock_module_init(void)
@@ -797,7 +821,7 @@ static int __init iblock_module_init(void)
return transport_subsystem_register(&iblock_template);
}
-static void iblock_module_exit(void)
+static void __exit iblock_module_exit(void)
{
transport_subsystem_release(&iblock_template);
}
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 93e9c1f580b0..853bab60e362 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -25,6 +25,7 @@ int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
int se_dev_set_unmap_granularity(struct se_device *, u32);
int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
int se_dev_set_max_write_same_len(struct se_device *, u32);
+int se_dev_set_emulate_model_alias(struct se_device *, int);
int se_dev_set_emulate_dpo(struct se_device *, int);
int se_dev_set_emulate_fua_write(struct se_device *, int);
int se_dev_set_emulate_fua_read(struct se_device *, int);
@@ -45,7 +46,7 @@ struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u3
int core_dev_del_lun(struct se_portal_group *, u32);
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
- u32, char *, int *);
+ struct se_node_acl *, u32, int *);
int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *, u32, u32);
int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 8e0290b38e43..3240f2cc81ef 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>
@@ -1957,13 +1958,10 @@ static int __core_scsi3_write_aptpl_to_file(
{
struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
- struct iovec iov[1];
- mm_segment_t old_fs;
int flags = O_RDWR | O_CREAT | O_TRUNC;
char path[512];
int ret;
- memset(iov, 0, sizeof(struct iovec));
memset(path, 0, 512);
if (strlen(&wwn->unit_serial[0]) >= 512) {
@@ -1974,31 +1972,22 @@ static int __core_scsi3_write_aptpl_to_file(
snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
- if (IS_ERR(file) || !file || !file->f_dentry) {
+ if (IS_ERR(file)) {
pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
- return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
+ return PTR_ERR(file);
}
- iov[0].iov_base = &buf[0];
if (!pr_aptpl_buf_len)
- iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
- else
- iov[0].iov_len = pr_aptpl_buf_len;
+ pr_aptpl_buf_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
- old_fs = get_fs();
- set_fs(get_ds());
- ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
- set_fs(old_fs);
+ ret = kernel_write(file, buf, pr_aptpl_buf_len, 0);
- if (ret < 0) {
+ if (ret < 0)
pr_debug("Error writing APTPL metadata file: %s\n", path);
- filp_close(file, NULL);
- return -EIO;
- }
- filp_close(file, NULL);
+ fput(file);
- return 0;
+ return ret ? -EIO : 0;
}
static int
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 2bcfd79cf595..82e78d72fdb6 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -840,14 +840,14 @@ static void pscsi_bi_endio(struct bio *bio, int error)
bio_put(bio);
}
-static inline struct bio *pscsi_get_bio(int sg_num)
+static inline struct bio *pscsi_get_bio(int nr_vecs)
{
struct bio *bio;
/*
* Use bio_kmalloc() following the comment for bio -> struct request
* in block/blk-core.c:blk_make_request()
*/
- bio = bio_kmalloc(GFP_KERNEL, sg_num);
+ bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
if (!bio) {
pr_err("PSCSI: bio_kmalloc() failed\n");
return NULL;
@@ -940,7 +940,6 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
bio = NULL;
}
- page++;
len -= bytes;
data_len -= bytes;
off = 0;
@@ -952,7 +951,6 @@ fail:
while (*hbio) {
bio = *hbio;
*hbio = (*hbio)->bi_next;
- bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1092,7 +1090,6 @@ fail_free_bio:
while (hbio) {
struct bio *bio = hbio;
hbio = hbio->bi_next;
- bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1178,7 +1175,7 @@ static int __init pscsi_module_init(void)
return transport_subsystem_register(&pscsi_template);
}
-static void pscsi_module_exit(void)
+static void __exit pscsi_module_exit(void)
{
transport_subsystem_release(&pscsi_template);
}
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 0457de362e68..e0b3c379aa14 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -256,10 +256,12 @@ static void rd_free_device(struct se_device *dev)
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
- u32 i;
struct rd_dev_sg_table *sg_table;
+ u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
- for (i = 0; i < rd_dev->sg_table_count; i++) {
+ i = page / sg_per_table;
+ if (i < rd_dev->sg_table_count) {
sg_table = &rd_dev->sg_table_array[i];
if ((sg_table->page_start_offset <= page) &&
(sg_table->page_end_offset >= page))
@@ -314,7 +316,19 @@ rd_execute_rw(struct se_cmd *cmd)
void *rd_addr;
sg_miter_next(&m);
+ if (!(u32)m.length) {
+ pr_debug("RD[%u]: invalid sgl %p len %zu\n",
+ dev->rd_dev_id, m.addr, m.length);
+ sg_miter_stop(&m);
+ return TCM_INCORRECT_AMOUNT_OF_DATA;
+ }
len = min((u32)m.length, src_len);
+ if (len > rd_size) {
+ pr_debug("RD[%u]: size underrun page %d offset %d "
+ "size %d\n", dev->rd_dev_id,
+ rd_page, rd_offset, rd_size);
+ len = rd_size;
+ }
m.consumed = len;
rd_addr = sg_virt(rd_sg) + rd_offset;
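
The rd_get_sg_table() change above computes the table index directly as page / sg_per_table and then range-checks only that table, instead of scanning the whole array. As a purely illustrative calculation (the constants are assumptions, not values taken from this patch): with RD_MAX_ALLOCATION_SIZE = 65536 and a 32-byte struct scatterlist, sg_per_table is 2048, so page 5000 lands in table 5000 / 2048 = 2, whose page_start_offset/page_end_offset bounds are then verified as before.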
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index a664c664a31a..290230de2c53 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -105,7 +105,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
return 0;
}
-sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
+sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
u32 num_blocks;
@@ -126,7 +126,7 @@ sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
cmd->t_task_lba + 1;
}
-EXPORT_SYMBOL(spc_get_write_same_sectors);
+EXPORT_SYMBOL(sbc_get_write_same_sectors);
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
@@ -233,7 +233,7 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
- unsigned int sectors = spc_get_write_same_sectors(cmd);
+ unsigned int sectors = sbc_get_write_same_sectors(cmd);
if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
@@ -486,7 +486,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
*/
if (cmd->t_task_lba || sectors) {
if (sbc_check_valid_sectors(cmd) < 0)
- return TCM_INVALID_CDB_FIELD;
+ return TCM_ADDRESS_OUT_OF_RANGE;
}
cmd->execute_cmd = ops->execute_sync_cache;
break;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 2d88f087d961..4cb667d720a7 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -66,8 +66,8 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}
-static sense_reason_t
-spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
+sense_reason_t
+spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
struct se_lun *lun = cmd->se_lun;
struct se_device *dev = cmd->se_dev;
@@ -104,6 +104,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
return 0;
}
+EXPORT_SYMBOL(spc_emulate_inquiry_std);
/* unit serial number */
static sense_reason_t
@@ -160,7 +161,7 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
* Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459.
*/
-static sense_reason_t
+sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
@@ -404,17 +405,33 @@ check_scsi_name:
buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
return 0;
}
+EXPORT_SYMBOL(spc_emulate_evpd_83);
+
+static bool
+spc_check_dev_wce(struct se_device *dev)
+{
+ bool wce = false;
+
+ if (dev->transport->get_write_cache)
+ wce = dev->transport->get_write_cache(dev);
+ else if (dev->dev_attrib.emulate_write_cache > 0)
+ wce = true;
+
+ return wce;
+}
/* Extended INQUIRY Data VPD Page */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
+ struct se_device *dev = cmd->se_dev;
+
buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
- if (cmd->se_dev->dev_attrib.emulate_write_cache > 0)
+ if (spc_check_dev_wce(dev))
buf[6] = 0x01;
return 0;
}
@@ -764,7 +781,7 @@ static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
if (pc == 1)
goto out;
- if (dev->dev_attrib.emulate_write_cache > 0)
+ if (spc_check_dev_wce(dev))
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */
@@ -876,7 +893,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
spc_modesense_write_protect(&buf[length], type);
- if ((dev->dev_attrib.emulate_write_cache > 0) &&
+ if ((spc_check_dev_wce(dev)) &&
(dev->dev_attrib.emulate_fua_write > 0))
spc_modesense_dpofua(&buf[length], type);
@@ -983,6 +1000,14 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
int ret = 0;
int i;
+ if (!cmd->data_length) {
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+ }
+
+ if (cmd->data_length < off + 2)
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
+
buf = transport_kmap_data_sg(cmd);
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1007,6 +1032,11 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
goto out;
check_contents:
+ if (cmd->data_length < off + length) {
+ ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+ goto out;
+ }
+
if (memcmp(buf + off, tbuf, length))
ret = TCM_INVALID_PARAMETER_LIST;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index c6e0293ffdb0..d0b4dd95b91e 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -331,18 +331,6 @@ static void core_tmr_drain_state_list(
fe_count = atomic_read(&cmd->t_fe_count);
- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
- pr_debug("LUN_RESET: got CMD_T_ACTIVE for"
- " cdb: %p, t_fe_count: %d dev: %p\n", cmd,
- fe_count, dev);
- cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
- continue;
- }
- pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for cdb: %p,"
- " t_fe_count: %d dev: %p\n", cmd, fe_count, dev);
cmd->transport_state |= CMD_T_ABORTED;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 5192ac0337f7..9169d6a5d7e4 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -111,16 +111,10 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
struct se_node_acl *acl;
spin_lock_irq(&tpg->acl_node_lock);
- list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
- if (!strcmp(acl->initiatorname, initiatorname) &&
- !acl->dynamic_node_acl) {
- spin_unlock_irq(&tpg->acl_node_lock);
- return acl;
- }
- }
+ acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
spin_unlock_irq(&tpg->acl_node_lock);
- return NULL;
+ return acl;
}
/* core_tpg_add_node_to_devs():
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index bd587b70661a..2030b608136d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -907,15 +907,18 @@ int transport_dump_vpd_ident(
switch (vpd->device_identifier_code_set) {
case 0x01: /* Binary */
- sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
+ snprintf(buf, sizeof(buf),
+ "T10 VPD Binary Device Identifier: %s\n",
&vpd->device_identifier[0]);
break;
case 0x02: /* ASCII */
- sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
+ snprintf(buf, sizeof(buf),
+ "T10 VPD ASCII Device Identifier: %s\n",
&vpd->device_identifier[0]);
break;
case 0x03: /* UTF-8 */
- sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
+ snprintf(buf, sizeof(buf),
+ "T10 VPD UTF-8 Device Identifier: %s\n",
&vpd->device_identifier[0]);
break;
default:
@@ -1514,6 +1517,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_UNSUPPORTED_SCSI_OPCODE:
case TCM_INVALID_CDB_FIELD:
case TCM_INVALID_PARAMETER_LIST:
+ case TCM_PARAMETER_LIST_LENGTH_ERROR:
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
case TCM_UNKNOWN_MODE_PAGE:
case TCM_WRITE_PROTECTED:
@@ -2674,6 +2678,15 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
/* INVALID FIELD IN PARAMETER LIST */
buffer[SPC_ASC_KEY_OFFSET] = 0x26;
break;
+ case TCM_PARAMETER_LIST_LENGTH_ERROR:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* PARAMETER LIST LENGTH ERROR */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
+ break;
case TCM_UNEXPECTED_UNSOLICITED_DATA:
/* CURRENT ERROR */
buffer[0] = 0x70;
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 6659dd36e806..113f33598b9f 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -169,7 +169,6 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
{
struct ft_tport *tport;
struct hlist_head *head;
- struct hlist_node *pos;
struct ft_sess *sess;
rcu_read_lock();
@@ -178,7 +177,7 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
goto out;
head = &tport->hash[ft_sess_hash(port_id)];
- hlist_for_each_entry_rcu(sess, pos, head, hash) {
+ hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
@@ -201,10 +200,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
{
struct ft_sess *sess;
struct hlist_head *head;
- struct hlist_node *pos;
head = &tport->hash[ft_sess_hash(port_id)];
- hlist_for_each_entry_rcu(sess, pos, head, hash)
+ hlist_for_each_entry_rcu(sess, head, hash)
if (sess->port_id == port_id)
return sess;
@@ -253,11 +251,10 @@ static void ft_sess_unhash(struct ft_sess *sess)
static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
{
struct hlist_head *head;
- struct hlist_node *pos;
struct ft_sess *sess;
head = &tport->hash[ft_sess_hash(port_id)];
- hlist_for_each_entry_rcu(sess, pos, head, hash) {
+ hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
ft_sess_unhash(sess);
return sess;
@@ -273,12 +270,11 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
static void ft_sess_delete_all(struct ft_tport *tport)
{
struct hlist_head *head;
- struct hlist_node *pos;
struct ft_sess *sess;
for (head = tport->hash;
head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
- hlist_for_each_entry_rcu(sess, pos, head, hash) {
+ hlist_for_each_entry_rcu(sess, head, hash) {
ft_sess_unhash(sess);
transport_deregister_session_configfs(sess->se_sess);
ft_sess_put(sess); /* release from table */
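
All of the hunks in this file drop the separate struct hlist_node cursor because hlist_for_each_entry_rcu() now takes three arguments and derives the entry pointer itself. A minimal sketch of the new form, using invented names (example_sess, example_lookup) and assuming the caller already holds rcu_read_lock():

#include <linux/rculist.h>
#include <linux/types.h>

struct example_sess {
	struct hlist_node hash;
	u32 port_id;
};

static struct example_sess *example_lookup(struct hlist_head *head, u32 port_id)
{
	struct example_sess *sess;

	/* walk the RCU-protected hash chain; no separate *pos cursor needed */
	hlist_for_each_entry_rcu(sess, head, hash)
		if (sess->port_id == port_id)
			return sess;

	return NULL;
}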
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c2c77d1ac499..a764f165b589 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -29,14 +29,14 @@ choice
config THERMAL_DEFAULT_GOV_STEP_WISE
bool "step_wise"
- select STEP_WISE
+ select THERMAL_GOV_STEP_WISE
help
Use the step_wise governor as default. This throttles the
devices one step at a time.
config THERMAL_DEFAULT_GOV_FAIR_SHARE
bool "fair_share"
- select FAIR_SHARE
+ select THERMAL_GOV_FAIR_SHARE
help
Use the fair_share governor as default. This throttles the
devices based on their 'contribution' to a zone. The
@@ -44,24 +44,24 @@ config THERMAL_DEFAULT_GOV_FAIR_SHARE
config THERMAL_DEFAULT_GOV_USER_SPACE
bool "user_space"
- select USER_SPACE
+ select THERMAL_GOV_USER_SPACE
help
Select this if you want to let the user space manage the
platform thermals.
endchoice
-config FAIR_SHARE
+config THERMAL_GOV_FAIR_SHARE
bool "Fair-share thermal governor"
help
Enable this to manage platform thermals using fair-share governor.
-config STEP_WISE
+config THERMAL_GOV_STEP_WISE
bool "Step_wise thermal governor"
help
Enable this to manage platform thermals using a simple linear
-config USER_SPACE
+config THERMAL_GOV_USER_SPACE
bool "User_space thermal governor"
help
Enable this to let the user space manage the platform thermals.
@@ -78,6 +78,14 @@ config CPU_THERMAL
and not the ACPI interface.
If you want this support, you should say Y here.
+config THERMAL_EMULATION
+ bool "Thermal emulation mode support"
+ help
+ Enable this option to create an emul_temp sysfs node in the thermal zone
+ directory to support temperature emulation. With this sysfs node, the
+ user can manually input a temperature and test the different trip
+ threshold behaviours for simulation purposes.
+
config SPEAR_THERMAL
bool "SPEAr thermal sensor driver"
depends on PLAT_SPEAR
@@ -93,6 +101,14 @@ config RCAR_THERMAL
Enable this to plug the R-Car thermal sensor driver into the Linux
thermal framework
+config KIRKWOOD_THERMAL
+ tristate "Temperature sensor on Marvell Kirkwood SoCs"
+ depends on ARCH_KIRKWOOD
+ depends on OF
+ help
+ Support for the Kirkwood thermal sensor driver in the Linux thermal
+ framework. Only the Kirkwood 88F6282 and 88F6283 have this sensor.
+
config EXYNOS_THERMAL
tristate "Temperature sensor on Samsung EXYNOS"
depends on (ARCH_EXYNOS4 || ARCH_EXYNOS5)
@@ -101,6 +117,23 @@ config EXYNOS_THERMAL
If you say yes here you get support for TMU (Thermal Management
Unit) on SAMSUNG EXYNOS series of SoC.
+config EXYNOS_THERMAL_EMUL
+ bool "EXYNOS TMU emulation mode support"
+ depends on EXYNOS_THERMAL
+ help
+ The Exynos 4412, 4414 and 5 series have an emulation mode on the TMU.
+ Enabling this option creates a sysfs node in the exynos thermal platform
+ device directory to support emulation mode. With this sysfs node, you
+ can manually input a temperature to the TMU for simulation purposes.
+
+config DOVE_THERMAL
+ tristate "Temperature sensor on Marvell Dove SoCs"
+ depends on ARCH_DOVE
+ depends on OF
+ help
+ Support for the Dove thermal sensor driver in the Linux thermal
+ framework.
+
config DB8500_THERMAL
bool "DB8500 thermal management"
depends on ARCH_U8500
@@ -122,4 +155,14 @@ config DB8500_CPUFREQ_COOLING
bound cpufreq cooling device turns active to set CPU frequency low to
cool down the CPU.
+config INTEL_POWERCLAMP
+ tristate "Intel PowerClamp idle injection driver"
+ depends on THERMAL
+ depends on X86
+ depends on CPU_SUP_INTEL
+ help
+ Enable this to enable the Intel PowerClamp idle injection driver. This
+ enforces idle time, which results in more package C-state residency.
+ The user interface is exposed via the generic thermal framework.
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index d8da683245fc..d3a2b38c31e8 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -5,9 +5,9 @@
obj-$(CONFIG_THERMAL) += thermal_sys.o
# governors
-obj-$(CONFIG_FAIR_SHARE) += fair_share.o
-obj-$(CONFIG_STEP_WISE) += step_wise.o
-obj-$(CONFIG_USER_SPACE) += user_space.o
+obj-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
+obj-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
+obj-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
# cpufreq cooling
obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
@@ -15,6 +15,10 @@ obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
# platform thermal drivers
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
+obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o
obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o
+obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
+obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
+
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 836828e29a87..8dc44cbb3e09 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -73,21 +73,14 @@ static struct cpufreq_cooling_device *notify_device;
*/
static int get_idr(struct idr *idr, int *id)
{
- int err;
-again:
- if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
- return -ENOMEM;
+ int ret;
mutex_lock(&cooling_cpufreq_lock);
- err = idr_get_new(idr, NULL, id);
+ ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
mutex_unlock(&cooling_cpufreq_lock);
-
- if (unlikely(err == -EAGAIN))
- goto again;
- else if (unlikely(err))
- return err;
-
- *id = *id & MAX_IDR_MASK;
+ if (unlikely(ret < 0))
+ return ret;
+ *id = ret;
return 0;
}
@@ -118,8 +111,8 @@ static int is_cpufreq_valid(int cpu)
/**
* get_cpu_frequency - get the absolute value of frequency from level.
* @cpu: cpu for which frequency is fetched.
- * @level: level of frequency of the CPU
- * e.g level=1 --> 1st MAX FREQ, LEVEL=2 ---> 2nd MAX FREQ, .... etc
+ * @level: level of frequency, equals cooling state of cpu cooling device
+ * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
*/
static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
{
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 4cf8e72af90a..21419851fc02 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -21,6 +21,7 @@
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -73,15 +74,13 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = {
{ .compatible = "stericsson,db8500-cpufreq-cooling" },
{},
};
-#else
-#define db8500_cpufreq_cooling_match NULL
#endif
static struct platform_driver db8500_cpufreq_cooling_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "db8500-cpufreq-cooling",
- .of_match_table = db8500_cpufreq_cooling_match,
+ .of_match_table = of_match_ptr(db8500_cpufreq_cooling_match),
},
.probe = db8500_cpufreq_cooling_probe,
.suspend = db8500_cpufreq_cooling_suspend,
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index ec71ade3e317..61ce60a35921 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -508,15 +508,13 @@ static const struct of_device_id db8500_thermal_match[] = {
{ .compatible = "stericsson,db8500-thermal" },
{},
};
-#else
-#define db8500_thermal_match NULL
#endif
static struct platform_driver db8500_thermal_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "db8500-thermal",
- .of_match_table = db8500_thermal_match,
+ .of_match_table = of_match_ptr(db8500_thermal_match),
},
.probe = db8500_thermal_probe,
.suspend = db8500_thermal_suspend,
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
new file mode 100644
index 000000000000..7b0bfa0e7a9c
--- /dev/null
+++ b/drivers/thermal/dove_thermal.c
@@ -0,0 +1,209 @@
+/*
+ * Dove thermal sensor driver
+ *
+ * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#define DOVE_THERMAL_TEMP_OFFSET 1
+#define DOVE_THERMAL_TEMP_MASK 0x1FF
+
+/* Dove Thermal Manager Control and Status Register */
+#define PMU_TM_DISABLE_OFFS 0
+#define PMU_TM_DISABLE_MASK (0x1 << PMU_TM_DISABLE_OFFS)
+
+/* Dove Thermal Diode Control 0 Register */
+#define PMU_TDC0_SW_RST_MASK (0x1 << 1)
+#define PMU_TDC0_SEL_VCAL_OFFS 5
+#define PMU_TDC0_SEL_VCAL_MASK (0x3 << PMU_TDC0_SEL_VCAL_OFFS)
+#define PMU_TDC0_REF_CAL_CNT_OFFS 11
+#define PMU_TDC0_REF_CAL_CNT_MASK (0x1FF << PMU_TDC0_REF_CAL_CNT_OFFS)
+#define PMU_TDC0_AVG_NUM_OFFS 25
+#define PMU_TDC0_AVG_NUM_MASK (0x7 << PMU_TDC0_AVG_NUM_OFFS)
+
+/* Dove Thermal Diode Control 1 Register */
+#define PMU_TEMP_DIOD_CTRL1_REG 0x04
+#define PMU_TDC1_TEMP_VALID_MASK (0x1 << 10)
+
+/* Dove Thermal Sensor Dev Structure */
+struct dove_thermal_priv {
+ void __iomem *sensor;
+ void __iomem *control;
+};
+
+static int dove_init_sensor(const struct dove_thermal_priv *priv)
+{
+ u32 reg;
+ u32 i;
+
+ /* Configure the Diode Control Register #0 */
+ reg = readl_relaxed(priv->control);
+
+ /* Use average of 2 */
+ reg &= ~PMU_TDC0_AVG_NUM_MASK;
+ reg |= (0x1 << PMU_TDC0_AVG_NUM_OFFS);
+
+ /* Reference calibration value */
+ reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
+ reg |= (0x0F1 << PMU_TDC0_REF_CAL_CNT_OFFS);
+
+ /* Set the high level reference for calibration */
+ reg &= ~PMU_TDC0_SEL_VCAL_MASK;
+ reg |= (0x2 << PMU_TDC0_SEL_VCAL_OFFS);
+ writel(reg, priv->control);
+
+ /* Reset the sensor */
+ reg = readl_relaxed(priv->control);
+ writel((reg | PMU_TDC0_SW_RST_MASK), priv->control);
+ writel(reg, priv->control);
+
+ /* Enable the sensor */
+ reg = readl_relaxed(priv->sensor);
+ reg &= ~PMU_TM_DISABLE_MASK;
+ writel(reg, priv->sensor);
+
+ /* Poll the sensor for the first reading */
+ for (i = 0; i < 1000000; i++) {
+ reg = readl_relaxed(priv->sensor);
+ if (reg & DOVE_THERMAL_TEMP_MASK)
+ break;
+ }
+
+ if (i == 1000000)
+ return -EIO;
+
+ return 0;
+}
+
+static int dove_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ unsigned long reg;
+ struct dove_thermal_priv *priv = thermal->devdata;
+
+ /* Valid check */
+ reg = readl_relaxed(priv->control + PMU_TEMP_DIOD_CTRL1_REG);
+ if ((reg & PMU_TDC1_TEMP_VALID_MASK) == 0x0) {
+ dev_err(&thermal->device,
+ "Temperature sensor reading not valid\n");
+ return -EIO;
+ }
+
+ /*
+ * Calculate temperature. See Section 8.10.1 of 88AP510,
+ * Documentation/arm/Marvell/README
+ */
+ reg = readl_relaxed(priv->sensor);
+ reg = (reg >> DOVE_THERMAL_TEMP_OFFSET) & DOVE_THERMAL_TEMP_MASK;
+ *temp = ((2281638UL - (7298*reg)) / 10);
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+ .get_temp = dove_get_temp,
+};
+
+static const struct of_device_id dove_thermal_id_table[] = {
+ { .compatible = "marvell,dove-thermal" },
+ {}
+};
+
+static int dove_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal = NULL;
+ struct dove_thermal_priv *priv;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv->sensor) {
+ dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+ priv->control = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv->control) {
+ dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = dove_init_sensor(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize sensor\n");
+ return ret;
+ }
+
+ thermal = thermal_zone_device_register("dove_thermal", 0, 0,
+ priv, &ops, NULL, 0, 0);
+ if (IS_ERR(thermal)) {
+ dev_err(&pdev->dev,
+ "Failed to register thermal zone device\n");
+ return PTR_ERR(thermal);
+ }
+
+ platform_set_drvdata(pdev, thermal);
+
+ return 0;
+}
+
+static int dove_thermal_exit(struct platform_device *pdev)
+{
+ struct thermal_zone_device *dove_thermal =
+ platform_get_drvdata(pdev);
+
+ thermal_zone_device_unregister(dove_thermal);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, dove_thermal_id_table);
+
+static struct platform_driver dove_thermal_driver = {
+ .probe = dove_thermal_probe,
+ .remove = dove_thermal_exit,
+ .driver = {
+ .name = "dove_thermal",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dove_thermal_id_table),
+ },
+};
+
+module_platform_driver(dove_thermal_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_DESCRIPTION("Dove thermal driver");
+MODULE_LICENSE("GPL");
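
As a worked example of the conversion in dove_get_temp() above (my own arithmetic, not part of the driver): a raw sensor field of reg = 200 gives (2281638 - 7298 * 200) / 10 = 82203, i.e. roughly 82.2 degrees Celsius expressed in millicelsius, the unit the thermal framework expects get_temp() callbacks to report.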
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
index bada1308318b..e04ebd8671ac 100644
--- a/drivers/thermal/exynos_thermal.c
+++ b/drivers/thermal/exynos_thermal.c
@@ -82,7 +82,7 @@
#define EXYNOS_TRIMINFO_RELOAD 0x1
#define EXYNOS_TMU_CLEAR_RISE_INT 0x111
-#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 16)
+#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 12)
#define EXYNOS_MUX_ADDR_VALUE 6
#define EXYNOS_MUX_ADDR_SHIFT 20
#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
@@ -94,11 +94,20 @@
#define SENSOR_NAME_LEN 16
#define MAX_TRIP_COUNT 8
#define MAX_COOLING_DEVICE 4
+#define MAX_THRESHOLD_LEVS 4
#define ACTIVE_INTERVAL 500
#define IDLE_INTERVAL 10000
#define MCELSIUS 1000
+#ifdef CONFIG_EXYNOS_THERMAL_EMUL
+#define EXYNOS_EMUL_TIME 0x57F0
+#define EXYNOS_EMUL_TIME_SHIFT 16
+#define EXYNOS_EMUL_DATA_SHIFT 8
+#define EXYNOS_EMUL_DATA_MASK 0xFF
+#define EXYNOS_EMUL_ENABLE 0x1
+#endif /* CONFIG_EXYNOS_THERMAL_EMUL */
+
/* CPU Zone information */
#define PANIC_ZONE 4
#define WARN_ZONE 3
@@ -125,6 +134,7 @@ struct exynos_tmu_data {
struct thermal_trip_point_conf {
int trip_val[MAX_TRIP_COUNT];
int trip_count;
+ u8 trigger_falling;
};
struct thermal_cooling_conf {
@@ -174,7 +184,8 @@ static int exynos_set_mode(struct thermal_zone_device *thermal,
mutex_lock(&th_zone->therm_dev->lock);
- if (mode == THERMAL_DEVICE_ENABLED)
+ if (mode == THERMAL_DEVICE_ENABLED &&
+ !th_zone->sensor_conf->trip_data.trigger_falling)
th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
else
th_zone->therm_dev->polling_delay = 0;
@@ -284,7 +295,7 @@ static int exynos_bind(struct thermal_zone_device *thermal,
case MONITOR_ZONE:
case WARN_ZONE:
if (thermal_zone_bind_cooling_device(thermal, i, cdev,
- level, level)) {
+ level, 0)) {
pr_err("error binding cdev inst %d\n", i);
ret = -EINVAL;
}
@@ -362,10 +373,17 @@ static int exynos_get_temp(struct thermal_zone_device *thermal,
static int exynos_get_trend(struct thermal_zone_device *thermal,
int trip, enum thermal_trend *trend)
{
- if (thermal->temperature >= trip)
- *trend = THERMAL_TREND_RAISING;
+ int ret;
+ unsigned long trip_temp;
+
+ ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
+ if (ret < 0)
+ return ret;
+
+ if (thermal->temperature >= trip_temp)
+ *trend = THERMAL_TREND_RAISE_FULL;
else
- *trend = THERMAL_TREND_DROPPING;
+ *trend = THERMAL_TREND_DROP_FULL;
return 0;
}
@@ -413,7 +431,8 @@ static void exynos_report_trigger(void)
break;
}
- if (th_zone->mode == THERMAL_DEVICE_ENABLED) {
+ if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
+ !th_zone->sensor_conf->trip_data.trigger_falling) {
if (i > 0)
th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
else
@@ -452,7 +471,8 @@ static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
th_zone->therm_dev = thermal_zone_device_register(sensor_conf->name,
EXYNOS_ZONE_COUNT, 0, NULL, &exynos_dev_ops, NULL, 0,
- IDLE_INTERVAL);
+ sensor_conf->trip_data.trigger_falling ?
+ 0 : IDLE_INTERVAL);
if (IS_ERR(th_zone->therm_dev)) {
pr_err("Failed to register thermal zone device\n");
@@ -559,8 +579,9 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct exynos_tmu_platform_data *pdata = data->pdata;
- unsigned int status, trim_info, rising_threshold;
- int ret = 0, threshold_code;
+ unsigned int status, trim_info;
+ unsigned int rising_threshold = 0, falling_threshold = 0;
+ int ret = 0, threshold_code, i, trigger_levs = 0;
mutex_lock(&data->lock);
clk_enable(data->clk);
@@ -585,6 +606,11 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
(data->temp_error2 != 0))
data->temp_error1 = pdata->efuse_value;
+ /* Count trigger levels to be enabled */
+ for (i = 0; i < MAX_THRESHOLD_LEVS; i++)
+ if (pdata->trigger_levels[i])
+ trigger_levs++;
+
if (data->soc == SOC_ARCH_EXYNOS4210) {
/* Write temperature code for threshold */
threshold_code = temp_to_code(data, pdata->threshold);
@@ -594,44 +620,38 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
}
writeb(threshold_code,
data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
-
- writeb(pdata->trigger_levels[0],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0);
- writeb(pdata->trigger_levels[1],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL1);
- writeb(pdata->trigger_levels[2],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL2);
- writeb(pdata->trigger_levels[3],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL3);
+ for (i = 0; i < trigger_levs; i++)
+ writeb(pdata->trigger_levels[i],
+ data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
writel(EXYNOS4210_TMU_INTCLEAR_VAL,
data->base + EXYNOS_TMU_REG_INTCLEAR);
} else if (data->soc == SOC_ARCH_EXYNOS) {
- /* Write temperature code for threshold */
- threshold_code = temp_to_code(data, pdata->trigger_levels[0]);
- if (threshold_code < 0) {
- ret = threshold_code;
- goto out;
- }
- rising_threshold = threshold_code;
- threshold_code = temp_to_code(data, pdata->trigger_levels[1]);
- if (threshold_code < 0) {
- ret = threshold_code;
- goto out;
- }
- rising_threshold |= (threshold_code << 8);
- threshold_code = temp_to_code(data, pdata->trigger_levels[2]);
- if (threshold_code < 0) {
- ret = threshold_code;
- goto out;
+ /* Write temperature code for rising and falling threshold */
+ for (i = 0; i < trigger_levs; i++) {
+ threshold_code = temp_to_code(data,
+ pdata->trigger_levels[i]);
+ if (threshold_code < 0) {
+ ret = threshold_code;
+ goto out;
+ }
+ rising_threshold |= threshold_code << 8 * i;
+ if (pdata->threshold_falling) {
+ threshold_code = temp_to_code(data,
+ pdata->trigger_levels[i] -
+ pdata->threshold_falling);
+ if (threshold_code > 0)
+ falling_threshold |=
+ threshold_code << 8 * i;
+ }
}
- rising_threshold |= (threshold_code << 16);
writel(rising_threshold,
data->base + EXYNOS_THD_TEMP_RISE);
- writel(0, data->base + EXYNOS_THD_TEMP_FALL);
+ writel(falling_threshold,
+ data->base + EXYNOS_THD_TEMP_FALL);
- writel(EXYNOS_TMU_CLEAR_RISE_INT|EXYNOS_TMU_CLEAR_FALL_INT,
+ writel(EXYNOS_TMU_CLEAR_RISE_INT | EXYNOS_TMU_CLEAR_FALL_INT,
data->base + EXYNOS_TMU_REG_INTCLEAR);
}
out:
@@ -664,6 +684,8 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
pdata->trigger_level2_en << 8 |
pdata->trigger_level1_en << 4 |
pdata->trigger_level0_en;
+ if (pdata->threshold_falling)
+ interrupt_en |= interrupt_en << 16;
} else {
con |= EXYNOS_TMU_CORE_OFF;
interrupt_en = 0; /* Disable all interrupts */
@@ -697,20 +719,19 @@ static void exynos_tmu_work(struct work_struct *work)
struct exynos_tmu_data *data = container_of(work,
struct exynos_tmu_data, irq_work);
+ exynos_report_trigger();
mutex_lock(&data->lock);
clk_enable(data->clk);
-
-
if (data->soc == SOC_ARCH_EXYNOS)
- writel(EXYNOS_TMU_CLEAR_RISE_INT,
+ writel(EXYNOS_TMU_CLEAR_RISE_INT |
+ EXYNOS_TMU_CLEAR_FALL_INT,
data->base + EXYNOS_TMU_REG_INTCLEAR);
else
writel(EXYNOS4210_TMU_INTCLEAR_VAL,
data->base + EXYNOS_TMU_REG_INTCLEAR);
-
clk_disable(data->clk);
mutex_unlock(&data->lock);
- exynos_report_trigger();
+
enable_irq(data->irq);
}
@@ -759,6 +780,7 @@ static struct exynos_tmu_platform_data const exynos4210_default_tmu_data = {
#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
static struct exynos_tmu_platform_data const exynos_default_tmu_data = {
+ .threshold_falling = 10,
.trigger_levels[0] = 85,
.trigger_levels[1] = 103,
.trigger_levels[2] = 110,
@@ -800,8 +822,6 @@ static const struct of_device_id exynos_tmu_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);
-#else
-#define exynos_tmu_match NULL
#endif
static struct platform_device_id exynos_tmu_driver_ids[] = {
@@ -832,6 +852,94 @@ static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
return (struct exynos_tmu_platform_data *)
platform_get_device_id(pdev)->driver_data;
}
+
+#ifdef CONFIG_EXYNOS_THERMAL_EMUL
+static ssize_t exynos_tmu_emulation_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ unsigned int reg;
+ u8 temp_code;
+ int temp = 0;
+
+ if (data->soc == SOC_ARCH_EXYNOS4210)
+ goto out;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+ reg = readl(data->base + EXYNOS_EMUL_CON);
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+
+ if (reg & EXYNOS_EMUL_ENABLE) {
+ reg >>= EXYNOS_EMUL_DATA_SHIFT;
+ temp_code = reg & EXYNOS_EMUL_DATA_MASK;
+ temp = code_to_temp(data, temp_code);
+ }
+out:
+ return sprintf(buf, "%d\n", temp * MCELSIUS);
+}
+
+static ssize_t exynos_tmu_emulation_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ unsigned int reg;
+ int temp;
+
+ if (data->soc == SOC_ARCH_EXYNOS4210)
+ goto out;
+
+ if (!sscanf(buf, "%d\n", &temp) || temp < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ reg = readl(data->base + EXYNOS_EMUL_CON);
+
+ if (temp) {
+ /* Both CELSIUS and MCELSIUS types are accepted as input */
+ if (temp > MCELSIUS)
+ temp /= MCELSIUS;
+
+ reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
+ (temp_to_code(data, (temp / MCELSIUS))
+ << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
+ } else {
+ reg &= ~EXYNOS_EMUL_ENABLE;
+ }
+
+ writel(reg, data->base + EXYNOS_EMUL_CON);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+
+out:
+ return count;
+}
+
+static DEVICE_ATTR(emulation, 0644, exynos_tmu_emulation_show,
+ exynos_tmu_emulation_store);
+static int create_emulation_sysfs(struct device *dev)
+{
+ return device_create_file(dev, &dev_attr_emulation);
+}
+static void remove_emulation_sysfs(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_emulation);
+}
+#else
+static inline int create_emulation_sysfs(struct device *dev) { return 0; }
+static inline void remove_emulation_sysfs(struct device *dev) {}
+#endif
+
static int exynos_tmu_probe(struct platform_device *pdev)
{
struct exynos_tmu_data *data;
@@ -914,6 +1022,8 @@ static int exynos_tmu_probe(struct platform_device *pdev)
exynos_sensor_conf.trip_data.trip_val[i] =
pdata->threshold + pdata->trigger_levels[i];
+ exynos_sensor_conf.trip_data.trigger_falling = pdata->threshold_falling;
+
exynos_sensor_conf.cooling_data.freq_clip_count =
pdata->freq_tab_count;
for (i = 0; i < pdata->freq_tab_count; i++) {
@@ -928,6 +1038,11 @@ static int exynos_tmu_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to register thermal interface\n");
goto err_clk;
}
+
+ ret = create_emulation_sysfs(&pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to create emulation mode sysfs node\n");
+
return 0;
err_clk:
platform_set_drvdata(pdev, NULL);
@@ -939,6 +1054,8 @@ static int exynos_tmu_remove(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ remove_emulation_sysfs(&pdev->dev);
+
exynos_tmu_control(pdev, false);
exynos_unregister_thermal();
@@ -980,7 +1097,7 @@ static struct platform_driver exynos_tmu_driver = {
.name = "exynos-tmu",
.owner = THIS_MODULE,
.pm = EXYNOS_TMU_PM,
- .of_match_table = exynos_tmu_match,
+ .of_match_table = of_match_ptr(exynos_tmu_match),
},
.probe = exynos_tmu_probe,
.remove = exynos_tmu_remove,
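Note on the exynos hunks above: for SOC_ARCH_EXYNOS the threshold setup now packs one 8-bit temperature code per trigger level into the rising-threshold register, with the falling codes offset by threshold_falling, and the new emulation sysfs node lets userspace force a temperature while testing trip handling. Below is a minimal sketch of the packing only, assuming temp_to_code() behaves as an identity mapping; it is really the driver's calibration helper and is merely stubbed here, so this is an illustration, not driver code.

/* Illustrative sketch: pack per-level temperature codes the way the
 * EXYNOS_THD_TEMP_RISE / EXYNOS_THD_TEMP_FALL hunk above does (8 bits per
 * trigger level, level i at bits 8*i..8*i+7). code_of() stands in for the
 * driver's temp_to_code() calibration and is assumed to be an identity. */
#include <stdio.h>
#include <stdint.h>

static uint32_t code_of(int celsius)
{
	return (uint32_t)celsius;	/* assumption, not real calibration */
}

static void pack_thresholds(const int *trig, int nlevels, int falling_off,
			    uint32_t *rise, uint32_t *fall)
{
	int i;

	*rise = 0;
	*fall = 0;
	for (i = 0; i < nlevels; i++) {
		*rise |= code_of(trig[i]) << (8 * i);
		if (falling_off)
			*fall |= code_of(trig[i] - falling_off) << (8 * i);
	}
}

int main(void)
{
	static const int trig[] = { 85, 103, 110 };	/* default trip levels */
	uint32_t rise, fall;

	pack_thresholds(trig, 3, 10, &rise, &fall);
	printf("rise=%#x fall=%#x\n", (unsigned)rise, (unsigned)fall);
	return 0;
}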
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
new file mode 100644
index 000000000000..b40b37cd25e0
--- /dev/null
+++ b/drivers/thermal/intel_powerclamp.c
@@ -0,0 +1,795 @@
+/*
+ * intel_powerclamp.c - package c-state idle injection
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * Authors:
+ * Arjan van de Ven <arjan@linux.intel.com>
+ * Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ *
+ * TODO:
+ * 1. better handle wakeup from external interrupts, currently a fixed
+ * compensation is added to clamping duration when excessive amount
+ * of wakeups are observed during idle time. the reason is that in
+ * case of external interrupts without need for ack, clamping down
+ * cpu in non-irq context does not reduce irq. for majority of the
+ * cases, clamping down cpu does help reduce irq as well, we should
+ * be able to differentiate the two cases and give a quantitative
+ * solution for the irqs that we can control. perhaps based on
+ * get_cpu_iowait_time_us()
+ *
+ * 2. synchronization with other hw blocks
+ *
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched/rt.h>
+
+#include <asm/nmi.h>
+#include <asm/msr.h>
+#include <asm/mwait.h>
+#include <asm/cpu_device_id.h>
+#include <asm/idle.h>
+#include <asm/hardirq.h>
+
+#define MAX_TARGET_RATIO (50U)
+/* For each undisturbed clamping period (no extra wake ups during idle time),
+ * we increment the confidence counter for the given target ratio.
+ * CONFIDENCE_OK defines the level where runtime calibration results are
+ * valid.
+ */
+#define CONFIDENCE_OK (3)
+/* Default idle injection duration; the driver adjusts the sleep time to meet
+ * the target idle ratio. Similar to frequency modulation.
+ */
+#define DEFAULT_DURATION_JIFFIES (6)
+
+static unsigned int target_mwait;
+static struct dentry *debug_dir;
+
+/* user selected target */
+static unsigned int set_target_ratio;
+static unsigned int current_ratio;
+static bool should_skip;
+static bool reduce_irq;
+static atomic_t idle_wakeup_counter;
+static unsigned int control_cpu; /* The cpu assigned to collect stats and
+ * update control parameters. Defaults to the
+ * BSP, but the BSP can be offlined.
+ */
+static bool clamping;
+
+
+static struct task_struct * __percpu *powerclamp_thread;
+static struct thermal_cooling_device *cooling_dev;
+static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
+ * clamping thread
+ */
+
+static unsigned int duration;
+static unsigned int pkg_cstate_ratio_cur;
+static unsigned int window_size;
+
+static int duration_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret = 0;
+ unsigned long new_duration;
+
+ ret = kstrtoul(arg, 10, &new_duration);
+ if (ret)
+ goto exit;
+ if (new_duration > 25 || new_duration < 6) {
+ pr_err("Out of recommended range %lu, between 6-25ms\n",
+ new_duration);
+ ret = -EINVAL;
+ }
+
+ duration = clamp(new_duration, 6ul, 25ul);
+ smp_mb();
+
+exit:
+
+ return ret;
+}
+
+static struct kernel_param_ops duration_ops = {
+ .set = duration_set,
+ .get = param_get_int,
+};
+
+
+module_param_cb(duration, &duration_ops, &duration, 0644);
+MODULE_PARM_DESC(duration, "forced idle time for each attempt in msec.");
+
+struct powerclamp_calibration_data {
+ unsigned long confidence; /* used for calibration, basically a counter
+ * gets incremented each time a clamping
+ * period is completed without extra wakeups
+ * once that counter is reached given level,
+ * compensation is deemed usable.
+ */
+ unsigned long steady_comp; /* steady state compensation used when
+ * no extra wakeups occurred.
+ */
+ unsigned long dynamic_comp; /* compensates for excessive wakeups from
+ * idle, mostly from external interrupts.
+ */
+};
+
+static struct powerclamp_calibration_data cal_data[MAX_TARGET_RATIO];
+
+static int window_size_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret = 0;
+ unsigned long new_window_size;
+
+ ret = kstrtoul(arg, 10, &new_window_size);
+ if (ret)
+ goto exit_win;
+ if (new_window_size > 10 || new_window_size < 2) {
+ pr_err("Out of recommended window size %lu, between 2-10\n",
+ new_window_size);
+ ret = -EINVAL;
+ }
+
+ window_size = clamp(new_window_size, 2ul, 10ul);
+ smp_mb();
+
+exit_win:
+
+ return ret;
+}
+
+static struct kernel_param_ops window_size_ops = {
+ .set = window_size_set,
+ .get = param_get_int,
+};
+
+module_param_cb(window_size, &window_size_ops, &window_size, 0644);
+MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
+ "\tpowerclamp controls idle ratio within this window. larger\n"
+ "\twindow size results in slower response time but more smooth\n"
+ "\tclamping results. default to 2.");
+
+static void find_target_mwait(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int highest_cstate = 0;
+ unsigned int highest_subcstate = 0;
+ int i;
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ return;
+
+ edx >>= MWAIT_SUBSTATE_SIZE;
+ for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+ if (edx & MWAIT_SUBSTATE_MASK) {
+ highest_cstate = i;
+ highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+ }
+ }
+ target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+ (highest_subcstate - 1);
+
+}
+
+static u64 pkg_state_counter(void)
+{
+ u64 val;
+ u64 count = 0;
+
+ static bool skip_c2;
+ static bool skip_c3;
+ static bool skip_c6;
+ static bool skip_c7;
+
+ if (!skip_c2) {
+ if (!rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &val))
+ count += val;
+ else
+ skip_c2 = true;
+ }
+
+ if (!skip_c3) {
+ if (!rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &val))
+ count += val;
+ else
+ skip_c3 = true;
+ }
+
+ if (!skip_c6) {
+ if (!rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &val))
+ count += val;
+ else
+ skip_c6 = true;
+ }
+
+ if (!skip_c7) {
+ if (!rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &val))
+ count += val;
+ else
+ skip_c7 = true;
+ }
+
+ return count;
+}
+
+static void noop_timer(unsigned long foo)
+{
+ /* empty... just the fact that we get the interrupt wakes us up */
+}
+
+static unsigned int get_compensation(int ratio)
+{
+ unsigned int comp = 0;
+
+ /* we only use compensation if all adjacent ones are good */
+ if (ratio == 1 &&
+ cal_data[ratio].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio + 1].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio + 2].confidence >= CONFIDENCE_OK) {
+ comp = (cal_data[ratio].steady_comp +
+ cal_data[ratio + 1].steady_comp +
+ cal_data[ratio + 2].steady_comp) / 3;
+ } else if (ratio == MAX_TARGET_RATIO - 1 &&
+ cal_data[ratio].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio - 2].confidence >= CONFIDENCE_OK) {
+ comp = (cal_data[ratio].steady_comp +
+ cal_data[ratio - 1].steady_comp +
+ cal_data[ratio - 2].steady_comp) / 3;
+ } else if (cal_data[ratio].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio + 1].confidence >= CONFIDENCE_OK) {
+ comp = (cal_data[ratio].steady_comp +
+ cal_data[ratio - 1].steady_comp +
+ cal_data[ratio + 1].steady_comp) / 3;
+ }
+
+ /* REVISIT: simple penalty of double idle injection */
+ if (reduce_irq)
+ comp = ratio;
+ /* do not exceed limit */
+ if (comp + ratio >= MAX_TARGET_RATIO)
+ comp = MAX_TARGET_RATIO - ratio - 1;
+
+ return comp;
+}
+
+static void adjust_compensation(int target_ratio, unsigned int win)
+{
+ int delta;
+ struct powerclamp_calibration_data *d = &cal_data[target_ratio];
+
+ /*
+ * Skip the adjustment once the confidence level has been reached, or
+ * when there are too many wakeups during the last idle injection
+ * period; in that case the data cannot be trusted for compensation.
+ */
+ if (d->confidence >= CONFIDENCE_OK ||
+ atomic_read(&idle_wakeup_counter) >
+ win * num_online_cpus())
+ return;
+
+ delta = set_target_ratio - current_ratio;
+ /* filter out bad data */
+ if (delta >= 0 && delta <= (1+target_ratio/10)) {
+ if (d->steady_comp)
+ d->steady_comp =
+ roundup(delta+d->steady_comp, 2)/2;
+ else
+ d->steady_comp = delta;
+ d->confidence++;
+ }
+}
+
+static bool powerclamp_adjust_controls(unsigned int target_ratio,
+ unsigned int guard, unsigned int win)
+{
+ static u64 msr_last, tsc_last;
+ u64 msr_now, tsc_now;
+ u64 val64;
+
+ /* check result for the last window */
+ msr_now = pkg_state_counter();
+ rdtscll(tsc_now);
+
+ /* calculate pkg cstate vs tsc ratio */
+ if (!msr_last || !tsc_last)
+ current_ratio = 1;
+ else if (tsc_now-tsc_last) {
+ val64 = 100*(msr_now-msr_last);
+ do_div(val64, (tsc_now-tsc_last));
+ current_ratio = val64;
+ }
+
+ /* update record */
+ msr_last = msr_now;
+ tsc_last = tsc_now;
+
+ adjust_compensation(target_ratio, win);
+ /*
+ * too many external interrupts, set flag such
+ * that we can take measures later.
+ */
+ reduce_irq = atomic_read(&idle_wakeup_counter) >=
+ 2 * win * num_online_cpus();
+
+ atomic_set(&idle_wakeup_counter, 0);
+ /* if we are above target+guard, skip */
+ return set_target_ratio + guard <= current_ratio;
+}
+
+static int clamp_thread(void *arg)
+{
+ int cpunr = (unsigned long)arg;
+ DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0);
+ static const struct sched_param param = {
+ .sched_priority = MAX_USER_RT_PRIO/2,
+ };
+ unsigned int count = 0;
+ unsigned int target_ratio;
+
+ set_bit(cpunr, cpu_clamping_mask);
+ set_freezable();
+ init_timer_on_stack(&wakeup_timer);
+ sched_setscheduler(current, SCHED_FIFO, &param);
+
+ while (true == clamping && !kthread_should_stop() &&
+ cpu_online(cpunr)) {
+ int sleeptime;
+ unsigned long target_jiffies;
+ unsigned int guard;
+ unsigned int compensation = 0;
+ int interval; /* jiffies to sleep for each attempt */
+ unsigned int duration_jiffies = msecs_to_jiffies(duration);
+ unsigned int window_size_now;
+
+ try_to_freeze();
+ /*
+ * make sure user selected ratio does not take effect until
+ * the next round. adjust target_ratio if user has changed
+ * target such that we can converge quickly.
+ */
+ target_ratio = set_target_ratio;
+ guard = 1 + target_ratio/20;
+ window_size_now = window_size;
+ count++;
+
+ /*
+ * systems may have different ability to enter package level
+ * c-states, thus we need to compensate the injected idle ratio
+ * to achieve the actual target reported by the HW.
+ */
+ compensation = get_compensation(target_ratio);
+ interval = duration_jiffies*100/(target_ratio+compensation);
+
+ /* align idle time */
+ target_jiffies = roundup(jiffies, interval);
+ sleeptime = target_jiffies - jiffies;
+ if (sleeptime <= 0)
+ sleeptime = 1;
+ schedule_timeout_interruptible(sleeptime);
+ /*
+ * only the elected controlling cpu can collect stats and update
+ * control parameters.
+ */
+ if (cpunr == control_cpu && !(count%window_size_now)) {
+ should_skip =
+ powerclamp_adjust_controls(target_ratio,
+ guard, window_size_now);
+ smp_mb();
+ }
+
+ if (should_skip)
+ continue;
+
+ target_jiffies = jiffies + duration_jiffies;
+ mod_timer(&wakeup_timer, target_jiffies);
+ if (unlikely(local_softirq_pending()))
+ continue;
+ /*
+ * stop tick sched during idle time, interrupts are still
+ * allowed. thus jiffies are updated properly.
+ */
+ preempt_disable();
+ tick_nohz_idle_enter();
+ /* mwait until target jiffies is reached */
+ while (time_before(jiffies, target_jiffies)) {
+ unsigned long ecx = 1;
+ unsigned long eax = target_mwait;
+
+ /*
+ * REVISIT: may call enter_idle() to notify drivers who
+ * can save power during cpu idle. same for exit_idle()
+ */
+ local_touch_nmi();
+ stop_critical_timings();
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ cpu_relax(); /* allow HT sibling to run */
+ __mwait(eax, ecx);
+ start_critical_timings();
+ atomic_inc(&idle_wakeup_counter);
+ }
+ tick_nohz_idle_exit();
+ preempt_enable_no_resched();
+ }
+ del_timer_sync(&wakeup_timer);
+ clear_bit(cpunr, cpu_clamping_mask);
+
+ return 0;
+}
+
+/*
+ * 1 HZ polling while clamping is active, useful for userspace
+ * to monitor actual idle ratio.
+ */
+static void poll_pkg_cstate(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate);
+static void poll_pkg_cstate(struct work_struct *dummy)
+{
+ static u64 msr_last;
+ static u64 tsc_last;
+ static unsigned long jiffies_last;
+
+ u64 msr_now;
+ unsigned long jiffies_now;
+ u64 tsc_now;
+ u64 val64;
+
+ msr_now = pkg_state_counter();
+ rdtscll(tsc_now);
+ jiffies_now = jiffies;
+
+ /* calculate pkg cstate vs tsc ratio */
+ if (!msr_last || !tsc_last)
+ pkg_cstate_ratio_cur = 1;
+ else {
+ if (tsc_now - tsc_last) {
+ val64 = 100 * (msr_now - msr_last);
+ do_div(val64, (tsc_now - tsc_last));
+ pkg_cstate_ratio_cur = val64;
+ }
+ }
+
+ /* update record */
+ msr_last = msr_now;
+ jiffies_last = jiffies_now;
+ tsc_last = tsc_now;
+
+ if (true == clamping)
+ schedule_delayed_work(&poll_pkg_cstate_work, HZ);
+}
+
+static int start_power_clamp(void)
+{
+ unsigned long cpu;
+ struct task_struct *thread;
+
+ /* check if pkg cstate counter is completely 0, abort in this case */
+ if (!pkg_state_counter()) {
+ pr_err("pkg cstate counter not functional, abort\n");
+ return -EINVAL;
+ }
+
+ set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
+ /* prevent cpu hotplug */
+ get_online_cpus();
+
+ /* prefer BSP */
+ control_cpu = 0;
+ if (!cpu_online(control_cpu))
+ control_cpu = smp_processor_id();
+
+ clamping = true;
+ schedule_delayed_work(&poll_pkg_cstate_work, 0);
+
+ /* start one thread per online cpu */
+ for_each_online_cpu(cpu) {
+ struct task_struct **p =
+ per_cpu_ptr(powerclamp_thread, cpu);
+
+ thread = kthread_create_on_node(clamp_thread,
+ (void *) cpu,
+ cpu_to_node(cpu),
+ "kidle_inject/%ld", cpu);
+ /* bind to cpu here */
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ wake_up_process(thread);
+ *p = thread;
+ }
+
+ }
+ put_online_cpus();
+
+ return 0;
+}
+
+static void end_power_clamp(void)
+{
+ int i;
+ struct task_struct *thread;
+
+ clamping = false;
+ /*
+ * make clamping visible to other cpus and give per-cpu clamping
+ * threads some time to exit, or they get killed later.
+ */
+ smp_mb();
+ msleep(20);
+ if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
+ for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
+ pr_debug("clamping thread for cpu %d alive, kill\n", i);
+ thread = *per_cpu_ptr(powerclamp_thread, i);
+ kthread_stop(thread);
+ }
+ }
+}
+
+static int powerclamp_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned long cpu = (unsigned long)hcpu;
+ struct task_struct *thread;
+ struct task_struct **percpu_thread =
+ per_cpu_ptr(powerclamp_thread, cpu);
+
+ if (false == clamping)
+ goto exit_ok;
+
+ switch (action) {
+ case CPU_ONLINE:
+ thread = kthread_create_on_node(clamp_thread,
+ (void *) cpu,
+ cpu_to_node(cpu),
+ "kidle_inject/%lu", cpu);
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ wake_up_process(thread);
+ *percpu_thread = thread;
+ }
+ /* prefer BSP as controlling CPU */
+ if (cpu == 0) {
+ control_cpu = 0;
+ smp_mb();
+ }
+ break;
+ case CPU_DEAD:
+ if (test_bit(cpu, cpu_clamping_mask)) {
+ pr_err("cpu %lu dead but powerclamping thread is not\n",
+ cpu);
+ kthread_stop(*percpu_thread);
+ }
+ if (cpu == control_cpu) {
+ control_cpu = smp_processor_id();
+ smp_mb();
+ }
+ }
+
+exit_ok:
+ return NOTIFY_OK;
+}
+
+static struct notifier_block powerclamp_cpu_notifier = {
+ .notifier_call = powerclamp_cpu_callback,
+};
+
+static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = MAX_TARGET_RATIO;
+
+ return 0;
+}
+
+static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ if (true == clamping)
+ *state = pkg_cstate_ratio_cur;
+ else
+ /* to save power, do not poll idle ratio while not clamping */
+ *state = -1; /* indicates invalid state */
+
+ return 0;
+}
+
+static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long new_target_ratio)
+{
+ int ret = 0;
+
+ new_target_ratio = clamp(new_target_ratio, 0UL,
+ (unsigned long) (MAX_TARGET_RATIO-1));
+ if (set_target_ratio == 0 && new_target_ratio > 0) {
+ pr_info("Start idle injection to reduce power\n");
+ set_target_ratio = new_target_ratio;
+ ret = start_power_clamp();
+ goto exit_set;
+ } else if (set_target_ratio > 0 && new_target_ratio == 0) {
+ pr_info("Stop forced idle injection\n");
+ set_target_ratio = 0;
+ end_power_clamp();
+ } else /* adjust currently running */ {
+ set_target_ratio = new_target_ratio;
+ /* make new set_target_ratio visible to other cpus */
+ smp_mb();
+ }
+
+exit_set:
+ return ret;
+}
+
+/* bind to generic thermal layer as cooling device*/
+static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
+ .get_max_state = powerclamp_get_max_state,
+ .get_cur_state = powerclamp_get_cur_state,
+ .set_cur_state = powerclamp_set_cur_state,
+};
+
+/* runs on Nehalem and later */
+static const struct x86_cpu_id intel_powerclamp_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x1a},
+ { X86_VENDOR_INTEL, 6, 0x1c},
+ { X86_VENDOR_INTEL, 6, 0x1e},
+ { X86_VENDOR_INTEL, 6, 0x1f},
+ { X86_VENDOR_INTEL, 6, 0x25},
+ { X86_VENDOR_INTEL, 6, 0x26},
+ { X86_VENDOR_INTEL, 6, 0x2a},
+ { X86_VENDOR_INTEL, 6, 0x2c},
+ { X86_VENDOR_INTEL, 6, 0x2d},
+ { X86_VENDOR_INTEL, 6, 0x2e},
+ { X86_VENDOR_INTEL, 6, 0x2f},
+ { X86_VENDOR_INTEL, 6, 0x3a},
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
+
+static int powerclamp_probe(void)
+{
+ if (!x86_match_cpu(intel_powerclamp_ids)) {
+ pr_err("Intel powerclamp does not run on family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ return -ENODEV;
+ }
+ if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
+ !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) ||
+ !boot_cpu_has(X86_FEATURE_MWAIT) ||
+ !boot_cpu_has(X86_FEATURE_ARAT))
+ return -ENODEV;
+
+ /* find the deepest mwait value */
+ find_target_mwait();
+
+ return 0;
+}
+
+static int powerclamp_debug_show(struct seq_file *m, void *unused)
+{
+ int i = 0;
+
+ seq_printf(m, "controlling cpu: %d\n", control_cpu);
+ seq_printf(m, "pct confidence steady dynamic (compensation)\n");
+ for (i = 0; i < MAX_TARGET_RATIO; i++) {
+ seq_printf(m, "%d\t%lu\t%lu\t%lu\n",
+ i,
+ cal_data[i].confidence,
+ cal_data[i].steady_comp,
+ cal_data[i].dynamic_comp);
+ }
+
+ return 0;
+}
+
+static int powerclamp_debug_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, powerclamp_debug_show, inode->i_private);
+}
+
+static const struct file_operations powerclamp_debug_fops = {
+ .open = powerclamp_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static inline void powerclamp_create_debug_files(void)
+{
+ debug_dir = debugfs_create_dir("intel_powerclamp", NULL);
+ if (!debug_dir)
+ return;
+
+ if (!debugfs_create_file("powerclamp_calib", S_IRUGO, debug_dir,
+ cal_data, &powerclamp_debug_fops))
+ goto file_error;
+
+ return;
+
+file_error:
+ debugfs_remove_recursive(debug_dir);
+}
+
+static int powerclamp_init(void)
+{
+ int retval;
+ int bitmap_size;
+
+ bitmap_size = BITS_TO_LONGS(num_possible_cpus()) * sizeof(long);
+ cpu_clamping_mask = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!cpu_clamping_mask)
+ return -ENOMEM;
+
+ /* probe cpu features and ids here */
+ retval = powerclamp_probe();
+ if (retval)
+ return retval;
+ /* set default limit, maybe adjusted during runtime based on feedback */
+ window_size = 2;
+ register_hotcpu_notifier(&powerclamp_cpu_notifier);
+ powerclamp_thread = alloc_percpu(struct task_struct *);
+ cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
+ &powerclamp_cooling_ops);
+ if (IS_ERR(cooling_dev))
+ return -ENODEV;
+
+ if (!duration)
+ duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);
+ powerclamp_create_debug_files();
+
+ return 0;
+}
+module_init(powerclamp_init);
+
+static void powerclamp_exit(void)
+{
+ unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
+ end_power_clamp();
+ free_percpu(powerclamp_thread);
+ thermal_cooling_device_unregister(cooling_dev);
+ kfree(cpu_clamping_mask);
+
+ cancel_delayed_work_sync(&poll_pkg_cstate_work);
+ debugfs_remove_recursive(debug_dir);
+}
+module_exit(powerclamp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
+MODULE_DESCRIPTION("Package Level C-state Idle Injection for Intel CPUs");
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
new file mode 100644
index 000000000000..65cb4f09e8f6
--- /dev/null
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -0,0 +1,134 @@
+/*
+ * Kirkwood thermal sensor driver
+ *
+ * Copyright (C) 2012 Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#define KIRKWOOD_THERMAL_VALID_OFFSET 9
+#define KIRKWOOD_THERMAL_VALID_MASK 0x1
+#define KIRKWOOD_THERMAL_TEMP_OFFSET 10
+#define KIRKWOOD_THERMAL_TEMP_MASK 0x1FF
+
+/* Kirkwood Thermal Sensor Dev Structure */
+struct kirkwood_thermal_priv {
+ void __iomem *sensor;
+};
+
+static int kirkwood_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ unsigned long reg;
+ struct kirkwood_thermal_priv *priv = thermal->devdata;
+
+ reg = readl_relaxed(priv->sensor);
+
+ /* Valid check */
+ if (!(reg >> KIRKWOOD_THERMAL_VALID_OFFSET) &
+ KIRKWOOD_THERMAL_VALID_MASK) {
+ dev_err(&thermal->device,
+ "Temperature sensor reading not valid\n");
+ return -EIO;
+ }
+
+ /*
+ * Calculate temperature. See Section 8.10.1 of the 88AP510
+ * datasheet, which has the same sensor.
+ * Documentation/arm/Marvell/README
+ */
+ reg = (reg >> KIRKWOOD_THERMAL_TEMP_OFFSET) &
+ KIRKWOOD_THERMAL_TEMP_MASK;
+ *temp = ((2281638UL - (7298*reg)) / 10);
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+ .get_temp = kirkwood_get_temp,
+};
+
+static const struct of_device_id kirkwood_thermal_id_table[] = {
+ { .compatible = "marvell,kirkwood-thermal" },
+ {}
+};
+
+static int kirkwood_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal = NULL;
+ struct kirkwood_thermal_priv *priv;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv->sensor) {
+ dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0,
+ priv, &ops, NULL, 0, 0);
+ if (IS_ERR(thermal)) {
+ dev_err(&pdev->dev,
+ "Failed to register thermal zone device\n");
+ return PTR_ERR(thermal);
+ }
+
+ platform_set_drvdata(pdev, thermal);
+
+ return 0;
+}
+
+static int kirkwood_thermal_exit(struct platform_device *pdev)
+{
+ struct thermal_zone_device *kirkwood_thermal =
+ platform_get_drvdata(pdev);
+
+ thermal_zone_device_unregister(kirkwood_thermal);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, kirkwood_thermal_id_table);
+
+static struct platform_driver kirkwood_thermal_driver = {
+ .probe = kirkwood_thermal_probe,
+ .remove = kirkwood_thermal_exit,
+ .driver = {
+ .name = "kirkwood_thermal",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(kirkwood_thermal_id_table),
+ },
+};
+
+module_platform_driver(kirkwood_thermal_driver);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu@nigauri.org>");
+MODULE_DESCRIPTION("kirkwood thermal driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 90db951725da..28f091994013 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -19,225 +19,473 @@
*/
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/thermal.h>
-#define THSCR 0x2c
-#define THSSR 0x30
+#define IDLE_INTERVAL 5000
+
+#define COMMON_STR 0x00
+#define COMMON_ENR 0x04
+#define COMMON_INTMSK 0x0c
+
+#define REG_POSNEG 0x20
+#define REG_FILONOFF 0x28
+#define REG_THSCR 0x2c
+#define REG_THSSR 0x30
+#define REG_INTCTRL 0x34
/* THSCR */
-#define CPTAP 0xf
+#define CPCTL (1 << 12)
/* THSSR */
#define CTEMP 0x3f
-
-struct rcar_thermal_priv {
+struct rcar_thermal_common {
void __iomem *base;
struct device *dev;
+ struct list_head head;
spinlock_t lock;
- u32 comp;
};
+struct rcar_thermal_priv {
+ void __iomem *base;
+ struct rcar_thermal_common *common;
+ struct thermal_zone_device *zone;
+ struct delayed_work work;
+ struct mutex lock;
+ struct list_head list;
+ int id;
+ int ctemp;
+};
+
+#define rcar_thermal_for_each_priv(pos, common) \
+ list_for_each_entry(pos, &common->head, list)
+
#define MCELSIUS(temp) ((temp) * 1000)
-#define rcar_zone_to_priv(zone) (zone->devdata)
+#define rcar_zone_to_priv(zone) ((zone)->devdata)
+#define rcar_priv_to_dev(priv) ((priv)->common->dev)
+#define rcar_has_irq_support(priv) ((priv)->common->base)
+#define rcar_id_to_shift(priv) ((priv)->id * 8)
+
+#ifdef DEBUG
+# define rcar_force_update_temp(priv) 1
+#else
+# define rcar_force_update_temp(priv) 0
+#endif
/*
* basic functions
*/
-static u32 rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg)
+#define rcar_thermal_common_read(c, r) \
+ _rcar_thermal_common_read(c, COMMON_ ##r)
+static u32 _rcar_thermal_common_read(struct rcar_thermal_common *common,
+ u32 reg)
{
- unsigned long flags;
- u32 ret;
-
- spin_lock_irqsave(&priv->lock, flags);
+ return ioread32(common->base + reg);
+}
- ret = ioread32(priv->base + reg);
+#define rcar_thermal_common_write(c, r, d) \
+ _rcar_thermal_common_write(c, COMMON_ ##r, d)
+static void _rcar_thermal_common_write(struct rcar_thermal_common *common,
+ u32 reg, u32 data)
+{
+ iowrite32(data, common->base + reg);
+}
- spin_unlock_irqrestore(&priv->lock, flags);
+#define rcar_thermal_common_bset(c, r, m, d) \
+ _rcar_thermal_common_bset(c, COMMON_ ##r, m, d)
+static void _rcar_thermal_common_bset(struct rcar_thermal_common *common,
+ u32 reg, u32 mask, u32 data)
+{
+ u32 val;
- return ret;
+ val = ioread32(common->base + reg);
+ val &= ~mask;
+ val |= (data & mask);
+ iowrite32(val, common->base + reg);
}
-#if 0 /* no user at this point */
-static void rcar_thermal_write(struct rcar_thermal_priv *priv,
- u32 reg, u32 data)
+#define rcar_thermal_read(p, r) _rcar_thermal_read(p, REG_ ##r)
+static u32 _rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg)
{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
+ return ioread32(priv->base + reg);
+}
+#define rcar_thermal_write(p, r, d) _rcar_thermal_write(p, REG_ ##r, d)
+static void _rcar_thermal_write(struct rcar_thermal_priv *priv,
+ u32 reg, u32 data)
+{
iowrite32(data, priv->base + reg);
-
- spin_unlock_irqrestore(&priv->lock, flags);
}
-#endif
-static void rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg,
- u32 mask, u32 data)
+#define rcar_thermal_bset(p, r, m, d) _rcar_thermal_bset(p, REG_ ##r, m, d)
+static void _rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg,
+ u32 mask, u32 data)
{
- unsigned long flags;
u32 val;
- spin_lock_irqsave(&priv->lock, flags);
-
val = ioread32(priv->base + reg);
val &= ~mask;
val |= (data & mask);
iowrite32(val, priv->base + reg);
-
- spin_unlock_irqrestore(&priv->lock, flags);
}
/*
* zone device functions
*/
-static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
- unsigned long *temp)
+static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
{
- struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
- int val, min, max, tmp;
-
- tmp = -200; /* default */
- while (1) {
- if (priv->comp < 1 || priv->comp > 12) {
- dev_err(priv->dev,
- "THSSR invalid data (%d)\n", priv->comp);
- priv->comp = 4; /* for next thermal */
- return -EINVAL;
- }
+ struct device *dev = rcar_priv_to_dev(priv);
+ int i;
+ int ctemp, old, new;
- /*
- * THS comparator offset and the reference temperature
- *
- * Comparator | reference | Temperature field
- * offset | temperature | measurement
- * | (degrees C) | (degrees C)
- * -------------+---------------+-------------------
- * 1 | -45 | -45 to -30
- * 2 | -30 | -30 to -15
- * 3 | -15 | -15 to 0
- * 4 | 0 | 0 to +15
- * 5 | +15 | +15 to +30
- * 6 | +30 | +30 to +45
- * 7 | +45 | +45 to +60
- * 8 | +60 | +60 to +75
- * 9 | +75 | +75 to +90
- * 10 | +90 | +90 to +105
- * 11 | +105 | +105 to +120
- * 12 | +120 | +120 to +135
- */
+ mutex_lock(&priv->lock);
- /* calculate thermal limitation */
- min = (priv->comp * 15) - 60;
- max = min + 15;
+ /*
+ * The TSC decides the value of CPTAP automatically,
+ * and this is the condition which validates the interrupt.
+ */
+ rcar_thermal_bset(priv, THSCR, CPCTL, CPCTL);
+ ctemp = 0;
+ old = ~0;
+ for (i = 0; i < 128; i++) {
/*
* we need to wait 300us after changing comparator offset
* to get stable temperature.
* see "Usage Notes" on datasheet
*/
- rcar_thermal_bset(priv, THSCR, CPTAP, priv->comp);
udelay(300);
- /* calculate current temperature */
- val = rcar_thermal_read(priv, THSSR) & CTEMP;
- val = (val * 5) - 65;
+ new = rcar_thermal_read(priv, THSSR) & CTEMP;
+ if (new == old) {
+ ctemp = new;
+ break;
+ }
+ old = new;
+ }
- dev_dbg(priv->dev, "comp/min/max/val = %d/%d/%d/%d\n",
- priv->comp, min, max, val);
+ if (!ctemp) {
+ dev_err(dev, "thermal sensor was broken\n");
+ return -EINVAL;
+ }
- /*
- * If val is same as min/max, then,
- * it should try again on next comparator.
- * But the val might be correct temperature.
- * Keep it on "tmp" and compare with next val.
- */
- if (tmp == val)
- break;
+ /*
+ * enable IRQ
+ */
+ if (rcar_has_irq_support(priv)) {
+ rcar_thermal_write(priv, FILONOFF, 0);
- if (val <= min) {
- tmp = min;
- priv->comp--; /* try again */
- } else if (val >= max) {
- tmp = max;
- priv->comp++; /* try again */
- } else {
- tmp = val;
- break;
- }
+ /* enable Rising/Falling edge interrupt */
+ rcar_thermal_write(priv, POSNEG, 0x1);
+ rcar_thermal_write(priv, INTCTRL, (((ctemp - 0) << 8) |
+ ((ctemp - 1) << 0)));
+ }
+
+ dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp);
+
+ priv->ctemp = ctemp;
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
+ unsigned long *temp)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+
+ if (!rcar_has_irq_support(priv) || rcar_force_update_temp(priv))
+ rcar_thermal_update_temp(priv);
+
+ mutex_lock(&priv->lock);
+ *temp = MCELSIUS((priv->ctemp * 5) - 65);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
+ int trip, enum thermal_trip_type *type)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+ struct device *dev = rcar_priv_to_dev(priv);
+
+ /* see rcar_thermal_get_temp() */
+ switch (trip) {
+ case 0: /* +90 <= temp */
+ *type = THERMAL_TRIP_CRITICAL;
+ break;
+ default:
+ dev_err(dev, "rcar driver trip error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
+ int trip, unsigned long *temp)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+ struct device *dev = rcar_priv_to_dev(priv);
+
+ /* see rcar_thermal_get_temp() */
+ switch (trip) {
+ case 0: /* +90 <= temp */
+ *temp = MCELSIUS(90);
+ break;
+ default:
+ dev_err(dev, "rcar driver trip error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rcar_thermal_notify(struct thermal_zone_device *zone,
+ int trip, enum thermal_trip_type type)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+ struct device *dev = rcar_priv_to_dev(priv);
+
+ switch (type) {
+ case THERMAL_TRIP_CRITICAL:
+ /* FIXME */
+ dev_warn(dev, "Thermal reached to critical temperature\n");
+ break;
+ default:
+ break;
}
- *temp = MCELSIUS(tmp);
return 0;
}
static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
- .get_temp = rcar_thermal_get_temp,
+ .get_temp = rcar_thermal_get_temp,
+ .get_trip_type = rcar_thermal_get_trip_type,
+ .get_trip_temp = rcar_thermal_get_trip_temp,
+ .notify = rcar_thermal_notify,
};
/*
- * platform functions
+ * interrupt
*/
-static int rcar_thermal_probe(struct platform_device *pdev)
+#define rcar_thermal_irq_enable(p) _rcar_thermal_irq_ctrl(p, 1)
+#define rcar_thermal_irq_disable(p) _rcar_thermal_irq_ctrl(p, 0)
+static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
+{
+ struct rcar_thermal_common *common = priv->common;
+ unsigned long flags;
+ u32 mask = 0x3 << rcar_id_to_shift(priv); /* enable Rising/Falling */
+
+ spin_lock_irqsave(&common->lock, flags);
+
+ rcar_thermal_common_bset(common, INTMSK, mask, enable ? 0 : mask);
+
+ spin_unlock_irqrestore(&common->lock, flags);
+}
+
+static void rcar_thermal_work(struct work_struct *work)
{
- struct thermal_zone_device *zone;
struct rcar_thermal_priv *priv;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Could not get platform resource\n");
- return -ENODEV;
+ priv = container_of(work, struct rcar_thermal_priv, work.work);
+
+ rcar_thermal_update_temp(priv);
+ rcar_thermal_irq_enable(priv);
+ thermal_zone_device_update(priv->zone);
+}
+
+static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
+{
+ struct device *dev = rcar_priv_to_dev(priv);
+
+ status = (status >> rcar_id_to_shift(priv)) & 0x3;
+
+ if (status & 0x3) {
+ dev_dbg(dev, "thermal%d %s%s\n",
+ priv->id,
+ (status & 0x2) ? "Rising " : "",
+ (status & 0x1) ? "Falling" : "");
}
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "Could not allocate priv\n");
- return -ENOMEM;
+ return status;
+}
+
+static irqreturn_t rcar_thermal_irq(int irq, void *data)
+{
+ struct rcar_thermal_common *common = data;
+ struct rcar_thermal_priv *priv;
+ unsigned long flags;
+ u32 status, mask;
+
+ spin_lock_irqsave(&common->lock, flags);
+
+ mask = rcar_thermal_common_read(common, INTMSK);
+ status = rcar_thermal_common_read(common, STR);
+ rcar_thermal_common_write(common, STR, 0x000F0F0F & mask);
+
+ spin_unlock_irqrestore(&common->lock, flags);
+
+ status = status & ~mask;
+
+ /*
+ * check the status
+ */
+ rcar_thermal_for_each_priv(priv, common) {
+ if (rcar_thermal_had_changed(priv, status)) {
+ rcar_thermal_irq_disable(priv);
+ schedule_delayed_work(&priv->work,
+ msecs_to_jiffies(300));
+ }
}
- priv->comp = 4; /* basic setup */
- priv->dev = &pdev->dev;
- spin_lock_init(&priv->lock);
- priv->base = devm_ioremap_nocache(&pdev->dev,
- res->start, resource_size(res));
- if (!priv->base) {
- dev_err(&pdev->dev, "Unable to ioremap thermal register\n");
+ return IRQ_HANDLED;
+}
+
+/*
+ * platform functions
+ */
+static int rcar_thermal_probe(struct platform_device *pdev)
+{
+ struct rcar_thermal_common *common;
+ struct rcar_thermal_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct resource *res, *irq;
+ int mres = 0;
+ int i;
+ int idle = IDLE_INTERVAL;
+
+ common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
+ if (!common) {
+ dev_err(dev, "Could not allocate common\n");
return -ENOMEM;
}
- zone = thermal_zone_device_register("rcar_thermal", 0, 0, priv,
- &rcar_thermal_zone_ops, NULL, 0, 0);
- if (IS_ERR(zone)) {
- dev_err(&pdev->dev, "thermal zone device is NULL\n");
- return PTR_ERR(zone);
+ INIT_LIST_HEAD(&common->head);
+ spin_lock_init(&common->lock);
+ common->dev = dev;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq) {
+ int ret;
+
+ /*
+ * platform has IRQ support.
+ * Then, the driver uses the common registers
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
+ if (!res) {
+ dev_err(dev, "Could not get platform resource\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
+ dev_name(dev), common);
+ if (ret) {
+ dev_err(dev, "irq request failed\n ");
+ return ret;
+ }
+
+ /*
+ * this makes rcar_has_irq_support() return true
+ */
+ common->base = devm_request_and_ioremap(dev, res);
+ if (!common->base) {
+ dev_err(dev, "Unable to ioremap thermal register\n");
+ return -ENOMEM;
+ }
+
+ /* enable temperature comparison */
+ rcar_thermal_common_write(common, ENR, 0x00030303);
+
+ idle = 0; /* polling delay is not needed */
}
- platform_set_drvdata(pdev, zone);
+ for (i = 0;; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
+ if (!res)
+ break;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev, "Could not allocate priv\n");
+ return -ENOMEM;
+ }
+
+ priv->base = devm_request_and_ioremap(dev, res);
+ if (!priv->base) {
+ dev_err(dev, "Unable to ioremap priv register\n");
+ return -ENOMEM;
+ }
- dev_info(&pdev->dev, "proved\n");
+ priv->common = common;
+ priv->id = i;
+ mutex_init(&priv->lock);
+ INIT_LIST_HEAD(&priv->list);
+ INIT_DELAYED_WORK(&priv->work, rcar_thermal_work);
+ rcar_thermal_update_temp(priv);
+
+ priv->zone = thermal_zone_device_register("rcar_thermal",
+ 1, 0, priv,
+ &rcar_thermal_zone_ops, NULL, 0,
+ idle);
+ if (IS_ERR(priv->zone)) {
+ dev_err(dev, "can't register thermal zone\n");
+ goto error_unregister;
+ }
+
+ list_move_tail(&priv->list, &common->head);
+
+ if (rcar_has_irq_support(priv))
+ rcar_thermal_irq_enable(priv);
+ }
+
+ platform_set_drvdata(pdev, common);
+
+ dev_info(dev, "%d sensor proved\n", i);
return 0;
+
+error_unregister:
+ rcar_thermal_for_each_priv(priv, common)
+ thermal_zone_device_unregister(priv->zone);
+
+ return -ENODEV;
}
static int rcar_thermal_remove(struct platform_device *pdev)
{
- struct thermal_zone_device *zone = platform_get_drvdata(pdev);
+ struct rcar_thermal_common *common = platform_get_drvdata(pdev);
+ struct rcar_thermal_priv *priv;
+
+ rcar_thermal_for_each_priv(priv, common)
+ thermal_zone_device_unregister(priv->zone);
- thermal_zone_device_unregister(zone);
platform_set_drvdata(pdev, NULL);
return 0;
}
+static const struct of_device_id rcar_thermal_dt_ids[] = {
+ { .compatible = "renesas,rcar-thermal", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
+
static struct platform_driver rcar_thermal_driver = {
.driver = {
.name = "rcar_thermal",
+ .of_match_table = rcar_thermal_dt_ids,
},
.probe = rcar_thermal_probe,
.remove = rcar_thermal_remove,
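Two details of the reworked rcar update path are worth spelling out: the sensor is re-read, with a 300 us settle time, until two consecutive CTEMP values agree, and the stable code is converted as (ctemp * 5) - 65 degrees C. A sketch with the register access stubbed out; read_ctemp() and its sample values are assumptions standing in for the THSSR access, not driver code.

/* Illustrative sketch of the r-car CTEMP handling; read_ctemp() is a stub
 * standing in for the THSSR register read in the patch above. */
#include <stdio.h>

#define CTEMP_MASK	0x3f

static int read_ctemp(void)
{
	static const int samples[] = { 30, 31, 31 };	/* fake readings */
	static int i;

	return samples[i < 2 ? i++ : 2] & CTEMP_MASK;
}

/* poll until two consecutive readings agree, as the update loop does */
static int stable_ctemp(void)
{
	int i, old = ~0, new;

	for (i = 0; i < 128; i++) {
		/* the driver waits 300us here for the comparator to settle */
		new = read_ctemp();
		if (new == old)
			return new;
		old = new;
	}
	return -1;
}

int main(void)
{
	int ctemp = stable_ctemp();

	if (ctemp >= 0)
		printf("%d mC\n", ((ctemp * 5) - 65) * 1000);	/* 90000 mC */
	return 0;
}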
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 6b2d8b21aaee..3c5ee5607977 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -131,7 +131,7 @@ static int spear_thermal_probe(struct platform_device *pdev)
return -ENOMEM;
}
- stdev->clk = clk_get(&pdev->dev, NULL);
+ stdev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(stdev->clk)) {
dev_err(&pdev->dev, "Can't get clock\n");
return PTR_ERR(stdev->clk);
@@ -140,7 +140,7 @@ static int spear_thermal_probe(struct platform_device *pdev)
ret = clk_enable(stdev->clk);
if (ret) {
dev_err(&pdev->dev, "Can't enable clock\n");
- goto put_clk;
+ return ret;
}
stdev->flags = val;
@@ -163,8 +163,6 @@ static int spear_thermal_probe(struct platform_device *pdev)
disable_clk:
clk_disable(stdev->clk);
-put_clk:
- clk_put(stdev->clk);
return ret;
}
@@ -183,7 +181,6 @@ static int spear_thermal_exit(struct platform_device *pdev)
writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
clk_disable(stdev->clk);
- clk_put(stdev->clk);
return 0;
}
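The spear change swaps clk_get() for the managed devm_clk_get(), which is why the put_clk label and the clk_put() in the exit path can go: the clock reference is released automatically when the device is unbound. A simplified sketch of the resulting probe shape follows; it illustrates the pattern only and is not the actual spear_thermal code.

/* Simplified shape of a probe using a managed clock lookup; an
 * illustration of the devm pattern, not the spear_thermal driver. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);	/* released on driver unbind */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);
	if (ret)
		return ret;	/* no clk_put() needed on the error path */

	/* ... register the thermal zone, disabling the clock on failure ... */
	return 0;
}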
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 0cd5e9fbab1c..407cde3211c1 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -35,21 +35,54 @@
* state for this trip point
* b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
* state for this trip point
+ * c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
+ * for this trip point
+ * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
+ * for this trip point
+ * If the temperature is lower than a trip point,
+ * a. if the trend is THERMAL_TREND_RAISING, do nothing
+ * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
+ * state for this trip point, if the cooling state already
+ * equals lower limit, deactivate the thermal instance
+ * c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing
+ * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit,
+ * if the cooling state already equals lower limit,
+ *    deactivate the thermal instance
*/
static unsigned long get_target_state(struct thermal_instance *instance,
- enum thermal_trend trend)
+ enum thermal_trend trend, bool throttle)
{
struct thermal_cooling_device *cdev = instance->cdev;
unsigned long cur_state;
cdev->ops->get_cur_state(cdev, &cur_state);
- if (trend == THERMAL_TREND_RAISING) {
- cur_state = cur_state < instance->upper ?
- (cur_state + 1) : instance->upper;
- } else if (trend == THERMAL_TREND_DROPPING) {
- cur_state = cur_state > instance->lower ?
- (cur_state - 1) : instance->lower;
+ switch (trend) {
+ case THERMAL_TREND_RAISING:
+ if (throttle)
+ cur_state = cur_state < instance->upper ?
+ (cur_state + 1) : instance->upper;
+ break;
+ case THERMAL_TREND_RAISE_FULL:
+ if (throttle)
+ cur_state = instance->upper;
+ break;
+ case THERMAL_TREND_DROPPING:
+ if (cur_state == instance->lower) {
+ if (!throttle)
+ cur_state = -1;
+ } else
+ cur_state -= 1;
+ break;
+ case THERMAL_TREND_DROP_FULL:
+ if (cur_state == instance->lower) {
+ if (!throttle)
+ cur_state = -1;
+ } else
+ cur_state = instance->lower;
+ break;
+ default:
+ break;
}
return cur_state;
@@ -66,57 +99,14 @@ static void update_passive_instance(struct thermal_zone_device *tz,
tz->passive += value;
}
-static void update_instance_for_throttle(struct thermal_zone_device *tz,
- int trip, enum thermal_trip_type trip_type,
- enum thermal_trend trend)
-{
- struct thermal_instance *instance;
-
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip)
- continue;
-
- instance->target = get_target_state(instance, trend);
-
- /* Activate a passive thermal instance */
- if (instance->target == THERMAL_NO_TARGET)
- update_passive_instance(tz, trip_type, 1);
-
- instance->cdev->updated = false; /* cdev needs update */
- }
-}
-
-static void update_instance_for_dethrottle(struct thermal_zone_device *tz,
- int trip, enum thermal_trip_type trip_type)
-{
- struct thermal_instance *instance;
- struct thermal_cooling_device *cdev;
- unsigned long cur_state;
-
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip ||
- instance->target == THERMAL_NO_TARGET)
- continue;
-
- cdev = instance->cdev;
- cdev->ops->get_cur_state(cdev, &cur_state);
-
- instance->target = cur_state > instance->lower ?
- (cur_state - 1) : THERMAL_NO_TARGET;
-
- /* Deactivate a passive thermal instance */
- if (instance->target == THERMAL_NO_TARGET)
- update_passive_instance(tz, trip_type, -1);
-
- cdev->updated = false; /* cdev needs update */
- }
-}
-
static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
{
long trip_temp;
enum thermal_trip_type trip_type;
enum thermal_trend trend;
+ struct thermal_instance *instance;
+ bool throttle = false;
+ int old_target;
if (trip == THERMAL_TRIPS_NONE) {
trip_temp = tz->forced_passive;
@@ -128,12 +118,30 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
trend = get_tz_trend(tz, trip);
+ if (tz->temperature >= trip_temp)
+ throttle = true;
+
mutex_lock(&tz->lock);
- if (tz->temperature >= trip_temp)
- update_instance_for_throttle(tz, trip, trip_type, trend);
- else
- update_instance_for_dethrottle(tz, trip, trip_type);
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != trip)
+ continue;
+
+ old_target = instance->target;
+ instance->target = get_target_state(instance, trend, throttle);
+
+ /* Activate a passive thermal instance */
+ if (old_target == THERMAL_NO_TARGET &&
+ instance->target != THERMAL_NO_TARGET)
+ update_passive_instance(tz, trip_type, 1);
+ /* Deactivate a passive thermal instance */
+ else if (old_target != THERMAL_NO_TARGET &&
+ instance->target == THERMAL_NO_TARGET)
+ update_passive_instance(tz, trip_type, -1);
+
+
+ instance->cdev->updated = false; /* cdev needs update */
+ }
mutex_unlock(&tz->lock);
}
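The new get_target_state() above is a small state machine over (trend, throttle). Lifted out of kernel context with plain types it reads as below; THERMAL_NO_TARGET is represented by -1, as the hunk itself does, and the instance limits are passed in explicitly. This is an illustration of the selection logic, not the governor code.

/* Illustration of the step_wise target-state selection; mirrors the
 * switch statement in the hunk above using plain types. */
#include <stdio.h>

enum trend { RAISING, DROPPING, RAISE_FULL, DROP_FULL };

#define NO_TARGET	(-1L)

static long next_state(enum trend trend, int throttle,
		       long cur, long lower, long upper)
{
	switch (trend) {
	case RAISING:
		if (throttle)
			cur = cur < upper ? cur + 1 : upper;
		break;
	case RAISE_FULL:
		if (throttle)
			cur = upper;
		break;
	case DROPPING:
		if (cur == lower) {
			if (!throttle)
				cur = NO_TARGET;
		} else {
			cur -= 1;
		}
		break;
	case DROP_FULL:
		if (cur == lower) {
			if (!throttle)
				cur = NO_TARGET;
		} else {
			cur = lower;
		}
		break;
	}
	return cur;
}

int main(void)
{
	/* dropping below the trip point at the lowest state: deactivate */
	printf("%ld\n", next_state(DROPPING, 0, 0, 0, 3));	/* prints -1 */
	return 0;
}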
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 8c8ce806180f..5b7863a03f98 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -32,7 +32,6 @@
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/thermal.h>
-#include <linux/spinlock.h>
#include <linux/reboot.h>
#include <net/netlink.h>
#include <net/genetlink.h>
@@ -132,23 +131,16 @@ EXPORT_SYMBOL_GPL(thermal_unregister_governor);
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
- int err;
-
-again:
- if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
- return -ENOMEM;
+ int ret;
if (lock)
mutex_lock(lock);
- err = idr_get_new(idr, NULL, id);
+ ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
if (lock)
mutex_unlock(lock);
- if (unlikely(err == -EAGAIN))
- goto again;
- else if (unlikely(err))
- return err;
-
- *id = *id & MAX_IDR_MASK;
+ if (unlikely(ret < 0))
+ return ret;
+ *id = ret;
return 0;
}
@@ -355,8 +347,9 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
tz->ops->notify(tz, trip, trip_type);
if (trip_type == THERMAL_TRIP_CRITICAL) {
- pr_emerg("Critical temperature reached(%d C),shutting down\n",
- tz->temperature / 1000);
+ dev_emerg(&tz->device,
+ "critical temperature reached(%d C),shutting down\n",
+ tz->temperature / 1000);
orderly_poweroff(true);
}
}
@@ -378,23 +371,57 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
monitor_thermal_zone(tz);
}
+static int thermal_zone_get_temp(struct thermal_zone_device *tz,
+ unsigned long *temp)
+{
+ int ret = 0;
+#ifdef CONFIG_THERMAL_EMULATION
+ int count;
+ unsigned long crit_temp = -1UL;
+ enum thermal_trip_type type;
+#endif
+
+ mutex_lock(&tz->lock);
+
+ ret = tz->ops->get_temp(tz, temp);
+#ifdef CONFIG_THERMAL_EMULATION
+ if (!tz->emul_temperature)
+ goto skip_emul;
+
+ for (count = 0; count < tz->trips; count++) {
+ ret = tz->ops->get_trip_type(tz, count, &type);
+ if (!ret && type == THERMAL_TRIP_CRITICAL) {
+ ret = tz->ops->get_trip_temp(tz, count, &crit_temp);
+ break;
+ }
+ }
+
+ if (ret)
+ goto skip_emul;
+
+ if (*temp < crit_temp)
+ *temp = tz->emul_temperature;
+skip_emul:
+#endif
+ mutex_unlock(&tz->lock);
+ return ret;
+}
+
static void update_temperature(struct thermal_zone_device *tz)
{
long temp;
int ret;
- mutex_lock(&tz->lock);
-
- ret = tz->ops->get_temp(tz, &temp);
+ ret = thermal_zone_get_temp(tz, &temp);
if (ret) {
- pr_warn("failed to read out thermal zone %d\n", tz->id);
- goto exit;
+ dev_warn(&tz->device, "failed to read out thermal zone %d\n",
+ tz->id);
+ return;
}
+ mutex_lock(&tz->lock);
tz->last_temperature = tz->temperature;
tz->temperature = temp;
-
-exit:
mutex_unlock(&tz->lock);
}
@@ -437,10 +464,7 @@ temp_show(struct device *dev, struct device_attribute *attr, char *buf)
long temperature;
int ret;
- if (!tz->ops->get_temp)
- return -EPERM;
-
- ret = tz->ops->get_temp(tz, &temperature);
+ ret = thermal_zone_get_temp(tz, &temperature);
if (ret)
return ret;
@@ -700,6 +724,31 @@ policy_show(struct device *dev, struct device_attribute *devattr, char *buf)
return sprintf(buf, "%s\n", tz->governor->name);
}
+#ifdef CONFIG_THERMAL_EMULATION
+static ssize_t
+emul_temp_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int ret = 0;
+ unsigned long temperature;
+
+ if (kstrtoul(buf, 10, &temperature))
+ return -EINVAL;
+
+ if (!tz->ops->set_emul_temp) {
+ mutex_lock(&tz->lock);
+ tz->emul_temperature = temperature;
+ mutex_unlock(&tz->lock);
+ } else {
+ ret = tz->ops->set_emul_temp(tz, temperature);
+ }
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
+#endif/*CONFIG_THERMAL_EMULATION*/
+
static DEVICE_ATTR(type, 0444, type_show, NULL);
static DEVICE_ATTR(temp, 0444, temp_show, NULL);
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
@@ -842,7 +891,7 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
temp_input);
struct thermal_zone_device *tz = temp->tz;
- ret = tz->ops->get_temp(tz, &temperature);
+ ret = thermal_zone_get_temp(tz, &temperature);
if (ret)
return ret;
@@ -1529,6 +1578,9 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
if (!ops || !ops->get_temp)
return ERR_PTR(-EINVAL);
+ if (trips > 0 && !ops->get_trip_type)
+ return ERR_PTR(-EINVAL);
+
tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL);
if (!tz)
return ERR_PTR(-ENOMEM);
@@ -1592,6 +1644,11 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
goto unregister;
}
+#ifdef CONFIG_THERMAL_EMULATION
+ result = device_create_file(&tz->device, &dev_attr_emul_temp);
+ if (result)
+ goto unregister;
+#endif
/* Create policy attribute */
result = device_create_file(&tz->device, &dev_attr_policy);
if (result)
@@ -1711,7 +1768,8 @@ static struct genl_multicast_group thermal_event_mcgrp = {
.name = THERMAL_GENL_MCAST_GROUP_NAME,
};
-int thermal_generate_netlink_event(u32 orig, enum events event)
+int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+ enum events event)
{
struct sk_buff *skb;
struct nlattr *attr;
@@ -1721,6 +1779,9 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
int result;
static unsigned int thermal_event_seqnum;
+ if (!tz)
+ return -EINVAL;
+
/* allocate memory */
size = nla_total_size(sizeof(struct thermal_genl_event)) +
nla_total_size(0);
@@ -1755,7 +1816,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
memset(thermal_event, 0, sizeof(struct thermal_genl_event));
- thermal_event->orig = orig;
+ thermal_event->orig = tz->id;
thermal_event->event = event;
/* send multicast genetlink message */
@@ -1767,7 +1828,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
if (result)
- pr_info("failed to send netlink event:%d\n", result);
+ dev_err(&tz->device, "Failed to send netlink event:%d\n", result);
return result;
}
@@ -1807,6 +1868,7 @@ static int __init thermal_init(void)
idr_destroy(&thermal_cdev_idr);
mutex_destroy(&thermal_idr_lock);
mutex_destroy(&thermal_list_lock);
+ return result;
}
result = genetlink_init();
return result;
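A usage note on the emulation support added above: with CONFIG_THERMAL_EMULATION enabled, the new emul_temp attribute can be written from user space to inject a fake temperature, which thermal_zone_get_temp() then reports as long as the real reading stays below the critical trip point. A minimal, hedged sketch follows; the /sys/class/thermal/thermal_zone0 path and the 45000 millidegree value are illustrative assumptions, not something the patch mandates.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* assumed zone path; pick the zone actually under test */
        FILE *f = fopen("/sys/class/thermal/thermal_zone0/emul_temp", "w");

        if (!f) {
                perror("emul_temp");
                return EXIT_FAILURE;
        }
        /* 45000 == 45.000 degrees C; parsed by kstrtoul() in emul_temp_store() */
        fprintf(f, "%lu\n", 45000UL);
        fclose(f);
        return EXIT_SUCCESS;
}

The attribute is S_IWUSR, so the write needs root; zones whose drivers provide ops->set_emul_temp handle the value themselves instead of using the software override above.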
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 0ecf22b6a38e..978db344bda0 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -1,3 +1,14 @@
+config TTY
+ bool "Enable TTY" if EXPERT
+ default y
+ ---help---
+ Allows you to remove TTY support, which can save space, and
+ prevents features that require TTY from being included in the kernel.
+ TTY is required for any text terminals or serial port
+ communication. Most users should leave this enabled.
+
+if TTY
+
config VT
bool "Virtual terminal" if EXPERT
depends on !S390 && !UML
@@ -388,3 +399,24 @@ config PPC_EARLY_DEBUG_EHV_BC_HANDLE
If the number you specify is not a valid byte channel handle, then
there simply will be no early console output. This is true also
if you don't boot under a hypervisor at all.
+
+config GOLDFISH_TTY
+ tristate "Goldfish TTY Driver"
+ depends on GOLDFISH
+ help
+ Console and system TTY driver for the Goldfish virtual platform.
+
+config DA_TTY
+ bool "DA TTY"
+ depends on METAG_DA
+ select SERIAL_NONSTANDARD
+ help
+ This enables a TTY on a Dash channel.
+
+config DA_CONSOLE
+ bool "DA Console"
+ depends on DA_TTY
+ help
+ This enables a console on a Dash channel.
+
+endif # TTY
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 2953059530e4..6b78399bc7c9 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -1,4 +1,4 @@
-obj-y += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
+obj-$(CONFIG_TTY) += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
tty_buffer.o tty_port.o tty_mutex.o
obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
@@ -27,5 +27,7 @@ obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
obj-$(CONFIG_SYNCLINK) += synclink.o
obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
+obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o
+obj-$(CONFIG_DA_TTY) += metag_da.o
obj-y += ipwireless/
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 9d7d00cdfecb..fc700342d43f 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -251,7 +251,6 @@ static void receive_chars(struct serial_state *info)
{
int status;
int serdatr;
- struct tty_struct *tty = info->tport.tty;
unsigned char ch, flag;
struct async_icount *icount;
int oe = 0;
@@ -314,7 +313,7 @@ static void receive_chars(struct serial_state *info)
#endif
flag = TTY_BREAK;
if (info->tport.flags & ASYNC_SAK)
- do_SAK(tty);
+ do_SAK(info->tport.tty);
} else if (status & UART_LSR_PE)
flag = TTY_PARITY;
else if (status & UART_LSR_FE)
@@ -328,10 +327,10 @@ static void receive_chars(struct serial_state *info)
oe = 1;
}
}
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(&info->tport, ch, flag);
if (oe == 1)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_char(&info->tport, 0, TTY_OVERRUN);
+ tty_flip_buffer_push(&info->tport);
out:
return;
}
@@ -394,11 +393,6 @@ static void check_modem_status(struct serial_state *info)
icount->dsr++;
if (dstatus & SER_DCD) {
icount->dcd++;
-#ifdef CONFIG_HARD_PPS
- if ((port->flags & ASYNC_HARDPPS_CD) &&
- !(status & SER_DCD))
- hardpps();
-#endif
}
if (dstatus & SER_CTS)
icount->cts++;
@@ -1099,7 +1093,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
state->custom_divisor = new_serial.custom_divisor;
port->close_delay = new_serial.close_delay * HZ/100;
port->closing_wait = new_serial.closing_wait * HZ/100;
- tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
check_and_exit:
if (port->flags & ASYNC_INITIALIZED) {
@@ -1528,7 +1522,7 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
if (serial_paranoia_check(info, tty->name, "rs_open"))
return -ENODEV;
- tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
retval = startup(tty, info);
if (retval) {
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index 1cfcdbf1d0cc..a93a424873fa 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -95,18 +95,16 @@ bfin_jc_emudat_manager(void *arg)
/* if incoming data is ready, eat it */
if (bfin_read_DBGSTAT() & EMUDIF) {
- if (tty != NULL) {
- uint32_t emudat = bfin_read_emudat();
- if (inbound_len == 0) {
- pr_debug("incoming length: 0x%08x\n", emudat);
- inbound_len = emudat;
- } else {
- size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
- pr_debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
- inbound_len -= num_chars;
- tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars);
- tty_flip_buffer_push(tty);
- }
+ uint32_t emudat = bfin_read_emudat();
+ if (inbound_len == 0) {
+ pr_debug("incoming length: 0x%08x\n", emudat);
+ inbound_len = emudat;
+ } else {
+ size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
+ pr_debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
+ inbound_len -= num_chars;
+ tty_insert_flip_string(&port, (unsigned char *)&emudat, num_chars);
+ tty_flip_buffer_push(&port);
}
}
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index b09c8d1f9a66..345bd0e0884e 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -441,7 +441,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
void __iomem *base_addr)
{
struct cyclades_port *info;
- struct tty_struct *tty;
+ struct tty_port *port;
int len, index = cinfo->bus_index;
u8 ivr, save_xir, channel, save_car, data, char_count;
@@ -452,22 +452,11 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
save_xir = readb(base_addr + (CyRIR << index));
channel = save_xir & CyIRChannel;
info = &cinfo->ports[channel + chip * 4];
+ port = &info->port;
save_car = cyy_readb(info, CyCAR);
cyy_writeb(info, CyCAR, save_xir);
ivr = cyy_readb(info, CyRIVR) & CyIVRMask;
- tty = tty_port_tty_get(&info->port);
- /* if there is nowhere to put the data, discard it */
- if (tty == NULL) {
- if (ivr == CyIVRRxEx) { /* exception */
- data = cyy_readb(info, CyRDSR);
- } else { /* normal character reception */
- char_count = cyy_readb(info, CyRDCR);
- while (char_count--)
- data = cyy_readb(info, CyRDSR);
- }
- goto end;
- }
/* there is an open port for this data */
if (ivr == CyIVRRxEx) { /* exception */
data = cyy_readb(info, CyRDSR);
@@ -484,40 +473,45 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
if (data & info->ignore_status_mask) {
info->icount.rx++;
- tty_kref_put(tty);
return;
}
- if (tty_buffer_request_room(tty, 1)) {
+ if (tty_buffer_request_room(port, 1)) {
if (data & info->read_status_mask) {
if (data & CyBREAK) {
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(port,
cyy_readb(info, CyRDSR),
TTY_BREAK);
info->icount.rx++;
- if (info->port.flags & ASYNC_SAK)
- do_SAK(tty);
+ if (port->flags & ASYNC_SAK) {
+ struct tty_struct *tty =
+ tty_port_tty_get(port);
+ if (tty) {
+ do_SAK(tty);
+ tty_kref_put(tty);
+ }
+ }
} else if (data & CyFRAME) {
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(port,
cyy_readb(info, CyRDSR),
TTY_FRAME);
info->icount.rx++;
info->idle_stats.frame_errs++;
} else if (data & CyPARITY) {
/* Pieces of seven... */
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(port,
cyy_readb(info, CyRDSR),
TTY_PARITY);
info->icount.rx++;
info->idle_stats.parity_errs++;
} else if (data & CyOVERRUN) {
- tty_insert_flip_char(tty, 0,
+ tty_insert_flip_char(port, 0,
TTY_OVERRUN);
info->icount.rx++;
/* If the flip buffer itself is
overflowing, we still lose
the next incoming character.
*/
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(port,
cyy_readb(info, CyRDSR),
TTY_FRAME);
info->icount.rx++;
@@ -527,12 +521,12 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
/* } else if(data & CyTIMEOUT) { */
/* } else if(data & CySPECHAR) { */
} else {
- tty_insert_flip_char(tty, 0,
+ tty_insert_flip_char(port, 0,
TTY_NORMAL);
info->icount.rx++;
}
} else {
- tty_insert_flip_char(tty, 0, TTY_NORMAL);
+ tty_insert_flip_char(port, 0, TTY_NORMAL);
info->icount.rx++;
}
} else {
@@ -552,10 +546,10 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
info->mon.char_max = char_count;
info->mon.char_last = char_count;
#endif
- len = tty_buffer_request_room(tty, char_count);
+ len = tty_buffer_request_room(port, char_count);
while (len--) {
data = cyy_readb(info, CyRDSR);
- tty_insert_flip_char(tty, data, TTY_NORMAL);
+ tty_insert_flip_char(port, data, TTY_NORMAL);
info->idle_stats.recv_bytes++;
info->icount.rx++;
#ifdef CY_16Y_HACK
@@ -564,9 +558,8 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
}
info->idle_stats.recv_idle = jiffies;
}
- tty_schedule_flip(tty);
- tty_kref_put(tty);
-end:
+ tty_schedule_flip(port);
+
/* end of service */
cyy_writeb(info, CyRIR, save_xir & 0x3f);
cyy_writeb(info, CyCAR, save_car);
@@ -924,10 +917,11 @@ cyz_issue_cmd(struct cyclades_card *cinfo,
return 0;
} /* cyz_issue_cmd */
-static void cyz_handle_rx(struct cyclades_port *info, struct tty_struct *tty)
+static void cyz_handle_rx(struct cyclades_port *info)
{
struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
struct cyclades_card *cinfo = info->card;
+ struct tty_port *port = &info->port;
unsigned int char_count;
int len;
#ifdef BLOCKMOVE
@@ -946,80 +940,77 @@ static void cyz_handle_rx(struct cyclades_port *info, struct tty_struct *tty)
else
char_count = rx_put - rx_get + rx_bufsize;
- if (char_count) {
+ if (!char_count)
+ return;
+
#ifdef CY_ENABLE_MONITORING
- info->mon.int_count++;
- info->mon.char_count += char_count;
- if (char_count > info->mon.char_max)
- info->mon.char_max = char_count;
- info->mon.char_last = char_count;
+ info->mon.int_count++;
+ info->mon.char_count += char_count;
+ if (char_count > info->mon.char_max)
+ info->mon.char_max = char_count;
+ info->mon.char_last = char_count;
#endif
- if (tty == NULL) {
- /* flush received characters */
- new_rx_get = (new_rx_get + char_count) &
- (rx_bufsize - 1);
- info->rflush_count++;
- } else {
+
#ifdef BLOCKMOVE
- /* we'd like to use memcpy(t, f, n) and memset(s, c, count)
- for performance, but because of buffer boundaries, there
- may be several steps to the operation */
- while (1) {
- len = tty_prepare_flip_string(tty, &buf,
- char_count);
- if (!len)
- break;
+ /* we'd like to use memcpy(t, f, n) and memset(s, c, count)
+ for performance, but because of buffer boundaries, there
+ may be several steps to the operation */
+ while (1) {
+ len = tty_prepare_flip_string(port, &buf,
+ char_count);
+ if (!len)
+ break;
- len = min_t(unsigned int, min(len, char_count),
- rx_bufsize - new_rx_get);
+ len = min_t(unsigned int, min(len, char_count),
+ rx_bufsize - new_rx_get);
- memcpy_fromio(buf, cinfo->base_addr +
- rx_bufaddr + new_rx_get, len);
+ memcpy_fromio(buf, cinfo->base_addr +
+ rx_bufaddr + new_rx_get, len);
- new_rx_get = (new_rx_get + len) &
- (rx_bufsize - 1);
- char_count -= len;
- info->icount.rx += len;
- info->idle_stats.recv_bytes += len;
- }
+ new_rx_get = (new_rx_get + len) &
+ (rx_bufsize - 1);
+ char_count -= len;
+ info->icount.rx += len;
+ info->idle_stats.recv_bytes += len;
+ }
#else
- len = tty_buffer_request_room(tty, char_count);
- while (len--) {
- data = readb(cinfo->base_addr + rx_bufaddr +
- new_rx_get);
- new_rx_get = (new_rx_get + 1) &
- (rx_bufsize - 1);
- tty_insert_flip_char(tty, data, TTY_NORMAL);
- info->idle_stats.recv_bytes++;
- info->icount.rx++;
- }
+ len = tty_buffer_request_room(port, char_count);
+ while (len--) {
+ data = readb(cinfo->base_addr + rx_bufaddr +
+ new_rx_get);
+ new_rx_get = (new_rx_get + 1) &
+ (rx_bufsize - 1);
+ tty_insert_flip_char(port, data, TTY_NORMAL);
+ info->idle_stats.recv_bytes++;
+ info->icount.rx++;
+ }
#endif
#ifdef CONFIG_CYZ_INTR
- /* Recalculate the number of chars in the RX buffer and issue
- a cmd in case it's higher than the RX high water mark */
- rx_put = readl(&buf_ctrl->rx_put);
- if (rx_put >= rx_get)
- char_count = rx_put - rx_get;
- else
- char_count = rx_put - rx_get + rx_bufsize;
- if (char_count >= readl(&buf_ctrl->rx_threshold) &&
- !timer_pending(&cyz_rx_full_timer[
- info->line]))
- mod_timer(&cyz_rx_full_timer[info->line],
- jiffies + 1);
+ /* Recalculate the number of chars in the RX buffer and issue
+ a cmd in case it's higher than the RX high water mark */
+ rx_put = readl(&buf_ctrl->rx_put);
+ if (rx_put >= rx_get)
+ char_count = rx_put - rx_get;
+ else
+ char_count = rx_put - rx_get + rx_bufsize;
+ if (char_count >= readl(&buf_ctrl->rx_threshold) &&
+ !timer_pending(&cyz_rx_full_timer[
+ info->line]))
+ mod_timer(&cyz_rx_full_timer[info->line],
+ jiffies + 1);
#endif
- info->idle_stats.recv_idle = jiffies;
- tty_schedule_flip(tty);
- }
- /* Update rx_get */
- cy_writel(&buf_ctrl->rx_get, new_rx_get);
- }
+ info->idle_stats.recv_idle = jiffies;
+ tty_schedule_flip(&info->port);
+
+ /* Update rx_get */
+ cy_writel(&buf_ctrl->rx_get, new_rx_get);
}
-static void cyz_handle_tx(struct cyclades_port *info, struct tty_struct *tty)
+static void cyz_handle_tx(struct cyclades_port *info)
{
struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
struct cyclades_card *cinfo = info->card;
+ struct tty_struct *tty;
u8 data;
unsigned int char_count;
#ifdef BLOCKMOVE
@@ -1039,63 +1030,63 @@ static void cyz_handle_tx(struct cyclades_port *info, struct tty_struct *tty)
else
char_count = tx_get - tx_put - 1;
- if (char_count) {
-
- if (tty == NULL)
- goto ztxdone;
+ if (!char_count)
+ return;
+
+ tty = tty_port_tty_get(&info->port);
+ if (tty == NULL)
+ goto ztxdone;
- if (info->x_char) { /* send special char */
- data = info->x_char;
+ if (info->x_char) { /* send special char */
+ data = info->x_char;
- cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
- tx_put = (tx_put + 1) & (tx_bufsize - 1);
- info->x_char = 0;
- char_count--;
- info->icount.tx++;
- }
+ cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
+ tx_put = (tx_put + 1) & (tx_bufsize - 1);
+ info->x_char = 0;
+ char_count--;
+ info->icount.tx++;
+ }
#ifdef BLOCKMOVE
- while (0 < (small_count = min_t(unsigned int,
- tx_bufsize - tx_put, min_t(unsigned int,
- (SERIAL_XMIT_SIZE - info->xmit_tail),
- min_t(unsigned int, info->xmit_cnt,
- char_count))))) {
-
- memcpy_toio((char *)(cinfo->base_addr + tx_bufaddr +
- tx_put),
- &info->port.xmit_buf[info->xmit_tail],
- small_count);
-
- tx_put = (tx_put + small_count) & (tx_bufsize - 1);
- char_count -= small_count;
- info->icount.tx += small_count;
- info->xmit_cnt -= small_count;
- info->xmit_tail = (info->xmit_tail + small_count) &
- (SERIAL_XMIT_SIZE - 1);
- }
+ while (0 < (small_count = min_t(unsigned int,
+ tx_bufsize - tx_put, min_t(unsigned int,
+ (SERIAL_XMIT_SIZE - info->xmit_tail),
+ min_t(unsigned int, info->xmit_cnt,
+ char_count))))) {
+
+ memcpy_toio((char *)(cinfo->base_addr + tx_bufaddr + tx_put),
+ &info->port.xmit_buf[info->xmit_tail],
+ small_count);
+
+ tx_put = (tx_put + small_count) & (tx_bufsize - 1);
+ char_count -= small_count;
+ info->icount.tx += small_count;
+ info->xmit_cnt -= small_count;
+ info->xmit_tail = (info->xmit_tail + small_count) &
+ (SERIAL_XMIT_SIZE - 1);
+ }
#else
- while (info->xmit_cnt && char_count) {
- data = info->port.xmit_buf[info->xmit_tail];
- info->xmit_cnt--;
- info->xmit_tail = (info->xmit_tail + 1) &
- (SERIAL_XMIT_SIZE - 1);
-
- cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
- tx_put = (tx_put + 1) & (tx_bufsize - 1);
- char_count--;
- info->icount.tx++;
- }
+ while (info->xmit_cnt && char_count) {
+ data = info->port.xmit_buf[info->xmit_tail];
+ info->xmit_cnt--;
+ info->xmit_tail = (info->xmit_tail + 1) &
+ (SERIAL_XMIT_SIZE - 1);
+
+ cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
+ tx_put = (tx_put + 1) & (tx_bufsize - 1);
+ char_count--;
+ info->icount.tx++;
+ }
#endif
- tty_wakeup(tty);
+ tty_wakeup(tty);
+ tty_kref_put(tty);
ztxdone:
- /* Update tx_put */
- cy_writel(&buf_ctrl->tx_put, tx_put);
- }
+ /* Update tx_put */
+ cy_writel(&buf_ctrl->tx_put, tx_put);
}
static void cyz_handle_cmd(struct cyclades_card *cinfo)
{
struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
- struct tty_struct *tty;
struct cyclades_port *info;
__u32 channel, param, fw_ver;
__u8 cmd;
@@ -1108,23 +1099,20 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
special_count = 0;
delta_count = 0;
info = &cinfo->ports[channel];
- tty = tty_port_tty_get(&info->port);
- if (tty == NULL)
- continue;
switch (cmd) {
case C_CM_PR_ERROR:
- tty_insert_flip_char(tty, 0, TTY_PARITY);
+ tty_insert_flip_char(&info->port, 0, TTY_PARITY);
info->icount.rx++;
special_count++;
break;
case C_CM_FR_ERROR:
- tty_insert_flip_char(tty, 0, TTY_FRAME);
+ tty_insert_flip_char(&info->port, 0, TTY_FRAME);
info->icount.rx++;
special_count++;
break;
case C_CM_RXBRK:
- tty_insert_flip_char(tty, 0, TTY_BREAK);
+ tty_insert_flip_char(&info->port, 0, TTY_BREAK);
info->icount.rx++;
special_count++;
break;
@@ -1136,8 +1124,14 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
readl(&info->u.cyz.ch_ctrl->rs_status);
if (dcd & C_RS_DCD)
wake_up_interruptible(&info->port.open_wait);
- else
- tty_hangup(tty);
+ else {
+ struct tty_struct *tty;
+ tty = tty_port_tty_get(&info->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ }
}
break;
case C_CM_MCTS:
@@ -1166,7 +1160,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
printk(KERN_DEBUG "cyz_interrupt: rcvd intr, card %d, "
"port %ld\n", info->card, channel);
#endif
- cyz_handle_rx(info, tty);
+ cyz_handle_rx(info);
break;
case C_CM_TXBEMPTY:
case C_CM_TXLOWWM:
@@ -1176,7 +1170,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
printk(KERN_DEBUG "cyz_interrupt: xmit intr, card %d, "
"port %ld\n", info->card, channel);
#endif
- cyz_handle_tx(info, tty);
+ cyz_handle_tx(info);
break;
#endif /* CONFIG_CYZ_INTR */
case C_CM_FATAL:
@@ -1188,8 +1182,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
if (delta_count)
wake_up_interruptible(&info->port.delta_msr_wait);
if (special_count)
- tty_schedule_flip(tty);
- tty_kref_put(tty);
+ tty_schedule_flip(&info->port);
}
}
@@ -1255,17 +1248,11 @@ static void cyz_poll(unsigned long arg)
cyz_handle_cmd(cinfo);
for (port = 0; port < cinfo->nports; port++) {
- struct tty_struct *tty;
-
info = &cinfo->ports[port];
- tty = tty_port_tty_get(&info->port);
- /* OK to pass NULL to the handle functions below.
- They need to drop the data in that case. */
if (!info->throttle)
- cyz_handle_rx(info, tty);
- cyz_handle_tx(info, tty);
- tty_kref_put(tty);
+ cyz_handle_rx(info);
+ cyz_handle_tx(info);
}
/* poll every 'cyz_polling_cycle' period */
expires = jiffies + cyz_polling_cycle;
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index c117d775a22f..ed92622b8949 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -371,22 +371,17 @@ console_initcall(ehv_bc_console_init);
static irqreturn_t ehv_bc_tty_rx_isr(int irq, void *data)
{
struct ehv_bc_data *bc = data;
- struct tty_struct *ttys = tty_port_tty_get(&bc->port);
unsigned int rx_count, tx_count, len;
int count;
char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
int ret;
- /* ttys could be NULL during a hangup */
- if (!ttys)
- return IRQ_HANDLED;
-
/* Find out how much data needs to be read, and then ask the TTY layer
* if it can handle that much. We want to ensure that every byte we
* read from the byte channel will be accepted by the TTY layer.
*/
ev_byte_channel_poll(bc->handle, &rx_count, &tx_count);
- count = tty_buffer_request_room(ttys, rx_count);
+ count = tty_buffer_request_room(&bc->port, rx_count);
/* 'count' is the maximum amount of data the TTY layer can accept at
* this time. However, during testing, I was never able to get 'count'
@@ -407,7 +402,7 @@ static irqreturn_t ehv_bc_tty_rx_isr(int irq, void *data)
*/
/* Pass the received data to the tty layer. */
- ret = tty_insert_flip_string(ttys, buffer, len);
+ ret = tty_insert_flip_string(&bc->port, buffer, len);
/* 'ret' is the number of bytes that the TTY layer accepted.
* If it's not equal to 'len', then it means the buffer is
@@ -422,9 +417,7 @@ static irqreturn_t ehv_bc_tty_rx_isr(int irq, void *data)
}
/* Tell the tty layer that we're done. */
- tty_flip_buffer_push(ttys);
-
- tty_kref_put(ttys);
+ tty_flip_buffer_push(&bc->port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
new file mode 100644
index 000000000000..f17d2e4ee2ca
--- /dev/null
+++ b/drivers/tty/goldfish.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+enum {
+ GOLDFISH_TTY_PUT_CHAR = 0x00,
+ GOLDFISH_TTY_BYTES_READY = 0x04,
+ GOLDFISH_TTY_CMD = 0x08,
+
+ GOLDFISH_TTY_DATA_PTR = 0x10,
+ GOLDFISH_TTY_DATA_LEN = 0x14,
+
+ GOLDFISH_TTY_CMD_INT_DISABLE = 0,
+ GOLDFISH_TTY_CMD_INT_ENABLE = 1,
+ GOLDFISH_TTY_CMD_WRITE_BUFFER = 2,
+ GOLDFISH_TTY_CMD_READ_BUFFER = 3,
+};
+
+struct goldfish_tty {
+ struct tty_port port;
+ spinlock_t lock;
+ void __iomem *base;
+ u32 irq;
+ int opencount;
+ struct console console;
+};
+
+static DEFINE_MUTEX(goldfish_tty_lock);
+static struct tty_driver *goldfish_tty_driver;
+static u32 goldfish_tty_line_count = 8;
+static u32 goldfish_tty_current_line_count;
+static struct goldfish_tty *goldfish_ttys;
+
+static void goldfish_tty_do_write(int line, const char *buf, unsigned count)
+{
+ unsigned long irq_flags;
+ struct goldfish_tty *qtty = &goldfish_ttys[line];
+ void __iomem *base = qtty->base;
+ spin_lock_irqsave(&qtty->lock, irq_flags);
+ writel((u32)buf, base + GOLDFISH_TTY_DATA_PTR);
+ writel(count, base + GOLDFISH_TTY_DATA_LEN);
+ writel(GOLDFISH_TTY_CMD_WRITE_BUFFER, base + GOLDFISH_TTY_CMD);
+ spin_unlock_irqrestore(&qtty->lock, irq_flags);
+}
+
+static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct goldfish_tty *qtty = &goldfish_ttys[pdev->id];
+ void __iomem *base = qtty->base;
+ unsigned long irq_flags;
+ unsigned char *buf;
+ u32 count;
+
+ count = readl(base + GOLDFISH_TTY_BYTES_READY);
+ if(count == 0)
+ return IRQ_NONE;
+
+ count = tty_prepare_flip_string(&qtty->port, &buf, count);
+ spin_lock_irqsave(&qtty->lock, irq_flags);
+ writel((u32)buf, base + GOLDFISH_TTY_DATA_PTR);
+ writel(count, base + GOLDFISH_TTY_DATA_LEN);
+ writel(GOLDFISH_TTY_CMD_READ_BUFFER, base + GOLDFISH_TTY_CMD);
+ spin_unlock_irqrestore(&qtty->lock, irq_flags);
+ tty_schedule_flip(&qtty->port);
+ return IRQ_HANDLED;
+}
+
+static int goldfish_tty_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct goldfish_tty *qtty = container_of(port, struct goldfish_tty, port);
+ writel(GOLDFISH_TTY_CMD_INT_ENABLE, qtty->base + GOLDFISH_TTY_CMD);
+ return 0;
+}
+
+static void goldfish_tty_shutdown(struct tty_port *port)
+{
+ struct goldfish_tty *qtty = container_of(port, struct goldfish_tty, port);
+ writel(GOLDFISH_TTY_CMD_INT_DISABLE, qtty->base + GOLDFISH_TTY_CMD);
+}
+
+static int goldfish_tty_open(struct tty_struct * tty, struct file * filp)
+{
+ struct goldfish_tty *qtty = &goldfish_ttys[tty->index];
+ return tty_port_open(&qtty->port, tty, filp);
+}
+
+static void goldfish_tty_close(struct tty_struct * tty, struct file * filp)
+{
+ tty_port_close(tty->port, tty, filp);
+}
+
+static void goldfish_tty_hangup(struct tty_struct *tty)
+{
+ tty_port_hangup(tty->port);
+}
+
+static int goldfish_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+ goldfish_tty_do_write(tty->index, buf, count);
+ return count;
+}
+
+static int goldfish_tty_write_room(struct tty_struct *tty)
+{
+ return 0x10000;
+}
+
+static int goldfish_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct goldfish_tty *qtty = &goldfish_ttys[tty->index];
+ void __iomem *base = qtty->base;
+ return readl(base + GOLDFISH_TTY_BYTES_READY);
+}
+
+static void goldfish_tty_console_write(struct console *co, const char *b, unsigned count)
+{
+ goldfish_tty_do_write(co->index, b, count);
+}
+
+static struct tty_driver *goldfish_tty_console_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return goldfish_tty_driver;
+}
+
+static int goldfish_tty_console_setup(struct console *co, char *options)
+{
+ if ((unsigned)co->index >= goldfish_tty_line_count)
+ return -ENODEV;
+ if(goldfish_ttys[co->index].base == 0)
+ return -ENODEV;
+ return 0;
+}
+
+static struct tty_port_operations goldfish_port_ops = {
+ .activate = goldfish_tty_activate,
+ .shutdown = goldfish_tty_shutdown
+};
+
+static struct tty_operations goldfish_tty_ops = {
+ .open = goldfish_tty_open,
+ .close = goldfish_tty_close,
+ .hangup = goldfish_tty_hangup,
+ .write = goldfish_tty_write,
+ .write_room = goldfish_tty_write_room,
+ .chars_in_buffer = goldfish_tty_chars_in_buffer,
+};
+
+static int goldfish_tty_create_driver(void)
+{
+ int ret;
+ struct tty_driver *tty;
+
+ goldfish_ttys = kzalloc(sizeof(*goldfish_ttys) * goldfish_tty_line_count, GFP_KERNEL);
+ if(goldfish_ttys == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_goldfish_ttys_failed;
+ }
+ tty = alloc_tty_driver(goldfish_tty_line_count);
+ if(tty == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_tty_driver_failed;
+ }
+ tty->driver_name = "goldfish";
+ tty->name = "ttyGF";
+ tty->type = TTY_DRIVER_TYPE_SERIAL;
+ tty->subtype = SERIAL_TYPE_NORMAL;
+ tty->init_termios = tty_std_termios;
+ tty->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(tty, &goldfish_tty_ops);
+ ret = tty_register_driver(tty);
+ if(ret)
+ goto err_tty_register_driver_failed;
+
+ goldfish_tty_driver = tty;
+ return 0;
+
+err_tty_register_driver_failed:
+ put_tty_driver(tty);
+err_alloc_tty_driver_failed:
+ kfree(goldfish_ttys);
+ goldfish_ttys = NULL;
+err_alloc_goldfish_ttys_failed:
+ return ret;
+}
+
+static void goldfish_tty_delete_driver(void)
+{
+ tty_unregister_driver(goldfish_tty_driver);
+ put_tty_driver(goldfish_tty_driver);
+ goldfish_tty_driver = NULL;
+ kfree(goldfish_ttys);
+ goldfish_ttys = NULL;
+}
+
+static int goldfish_tty_probe(struct platform_device *pdev)
+{
+ struct goldfish_tty *qtty;
+ int ret = -EINVAL;
+ struct resource *r;
+ struct device *ttydev;
+ void __iomem *base;
+ u32 irq;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if(r == NULL)
+ return -EINVAL;
+
+ base = ioremap(r->start, 0x1000);
+ if (base == NULL) {
+ pr_err("goldfish_tty: unable to remap base\n");
+ return -ENOMEM;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if(r == NULL)
+ goto err_unmap;
+
+ irq = r->start;
+
+ if(pdev->id >= goldfish_tty_line_count)
+ goto err_unmap;
+
+ mutex_lock(&goldfish_tty_lock);
+ if(goldfish_tty_current_line_count == 0) {
+ ret = goldfish_tty_create_driver();
+ if(ret)
+ goto err_create_driver_failed;
+ }
+ goldfish_tty_current_line_count++;
+
+ qtty = &goldfish_ttys[pdev->id];
+ spin_lock_init(&qtty->lock);
+ tty_port_init(&qtty->port);
+ qtty->port.ops = &goldfish_port_ops;
+ qtty->base = base;
+ qtty->irq = irq;
+
+ writel(GOLDFISH_TTY_CMD_INT_DISABLE, base + GOLDFISH_TTY_CMD);
+
+ ret = request_irq(irq, goldfish_tty_interrupt, IRQF_SHARED, "goldfish_tty", pdev);
+ if(ret)
+ goto err_request_irq_failed;
+
+
+ ttydev = tty_port_register_device(&qtty->port, goldfish_tty_driver,
+ pdev->id, &pdev->dev);
+ if(IS_ERR(ttydev)) {
+ ret = PTR_ERR(ttydev);
+ goto err_tty_register_device_failed;
+ }
+
+ strcpy(qtty->console.name, "ttyGF");
+ qtty->console.write = goldfish_tty_console_write;
+ qtty->console.device = goldfish_tty_console_device;
+ qtty->console.setup = goldfish_tty_console_setup;
+ qtty->console.flags = CON_PRINTBUFFER;
+ qtty->console.index = pdev->id;
+ register_console(&qtty->console);
+
+ mutex_unlock(&goldfish_tty_lock);
+ return 0;
+
+err_tty_register_device_failed:
+ free_irq(irq, pdev);
+err_request_irq_failed:
+ goldfish_tty_current_line_count--;
+ if(goldfish_tty_current_line_count == 0)
+ goldfish_tty_delete_driver();
+err_create_driver_failed:
+ mutex_unlock(&goldfish_tty_lock);
+err_unmap:
+ iounmap(base);
+ return ret;
+}
+
+static int goldfish_tty_remove(struct platform_device *pdev)
+{
+ struct goldfish_tty *qtty;
+
+ mutex_lock(&goldfish_tty_lock);
+
+ qtty = &goldfish_ttys[pdev->id];
+ unregister_console(&qtty->console);
+ tty_unregister_device(goldfish_tty_driver, pdev->id);
+ iounmap(qtty->base);
+ qtty->base = 0;
+ free_irq(qtty->irq, pdev);
+ goldfish_tty_current_line_count--;
+ if(goldfish_tty_current_line_count == 0)
+ goldfish_tty_delete_driver();
+ mutex_unlock(&goldfish_tty_lock);
+ return 0;
+}
+
+static struct platform_driver goldfish_tty_platform_driver = {
+ .probe = goldfish_tty_probe,
+ .remove = goldfish_tty_remove,
+ .driver = {
+ .name = "goldfish_tty"
+ }
+};
+
+module_platform_driver(goldfish_tty_platform_driver);
+
+MODULE_LICENSE("GPL v2");
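For completeness, a hedged sketch of exercising the new Goldfish ports from user space. The /dev/ttyGF0 node name is an assumption derived from the "ttyGF" driver name registered above; since the driver sets TTY_DRIVER_DYNAMIC_DEV, the node is expected to be created by udev (or an equivalent static node).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char msg[] = "hello from the guest\n";
        /* assumed device node for line 0 of the goldfish driver */
        int fd = open("/dev/ttyGF0", O_WRONLY | O_NOCTTY);

        if (fd < 0) {
                perror("/dev/ttyGF0");
                return 1;
        }
        /* ends up in goldfish_tty_write() and the emulator's write-buffer command */
        if (write(fd, msg, strlen(msg)) < 0)
                perror("write");
        close(fd);
        return 0;
}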
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index f47b734c6a7a..8902f9b4df71 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -1,3 +1,5 @@
+if TTY
+
config HVC_DRIVER
bool
help
@@ -119,3 +121,4 @@ config HVCS
which will also be compiled when this driver is built as a
module.
+endif # TTY
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 13ee53bd0bf6..eb255e807c06 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -629,7 +629,7 @@ int hvc_poll(struct hvc_struct *hp)
/* Read data if any */
for (;;) {
- int count = tty_buffer_request_room(tty, N_INBUF);
+ int count = tty_buffer_request_room(&hp->port, N_INBUF);
/* If flip is full, just reschedule a later read */
if (count == 0) {
@@ -672,7 +672,7 @@ int hvc_poll(struct hvc_struct *hp)
}
}
#endif /* CONFIG_MAGIC_SYSRQ */
- tty_insert_flip_char(tty, buf[i], 0);
+ tty_insert_flip_char(&hp->port, buf[i], 0);
}
read_total += n;
@@ -691,7 +691,7 @@ int hvc_poll(struct hvc_struct *hp)
a minimum for performance. */
timeout = MIN_TIMEOUT;
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&hp->port);
}
tty_kref_put(tty);
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 19843ec3f80a..682210d778bd 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -230,7 +230,7 @@ static int xen_hvm_console_init(void)
if (r < 0 || v == 0)
goto err;
mfn = v;
- info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE);
+ info->intf = xen_remap(mfn << PAGE_SHIFT, PAGE_SIZE);
if (info->intf == NULL)
goto err;
info->vtermno = HVC_COOKIE;
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 877635733952..1956593ee89d 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -609,11 +609,11 @@ static int hvcs_io(struct hvcs_struct *hvcsd)
/* remove the read masks */
hvcsd->todo_mask &= ~(HVCS_READ_MASK);
- if (tty_buffer_request_room(tty, HVCS_BUFF_LEN) >= HVCS_BUFF_LEN) {
+ if (tty_buffer_request_room(&hvcsd->port, HVCS_BUFF_LEN) >= HVCS_BUFF_LEN) {
got = hvc_get_chars(unit_address,
&buf[0],
HVCS_BUFF_LEN);
- tty_insert_flip_string(tty, buf, got);
+ tty_insert_flip_string(&hvcsd->port, buf, got);
}
/* Give the TTY time to process the data we just sent. */
@@ -623,7 +623,7 @@ static int hvcs_io(struct hvcs_struct *hvcsd)
spin_unlock_irqrestore(&hvcsd->lock, flags);
/* This is synch because tty->low_latency == 1 */
if(got)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&hvcsd->port);
if (!got) {
/* Do this _after_ the flip_buffer_push */
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index 68357a6e4de9..ef95a154854a 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -329,8 +329,7 @@ static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
}
}
-static void hvsi_insert_chars(struct hvsi_struct *hp, struct tty_struct *tty,
- const char *buf, int len)
+static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
{
int i;
@@ -346,7 +345,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, struct tty_struct *tty,
continue;
}
#endif /* CONFIG_MAGIC_SYSRQ */
- tty_insert_flip_char(tty, c, 0);
+ tty_insert_flip_char(&hp->port, c, 0);
}
}
@@ -359,8 +358,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, struct tty_struct *tty,
* revisited.
*/
#define TTY_THRESHOLD_THROTTLE 128
-static bool hvsi_recv_data(struct hvsi_struct *hp, struct tty_struct *tty,
- const uint8_t *packet)
+static bool hvsi_recv_data(struct hvsi_struct *hp, const uint8_t *packet)
{
const struct hvsi_header *header = (const struct hvsi_header *)packet;
const uint8_t *data = packet + sizeof(struct hvsi_header);
@@ -377,7 +375,7 @@ static bool hvsi_recv_data(struct hvsi_struct *hp, struct tty_struct *tty,
datalen = TTY_THRESHOLD_THROTTLE;
}
- hvsi_insert_chars(hp, tty, data, datalen);
+ hvsi_insert_chars(hp, data, datalen);
if (overflow > 0) {
/*
@@ -438,9 +436,7 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct *tty,
case VS_DATA_PACKET_HEADER:
if (!is_open(hp))
break;
- if (tty == NULL)
- break; /* no tty buffer to put data in */
- flip = hvsi_recv_data(hp, tty, packet);
+ flip = hvsi_recv_data(hp, packet);
break;
case VS_CONTROL_PACKET_HEADER:
hvsi_recv_control(hp, packet, tty, handshake);
@@ -469,17 +465,17 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct *tty,
compact_inbuf(hp, packet);
if (flip)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&hp->port);
return 1;
}
-static void hvsi_send_overflow(struct hvsi_struct *hp, struct tty_struct *tty)
+static void hvsi_send_overflow(struct hvsi_struct *hp)
{
pr_debug("%s: delivering %i bytes overflow\n", __func__,
hp->n_throttle);
- hvsi_insert_chars(hp, tty, hp->throttle_buf, hp->n_throttle);
+ hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
hp->n_throttle = 0;
}
@@ -514,8 +510,8 @@ static irqreturn_t hvsi_interrupt(int irq, void *arg)
if (tty && hp->n_throttle && !test_bit(TTY_THROTTLED, &tty->flags)) {
/* we weren't hung up and we weren't throttled, so we can
* deliver the rest now */
- hvsi_send_overflow(hp, tty);
- tty_flip_buffer_push(tty);
+ hvsi_send_overflow(hp);
+ tty_flip_buffer_push(&hp->port);
}
spin_unlock_irqrestore(&hp->lock, flags);
@@ -1001,8 +997,8 @@ static void hvsi_unthrottle(struct tty_struct *tty)
spin_lock_irqsave(&hp->lock, flags);
if (hp->n_throttle) {
- hvsi_send_overflow(hp, tty);
- tty_flip_buffer_push(tty);
+ hvsi_send_overflow(hp);
+ tty_flip_buffer_push(&hp->port);
}
spin_unlock_irqrestore(&hp->lock, flags);
@@ -1187,9 +1183,7 @@ static int __init hvsi_console_init(void)
hvsi_wait = poll_for_state; /* no irqs yet; must poll */
/* search device tree for vty nodes */
- for (vty = of_find_compatible_node(NULL, "serial", "hvterm-protocol");
- vty != NULL;
- vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) {
+ for_each_compatible_node(vty, "serial", "hvterm-protocol") {
struct hvsi_struct *hp;
const uint32_t *vtermno, *irq;
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index b4ba0670dc54..97a511f4185d 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -646,7 +646,7 @@ static void queue_received_packet(struct ipw_hardware *hw,
(*assem) = pool_allocate(hw, *assem, length);
if (!(*assem)) {
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
- ": no memory for incomming data packet, dropped!\n");
+ ": no memory for incoming data packet, dropped!\n");
return;
}
(*assem)->protocol = protocol;
@@ -670,7 +670,7 @@ static void queue_received_packet(struct ipw_hardware *hw,
packet = pool_allocate(hw, NULL, length);
if (!packet) {
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
- ": no memory for incomming ctrl packet, dropped!\n");
+ ": no memory for incoming ctrl packet, dropped!\n");
return;
}
packet->protocol = protocol;
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 2cde13ddf9fc..8fd72ff9436e 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -106,7 +106,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
tty->port.tty = linux_tty;
linux_tty->driver_data = tty;
- linux_tty->low_latency = 1;
+ tty->port.low_latency = 1;
if (tty->tty_type == TTYTYPE_MODEM)
ipwireless_ppp_open(tty->network);
@@ -160,15 +160,9 @@ static void ipw_close(struct tty_struct *linux_tty, struct file *filp)
void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
unsigned int length)
{
- struct tty_struct *linux_tty;
int work = 0;
mutex_lock(&tty->ipw_tty_mutex);
- linux_tty = tty->port.tty;
- if (linux_tty == NULL) {
- mutex_unlock(&tty->ipw_tty_mutex);
- return;
- }
if (!tty->port.count) {
mutex_unlock(&tty->ipw_tty_mutex);
@@ -176,7 +170,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
}
mutex_unlock(&tty->ipw_tty_mutex);
- work = tty_insert_flip_string(linux_tty, data, length);
+ work = tty_insert_flip_string(&tty->port, data, length);
if (work != length)
printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
@@ -187,7 +181,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
* This may sleep if ->low_latency is set
*/
if (work)
- tty_flip_buffer_push(linux_tty);
+ tty_flip_buffer_push(&tty->port);
}
static void ipw_write_packet_sent_callback(void *callback_data,
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 3205b2e9090b..858291ca889c 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -634,10 +634,10 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
break;
case 1: /* Received Break !!! */
- tty_insert_flip_char(tty, 0, TTY_BREAK);
+ tty_insert_flip_char(&port->port, 0, TTY_BREAK);
if (port->port.flags & ASYNC_SAK)
do_SAK(tty);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
break;
case 2: /* Statistics */
@@ -650,15 +650,15 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
break;
}
} else { /* Data Packet */
-
- count = tty_prepare_flip_string(tty, &rp, byte_count & ~1);
+ count = tty_prepare_flip_string(&port->port, &rp,
+ byte_count & ~1);
pr_debug("%s: Can rx %d of %d bytes.\n",
__func__, count, byte_count);
word_count = count >> 1;
insw(base, rp, word_count);
byte_count -= (word_count << 1);
if (count & 0x0001) {
- tty_insert_flip_char(tty, inw(base) & 0xff,
+ tty_insert_flip_char(&port->port, inw(base) & 0xff,
TTY_NORMAL);
byte_count -= 2;
}
@@ -671,7 +671,7 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
byte_count -= 2;
}
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
outw(0x0000, base+0x04); /* enable interrupts */
spin_unlock(&card->card_lock);
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
new file mode 100644
index 000000000000..0e888621f484
--- /dev/null
+++ b/drivers/tty/metag_da.c
@@ -0,0 +1,677 @@
+/*
+ * dashtty.c - tty driver for Dash channels interface.
+ *
+ * Copyright (C) 2007,2008,2012 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/serial.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/uaccess.h>
+
+#include <asm/da.h>
+
+/* Channel error codes */
+#define CONAOK 0
+#define CONERR 1
+#define CONBAD 2
+#define CONPRM 3
+#define CONADR 4
+#define CONCNT 5
+#define CONCBF 6
+#define CONCBE 7
+#define CONBSY 8
+
+/* Default channel for the console */
+#define CONSOLE_CHANNEL 1
+
+#define NUM_TTY_CHANNELS 6
+
+/* Auto allocate */
+#define DA_TTY_MAJOR 0
+
+/* A fast poll rate makes the userland debug process connection more
+ * responsive. But if you set it too fast, no other userland process gets
+ * much of a look-in.
+ */
+#define DA_TTY_POLL (HZ / 50)
+
+/*
+ * A short put delay improves latency but has a high throughput overhead
+ */
+#define DA_TTY_PUT_DELAY (HZ / 100)
+
+static atomic_t num_channels_need_poll = ATOMIC_INIT(0);
+
+static struct timer_list poll_timer;
+
+static struct tty_driver *channel_driver;
+
+static struct timer_list put_timer;
+static struct task_struct *dashtty_thread;
+
+#define RX_BUF_SIZE 1024
+
+enum {
+ INCHR = 1,
+ OUTCHR,
+ RDBUF,
+ WRBUF,
+ RDSTAT
+};
+
+/**
+ * struct dashtty_port - Wrapper struct for dashtty tty_port.
+ * @port: TTY port data
+ * @rx_lock: Lock for rx_buf.
+ * This protects between the poll timer and user context.
+ * It's also held during read SWITCH operations.
+ * @rx_buf: Read buffer
+ * @xmit_lock: Lock for xmit_*, and port.xmit_buf.
+ * This protects between user context and kernel thread.
+ * It's also held during write SWITCH operations.
+ * @xmit_cnt: Size of xmit buffer contents
+ * @xmit_head: Head of xmit buffer where data is written
+ * @xmit_tail: Tail of xmit buffer where data is read
+ * @xmit_empty: Completion for xmit buffer being empty
+ */
+struct dashtty_port {
+ struct tty_port port;
+ spinlock_t rx_lock;
+ void *rx_buf;
+ struct mutex xmit_lock;
+ unsigned int xmit_cnt;
+ unsigned int xmit_head;
+ unsigned int xmit_tail;
+ struct completion xmit_empty;
+};
+
+static struct dashtty_port dashtty_ports[NUM_TTY_CHANNELS];
+
+static atomic_t dashtty_xmit_cnt = ATOMIC_INIT(0);
+static wait_queue_head_t dashtty_waitqueue;
+
+/*
+ * Low-level DA channel access routines
+ */
+static int chancall(int in_bios_function, int in_channel,
+ int in_arg2, void *in_arg3,
+ void *in_arg4)
+{
+ register int bios_function asm("D1Ar1") = in_bios_function;
+ register int channel asm("D0Ar2") = in_channel;
+ register int arg2 asm("D1Ar3") = in_arg2;
+ register void *arg3 asm("D0Ar4") = in_arg3;
+ register void *arg4 asm("D1Ar5") = in_arg4;
+ register int bios_call asm("D0Ar6") = 3;
+ register int result asm("D0Re0");
+
+ asm volatile (
+ "MSETL [A0StP++], %6,%4,%2\n\t"
+ "ADD A0StP, A0StP, #8\n\t"
+ "SWITCH #0x0C30208\n\t"
+ "GETD %0, [A0StP+#-8]\n\t"
+ "SUB A0StP, A0StP, #(4*6)+8\n\t"
+ : "=d" (result) /* outs */
+ : "d" (bios_function),
+ "d" (channel),
+ "d" (arg2),
+ "d" (arg3),
+ "d" (arg4),
+ "d" (bios_call) /* ins */
+ : "memory");
+
+ return result;
+}
+
+/*
+ * Attempts to fetch count bytes from channel and returns actual count.
+ */
+static int fetch_data(unsigned int channel)
+{
+ struct dashtty_port *dport = &dashtty_ports[channel];
+ int received = 0;
+
+ spin_lock_bh(&dport->rx_lock);
+ /* check the port isn't being shut down */
+ if (!dport->rx_buf)
+ goto unlock;
+ if (chancall(RDBUF, channel, RX_BUF_SIZE,
+ (void *)dport->rx_buf, &received) == CONAOK) {
+ if (received) {
+ int space;
+ unsigned char *cbuf;
+
+ space = tty_prepare_flip_string(&dport->port, &cbuf,
+ received);
+
+ if (space <= 0)
+ goto unlock;
+
+ memcpy(cbuf, dport->rx_buf, space);
+ tty_flip_buffer_push(&dport->port);
+ }
+ }
+unlock:
+ spin_unlock_bh(&dport->rx_lock);
+
+ return received;
+}
+
+/**
+ * find_channel_to_poll() - Returns number of the next channel to poll.
+ * Returns: The number of the next channel to poll, or -1 if none need
+ * polling.
+ */
+static int find_channel_to_poll(void)
+{
+ static int last_polled_channel;
+ int last = last_polled_channel;
+ int chan;
+ struct dashtty_port *dport;
+
+ for (chan = last + 1; ; ++chan) {
+ if (chan >= NUM_TTY_CHANNELS)
+ chan = 0;
+
+ dport = &dashtty_ports[chan];
+ if (dport->rx_buf) {
+ last_polled_channel = chan;
+ return chan;
+ }
+
+ if (chan == last)
+ break;
+ }
+ return -1;
+}
+
+/**
+ * put_channel_data() - Write out a block of channel data.
+ * @chan: DA channel number.
+ *
+ * Write a single block of data out to the debug adapter. If the circular buffer
+ * is wrapped then only the first block is written.
+ *
+ * Returns: 1 if the remote buffer was too full to accept data.
+ * 0 otherwise.
+ */
+static int put_channel_data(unsigned int chan)
+{
+ struct dashtty_port *dport;
+ struct tty_struct *tty;
+ int number_written;
+ unsigned int count = 0;
+
+ dport = &dashtty_ports[chan];
+ mutex_lock(&dport->xmit_lock);
+ if (dport->xmit_cnt) {
+ count = min((unsigned int)(SERIAL_XMIT_SIZE - dport->xmit_tail),
+ dport->xmit_cnt);
+ chancall(WRBUF, chan, count,
+ dport->port.xmit_buf + dport->xmit_tail,
+ &number_written);
+ dport->xmit_cnt -= number_written;
+ if (!dport->xmit_cnt) {
+ /* reset pointers to avoid wraps */
+ dport->xmit_head = 0;
+ dport->xmit_tail = 0;
+ complete(&dport->xmit_empty);
+ } else {
+ dport->xmit_tail += number_written;
+ if (dport->xmit_tail >= SERIAL_XMIT_SIZE)
+ dport->xmit_tail -= SERIAL_XMIT_SIZE;
+ }
+ atomic_sub(number_written, &dashtty_xmit_cnt);
+ }
+ mutex_unlock(&dport->xmit_lock);
+
+ /* if we've made more data available, wake up tty */
+ if (count && number_written) {
+ tty = tty_port_tty_get(&dport->port);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
+ }
+
+ /* did the write fail? */
+ return count && !number_written;
+}
+
+/**
+ * put_data() - Kernel thread to write out blocks of channel data to DA.
+ * @arg: Unused.
+ *
+ * This kernel thread runs while @dashtty_xmit_cnt != 0, and loops over the
+ * channels to write out any buffered data. If any of the channels stall due to
+ * the remote buffer being full, a hold off happens to allow the debugger to
+ * drain the buffer.
+ */
+static int put_data(void *arg)
+{
+ unsigned int chan, stall;
+
+ __set_current_state(TASK_RUNNING);
+ while (!kthread_should_stop()) {
+ /*
+ * For each channel see if there's anything to transmit in the
+ * port's xmit_buf.
+ */
+ stall = 0;
+ for (chan = 0; chan < NUM_TTY_CHANNELS; ++chan)
+ stall += put_channel_data(chan);
+
+ /*
+ * If some of the buffers are full, hold off for a short while
+ * to allow them to empty.
+ */
+ if (stall)
+ msleep(25);
+
+ wait_event_interruptible(dashtty_waitqueue,
+ atomic_read(&dashtty_xmit_cnt));
+ }
+
+ return 0;
+}
+
+/*
+ * This gets called every DA_TTY_POLL and polls the channels for data
+ */
+static void dashtty_timer(unsigned long ignored)
+{
+ int channel;
+
+ /* If there are no ports open do nothing and don't poll again. */
+ if (!atomic_read(&num_channels_need_poll))
+ return;
+
+ channel = find_channel_to_poll();
+
+ /* Did we find a channel to poll? */
+ if (channel >= 0)
+ fetch_data(channel);
+
+ mod_timer_pinned(&poll_timer, jiffies + DA_TTY_POLL);
+}
+
+static void add_poll_timer(struct timer_list *poll_timer)
+{
+ setup_timer(poll_timer, dashtty_timer, 0);
+ poll_timer->expires = jiffies + DA_TTY_POLL;
+
+ /*
+ * Always attach the timer to the boot CPU. The DA channels are per-CPU
+ * so all polling should be from a single CPU.
+ */
+ add_timer_on(poll_timer, 0);
+}
+
+static int dashtty_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct dashtty_port *dport = container_of(port, struct dashtty_port,
+ port);
+ void *rx_buf;
+
+ /* Allocate the buffer we use for writing data */
+ if (tty_port_alloc_xmit_buf(port) < 0)
+ goto err;
+
+ /* Allocate the buffer we use for reading data */
+ rx_buf = kzalloc(RX_BUF_SIZE, GFP_KERNEL);
+ if (!rx_buf)
+ goto err_free_xmit;
+
+ spin_lock_bh(&dport->rx_lock);
+ dport->rx_buf = rx_buf;
+ spin_unlock_bh(&dport->rx_lock);
+
+ /*
+ * Don't add the poll timer if we're opening a console. This
+ * avoids the overhead of polling the Dash but means it is not
+ * possible to have a login on /dev/console.
+ *
+ */
+ if (dport != &dashtty_ports[CONSOLE_CHANNEL])
+ if (atomic_inc_return(&num_channels_need_poll) == 1)
+ add_poll_timer(&poll_timer);
+
+ return 0;
+err_free_xmit:
+ tty_port_free_xmit_buf(port);
+err:
+ return -ENOMEM;
+}
+
+static void dashtty_port_shutdown(struct tty_port *port)
+{
+ struct dashtty_port *dport = container_of(port, struct dashtty_port,
+ port);
+ void *rx_buf;
+ unsigned int count;
+
+ /* stop reading */
+ if (dport != &dashtty_ports[CONSOLE_CHANNEL])
+ if (atomic_dec_and_test(&num_channels_need_poll))
+ del_timer_sync(&poll_timer);
+
+ mutex_lock(&dport->xmit_lock);
+ count = dport->xmit_cnt;
+ mutex_unlock(&dport->xmit_lock);
+ if (count) {
+ /*
+ * There's still data to write out, so wake and wait for the
+ * writer thread to drain the buffer.
+ */
+ del_timer(&put_timer);
+ wake_up_interruptible(&dashtty_waitqueue);
+ wait_for_completion(&dport->xmit_empty);
+ }
+
+ /* Null the read buffer (timer could still be running!) */
+ spin_lock_bh(&dport->rx_lock);
+ rx_buf = dport->rx_buf;
+ dport->rx_buf = NULL;
+ spin_unlock_bh(&dport->rx_lock);
+ /* Free the read buffer */
+ kfree(rx_buf);
+
+ /* Free the write buffer */
+ tty_port_free_xmit_buf(port);
+}
+
+static const struct tty_port_operations dashtty_port_ops = {
+ .activate = dashtty_port_activate,
+ .shutdown = dashtty_port_shutdown,
+};
+
+static int dashtty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ return tty_port_install(&dashtty_ports[tty->index].port, driver, tty);
+}
+
+static int dashtty_open(struct tty_struct *tty, struct file *filp)
+{
+ return tty_port_open(tty->port, tty, filp);
+}
+
+static void dashtty_close(struct tty_struct *tty, struct file *filp)
+{
+ return tty_port_close(tty->port, tty, filp);
+}
+
+static void dashtty_hangup(struct tty_struct *tty)
+{
+ int channel;
+ struct dashtty_port *dport;
+
+ channel = tty->index;
+ dport = &dashtty_ports[channel];
+
+ /* drop any data in the xmit buffer */
+ mutex_lock(&dport->xmit_lock);
+ if (dport->xmit_cnt) {
+ atomic_sub(dport->xmit_cnt, &dashtty_xmit_cnt);
+ dport->xmit_cnt = 0;
+ dport->xmit_head = 0;
+ dport->xmit_tail = 0;
+ complete(&dport->xmit_empty);
+ }
+ mutex_unlock(&dport->xmit_lock);
+
+ tty_port_hangup(tty->port);
+}
+
+/**
+ * dashtty_put_timer() - Delayed wake up of kernel thread.
+ * @ignored: unused
+ *
+ * This timer function wakes up the kernel thread if any data exists in the
+ * buffers. It is used to delay the expensive writeout until the writer has
+ * stopped writing.
+ */
+static void dashtty_put_timer(unsigned long ignored)
+{
+ if (atomic_read(&dashtty_xmit_cnt))
+ wake_up_interruptible(&dashtty_waitqueue);
+}
+
+static int dashtty_write(struct tty_struct *tty, const unsigned char *buf,
+ int total)
+{
+ int channel, count, block;
+ struct dashtty_port *dport;
+
+ /* Determine the channel */
+ channel = tty->index;
+ dport = &dashtty_ports[channel];
+
+ /*
+ * Write to output buffer.
+ *
+ * The reason that we asynchronously write the buffer is because if we
+ * were to write the buffer synchronously then because DA channels are
+ * per-CPU the buffer would be written to the channel of whatever CPU
+ * we're running on.
+ *
+ * What we actually want to happen is have all input and output done on
+ * one CPU.
+ */
+ mutex_lock(&dport->xmit_lock);
+ /* work out how many bytes we can write to the xmit buffer */
+ total = min(total, (int)(SERIAL_XMIT_SIZE - dport->xmit_cnt));
+ atomic_add(total, &dashtty_xmit_cnt);
+ dport->xmit_cnt += total;
+ /* write the actual bytes (may need splitting if it wraps) */
+ for (count = total; count; count -= block) {
+ block = min(count, (int)(SERIAL_XMIT_SIZE - dport->xmit_head));
+ memcpy(dport->port.xmit_buf + dport->xmit_head, buf, block);
+ dport->xmit_head += block;
+ if (dport->xmit_head >= SERIAL_XMIT_SIZE)
+ dport->xmit_head -= SERIAL_XMIT_SIZE;
+ buf += block;
+ }
+ count = dport->xmit_cnt;
+ /* xmit buffer no longer empty? */
+ if (count)
+ INIT_COMPLETION(dport->xmit_empty);
+ mutex_unlock(&dport->xmit_lock);
+
+ if (total) {
+ /*
+ * If the buffer is full, wake up the kthread, otherwise allow
+ * some more time for the buffer to fill up a bit before waking
+ * it.
+ */
+ if (count == SERIAL_XMIT_SIZE) {
+ del_timer(&put_timer);
+ wake_up_interruptible(&dashtty_waitqueue);
+ } else {
+ mod_timer(&put_timer, jiffies + DA_TTY_PUT_DELAY);
+ }
+ }
+ return total;
+}
+
+static int dashtty_write_room(struct tty_struct *tty)
+{
+ struct dashtty_port *dport;
+ int channel;
+ int room;
+
+ channel = tty->index;
+ dport = &dashtty_ports[channel];
+
+ /* report the space in the xmit buffer */
+ mutex_lock(&dport->xmit_lock);
+ room = SERIAL_XMIT_SIZE - dport->xmit_cnt;
+ mutex_unlock(&dport->xmit_lock);
+
+ return room;
+}
+
+static int dashtty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct dashtty_port *dport;
+ int channel;
+ int chars;
+
+ channel = tty->index;
+ dport = &dashtty_ports[channel];
+
+ /* report the number of bytes in the xmit buffer */
+ mutex_lock(&dport->xmit_lock);
+ chars = dport->xmit_cnt;
+ mutex_unlock(&dport->xmit_lock);
+
+ return chars;
+}
+
+static const struct tty_operations dashtty_ops = {
+ .install = dashtty_install,
+ .open = dashtty_open,
+ .close = dashtty_close,
+ .hangup = dashtty_hangup,
+ .write = dashtty_write,
+ .write_room = dashtty_write_room,
+ .chars_in_buffer = dashtty_chars_in_buffer,
+};
+
+static int __init dashtty_init(void)
+{
+ int ret;
+ int nport;
+ struct dashtty_port *dport;
+
+ if (!metag_da_enabled())
+ return -ENODEV;
+
+ channel_driver = tty_alloc_driver(NUM_TTY_CHANNELS,
+ TTY_DRIVER_REAL_RAW);
+ if (IS_ERR(channel_driver))
+ return PTR_ERR(channel_driver);
+
+ channel_driver->driver_name = "metag_da";
+ channel_driver->name = "ttyDA";
+ channel_driver->major = DA_TTY_MAJOR;
+ channel_driver->minor_start = 0;
+ channel_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ channel_driver->subtype = SERIAL_TYPE_NORMAL;
+ channel_driver->init_termios = tty_std_termios;
+ channel_driver->init_termios.c_cflag |= CLOCAL;
+
+ tty_set_operations(channel_driver, &dashtty_ops);
+ for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
+ dport = &dashtty_ports[nport];
+ tty_port_init(&dport->port);
+ dport->port.ops = &dashtty_port_ops;
+ spin_lock_init(&dport->rx_lock);
+ mutex_init(&dport->xmit_lock);
+ /* the xmit buffer starts empty, i.e. completely written */
+ init_completion(&dport->xmit_empty);
+ complete(&dport->xmit_empty);
+ }
+
+ setup_timer(&put_timer, dashtty_put_timer, 0);
+
+ init_waitqueue_head(&dashtty_waitqueue);
+ dashtty_thread = kthread_create(put_data, NULL, "ttyDA");
+ if (IS_ERR(dashtty_thread)) {
+ pr_err("Couldn't create dashtty thread\n");
+ ret = PTR_ERR(dashtty_thread);
+ goto err_destroy_ports;
+ }
+ /*
+ * Bind the writer thread to the boot CPU so it can't migrate.
+ * DA channels are per-CPU and we want all channel I/O to be on a single
+ * predictable CPU.
+ */
+ kthread_bind(dashtty_thread, 0);
+ wake_up_process(dashtty_thread);
+
+ ret = tty_register_driver(channel_driver);
+
+ if (ret < 0) {
+ pr_err("Couldn't install dashtty driver: err %d\n",
+ ret);
+ goto err_stop_kthread;
+ }
+
+ return 0;
+
+err_stop_kthread:
+ kthread_stop(dashtty_thread);
+err_destroy_ports:
+ for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
+ dport = &dashtty_ports[nport];
+ tty_port_destroy(&dport->port);
+ }
+ put_tty_driver(channel_driver);
+ return ret;
+}
+
+static void dashtty_exit(void)
+{
+ int nport;
+ struct dashtty_port *dport;
+
+ del_timer_sync(&put_timer);
+ kthread_stop(dashtty_thread);
+ del_timer_sync(&poll_timer);
+ tty_unregister_driver(channel_driver);
+ for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
+ dport = &dashtty_ports[nport];
+ tty_port_destroy(&dport->port);
+ }
+ put_tty_driver(channel_driver);
+}
+
+module_init(dashtty_init);
+module_exit(dashtty_exit);
+
+#ifdef CONFIG_DA_CONSOLE
+
+static void dash_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ int actually_written;
+
+ chancall(WRBUF, CONSOLE_CHANNEL, count, (void *)s, &actually_written);
+}
+
+static struct tty_driver *dash_console_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return channel_driver;
+}
+
+struct console dash_console = {
+ .name = "ttyDA",
+ .write = dash_console_write,
+ .device = dash_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = 1,
+};
+
+#endif
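A minimal sketch of how dash_console, which is deliberately left non-static, would be registered from arch setup code; the call site is hypothetical and not part of this file:

#include <linux/console.h>

extern struct console dash_console;

static void __init dash_console_register_sketch(void)
{
	/* Route printk output to the DA console channel. */
	register_console(&dash_console);
}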
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index f9d28503bdec..adeac255e526 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -1405,7 +1405,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
if (inited && !test_bit(TTY_THROTTLED, &tty->flags) &&
MoxaPortRxQueue(p) > 0) { /* RX */
MoxaPortReadData(p);
- tty_schedule_flip(tty);
+ tty_schedule_flip(&p->port);
}
} else {
clear_bit(EMPTYWAIT, &p->statusflags);
@@ -1429,8 +1429,8 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
goto put;
if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */
- tty_insert_flip_char(tty, 0, TTY_BREAK);
- tty_schedule_flip(tty);
+ tty_insert_flip_char(&p->port, 0, TTY_BREAK);
+ tty_schedule_flip(&p->port);
}
if (intr & IntrLine)
@@ -1966,7 +1966,7 @@ static int MoxaPortReadData(struct moxa_port *port)
ofs = baseAddr + DynPage_addr + bufhead + head;
len = (tail >= head) ? (tail - head) :
(rx_mask + 1 - head);
- len = tty_prepare_flip_string(tty, &dst,
+ len = tty_prepare_flip_string(&port->port, &dst,
min(len, count));
memcpy_fromio(dst, ofs, len);
head = (head + len) & rx_mask;
@@ -1978,7 +1978,7 @@ static int MoxaPortReadData(struct moxa_port *port)
while (count > 0) {
writew(pageno, baseAddr + Control_reg);
ofs = baseAddr + DynPage_addr + pageofs;
- len = tty_prepare_flip_string(tty, &dst,
+ len = tty_prepare_flip_string(&port->port, &dst,
min(Page_size - pageofs, count));
memcpy_fromio(dst, ofs, len);
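These moxa hunks follow the pattern used throughout this series: the receive path passes the struct tty_port embedded in the driver's per-port structure instead of a struct tty_struct reference. A minimal sketch of a receive helper under the tty_port flip-buffer API (demo_rx is hypothetical):

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void demo_rx(struct tty_port *port, const unsigned char *buf, int len)
{
	int i;

	/* The flip buffer now belongs to the tty_port, so no tty_struct
	 * reference (and no tty_kref_put) is needed here. */
	for (i = 0; i < len; i++)
		tty_insert_flip_char(port, buf[i], TTY_NORMAL);

	tty_flip_buffer_push(port);
}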
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 40113868bec2..484b6a3c9b03 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -1264,7 +1264,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
(new_serial.flags & ASYNC_FLAGS));
port->close_delay = new_serial.close_delay * HZ / 100;
port->closing_wait = new_serial.closing_wait * HZ / 100;
- tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
(new_serial.baud_base != info->baud_base ||
new_serial.custom_divisor !=
@@ -2079,7 +2079,7 @@ static void mxser_receive_chars(struct tty_struct *tty,
}
while (gdl--) {
ch = inb(port->ioaddr + UART_RX);
- tty_insert_flip_char(tty, ch, 0);
+ tty_insert_flip_char(&port->port, ch, 0);
cnt++;
}
goto end_intr;
@@ -2118,7 +2118,7 @@ intr_old:
} else
flag = TTY_BREAK;
}
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(&port->port, ch, flag);
cnt++;
if (cnt >= recv_room) {
if (!port->ldisc_stop_rx)
@@ -2145,7 +2145,7 @@ end_intr:
* recursive locking.
*/
spin_unlock(&port->slock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
spin_lock(&port->slock);
}
@@ -2364,7 +2364,6 @@ static void mxser_release_vector(struct mxser_board *brd)
static void mxser_release_ISA_res(struct mxser_board *brd)
{
- free_irq(brd->irq, brd);
release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
mxser_release_vector(brd);
}
@@ -2430,6 +2429,7 @@ static void mxser_board_remove(struct mxser_board *brd)
tty_unregister_device(mxvar_sdriver, brd->idx + i);
tty_port_destroy(&brd->ports[i].port);
}
+ free_irq(brd->irq, brd);
}
static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd)
@@ -2554,6 +2554,7 @@ static int mxser_probe(struct pci_dev *pdev,
struct mxser_board *brd;
unsigned int i, j;
unsigned long ioaddress;
+ struct device *tty_dev;
int retval = -EINVAL;
for (i = 0; i < MXSER_BOARDS; i++)
@@ -2637,13 +2638,25 @@ static int mxser_probe(struct pci_dev *pdev,
if (retval)
goto err_rel3;
- for (i = 0; i < brd->info->nports; i++)
- tty_port_register_device(&brd->ports[i].port, mxvar_sdriver,
- brd->idx + i, &pdev->dev);
+ for (i = 0; i < brd->info->nports; i++) {
+ tty_dev = tty_port_register_device(&brd->ports[i].port,
+ mxvar_sdriver, brd->idx + i, &pdev->dev);
+ if (IS_ERR(tty_dev)) {
+ retval = PTR_ERR(tty_dev);
+ for (i--; i >= 0; i--)
+ tty_unregister_device(mxvar_sdriver,
+ brd->idx + i);
+ goto err_relbrd;
+ }
+ }
pci_set_drvdata(pdev, brd);
return 0;
+err_relbrd:
+ for (i = 0; i < brd->info->nports; i++)
+ tty_port_destroy(&brd->ports[i].port);
+ free_irq(brd->irq, brd);
err_rel3:
pci_release_region(pdev, 3);
err_zero:
@@ -2665,7 +2678,6 @@ static void mxser_remove(struct pci_dev *pdev)
mxser_board_remove(brd);
- free_irq(pdev->irq, brd);
pci_release_region(pdev, 2);
pci_release_region(pdev, 3);
pci_disable_device(pdev);
@@ -2683,6 +2695,7 @@ static struct pci_driver mxser_driver = {
static int __init mxser_module_init(void)
{
struct mxser_board *brd;
+ struct device *tty_dev;
unsigned int b, i, m;
int retval;
@@ -2728,14 +2741,29 @@ static int __init mxser_module_init(void)
/* mxser_initbrd will hook ISR. */
if (mxser_initbrd(brd, NULL) < 0) {
+ mxser_release_ISA_res(brd);
brd->info = NULL;
continue;
}
brd->idx = m * MXSER_PORTS_PER_BOARD;
- for (i = 0; i < brd->info->nports; i++)
- tty_port_register_device(&brd->ports[i].port,
+ for (i = 0; i < brd->info->nports; i++) {
+ tty_dev = tty_port_register_device(&brd->ports[i].port,
mxvar_sdriver, brd->idx + i, NULL);
+ if (IS_ERR(tty_dev)) {
+ for (i--; i >= 0; i--)
+ tty_unregister_device(mxvar_sdriver,
+ brd->idx + i);
+ for (i = 0; i < brd->info->nports; i++)
+ tty_port_destroy(&brd->ports[i].port);
+ free_irq(brd->irq, brd);
+ mxser_release_ISA_res(brd);
+ brd->info = NULL;
+ break;
+ }
+ }
+ if (brd->info == NULL)
+ continue;
m++;
}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index dcc0430a49c8..4a43ef5d7962 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1067,9 +1067,9 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD))
if (!(tty->termios.c_cflag & CLOCAL))
tty_hangup(tty);
- if (brk & 0x01)
- tty_insert_flip_char(tty, 0, TTY_BREAK);
}
+ if (brk & 0x01)
+ tty_insert_flip_char(&dlci->port, 0, TTY_BREAK);
dlci->modem_rx = mlines;
}
@@ -1137,7 +1137,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
{
- struct tty_struct *tty;
+ struct tty_port *port;
unsigned int addr = 0 ;
u8 bits;
int len = clen;
@@ -1160,19 +1160,18 @@ static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
bits = *dp;
if ((bits & 1) == 0)
return;
- /* See if we have an uplink tty */
- tty = tty_port_tty_get(&gsm->dlci[addr]->port);
- if (tty) {
- if (bits & 2)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- if (bits & 4)
- tty_insert_flip_char(tty, 0, TTY_PARITY);
- if (bits & 8)
- tty_insert_flip_char(tty, 0, TTY_FRAME);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ port = &gsm->dlci[addr]->port;
+
+ if (bits & 2)
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
+ if (bits & 4)
+ tty_insert_flip_char(port, 0, TTY_PARITY);
+ if (bits & 8)
+ tty_insert_flip_char(port, 0, TTY_FRAME);
+
+ tty_flip_buffer_push(port);
+
gsm_control_reply(gsm, CMD_RLS, data, clen);
}
@@ -1545,36 +1544,37 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
{
/* krefs .. */
struct tty_port *port = &dlci->port;
- struct tty_struct *tty = tty_port_tty_get(port);
+ struct tty_struct *tty;
unsigned int modem = 0;
int len = clen;
if (debug & 16)
- pr_debug("%d bytes for tty %p\n", len, tty);
- if (tty) {
- switch (dlci->adaption) {
- /* Unsupported types */
- /* Packetised interruptible data */
- case 4:
- break;
- /* Packetised uininterruptible voice/data */
- case 3:
- break;
- /* Asynchronous serial with line state in each frame */
- case 2:
- while (gsm_read_ea(&modem, *data++) == 0) {
- len--;
- if (len == 0)
- return;
- }
+ pr_debug("%d bytes for tty\n", len);
+ switch (dlci->adaption) {
+ /* Unsupported types */
+ /* Packetised interruptible data */
+ case 4:
+ break;
+ /* Packetised uninterruptible voice/data */
+ case 3:
+ break;
+ /* Asynchronous serial with line state in each frame */
+ case 2:
+ while (gsm_read_ea(&modem, *data++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ tty = tty_port_tty_get(port);
+ if (tty) {
gsm_process_modem(tty, dlci, modem, clen);
- /* Line state will go via DLCI 0 controls only */
- case 1:
- default:
- tty_insert_flip_string(tty, data, len);
- tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
}
- tty_kref_put(tty);
+ /* Line state will go via DLCI 0 controls only */
+ case 1:
+ default:
+ tty_insert_flip_string(port, data, len);
+ tty_flip_buffer_push(port);
}
}
@@ -1689,6 +1689,8 @@ static inline void dlci_put(struct gsm_dlci *dlci)
tty_port_put(&dlci->port);
}
+static void gsm_destroy_network(struct gsm_dlci *dlci);
+
/**
* gsm_dlci_release - release DLCI
* @dlci: DLCI to destroy
@@ -1702,9 +1704,19 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
{
struct tty_struct *tty = tty_port_tty_get(&dlci->port);
if (tty) {
+ mutex_lock(&dlci->mutex);
+ gsm_destroy_network(dlci);
+ mutex_unlock(&dlci->mutex);
+
+ /* tty_vhangup needs the tty_lock, so unlock and
+ relock after doing the hangup. */
+ tty_unlock(tty);
tty_vhangup(tty);
+ tty_lock(tty);
+ tty_port_tty_set(&dlci->port, NULL);
tty_kref_put(tty);
}
+ dlci->state = DLCI_CLOSED;
dlci_put(dlci);
}
@@ -2947,6 +2959,8 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp)
if (dlci == NULL)
return;
+ if (dlci->state == DLCI_CLOSED)
+ return;
mutex_lock(&dlci->mutex);
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
@@ -2965,6 +2979,8 @@ out:
static void gsmtty_hangup(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return;
tty_port_hangup(&dlci->port);
gsm_dlci_begin_close(dlci);
}
@@ -2972,9 +2988,12 @@ static void gsmtty_hangup(struct tty_struct *tty)
static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
int len)
{
+ int sent;
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
/* Stuff the bytes into the fifo queue */
- int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
+ sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
/* Need to kick the channel */
gsm_dlci_data_kick(dlci);
return sent;
@@ -2983,18 +3002,24 @@ static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
static int gsmtty_write_room(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
return TX_SIZE - kfifo_len(dlci->fifo);
}
static int gsmtty_chars_in_buffer(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
return kfifo_len(dlci->fifo);
}
static void gsmtty_flush_buffer(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return;
/* Caution needed: If we implement reliable transport classes
then the data being transmitted can't simply be junked once
it has first hit the stack. Until then we can just blow it
@@ -3013,6 +3038,8 @@ static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
static int gsmtty_tiocmget(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
return dlci->modem_rx;
}
@@ -3022,6 +3049,8 @@ static int gsmtty_tiocmset(struct tty_struct *tty,
struct gsm_dlci *dlci = tty->driver_data;
unsigned int modem_tx = dlci->modem_tx;
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
modem_tx &= ~clear;
modem_tx |= set;
@@ -3040,6 +3069,8 @@ static int gsmtty_ioctl(struct tty_struct *tty,
struct gsm_netconfig nc;
int index;
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
switch (cmd) {
case GSMIOC_ENABLE_NET:
if (copy_from_user(&nc, (void __user *)arg, sizeof(nc)))
@@ -3066,6 +3097,9 @@ static int gsmtty_ioctl(struct tty_struct *tty,
static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return;
/* For the moment its fixed. In actual fact the speed information
for the virtual channel can be propogated in both directions by
the RPN control message. This however rapidly gets nasty as we
@@ -3077,6 +3111,8 @@ static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
static void gsmtty_throttle(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return;
if (tty->termios.c_cflag & CRTSCTS)
dlci->modem_tx &= ~TIOCM_DTR;
dlci->throttled = 1;
@@ -3087,6 +3123,8 @@ static void gsmtty_throttle(struct tty_struct *tty)
static void gsmtty_unthrottle(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci->state == DLCI_CLOSED)
+ return;
if (tty->termios.c_cflag & CRTSCTS)
dlci->modem_tx |= TIOCM_DTR;
dlci->throttled = 0;
@@ -3098,6 +3136,8 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
{
struct gsm_dlci *dlci = tty->driver_data;
int encode = 0; /* Off */
+ if (dlci->state == DLCI_CLOSED)
+ return -EINVAL;
if (state == -1) /* "On indefinitely" - we can't encode this
properly */
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 19083efa2314..05e72bea9b07 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -49,6 +49,7 @@
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/ratelimit.h>
/* number of characters left in xmit buffer before select has we have room */
@@ -100,7 +101,7 @@ struct n_tty_data {
struct mutex atomic_read_lock;
struct mutex output_lock;
struct mutex echo_lock;
- spinlock_t read_lock;
+ raw_spinlock_t read_lock;
};
static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
@@ -182,9 +183,9 @@ static void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
* The problem of stomping on the buffers ends here.
* Why didn't anyone see this one coming? --AJK
*/
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
put_tty_queue_nolock(c, ldata);
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
}
/**
@@ -218,9 +219,9 @@ static void reset_buffer_flags(struct tty_struct *tty)
struct n_tty_data *ldata = tty->disc_data;
unsigned long flags;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_head = ldata->read_tail = ldata->read_cnt = 0;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
mutex_lock(&ldata->echo_lock);
ldata->echo_pos = ldata->echo_cnt = ldata->echo_overrun = 0;
@@ -276,7 +277,7 @@ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
unsigned long flags;
ssize_t n = 0;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
if (!ldata->icanon) {
n = ldata->read_cnt;
} else if (ldata->canon_data) {
@@ -284,7 +285,7 @@ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
ldata->canon_head - ldata->read_tail :
ldata->canon_head + (N_TTY_BUF_SIZE - ldata->read_tail);
}
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
return n;
}
@@ -915,19 +916,19 @@ static void eraser(unsigned char c, struct tty_struct *tty)
kill_type = WERASE;
else {
if (!L_ECHO(tty)) {
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_cnt -= ((ldata->read_head - ldata->canon_head) &
(N_TTY_BUF_SIZE - 1));
ldata->read_head = ldata->canon_head;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
return;
}
if (!L_ECHOK(tty) || !L_ECHOKE(tty) || !L_ECHOE(tty)) {
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_cnt -= ((ldata->read_head - ldata->canon_head) &
(N_TTY_BUF_SIZE - 1));
ldata->read_head = ldata->canon_head;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
finish_erasing(ldata);
echo_char(KILL_CHAR(tty), tty);
/* Add a newline if ECHOK is on and ECHOKE is off. */
@@ -961,10 +962,10 @@ static void eraser(unsigned char c, struct tty_struct *tty)
break;
}
cnt = (ldata->read_head - head) & (N_TTY_BUF_SIZE-1);
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_head = head;
ldata->read_cnt -= cnt;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (L_ECHO(tty)) {
if (L_ECHOPRT(tty)) {
if (!ldata->erasing) {
@@ -1344,12 +1345,12 @@ send_signal:
put_tty_queue(c, ldata);
handle_newline:
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
set_bit(ldata->read_head, ldata->read_flags);
put_tty_queue_nolock(c, ldata);
ldata->canon_head = ldata->read_head;
ldata->canon_data++;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
@@ -1423,7 +1424,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
unsigned long cpuflags;
if (ldata->real_raw) {
- spin_lock_irqsave(&ldata->read_lock, cpuflags);
+ raw_spin_lock_irqsave(&ldata->read_lock, cpuflags);
i = min(N_TTY_BUF_SIZE - ldata->read_cnt,
N_TTY_BUF_SIZE - ldata->read_head);
i = min(count, i);
@@ -1439,7 +1440,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
memcpy(ldata->read_buf + ldata->read_head, cp, i);
ldata->read_head = (ldata->read_head + i) & (N_TTY_BUF_SIZE-1);
ldata->read_cnt += i;
- spin_unlock_irqrestore(&ldata->read_lock, cpuflags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, cpuflags);
} else {
for (i = count, p = cp, f = fp; i; i--, p++) {
if (f)
@@ -1635,7 +1636,7 @@ static int n_tty_open(struct tty_struct *tty)
mutex_init(&ldata->atomic_read_lock);
mutex_init(&ldata->output_lock);
mutex_init(&ldata->echo_lock);
- spin_lock_init(&ldata->read_lock);
+ raw_spin_lock_init(&ldata->read_lock);
/* These are ugly. Currently a malloc failure here can panic */
ldata->read_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
@@ -1703,10 +1704,10 @@ static int copy_from_read_buf(struct tty_struct *tty,
bool is_eof;
retval = 0;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
n = min(ldata->read_cnt, N_TTY_BUF_SIZE - ldata->read_tail);
n = min(*nr, n);
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (n) {
retval = copy_to_user(*b, &ldata->read_buf[ldata->read_tail], n);
n -= retval;
@@ -1714,13 +1715,13 @@ static int copy_from_read_buf(struct tty_struct *tty,
ldata->read_buf[ldata->read_tail] == EOF_CHAR(tty);
tty_audit_add_data(tty, &ldata->read_buf[ldata->read_tail], n,
ldata->icanon);
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_tail = (ldata->read_tail + n) & (N_TTY_BUF_SIZE-1);
ldata->read_cnt -= n;
/* Turn single EOF into zero-length read */
if (L_EXTPROC(tty) && ldata->icanon && is_eof && !ldata->read_cnt)
n = 0;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
*b += n;
*nr -= n;
}
@@ -1900,7 +1901,7 @@ do_it_again:
if (ldata->icanon && !L_EXTPROC(tty)) {
/* N.B. avoid overrun if nr == 0 */
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
while (nr && ldata->read_cnt) {
int eol;
@@ -1918,25 +1919,25 @@ do_it_again:
if (--ldata->canon_data < 0)
ldata->canon_data = 0;
}
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (!eol || (c != __DISABLED_CHAR)) {
if (tty_put_user(tty, c, b++)) {
retval = -EFAULT;
b--;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
break;
}
nr--;
}
if (eol) {
tty_audit_push(tty);
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
break;
}
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
}
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (retval)
break;
} else {
@@ -2188,7 +2189,7 @@ struct tty_ldisc_ops tty_ldisc_N_TTY = {
* n_tty_inherit_ops - inherit N_TTY methods
* @ops: struct tty_ldisc_ops where to save N_TTY methods
*
- * Used by a generic struct tty_ldisc_ops to easily inherit N_TTY
+ * Enables a 'subclass' line discipline to 'inherit' N_TTY
* methods.
*/
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index a0c69ab04399..2dff19796157 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -827,15 +827,10 @@ static int receive_data(enum port_type index, struct nozomi *dc)
struct tty_struct *tty = tty_port_tty_get(&port->port);
int i, ret;
- if (unlikely(!tty)) {
- DBG1("tty not open for port: %d?", index);
- return 1;
- }
-
read_mem32((u32 *) &size, addr, 4);
/* DBG1( "%d bytes port: %d", size, index); */
- if (test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
DBG1("No room in tty, don't read data, don't ack interrupt, "
"disable interrupt");
@@ -855,13 +850,14 @@ static int receive_data(enum port_type index, struct nozomi *dc)
read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
if (size == 1) {
- tty_insert_flip_char(tty, buf[0], TTY_NORMAL);
+ tty_insert_flip_char(&port->port, buf[0], TTY_NORMAL);
size = 0;
} else if (size < RECEIVE_BUF_MAX) {
- size -= tty_insert_flip_string(tty, (char *) buf, size);
+ size -= tty_insert_flip_string(&port->port,
+ (char *)buf, size);
} else {
- i = tty_insert_flip_string(tty, \
- (char *) buf, RECEIVE_BUF_MAX);
+ i = tty_insert_flip_string(&port->port,
+ (char *)buf, RECEIVE_BUF_MAX);
size -= i;
offset += i;
}
@@ -1276,15 +1272,11 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
exit_handler:
spin_unlock(&dc->spin_mutex);
- for (a = 0; a < NOZOMI_MAX_PORTS; a++) {
- struct tty_struct *tty;
- if (test_and_clear_bit(a, &dc->flip)) {
- tty = tty_port_tty_get(&dc->port[a].port);
- if (tty)
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
- }
+
+ for (a = 0; a < NOZOMI_MAX_PORTS; a++)
+ if (test_and_clear_bit(a, &dc->flip))
+ tty_flip_buffer_push(&dc->port[a].port);
+
return IRQ_HANDLED;
none:
spin_unlock(&dc->spin_mutex);
@@ -1687,12 +1679,6 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
rval = kfifo_in(&port->fifo_ul, (unsigned char *)buffer, count);
- /* notify card */
- if (unlikely(dc == NULL)) {
- DBG1("No device context?");
- goto exit;
- }
-
spin_lock_irqsave(&dc->spin_mutex, flags);
/* CTS is only valid on the modem channel */
if (port == &(dc->port[PORT_MDM])) {
@@ -1708,7 +1694,6 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
}
spin_unlock_irqrestore(&dc->spin_mutex, flags);
-exit:
return rval;
}
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 79ff3a5e925d..c24b4db243b9 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -38,16 +38,18 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
if (tty->driver->subtype == PTY_TYPE_MASTER)
WARN_ON(tty->count > 1);
else {
+ if (test_bit(TTY_IO_ERROR, &tty->flags))
+ return;
if (tty->count > 2)
return;
}
+ set_bit(TTY_IO_ERROR, &tty->flags);
wake_up_interruptible(&tty->read_wait);
wake_up_interruptible(&tty->write_wait);
tty->packet = 0;
/* Review - krefs on tty_link ?? */
if (!tty->link)
return;
- tty->link->packet = 0;
set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
wake_up_interruptible(&tty->link->read_wait);
wake_up_interruptible(&tty->link->write_wait);
@@ -55,9 +57,10 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
set_bit(TTY_OTHER_CLOSED, &tty->flags);
#ifdef CONFIG_UNIX98_PTYS
if (tty->driver == ptm_driver) {
- mutex_lock(&devpts_mutex);
- devpts_pty_kill(tty->link->driver_data);
- mutex_unlock(&devpts_mutex);
+ mutex_lock(&devpts_mutex);
+ if (tty->link->driver_data)
+ devpts_pty_kill(tty->link->driver_data);
+ mutex_unlock(&devpts_mutex);
}
#endif
tty_unlock(tty);
@@ -120,10 +123,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
if (c > 0) {
/* Stuff the data into the input queue of the other end */
- c = tty_insert_flip_string(to, buf, c);
+ c = tty_insert_flip_string(to->port, buf, c);
/* And shovel */
if (c) {
- tty_flip_buffer_push(to);
+ tty_flip_buffer_push(to->port);
tty_wakeup(tty);
}
}
@@ -246,14 +249,17 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
if (!tty || !tty->link)
goto out;
+ set_bit(TTY_IO_ERROR, &tty->flags);
+
retval = -EIO;
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
goto out;
if (test_bit(TTY_PTY_LOCK, &tty->link->flags))
goto out;
- if (tty->link->count != 1)
+ if (tty->driver->subtype == PTY_TYPE_SLAVE && tty->link->count != 1)
goto out;
+ clear_bit(TTY_IO_ERROR, &tty->flags);
clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
set_bit(TTY_THROTTLED, &tty->flags);
retval = 0;
@@ -663,7 +669,7 @@ static const struct tty_operations pty_unix98_ops = {
* Allocate a unix98 pty master device from the ptmx driver.
*
* Locking: tty_mutex protects the init_dev work. tty->count should
- * protect the rest.
+ * protect the rest.
* allocated_ptys_lock handles the list of free pty numbers
*/
@@ -704,6 +710,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
mutex_unlock(&tty_mutex);
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ tty->driver_data = inode;
tty_add_file(tty, filp);
@@ -714,14 +721,13 @@ static int ptmx_open(struct inode *inode, struct file *filp)
retval = PTR_ERR(slave_inode);
goto err_release;
}
+ tty->link->driver_data = slave_inode;
retval = ptm_driver->ops->open(tty, filp);
if (retval)
goto err_release;
tty_unlock(tty);
- tty->driver_data = inode;
- tty->link->driver_data = slave_inode;
return 0;
err_release:
tty_unlock(tty);
@@ -797,7 +803,7 @@ static void __init unix98_pty_init(void)
cdev_init(&ptmx_cdev, &ptmx_fops);
if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
- panic("Couldn't register /dev/ptmx driver\n");
+ panic("Couldn't register /dev/ptmx driver");
device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 2), NULL, "ptmx");
}
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index e42009a00529..1d270034bfc3 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -55,7 +55,7 @@
#undef REV_PCI_ORDER
#undef ROCKET_DEBUG_IO
-#define POLL_PERIOD HZ/100 /* Polling period .01 seconds (10ms) */
+#define POLL_PERIOD (HZ/100) /* Polling period .01 seconds (10ms) */
/****** Kernel includes ******/
@@ -315,9 +315,8 @@ static inline int rocket_paranoia_check(struct r_port *info,
* that receive data is present on a serial port. Pulls data from FIFO, moves it into the
* tty layer.
*/
-static void rp_do_receive(struct r_port *info,
- struct tty_struct *tty,
- CHANNEL_t * cp, unsigned int ChanStatus)
+static void rp_do_receive(struct r_port *info, CHANNEL_t *cp,
+ unsigned int ChanStatus)
{
unsigned int CharNStat;
int ToRecv, wRecv, space;
@@ -379,7 +378,8 @@ static void rp_do_receive(struct r_port *info,
flag = TTY_OVERRUN;
else
flag = TTY_NORMAL;
- tty_insert_flip_char(tty, CharNStat & 0xff, flag);
+ tty_insert_flip_char(&info->port, CharNStat & 0xff,
+ flag);
ToRecv--;
}
@@ -399,7 +399,7 @@ static void rp_do_receive(struct r_port *info,
* characters at time by doing repeated word IO
* transfer.
*/
- space = tty_prepare_flip_string(tty, &cbuf, ToRecv);
+ space = tty_prepare_flip_string(&info->port, &cbuf, ToRecv);
if (space < ToRecv) {
#ifdef ROCKET_DEBUG_RECEIVE
printk(KERN_INFO "rp_do_receive:insufficient space ToRecv=%d space=%d\n", ToRecv, space);
@@ -415,7 +415,7 @@ static void rp_do_receive(struct r_port *info,
cbuf[ToRecv - 1] = sInB(sGetTxRxDataIO(cp));
}
/* Push the data up to the tty layer */
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&info->port);
}
/*
@@ -494,7 +494,6 @@ static void rp_do_transmit(struct r_port *info)
static void rp_handle_port(struct r_port *info)
{
CHANNEL_t *cp;
- struct tty_struct *tty;
unsigned int IntMask, ChanStatus;
if (!info)
@@ -505,12 +504,7 @@ static void rp_handle_port(struct r_port *info)
"info->flags & NOT_INIT\n");
return;
}
- tty = tty_port_tty_get(&info->port);
- if (!tty) {
- printk(KERN_WARNING "rp: WARNING: rp_handle_port called with "
- "tty==NULL\n");
- return;
- }
+
cp = &info->channel;
IntMask = sGetChanIntID(cp) & info->intmask;
@@ -519,7 +513,7 @@ static void rp_handle_port(struct r_port *info)
#endif
ChanStatus = sGetChanStatus(cp);
if (IntMask & RXF_TRIG) { /* Rx FIFO trigger level */
- rp_do_receive(info, tty, cp, ChanStatus);
+ rp_do_receive(info, cp, ChanStatus);
}
if (IntMask & DELTA_CD) { /* CD change */
#if (defined(ROCKET_DEBUG_OPEN) || defined(ROCKET_DEBUG_INTR) || defined(ROCKET_DEBUG_HANGUP))
@@ -527,10 +521,15 @@ static void rp_handle_port(struct r_port *info)
(ChanStatus & CD_ACT) ? "on" : "off");
#endif
if (!(ChanStatus & CD_ACT) && info->cd_status) {
+ struct tty_struct *tty;
#ifdef ROCKET_DEBUG_HANGUP
printk(KERN_INFO "CD drop, calling hangup.\n");
#endif
- tty_hangup(tty);
+ tty = tty_port_tty_get(&info->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
}
info->cd_status = (ChanStatus & CD_ACT) ? 1 : 0;
wake_up_interruptible(&info->port.open_wait);
@@ -543,7 +542,6 @@ static void rp_handle_port(struct r_port *info)
printk(KERN_INFO "DSR change...\n");
}
#endif
- tty_kref_put(tty);
}
/*
@@ -1758,8 +1756,29 @@ static void rp_flush_buffer(struct tty_struct *tty)
#ifdef CONFIG_PCI
-static struct pci_device_id __used rocket_pci_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_ANY_ID) },
+static DEFINE_PCI_DEVICE_TABLE(rocket_pci_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP4QUAD) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8OCTA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP8OCTA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP8INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8J) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP4J) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8SNI) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP16SNI) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP16INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP16INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_CRP16INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP32INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP32INTF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RPP4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RPP8) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP2_232) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP2_422) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP6M) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP4M) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_UPCI_RM3_8PORT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_UPCI_RM3_4PORT) },
{ }
};
MODULE_DEVICE_TABLE(pci, rocket_pci_ids);
@@ -1781,7 +1800,8 @@ static __init int register_PCI(int i, struct pci_dev *dev)
WordIO_t ConfigIO = 0;
ByteIO_t UPCIRingInd = 0;
- if (!dev || pci_enable_device(dev))
+ if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
+ pci_enable_device(dev))
return 0;
rcktpt_io_addr[i] = pci_resource_start(dev, 0);
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index a44345a2dbb4..c7e8b60b6177 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -85,7 +85,6 @@ static void serial21285_enable_ms(struct uart_port *port)
static irqreturn_t serial21285_rx_chars(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct tty_struct *tty = port->state->port.tty;
unsigned int status, ch, flag, rxs, max_count = 256;
status = *CSR_UARTFLG;
@@ -115,7 +114,7 @@ static irqreturn_t serial21285_rx_chars(int irq, void *dev_id)
status = *CSR_UARTFLG;
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index f99a84526f82..49399470794d 100644
--- a/drivers/tty/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -262,8 +262,7 @@ static void rs_start(struct tty_struct *tty)
local_irq_restore(flags);
}
-static void receive_chars(struct m68k_serial *info, struct tty_struct *tty,
- unsigned short rx)
+static void receive_chars(struct m68k_serial *info, unsigned short rx)
{
m68328_uart *uart = &uart_addr[info->line];
unsigned char ch, flag;
@@ -293,9 +292,6 @@ static void receive_chars(struct m68k_serial *info, struct tty_struct *tty,
}
}
- if(!tty)
- goto clear_and_exit;
-
flag = TTY_NORMAL;
if (rx & URX_PARITY_ERROR)
@@ -305,15 +301,12 @@ static void receive_chars(struct m68k_serial *info, struct tty_struct *tty,
else if (rx & URX_FRAME_ERROR)
flag = TTY_FRAME;
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(&info->tport, ch, flag);
#ifndef CONFIG_XCOPILOT_BUGS
} while((rx = uart->urx.w) & URX_DATA_READY);
#endif
- tty_schedule_flip(tty);
-
-clear_and_exit:
- return;
+ tty_schedule_flip(&info->tport);
}
static void transmit_chars(struct m68k_serial *info, struct tty_struct *tty)
@@ -367,11 +360,11 @@ irqreturn_t rs_interrupt(int irq, void *dev_id)
tx = uart->utx.w;
if (rx & URX_DATA_READY)
- receive_chars(info, tty, rx);
+ receive_chars(info, rx);
if (tx & UTX_TX_AVAIL)
transmit_chars(info, tty);
#else
- receive_chars(info, tty, rx);
+ receive_chars(info, rx);
#endif
tty_kref_put(tty);
@@ -1009,7 +1002,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
m68328_uart *uart = &uart_addr[info->line];
unsigned long flags;
- if (!info || serial_paranoia_check(info, tty->name, "rs_close"))
+ if (serial_paranoia_check(info, tty->name, "rs_close"))
return;
local_irq_save(flags);
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index f9320437a649..0efc815a4968 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -239,13 +239,6 @@ static const struct serial8250_config uart_config[] = {
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO | UART_CAP_UUE | UART_CAP_RTOIE,
},
- [PORT_RM9000] = {
- .name = "RM9000",
- .fifo_size = 16,
- .tx_loadsz = 16,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO,
- },
[PORT_OCTEON] = {
.name = "OCTEON",
.fifo_size = 64,
@@ -324,9 +317,9 @@ static void default_serial_dl_write(struct uart_8250_port *up, int value)
serial_out(up, UART_DLM, value >> 8 & 0xff);
}
-#ifdef CONFIG_MIPS_ALCHEMY
+#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
-/* Au1x00 UART hardware has a weird register layout */
+/* Au1x00/RT288x UART hardware has a weird register layout */
static const u8 au_io_in_map[] = {
[UART_RX] = 0,
[UART_IER] = 2,
@@ -370,56 +363,6 @@ static void au_serial_dl_write(struct uart_8250_port *up, int value)
#endif
-#ifdef CONFIG_SERIAL_8250_RM9K
-
-static const u8
- regmap_in[8] = {
- [UART_RX] = 0x00,
- [UART_IER] = 0x0c,
- [UART_IIR] = 0x14,
- [UART_LCR] = 0x1c,
- [UART_MCR] = 0x20,
- [UART_LSR] = 0x24,
- [UART_MSR] = 0x28,
- [UART_SCR] = 0x2c
- },
- regmap_out[8] = {
- [UART_TX] = 0x04,
- [UART_IER] = 0x0c,
- [UART_FCR] = 0x18,
- [UART_LCR] = 0x1c,
- [UART_MCR] = 0x20,
- [UART_LSR] = 0x24,
- [UART_MSR] = 0x28,
- [UART_SCR] = 0x2c
- };
-
-static unsigned int rm9k_serial_in(struct uart_port *p, int offset)
-{
- offset = regmap_in[offset] << p->regshift;
- return readl(p->membase + offset);
-}
-
-static void rm9k_serial_out(struct uart_port *p, int offset, int value)
-{
- offset = regmap_out[offset] << p->regshift;
- writel(value, p->membase + offset);
-}
-
-static int rm9k_serial_dl_read(struct uart_8250_port *up)
-{
- return ((__raw_readl(up->port.membase + 0x10) << 8) |
- (__raw_readl(up->port.membase + 0x08) & 0xff)) & 0xffff;
-}
-
-static void rm9k_serial_dl_write(struct uart_8250_port *up, int value)
-{
- __raw_writel(value, up->port.membase + 0x08);
- __raw_writel(value >> 8, up->port.membase + 0x10);
-}
-
-#endif
-
static unsigned int hub6_serial_in(struct uart_port *p, int offset)
{
offset = offset << p->regshift;
@@ -497,16 +440,7 @@ static void set_io_from_upio(struct uart_port *p)
p->serial_out = mem32_serial_out;
break;
-#ifdef CONFIG_SERIAL_8250_RM9K
- case UPIO_RM9000:
- p->serial_in = rm9k_serial_in;
- p->serial_out = rm9k_serial_out;
- up->dl_read = rm9k_serial_dl_read;
- up->dl_write = rm9k_serial_dl_write;
- break;
-#endif
-
-#ifdef CONFIG_MIPS_ALCHEMY
+#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
case UPIO_AU:
p->serial_in = au_serial_in;
p->serial_out = au_serial_out;
@@ -1341,7 +1275,9 @@ static void serial8250_start_tx(struct uart_port *port)
struct uart_8250_port *up =
container_of(port, struct uart_8250_port, port);
- if (!(up->ier & UART_IER_THRI)) {
+ if (up->dma && !serial8250_tx_dma(up)) {
+ return;
+ } else if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
serial_port_out(port, UART_IER, up->ier);
@@ -1349,9 +1285,7 @@ static void serial8250_start_tx(struct uart_port *port)
unsigned char lsr;
lsr = serial_in(up, UART_LSR);
up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
- if ((port->type == PORT_RM9000) ?
- (lsr & UART_LSR_THRE) :
- (lsr & UART_LSR_TEMT))
+ if (lsr & UART_LSR_TEMT)
serial8250_tx_chars(up);
}
}
@@ -1397,7 +1331,6 @@ unsigned char
serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
{
struct uart_port *port = &up->port;
- struct tty_struct *tty = port->state->port.tty;
unsigned char ch;
int max_count = 256;
char flag;
@@ -1462,7 +1395,7 @@ ignore_char:
lsr = serial_in(up, UART_LSR);
} while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
spin_unlock(&port->lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
spin_lock(&port->lock);
return lsr;
}
@@ -1547,6 +1480,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
unsigned long flags;
struct uart_8250_port *up =
container_of(port, struct uart_8250_port, port);
+ int dma_err = 0;
if (iir & UART_IIR_NO_INT)
return 0;
@@ -1557,8 +1491,13 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
DEBUG_INTR("status = %x...", status);
- if (status & (UART_LSR_DR | UART_LSR_BI))
- status = serial8250_rx_chars(up, status);
+ if (status & (UART_LSR_DR | UART_LSR_BI)) {
+ if (up->dma)
+ dma_err = serial8250_rx_dma(up, iir);
+
+ if (!up->dma || dma_err)
+ status = serial8250_rx_chars(up, status);
+ }
serial8250_modem_status(up);
if (status & UART_LSR_THRE)
serial8250_tx_chars(up);
@@ -1991,9 +1930,12 @@ static int serial8250_startup(struct uart_port *port)
if (port->type == PORT_8250_CIR)
return -ENODEV;
- port->fifosize = uart_config[up->port.type].fifo_size;
- up->tx_loadsz = uart_config[up->port.type].tx_loadsz;
- up->capabilities = uart_config[up->port.type].flags;
+ if (!port->fifosize)
+ port->fifosize = uart_config[port->type].fifo_size;
+ if (!up->tx_loadsz)
+ up->tx_loadsz = uart_config[port->type].tx_loadsz;
+ if (!up->capabilities)
+ up->capabilities = uart_config[port->type].flags;
up->mcr = 0;
if (port->iotype != up->cur_iotype)
@@ -2198,6 +2140,18 @@ dont_test_tx_en:
up->msr_saved_flags = 0;
/*
+ * Request DMA channels for both RX and TX.
+ */
+ if (up->dma) {
+ retval = serial8250_request_dma(up);
+ if (retval) {
+ pr_warn_ratelimited("ttyS%d - failed to request DMA\n",
+ serial_index(port));
+ up->dma = NULL;
+ }
+ }
+
+ /*
* Finally, enable interrupts. Note: Modem status interrupts
* are set via set_termios(), which will be occurring imminently
* anyway, so we don't enable them here.
@@ -2230,6 +2184,9 @@ static void serial8250_shutdown(struct uart_port *port)
up->ier = 0;
serial_port_out(port, UART_IER, 0);
+ if (up->dma)
+ serial8250_release_dma(up);
+
spin_lock_irqsave(&port->lock, flags);
if (port->flags & UPF_FOURPORT) {
/* reset interrupts on the AST Fourport board */
@@ -2826,9 +2783,12 @@ static void
serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type)
{
up->port.type = type;
- up->port.fifosize = uart_config[type].fifo_size;
- up->capabilities = uart_config[type].flags;
- up->tx_loadsz = uart_config[type].tx_loadsz;
+ if (!up->port.fifosize)
+ up->port.fifosize = uart_config[type].fifo_size;
+ if (!up->tx_loadsz)
+ up->tx_loadsz = uart_config[type].tx_loadsz;
+ if (!up->capabilities)
+ up->capabilities = uart_config[type].flags;
}
static void __init
@@ -3262,6 +3222,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart->bugs = up->bugs;
uart->port.mapbase = up->port.mapbase;
uart->port.private_data = up->port.private_data;
+ uart->port.fifosize = up->port.fifosize;
+ uart->tx_loadsz = up->tx_loadsz;
+ uart->capabilities = up->capabilities;
+
if (up->port.dev)
uart->port.dev = up->port.dev;
@@ -3287,6 +3251,8 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart->dl_read = up->dl_read;
if (up->dl_write)
uart->dl_write = up->dl_write;
+ if (up->dma)
+ uart->dma = up->dma;
if (serial8250_isa_config != NULL)
serial8250_isa_config(0, &uart->port,
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 12caa1292b75..34eb676916fe 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -12,6 +12,35 @@
*/
#include <linux/serial_8250.h>
+#include <linux/dmaengine.h>
+
+struct uart_8250_dma {
+ dma_filter_fn fn;
+ void *rx_param;
+ void *tx_param;
+
+ int rx_chan_id;
+ int tx_chan_id;
+
+ struct dma_slave_config rxconf;
+ struct dma_slave_config txconf;
+
+ struct dma_chan *rxchan;
+ struct dma_chan *txchan;
+
+ dma_addr_t rx_addr;
+ dma_addr_t tx_addr;
+
+ dma_cookie_t rx_cookie;
+ dma_cookie_t tx_cookie;
+
+ void *rx_buf;
+
+ size_t rx_size;
+ size_t tx_size;
+
+ unsigned char tx_running:1;
+};
struct old_serial_port {
unsigned int uart;
@@ -143,3 +172,24 @@ static inline int is_omap1510_8250(struct uart_8250_port *pt)
return 0;
}
#endif
+
+#ifdef CONFIG_SERIAL_8250_DMA
+extern int serial8250_tx_dma(struct uart_8250_port *);
+extern int serial8250_rx_dma(struct uart_8250_port *, unsigned int iir);
+extern int serial8250_request_dma(struct uart_8250_port *);
+extern void serial8250_release_dma(struct uart_8250_port *);
+#else
+static inline int serial8250_tx_dma(struct uart_8250_port *p)
+{
+ return -1;
+}
+static inline int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
+{
+ return -1;
+}
+static inline int serial8250_request_dma(struct uart_8250_port *p)
+{
+ return -1;
+}
+static inline void serial8250_release_dma(struct uart_8250_port *p) { }
+#endif
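struct uart_8250_dma above is filled in by the low-level port driver and handed to the core through up->dma; serial8250_request_dma() then claims the channels and the RX buffer at port startup. A minimal sketch of that wiring, assuming hypothetical channel ids and a hypothetical filter (my_dma_filter and my_8250_attach_dma are not real kernel symbols):

#include <linux/dmaengine.h>
#include <linux/serial_8250.h>
#include "8250.h"

static bool my_dma_filter(struct dma_chan *chan, void *param)
{
	/* Pick the platform-specified channel number. */
	return chan->chan_id == *(int *)param;
}

static int my_8250_attach_dma(struct uart_8250_port *up, int rx_id, int tx_id)
{
	struct uart_8250_dma *dma;

	dma = devm_kzalloc(up->port.dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->fn = my_dma_filter;
	dma->rx_chan_id = rx_id;
	dma->tx_chan_id = tx_id;
	dma->rx_param = &dma->rx_chan_id;
	dma->tx_param = &dma->tx_chan_id;

	dma->rxconf.direction = DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.direction = DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/* serial8250_request_dma() fills in the FIFO addresses from
	 * port.mapbase and requests both channels when the port starts up. */
	up->dma = dma;
	return 0;
}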
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
new file mode 100644
index 000000000000..b9f7fd28112e
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -0,0 +1,216 @@
+/*
+ * 8250_dma.c - DMA Engine API support for 8250.c
+ *
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_reg.h>
+#include <linux/dma-mapping.h>
+
+#include "8250.h"
+
+static void __dma_tx_complete(void *param)
+{
+ struct uart_8250_port *p = param;
+ struct uart_8250_dma *dma = p->dma;
+ struct circ_buf *xmit = &p->port.state->xmit;
+
+ dma->tx_running = 0;
+
+ dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ xmit->tail += dma->tx_size;
+ xmit->tail &= UART_XMIT_SIZE - 1;
+ p->port.icount.tx += dma->tx_size;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&p->port);
+
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port)) {
+ serial8250_tx_dma(p);
+ uart_write_wakeup(&p->port);
+ }
+}
+
+static void __dma_rx_complete(void *param)
+{
+ struct uart_8250_port *p = param;
+ struct uart_8250_dma *dma = p->dma;
+ struct tty_port *tty_port = &p->port.state->port;
+ struct dma_tx_state state;
+ int count;
+
+ dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ dmaengine_terminate_all(dma->rxchan);
+
+ count = dma->rx_size - state.residue;
+
+ tty_insert_flip_string(tty_port, dma->rx_buf, count);
+ p->port.icount.rx += count;
+
+ tty_flip_buffer_push(tty_port);
+}
+
+int serial8250_tx_dma(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+ struct circ_buf *xmit = &p->port.state->xmit;
+ struct dma_async_tx_descriptor *desc;
+
+ if (dma->tx_running)
+ return -EBUSY;
+
+ dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ if (!dma->tx_size)
+ return -EINVAL;
+
+ desc = dmaengine_prep_slave_single(dma->txchan,
+ dma->tx_addr + xmit->tail,
+ dma->tx_size, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EBUSY;
+
+ dma->tx_running = 1;
+
+ desc->callback = __dma_tx_complete;
+ desc->callback_param = p;
+
+ dma->tx_cookie = dmaengine_submit(desc);
+
+ dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ dma_async_issue_pending(dma->txchan);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(serial8250_tx_dma);
+
+int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
+{
+ struct uart_8250_dma *dma = p->dma;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_tx_state state;
+ int dma_status;
+
+ /*
+ * If RCVR FIFO trigger level was not reached, complete the transfer and
+ * let 8250.c copy the remaining data.
+ */
+ if ((iir & 0x3f) == UART_IIR_RX_TIMEOUT) {
+ dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie,
+ &state);
+ if (dma_status == DMA_IN_PROGRESS) {
+ dmaengine_pause(dma->rxchan);
+ __dma_rx_complete(p);
+ }
+ return -ETIMEDOUT;
+ }
+
+ desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
+ dma->rx_size, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EBUSY;
+
+ desc->callback = __dma_rx_complete;
+ desc->callback_param = p;
+
+ dma->rx_cookie = dmaengine_submit(desc);
+
+ dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ dma_async_issue_pending(dma->rxchan);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(serial8250_rx_dma);
+
+int serial8250_request_dma(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+ dma_cap_mask_t mask;
+
+ dma->rxconf.src_addr = p->port.mapbase + UART_RX;
+ dma->txconf.dst_addr = p->port.mapbase + UART_TX;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Get a channel for RX */
+ dma->rxchan = dma_request_channel(mask, dma->fn, dma->rx_param);
+ if (!dma->rxchan)
+ return -ENODEV;
+
+ dmaengine_slave_config(dma->rxchan, &dma->rxconf);
+
+ /* Get a channel for TX */
+ dma->txchan = dma_request_channel(mask, dma->fn, dma->tx_param);
+ if (!dma->txchan) {
+ dma_release_channel(dma->rxchan);
+ return -ENODEV;
+ }
+
+ dmaengine_slave_config(dma->txchan, &dma->txconf);
+
+ /* RX buffer */
+ if (!dma->rx_size)
+ dma->rx_size = PAGE_SIZE;
+
+ dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
+ &dma->rx_addr, GFP_KERNEL);
+ if (!dma->rx_buf) {
+ dma_release_channel(dma->rxchan);
+ dma_release_channel(dma->txchan);
+ return -ENOMEM;
+ }
+
+ /* TX buffer */
+ dma->tx_addr = dma_map_single(dma->txchan->device->dev,
+ p->port.state->xmit.buf,
+ UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+
+ dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(serial8250_request_dma);
+
+void serial8250_release_dma(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ if (!dma)
+ return;
+
+ /* Release RX resources */
+ dmaengine_terminate_all(dma->rxchan);
+ dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
+ dma->rx_addr);
+ dma_release_channel(dma->rxchan);
+ dma->rxchan = NULL;
+
+ /* Release TX resources */
+ dmaengine_terminate_all(dma->txchan);
+ dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ dma_release_channel(dma->txchan);
+ dma->txchan = NULL;
+ dma->tx_running = 0;
+
+ dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
+}
+EXPORT_SYMBOL_GPL(serial8250_release_dma);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 096d2ef48b32..db0e66f6dd0e 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -2,6 +2,7 @@
* Synopsys DesignWare 8250 driver.
*
* Copyright 2011 Picochip, Jamie Iles.
+ * Copyright 2013 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,6 +25,34 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
+
+#include "8250.h"
+
+/* Offsets for the DesignWare specific registers */
+#define DW_UART_USR 0x1f /* UART Status Register */
+#define DW_UART_CPR 0xf4 /* Component Parameter Register */
+#define DW_UART_UCV 0xf8 /* UART Component Version */
+
+/* Intel Low Power Subsystem specific */
+#define LPSS_PRV_CLOCK_PARAMS 0x800
+
+/* Component Parameter Register bits */
+#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0)
+#define DW_UART_CPR_AFCE_MODE (1 << 4)
+#define DW_UART_CPR_THRE_MODE (1 << 5)
+#define DW_UART_CPR_SIR_MODE (1 << 6)
+#define DW_UART_CPR_SIR_LP_MODE (1 << 7)
+#define DW_UART_CPR_ADDITIONAL_FEATURES (1 << 8)
+#define DW_UART_CPR_FIFO_ACCESS (1 << 9)
+#define DW_UART_CPR_FIFO_STAT (1 << 10)
+#define DW_UART_CPR_SHADOW (1 << 11)
+#define DW_UART_CPR_ENCODED_PARMS (1 << 12)
+#define DW_UART_CPR_DMA_EXTRA (1 << 13)
+#define DW_UART_CPR_FIFO_MODE (0xff << 16)
+/* Helper for fifo size calculation */
+#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
+
struct dw8250_data {
int last_lcr;
@@ -66,9 +95,6 @@ static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
return readl(p->membase + offset);
}
-/* Offset for the DesignWare's UART Status Register. */
-#define UART_USR 0x1f
-
static int dw8250_handle_irq(struct uart_port *p)
{
struct dw8250_data *d = p->private_data;
@@ -78,7 +104,7 @@ static int dw8250_handle_irq(struct uart_port *p)
return 1;
} else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
/* Clear the USR and write the LCR again. */
- (void)p->serial_in(p, UART_USR);
+ (void)p->serial_in(p, DW_UART_USR);
p->serial_out(p, UART_LCR, d->last_lcr);
return 1;
@@ -87,61 +113,210 @@ static int dw8250_handle_irq(struct uart_port *p)
return 0;
}
+static int dw8250_probe_of(struct uart_port *p)
+{
+ struct device_node *np = p->dev->of_node;
+ u32 val;
+
+ if (!of_property_read_u32(np, "reg-io-width", &val)) {
+ switch (val) {
+ case 1:
+ break;
+ case 4:
+ p->iotype = UPIO_MEM32;
+ p->serial_in = dw8250_serial_in32;
+ p->serial_out = dw8250_serial_out32;
+ break;
+ default:
+ dev_err(p->dev, "unsupported reg-io-width (%u)\n", val);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(np, "reg-shift", &val))
+ p->regshift = val;
+
+ if (of_property_read_u32(np, "clock-frequency", &val)) {
+ dev_err(p->dev, "no clock-frequency property set\n");
+ return -EINVAL;
+ }
+ p->uartclk = val;
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static bool dw8250_acpi_dma_filter(struct dma_chan *chan, void *parm)
+{
+ return chan->chan_id == *(int *)parm;
+}
+
+static acpi_status
+dw8250_acpi_walk_resource(struct acpi_resource *res, void *data)
+{
+ struct uart_port *p = data;
+ struct uart_8250_port *port;
+ struct uart_8250_dma *dma;
+ struct acpi_resource_fixed_dma *fixed_dma;
+ struct dma_slave_config *slave;
+
+ port = container_of(p, struct uart_8250_port, port);
+
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_FIXED_DMA:
+ fixed_dma = &res->data.fixed_dma;
+
+ /* TX comes first */
+ if (!port->dma) {
+ dma = devm_kzalloc(p->dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return AE_NO_MEMORY;
+
+ port->dma = dma;
+ slave = &dma->txconf;
+
+ slave->direction = DMA_MEM_TO_DEV;
+ slave->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave->slave_id = fixed_dma->request_lines;
+ slave->dst_maxburst = port->tx_loadsz / 4;
+
+ dma->tx_chan_id = fixed_dma->channels;
+ dma->tx_param = &dma->tx_chan_id;
+ dma->fn = dw8250_acpi_dma_filter;
+ } else {
+ dma = port->dma;
+ slave = &dma->rxconf;
+
+ slave->direction = DMA_DEV_TO_MEM;
+ slave->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave->slave_id = fixed_dma->request_lines;
+ slave->src_maxburst = p->fifosize / 4;
+
+ dma->rx_chan_id = fixed_dma->channels;
+ dma->rx_param = &dma->rx_chan_id;
+ }
+
+ break;
+ }
+
+ return AE_OK;
+}
+
+static int dw8250_probe_acpi(struct uart_port *p)
+{
+ const struct acpi_device_id *id;
+ acpi_status status;
+ u32 reg;
+
+ id = acpi_match_device(p->dev->driver->acpi_match_table, p->dev);
+ if (!id)
+ return -ENODEV;
+
+ p->iotype = UPIO_MEM32;
+ p->serial_in = dw8250_serial_in32;
+ p->serial_out = dw8250_serial_out32;
+ p->regshift = 2;
+ p->uartclk = (unsigned int)id->driver_data;
+
+ status = acpi_walk_resources(ACPI_HANDLE(p->dev), METHOD_NAME__CRS,
+ dw8250_acpi_walk_resource, p);
+ if (ACPI_FAILURE(status)) {
+ dev_err_ratelimited(p->dev, "%s failed \"%s\"\n", __func__,
+ acpi_format_exception(status));
+ return -ENODEV;
+ }
+
+ /* Fix Haswell issue where the clocks do not get enabled */
+ if (!strcmp(id->id, "INT33C4") || !strcmp(id->id, "INT33C5")) {
+ reg = readl(p->membase + LPSS_PRV_CLOCK_PARAMS);
+ writel(reg | 1, p->membase + LPSS_PRV_CLOCK_PARAMS);
+ }
+
+ return 0;
+}
+#else
+static inline int dw8250_probe_acpi(struct uart_port *p)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_ACPI */
+
+static void dw8250_setup_port(struct uart_8250_port *up)
+{
+ struct uart_port *p = &up->port;
+ u32 reg = readl(p->membase + DW_UART_UCV);
+
+ /*
+ * If the Component Version Register returns zero, we know that
+ * ADDITIONAL_FEATURES are not enabled. No need to go any further.
+ */
+ if (!reg)
+ return;
+
+ dev_dbg_ratelimited(p->dev, "Designware UART version %c.%c%c\n",
+ (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
+
+ reg = readl(p->membase + DW_UART_CPR);
+ if (!reg)
+ return;
+
+ /* Select the type based on fifo */
+ if (reg & DW_UART_CPR_FIFO_MODE) {
+ p->type = PORT_16550A;
+ p->flags |= UPF_FIXED_TYPE;
+ p->fifosize = DW_UART_CPR_FIFO_SIZE(reg);
+ up->tx_loadsz = p->fifosize;
+ }
+}
+
static int dw8250_probe(struct platform_device *pdev)
{
struct uart_8250_port uart = {};
struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- struct device_node *np = pdev->dev.of_node;
- u32 val;
struct dw8250_data *data;
+ int err;
if (!regs || !irq) {
dev_err(&pdev->dev, "no registers/irq defined\n");
return -EINVAL;
}
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- uart.port.private_data = data;
-
spin_lock_init(&uart.port.lock);
uart.port.mapbase = regs->start;
uart.port.irq = irq->start;
uart.port.handle_irq = dw8250_handle_irq;
uart.port.type = PORT_8250;
- uart.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP |
- UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ uart.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT;
uart.port.dev = &pdev->dev;
+ uart.port.membase = ioremap(regs->start, resource_size(regs));
+ if (!uart.port.membase)
+ return -ENOMEM;
+
uart.port.iotype = UPIO_MEM;
uart.port.serial_in = dw8250_serial_in;
uart.port.serial_out = dw8250_serial_out;
- if (!of_property_read_u32(np, "reg-io-width", &val)) {
- switch (val) {
- case 1:
- break;
- case 4:
- uart.port.iotype = UPIO_MEM32;
- uart.port.serial_in = dw8250_serial_in32;
- uart.port.serial_out = dw8250_serial_out32;
- break;
- default:
- dev_err(&pdev->dev, "unsupported reg-io-width (%u)\n",
- val);
- return -EINVAL;
- }
+
+ dw8250_setup_port(&uart);
+
+ if (pdev->dev.of_node) {
+ err = dw8250_probe_of(&uart.port);
+ if (err)
+ return err;
+ } else if (ACPI_HANDLE(&pdev->dev)) {
+ err = dw8250_probe_acpi(&uart.port);
+ if (err)
+ return err;
+ } else {
+ return -ENODEV;
}
- if (!of_property_read_u32(np, "reg-shift", &val))
- uart.port.regshift = val;
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- if (of_property_read_u32(np, "clock-frequency", &val)) {
- dev_err(&pdev->dev, "no clock-frequency property set\n");
- return -EINVAL;
- }
- uart.port.uartclk = val;
+ uart.port.private_data = data;
data->line = serial8250_register_8250_port(&uart);
if (data->line < 0)
@@ -184,17 +359,25 @@ static int dw8250_resume(struct platform_device *pdev)
#define dw8250_resume NULL
#endif /* CONFIG_PM */
-static const struct of_device_id dw8250_match[] = {
+static const struct of_device_id dw8250_of_match[] = {
{ .compatible = "snps,dw-apb-uart" },
{ /* Sentinel */ }
};
-MODULE_DEVICE_TABLE(of, dw8250_match);
+MODULE_DEVICE_TABLE(of, dw8250_of_match);
+
+static const struct acpi_device_id dw8250_acpi_match[] = {
+ { "INT33C4", 100000000 },
+ { "INT33C5", 100000000 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
static struct platform_driver dw8250_platform_driver = {
.driver = {
.name = "dw-apb-uart",
.owner = THIS_MODULE,
- .of_match_table = dw8250_match,
+ .of_match_table = dw8250_of_match,
+ .acpi_match_table = ACPI_PTR(dw8250_acpi_match),
},
.probe = dw8250_probe,
.remove = dw8250_remove,
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index f53a7db4350d..721904f8efa9 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -194,7 +194,7 @@ static int __init parse_options(struct early_serial8250_device *device,
options++;
device->baud = simple_strtoul(options, NULL, 0);
length = min(strcspn(options, " "), sizeof(device->options));
- strncpy(device->options, options, length);
+ strlcpy(device->options, options, length);
} else {
device->baud = probe_baud(port);
snprintf(device->options, sizeof(device->options), "%u",
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index a27a98e1b066..791c5a77ec61 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1040,6 +1040,253 @@ static int pci_asix_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
+/* Quatech devices have their own extra interface features */
+
+struct quatech_feature {
+ u16 devid;
+ bool amcc;
+};
+
+#define QPCR_TEST_FOR1 0x3F
+#define QPCR_TEST_GET1 0x00
+#define QPCR_TEST_FOR2 0x40
+#define QPCR_TEST_GET2 0x40
+#define QPCR_TEST_FOR3 0x80
+#define QPCR_TEST_GET3 0x40
+#define QPCR_TEST_FOR4 0xC0
+#define QPCR_TEST_GET4 0x80
+
+#define QOPR_CLOCK_X1 0x0000
+#define QOPR_CLOCK_X2 0x0001
+#define QOPR_CLOCK_X4 0x0002
+#define QOPR_CLOCK_X8 0x0003
+#define QOPR_CLOCK_RATE_MASK 0x0003
+
+
+static struct quatech_feature quatech_cards[] = {
+ { PCI_DEVICE_ID_QUATECH_QSC100, 1 },
+ { PCI_DEVICE_ID_QUATECH_DSC100, 1 },
+ { PCI_DEVICE_ID_QUATECH_DSC100E, 0 },
+ { PCI_DEVICE_ID_QUATECH_DSC200, 1 },
+ { PCI_DEVICE_ID_QUATECH_DSC200E, 0 },
+ { PCI_DEVICE_ID_QUATECH_ESC100D, 1 },
+ { PCI_DEVICE_ID_QUATECH_ESC100M, 1 },
+ { PCI_DEVICE_ID_QUATECH_QSCP100, 1 },
+ { PCI_DEVICE_ID_QUATECH_DSCP100, 1 },
+ { PCI_DEVICE_ID_QUATECH_QSCP200, 1 },
+ { PCI_DEVICE_ID_QUATECH_DSCP200, 1 },
+ { PCI_DEVICE_ID_QUATECH_ESCLP100, 0 },
+ { PCI_DEVICE_ID_QUATECH_QSCLP100, 0 },
+ { PCI_DEVICE_ID_QUATECH_DSCLP100, 0 },
+ { PCI_DEVICE_ID_QUATECH_SSCLP100, 0 },
+ { PCI_DEVICE_ID_QUATECH_QSCLP200, 0 },
+ { PCI_DEVICE_ID_QUATECH_DSCLP200, 0 },
+ { PCI_DEVICE_ID_QUATECH_SSCLP200, 0 },
+ { PCI_DEVICE_ID_QUATECH_SPPXP_100, 0 },
+ { 0, }
+};
+
+static int pci_quatech_amcc(u16 devid)
+{
+ struct quatech_feature *qf = &quatech_cards[0];
+ while (qf->devid) {
+ if (qf->devid == devid)
+ return qf->amcc;
+ qf++;
+ }
+ pr_err("quatech: unknown port type '0x%04X'.\n", devid);
+ return 0;
+}
+
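+/*
+ * Quatech option register (QOPR) access: with 0xBF written to LCR the
+ * scratch register (SCR) aliases QOPR, so save and restore LCR around
+ * each access.
+ */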
+static int pci_quatech_rqopr(struct uart_8250_port *port)
+{
+ unsigned long base = port->port.iobase;
+ u8 LCR, val;
+
+ LCR = inb(base + UART_LCR);
+ outb(0xBF, base + UART_LCR);
+ val = inb(base + UART_SCR);
+ outb(LCR, base + UART_LCR);
+ return val;
+}
+
+static void pci_quatech_wqopr(struct uart_8250_port *port, u8 qopr)
+{
+ unsigned long base = port->port.iobase;
+ u8 LCR, val;
+
+ LCR = inb(base + UART_LCR);
+ outb(0xBF, base + UART_LCR);
+ val = inb(base + UART_SCR);
+ outb(qopr, base + UART_SCR);
+ outb(LCR, base + UART_LCR);
+}
+
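+/*
+ * Quatech modem control register (QMCR) access: as with QOPR, but with
+ * bit 0x10 set in SCR the MCR address maps to QMCR instead.
+ */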
+static int pci_quatech_rqmcr(struct uart_8250_port *port)
+{
+ unsigned long base = port->port.iobase;
+ u8 LCR, val, qmcr;
+
+ LCR = inb(base + UART_LCR);
+ outb(0xBF, base + UART_LCR);
+ val = inb(base + UART_SCR);
+ outb(val | 0x10, base + UART_SCR);
+ qmcr = inb(base + UART_MCR);
+ outb(val, base + UART_SCR);
+ outb(LCR, base + UART_LCR);
+
+ return qmcr;
+}
+
+static void pci_quatech_wqmcr(struct uart_8250_port *port, u8 qmcr)
+{
+ unsigned long base = port->port.iobase;
+ u8 LCR, val;
+
+ LCR = inb(base + UART_LCR);
+ outb(0xBF, base + UART_LCR);
+ val = inb(base + UART_SCR);
+ outb(val | 0x10, base + UART_SCR);
+ outb(qmcr, base + UART_MCR);
+ outb(val, base + UART_SCR);
+ outb(LCR, base + UART_LCR);
+}
+
+static int pci_quatech_has_qmcr(struct uart_8250_port *port)
+{
+ unsigned long base = port->port.iobase;
+ u8 LCR, val;
+
+ LCR = inb(base + UART_LCR);
+ outb(0xBF, base + UART_LCR);
+ val = inb(base + UART_SCR);
+ if (val & 0x20) {
+ outb(0x80, base + UART_LCR);
+ if (!(inb(base + UART_SCR) & 0x20)) {
+ outb(LCR, base + UART_LCR);
+ return 1;
+ }
+ }
+ return 0;
+}
+
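+/*
+ * Sanity-check the extended registers: write known patterns into the top
+ * two bits of QOPR and verify that each one reads back as the matching
+ * QPCR_TEST_GET value before trusting the clock detection below.
+ */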
+static int pci_quatech_test(struct uart_8250_port *port)
+{
+ u8 reg;
+ u8 qopr = pci_quatech_rqopr(port);
+ pci_quatech_wqopr(port, qopr & QPCR_TEST_FOR1);
+ reg = pci_quatech_rqopr(port) & 0xC0;
+ if (reg != QPCR_TEST_GET1)
+ return -EINVAL;
+ pci_quatech_wqopr(port, (qopr & QPCR_TEST_FOR1)|QPCR_TEST_FOR2);
+ reg = pci_quatech_rqopr(port) & 0xC0;
+ if (reg != QPCR_TEST_GET2)
+ return -EINVAL;
+ pci_quatech_wqopr(port, (qopr & QPCR_TEST_FOR1)|QPCR_TEST_FOR3);
+ reg = pci_quatech_rqopr(port) & 0xC0;
+ if (reg != QPCR_TEST_GET3)
+ return -EINVAL;
+ pci_quatech_wqopr(port, (qopr & QPCR_TEST_FOR1)|QPCR_TEST_FOR4);
+ reg = pci_quatech_rqopr(port) & 0xC0;
+ if (reg != QPCR_TEST_GET4)
+ return -EINVAL;
+
+ pci_quatech_wqopr(port, qopr);
+ return 0;
+}
+
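+/*
+ * Work out the UART clock: check that the clock-rate bits in QOPR are
+ * actually writable, then map the X1/X2/X4/X8 setting to the
+ * corresponding uartclk value (falling back to 1.8432MHz).
+ */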
+static int pci_quatech_clock(struct uart_8250_port *port)
+{
+ u8 qopr, reg, set;
+ unsigned long clock;
+
+ if (pci_quatech_test(port) < 0)
+ return 1843200;
+
+ qopr = pci_quatech_rqopr(port);
+
+ pci_quatech_wqopr(port, qopr & ~QOPR_CLOCK_X8);
+ reg = pci_quatech_rqopr(port);
+ if (reg & QOPR_CLOCK_X8) {
+ clock = 1843200;
+ goto out;
+ }
+ pci_quatech_wqopr(port, qopr | QOPR_CLOCK_X8);
+ reg = pci_quatech_rqopr(port);
+ if (!(reg & QOPR_CLOCK_X8)) {
+ clock = 1843200;
+ goto out;
+ }
+ reg &= QOPR_CLOCK_X8;
+ if (reg == QOPR_CLOCK_X2) {
+ clock = 3686400;
+ set = QOPR_CLOCK_X2;
+ } else if (reg == QOPR_CLOCK_X4) {
+ clock = 7372800;
+ set = QOPR_CLOCK_X4;
+ } else if (reg == QOPR_CLOCK_X8) {
+ clock = 14745600;
+ set = QOPR_CLOCK_X8;
+ } else {
+ clock = 1843200;
+ set = QOPR_CLOCK_X1;
+ }
+ qopr &= ~QOPR_CLOCK_RATE_MASK;
+ qopr |= set;
+
+out:
+ pci_quatech_wqopr(port, qopr);
+ return clock;
+}
+
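+/*
+ * Probe for RS422 capability: on ports that have a QMCR, write 0xFF and
+ * see whether any bits stick; restore the old value afterwards.
+ */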
+static int pci_quatech_rs422(struct uart_8250_port *port)
+{
+ u8 qmcr;
+ int rs422 = 0;
+
+ if (!pci_quatech_has_qmcr(port))
+ return 0;
+ qmcr = pci_quatech_rqmcr(port);
+ pci_quatech_wqmcr(port, 0xFF);
+ if (pci_quatech_rqmcr(port))
+ rs422 = 1;
+ pci_quatech_wqmcr(port, qmcr);
+ return rs422;
+}
+
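+/*
+ * One-time init: boards flagged as AMCC based get the bridge registers at
+ * BAR 0 offsets 0x38/0x3c prodded once before the ports are set up.
+ */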
+static int pci_quatech_init(struct pci_dev *dev)
+{
+ if (pci_quatech_amcc(dev->device)) {
+ unsigned long base = pci_resource_start(dev, 0);
+ if (base) {
+ u32 tmp;
+ outl(inl(base + 0x38), base + 0x38);
+ tmp = inl(base + 0x3c);
+ outl(tmp | 0x01000000, base + 0x3c);
+ outl(tmp, base + 0x3c);
+ }
+ }
+ return 0;
+}
+
+static int pci_quatech_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ /* Needed by pci_quatech calls below */
+ port->port.iobase = pci_resource_start(priv->dev, FL_GET_BASE(board->flags));
+ /* Set up the clocking */
+ port->port.uartclk = pci_quatech_clock(port);
+ /* For now just warn about RS422 */
+ if (pci_quatech_rs422(port))
+ pr_warn("quatech: software control of RS422 features not currently supported.\n");
+ return pci_default_setup(priv, board, port, idx);
+}
+
+static void pci_quatech_exit(struct pci_dev *dev)
+{
+}
+
static int pci_default_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -1318,6 +1565,9 @@ pci_wch_ch353_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022
#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
+#define PCI_VENDOR_ID_SUNIX 0x1fd4
+#define PCI_DEVICE_ID_SUNIX_1999 0x1999
+
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
@@ -1541,6 +1791,16 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_ni8430_setup,
.exit = pci_ni8430_exit,
},
+ /* Quatech */
+ {
+ .vendor = PCI_VENDOR_ID_QUATECH,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_quatech_init,
+ .setup = pci_quatech_setup,
+ .exit = pci_quatech_exit,
+ },
/*
* Panacom
*/
@@ -1704,6 +1964,23 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_timedia_setup,
},
/*
+ * SUNIX (Timedia) cards
+ * Do not "probe" for these cards as there is at least one combination
+ * card that should be handled by parport_pc that doesn't match the
+ * rule in pci_timedia_probe.
+ * Its part number is MIO5079A but its subdevice ID is 0x0102.
+ * There are some boards with part number SER5037AL that report
+ * subdevice ID 0x0002.
+ */
+ {
+ .vendor = PCI_VENDOR_ID_SUNIX,
+ .device = PCI_DEVICE_ID_SUNIX_1999,
+ .subvendor = PCI_VENDOR_ID_SUNIX,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_timedia_init,
+ .setup = pci_timedia_setup,
+ },
+ /*
* Exar cards
*/
{
@@ -3506,18 +3783,70 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_ROMULUS,
0x10b5, 0x106a, 0, 0,
pbn_plx_romulus },
+ /*
+ * Quatech cards. These actually have configurable clocks but for
+ * now we just use the default.
+ *
+ * 100 series are RS232, 200 series RS422.
+ */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSC100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b1_4_115200 },
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b1_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC100E,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC200E,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSC200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_4_115200 },
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESC100D,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b1_8_115200 },
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESC100M,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b1_8_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCP100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_4_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCP100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCP200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_4_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCP200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCLP100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCLP100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SSCLP100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_1_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSCLP200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSCLP200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SSCLP200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_1_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESCLP100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_8_115200 },
+
{ PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_OXSEMI_16PCI954,
PCI_VENDOR_ID_SPECIALIX, PCI_SUBDEVICE_ID_SPECIALIX_SPEED4,
0, 0,
@@ -3902,6 +4231,19 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_b0_bt_1_921600 },
/*
+ * SUNIX (TIMEDIA)
+ */
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999,
+ PCI_VENDOR_ID_SUNIX, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xffff00,
+ pbn_b0_bt_1_921600 },
+
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999,
+ PCI_VENDOR_ID_SUNIX, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ pbn_b0_bt_1_921600 },
+
+ /*
* AFAVLAB serial card, from Harald Welte <laforge@gnumonks.org>
*/
{ PCI_VENDOR_ID_AFAVLAB, PCI_DEVICE_ID_AFAVLAB_P028,
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index c31133a6ea8e..2ef9537bcb2c 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -84,6 +84,14 @@ config SERIAL_8250_GSC
depends on SERIAL_8250 && GSC
default SERIAL_8250
+config SERIAL_8250_DMA
+ bool "DMA support for 16550 compatible UART controllers" if EXPERT
+ depends on SERIAL_8250 && DMADEVICES=y
+ default SERIAL_8250
+ help
+ This builds DMA support that can be used with 8250/16550
+ compatible UART controllers that support DMA signaling.
+
config SERIAL_8250_PCI
tristate "8250/16550 PCI device support" if EXPERT
depends on SERIAL_8250 && PCI
@@ -249,15 +257,6 @@ config SERIAL_8250_ACORN
system, say Y to this option. The driver can handle 1, 2, or 3 port
cards. If unsure, say N.
-config SERIAL_8250_RM9K
- bool "Support for MIPS RM9xxx integrated serial port"
- depends on SERIAL_8250 != n && SERIAL_RM9000
- select SERIAL_8250_SHARE_IRQ
- help
- Selecting this option will add support for the integrated serial
- port hardware found on MIPS RM9122 and similar processors.
- If unsure, say N.
-
config SERIAL_8250_FSL
bool
depends on SERIAL_8250_CONSOLE && PPC_UDBG_16550
@@ -265,7 +264,7 @@ config SERIAL_8250_FSL
config SERIAL_8250_DW
tristate "Support for Synopsys DesignWare 8250 quirks"
- depends on SERIAL_8250 && OF
+ depends on SERIAL_8250
help
Selecting this option will enable handling of the extra features
present in the Synopsys DesignWare APB UART.
@@ -277,3 +276,11 @@ config SERIAL_8250_EM
Selecting this option will add support for the integrated serial
port hardware found on the Emma Mobile line of processors.
If unsure, say N.
+
+config SERIAL_8250_RT288X
+ bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
+ depends on SERIAL_8250 && (SOC_RT288X || SOC_RT305X || SOC_RT3883)
+ help
+ If you have a Ralink RT288x/RT305x SoC based board and want to use the
+ serial port, say Y to this option. The driver can handle up to 2 serial
+ ports. If unsure, say N.
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 108fe7fe13e2..a23838a4d535 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_SERIAL_8250) += 8250_core.o
8250_core-y := 8250.o
8250_core-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
+8250_core-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o
obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 59c23d038106..cf9210db9fa9 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -2,8 +2,10 @@
# Serial device configuration
#
+if TTY
+
menu "Serial drivers"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && GENERIC_HARDIRQS
source "drivers/tty/serial/8250/Kconfig"
@@ -269,6 +271,17 @@ config SERIAL_SIRFSOC_CONSOLE
your boot loader about how to pass options to the kernel at
boot time.)
+config SERIAL_TEGRA
+ tristate "NVIDIA Tegra20/30 SoC serial controller"
+ depends on ARCH_TEGRA && TEGRA20_APB_DMA
+ select SERIAL_CORE
+ help
+ Support for the on-chip UARTs on the NVIDIA Tegra series SOCs
+ providing /dev/ttyHS0, 1, 2, 3 and 4 (note, some machines may not
+ provide all of these ports, depending on how the serial ports
+ are enabled). This driver uses the APB DMA to achieve higher baud
+ rates and better performance.
+
config SERIAL_MAX3100
tristate "MAX3100 support"
depends on SPI
@@ -716,19 +729,19 @@ config SERIAL_SH_SCI_DMA
config SERIAL_PNX8XXX
bool "Enable PNX8XXX SoCs' UART Support"
- depends on SOC_PNX8550 || SOC_PNX833X
+ depends on SOC_PNX833X
select SERIAL_CORE
help
- If you have a MIPS-based Philips SoC such as PNX8550 or PNX8330
- and you want to use serial ports, say Y. Otherwise, say N.
+ If you have a MIPS-based Philips SoC such as PNX8330 and you want
+ to use serial ports, say Y. Otherwise, say N.
config SERIAL_PNX8XXX_CONSOLE
bool "Enable PNX8XX0 serial console"
depends on SERIAL_PNX8XXX
select SERIAL_CORE_CONSOLE
help
- If you have a MIPS-based Philips SoC such as PNX8550 or PNX8330
- and you want to use serial console, say Y. Otherwise, say N.
+ If you have a MIPS-based Philips SoC such as PNX8330 and you want
+ to use serial console, say Y. Otherwise, say N.
config SERIAL_HS_LPC32XX
tristate "LPC32XX high speed serial port support"
@@ -1447,4 +1460,30 @@ config SERIAL_ARC_NR_PORTS
Set this to the number of serial ports you want the driver
to support.
+config SERIAL_RP2
+ tristate "Comtrol RocketPort EXPRESS/INFINITY support"
+ depends on PCI
+ select SERIAL_CORE
+ help
+ This driver supports the Comtrol RocketPort EXPRESS and
+ RocketPort INFINITY families of PCI/PCIe multiport serial adapters.
+ These adapters use a "RocketPort 2" ASIC that is not compatible
+ with the original RocketPort driver (CONFIG_ROCKETPORT).
+
+ To compile this driver as a module, choose M here: the
+ module will be called rp2.
+
+ If you want to compile this driver into the kernel, say Y here. If
+ you don't have a suitable RocketPort card installed, say N.
+
+config SERIAL_RP2_NR_UARTS
+ int "Maximum number of RocketPort EXPRESS/INFINITY ports"
+ depends on SERIAL_RP2
+ default "32"
+ help
+ If multiple cards are present, the default limit of 32 ports may
+ need to be increased.
+
endmenu
+
+endif # TTY
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index df1b998c436b..eedfec40e3dd 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -80,6 +80,8 @@ obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
obj-$(CONFIG_SERIAL_SIRFSOC) += sirfsoc_uart.o
+obj-$(CONFIG_SERIAL_TEGRA) += serial-tegra.o
obj-$(CONFIG_SERIAL_AR933X) += ar933x_uart.o
obj-$(CONFIG_SERIAL_EFM32_UART) += efm32-uart.o
obj-$(CONFIG_SERIAL_ARC) += arc_uart.o
+obj-$(CONFIG_SERIAL_RP2) += rp2.o
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 872f14ae43d2..c6bdb943726b 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -139,7 +139,7 @@ static void altera_jtaguart_rx_chars(struct altera_jtaguart *pp)
uart_insert_char(port, 0, 0, ch, flag);
}
- tty_flip_buffer_push(port->state->port.tty);
+ tty_flip_buffer_push(&port->state->port);
}
static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp)
@@ -493,11 +493,9 @@ static int __init altera_jtaguart_init(void)
if (rc)
return rc;
rc = platform_driver_register(&altera_jtaguart_platform_driver);
- if (rc) {
+ if (rc)
uart_unregister_driver(&altera_jtaguart_driver);
- return rc;
- }
- return 0;
+ return rc;
}
static void __exit altera_jtaguart_exit(void)
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 684a0808e1c7..13471dd95793 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -231,7 +231,7 @@ static void altera_uart_rx_chars(struct altera_uart *pp)
flag);
}
- tty_flip_buffer_push(port->state->port.tty);
+ tty_flip_buffer_push(&port->state->port);
}
static void altera_uart_tx_chars(struct altera_uart *pp)
@@ -637,11 +637,9 @@ static int __init altera_uart_init(void)
if (rc)
return rc;
rc = platform_driver_register(&altera_uart_platform_driver);
- if (rc) {
+ if (rc)
uart_unregister_driver(&altera_uart_driver);
- return rc;
- }
- return 0;
+ return rc;
}
static void __exit altera_uart_exit(void)
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 22317dd16474..c36840519527 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -116,7 +116,6 @@ static void pl010_enable_ms(struct uart_port *port)
static void pl010_rx_chars(struct uart_amba_port *uap)
{
- struct tty_struct *tty = uap->port.state->port.tty;
unsigned int status, ch, flag, rsr, max_count = 256;
status = readb(uap->port.membase + UART01x_FR);
@@ -165,7 +164,7 @@ static void pl010_rx_chars(struct uart_amba_port *uap)
status = readb(uap->port.membase + UART01x_FR);
}
spin_unlock(&uap->port.lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&uap->port.state->port);
spin_lock(&uap->port.lock);
}
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 7fca4022a8b2..3ea5408fcbeb 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -698,7 +698,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
u32 pending, bool use_buf_b,
bool readfifo)
{
- struct tty_struct *tty = uap->port.state->port.tty;
+ struct tty_port *port = &uap->port.state->port;
struct pl011_sgbuf *sgbuf = use_buf_b ?
&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
struct device *dev = uap->dmarx.chan->device->dev;
@@ -715,8 +715,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
* Note that tty_insert_flip_buf() tries to take as many chars
* as it can.
*/
- dma_count = tty_insert_flip_string(uap->port.state->port.tty,
- sgbuf->buf, pending);
+ dma_count = tty_insert_flip_string(port, sgbuf->buf, pending);
/* Return buffer to device */
dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
@@ -754,7 +753,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
dev_vdbg(uap->port.dev,
"Took %d chars from DMA buffer and %d chars from the FIFO\n",
dma_count, fifotaken);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
spin_lock(&uap->port.lock);
}
@@ -1076,12 +1075,10 @@ static void pl011_enable_ms(struct uart_port *port)
static void pl011_rx_chars(struct uart_amba_port *uap)
{
- struct tty_struct *tty = uap->port.state->port.tty;
-
pl011_fifo_to_tty(uap);
spin_unlock(&uap->port.lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&uap->port.state->port);
/*
* If we were temporarily out of DMA mode for a while,
* attempt to switch back to DMA mode again.
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 59ae2b53e765..6331464d9101 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -78,7 +78,6 @@ static void apbuart_enable_ms(struct uart_port *port)
static void apbuart_rx_chars(struct uart_port *port)
{
- struct tty_struct *tty = port->state->port.tty;
unsigned int status, ch, rsr, flag;
unsigned int max_chars = port->fifosize;
@@ -126,7 +125,7 @@ static void apbuart_rx_chars(struct uart_port *port)
status = UART_GET_STATUS(port);
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
}
static void apbuart_tx_chars(struct uart_port *port)
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 505c490c0b44..27f20c57abed 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -297,10 +297,9 @@ static void ar933x_uart_set_termios(struct uart_port *port,
static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
{
- struct tty_struct *tty;
+ struct tty_port *port = &up->port.state->port;
int max_count = 256;
- tty = tty_port_tty_get(&up->port.state->port);
do {
unsigned int rdata;
unsigned char ch;
@@ -313,11 +312,6 @@ static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
ar933x_uart_write(up, AR933X_UART_DATA_REG,
AR933X_UART_DATA_RX_CSR);
- if (!tty) {
- /* discard the data if no tty available */
- continue;
- }
-
up->port.icount.rx++;
ch = rdata & AR933X_UART_DATA_TX_RX_MASK;
@@ -325,13 +319,10 @@ static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
continue;
if ((up->port.ignore_status_mask & AR933X_DUMMY_STATUS_RD) == 0)
- tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ tty_insert_flip_char(port, ch, TTY_NORMAL);
} while (max_count-- > 0);
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty_flip_buffer_push(port);
}
static void ar933x_uart_tx_chars(struct ar933x_uart_port *up)
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 3e0b3fac6a0e..d97e194b6bc5 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -37,6 +37,8 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
/*************************************
* ARC UART Hardware Specs
@@ -209,12 +211,8 @@ static void arc_serial_start_tx(struct uart_port *port)
static void arc_serial_rx_chars(struct arc_uart_port *uart)
{
- struct tty_struct *tty = tty_port_tty_get(&uart->port.state->port);
unsigned int status, ch, flg = 0;
- if (!tty)
- return;
-
/*
 * UART has 4 deep RX-FIFO. Driver's recognition of this fact
* is very subtle. Here's how ...
@@ -250,10 +248,8 @@ static void arc_serial_rx_chars(struct arc_uart_port *uart)
uart_insert_char(&uart->port, status, RXOERR, ch, flg);
done:
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&uart->port.state->port);
}
-
- tty_kref_put(tty);
}
/*
@@ -526,18 +522,37 @@ static struct uart_ops arc_serial_pops = {
};
static int
-arc_uart_init_one(struct platform_device *pdev, struct arc_uart_port *uart)
+arc_uart_init_one(struct platform_device *pdev, int dev_id)
{
struct resource *res, *res2;
unsigned long *plat_data;
-
- if (pdev->id < 0 || pdev->id >= CONFIG_SERIAL_ARC_NR_PORTS) {
- dev_err(&pdev->dev, "Wrong uart platform device id.\n");
- return -ENOENT;
- }
+ struct arc_uart_port *uart = &arc_uart_ports[dev_id];
plat_data = ((unsigned long *)(pdev->dev.platform_data));
- uart->baud = plat_data[0];
+ if (!plat_data)
+ return -ENODEV;
+
+ uart->is_emulated = !!plat_data[0]; /* workaround ISS bug */
+
+ if (is_early_platform_device(pdev)) {
+ uart->port.uartclk = plat_data[1];
+ uart->baud = plat_data[2];
+ } else {
+ struct device_node *np = pdev->dev.of_node;
+ u32 val;
+
+ if (of_property_read_u32(np, "clock-frequency", &val)) {
+ dev_err(&pdev->dev, "clock-frequency property NOTset\n");
+ return -EINVAL;
+ }
+ uart->port.uartclk = val;
+
+ if (of_property_read_u32(np, "current-speed", &val)) {
+ dev_err(&pdev->dev, "current-speed property NOT set\n");
+ return -EINVAL;
+ }
+ uart->baud = val;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -557,10 +572,9 @@ arc_uart_init_one(struct platform_device *pdev, struct arc_uart_port *uart)
uart->port.dev = &pdev->dev;
uart->port.iotype = UPIO_MEM;
uart->port.flags = UPF_BOOT_AUTOCONF;
- uart->port.line = pdev->id;
+ uart->port.line = dev_id;
uart->port.ops = &arc_serial_pops;
- uart->port.uartclk = plat_data[1];
uart->port.fifosize = ARC_UART_TX_FIFO_SIZE;
/*
@@ -569,9 +583,6 @@ arc_uart_init_one(struct platform_device *pdev, struct arc_uart_port *uart)
*/
uart->port.ignore_status_mask = 0;
- /* Real Hardware vs. emulated to work around a bug */
- uart->is_emulated = !!plat_data[2];
-
return 0;
}
@@ -648,45 +659,50 @@ static __init void early_serial_write(struct console *con, const char *s,
}
}
-static struct __initdata console arc_early_serial_console = {
+static struct console arc_early_serial_console __initdata = {
.name = "early_ARCuart",
.write = early_serial_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1
};
-static int arc_serial_probe_earlyprintk(struct platform_device *pdev)
+static int __init arc_serial_probe_earlyprintk(struct platform_device *pdev)
{
- arc_early_serial_console.index = pdev->id;
+ int dev_id = pdev->id < 0 ? 0 : pdev->id;
+ int rc;
+
+ arc_early_serial_console.index = dev_id;
- arc_uart_init_one(pdev, &arc_uart_ports[pdev->id]);
+ rc = arc_uart_init_one(pdev, dev_id);
+ if (rc)
+ panic("early console init failed\n");
arc_serial_console_setup(&arc_early_serial_console, NULL);
register_console(&arc_early_serial_console);
return 0;
}
-#else
-static int arc_serial_probe_earlyprintk(struct platform_device *pdev)
-{
- return -ENODEV;
-}
#endif /* CONFIG_SERIAL_ARC_CONSOLE */
static int arc_serial_probe(struct platform_device *pdev)
{
- struct arc_uart_port *uart;
- int rc;
+ int rc, dev_id;
+ struct device_node *np = pdev->dev.of_node;
+
+ /* no device tree device */
+ if (!np)
+ return -ENODEV;
- if (is_early_platform_device(pdev))
- return arc_serial_probe_earlyprintk(pdev);
+ dev_id = of_alias_get_id(np, "serial");
+ if (dev_id < 0)
+ dev_id = 0;
- uart = &arc_uart_ports[pdev->id];
- rc = arc_uart_init_one(pdev, uart);
+ rc = arc_uart_init_one(pdev, dev_id);
if (rc)
return rc;
- return uart_add_one_port(&arc_uart_driver, &uart->port);
+ rc = uart_add_one_port(&arc_uart_driver, &arc_uart_ports[dev_id].port);
+ return rc;
}
static int arc_serial_remove(struct platform_device *pdev)
@@ -695,16 +711,32 @@ static int arc_serial_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id arc_uart_dt_ids[] = {
+ { .compatible = "snps,arc-uart" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, arc_uart_dt_ids);
+
static struct platform_driver arc_platform_driver = {
.probe = arc_serial_probe,
.remove = arc_serial_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = arc_uart_dt_ids,
},
};
#ifdef CONFIG_SERIAL_ARC_CONSOLE
+
+static struct platform_driver early_arc_platform_driver __initdata = {
+ .probe = arc_serial_probe_earlyprintk,
+ .remove = arc_serial_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
/*
* Register an early platform driver of "earlyprintk" class.
* ARCH platform code installs the driver and probes the early devices
@@ -712,7 +744,7 @@ static struct platform_driver arc_platform_driver = {
* or it could be done independently, for all "earlyprintk" class drivers.
* [see arch/arc/plat-arcfpga/platform.c]
*/
-early_platform_init("earlyprintk", &arc_platform_driver);
+early_platform_init("earlyprintk", &early_arc_platform_driver);
#endif /* CONFIG_SERIAL_ARC_CONSOLE */
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 922e85aeb63a..d4a7c241b751 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -774,14 +774,14 @@ static void atmel_rx_from_ring(struct uart_port *port)
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
- tty_flip_buffer_push(port->state->port.tty);
+ tty_flip_buffer_push(&port->state->port);
spin_lock(&port->lock);
}
static void atmel_rx_from_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
struct atmel_dma_buffer *pdc;
int rx_idx = atmel_port->pdc_rx_idx;
unsigned int head;
@@ -820,7 +820,8 @@ static void atmel_rx_from_dma(struct uart_port *port)
*/
count = head - tail;
- tty_insert_flip_string(tty, pdc->buf + pdc->ofs, count);
+ tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
+ count);
dma_sync_single_for_device(port->dev, pdc->dma_addr,
pdc->dma_size, DMA_FROM_DEVICE);
@@ -848,7 +849,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
spin_lock(&port->lock);
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index c76a226080f2..719594e5fc21 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -235,14 +235,13 @@ static const char *bcm_uart_type(struct uart_port *port)
*/
static void bcm_uart_do_rx(struct uart_port *port)
{
- struct tty_struct *tty;
+ struct tty_port *tty_port = &port->state->port;
unsigned int max_count;
/* limit number of char read in interrupt, should not be
* higher than fifo size anyway since we're much faster than
* serial port */
max_count = 32;
- tty = port->state->port.tty;
do {
unsigned int iestat, c, cstat;
char flag;
@@ -261,7 +260,7 @@ static void bcm_uart_do_rx(struct uart_port *port)
bcm_uart_writel(port, val, UART_CTL_REG);
port->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tty_port, 0, TTY_OVERRUN);
}
if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
@@ -300,11 +299,11 @@ static void bcm_uart_do_rx(struct uart_port *port)
if ((cstat & port->ignore_status_mask) == 0)
- tty_insert_flip_char(tty, c, flag);
+ tty_insert_flip_char(tty_port, c, flag);
} while (--max_count);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tty_port);
}
/*
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index f5d117379b60..487c173b0f72 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -149,7 +149,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id)
{
struct sport_uart_port *up = dev_id;
- struct tty_struct *tty = up->port.state->port.tty;
+ struct tty_port *port = &up->port.state->port;
unsigned int ch;
spin_lock(&up->port.lock);
@@ -159,9 +159,10 @@ static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id)
up->port.icount.rx++;
if (!uart_handle_sysrq_char(&up->port, ch))
- tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ tty_insert_flip_char(port, ch, TTY_NORMAL);
}
- tty_flip_buffer_push(tty);
+ /* XXX this won't deadlock with lowlat? */
+ tty_flip_buffer_push(port);
spin_unlock(&up->port.lock);
@@ -182,7 +183,6 @@ static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id)
static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
{
struct sport_uart_port *up = dev_id;
- struct tty_struct *tty = up->port.state->port.tty;
unsigned int stat = SPORT_GET_STAT(up);
spin_lock(&up->port.lock);
@@ -190,7 +190,7 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
/* Overflow in RX FIFO */
if (stat & ROVF) {
up->port.icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&up->port.state->port, 0, TTY_OVERRUN);
SPORT_PUT_STAT(up, ROVF); /* Clear ROVF bit */
}
/* These should not happen */
@@ -205,6 +205,8 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
SSYNC();
spin_unlock(&up->port.lock);
+ /* XXX we don't push the overrun bit to TTY? */
+
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 2e2b2c1cb722..12dceda9db33 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -223,7 +223,6 @@ static void bfin_serial_enable_ms(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_PIO
static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
{
- struct tty_struct *tty = NULL;
unsigned int status, ch, flg;
static struct timeval anomaly_start = { .tv_sec = 0 };
@@ -242,11 +241,9 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
return;
}
- if (!uart->port.state || !uart->port.state->port.tty)
+ if (!uart->port.state)
return;
#endif
- tty = uart->port.state->port.tty;
-
if (ANOMALY_05000363) {
/* The BF533 (and BF561) family of processors have a nice anomaly
* where they continuously generate characters for a "single" break.
@@ -325,7 +322,7 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
uart_insert_char(&uart->port, status, OE, ch, flg);
ignore_char:
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&uart->port.state->port);
}
static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
@@ -426,7 +423,6 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
{
- struct tty_struct *tty = uart->port.state->port.tty;
int i, flg, status;
status = UART_GET_LSR(uart);
@@ -471,7 +467,7 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
}
dma_ignore_char:
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&uart->port.state->port);
}
void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 3fd2526d121e..bfb17968c8db 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -85,12 +85,8 @@ static void uart_clps711x_enable_ms(struct uart_port *port)
static irqreturn_t uart_clps711x_int_rx(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
unsigned int status, ch, flg;
- if (!tty)
- return IRQ_HANDLED;
-
for (;;) {
status = clps_readl(SYSFLG(port));
if (status & SYSFLG_URXFE)
@@ -130,9 +126,7 @@ static irqreturn_t uart_clps711x_int_rx(int irq, void *dev_id)
uart_insert_char(port, status, UARTDR_OVERR, ch, flg);
}
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->state->port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index ad0caf176808..97f4e1858649 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -245,7 +245,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
int i;
unsigned char ch;
u8 *cp;
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
cbd_t __iomem *bdp;
u16 status;
@@ -276,7 +276,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
/* If we have not enough room in tty flip buffer, then we try
* later, which will be the next rx-interrupt or a timeout
*/
- if(tty_buffer_request_room(tty, i) < i) {
+ if (tty_buffer_request_room(tport, i) < i) {
printk(KERN_WARNING "No room in flip buffer\n");
return;
}
@@ -302,7 +302,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
}
#endif
error_return:
- tty_insert_flip_char(tty, ch, flg);
+ tty_insert_flip_char(tport, ch, flg);
} /* End while (i--) */
@@ -322,7 +322,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
pinfo->rx_cur = bdp;
/* activate BH processing */
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
return;
@@ -507,7 +507,7 @@ static void cpm_uart_set_termios(struct uart_port *port,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
if (baud < HW_BUF_SPD_THRESHOLD ||
- (pinfo->port.state && pinfo->port.state->port.tty->low_latency))
+ (pinfo->port.state && pinfo->port.state->port.low_latency))
pinfo->rx_fifosize = 1;
else
pinfo->rx_fifosize = RX_BUF_SIZE;
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 35ee6a2c6877..5f37c31e32bc 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -1760,8 +1760,7 @@ add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char fl
info->icount.rx++;
} else {
- struct tty_struct *tty = info->port.tty;
- tty_insert_flip_char(tty, data, flag);
+ tty_insert_flip_char(&info->port, data, flag);
info->icount.rx++;
}
@@ -2105,22 +2104,15 @@ static int force_eop_if_needed(struct e100_serial *info)
static void flush_to_flip_buffer(struct e100_serial *info)
{
- struct tty_struct *tty;
struct etrax_recv_buffer *buffer;
unsigned long flags;
local_irq_save(flags);
- tty = info->port.tty;
-
- if (!tty) {
- local_irq_restore(flags);
- return;
- }
while ((buffer = info->first_recv_buffer) != NULL) {
unsigned int count = buffer->length;
- tty_insert_flip_string(tty, buffer->buffer, count);
+ tty_insert_flip_string(&info->port, buffer->buffer, count);
info->recv_cnt -= count;
if (count == buffer->length) {
@@ -2139,7 +2131,7 @@ static void flush_to_flip_buffer(struct e100_serial *info)
local_irq_restore(flags);
/* This includes a check for low-latency */
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&info->port);
}
static void check_flush_timeout(struct e100_serial *info)
@@ -2275,12 +2267,6 @@ static
struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info)
{
unsigned long data_read;
- struct tty_struct *tty = info->port.tty;
-
- if (!tty) {
- printk("!NO TTY!\n");
- return info;
- }
/* Read data and status at the same time */
data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]);
@@ -2338,8 +2324,7 @@ more_data:
data_in, data_read);
char flag = TTY_NORMAL;
if (info->errorcode == ERRCODE_INSERT_BREAK) {
- struct tty_struct *tty = info->port.tty;
- tty_insert_flip_char(tty, 0, flag);
+ tty_insert_flip_char(&info->port, 0, flag);
info->icount.rx++;
}
@@ -2353,7 +2338,7 @@ more_data:
info->icount.frame++;
flag = TTY_FRAME;
}
- tty_insert_flip_char(tty, data, flag);
+ tty_insert_flip_char(&info->port, data, flag);
info->errorcode = 0;
}
info->break_detected_cnt = 0;
@@ -2369,7 +2354,7 @@ more_data:
log_int(rdpc(), 0, 0);
}
);
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(&info->port,
IO_EXTRACT(R_SERIAL0_READ, data_in, data_read),
TTY_NORMAL);
} else {
@@ -2384,7 +2369,7 @@ more_data:
goto more_data;
}
- tty_flip_buffer_push(info->port.tty);
+ tty_flip_buffer_push(&info->port);
return info;
}
@@ -3137,7 +3122,7 @@ static int rs_raw_write(struct tty_struct *tty,
/* first some sanity checks */
- if (!tty || !info->xmit.buf)
+ if (!info->xmit.buf)
return 0;
#ifdef SERIAL_DEBUG_DATA
@@ -3464,7 +3449,7 @@ set_serial_info(struct e100_serial *info,
info->type = new_serial.type;
info->close_delay = new_serial.close_delay;
info->closing_wait = new_serial.closing_wait;
- info->port.tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ info->port.low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
check_and_exit:
if (info->flags & ASYNC_INITIALIZED) {
@@ -4108,7 +4093,7 @@ rs_open(struct tty_struct *tty, struct file * filp)
tty->driver_data = info;
info->port.tty = tty;
- tty->low_latency = !!(info->flags & ASYNC_LOW_LATENCY);
+ info->port.low_latency = !!(info->flags & ASYNC_LOW_LATENCY);
/*
* If the port is in the middle of closing, bail out now
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 6491b8644a7f..2f2b2e538a54 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -187,7 +187,6 @@ static inline void dz_receive_chars(struct dz_mux *mux)
{
struct uart_port *uport;
struct dz_port *dport = &mux->dport[0];
- struct tty_struct *tty = NULL;
struct uart_icount *icount;
int lines_rx[DZ_NB_PORT] = { [0 ... DZ_NB_PORT - 1] = 0 };
unsigned char ch, flag;
@@ -197,7 +196,6 @@ static inline void dz_receive_chars(struct dz_mux *mux)
while ((status = dz_in(dport, DZ_RBUF)) & DZ_DVAL) {
dport = &mux->dport[LINE(status)];
uport = &dport->port;
- tty = uport->state->port.tty; /* point to the proper dev */
ch = UCHAR(status); /* grab the char */
flag = TTY_NORMAL;
@@ -249,7 +247,7 @@ static inline void dz_receive_chars(struct dz_mux *mux)
}
for (i = 0; i < DZ_NB_PORT; i++)
if (lines_rx[i])
- tty_flip_buffer_push(mux->dport[i].port.state->port.tty);
+ tty_flip_buffer_push(&mux->dport[i].port.state->port);
}
/*
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index a8cbb2670521..7d199c8e1a75 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -81,6 +81,7 @@ struct efm32_uart_port {
struct uart_port port;
unsigned int txirq;
struct clk *clk;
+ struct efm32_uart_pdata pdata;
};
#define to_efm_port(_port) container_of(_port, struct efm32_uart_port, port)
#define efm_debug(efm_port, format, arg...) \
@@ -194,8 +195,7 @@ static void efm32_uart_break_ctl(struct uart_port *port, int ctl)
/* not possible without fiddling with gpios */
}
-static void efm32_uart_rx_chars(struct efm32_uart_port *efm_port,
- struct tty_struct *tty)
+static void efm32_uart_rx_chars(struct efm32_uart_port *efm_port)
{
struct uart_port *port = &efm_port->port;
@@ -237,8 +237,8 @@ static void efm32_uart_rx_chars(struct efm32_uart_port *efm_port,
rxdata & UARTn_RXDATAX_RXDATA__MASK))
continue;
- if (tty && (rxdata & port->ignore_status_mask) == 0)
- tty_insert_flip_char(tty,
+ if ((rxdata & port->ignore_status_mask) == 0)
+ tty_insert_flip_char(&port->state->port,
rxdata & UARTn_RXDATAX_RXDATA__MASK, flag);
}
}
@@ -249,15 +249,13 @@ static irqreturn_t efm32_uart_rxirq(int irq, void *data)
u32 irqflag = efm32_uart_read32(efm_port, UARTn_IF);
int handled = IRQ_NONE;
struct uart_port *port = &efm_port->port;
- struct tty_struct *tty;
+ struct tty_port *tport = &port->state->port;
spin_lock(&port->lock);
- tty = tty_kref_get(port->state->port.tty);
-
if (irqflag & UARTn_IF_RXDATAV) {
efm32_uart_write32(efm_port, UARTn_IF_RXDATAV, UARTn_IFC);
- efm32_uart_rx_chars(efm_port, tty);
+ efm32_uart_rx_chars(efm_port);
handled = IRQ_HANDLED;
}
@@ -265,16 +263,12 @@ static irqreturn_t efm32_uart_rxirq(int irq, void *data)
if (irqflag & UARTn_IF_RXOF) {
efm32_uart_write32(efm_port, UARTn_IF_RXOF, UARTn_IFC);
port->icount.overrun++;
- if (tty)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
handled = IRQ_HANDLED;
}
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty_flip_buffer_push(tport);
spin_unlock(&port->lock);
@@ -300,13 +294,8 @@ static irqreturn_t efm32_uart_txirq(int irq, void *data)
static int efm32_uart_startup(struct uart_port *port)
{
struct efm32_uart_port *efm_port = to_efm_port(port);
- u32 location = 0;
- struct efm32_uart_pdata *pdata = dev_get_platdata(port->dev);
int ret;
- if (pdata)
- location = UARTn_ROUTE_LOCATION(pdata->location);
-
ret = clk_enable(efm_port->clk);
if (ret) {
efm_debug(efm_port, "failed to enable clk\n");
@@ -315,7 +304,9 @@ static int efm32_uart_startup(struct uart_port *port)
port->uartclk = clk_get_rate(efm_port->clk);
/* Enable pins at configured location */
- efm32_uart_write32(efm_port, location | UARTn_ROUTE_RXPEN | UARTn_ROUTE_TXPEN,
+ efm32_uart_write32(efm_port,
+ UARTn_ROUTE_LOCATION(efm_port->pdata.location) |
+ UARTn_ROUTE_RXPEN | UARTn_ROUTE_TXPEN,
UARTn_ROUTE);
ret = request_irq(port->irq, efm32_uart_rxirq, 0,
@@ -674,11 +665,24 @@ static int efm32_uart_probe_dt(struct platform_device *pdev,
struct efm32_uart_port *efm_port)
{
struct device_node *np = pdev->dev.of_node;
+ u32 location;
int ret;
if (!np)
return 1;
+ ret = of_property_read_u32(np, "location", &location);
+ if (!ret) {
+ if (location > 5) {
+ dev_err(&pdev->dev, "invalid location\n");
+ return -EINVAL;
+ }
+ efm_debug(efm_port, "using location %u\n", location);
+ efm_port->pdata.location = location;
+ } else {
+ efm_debug(efm_port, "fall back to location 0\n");
+ }
+
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id: %d\n", ret);
@@ -738,10 +742,16 @@ static int efm32_uart_probe(struct platform_device *pdev)
efm_port->port.flags = UPF_BOOT_AUTOCONF;
ret = efm32_uart_probe_dt(pdev, efm_port);
- if (ret > 0)
+ if (ret > 0) {
/* not created by device tree */
+ const struct efm32_uart_pdata *pdata = dev_get_platdata(&pdev->dev);
+
efm_port->port.line = pdev->id;
+ if (pdata)
+ efm_port->pdata = *pdata;
+ }
+
if (efm_port->port.line >= 0 &&
efm_port->port.line < ARRAY_SIZE(efm32_uart_ports))
efm32_uart_ports[efm_port->port.line] = efm_port;
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 72b6334bcf1a..bc9e6b017b05 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -734,7 +734,7 @@ static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
{
short int count, rcv_buff;
- struct tty_struct *tty = icom_port->uart_port.state->port.tty;
+ struct tty_port *port = &icom_port->uart_port.state->port;
unsigned short int status;
struct uart_icount *icount;
unsigned long offset;
@@ -761,7 +761,7 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
/* Block copy all but the last byte as this may have status */
if (count > 0) {
first = icom_port->recv_buf[offset];
- tty_insert_flip_string(tty, icom_port->recv_buf + offset, count - 1);
+ tty_insert_flip_string(port, icom_port->recv_buf + offset, count - 1);
}
icount = &icom_port->uart_port.icount;
@@ -812,7 +812,7 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
}
- tty_insert_flip_char(tty, *(icom_port->recv_buf + offset + count - 1), flag);
+ tty_insert_flip_char(port, *(icom_port->recv_buf + offset + count - 1), flag);
if (status & SA_FLAGS_OVERRUN)
/*
@@ -820,7 +820,7 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
* reported immediately, and doesn't
* affect the current character
*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
ignore_char:
icom_port->statStg->rcv[rcv_buff].flags = 0;
icom_port->statStg->rcv[rcv_buff].leLength = 0;
@@ -834,7 +834,7 @@ ignore_char:
status = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].flags);
}
icom_port->next_rcv = rcv_buff;
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
static void process_interrupt(u16 port_int_reg,
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 8cb6d8d66a13..68d7ce997ede 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -481,7 +481,6 @@ static int ifx_spi_prepare_tx_buffer(struct ifx_spi_device *ifx_dev)
unsigned char *tx_buffer;
tx_buffer = ifx_dev->tx_buffer;
- memset(tx_buffer, 0, IFX_SPI_TRANSFER_SIZE);
/* make room for required SPI header */
tx_buffer += IFX_SPI_HEADER_OVERHEAD;
@@ -615,7 +614,7 @@ static int ifx_port_activate(struct tty_port *port, struct tty_struct *tty)
tty->driver_data = ifx_dev;
/* allows flip string push from int context */
- tty->low_latency = 1;
+ port->low_latency = 1;
/* set flag to allows data transfer */
set_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags);
@@ -670,12 +669,8 @@ static const struct tty_operations ifx_spi_serial_ops = {
static void ifx_spi_insert_flip_string(struct ifx_spi_device *ifx_dev,
unsigned char *chars, size_t size)
{
- struct tty_struct *tty = tty_port_tty_get(&ifx_dev->tty_port);
- if (!tty)
- return;
- tty_insert_flip_string(tty, chars, size);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(&ifx_dev->tty_port, chars, size);
+ tty_flip_buffer_push(&ifx_dev->tty_port);
}
/**
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 59819121fe9b..147c9e193595 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -48,8 +48,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/irq.h>
#include <linux/platform_data/serial-imx.h>
@@ -73,102 +73,102 @@
#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
/* UART Control Register Bit Fields.*/
-#define URXD_CHARRDY (1<<15)
-#define URXD_ERR (1<<14)
-#define URXD_OVRRUN (1<<13)
-#define URXD_FRMERR (1<<12)
-#define URXD_BRK (1<<11)
-#define URXD_PRERR (1<<10)
-#define UCR1_ADEN (1<<15) /* Auto detect interrupt */
-#define UCR1_ADBR (1<<14) /* Auto detect baud rate */
-#define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */
-#define UCR1_IDEN (1<<12) /* Idle condition interrupt */
-#define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */
-#define UCR1_RDMAEN (1<<8) /* Recv ready DMA enable */
-#define UCR1_IREN (1<<7) /* Infrared interface enable */
-#define UCR1_TXMPTYEN (1<<6) /* Transimitter empty interrupt enable */
-#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
-#define UCR1_SNDBRK (1<<4) /* Send break */
-#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
-#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
-#define UCR1_DOZE (1<<1) /* Doze */
-#define UCR1_UARTEN (1<<0) /* UART enabled */
-#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
-#define UCR2_IRTS (1<<14) /* Ignore RTS pin */
-#define UCR2_CTSC (1<<13) /* CTS pin control */
-#define UCR2_CTS (1<<12) /* Clear to send */
-#define UCR2_ESCEN (1<<11) /* Escape enable */
-#define UCR2_PREN (1<<8) /* Parity enable */
-#define UCR2_PROE (1<<7) /* Parity odd/even */
-#define UCR2_STPB (1<<6) /* Stop */
-#define UCR2_WS (1<<5) /* Word size */
-#define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */
-#define UCR2_ATEN (1<<3) /* Aging Timer Enable */
-#define UCR2_TXEN (1<<2) /* Transmitter enabled */
-#define UCR2_RXEN (1<<1) /* Receiver enabled */
-#define UCR2_SRST (1<<0) /* SW reset */
-#define UCR3_DTREN (1<<13) /* DTR interrupt enable */
-#define UCR3_PARERREN (1<<12) /* Parity enable */
-#define UCR3_FRAERREN (1<<11) /* Frame error interrupt enable */
-#define UCR3_DSR (1<<10) /* Data set ready */
-#define UCR3_DCD (1<<9) /* Data carrier detect */
-#define UCR3_RI (1<<8) /* Ring indicator */
-#define UCR3_TIMEOUTEN (1<<7) /* Timeout interrupt enable */
-#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
-#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
-#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
-#define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */
-#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
-#define UCR3_BPEN (1<<0) /* Preset registers enable */
-#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
-#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
-#define UCR4_INVR (1<<9) /* Inverted infrared reception */
-#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
-#define UCR4_WKEN (1<<7) /* Wake interrupt enable */
-#define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */
-#define UCR4_IRSC (1<<5) /* IR special case */
-#define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */
-#define UCR4_BKEN (1<<2) /* Break condition interrupt enable */
-#define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
-#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
-#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
-#define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */
-#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
-#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
-#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
-#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */
-#define USR1_RTSS (1<<14) /* RTS pin status */
-#define USR1_TRDY (1<<13) /* Transmitter ready interrupt/dma flag */
-#define USR1_RTSD (1<<12) /* RTS delta */
-#define USR1_ESCF (1<<11) /* Escape seq interrupt flag */
-#define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */
-#define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */
-#define USR1_TIMEOUT (1<<7) /* Receive timeout interrupt status */
-#define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */
-#define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */
-#define USR1_AWAKE (1<<4) /* Aysnc wake interrupt flag */
-#define USR2_ADET (1<<15) /* Auto baud rate detect complete */
-#define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */
-#define USR2_DTRF (1<<13) /* DTR edge interrupt flag */
-#define USR2_IDLE (1<<12) /* Idle condition */
-#define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */
-#define USR2_WAKE (1<<7) /* Wake */
-#define USR2_RTSF (1<<4) /* RTS edge interrupt flag */
-#define USR2_TXDC (1<<3) /* Transmitter complete */
-#define USR2_BRCD (1<<2) /* Break condition */
-#define USR2_ORE (1<<1) /* Overrun error */
-#define USR2_RDR (1<<0) /* Recv data ready */
-#define UTS_FRCPERR (1<<13) /* Force parity error */
-#define UTS_LOOP (1<<12) /* Loop tx and rx */
-#define UTS_TXEMPTY (1<<6) /* TxFIFO empty */
-#define UTS_RXEMPTY (1<<5) /* RxFIFO empty */
-#define UTS_TXFULL (1<<4) /* TxFIFO full */
-#define UTS_RXFULL (1<<3) /* RxFIFO full */
-#define UTS_SOFTRST (1<<0) /* Software reset */
+#define URXD_CHARRDY (1<<15)
+#define URXD_ERR (1<<14)
+#define URXD_OVRRUN (1<<13)
+#define URXD_FRMERR (1<<12)
+#define URXD_BRK (1<<11)
+#define URXD_PRERR (1<<10)
+#define UCR1_ADEN (1<<15) /* Auto detect interrupt */
+#define UCR1_ADBR (1<<14) /* Auto detect baud rate */
+#define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */
+#define UCR1_IDEN (1<<12) /* Idle condition interrupt */
+#define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */
+#define UCR1_RDMAEN (1<<8) /* Recv ready DMA enable */
+#define UCR1_IREN (1<<7) /* Infrared interface enable */
+#define UCR1_TXMPTYEN (1<<6) /* Transmitter empty interrupt enable */
+#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
+#define UCR1_SNDBRK (1<<4) /* Send break */
+#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
+#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
+#define UCR1_DOZE (1<<1) /* Doze */
+#define UCR1_UARTEN (1<<0) /* UART enabled */
+#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
+#define UCR2_IRTS (1<<14) /* Ignore RTS pin */
+#define UCR2_CTSC (1<<13) /* CTS pin control */
+#define UCR2_CTS (1<<12) /* Clear to send */
+#define UCR2_ESCEN (1<<11) /* Escape enable */
+#define UCR2_PREN (1<<8) /* Parity enable */
+#define UCR2_PROE (1<<7) /* Parity odd/even */
+#define UCR2_STPB (1<<6) /* Stop */
+#define UCR2_WS (1<<5) /* Word size */
+#define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */
+#define UCR2_ATEN (1<<3) /* Aging Timer Enable */
+#define UCR2_TXEN (1<<2) /* Transmitter enabled */
+#define UCR2_RXEN (1<<1) /* Receiver enabled */
+#define UCR2_SRST (1<<0) /* SW reset */
+#define UCR3_DTREN (1<<13) /* DTR interrupt enable */
+#define UCR3_PARERREN (1<<12) /* Parity enable */
+#define UCR3_FRAERREN (1<<11) /* Frame error interrupt enable */
+#define UCR3_DSR (1<<10) /* Data set ready */
+#define UCR3_DCD (1<<9) /* Data carrier detect */
+#define UCR3_RI (1<<8) /* Ring indicator */
+#define UCR3_TIMEOUTEN (1<<7) /* Timeout interrupt enable */
+#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
+#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
+#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
+#define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */
+#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
+#define UCR3_BPEN (1<<0) /* Preset registers enable */
+#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
+#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
+#define UCR4_INVR (1<<9) /* Inverted infrared reception */
+#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
+#define UCR4_WKEN (1<<7) /* Wake interrupt enable */
+#define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */
+#define UCR4_IRSC (1<<5) /* IR special case */
+#define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */
+#define UCR4_BKEN (1<<2) /* Break condition interrupt enable */
+#define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
+#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
+#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
+#define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */
+#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
+#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
+#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
+#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */
+#define USR1_RTSS (1<<14) /* RTS pin status */
+#define USR1_TRDY (1<<13) /* Transmitter ready interrupt/dma flag */
+#define USR1_RTSD (1<<12) /* RTS delta */
+#define USR1_ESCF (1<<11) /* Escape seq interrupt flag */
+#define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */
+#define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */
+#define USR1_TIMEOUT (1<<7) /* Receive timeout interrupt status */
+#define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */
+#define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */
+#define USR1_AWAKE (1<<4) /* Async wake interrupt flag */
+#define USR2_ADET (1<<15) /* Auto baud rate detect complete */
+#define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */
+#define USR2_DTRF (1<<13) /* DTR edge interrupt flag */
+#define USR2_IDLE (1<<12) /* Idle condition */
+#define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */
+#define USR2_WAKE (1<<7) /* Wake */
+#define USR2_RTSF (1<<4) /* RTS edge interrupt flag */
+#define USR2_TXDC (1<<3) /* Transmitter complete */
+#define USR2_BRCD (1<<2) /* Break condition */
+#define USR2_ORE (1<<1) /* Overrun error */
+#define USR2_RDR (1<<0) /* Recv data ready */
+#define UTS_FRCPERR (1<<13) /* Force parity error */
+#define UTS_LOOP (1<<12) /* Loop tx and rx */
+#define UTS_TXEMPTY (1<<6) /* TxFIFO empty */
+#define UTS_RXEMPTY (1<<5) /* RxFIFO empty */
+#define UTS_TXFULL (1<<4) /* TxFIFO full */
+#define UTS_RXFULL (1<<3) /* RxFIFO full */
+#define UTS_SOFTRST (1<<0) /* Software reset */
/* We've been assigned a range on the "Low-density serial ports" major */
-#define SERIAL_IMX_MAJOR 207
-#define MINOR_START 16
+#define SERIAL_IMX_MAJOR 207
+#define MINOR_START 16
#define DEV_NAME "ttymxc"
/*
@@ -199,7 +199,7 @@ struct imx_port {
struct uart_port port;
struct timer_list timer;
unsigned int old_status;
- int txirq,rxirq,rtsirq;
+ int txirq, rxirq, rtsirq;
unsigned int have_rtscts:1;
unsigned int use_irda:1;
unsigned int irda_inv_rx:1;
@@ -397,7 +397,7 @@ static void imx_stop_rx(struct uart_port *port)
unsigned long temp;
temp = readl(sport->port.membase + UCR2);
- writel(temp &~ UCR2_RXEN, sport->port.membase + UCR2);
+ writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
}
/*
@@ -490,9 +490,8 @@ static irqreturn_t imx_txint(int irq, void *dev_id)
struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock,flags);
- if (sport->port.x_char)
- {
+ spin_lock_irqsave(&sport->port.lock, flags);
+ if (sport->port.x_char) {
/* Send next char */
writel(sport->port.x_char, sport->port.membase + URTX0);
goto out;
@@ -509,18 +508,18 @@ static irqreturn_t imx_txint(int irq, void *dev_id)
uart_write_wakeup(&sport->port);
out:
- spin_unlock_irqrestore(&sport->port.lock,flags);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t imx_rxint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
- unsigned int rx,flg,ignored = 0;
- struct tty_struct *tty = sport->port.state->port.tty;
+ unsigned int rx, flg, ignored = 0;
+ struct tty_port *port = &sport->port.state->port;
unsigned long flags, temp;
- spin_lock_irqsave(&sport->port.lock,flags);
+ spin_lock_irqsave(&sport->port.lock, flags);
while (readl(sport->port.membase + USR2) & USR2_RDR) {
flg = TTY_NORMAL;
@@ -570,12 +569,12 @@ static irqreturn_t imx_rxint(int irq, void *dev_id)
#endif
}
- tty_insert_flip_char(tty, rx, flg);
+ tty_insert_flip_char(port, rx, flg);
}
out:
- spin_unlock_irqrestore(&sport->port.lock,flags);
- tty_flip_buffer_push(tty);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ tty_flip_buffer_push(port);
return IRQ_HANDLED;
}
@@ -654,7 +653,7 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
temp = readl(sport->port.membase + UCR1) & ~UCR1_SNDBRK;
- if ( break_state != 0 )
+ if (break_state != 0)
temp |= UCR1_SNDBRK;
writel(temp, sport->port.membase + UCR1);
@@ -696,8 +695,8 @@ static int imx_startup(struct uart_port *port)
temp |= UCR4_IRSC;
/* set the trigger level for CTS */
- temp &= ~(UCR4_CTSTL_MASK<< UCR4_CTSTL_SHF);
- temp |= CTSTL<< UCR4_CTSTL_SHF;
+ temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
+ temp |= CTSTL << UCR4_CTSTL_SHF;
writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
@@ -799,7 +798,7 @@ static int imx_startup(struct uart_port *port)
* Enable modem status interrupts
*/
imx_enable_ms(&sport->port);
- spin_unlock_irqrestore(&sport->port.lock,flags);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
if (USE_IRDA(sport)) {
struct imxuart_platform_data *pdata;
@@ -909,7 +908,7 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
ucr2 = UCR2_SRST | UCR2_IRTS;
if (termios->c_cflag & CRTSCTS) {
- if( sport->have_rtscts ) {
+ if (sport->have_rtscts) {
ucr2 &= ~UCR2_IRTS;
ucr2 |= UCR2_CTSC;
} else {
@@ -969,12 +968,12 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN),
sport->port.membase + UCR1);
- while ( !(readl(sport->port.membase + USR2) & USR2_TXDC))
+ while (!(readl(sport->port.membase + USR2) & USR2_TXDC))
barrier();
/* then, disable everything */
old_txrxen = readl(sport->port.membase + UCR2);
- writel(old_txrxen & ~( UCR2_TXEN | UCR2_RXEN),
+ writel(old_txrxen & ~(UCR2_TXEN | UCR2_RXEN),
sport->port.membase + UCR2);
old_txrxen &= (UCR2_TXEN | UCR2_RXEN);
@@ -1212,9 +1211,15 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
struct imx_port *sport = imx_ports[co->index];
struct imx_port_ucrs old_ucr;
unsigned int ucr1;
- unsigned long flags;
+ unsigned long flags = 0;
+ int locked = 1;
- spin_lock_irqsave(&sport->port.lock, flags);
+ if (sport->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ else
+ spin_lock_irqsave(&sport->port.lock, flags);
/*
* First, save UCR1/2/3 and then disable interrupts
@@ -1241,7 +1246,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
imx_port_ucrs_restore(&sport->port, &old_ucr);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ if (locked)
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
/*
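The imx_console_write() change above adopts the standard oops-safe console locking pattern: skip the port lock when called from the sysrq handler (which already holds it), and only trylock while an oops is in progress so a wedged lock cannot deadlock the panic path. A minimal sketch of the same pattern for a generic console, with foo_ports[] and foo_console_putchar() as hypothetical stand-ins:

static void foo_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_port *port = &foo_ports[co->index];	/* hypothetical port array */
	unsigned long flags = 0;
	int locked = 1;

	if (port->sysrq)
		locked = 0;		/* sysrq handler already holds port->lock */
	else if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	uart_console_write(port, s, count, foo_console_putchar);

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}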
@@ -1255,7 +1261,7 @@ imx_console_get_options(struct imx_port *sport, int *baud,
if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) {
/* ok, the port was enabled */
- unsigned int ucr2, ubir,ubmr, uartclk;
+ unsigned int ucr2, ubir, ubmr, uartclk;
unsigned int baud_raw;
unsigned int ucfr_rfdiv;
@@ -1301,8 +1307,8 @@ imx_console_get_options(struct imx_port *sport, int *baud,
*baud = (baud_raw + 50) / 100 * 100;
}
- if(*baud != baud_raw)
- printk(KERN_INFO "Serial: Console IMX rounded baud rate from %d to %d\n",
+ if (*baud != baud_raw)
+ pr_info("Console IMX rounded baud rate from %d to %d\n",
baud_raw, *baud);
}
}
@@ -1324,7 +1330,7 @@ imx_console_setup(struct console *co, char *options)
if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
co->index = 0;
sport = imx_ports[co->index];
- if(sport == NULL)
+ if (sport == NULL)
return -ENODEV;
if (options)
@@ -1462,7 +1468,7 @@ static int serial_imx_probe(struct platform_device *pdev)
struct resource *res;
struct pinctrl *pinctrl;
- sport = kzalloc(sizeof(*sport), GFP_KERNEL);
+ sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
if (!sport)
return -ENOMEM;
@@ -1470,19 +1476,15 @@ static int serial_imx_probe(struct platform_device *pdev)
if (ret > 0)
serial_imx_probe_pdata(sport, pdev);
else if (ret < 0)
- goto free;
+ return ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto free;
- }
+ if (!res)
+ return -ENODEV;
- base = ioremap(res->start, PAGE_SIZE);
- if (!base) {
- ret = -ENOMEM;
- goto free;
- }
+ base = devm_ioremap(&pdev->dev, res->start, PAGE_SIZE);
+ if (!base)
+ return -ENOMEM;
sport->port.dev = &pdev->dev;
sport->port.mapbase = res->start;
@@ -1504,21 +1506,21 @@ static int serial_imx_probe(struct platform_device *pdev)
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
dev_err(&pdev->dev, "failed to get default pinctrl: %d\n", ret);
- goto unmap;
+ return ret;
}
sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(sport->clk_ipg)) {
ret = PTR_ERR(sport->clk_ipg);
dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
- goto unmap;
+ return ret;
}
sport->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(sport->clk_per)) {
ret = PTR_ERR(sport->clk_per);
dev_err(&pdev->dev, "failed to get per clk: %d\n", ret);
- goto unmap;
+ return ret;
}
clk_prepare_enable(sport->clk_per);
@@ -1547,11 +1549,6 @@ deinit:
clkput:
clk_disable_unprepare(sport->clk_per);
clk_disable_unprepare(sport->clk_ipg);
-unmap:
- iounmap(sport->port.membase);
-free:
- kfree(sport);
-
return ret;
}
@@ -1572,9 +1569,6 @@ static int serial_imx_remove(struct platform_device *pdev)
if (pdata && pdata->exit)
pdata->exit(pdev);
- iounmap(sport->port.membase);
- kfree(sport);
-
return 0;
}
@@ -1596,7 +1590,7 @@ static int __init imx_serial_init(void)
{
int ret;
- printk(KERN_INFO "Serial: IMX driver\n");
+ pr_info("Serial: IMX driver\n");
ret = uart_register_driver(&imx_reg);
if (ret)
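The probe/remove changes above switch serial_imx_probe() to device-managed allocations, which is why the unmap:/free: error labels and the matching iounmap()/kfree() calls in serial_imx_remove() disappear. As a rough sketch of the resulting probe shape for a hypothetical foo driver (struct foo_port and the mapping size are illustrative only):

struct foo_port {
	void __iomem *membase;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_port *fp;
	struct resource *res;

	/* freed automatically when probe fails or the device is unbound */
	fp = devm_kzalloc(&pdev->dev, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* unmapped automatically as well, so error paths just return */
	fp->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!fp->membase)
		return -ENOMEM;

	platform_set_drvdata(pdev, fp);
	return 0;
}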
diff --git a/drivers/tty/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
index d8f1d1d54471..6e4c715c5d26 100644
--- a/drivers/tty/serial/ioc3_serial.c
+++ b/drivers/tty/serial/ioc3_serial.c
@@ -1000,7 +1000,7 @@ ioc3_change_speed(struct uart_port *the_port,
the_port->ignore_status_mask = N_ALL_INPUT;
- state->port.tty->low_latency = 1;
+ state->port.low_latency = 1;
if (iflag & IGNPAR)
the_port->ignore_status_mask &= ~(N_PARITY_ERROR
@@ -1393,7 +1393,6 @@ static inline int do_read(struct uart_port *the_port, char *buf, int len)
*/
static int receive_chars(struct uart_port *the_port)
{
- struct tty_struct *tty;
unsigned char ch[MAX_CHARS];
int read_count = 0, read_room, flip = 0;
struct uart_state *state = the_port->state;
@@ -1403,25 +1402,23 @@ static int receive_chars(struct uart_port *the_port)
/* Make sure all the pointers are "good" ones */
if (!state)
return 0;
- if (!state->port.tty)
- return 0;
if (!(port->ip_flags & INPUT_ENABLE))
return 0;
spin_lock_irqsave(&the_port->lock, pflags);
- tty = state->port.tty;
read_count = do_read(the_port, ch, MAX_CHARS);
if (read_count > 0) {
flip = 1;
- read_room = tty_insert_flip_string(tty, ch, read_count);
+ read_room = tty_insert_flip_string(&state->port, ch,
+ read_count);
the_port->icount.rx += read_count;
}
spin_unlock_irqrestore(&the_port->lock, pflags);
if (flip)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&state->port);
return read_count;
}
diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index 3e7da10cebba..e2520abcb1c4 100644
--- a/drivers/tty/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
@@ -1740,7 +1740,7 @@ ioc4_change_speed(struct uart_port *the_port,
the_port->ignore_status_mask = N_ALL_INPUT;
- state->port.tty->low_latency = 1;
+ state->port.low_latency = 1;
if (iflag & IGNPAR)
the_port->ignore_status_mask &= ~(N_PARITY_ERROR
@@ -2340,7 +2340,6 @@ static inline int do_read(struct uart_port *the_port, unsigned char *buf,
*/
static void receive_chars(struct uart_port *the_port)
{
- struct tty_struct *tty;
unsigned char ch[IOC4_MAX_CHARS];
int read_count, request_count = IOC4_MAX_CHARS;
struct uart_icount *icount;
@@ -2350,26 +2349,23 @@ static void receive_chars(struct uart_port *the_port)
/* Make sure all the pointers are "good" ones */
if (!state)
return;
- if (!state->port.tty)
- return;
spin_lock_irqsave(&the_port->lock, pflags);
- tty = state->port.tty;
- request_count = tty_buffer_request_room(tty, IOC4_MAX_CHARS);
+ request_count = tty_buffer_request_room(&state->port, IOC4_MAX_CHARS);
if (request_count > 0) {
icount = &the_port->icount;
read_count = do_read(the_port, ch, request_count);
if (read_count > 0) {
- tty_insert_flip_string(tty, ch, read_count);
+ tty_insert_flip_string(&state->port, ch, read_count);
icount->rx += read_count;
}
}
spin_unlock_irqrestore(&the_port->lock, pflags);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&state->port);
}
/**
@@ -2883,6 +2879,7 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd)
/* error exits that give back resources */
out5:
ioc4_serial_remove_one(idd);
+ return ret;
out4:
kfree(soft);
out3:
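The ioc3/ioc4 changes above are part of the series-wide conversion from passing a struct tty_struct into the flip-buffer helpers to passing the struct tty_port embedded in uart_state; since the tty_port owns the buffers, the receive path no longer has to bail out when no tty is attached. A minimal sketch of the converted receive pattern, with foo_rx_ready()/foo_rx_byte() as hypothetical register accessors:

static void foo_rx_chars(struct uart_port *port)
{
	struct tty_port *tport = &port->state->port;
	unsigned char ch;

	while (foo_rx_ready(port)) {		/* hypothetical "data ready" test */
		ch = foo_rx_byte(port);		/* hypothetical RX register read */
		port->icount.rx++;

		if (uart_handle_sysrq_char(port, ch))
			continue;

		/* buffered in the tty_port even if no tty is attached yet */
		tty_insert_flip_char(tport, ch, TTY_NORMAL);
	}

	tty_flip_buffer_push(tport);
}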
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 7b1cda59ebb5..cb3c81eb0996 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -248,17 +248,12 @@ static void ip22zilog_maybe_update_regs(struct uart_ip22zilog_port *up,
#define Rx_BRK 0x0100 /* BREAK event software flag. */
#define Rx_SYS 0x0200 /* SysRq event software flag. */
-static struct tty_struct *ip22zilog_receive_chars(struct uart_ip22zilog_port *up,
+static bool ip22zilog_receive_chars(struct uart_ip22zilog_port *up,
struct zilog_channel *channel)
{
- struct tty_struct *tty;
unsigned char ch, flag;
unsigned int r1;
-
- tty = NULL;
- if (up->port.state != NULL &&
- up->port.state->port.tty != NULL)
- tty = up->port.state->port.tty;
+ bool push = up->port.state != NULL;
for (;;) {
ch = readb(&channel->control);
@@ -312,10 +307,10 @@ static struct tty_struct *ip22zilog_receive_chars(struct uart_ip22zilog_port *up
if (uart_handle_sysrq_char(&up->port, ch))
continue;
- if (tty)
+ if (push)
uart_insert_char(&up->port, r1, Rx_OVR, ch, flag);
}
- return tty;
+ return push;
}
static void ip22zilog_status_handle(struct uart_ip22zilog_port *up,
@@ -438,21 +433,20 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
while (up) {
struct zilog_channel *channel
= ZILOG_CHANNEL_FROM_PORT(&up->port);
- struct tty_struct *tty;
unsigned char r3;
+ bool push = false;
spin_lock(&up->port.lock);
r3 = read_zsreg(channel, R3);
/* Channel A */
- tty = NULL;
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
writeb(RES_H_IUS, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
if (r3 & CHARxIP)
- tty = ip22zilog_receive_chars(up, channel);
+ push = ip22zilog_receive_chars(up, channel);
if (r3 & CHAEXT)
ip22zilog_status_handle(up, channel);
if (r3 & CHATxIP)
@@ -460,22 +454,22 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
}
spin_unlock(&up->port.lock);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (push)
+ tty_flip_buffer_push(&up->port.state->port);
/* Channel B */
up = up->next;
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+ push = false;
spin_lock(&up->port.lock);
- tty = NULL;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
writeb(RES_H_IUS, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
if (r3 & CHBRxIP)
- tty = ip22zilog_receive_chars(up, channel);
+ push = ip22zilog_receive_chars(up, channel);
if (r3 & CHBEXT)
ip22zilog_status_handle(up, channel);
if (r3 & CHBTxIP)
@@ -483,8 +477,8 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
}
spin_unlock(&up->port.lock);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (push)
+ tty_flip_buffer_push(&up->port.state->port);
up = up->next;
}
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index 4c00c5550b1a..00f250ae14c5 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -521,6 +521,7 @@ void jsm_input(struct jsm_channel *ch)
{
struct jsm_board *bd;
struct tty_struct *tp;
+ struct tty_port *port;
u32 rmask;
u16 head;
u16 tail;
@@ -536,7 +537,8 @@ void jsm_input(struct jsm_channel *ch)
if (!ch)
return;
- tp = ch->uart_port.state->port.tty;
+ port = &ch->uart_port.state->port;
+ tp = port->tty;
bd = ch->ch_bd;
if(!bd)
@@ -600,7 +602,7 @@ void jsm_input(struct jsm_channel *ch)
return;
}
- len = tty_buffer_request_room(tp, data_len);
+ len = tty_buffer_request_room(port, data_len);
n = len;
/*
@@ -629,16 +631,16 @@ void jsm_input(struct jsm_channel *ch)
* format it likes.
*/
if (*(ch->ch_equeue +tail +i) & UART_LSR_BI)
- tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_BREAK);
+ tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_BREAK);
else if (*(ch->ch_equeue +tail +i) & UART_LSR_PE)
- tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_PARITY);
+ tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_PARITY);
else if (*(ch->ch_equeue +tail +i) & UART_LSR_FE)
- tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_FRAME);
+ tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_FRAME);
else
- tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_NORMAL);
+ tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_NORMAL);
}
} else {
- tty_insert_flip_string(tp, ch->ch_rqueue + tail, s) ;
+ tty_insert_flip_string(port, ch->ch_rqueue + tail, s);
}
tail += s;
n -= s;
@@ -652,7 +654,7 @@ void jsm_input(struct jsm_channel *ch)
spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
/* Tell the tty layer its okay to "eat" the data now */
- tty_flip_buffer_push(tp);
+ tty_flip_buffer_push(port);
jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n");
}
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index 6ac2b797a764..5dafcf1c227b 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -23,6 +23,7 @@
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
@@ -202,7 +203,6 @@ bool kgdb_nmi_poll_knock(void)
static void kgdb_nmi_tty_receiver(unsigned long data)
{
struct kgdb_nmi_tty_priv *priv = (void *)data;
- struct tty_struct *tty;
char ch;
tasklet_schedule(&priv->tlet);
@@ -210,16 +210,9 @@ static void kgdb_nmi_tty_receiver(unsigned long data)
if (likely(!kgdb_nmi_tty_enabled || !kfifo_len(&priv->fifo)))
return;
- /* Port is there, but tty might be hung up, check. */
- tty = tty_port_tty_get(kgdb_nmi_port);
- if (!tty)
- return;
-
while (kfifo_out(&priv->fifo, &ch, 1))
- tty_insert_flip_char(priv->port.tty, ch, TTY_NORMAL);
- tty_flip_buffer_push(priv->port.tty);
-
- tty_kref_put(tty);
+ tty_insert_flip_char(&priv->port, ch, TTY_NORMAL);
+ tty_flip_buffer_push(&priv->port);
}
static int kgdb_nmi_tty_activate(struct tty_port *port, struct tty_struct *tty)
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 02da071fe1e7..15733da757c6 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -162,21 +162,16 @@ lqasc_enable_ms(struct uart_port *port)
static int
lqasc_rx_chars(struct uart_port *port)
{
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+ struct tty_port *tport = &port->state->port;
unsigned int ch = 0, rsr = 0, fifocnt;
- if (!tty) {
- dev_dbg(port->dev, "%s:tty is busy now", __func__);
- return -EBUSY;
- }
- fifocnt =
- ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK;
+ fifocnt = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK;
while (fifocnt--) {
u8 flag = TTY_NORMAL;
ch = ltq_r8(port->membase + LTQ_ASC_RBUF);
rsr = (ltq_r32(port->membase + LTQ_ASC_STATE)
& ASCSTATE_ANY) | UART_DUMMY_UER_RX;
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
port->icount.rx++;
/*
@@ -208,7 +203,7 @@ lqasc_rx_chars(struct uart_port *port)
}
if ((rsr & port->ignore_status_mask) == 0)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(tport, ch, flag);
if (rsr & ASCSTATE_ROE)
/*
@@ -216,11 +211,12 @@ lqasc_rx_chars(struct uart_port *port)
* immediately, and doesn't affect the current
* character
*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
}
+
if (ch != 0)
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(tport);
+
return 0;
}
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index 0e86bff3fe2a..dffea6b2cd7d 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -257,17 +257,8 @@ static void __serial_uart_flush(struct uart_port *port)
static void __serial_lpc32xx_rx(struct uart_port *port)
{
+ struct tty_port *tport = &port->state->port;
unsigned int tmp, flag;
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
-
- if (!tty) {
- /* Discard data: no tty available */
- while (!(readl(LPC32XX_HSUART_FIFO(port->membase)) &
- LPC32XX_HSU_RX_EMPTY))
- ;
-
- return;
- }
/* Read data from FIFO and push into terminal */
tmp = readl(LPC32XX_HSUART_FIFO(port->membase));
@@ -281,15 +272,14 @@ static void __serial_lpc32xx_rx(struct uart_port *port)
LPC32XX_HSUART_IIR(port->membase));
port->icount.frame++;
flag = TTY_FRAME;
- tty_insert_flip_char(tty, 0, TTY_FRAME);
+ tty_insert_flip_char(tport, 0, TTY_FRAME);
}
- tty_insert_flip_char(tty, (tmp & 0xFF), flag);
+ tty_insert_flip_char(tport, (tmp & 0xFF), flag);
tmp = readl(LPC32XX_HSUART_FIFO(port->membase));
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(tport);
}
static void __serial_lpc32xx_tx(struct uart_port *port)
@@ -332,7 +322,7 @@ exit_tx:
static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+ struct tty_port *tport = &port->state->port;
u32 status;
spin_lock(&port->lock);
@@ -356,17 +346,14 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
writel(LPC32XX_HSU_RX_OE_INT,
LPC32XX_HSUART_IIR(port->membase));
port->icount.overrun++;
- if (tty) {
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- tty_schedule_flip(tty);
- }
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+ tty_schedule_flip(tport);
}
/* Data received? */
if (status & (LPC32XX_HSU_RX_TIMEOUT_INT | LPC32XX_HSU_RX_TRIG_INT)) {
__serial_lpc32xx_rx(port);
- if (tty)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
}
/* Transmit data request? */
@@ -376,7 +363,6 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
}
spin_unlock(&port->lock);
- tty_kref_put(tty);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index b13949ad3408..bb1afa0922e1 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -300,7 +300,7 @@ static void m32r_sio_enable_ms(struct uart_port *port)
static void receive_chars(struct uart_sio_port *up, int *status)
{
- struct tty_struct *tty = up->port.state->port.tty;
+ struct tty_port *port = &up->port.state->port;
unsigned char ch;
unsigned char flag;
int max_count = 256;
@@ -355,7 +355,7 @@ static void receive_chars(struct uart_sio_port *up, int *status)
if (uart_handle_sysrq_char(&up->port, ch))
goto ignore_char;
if ((*status & up->port.ignore_status_mask) == 0)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(port, ch, flag);
if (*status & UART_LSR_OE) {
/*
@@ -363,12 +363,12 @@ static void receive_chars(struct uart_sio_port *up, int *status)
* immediately, and doesn't affect the current
* character.
*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
}
ignore_char:
*status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
static void transmit_chars(struct uart_sio_port *up)
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index dd6277eb5a38..32517d4bceab 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -310,8 +310,8 @@ static void max3100_work(struct work_struct *w)
}
}
- if (rxchars > 16 && s->port.state->port.tty != NULL) {
- tty_flip_buffer_push(s->port.state->port.tty);
+ if (rxchars > 16) {
+ tty_flip_buffer_push(&s->port.state->port);
rxchars = 0;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -323,8 +323,8 @@ static void max3100_work(struct work_struct *w)
(!uart_circ_empty(xmit) &&
!uart_tx_stopped(&s->port))));
- if (rxchars > 0 && s->port.state->port.tty != NULL)
- tty_flip_buffer_push(s->port.state->port.tty);
+ if (rxchars > 0)
+ tty_flip_buffer_push(&s->port.state->port);
}
static irqreturn_t max3100_irq(int irqno, void *dev_id)
@@ -529,7 +529,7 @@ max3100_set_termios(struct uart_port *port, struct ktermios *termios,
MAX3100_STATUS_OE;
/* we are sending char from a workqueue so enable */
- s->port.state->port.tty->low_latency = 1;
+ s->port.state->port.low_latency = 1;
if (s->poll_time > 0)
del_timer_sync(&s->timer);
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index a801f6872cad..0c2422cb04ea 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -460,10 +460,6 @@ static int max310x_set_ref_clk(struct max310x_port *s)
static void max310x_handle_rx(struct max310x_port *s, unsigned int rxlen)
{
unsigned int sts = 0, ch = 0, flag;
- struct tty_struct *tty = tty_port_tty_get(&s->port.state->port);
-
- if (!tty)
- return;
if (unlikely(rxlen >= MAX310X_FIFO_SIZE)) {
dev_warn(s->port.dev, "Possible RX FIFO overrun %d\n", rxlen);
@@ -516,9 +512,7 @@ static void max310x_handle_rx(struct max310x_port *s, unsigned int rxlen)
ch, flag);
}
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
+ tty_flip_buffer_push(&s->port.state->port);
}
static void max310x_handle_tx(struct max310x_port *s)
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index fcd56ab6053f..e956377a38fe 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -23,6 +23,7 @@
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/io.h>
+#include <linux/uaccess.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
@@ -55,6 +56,7 @@ struct mcf_uart {
struct uart_port port;
unsigned int sigs; /* Local copy of line sigs */
unsigned char imr; /* Local IMR mirror */
+ struct serial_rs485 rs485; /* RS485 settings */
};
/****************************************************************************/
@@ -101,6 +103,12 @@ static void mcf_start_tx(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+ if (pp->rs485.flags & SER_RS485_ENABLED) {
+ /* Enable Transmitter */
+ writeb(MCFUART_UCR_TXENABLE, port->membase + MCFUART_UCR);
+ /* Manually assert RTS */
+ writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP1);
+ }
pp->imr |= MCFUART_UIR_TXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
}
@@ -196,6 +204,7 @@ static void mcf_shutdown(struct uart_port *port)
static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned long flags;
unsigned int baud, baudclk;
#if defined(CONFIG_M5272)
@@ -248,6 +257,11 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
mr2 |= MCFUART_MR2_TXCTS;
}
+ if (pp->rs485.flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ mr2 |= MCFUART_MR2_TXRTS;
+ }
+
spin_lock_irqsave(&port->lock, flags);
uart_update_timeout(port, termios->c_cflag, baud);
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
@@ -310,7 +324,7 @@ static void mcf_rx_chars(struct mcf_uart *pp)
uart_insert_char(port, status, MCFUART_USR_RXOVERRUN, ch, flag);
}
- tty_flip_buffer_push(port->state->port.tty);
+ tty_flip_buffer_push(&port->state->port);
}
/****************************************************************************/
@@ -342,6 +356,10 @@ static void mcf_tx_chars(struct mcf_uart *pp)
if (xmit->head == xmit->tail) {
pp->imr &= ~MCFUART_UIR_TXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
+ /* Disable TX to negate RTS automatically */
+ if (pp->rs485.flags & SER_RS485_ENABLED)
+ writeb(MCFUART_UCR_TXDISABLE,
+ port->membase + MCFUART_UCR);
}
}
@@ -418,6 +436,58 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
/****************************************************************************/
+/* Enable or disable the RS485 support */
+static void mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+{
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+ unsigned long flags;
+ unsigned char mr1, mr2;
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* Get mode registers */
+ mr1 = readb(port->membase + MCFUART_UMR);
+ mr2 = readb(port->membase + MCFUART_UMR);
+ if (rs485->flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ /* Automatically negate RTS after TX completes */
+ mr2 |= MCFUART_MR2_TXRTS;
+ } else {
+ dev_dbg(port->dev, "Setting UART to RS232\n");
+ mr2 &= ~MCFUART_MR2_TXRTS;
+ }
+ writeb(mr1, port->membase + MCFUART_UMR);
+ writeb(mr2, port->membase + MCFUART_UMR);
+ pp->rs485 = *rs485;
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int mcf_ioctl(struct uart_port *port, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case TIOCSRS485: {
+ struct serial_rs485 rs485;
+ if (copy_from_user(&rs485, (struct serial_rs485 *)arg,
+ sizeof(struct serial_rs485)))
+ return -EFAULT;
+ mcf_config_rs485(port, &rs485);
+ break;
+ }
+ case TIOCGRS485: {
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+ if (copy_to_user((struct serial_rs485 *)arg, &pp->rs485,
+ sizeof(struct serial_rs485)))
+ return -EFAULT;
+ break;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+/****************************************************************************/
+
/*
* Define the basic serial functions we support.
*/
@@ -438,6 +508,7 @@ static const struct uart_ops mcf_uart_ops = {
.release_port = mcf_release_port,
.config_port = mcf_config_port,
.verify_port = mcf_verify_port,
+ .ioctl = mcf_ioctl,
};
static struct mcf_uart mcf_ports[4];
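The mcf.c hunks above wire RS485 mode into the ColdFire UART: mcf_config_rs485() latches the requested struct serial_rs485 and sets MCFUART_MR2_TXRTS so the hardware drops RTS once transmission completes, and mcf_ioctl() exposes it through TIOCSRS485/TIOCGRS485. A hedged userspace sketch of driving that interface (the device node is only an example):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* example node */

	if (fd < 0)
		return 1;

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED;	/* let the driver handle RTS for TX */
	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
		perror("TIOCSRS485");

	if (ioctl(fd, TIOCGRS485, &rs485) == 0)	/* read back what the driver kept */
		printf("rs485 flags: 0x%x\n", rs485.flags);

	close(fd);
	return 0;
}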
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index 2c01344dc332..5f4765a7a5c5 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -387,12 +387,9 @@ void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
struct hsu_dma_buffer *dbuf = &up->rxbuf;
struct hsu_dma_chan *chan = up->rxc;
struct uart_port *port = &up->port;
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
int count;
- if (!tty)
- return;
-
/*
* First need to know how many is already transferred,
* then check if its a timeout DMA irq, and return
@@ -423,7 +420,7 @@ void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
* explicitly set tail to 0. So head will
* always be greater than tail.
*/
- tty_insert_flip_string(tty, dbuf->buf, count);
+ tty_insert_flip_string(tport, dbuf->buf, count);
port->icount.rx += count;
dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
@@ -437,7 +434,7 @@ void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
| (0x1 << 16)
| (0x1 << 24) /* timeout bit, see HSU Errata 1 */
);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
chan_writel(chan, HSU_CH_CR, 0x3);
@@ -460,13 +457,9 @@ static void serial_hsu_stop_rx(struct uart_port *port)
static inline void receive_chars(struct uart_hsu_port *up, int *status)
{
- struct tty_struct *tty = up->port.state->port.tty;
unsigned int ch, flag;
unsigned int max_count = 256;
- if (!tty)
- return;
-
do {
ch = serial_in(up, UART_RX);
flag = TTY_NORMAL;
@@ -522,7 +515,7 @@ static inline void receive_chars(struct uart_hsu_port *up, int *status)
ignore_char:
*status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && max_count--);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&up->port.state->port);
}
static void transmit_chars(struct uart_hsu_port *up)
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 7c23c4f4c58d..c0e1fad51be7 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -941,7 +941,7 @@ static struct uart_ops mpc52xx_uart_ops = {
static inline int
mpc52xx_uart_int_rx_chars(struct uart_port *port)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
unsigned char ch, flag;
unsigned short status;
@@ -986,20 +986,20 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
out_8(&PSC(port)->command, MPC52xx_PSC_RST_ERR_STAT);
}
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(tport, ch, flag);
if (status & MPC52xx_PSC_SR_OE) {
/*
* Overrun is special, since it's
* reported immediately, and doesn't
* affect the current character
*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
port->icount.overrun++;
}
}
spin_unlock(&port->lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
spin_lock(&port->lock);
return psc_ops->raw_rx_rdy(port);
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index 6a9c6605666a..bc24f4931670 100644
--- a/drivers/tty/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
@@ -937,7 +937,7 @@ static int serial_polled;
static int mpsc_rx_intr(struct mpsc_port_info *pi)
{
struct mpsc_rx_desc *rxre;
- struct tty_struct *tty = pi->port.state->port.tty;
+ struct tty_port *port = &pi->port.state->port;
u32 cmdstat, bytes_in, i;
int rc = 0;
u8 *bp;
@@ -968,10 +968,9 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
}
#endif
/* Following use of tty struct directly is deprecated */
- if (unlikely(tty_buffer_request_room(tty, bytes_in)
- < bytes_in)) {
- if (tty->low_latency)
- tty_flip_buffer_push(tty);
+ if (tty_buffer_request_room(port, bytes_in) < bytes_in) {
+ if (port->low_latency)
+ tty_flip_buffer_push(port);
/*
* If this failed then we will throw away the bytes
* but must do so to clear interrupts.
@@ -1040,10 +1039,10 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
| SDMA_DESC_CMDSTAT_FR
| SDMA_DESC_CMDSTAT_OR)))
&& !(cmdstat & pi->port.ignore_status_mask)) {
- tty_insert_flip_char(tty, *bp, flag);
+ tty_insert_flip_char(port, *bp, flag);
} else {
for (i=0; i<bytes_in; i++)
- tty_insert_flip_char(tty, *bp++, TTY_NORMAL);
+ tty_insert_flip_char(port, *bp++, TTY_NORMAL);
pi->port.icount.rx += bytes_in;
}
@@ -1081,7 +1080,7 @@ next_frame:
if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
mpsc_start_rx(pi);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
return rc;
}
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 58734d7e746d..f641c232beca 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -339,7 +339,7 @@ static int
receive_chars(struct uart_max3110 *max, unsigned short *str, int len)
{
struct uart_port *port = &max->port;
- struct tty_struct *tty;
+ struct tty_port *tport;
char buf[M3110_RX_FIFO_DEPTH];
int r, w, usable;
@@ -347,9 +347,7 @@ receive_chars(struct uart_max3110 *max, unsigned short *str, int len)
if (!port->state)
return 0;
- tty = tty_port_tty_get(&port->state->port);
- if (!tty)
- return 0;
+ tport = &port->state->port;
for (r = 0, w = 0; r < len; r++) {
if (str[r] & MAX3110_BREAK &&
@@ -364,20 +362,17 @@ receive_chars(struct uart_max3110 *max, unsigned short *str, int len)
}
}
- if (!w) {
- tty_kref_put(tty);
+ if (!w)
return 0;
- }
for (r = 0; w; r += usable, w -= usable) {
- usable = tty_buffer_request_room(tty, w);
+ usable = tty_buffer_request_room(tport, w);
if (usable) {
- tty_insert_flip_string(tty, buf + r, usable);
+ tty_insert_flip_string(tport, buf + r, usable);
port->icount.rx += usable;
}
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(tport);
return r;
}
@@ -493,7 +488,7 @@ static int serial_m3110_startup(struct uart_port *port)
| WC_BAUD_DR2;
/* as we use thread to handle tx/rx, need set low latency */
- port->state->port.tty->low_latency = 1;
+ port->state->port.low_latency = 1;
if (max->irq) {
max->read_thread = NULL;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 95fd39be2934..b11e99797fd8 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -91,14 +91,14 @@ static void msm_enable_ms(struct uart_port *port)
static void handle_rx_dm(struct uart_port *port, unsigned int misr)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
unsigned int sr;
int count = 0;
struct msm_port *msm_port = UART_TO_MSM(port);
if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
port->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
}
@@ -132,12 +132,12 @@ static void handle_rx_dm(struct uart_port *port, unsigned int misr)
port->icount.frame++;
/* TODO: handle sysrq */
- tty_insert_flip_string(tty, (char *) &c,
+ tty_insert_flip_string(tport, (char *)&c,
(count > 4) ? 4 : count);
count -= 4;
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
if (misr & (UART_IMR_RXSTALE))
msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
@@ -146,7 +146,7 @@ static void handle_rx_dm(struct uart_port *port, unsigned int misr)
static void handle_rx(struct uart_port *port)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
unsigned int sr;
/*
@@ -155,7 +155,7 @@ static void handle_rx(struct uart_port *port)
*/
if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
port->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
}
@@ -186,10 +186,10 @@ static void handle_rx(struct uart_port *port)
}
if (!uart_handle_sysrq_char(port, c))
- tty_insert_flip_char(tty, c, flag);
+ tty_insert_flip_char(tport, c, flag);
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
}
static void reset_dm_count(struct uart_port *port)
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 1fa92284ade0..4a942c78347e 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -908,6 +908,7 @@ static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
unsigned long flags;
unsigned int flush;
struct tty_struct *tty;
+ struct tty_port *port;
struct uart_port *uport;
struct msm_hs_port *msm_uport;
@@ -917,7 +918,8 @@ static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
spin_lock_irqsave(&uport->lock, flags);
clk_enable(msm_uport->clk);
- tty = uport->state->port.tty;
+ port = &uport->state->port;
+ tty = port->tty;
msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
@@ -926,7 +928,7 @@ static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
/* overflow is not connect to data in a FIFO */
if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
(uport->read_status_mask & CREAD))) {
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
uport->icount.buf_overrun++;
error_f = 1;
}
@@ -939,7 +941,7 @@ static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
uport->icount.parity++;
error_f = 1;
if (uport->ignore_status_mask & IGNPAR)
- tty_insert_flip_char(tty, 0, TTY_PARITY);
+ tty_insert_flip_char(port, 0, TTY_PARITY);
}
if (error_f)
@@ -959,7 +961,7 @@ static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
if (0 != (uport->read_status_mask & CREAD)) {
- retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
+ retval = tty_insert_flip_string(port, msm_uport->rx.buffer,
rx_count);
BUG_ON(retval != rx_count);
}
@@ -979,9 +981,8 @@ static void msm_hs_tty_flip_buffer_work(struct work_struct *work)
{
struct msm_hs_port *msm_uport =
container_of(work, struct msm_hs_port, rx.tty_work);
- struct tty_struct *tty = msm_uport->uport.state->port.tty;
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&msm_uport->uport.state->port);
}
/*
@@ -1344,7 +1345,6 @@ static irqreturn_t msm_hs_rx_wakeup_isr(int irq, void *dev)
unsigned long flags;
struct msm_hs_port *msm_uport = dev;
struct uart_port *uport = &msm_uport->uport;
- struct tty_struct *tty = NULL;
spin_lock_irqsave(&uport->lock, flags);
if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
@@ -1361,8 +1361,7 @@ static irqreturn_t msm_hs_rx_wakeup_isr(int irq, void *dev)
* optionally inject char into tty rx */
msm_hs_request_clock_on_locked(uport);
if (msm_uport->rx_wakeup.inject_rx) {
- tty = uport->state->port.tty;
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(&uport->state->port,
msm_uport->rx_wakeup.rx_to_inject,
TTY_NORMAL);
queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work);
@@ -1400,7 +1399,7 @@ static int msm_hs_startup(struct uart_port *uport)
/* do not let tty layer execute RX in global workqueue, use a
* dedicated workqueue managed by this driver */
- uport->state->port.tty->low_latency = 1;
+ uport->state->port.low_latency = 1;
/* turn on uart clk */
ret = msm_hs_init_clk_locked(uport);
diff --git a/drivers/tty/serial/msm_smd_tty.c b/drivers/tty/serial/msm_smd_tty.c
index 925d1fa153db..e722ff163d91 100644
--- a/drivers/tty/serial/msm_smd_tty.c
+++ b/drivers/tty/serial/msm_smd_tty.c
@@ -70,7 +70,7 @@ static void smd_tty_notify(void *priv, unsigned event)
if (avail == 0)
break;
- avail = tty_prepare_flip_string(tty, &ptr, avail);
+ avail = tty_prepare_flip_string(&info->port, &ptr, avail);
if (smd_read(info->ch, ptr, avail) != avail) {
/* shouldn't be possible since we're in interrupt
@@ -80,7 +80,7 @@ static void smd_tty_notify(void *priv, unsigned event)
pr_err("OOPS - smd_tty_buffer mismatch?!");
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&info->port);
}
/* XXX only when writable and necessary */
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index e2775b6df5a5..7fd6aaaacd8e 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -242,8 +242,8 @@ static void mux_write(struct uart_port *port)
*/
static void mux_read(struct uart_port *port)
{
+ struct tty_port *tport = &port->state->port;
int data;
- struct tty_struct *tty = port->state->port.tty;
__u32 start_count = port->icount.rx;
while(1) {
@@ -266,12 +266,11 @@ static void mux_read(struct uart_port *port)
if (uart_handle_sysrq_char(port, data & 0xffu))
continue;
- tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL);
+ tty_insert_flip_char(tport, data & 0xFF, TTY_NORMAL);
}
- if (start_count != port->icount.rx) {
- tty_flip_buffer_push(tty);
- }
+ if (start_count != port->icount.rx)
+ tty_flip_buffer_push(tport);
}
/**
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index e55615eb34ad..d549fe1fa42a 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -364,7 +364,6 @@ out:
static void mxs_auart_rx_chars(struct mxs_auart_port *s)
{
- struct tty_struct *tty = s->port.state->port.tty;
u32 stat = 0;
for (;;) {
@@ -375,7 +374,7 @@ static void mxs_auart_rx_chars(struct mxs_auart_port *s)
}
writel(stat, s->port.membase + AUART_STAT);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&s->port.state->port);
}
static int mxs_auart_request_port(struct uart_port *u)
@@ -459,7 +458,7 @@ static int mxs_auart_dma_prep_rx(struct mxs_auart_port *s);
static void dma_rx_callback(void *arg)
{
struct mxs_auart_port *s = (struct mxs_auart_port *) arg;
- struct tty_struct *tty = s->port.state->port.tty;
+ struct tty_port *port = &s->port.state->port;
int count;
u32 stat;
@@ -470,10 +469,10 @@ static void dma_rx_callback(void *arg)
AUART_STAT_PERR | AUART_STAT_FERR);
count = stat & AUART_STAT_RXCOUNT_MASK;
- tty_insert_flip_string(tty, s->rx_dma_buf, count);
+ tty_insert_flip_string(port, s->rx_dma_buf, count);
writel(stat, s->port.membase + AUART_STAT);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
/* start the next DMA for RX. */
mxs_auart_dma_prep_rx(s);
@@ -552,7 +551,7 @@ static int mxs_auart_dma_init(struct mxs_auart_port *s)
return 0;
/* We do not get the right DMA channels. */
- if (s->dma_channel_rx == -1 || s->dma_channel_rx == -1)
+ if (s->dma_channel_rx == -1 || s->dma_channel_tx == -1)
return -EINVAL;
/* init for RX */
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
index d40da78e7c85..b9a40ed70be2 100644
--- a/drivers/tty/serial/netx-serial.c
+++ b/drivers/tty/serial/netx-serial.c
@@ -199,7 +199,6 @@ static void netx_txint(struct uart_port *port)
static void netx_rxint(struct uart_port *port)
{
unsigned char rx, flg, status;
- struct tty_struct *tty = port->state->port.tty;
while (!(readl(port->membase + UART_FR) & FR_RXFE)) {
rx = readl(port->membase + UART_DR);
@@ -237,8 +236,7 @@ static void netx_rxint(struct uart_port *port)
uart_insert_char(port, status, SR_OE, rx, flg);
}
- tty_flip_buffer_push(tty);
- return;
+ tty_flip_buffer_push(&port->state->port);
}
static irqreturn_t netx_int(int irq, void *dev_id)
diff --git a/drivers/tty/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c
index dd4c31d1aee5..77287c54f331 100644
--- a/drivers/tty/serial/nwpserial.c
+++ b/drivers/tty/serial/nwpserial.c
@@ -128,7 +128,7 @@ static void nwpserial_config_port(struct uart_port *port, int flags)
static irqreturn_t nwpserial_interrupt(int irq, void *dev_id)
{
struct nwpserial_port *up = dev_id;
- struct tty_struct *tty = up->port.state->port.tty;
+ struct tty_port *port = &up->port.state->port;
irqreturn_t ret;
unsigned int iir;
unsigned char ch;
@@ -146,10 +146,10 @@ static irqreturn_t nwpserial_interrupt(int irq, void *dev_id)
up->port.icount.rx++;
ch = dcr_read(up->dcr_host, UART_RX);
if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID)
- tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ tty_insert_flip_char(port, ch, TTY_NORMAL);
} while (dcr_read(up->dcr_host, UART_LSR) & UART_LSR_DR);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
ret = IRQ_HANDLED;
/* clear interrupt */
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index e7cae1c2d7d2..d5874605682b 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -18,7 +18,6 @@
#include <linux/serial_reg.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_serial.h>
#include <linux/of_platform.h>
#include <linux/nwpserial.h>
#include <linux/clk.h>
@@ -45,8 +44,10 @@ void tegra_serial_handle_break(struct uart_port *p)
udelay(1);
} while (1);
}
-/* FIXME remove this export when tegra finishes conversion to open firmware */
-EXPORT_SYMBOL_GPL(tegra_serial_handle_break);
+#else
+static inline void tegra_serial_handle_break(struct uart_port *port)
+{
+}
#endif
/*
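The of_serial.c hunk above replaces the exported tegra_serial_handle_break() with an empty static inline when the Tegra support is not built in, the usual idiom for making a config-dependent helper compile away at its call sites. A tiny generic sketch of that idiom (names hypothetical):

#ifdef CONFIG_FOO_QUIRK
void foo_handle_quirk(struct uart_port *port);	/* real implementation built elsewhere */
#else
static inline void foo_handle_quirk(struct uart_port *port)
{
	/* no-op: callers need no #ifdefs and the compiler discards the call */
}
#endif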
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 57d6b29c039c..4dc41408ecb7 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -59,6 +59,7 @@
/* SCR register bitmasks */
#define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK (1 << 7)
+#define OMAP_UART_SCR_TX_TRIG_GRANU1_MASK (1 << 6)
#define OMAP_UART_SCR_TX_EMPTY (1 << 3)
/* FCR register bitmasks */
@@ -232,24 +233,42 @@ static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
}
/*
+ * serial_omap_baud_is_mode16 - check if baud rate is MODE16X
+ * @port: uart port info
+ * @baud: baudrate for which mode needs to be determined
+ *
+ * Returns true if baud rate is MODE16X and false if MODE13X
+ * Original table in OMAP TRM named "UART Mode Baud Rates, Divisor Values,
+ * and Error Rates" determines modes not for all common baud rates.
+ * E.g. for 1000000 baud rate mode must be 16x, but according to that
+ * table it's determined as 13x.
+ */
+static bool
+serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
+{
+ unsigned int n13 = port->uartclk / (13 * baud);
+ unsigned int n16 = port->uartclk / (16 * baud);
+ int baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
+ int baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
+ if(baudAbsDiff13 < 0)
+ baudAbsDiff13 = -baudAbsDiff13;
+ if(baudAbsDiff16 < 0)
+ baudAbsDiff16 = -baudAbsDiff16;
+
+ return (baudAbsDiff13 > baudAbsDiff16);
+}
+
+/*
* serial_omap_get_divisor - calculate divisor value
* @port: uart port info
* @baud: baudrate for which divisor needs to be calculated.
- *
- * We have written our own function to get the divisor so as to support
- * 13x mode. 3Mbps Baudrate as an different divisor.
- * Reference OMAP TRM Chapter 17:
- * Table 17-1. UART Mode Baud Rates, Divisor Values, and Error Rates
- * referring to oversampling - divisor value
- * baudrate 460,800 to 3,686,400 all have divisor 13
- * except 3,000,000 which has divisor value 16
*/
static unsigned int
serial_omap_get_divisor(struct uart_port *port, unsigned int baud)
{
unsigned int divisor;
- if (baud > OMAP_MODE13X_SPEED && baud != 3000000)
+ if (!serial_omap_baud_is_mode16(port, baud))
divisor = 13;
else
divisor = 16;
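serial_omap_baud_is_mode16() above replaces the hard-coded "13x above 230400 except 3 Mbaud" rule with a comparison of the rounding error of both oversampling modes. A worked illustration, assuming the usual 48 MHz OMAP UART functional clock (the clock value is an assumption, not part of the patch), written as a small host-side C program:

#include <stdio.h>
#include <stdlib.h>

/* Mirrors the in-kernel calculation: pick 16x when its divisor lands
 * closer to the requested baud rate than the 13x divisor does.
 */
static int is_mode16(unsigned int uartclk, unsigned int baud)
{
	unsigned int n13 = uartclk / (13 * baud);
	unsigned int n16 = uartclk / (16 * baud);
	int diff13 = abs((int)baud - (int)(uartclk / (13 * n13)));
	int diff16 = abs((int)baud - (int)(uartclk / (16 * n16)));

	return diff13 > diff16;
}

int main(void)
{
	/* 3000000: 48 MHz / 16 = 3000000 exactly   -> 16x (divisor 16)          */
	/* 460800:  48 MHz / (13 * 8) = 461538      -> 13x, error only 738       */
	/* 1000000: 48 MHz / (16 * 3) = 1000000     -> 16x, unlike the old table */
	printf("3000000 baud: %s\n", is_mode16(48000000, 3000000) ? "16x" : "13x");
	printf("460800 baud:  %s\n", is_mode16(48000000, 460800)  ? "16x" : "13x");
	printf("1000000 baud: %s\n", is_mode16(48000000, 1000000) ? "16x" : "13x");
	return 0;
}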
@@ -302,9 +321,6 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
struct circ_buf *xmit = &up->port.state->xmit;
int count;
- if (!(lsr & UART_LSR_THRE))
- return;
-
if (up->port.x_char) {
serial_out(up, UART_TX, up->port.x_char);
up->port.icount.tx++;
@@ -483,7 +499,6 @@ static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
static irqreturn_t serial_omap_irq(int irq, void *dev_id)
{
struct uart_omap_port *up = dev_id;
- struct tty_struct *tty = up->port.state->port.tty;
unsigned int iir, lsr;
unsigned int type;
irqreturn_t ret = IRQ_NONE;
@@ -530,7 +545,7 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
spin_unlock(&up->port.lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&up->port.state->port);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
@@ -776,6 +791,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
cval |= UART_LCR_PARITY;
if (!(termios->c_cflag & PARODD))
cval |= UART_LCR_EPAR;
+ if (termios->c_cflag & CMSPAR)
+ cval |= UART_LCR_SPAR;
/*
* Ask the core to calculate the divisor for us.
@@ -845,7 +862,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, cval); /* reset DLAB */
up->lcr = cval;
- up->scr = OMAP_UART_SCR_TX_EMPTY;
+ up->scr = 0;
/* FIFOs and DMA Settings */
@@ -869,8 +886,6 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
/* FIFO ENABLE, DMA MODE */
- up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
-
/* Set receive FIFO threshold to 16 characters and
* transmit FIFO threshold to 16 spaces
*/
@@ -915,7 +930,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, cval);
- if (baud > 230400 && baud != 3000000)
+ if (!serial_omap_baud_is_mode16(port, baud))
up->mdr1 = UART_OMAP_MDR1_13X_MODE;
else
up->mdr1 = UART_OMAP_MDR1_16X_MODE;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 8318925fbf6b..7a6c989924b3 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -14,18 +14,21 @@
*along with this program; if not, write to the Free Software
*Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
+#if defined(CONFIG_SERIAL_PCH_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
#include <linux/kernel.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/console.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/dmi.h>
-#include <linux/console.h>
#include <linux/nmi.h>
#include <linux/delay.h>
@@ -553,12 +556,26 @@ static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf,
{
int i;
u8 rbr, lsr;
+ struct uart_port *port = &priv->port;
lsr = ioread8(priv->membase + UART_LSR);
for (i = 0, lsr = ioread8(priv->membase + UART_LSR);
- i < rx_size && lsr & UART_LSR_DR;
+ i < rx_size && lsr & (UART_LSR_DR | UART_LSR_BI);
lsr = ioread8(priv->membase + UART_LSR)) {
rbr = ioread8(priv->membase + PCH_UART_RBR);
+
+ if (lsr & UART_LSR_BI) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ }
+#ifdef SUPPORT_SYSRQ
+ if (port->sysrq) {
+ if (uart_handle_sysrq_char(port, rbr))
+ continue;
+ }
+#endif
+
buf[i++] = rbr;
}
return i;
@@ -591,19 +608,11 @@ static void pch_uart_hal_set_break(struct eg20t_port *priv, int on)
static int push_rx(struct eg20t_port *priv, const unsigned char *buf,
int size)
{
- struct uart_port *port;
- struct tty_struct *tty;
-
- port = &priv->port;
- tty = tty_port_tty_get(&port->state->port);
- if (!tty) {
- dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
- return -EBUSY;
- }
+ struct uart_port *port = &priv->port;
+ struct tty_port *tport = &port->state->port;
- tty_insert_flip_string(tty, buf, size);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(tport, buf, size);
+ tty_flip_buffer_push(tport);
return 0;
}
@@ -629,15 +638,16 @@ static int dma_push_rx(struct eg20t_port *priv, int size)
struct tty_struct *tty;
int room;
struct uart_port *port = &priv->port;
+ struct tty_port *tport = &port->state->port;
port = &priv->port;
- tty = tty_port_tty_get(&port->state->port);
+ tty = tty_port_tty_get(tport);
if (!tty) {
dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
return 0;
}
- room = tty_buffer_request_room(tty, size);
+ room = tty_buffer_request_room(tport, size);
if (room < size)
dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
@@ -645,7 +655,7 @@ static int dma_push_rx(struct eg20t_port *priv, int size)
if (!room)
return room;
- tty_insert_flip_string(tty, sg_virt(&priv->sg_rx), size);
+ tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size);
port->icount.rx += room;
tty_kref_put(tty);
@@ -743,19 +753,12 @@ static void pch_dma_rx_complete(void *arg)
{
struct eg20t_port *priv = arg;
struct uart_port *port = &priv->port;
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
int count;
- if (!tty) {
- dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
- return;
- }
-
dma_sync_sg_for_cpu(port->dev, &priv->sg_rx, 1, DMA_FROM_DEVICE);
count = dma_push_rx(priv, priv->trigger_level);
if (count)
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->state->port);
async_tx_ack(priv->desc_rx);
pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
PCH_UART_HAL_RX_ERR_INT);
@@ -1037,23 +1040,33 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
{
- u8 fcr = ioread8(priv->membase + UART_FCR);
-
- /* Reset FIFO */
- fcr |= UART_FCR_CLEAR_RCVR;
- iowrite8(fcr, priv->membase + UART_FCR);
+ struct uart_port *port = &priv->port;
+ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+ char *error_msg[5] = {};
+ int i = 0;
if (lsr & PCH_UART_LSR_ERR)
- dev_err(&priv->pdev->dev, "Error data in FIFO\n");
+ error_msg[i++] = "Error data in FIFO\n";
- if (lsr & UART_LSR_FE)
- dev_err(&priv->pdev->dev, "Framing Error\n");
+ if (lsr & UART_LSR_FE) {
+ port->icount.frame++;
+ error_msg[i++] = " Framing Error\n";
+ }
- if (lsr & UART_LSR_PE)
- dev_err(&priv->pdev->dev, "Parity Error\n");
+ if (lsr & UART_LSR_PE) {
+ port->icount.parity++;
+ error_msg[i++] = " Parity Error\n";
+ }
- if (lsr & UART_LSR_OE)
- dev_err(&priv->pdev->dev, "Overrun Error\n");
+ if (lsr & UART_LSR_OE) {
+ port->icount.overrun++;
+ error_msg[i++] = " Overrun Error\n";
+ }
+
+ if (tty == NULL) {
+ for (i = 0; error_msg[i] != NULL; i++)
+ dev_err(&priv->pdev->dev, error_msg[i]);
+ }
}
static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
@@ -1564,7 +1577,8 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
local_irq_save(flags);
if (priv->port.sysrq) {
- spin_lock(&priv->lock);
+ /* call to uart_handle_sysrq_char already took the priv lock */
+ priv_locked = 0;
/* serial8250_handle_port() already took the port lock */
port_locked = 0;
} else if (oops_in_progress) {
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 333c8d012b0e..b1785f58b6e3 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -227,19 +227,19 @@ static void pmz_interrupt_control(struct uart_pmac_port *uap, int enable)
write_zsreg(uap, R1, uap->curregs[1]);
}
-static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
+static bool pmz_receive_chars(struct uart_pmac_port *uap)
{
- struct tty_struct *tty = NULL;
+ struct tty_port *port;
unsigned char ch, r1, drop, error, flag;
int loops = 0;
/* Sanity check, make sure the old bug is no longer happening */
- if (uap->port.state == NULL || uap->port.state->port.tty == NULL) {
+ if (uap->port.state == NULL) {
WARN_ON(1);
(void)read_zsdata(uap);
- return NULL;
+ return false;
}
- tty = uap->port.state->port.tty;
+ port = &uap->port.state->port;
while (1) {
error = 0;
@@ -309,10 +309,10 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
if (uap->port.ignore_status_mask == 0xff ||
(r1 & uap->port.ignore_status_mask) == 0) {
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(port, ch, flag);
}
if (r1 & Rx_OVR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
next_char:
/* We can get stuck in an infinite loop getting char 0 when the
* line is in a wrong HW state, we break that here.
@@ -328,11 +328,11 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
break;
}
- return tty;
+ return true;
flood:
pmz_interrupt_control(uap, 0);
pmz_error("pmz: rx irq flood !\n");
- return tty;
+ return true;
}
static void pmz_status_handle(struct uart_pmac_port *uap)
@@ -453,7 +453,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
struct uart_pmac_port *uap_a;
struct uart_pmac_port *uap_b;
int rc = IRQ_NONE;
- struct tty_struct *tty;
+ bool push;
u8 r3;
uap_a = pmz_get_port_A(uap);
@@ -466,7 +466,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
pmz_debug("irq, r3: %x\n", r3);
#endif
/* Channel A */
- tty = NULL;
+ push = false;
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
if (!ZS_IS_OPEN(uap_a)) {
pmz_debug("ChanA interrupt while not open !\n");
@@ -477,21 +477,21 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
if (r3 & CHAEXT)
pmz_status_handle(uap_a);
if (r3 & CHARxIP)
- tty = pmz_receive_chars(uap_a);
+ push = pmz_receive_chars(uap_a);
if (r3 & CHATxIP)
pmz_transmit_chars(uap_a);
rc = IRQ_HANDLED;
}
skip_a:
spin_unlock(&uap_a->port.lock);
- if (tty != NULL)
- tty_flip_buffer_push(tty);
+ if (push)
+ tty_flip_buffer_push(&uap->port.state->port);
if (!uap_b)
goto out;
spin_lock(&uap_b->port.lock);
- tty = NULL;
+ push = false;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
if (!ZS_IS_OPEN(uap_b)) {
pmz_debug("ChanB interrupt while not open !\n");
@@ -502,15 +502,15 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
if (r3 & CHBEXT)
pmz_status_handle(uap_b);
if (r3 & CHBRxIP)
- tty = pmz_receive_chars(uap_b);
+ push = pmz_receive_chars(uap_b);
if (r3 & CHBTxIP)
pmz_transmit_chars(uap_b);
rc = IRQ_HANDLED;
}
skip_b:
spin_unlock(&uap_b->port.lock);
- if (tty != NULL)
- tty_flip_buffer_push(tty);
+ if (push)
+ tty_flip_buffer_push(&uap->port.state->port);
out:
return rc;
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index 0aa75a97531c..7e277a5384a7 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -181,7 +181,6 @@ static void pnx8xxx_enable_ms(struct uart_port *port)
static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
{
- struct tty_struct *tty = sport->port.state->port.tty;
unsigned int status, ch, flg;
status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
@@ -238,7 +237,7 @@ static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&sport->port.state->port);
}
static void pnx8xxx_tx_chars(struct pnx8xxx_port *sport)
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 2764828251f5..05f504e0c271 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -98,7 +98,6 @@ static void serial_pxa_stop_rx(struct uart_port *port)
static inline void receive_chars(struct uart_pxa_port *up, int *status)
{
- struct tty_struct *tty = up->port.state->port.tty;
unsigned int ch, flag;
int max_count = 256;
@@ -168,7 +167,7 @@ static inline void receive_chars(struct uart_pxa_port *up, int *status)
ignore_char:
*status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&up->port.state->port);
/* work around Errata #20 according to
* Intel(R) PXA27x Processor Family
@@ -673,8 +672,7 @@ serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
unsigned long flags;
int locked = 1;
- clk_prepare_enable(up->clk);
-
+ clk_enable(up->clk);
local_irq_save(flags);
if (up->port.sysrq)
locked = 0;
@@ -701,8 +699,8 @@ serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
+ clk_disable(up->clk);
- clk_disable_unprepare(up->clk);
}
#ifdef CONFIG_CONSOLE_POLL
@@ -899,6 +897,12 @@ static int serial_pxa_probe(struct platform_device *dev)
goto err_free;
}
+ ret = clk_prepare(sport->clk);
+ if (ret) {
+ clk_put(sport->clk);
+ goto err_free;
+ }
+
sport->port.type = PORT_PXA;
sport->port.iotype = UPIO_MEM;
sport->port.mapbase = mmres->start;
@@ -930,6 +934,7 @@ static int serial_pxa_probe(struct platform_device *dev)
return 0;
err_clk:
+ clk_unprepare(sport->clk);
clk_put(sport->clk);
err_free:
kfree(sport);
@@ -943,6 +948,8 @@ static int serial_pxa_remove(struct platform_device *dev)
platform_set_drvdata(dev, NULL);
uart_remove_one_port(&serial_pxa_reg, &sport->port);
+
+ clk_unprepare(sport->clk);
clk_put(sport->clk);
kfree(sport);
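
The pxa hunks above split clk_prepare_enable() into its two halves: clk_prepare() may sleep, so it moves to probe (with clk_unprepare() in the error path and in remove), while the console write path, which can run in atomic context, keeps only the atomic-safe clk_enable()/clk_disable(). A rough sketch of that split, with hypothetical names:

#include <linux/clk.h>

/* Hypothetical sketch: prepare once in process context ... */
static int example_probe(struct clk *clk)
{
	return clk_prepare(clk);	/* may sleep */
}

/* ... and only enable/disable in the (possibly atomic) console path. */
static void example_console_write(struct clk *clk)
{
	clk_enable(clk);		/* must not sleep */
	/* emit characters here */
	clk_disable(clk);
}
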
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
new file mode 100644
index 000000000000..a314a943f124
--- /dev/null
+++ b/drivers/tty/serial/rp2.c
@@ -0,0 +1,885 @@
+/*
+ * Driver for Comtrol RocketPort EXPRESS/INFINITY cards
+ *
+ * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
+ *
+ * Inspired by, and loosely based on:
+ *
+ * ar933x_uart.c
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * rocketport_infinity_express-linux-1.20.tar.gz
+ * Copyright (C) 2004-2011 Comtrol, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/completion.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/types.h>
+
+#define DRV_NAME "rp2"
+
+#define RP2_FW_NAME "rp2.fw"
+#define RP2_UCODE_BYTES 0x3f
+
+#define PORTS_PER_ASIC 16
+#define ALL_PORTS_MASK (BIT(PORTS_PER_ASIC) - 1)
+
+#define UART_CLOCK 44236800
+#define DEFAULT_BAUD_DIV (UART_CLOCK / (9600 * 16))
+#define FIFO_SIZE 512
+
+/* BAR0 registers */
+#define RP2_FPGA_CTL0 0x110
+#define RP2_FPGA_CTL1 0x11c
+#define RP2_IRQ_MASK 0x1ec
+#define RP2_IRQ_MASK_EN_m BIT(0)
+#define RP2_IRQ_STATUS 0x1f0
+
+/* BAR1 registers */
+#define RP2_ASIC_SPACING 0x1000
+#define RP2_ASIC_OFFSET(i) ((i) << ilog2(RP2_ASIC_SPACING))
+
+#define RP2_PORT_BASE 0x000
+#define RP2_PORT_SPACING 0x040
+
+#define RP2_UCODE_BASE 0x400
+#define RP2_UCODE_SPACING 0x80
+
+#define RP2_CLK_PRESCALER 0xc00
+#define RP2_CH_IRQ_STAT 0xc04
+#define RP2_CH_IRQ_MASK 0xc08
+#define RP2_ASIC_IRQ 0xd00
+#define RP2_ASIC_IRQ_EN_m BIT(20)
+#define RP2_GLOBAL_CMD 0xd0c
+#define RP2_ASIC_CFG 0xd04
+
+/* port registers */
+#define RP2_DATA_DWORD 0x000
+
+#define RP2_DATA_BYTE 0x008
+#define RP2_DATA_BYTE_ERR_PARITY_m BIT(8)
+#define RP2_DATA_BYTE_ERR_OVERRUN_m BIT(9)
+#define RP2_DATA_BYTE_ERR_FRAMING_m BIT(10)
+#define RP2_DATA_BYTE_BREAK_m BIT(11)
+
+/* This lets uart_insert_char() drop bytes received on a !CREAD port */
+#define RP2_DUMMY_READ BIT(16)
+
+#define RP2_DATA_BYTE_EXCEPTION_MASK (RP2_DATA_BYTE_ERR_PARITY_m | \
+ RP2_DATA_BYTE_ERR_OVERRUN_m | \
+ RP2_DATA_BYTE_ERR_FRAMING_m | \
+ RP2_DATA_BYTE_BREAK_m)
+
+#define RP2_RX_FIFO_COUNT 0x00c
+#define RP2_TX_FIFO_COUNT 0x00e
+
+#define RP2_CHAN_STAT 0x010
+#define RP2_CHAN_STAT_RXDATA_m BIT(0)
+#define RP2_CHAN_STAT_DCD_m BIT(3)
+#define RP2_CHAN_STAT_DSR_m BIT(4)
+#define RP2_CHAN_STAT_CTS_m BIT(5)
+#define RP2_CHAN_STAT_RI_m BIT(6)
+#define RP2_CHAN_STAT_OVERRUN_m BIT(13)
+#define RP2_CHAN_STAT_DSR_CHANGED_m BIT(16)
+#define RP2_CHAN_STAT_CTS_CHANGED_m BIT(17)
+#define RP2_CHAN_STAT_CD_CHANGED_m BIT(18)
+#define RP2_CHAN_STAT_RI_CHANGED_m BIT(22)
+#define RP2_CHAN_STAT_TXEMPTY_m BIT(25)
+
+#define RP2_CHAN_STAT_MS_CHANGED_MASK (RP2_CHAN_STAT_DSR_CHANGED_m | \
+ RP2_CHAN_STAT_CTS_CHANGED_m | \
+ RP2_CHAN_STAT_CD_CHANGED_m | \
+ RP2_CHAN_STAT_RI_CHANGED_m)
+
+#define RP2_TXRX_CTL 0x014
+#define RP2_TXRX_CTL_MSRIRQ_m BIT(0)
+#define RP2_TXRX_CTL_RXIRQ_m BIT(2)
+#define RP2_TXRX_CTL_RX_TRIG_s 3
+#define RP2_TXRX_CTL_RX_TRIG_m (0x3 << RP2_TXRX_CTL_RX_TRIG_s)
+#define RP2_TXRX_CTL_RX_TRIG_1 (0x1 << RP2_TXRX_CTL_RX_TRIG_s)
+#define RP2_TXRX_CTL_RX_TRIG_256 (0x2 << RP2_TXRX_CTL_RX_TRIG_s)
+#define RP2_TXRX_CTL_RX_TRIG_448 (0x3 << RP2_TXRX_CTL_RX_TRIG_s)
+#define RP2_TXRX_CTL_RX_EN_m BIT(5)
+#define RP2_TXRX_CTL_RTSFLOW_m BIT(6)
+#define RP2_TXRX_CTL_DTRFLOW_m BIT(7)
+#define RP2_TXRX_CTL_TX_TRIG_s 16
+#define RP2_TXRX_CTL_TX_TRIG_m (0x3 << RP2_TXRX_CTL_TX_TRIG_s)
+#define RP2_TXRX_CTL_DSRFLOW_m BIT(18)
+#define RP2_TXRX_CTL_TXIRQ_m BIT(19)
+#define RP2_TXRX_CTL_CTSFLOW_m BIT(23)
+#define RP2_TXRX_CTL_TX_EN_m BIT(24)
+#define RP2_TXRX_CTL_RTS_m BIT(25)
+#define RP2_TXRX_CTL_DTR_m BIT(26)
+#define RP2_TXRX_CTL_LOOP_m BIT(27)
+#define RP2_TXRX_CTL_BREAK_m BIT(28)
+#define RP2_TXRX_CTL_CMSPAR_m BIT(29)
+#define RP2_TXRX_CTL_nPARODD_m BIT(30)
+#define RP2_TXRX_CTL_PARENB_m BIT(31)
+
+#define RP2_UART_CTL 0x018
+#define RP2_UART_CTL_MODE_s 0
+#define RP2_UART_CTL_MODE_m (0x7 << RP2_UART_CTL_MODE_s)
+#define RP2_UART_CTL_MODE_rs232 (0x1 << RP2_UART_CTL_MODE_s)
+#define RP2_UART_CTL_FLUSH_RX_m BIT(3)
+#define RP2_UART_CTL_FLUSH_TX_m BIT(4)
+#define RP2_UART_CTL_RESET_CH_m BIT(5)
+#define RP2_UART_CTL_XMIT_EN_m BIT(6)
+#define RP2_UART_CTL_DATABITS_s 8
+#define RP2_UART_CTL_DATABITS_m (0x3 << RP2_UART_CTL_DATABITS_s)
+#define RP2_UART_CTL_DATABITS_8 (0x3 << RP2_UART_CTL_DATABITS_s)
+#define RP2_UART_CTL_DATABITS_7 (0x2 << RP2_UART_CTL_DATABITS_s)
+#define RP2_UART_CTL_DATABITS_6 (0x1 << RP2_UART_CTL_DATABITS_s)
+#define RP2_UART_CTL_DATABITS_5 (0x0 << RP2_UART_CTL_DATABITS_s)
+#define RP2_UART_CTL_STOPBITS_m BIT(10)
+
+#define RP2_BAUD 0x01c
+
+/* ucode registers */
+#define RP2_TX_SWFLOW 0x02
+#define RP2_TX_SWFLOW_ena 0x81
+#define RP2_TX_SWFLOW_dis 0x9d
+
+#define RP2_RX_SWFLOW 0x0c
+#define RP2_RX_SWFLOW_ena 0x81
+#define RP2_RX_SWFLOW_dis 0x8d
+
+#define RP2_RX_FIFO 0x37
+#define RP2_RX_FIFO_ena 0x08
+#define RP2_RX_FIFO_dis 0x81
+
+static struct uart_driver rp2_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = DRV_NAME,
+ .dev_name = "ttyRP",
+ .nr = CONFIG_SERIAL_RP2_NR_UARTS,
+};
+
+struct rp2_card;
+
+struct rp2_uart_port {
+ struct uart_port port;
+ int idx;
+ int ignore_rx;
+ struct rp2_card *card;
+ void __iomem *asic_base;
+ void __iomem *base;
+ void __iomem *ucode;
+};
+
+struct rp2_card {
+ struct pci_dev *pdev;
+ struct rp2_uart_port *ports;
+ int n_ports;
+ int initialized_ports;
+ int minor_start;
+ int smpte;
+ void __iomem *bar0;
+ void __iomem *bar1;
+ spinlock_t card_lock;
+ struct completion fw_loaded;
+};
+
+#define RP_ID(prod) PCI_VDEVICE(RP, (prod))
+#define RP_CAP(ports, smpte) (((ports) << 8) | ((smpte) << 0))
+
+static inline void rp2_decode_cap(const struct pci_device_id *id,
+ int *ports, int *smpte)
+{
+ *ports = id->driver_data >> 8;
+ *smpte = id->driver_data & 0xff;
+}
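
For reference, RP_CAP() packs the port count into the upper bits of driver_data and the SMPTE flag into the low byte, and rp2_decode_cap() undoes it. A small standalone check of that encoding (host-side C, not kernel code):

#include <assert.h>

int main(void)
{
	unsigned long driver_data = (8 << 8) | (1 << 0);	/* RP_CAP(8, 1) */
	int ports = driver_data >> 8;
	int smpte = driver_data & 0xff;

	assert(ports == 8 && smpte == 1);
	return 0;
}
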
+
+static DEFINE_SPINLOCK(rp2_minor_lock);
+static int rp2_minor_next;
+
+static int rp2_alloc_ports(int n_ports)
+{
+ int ret = -ENOSPC;
+
+ spin_lock(&rp2_minor_lock);
+ if (rp2_minor_next + n_ports <= CONFIG_SERIAL_RP2_NR_UARTS) {
+ /* sorry, no support for hot unplugging individual cards */
+ ret = rp2_minor_next;
+ rp2_minor_next += n_ports;
+ }
+ spin_unlock(&rp2_minor_lock);
+
+ return ret;
+}
+
+static inline struct rp2_uart_port *port_to_up(struct uart_port *port)
+{
+ return container_of(port, struct rp2_uart_port, port);
+}
+
+static void rp2_rmw(struct rp2_uart_port *up, int reg,
+ u32 clr_bits, u32 set_bits)
+{
+ u32 tmp = readl(up->base + reg);
+ tmp &= ~clr_bits;
+ tmp |= set_bits;
+ writel(tmp, up->base + reg);
+}
+
+static void rp2_rmw_clr(struct rp2_uart_port *up, int reg, u32 val)
+{
+ rp2_rmw(up, reg, val, 0);
+}
+
+static void rp2_rmw_set(struct rp2_uart_port *up, int reg, u32 val)
+{
+ rp2_rmw(up, reg, 0, val);
+}
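
The rp2_rmw() helpers above are a plain read-modify-write: clear clr_bits, then set set_bits, then write the result back. A small host-side model of the same operation (register value and field are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Host-side model of rp2_rmw(): clear clr_bits, then set set_bits. */
static uint32_t rmw(uint32_t reg, uint32_t clr_bits, uint32_t set_bits)
{
	reg &= ~clr_bits;
	reg |= set_bits;
	return reg;
}

int main(void)
{
	/* clear a 2-bit field at bit 3, then select value 1 in that field */
	printf("0x%08x\n", rmw(0x00000018, 0x3 << 3, 0x1 << 3)); /* 0x00000008 */
	return 0;
}
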
+
+static void rp2_mask_ch_irq(struct rp2_uart_port *up, int ch_num,
+ int is_enabled)
+{
+ unsigned long flags, irq_mask;
+
+ spin_lock_irqsave(&up->card->card_lock, flags);
+
+ irq_mask = readl(up->asic_base + RP2_CH_IRQ_MASK);
+ if (is_enabled)
+ irq_mask &= ~BIT(ch_num);
+ else
+ irq_mask |= BIT(ch_num);
+ writel(irq_mask, up->asic_base + RP2_CH_IRQ_MASK);
+
+ spin_unlock_irqrestore(&up->card->card_lock, flags);
+}
+
+static unsigned int rp2_uart_tx_empty(struct uart_port *port)
+{
+ struct rp2_uart_port *up = port_to_up(port);
+ unsigned long tx_fifo_bytes, flags;
+
+ /*
+ * This should probably check the transmitter, not the FIFO.
+ * But the TXEMPTY bit doesn't seem to work unless the TX IRQ is
+ * enabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+ tx_fifo_bytes = readw(up->base + RP2_TX_FIFO_COUNT);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return tx_fifo_bytes ? 0 : TIOCSER_TEMT;
+}
+
+static unsigned int rp2_uart_get_mctrl(struct uart_port *port)
+{
+ struct rp2_uart_port *up = port_to_up(port);
+ u32 status;
+
+ status = readl(up->base + RP2_CHAN_STAT);
+ return ((status & RP2_CHAN_STAT_DCD_m) ? TIOCM_CAR : 0) |
+ ((status & RP2_CHAN_STAT_DSR_m) ? TIOCM_DSR : 0) |
+ ((status & RP2_CHAN_STAT_CTS_m) ? TIOCM_CTS : 0) |
+ ((status & RP2_CHAN_STAT_RI_m) ? TIOCM_RI : 0);
+}
+
+static void rp2_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ rp2_rmw(port_to_up(port), RP2_TXRX_CTL,
+ RP2_TXRX_CTL_DTR_m | RP2_TXRX_CTL_RTS_m | RP2_TXRX_CTL_LOOP_m,
+ ((mctrl & TIOCM_DTR) ? RP2_TXRX_CTL_DTR_m : 0) |
+ ((mctrl & TIOCM_RTS) ? RP2_TXRX_CTL_RTS_m : 0) |
+ ((mctrl & TIOCM_LOOP) ? RP2_TXRX_CTL_LOOP_m : 0));
+}
+
+static void rp2_uart_start_tx(struct uart_port *port)
+{
+ rp2_rmw_set(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_TXIRQ_m);
+}
+
+static void rp2_uart_stop_tx(struct uart_port *port)
+{
+ rp2_rmw_clr(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_TXIRQ_m);
+}
+
+static void rp2_uart_stop_rx(struct uart_port *port)
+{
+ rp2_rmw_clr(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_RXIRQ_m);
+}
+
+static void rp2_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ rp2_rmw(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_BREAK_m,
+ break_state ? RP2_TXRX_CTL_BREAK_m : 0);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void rp2_uart_enable_ms(struct uart_port *port)
+{
+ rp2_rmw_set(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_MSRIRQ_m);
+}
+
+static void __rp2_uart_set_termios(struct rp2_uart_port *up,
+ unsigned long cfl,
+ unsigned long ifl,
+ unsigned int baud_div)
+{
+ /* baud rate divisor (calculated elsewhere). 0 = divide-by-1 */
+ writew(baud_div - 1, up->base + RP2_BAUD);
+
+ /* data bits and stop bits */
+ rp2_rmw(up, RP2_UART_CTL,
+ RP2_UART_CTL_STOPBITS_m | RP2_UART_CTL_DATABITS_m,
+ ((cfl & CSTOPB) ? RP2_UART_CTL_STOPBITS_m : 0) |
+ (((cfl & CSIZE) == CS8) ? RP2_UART_CTL_DATABITS_8 : 0) |
+ (((cfl & CSIZE) == CS7) ? RP2_UART_CTL_DATABITS_7 : 0) |
+ (((cfl & CSIZE) == CS6) ? RP2_UART_CTL_DATABITS_6 : 0) |
+ (((cfl & CSIZE) == CS5) ? RP2_UART_CTL_DATABITS_5 : 0));
+
+ /* parity and hardware flow control */
+ rp2_rmw(up, RP2_TXRX_CTL,
+ RP2_TXRX_CTL_PARENB_m | RP2_TXRX_CTL_nPARODD_m |
+ RP2_TXRX_CTL_CMSPAR_m | RP2_TXRX_CTL_DTRFLOW_m |
+ RP2_TXRX_CTL_DSRFLOW_m | RP2_TXRX_CTL_RTSFLOW_m |
+ RP2_TXRX_CTL_CTSFLOW_m,
+ ((cfl & PARENB) ? RP2_TXRX_CTL_PARENB_m : 0) |
+ ((cfl & PARODD) ? 0 : RP2_TXRX_CTL_nPARODD_m) |
+ ((cfl & CMSPAR) ? RP2_TXRX_CTL_CMSPAR_m : 0) |
+ ((cfl & CRTSCTS) ? (RP2_TXRX_CTL_RTSFLOW_m |
+ RP2_TXRX_CTL_CTSFLOW_m) : 0));
+
+ /* XON/XOFF software flow control */
+ writeb((ifl & IXON) ? RP2_TX_SWFLOW_ena : RP2_TX_SWFLOW_dis,
+ up->ucode + RP2_TX_SWFLOW);
+ writeb((ifl & IXOFF) ? RP2_RX_SWFLOW_ena : RP2_RX_SWFLOW_dis,
+ up->ucode + RP2_RX_SWFLOW);
+}
+
+static void rp2_uart_set_termios(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
+{
+ struct rp2_uart_port *up = port_to_up(port);
+ unsigned long flags;
+ unsigned int baud, baud_div;
+
+ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
+ baud_div = uart_get_divisor(port, baud);
+
+ if (tty_termios_baud_rate(new))
+ tty_termios_encode_baud_rate(new, baud, baud);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* ignore all characters if CREAD is not set */
+ port->ignore_status_mask = (new->c_cflag & CREAD) ? 0 : RP2_DUMMY_READ;
+
+ __rp2_uart_set_termios(up, new->c_cflag, new->c_iflag, baud_div);
+ uart_update_timeout(port, new->c_cflag, baud);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
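
As a worked example of the divisor math above (9600 baud with the driver's fixed 16x oversampling):

/*
 *	baud_div = UART_CLOCK / (baud * 16)
 *	         = 44236800 / (9600 * 16) = 288		(DEFAULT_BAUD_DIV)
 *
 * __rp2_uart_set_termios() then writes baud_div - 1 = 287 to RP2_BAUD,
 * since a register value of 0 means divide-by-1.
 */
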
+
+static void rp2_rx_chars(struct rp2_uart_port *up)
+{
+ u16 bytes = readw(up->base + RP2_RX_FIFO_COUNT);
+ struct tty_port *port = &up->port.state->port;
+
+ for (; bytes != 0; bytes--) {
+ u32 byte = readw(up->base + RP2_DATA_BYTE) | RP2_DUMMY_READ;
+ char ch = byte & 0xff;
+
+ if (likely(!(byte & RP2_DATA_BYTE_EXCEPTION_MASK))) {
+ if (!uart_handle_sysrq_char(&up->port, ch))
+ uart_insert_char(&up->port, byte, 0, ch,
+ TTY_NORMAL);
+ } else {
+ char flag = TTY_NORMAL;
+
+ if (byte & RP2_DATA_BYTE_BREAK_m)
+ flag = TTY_BREAK;
+ else if (byte & RP2_DATA_BYTE_ERR_FRAMING_m)
+ flag = TTY_FRAME;
+ else if (byte & RP2_DATA_BYTE_ERR_PARITY_m)
+ flag = TTY_PARITY;
+ uart_insert_char(&up->port, byte,
+ RP2_DATA_BYTE_ERR_OVERRUN_m, ch, flag);
+ }
+ up->port.icount.rx++;
+ }
+
+ tty_flip_buffer_push(port);
+}
+
+static void rp2_tx_chars(struct rp2_uart_port *up)
+{
+ u16 max_tx = FIFO_SIZE - readw(up->base + RP2_TX_FIFO_COUNT);
+ struct circ_buf *xmit = &up->port.state->xmit;
+
+ if (uart_tx_stopped(&up->port)) {
+ rp2_uart_stop_tx(&up->port);
+ return;
+ }
+
+ for (; max_tx != 0; max_tx--) {
+ if (up->port.x_char) {
+ writeb(up->port.x_char, up->base + RP2_DATA_BYTE);
+ up->port.x_char = 0;
+ up->port.icount.tx++;
+ continue;
+ }
+ if (uart_circ_empty(xmit)) {
+ rp2_uart_stop_tx(&up->port);
+ break;
+ }
+ writeb(xmit->buf[xmit->tail], up->base + RP2_DATA_BYTE);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+}
+
+static void rp2_ch_interrupt(struct rp2_uart_port *up)
+{
+ u32 status;
+
+ spin_lock(&up->port.lock);
+
+ /*
+ * The IRQ status bits are clear-on-write. Other status bits in
+ * this register aren't, so it's harmless to write to them.
+ */
+ status = readl(up->base + RP2_CHAN_STAT);
+ writel(status, up->base + RP2_CHAN_STAT);
+
+ if (status & RP2_CHAN_STAT_RXDATA_m)
+ rp2_rx_chars(up);
+ if (status & RP2_CHAN_STAT_TXEMPTY_m)
+ rp2_tx_chars(up);
+ if (status & RP2_CHAN_STAT_MS_CHANGED_MASK)
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+
+ spin_unlock(&up->port.lock);
+}
+
+static int rp2_asic_interrupt(struct rp2_card *card, unsigned int asic_id)
+{
+ void __iomem *base = card->bar1 + RP2_ASIC_OFFSET(asic_id);
+ int ch, handled = 0;
+ unsigned long status = readl(base + RP2_CH_IRQ_STAT) &
+ ~readl(base + RP2_CH_IRQ_MASK);
+
+ for_each_set_bit(ch, &status, PORTS_PER_ASIC) {
+ rp2_ch_interrupt(&card->ports[ch]);
+ handled++;
+ }
+ return handled;
+}
+
+static irqreturn_t rp2_uart_interrupt(int irq, void *dev_id)
+{
+ struct rp2_card *card = dev_id;
+ int handled;
+
+ handled = rp2_asic_interrupt(card, 0);
+ if (card->n_ports >= PORTS_PER_ASIC)
+ handled += rp2_asic_interrupt(card, 1);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static inline void rp2_flush_fifos(struct rp2_uart_port *up)
+{
+ rp2_rmw_set(up, RP2_UART_CTL,
+ RP2_UART_CTL_FLUSH_RX_m | RP2_UART_CTL_FLUSH_TX_m);
+ readl(up->base + RP2_UART_CTL);
+ udelay(10);
+ rp2_rmw_clr(up, RP2_UART_CTL,
+ RP2_UART_CTL_FLUSH_RX_m | RP2_UART_CTL_FLUSH_TX_m);
+}
+
+static int rp2_uart_startup(struct uart_port *port)
+{
+ struct rp2_uart_port *up = port_to_up(port);
+
+ rp2_flush_fifos(up);
+ rp2_rmw(up, RP2_TXRX_CTL, RP2_TXRX_CTL_MSRIRQ_m, RP2_TXRX_CTL_RXIRQ_m);
+ rp2_rmw(up, RP2_TXRX_CTL, RP2_TXRX_CTL_RX_TRIG_m,
+ RP2_TXRX_CTL_RX_TRIG_1);
+ rp2_rmw(up, RP2_CHAN_STAT, 0, 0);
+ rp2_mask_ch_irq(up, up->idx, 1);
+
+ return 0;
+}
+
+static void rp2_uart_shutdown(struct uart_port *port)
+{
+ struct rp2_uart_port *up = port_to_up(port);
+ unsigned long flags;
+
+ rp2_uart_break_ctl(port, 0);
+
+ spin_lock_irqsave(&port->lock, flags);
+ rp2_mask_ch_irq(up, up->idx, 0);
+ rp2_rmw(up, RP2_CHAN_STAT, 0, 0);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *rp2_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_RP2) ? "RocketPort 2 UART" : NULL;
+}
+
+static void rp2_uart_release_port(struct uart_port *port)
+{
+ /* Nothing to release ... */
+}
+
+static int rp2_uart_request_port(struct uart_port *port)
+{
+ /* UARTs always present */
+ return 0;
+}
+
+static void rp2_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_RP2;
+}
+
+static int rp2_uart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_RP2)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct uart_ops rp2_uart_ops = {
+ .tx_empty = rp2_uart_tx_empty,
+ .set_mctrl = rp2_uart_set_mctrl,
+ .get_mctrl = rp2_uart_get_mctrl,
+ .stop_tx = rp2_uart_stop_tx,
+ .start_tx = rp2_uart_start_tx,
+ .stop_rx = rp2_uart_stop_rx,
+ .enable_ms = rp2_uart_enable_ms,
+ .break_ctl = rp2_uart_break_ctl,
+ .startup = rp2_uart_startup,
+ .shutdown = rp2_uart_shutdown,
+ .set_termios = rp2_uart_set_termios,
+ .type = rp2_uart_type,
+ .release_port = rp2_uart_release_port,
+ .request_port = rp2_uart_request_port,
+ .config_port = rp2_uart_config_port,
+ .verify_port = rp2_uart_verify_port,
+};
+
+static void rp2_reset_asic(struct rp2_card *card, unsigned int asic_id)
+{
+ void __iomem *base = card->bar1 + RP2_ASIC_OFFSET(asic_id);
+ u32 clk_cfg;
+
+ writew(1, base + RP2_GLOBAL_CMD);
+ readw(base + RP2_GLOBAL_CMD);
+ msleep(100);
+ writel(0, base + RP2_CLK_PRESCALER);
+
+ /* TDM clock configuration */
+ clk_cfg = readw(base + RP2_ASIC_CFG);
+ clk_cfg = (clk_cfg & ~BIT(8)) | BIT(9);
+ writew(clk_cfg, base + RP2_ASIC_CFG);
+
+ /* IRQ routing */
+ writel(ALL_PORTS_MASK, base + RP2_CH_IRQ_MASK);
+ writel(RP2_ASIC_IRQ_EN_m, base + RP2_ASIC_IRQ);
+}
+
+static void rp2_init_card(struct rp2_card *card)
+{
+ writel(4, card->bar0 + RP2_FPGA_CTL0);
+ writel(0, card->bar0 + RP2_FPGA_CTL1);
+
+ rp2_reset_asic(card, 0);
+ if (card->n_ports >= PORTS_PER_ASIC)
+ rp2_reset_asic(card, 1);
+
+ writel(RP2_IRQ_MASK_EN_m, card->bar0 + RP2_IRQ_MASK);
+}
+
+static void rp2_init_port(struct rp2_uart_port *up, const struct firmware *fw)
+{
+ int i;
+
+ writel(RP2_UART_CTL_RESET_CH_m, up->base + RP2_UART_CTL);
+ readl(up->base + RP2_UART_CTL);
+ udelay(1);
+
+ writel(0, up->base + RP2_TXRX_CTL);
+ writel(0, up->base + RP2_UART_CTL);
+ readl(up->base + RP2_UART_CTL);
+ udelay(1);
+
+ rp2_flush_fifos(up);
+
+ for (i = 0; i < min_t(int, fw->size, RP2_UCODE_BYTES); i++)
+ writeb(fw->data[i], up->ucode + i);
+
+ __rp2_uart_set_termios(up, CS8 | CREAD | CLOCAL, 0, DEFAULT_BAUD_DIV);
+ rp2_uart_set_mctrl(&up->port, 0);
+
+ writeb(RP2_RX_FIFO_ena, up->ucode + RP2_RX_FIFO);
+ rp2_rmw(up, RP2_UART_CTL, RP2_UART_CTL_MODE_m,
+ RP2_UART_CTL_XMIT_EN_m | RP2_UART_CTL_MODE_rs232);
+ rp2_rmw_set(up, RP2_TXRX_CTL,
+ RP2_TXRX_CTL_TX_EN_m | RP2_TXRX_CTL_RX_EN_m);
+}
+
+static void rp2_remove_ports(struct rp2_card *card)
+{
+ int i;
+
+ for (i = 0; i < card->initialized_ports; i++)
+ uart_remove_one_port(&rp2_uart_driver, &card->ports[i].port);
+ card->initialized_ports = 0;
+}
+
+static void rp2_fw_cb(const struct firmware *fw, void *context)
+{
+ struct rp2_card *card = context;
+ resource_size_t phys_base;
+ int i, rc = -ENOENT;
+
+ if (!fw) {
+ dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n",
+ RP2_FW_NAME);
+ goto no_fw;
+ }
+
+ phys_base = pci_resource_start(card->pdev, 1);
+
+ for (i = 0; i < card->n_ports; i++) {
+ struct rp2_uart_port *rp = &card->ports[i];
+ struct uart_port *p;
+ int j = (unsigned)i % PORTS_PER_ASIC;
+
+ rp->asic_base = card->bar1;
+ rp->base = card->bar1 + RP2_PORT_BASE + j*RP2_PORT_SPACING;
+ rp->ucode = card->bar1 + RP2_UCODE_BASE + j*RP2_UCODE_SPACING;
+ rp->card = card;
+ rp->idx = j;
+
+ p = &rp->port;
+ p->line = card->minor_start + i;
+ p->dev = &card->pdev->dev;
+ p->type = PORT_RP2;
+ p->iotype = UPIO_MEM32;
+ p->uartclk = UART_CLOCK;
+ p->regshift = 2;
+ p->fifosize = FIFO_SIZE;
+ p->ops = &rp2_uart_ops;
+ p->irq = card->pdev->irq;
+ p->membase = rp->base;
+ p->mapbase = phys_base + RP2_PORT_BASE + j*RP2_PORT_SPACING;
+
+ if (i >= PORTS_PER_ASIC) {
+ rp->asic_base += RP2_ASIC_SPACING;
+ rp->base += RP2_ASIC_SPACING;
+ rp->ucode += RP2_ASIC_SPACING;
+ p->mapbase += RP2_ASIC_SPACING;
+ }
+
+ rp2_init_port(rp, fw);
+ rc = uart_add_one_port(&rp2_uart_driver, p);
+ if (rc) {
+ dev_err(&card->pdev->dev,
+ "error registering port %d: %d\n", i, rc);
+ rp2_remove_ports(card);
+ break;
+ }
+ card->initialized_ports++;
+ }
+
+ release_firmware(fw);
+no_fw:
+ /*
+ * rp2_fw_cb() is called from a workqueue long after rp2_probe()
+ * has already returned success. So if something failed here,
+ * we'll just leave the now-dormant device in place until somebody
+ * unbinds it.
+ */
+ if (rc)
+ dev_warn(&card->pdev->dev, "driver initialization failed\n");
+
+ complete(&card->fw_loaded);
+}
+
+static int rp2_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct rp2_card *card;
+ struct rp2_uart_port *ports;
+ void __iomem * const *bars;
+ int rc;
+
+ card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+ pci_set_drvdata(pdev, card);
+ spin_lock_init(&card->card_lock);
+ init_completion(&card->fw_loaded);
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions_request_all(pdev, 0x03, DRV_NAME);
+ if (rc)
+ return rc;
+
+ bars = pcim_iomap_table(pdev);
+ card->bar0 = bars[0];
+ card->bar1 = bars[1];
+ card->pdev = pdev;
+
+ rp2_decode_cap(id, &card->n_ports, &card->smpte);
+ dev_info(&pdev->dev, "found new card with %d ports\n", card->n_ports);
+
+ card->minor_start = rp2_alloc_ports(card->n_ports);
+ if (card->minor_start < 0) {
+ dev_err(&pdev->dev,
+ "too many ports (try increasing CONFIG_SERIAL_RP2_NR_UARTS)\n");
+ return -EINVAL;
+ }
+
+ rp2_init_card(card);
+
+ ports = devm_kzalloc(&pdev->dev, sizeof(*ports) * card->n_ports,
+ GFP_KERNEL);
+ if (!ports)
+ return -ENOMEM;
+ card->ports = ports;
+
+ rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
+ IRQF_SHARED, DRV_NAME, card);
+ if (rc)
+ return rc;
+
+ /*
+ * Only catastrophic errors (e.g. ENOMEM) are reported here.
+ * If the FW image is missing, we'll find out in rp2_fw_cb()
+ * and print an error message.
+ */
+ rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev,
+ GFP_KERNEL, card, rp2_fw_cb);
+ if (rc)
+ return rc;
+ dev_dbg(&pdev->dev, "waiting for firmware blob...\n");
+
+ return 0;
+}
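
rp2_probe() above only kicks off the firmware load; the ports are registered later in rp2_fw_cb(), and rp2_remove() waits on fw_loaded so the workqueue callback cannot race an unbind. A bare sketch of that pattern with hypothetical names:

#include <linux/completion.h>
#include <linux/firmware.h>

struct example_card {
	struct completion fw_loaded;
};

/* Runs from a workqueue, possibly long after probe has returned. */
static void example_fw_cb(const struct firmware *fw, void *context)
{
	struct example_card *card = context;

	if (fw) {
		/* ... program the hardware, register the ports ... */
		release_firmware(fw);
	}
	complete(&card->fw_loaded);	/* signal even on failure */
}

static void example_remove(struct example_card *card)
{
	wait_for_completion(&card->fw_loaded);
	/* now safe to tear the ports down */
}
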
+
+static void rp2_remove(struct pci_dev *pdev)
+{
+ struct rp2_card *card = pci_get_drvdata(pdev);
+
+ wait_for_completion(&card->fw_loaded);
+ rp2_remove_ports(card);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(rp2_pci_tbl) = {
+
+ /* RocketPort INFINITY cards */
+
+ { RP_ID(0x0040), RP_CAP(8, 0) }, /* INF Octa, RJ45, selectable */
+ { RP_ID(0x0041), RP_CAP(32, 0) }, /* INF 32, ext interface */
+ { RP_ID(0x0042), RP_CAP(8, 0) }, /* INF Octa, ext interface */
+ { RP_ID(0x0043), RP_CAP(16, 0) }, /* INF 16, ext interface */
+ { RP_ID(0x0044), RP_CAP(4, 0) }, /* INF Quad, DB, selectable */
+ { RP_ID(0x0045), RP_CAP(8, 0) }, /* INF Octa, DB, selectable */
+ { RP_ID(0x0046), RP_CAP(4, 0) }, /* INF Quad, ext interface */
+ { RP_ID(0x0047), RP_CAP(4, 0) }, /* INF Quad, RJ45 */
+ { RP_ID(0x004a), RP_CAP(4, 0) }, /* INF Plus, Quad */
+ { RP_ID(0x004b), RP_CAP(8, 0) }, /* INF Plus, Octa */
+ { RP_ID(0x004c), RP_CAP(8, 0) }, /* INF III, Octa */
+ { RP_ID(0x004d), RP_CAP(4, 0) }, /* INF III, Quad */
+ { RP_ID(0x004e), RP_CAP(2, 0) }, /* INF Plus, 2, RS232 */
+ { RP_ID(0x004f), RP_CAP(2, 1) }, /* INF Plus, 2, SMPTE */
+ { RP_ID(0x0050), RP_CAP(4, 0) }, /* INF Plus, Quad, RJ45 */
+ { RP_ID(0x0051), RP_CAP(8, 0) }, /* INF Plus, Octa, RJ45 */
+ { RP_ID(0x0052), RP_CAP(8, 1) }, /* INF Octa, SMPTE */
+
+ /* RocketPort EXPRESS cards */
+
+ { RP_ID(0x0060), RP_CAP(8, 0) }, /* EXP Octa, RJ45, selectable */
+ { RP_ID(0x0061), RP_CAP(32, 0) }, /* EXP 32, ext interface */
+ { RP_ID(0x0062), RP_CAP(8, 0) }, /* EXP Octa, ext interface */
+ { RP_ID(0x0063), RP_CAP(16, 0) }, /* EXP 16, ext interface */
+ { RP_ID(0x0064), RP_CAP(4, 0) }, /* EXP Quad, DB, selectable */
+ { RP_ID(0x0065), RP_CAP(8, 0) }, /* EXP Octa, DB, selectable */
+ { RP_ID(0x0066), RP_CAP(4, 0) }, /* EXP Quad, ext interface */
+ { RP_ID(0x0067), RP_CAP(4, 0) }, /* EXP Quad, RJ45 */
+ { RP_ID(0x0068), RP_CAP(8, 0) }, /* EXP Octa, RJ11 */
+ { RP_ID(0x0072), RP_CAP(8, 1) }, /* EXP Octa, SMPTE */
+ { }
+};
+MODULE_DEVICE_TABLE(pci, rp2_pci_tbl);
+
+static struct pci_driver rp2_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = rp2_pci_tbl,
+ .probe = rp2_probe,
+ .remove = rp2_remove,
+};
+
+static int __init rp2_uart_init(void)
+{
+ int rc;
+
+ rc = uart_register_driver(&rp2_uart_driver);
+ if (rc)
+ return rc;
+
+ rc = pci_register_driver(&rp2_pci_driver);
+ if (rc) {
+ uart_unregister_driver(&rp2_uart_driver);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void __exit rp2_uart_exit(void)
+{
+ pci_unregister_driver(&rp2_pci_driver);
+ uart_unregister_driver(&rp2_uart_driver);
+}
+
+module_init(rp2_uart_init);
+module_exit(rp2_uart_exit);
+
+MODULE_DESCRIPTION("Comtrol RocketPort EXPRESS/INFINITY driver");
+MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(RP2_FW_NAME);
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 5d4b9b449b4a..af6b3e3ad24d 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -188,7 +188,6 @@ static void sa1100_enable_ms(struct uart_port *port)
static void
sa1100_rx_chars(struct sa1100_port *sport)
{
- struct tty_struct *tty = sport->port.state->port.tty;
unsigned int status, ch, flg;
status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
@@ -233,7 +232,7 @@ sa1100_rx_chars(struct sa1100_port *sport)
status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
UTSR0_TO_SM(UART_GET_UTSR0(sport));
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&sport->port.state->port);
}
static void sa1100_tx_chars(struct sa1100_port *sport)
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index e514b3a4dc57..2769a38d15b6 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -47,7 +47,6 @@
#include <asm/irq.h>
#include <mach/hardware.h>
-#include <mach/map.h>
#include <plat/regs-serial.h>
#include <plat/clock.h>
@@ -221,7 +220,6 @@ s3c24xx_serial_rx_chars(int irq, void *dev_id)
{
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
- struct tty_struct *tty = port->state->port.tty;
unsigned int ufcon, ch, flag, ufstat, uerstat;
unsigned long flags;
int max_count = 64;
@@ -299,7 +297,7 @@ s3c24xx_serial_rx_chars(int irq, void *dev_id)
ignore_char:
continue;
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
out:
spin_unlock_irqrestore(&port->lock, flags);
@@ -1143,8 +1141,13 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
dbg("resource %p (%lx..%lx)\n", res, res->start, res->end);
+ port->membase = devm_ioremap(port->dev, res->start, resource_size(res));
+ if (!port->membase) {
+ dev_err(port->dev, "failed to remap controller address\n");
+ return -EBUSY;
+ }
+
port->mapbase = res->start;
- port->membase = S3C_VA_UART + (res->start & 0xfffff);
ret = platform_get_irq(platdev, 0);
if (ret < 0)
port->irq = 0;
@@ -1724,8 +1727,6 @@ static const struct of_device_id s3c24xx_uart_dt_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
-#else
-#define s3c24xx_uart_dt_match NULL
#endif
static struct platform_driver samsung_serial_driver = {
@@ -1736,7 +1737,7 @@ static struct platform_driver samsung_serial_driver = {
.name = "samsung-uart",
.owner = THIS_MODULE,
.pm = SERIAL_SAMSUNG_PM_OPS,
- .of_match_table = s3c24xx_uart_dt_match,
+ .of_match_table = of_match_ptr(s3c24xx_uart_dt_match),
},
};
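
The samsung hunk above can drop its explicit NULL fallback because of_match_ptr() from <linux/of.h> already does the equivalent; roughly:

#ifdef CONFIG_OF
#define of_match_ptr(ptr)	(ptr)
#else
#define of_match_ptr(ptr)	NULL
#endif
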
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index f76b1688c5c8..a7cdec2962dd 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -384,7 +384,7 @@ static void sbd_receive_chars(struct sbd_port *sport)
uart_insert_char(uport, status, M_DUART_OVRUN_ERR, ch, flag);
}
- tty_flip_buffer_push(uport->state->port.tty);
+ tty_flip_buffer_push(&uport->state->port);
}
static void sbd_transmit_chars(struct sbd_port *sport)
diff --git a/drivers/tty/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
index aced1dd923d8..c9735680762d 100644
--- a/drivers/tty/serial/sc26xx.c
+++ b/drivers/tty/serial/sc26xx.c
@@ -136,16 +136,17 @@ static void sc26xx_disable_irq(struct uart_port *port, int mask)
WRITE_SC(port, IMR, up->imr);
}
-static struct tty_struct *receive_chars(struct uart_port *port)
+static bool receive_chars(struct uart_port *port)
{
- struct tty_struct *tty = NULL;
+ struct tty_port *tport = NULL;
int limit = 10000;
unsigned char ch;
char flag;
u8 status;
+ /* FIXME what is this trying to achieve? */
if (port->state != NULL) /* Unopened serial console */
- tty = port->state->port.tty;
+ tport = &port->state->port;
while (limit-- > 0) {
status = READ_SC_PORT(port, SR);
@@ -185,9 +186,9 @@ static struct tty_struct *receive_chars(struct uart_port *port)
if (status & port->ignore_status_mask)
continue;
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(tport, ch, flag);
}
- return tty;
+ return !!tport;
}
static void transmit_chars(struct uart_port *port)
@@ -217,36 +218,36 @@ static void transmit_chars(struct uart_port *port)
static irqreturn_t sc26xx_interrupt(int irq, void *dev_id)
{
struct uart_sc26xx_port *up = dev_id;
- struct tty_struct *tty;
unsigned long flags;
+ bool push;
u8 isr;
spin_lock_irqsave(&up->port[0].lock, flags);
- tty = NULL;
+ push = false;
isr = READ_SC(&up->port[0], ISR);
if (isr & ISR_TXRDYA)
transmit_chars(&up->port[0]);
if (isr & ISR_RXRDYA)
- tty = receive_chars(&up->port[0]);
+ push = receive_chars(&up->port[0]);
spin_unlock(&up->port[0].lock);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (push)
+ tty_flip_buffer_push(&up->port[0].state->port);
spin_lock(&up->port[1].lock);
- tty = NULL;
+ push = false;
if (isr & ISR_TXRDYB)
transmit_chars(&up->port[1]);
if (isr & ISR_RXRDYB)
- tty = receive_chars(&up->port[1]);
+ push = receive_chars(&up->port[1]);
spin_unlock_irqrestore(&up->port[1].lock, flags);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (push)
+ tty_flip_buffer_push(&up->port[1].state->port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index e869eab180be..08dbfb88d42c 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -24,8 +24,9 @@
#include <linux/io.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/spinlock.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/sccnxp.h>
+#include <linux/platform_data/serial-sccnxp.h>
#define SCCNXP_NAME "uart-sccnxp"
#define SCCNXP_MAJOR 204
@@ -107,6 +108,7 @@ enum {
struct sccnxp_port {
struct uart_driver uart;
struct uart_port port[SCCNXP_MAX_UARTS];
+ bool opened[SCCNXP_MAX_UARTS];
const char *name;
int irq;
@@ -123,7 +125,10 @@ struct sccnxp_port {
struct console console;
#endif
- struct mutex sccnxp_mutex;
+ spinlock_t lock;
+
+ bool poll;
+ struct timer_list timer;
struct sccnxp_pdata pdata;
};
@@ -175,14 +180,12 @@ static int sccnxp_update_best_err(int a, int b, int *besterr)
return 1;
}
-struct baud_table {
+static const struct {
u8 csr;
u8 acr;
u8 mr0;
int baud;
-};
-
-const struct baud_table baud_std[] = {
+} baud_std[] = {
{ 0, ACR_BAUD0, MR0_BAUD_NORMAL, 50, },
{ 0, ACR_BAUD1, MR0_BAUD_NORMAL, 75, },
{ 1, ACR_BAUD0, MR0_BAUD_NORMAL, 110, },
@@ -286,10 +289,6 @@ static void sccnxp_handle_rx(struct uart_port *port)
{
u8 sr;
unsigned int ch, flag;
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
-
- if (!tty)
- return;
for (;;) {
sr = sccnxp_port_read(port, SCCNXP_SR_REG);
@@ -305,14 +304,19 @@ static void sccnxp_handle_rx(struct uart_port *port)
if (unlikely(sr)) {
if (sr & SR_BRK) {
port->icount.brk++;
+ sccnxp_port_write(port, SCCNXP_CR_REG,
+ CR_CMD_BREAK_RESET);
if (uart_handle_break(port))
continue;
} else if (sr & SR_PE)
port->icount.parity++;
else if (sr & SR_FE)
port->icount.frame++;
- else if (sr & SR_OVR)
+ else if (sr & SR_OVR) {
port->icount.overrun++;
+ sccnxp_port_write(port, SCCNXP_CR_REG,
+ CR_CMD_STATUS_RESET);
+ }
sr &= port->read_status_mask;
if (sr & SR_BRK)
@@ -334,9 +338,7 @@ static void sccnxp_handle_rx(struct uart_port *port)
uart_insert_char(port, sr, SR_OVR, ch, flag);
}
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->state->port);
}
static void sccnxp_handle_tx(struct uart_port *port)
@@ -378,31 +380,48 @@ static void sccnxp_handle_tx(struct uart_port *port)
uart_write_wakeup(port);
}
-static irqreturn_t sccnxp_ist(int irq, void *dev_id)
+static void sccnxp_handle_events(struct sccnxp_port *s)
{
int i;
u8 isr;
- struct sccnxp_port *s = (struct sccnxp_port *)dev_id;
-
- mutex_lock(&s->sccnxp_mutex);
- for (;;) {
+ do {
isr = sccnxp_read(&s->port[0], SCCNXP_ISR_REG);
isr &= s->imr;
if (!isr)
break;
- dev_dbg(s->port[0].dev, "IRQ status: 0x%02x\n", isr);
-
for (i = 0; i < s->uart.nr; i++) {
- if (isr & ISR_RXRDY(i))
+ if (s->opened[i] && (isr & ISR_RXRDY(i)))
sccnxp_handle_rx(&s->port[i]);
- if (isr & ISR_TXRDY(i))
+ if (s->opened[i] && (isr & ISR_TXRDY(i)))
sccnxp_handle_tx(&s->port[i]);
}
- }
+ } while (1);
+}
+
+static void sccnxp_timer(unsigned long data)
+{
+ struct sccnxp_port *s = (struct sccnxp_port *)data;
+ unsigned long flags;
- mutex_unlock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
+ sccnxp_handle_events(s);
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ if (!timer_pending(&s->timer))
+ mod_timer(&s->timer, jiffies +
+ usecs_to_jiffies(s->pdata.poll_time_us));
+}
+
+static irqreturn_t sccnxp_ist(int irq, void *dev_id)
+{
+ struct sccnxp_port *s = (struct sccnxp_port *)dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+ sccnxp_handle_events(s);
+ spin_unlock_irqrestore(&s->lock, flags);
return IRQ_HANDLED;
}
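
With the rework above, sccnxp_handle_events() is shared by two entry points: the threaded IRQ handler and, when pdata.poll_time_us is set, a self-rearming timer. A condensed sketch of the poll-mode timer (names are illustrative):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_state {
	struct timer_list timer;
	unsigned int poll_time_us;
};

/* Timer callback that re-arms itself, as the poll path above does. */
static void example_poll(unsigned long data)
{
	struct example_state *s = (struct example_state *)data;

	/* ... run the shared event handler under the driver lock ... */

	mod_timer(&s->timer, jiffies + usecs_to_jiffies(s->poll_time_us));
}

static void example_start_polling(struct example_state *s)
{
	setup_timer(&s->timer, example_poll, (unsigned long)s);
	mod_timer(&s->timer, jiffies + usecs_to_jiffies(s->poll_time_us));
}
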
@@ -410,8 +429,9 @@ static irqreturn_t sccnxp_ist(int irq, void *dev_id)
static void sccnxp_start_tx(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
/* Set direction to output */
if (s->flags & SCCNXP_HAVE_IO)
@@ -419,7 +439,7 @@ static void sccnxp_start_tx(struct uart_port *port)
sccnxp_enable_irq(port, IMR_TXRDY);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static void sccnxp_stop_tx(struct uart_port *port)
@@ -430,20 +450,22 @@ static void sccnxp_stop_tx(struct uart_port *port)
static void sccnxp_stop_rx(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_DISABLE);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static unsigned int sccnxp_tx_empty(struct uart_port *port)
{
u8 val;
+ unsigned long flags;
struct sccnxp_port *s = dev_get_drvdata(port->dev);
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
val = sccnxp_port_read(port, SCCNXP_SR_REG);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
return (val & SR_TXEMT) ? TIOCSER_TEMT : 0;
}
@@ -456,28 +478,30 @@ static void sccnxp_enable_ms(struct uart_port *port)
static void sccnxp_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
if (!(s->flags & SCCNXP_HAVE_IO))
return;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
sccnxp_set_bit(port, DTR_OP, mctrl & TIOCM_DTR);
sccnxp_set_bit(port, RTS_OP, mctrl & TIOCM_RTS);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static unsigned int sccnxp_get_mctrl(struct uart_port *port)
{
u8 bitmask, ipr;
+ unsigned long flags;
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;
if (!(s->flags & SCCNXP_HAVE_IO))
return mctrl;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
ipr = ~sccnxp_read(port, SCCNXP_IPCR_REG);
@@ -506,7 +530,7 @@ static unsigned int sccnxp_get_mctrl(struct uart_port *port)
mctrl |= (ipr & bitmask) ? TIOCM_RNG : 0;
}
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
return mctrl;
}
@@ -514,21 +538,23 @@ static unsigned int sccnxp_get_mctrl(struct uart_port *port)
static void sccnxp_break_ctl(struct uart_port *port, int break_state)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
sccnxp_port_write(port, SCCNXP_CR_REG, break_state ?
CR_CMD_START_BREAK : CR_CMD_STOP_BREAK);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static void sccnxp_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
u8 mr1, mr2;
int baud;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
/* Mask termios capabilities we don't support */
termios->c_cflag &= ~CMSPAR;
@@ -595,20 +621,22 @@ static void sccnxp_set_termios(struct uart_port *port,
/* Update timeout according to new baud rate */
uart_update_timeout(port, termios->c_cflag, baud);
+ /* Report actual baudrate back to core */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
/* Enable RX & TX */
sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_ENABLE | CR_TX_ENABLE);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static int sccnxp_startup(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
if (s->flags & SCCNXP_HAVE_IO) {
/* Outputs are controlled manually */
@@ -627,7 +655,9 @@ static int sccnxp_startup(struct uart_port *port)
/* Enable RX interrupt */
sccnxp_enable_irq(port, IMR_RXRDY);
- mutex_unlock(&s->sccnxp_mutex);
+ s->opened[port->line] = 1;
+
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
@@ -635,8 +665,11 @@ static int sccnxp_startup(struct uart_port *port)
static void sccnxp_shutdown(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
+
+ s->opened[port->line] = 0;
/* Disable interrupts */
sccnxp_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
@@ -648,7 +681,7 @@ static void sccnxp_shutdown(struct uart_port *port)
if (s->flags & SCCNXP_HAVE_IO)
sccnxp_set_bit(port, DIR_OP, 0);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static const char *sccnxp_type(struct uart_port *port)
@@ -722,10 +755,11 @@ static void sccnxp_console_write(struct console *co, const char *c, unsigned n)
{
struct sccnxp_port *s = (struct sccnxp_port *)co->data;
struct uart_port *port = &s->port[co->index];
+ unsigned long flags;
- mutex_lock(&s->sccnxp_mutex);
+ spin_lock_irqsave(&s->lock, flags);
uart_console_write(port, c, n, sccnxp_console_putchar);
- mutex_unlock(&s->sccnxp_mutex);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static int sccnxp_console_setup(struct console *co, char *options)
@@ -764,7 +798,7 @@ static int sccnxp_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, s);
- mutex_init(&s->sccnxp_mutex);
+ spin_lock_init(&s->lock);
/* Individual chip settings */
switch (chiptype) {
@@ -861,11 +895,19 @@ static int sccnxp_probe(struct platform_device *pdev)
} else
memcpy(&s->pdata, pdata, sizeof(struct sccnxp_pdata));
- s->irq = platform_get_irq(pdev, 0);
- if (s->irq <= 0) {
- dev_err(&pdev->dev, "Missing irq resource data\n");
- ret = -ENXIO;
- goto err_out;
+ if (s->pdata.poll_time_us) {
+ dev_info(&pdev->dev, "Using poll mode, resolution %u usecs\n",
+ s->pdata.poll_time_us);
+ s->poll = 1;
+ }
+
+ if (!s->poll) {
+ s->irq = platform_get_irq(pdev, 0);
+ if (s->irq < 0) {
+ dev_err(&pdev->dev, "Missing irq resource data\n");
+ ret = -ENXIO;
+ goto err_out;
+ }
}
/* Check input frequency */
@@ -929,13 +971,23 @@ static int sccnxp_probe(struct platform_device *pdev)
if (s->pdata.init)
s->pdata.init();
- ret = devm_request_threaded_irq(&pdev->dev, s->irq, NULL, sccnxp_ist,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(&pdev->dev), s);
- if (!ret)
+ if (!s->poll) {
+ ret = devm_request_threaded_irq(&pdev->dev, s->irq, NULL,
+ sccnxp_ist,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ dev_name(&pdev->dev), s);
+ if (!ret)
+ return 0;
+
+ dev_err(&pdev->dev, "Unable to request IRQ %i\n", s->irq);
+ } else {
+ init_timer(&s->timer);
+ setup_timer(&s->timer, sccnxp_timer, (unsigned long)s);
+ mod_timer(&s->timer, jiffies +
+ usecs_to_jiffies(s->pdata.poll_time_us));
return 0;
-
- dev_err(&pdev->dev, "Unable to reguest IRQ %i\n", s->irq);
+ }
err_out:
platform_set_drvdata(pdev, NULL);
@@ -948,7 +1000,10 @@ static int sccnxp_remove(struct platform_device *pdev)
int i;
struct sccnxp_port *s = platform_get_drvdata(pdev);
- devm_free_irq(&pdev->dev, s->irq, s);
+ if (!s->poll)
+ devm_free_irq(&pdev->dev, s->irq, s);
+ else
+ del_timer_sync(&s->timer);
for (i = 0; i < s->uart.nr; i++)
uart_remove_one_port(&s->uart, &s->port[i]);
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
new file mode 100644
index 000000000000..372de8ade76a
--- /dev/null
+++ b/drivers/tty/serial/serial-tegra.c
@@ -0,0 +1,1401 @@
+/*
+ * serial_tegra.c
+ *
+ * High-speed serial driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pagemap.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/termios.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#include <linux/clk/tegra.h>
+
+#define TEGRA_UART_TYPE "TEGRA_UART"
+#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
+#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
+
+#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
+#define TEGRA_UART_LSR_TXFIFO_FULL 0x100
+#define TEGRA_UART_IER_EORD 0x20
+#define TEGRA_UART_MCR_RTS_EN 0x40
+#define TEGRA_UART_MCR_CTS_EN 0x20
+#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
+ UART_LSR_PE | UART_LSR_FE)
+#define TEGRA_UART_IRDA_CSR 0x08
+#define TEGRA_UART_SIR_ENABLED 0x80
+
+#define TEGRA_UART_TX_PIO 1
+#define TEGRA_UART_TX_DMA 2
+#define TEGRA_UART_MIN_DMA 16
+#define TEGRA_UART_FIFO_SIZE 32
+
+/*
+ * The Tx FIFO trigger level encoding in the Tegra UART is the reverse
+ * of a conventional UART.
+ */
+#define TEGRA_UART_TX_TRIG_16B 0x00
+#define TEGRA_UART_TX_TRIG_8B 0x10
+#define TEGRA_UART_TX_TRIG_4B 0x20
+#define TEGRA_UART_TX_TRIG_1B 0x30
+
+#define TEGRA_UART_MAXIMUM 5
+
+/* Default UART setting when started: 115200 no parity, stop, 8 data bits */
+#define TEGRA_UART_DEFAULT_BAUD 115200
+#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
+
+/* Tx transfer mode */
+#define TEGRA_TX_PIO 1
+#define TEGRA_TX_DMA 2
+
+/**
+ * struct tegra_uart_chip_data - SoC specific data.
+ *
+ * @tx_fifo_full_status: whether a status flag is available for checking a
+ * full Tx FIFO.
+ * @allow_txfifo_reset_fifo_mode: whether the Tx FIFO may be reset while
+ * FIFO mode is enabled. Tegra30 does not allow this.
+ * @support_clk_src_div: whether the clock source supports a clock divider.
+ */
+struct tegra_uart_chip_data {
+ bool tx_fifo_full_status;
+ bool allow_txfifo_reset_fifo_mode;
+ bool support_clk_src_div;
+};
+
+struct tegra_uart_port {
+ struct uart_port uport;
+ const struct tegra_uart_chip_data *cdata;
+
+ struct clk *uart_clk;
+ unsigned int current_baud;
+
+ /* Register shadow */
+ unsigned long fcr_shadow;
+ unsigned long mcr_shadow;
+ unsigned long lcr_shadow;
+ unsigned long ier_shadow;
+ bool rts_active;
+
+ int tx_in_progress;
+ unsigned int tx_bytes;
+
+ bool enable_modem_interrupt;
+
+ bool rx_timeout;
+ int rx_in_progress;
+ int symb_bit;
+ int dma_req_sel;
+
+ struct dma_chan *rx_dma_chan;
+ struct dma_chan *tx_dma_chan;
+ dma_addr_t rx_dma_buf_phys;
+ dma_addr_t tx_dma_buf_phys;
+ unsigned char *rx_dma_buf_virt;
+ unsigned char *tx_dma_buf_virt;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+ dma_cookie_t tx_cookie;
+ dma_cookie_t rx_cookie;
+ int tx_bytes_requested;
+ int rx_bytes_requested;
+};
+
+static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
+static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
+
+static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
+ unsigned long reg)
+{
+ return readl(tup->uport.membase + (reg << tup->uport.regshift));
+}
+
+static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
+ unsigned long reg)
+{
+ writel(val, tup->uport.membase + (reg << tup->uport.regshift));
+}
+
+static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
+{
+ return container_of(u, struct tegra_uart_port, uport);
+}
+
+static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+
+ /*
+ * RI - Ring detector is active.
+ * CD/DCD/CAR - Carrier detect is always active; Linux uses several
+ * names for carrier detect.
+ * DSR - Data Set Ready is reported as active since the hardware
+ * doesn't support it (unclear whether Linux uses this yet).
+ * CTS - Clear To Send is always reported active, as the hardware
+ * handles CTS automatically.
+ */
+ if (tup->enable_modem_interrupt)
+ return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
+ return TIOCM_CTS;
+}
+
+static void set_rts(struct tegra_uart_port *tup, bool active)
+{
+ unsigned long mcr;
+
+ mcr = tup->mcr_shadow;
+ if (active)
+ mcr |= TEGRA_UART_MCR_RTS_EN;
+ else
+ mcr &= ~TEGRA_UART_MCR_RTS_EN;
+ if (mcr != tup->mcr_shadow) {
+ tegra_uart_write(tup, mcr, UART_MCR);
+ tup->mcr_shadow = mcr;
+ }
+ return;
+}
+
+static void set_dtr(struct tegra_uart_port *tup, bool active)
+{
+ unsigned long mcr;
+
+ mcr = tup->mcr_shadow;
+ if (active)
+ mcr |= UART_MCR_DTR;
+ else
+ mcr &= ~UART_MCR_DTR;
+ if (mcr != tup->mcr_shadow) {
+ tegra_uart_write(tup, mcr, UART_MCR);
+ tup->mcr_shadow = mcr;
+ }
+ return;
+}
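
set_rts()/set_dtr() above go through the driver's MCR shadow so the register is only written when a bit actually changes. A minimal sketch of the same shadow-register idiom (hypothetical helper, reusing the types defined earlier in this file):

/* Illustrative only: update MCR through the shadow copy. */
static void example_update_mcr(struct tegra_uart_port *tup, unsigned long bit,
			       bool active)
{
	unsigned long mcr = tup->mcr_shadow;

	if (active)
		mcr |= bit;
	else
		mcr &= ~bit;
	if (mcr != tup->mcr_shadow) {		/* write only on change */
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
}
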
+
+static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ unsigned long mcr;
+ int dtr_enable;
+
+ mcr = tup->mcr_shadow;
+ tup->rts_active = !!(mctrl & TIOCM_RTS);
+ set_rts(tup, tup->rts_active);
+
+ dtr_enable = !!(mctrl & TIOCM_DTR);
+ set_dtr(tup, dtr_enable);
+ return;
+}
+
+static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ unsigned long lcr;
+
+ lcr = tup->lcr_shadow;
+ if (break_ctl)
+ lcr |= UART_LCR_SBC;
+ else
+ lcr &= ~UART_LCR_SBC;
+ tegra_uart_write(tup, lcr, UART_LCR);
+ tup->lcr_shadow = lcr;
+}
+
+/* Wait for a symbol-time. */
+static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
+ unsigned int syms)
+{
+ if (tup->current_baud)
+ udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
+ tup->current_baud));
+}
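
As a worked example of tegra_uart_wait_sym_time() above, assuming 10 bits per symbol (start + 8 data + stop) at 115200 baud:

/*
 *	delay = DIV_ROUND_UP(syms * symb_bit * 1000000, baud)
 *	      = DIV_ROUND_UP(1 * 10 * 1000000, 115200) = 87 us per symbol
 */
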
+
+static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
+{
+ unsigned long fcr = tup->fcr_shadow;
+
+ if (tup->cdata->allow_txfifo_reset_fifo_mode) {
+ fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ tegra_uart_write(tup, fcr, UART_FCR);
+ } else {
+ fcr &= ~UART_FCR_ENABLE_FIFO;
+ tegra_uart_write(tup, fcr, UART_FCR);
+ udelay(60);
+ fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ tegra_uart_write(tup, fcr, UART_FCR);
+ fcr |= UART_FCR_ENABLE_FIFO;
+ tegra_uart_write(tup, fcr, UART_FCR);
+ }
+
+ /* Dummy read to ensure the write is posted */
+ tegra_uart_read(tup, UART_SCR);
+
+ /* Wait for the flush to propagate. */
+ tegra_uart_wait_sym_time(tup, 1);
+}
+
+static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
+{
+ unsigned long rate;
+ unsigned int divisor;
+ unsigned long lcr;
+ int ret;
+
+ if (tup->current_baud == baud)
+ return 0;
+
+ if (tup->cdata->support_clk_src_div) {
+ rate = baud * 16;
+ ret = clk_set_rate(tup->uart_clk, rate);
+ if (ret < 0) {
+ dev_err(tup->uport.dev,
+ "clk_set_rate() failed for rate %lu\n", rate);
+ return ret;
+ }
+ divisor = 1;
+ } else {
+ rate = clk_get_rate(tup->uart_clk);
+ divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
+ }
+
+ lcr = tup->lcr_shadow;
+ lcr |= UART_LCR_DLAB;
+ tegra_uart_write(tup, lcr, UART_LCR);
+
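+ /* With DLAB set, the UART_TX and UART_IER offsets address the DLL/DLM divisor latches. */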
+ tegra_uart_write(tup, divisor & 0xFF, UART_TX);
+ tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);
+
+ lcr &= ~UART_LCR_DLAB;
+ tegra_uart_write(tup, lcr, UART_LCR);
+
+ /* Dummy read to ensure the write is posted */
+ tegra_uart_read(tup, UART_SCR);
+
+ tup->current_baud = baud;
+
+ /* wait two character intervals at new rate */
+ tegra_uart_wait_sym_time(tup, 2);
+ return 0;
+}
+
+static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
+ unsigned long lsr)
+{
+ char flag = TTY_NORMAL;
+
+ if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
+ if (lsr & UART_LSR_OE) {
+ /* Overrun error */
+ flag |= TTY_OVERRUN;
+ tup->uport.icount.overrun++;
+ dev_err(tup->uport.dev, "Got overrun errors\n");
+ } else if (lsr & UART_LSR_PE) {
+ /* Parity error */
+ flag |= TTY_PARITY;
+ tup->uport.icount.parity++;
+ dev_err(tup->uport.dev, "Got Parity errors\n");
+ } else if (lsr & UART_LSR_FE) {
+ flag |= TTY_FRAME;
+ tup->uport.icount.frame++;
+ dev_err(tup->uport.dev, "Got frame errors\n");
+ } else if (lsr & UART_LSR_BI) {
+ dev_err(tup->uport.dev, "Got Break\n");
+ tup->uport.icount.brk++;
+ /* If FIFO read error without any data, reset Rx FIFO */
+ if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
+ tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
+ }
+ }
+ return flag;
+}
+
+static int tegra_uart_request_port(struct uart_port *u)
+{
+ return 0;
+}
+
+static void tegra_uart_release_port(struct uart_port *u)
+{
+ /* Nothing to do here */
+}
+
+static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
+{
+ struct circ_buf *xmit = &tup->uport.state->xmit;
+ int i;
+
+ for (i = 0; i < max_bytes; i++) {
+ BUG_ON(uart_circ_empty(xmit));
+ if (tup->cdata->tx_fifo_full_status) {
+ unsigned long lsr = tegra_uart_read(tup, UART_LSR);
+ if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
+ break;
+ }
+ tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ tup->uport.icount.tx++;
+ }
+}
+
+static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
+ unsigned int bytes)
+{
+ if (bytes > TEGRA_UART_MIN_DMA)
+ bytes = TEGRA_UART_MIN_DMA;
+
+ tup->tx_in_progress = TEGRA_UART_TX_PIO;
+ tup->tx_bytes = bytes;
+ tup->ier_shadow |= UART_IER_THRI;
+ tegra_uart_write(tup, tup->ier_shadow, UART_IER);
+}
+
+static void tegra_uart_tx_dma_complete(void *args)
+{
+ struct tegra_uart_port *tup = args;
+ struct circ_buf *xmit = &tup->uport.state->xmit;
+ struct dma_tx_state state;
+ unsigned long flags;
+ int count;
+
+ dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
+ count = tup->tx_bytes_requested - state.residue;
+ async_tx_ack(tup->tx_dma_desc);
+ spin_lock_irqsave(&tup->uport.lock, flags);
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ tup->tx_in_progress = 0;
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&tup->uport);
+ tegra_uart_start_next_tx(tup);
+ spin_unlock_irqrestore(&tup->uport.lock, flags);
+}
+
+static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
+ unsigned long count)
+{
+ struct circ_buf *xmit = &tup->uport.state->xmit;
+ dma_addr_t tx_phys_addr;
+
+ dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
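+ /*
+ * The DMA length must be a multiple of the 16-byte Tx burst; any
+ * remainder stays in the circ buffer for a later PIO/DMA pass.
+ */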
+ tup->tx_bytes = count & ~(0xF);
+ tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
+ tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
+ tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!tup->tx_dma_desc) {
+ dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
+ return -EIO;
+ }
+
+ tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
+ tup->tx_dma_desc->callback_param = tup;
+ tup->tx_in_progress = TEGRA_UART_TX_DMA;
+ tup->tx_bytes_requested = tup->tx_bytes;
+ tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
+ dma_async_issue_pending(tup->tx_dma_chan);
+ return 0;
+}
+
+static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
+{
+ unsigned long tail;
+ unsigned long count;
+ struct circ_buf *xmit = &tup->uport.state->xmit;
+
+ tail = (unsigned long)&xmit->buf[xmit->tail];
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ if (!count)
+ return;
+
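+ /*
+ * Short runs go out via PIO; if the buffer tail is not DMA aligned,
+ * send just enough by PIO to align it, otherwise hand the whole run
+ * to DMA.
+ */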
+ if (count < TEGRA_UART_MIN_DMA)
+ tegra_uart_start_pio_tx(tup, count);
+ else if (BYTES_TO_ALIGN(tail) > 0)
+ tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
+ else
+ tegra_uart_start_tx_dma(tup, count);
+}
+
+/* Called by serial core driver with u->lock taken. */
+static void tegra_uart_start_tx(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ struct circ_buf *xmit = &u->state->xmit;
+
+ if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
+ tegra_uart_start_next_tx(tup);
+}
+
+static unsigned int tegra_uart_tx_empty(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ unsigned int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&u->lock, flags);
+ if (!tup->tx_in_progress) {
+ unsigned long lsr = tegra_uart_read(tup, UART_LSR);
+ if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
+ ret = TIOCSER_TEMT;
+ }
+ spin_unlock_irqrestore(&u->lock, flags);
+ return ret;
+}
+
+static void tegra_uart_stop_tx(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ struct circ_buf *xmit = &tup->uport.state->xmit;
+ struct dma_tx_state state;
+ int count;
+
+ dmaengine_terminate_all(tup->tx_dma_chan);
+ dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
+ count = tup->tx_bytes_requested - state.residue;
+ async_tx_ack(tup->tx_dma_desc);
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ tup->tx_in_progress = 0;
+ return;
+}
+
+static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
+{
+ struct circ_buf *xmit = &tup->uport.state->xmit;
+
+ tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
+ tup->tx_in_progress = 0;
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&tup->uport);
+ tegra_uart_start_next_tx(tup);
+ return;
+}
+
+static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
+ struct tty_port *tty)
+{
+ do {
+ char flag = TTY_NORMAL;
+ unsigned long lsr = 0;
+ unsigned char ch;
+
+ lsr = tegra_uart_read(tup, UART_LSR);
+ if (!(lsr & UART_LSR_DR))
+ break;
+
+ flag = tegra_uart_decode_rx_error(tup, lsr);
+ ch = (unsigned char) tegra_uart_read(tup, UART_RX);
+ tup->uport.icount.rx++;
+
+ if (!uart_handle_sysrq_char(&tup->uport, ch) && tty)
+ tty_insert_flip_char(tty, ch, flag);
+ } while (1);
+
+ return;
+}
+
+static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
+ struct tty_port *tty, int count)
+{
+ int copied;
+
+ tup->uport.icount.rx += count;
+ if (!tty) {
+ dev_err(tup->uport.dev, "No tty port\n");
+ return;
+ }
+ dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
+ TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+ copied = tty_insert_flip_string(tty,
+ ((unsigned char *)(tup->rx_dma_buf_virt)), count);
+ if (copied != count) {
+ WARN_ON(1);
+ dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
+ }
+ dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
+ TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+}
+
+static void tegra_uart_rx_dma_complete(void *args)
+{
+ struct tegra_uart_port *tup = args;
+ struct uart_port *u = &tup->uport;
+ int count = tup->rx_bytes_requested;
+ struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+ struct tty_port *port = &u->state->port;
+ unsigned long flags;
+
+ async_tx_ack(tup->rx_dma_desc);
+ spin_lock_irqsave(&u->lock, flags);
+
+ /* Deactivate flow control to stop sender */
+ if (tup->rts_active)
+ set_rts(tup, false);
+
+ /* If we are here, DMA is stopped */
+ if (count)
+ tegra_uart_copy_rx_to_tty(tup, port, count);
+
+ tegra_uart_handle_rx_pio(tup, port);
+ if (tty) {
+ tty_flip_buffer_push(port);
+ tty_kref_put(tty);
+ }
+ tegra_uart_start_rx_dma(tup);
+
+ /* Activate flow control to start transfer */
+ if (tup->rts_active)
+ set_rts(tup, true);
+
+ spin_unlock_irqrestore(&u->lock, flags);
+}
+
+static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
+{
+ struct dma_tx_state state;
+ struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+ struct tty_port *port = &tup->uport.state->port;
+ int count;
+
+ /* Deactivate flow control to stop sender */
+ if (tup->rts_active)
+ set_rts(tup, false);
+
+ dmaengine_terminate_all(tup->rx_dma_chan);
+ dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
+ count = tup->rx_bytes_requested - state.residue;
+
+ /* If we are here, DMA is stopped */
+ if (count)
+ tegra_uart_copy_rx_to_tty(tup, port, count);
+
+ tegra_uart_handle_rx_pio(tup, port);
+ if (tty) {
+ tty_flip_buffer_push(port);
+ tty_kref_put(tty);
+ }
+ tegra_uart_start_rx_dma(tup);
+
+ if (tup->rts_active)
+ set_rts(tup, true);
+}
+
+static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
+{
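+ /*
+ * Arm a full-buffer Rx DMA; the completion callback only fires when
+ * the whole buffer fills. Shorter bursts are recovered from the DMA
+ * residue in the EORD/timeout path (tegra_uart_handle_rx_dma).
+ */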
+ unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
+
+ tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
+ tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!tup->rx_dma_desc) {
+ dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
+ return -EIO;
+ }
+
+ tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
+ tup->rx_dma_desc->callback_param = tup;
+ dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
+ count, DMA_TO_DEVICE);
+ tup->rx_bytes_requested = count;
+ tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
+ dma_async_issue_pending(tup->rx_dma_chan);
+ return 0;
+}
+
+static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ unsigned long msr;
+
+ msr = tegra_uart_read(tup, UART_MSR);
+ if (!(msr & UART_MSR_ANY_DELTA))
+ return;
+
+ if (msr & UART_MSR_TERI)
+ tup->uport.icount.rng++;
+ if (msr & UART_MSR_DDSR)
+ tup->uport.icount.dsr++;
+ /* We may only get DDCD at HW init and reset */
+ if (msr & UART_MSR_DDCD)
+ uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
+ /* uart_handle_cts_change() will start or stop Tx accordingly */
+ if (msr & UART_MSR_DCTS)
+ uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
+ return;
+}
+
+static irqreturn_t tegra_uart_isr(int irq, void *data)
+{
+ struct tegra_uart_port *tup = data;
+ struct uart_port *u = &tup->uport;
+ unsigned long iir;
+ unsigned long ier;
+ bool is_rx_int = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&u->lock, flags);
+ while (1) {
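+ /*
+ * Keep servicing sources until the controller reports no pending
+ * interrupt. Rx work is deferred via is_rx_int so the DMA buffer
+ * is drained once per ISR pass, after which the Rx interrupts
+ * disabled below are re-enabled.
+ */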
+ iir = tegra_uart_read(tup, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+ if (is_rx_int) {
+ tegra_uart_handle_rx_dma(tup);
+ if (tup->rx_in_progress) {
+ ier = tup->ier_shadow;
+ ier |= (UART_IER_RLSI | UART_IER_RTOIE |
+ TEGRA_UART_IER_EORD);
+ tup->ier_shadow = ier;
+ tegra_uart_write(tup, ier, UART_IER);
+ }
+ }
+ spin_unlock_irqrestore(&u->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ switch ((iir >> 1) & 0x7) {
+ case 0: /* Modem signal change interrupt */
+ tegra_uart_handle_modem_signal_change(u);
+ break;
+
+ case 1: /* Transmit interrupt only triggered when using PIO */
+ tup->ier_shadow &= ~UART_IER_THRI;
+ tegra_uart_write(tup, tup->ier_shadow, UART_IER);
+ tegra_uart_handle_tx_pio(tup);
+ break;
+
+ case 4: /* End of data */
+ case 6: /* Rx timeout */
+ case 2: /* Receive */
+ if (!is_rx_int) {
+ is_rx_int = true;
+ /* Disable Rx interrupts */
+ ier = tup->ier_shadow;
+ ier |= UART_IER_RDI;
+ tegra_uart_write(tup, ier, UART_IER);
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI |
+ UART_IER_RTOIE | TEGRA_UART_IER_EORD);
+ tup->ier_shadow = ier;
+ tegra_uart_write(tup, ier, UART_IER);
+ }
+ break;
+
+ case 3: /* Receive error */
+ tegra_uart_decode_rx_error(tup,
+ tegra_uart_read(tup, UART_LSR));
+ break;
+
+ case 5: /* break; nothing to handle */
+ case 7: /* break; nothing to handle */
+ break;
+ }
+ }
+}
+
+static void tegra_uart_stop_rx(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+ struct tty_port *port = &u->state->port;
+ struct dma_tx_state state;
+ unsigned long ier;
+ int count;
+
+ if (tup->rts_active)
+ set_rts(tup, false);
+
+ if (!tup->rx_in_progress)
+ return;
+
+ tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
+
+ ier = tup->ier_shadow;
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
+ TEGRA_UART_IER_EORD);
+ tup->ier_shadow = ier;
+ tegra_uart_write(tup, ier, UART_IER);
+ tup->rx_in_progress = 0;
+ if (tup->rx_dma_chan) {
+ dmaengine_terminate_all(tup->rx_dma_chan);
+ dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
+ async_tx_ack(tup->rx_dma_desc);
+ count = tup->rx_bytes_requested - state.residue;
+ tegra_uart_copy_rx_to_tty(tup, port, count);
+ tegra_uart_handle_rx_pio(tup, port);
+ } else {
+ tegra_uart_handle_rx_pio(tup, port);
+ }
+ if (tty) {
+ tty_flip_buffer_push(port);
+ tty_kref_put(tty);
+ }
+ return;
+}
+
+static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
+{
+ unsigned long flags;
+ unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
+ unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
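+ /*
+ * char_time is roughly one 10-bit character in microseconds;
+ * fifo_empty_time is the worst-case time to drain a full Tx FIFO.
+ */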
+ unsigned long wait_time;
+ unsigned long lsr;
+ unsigned long msr;
+ unsigned long mcr;
+
+ /* Disable interrupts */
+ tegra_uart_write(tup, 0, UART_IER);
+
+ lsr = tegra_uart_read(tup, UART_LSR);
+ if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
+ msr = tegra_uart_read(tup, UART_MSR);
+ mcr = tegra_uart_read(tup, UART_MCR);
+ if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
+ dev_err(tup->uport.dev,
+ "Tx Fifo not empty, CTS disabled, waiting\n");
+
+ /* Wait for Tx fifo to be empty */
+ while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
+ wait_time = min(fifo_empty_time, 100lu);
+ udelay(wait_time);
+ fifo_empty_time -= wait_time;
+ if (!fifo_empty_time) {
+ msr = tegra_uart_read(tup, UART_MSR);
+ mcr = tegra_uart_read(tup, UART_MCR);
+ if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
+ (msr & UART_MSR_CTS))
+ dev_err(tup->uport.dev,
+ "Slave not ready\n");
+ break;
+ }
+ lsr = tegra_uart_read(tup, UART_LSR);
+ }
+ }
+
+ spin_lock_irqsave(&tup->uport.lock, flags);
+ /* Reset the Rx and Tx FIFOs */
+ tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
+ tup->current_baud = 0;
+ spin_unlock_irqrestore(&tup->uport.lock, flags);
+
+ clk_disable_unprepare(tup->uart_clk);
+}
+
+static int tegra_uart_hw_init(struct tegra_uart_port *tup)
+{
+ int ret;
+
+ tup->fcr_shadow = 0;
+ tup->mcr_shadow = 0;
+ tup->lcr_shadow = 0;
+ tup->ier_shadow = 0;
+ tup->current_baud = 0;
+
+ clk_prepare_enable(tup->uart_clk);
+
+ /* Reset the UART controller to clear all previous status. */
+ tegra_periph_reset_assert(tup->uart_clk);
+ udelay(10);
+ tegra_periph_reset_deassert(tup->uart_clk);
+
+ tup->rx_in_progress = 0;
+ tup->tx_in_progress = 0;
+
+ /*
+ * Set the trigger level
+ *
+ * For PIO mode:
+ *
+ * For receive, this interrupts the CPU after the given number of
+ * bytes have been received; any remaining bytes are flushed by the
+ * receive timeout interrupt. Rx high watermark is set to 4.
+ *
+ * For transmit, if the transmit interrupt is enabled, this will
+ * interrupt the CPU when the number of entries in the FIFO reaches the
+ * low watermark. Tx low watermark is set to 16 bytes.
+ *
+ * For DMA mode:
+ *
+ * Set the Tx trigger to 16. This should match the DMA burst size
+ * programmed in the DMA registers.
+ */
+ tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
+ tup->fcr_shadow |= UART_FCR_R_TRIG_01;
+ tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
+ tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
+
+ /*
+ * Initialize the UART with default configuration
+ * (115200, N, 8, 1) so that the receive DMA buffer may be
+ * enqueued
+ */
+ tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
+ tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
+ tup->fcr_shadow |= UART_FCR_DMA_SELECT;
+ tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
+
+ ret = tegra_uart_start_rx_dma(tup);
+ if (ret < 0) {
+ dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
+ return ret;
+ }
+ tup->rx_in_progress = 1;
+
+ /*
+ * Enable IE_RXS for the receive status interrupts like line errors.
+ * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
+ *
+ * If using DMA mode, enable EORD instead of the receive interrupt;
+ * EORD fires after the UART is done receiving rather than when the
+ * FIFO "threshold" is reached.
+ *
+ * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs
+ * when data is sitting in the FIFO and cannot be transferred to the
+ * DMA because the DMA size alignment (4 bytes) is not met. EORD is
+ * triggered when the incoming data stream pauses for 4 character
+ * times.
+ *
+ * For pauses in the data which are not aligned to 4 bytes, we get
+ * both EORD and RX_TIMEOUT - SW sees RX_TIMEOUT first, then EORD.
+ */
+ tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | TEGRA_UART_IER_EORD;
+ tegra_uart_write(tup, tup->ier_shadow, UART_IER);
+ return 0;
+}
+
+static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+ unsigned char *dma_buf;
+ dma_addr_t dma_phys;
+ int ret;
+ struct dma_slave_config dma_sconfig;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (!dma_chan) {
+ dev_err(tup->uport.dev,
+ "Dma channel is not available, will try later\n");
+ return -EPROBE_DEFER;
+ }
+
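+ /*
+ * Rx uses a dedicated coherent bounce buffer; Tx maps the circ
+ * xmit buffer directly for streaming DMA to the device.
+ */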
+ if (dma_to_memory) {
+ dma_buf = dma_alloc_coherent(tup->uport.dev,
+ TEGRA_UART_RX_DMA_BUFFER_SIZE,
+ &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ dev_err(tup->uport.dev,
+ "Not able to allocate the dma buffer\n");
+ dma_release_channel(dma_chan);
+ return -ENOMEM;
+ }
+ } else {
+ dma_phys = dma_map_single(tup->uport.dev,
+ tup->uport.state->xmit.buf, UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+ dma_buf = tup->uport.state->xmit.buf;
+ }
+
+ dma_sconfig.slave_id = tup->dma_req_sel;
+ if (dma_to_memory) {
+ dma_sconfig.src_addr = tup->uport.mapbase;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_sconfig.src_maxburst = 4;
+ } else {
+ dma_sconfig.dst_addr = tup->uport.mapbase;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_sconfig.dst_maxburst = 16;
+ }
+
+ ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tup->uport.dev,
+ "Dma slave config failed, err = %d\n", ret);
+ goto scrub;
+ }
+
+ if (dma_to_memory) {
+ tup->rx_dma_chan = dma_chan;
+ tup->rx_dma_buf_virt = dma_buf;
+ tup->rx_dma_buf_phys = dma_phys;
+ } else {
+ tup->tx_dma_chan = dma_chan;
+ tup->tx_dma_buf_virt = dma_buf;
+ tup->tx_dma_buf_phys = dma_phys;
+ }
+ return 0;
+
+scrub:
+ dma_release_channel(dma_chan);
+ return ret;
+}
+
+static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+
+ if (dma_to_memory) {
+ dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
+ tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
+ dma_chan = tup->rx_dma_chan;
+ tup->rx_dma_chan = NULL;
+ tup->rx_dma_buf_phys = 0;
+ tup->rx_dma_buf_virt = NULL;
+ } else {
+ dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ dma_chan = tup->tx_dma_chan;
+ tup->tx_dma_chan = NULL;
+ tup->tx_dma_buf_phys = 0;
+ tup->tx_dma_buf_virt = NULL;
+ }
+ dma_release_channel(dma_chan);
+}
+
+static int tegra_uart_startup(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ int ret;
+
+ ret = tegra_uart_dma_channel_allocate(tup, false);
+ if (ret < 0) {
+ dev_err(u->dev, "Tx Dma allocation failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = tegra_uart_dma_channel_allocate(tup, true);
+ if (ret < 0) {
+ dev_err(u->dev, "Rx Dma allocation failed, err = %d\n", ret);
+ goto fail_rx_dma;
+ }
+
+ ret = tegra_uart_hw_init(tup);
+ if (ret < 0) {
+ dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
+ goto fail_hw_init;
+ }
+
+ ret = request_irq(u->irq, tegra_uart_isr, IRQF_DISABLED,
+ dev_name(u->dev), tup);
+ if (ret < 0) {
+ dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
+ goto fail_hw_init;
+ }
+ return 0;
+
+fail_hw_init:
+ tegra_uart_dma_channel_free(tup, true);
+fail_rx_dma:
+ tegra_uart_dma_channel_free(tup, false);
+ return ret;
+}
+
+static void tegra_uart_shutdown(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+
+ tegra_uart_hw_deinit(tup);
+
+ tup->rx_in_progress = 0;
+ tup->tx_in_progress = 0;
+
+ tegra_uart_dma_channel_free(tup, true);
+ tegra_uart_dma_channel_free(tup, false);
+ free_irq(u->irq, tup);
+}
+
+static void tegra_uart_enable_ms(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+
+ if (tup->enable_modem_interrupt) {
+ tup->ier_shadow |= UART_IER_MSI;
+ tegra_uart_write(tup, tup->ier_shadow, UART_IER);
+ }
+}
+
+static void tegra_uart_set_termios(struct uart_port *u,
+ struct ktermios *termios, struct ktermios *oldtermios)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ unsigned int baud;
+ unsigned long flags;
+ unsigned int lcr;
+ int symb_bit = 1;
+ struct clk *parent_clk = clk_get_parent(tup->uart_clk);
+ unsigned long parent_clk_rate = clk_get_rate(parent_clk);
+ int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
+
+ max_divider *= 16;
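+ /*
+ * The supported baud range is parent_clk_rate / (16 * divisor_max)
+ * up to parent_clk_rate / 16, as passed to uart_get_baud_rate()
+ * below.
+ */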
+ spin_lock_irqsave(&u->lock, flags);
+
+ /* Changing configuration, it is safe to stop any rx now */
+ if (tup->rts_active)
+ set_rts(tup, false);
+
+ /* Clear all interrupts as the configuration is going to change */
+ tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
+ tegra_uart_read(tup, UART_IER);
+ tegra_uart_write(tup, 0, UART_IER);
+ tegra_uart_read(tup, UART_IER);
+
+ /* Parity */
+ lcr = tup->lcr_shadow;
+ lcr &= ~UART_LCR_PARITY;
+
+ /* CMSPAR isn't supported by this driver */
+ termios->c_cflag &= ~CMSPAR;
+
+ if ((termios->c_cflag & PARENB) == PARENB) {
+ symb_bit++;
+ if (termios->c_cflag & PARODD) {
+ lcr |= UART_LCR_PARITY;
+ lcr &= ~UART_LCR_EPAR;
+ lcr &= ~UART_LCR_SPAR;
+ } else {
+ lcr |= UART_LCR_PARITY;
+ lcr |= UART_LCR_EPAR;
+ lcr &= ~UART_LCR_SPAR;
+ }
+ }
+
+ lcr &= ~UART_LCR_WLEN8;
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ lcr |= UART_LCR_WLEN5;
+ symb_bit += 5;
+ break;
+ case CS6:
+ lcr |= UART_LCR_WLEN6;
+ symb_bit += 6;
+ break;
+ case CS7:
+ lcr |= UART_LCR_WLEN7;
+ symb_bit += 7;
+ break;
+ default:
+ lcr |= UART_LCR_WLEN8;
+ symb_bit += 8;
+ break;
+ }
+
+ /* Stop bits */
+ if (termios->c_cflag & CSTOPB) {
+ lcr |= UART_LCR_STOP;
+ symb_bit += 2;
+ } else {
+ lcr &= ~UART_LCR_STOP;
+ symb_bit++;
+ }
+
+ tegra_uart_write(tup, lcr, UART_LCR);
+ tup->lcr_shadow = lcr;
+ tup->symb_bit = symb_bit;
+
+ /* Baud rate. */
+ baud = uart_get_baud_rate(u, termios, oldtermios,
+ parent_clk_rate/max_divider,
+ parent_clk_rate/16);
+ spin_unlock_irqrestore(&u->lock, flags);
+ tegra_set_baudrate(tup, baud);
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ spin_lock_irqsave(&u->lock, flags);
+
+ /* Flow control */
+ if (termios->c_cflag & CRTSCTS) {
+ tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
+ tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
+ tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
+ /* if the upper layer has asked for RTS to be active, assert it here */
+ if (tup->rts_active)
+ set_rts(tup, true);
+ } else {
+ tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
+ tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
+ tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
+ }
+
+ /* update the port timeout based on new settings */
+ uart_update_timeout(u, termios->c_cflag, baud);
+
+ /* Make sure all writes have completed */
+ tegra_uart_read(tup, UART_IER);
+
+ /* Re-enable interrupts */
+ tegra_uart_write(tup, tup->ier_shadow, UART_IER);
+ tegra_uart_read(tup, UART_IER);
+
+ spin_unlock_irqrestore(&u->lock, flags);
+ return;
+}
+
+/*
+ * Flush any TX data submitted for DMA and PIO. Called when the
+ * TX circular buffer is reset.
+ */
+static void tegra_uart_flush_buffer(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+
+ tup->tx_bytes = 0;
+ if (tup->tx_dma_chan)
+ dmaengine_terminate_all(tup->tx_dma_chan);
+ return;
+}
+
+static const char *tegra_uart_type(struct uart_port *u)
+{
+ return TEGRA_UART_TYPE;
+}
+
+static struct uart_ops tegra_uart_ops = {
+ .tx_empty = tegra_uart_tx_empty,
+ .set_mctrl = tegra_uart_set_mctrl,
+ .get_mctrl = tegra_uart_get_mctrl,
+ .stop_tx = tegra_uart_stop_tx,
+ .start_tx = tegra_uart_start_tx,
+ .stop_rx = tegra_uart_stop_rx,
+ .flush_buffer = tegra_uart_flush_buffer,
+ .enable_ms = tegra_uart_enable_ms,
+ .break_ctl = tegra_uart_break_ctl,
+ .startup = tegra_uart_startup,
+ .shutdown = tegra_uart_shutdown,
+ .set_termios = tegra_uart_set_termios,
+ .type = tegra_uart_type,
+ .request_port = tegra_uart_request_port,
+ .release_port = tegra_uart_release_port,
+};
+
+static struct uart_driver tegra_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "tegra_hsuart",
+ .dev_name = "ttyTHS",
+ .cons = 0,
+ .nr = TEGRA_UART_MAXIMUM,
+};
+
+static int tegra_uart_parse_dt(struct platform_device *pdev,
+ struct tegra_uart_port *tup)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 of_dma[2];
+ int port;
+
+ if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
+ of_dma, 2) >= 0) {
+ tup->dma_req_sel = of_dma[1];
+ } else {
+ dev_err(&pdev->dev, "missing dma requestor in device tree\n");
+ return -EINVAL;
+ }
+
+ port = of_alias_get_id(np, "serial");
+ if (port < 0) {
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
+ return port;
+ }
+ tup->uport.line = port;
+
+ tup->enable_modem_interrupt = of_property_read_bool(np,
+ "nvidia,enable-modem-interrupt");
+ return 0;
+}
+
+struct tegra_uart_chip_data tegra20_uart_chip_data = {
+ .tx_fifo_full_status = false,
+ .allow_txfifo_reset_fifo_mode = true,
+ .support_clk_src_div = false,
+};
+
+struct tegra_uart_chip_data tegra30_uart_chip_data = {
+ .tx_fifo_full_status = true,
+ .allow_txfifo_reset_fifo_mode = false,
+ .support_clk_src_div = true,
+};
+
+static struct of_device_id tegra_uart_of_match[] = {
+ {
+ .compatible = "nvidia,tegra30-hsuart",
+ .data = &tegra30_uart_chip_data,
+ }, {
+ .compatible = "nvidia,tegra20-hsuart",
+ .data = &tegra20_uart_chip_data,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
+
+static int tegra_uart_probe(struct platform_device *pdev)
+{
+ struct tegra_uart_port *tup;
+ struct uart_port *u;
+ struct resource *resource;
+ int ret;
+ const struct tegra_uart_chip_data *cdata;
+ const struct of_device_id *match;
+
+ match = of_match_device(tegra_uart_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ cdata = match->data;
+
+ tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
+ if (!tup) {
+ dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
+ return -ENOMEM;
+ }
+
+ ret = tegra_uart_parse_dt(pdev, tup);
+ if (ret < 0)
+ return ret;
+
+ u = &tup->uport;
+ u->dev = &pdev->dev;
+ u->ops = &tegra_uart_ops;
+ u->type = PORT_TEGRA;
+ u->fifosize = 32;
+ tup->cdata = cdata;
+
+ platform_set_drvdata(pdev, tup);
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!resource) {
+ dev_err(&pdev->dev, "No IO memory resource\n");
+ return -ENODEV;
+ }
+
+ u->mapbase = resource->start;
+ u->membase = devm_request_and_ioremap(&pdev->dev, resource);
+ if (!u->membase) {
+ dev_err(&pdev->dev, "memregion/iomap address req failed\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tup->uart_clk)) {
+ dev_err(&pdev->dev, "Couldn't get the clock\n");
+ return PTR_ERR(tup->uart_clk);
+ }
+
+ u->iotype = UPIO_MEM32;
+ u->irq = platform_get_irq(pdev, 0);
+ u->regshift = 2;
+ ret = uart_add_one_port(&tegra_uart_driver, u);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int tegra_uart_remove(struct platform_device *pdev)
+{
+ struct tegra_uart_port *tup = platform_get_drvdata(pdev);
+ struct uart_port *u = &tup->uport;
+
+ uart_remove_one_port(&tegra_uart_driver, u);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_uart_suspend(struct device *dev)
+{
+ struct tegra_uart_port *tup = dev_get_drvdata(dev);
+ struct uart_port *u = &tup->uport;
+
+ return uart_suspend_port(&tegra_uart_driver, u);
+}
+
+static int tegra_uart_resume(struct device *dev)
+{
+ struct tegra_uart_port *tup = dev_get_drvdata(dev);
+ struct uart_port *u = &tup->uport;
+
+ return uart_resume_port(&tegra_uart_driver, u);
+}
+#endif
+
+static const struct dev_pm_ops tegra_uart_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
+};
+
+static struct platform_driver tegra_uart_platform_driver = {
+ .probe = tegra_uart_probe,
+ .remove = tegra_uart_remove,
+ .driver = {
+ .name = "serial-tegra",
+ .of_match_table = tegra_uart_of_match,
+ .pm = &tegra_uart_pm_ops,
+ },
+};
+
+static int __init tegra_uart_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&tegra_uart_driver);
+ if (ret < 0) {
+ pr_err("Could not register %s driver\n",
+ tegra_uart_driver.driver_name);
+ return ret;
+ }
+
+ ret = platform_driver_register(&tegra_uart_platform_driver);
+ if (ret < 0) {
+ pr_err("Uart platfrom driver register failed, e = %d\n", ret);
+ uart_unregister_driver(&tegra_uart_driver);
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit tegra_uart_exit(void)
+{
+ pr_info("Unloading tegra uart driver\n");
+ platform_driver_unregister(&tegra_uart_platform_driver);
+ uart_unregister_driver(&tegra_uart_driver);
+}
+
+module_init(tegra_uart_init);
+module_exit(tegra_uart_exit);
+
+MODULE_ALIAS("platform:serial-tegra");
+MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 2c7230aaefd4..a400002dfa84 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -59,7 +59,8 @@ static struct lock_class_key port_lock_key;
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios);
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
-static void uart_change_pm(struct uart_state *state, int pm_state);
+static void uart_change_pm(struct uart_state *state,
+ enum uart_pm_state pm_state);
static void uart_port_shutdown(struct tty_port *port);
@@ -866,9 +867,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
port->closing_wait = closing_wait;
if (new_info->xmit_fifo_size)
uport->fifosize = new_info->xmit_fifo_size;
- if (port->tty)
- port->tty->low_latency =
- (uport->flags & UPF_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (uport->flags & UPF_LOW_LATENCY) ? 1 : 0;
check_and_exit:
retval = 0;
@@ -1308,9 +1307,10 @@ static void uart_set_termios(struct tty_struct *tty,
}
/*
- * In 2.4.5, calls to this will be serialized via the BKL in
- * linux/drivers/char/tty_io.c:tty_release()
- * linux/drivers/char/tty_io.c:do_tty_handup()
+ * Calls to uart_close() are serialised via the tty_lock in
+ * drivers/tty/tty_io.c:tty_release()
+ * drivers/tty/tty_io.c:do_tty_hangup()
+ * This runs from a workqueue and can sleep for a _short_ time only.
*/
static void uart_close(struct tty_struct *tty, struct file *filp)
{
@@ -1365,7 +1365,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
spin_lock_irqsave(&port->lock, flags);
} else if (!uart_console(uport)) {
spin_unlock_irqrestore(&port->lock, flags);
- uart_change_pm(state, 3);
+ uart_change_pm(state, UART_PM_STATE_OFF);
spin_lock_irqsave(&port->lock, flags);
}
@@ -1437,10 +1437,9 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
}
/*
- * This is called with the BKL held in
- * linux/drivers/char/tty_io.c:do_tty_hangup()
- * We're called from the eventd thread, so we can sleep for
- * a _short_ time only.
+ * Calls to uart_hangup() are serialised by the tty_lock in
+ * drivers/tty/tty_io.c:do_tty_hangup()
+ * This runs from a workqueue and can sleep for a _short_ time only.
*/
static void uart_hangup(struct tty_struct *tty)
{
@@ -1521,8 +1520,8 @@ static void uart_dtr_rts(struct tty_port *port, int onoff)
}
/*
- * calls to uart_open are serialised by the BKL in
- * fs/char_dev.c:chrdev_open()
+ * Calls to uart_open are serialised by the tty_lock in
+ * drivers/tty/tty_io.c:tty_open()
* Note that if this fails, then uart_close() _will_ be called.
*
* In time, we want to scrap the "opening nonpresent ports"
@@ -1564,7 +1563,8 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
*/
tty->driver_data = state;
state->uart_port->state = state;
- tty->low_latency = (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0;
+ state->port.low_latency =
+ (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0;
tty_port_tty_set(port, tty);
/*
@@ -1579,7 +1579,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
* Make sure the device is in D0 state.
*/
if (port->count == 1)
- uart_change_pm(state, 0);
+ uart_change_pm(state, UART_PM_STATE_ON);
/*
* Start up the serial port.
@@ -1620,7 +1620,7 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
{
struct uart_state *state = drv->state + i;
struct tty_port *port = &state->port;
- int pm_state;
+ enum uart_pm_state pm_state;
struct uart_port *uport = state->uart_port;
char stat_buf[32];
unsigned int status;
@@ -1645,12 +1645,12 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
if (capable(CAP_SYS_ADMIN)) {
mutex_lock(&port->mutex);
pm_state = state->pm_state;
- if (pm_state)
- uart_change_pm(state, 0);
+ if (pm_state != UART_PM_STATE_ON)
+ uart_change_pm(state, UART_PM_STATE_ON);
spin_lock_irq(&uport->lock);
status = uport->ops->get_mctrl(uport);
spin_unlock_irq(&uport->lock);
- if (pm_state)
+ if (pm_state != UART_PM_STATE_ON)
uart_change_pm(state, pm_state);
mutex_unlock(&port->mutex);
@@ -1897,7 +1897,8 @@ EXPORT_SYMBOL_GPL(uart_set_options);
*
* Locking: port->mutex has to be held
*/
-static void uart_change_pm(struct uart_state *state, int pm_state)
+static void uart_change_pm(struct uart_state *state,
+ enum uart_pm_state pm_state)
{
struct uart_port *port = state->uart_port;
@@ -1982,7 +1983,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
console_stop(uport->cons);
if (console_suspend_enabled || !uart_console(uport))
- uart_change_pm(state, 3);
+ uart_change_pm(state, UART_PM_STATE_OFF);
mutex_unlock(&port->mutex);
@@ -2027,7 +2028,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
termios = port->tty->termios;
if (console_suspend_enabled)
- uart_change_pm(state, 0);
+ uart_change_pm(state, UART_PM_STATE_ON);
uport->ops->set_termios(uport, &termios, NULL);
if (console_suspend_enabled)
console_start(uport->cons);
@@ -2037,7 +2038,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
const struct uart_ops *ops = uport->ops;
int ret;
- uart_change_pm(state, 0);
+ uart_change_pm(state, UART_PM_STATE_ON);
spin_lock_irq(&uport->lock);
ops->set_mctrl(uport, 0);
spin_unlock_irq(&uport->lock);
@@ -2137,7 +2138,7 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
uart_report_port(drv, port);
/* Power up port for set_mctrl() */
- uart_change_pm(state, 0);
+ uart_change_pm(state, UART_PM_STATE_ON);
/*
* Ensure that the modem control lines are de-activated.
@@ -2161,7 +2162,7 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
* console if we have one.
*/
if (!uart_console(port))
- uart_change_pm(state, 3);
+ uart_change_pm(state, UART_PM_STATE_OFF);
}
}
@@ -2588,7 +2589,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
}
state->uart_port = uport;
- state->pm_state = -1;
+ state->pm_state = UART_PM_STATE_UNDEFINED;
uport->cons = drv->cons;
uport->state = state;
@@ -2642,6 +2643,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
+ int ret = 0;
BUG_ON(in_interrupt());
@@ -2656,6 +2658,11 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
* succeeding while we shut down the port.
*/
mutex_lock(&port->mutex);
+ if (!state->uart_port) {
+ mutex_unlock(&port->mutex);
+ ret = -EINVAL;
+ goto out;
+ }
uport->flags |= UPF_DEAD;
mutex_unlock(&port->mutex);
@@ -2679,9 +2686,10 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
uport->type = PORT_UNKNOWN;
state->uart_port = NULL;
+out:
mutex_unlock(&port_mutex);
- return 0;
+ return ret;
}
/*
@@ -2715,22 +2723,17 @@ EXPORT_SYMBOL(uart_match_port);
*/
void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
{
- struct uart_state *state = uport->state;
- struct tty_port *port = &state->port;
- struct tty_ldisc *ld = NULL;
- struct pps_event_time ts;
+ struct tty_port *port = &uport->state->port;
struct tty_struct *tty = port->tty;
+ struct tty_ldisc *ld = tty ? tty_ldisc_ref(tty) : NULL;
- if (tty)
- ld = tty_ldisc_ref(tty);
- if (ld && ld->ops->dcd_change)
- pps_get_ts(&ts);
+ if (ld) {
+ if (ld->ops->dcd_change)
+ ld->ops->dcd_change(tty, status);
+ tty_ldisc_deref(ld);
+ }
uport->icount.dcd++;
-#ifdef CONFIG_HARD_PPS
- if ((uport->flags & UPF_HARDPPS_CD) && status)
- hardpps();
-#endif
if (port->flags & ASYNC_CHECK_CD) {
if (status)
@@ -2738,11 +2741,6 @@ void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
else if (tty)
tty_hangup(tty);
}
-
- if (ld && ld->ops->dcd_change)
- ld->ops->dcd_change(tty, status, &ts);
- if (ld)
- tty_ldisc_deref(ld);
}
EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
@@ -2790,10 +2788,10 @@ EXPORT_SYMBOL_GPL(uart_handle_cts_change);
void uart_insert_char(struct uart_port *port, unsigned int status,
unsigned int overrun, unsigned int ch, unsigned int flag)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
if ((status & port->ignore_status_mask & ~overrun) == 0)
- if (tty_insert_flip_char(tty, ch, flag) == 0)
+ if (tty_insert_flip_char(tport, ch, flag) == 0)
++port->icount.buf_overrun;
/*
@@ -2801,7 +2799,7 @@ void uart_insert_char(struct uart_port *port, unsigned int status,
* it doesn't affect the current character.
*/
if (status & ~port->ignore_status_mask & overrun)
- if (tty_insert_flip_char(tty, 0, TTY_OVERRUN) == 0)
+ if (tty_insert_flip_char(tport, 0, TTY_OVERRUN) == 0)
++port->icount.buf_overrun;
}
EXPORT_SYMBOL_GPL(uart_insert_char);
diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
index 9bd004f9da89..e1caa99e3d3b 100644
--- a/drivers/tty/serial/serial_ks8695.c
+++ b/drivers/tty/serial/serial_ks8695.c
@@ -153,7 +153,6 @@ static void ks8695uart_disable_ms(struct uart_port *port)
static irqreturn_t ks8695uart_rx_chars(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct tty_struct *tty = port->state->port.tty;
unsigned int status, ch, lsr, flg, max_count = 256;
status = UART_GET_LSR(port); /* clears pending LSR interrupts */
@@ -200,7 +199,7 @@ static irqreturn_t ks8695uart_rx_chars(int irq, void *dev_id)
ignore_char:
status = UART_GET_LSR(port);
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index b52b21aeb250..fe48a0c2b4ca 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -277,7 +277,6 @@ static void serial_txx9_initialize(struct uart_port *port)
static inline void
receive_chars(struct uart_txx9_port *up, unsigned int *status)
{
- struct tty_struct *tty = up->port.state->port.tty;
unsigned char ch;
unsigned int disr = *status;
int max_count = 256;
@@ -346,7 +345,7 @@ receive_chars(struct uart_txx9_port *up, unsigned int *status)
disr = sio_in(up, TXX9_SIDISR);
} while (!(disr & TXX9_SIDISR_UVALID) && (max_count-- > 0));
spin_unlock(&up->port.lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&up->port.state->port);
spin_lock(&up->port.lock);
*status = disr;
}
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 61477567423f..156418619949 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -596,7 +596,7 @@ static void sci_transmit_chars(struct uart_port *port)
static void sci_receive_chars(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
int i, count, copied = 0;
unsigned short status;
unsigned char flag;
@@ -607,7 +607,7 @@ static void sci_receive_chars(struct uart_port *port)
while (1) {
/* Don't copy more bytes than there is room for in the buffer */
- count = tty_buffer_request_room(tty, sci_rxfill(port));
+ count = tty_buffer_request_room(tport, sci_rxfill(port));
/* If for any reason we can't copy more data, we're done! */
if (count == 0)
@@ -619,7 +619,7 @@ static void sci_receive_chars(struct uart_port *port)
sci_port->break_flag)
count = 0;
else
- tty_insert_flip_char(tty, c, TTY_NORMAL);
+ tty_insert_flip_char(tport, c, TTY_NORMAL);
} else {
for (i = 0; i < count; i++) {
char c = serial_port_in(port, SCxRDR);
@@ -661,7 +661,7 @@ static void sci_receive_chars(struct uart_port *port)
} else
flag = TTY_NORMAL;
- tty_insert_flip_char(tty, c, flag);
+ tty_insert_flip_char(tport, c, flag);
}
}
@@ -674,7 +674,7 @@ static void sci_receive_chars(struct uart_port *port)
if (copied) {
/* Tell the rest of the system the news. New characters! */
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
} else {
serial_port_in(port, SCxSR); /* dummy read */
serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
@@ -720,7 +720,7 @@ static int sci_handle_errors(struct uart_port *port)
{
int copied = 0;
unsigned short status = serial_port_in(port, SCxSR);
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
/*
@@ -731,7 +731,7 @@ static int sci_handle_errors(struct uart_port *port)
port->icount.overrun++;
/* overrun error */
- if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
+ if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
copied++;
dev_notice(port->dev, "overrun error");
@@ -755,7 +755,7 @@ static int sci_handle_errors(struct uart_port *port)
dev_dbg(port->dev, "BREAK detected\n");
- if (tty_insert_flip_char(tty, 0, TTY_BREAK))
+ if (tty_insert_flip_char(tport, 0, TTY_BREAK))
copied++;
}
@@ -763,7 +763,7 @@ static int sci_handle_errors(struct uart_port *port)
/* frame error */
port->icount.frame++;
- if (tty_insert_flip_char(tty, 0, TTY_FRAME))
+ if (tty_insert_flip_char(tport, 0, TTY_FRAME))
copied++;
dev_notice(port->dev, "frame error\n");
@@ -774,21 +774,21 @@ static int sci_handle_errors(struct uart_port *port)
/* parity error */
port->icount.parity++;
- if (tty_insert_flip_char(tty, 0, TTY_PARITY))
+ if (tty_insert_flip_char(tport, 0, TTY_PARITY))
copied++;
dev_notice(port->dev, "parity error");
}
if (copied)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
return copied;
}
static int sci_handle_fifo_overrun(struct uart_port *port)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
struct plat_sci_reg *reg;
int copied = 0;
@@ -802,8 +802,8 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
port->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+ tty_flip_buffer_push(tport);
dev_notice(port->dev, "overrun error\n");
copied++;
@@ -816,7 +816,7 @@ static int sci_handle_breaks(struct uart_port *port)
{
int copied = 0;
unsigned short status = serial_port_in(port, SCxSR);
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
if (uart_handle_break(port))
@@ -831,14 +831,14 @@ static int sci_handle_breaks(struct uart_port *port)
port->icount.brk++;
/* Notify of BREAK */
- if (tty_insert_flip_char(tty, 0, TTY_BREAK))
+ if (tty_insert_flip_char(tport, 0, TTY_BREAK))
copied++;
dev_dbg(port->dev, "BREAK detected\n");
}
if (copied)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
copied += sci_handle_fifo_overrun(port);
@@ -1259,13 +1259,13 @@ static void sci_dma_tx_complete(void *arg)
}
/* Locking: called with port lock held */
-static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
- size_t count)
+static int sci_dma_rx_push(struct sci_port *s, size_t count)
{
struct uart_port *port = &s->port;
+ struct tty_port *tport = &port->state->port;
int i, active, room;
- room = tty_buffer_request_room(tty, count);
+ room = tty_buffer_request_room(tport, count);
if (s->active_rx == s->cookie_rx[0]) {
active = 0;
@@ -1283,7 +1283,7 @@ static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
return room;
for (i = 0; i < room; i++)
- tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
+ tty_insert_flip_char(tport, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
TTY_NORMAL);
port->icount.rx += room;
@@ -1295,7 +1295,6 @@ static void sci_dma_rx_complete(void *arg)
{
struct sci_port *s = arg;
struct uart_port *port = &s->port;
- struct tty_struct *tty = port->state->port.tty;
unsigned long flags;
int count;
@@ -1303,14 +1302,14 @@ static void sci_dma_rx_complete(void *arg)
spin_lock_irqsave(&port->lock, flags);
- count = sci_dma_rx_push(s, tty, s->buf_len_rx);
+ count = sci_dma_rx_push(s, s->buf_len_rx);
mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
spin_unlock_irqrestore(&port->lock, flags);
if (count)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
schedule_work(&s->work_rx);
}
@@ -1404,7 +1403,6 @@ static void work_fn_rx(struct work_struct *work)
if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
DMA_SUCCESS) {
/* Handle incomplete DMA receive */
- struct tty_struct *tty = port->state->port.tty;
struct dma_chan *chan = s->chan_rx;
struct shdma_desc *sh_desc = container_of(desc,
struct shdma_desc, async_tx);
@@ -1416,11 +1414,11 @@ static void work_fn_rx(struct work_struct *work)
sh_desc->partial, sh_desc->cookie);
spin_lock_irqsave(&port->lock, flags);
- count = sci_dma_rx_push(s, tty, sh_desc->partial);
+ count = sci_dma_rx_push(s, sh_desc->partial);
spin_unlock_irqrestore(&port->lock, flags);
if (count)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
sci_submit_rx(s);
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 5da5cb962769..6bbfe9934a4d 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -75,6 +75,20 @@ static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
.line = 2,
},
},
+ [3] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 3,
+ },
+ },
+ [4] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 4,
+ },
+ },
};
static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
@@ -192,11 +206,6 @@ static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
{
unsigned int ch, rx_count = 0;
- struct tty_struct *tty;
-
- tty = tty_port_tty_get(&port->state->port);
- if (!tty)
- return -ENODEV;
while (!(rd_regl(port, SIRFUART_RX_FIFO_STATUS) &
SIRFUART_FIFOEMPTY_MASK(port))) {
@@ -210,8 +219,7 @@ sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
}
port->icount.rx += rx_count;
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->state->port);
return rx_count;
}
@@ -245,6 +253,7 @@ static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
struct uart_port *port = &sirfport->port;
struct uart_state *state = port->state;
struct circ_buf *xmit = &port->state->xmit;
+ spin_lock(&port->lock);
intr_status = rd_regl(port, SIRFUART_INT_STATUS);
wr_regl(port, SIRFUART_INT_STATUS, intr_status);
intr_status &= rd_regl(port, SIRFUART_INT_EN);
@@ -254,6 +263,7 @@ static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
goto recv_char;
uart_insert_char(port, intr_status,
SIRFUART_RX_OFLOW, 0, TTY_BREAK);
+ spin_unlock(&port->lock);
return IRQ_HANDLED;
}
if (intr_status & SIRFUART_RX_OFLOW)
@@ -286,6 +296,7 @@ recv_char:
sirfsoc_uart_pio_rx_chars(port, SIRFSOC_UART_IO_RX_MAX_CNT);
if (intr_status & SIRFUART_TX_INT_EN) {
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ spin_unlock(&port->lock);
return IRQ_HANDLED;
} else {
sirfsoc_uart_pio_tx_chars(sirfport,
@@ -296,6 +307,7 @@ recv_char:
sirfsoc_uart_stop_tx(port);
}
}
+ spin_unlock(&port->lock);
return IRQ_HANDLED;
}
@@ -345,7 +357,6 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
struct ktermios *old)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- unsigned long ioclk_rate;
unsigned long config_reg = 0;
unsigned long baud_rate;
unsigned long setted_baud;
@@ -357,7 +368,6 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
int threshold_div;
int temp;
- ioclk_rate = 150000000;
switch (termios->c_cflag & CSIZE) {
default:
case CS8:
@@ -413,14 +423,17 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
sirfsoc_uart_disable_ms(port);
}
- /* common rate: fast calculation */
- for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
- if (baud_rate == baudrate_to_regv[ic].baud_rate)
- clk_div_reg = baudrate_to_regv[ic].reg_val;
+ if (port->uartclk == 150000000) {
+ /* common rate: fast calculation */
+ for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
+ if (baud_rate == baudrate_to_regv[ic].baud_rate)
+ clk_div_reg = baudrate_to_regv[ic].reg_val;
+ }
+
setted_baud = baud_rate;
/* arbitary rate setting */
if (unlikely(clk_div_reg == 0))
- clk_div_reg = sirfsoc_calc_sample_div(baud_rate, ioclk_rate,
+ clk_div_reg = sirfsoc_calc_sample_div(baud_rate, port->uartclk,
&setted_baud);
wr_regl(port, SIRFUART_DIVISOR, clk_div_reg);
@@ -679,6 +692,14 @@ int sirfsoc_uart_probe(struct platform_device *pdev)
goto err;
}
+ sirfport->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sirfport->clk)) {
+ ret = PTR_ERR(sirfport->clk);
+ goto clk_err;
+ }
+ clk_prepare_enable(sirfport->clk);
+ port->uartclk = clk_get_rate(sirfport->clk);
+
port->ops = &sirfsoc_uart_ops;
spin_lock_init(&port->lock);
@@ -692,6 +713,9 @@ int sirfsoc_uart_probe(struct platform_device *pdev)
return 0;
port_err:
+ clk_disable_unprepare(sirfport->clk);
+ clk_put(sirfport->clk);
+clk_err:
platform_set_drvdata(pdev, NULL);
if (sirfport->hw_flow_ctrl)
pinctrl_put(sirfport->p);
@@ -706,6 +730,8 @@ static int sirfsoc_uart_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (sirfport->hw_flow_ctrl)
pinctrl_put(sirfport->p);
+ clk_disable_unprepare(sirfport->clk);
+ clk_put(sirfport->clk);
uart_remove_one_port(&sirfsoc_uart_drv, port);
return 0;
}
@@ -729,6 +755,7 @@ static int sirfsoc_uart_resume(struct platform_device *pdev)
static struct of_device_id sirfsoc_uart_ids[] = {
{ .compatible = "sirf,prima2-uart", },
+ { .compatible = "sirf,marco-uart", },
{}
};
MODULE_DEVICE_TABLE(of, sirfsoc_serial_of_match);
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index 6e207fdc2fed..85328ba0c4e3 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -139,7 +139,7 @@
#define SIRFSOC_UART_MINOR 0
#define SIRFUART_PORT_NAME "sirfsoc-uart"
#define SIRFUART_MAP_SIZE 0x200
-#define SIRFSOC_UART_NR 3
+#define SIRFSOC_UART_NR 5
#define SIRFSOC_PORT_TYPE 0xa5
/* Baud Rate Calculation */
@@ -163,6 +163,7 @@ struct sirfsoc_uart_port {
struct uart_port port;
struct pinctrl *p;
+ struct clk *clk;
};
/* Hardware Flow Control */
diff --git a/drivers/tty/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index 1c6de9f58699..f51ffdc696fd 100644
--- a/drivers/tty/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
@@ -457,8 +457,8 @@ static int sn_debug_printf(const char *fmt, ...)
static void
sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
{
+ struct tty_port *tport = NULL;
int ch;
- struct tty_struct *tty;
if (!port) {
printk(KERN_ERR "sn_receive_chars - port NULL so can't receive\n");
@@ -472,11 +472,7 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
if (port->sc_port.state) {
/* The serial_core stuffs are initialized, use them */
- tty = port->sc_port.state->port.tty;
- }
- else {
- /* Not registered yet - can't pass to tty layer. */
- tty = NULL;
+ tport = &port->sc_port.state->port;
}
while (port->sc_ops->sal_input_pending()) {
@@ -516,15 +512,15 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
#endif /* CONFIG_MAGIC_SYSRQ */
/* record the character to pass up to the tty layer */
- if (tty) {
- if(tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
+ if (tport) {
+ if (tty_insert_flip_char(tport, ch, TTY_NORMAL) == 0)
break;
}
port->sc_port.icount.rx++;
}
- if (tty)
- tty_flip_buffer_push(tty);
+ if (tport)
+ tty_flip_buffer_push(tport);
}
/**
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index b9bf9c53f7fd..ba60708053e0 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -72,7 +72,7 @@ static void transmit_chars_write(struct uart_port *port, struct circ_buf *xmit)
}
}
-static int receive_chars_getchar(struct uart_port *port, struct tty_struct *tty)
+static int receive_chars_getchar(struct uart_port *port)
{
int saw_console_brk = 0;
int limit = 10000;
@@ -99,7 +99,7 @@ static int receive_chars_getchar(struct uart_port *port, struct tty_struct *tty)
uart_handle_dcd_change(port, 1);
}
- if (tty == NULL) {
+ if (port->state == NULL) {
uart_handle_sysrq_char(port, c);
continue;
}
@@ -109,13 +109,13 @@ static int receive_chars_getchar(struct uart_port *port, struct tty_struct *tty)
if (uart_handle_sysrq_char(port, c))
continue;
- tty_insert_flip_char(tty, c, TTY_NORMAL);
+ tty_insert_flip_char(&port->state->port, c, TTY_NORMAL);
}
return saw_console_brk;
}
-static int receive_chars_read(struct uart_port *port, struct tty_struct *tty)
+static int receive_chars_read(struct uart_port *port)
{
int saw_console_brk = 0;
int limit = 10000;
@@ -152,12 +152,13 @@ static int receive_chars_read(struct uart_port *port, struct tty_struct *tty)
for (i = 0; i < bytes_read; i++)
uart_handle_sysrq_char(port, con_read_page[i]);
- if (tty == NULL)
+ if (port->state == NULL)
continue;
port->icount.rx += bytes_read;
- tty_insert_flip_string(tty, con_read_page, bytes_read);
+ tty_insert_flip_string(&port->state->port, con_read_page,
+ bytes_read);
}
return saw_console_brk;
@@ -165,7 +166,7 @@ static int receive_chars_read(struct uart_port *port, struct tty_struct *tty)
struct sunhv_ops {
void (*transmit_chars)(struct uart_port *port, struct circ_buf *xmit);
- int (*receive_chars)(struct uart_port *port, struct tty_struct *tty);
+ int (*receive_chars)(struct uart_port *port);
};
static struct sunhv_ops bychar_ops = {
@@ -180,17 +181,17 @@ static struct sunhv_ops bywrite_ops = {
static struct sunhv_ops *sunhv_ops = &bychar_ops;
-static struct tty_struct *receive_chars(struct uart_port *port)
+static struct tty_port *receive_chars(struct uart_port *port)
{
- struct tty_struct *tty = NULL;
+ struct tty_port *tport = NULL;
if (port->state != NULL) /* Unopened serial console */
- tty = port->state->port.tty;
+ tport = &port->state->port;
- if (sunhv_ops->receive_chars(port, tty))
+ if (sunhv_ops->receive_chars(port))
sun_do_break();
- return tty;
+ return tport;
}
static void transmit_chars(struct uart_port *port)
@@ -213,16 +214,16 @@ static void transmit_chars(struct uart_port *port)
static irqreturn_t sunhv_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct tty_struct *tty;
+ struct tty_port *tport;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- tty = receive_chars(port);
+ tport = receive_chars(port);
transmit_chars(port);
spin_unlock_irqrestore(&port->lock, flags);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (tport)
+ tty_flip_buffer_push(tport);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index bd8b3b634103..8de2213664e0 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -107,11 +107,11 @@ static __inline__ void sunsab_cec_wait(struct uart_sunsab_port *up)
udelay(1);
}
-static struct tty_struct *
+static struct tty_port *
receive_chars(struct uart_sunsab_port *up,
union sab82532_irq_status *stat)
{
- struct tty_struct *tty = NULL;
+ struct tty_port *port = NULL;
unsigned char buf[32];
int saw_console_brk = 0;
int free_fifo = 0;
@@ -119,7 +119,7 @@ receive_chars(struct uart_sunsab_port *up,
int i;
if (up->port.state != NULL) /* Unopened serial console */
- tty = up->port.state->port.tty;
+ port = &up->port.state->port;
/* Read number of BYTES (Character + Status) available. */
if (stat->sreg.isr0 & SAB82532_ISR0_RPF) {
@@ -136,7 +136,7 @@ receive_chars(struct uart_sunsab_port *up,
if (stat->sreg.isr0 & SAB82532_ISR0_TIME) {
sunsab_cec_wait(up);
writeb(SAB82532_CMDR_RFRD, &up->regs->w.cmdr);
- return tty;
+ return port;
}
if (stat->sreg.isr0 & SAB82532_ISR0_RFO)
@@ -160,11 +160,6 @@ receive_chars(struct uart_sunsab_port *up,
for (i = 0; i < count; i++) {
unsigned char ch = buf[i], flag;
- if (tty == NULL) {
- uart_handle_sysrq_char(&up->port, ch);
- continue;
- }
-
flag = TTY_NORMAL;
up->port.icount.rx++;
@@ -213,15 +208,15 @@ receive_chars(struct uart_sunsab_port *up,
if ((stat->sreg.isr0 & (up->port.ignore_status_mask & 0xff)) == 0 &&
(stat->sreg.isr1 & ((up->port.ignore_status_mask >> 8) & 0xff)) == 0)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(port, ch, flag);
if (stat->sreg.isr0 & SAB82532_ISR0_RFO)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
}
if (saw_console_brk)
sun_do_break();
- return tty;
+ return port;
}
static void sunsab_stop_tx(struct uart_port *);
@@ -304,7 +299,7 @@ static void check_status(struct uart_sunsab_port *up,
static irqreturn_t sunsab_interrupt(int irq, void *dev_id)
{
struct uart_sunsab_port *up = dev_id;
- struct tty_struct *tty;
+ struct tty_port *port = NULL;
union sab82532_irq_status status;
unsigned long flags;
unsigned char gis;
@@ -318,12 +313,11 @@ static irqreturn_t sunsab_interrupt(int irq, void *dev_id)
if (gis & 2)
status.sreg.isr1 = readb(&up->regs->r.isr1);
- tty = NULL;
if (status.stat) {
if ((status.sreg.isr0 & (SAB82532_ISR0_TCD | SAB82532_ISR0_TIME |
SAB82532_ISR0_RFO | SAB82532_ISR0_RPF)) ||
(status.sreg.isr1 & SAB82532_ISR1_BRK))
- tty = receive_chars(up, &status);
+ port = receive_chars(up, &status);
if ((status.sreg.isr0 & SAB82532_ISR0_CDSC) ||
(status.sreg.isr1 & SAB82532_ISR1_CSC))
check_status(up, &status);
@@ -333,8 +327,8 @@ static irqreturn_t sunsab_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&up->port.lock, flags);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (port)
+ tty_flip_buffer_push(port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 220da3f9724f..e343d6670854 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -315,10 +315,10 @@ static void sunsu_enable_ms(struct uart_port *port)
spin_unlock_irqrestore(&up->port.lock, flags);
}
-static struct tty_struct *
+static void
receive_chars(struct uart_sunsu_port *up, unsigned char *status)
{
- struct tty_struct *tty = up->port.state->port.tty;
+ struct tty_port *port = &up->port.state->port;
unsigned char ch, flag;
int max_count = 256;
int saw_console_brk = 0;
@@ -376,22 +376,20 @@ receive_chars(struct uart_sunsu_port *up, unsigned char *status)
if (uart_handle_sysrq_char(&up->port, ch))
goto ignore_char;
if ((*status & up->port.ignore_status_mask) == 0)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(port, ch, flag);
if (*status & UART_LSR_OE)
/*
* Overrun is special, since it's reported
* immediately, and doesn't affect the current
* character.
*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
ignore_char:
*status = serial_inp(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
if (saw_console_brk)
sun_do_break();
-
- return tty;
}
static void transmit_chars(struct uart_sunsu_port *up)
@@ -460,20 +458,16 @@ static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&up->port.lock, flags);
do {
- struct tty_struct *tty;
-
status = serial_inp(up, UART_LSR);
- tty = NULL;
if (status & UART_LSR_DR)
- tty = receive_chars(up, &status);
+ receive_chars(up, &status);
check_modem_status(up);
if (status & UART_LSR_THRE)
transmit_chars(up);
spin_unlock_irqrestore(&up->port.lock, flags);
- if (tty)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&up->port.state->port);
spin_lock_irqsave(&up->port.lock, flags);
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index aef4fab957c3..27669ff3d446 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -323,17 +323,15 @@ static void sunzilog_kbdms_receive_chars(struct uart_sunzilog_port *up,
}
}
-static struct tty_struct *
+static struct tty_port *
sunzilog_receive_chars(struct uart_sunzilog_port *up,
struct zilog_channel __iomem *channel)
{
- struct tty_struct *tty;
+ struct tty_port *port = NULL;
unsigned char ch, r1, flag;
- tty = NULL;
- if (up->port.state != NULL && /* Unopened serial console */
- up->port.state->port.tty != NULL) /* Keyboard || mouse */
- tty = up->port.state->port.tty;
+ if (up->port.state != NULL) /* Unopened serial console */
+ port = &up->port.state->port;
for (;;) {
@@ -366,11 +364,6 @@ sunzilog_receive_chars(struct uart_sunzilog_port *up,
continue;
}
- if (tty == NULL) {
- uart_handle_sysrq_char(&up->port, ch);
- continue;
- }
-
/* A real serial line, record the character and status. */
flag = TTY_NORMAL;
up->port.icount.rx++;
@@ -400,13 +393,13 @@ sunzilog_receive_chars(struct uart_sunzilog_port *up,
if (up->port.ignore_status_mask == 0xff ||
(r1 & up->port.ignore_status_mask) == 0) {
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(port, ch, flag);
}
if (r1 & Rx_OVR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
}
- return tty;
+ return port;
}
static void sunzilog_status_handle(struct uart_sunzilog_port *up,
@@ -539,21 +532,21 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
while (up) {
struct zilog_channel __iomem *channel
= ZILOG_CHANNEL_FROM_PORT(&up->port);
- struct tty_struct *tty;
+ struct tty_port *port;
unsigned char r3;
spin_lock(&up->port.lock);
r3 = read_zsreg(channel, R3);
/* Channel A */
- tty = NULL;
+ port = NULL;
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
writeb(RES_H_IUS, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
if (r3 & CHARxIP)
- tty = sunzilog_receive_chars(up, channel);
+ port = sunzilog_receive_chars(up, channel);
if (r3 & CHAEXT)
sunzilog_status_handle(up, channel);
if (r3 & CHATxIP)
@@ -561,22 +554,22 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
}
spin_unlock(&up->port.lock);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (port)
+ tty_flip_buffer_push(port);
/* Channel B */
up = up->next;
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
spin_lock(&up->port.lock);
- tty = NULL;
+ port = NULL;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
writeb(RES_H_IUS, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
if (r3 & CHBRxIP)
- tty = sunzilog_receive_chars(up, channel);
+ port = sunzilog_receive_chars(up, channel);
if (r3 & CHBEXT)
sunzilog_status_handle(up, channel);
if (r3 & CHBTxIP)
@@ -584,8 +577,8 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
}
spin_unlock(&up->port.lock);
- if (tty)
- tty_flip_buffer_push(tty);
+ if (port)
+ tty_flip_buffer_push(port);
up = up->next;
}
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 5be0d68feceb..6818410a2bea 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -91,16 +91,16 @@ static void timbuart_flush_buffer(struct uart_port *port)
static void timbuart_rx_chars(struct uart_port *port)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
while (ioread32(port->membase + TIMBUART_ISR) & RXDP) {
u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
port->icount.rx++;
- tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ tty_insert_flip_char(tport, ch, TTY_NORMAL);
}
spin_unlock(&port->lock);
- tty_flip_buffer_push(port->state->port.tty);
+ tty_flip_buffer_push(tport);
spin_lock(&port->lock);
dev_dbg(port->dev, "%s - total read %d bytes\n",
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 89eee43c4e2d..5f90ef24d475 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -19,7 +19,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -34,7 +34,7 @@
* Register definitions
*
* For register details see datasheet:
- * http://www.xilinx.com/support/documentation/ip_documentation/opb_uartlite.pdf
+ * http://www.xilinx.com/support/documentation/ip_documentation/opb_uartlite.pdf
*/
#define ULITE_RX 0x00
@@ -57,6 +57,54 @@
#define ULITE_CONTROL_RST_RX 0x02
#define ULITE_CONTROL_IE 0x10
+struct uartlite_reg_ops {
+ u32 (*in)(void __iomem *addr);
+ void (*out)(u32 val, void __iomem *addr);
+};
+
+static u32 uartlite_inbe32(void __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
+static void uartlite_outbe32(u32 val, void __iomem *addr)
+{
+ iowrite32be(val, addr);
+}
+
+static struct uartlite_reg_ops uartlite_be = {
+ .in = uartlite_inbe32,
+ .out = uartlite_outbe32,
+};
+
+static u32 uartlite_inle32(void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static void uartlite_outle32(u32 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+}
+
+static struct uartlite_reg_ops uartlite_le = {
+ .in = uartlite_inle32,
+ .out = uartlite_outle32,
+};
+
+static inline u32 uart_in32(u32 offset, struct uart_port *port)
+{
+ struct uartlite_reg_ops *reg_ops = port->private_data;
+
+ return reg_ops->in(port->membase + offset);
+}
+
+static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
+{
+ struct uartlite_reg_ops *reg_ops = port->private_data;
+
+ reg_ops->out(val, port->membase + offset);
+}
static struct uart_port ulite_ports[ULITE_NR_UARTS];
@@ -66,7 +114,7 @@ static struct uart_port ulite_ports[ULITE_NR_UARTS];
static int ulite_receive(struct uart_port *port, int stat)
{
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
unsigned char ch = 0;
char flag = TTY_NORMAL;
@@ -77,7 +125,7 @@ static int ulite_receive(struct uart_port *port, int stat)
/* stats */
if (stat & ULITE_STATUS_RXVALID) {
port->icount.rx++;
- ch = ioread32be(port->membase + ULITE_RX);
+ ch = uart_in32(ULITE_RX, port);
if (stat & ULITE_STATUS_PARITY)
port->icount.parity++;
@@ -103,13 +151,13 @@ static int ulite_receive(struct uart_port *port, int stat)
stat &= ~port->ignore_status_mask;
if (stat & ULITE_STATUS_RXVALID)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(tport, ch, flag);
if (stat & ULITE_STATUS_FRAME)
- tty_insert_flip_char(tty, 0, TTY_FRAME);
+ tty_insert_flip_char(tport, 0, TTY_FRAME);
if (stat & ULITE_STATUS_OVERRUN)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
return 1;
}
@@ -122,7 +170,7 @@ static int ulite_transmit(struct uart_port *port, int stat)
return 0;
if (port->x_char) {
- iowrite32be(port->x_char, port->membase + ULITE_TX);
+ uart_out32(port->x_char, ULITE_TX, port);
port->x_char = 0;
port->icount.tx++;
return 1;
@@ -131,7 +179,7 @@ static int ulite_transmit(struct uart_port *port, int stat)
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return 0;
- iowrite32be(xmit->buf[xmit->tail], port->membase + ULITE_TX);
+ uart_out32(xmit->buf[xmit->tail], ULITE_TX, port);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
port->icount.tx++;
@@ -148,7 +196,7 @@ static irqreturn_t ulite_isr(int irq, void *dev_id)
int busy, n = 0;
do {
- int stat = ioread32be(port->membase + ULITE_STATUS);
+ int stat = uart_in32(ULITE_STATUS, port);
busy = ulite_receive(port, stat);
busy |= ulite_transmit(port, stat);
n++;
@@ -156,7 +204,7 @@ static irqreturn_t ulite_isr(int irq, void *dev_id)
/* work done? */
if (n > 1) {
- tty_flip_buffer_push(port->state->port.tty);
+ tty_flip_buffer_push(&port->state->port);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
@@ -169,7 +217,7 @@ static unsigned int ulite_tx_empty(struct uart_port *port)
unsigned int ret;
spin_lock_irqsave(&port->lock, flags);
- ret = ioread32be(port->membase + ULITE_STATUS);
+ ret = uart_in32(ULITE_STATUS, port);
spin_unlock_irqrestore(&port->lock, flags);
return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
@@ -192,7 +240,7 @@ static void ulite_stop_tx(struct uart_port *port)
static void ulite_start_tx(struct uart_port *port)
{
- ulite_transmit(port, ioread32be(port->membase + ULITE_STATUS));
+ ulite_transmit(port, uart_in32(ULITE_STATUS, port));
}
static void ulite_stop_rx(struct uart_port *port)
@@ -220,17 +268,17 @@ static int ulite_startup(struct uart_port *port)
if (ret)
return ret;
- iowrite32be(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
- port->membase + ULITE_CONTROL);
- iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+ uart_out32(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
+ ULITE_CONTROL, port);
+ uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port);
return 0;
}
static void ulite_shutdown(struct uart_port *port)
{
- iowrite32be(0, port->membase + ULITE_CONTROL);
- ioread32be(port->membase + ULITE_CONTROL); /* dummy */
+ uart_out32(0, ULITE_CONTROL, port);
+ uart_in32(ULITE_CONTROL, port); /* dummy */
free_irq(port->irq, port);
}
@@ -281,6 +329,8 @@ static void ulite_release_port(struct uart_port *port)
static int ulite_request_port(struct uart_port *port)
{
+ int ret;
+
pr_debug("ulite console: port=%p; port->mapbase=%llx\n",
port, (unsigned long long) port->mapbase);
@@ -296,6 +346,14 @@ static int ulite_request_port(struct uart_port *port)
return -EBUSY;
}
+ port->private_data = &uartlite_be;
+ ret = uart_in32(ULITE_CONTROL, port);
+ uart_out32(ULITE_CONTROL_RST_TX, ULITE_CONTROL, port);
+ ret = uart_in32(ULITE_STATUS, port);
+ /* Endianness detection */
+ if ((ret & ULITE_STATUS_TXEMPTY) != ULITE_STATUS_TXEMPTY)
+ port->private_data = &uartlite_le;
+
return 0;
}
@@ -314,20 +372,19 @@ static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
#ifdef CONFIG_CONSOLE_POLL
static int ulite_get_poll_char(struct uart_port *port)
{
- if (!(ioread32be(port->membase + ULITE_STATUS)
- & ULITE_STATUS_RXVALID))
+ if (!(uart_in32(ULITE_STATUS, port) & ULITE_STATUS_RXVALID))
return NO_POLL_CHAR;
- return ioread32be(port->membase + ULITE_RX);
+ return uart_in32(ULITE_RX, port);
}
static void ulite_put_poll_char(struct uart_port *port, unsigned char ch)
{
- while (ioread32be(port->membase + ULITE_STATUS) & ULITE_STATUS_TXFULL)
+ while (uart_in32(ULITE_STATUS, port) & ULITE_STATUS_TXFULL)
cpu_relax();
/* write char to device */
- iowrite32be(ch, port->membase + ULITE_TX);
+ uart_out32(ch, ULITE_TX, port);
}
#endif
@@ -366,7 +423,7 @@ static void ulite_console_wait_tx(struct uart_port *port)
/* Spin waiting for TX fifo to have space available */
for (i = 0; i < 100000; i++) {
- val = ioread32be(port->membase + ULITE_STATUS);
+ val = uart_in32(ULITE_STATUS, port);
if ((val & ULITE_STATUS_TXFULL) == 0)
break;
cpu_relax();
@@ -376,7 +433,7 @@ static void ulite_console_wait_tx(struct uart_port *port)
static void ulite_console_putchar(struct uart_port *port, int ch)
{
ulite_console_wait_tx(port);
- iowrite32be(ch, port->membase + ULITE_TX);
+ uart_out32(ch, ULITE_TX, port);
}
static void ulite_console_write(struct console *co, const char *s,
@@ -393,8 +450,8 @@ static void ulite_console_write(struct console *co, const char *s,
spin_lock_irqsave(&port->lock, flags);
/* save and disable interrupt */
- ier = ioread32be(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
- iowrite32be(0, port->membase + ULITE_CONTROL);
+ ier = uart_in32(ULITE_STATUS, port) & ULITE_STATUS_IE;
+ uart_out32(0, ULITE_CONTROL, port);
uart_console_write(port, s, count, ulite_console_putchar);
@@ -402,7 +459,7 @@ static void ulite_console_write(struct console *co, const char *s,
/* restore interrupt state */
if (ier)
- iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+ uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port);
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
@@ -615,7 +672,7 @@ static struct platform_driver ulite_platform_driver = {
* Module setup/teardown
*/
-int __init ulite_init(void)
+static int __init ulite_init(void)
{
int ret;
@@ -634,11 +691,11 @@ int __init ulite_init(void)
err_plat:
uart_unregister_driver(&ulite_uart_driver);
err_uart:
- printk(KERN_ERR "registering uartlite driver failed: err=%i", ret);
+ pr_err("registering uartlite driver failed: err=%i", ret);
return ret;
}
-void __exit ulite_exit(void)
+static void __exit ulite_exit(void)
{
platform_driver_unregister(&ulite_platform_driver);
uart_unregister_driver(&ulite_uart_driver);
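
The uartlite change routes every MMIO access through a small ops table stored in port->private_data, so one driver binary serves both big- and little-endian instances of the IP. A condensed sketch of that idea follows; the accessor names, register offsets and bit values are illustrative stand-ins, not taken verbatim from the patch.

#include <linux/io.h>
#include <linux/serial_core.h>

struct example_reg_ops {
        u32 (*in)(void __iomem *addr);
        void (*out)(u32 val, void __iomem *addr);
};

static u32 example_in_be(void __iomem *addr)             { return ioread32be(addr); }
static void example_out_be(u32 val, void __iomem *addr)  { iowrite32be(val, addr); }
static u32 example_in_le(void __iomem *addr)             { return ioread32(addr); }
static void example_out_le(u32 val, void __iomem *addr)  { iowrite32(val, addr); }

static struct example_reg_ops example_be = { .in = example_in_be, .out = example_out_be };
static struct example_reg_ops example_le = { .in = example_in_le, .out = example_out_le };

/*
 * Assume big-endian, reset the TX FIFO, then check that the "TX empty" bit
 * reads back as expected; if not, fall back to the little-endian accessors.
 */
static void example_detect_endianness(struct uart_port *port)
{
        struct example_reg_ops *ops = &example_be;

        port->private_data = ops;
        ops->out(0x01, port->membase + 0x0c);          /* reset TX FIFO (illustrative) */
        if (!(ops->in(port->membase + 0x08) & 0x04))   /* TX empty? (illustrative) */
                port->private_data = &example_le;
}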
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index f99b0c965f85..7355303dad99 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -469,7 +469,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
int i;
unsigned char ch, *cp;
struct uart_port *port = &qe_port->port;
- struct tty_struct *tty = port->state->port.tty;
+ struct tty_port *tport = &port->state->port;
struct qe_bd *bdp;
u16 status;
unsigned int flg;
@@ -491,7 +491,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
/* If we don't have enough room in RX buffer for the entire BD,
* then we try later, which will be the next RX interrupt.
*/
- if (tty_buffer_request_room(tty, i) < i) {
+ if (tty_buffer_request_room(tport, i) < i) {
dev_dbg(port->dev, "ucc-uart: no room in RX buffer\n");
return;
}
@@ -512,7 +512,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
continue;
error_return:
- tty_insert_flip_char(tty, ch, flg);
+ tty_insert_flip_char(tport, ch, flg);
}
@@ -530,7 +530,7 @@ error_return:
qe_port->rx_cur = bdp;
/* Activate BH processing */
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tport);
return;
@@ -560,7 +560,7 @@ handle_error:
/* Overrun does not affect the current character ! */
if (status & BD_SC_OV)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
#ifdef SUPPORT_SYSRQ
port->sysrq = 0;
#endif
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 62ee0166bc65..f655997f44af 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -313,12 +313,10 @@ static void siu_break_ctl(struct uart_port *port, int ctl)
static inline void receive_chars(struct uart_port *port, uint8_t *status)
{
- struct tty_struct *tty;
uint8_t lsr, ch;
char flag;
int max_count = RX_MAX_COUNT;
- tty = port->state->port.tty;
lsr = *status;
do {
@@ -365,7 +363,7 @@ static inline void receive_chars(struct uart_port *port, uint8_t *status)
lsr = siu_read(port, UART_LSR);
} while ((lsr & UART_LSR_DR) && (max_count-- > 0));
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
*status = lsr;
}
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index d5ed9f613005..a3f9dd5c9dff 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -136,22 +136,14 @@ static void vt8500_enable_ms(struct uart_port *port)
static void handle_rx(struct uart_port *port)
{
- struct tty_struct *tty = tty_port_tty_get(&port->state->port);
- if (!tty) {
- /* Discard data: no tty available */
- int count = (vt8500_read(port, VT8500_URFIDX) & 0x1f00) >> 8;
- u16 ch;
- while (count--)
- ch = readw(port->membase + VT8500_RXFIFO);
- return;
- }
+ struct tty_port *tport = &port->state->port;
/*
* Handle overrun
*/
if ((vt8500_read(port, VT8500_URISR) & RXOVER)) {
port->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
}
/* and now the main RX loop */
@@ -174,11 +166,10 @@ static void handle_rx(struct uart_port *port)
port->icount.rx++;
if (!uart_handle_sysrq_char(port, c))
- tty_insert_flip_char(tty, c, flag);
+ tty_insert_flip_char(tport, c, flag);
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(tport);
}
static void handle_tx(struct uart_port *port)
@@ -569,7 +560,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
if (np)
port = of_alias_get_id(np, "serial");
- if (port > VT8500_MAX_PORTS)
+ if (port >= VT8500_MAX_PORTS)
port = -1;
else
port = -1;
@@ -580,7 +571,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
sizeof(vt8500_ports_in_use));
}
- if (port > VT8500_MAX_PORTS)
+ if (port >= VT8500_MAX_PORTS)
return -ENODEV;
/* reserve the port id */
@@ -589,10 +580,27 @@ static int vt8500_serial_probe(struct platform_device *pdev)
return -EBUSY;
}
- vt8500_port = kzalloc(sizeof(struct vt8500_port), GFP_KERNEL);
+ vt8500_port = devm_kzalloc(&pdev->dev, sizeof(struct vt8500_port),
+ GFP_KERNEL);
if (!vt8500_port)
return -ENOMEM;
+ vt8500_port->uart.membase = devm_request_and_ioremap(&pdev->dev, mmres);
+ if (!vt8500_port->uart.membase)
+ return -EADDRNOTAVAIL;
+
+ vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(vt8500_port->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(vt8500_port->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ return ret;
+ }
+
vt8500_port->uart.type = PORT_VT8500;
vt8500_port->uart.iotype = UPIO_MEM;
vt8500_port->uart.mapbase = mmres->start;
@@ -615,12 +623,6 @@ static int vt8500_serial_probe(struct platform_device *pdev)
snprintf(vt8500_port->name, sizeof(vt8500_port->name),
"VT8500 UART%d", pdev->id);
- vt8500_port->uart.membase = ioremap(mmres->start, resource_size(mmres));
- if (!vt8500_port->uart.membase) {
- ret = -ENOMEM;
- goto err;
- }
-
vt8500_uart_ports[port] = vt8500_port;
uart_add_one_port(&vt8500_uart_driver, &vt8500_port->uart);
@@ -628,10 +630,6 @@ static int vt8500_serial_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vt8500_port);
return 0;
-
-err:
- kfree(vt8500_port);
- return ret;
}
static int vt8500_serial_remove(struct platform_device *pdev)
@@ -639,8 +637,8 @@ static int vt8500_serial_remove(struct platform_device *pdev)
struct vt8500_port *vt8500_port = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
+ clk_disable_unprepare(vt8500_port->clk);
uart_remove_one_port(&vt8500_uart_driver, &vt8500_port->uart);
- kfree(vt8500_port);
return 0;
}
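
The vt8500 probe now relies on managed allocation plus an explicitly prepared and enabled clock, which is what lets the manual error/unwind path disappear. The sketch below shows that probe shape for a hypothetical platform driver, assuming the era-appropriate devm_request_and_ioremap() helper; structure and function names are placeholders.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_port {
        void __iomem *base;
        struct clk *clk;
};

static int example_probe(struct platform_device *pdev)
{
        struct resource *mmres;
        struct example_port *ep;
        int ret;

        mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!mmres)
                return -ENODEV;

        /* devm_* resources are released automatically on failure or remove */
        ep = devm_kzalloc(&pdev->dev, sizeof(*ep), GFP_KERNEL);
        if (!ep)
                return -ENOMEM;

        ep->base = devm_request_and_ioremap(&pdev->dev, mmres);
        if (!ep->base)
                return -EADDRNOTAVAIL;

        ep->clk = of_clk_get(pdev->dev.of_node, 0);
        if (IS_ERR(ep->clk))
                return PTR_ERR(ep->clk);

        /* balanced by clk_disable_unprepare() in the remove path */
        ret = clk_prepare_enable(ep->clk);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, ep);
        return 0;
}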
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 9ab910370c56..ba451c7209fc 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -17,6 +17,7 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/console.h>
+#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -147,15 +148,11 @@
static irqreturn_t xuartps_isr(int irq, void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
- struct tty_struct *tty;
unsigned long flags;
unsigned int isrstatus, numbytes;
unsigned int data;
char status = TTY_NORMAL;
- /* Get the tty which could be NULL so don't assume it's valid */
- tty = tty_port_tty_get(&port->state->port);
-
spin_lock_irqsave(&port->lock, flags);
/* Read the interrupt status register to determine which
@@ -187,14 +184,11 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
} else if (isrstatus & XUARTPS_IXR_OVERRUN)
port->icount.overrun++;
- if (tty)
- uart_insert_char(port, isrstatus,
- XUARTPS_IXR_OVERRUN, data,
- status);
+ uart_insert_char(port, isrstatus, XUARTPS_IXR_OVERRUN,
+ data, status);
}
spin_unlock(&port->lock);
- if (tty)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
spin_lock(&port->lock);
}
@@ -237,7 +231,6 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
/* be sure to release the lock and tty before leaving */
spin_unlock_irqrestore(&port->lock, flags);
- tty_kref_put(tty);
return IRQ_HANDLED;
}
@@ -944,16 +937,18 @@ static int xuartps_probe(struct platform_device *pdev)
int rc;
struct uart_port *port;
struct resource *res, *res2;
- int clk = 0;
-
- const unsigned int *prop;
+ struct clk *clk;
- prop = of_get_property(pdev->dev.of_node, "clock", NULL);
- if (prop)
- clk = be32_to_cpup(prop);
- if (!clk) {
+ clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(clk)) {
dev_err(&pdev->dev, "no clock specified\n");
- return -ENODEV;
+ return PTR_ERR(clk);
+ }
+
+ rc = clk_prepare_enable(clk);
+ if (rc) {
+ dev_err(&pdev->dev, "could not enable clock\n");
+ return -EBUSY;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -978,7 +973,8 @@ static int xuartps_probe(struct platform_device *pdev)
port->mapbase = res->start;
port->irq = res2->start;
port->dev = &pdev->dev;
- port->uartclk = clk;
+ port->uartclk = clk_get_rate(clk);
+ port->private_data = clk;
dev_set_drvdata(&pdev->dev, port);
rc = uart_add_one_port(&xuartps_uart_driver, port);
if (rc) {
@@ -1000,14 +996,14 @@ static int xuartps_probe(struct platform_device *pdev)
static int xuartps_remove(struct platform_device *pdev)
{
struct uart_port *port = dev_get_drvdata(&pdev->dev);
- int rc = 0;
+ struct clk *clk = port->private_data;
+ int rc;
/* Remove the xuartps port from the serial core */
- if (port) {
- rc = uart_remove_one_port(&xuartps_uart_driver, port);
- dev_set_drvdata(&pdev->dev, NULL);
- port->mapbase = 0;
- }
+ rc = uart_remove_one_port(&xuartps_uart_driver, port);
+ dev_set_drvdata(&pdev->dev, NULL);
+ port->mapbase = 0;
+ clk_disable_unprepare(clk);
return rc;
}
@@ -1048,7 +1044,7 @@ MODULE_DEVICE_TABLE(of, xuartps_of_match);
static struct platform_driver xuartps_platform_driver = {
.probe = xuartps_probe, /* Probe method */
- .remove = __exit_p(xuartps_remove), /* Detach method */
+ .remove = xuartps_remove, /* Detach method */
.suspend = xuartps_suspend, /* Suspend */
.resume = xuartps_resume, /* Resume after a suspend */
.driver = {
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 92c00b24d0df..6a169877109b 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -603,7 +603,7 @@ static void zs_receive_chars(struct zs_port *zport)
uart_insert_char(uport, status, Rx_OVR, ch, flag);
}
- tty_flip_buffer_push(uport->state->port.tty);
+ tty_flip_buffer_push(&uport->state->port);
}
static void zs_raw_transmit_chars(struct zs_port *zport)
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 9e071f6985f6..8983276aa35e 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -291,8 +291,7 @@ struct mgsl_struct {
bool lcr_mem_requested;
u32 misc_ctrl_value;
- char flag_buf[MAX_ASYNC_BUFFER_SIZE];
- char char_buf[MAX_ASYNC_BUFFER_SIZE];
+ char *flag_buf;
bool drop_rts_on_tx_done;
bool loopmode_insert_requested;
@@ -1440,7 +1439,6 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
u16 status;
int work = 0;
unsigned char DataByte;
- struct tty_struct *tty = info->port.tty;
struct mgsl_icount *icount = &info->icount;
if ( debug_level >= DEBUG_LEVEL_ISR )
@@ -1502,19 +1500,19 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
if (status & RXSTATUS_BREAK_RECEIVED) {
flag = TTY_BREAK;
if (info->port.flags & ASYNC_SAK)
- do_SAK(tty);
+ do_SAK(info->port.tty);
} else if (status & RXSTATUS_PARITY_ERROR)
flag = TTY_PARITY;
else if (status & RXSTATUS_FRAMING_ERROR)
flag = TTY_FRAME;
} /* end of if (error) */
- tty_insert_flip_char(tty, DataByte, flag);
+ tty_insert_flip_char(&info->port, DataByte, flag);
if (status & RXSTATUS_OVERRUN) {
/* Overrun is special, since it's
* reported immediately, and doesn't
* affect the current character
*/
- work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
}
}
@@ -1525,7 +1523,7 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
}
if(work)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&info->port);
}
/* mgsl_isr_misc()
@@ -1852,7 +1850,7 @@ static void shutdown(struct mgsl_struct * info)
usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
- info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
usc_set_serial_signals(info);
}
@@ -1917,12 +1915,12 @@ static void mgsl_change_params(struct mgsl_struct *info)
cflag = info->port.tty->termios.c_cflag;
- /* if B0 rate (hangup) specified then negate DTR and RTS */
- /* otherwise assert DTR and RTS */
+ /* if B0 rate (hangup) specified then negate RTS and DTR */
+ /* otherwise assert RTS and DTR */
if (cflag & CBAUD)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
@@ -3046,7 +3044,7 @@ static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termio
/* Handle transition to B0 status */
if (old_termios->c_cflag & CBAUD &&
!(tty->termios.c_cflag & CBAUD)) {
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
@@ -3245,9 +3243,9 @@ static void dtr_rts(struct tty_port *port, int on)
spin_lock_irqsave(&info->irq_spinlock,flags);
if (on)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
@@ -3416,7 +3414,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
goto cleanup;
}
- info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
@@ -3898,7 +3896,13 @@ static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
if ( info->intermediate_rxbuffer == NULL )
return -ENOMEM;
-
+ /* unused flag buffer to satisfy receive_buf calling interface */
+ info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
+ if (!info->flag_buf) {
+ kfree(info->intermediate_rxbuffer);
+ info->intermediate_rxbuffer = NULL;
+ return -ENOMEM;
+ }
return 0;
} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
@@ -3917,6 +3921,8 @@ static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
{
kfree(info->intermediate_rxbuffer);
info->intermediate_rxbuffer = NULL;
+ kfree(info->flag_buf);
+ info->flag_buf = NULL;
} /* end of mgsl_free_intermediate_rxbuffer_memory() */
@@ -6233,8 +6239,8 @@ static void usc_get_serial_signals( struct mgsl_struct *info )
{
u16 status;
- /* clear all serial signals except DTR and RTS */
- info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
+ /* clear all serial signals except RTS and DTR */
+ info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
/* Read the Misc Interrupt status Register (MISR) to get */
/* the V24 status signals. */
@@ -6259,7 +6265,7 @@ static void usc_get_serial_signals( struct mgsl_struct *info )
/* usc_set_serial_signals()
*
- * Set the state of DTR and RTS based on contents of
+ * Set the state of RTS and DTR based on contents of
* serial_signals member of device extension.
*
* Arguments: info pointer to device instance data
@@ -7773,8 +7779,8 @@ static int hdlcdev_open(struct net_device *dev)
return rc;
}
- /* assert DTR and RTS, apply hardware settings */
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ /* assert RTS and DTR, apply hardware settings */
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
mgsl_program_hw(info);
/* enable network layer transmit */
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index aba1e59f4a88..aa9eece35c3b 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -317,8 +317,7 @@ struct slgt_info {
unsigned char *tx_buf;
int tx_count;
- char flag_buf[MAX_ASYNC_BUFFER_SIZE];
- char char_buf[MAX_ASYNC_BUFFER_SIZE];
+ char *flag_buf;
bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
@@ -683,7 +682,7 @@ static int open(struct tty_struct *tty, struct file *filp)
}
mutex_lock(&info->port.mutex);
- info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
@@ -786,7 +785,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
/* Handle transition to B0 status */
if (old_termios->c_cflag & CBAUD &&
!(tty->termios.c_cflag & CBAUD)) {
- info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->lock,flags);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
@@ -1561,8 +1560,8 @@ static int hdlcdev_open(struct net_device *dev)
return rc;
}
- /* assert DTR and RTS, apply hardware settings */
- info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ /* assert RTS and DTR, apply hardware settings */
+ info->signals |= SerialSignal_RTS | SerialSignal_DTR;
program_hw(info);
/* enable network layer transmit */
@@ -1855,7 +1854,6 @@ static void hdlcdev_exit(struct slgt_info *info)
*/
static void rx_async(struct slgt_info *info)
{
- struct tty_struct *tty = info->port.tty;
struct mgsl_icount *icount = &info->icount;
unsigned int start, end;
unsigned char *p;
@@ -1894,10 +1892,8 @@ static void rx_async(struct slgt_info *info)
else if (status & BIT0)
stat = TTY_FRAME;
}
- if (tty) {
- tty_insert_flip_char(tty, ch, stat);
- chars++;
- }
+ tty_insert_flip_char(&info->port, ch, stat);
+ chars++;
}
if (i < count) {
@@ -1918,8 +1914,8 @@ static void rx_async(struct slgt_info *info)
break;
}
- if (tty && chars)
- tty_flip_buffer_push(tty);
+ if (chars)
+ tty_flip_buffer_push(&info->port);
}
/*
@@ -1961,8 +1957,6 @@ static void bh_handler(struct work_struct *work)
struct slgt_info *info = container_of(work, struct slgt_info, task);
int action;
- if (!info)
- return;
info->bh_running = true;
while((action = bh_action(info))) {
@@ -2183,7 +2177,7 @@ static void isr_serial(struct slgt_info *info)
if (info->port.tty) {
if (!(status & info->ignore_status_mask)) {
if (info->read_status_mask & MASK_BREAK) {
- tty_insert_flip_char(info->port.tty, 0, TTY_BREAK);
+ tty_insert_flip_char(&info->port, 0, TTY_BREAK);
if (info->port.flags & ASYNC_SAK)
do_SAK(info->port.tty);
}
@@ -2494,7 +2488,7 @@ static void shutdown(struct slgt_info *info)
slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
- info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
}
@@ -2554,12 +2548,12 @@ static void change_params(struct slgt_info *info)
cflag = info->port.tty->termios.c_cflag;
- /* if B0 rate (hangup) specified then negate DTR and RTS */
- /* otherwise assert DTR and RTS */
+ /* if B0 rate (hangup) specified then negate RTS and DTR */
+ /* otherwise assert RTS and DTR */
if (cflag & CBAUD)
- info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
@@ -3262,9 +3256,9 @@ static void dtr_rts(struct tty_port *port, int on)
spin_lock_irqsave(&info->lock,flags);
if (on)
- info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3355,11 +3349,24 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
return retval;
}
+/*
+ * allocate buffers used for calling line discipline receive_buf
+ * directly in synchronous mode
+ * note: add 5 bytes to max frame size to allow appending
+ * 32-bit CRC and status byte when configured to do so
+ */
static int alloc_tmp_rbuf(struct slgt_info *info)
{
info->tmp_rbuf = kmalloc(info->max_frame_size + 5, GFP_KERNEL);
if (info->tmp_rbuf == NULL)
return -ENOMEM;
+ /* unused flag buffer to satisfy receive_buf calling interface */
+ info->flag_buf = kzalloc(info->max_frame_size + 5, GFP_KERNEL);
+ if (!info->flag_buf) {
+ kfree(info->tmp_rbuf);
+ info->tmp_rbuf = NULL;
+ return -ENOMEM;
+ }
return 0;
}
@@ -3367,6 +3374,8 @@ static void free_tmp_rbuf(struct slgt_info *info)
{
kfree(info->tmp_rbuf);
info->tmp_rbuf = NULL;
+ kfree(info->flag_buf);
+ info->flag_buf = NULL;
}
/*
@@ -4110,7 +4119,7 @@ static void reset_port(struct slgt_info *info)
tx_stop(info);
rx_stop(info);
- info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
@@ -4537,8 +4546,8 @@ static void get_signals(struct slgt_info *info)
{
unsigned short status = rd_reg16(info, SSR);
- /* clear all serial signals except DTR and RTS */
- info->signals &= SerialSignal_DTR + SerialSignal_RTS;
+ /* clear all serial signals except RTS and DTR */
+ info->signals &= SerialSignal_RTS | SerialSignal_DTR;
if (status & BIT3)
info->signals |= SerialSignal_DSR;
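
Both synclink variants replace the two fixed MAX_ASYNC_BUFFER_SIZE arrays embedded in the device structure with a single heap-allocated flag_buf sized to the frame, allocated alongside the existing temporary rx buffer and rolled back together on failure. A small sketch of that paired-allocation idiom, using hypothetical field and function names:

#include <linux/slab.h>

struct example_info {
        unsigned int max_frame_size;
        unsigned char *tmp_rbuf;
        char *flag_buf;
};

static int example_alloc_rx_bufs(struct example_info *info)
{
        info->tmp_rbuf = kmalloc(info->max_frame_size, GFP_KERNEL);
        if (!info->tmp_rbuf)
                return -ENOMEM;

        /* flag buffer only exists to satisfy the ldisc receive_buf interface */
        info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
        if (!info->flag_buf) {
                kfree(info->tmp_rbuf);          /* undo the first allocation */
                info->tmp_rbuf = NULL;
                return -ENOMEM;
        }
        return 0;
}

static void example_free_rx_bufs(struct example_info *info)
{
        kfree(info->tmp_rbuf);
        info->tmp_rbuf = NULL;
        kfree(info->flag_buf);
        info->flag_buf = NULL;
}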
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index fd43fb6f7cee..6d5780cf1d57 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -262,8 +262,7 @@ typedef struct _synclinkmp_info {
bool sca_statctrl_requested;
u32 misc_ctrl_value;
- char flag_buf[MAX_ASYNC_BUFFER_SIZE];
- char char_buf[MAX_ASYNC_BUFFER_SIZE];
+ char *flag_buf;
bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
@@ -762,7 +761,7 @@ static int open(struct tty_struct *tty, struct file *filp)
goto cleanup;
}
- info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
@@ -883,7 +882,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
/* Handle transition to B0 status */
if (old_termios->c_cflag & CBAUD &&
!(tty->termios.c_cflag & CBAUD)) {
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->lock,flags);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
@@ -1677,8 +1676,8 @@ static int hdlcdev_open(struct net_device *dev)
return rc;
}
- /* assert DTR and RTS, apply hardware settings */
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ /* assert RTS and DTR, apply hardware settings */
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
program_hw(info);
/* enable network layer transmit */
@@ -2008,9 +2007,6 @@ static void bh_handler(struct work_struct *work)
SLMP_INFO *info = container_of(work, SLMP_INFO, task);
int action;
- if (!info)
- return;
-
if ( debug_level >= DEBUG_LEVEL_BH )
printk( "%s(%d):%s bh_handler() entry\n",
__FILE__,__LINE__,info->device_name);
@@ -2132,13 +2128,11 @@ static void isr_rxint(SLMP_INFO * info)
/* process break detection if tty control
* is not set to ignore it
*/
- if ( tty ) {
- if (!(status & info->ignore_status_mask1)) {
- if (info->read_status_mask1 & BRKD) {
- tty_insert_flip_char(tty, 0, TTY_BREAK);
- if (info->port.flags & ASYNC_SAK)
- do_SAK(tty);
- }
+ if (!(status & info->ignore_status_mask1)) {
+ if (info->read_status_mask1 & BRKD) {
+ tty_insert_flip_char(&info->port, 0, TTY_BREAK);
+ if (tty && (info->port.flags & ASYNC_SAK))
+ do_SAK(tty);
}
}
}
@@ -2170,7 +2164,6 @@ static void isr_rxrdy(SLMP_INFO * info)
{
u16 status;
unsigned char DataByte;
- struct tty_struct *tty = info->port.tty;
struct mgsl_icount *icount = &info->icount;
if ( debug_level >= DEBUG_LEVEL_ISR )
@@ -2203,26 +2196,22 @@ static void isr_rxrdy(SLMP_INFO * info)
status &= info->read_status_mask2;
- if ( tty ) {
- if (status & PE)
- flag = TTY_PARITY;
- else if (status & FRME)
- flag = TTY_FRAME;
- if (status & OVRN) {
- /* Overrun is special, since it's
- * reported immediately, and doesn't
- * affect the current character
- */
- over = true;
- }
+ if (status & PE)
+ flag = TTY_PARITY;
+ else if (status & FRME)
+ flag = TTY_FRAME;
+ if (status & OVRN) {
+ /* Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ over = true;
}
} /* end of if (error) */
- if ( tty ) {
- tty_insert_flip_char(tty, DataByte, flag);
- if (over)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- }
+ tty_insert_flip_char(&info->port, DataByte, flag);
+ if (over)
+ tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
}
if ( debug_level >= DEBUG_LEVEL_ISR ) {
@@ -2232,8 +2221,7 @@ static void isr_rxrdy(SLMP_INFO * info)
icount->frame,icount->overrun);
}
- if ( tty )
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&info->port);
}
static void isr_txeom(SLMP_INFO * info, unsigned char status)
@@ -2718,7 +2706,7 @@ static void shutdown(SLMP_INFO * info)
reset_port(info);
if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
- info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
}
@@ -2780,12 +2768,12 @@ static void change_params(SLMP_INFO *info)
cflag = info->port.tty->termios.c_cflag;
- /* if B0 rate (hangup) specified then negate DTR and RTS */
- /* otherwise assert DTR and RTS */
+ /* if B0 rate (hangup) specified then negate RTS and DTR */
+ /* otherwise assert RTS and DTR */
if (cflag & CBAUD)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
@@ -3224,12 +3212,12 @@ static int tiocmget(struct tty_struct *tty)
get_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
- result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
- ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
- ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
- ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
- ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
- ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
+ result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS : 0) |
+ ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR : 0) |
+ ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR : 0) |
+ ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG : 0) |
+ ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR : 0) |
+ ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS : 0);
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s tiocmget() value=%08X\n",
@@ -3284,9 +3272,9 @@ static void dtr_rts(struct tty_port *port, int on)
spin_lock_irqsave(&info->lock,flags);
if (on)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3553,6 +3541,13 @@ static int alloc_tmp_rx_buf(SLMP_INFO *info)
info->tmp_rx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
if (info->tmp_rx_buf == NULL)
return -ENOMEM;
+ /* unused flag buffer to satisfy receive_buf calling interface */
+ info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
+ if (!info->flag_buf) {
+ kfree(info->tmp_rx_buf);
+ info->tmp_rx_buf = NULL;
+ return -ENOMEM;
+ }
return 0;
}
@@ -3560,6 +3555,8 @@ static void free_tmp_rx_buf(SLMP_INFO *info)
{
kfree(info->tmp_rx_buf);
info->tmp_rx_buf = NULL;
+ kfree(info->flag_buf);
+ info->flag_buf = NULL;
}
static int claim_resources(SLMP_INFO *info)
@@ -4357,7 +4354,7 @@ static void reset_port(SLMP_INFO *info)
tx_stop(info);
rx_stop(info);
- info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
/* disable all port interrupts */
@@ -4753,8 +4750,8 @@ static void get_signals(SLMP_INFO *info)
u16 gpstatus = read_status_reg(info);
u16 testbit;
- /* clear all serial signals except DTR and RTS */
- info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
+ /* clear all serial signals except RTS and DTR */
+ info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
/* set serial signal bits to reflect MISR */
@@ -4773,7 +4770,7 @@ static void get_signals(SLMP_INFO *info)
info->serial_signals |= SerialSignal_DSR;
}
-/* Set the state of DTR and RTS based on contents of
+/* Set the state of RTS and DTR based on contents of
* serial_signals member of device context.
*/
static void set_signals(SLMP_INFO *info)
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 814655ee2d61..3687f0cad642 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -870,21 +870,20 @@ static struct input_handler sysrq_handler = {
static bool sysrq_handler_registered;
+unsigned short platform_sysrq_reset_seq[] __weak = { KEY_RESERVED };
+
static inline void sysrq_register_handler(void)
{
- extern unsigned short platform_sysrq_reset_seq[] __weak;
unsigned short key;
int error;
int i;
- if (platform_sysrq_reset_seq) {
- for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
- key = platform_sysrq_reset_seq[i];
- if (key == KEY_RESERVED || key > KEY_MAX)
- break;
+ for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
+ key = platform_sysrq_reset_seq[i];
+ if (key == KEY_RESERVED || key > KEY_MAX)
+ break;
- sysrq_reset_seq[sysrq_reset_seq_len++] = key;
- }
+ sysrq_reset_seq[sysrq_reset_seq_len++] = key;
}
error = input_register_handler(&sysrq_handler);
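
Defining platform_sysrq_reset_seq as a weak, KEY_RESERVED-terminated default lets the generic code drop its NULL check: a platform that wants a reset key chord simply provides a strong definition of the same array. A hedged sketch of what such a board-code override might look like; the particular key combination is illustrative.

#include <linux/input.h>

/*
 * Strong definition overrides the __weak default in sysrq.c.
 * Terminate with KEY_RESERVED; the parser stops at that entry.
 */
unsigned short platform_sysrq_reset_seq[] = {
        KEY_LEFTCTRL,
        KEY_LEFTALT,
        KEY_DELETE,
        KEY_RESERVED,
};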
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 45d916198f78..bb119934e76c 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -16,6 +16,7 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/ratelimit.h>
/**
* tty_buffer_free_all - free buffers used by a tty
@@ -119,11 +120,14 @@ static void __tty_buffer_flush(struct tty_port *port)
struct tty_bufhead *buf = &port->buf;
struct tty_buffer *thead;
- while ((thead = buf->head) != NULL) {
- buf->head = thead->next;
- tty_buffer_free(port, thead);
+ if (unlikely(buf->head == NULL))
+ return;
+ while ((thead = buf->head->next) != NULL) {
+ tty_buffer_free(port, buf->head);
+ buf->head = thead;
}
- buf->tail = NULL;
+ WARN_ON(buf->head != buf->tail);
+ buf->head->read = buf->head->commit;
}
/**
@@ -194,19 +198,22 @@ static struct tty_buffer *tty_buffer_find(struct tty_port *port, size_t size)
have queued and recycle that ? */
}
/**
- * __tty_buffer_request_room - grow tty buffer if needed
+ * tty_buffer_request_room - grow tty buffer if needed
* @tty: tty structure
* @size: size desired
*
* Make at least size bytes of linear space available for the tty
* buffer. If we fail return the size we managed to find.
- * Locking: Caller must hold port->buf.lock
+ *
+ * Locking: Takes port->buf.lock
*/
-static int __tty_buffer_request_room(struct tty_port *port, size_t size)
+int tty_buffer_request_room(struct tty_port *port, size_t size)
{
struct tty_bufhead *buf = &port->buf;
struct tty_buffer *b, *n;
int left;
+ unsigned long flags;
+ spin_lock_irqsave(&buf->lock, flags);
/* OPTIMISATION: We could keep a per tty "zero" sized buffer to
remove this conditional if its worth it. This would be invisible
to the callers */
@@ -228,37 +235,14 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size)
} else
size = left;
}
-
+ spin_unlock_irqrestore(&buf->lock, flags);
return size;
}
-
-
-/**
- * tty_buffer_request_room - grow tty buffer if needed
- * @tty: tty structure
- * @size: size desired
- *
- * Make at least size bytes of linear space available for the tty
- * buffer. If we fail return the size we managed to find.
- *
- * Locking: Takes port->buf.lock
- */
-int tty_buffer_request_room(struct tty_struct *tty, size_t size)
-{
- struct tty_port *port = tty->port;
- unsigned long flags;
- int length;
-
- spin_lock_irqsave(&port->buf.lock, flags);
- length = __tty_buffer_request_room(port, size);
- spin_unlock_irqrestore(&port->buf.lock, flags);
- return length;
-}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
/**
* tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
- * @tty: tty structure
+ * @port: tty port
* @chars: characters
* @flag: flag value for each character
* @size: size
@@ -269,29 +253,21 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room);
* Locking: Called functions may take port->buf.lock
*/
-int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
+int tty_insert_flip_string_fixed_flag(struct tty_port *port,
const unsigned char *chars, char flag, size_t size)
{
- struct tty_bufhead *buf = &tty->port->buf;
int copied = 0;
do {
int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
- int space;
- unsigned long flags;
- struct tty_buffer *tb;
-
- spin_lock_irqsave(&buf->lock, flags);
- space = __tty_buffer_request_room(tty->port, goal);
- tb = buf->tail;
+ int space = tty_buffer_request_room(port, goal);
+ struct tty_buffer *tb = port->buf.tail;
/* If there is no space then tb may be NULL */
if (unlikely(space == 0)) {
- spin_unlock_irqrestore(&buf->lock, flags);
break;
}
memcpy(tb->char_buf_ptr + tb->used, chars, space);
memset(tb->flag_buf_ptr + tb->used, flag, space);
tb->used += space;
- spin_unlock_irqrestore(&buf->lock, flags);
copied += space;
chars += space;
/* There is a small chance that we need to split the data over
@@ -303,7 +279,7 @@ EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
/**
* tty_insert_flip_string_flags - Add characters to the tty buffer
- * @tty: tty structure
+ * @port: tty port
* @chars: characters
* @flags: flag bytes
* @size: size
@@ -315,29 +291,21 @@ EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
* Locking: Called functions may take port->buf.lock
*/
-int tty_insert_flip_string_flags(struct tty_struct *tty,
+int tty_insert_flip_string_flags(struct tty_port *port,
const unsigned char *chars, const char *flags, size_t size)
{
- struct tty_bufhead *buf = &tty->port->buf;
int copied = 0;
do {
int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
- int space;
- unsigned long __flags;
- struct tty_buffer *tb;
-
- spin_lock_irqsave(&buf->lock, __flags);
- space = __tty_buffer_request_room(tty->port, goal);
- tb = buf->tail;
+ int space = tty_buffer_request_room(port, goal);
+ struct tty_buffer *tb = port->buf.tail;
/* If there is no space then tb may be NULL */
if (unlikely(space == 0)) {
- spin_unlock_irqrestore(&buf->lock, __flags);
break;
}
memcpy(tb->char_buf_ptr + tb->used, chars, space);
memcpy(tb->flag_buf_ptr + tb->used, flags, space);
tb->used += space;
- spin_unlock_irqrestore(&buf->lock, __flags);
copied += space;
chars += space;
flags += space;
@@ -350,7 +318,7 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags);
/**
* tty_schedule_flip - push characters to ldisc
- * @tty: tty to push from
+ * @port: tty port to push from
*
* Takes any pending buffers and transfers their ownership to the
* ldisc side of the queue. It then schedules those characters for
@@ -361,11 +329,11 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags);
* Locking: Takes port->buf.lock
*/
-void tty_schedule_flip(struct tty_struct *tty)
+void tty_schedule_flip(struct tty_port *port)
{
- struct tty_bufhead *buf = &tty->port->buf;
+ struct tty_bufhead *buf = &port->buf;
unsigned long flags;
- WARN_ON(tty->low_latency);
+ WARN_ON(port->low_latency);
spin_lock_irqsave(&buf->lock, flags);
if (buf->tail != NULL)
@@ -377,7 +345,7 @@ EXPORT_SYMBOL(tty_schedule_flip);
/**
* tty_prepare_flip_string - make room for characters
- * @tty: tty
+ * @port: tty port
* @chars: return pointer for character write area
* @size: desired size
*
@@ -390,31 +358,23 @@ EXPORT_SYMBOL(tty_schedule_flip);
* Locking: May call functions taking port->buf.lock
*/
-int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
+int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
size_t size)
{
- struct tty_bufhead *buf = &tty->port->buf;
- int space;
- unsigned long flags;
- struct tty_buffer *tb;
-
- spin_lock_irqsave(&buf->lock, flags);
- space = __tty_buffer_request_room(tty->port, size);
-
- tb = buf->tail;
+ int space = tty_buffer_request_room(port, size);
if (likely(space)) {
+ struct tty_buffer *tb = port->buf.tail;
*chars = tb->char_buf_ptr + tb->used;
memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
tb->used += space;
}
- spin_unlock_irqrestore(&buf->lock, flags);
return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
/**
* tty_prepare_flip_string_flags - make room for characters
- * @tty: tty
+ * @port: tty port
* @chars: return pointer for character write area
* @flags: return pointer for status flag write area
* @size: desired size
@@ -428,24 +388,16 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
* Locking: May call functions taking port->buf.lock
*/
-int tty_prepare_flip_string_flags(struct tty_struct *tty,
+int tty_prepare_flip_string_flags(struct tty_port *port,
unsigned char **chars, char **flags, size_t size)
{
- struct tty_bufhead *buf = &tty->port->buf;
- int space;
- unsigned long __flags;
- struct tty_buffer *tb;
-
- spin_lock_irqsave(&buf->lock, __flags);
- space = __tty_buffer_request_room(tty->port, size);
-
- tb = buf->tail;
+ int space = tty_buffer_request_room(port, size);
if (likely(space)) {
+ struct tty_buffer *tb = port->buf.tail;
*chars = tb->char_buf_ptr + tb->used;
*flags = tb->flag_buf_ptr + tb->used;
tb->used += space;
}
- spin_unlock_irqrestore(&buf->lock, __flags);
return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
@@ -539,16 +491,17 @@ static void flush_to_ldisc(struct work_struct *work)
*/
void tty_flush_to_ldisc(struct tty_struct *tty)
{
- if (!tty->low_latency)
+ if (!tty->port->low_latency)
flush_work(&tty->port->buf.work);
}
/**
* tty_flip_buffer_push - terminal
- * @tty: tty to push
+ * @port: tty port to push
*
* Queue a push of the terminal flip buffers to the line discipline. This
- * function must not be called from IRQ context if tty->low_latency is set.
+ * function must not be called from IRQ context if port->low_latency is
+ * set.
*
* In the event of the queue being busy for flipping the work will be
* held off and retried later.
@@ -556,9 +509,9 @@ void tty_flush_to_ldisc(struct tty_struct *tty)
* Locking: tty buffer lock. Driver locks in low latency mode.
*/
-void tty_flip_buffer_push(struct tty_struct *tty)
+void tty_flip_buffer_push(struct tty_port *port)
{
- struct tty_bufhead *buf = &tty->port->buf;
+ struct tty_bufhead *buf = &port->buf;
unsigned long flags;
spin_lock_irqsave(&buf->lock, flags);
@@ -566,7 +519,7 @@ void tty_flip_buffer_push(struct tty_struct *tty)
buf->tail->commit = buf->tail->used;
spin_unlock_irqrestore(&buf->lock, flags);
- if (tty->low_latency)
+ if (port->low_latency)
flush_to_ldisc(&buf->work);
else
schedule_work(&buf->work);
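
With tty_buffer_request_room() now taking the buffer lock itself, the driver-facing helpers reduce to "reserve space, then fill the tail buffer". A minimal sketch of how a driver consumes the port-based API after this change; the data source is a placeholder, and any bytes beyond the reserved space would have to be retried by the caller.

#include <linux/string.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Copy up to len received bytes into the flip buffer of a tty_port. */
static void example_push_rx(struct tty_port *port, const unsigned char *buf,
                            size_t len)
{
        unsigned char *dst;
        int space;

        /* reserves room (locking handled internally) and returns a write area */
        space = tty_prepare_flip_string(port, &dst, len);
        if (space > 0)
                memcpy(dst, buf, space);

        tty_flip_buffer_push(port);
}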
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 6b20fd66d4ad..fd473639ab70 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -536,7 +536,7 @@ EXPORT_SYMBOL_GPL(tty_wakeup);
* __tty_hangup - actual handler for hangup events
* @work: tty device
*
- * This can be called by the "eventd" kernel thread. That is process
+ * This can be called by a "kworker" kernel thread. That is process
* synchronous but doesn't hold any locks, so we need to make sure we
* have the appropriate locks for what we're doing.
*
@@ -977,8 +977,7 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
else
i = -EIO;
tty_ldisc_deref(ld);
- if (i > 0)
- inode->i_atime = current_fs_time(inode->i_sb);
+
return i;
}
@@ -1079,11 +1078,8 @@ static inline ssize_t do_tty_write(
break;
cond_resched();
}
- if (written) {
- struct inode *inode = file->f_path.dentry->d_inode;
- inode->i_mtime = current_fs_time(inode->i_sb);
+ if (written)
ret = written;
- }
out:
tty_write_unlock(tty);
return ret;
@@ -2203,6 +2199,7 @@ done:
mutex_unlock(&tty->termios_mutex);
return 0;
}
+EXPORT_SYMBOL(tty_do_resize);
/**
* tiocswinsz - implement window size set ioctl
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 8481b29d5b3a..d58b92cc187c 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -617,7 +617,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
if (opt & TERMIOS_WAIT) {
tty_wait_until_sent(tty, 0);
if (signal_pending(current))
- return -EINTR;
+ return -ERESTARTSYS;
}
tty_set_termios(tty, &tmp_termios);
@@ -684,7 +684,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt)
if (opt & TERMIOS_WAIT) {
tty_wait_until_sent(tty, 0);
if (signal_pending(current))
- return -EINTR;
+ return -ERESTARTSYS;
}
mutex_lock(&tty->termios_mutex);
@@ -1096,12 +1096,16 @@ int tty_perform_flush(struct tty_struct *tty, unsigned long arg)
ld = tty_ldisc_ref_wait(tty);
switch (arg) {
case TCIFLUSH:
- if (ld && ld->ops->flush_buffer)
+ if (ld && ld->ops->flush_buffer) {
ld->ops->flush_buffer(tty);
+ tty_unthrottle(tty);
+ }
break;
case TCIOFLUSH:
- if (ld && ld->ops->flush_buffer)
+ if (ld && ld->ops->flush_buffer) {
ld->ops->flush_buffer(tty);
+ tty_unthrottle(tty);
+ }
/* fall through */
case TCOFLUSH:
tty_driver_flush_buffer(tty);
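
The two set_termios()/set_termiox() hunks switch the post-signal return value from -EINTR to -ERESTARTSYS, so the ioctl is restarted transparently once the signal has been handled instead of surfacing an error to userspace. A generic sketch of that convention, with the device structure and wait condition invented:

#include <linux/errno.h>
#include <linux/wait.h>

struct example_dev {
	wait_queue_head_t wq;
	bool ready;
};

static long example_wait_ready(struct example_dev *dev)
{
	/*
	 * wait_event_interruptible() returns -ERESTARTSYS when a signal
	 * arrives; passing that back lets the core restart the syscall
	 * after the signal is delivered, rather than failing with -EINTR.
	 */
	if (wait_event_interruptible(dev->wq, dev->ready))
		return -ERESTARTSYS;
	return 0;
}
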
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index c5782294e532..d794087c327e 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -64,7 +64,9 @@ static void put_ldisc(struct tty_ldisc *ld)
return;
}
raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- wake_up(&ld->wq_idle);
+
+ if (waitqueue_active(&ld->wq_idle))
+ wake_up(&ld->wq_idle);
}
/**
@@ -934,17 +936,17 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
* race with the set_ldisc code path.
*/
- tty_lock_pair(tty, o_tty);
tty_ldisc_halt(tty);
- tty_ldisc_flush_works(tty);
- if (o_tty) {
+ if (o_tty)
tty_ldisc_halt(o_tty);
+
+ tty_ldisc_flush_works(tty);
+ if (o_tty)
tty_ldisc_flush_works(o_tty);
- }
+ tty_lock_pair(tty, o_tty);
/* This will need doing differently if we need to lock */
tty_ldisc_kill(tty);
-
if (o_tty)
tty_ldisc_kill(o_tty);
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index 14a51c9960df..17ae94cb29f8 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -27,8 +27,6 @@ $(obj)/defkeymap.o: $(obj)/defkeymap.c
ifdef GENERATE_KEYMAP
$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
- loadkeys --mktable $< > $@.tmp
- sed -e 's/^static *//' $@.tmp > $@
- rm $@.tmp
+ loadkeys --mktable $< > $@
endif
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 681765baef69..a9af1b9ae160 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -307,26 +307,17 @@ int kbd_rate(struct kbd_repeat *rep)
*/
static void put_queue(struct vc_data *vc, int ch)
{
- struct tty_struct *tty = vc->port.tty;
-
- if (tty) {
- tty_insert_flip_char(tty, ch, 0);
- tty_schedule_flip(tty);
- }
+ tty_insert_flip_char(&vc->port, ch, 0);
+ tty_schedule_flip(&vc->port);
}
static void puts_queue(struct vc_data *vc, char *cp)
{
- struct tty_struct *tty = vc->port.tty;
-
- if (!tty)
- return;
-
while (*cp) {
- tty_insert_flip_char(tty, *cp, 0);
+ tty_insert_flip_char(&vc->port, *cp, 0);
cp++;
}
- tty_schedule_flip(tty);
+ tty_schedule_flip(&vc->port);
}
static void applkey(struct vc_data *vc, int key, char mode)
@@ -582,12 +573,8 @@ static void fn_inc_console(struct vc_data *vc)
static void fn_send_intr(struct vc_data *vc)
{
- struct tty_struct *tty = vc->port.tty;
-
- if (!tty)
- return;
- tty_insert_flip_char(tty, 0, TTY_BREAK);
- tty_schedule_flip(tty);
+ tty_insert_flip_char(&vc->port, 0, TTY_BREAK);
+ tty_schedule_flip(&vc->port);
}
static void fn_scroll_forw(struct vc_data *vc)
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index fa7268a93c06..e4ca345873c3 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -101,7 +101,7 @@ vcs_poll_data_get(struct file *file)
poll = kzalloc(sizeof(*poll), GFP_KERNEL);
if (!poll)
return NULL;
- poll->cons_num = iminor(file->f_path.dentry->d_inode) & 127;
+ poll->cons_num = iminor(file_inode(file)) & 127;
init_waitqueue_head(&poll->waitq);
poll->notifier.notifier_call = vcs_notifier;
if (register_vt_notifier(&poll->notifier) != 0) {
@@ -182,7 +182,7 @@ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
int size;
console_lock();
- size = vcs_size(file->f_path.dentry->d_inode);
+ size = vcs_size(file_inode(file));
console_unlock();
if (size < 0)
return size;
@@ -208,7 +208,7 @@ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
static ssize_t
vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
unsigned int currcons = iminor(inode);
struct vc_data *vc;
struct vcs_poll_data *poll;
@@ -386,7 +386,7 @@ unlock_out:
static ssize_t
vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
unsigned int currcons = iminor(inode);
struct vc_data *vc;
long pos;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8fd89687d068..fbd447b390f7 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -539,7 +539,7 @@ static void insert_char(struct vc_data *vc, unsigned int nr)
{
unsigned short *p = (unsigned short *) vc->vc_pos;
- scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x) * 2);
+ scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x - nr) * 2);
scr_memsetw(p, vc->vc_video_erase_char, nr * 2);
vc->vc_need_wrap = 0;
if (DO_UPDATE(vc))
@@ -638,7 +638,7 @@ static inline void save_screen(struct vc_data *vc)
* Redrawing of screen
*/
-static void clear_buffer_attributes(struct vc_data *vc)
+void clear_buffer_attributes(struct vc_data *vc)
{
unsigned short *p = (unsigned short *)vc->vc_origin;
int count = vc->vc_screenbuf_size / 2;
@@ -1333,13 +1333,13 @@ static void csi_m(struct vc_data *vc)
update_attr(vc);
}
-static void respond_string(const char *p, struct tty_struct *tty)
+static void respond_string(const char *p, struct tty_port *port)
{
while (*p) {
- tty_insert_flip_char(tty, *p, 0);
+ tty_insert_flip_char(port, *p, 0);
p++;
}
- tty_schedule_flip(tty);
+ tty_schedule_flip(port);
}
static void cursor_report(struct vc_data *vc, struct tty_struct *tty)
@@ -1347,17 +1347,17 @@ static void cursor_report(struct vc_data *vc, struct tty_struct *tty)
char buf[40];
sprintf(buf, "\033[%d;%dR", vc->vc_y + (vc->vc_decom ? vc->vc_top + 1 : 1), vc->vc_x + 1);
- respond_string(buf, tty);
+ respond_string(buf, tty->port);
}
static inline void status_report(struct tty_struct *tty)
{
- respond_string("\033[0n", tty); /* Terminal ok */
+ respond_string("\033[0n", tty->port); /* Terminal ok */
}
-static inline void respond_ID(struct tty_struct * tty)
+static inline void respond_ID(struct tty_struct *tty)
{
- respond_string(VT102ID, tty);
+ respond_string(VT102ID, tty->port);
}
void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry)
@@ -1366,7 +1366,7 @@ void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry)
sprintf(buf, "\033[M%c%c%c", (char)(' ' + butt), (char)('!' + mrx),
(char)('!' + mry));
- respond_string(buf, tty);
+ respond_string(buf, tty->port);
}
/* invoked via ioctl(TIOCLINUX) and through set_selection */
@@ -2987,7 +2987,7 @@ int __init vty_init(const struct file_operations *console_fops)
static struct class *vtconsole_class;
-static int bind_con_driver(const struct consw *csw, int first, int last,
+static int do_bind_con_driver(const struct consw *csw, int first, int last,
int deflt)
{
struct module *owner = csw->owner;
@@ -2998,7 +2998,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
if (!try_module_get(owner))
return -ENODEV;
- console_lock();
+ WARN_CONSOLE_UNLOCKED();
/* check if driver is registered */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3083,11 +3083,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
retval = 0;
err:
- console_unlock();
module_put(owner);
return retval;
};
+
+static int bind_con_driver(const struct consw *csw, int first, int last,
+ int deflt)
+{
+ int ret;
+
+ console_lock();
+ ret = do_bind_con_driver(csw, first, last, deflt);
+ console_unlock();
+ return ret;
+}
+
#ifdef CONFIG_VT_HW_CONSOLE_BINDING
static int con_is_graphics(const struct consw *csw, int first, int last)
{
@@ -3124,6 +3135,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
*/
int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
{
+ int retval;
+
+ console_lock();
+ retval = do_unbind_con_driver(csw, first, last, deflt);
+ console_unlock();
+ return retval;
+}
+EXPORT_SYMBOL(unbind_con_driver);
+
+/* unlocked version of unbind_con_driver() */
+int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+{
struct module *owner = csw->owner;
const struct consw *defcsw = NULL;
struct con_driver *con_driver = NULL, *con_back = NULL;
@@ -3132,7 +3155,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
if (!try_module_get(owner))
return -ENODEV;
- console_lock();
+ WARN_CONSOLE_UNLOCKED();
/* check if driver is registered and if it is unbindable */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3145,10 +3168,8 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
}
}
- if (retval) {
- console_unlock();
+ if (retval)
goto err;
- }
retval = -ENODEV;
@@ -3164,15 +3185,11 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
}
}
- if (retval) {
- console_unlock();
+ if (retval)
goto err;
- }
- if (!con_is_bound(csw)) {
- console_unlock();
+ if (!con_is_bound(csw))
goto err;
- }
first = max(first, con_driver->first);
last = min(last, con_driver->last);
@@ -3199,15 +3216,14 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
if (!con_is_bound(csw))
con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
- console_unlock();
/* ignore return value, binding should not fail */
- bind_con_driver(defcsw, first, last, deflt);
+ do_bind_con_driver(defcsw, first, last, deflt);
err:
module_put(owner);
return retval;
}
-EXPORT_SYMBOL(unbind_con_driver);
+EXPORT_SYMBOL_GPL(do_unbind_con_driver);
static int vt_bind(struct con_driver *con)
{
@@ -3492,28 +3508,18 @@ int con_debug_leave(void)
}
EXPORT_SYMBOL_GPL(con_debug_leave);
-/**
- * register_con_driver - register console driver to console layer
- * @csw: console driver
- * @first: the first console to take over, minimum value is 0
- * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
- *
- * DESCRIPTION: This function registers a console driver which can later
- * bind to a range of consoles specified by @first and @last. It will
- * also initialize the console driver by calling con_startup().
- */
-int register_con_driver(const struct consw *csw, int first, int last)
+static int do_register_con_driver(const struct consw *csw, int first, int last)
{
struct module *owner = csw->owner;
struct con_driver *con_driver;
const char *desc;
int i, retval = 0;
+ WARN_CONSOLE_UNLOCKED();
+
if (!try_module_get(owner))
return -ENODEV;
- console_lock();
-
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
con_driver = &registered_con_driver[i];
@@ -3566,10 +3572,29 @@ int register_con_driver(const struct consw *csw, int first, int last)
}
err:
- console_unlock();
module_put(owner);
return retval;
}
+
+/**
+ * register_con_driver - register console driver to console layer
+ * @csw: console driver
+ * @first: the first console to take over, minimum value is 0
+ * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
+ *
+ * DESCRIPTION: This function registers a console driver which can later
+ * bind to a range of consoles specified by @first and @last. It will
+ * also initialize the console driver by calling con_startup().
+ */
+int register_con_driver(const struct consw *csw, int first, int last)
+{
+ int retval;
+
+ console_lock();
+ retval = do_register_con_driver(csw, first, last);
+ console_unlock();
+ return retval;
+}
EXPORT_SYMBOL(register_con_driver);
/**
@@ -3585,9 +3610,18 @@ EXPORT_SYMBOL(register_con_driver);
*/
int unregister_con_driver(const struct consw *csw)
{
- int i, retval = -ENODEV;
+ int retval;
console_lock();
+ retval = do_unregister_con_driver(csw);
+ console_unlock();
+ return retval;
+}
+EXPORT_SYMBOL(unregister_con_driver);
+
+int do_unregister_con_driver(const struct consw *csw)
+{
+ int i, retval = -ENODEV;
/* cannot unregister a bound driver */
if (con_is_bound(csw))
@@ -3613,27 +3647,53 @@ int unregister_con_driver(const struct consw *csw)
}
}
err:
- console_unlock();
return retval;
}
-EXPORT_SYMBOL(unregister_con_driver);
+EXPORT_SYMBOL_GPL(do_unregister_con_driver);
/*
* If we support more console drivers, this function is used
* when a driver wants to take over some existing consoles
* and become default driver for newly opened ones.
*
- * take_over_console is basically a register followed by unbind
+ * take_over_console is basically a register followed by unbind
+ */
+int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
+{
+ int err;
+
+ err = do_register_con_driver(csw, first, last);
+ /*
+ * If we get a busy error we still want to bind the console driver
+ * and return success, as we may have unbound the console driver
+ * but not unregistered it.
+ */
+ if (err == -EBUSY)
+ err = 0;
+ if (!err)
+ do_bind_con_driver(csw, first, last, deflt);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(do_take_over_console);
+
+/*
+ * If we support more console drivers, this function is used
+ * when a driver wants to take over some existing consoles
+ * and become default driver for newly opened ones.
+ *
+ * take_over_console is basically a register followed by unbind
*/
int take_over_console(const struct consw *csw, int first, int last, int deflt)
{
int err;
err = register_con_driver(csw, first, last);
- /* if we get an busy error we still want to bind the console driver
+ /*
+ * If we get a busy error we still want to bind the console driver
* and return success, as we may have unbound the console driver
-  * but not unregistered it.
- */
+ * but not unregistered it.
+ */
if (err == -EBUSY)
err = 0;
if (!err)
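
The vt.c changes above follow one pattern throughout: each public entry point keeps its old locking behaviour, while a new do_*() variant only asserts that console_lock is held, so callers such as do_take_over_console() can chain operations under a single lock acquisition. A stripped-down sketch of that split (the operation itself is a placeholder):

#include <linux/console.h>

/* Unlocked worker: caller must already hold console_lock. */
static int do_example_console_op(int arg)
{
	WARN_CONSOLE_UNLOCKED();
	/* ... manipulate registered_con_driver[]-style state here ... */
	return 0;
}

/* Public wrapper keeps the original locking contract. */
int example_console_op(int arg)
{
	int ret;

	console_lock();
	ret = do_example_console_op(arg);
	console_unlock();
	return ret;
}
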
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index f56d185790ea..e92eeaf251fe 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -1,6 +1,5 @@
menuconfig UIO
tristate "Userspace I/O drivers"
- depends on !S390
help
Enable this to allow the userspace driver core code to be
built. This code allows userspace programs easy access to
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 5110f367f1f1..c8b926291e28 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -369,26 +369,15 @@ static void uio_dev_del_attributes(struct uio_device *idev)
static int uio_get_minor(struct uio_device *idev)
{
int retval = -ENOMEM;
- int id;
mutex_lock(&minor_lock);
- if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
- goto exit;
-
- retval = idr_get_new(&uio_idr, idev, &id);
- if (retval < 0) {
- if (retval == -EAGAIN)
- retval = -ENOMEM;
- goto exit;
- }
- if (id < UIO_MAX_DEVICES) {
- idev->minor = id;
- } else {
+ retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
+ if (retval >= 0) {
+ idev->minor = retval;
+ } else if (retval == -ENOSPC) {
dev_err(idev->dev, "too many uio devices\n");
retval = -EINVAL;
- idr_remove(&uio_idr, id);
}
-exit:
mutex_unlock(&minor_lock);
return retval;
}
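
uio_get_minor() above is one of the 3.9-era conversions from the idr_pre_get()/idr_get_new() two-step to idr_alloc(), which allocates an ID in [start, end) and returns it directly, or a negative errno (-ENOSPC when the range is full, -ENOMEM on allocation failure). A small illustration, with names invented:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(example_idr);

static int example_get_minor(void *dev)
{
	int id;

	/* Allocate the lowest free ID in [0, 128). */
	id = idr_alloc(&example_idr, dev, 0, 128, GFP_KERNEL);
	if (id == -ENOSPC)
		return -EINVAL;		/* mirror uio's "too many devices" */
	return id;			/* minor number, or -ENOMEM */
}
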
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 3bc244d2636a..a62c4a47d52c 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -222,7 +222,7 @@ static struct {
} dbg_data = {
.idx = 0,
.tty = 0,
- .lck = __RW_LOCK_UNLOCKED(lck)
+ .lck = __RW_LOCK_UNLOCKED(dbg_data.lck)
};
/**
diff --git a/drivers/usb/class/Kconfig b/drivers/usb/class/Kconfig
index 2519e320098f..316aac8e4ca1 100644
--- a/drivers/usb/class/Kconfig
+++ b/drivers/usb/class/Kconfig
@@ -6,7 +6,7 @@ comment "USB Device Class drivers"
config USB_ACM
tristate "USB Modem (CDC ACM) support"
- depends on USB
+ depends on USB && TTY
---help---
This driver supports USB modems and ISDN adapters which support the
Communication Device Class Abstract Control Model interface.
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 2d92cce260d7..8ac25adf31b4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -410,19 +410,12 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
static void acm_process_read_urb(struct acm *acm, struct urb *urb)
{
- struct tty_struct *tty;
-
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&acm->port);
- if (!tty)
- return;
-
- tty_insert_flip_string(tty, urb->transfer_buffer, urb->actual_length);
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
+ tty_insert_flip_string(&acm->port, urb->transfer_buffer,
+ urb->actual_length);
+ tty_flip_buffer_push(&acm->port);
}
static void acm_read_bulk_callback(struct urb *urb)
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index e33224e23770..2a3bbdf7eb94 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -665,7 +665,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
{
loff_t ret;
- mutex_lock(&file->f_dentry->d_inode->i_mutex);
+ mutex_lock(&file_inode(file)->i_mutex);
switch (orig) {
case 0:
@@ -681,7 +681,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
ret = -EINVAL;
}
- mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+ mutex_unlock(&file_inode(file)->i_mutex);
return ret;
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 4a863fdbdccd..8823e98989fe 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -161,7 +161,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
{
loff_t ret;
- mutex_lock(&file->f_dentry->d_inode->i_mutex);
+ mutex_lock(&file_inode(file)->i_mutex);
switch (orig) {
case 0:
@@ -177,7 +177,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
ret = -EINVAL;
}
- mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+ mutex_unlock(&file_inode(file)->i_mutex);
return ret;
}
@@ -1971,7 +1971,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
void __user *p)
{
struct dev_state *ps = file->private_data;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct usb_device *dev = ps->dev;
int ret = -ENOTTY;
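
file_inode(), used throughout these conversions, replaces the open-coded file->f_path.dentry->d_inode (or f_dentry->d_inode) walk with a single helper; the inode reached is the same. A trivial sketch, with the read routine invented:

#include <linux/fs.h>

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(file);	/* was f_path.dentry->d_inode */

	return simple_read_from_buffer(buf, count, ppos,
				       inode->i_private, inode->i_size);
}
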
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1775ad471edd..5480352f984d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5177,6 +5177,7 @@ int usb_reset_device(struct usb_device *udev)
{
int ret;
int i;
+ unsigned int noio_flag;
struct usb_host_config *config = udev->actconfig;
if (udev->state == USB_STATE_NOTATTACHED ||
@@ -5186,6 +5187,17 @@ int usb_reset_device(struct usb_device *udev)
return -EINVAL;
}
+ /*
+ * Don't allocate memory with GFP_KERNEL in current
+ * context to avoid possible deadlock if usb mass
+ * storage interface or usbnet interface (iSCSI case)
+ * is included in current configuration. The easiest
+ * approach is to do it for every device reset,
+ * because the device 'memalloc_noio' flag may have
+ * not been set before resetting the usb device.
+ */
+ noio_flag = memalloc_noio_save();
+
/* Prevent autosuspend during the reset */
usb_autoresume_device(udev);
@@ -5230,6 +5242,7 @@ int usb_reset_device(struct usb_device *udev)
}
usb_autosuspend_device(udev);
+ memalloc_noio_restore(noio_flag);
return ret;
}
EXPORT_SYMBOL_GPL(usb_reset_device);
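
A rough sketch of the memalloc_noio_save()/memalloc_noio_restore() pairing added above: while the saved flag is in effect, page allocations made by this task are implicitly treated as GFP_NOIO, so resetting a storage device cannot recurse into I/O on that same device to satisfy an allocation. The function name below is invented.

#include <linux/sched.h>

static int example_reset(void)
{
	unsigned int noio_flag;
	int ret = 0;

	/* Allocations in this window are degraded to GFP_NOIO. */
	noio_flag = memalloc_noio_save();
	/* ... reset work that may allocate memory goes here ... */
	memalloc_noio_restore(noio_flag);

	return ret;
}
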
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c5c6fa60910d..5a0c541daf89 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -762,6 +762,7 @@ config USB_GADGET_TARGET
config USB_G_SERIAL
tristate "Serial Gadget (with CDC ACM and CDC OBEX support)"
+ depends on TTY
select USB_U_SERIAL
select USB_F_ACM
select USB_LIBCOMPOSITE
@@ -813,6 +814,8 @@ config USB_G_PRINTER
For more information, see Documentation/usb/gadget_printer.txt
which includes sample code for accessing the device file.
+if TTY
+
config USB_CDC_COMPOSITE
tristate "CDC Composite Device (Ethernet and ACM)"
depends on NET
@@ -900,6 +903,8 @@ config USB_G_MULTI_CDC
If unsure, say "y".
+endif # TTY
+
config USB_G_HID
tristate "HID Gadget"
select USB_LIBCOMPOSITE
@@ -916,6 +921,7 @@ config USB_G_HID
# Standalone / single function gadgets
config USB_G_DBGP
tristate "EHCI Debug Device Gadget"
+ depends on TTY
select USB_LIBCOMPOSITE
help
This gadget emulates an EHCI Debug device. This is useful when you want
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index bc19496bcec0..b66130c97269 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -93,7 +93,7 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf,
if (!access_ok(VERIFY_WRITE, buf, nbytes))
return -EFAULT;
- mutex_lock(&file->f_dentry->d_inode->i_mutex);
+ mutex_lock(&file_inode(file)->i_mutex);
list_for_each_entry_safe(req, tmp_req, queue, queue) {
len = snprintf(tmpbuf, sizeof(tmpbuf),
"%8p %08x %c%c%c %5d %c%c%c\n",
@@ -120,7 +120,7 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf,
nbytes -= len;
buf += len;
}
- mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+ mutex_unlock(&file_inode(file)->i_mutex);
return actual;
}
@@ -168,13 +168,13 @@ out:
static ssize_t regs_dbg_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
int ret;
mutex_lock(&inode->i_mutex);
ret = simple_read_from_buffer(buf, nbytes, ppos,
file->private_data,
- file->f_dentry->d_inode->i_size);
+ file_inode(file)->i_size);
mutex_unlock(&inode->i_mutex);
return ret;
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index fc5c16ca5e0a..97666e8b1b95 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -978,7 +978,7 @@ static int do_synchronize_cache(struct fsg_common *common)
static void invalidate_sub(struct fsg_lun *curlun)
{
struct file *filp = curlun->filp;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
unsigned long rc;
rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 35bcc83d1e04..bf7a56b6d48a 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -688,7 +688,7 @@ static int
printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync)
{
struct printer_dev *dev = fd->private_data;
- struct inode *inode = fd->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(fd);
unsigned long flags;
int tx_list_empty;
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index e4192b887de9..d9297eebbf73 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -1065,7 +1065,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- rndis_params *p = PDE(file->f_path.dentry->d_inode)->data;
+ rndis_params *p = PDE(file_inode(file))->data;
u32 speed = 0;
int i, fl_speed = 0;
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index 4ecbf8496f48..dbce3a9074e6 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -440,7 +440,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
if (!(filp->f_mode & FMODE_WRITE))
ro = 1;
- inode = filp->f_path.dentry->d_inode;
+ inode = file_inode(filp);
if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
LINFO(curlun, "invalid file type: %s\n", filename);
goto out;
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 588a9be18ef8..c5034d9c946b 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -496,12 +496,8 @@ static void gs_rx_push(unsigned long _port)
req = list_first_entry(queue, struct usb_request, list);
- /* discard data if tty was closed */
- if (!tty)
- goto recycle;
-
/* leave data queued if tty was rx throttled */
- if (test_bit(TTY_THROTTLED, &tty->flags))
+ if (tty && test_bit(TTY_THROTTLED, &tty->flags))
break;
switch (req->status) {
@@ -534,7 +530,8 @@ static void gs_rx_push(unsigned long _port)
size -= n;
}
- count = tty_insert_flip_string(tty, packet, size);
+ count = tty_insert_flip_string(&port->port, packet,
+ size);
if (count)
do_push = true;
if (count != size) {
@@ -547,7 +544,7 @@ static void gs_rx_push(unsigned long _port)
}
port->n_read = 0;
}
-recycle:
+
list_move(&req->list, &port->read_pool);
port->read_started--;
}
@@ -555,8 +552,8 @@ recycle:
/* Push from tty to ldisc; without low_latency set this is handled by
* a workqueue, so we won't get callbacks and can hold port_lock
*/
- if (tty && do_push)
- tty_flip_buffer_push(tty);
+ if (do_push)
+ tty_flip_buffer_push(&port->port);
/* We want our data queue to become empty ASAP, keeping data
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 99899e808c6a..0555ee42d7cb 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -107,7 +107,7 @@ static int omap_ehci_init(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int rc;
- struct ehci_hcd_omap_platform_data *pdata;
+ struct usbhs_omap_platform_data *pdata;
pdata = hcd->self.controller->platform_data;
@@ -151,7 +151,7 @@ static int omap_ehci_init(struct usb_hcd *hcd)
}
static void disable_put_regulator(
- struct ehci_hcd_omap_platform_data *pdata)
+ struct usbhs_omap_platform_data *pdata)
{
int i;
@@ -176,7 +176,7 @@ static void disable_put_regulator(
static int ehci_hcd_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ehci_hcd_omap_platform_data *pdata = dev->platform_data;
+ struct usbhs_omap_platform_data *pdata = dev->platform_data;
struct resource *res;
struct usb_hcd *hcd;
void __iomem *regs;
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index acf17556bd87..568aecc7075b 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -2,7 +2,7 @@
* EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
*
* Copyright (C) 2010 Google, Inc.
- * Copyright (C) 2009 NVIDIA Corporation
+ * Copyright (C) 2009 - 2013 NVIDIA Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -26,23 +26,28 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
-
+#include <linux/usb/ehci_def.h>
#include <linux/usb/tegra_usb_phy.h>
#define TEGRA_USB_BASE 0xC5000000
#define TEGRA_USB2_BASE 0xC5004000
#define TEGRA_USB3_BASE 0xC5008000
+/* PORTSC registers */
+#define TEGRA_USB_PORTSC1 0x184
+#define TEGRA_USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
+#define TEGRA_USB_PORTSC1_PHCD (1 << 23)
+
#define TEGRA_USB_DMA_ALIGN 32
struct tegra_ehci_hcd {
struct ehci_hcd *ehci;
struct tegra_usb_phy *phy;
struct clk *clk;
- struct clk *emc_clk;
struct usb_phy *transceiver;
int host_resumed;
int port_resuming;
+ bool needs_double_reset;
enum tegra_usb_phy_port_speed port_speed;
};
@@ -50,9 +55,8 @@ static void tegra_ehci_power_up(struct usb_hcd *hcd)
{
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
- clk_prepare_enable(tegra->emc_clk);
clk_prepare_enable(tegra->clk);
- usb_phy_set_suspend(&tegra->phy->u_phy, 0);
+ usb_phy_set_suspend(hcd->phy, 0);
tegra->host_resumed = 1;
}
@@ -61,9 +65,8 @@ static void tegra_ehci_power_down(struct usb_hcd *hcd)
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
tegra->host_resumed = 0;
- usb_phy_set_suspend(&tegra->phy->u_phy, 1);
+ usb_phy_set_suspend(hcd->phy, 1);
clk_disable_unprepare(tegra->clk);
- clk_disable_unprepare(tegra->emc_clk);
}
static int tegra_ehci_internal_port_reset(
@@ -156,7 +159,7 @@ static int tegra_ehci_hub_control(
if (tegra->port_resuming && !(temp & PORT_SUSPEND)) {
/* Resume completed, re-enable disconnect detection */
tegra->port_resuming = 0;
- tegra_usb_phy_postresume(tegra->phy);
+ tegra_usb_phy_postresume(hcd->phy);
}
}
@@ -184,7 +187,7 @@ static int tegra_ehci_hub_control(
}
/* For USB1 port we need to issue Port Reset twice internally */
- if (tegra->phy->instance == 0 &&
+ if (tegra->needs_double_reset &&
(typeReq == SetPortFeature && wValue == USB_PORT_FEAT_RESET)) {
spin_unlock_irqrestore(&ehci->lock, flags);
return tegra_ehci_internal_port_reset(ehci, status_reg);
@@ -209,7 +212,7 @@ static int tegra_ehci_hub_control(
goto done;
/* Disable disconnect detection during port resume */
- tegra_usb_phy_preresume(tegra->phy);
+ tegra_usb_phy_preresume(hcd->phy);
ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);
@@ -473,7 +476,7 @@ static int controller_resume(struct device *dev)
}
/* Force the phy to keep data lines in suspend state */
- tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
+ tegra_ehci_phy_restore_start(hcd->phy, tegra->port_speed);
/* Enable host mode */
tdi_reset(ehci);
@@ -540,17 +543,17 @@ static int controller_resume(struct device *dev)
}
}
- tegra_ehci_phy_restore_end(tegra->phy);
+ tegra_ehci_phy_restore_end(hcd->phy);
goto done;
restart:
if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
- tegra_ehci_phy_restore_end(tegra->phy);
+ tegra_ehci_phy_restore_end(hcd->phy);
tegra_ehci_restart(hcd);
done:
- tegra_usb_phy_preresume(tegra->phy);
+ tegra_usb_phy_preresume(hcd->phy);
tegra->port_resuming = 1;
return 0;
}
@@ -604,6 +607,37 @@ static const struct dev_pm_ops tegra_ehci_pm_ops = {
#endif
+/* Bits of PORTSC1, which will get cleared by writing 1 into them */
+#define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
+
+void tegra_ehci_set_pts(struct usb_phy *x, u8 pts_val)
+{
+ unsigned long val;
+ struct usb_hcd *hcd = bus_to_hcd(x->otg->host);
+ void __iomem *base = hcd->regs;
+
+ val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
+ val &= ~TEGRA_USB_PORTSC1_PTS(3);
+ val |= TEGRA_USB_PORTSC1_PTS(pts_val & 3);
+ writel(val, base + TEGRA_USB_PORTSC1);
+}
+EXPORT_SYMBOL_GPL(tegra_ehci_set_pts);
+
+void tegra_ehci_set_phcd(struct usb_phy *x, bool enable)
+{
+ unsigned long val;
+ struct usb_hcd *hcd = bus_to_hcd(x->otg->host);
+ void __iomem *base = hcd->regs;
+
+ val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
+ if (enable)
+ val |= TEGRA_USB_PORTSC1_PHCD;
+ else
+ val &= ~TEGRA_USB_PORTSC1_PHCD;
+ writel(val, base + TEGRA_USB_PORTSC1);
+}
+EXPORT_SYMBOL_GPL(tegra_ehci_set_phcd);
+
static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
static int tegra_ehci_probe(struct platform_device *pdev)
@@ -615,6 +649,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
int err = 0;
int irq;
int instance = pdev->id;
+ struct usb_phy *u_phy;
pdata = pdev->dev.platform_data;
if (!pdata) {
@@ -656,15 +691,8 @@ static int tegra_ehci_probe(struct platform_device *pdev)
if (err)
goto fail_clk;
- tegra->emc_clk = devm_clk_get(&pdev->dev, "emc");
- if (IS_ERR(tegra->emc_clk)) {
- dev_err(&pdev->dev, "Can't get emc clock\n");
- err = PTR_ERR(tegra->emc_clk);
- goto fail_emc_clk;
- }
-
- clk_prepare_enable(tegra->emc_clk);
- clk_set_rate(tegra->emc_clk, 400000000);
+ tegra->needs_double_reset = of_property_read_bool(pdev->dev.of_node,
+ "nvidia,needs-double-reset");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -712,9 +740,19 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto fail_io;
}
- usb_phy_init(&tegra->phy->u_phy);
+ hcd->phy = u_phy = &tegra->phy->u_phy;
+ usb_phy_init(hcd->phy);
+
+ u_phy->otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
+ GFP_KERNEL);
+ if (!u_phy->otg) {
+ dev_err(&pdev->dev, "Failed to alloc memory for otg\n");
+ err = -ENOMEM;
+ goto fail_io;
+ }
+ u_phy->otg->host = hcd_to_bus(hcd);
- err = usb_phy_set_suspend(&tegra->phy->u_phy, 0);
+ err = usb_phy_set_suspend(hcd->phy, 0);
if (err) {
dev_err(&pdev->dev, "Failed to power on the phy\n");
goto fail;
@@ -760,10 +798,8 @@ fail:
if (!IS_ERR_OR_NULL(tegra->transceiver))
otg_set_host(tegra->transceiver->otg, NULL);
#endif
- usb_phy_shutdown(&tegra->phy->u_phy);
+ usb_phy_shutdown(hcd->phy);
fail_io:
- clk_disable_unprepare(tegra->emc_clk);
-fail_emc_clk:
clk_disable_unprepare(tegra->clk);
fail_clk:
usb_put_hcd(hcd);
@@ -784,15 +820,12 @@ static int tegra_ehci_remove(struct platform_device *pdev)
otg_set_host(tegra->transceiver->otg, NULL);
#endif
+ usb_phy_shutdown(hcd->phy);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
- usb_phy_shutdown(&tegra->phy->u_phy);
-
clk_disable_unprepare(tegra->clk);
- clk_disable_unprepare(tegra->emc_clk);
-
return 0;
}
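
TEGRA_PORTSC1_RWC_BITS above masks PORT_CSC/PORT_PEC/PORT_OCC out of the value read back from PORTSC because they are write-one-to-clear status bits: writing back a 1 that was merely read would acknowledge, and therefore lose, the pending event. A generic read-modify-write sketch of that rule, with the register layout invented:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_REG		0x184
#define EXAMPLE_RWC_BITS	(BIT(1) | BIT(3) | BIT(5))	/* W1C status */
#define EXAMPLE_ENABLE		BIT(23)

static void example_set_enable(void __iomem *base, bool enable)
{
	u32 val;

	/* Drop W1C bits so the write-back does not clear pending events. */
	val = readl(base + EXAMPLE_REG) & ~EXAMPLE_RWC_BITS;
	if (enable)
		val |= EXAMPLE_ENABLE;
	else
		val &= ~EXAMPLE_ENABLE;
	writel(val, base + EXAMPLE_REG);
}
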
diff --git a/drivers/usb/phy/tegra_usb_phy.c b/drivers/usb/phy/tegra_usb_phy.c
index 9d13c81754e0..5487d38481af 100644
--- a/drivers/usb/phy/tegra_usb_phy.c
+++ b/drivers/usb/phy/tegra_usb_phy.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
@@ -35,19 +36,6 @@
#define ULPI_VIEWPORT 0x170
-#define USB_PORTSC1 0x184
-#define USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
-#define USB_PORTSC1_PSPD(x) (((x) & 0x3) << 26)
-#define USB_PORTSC1_PHCD (1 << 23)
-#define USB_PORTSC1_WKOC (1 << 22)
-#define USB_PORTSC1_WKDS (1 << 21)
-#define USB_PORTSC1_WKCN (1 << 20)
-#define USB_PORTSC1_PTC(x) (((x) & 0xf) << 16)
-#define USB_PORTSC1_PP (1 << 12)
-#define USB_PORTSC1_SUSP (1 << 7)
-#define USB_PORTSC1_PE (1 << 2)
-#define USB_PORTSC1_CCS (1 << 0)
-
#define USB_SUSP_CTRL 0x400
#define USB_WAKE_ON_CNNT_EN_DEV (1 << 3)
#define USB_WAKE_ON_DISCON_EN_DEV (1 << 4)
@@ -208,11 +196,6 @@ static struct tegra_utmip_config utmip_default[] = {
},
};
-static inline bool phy_is_ulpi(struct tegra_usb_phy *phy)
-{
- return (phy->instance == 1);
-}
-
static int utmip_pad_open(struct tegra_usb_phy *phy)
{
phy->pad_clk = clk_get_sys("utmip-pad", NULL);
@@ -221,7 +204,7 @@ static int utmip_pad_open(struct tegra_usb_phy *phy)
return PTR_ERR(phy->pad_clk);
}
- if (phy->instance == 0) {
+ if (phy->is_legacy_phy) {
phy->pad_regs = phy->regs;
} else {
phy->pad_regs = ioremap(TEGRA_USB_BASE, TEGRA_USB_SIZE);
@@ -236,7 +219,7 @@ static int utmip_pad_open(struct tegra_usb_phy *phy)
static void utmip_pad_close(struct tegra_usb_phy *phy)
{
- if (phy->instance != 0)
+ if (!phy->is_legacy_phy)
iounmap(phy->pad_regs);
clk_put(phy->pad_clk);
}
@@ -305,7 +288,7 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
unsigned long val;
void __iomem *base = phy->regs;
- if (phy->instance == 0) {
+ if (phy->is_legacy_phy) {
val = readl(base + USB_SUSP_CTRL);
val |= USB_SUSP_SET;
writel(val, base + USB_SUSP_CTRL);
@@ -315,13 +298,8 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
val = readl(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_SET;
writel(val, base + USB_SUSP_CTRL);
- }
-
- if (phy->instance == 2) {
- val = readl(base + USB_PORTSC1);
- val |= USB_PORTSC1_PHCD;
- writel(val, base + USB_PORTSC1);
- }
+ } else
+ tegra_ehci_set_phcd(&phy->u_phy, true);
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0)
pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
@@ -332,7 +310,7 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
unsigned long val;
void __iomem *base = phy->regs;
- if (phy->instance == 0) {
+ if (phy->is_legacy_phy) {
val = readl(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
writel(val, base + USB_SUSP_CTRL);
@@ -342,13 +320,8 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
val = readl(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_CLR;
writel(val, base + USB_SUSP_CTRL);
- }
-
- if (phy->instance == 2) {
- val = readl(base + USB_PORTSC1);
- val &= ~USB_PORTSC1_PHCD;
- writel(val, base + USB_PORTSC1);
- }
+ } else
+ tegra_ehci_set_phcd(&phy->u_phy, false);
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
USB_PHY_CLK_VALID))
@@ -365,7 +338,7 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val |= UTMIP_RESET;
writel(val, base + USB_SUSP_CTRL);
- if (phy->instance == 0) {
+ if (phy->is_legacy_phy) {
val = readl(base + USB1_LEGACY_CTRL);
val |= USB1_NO_LEGACY_MODE;
writel(val, base + USB1_LEGACY_CTRL);
@@ -440,16 +413,14 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
writel(val, base + UTMIP_BIAS_CFG1);
- if (phy->instance == 0) {
+ if (phy->is_legacy_phy) {
val = readl(base + UTMIP_SPARE_CFG0);
if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE)
val &= ~FUSE_SETUP_SEL;
else
val |= FUSE_SETUP_SEL;
writel(val, base + UTMIP_SPARE_CFG0);
- }
-
- if (phy->instance == 2) {
+ } else {
val = readl(base + USB_SUSP_CTRL);
val |= UTMIP_PHY_ENABLE;
writel(val, base + USB_SUSP_CTRL);
@@ -459,7 +430,7 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val &= ~UTMIP_RESET;
writel(val, base + USB_SUSP_CTRL);
- if (phy->instance == 0) {
+ if (phy->is_legacy_phy) {
val = readl(base + USB1_LEGACY_CTRL);
val &= ~USB1_VBUS_SENSE_CTL_MASK;
val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD;
@@ -472,11 +443,8 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
utmi_phy_clk_enable(phy);
- if (phy->instance == 2) {
- val = readl(base + USB_PORTSC1);
- val &= ~USB_PORTSC1_PTS(~0);
- writel(val, base + USB_PORTSC1);
- }
+ if (!phy->is_legacy_phy)
+ tegra_ehci_set_pts(&phy->u_phy, 0);
return 0;
}
@@ -621,10 +589,6 @@ static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
return ret;
}
- val = readl(base + USB_PORTSC1);
- val |= USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN;
- writel(val, base + USB_PORTSC1);
-
val = readl(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
writel(val, base + USB_SUSP_CTRL);
@@ -639,17 +603,8 @@ static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
{
- unsigned long val;
- void __iomem *base = phy->regs;
struct tegra_ulpi_config *config = phy->config;
- /* Clear WKCN/WKDS/WKOC wake-on events that can cause the USB
- * Controller to immediately bring the ULPI PHY out of low power
- */
- val = readl(base + USB_PORTSC1);
- val &= ~(USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN);
- writel(val, base + USB_PORTSC1);
-
clk_disable(phy->clk);
return gpio_direction_output(config->reset_gpio, 0);
}
@@ -660,7 +615,7 @@ static int tegra_phy_init(struct usb_phy *x)
struct tegra_ulpi_config *ulpi_config;
int err;
- if (phy_is_ulpi(phy)) {
+ if (phy->is_ulpi_phy) {
ulpi_config = phy->config;
phy->clk = clk_get_sys(NULL, ulpi_config->clk);
if (IS_ERR(phy->clk)) {
@@ -698,7 +653,7 @@ static void tegra_usb_phy_close(struct usb_phy *x)
{
struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
- if (phy_is_ulpi(phy))
+ if (phy->is_ulpi_phy)
clk_put(phy->clk);
else
utmip_pad_close(phy);
@@ -709,7 +664,7 @@ static void tegra_usb_phy_close(struct usb_phy *x)
static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
{
- if (phy_is_ulpi(phy))
+ if (phy->is_ulpi_phy)
return ulpi_phy_power_on(phy);
else
return utmi_phy_power_on(phy);
@@ -717,7 +672,7 @@ static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
static int tegra_usb_phy_power_off(struct tegra_usb_phy *phy)
{
- if (phy_is_ulpi(phy))
+ if (phy->is_ulpi_phy)
return ulpi_phy_power_off(phy);
else
return utmi_phy_power_off(phy);
@@ -739,8 +694,9 @@ struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
unsigned long parent_rate;
int i;
int err;
+ struct device_node *np = dev->of_node;
- phy = kmalloc(sizeof(struct tegra_usb_phy), GFP_KERNEL);
+ phy = kzalloc(sizeof(struct tegra_usb_phy), GFP_KERNEL);
if (!phy)
return ERR_PTR(-ENOMEM);
@@ -749,9 +705,16 @@ struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
phy->config = config;
phy->mode = phy_mode;
phy->dev = dev;
+ phy->is_legacy_phy =
+ of_property_read_bool(np, "nvidia,has-legacy-mode");
+ err = of_property_match_string(np, "phy_type", "ulpi");
+ if (err < 0)
+ phy->is_ulpi_phy = false;
+ else
+ phy->is_ulpi_phy = true;
if (!phy->config) {
- if (phy_is_ulpi(phy)) {
+ if (phy->is_ulpi_phy) {
pr_err("%s: ulpi phy configuration missing", __func__);
err = -EINVAL;
goto err0;
@@ -796,45 +759,40 @@ err0:
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_open);
-void tegra_usb_phy_preresume(struct tegra_usb_phy *phy)
+void tegra_usb_phy_preresume(struct usb_phy *x)
{
- if (!phy_is_ulpi(phy))
+ struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+
+ if (!phy->is_ulpi_phy)
utmi_phy_preresume(phy);
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_preresume);
-void tegra_usb_phy_postresume(struct tegra_usb_phy *phy)
+void tegra_usb_phy_postresume(struct usb_phy *x)
{
- if (!phy_is_ulpi(phy))
+ struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+
+ if (!phy->is_ulpi_phy)
utmi_phy_postresume(phy);
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_postresume);
-void tegra_ehci_phy_restore_start(struct tegra_usb_phy *phy,
+void tegra_ehci_phy_restore_start(struct usb_phy *x,
enum tegra_usb_phy_port_speed port_speed)
{
- if (!phy_is_ulpi(phy))
+ struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+
+ if (!phy->is_ulpi_phy)
utmi_phy_restore_start(phy, port_speed);
}
EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_start);
-void tegra_ehci_phy_restore_end(struct tegra_usb_phy *phy)
+void tegra_ehci_phy_restore_end(struct usb_phy *x)
{
- if (!phy_is_ulpi(phy))
+ struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+
+ if (!phy->is_ulpi_phy)
utmi_phy_restore_end(phy);
}
EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_end);
-void tegra_usb_phy_clk_disable(struct tegra_usb_phy *phy)
-{
- if (!phy_is_ulpi(phy))
- utmi_phy_clk_disable(phy);
-}
-EXPORT_SYMBOL_GPL(tegra_usb_phy_clk_disable);
-
-void tegra_usb_phy_clk_enable(struct tegra_usb_phy *phy)
-{
- if (!phy_is_ulpi(phy))
- utmi_phy_clk_enable(phy);
-}
-EXPORT_SYMBOL_GPL(tegra_usb_phy_clk_enable);
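
The tegra_usb_phy.c rework above stops keying behaviour off a numeric PHY instance and instead probes the device tree: of_property_read_bool() for "nvidia,has-legacy-mode" and of_property_match_string() on "phy_type" to detect ULPI. A short sketch of that probing pattern (the helper itself is invented):

#include <linux/of.h>

static void example_probe_phy_mode(struct device_node *np, bool *is_legacy,
				   bool *is_ulpi)
{
	*is_legacy = of_property_read_bool(np, "nvidia,has-legacy-mode");

	/* of_property_match_string() returns the index (>= 0) on a match. */
	*is_ulpi = of_property_match_string(np, "phy_type", "ulpi") >= 0;
}
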
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index dad8363e5b2a..17b7f9ae36ad 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -4,7 +4,7 @@
menuconfig USB_SERIAL
tristate "USB Serial Converter support"
- depends on USB
+ depends on USB && TTY
---help---
Say Y here if you have a USB device that provides normal serial
ports, or acts like a serial device, and you want to connect it to
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 6d110a3bc7e7..6e320cec397d 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -119,9 +119,8 @@ static int aircable_probe(struct usb_serial *serial,
return 0;
}
-static int aircable_process_packet(struct tty_struct *tty,
- struct usb_serial_port *port, int has_headers,
- char *packet, int len)
+static int aircable_process_packet(struct usb_serial_port *port,
+ int has_headers, char *packet, int len)
{
if (has_headers) {
len -= HCI_HEADER_LENGTH;
@@ -132,7 +131,7 @@ static int aircable_process_packet(struct tty_struct *tty,
return 0;
}
- tty_insert_flip_string(tty, packet, len);
+ tty_insert_flip_string(&port->port, packet, len);
return len;
}
@@ -141,28 +140,22 @@ static void aircable_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
char *data = (char *)urb->transfer_buffer;
- struct tty_struct *tty;
int has_headers;
int count;
int len;
int i;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
has_headers = (urb->actual_length > 2 && data[0] == RX_HEADER_0);
count = 0;
for (i = 0; i < urb->actual_length; i += HCI_COMPLETE_FRAME) {
len = min_t(int, urb->actual_length - i, HCI_COMPLETE_FRAME);
- count += aircable_process_packet(tty, port, has_headers,
+ count += aircable_process_packet(port, has_headers,
&data[i], len);
}
if (count)
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static struct usb_serial_driver aircable_device = {
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index a88882c0e237..cbd904b8fba5 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -674,7 +674,6 @@ static void ark3116_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct ark3116_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
char tty_flag = TTY_NORMAL;
unsigned long flags;
@@ -689,10 +688,6 @@ static void ark3116_process_read_urb(struct urb *urb)
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
if (lsr & UART_LSR_BRK_ERROR_BITS) {
if (lsr & UART_LSR_BI)
tty_flag = TTY_BREAK;
@@ -703,12 +698,11 @@ static void ark3116_process_read_urb(struct urb *urb)
/* overrun is special, not associated with a char */
if (lsr & UART_LSR_OE)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
- tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static struct usb_serial_driver ark3116_device = {
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index b72a4c166705..84217e78ded4 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -242,7 +242,6 @@ static void belkin_sa_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct belkin_sa_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
unsigned char status;
@@ -259,10 +258,6 @@ static void belkin_sa_process_read_urb(struct urb *urb)
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
if (status & BELKIN_SA_LSR_ERR) {
/* Break takes precedence over parity, which takes precedence
* over framing errors. */
@@ -276,13 +271,12 @@ static void belkin_sa_process_read_urb(struct urb *urb)
/* Overrun is special, not associated with a char. */
if (status & BELKIN_SA_LSR_OE)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
- tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static void belkin_sa_set_termios(struct tty_struct *tty,
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 69a4fa1cee25..629bd2894506 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -324,7 +324,6 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
struct cyberjack_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
short todo;
int result;
@@ -337,16 +336,10 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
return;
}
- tty = tty_port_tty_get(&port->port);
- if (!tty) {
- dev_dbg(dev, "%s - ignoring since device not open\n", __func__);
- return;
- }
if (urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
spin_lock(&priv->lock);
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index fd8c35fd452e..8efa19d0e9fb 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1214,10 +1214,10 @@ static void cypress_read_int_callback(struct urb *urb)
spin_unlock_irqrestore(&priv->lock, flags);
/* process read if there is data other than line status */
- if (tty && bytes > i) {
- tty_insert_flip_string_fixed_flag(tty, data + i,
+ if (bytes > i) {
+ tty_insert_flip_string_fixed_flag(&port->port, data + i,
tty_flag, bytes - i);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
spin_lock_irqsave(&priv->lock, flags);
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 45d4af62967f..ebe45fa0ed50 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1399,9 +1399,7 @@ static void digi_read_bulk_callback(struct urb *urb)
static int digi_read_inb_callback(struct urb *urb)
{
-
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
struct digi_port *priv = usb_get_serial_port_data(port);
int opcode = ((unsigned char *)urb->transfer_buffer)[0];
int len = ((unsigned char *)urb->transfer_buffer)[1];
@@ -1425,7 +1423,6 @@ static int digi_read_inb_callback(struct urb *urb)
return -1;
}
- tty = tty_port_tty_get(&port->port);
spin_lock(&priv->dp_port_lock);
/* check for throttle; if set, do not resubmit read urb */
@@ -1435,13 +1432,13 @@ static int digi_read_inb_callback(struct urb *urb)
priv->dp_throttle_restart = 1;
/* receive data */
- if (tty && opcode == DIGI_CMD_RECEIVE_DATA) {
+ if (opcode == DIGI_CMD_RECEIVE_DATA) {
/* get flag from port_status */
flag = 0;
/* overrun is special, not associated with a char */
if (port_status & DIGI_OVERRUN_ERROR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
@@ -1455,13 +1452,12 @@ static int digi_read_inb_callback(struct urb *urb)
/* data length is len-1 (one byte of len is port_status) */
--len;
if (len > 0) {
- tty_insert_flip_string_fixed_flag(tty, data, flag,
- len);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string_fixed_flag(&port->port, data,
+ flag, len);
+ tty_flip_buffer_push(&port->port);
}
}
spin_unlock(&priv->dp_port_lock);
- tty_kref_put(tty);
if (opcode == DIGI_CMD_RECEIVE_DISABLE)
dev_dbg(&port->dev, "%s: got RECEIVE_DISABLE\n", __func__);
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 6e4eb57d0177..b1b2dc64b50b 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -100,7 +100,6 @@ static void f81232_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct f81232_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
char tty_flag = TTY_NORMAL;
unsigned long flags;
@@ -117,10 +116,6 @@ static void f81232_process_read_urb(struct urb *urb)
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
if (line_status & UART_BREAK_ERROR)
@@ -133,19 +128,19 @@ static void f81232_process_read_urb(struct urb *urb)
/* overrun is special, not associated with a char */
if (line_status & UART_OVERRUN_ERROR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
if (port->port.console && port->sysrq) {
for (i = 0; i < urb->actual_length; ++i)
if (!usb_serial_handle_sysrq_char(port, data[i]))
- tty_insert_flip_char(tty, data[i], tty_flag);
+ tty_insert_flip_char(&port->port, data[i],
+ tty_flag);
} else {
- tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static int set_control_lines(struct usb_device *dev, u8 value)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index d07fccf3bab5..edd162df49ca 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1960,9 +1960,8 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
#define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE)
-static int ftdi_process_packet(struct tty_struct *tty,
- struct usb_serial_port *port, struct ftdi_private *priv,
- char *packet, int len)
+static int ftdi_process_packet(struct usb_serial_port *port,
+ struct ftdi_private *priv, char *packet, int len)
{
int i;
char status;
@@ -2012,7 +2011,7 @@ static int ftdi_process_packet(struct tty_struct *tty,
/* Overrun is special, not associated with a char */
if (packet[1] & FTDI_RS_OE) {
priv->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
}
@@ -2031,10 +2030,10 @@ static int ftdi_process_packet(struct tty_struct *tty,
if (port->port.console && port->sysrq) {
for (i = 0; i < len; i++, ch++) {
if (!usb_serial_handle_sysrq_char(port, *ch))
- tty_insert_flip_char(tty, *ch, flag);
+ tty_insert_flip_char(&port->port, *ch, flag);
}
} else {
- tty_insert_flip_string_fixed_flag(tty, ch, flag, len);
+ tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len);
}
return len;
@@ -2043,25 +2042,19 @@ static int ftdi_process_packet(struct tty_struct *tty,
static void ftdi_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
struct ftdi_private *priv = usb_get_serial_port_data(port);
char *data = (char *)urb->transfer_buffer;
int i;
int len;
int count = 0;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
for (i = 0; i < urb->actual_length; i += priv->max_packet_size) {
len = min_t(int, urb->actual_length - i, priv->max_packet_size);
- count += ftdi_process_packet(tty, port, priv, &data[i], len);
+ count += ftdi_process_packet(port, priv, &data[i], len);
}
if (count)
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 203358d7e7bc..1a07b12ef341 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -252,14 +252,11 @@ static inline int isAbortTrfCmnd(const unsigned char *buf)
static void send_to_tty(struct usb_serial_port *port,
char *data, unsigned int actual_length)
{
- struct tty_struct *tty = tty_port_tty_get(&port->port);
-
- if (tty && actual_length) {
+ if (actual_length) {
usb_serial_debug_data(&port->dev, __func__, actual_length, data);
- tty_insert_flip_string(tty, data, actual_length);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&port->port, data, actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
}
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 2ea70a631996..4c5c23f1cae5 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -313,30 +313,24 @@ EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urbs);
void usb_serial_generic_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
char *ch = (char *)urb->transfer_buffer;
int i;
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
/* The per character mucking around with sysrq path it too slow for
stuff like 3G modems, so shortcircuit it in the 99.9999999% of cases
where the USB serial is not a console anyway */
if (!port->port.console || !port->sysrq)
- tty_insert_flip_string(tty, ch, urb->actual_length);
+ tty_insert_flip_string(&port->port, ch, urb->actual_length);
else {
for (i = 0; i < urb->actual_length; i++, ch++) {
if (!usb_serial_handle_sysrq_char(port, *ch))
- tty_insert_flip_char(tty, *ch, TTY_NORMAL);
+ tty_insert_flip_char(&port->port, *ch, TTY_NORMAL);
}
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_process_read_urb);
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 7b770c7f8b11..b00e5cbf741f 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -232,8 +232,8 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
unsigned char *buffer, __u16 bufferLength);
static void process_rcvd_status(struct edgeport_serial *edge_serial,
__u8 byte2, __u8 byte3);
-static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
- unsigned char *data, int length);
+static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
+ int length);
static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr);
static void handle_new_lsr(struct edgeport_port *edge_port, __u8 lsrData,
__u8 lsr, __u8 data);
@@ -1752,7 +1752,6 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
struct device *dev = &edge_serial->serial->dev->dev;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
- struct tty_struct *tty;
__u16 lastBufferLength;
__u16 rxLen;
@@ -1860,14 +1859,11 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
if (edge_port->open) {
- tty = tty_port_tty_get(
- &edge_port->port->port);
- if (tty) {
- dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n",
- __func__, rxLen, edge_serial->rxPort);
- edge_tty_recv(&edge_serial->serial->dev->dev, tty, buffer, rxLen);
- tty_kref_put(tty);
- }
+ dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n",
+ __func__, rxLen,
+ edge_serial->rxPort);
+ edge_tty_recv(edge_port->port, buffer,
+ rxLen);
edge_port->icount.rx += rxLen;
}
buffer += rxLen;
@@ -2017,20 +2013,20 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
* edge_tty_recv
* this function passes data on to the tty flip buffer
*****************************************************************************/
-static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
- unsigned char *data, int length)
+static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
+ int length)
{
int cnt;
- cnt = tty_insert_flip_string(tty, data, length);
+ cnt = tty_insert_flip_string(&port->port, data, length);
if (cnt < length) {
- dev_err(dev, "%s - dropping data, %d bytes lost\n",
+ dev_err(&port->dev, "%s - dropping data, %d bytes lost\n",
__func__, length - cnt);
}
data += cnt;
length -= cnt;
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
@@ -2086,14 +2082,9 @@ static void handle_new_lsr(struct edgeport_port *edge_port, __u8 lsrData,
}
/* Place LSR data byte into Rx buffer */
- if (lsrData) {
- struct tty_struct *tty =
- tty_port_tty_get(&edge_port->port->port);
- if (tty) {
- edge_tty_recv(&edge_port->port->dev, tty, &data, 1);
- tty_kref_put(tty);
- }
- }
+ if (lsrData)
+ edge_tty_recv(edge_port->port, &data, 1);
+
/* update input line counters */
icount = &edge_port->icount;
if (newLsr & LSR_BREAK)
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 641ab3da2d83..c23776679f70 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -201,8 +201,8 @@ static int closing_wait = EDGE_CLOSING_WAIT;
static bool ignore_cpu_rev;
static int default_uart_mode; /* RS232 */
-static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
- unsigned char *data, int length);
+static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
+ int length);
static void stop_read(struct edgeport_port *edge_port);
static int restart_read(struct edgeport_port *edge_port);
@@ -1484,7 +1484,6 @@ static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data,
struct async_icount *icount;
__u8 new_lsr = (__u8)(lsr & (__u8)(LSR_OVER_ERR | LSR_PAR_ERR |
LSR_FRM_ERR | LSR_BREAK));
- struct tty_struct *tty;
dev_dbg(&edge_port->port->dev, "%s - %02x\n", __func__, new_lsr);
@@ -1498,13 +1497,8 @@ static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data,
new_lsr &= (__u8)(LSR_OVER_ERR | LSR_BREAK);
/* Place LSR data byte into Rx buffer */
- if (lsr_data) {
- tty = tty_port_tty_get(&edge_port->port->port);
- if (tty) {
- edge_tty_recv(&edge_port->port->dev, tty, &data, 1);
- tty_kref_put(tty);
- }
- }
+ if (lsr_data)
+ edge_tty_recv(edge_port->port, &data, 1);
/* update input line counters */
icount = &edge_port->icount;
@@ -1620,7 +1614,6 @@ static void edge_bulk_in_callback(struct urb *urb)
struct edgeport_port *edge_port = urb->context;
struct device *dev = &edge_port->port->dev;
unsigned char *data = urb->transfer_buffer;
- struct tty_struct *tty;
int retval = 0;
int port_number;
int status = urb->status;
@@ -1659,17 +1652,16 @@ static void edge_bulk_in_callback(struct urb *urb)
++data;
}
- tty = tty_port_tty_get(&edge_port->port->port);
- if (tty && urb->actual_length) {
+ if (urb->actual_length) {
usb_serial_debug_data(dev, __func__, urb->actual_length, data);
if (edge_port->close_pending)
dev_dbg(dev, "%s - close pending, dropping data on the floor\n",
__func__);
else
- edge_tty_recv(dev, tty, data, urb->actual_length);
+ edge_tty_recv(edge_port->port, data,
+ urb->actual_length);
edge_port->icount.rx += urb->actual_length;
}
- tty_kref_put(tty);
exit:
/* continue read unless stopped */
@@ -1684,16 +1676,16 @@ exit:
dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval);
}
-static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
- unsigned char *data, int length)
+static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
+ int length)
{
int queued;
- queued = tty_insert_flip_string(tty, data, length);
+ queued = tty_insert_flip_string(&port->port, data, length);
if (queued < length)
- dev_err(dev, "%s - dropping data, %d bytes lost\n",
+ dev_err(&port->dev, "%s - dropping data, %d bytes lost\n",
__func__, length - queued);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
static void edge_bulk_out_callback(struct urb *urb)
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index e24e2d4f4c1b..716930ab1bb1 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -287,7 +287,6 @@ static void ir_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- struct tty_struct *tty;
if (!urb->actual_length)
return;
@@ -302,12 +301,8 @@ static void ir_process_read_urb(struct urb *urb)
if (urb->actual_length == 1)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
- tty_insert_flip_string(tty, data + 1, urb->actual_length - 1);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(&port->port, data + 1, urb->actual_length - 1);
+ tty_flip_buffer_push(&port->port);
}
static void ir_set_termios_callback(struct urb *urb)
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 1e1fbed65ef2..ff77027160aa 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -581,7 +581,6 @@ static void read_buf_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- struct tty_struct *tty;
int status = urb->status;
if (status) {
@@ -592,14 +591,12 @@ static void read_buf_callback(struct urb *urb)
}
dev_dbg(&port->dev, "%s - %i chars to write\n", __func__, urb->actual_length);
- tty = tty_port_tty_get(&port->port);
if (data == NULL)
dev_dbg(&port->dev, "%s - data is NULL !!!\n", __func__);
- if (tty && urb->actual_length && data) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ if (urb->actual_length && data) {
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
iuu_led_activity_on(urb);
}
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 3d95637f3d68..1fd1935c8316 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -291,7 +291,6 @@ static void usa26_indat_callback(struct urb *urb)
int i, err;
int endpoint;
struct usb_serial_port *port;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -304,8 +303,7 @@ static void usa26_indat_callback(struct urb *urb)
}
port = urb->context;
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
+ if (urb->actual_length) {
/* 0x80 bit is error flag */
if ((data[0] & 0x80) == 0) {
/* no errors on individual bytes, only
@@ -315,7 +313,7 @@ static void usa26_indat_callback(struct urb *urb)
else
err = 0;
for (i = 1; i < urb->actual_length ; ++i)
- tty_insert_flip_char(tty, data[i], err);
+ tty_insert_flip_char(&port->port, data[i], err);
} else {
/* some bytes had errors, every byte has status */
dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
@@ -328,12 +326,12 @@ static void usa26_indat_callback(struct urb *urb)
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
- tty_insert_flip_char(tty, data[i+1], flag);
+ tty_insert_flip_char(&port->port, data[i+1],
+ flag);
}
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -446,7 +444,6 @@ static void usa28_indat_callback(struct urb *urb)
{
int err;
struct usb_serial_port *port;
- struct tty_struct *tty;
unsigned char *data;
struct keyspan_port_private *p_priv;
int status = urb->status;
@@ -469,12 +466,11 @@ static void usa28_indat_callback(struct urb *urb)
p_priv = usb_get_serial_port_data(port);
data = urb->transfer_buffer;
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ if (urb->actual_length) {
+ tty_insert_flip_string(&port->port, data,
+ urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -669,7 +665,6 @@ static void usa49_indat_callback(struct urb *urb)
int i, err;
int endpoint;
struct usb_serial_port *port;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -682,12 +677,11 @@ static void usa49_indat_callback(struct urb *urb)
}
port = urb->context;
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
+ if (urb->actual_length) {
/* 0x80 bit is error flag */
if ((data[0] & 0x80) == 0) {
/* no error on any byte */
- tty_insert_flip_string(tty, data + 1,
+ tty_insert_flip_string(&port->port, data + 1,
urb->actual_length - 1);
} else {
/* some bytes had errors, every byte has status */
@@ -700,12 +694,12 @@ static void usa49_indat_callback(struct urb *urb)
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
- tty_insert_flip_char(tty, data[i+1], flag);
+ tty_insert_flip_char(&port->port, data[i+1],
+ flag);
}
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -718,7 +712,6 @@ static void usa49wg_indat_callback(struct urb *urb)
int i, len, x, err;
struct usb_serial *serial;
struct usb_serial_port *port;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -743,7 +736,6 @@ static void usa49wg_indat_callback(struct urb *urb)
return;
}
port = serial->port[data[i++]];
- tty = tty_port_tty_get(&port->port);
len = data[i++];
/* 0x80 bit is error flag */
@@ -751,7 +743,8 @@ static void usa49wg_indat_callback(struct urb *urb)
/* no error on any byte */
i++;
for (x = 1; x < len ; ++x)
- tty_insert_flip_char(tty, data[i++], 0);
+ tty_insert_flip_char(&port->port,
+ data[i++], 0);
} else {
/*
* some bytes had errors, every byte has status
@@ -765,13 +758,12 @@ static void usa49wg_indat_callback(struct urb *urb)
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(&port->port,
data[i+1], flag);
i += 2;
}
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
}
@@ -792,7 +784,6 @@ static void usa90_indat_callback(struct urb *urb)
int endpoint;
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -808,12 +799,12 @@ static void usa90_indat_callback(struct urb *urb)
p_priv = usb_get_serial_port_data(port);
if (urb->actual_length) {
- tty = tty_port_tty_get(&port->port);
/* if current mode is DMA, looks like usa28 format
otherwise looks like usa26 data format */
if (p_priv->baud > 57600)
- tty_insert_flip_string(tty, data, urb->actual_length);
+ tty_insert_flip_string(&port->port, data,
+ urb->actual_length);
else {
/* 0x80 bit is error flag */
if ((data[0] & 0x80) == 0) {
@@ -824,8 +815,8 @@ static void usa90_indat_callback(struct urb *urb)
else
err = 0;
for (i = 1; i < urb->actual_length ; ++i)
- tty_insert_flip_char(tty, data[i],
- err);
+ tty_insert_flip_char(&port->port,
+ data[i], err);
} else {
/* some bytes had errors, every byte has status */
dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
@@ -838,13 +829,12 @@ static void usa90_indat_callback(struct urb *urb)
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
- tty_insert_flip_char(tty, data[i+1],
- flag);
+ tty_insert_flip_char(&port->port,
+ data[i+1], flag);
}
}
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
/* Resubmit urb so we continue receiving */
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 41b01092af07..3b17d5d13dc8 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -138,7 +138,6 @@ static void keyspan_pda_request_unthrottle(struct work_struct *work)
static void keyspan_pda_rx_interrupt(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int retval;
int status = urb->status;
@@ -163,14 +162,12 @@ static void keyspan_pda_rx_interrupt(struct urb *urb)
/* see if the message is data or a status interrupt */
switch (data[0]) {
case 0:
- tty = tty_port_tty_get(&port->port);
/* rest of message is rx data */
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data + 1,
+ if (urb->actual_length) {
+ tty_insert_flip_string(&port->port, data + 1,
urb->actual_length - 1);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
break;
case 1:
/* status interrupt */
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index fc9e14a1e9b3..769d910ae0a5 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -389,7 +389,6 @@ static void klsi_105_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- struct tty_struct *tty;
unsigned len;
/* empty urbs seem to happen, we ignore them */
@@ -401,19 +400,14 @@ static void klsi_105_process_read_urb(struct urb *urb)
return;
}
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
len = get_unaligned_le16(data);
if (len > urb->actual_length - KLSI_HDR_LEN) {
dev_dbg(&port->dev, "%s - packet length mismatch\n", __func__);
len = urb->actual_length - KLSI_HDR_LEN;
}
- tty_insert_flip_string(tty, data + KLSI_HDR_LEN, len);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(&port->port, data + KLSI_HDR_LEN, len);
+ tty_flip_buffer_push(&port->port);
}
static void klsi_105_set_termios(struct tty_struct *tty,
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index b747ba615d0b..903d938e174b 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -324,7 +324,6 @@ static void kobil_read_int_callback(struct urb *urb)
{
int result;
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -333,8 +332,7 @@ static void kobil_read_int_callback(struct urb *urb)
return;
}
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
+ if (urb->actual_length) {
/* BEGIN DEBUG */
/*
@@ -353,10 +351,9 @@ static void kobil_read_int_callback(struct urb *urb)
*/
/* END DEBUG */
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index d9c86516fed4..a64d420f687b 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -527,7 +527,6 @@ static void mct_u232_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int retval;
int status = urb->status;
@@ -557,13 +556,9 @@ static void mct_u232_read_int_callback(struct urb *urb)
*/
if (urb->transfer_buffer_length > 2) {
if (urb->actual_length) {
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_insert_flip_string(tty, data,
- urb->actual_length);
- tty_flip_buffer_push(tty);
- }
- tty_kref_put(tty);
+ tty_insert_flip_string(&port->port, data,
+ urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
goto exit;
}
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 3d258448c29a..bf3c7a23553e 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -95,7 +95,6 @@ static void metrousb_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int throttled = 0;
int result = 0;
@@ -124,15 +123,13 @@ static void metrousb_read_int_callback(struct urb *urb)
/* Set the data read from the usb port into the serial port buffer. */
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
+ if (urb->actual_length) {
/* Loop through the data copying each byte to the tty layer. */
- tty_insert_flip_string(tty, data, urb->actual_length);
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
/* Force the data to the tty layer. */
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
/* Set any port variables. */
spin_lock_irqsave(&metro_priv->lock, flags);
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index f57a6b1fe787..e0ebec3b5d6a 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -899,7 +899,6 @@ static void mos7720_bulk_in_callback(struct urb *urb)
int retval;
unsigned char *data ;
struct usb_serial_port *port;
- struct tty_struct *tty;
int status = urb->status;
if (status) {
@@ -913,12 +912,10 @@ static void mos7720_bulk_in_callback(struct urb *urb)
data = urb->transfer_buffer;
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ if (urb->actual_length) {
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
if (port->read_urb->status != -EINPROGRESS) {
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 66d9e088d9d9..809fb329eca5 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -744,7 +744,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
struct usb_serial *serial;
struct usb_serial_port *port;
struct moschip_port *mos7840_port;
- struct tty_struct *tty;
int status = urb->status;
mos7840_port = urb->context;
@@ -773,12 +772,9 @@ static void mos7840_bulk_in_callback(struct urb *urb)
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
if (urb->actual_length) {
- tty = tty_port_tty_get(&mos7840_port->port->port);
- if (tty) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ struct tty_port *tport = &mos7840_port->port->port;
+ tty_insert_flip_string(tport, data, urb->actual_length);
+ tty_flip_buffer_push(tport);
mos7840_port->icount.rx += urb->actual_length;
smp_wmb();
dev_dbg(&port->dev, "mos7840_port->icount.rx is %d:\n", mos7840_port->icount.rx);
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 1566f8f500ae..38725fc8c2c8 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -32,7 +32,6 @@ static void navman_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- struct tty_struct *tty;
int status = urb->status;
int result;
@@ -55,12 +54,10 @@ static void navman_read_int_callback(struct urb *urb)
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ if (urb->actual_length) {
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
exit:
result = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 7818af931a48..1e1cafe287e4 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -174,13 +174,9 @@ static void omninet_read_bulk_callback(struct urb *urb)
}
if (urb->actual_length && header->oh_len) {
- struct tty_struct *tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_insert_flip_string(tty, data + OMNINET_DATAOFFSET,
- header->oh_len);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty_insert_flip_string(&port->port, data + OMNINET_DATAOFFSET,
+ header->oh_len);
+ tty_flip_buffer_push(&port->port);
}
/* Continue trying to always read */
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index c6bfb83efb1e..e13e1a4d3e1e 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -51,15 +51,8 @@ struct opticon_private {
static void opticon_process_data_packet(struct usb_serial_port *port,
const unsigned char *buf, size_t len)
{
- struct tty_struct *tty;
-
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
- tty_insert_flip_string(tty, buf, len);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(&port->port, buf, len);
+ tty_flip_buffer_push(&port->port);
}
static void opticon_process_status_packet(struct usb_serial_port *port,
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index d217fd6ee43f..a958fd41b5b3 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -820,7 +820,6 @@ static void oti6858_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct oti6858_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
int status = urb->status;
@@ -835,12 +834,10 @@ static void oti6858_read_bulk_callback(struct urb *urb)
return;
}
- tty = tty_port_tty_get(&port->port);
- if (tty != NULL && urb->actual_length > 0) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ if (urb->actual_length > 0) {
+ tty_insert_flip_string(&port->port, data, urb->actual_length);
+ tty_flip_buffer_push(&port->port);
}
- tty_kref_put(tty);
/* schedule the interrupt urb */
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 600241901361..54adc9125e5c 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -772,7 +772,6 @@ static void pl2303_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct pl2303_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
char tty_flag = TTY_NORMAL;
unsigned long flags;
@@ -789,10 +788,6 @@ static void pl2303_process_read_urb(struct urb *urb)
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
if (line_status & UART_BREAK_ERROR)
@@ -805,19 +800,19 @@ static void pl2303_process_read_urb(struct urb *urb)
/* overrun is special, not associated with a char */
if (line_status & UART_OVERRUN_ERROR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
if (port->port.console && port->sysrq) {
for (i = 0; i < urb->actual_length; ++i)
if (!usb_serial_handle_sysrq_char(port, data[i]))
- tty_insert_flip_char(tty, data[i], tty_flag);
+ tty_insert_flip_char(&port->port, data[i],
+ tty_flag);
} else {
- tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
}
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
/* All of the device info needed for the PL2303 SIO serial converter */
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index a8d5110d4cc5..00e6c9bac8a3 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -609,7 +609,6 @@ void qt2_process_read_urb(struct urb *urb)
struct qt2_serial_private *serial_priv;
struct usb_serial_port *port;
struct qt2_port_private *port_priv;
- struct tty_struct *tty;
bool escapeflag;
unsigned char *ch;
int i;
@@ -620,15 +619,11 @@ void qt2_process_read_urb(struct urb *urb)
return;
ch = urb->transfer_buffer;
- tty = NULL;
serial = urb->context;
serial_priv = usb_get_serial_data(serial);
port = serial->port[serial_priv->current_port];
port_priv = usb_get_serial_port_data(port);
- if (port_priv->is_open)
- tty = tty_port_tty_get(&port->port);
-
for (i = 0; i < urb->actual_length; i++) {
ch = (unsigned char *)urb->transfer_buffer + i;
if ((i <= (len - 3)) &&
@@ -666,10 +661,7 @@ void qt2_process_read_urb(struct urb *urb)
__func__);
break;
}
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty_flip_buffer_push(&port->port);
newport = *(ch + 3);
@@ -683,10 +675,6 @@ void qt2_process_read_urb(struct urb *urb)
serial_priv->current_port = newport;
port = serial->port[serial_priv->current_port];
port_priv = usb_get_serial_port_data(port);
- if (port_priv->is_open)
- tty = tty_port_tty_get(&port->port);
- else
- tty = NULL;
i += 3;
escapeflag = true;
break;
@@ -697,8 +685,8 @@ void qt2_process_read_urb(struct urb *urb)
escapeflag = true;
break;
case QT2_CONTROL_ESCAPE:
- tty_buffer_request_room(tty, 2);
- tty_insert_flip_string(tty, ch, 2);
+ tty_buffer_request_room(&port->port, 2);
+ tty_insert_flip_string(&port->port, ch, 2);
i += 2;
escapeflag = true;
break;
@@ -712,16 +700,11 @@ void qt2_process_read_urb(struct urb *urb)
continue;
}
- if (tty) {
- tty_buffer_request_room(tty, 1);
- tty_insert_flip_string(tty, ch, 1);
- }
+ tty_buffer_request_room(&port->port, 1);
+ tty_insert_flip_string(&port->port, ch, 1);
}
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty_flip_buffer_push(&port->port);
}
static void qt2_write_bulk_callback(struct urb *urb)
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index c949ce6ef0c6..21cd7bf2a8cc 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -207,38 +207,31 @@ static void safe_process_read_urb(struct urb *urb)
unsigned char *data = urb->transfer_buffer;
unsigned char length = urb->actual_length;
int actual_length;
- struct tty_struct *tty;
__u16 fcs;
if (!length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
if (!safe)
goto out;
fcs = fcs_compute10(data, length, CRC10_INITFCS);
if (fcs) {
dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
- goto err;
+ return;
}
actual_length = data[length - 2] >> 2;
if (actual_length > (length - 2)) {
dev_err(&port->dev, "%s - inconsistent lengths %d:%d\n",
__func__, actual_length, length);
- goto err;
+ return;
}
dev_info(&urb->dev->dev, "%s - actual: %d\n", __func__, actual_length);
length = actual_length;
out:
- tty_insert_flip_string(tty, data, length);
- tty_flip_buffer_push(tty);
-err:
- tty_kref_put(tty);
+ tty_insert_flip_string(&port->port, data, length);
+ tty_flip_buffer_push(&port->port);
}
static int safe_prepare_write_buffer(struct usb_serial_port *port,
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index d4426c038c32..c13f6e747748 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -569,7 +569,6 @@ static void sierra_indat_callback(struct urb *urb)
int err;
int endpoint;
struct usb_serial_port *port;
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -581,16 +580,12 @@ static void sierra_indat_callback(struct urb *urb)
" endpoint %02x\n", __func__, status, endpoint);
} else {
if (urb->actual_length) {
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_insert_flip_string(tty, data,
- urb->actual_length);
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
- usb_serial_debug_data(&port->dev, __func__,
- urb->actual_length, data);
- }
+ tty_insert_flip_string(&port->port, data,
+ urb->actual_length);
+ tty_flip_buffer_push(&port->port);
+
+ usb_serial_debug_data(&port->dev, __func__,
+ urb->actual_length, data);
} else {
dev_dbg(&port->dev, "%s: empty read urb"
" received\n", __func__);
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index a42536af1256..91ff8e3bddbd 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -462,7 +462,6 @@ static void spcp8x5_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
u8 status;
@@ -481,9 +480,6 @@ static void spcp8x5_process_read_urb(struct urb *urb)
if (!urb->actual_length)
return;
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
if (status & UART_STATE_TRANSIENT_MASK) {
/* break takes precedence over parity, which takes precedence
@@ -498,17 +494,21 @@ static void spcp8x5_process_read_urb(struct urb *urb)
/* overrun is special, not associated with a char */
if (status & UART_OVERRUN_ERROR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
-
- if (status & UART_DCD)
- usb_serial_handle_dcd_change(port, tty,
- priv->line_status & MSR_STATUS_LINE_DCD);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
+
+ if (status & UART_DCD) {
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
+ if (tty) {
+ usb_serial_handle_dcd_change(port, tty,
+ priv->line_status & MSR_STATUS_LINE_DCD);
+ tty_kref_put(tty);
+ }
+ }
}
- tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
urb->actual_length);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static int spcp8x5_wait_modem_info(struct usb_serial_port *port,
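
Not every call site can drop the tty reference entirely: usb_serial_handle_dcd_change() still takes a struct tty_struct, so the spcp8x5 hunk above keeps a short-lived tty_port_tty_get()/tty_kref_put() pair around just that call. A hedged sketch of that narrower pattern (helper name is hypothetical):

#include <linux/tty.h>
#include <linux/usb/serial.h>

/* Illustrative: take a tty reference only for the one call that needs it. */
static void demo_report_dcd(struct usb_serial_port *port, bool dcd)
{
	struct tty_struct *tty = tty_port_tty_get(&port->port);

	if (tty) {
		usb_serial_handle_dcd_change(port, tty, dcd);
		tty_kref_put(tty);
	}
}
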
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index d938396171e8..b57cf841c5b6 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -579,8 +579,7 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
}
-static int ssu100_process_packet(struct urb *urb,
- struct tty_struct *tty)
+static void ssu100_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
char *packet = (char *)urb->transfer_buffer;
@@ -595,7 +594,8 @@ static int ssu100_process_packet(struct urb *urb,
if (packet[2] == 0x00) {
ssu100_update_lsr(port, packet[3], &flag);
if (flag == TTY_OVERRUN)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0,
+ TTY_OVERRUN);
}
if (packet[2] == 0x01)
ssu100_update_msr(port, packet[3]);
@@ -606,34 +606,17 @@ static int ssu100_process_packet(struct urb *urb,
ch = packet;
if (!len)
- return 0; /* status only */
+ return; /* status only */
if (port->port.console && port->sysrq) {
for (i = 0; i < len; i++, ch++) {
if (!usb_serial_handle_sysrq_char(port, *ch))
- tty_insert_flip_char(tty, *ch, flag);
+ tty_insert_flip_char(&port->port, *ch, flag);
}
} else
- tty_insert_flip_string_fixed_flag(tty, ch, flag, len);
-
- return len;
-}
-
-static void ssu100_process_read_urb(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
- int count;
-
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
- count = ssu100_process_packet(urb, tty);
+ tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len);
- if (count)
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_flip_buffer_push(&port->port);
}
static struct usb_serial_driver ssu100_device = {
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 701fffa8431f..be05e6caf9a3 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -48,7 +48,6 @@ static void symbol_int_callback(struct urb *urb)
unsigned char *data = urb->transfer_buffer;
struct usb_serial_port *port = priv->port;
int status = urb->status;
- struct tty_struct *tty;
int result;
int data_length;
@@ -82,12 +81,8 @@ static void symbol_int_callback(struct urb *urb)
* we pretty much just ignore the size and send everything
* else to the tty layer.
*/
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_insert_flip_string(tty, &data[1], data_length);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty_insert_flip_string(&port->port, &data[1], data_length);
+ tty_flip_buffer_push(&port->port);
} else {
dev_dbg(&priv->udev->dev,
"Improper amount of data received from the device, "
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index f2530d2ef3c4..39cb9b807c3c 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -121,8 +121,8 @@ static void ti_interrupt_callback(struct urb *urb);
static void ti_bulk_in_callback(struct urb *urb);
static void ti_bulk_out_callback(struct urb *urb);
-static void ti_recv(struct device *dev, struct tty_struct *tty,
- unsigned char *data, int length);
+static void ti_recv(struct usb_serial_port *port, unsigned char *data,
+ int length);
static void ti_send(struct ti_port *tport);
static int ti_set_mcr(struct ti_port *tport, unsigned int mcr);
static int ti_get_lsr(struct ti_port *tport);
@@ -1118,7 +1118,6 @@ static void ti_bulk_in_callback(struct urb *urb)
struct device *dev = &urb->dev->dev;
int status = urb->status;
int retval = 0;
- struct tty_struct *tty;
switch (status) {
case 0:
@@ -1145,24 +1144,18 @@ static void ti_bulk_in_callback(struct urb *urb)
return;
}
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- if (urb->actual_length) {
- usb_serial_debug_data(dev, __func__, urb->actual_length,
- urb->transfer_buffer);
+ if (urb->actual_length) {
+ usb_serial_debug_data(dev, __func__, urb->actual_length,
+ urb->transfer_buffer);
- if (!tport->tp_is_open)
- dev_dbg(dev, "%s - port closed, dropping data\n",
- __func__);
- else
- ti_recv(&urb->dev->dev, tty,
- urb->transfer_buffer,
- urb->actual_length);
- spin_lock(&tport->tp_lock);
- tport->tp_icount.rx += urb->actual_length;
- spin_unlock(&tport->tp_lock);
- }
- tty_kref_put(tty);
+ if (!tport->tp_is_open)
+ dev_dbg(dev, "%s - port closed, dropping data\n",
+ __func__);
+ else
+ ti_recv(port, urb->transfer_buffer, urb->actual_length);
+ spin_lock(&tport->tp_lock);
+ tport->tp_icount.rx += urb->actual_length;
+ spin_unlock(&tport->tp_lock);
}
exit:
@@ -1210,24 +1203,23 @@ static void ti_bulk_out_callback(struct urb *urb)
}
-static void ti_recv(struct device *dev, struct tty_struct *tty,
- unsigned char *data, int length)
+static void ti_recv(struct usb_serial_port *port, unsigned char *data,
+ int length)
{
int cnt;
do {
- cnt = tty_insert_flip_string(tty, data, length);
+ cnt = tty_insert_flip_string(&port->port, data, length);
if (cnt < length) {
- dev_err(dev, "%s - dropping data, %d bytes lost\n",
+ dev_err(&port->dev, "%s - dropping data, %d bytes lost\n",
__func__, length - cnt);
if (cnt == 0)
break;
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->port);
data += cnt;
length -= cnt;
} while (length > 0);
-
}
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 1355a6cd4508..571965aa1cc0 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -273,7 +273,6 @@ static void usb_wwan_indat_callback(struct urb *urb)
int err;
int endpoint;
struct usb_serial_port *port;
- struct tty_struct *tty;
struct device *dev;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -286,16 +285,12 @@ static void usb_wwan_indat_callback(struct urb *urb)
dev_dbg(dev, "%s: nonzero status: %d on endpoint %02x.\n",
__func__, status, endpoint);
} else {
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- if (urb->actual_length) {
- tty_insert_flip_string(tty, data,
- urb->actual_length);
- tty_flip_buffer_push(tty);
- } else
- dev_dbg(dev, "%s: empty read urb received\n", __func__);
- tty_kref_put(tty);
- }
+ if (urb->actual_length) {
+ tty_insert_flip_string(&port->port, data,
+ urb->actual_length);
+ tty_flip_buffer_push(&port->port);
+ } else
+ dev_dbg(dev, "%s: empty read urb received\n", __func__);
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 5980758563eb..c41b01e2b693 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -6,3 +6,13 @@ config VFIO_PCI
use of PCI drivers using the VFIO framework.
If you don't know what to do here, say N.
+
+config VFIO_PCI_VGA
+ bool "VFIO PCI support for VGA devices"
+ depends on VFIO_PCI && X86 && VGA_ARB
+ help
+ Support for VGA extension to VFIO PCI. This exposes an additional
+ region on VGA devices for accessing legacy VGA addresses used by
+ BIOS and generic video drivers.
+
+ If you don't know what to do here, say N.
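
For context, the new VGA region is discovered from userspace like any other VFIO PCI region. A rough sketch, assuming a vfio device fd has already been obtained through the usual container/group setup (error handling trimmed; offsets follow the legacy VGA layout used by the region handler below):

#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdio.h>

/* Sketch: query the VGA region and read a few bytes of the legacy
 * framebuffer window at 0xa0000 within that region. */
static int dump_vga(int device_fd)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_PCI_VGA_REGION_INDEX,
	};
	unsigned char buf[16];

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info))
		return -1;	/* no CONFIG_VFIO_PCI_VGA, or not a VGA device */

	if (pread(device_fd, buf, sizeof(buf),
		  info.offset + 0xa0000) != (ssize_t)sizeof(buf))
		return -1;

	printf("VGA region: size %llu, first byte %02x\n",
	       (unsigned long long)info.size, buf[0]);
	return 0;
}
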
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index b28e66c4376a..8189cb6a86af 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -84,6 +84,11 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
} else
vdev->msix_bar = 0xFF;
+#ifdef CONFIG_VFIO_PCI_VGA
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ vdev->has_vga = true;
+#endif
+
return 0;
}
@@ -285,6 +290,16 @@ static long vfio_pci_ioctl(void *device_data,
info.flags = VFIO_REGION_INFO_FLAG_READ;
break;
}
+ case VFIO_PCI_VGA_REGION_INDEX:
+ if (!vdev->has_vga)
+ return -EINVAL;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0xc0000;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+
+ break;
default:
return -EINVAL;
}
@@ -366,52 +381,50 @@ static long vfio_pci_ioctl(void *device_data,
return -ENOTTY;
}
-static ssize_t vfio_pci_read(void *device_data, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
{
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
struct vfio_pci_device *vdev = device_data;
- struct pci_dev *pdev = vdev->pdev;
if (index >= VFIO_PCI_NUM_REGIONS)
return -EINVAL;
- if (index == VFIO_PCI_CONFIG_REGION_INDEX)
- return vfio_pci_config_readwrite(vdev, buf, count, ppos, false);
- else if (index == VFIO_PCI_ROM_REGION_INDEX)
- return vfio_pci_mem_readwrite(vdev, buf, count, ppos, false);
- else if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
- return vfio_pci_io_readwrite(vdev, buf, count, ppos, false);
- else if (pci_resource_flags(pdev, index) & IORESOURCE_MEM)
- return vfio_pci_mem_readwrite(vdev, buf, count, ppos, false);
+ switch (index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
+
+ case VFIO_PCI_ROM_REGION_INDEX:
+ if (iswrite)
+ return -EINVAL;
+ return vfio_pci_bar_rw(vdev, buf, count, ppos, false);
+
+ case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
+
+ case VFIO_PCI_VGA_REGION_INDEX:
+ return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
+ }
return -EINVAL;
}
-static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t vfio_pci_read(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos)
{
- unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
- struct vfio_pci_device *vdev = device_data;
- struct pci_dev *pdev = vdev->pdev;
+ if (!count)
+ return 0;
- if (index >= VFIO_PCI_NUM_REGIONS)
- return -EINVAL;
+ return vfio_pci_rw(device_data, buf, count, ppos, false);
+}
- if (index == VFIO_PCI_CONFIG_REGION_INDEX)
- return vfio_pci_config_readwrite(vdev, (char __user *)buf,
- count, ppos, true);
- else if (index == VFIO_PCI_ROM_REGION_INDEX)
- return -EINVAL;
- else if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
- return vfio_pci_io_readwrite(vdev, (char __user *)buf,
- count, ppos, true);
- else if (pci_resource_flags(pdev, index) & IORESOURCE_MEM) {
- return vfio_pci_mem_readwrite(vdev, (char __user *)buf,
- count, ppos, true);
- }
+static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ if (!count)
+ return 0;
- return -EINVAL;
+ return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
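
The read/write split above collapses into a single region-indexed dispatcher with two thin wrappers. The shape, in isolation and with hypothetical names (the dispatch body is elided):

#include <linux/fs.h>
#include <linux/errno.h>

/* Illustrative shape of the refactor: one dispatcher keyed on the region
 * index, with read() and write() reduced to trivial wrappers. */
static ssize_t demo_rw(void *priv, char __user *buf, size_t count,
		       loff_t *ppos, bool iswrite)
{
	/* switch on VFIO_PCI_OFFSET_TO_INDEX(*ppos) and forward ... */
	return -EINVAL;
}

static ssize_t demo_read(void *priv, char __user *buf, size_t count,
			 loff_t *ppos)
{
	return count ? demo_rw(priv, buf, count, ppos, false) : 0;
}

static ssize_t demo_write(void *priv, const char __user *buf, size_t count,
			  loff_t *ppos)
{
	return count ? demo_rw(priv, (char __user *)buf, count, ppos, true) : 0;
}
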
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 8b8f7d11e102..964ff22bf281 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -587,12 +587,46 @@ static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
return 0;
}
+static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ if (offset == PCI_PM_CTRL) {
+ pci_power_t state;
+
+ switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) {
+ case 0:
+ state = PCI_D0;
+ break;
+ case 1:
+ state = PCI_D1;
+ break;
+ case 2:
+ state = PCI_D2;
+ break;
+ case 3:
+ state = PCI_D3hot;
+ break;
+ }
+
+ pci_set_power_state(vdev->pdev, state);
+ }
+
+ return count;
+}
+
/* Permissions for the Power Management capability */
static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
{
if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM]))
return -ENOMEM;
+ perm->writefn = vfio_pm_config_write;
+
/*
* We always virtualize the next field so we can remove
* capabilities from the chain if we want to.
@@ -600,10 +634,11 @@ static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
/*
- * Power management is defined *per function*,
- * so we let the user write this
+ * Power management is defined *per function*, so we can let
+ * the user change power state, but we trap and initiate the
+ * change ourselves, so the state bits are read-only.
*/
- p_setd(perm, PCI_PM_CTRL, NO_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_PM_CTRL, NO_VIRT, ~PCI_PM_CTRL_STATE_MASK);
return 0;
}
@@ -985,12 +1020,12 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
if (ret)
return pcibios_err_to_errno(ret);
+ vdev->extended_caps = true;
+
if ((word & PCI_EXP_FLAGS_VERS) == 1)
return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
- else {
- vdev->extended_caps = true;
+ else
return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
- }
case PCI_CAP_ID_HT:
ret = pci_read_config_byte(pdev, pos + 3, &byte);
if (ret)
@@ -1501,9 +1536,8 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
return ret;
}
-ssize_t vfio_pci_config_readwrite(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite)
+ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
{
size_t done = 0;
int ret = 0;
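
The new PM write handler above virtualizes the D-state request: the state bits become read-only to direct writes (the mask changes to ~PCI_PM_CTRL_STATE_MASK), and the trapped value is translated into a host-side pci_set_power_state() call. Compressed to its essence as a sketch, not the exact patch text:

#include <linux/pci.h>

/* Sketch: map the two PCI_PM_CTRL state bits to a pci_power_t and let the
 * host PCI core perform the actual transition. */
static void demo_apply_pm_state(struct pci_dev *pdev, u32 pm_ctrl)
{
	static const pci_power_t states[] = { PCI_D0, PCI_D1, PCI_D2, PCI_D3hot };

	pci_set_power_state(pdev, states[pm_ctrl & PCI_PM_CTRL_STATE_MASK]);
}
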
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 611827cba8cd..d7e55d03f49e 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -53,6 +53,7 @@ struct vfio_pci_device {
bool reset_works;
bool extended_caps;
bool bardirty;
+ bool has_vga;
struct pci_saved_state *pci_saved_state;
atomic_t refcnt;
};
@@ -70,15 +71,15 @@ extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev,
uint32_t flags, unsigned index,
unsigned start, unsigned count, void *data);
-extern ssize_t vfio_pci_config_readwrite(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite);
-extern ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite);
-extern ssize_t vfio_pci_io_readwrite(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite);
+extern ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos, bool iswrite);
+
+extern ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+
+extern ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
extern int vfio_pci_init_perm_bits(void);
extern void vfio_pci_uninit_perm_bits(void);
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index f72323ef618f..210db24d2204 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -17,253 +17,222 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/vgaarb.h>
#include "vfio_pci_private.h"
-/* I/O Port BAR access */
-ssize_t vfio_pci_io_readwrite(struct vfio_pci_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite)
+/*
+ * Read or write from an __iomem region (MMIO or I/O port) with an excluded
+ * range which is inaccessible. The excluded range drops writes and fills
+ * reads with -1. This is intended for handling MSI-X vector tables and
+ * leftover space for ROM BARs.
+ */
+static ssize_t do_io_rw(void __iomem *io, char __user *buf,
+ loff_t off, size_t count, size_t x_start,
+ size_t x_end, bool iswrite)
{
- struct pci_dev *pdev = vdev->pdev;
- loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
- int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
- void __iomem *io;
- size_t done = 0;
-
- if (!pci_resource_start(pdev, bar))
- return -EINVAL;
-
- if (pos + count > pci_resource_len(pdev, bar))
- return -EINVAL;
-
- if (!vdev->barmap[bar]) {
- int ret;
-
- ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
- if (ret)
- return ret;
-
- vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
-
- if (!vdev->barmap[bar]) {
- pci_release_selected_regions(pdev, 1 << bar);
- return -EINVAL;
- }
- }
-
- io = vdev->barmap[bar];
+ ssize_t done = 0;
while (count) {
- int filled;
+ size_t fillable, filled;
+
+ if (off < x_start)
+ fillable = min(count, (size_t)(x_start - off));
+ else if (off >= x_end)
+ fillable = count;
+ else
+ fillable = 0;
- if (count >= 3 && !(pos % 4)) {
+ if (fillable >= 4 && !(off % 4)) {
__le32 val;
if (iswrite) {
if (copy_from_user(&val, buf, 4))
return -EFAULT;
- iowrite32(le32_to_cpu(val), io + pos);
+ iowrite32(le32_to_cpu(val), io + off);
} else {
- val = cpu_to_le32(ioread32(io + pos));
+ val = cpu_to_le32(ioread32(io + off));
if (copy_to_user(buf, &val, 4))
return -EFAULT;
}
filled = 4;
-
- } else if ((pos % 2) == 0 && count >= 2) {
+ } else if (fillable >= 2 && !(off % 2)) {
__le16 val;
if (iswrite) {
if (copy_from_user(&val, buf, 2))
return -EFAULT;
- iowrite16(le16_to_cpu(val), io + pos);
+ iowrite16(le16_to_cpu(val), io + off);
} else {
- val = cpu_to_le16(ioread16(io + pos));
+ val = cpu_to_le16(ioread16(io + off));
if (copy_to_user(buf, &val, 2))
return -EFAULT;
}
filled = 2;
- } else {
+ } else if (fillable) {
u8 val;
if (iswrite) {
if (copy_from_user(&val, buf, 1))
return -EFAULT;
- iowrite8(val, io + pos);
+ iowrite8(val, io + off);
} else {
- val = ioread8(io + pos);
+ val = ioread8(io + off);
if (copy_to_user(buf, &val, 1))
return -EFAULT;
}
filled = 1;
+ } else {
+ /* Fill reads with -1, drop writes */
+ filled = min(count, (size_t)(x_end - off));
+ if (!iswrite) {
+ u8 val = 0xFF;
+ size_t i;
+
+ for (i = 0; i < filled; i++)
+ if (copy_to_user(buf + i, &val, 1))
+ return -EFAULT;
+ }
}
count -= filled;
done += filled;
+ off += filled;
buf += filled;
- pos += filled;
}
- *ppos += done;
-
return done;
}
-/*
- * MMIO BAR access
- * We handle two excluded ranges here as well, if the user tries to read
- * the ROM beyond what PCI tells us is available or the MSI-X table region,
- * we return 0xFF and writes are dropped.
- */
-ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite)
+ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
{
struct pci_dev *pdev = vdev->pdev;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
- void __iomem *io;
+ size_t x_start = 0, x_end = 0;
resource_size_t end;
- size_t done = 0;
- size_t x_start = 0, x_end = 0; /* excluded range */
+ void __iomem *io;
+ ssize_t done;
if (!pci_resource_start(pdev, bar))
return -EINVAL;
end = pci_resource_len(pdev, bar);
- if (pos > end)
+ if (pos >= end)
return -EINVAL;
- if (pos == end)
- return 0;
-
- if (pos + count > end)
- count = end - pos;
+ count = min(count, (size_t)(end - pos));
if (bar == PCI_ROM_RESOURCE) {
+ /*
+ * The ROM can fill less space than the BAR, so we start the
+ * excluded range at the end of the actual ROM. This makes
+ * filling large ROM BARs much faster.
+ */
io = pci_map_rom(pdev, &x_start);
+ if (!io)
+ return -ENOMEM;
x_end = end;
- } else {
- if (!vdev->barmap[bar]) {
- int ret;
-
- ret = pci_request_selected_regions(pdev, 1 << bar,
- "vfio");
- if (ret)
- return ret;
+ } else if (!vdev->barmap[bar]) {
+ int ret;
- vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
+ ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
+ if (ret)
+ return ret;
- if (!vdev->barmap[bar]) {
- pci_release_selected_regions(pdev, 1 << bar);
- return -EINVAL;
- }
+ io = pci_iomap(pdev, bar, 0);
+ if (!io) {
+ pci_release_selected_regions(pdev, 1 << bar);
+ return -ENOMEM;
}
+ vdev->barmap[bar] = io;
+ } else
io = vdev->barmap[bar];
- if (bar == vdev->msix_bar) {
- x_start = vdev->msix_offset;
- x_end = vdev->msix_offset + vdev->msix_size;
- }
+ if (bar == vdev->msix_bar) {
+ x_start = vdev->msix_offset;
+ x_end = vdev->msix_offset + vdev->msix_size;
}
- if (!io)
- return -EINVAL;
-
- while (count) {
- size_t fillable, filled;
-
- if (pos < x_start)
- fillable = x_start - pos;
- else if (pos >= x_end)
- fillable = end - pos;
- else
- fillable = 0;
-
- if (fillable >= 4 && !(pos % 4) && (count >= 4)) {
- __le32 val;
-
- if (iswrite) {
- if (copy_from_user(&val, buf, 4))
- goto out;
-
- iowrite32(le32_to_cpu(val), io + pos);
- } else {
- val = cpu_to_le32(ioread32(io + pos));
+ done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);
- if (copy_to_user(buf, &val, 4))
- goto out;
- }
+ if (done >= 0)
+ *ppos += done;
- filled = 4;
- } else if (fillable >= 2 && !(pos % 2) && (count >= 2)) {
- __le16 val;
-
- if (iswrite) {
- if (copy_from_user(&val, buf, 2))
- goto out;
-
- iowrite16(le16_to_cpu(val), io + pos);
- } else {
- val = cpu_to_le16(ioread16(io + pos));
-
- if (copy_to_user(buf, &val, 2))
- goto out;
- }
-
- filled = 2;
- } else if (fillable) {
- u8 val;
+ if (bar == PCI_ROM_RESOURCE)
+ pci_unmap_rom(pdev, io);
- if (iswrite) {
- if (copy_from_user(&val, buf, 1))
- goto out;
+ return done;
+}
- iowrite8(val, io + pos);
- } else {
- val = ioread8(io + pos);
+ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ int ret;
+ loff_t off, pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ void __iomem *iomem = NULL;
+ unsigned int rsrc;
+ bool is_ioport;
+ ssize_t done;
+
+ if (!vdev->has_vga)
+ return -EINVAL;
- if (copy_to_user(buf, &val, 1))
- goto out;
- }
+ switch (pos) {
+ case 0xa0000 ... 0xbffff:
+ count = min(count, (size_t)(0xc0000 - pos));
+ iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
+ off = pos - 0xa0000;
+ rsrc = VGA_RSRC_LEGACY_MEM;
+ is_ioport = false;
+ break;
+ case 0x3b0 ... 0x3bb:
+ count = min(count, (size_t)(0x3bc - pos));
+ iomem = ioport_map(0x3b0, 0x3bb - 0x3b0 + 1);
+ off = pos - 0x3b0;
+ rsrc = VGA_RSRC_LEGACY_IO;
+ is_ioport = true;
+ break;
+ case 0x3c0 ... 0x3df:
+ count = min(count, (size_t)(0x3e0 - pos));
+ iomem = ioport_map(0x3c0, 0x3df - 0x3c0 + 1);
+ off = pos - 0x3c0;
+ rsrc = VGA_RSRC_LEGACY_IO;
+ is_ioport = true;
+ break;
+ default:
+ return -EINVAL;
+ }
- filled = 1;
- } else {
- /* Drop writes, fill reads with FF */
- filled = min((size_t)(x_end - pos), count);
- if (!iswrite) {
- char val = 0xFF;
- size_t i;
+ if (!iomem)
+ return -ENOMEM;
- for (i = 0; i < filled; i++) {
- if (put_user(val, buf + i))
- goto out;
- }
- }
+ ret = vga_get_interruptible(vdev->pdev, rsrc);
+ if (ret) {
+ is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
+ return ret;
+ }
- }
+ done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite);
- count -= filled;
- done += filled;
- buf += filled;
- pos += filled;
- }
+ vga_put(vdev->pdev, rsrc);
- *ppos += done;
+ is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
-out:
- if (bar == PCI_ROM_RESOURCE)
- pci_unmap_rom(pdev, io);
+ if (done >= 0)
+ *ppos += done;
- return count ? -EFAULT : done;
+ return done;
}
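
The excluded-range behaviour of do_io_rw() is easiest to see outside the ioread/iowrite plumbing: accesses falling inside [x_start, x_end) are never forwarded to the device, writes are silently dropped and reads come back as 0xFF bytes. A small host-side analogue for the read case (plain C, illustrative only):

#include <stddef.h>

/* Sketch: copy 'count' bytes starting at 'off' from 'src' into 'dst',
 * masking the excluded window [x_start, x_end) with 0xFF. */
static void masked_read(unsigned char *dst, const unsigned char *src,
			size_t off, size_t count,
			size_t x_start, size_t x_end)
{
	size_t i;

	for (i = 0; i < count; i++, off++) {
		if (off >= x_start && off < x_end)
			dst[i] = 0xFF;	/* excluded: MSI-X table, space past the ROM */
		else
			dst[i] = src[off];
	}
}
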
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 12c264d3b058..fcc12f3e60a3 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -139,23 +139,8 @@ EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
*/
static int vfio_alloc_group_minor(struct vfio_group *group)
{
- int ret, minor;
-
-again:
- if (unlikely(idr_pre_get(&vfio.group_idr, GFP_KERNEL) == 0))
- return -ENOMEM;
-
/* index 0 is used by /dev/vfio/vfio */
- ret = idr_get_new_above(&vfio.group_idr, group, 1, &minor);
- if (ret == -EAGAIN)
- goto again;
- if (ret || minor > MINORMASK) {
- if (minor > MINORMASK)
- idr_remove(&vfio.group_idr, minor);
- return -ENOSPC;
- }
-
- return minor;
+ return idr_alloc(&vfio.group_idr, group, 1, MINORMASK + 1, GFP_KERNEL);
}
static void vfio_free_group_minor(int minor)
@@ -442,7 +427,7 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
* a device. It's not always practical to leave a device within a group
* driverless as it could get re-bound to something unsafe.
*/
-static const char * const vfio_driver_whitelist[] = { "pci-stub" };
+static const char * const vfio_driver_whitelist[] = { "pci-stub", "pcieport" };
static bool vfio_whitelisted_driver(struct device_driver *drv)
{
@@ -642,33 +627,16 @@ int vfio_add_group_dev(struct device *dev,
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
-/* Test whether a struct device is present in our tracking */
-static bool vfio_dev_present(struct device *dev)
+/* Given a referenced group, check if it contains the device */
+static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
{
- struct iommu_group *iommu_group;
- struct vfio_group *group;
struct vfio_device *device;
- iommu_group = iommu_group_get(dev);
- if (!iommu_group)
- return false;
-
- group = vfio_group_get_from_iommu(iommu_group);
- if (!group) {
- iommu_group_put(iommu_group);
- return false;
- }
-
device = vfio_group_get_device(group, dev);
- if (!device) {
- vfio_group_put(group);
- iommu_group_put(iommu_group);
+ if (!device)
return false;
- }
vfio_device_put(device);
- vfio_group_put(group);
- iommu_group_put(iommu_group);
return true;
}
@@ -682,10 +650,18 @@ void *vfio_del_group_dev(struct device *dev)
struct iommu_group *iommu_group = group->iommu_group;
void *device_data = device->device_data;
+ /*
+ * The group exists so long as we have a device reference. Get
+ * a group reference and use it to scan for the device going away.
+ */
+ vfio_group_get(group);
+
vfio_device_put(device);
/* TODO send a signal to encourage this to be released */
- wait_event(vfio.release_q, !vfio_dev_present(dev));
+ wait_event(vfio.release_q, !vfio_dev_present(group, dev));
+
+ vfio_group_put(group);
iommu_group_put(iommu_group);
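
The minor-allocation change above follows the tree-wide move from idr_pre_get()/idr_get_new_above() to idr_alloc(): the preload-and-retry dance disappears and range checking is done by the allocator itself. A hedged sketch of the resulting helper (illustrative wrapper, not the vfio code verbatim):

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>

/* Sketch: idr_alloc() returns the new id in [start, end) or a -errno,
 * replacing the old preload/retry loop in one call. */
static int demo_alloc_minor(struct idr *idr, void *ptr)
{
	/* minor 0 is reserved, so allocate from 1 up to MINORMASK inclusive */
	return idr_alloc(idr, ptr, 1, MINORMASK + 1, GFP_KERNEL);
}
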
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 22321cf84fbe..9951297b2427 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -47,6 +47,8 @@
#include <linux/vhost.h>
#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
#include <linux/virtio_scsi.h>
+#include <linux/llist.h>
+#include <linux/bitmap.h>
#include "vhost.c"
#include "vhost.h"
@@ -58,14 +60,20 @@ enum {
VHOST_SCSI_VQ_IO = 2,
};
+#define VHOST_SCSI_MAX_TARGET 256
+#define VHOST_SCSI_MAX_VQ 128
+
struct vhost_scsi {
- struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */
+ /* Protected by vhost_scsi->dev.mutex */
+ struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
+ char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
+ bool vs_endpoint;
+
struct vhost_dev dev;
- struct vhost_virtqueue vqs[3];
+ struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
struct vhost_work vs_completion_work; /* cmd completion work item */
- struct list_head vs_completion_list; /* cmd completion queue */
- spinlock_t vs_completion_lock; /* protects s_completion_list */
+ struct llist_head vs_completion_list; /* cmd completion queue */
};
/* Local pointer to allocated TCM configfs fabric module */
@@ -77,6 +85,12 @@ static struct workqueue_struct *tcm_vhost_workqueue;
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);
+static int iov_num_pages(struct iovec *iov)
+{
+ return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
+ ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
+}
+
static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
return 1;
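As a worked example of the new iov_num_pages() helper: with 4 KiB pages, an iovec with iov_base = 0x1ff0 and iov_len = 0x20 straddles one page boundary, so PAGE_ALIGN(0x1ff0 + 0x20) = 0x3000, 0x1ff0 & PAGE_MASK = 0x1000, and (0x3000 - 0x1000) >> PAGE_SHIFT = 2 pages.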
@@ -301,9 +315,7 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
{
struct vhost_scsi *vs = tv_cmd->tvc_vhost;
- spin_lock_bh(&vs->vs_completion_lock);
- list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
- spin_unlock_bh(&vs->vs_completion_lock);
+ llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}
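The spinlocked completion list becomes a lock-less llist: producers push with llist_add() from any context, and the single vhost worker later drains the whole chain at once with llist_del_all(). A minimal sketch of the pattern, with a hypothetical item type:

#include <linux/llist.h>
#include <linux/slab.h>

struct example_cmd {
	struct llist_node node;
	int status;
};

static LLIST_HEAD(example_completions);

static void example_complete(struct example_cmd *cmd)
{
	llist_add(&cmd->node, &example_completions);	/* producer side, lock-less */
}

static void example_drain(void)
{
	struct llist_node *llnode = llist_del_all(&example_completions);

	while (llnode) {
		struct example_cmd *cmd = llist_entry(llnode, struct example_cmd, node);

		llnode = llist_next(llnode);		/* advance before freeing */
		/* ... report cmd->status ... */
		kfree(cmd);
	}
}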
@@ -347,27 +359,6 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
kfree(tv_cmd);
}
-/* Dequeue a command from the completion list */
-static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion(
- struct vhost_scsi *vs)
-{
- struct tcm_vhost_cmd *tv_cmd = NULL;
-
- spin_lock_bh(&vs->vs_completion_lock);
- if (list_empty(&vs->vs_completion_list)) {
- spin_unlock_bh(&vs->vs_completion_lock);
- return NULL;
- }
-
- list_for_each_entry(tv_cmd, &vs->vs_completion_list,
- tvc_completion_list) {
- list_del(&tv_cmd->tvc_completion_list);
- break;
- }
- spin_unlock_bh(&vs->vs_completion_lock);
- return tv_cmd;
-}
-
/* Fill in status and signal that we are done processing this command
*
* This is scheduled in the vhost work queue so we are called with the owner
@@ -377,12 +368,20 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
vs_completion_work);
+ DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
+ struct virtio_scsi_cmd_resp v_rsp;
struct tcm_vhost_cmd *tv_cmd;
-
- while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
- struct virtio_scsi_cmd_resp v_rsp;
- struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
- int ret;
+ struct llist_node *llnode;
+ struct se_cmd *se_cmd;
+ int ret, vq;
+
+ bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
+ llnode = llist_del_all(&vs->vs_completion_list);
+ while (llnode) {
+ tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
+ tvc_completion_list);
+ llnode = llist_next(llnode);
+ se_cmd = &tv_cmd->tvc_se_cmd;
pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
@@ -395,15 +394,20 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
v_rsp.sense_len);
ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
- if (likely(ret == 0))
- vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
- else
+ if (likely(ret == 0)) {
+ vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
+ vq = tv_cmd->tvc_vq - vs->vqs;
+ __set_bit(vq, signal);
+ } else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
vhost_scsi_free_cmd(tv_cmd);
}
- vhost_signal(&vs->dev, &vs->vqs[2]);
+ vq = -1;
+ while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
+ < VHOST_SCSI_MAX_VQ)
+ vhost_signal(&vs->dev, &vs->vqs[vq]);
}
static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
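Because completions can now belong to any of the VHOST_SCSI_MAX_VQ queues, the work function above records each touched queue in a bitmap and signals each one exactly once instead of unconditionally kicking vq 2. A small sketch of the idea with hypothetical names; for_each_set_bit() is equivalent to the open-coded find_next_bit() loop in the hunk:

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EXAMPLE_MAX_VQ	128

/* Notify each queue that was marked in 'touched' exactly once. */
static void example_signal_completed_vqs(const unsigned long *touched,
					 void (*notify)(int vq))
{
	int vq;

	for_each_set_bit(vq, touched, EXAMPLE_MAX_VQ)
		notify(vq);
}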
@@ -426,7 +430,6 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
pr_err("Unable to allocate struct tcm_vhost_cmd\n");
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
tv_cmd->tvc_tag = v_req->tag;
tv_cmd->tvc_task_attr = v_req->task_attr;
tv_cmd->tvc_exp_data_len = exp_data_len;
@@ -442,40 +445,47 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
* Returns the number of scatterlist entries used or -errno on error.
*/
static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
- unsigned int sgl_count, void __user *ptr, size_t len, int write)
+ unsigned int sgl_count, struct iovec *iov, int write)
{
+ unsigned int npages = 0, pages_nr, offset, nbytes;
struct scatterlist *sg = sgl;
- unsigned int npages = 0;
- int ret;
+ void __user *ptr = iov->iov_base;
+ size_t len = iov->iov_len;
+ struct page **pages;
+ int ret, i;
- while (len > 0) {
- struct page *page;
- unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK;
- unsigned int nbytes = min_t(unsigned int,
- PAGE_SIZE - offset, len);
+ pages_nr = iov_num_pages(iov);
+ if (pages_nr > sgl_count)
+ return -ENOBUFS;
- if (npages == sgl_count) {
- ret = -ENOBUFS;
- goto err;
- }
+ pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
- ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page);
- BUG_ON(ret == 0); /* we should either get our page or fail */
- if (ret < 0)
- goto err;
+ ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
+ /* No pages were pinned */
+ if (ret < 0)
+ goto out;
+ /* Fewer pages pinned than wanted */

+ if (ret != pages_nr) {
+ for (i = 0; i < ret; i++)
+ put_page(pages[i]);
+ ret = -EFAULT;
+ goto out;
+ }
- sg_set_page(sg, page, nbytes, offset);
+ while (len > 0) {
+ offset = (uintptr_t)ptr & ~PAGE_MASK;
+ nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
+ sg_set_page(sg, pages[npages], nbytes, offset);
ptr += nbytes;
len -= nbytes;
sg++;
npages++;
}
- return npages;
-err:
- /* Put pages that we hold */
- for (sg = sgl; sg != &sgl[npages]; sg++)
- put_page(sg_page(sg));
+out:
+ kfree(pages);
return ret;
}
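Instead of pinning one page per loop iteration, the rewritten mapper pins the whole iovec with a single get_user_pages_fast() call and unwinds if fewer pages than requested were pinned. A minimal sketch of that pin-or-rollback step, as a hypothetical helper:

#include <linux/mm.h>
#include <linux/errno.h>

/* Pin nr_pages of user memory starting at 'start'; on a partial pin, release
 * what was pinned and fail, so the caller never sees a half-mapped range. */
static int example_pin_user_range(unsigned long start, int nr_pages, int write,
				  struct page **pages)
{
	int pinned, i;

	pinned = get_user_pages_fast(start, nr_pages, write, pages);
	if (pinned < 0)
		return pinned;			/* nothing was pinned */
	if (pinned != nr_pages) {
		for (i = 0; i < pinned; i++)
			put_page(pages[i]);
		return -EFAULT;			/* partial pin: roll back */
	}
	return 0;
}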
@@ -491,11 +501,9 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
* Find out how long sglist needs to be
*/
sgl_count = 0;
- for (i = 0; i < niov; i++) {
- sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len +
- PAGE_SIZE - 1) >> PAGE_SHIFT) -
- ((uintptr_t)iov[i].iov_base >> PAGE_SHIFT);
- }
+ for (i = 0; i < niov; i++)
+ sgl_count += iov_num_pages(&iov[i]);
+
/* TODO overflow checking */
sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
@@ -510,8 +518,7 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
for (i = 0; i < niov; i++) {
- ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base,
- iov[i].iov_len, write);
+ ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
if (ret < 0) {
for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
put_page(sg_page(&tv_cmd->tvc_sgl[i]));
@@ -563,19 +570,19 @@ static void tcm_vhost_submission_work(struct work_struct *work)
}
}
-static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
+static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
+ struct vhost_virtqueue *vq)
{
- struct vhost_virtqueue *vq = &vs->vqs[2];
struct virtio_scsi_cmd_req v_req;
struct tcm_vhost_tpg *tv_tpg;
struct tcm_vhost_cmd *tv_cmd;
u32 exp_data_len, data_first, data_num, data_direction;
unsigned out, in, i;
int head, ret;
+ u8 target;
/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
- tv_tpg = vs->vs_tpg;
- if (unlikely(!tv_tpg))
+ if (unlikely(!vs->vs_endpoint))
return;
mutex_lock(&vq->mutex);
@@ -643,6 +650,28 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
break;
}
+ /* Extract the tpgt */
+ target = v_req.lun[1];
+ tv_tpg = vs->vs_tpg[target];
+
+ /* Target does not exist, fail the request */
+ if (unlikely(!tv_tpg)) {
+ struct virtio_scsi_cmd_resp __user *resp;
+ struct virtio_scsi_cmd_resp rsp;
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+ resp = vq->iov[out].iov_base;
+ ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+ if (!ret)
+ vhost_add_used_and_signal(&vs->dev,
+ vq, head, 0);
+ else
+ pr_err("Faulted on virtio_scsi_cmd_resp\n");
+
+ continue;
+ }
+
exp_data_len = 0;
for (i = 0; i < data_num; i++)
exp_data_len += vq->iov[data_first + i].iov_len;
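The target index comes straight out of the virtio-scsi 8-byte LUN field, where byte 0 is fixed to 1, byte 1 is the target, and bytes 2-3 carry the LUN with the 0x4000 flat-addressing flag. A hypothetical decode helper, for illustration only:

#include <linux/types.h>

/* Decode the virtio-scsi LUN field: lun[0] == 1, lun[1] == target,
 * lun[2..3] == LUN with the 0x4000 flat-addressing flag set. */
static void example_decode_virtio_scsi_lun(const u8 lun[8], u8 *target, u16 *unit)
{
	*target = lun[1];
	*unit = (((u16)lun[2] << 8) | lun[3]) & 0x3fff;
}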
@@ -658,6 +687,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
": %d\n", tv_cmd, exp_data_len, data_direction);
tv_cmd->tvc_vhost = vs;
+ tv_cmd->tvc_vq = vq;
if (unlikely(vq->iov[out].iov_len !=
sizeof(struct virtio_scsi_cmd_resp))) {
@@ -738,7 +768,7 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
poll.work);
struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
- vhost_scsi_handle_vq(vs);
+ vhost_scsi_handle_vq(vs, vq);
}
/*
@@ -751,7 +781,8 @@ static int vhost_scsi_set_endpoint(
{
struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tv_tpg;
- int index;
+ bool match = false;
+ int index, ret;
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
@@ -762,7 +793,6 @@ static int vhost_scsi_set_endpoint(
return -EFAULT;
}
}
- mutex_unlock(&vs->dev.mutex);
mutex_lock(&tcm_vhost_mutex);
list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
@@ -777,30 +807,33 @@ static int vhost_scsi_set_endpoint(
}
tv_tport = tv_tpg->tport;
- if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
- (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
- tv_tpg->tv_tpg_vhost_count++;
- mutex_unlock(&tv_tpg->tv_tpg_mutex);
- mutex_unlock(&tcm_vhost_mutex);
-
- mutex_lock(&vs->dev.mutex);
- if (vs->vs_tpg) {
- mutex_unlock(&vs->dev.mutex);
- mutex_lock(&tv_tpg->tv_tpg_mutex);
- tv_tpg->tv_tpg_vhost_count--;
+ if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
+ if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ mutex_unlock(&tcm_vhost_mutex);
+ mutex_unlock(&vs->dev.mutex);
return -EEXIST;
}
-
- vs->vs_tpg = tv_tpg;
+ tv_tpg->tv_tpg_vhost_count++;
+ vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
smp_mb__after_atomic_inc();
- mutex_unlock(&vs->dev.mutex);
- return 0;
+ match = true;
}
mutex_unlock(&tv_tpg->tv_tpg_mutex);
}
mutex_unlock(&tcm_vhost_mutex);
- return -EINVAL;
+
+ if (match) {
+ memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
+ sizeof(vs->vs_vhost_wwpn));
+ vs->vs_endpoint = true;
+ ret = 0;
+ } else {
+ ret = -EEXIST;
+ }
+
+ mutex_unlock(&vs->dev.mutex);
+ return ret;
}
static int vhost_scsi_clear_endpoint(
@@ -809,7 +842,8 @@ static int vhost_scsi_clear_endpoint(
{
struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tv_tpg;
- int index, ret;
+ int index, ret, i;
+ u8 target;
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
@@ -819,27 +853,32 @@ static int vhost_scsi_clear_endpoint(
goto err;
}
}
+ for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+ target = i;
- if (!vs->vs_tpg) {
- ret = -ENODEV;
- goto err;
- }
- tv_tpg = vs->vs_tpg;
- tv_tport = tv_tpg->tport;
-
- if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
- (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
- pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
- " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
- tv_tport->tport_name, tv_tpg->tport_tpgt,
- t->vhost_wwpn, t->vhost_tpgt);
- ret = -EINVAL;
- goto err;
+ tv_tpg = vs->vs_tpg[target];
+ if (!tv_tpg)
+ continue;
+
+ tv_tport = tv_tpg->tport;
+ if (!tv_tport) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
+ pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
+ " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
+ tv_tport->tport_name, tv_tpg->tport_tpgt,
+ t->vhost_wwpn, t->vhost_tpgt);
+ ret = -EINVAL;
+ goto err;
+ }
+ tv_tpg->tv_tpg_vhost_count--;
+ vs->vs_tpg[target] = NULL;
+ vs->vs_endpoint = false;
}
- tv_tpg->tv_tpg_vhost_count--;
- vs->vs_tpg = NULL;
mutex_unlock(&vs->dev.mutex);
-
return 0;
err:
@@ -850,20 +889,19 @@ err:
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *s;
- int r;
+ int r, i;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
- INIT_LIST_HEAD(&s->vs_completion_list);
- spin_lock_init(&s->vs_completion_lock);
s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
- s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
- r = vhost_dev_init(&s->dev, s->vqs, 3);
+ for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++)
+ s->vqs[i].handle_kick = vhost_scsi_handle_kick;
+ r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ);
if (r < 0) {
kfree(s);
return r;
@@ -876,16 +914,12 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
static int vhost_scsi_release(struct inode *inode, struct file *f)
{
struct vhost_scsi *s = f->private_data;
+ struct vhost_scsi_target t;
- if (s->vs_tpg && s->vs_tpg->tport) {
- struct vhost_scsi_target backend;
-
- memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name,
- sizeof(backend.vhost_wwpn));
- backend.vhost_tpgt = s->vs_tpg->tport_tpgt;
- vhost_scsi_clear_endpoint(s, &backend);
- }
-
+ mutex_lock(&s->dev.mutex);
+ memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
+ mutex_unlock(&s->dev.mutex);
+ vhost_scsi_clear_endpoint(s, &t);
vhost_dev_stop(&s->dev);
vhost_dev_cleanup(&s->dev, false);
kfree(s);
@@ -899,9 +933,10 @@ static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
- vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
- vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
- vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
+ int i;
+
+ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+ vhost_scsi_flush_vq(vs, i);
}
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index 7e87c63ecbcd..1d2ae7a60e11 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -23,6 +23,8 @@ struct tcm_vhost_cmd {
struct virtio_scsi_cmd_resp __user *tvc_resp;
/* Pointer to vhost_scsi for our device */
struct vhost_scsi *tvc_vhost;
+ /* Pointer to vhost_virtqueue for the cmd */
+ struct vhost_virtqueue *tvc_vq;
/* Pointer to vhost nexus memory */
struct tcm_vhost_nexus *tvc_nexus;
/* The TCM I/O descriptor that is accessed via container_of() */
@@ -34,7 +36,7 @@ struct tcm_vhost_cmd {
/* Sense buffer that will be mapped into outgoing status */
unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
/* Completed commands list, serviced from vhost worker thread */
- struct list_head tvc_completion_list;
+ struct llist_node tvc_completion_list;
};
struct tcm_vhost_nexus {
@@ -93,9 +95,11 @@ struct tcm_vhost_tport {
*
* ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
* RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
+ * ABI Rev 1: January 2013. Ignore vhost_tpgt field in struct vhost_scsi_target.
+ * All the targets under vhost_wwpn can be seen and used by the guest.
*/
-#define VHOST_SCSI_ABI_VERSION 0
+#define VHOST_SCSI_ABI_VERSION 1
struct vhost_scsi_target {
int abi_version;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index e4e1765b82f2..4c1546f71d56 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -21,8 +21,6 @@ source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/drm/Kconfig"
-source "drivers/gpu/stub/Kconfig"
-
config VGASTATE
tristate
default n
@@ -33,6 +31,30 @@ config VIDEO_OUTPUT_CONTROL
This framework adds support for low-level control of the video
output switch.
+config DISPLAY_TIMING
+ bool
+
+config VIDEOMODE
+ bool
+
+config OF_DISPLAY_TIMING
+ bool "Enable device tree display timing support"
+ depends on OF
+ select DISPLAY_TIMING
+ help
+ Helper to parse display timings from the devicetree.
+
+config OF_VIDEOMODE
+ bool "Enable device tree videomode support"
+ depends on OF
+ select VIDEOMODE
+ select OF_DISPLAY_TIMING
+ help
+ Helper to get video modes from the devicetree.
+
+config HDMI
+ bool
+
menuconfig FB
tristate "Support for frame buffer devices"
---help---
@@ -364,7 +386,7 @@ config FB_SA1100
Y here.
config FB_IMX
- tristate "Freescale i.MX LCD support"
+ tristate "Freescale i.MX1/21/25/27 LCD support"
depends on FB && IMX_HAVE_PLATFORM_IMX_FB
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -2025,7 +2047,8 @@ config FB_TMIO_ACCELL
config FB_S3C
tristate "Samsung S3C framebuffer support"
- depends on FB && (S3C_DEV_FB || S5P_DEV_FIMD0)
+ depends on FB && (CPU_S3C2416 || ARCH_S3C64XX || ARCH_S5P64X0 || \
+ ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -2183,6 +2206,15 @@ config FB_XILINX
framebuffer. ML300 carries a 640*480 LCD display on the board,
ML403 uses a standard DB15 VGA connector.
+config FB_GOLDFISH
+ tristate "Goldfish Framebuffer"
+ depends on FB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ ---help---
+ Framebuffer driver for Goldfish Virtual Platform
+
config FB_COBALT
tristate "Cobalt server LCD frame buffer support"
depends on FB && (MIPS_COBALT || MIPS_SEAD3)
@@ -2422,6 +2454,7 @@ config FB_PUV3_UNIGFX
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
source "drivers/video/exynos/Kconfig"
+source "drivers/video/mmp/Kconfig"
source "drivers/video/backlight/Kconfig"
if VT
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 768a137a1bac..9df387334cb7 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -5,6 +5,7 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_VGASTATE) += vgastate.o
+obj-$(CONFIG_HDMI) += hdmi.o
obj-y += fb_notify.o
obj-$(CONFIG_FB) += fb.o
fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
@@ -98,6 +99,7 @@ obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o
obj-$(CONFIG_FB_PVR2) += pvr2fb.o
obj-$(CONFIG_FB_VOODOO1) += sstfb.o
obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
+obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o
obj-$(CONFIG_FB_68328) += 68328fb.o
obj-$(CONFIG_FB_GBE) += gbefb.o
obj-$(CONFIG_FB_CIRRUS) += cirrusfb.o
@@ -105,6 +107,7 @@ obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o
obj-$(CONFIG_FB_PXA) += pxafb.o
obj-$(CONFIG_FB_PXA168) += pxa168fb.o
obj-$(CONFIG_PXA3XX_GCU) += pxa3xx-gcu.o
+obj-$(CONFIG_MMP_DISP) += mmp/
obj-$(CONFIG_FB_W100) += w100fb.o
obj-$(CONFIG_FB_TMIO) += tmiofb.o
obj-$(CONFIG_FB_AU1100) += au1100fb.o
@@ -168,3 +171,7 @@ obj-$(CONFIG_FB_VIRTUAL) += vfb.o
#video output switch sysfs driver
obj-$(CONFIG_VIDEO_OUTPUT_CONTROL) += output.o
+obj-$(CONFIG_DISPLAY_TIMING) += display_timing.o
+obj-$(CONFIG_OF_DISPLAY_TIMING) += of_display_timing.o
+obj-$(CONFIG_VIDEOMODE) += videomode.o
+obj-$(CONFIG_OF_VIDEOMODE) += of_videomode.o
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c
index 97f79356141e..53846cb534d4 100644
--- a/drivers/video/auo_k190x.c
+++ b/drivers/video/auo_k190x.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/fb.h>
#include <linux/delay.h>
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index c072ed9aea36..2cd63507ed74 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -165,8 +165,10 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev,
struct pm860x_backlight_data *data,
char *name)
{
- struct device_node *nproot = pdev->dev.parent->of_node, *np;
+ struct device_node *nproot, *np;
int iset = 0;
+
+ nproot = of_node_get(pdev->dev.parent->of_node);
if (!nproot)
return -ENODEV;
nproot = of_find_node_by_name(nproot, "backlights");
@@ -184,6 +186,7 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev,
break;
}
}
+ of_node_put(nproot);
return 0;
}
#else
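The explicit of_node_get() matters because of_find_node_by_name() drops the reference on the node it is asked to start from and returns the match with a fresh reference; the final of_node_put() then releases whichever node is still held. A minimal sketch of the balanced pattern, with a hypothetical child-node name:

#include <linux/device.h>
#include <linux/of.h>

static int example_parse_backlights(struct device *parent)
{
	struct device_node *np = of_node_get(parent->of_node);

	if (!np)
		return -ENODEV;

	/* Consumes the reference on 'np', returns "backlights" referenced. */
	np = of_find_node_by_name(np, "backlights");
	if (!np)
		return -ENODEV;

	/* ... read properties of the child nodes ... */

	of_node_put(np);	/* balance the reference returned above */
	return 0;
}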
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 765a945f8ea1..db10d0120d2b 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -126,6 +126,21 @@ config LCD_AMS369FG06
If you have an AMS369FG06 AMOLED Panel, say Y to enable its
LCD control driver.
+config LCD_LMS501KF03
+ tristate "LMS501KF03 LCD Driver"
+ depends on SPI
+ default n
+ help
+ If you have an LMS501KF03 LCD Panel, say Y to enable its
+ LCD control driver.
+
+config LCD_HX8357
+ tristate "Himax HX-8357 LCD Driver"
+ depends on SPI
+ help
+ If you have an HX-8357 LCD panel, say Y to enable its LCD control
+ driver.
+
endif # LCD_CLASS_DEVICE
#
@@ -366,9 +381,15 @@ config BACKLIGHT_LP855X
tristate "Backlight driver for TI LP855X"
depends on BACKLIGHT_CLASS_DEVICE && I2C
help
- This supports TI LP8550, LP8551, LP8552, LP8553 and LP8556
+ This supports TI LP8550, LP8551, LP8552, LP8553, LP8556 and LP8557
backlight driver.
+config BACKLIGHT_LP8788
+ tristate "Backlight driver for TI LP8788 MFD"
+ depends on BACKLIGHT_CLASS_DEVICE && MFD_LP8788
+ help
+ This supports TI LP8788 backlight driver.
+
config BACKLIGHT_OT200
tristate "Backlight driver for ot200 visualisation device"
depends on BACKLIGHT_CLASS_DEVICE && CS5535_MFGPT && GPIO_CS5535
@@ -390,6 +411,13 @@ config BACKLIGHT_TPS65217
If you have a Texas Instruments TPS65217 say Y to enable the
backlight driver.
+config BACKLIGHT_AS3711
+ tristate "AS3711 Backlight"
+ depends on BACKLIGHT_CLASS_DEVICE && MFD_AS3711
+ help
+ If you have an Austrian Microsystems AS3711 say Y to enable the
+ backlight driver.
+
endif # BACKLIGHT_CLASS_DEVICE
endif # BACKLIGHT_LCD_SUPPORT
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index e7ce7291635d..96c4d620c5ce 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -1,47 +1,51 @@
# Backlight & LCD drivers
-obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
-obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o
-obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o
-obj-$(CONFIG_LCD_L4F00242T03) += l4f00242t03.o
-obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o
-obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
-obj-$(CONFIG_LCD_ILI9320) += ili9320.o
-obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
-obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
-obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
-obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
-obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
-obj-$(CONFIG_LCD_LD9040) += ld9040.o
-obj-$(CONFIG_LCD_AMS369FG06) += ams369fg06.o
+obj-$(CONFIG_LCD_AMS369FG06) += ams369fg06.o
+obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
+obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o
+obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o
+obj-$(CONFIG_LCD_HX8357) += hx8357.o
+obj-$(CONFIG_LCD_ILI9320) += ili9320.o
+obj-$(CONFIG_LCD_L4F00242T03) += l4f00242t03.o
+obj-$(CONFIG_LCD_LD9040) += ld9040.o
+obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o
+obj-$(CONFIG_LCD_LMS501KF03) += lms501kf03.o
+obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
+obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
+obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
+obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
+obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
+obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
-obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
-obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
-obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
-obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
-obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
-obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
-obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
-obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
-obj-$(CONFIG_BACKLIGHT_LM3630) += lm3630_bl.o
-obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o
-obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
-obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
-obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
-obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
-obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
-obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
-obj-$(CONFIG_BACKLIGHT_DA9052) += da9052_bl.o
-obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
-obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
-obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
-obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
-obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
-obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
-obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
-obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
-obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
+obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
+obj-$(CONFIG_BACKLIGHT_AAT2870) += aat2870_bl.o
+obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
+obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
+obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
+obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
+obj-$(CONFIG_BACKLIGHT_AS3711) += as3711_bl.o
+obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
+obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
+obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
+obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
+obj-$(CONFIG_BACKLIGHT_DA9052) += da9052_bl.o
+obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
+obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
+obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
+obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3630) += lm3630_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o
+obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
+obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
+obj-$(CONFIG_BACKLIGHT_LP8788) += lp8788_bl.o
+obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
+obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
+obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o
+obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
-obj-$(CONFIG_BACKLIGHT_AAT2870) += aat2870_bl.o
-obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o
-obj-$(CONFIG_BACKLIGHT_TPS65217) += tps65217_bl.o
+obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
+obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
+obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
+obj-$(CONFIG_BACKLIGHT_TPS65217) += tps65217_bl.o
+obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
index 7ff752288b92..c6fc668d6236 100644
--- a/drivers/video/backlight/aat2870_bl.c
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -74,7 +74,7 @@ static int aat2870_bl_get_brightness(struct backlight_device *bd)
static int aat2870_bl_update_status(struct backlight_device *bd)
{
- struct aat2870_bl_driver_data *aat2870_bl = dev_get_drvdata(&bd->dev);
+ struct aat2870_bl_driver_data *aat2870_bl = bl_get_data(bd);
struct aat2870_data *aat2870 =
dev_get_drvdata(aat2870_bl->pdev->dev.parent);
int brightness = bd->props.brightness;
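bl_get_data() is a thin typed wrapper around dev_get_drvdata(&bd->dev), so the conversion is cosmetic but keeps drivers off the backlight device's internals. A tiny hypothetical update_status callback using the accessor:

#include <linux/backlight.h>

struct example_bl_priv {
	int last_brightness;
};

static int example_bl_update_status(struct backlight_device *bd)
{
	/* Driver data passed to backlight_device_register() at probe time. */
	struct example_bl_priv *priv = bl_get_data(bd);

	priv->last_brightness = bd->props.brightness;
	return 0;
}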
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 6bb72c0cb803..a77c9cad3320 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -783,7 +783,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message)
static int adp8860_i2c_resume(struct i2c_client *client)
{
- adp8860_set_bits(client, ADP8860_MDCR, NSTBY);
+ adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN);
return 0;
}
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 63c882b8177a..712c25a0d8fe 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -957,7 +957,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
static int adp8870_i2c_resume(struct i2c_client *client)
{
- adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
+ adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN);
return 0;
}
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index f57e1905236a..c02aa2c2575a 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -10,25 +10,16 @@
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/wait.h>
-#include <linux/module.h>
-#include <linux/fb.h>
+#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/fb.h>
#include <linux/gpio.h>
-#include <linux/spi/spi.h>
#include <linux/lcd.h>
-#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
#define SLEEPMSEC 0x1000
#define ENDDEF 0x2000
@@ -210,8 +201,9 @@ static int ams369fg06_panel_send_sequence(struct ams369fg06 *lcd,
ret = ams369fg06_spi_write(lcd, wbuf[i], wbuf[i+1]);
if (ret)
break;
- } else
- mdelay(wbuf[i+1]);
+ } else {
+ msleep(wbuf[i+1]);
+ }
i += 2;
}
@@ -313,41 +305,29 @@ static int ams369fg06_ldi_disable(struct ams369fg06 *lcd)
static int ams369fg06_power_is_on(int power)
{
- return ((power) <= FB_BLANK_NORMAL);
+ return power <= FB_BLANK_NORMAL;
}
static int ams369fg06_power_on(struct ams369fg06 *lcd)
{
int ret = 0;
- struct lcd_platform_data *pd = NULL;
- struct backlight_device *bd = NULL;
+ struct lcd_platform_data *pd;
+ struct backlight_device *bd;
pd = lcd->lcd_pd;
- if (!pd) {
- dev_err(lcd->dev, "platform data is NULL.\n");
- return -EFAULT;
- }
-
bd = lcd->bd;
- if (!bd) {
- dev_err(lcd->dev, "backlight device is NULL.\n");
- return -EFAULT;
- }
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
- } else {
+ if (pd->power_on) {
pd->power_on(lcd->ld, 1);
- mdelay(pd->power_on_delay);
+ msleep(pd->power_on_delay);
}
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
} else {
pd->reset(lcd->ld);
- mdelay(pd->reset_delay);
+ msleep(pd->reset_delay);
}
ret = ams369fg06_ldi_init(lcd);
@@ -374,14 +354,10 @@ static int ams369fg06_power_on(struct ams369fg06 *lcd)
static int ams369fg06_power_off(struct ams369fg06 *lcd)
{
- int ret = 0;
- struct lcd_platform_data *pd = NULL;
+ int ret;
+ struct lcd_platform_data *pd;
pd = lcd->lcd_pd;
- if (!pd) {
- dev_err(lcd->dev, "platform data is NULL\n");
- return -EFAULT;
- }
ret = ams369fg06_ldi_disable(lcd);
if (ret) {
@@ -389,12 +365,9 @@ static int ams369fg06_power_off(struct ams369fg06 *lcd)
return -EIO;
}
- mdelay(pd->power_off_delay);
+ msleep(pd->power_off_delay);
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
- } else
+ if (pd->power_on)
pd->power_on(lcd->ld, 0);
return 0;
@@ -446,7 +419,7 @@ static int ams369fg06_set_brightness(struct backlight_device *bd)
{
int ret = 0;
int brightness = bd->props.brightness;
- struct ams369fg06 *lcd = dev_get_drvdata(&bd->dev);
+ struct ams369fg06 *lcd = bl_get_data(bd);
if (brightness < MIN_BRIGHTNESS ||
brightness > bd->props.max_brightness) {
@@ -501,7 +474,7 @@ static int ams369fg06_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL\n");
- return -EFAULT;
+ return -EINVAL;
}
ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
@@ -534,10 +507,11 @@ static int ams369fg06_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
ams369fg06_power(lcd, FB_BLANK_UNBLANK);
- } else
+ } else {
lcd->power = FB_BLANK_UNBLANK;
+ }
- dev_set_drvdata(&spi->dev, lcd);
+ spi_set_drvdata(spi, lcd);
dev_info(&spi->dev, "ams369fg06 panel driver has been probed.\n");
@@ -550,7 +524,7 @@ out_lcd_unregister:
static int ams369fg06_remove(struct spi_device *spi)
{
- struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
+ struct ams369fg06 *lcd = spi_get_drvdata(spi);
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
@@ -560,44 +534,26 @@ static int ams369fg06_remove(struct spi_device *spi)
}
#if defined(CONFIG_PM)
-static unsigned int before_power;
-
static int ams369fg06_suspend(struct spi_device *spi, pm_message_t mesg)
{
- int ret = 0;
- struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
+ struct ams369fg06 *lcd = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
- before_power = lcd->power;
-
/*
* when the lcd panel is suspended, it is powered off
* regardless of its status.
*/
- ret = ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
-
- return ret;
+ return ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
}
static int ams369fg06_resume(struct spi_device *spi)
{
- int ret = 0;
- struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
+ struct ams369fg06 *lcd = spi_get_drvdata(spi);
- /*
- * after suspended, if lcd panel status is FB_BLANK_UNBLANK
- * (at that time, before_power is FB_BLANK_UNBLANK) then
- * it changes that status to FB_BLANK_POWERDOWN to get lcd on.
- */
- if (before_power == FB_BLANK_UNBLANK)
- lcd->power = FB_BLANK_POWERDOWN;
-
- dev_dbg(&spi->dev, "before_power = %d\n", before_power);
+ lcd->power = FB_BLANK_POWERDOWN;
- ret = ams369fg06_power(lcd, before_power);
-
- return ret;
+ return ams369fg06_power(lcd, FB_BLANK_UNBLANK);
}
#else
#define ams369fg06_suspend NULL
@@ -606,7 +562,7 @@ static int ams369fg06_resume(struct spi_device *spi)
static void ams369fg06_shutdown(struct spi_device *spi)
{
- struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
+ struct ams369fg06 *lcd = spi_get_drvdata(spi);
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
}
diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
new file mode 100644
index 000000000000..41d52fe52543
--- /dev/null
+++ b/drivers/video/backlight/as3711_bl.c
@@ -0,0 +1,380 @@
+/*
+ * AS3711 PMIC backlight driver, using DCDC Step Up Converters
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License as
+ * published by the Free Software Foundation
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/mfd/as3711.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+enum as3711_bl_type {
+ AS3711_BL_SU1,
+ AS3711_BL_SU2,
+};
+
+struct as3711_bl_data {
+ bool powered;
+ const char *fb_name;
+ struct device *fb_dev;
+ enum as3711_bl_type type;
+ int brightness;
+ struct backlight_device *bl;
+};
+
+struct as3711_bl_supply {
+ struct as3711_bl_data su1;
+ struct as3711_bl_data su2;
+ const struct as3711_bl_pdata *pdata;
+ struct as3711 *as3711;
+};
+
+static struct as3711_bl_supply *to_supply(struct as3711_bl_data *su)
+{
+ switch (su->type) {
+ case AS3711_BL_SU1:
+ return container_of(su, struct as3711_bl_supply, su1);
+ case AS3711_BL_SU2:
+ return container_of(su, struct as3711_bl_supply, su2);
+ }
+ return NULL;
+}
+
+static int as3711_set_brightness_auto_i(struct as3711_bl_data *data,
+ unsigned int brightness)
+{
+ struct as3711_bl_supply *supply = to_supply(data);
+ struct as3711 *as3711 = supply->as3711;
+ const struct as3711_bl_pdata *pdata = supply->pdata;
+ int ret = 0;
+
+ /* Only all equal current values are supported */
+ if (pdata->su2_auto_curr1)
+ ret = regmap_write(as3711->regmap, AS3711_CURR1_VALUE,
+ brightness);
+ if (!ret && pdata->su2_auto_curr2)
+ ret = regmap_write(as3711->regmap, AS3711_CURR2_VALUE,
+ brightness);
+ if (!ret && pdata->su2_auto_curr3)
+ ret = regmap_write(as3711->regmap, AS3711_CURR3_VALUE,
+ brightness);
+
+ return ret;
+}
+
+static int as3711_set_brightness_v(struct as3711 *as3711,
+ unsigned int brightness,
+ unsigned int reg)
+{
+ if (brightness > 31)
+ return -EINVAL;
+
+ return regmap_update_bits(as3711->regmap, reg, 0xf0,
+ brightness << 4);
+}
+
+static int as3711_bl_su2_reset(struct as3711_bl_supply *supply)
+{
+ struct as3711 *as3711 = supply->as3711;
+ int ret = regmap_update_bits(as3711->regmap, AS3711_STEPUP_CONTROL_5,
+ 3, supply->pdata->su2_fbprot);
+ if (!ret)
+ ret = regmap_update_bits(as3711->regmap,
+ AS3711_STEPUP_CONTROL_2, 1, 0);
+ if (!ret)
+ ret = regmap_update_bits(as3711->regmap,
+ AS3711_STEPUP_CONTROL_2, 1, 1);
+ return ret;
+}
+
+/*
+ * Someone with less fragile or less expensive hardware could try to simplify
+ * the brightness adjustment procedure.
+ */
+static int as3711_bl_update_status(struct backlight_device *bl)
+{
+ struct as3711_bl_data *data = bl_get_data(bl);
+ struct as3711_bl_supply *supply = to_supply(data);
+ struct as3711 *as3711 = supply->as3711;
+ int brightness = bl->props.brightness;
+ int ret = 0;
+
+ dev_dbg(&bl->dev, "%s(): brightness %u, pwr %x, blank %x, state %x\n",
+ __func__, bl->props.brightness, bl->props.power,
+ bl->props.fb_blank, bl->props.state);
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+ brightness = 0;
+
+ if (data->type == AS3711_BL_SU1) {
+ ret = as3711_set_brightness_v(as3711, brightness,
+ AS3711_STEPUP_CONTROL_1);
+ } else {
+ const struct as3711_bl_pdata *pdata = supply->pdata;
+
+ switch (pdata->su2_feedback) {
+ case AS3711_SU2_VOLTAGE:
+ ret = as3711_set_brightness_v(as3711, brightness,
+ AS3711_STEPUP_CONTROL_2);
+ break;
+ case AS3711_SU2_CURR_AUTO:
+ ret = as3711_set_brightness_auto_i(data, brightness / 4);
+ if (ret < 0)
+ return ret;
+ if (brightness) {
+ ret = as3711_bl_su2_reset(supply);
+ if (ret < 0)
+ return ret;
+ udelay(500);
+ ret = as3711_set_brightness_auto_i(data, brightness);
+ } else {
+ ret = regmap_update_bits(as3711->regmap,
+ AS3711_STEPUP_CONTROL_2, 1, 0);
+ }
+ break;
+ /* Manual one current feedback pin below */
+ case AS3711_SU2_CURR1:
+ ret = regmap_write(as3711->regmap, AS3711_CURR1_VALUE,
+ brightness);
+ break;
+ case AS3711_SU2_CURR2:
+ ret = regmap_write(as3711->regmap, AS3711_CURR2_VALUE,
+ brightness);
+ break;
+ case AS3711_SU2_CURR3:
+ ret = regmap_write(as3711->regmap, AS3711_CURR3_VALUE,
+ brightness);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ }
+ if (!ret)
+ data->brightness = brightness;
+
+ return ret;
+}
+
+static int as3711_bl_get_brightness(struct backlight_device *bl)
+{
+ struct as3711_bl_data *data = bl_get_data(bl);
+
+ return data->brightness;
+}
+
+static const struct backlight_ops as3711_bl_ops = {
+ .update_status = as3711_bl_update_status,
+ .get_brightness = as3711_bl_get_brightness,
+};
+
+static int as3711_bl_init_su2(struct as3711_bl_supply *supply)
+{
+ struct as3711 *as3711 = supply->as3711;
+ const struct as3711_bl_pdata *pdata = supply->pdata;
+ u8 ctl = 0;
+ int ret;
+
+ dev_dbg(as3711->dev, "%s(): use %u\n", __func__, pdata->su2_feedback);
+
+ /* Turn SU2 off */
+ ret = regmap_write(as3711->regmap, AS3711_STEPUP_CONTROL_2, 0);
+ if (ret < 0)
+ return ret;
+
+ switch (pdata->su2_feedback) {
+ case AS3711_SU2_VOLTAGE:
+ ret = regmap_update_bits(as3711->regmap, AS3711_STEPUP_CONTROL_4, 3, 0);
+ break;
+ case AS3711_SU2_CURR1:
+ ctl = 1;
+ ret = regmap_update_bits(as3711->regmap, AS3711_STEPUP_CONTROL_4, 3, 1);
+ break;
+ case AS3711_SU2_CURR2:
+ ctl = 4;
+ ret = regmap_update_bits(as3711->regmap, AS3711_STEPUP_CONTROL_4, 3, 2);
+ break;
+ case AS3711_SU2_CURR3:
+ ctl = 0x10;
+ ret = regmap_update_bits(as3711->regmap, AS3711_STEPUP_CONTROL_4, 3, 3);
+ break;
+ case AS3711_SU2_CURR_AUTO:
+ if (pdata->su2_auto_curr1)
+ ctl = 2;
+ if (pdata->su2_auto_curr2)
+ ctl |= 8;
+ if (pdata->su2_auto_curr3)
+ ctl |= 0x20;
+ ret = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!ret)
+ ret = regmap_write(as3711->regmap, AS3711_CURR_CONTROL, ctl);
+
+ return ret;
+}
+
+static int as3711_bl_register(struct platform_device *pdev,
+ unsigned int max_brightness, struct as3711_bl_data *su)
+{
+ struct backlight_properties props = {.type = BACKLIGHT_RAW,};
+ struct backlight_device *bl;
+
+ /* max tuning I = 31uA for voltage- and 38250uA for current-feedback */
+ props.max_brightness = max_brightness;
+
+ bl = backlight_device_register(su->type == AS3711_BL_SU1 ?
+ "as3711-su1" : "as3711-su2",
+ &pdev->dev, su,
+ &as3711_bl_ops, &props);
+ if (IS_ERR(bl)) {
+ dev_err(&pdev->dev, "failed to register backlight\n");
+ return PTR_ERR(bl);
+ }
+
+ bl->props.brightness = props.max_brightness;
+
+ backlight_update_status(bl);
+
+ su->bl = bl;
+
+ return 0;
+}
+
+static int as3711_backlight_probe(struct platform_device *pdev)
+{
+ struct as3711_bl_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct as3711 *as3711 = dev_get_drvdata(pdev->dev.parent);
+ struct as3711_bl_supply *supply;
+ struct as3711_bl_data *su;
+ unsigned int max_brightness;
+ int ret;
+
+ if (!pdata || (!pdata->su1_fb && !pdata->su2_fb)) {
+ dev_err(&pdev->dev, "No platform data, exiting...\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Due to possible hardware damage, all modes unsupported on my hardware
+ * are blocked here. Anyone wishing to use any of those modes will have to
+ * first review the code, then activate and test it.
+ */
+ if (pdata->su1_fb ||
+ pdata->su2_fbprot != AS3711_SU2_GPIO4 ||
+ pdata->su2_feedback != AS3711_SU2_CURR_AUTO) {
+ dev_warn(&pdev->dev,
+ "Attention! An untested mode has been chosen!\n"
+ "Please, review the code, enable, test, and report success:-)\n");
+ return -EINVAL;
+ }
+
+ supply = devm_kzalloc(&pdev->dev, sizeof(*supply), GFP_KERNEL);
+ if (!supply)
+ return -ENOMEM;
+
+ supply->as3711 = as3711;
+ supply->pdata = pdata;
+
+ if (pdata->su1_fb) {
+ su = &supply->su1;
+ su->fb_name = pdata->su1_fb;
+ su->type = AS3711_BL_SU1;
+
+ max_brightness = min(pdata->su1_max_uA, 31);
+ ret = as3711_bl_register(pdev, max_brightness, su);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (pdata->su2_fb) {
+ su = &supply->su2;
+ su->fb_name = pdata->su2_fb;
+ su->type = AS3711_BL_SU2;
+
+ switch (pdata->su2_fbprot) {
+ case AS3711_SU2_GPIO2:
+ case AS3711_SU2_GPIO3:
+ case AS3711_SU2_GPIO4:
+ case AS3711_SU2_LX_SD4:
+ break;
+ default:
+ ret = -EINVAL;
+ goto esu2;
+ }
+
+ switch (pdata->su2_feedback) {
+ case AS3711_SU2_VOLTAGE:
+ max_brightness = min(pdata->su2_max_uA, 31);
+ break;
+ case AS3711_SU2_CURR1:
+ case AS3711_SU2_CURR2:
+ case AS3711_SU2_CURR3:
+ case AS3711_SU2_CURR_AUTO:
+ max_brightness = min(pdata->su2_max_uA / 150, 255);
+ break;
+ default:
+ ret = -EINVAL;
+ goto esu2;
+ }
+
+ ret = as3711_bl_init_su2(supply);
+ if (ret < 0)
+ return ret;
+
+ ret = as3711_bl_register(pdev, max_brightness, su);
+ if (ret < 0)
+ goto esu2;
+ }
+
+ platform_set_drvdata(pdev, supply);
+
+ return 0;
+
+esu2:
+ backlight_device_unregister(supply->su1.bl);
+ return ret;
+}
+
+static int as3711_backlight_remove(struct platform_device *pdev)
+{
+ struct as3711_bl_supply *supply = platform_get_drvdata(pdev);
+
+ backlight_device_unregister(supply->su1.bl);
+ backlight_device_unregister(supply->su2.bl);
+
+ return 0;
+}
+
+static struct platform_driver as3711_backlight_driver = {
+ .driver = {
+ .name = "as3711-backlight",
+ .owner = THIS_MODULE,
+ },
+ .probe = as3711_backlight_probe,
+ .remove = as3711_backlight_remove,
+};
+
+module_platform_driver(as3711_backlight_driver);
+
+MODULE_DESCRIPTION("Backlight Driver for AS3711 PMICs");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:as3711-backlight");
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index e323fcbe884e..aa782f302983 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -337,7 +337,7 @@ static void corgi_lcd_power_off(struct corgi_lcd *lcd)
static int corgi_lcd_set_mode(struct lcd_device *ld, struct fb_videomode *m)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&ld->dev);
+ struct corgi_lcd *lcd = lcd_get_data(ld);
int mode = CORGI_LCD_MODE_QVGA;
if (m->xres == 640 || m->xres == 480)
@@ -364,7 +364,7 @@ static int corgi_lcd_set_mode(struct lcd_device *ld, struct fb_videomode *m)
static int corgi_lcd_set_power(struct lcd_device *ld, int power)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&ld->dev);
+ struct corgi_lcd *lcd = lcd_get_data(ld);
if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power))
corgi_lcd_power_on(lcd);
@@ -378,7 +378,7 @@ static int corgi_lcd_set_power(struct lcd_device *ld, int power)
static int corgi_lcd_get_power(struct lcd_device *ld)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&ld->dev);
+ struct corgi_lcd *lcd = lcd_get_data(ld);
return lcd->power;
}
@@ -391,7 +391,7 @@ static struct lcd_ops corgi_lcd_ops = {
static int corgi_bl_get_intensity(struct backlight_device *bd)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&bd->dev);
+ struct corgi_lcd *lcd = bl_get_data(bd);
return lcd->intensity;
}
@@ -423,7 +423,7 @@ static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity)
static int corgi_bl_update_status(struct backlight_device *bd)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&bd->dev);
+ struct corgi_lcd *lcd = bl_get_data(bd);
int intensity = bd->props.brightness;
if (bd->props.power != FB_BLANK_UNBLANK)
@@ -460,7 +460,7 @@ static const struct backlight_ops corgi_bl_ops = {
#ifdef CONFIG_PM
static int corgi_lcd_suspend(struct spi_device *spi, pm_message_t state)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&spi->dev);
+ struct corgi_lcd *lcd = spi_get_drvdata(spi);
corgibl_flags |= CORGIBL_SUSPENDED;
corgi_bl_set_intensity(lcd, 0);
@@ -470,7 +470,7 @@ static int corgi_lcd_suspend(struct spi_device *spi, pm_message_t state)
static int corgi_lcd_resume(struct spi_device *spi)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&spi->dev);
+ struct corgi_lcd *lcd = spi_get_drvdata(spi);
corgibl_flags &= ~CORGIBL_SUSPENDED;
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_UNBLANK);
@@ -577,7 +577,7 @@ static int corgi_lcd_probe(struct spi_device *spi)
lcd->kick_battery = pdata->kick_battery;
- dev_set_drvdata(&spi->dev, lcd);
+ spi_set_drvdata(spi, lcd);
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_UNBLANK);
backlight_update_status(lcd->bl_dev);
@@ -594,7 +594,7 @@ err_unregister_lcd:
static int corgi_lcd_remove(struct spi_device *spi)
{
- struct corgi_lcd *lcd = dev_get_drvdata(&spi->dev);
+ struct corgi_lcd *lcd = spi_get_drvdata(spi);
lcd->bl_dev->props.power = FB_BLANK_UNBLANK;
lcd->bl_dev->props.brightness = 0;
diff --git a/drivers/video/backlight/hx8357.c b/drivers/video/backlight/hx8357.c
new file mode 100644
index 000000000000..a0482b567bfe
--- /dev/null
+++ b/drivers/video/backlight/hx8357.c
@@ -0,0 +1,497 @@
+/*
+ * Driver for the Himax HX-8357 LCD Controller
+ *
+ * Copyright 2012 Free Electrons
+ *
+ * Licensed under the GPLv2 or later.
+ */
+
+#include <linux/delay.h>
+#include <linux/lcd.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
+
+#define HX8357_NUM_IM_PINS 3
+
+#define HX8357_SWRESET 0x01
+#define HX8357_GET_RED_CHANNEL 0x06
+#define HX8357_GET_GREEN_CHANNEL 0x07
+#define HX8357_GET_BLUE_CHANNEL 0x08
+#define HX8357_GET_POWER_MODE 0x0a
+#define HX8357_GET_MADCTL 0x0b
+#define HX8357_GET_PIXEL_FORMAT 0x0c
+#define HX8357_GET_DISPLAY_MODE 0x0d
+#define HX8357_GET_SIGNAL_MODE 0x0e
+#define HX8357_GET_DIAGNOSTIC_RESULT 0x0f
+#define HX8357_ENTER_SLEEP_MODE 0x10
+#define HX8357_EXIT_SLEEP_MODE 0x11
+#define HX8357_ENTER_PARTIAL_MODE 0x12
+#define HX8357_ENTER_NORMAL_MODE 0x13
+#define HX8357_EXIT_INVERSION_MODE 0x20
+#define HX8357_ENTER_INVERSION_MODE 0x21
+#define HX8357_SET_DISPLAY_OFF 0x28
+#define HX8357_SET_DISPLAY_ON 0x29
+#define HX8357_SET_COLUMN_ADDRESS 0x2a
+#define HX8357_SET_PAGE_ADDRESS 0x2b
+#define HX8357_WRITE_MEMORY_START 0x2c
+#define HX8357_READ_MEMORY_START 0x2e
+#define HX8357_SET_PARTIAL_AREA 0x30
+#define HX8357_SET_SCROLL_AREA 0x33
+#define HX8357_SET_TEAR_OFF 0x34
+#define HX8357_SET_TEAR_ON 0x35
+#define HX8357_SET_ADDRESS_MODE 0x36
+#define HX8357_SET_SCROLL_START 0x37
+#define HX8357_EXIT_IDLE_MODE 0x38
+#define HX8357_ENTER_IDLE_MODE 0x39
+#define HX8357_SET_PIXEL_FORMAT 0x3a
+#define HX8357_SET_PIXEL_FORMAT_DBI_3BIT (0x1)
+#define HX8357_SET_PIXEL_FORMAT_DBI_16BIT (0x5)
+#define HX8357_SET_PIXEL_FORMAT_DBI_18BIT (0x6)
+#define HX8357_SET_PIXEL_FORMAT_DPI_3BIT (0x1 << 4)
+#define HX8357_SET_PIXEL_FORMAT_DPI_16BIT (0x5 << 4)
+#define HX8357_SET_PIXEL_FORMAT_DPI_18BIT (0x6 << 4)
+#define HX8357_WRITE_MEMORY_CONTINUE 0x3c
+#define HX8357_READ_MEMORY_CONTINUE 0x3e
+#define HX8357_SET_TEAR_SCAN_LINES 0x44
+#define HX8357_GET_SCAN_LINES 0x45
+#define HX8357_READ_DDB_START 0xa1
+#define HX8357_SET_DISPLAY_MODE 0xb4
+#define HX8357_SET_DISPLAY_MODE_RGB_THROUGH (0x3)
+#define HX8357_SET_DISPLAY_MODE_RGB_INTERFACE (1 << 4)
+#define HX8357_SET_PANEL_DRIVING 0xc0
+#define HX8357_SET_DISPLAY_FRAME 0xc5
+#define HX8357_SET_RGB 0xc6
+#define HX8357_SET_RGB_ENABLE_HIGH (1 << 1)
+#define HX8357_SET_GAMMA 0xc8
+#define HX8357_SET_POWER 0xd0
+#define HX8357_SET_VCOM 0xd1
+#define HX8357_SET_POWER_NORMAL 0xd2
+#define HX8357_SET_PANEL_RELATED 0xe9
+
+struct hx8357_data {
+ unsigned im_pins[HX8357_NUM_IM_PINS];
+ unsigned reset;
+ struct spi_device *spi;
+ int state;
+};
+
+static u8 hx8357_seq_power[] = {
+ HX8357_SET_POWER, 0x44, 0x41, 0x06,
+};
+
+static u8 hx8357_seq_vcom[] = {
+ HX8357_SET_VCOM, 0x40, 0x10,
+};
+
+static u8 hx8357_seq_power_normal[] = {
+ HX8357_SET_POWER_NORMAL, 0x05, 0x12,
+};
+
+static u8 hx8357_seq_panel_driving[] = {
+ HX8357_SET_PANEL_DRIVING, 0x14, 0x3b, 0x00, 0x02, 0x11,
+};
+
+static u8 hx8357_seq_display_frame[] = {
+ HX8357_SET_DISPLAY_FRAME, 0x0c,
+};
+
+static u8 hx8357_seq_panel_related[] = {
+ HX8357_SET_PANEL_RELATED, 0x01,
+};
+
+static u8 hx8357_seq_undefined1[] = {
+ 0xea, 0x03, 0x00, 0x00,
+};
+
+static u8 hx8357_seq_undefined2[] = {
+ 0xeb, 0x40, 0x54, 0x26, 0xdb,
+};
+
+static u8 hx8357_seq_gamma[] = {
+ HX8357_SET_GAMMA, 0x00, 0x15, 0x00, 0x22, 0x00,
+ 0x08, 0x77, 0x26, 0x77, 0x22, 0x04, 0x00,
+};
+
+static u8 hx8357_seq_address_mode[] = {
+ HX8357_SET_ADDRESS_MODE, 0xc0,
+};
+
+static u8 hx8357_seq_pixel_format[] = {
+ HX8357_SET_PIXEL_FORMAT,
+ HX8357_SET_PIXEL_FORMAT_DPI_18BIT |
+ HX8357_SET_PIXEL_FORMAT_DBI_18BIT,
+};
+
+static u8 hx8357_seq_column_address[] = {
+ HX8357_SET_COLUMN_ADDRESS, 0x00, 0x00, 0x01, 0x3f,
+};
+
+static u8 hx8357_seq_page_address[] = {
+ HX8357_SET_PAGE_ADDRESS, 0x00, 0x00, 0x01, 0xdf,
+};
+
+static u8 hx8357_seq_rgb[] = {
+ HX8357_SET_RGB, 0x02,
+};
+
+static u8 hx8357_seq_display_mode[] = {
+ HX8357_SET_DISPLAY_MODE,
+ HX8357_SET_DISPLAY_MODE_RGB_THROUGH |
+ HX8357_SET_DISPLAY_MODE_RGB_INTERFACE,
+};
+
+static int hx8357_spi_write_then_read(struct lcd_device *lcdev,
+ u8 *txbuf, u16 txlen,
+ u8 *rxbuf, u16 rxlen)
+{
+ struct hx8357_data *lcd = lcd_get_data(lcdev);
+ struct spi_message msg;
+ struct spi_transfer xfer[2];
+ u16 *local_txbuf = NULL;
+ int ret = 0;
+
+ memset(xfer, 0, sizeof(xfer));
+ spi_message_init(&msg);
+
+ if (txlen) {
+ int i;
+
+ local_txbuf = kcalloc(txlen, sizeof(*local_txbuf), GFP_KERNEL);
+
+ if (!local_txbuf)
+ return -ENOMEM;
+
+ for (i = 0; i < txlen; i++) {
+ local_txbuf[i] = txbuf[i];
+ if (i > 0)
+ local_txbuf[i] |= 1 << 8;
+ }
+
+ xfer[0].len = 2 * txlen;
+ xfer[0].bits_per_word = 9;
+ xfer[0].tx_buf = local_txbuf;
+ spi_message_add_tail(&xfer[0], &msg);
+ }
+
+ if (rxlen) {
+ xfer[1].len = rxlen;
+ xfer[1].bits_per_word = 8;
+ xfer[1].rx_buf = rxbuf;
+ spi_message_add_tail(&xfer[1], &msg);
+ }
+
+ ret = spi_sync(lcd->spi, &msg);
+ if (ret < 0)
+ dev_err(&lcdev->dev, "Couldn't send SPI data\n");
+
+ if (txlen)
+ kfree(local_txbuf);
+
+ return ret;
+}
+
+static inline int hx8357_spi_write_array(struct lcd_device *lcdev,
+ u8 *value, u8 len)
+{
+ return hx8357_spi_write_then_read(lcdev, value, len, NULL, 0);
+}
+
+static inline int hx8357_spi_write_byte(struct lcd_device *lcdev,
+ u8 value)
+{
+ return hx8357_spi_write_then_read(lcdev, &value, 1, NULL, 0);
+}
+
+static int hx8357_enter_standby(struct lcd_device *lcdev)
+{
+ int ret;
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_SET_DISPLAY_OFF);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(10000, 12000);
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_ENTER_SLEEP_MODE);
+ if (ret < 0)
+ return ret;
+
+ msleep(120);
+
+ return 0;
+}
+
+static int hx8357_exit_standby(struct lcd_device *lcdev)
+{
+ int ret;
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_EXIT_SLEEP_MODE);
+ if (ret < 0)
+ return ret;
+
+ msleep(120);
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_SET_DISPLAY_ON);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int hx8357_lcd_init(struct lcd_device *lcdev)
+{
+ struct hx8357_data *lcd = lcd_get_data(lcdev);
+ int ret;
+
+ /*
+ * Set the interface selection pins to SPI mode, with three
+ * wires
+ */
+ gpio_set_value_cansleep(lcd->im_pins[0], 1);
+ gpio_set_value_cansleep(lcd->im_pins[1], 0);
+ gpio_set_value_cansleep(lcd->im_pins[2], 1);
+
+ /* Reset the screen */
+ gpio_set_value(lcd->reset, 1);
+ usleep_range(10000, 12000);
+ gpio_set_value(lcd->reset, 0);
+ usleep_range(10000, 12000);
+ gpio_set_value(lcd->reset, 1);
+ msleep(120);
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_power,
+ ARRAY_SIZE(hx8357_seq_power));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_vcom,
+ ARRAY_SIZE(hx8357_seq_vcom));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_power_normal,
+ ARRAY_SIZE(hx8357_seq_power_normal));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_panel_driving,
+ ARRAY_SIZE(hx8357_seq_panel_driving));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_display_frame,
+ ARRAY_SIZE(hx8357_seq_display_frame));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_panel_related,
+ ARRAY_SIZE(hx8357_seq_panel_related));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_undefined1,
+ ARRAY_SIZE(hx8357_seq_undefined1));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_undefined2,
+ ARRAY_SIZE(hx8357_seq_undefined2));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_gamma,
+ ARRAY_SIZE(hx8357_seq_gamma));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_address_mode,
+ ARRAY_SIZE(hx8357_seq_address_mode));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_pixel_format,
+ ARRAY_SIZE(hx8357_seq_pixel_format));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_column_address,
+ ARRAY_SIZE(hx8357_seq_column_address));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_page_address,
+ ARRAY_SIZE(hx8357_seq_page_address));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_rgb,
+ ARRAY_SIZE(hx8357_seq_rgb));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8357_seq_display_mode,
+ ARRAY_SIZE(hx8357_seq_display_mode));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_EXIT_SLEEP_MODE);
+ if (ret < 0)
+ return ret;
+
+ msleep(120);
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_SET_DISPLAY_ON);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(5000, 7000);
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_WRITE_MEMORY_START);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
+
+static int hx8357_set_power(struct lcd_device *lcdev, int power)
+{
+ struct hx8357_data *lcd = lcd_get_data(lcdev);
+ int ret = 0;
+
+ if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->state))
+ ret = hx8357_exit_standby(lcdev);
+ else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->state))
+ ret = hx8357_enter_standby(lcdev);
+
+ if (ret == 0)
+ lcd->state = power;
+ else
+ dev_warn(&lcdev->dev, "failed to set power mode %d\n", power);
+
+ return ret;
+}
+
+static int hx8357_get_power(struct lcd_device *lcdev)
+{
+ struct hx8357_data *lcd = lcd_get_data(lcdev);
+
+ return lcd->state;
+}
+
+static struct lcd_ops hx8357_ops = {
+ .set_power = hx8357_set_power,
+ .get_power = hx8357_get_power,
+};
+
+static int hx8357_probe(struct spi_device *spi)
+{
+ struct lcd_device *lcdev;
+ struct hx8357_data *lcd;
+ int i, ret;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd) {
+ dev_err(&spi->dev, "Couldn't allocate lcd internal structure!\n");
+ return -ENOMEM;
+ }
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "SPI setup failed.\n");
+ return ret;
+ }
+
+ lcd->spi = spi;
+
+ lcd->reset = of_get_named_gpio(spi->dev.of_node, "gpios-reset", 0);
+ if (!gpio_is_valid(lcd->reset)) {
+ dev_err(&spi->dev, "Missing dt property: gpios-reset\n");
+ return -EINVAL;
+ }
+
+ ret = devm_gpio_request_one(&spi->dev, lcd->reset,
+ GPIOF_OUT_INIT_HIGH,
+ "hx8357-reset");
+ if (ret) {
+ dev_err(&spi->dev,
+ "failed to request gpio %d: %d\n",
+ lcd->reset, ret);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < HX8357_NUM_IM_PINS; i++) {
+ lcd->im_pins[i] = of_get_named_gpio(spi->dev.of_node,
+ "im-gpios", i);
+ if (lcd->im_pins[i] == -EPROBE_DEFER) {
+ dev_info(&spi->dev, "Requested GPIO is not available yet, deferring the probe\n");
+ return -EPROBE_DEFER;
+ }
+ if (!gpio_is_valid(lcd->im_pins[i])) {
+ dev_err(&spi->dev, "Missing dt property: im-gpios\n");
+ return -EINVAL;
+ }
+
+ ret = devm_gpio_request_one(&spi->dev, lcd->im_pins[i],
+ GPIOF_OUT_INIT_LOW, "im_pins");
+ if (ret) {
+ dev_err(&spi->dev, "failed to request gpio %d: %d\n",
+ lcd->im_pins[i], ret);
+ return -EINVAL;
+ }
+ }
+
+ lcdev = lcd_device_register("mxsfb", &spi->dev, lcd, &hx8357_ops);
+ if (IS_ERR(lcdev)) {
+ ret = PTR_ERR(lcdev);
+ return ret;
+ }
+ spi_set_drvdata(spi, lcdev);
+
+ ret = hx8357_lcd_init(lcdev);
+ if (ret) {
+ dev_err(&spi->dev, "Couldn't initialize panel\n");
+ goto init_error;
+ }
+
+ dev_info(&spi->dev, "Panel probed\n");
+
+ return 0;
+
+init_error:
+ lcd_device_unregister(lcdev);
+ return ret;
+}
+
+static int hx8357_remove(struct spi_device *spi)
+{
+ struct lcd_device *lcdev = spi_get_drvdata(spi);
+
+ lcd_device_unregister(lcdev);
+ return 0;
+}
+
+static const struct of_device_id hx8357_dt_ids[] = {
+ { .compatible = "himax,hx8357" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hx8357_dt_ids);
+
+static struct spi_driver hx8357_driver = {
+ .probe = hx8357_probe,
+ .remove = hx8357_remove,
+ .driver = {
+ .name = "hx8357",
+ .of_match_table = of_match_ptr(hx8357_dt_ids),
+ },
+};
+
+module_spi_driver(hx8357_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Himax HX-8357 LCD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 9a35196d12d7..fb6155771326 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -49,7 +49,7 @@ static void l4f00242t03_reset(unsigned int gpio)
static void l4f00242t03_lcd_init(struct spi_device *spi)
{
struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
- struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+ struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
const u16 cmd[] = { 0x36, param(0), 0x3A, param(0x60) };
dev_dbg(&spi->dev, "initializing LCD\n");
@@ -70,7 +70,7 @@ static void l4f00242t03_lcd_init(struct spi_device *spi)
static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
{
struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
- struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+ struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "Powering down LCD\n");
@@ -168,7 +168,7 @@ static int l4f00242t03_probe(struct spi_device *spi)
return -ENOMEM;
}
- dev_set_drvdata(&spi->dev, priv);
+ spi_set_drvdata(spi, priv);
spi->bits_per_word = 9;
spi_setup(spi);
@@ -190,27 +190,24 @@ static int l4f00242t03_probe(struct spi_device *spi)
return ret;
}
- priv->io_reg = regulator_get(&spi->dev, "vdd");
+ priv->io_reg = devm_regulator_get(&spi->dev, "vdd");
if (IS_ERR(priv->io_reg)) {
dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
__func__);
return PTR_ERR(priv->io_reg);
}
- priv->core_reg = regulator_get(&spi->dev, "vcore");
+ priv->core_reg = devm_regulator_get(&spi->dev, "vcore");
if (IS_ERR(priv->core_reg)) {
- ret = PTR_ERR(priv->core_reg);
dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
__func__);
- goto err1;
+ return PTR_ERR(priv->core_reg);
}
priv->ld = lcd_device_register("l4f00242t03",
&spi->dev, priv, &l4f_ops);
- if (IS_ERR(priv->ld)) {
- ret = PTR_ERR(priv->ld);
- goto err2;
- }
+ if (IS_ERR(priv->ld))
+ return PTR_ERR(priv->ld);
/* Init the LCD */
l4f00242t03_lcd_init(spi);
@@ -220,33 +217,22 @@ static int l4f00242t03_probe(struct spi_device *spi)
dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n");
return 0;
-
-err2:
- regulator_put(priv->core_reg);
-err1:
- regulator_put(priv->io_reg);
-
- return ret;
}
static int l4f00242t03_remove(struct spi_device *spi)
{
- struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+ struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
lcd_device_unregister(priv->ld);
-
- dev_set_drvdata(&spi->dev, NULL);
-
- regulator_put(priv->io_reg);
- regulator_put(priv->core_reg);
+ spi_set_drvdata(spi, NULL);
return 0;
}
static void l4f00242t03_shutdown(struct spi_device *spi)
{
- struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+ struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
if (priv)
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index 1cb352418513..1b642f5f381a 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -9,29 +9,20 @@
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/wait.h>
-#include <linux/fb.h>
+#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/fb.h>
#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/lcd.h>
-#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
#include "ld9040_gamma.h"
@@ -43,7 +34,6 @@
#define MIN_BRIGHTNESS 0
#define MAX_BRIGHTNESS 24
-#define power_is_on(pwr) ((pwr) <= FB_BLANK_NORMAL)
struct ld9040 {
struct device *dev;
@@ -78,7 +68,7 @@ static void ld9040_regulator_enable(struct ld9040 *lcd)
lcd->enabled = true;
}
- mdelay(pd->power_on_delay);
+ msleep(pd->power_on_delay);
out:
mutex_unlock(&lcd->lock);
}
@@ -474,8 +464,9 @@ static int ld9040_panel_send_sequence(struct ld9040 *lcd,
ret = ld9040_spi_write(lcd, wbuf[i], wbuf[i+1]);
if (ret)
break;
- } else
- udelay(wbuf[i+1]*1000);
+ } else {
+ msleep(wbuf[i+1]);
+ }
i += 2;
}
@@ -513,14 +504,9 @@ gamma_err:
static int ld9040_gamma_ctl(struct ld9040 *lcd, int gamma)
{
- int ret = 0;
-
- ret = _ld9040_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
-
- return ret;
+ return _ld9040_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
}
-
static int ld9040_ldi_init(struct ld9040 *lcd)
{
int ret, i;
@@ -539,7 +525,7 @@ static int ld9040_ldi_init(struct ld9040 *lcd)
for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
ret = ld9040_panel_send_sequence(lcd, init_seq[i]);
/* workaround: minimum delay time for transferring CMD */
- udelay(300);
+ usleep_range(300, 310);
if (ret)
break;
}
@@ -549,11 +535,7 @@ static int ld9040_ldi_init(struct ld9040 *lcd)
static int ld9040_ldi_enable(struct ld9040 *lcd)
{
- int ret = 0;
-
- ret = ld9040_panel_send_sequence(lcd, seq_display_on);
-
- return ret;
+ return ld9040_panel_send_sequence(lcd, seq_display_on);
}
static int ld9040_ldi_disable(struct ld9040 *lcd)
@@ -566,25 +548,27 @@ static int ld9040_ldi_disable(struct ld9040 *lcd)
return ret;
}
+static int ld9040_power_is_on(int power)
+{
+ return power <= FB_BLANK_NORMAL;
+}
+
static int ld9040_power_on(struct ld9040 *lcd)
{
int ret = 0;
- struct lcd_platform_data *pd = NULL;
+ struct lcd_platform_data *pd;
+
pd = lcd->lcd_pd;
- if (!pd) {
- dev_err(lcd->dev, "platform data is NULL.\n");
- return -EFAULT;
- }
/* lcd power on */
ld9040_regulator_enable(lcd);
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
} else {
pd->reset(lcd->ld);
- mdelay(pd->reset_delay);
+ msleep(pd->reset_delay);
}
ret = ld9040_ldi_init(lcd);
@@ -604,14 +588,10 @@ static int ld9040_power_on(struct ld9040 *lcd)
static int ld9040_power_off(struct ld9040 *lcd)
{
- int ret = 0;
- struct lcd_platform_data *pd = NULL;
+ int ret;
+ struct lcd_platform_data *pd;
pd = lcd->lcd_pd;
- if (!pd) {
- dev_err(lcd->dev, "platform data is NULL.\n");
- return -EFAULT;
- }
ret = ld9040_ldi_disable(lcd);
if (ret) {
@@ -619,7 +599,7 @@ static int ld9040_power_off(struct ld9040 *lcd)
return -EIO;
}
- mdelay(pd->power_off_delay);
+ msleep(pd->power_off_delay);
/* lcd power off */
ld9040_regulator_disable(lcd);
@@ -631,9 +611,9 @@ static int ld9040_power(struct ld9040 *lcd, int power)
{
int ret = 0;
- if (power_is_on(power) && !power_is_on(lcd->power))
+ if (ld9040_power_is_on(power) && !ld9040_power_is_on(lcd->power))
ret = ld9040_power_on(lcd);
- else if (!power_is_on(power) && power_is_on(lcd->power))
+ else if (!ld9040_power_is_on(power) && ld9040_power_is_on(lcd->power))
ret = ld9040_power_off(lcd);
if (!ret)
@@ -698,7 +678,6 @@ static const struct backlight_ops ld9040_backlight_ops = {
.update_status = ld9040_set_brightness,
};
-
static int ld9040_probe(struct spi_device *spi)
{
int ret = 0;
@@ -726,22 +705,20 @@ static int ld9040_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
}
mutex_init(&lcd->lock);
- ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
+ ret = devm_regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
if (ret) {
dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
return ret;
}
ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_regulator;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
lcd->ld = ld;
@@ -772,30 +749,28 @@ static int ld9040_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
ld9040_power(lcd, FB_BLANK_UNBLANK);
- } else
+ } else {
lcd->power = FB_BLANK_UNBLANK;
+ }
- dev_set_drvdata(&spi->dev, lcd);
+ spi_set_drvdata(spi, lcd);
dev_info(&spi->dev, "ld9040 panel driver has been probed.\n");
return 0;
out_unregister_lcd:
lcd_device_unregister(lcd->ld);
-out_free_regulator:
- regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
return ret;
}
static int ld9040_remove(struct spi_device *spi)
{
- struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
+ struct ld9040 *lcd = spi_get_drvdata(spi);
ld9040_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
- regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
return 0;
}
@@ -803,8 +778,7 @@ static int ld9040_remove(struct spi_device *spi)
#if defined(CONFIG_PM)
static int ld9040_suspend(struct spi_device *spi, pm_message_t mesg)
{
- int ret = 0;
- struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
+ struct ld9040 *lcd = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
@@ -812,21 +786,16 @@ static int ld9040_suspend(struct spi_device *spi, pm_message_t mesg)
* when lcd panel is suspend, lcd panel becomes off
* regardless of status.
*/
- ret = ld9040_power(lcd, FB_BLANK_POWERDOWN);
-
- return ret;
+ return ld9040_power(lcd, FB_BLANK_POWERDOWN);
}
static int ld9040_resume(struct spi_device *spi)
{
- int ret = 0;
- struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
+ struct ld9040 *lcd = spi_get_drvdata(spi);
lcd->power = FB_BLANK_POWERDOWN;
- ret = ld9040_power(lcd, FB_BLANK_UNBLANK);
-
- return ret;
+ return ld9040_power(lcd, FB_BLANK_UNBLANK);
}
#else
#define ld9040_suspend NULL
@@ -836,7 +805,7 @@ static int ld9040_resume(struct spi_device *spi)
/* Power down all displays on reboot, poweroff or halt. */
static void ld9040_shutdown(struct spi_device *spi)
{
- struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
+ struct ld9040 *lcd = spi_get_drvdata(spi);
ld9040_power(lcd, FB_BLANK_POWERDOWN);
}
diff --git a/drivers/video/backlight/lm3630_bl.c b/drivers/video/backlight/lm3630_bl.c
index a6d637b5c68f..76a62e978fc3 100644
--- a/drivers/video/backlight/lm3630_bl.c
+++ b/drivers/video/backlight/lm3630_bl.c
@@ -320,7 +320,7 @@ static int lm3630_backlight_register(struct lm3630_chip_data *pchip,
backlight_device_register(name, pchip->dev, pchip,
&lm3630_bank_a_ops, &props);
if (IS_ERR(pchip->bled1))
- return -EIO;
+ return PTR_ERR(pchip->bled1);
break;
case BLED_2:
props.brightness = pdata->init_brt_led2;
@@ -329,7 +329,7 @@ static int lm3630_backlight_register(struct lm3630_chip_data *pchip,
backlight_device_register(name, pchip->dev, pchip,
&lm3630_bank_b_ops, &props);
if (IS_ERR(pchip->bled2))
- return -EIO;
+ return PTR_ERR(pchip->bled2);
break;
}
return 0;
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 7ab2d2a04e41..053964da8dd3 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -350,14 +350,13 @@ static int lm3639_probe(struct i2c_client *client,
&lm3639_bled_ops, &props);
if (IS_ERR(pchip->bled)) {
dev_err(&client->dev, "fail : backlight register\n");
- ret = -EIO;
+ ret = PTR_ERR(pchip->bled);
goto err_out;
}
ret = device_create_file(&(pchip->bled->dev), &dev_attr_bled_mode);
if (ret < 0) {
dev_err(&client->dev, "failed : add sysfs entries\n");
- ret = -EIO;
goto err_bled_mode;
}
@@ -369,7 +368,6 @@ static int lm3639_probe(struct i2c_client *client,
&client->dev, &pchip->cdev_flash);
if (ret < 0) {
dev_err(&client->dev, "fail : flash register\n");
- ret = -EIO;
goto err_flash;
}
@@ -381,7 +379,6 @@ static int lm3639_probe(struct i2c_client *client,
&client->dev, &pchip->cdev_torch);
if (ret < 0) {
dev_err(&client->dev, "fail : torch register\n");
- ret = -EIO;
goto err_torch;
}
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 55819b384701..4eec47261cd3 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -180,7 +180,7 @@ static int lms283gf05_probe(struct spi_device *spi)
st->spi = spi;
st->ld = ld;
- dev_set_drvdata(&spi->dev, st);
+ spi_set_drvdata(spi, st);
/* kick in the LCD */
if (pdata)
@@ -192,7 +192,7 @@ static int lms283gf05_probe(struct spi_device *spi)
static int lms283gf05_remove(struct spi_device *spi)
{
- struct lms283gf05_state *st = dev_get_drvdata(&spi->dev);
+ struct lms283gf05_state *st = spi_get_drvdata(spi);
lcd_device_unregister(st->ld);
diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c
new file mode 100644
index 000000000000..b43882abefaf
--- /dev/null
+++ b/drivers/video/backlight/lms501kf03.c
@@ -0,0 +1,441 @@
+/*
+ * lms501kf03 TFT LCD panel driver.
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/lcd.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
+
+#define COMMAND_ONLY 0x00
+#define DATA_ONLY 0x01
+
+struct lms501kf03 {
+ struct device *dev;
+ struct spi_device *spi;
+ unsigned int power;
+ struct lcd_device *ld;
+ struct lcd_platform_data *lcd_pd;
+};
+
+static const unsigned char seq_password[] = {
+ 0xb9, 0xff, 0x83, 0x69,
+};
+
+static const unsigned char seq_power[] = {
+ 0xb1, 0x01, 0x00, 0x34, 0x06, 0x00, 0x14, 0x14, 0x20, 0x28,
+ 0x12, 0x12, 0x17, 0x0a, 0x01, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6,
+};
+
+static const unsigned char seq_display[] = {
+ 0xb2, 0x00, 0x2b, 0x03, 0x03, 0x70, 0x00, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x03, 0x00, 0x01,
+};
+
+static const unsigned char seq_rgb_if[] = {
+ 0xb3, 0x09,
+};
+
+static const unsigned char seq_display_inv[] = {
+ 0xb4, 0x01, 0x08, 0x77, 0x0e, 0x06,
+};
+
+static const unsigned char seq_vcom[] = {
+ 0xb6, 0x4c, 0x2e,
+};
+
+static const unsigned char seq_gate[] = {
+ 0xd5, 0x00, 0x05, 0x03, 0x29, 0x01, 0x07, 0x17, 0x68, 0x13,
+ 0x37, 0x20, 0x31, 0x8a, 0x46, 0x9b, 0x57, 0x13, 0x02, 0x75,
+ 0xb9, 0x64, 0xa8, 0x07, 0x0f, 0x04, 0x07,
+};
+
+static const unsigned char seq_panel[] = {
+ 0xcc, 0x02,
+};
+
+static const unsigned char seq_col_mod[] = {
+ 0x3a, 0x77,
+};
+
+static const unsigned char seq_w_gamma[] = {
+ 0xe0, 0x00, 0x04, 0x09, 0x0f, 0x1f, 0x3f, 0x1f, 0x2f, 0x0a,
+ 0x0f, 0x10, 0x16, 0x18, 0x16, 0x17, 0x0d, 0x15, 0x00, 0x04,
+ 0x09, 0x0f, 0x38, 0x3f, 0x20, 0x39, 0x0a, 0x0f, 0x10, 0x16,
+ 0x18, 0x16, 0x17, 0x0d, 0x15,
+};
+
+static const unsigned char seq_rgb_gamma[] = {
+ 0xc1, 0x01, 0x03, 0x07, 0x0f, 0x1a, 0x22, 0x2c, 0x33, 0x3c,
+ 0x46, 0x4f, 0x58, 0x60, 0x69, 0x71, 0x79, 0x82, 0x89, 0x92,
+ 0x9a, 0xa1, 0xa9, 0xb1, 0xb9, 0xc1, 0xc9, 0xcf, 0xd6, 0xde,
+ 0xe5, 0xec, 0xf3, 0xf9, 0xff, 0xdd, 0x39, 0x07, 0x1c, 0xcb,
+ 0xab, 0x5f, 0x49, 0x80, 0x03, 0x07, 0x0f, 0x19, 0x20, 0x2a,
+ 0x31, 0x39, 0x42, 0x4b, 0x53, 0x5b, 0x63, 0x6b, 0x73, 0x7b,
+ 0x83, 0x8a, 0x92, 0x9b, 0xa2, 0xaa, 0xb2, 0xba, 0xc2, 0xca,
+ 0xd0, 0xd8, 0xe1, 0xe8, 0xf0, 0xf8, 0xff, 0xf7, 0xd8, 0xbe,
+ 0xa7, 0x39, 0x40, 0x85, 0x8c, 0xc0, 0x04, 0x07, 0x0c, 0x17,
+ 0x1c, 0x23, 0x2b, 0x34, 0x3b, 0x43, 0x4c, 0x54, 0x5b, 0x63,
+ 0x6a, 0x73, 0x7a, 0x82, 0x8a, 0x91, 0x98, 0xa1, 0xa8, 0xb0,
+ 0xb7, 0xc1, 0xc9, 0xcf, 0xd9, 0xe3, 0xea, 0xf4, 0xff, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const unsigned char seq_up_dn[] = {
+ 0x36, 0x10,
+};
+
+static const unsigned char seq_sleep_in[] = {
+ 0x10,
+};
+
+static const unsigned char seq_sleep_out[] = {
+ 0x11,
+};
+
+static const unsigned char seq_display_on[] = {
+ 0x29,
+};
+
+static const unsigned char seq_display_off[] = {
+ 0x10,
+};
+
+static int lms501kf03_spi_write_byte(struct lms501kf03 *lcd, int addr, int data)
+{
+ u16 buf[1];
+ struct spi_message msg;
+
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ };
+
+ buf[0] = (addr << 8) | data;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(lcd->spi, &msg);
+}
+
+static int lms501kf03_spi_write(struct lms501kf03 *lcd, unsigned char address,
+ unsigned char command)
+{
+ return lms501kf03_spi_write_byte(lcd, address, command);
+}
+
+static int lms501kf03_panel_send_sequence(struct lms501kf03 *lcd,
+ const unsigned char *wbuf,
+ unsigned int len)
+{
+ int ret = 0, i = 0;
+
+ while (i < len) {
+ if (i == 0)
+ ret = lms501kf03_spi_write(lcd, COMMAND_ONLY, wbuf[i]);
+ else
+ ret = lms501kf03_spi_write(lcd, DATA_ONLY, wbuf[i]);
+ if (ret)
+ break;
+ i += 1;
+ }
+
+ return ret;
+}
+
+static int lms501kf03_ldi_init(struct lms501kf03 *lcd)
+{
+ int ret, i;
+ static const unsigned char *init_seq[] = {
+ seq_password,
+ seq_power,
+ seq_display,
+ seq_rgb_if,
+ seq_display_inv,
+ seq_vcom,
+ seq_gate,
+ seq_panel,
+ seq_col_mod,
+ seq_w_gamma,
+ seq_rgb_gamma,
+ seq_sleep_out,
+ };
+
+ static const unsigned int size_seq[] = {
+ ARRAY_SIZE(seq_password),
+ ARRAY_SIZE(seq_power),
+ ARRAY_SIZE(seq_display),
+ ARRAY_SIZE(seq_rgb_if),
+ ARRAY_SIZE(seq_display_inv),
+ ARRAY_SIZE(seq_vcom),
+ ARRAY_SIZE(seq_gate),
+ ARRAY_SIZE(seq_panel),
+ ARRAY_SIZE(seq_col_mod),
+ ARRAY_SIZE(seq_w_gamma),
+ ARRAY_SIZE(seq_rgb_gamma),
+ ARRAY_SIZE(seq_sleep_out),
+ };
+
+ for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
+ ret = lms501kf03_panel_send_sequence(lcd, init_seq[i],
+ size_seq[i]);
+ if (ret)
+ break;
+ }
+ /*
+ * According to the datasheet, 120ms delay time is required.
+ * After sleep out sequence, command is blocked for 120ms.
+ * Thus, LDI should wait for 120ms.
+ */
+ msleep(120);
+
+ return ret;
+}
+
+static int lms501kf03_ldi_enable(struct lms501kf03 *lcd)
+{
+ return lms501kf03_panel_send_sequence(lcd, seq_display_on,
+ ARRAY_SIZE(seq_display_on));
+}
+
+static int lms501kf03_ldi_disable(struct lms501kf03 *lcd)
+{
+ return lms501kf03_panel_send_sequence(lcd, seq_display_off,
+ ARRAY_SIZE(seq_display_off));
+}
+
+static int lms501kf03_power_is_on(int power)
+{
+ return power <= FB_BLANK_NORMAL;
+}
+
+static int lms501kf03_power_on(struct lms501kf03 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd;
+
+ pd = lcd->lcd_pd;
+
+ if (!pd->power_on) {
+ dev_err(lcd->dev, "power_on is NULL.\n");
+ return -EINVAL;
+ } else {
+ pd->power_on(lcd->ld, 1);
+ msleep(pd->power_on_delay);
+ }
+
+ if (!pd->reset) {
+ dev_err(lcd->dev, "reset is NULL.\n");
+ return -EINVAL;
+ } else {
+ pd->reset(lcd->ld);
+ msleep(pd->reset_delay);
+ }
+
+ ret = lms501kf03_ldi_init(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "failed to initialize ldi.\n");
+ return ret;
+ }
+
+ ret = lms501kf03_ldi_enable(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "failed to enable ldi.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lms501kf03_power_off(struct lms501kf03 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd;
+
+ pd = lcd->lcd_pd;
+
+ ret = lms501kf03_ldi_disable(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "lcd setting failed.\n");
+ return -EIO;
+ }
+
+ msleep(pd->power_off_delay);
+
+ pd->power_on(lcd->ld, 0);
+
+ return 0;
+}
+
+static int lms501kf03_power(struct lms501kf03 *lcd, int power)
+{
+ int ret = 0;
+
+ if (lms501kf03_power_is_on(power) &&
+ !lms501kf03_power_is_on(lcd->power))
+ ret = lms501kf03_power_on(lcd);
+ else if (!lms501kf03_power_is_on(power) &&
+ lms501kf03_power_is_on(lcd->power))
+ ret = lms501kf03_power_off(lcd);
+
+ if (!ret)
+ lcd->power = power;
+
+ return ret;
+}
+
+static int lms501kf03_get_power(struct lcd_device *ld)
+{
+ struct lms501kf03 *lcd = lcd_get_data(ld);
+
+ return lcd->power;
+}
+
+static int lms501kf03_set_power(struct lcd_device *ld, int power)
+{
+ struct lms501kf03 *lcd = lcd_get_data(ld);
+
+ if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
+ power != FB_BLANK_NORMAL) {
+ dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
+ return -EINVAL;
+ }
+
+ return lms501kf03_power(lcd, power);
+}
+
+static struct lcd_ops lms501kf03_lcd_ops = {
+ .get_power = lms501kf03_get_power,
+ .set_power = lms501kf03_set_power,
+};
+
+static int lms501kf03_probe(struct spi_device *spi)
+{
+ struct lms501kf03 *lcd = NULL;
+ struct lcd_device *ld = NULL;
+ int ret = 0;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct lms501kf03), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ /* lms501kf03 lcd panel uses 3-wire 9-bit SPI Mode. */
+ spi->bits_per_word = 9;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi setup failed.\n");
+ return ret;
+ }
+
+ lcd->spi = spi;
+ lcd->dev = &spi->dev;
+
+ lcd->lcd_pd = spi->dev.platform_data;
+ if (!lcd->lcd_pd) {
+ dev_err(&spi->dev, "platform data is NULL\n");
+ return -EINVAL;
+ }
+
+ ld = lcd_device_register("lms501kf03", &spi->dev, lcd,
+ &lms501kf03_lcd_ops);
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
+
+ lcd->ld = ld;
+
+ if (!lcd->lcd_pd->lcd_enabled) {
+ /*
+ * If the LCD panel was left off by the bootloader, the
+ * current status is powerdown, so power the panel on here.
+ */
+ lcd->power = FB_BLANK_POWERDOWN;
+
+ lms501kf03_power(lcd, FB_BLANK_UNBLANK);
+ } else {
+ lcd->power = FB_BLANK_UNBLANK;
+ }
+
+ spi_set_drvdata(spi, lcd);
+
+ dev_info(&spi->dev, "lms501kf03 panel driver has been probed.\n");
+
+ return 0;
+}
+
+static int lms501kf03_remove(struct spi_device *spi)
+{
+ struct lms501kf03 *lcd = spi_get_drvdata(spi);
+
+ lms501kf03_power(lcd, FB_BLANK_POWERDOWN);
+ lcd_device_unregister(lcd->ld);
+
+ return 0;
+}
+
+#if defined(CONFIG_PM)
+
+static int lms501kf03_suspend(struct spi_device *spi, pm_message_t mesg)
+{
+ struct lms501kf03 *lcd = spi_get_drvdata(spi);
+
+ dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
+
+ /*
+ * When the LCD panel is suspended it is powered off
+ * regardless of its current status.
+ */
+ return lms501kf03_power(lcd, FB_BLANK_POWERDOWN);
+}
+
+static int lms501kf03_resume(struct spi_device *spi)
+{
+ struct lms501kf03 *lcd = spi_get_drvdata(spi);
+
+ lcd->power = FB_BLANK_POWERDOWN;
+
+ return lms501kf03_power(lcd, FB_BLANK_UNBLANK);
+}
+#else
+#define lms501kf03_suspend NULL
+#define lms501kf03_resume NULL
+#endif
+
+static void lms501kf03_shutdown(struct spi_device *spi)
+{
+ struct lms501kf03 *lcd = spi_get_drvdata(spi);
+
+ lms501kf03_power(lcd, FB_BLANK_POWERDOWN);
+}
+
+static struct spi_driver lms501kf03_driver = {
+ .driver = {
+ .name = "lms501kf03",
+ .owner = THIS_MODULE,
+ },
+ .probe = lms501kf03_probe,
+ .remove = lms501kf03_remove,
+ .shutdown = lms501kf03_shutdown,
+ .suspend = lms501kf03_suspend,
+ .resume = lms501kf03_resume,
+};
+
+module_spi_driver(lms501kf03_driver);
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("lms501kf03 LCD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 6e4db0c874c8..7ae9ae6f4655 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -17,21 +17,48 @@
#include <linux/platform_data/lp855x.h>
#include <linux/pwm.h>
-/* Registers */
-#define BRIGHTNESS_CTRL 0x00
-#define DEVICE_CTRL 0x01
-#define EEPROM_START 0xA0
-#define EEPROM_END 0xA7
-#define EPROM_START 0xA0
-#define EPROM_END 0xAF
+/* LP8550/1/2/3/6 Registers */
+#define LP855X_BRIGHTNESS_CTRL 0x00
+#define LP855X_DEVICE_CTRL 0x01
+#define LP855X_EEPROM_START 0xA0
+#define LP855X_EEPROM_END 0xA7
+#define LP8556_EPROM_START 0xA0
+#define LP8556_EPROM_END 0xAF
+
+/* LP8557 Registers */
+#define LP8557_BL_CMD 0x00
+#define LP8557_BL_MASK 0x01
+#define LP8557_BL_ON 0x01
+#define LP8557_BL_OFF 0x00
+#define LP8557_BRIGHTNESS_CTRL 0x04
+#define LP8557_CONFIG 0x10
+#define LP8557_EPROM_START 0x10
+#define LP8557_EPROM_END 0x1E
#define BUF_SIZE 20
#define DEFAULT_BL_NAME "lcd-backlight"
#define MAX_BRIGHTNESS 255
+struct lp855x;
+
+/*
+ * struct lp855x_device_config
+ * @pre_init_device: init device function call before updating the brightness
+ * @reg_brightness: register address for brightness control
+ * @reg_devicectrl: register address for device control
+ * @post_init_device: late init device function call
+ */
+struct lp855x_device_config {
+ int (*pre_init_device)(struct lp855x *);
+ u8 reg_brightness;
+ u8 reg_devicectrl;
+ int (*post_init_device)(struct lp855x *);
+};
+
struct lp855x {
const char *chipname;
enum lp855x_chip_id chip_id;
+ struct lp855x_device_config *cfg;
struct i2c_client *client;
struct backlight_device *bl;
struct device *dev;
@@ -39,9 +66,15 @@ struct lp855x {
struct pwm_device *pwm;
};
-static int lp855x_read_byte(struct lp855x *lp, u8 reg, u8 *data)
+static int lp855x_write_byte(struct lp855x *lp, u8 reg, u8 data)
+{
+ return i2c_smbus_write_byte_data(lp->client, reg, data);
+}
+
+static int lp855x_update_bit(struct lp855x *lp, u8 reg, u8 mask, u8 data)
{
int ret;
+ u8 tmp;
ret = i2c_smbus_read_byte_data(lp->client, reg);
if (ret < 0) {
@@ -49,13 +82,11 @@ static int lp855x_read_byte(struct lp855x *lp, u8 reg, u8 *data)
return ret;
}
- *data = (u8)ret;
- return 0;
-}
+ tmp = (u8)ret;
+ tmp &= ~mask;
+ tmp |= data & mask;
-static int lp855x_write_byte(struct lp855x *lp, u8 reg, u8 data)
-{
- return i2c_smbus_write_byte_data(lp->client, reg, data);
+ return lp855x_write_byte(lp, reg, tmp);
}
static bool lp855x_is_valid_rom_area(struct lp855x *lp, u8 addr)
@@ -67,12 +98,16 @@ static bool lp855x_is_valid_rom_area(struct lp855x *lp, u8 addr)
case LP8551:
case LP8552:
case LP8553:
- start = EEPROM_START;
- end = EEPROM_END;
+ start = LP855X_EEPROM_START;
+ end = LP855X_EEPROM_END;
break;
case LP8556:
- start = EPROM_START;
- end = EPROM_END;
+ start = LP8556_EPROM_START;
+ end = LP8556_EPROM_END;
+ break;
+ case LP8557:
+ start = LP8557_EPROM_START;
+ end = LP8557_EPROM_END;
break;
default:
return false;
@@ -81,21 +116,76 @@ static bool lp855x_is_valid_rom_area(struct lp855x *lp, u8 addr)
return (addr >= start && addr <= end);
}
-static int lp855x_init_registers(struct lp855x *lp)
+static int lp8557_bl_off(struct lp855x *lp)
+{
+ /* BL_ON = 0 before updating EPROM settings */
+ return lp855x_update_bit(lp, LP8557_BL_CMD, LP8557_BL_MASK,
+ LP8557_BL_OFF);
+}
+
+static int lp8557_bl_on(struct lp855x *lp)
+{
+ /* BL_ON = 1 after updating EPROM settings */
+ return lp855x_update_bit(lp, LP8557_BL_CMD, LP8557_BL_MASK,
+ LP8557_BL_ON);
+}
+
+static struct lp855x_device_config lp855x_dev_cfg = {
+ .reg_brightness = LP855X_BRIGHTNESS_CTRL,
+ .reg_devicectrl = LP855X_DEVICE_CTRL,
+};
+
+static struct lp855x_device_config lp8557_dev_cfg = {
+ .reg_brightness = LP8557_BRIGHTNESS_CTRL,
+ .reg_devicectrl = LP8557_CONFIG,
+ .pre_init_device = lp8557_bl_off,
+ .post_init_device = lp8557_bl_on,
+};
+
+/*
+ * Device specific configuration flow
+ *
+ * a) pre_init_device (optional)
+ * b) update the brightness register
+ * c) update device control register
+ * d) update ROM area (optional)
+ * e) post_init_device (optional)
+ *
+ */
+static int lp855x_configure(struct lp855x *lp)
{
u8 val, addr;
int i, ret;
struct lp855x_platform_data *pd = lp->pdata;
+ switch (lp->chip_id) {
+ case LP8550 ... LP8556:
+ lp->cfg = &lp855x_dev_cfg;
+ break;
+ case LP8557:
+ lp->cfg = &lp8557_dev_cfg;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (lp->cfg->pre_init_device) {
+ ret = lp->cfg->pre_init_device(lp);
+ if (ret) {
+ dev_err(lp->dev, "pre init device err: %d\n", ret);
+ goto err;
+ }
+ }
+
val = pd->initial_brightness;
- ret = lp855x_write_byte(lp, BRIGHTNESS_CTRL, val);
+ ret = lp855x_write_byte(lp, lp->cfg->reg_brightness, val);
if (ret)
- return ret;
+ goto err;
val = pd->device_control;
- ret = lp855x_write_byte(lp, DEVICE_CTRL, val);
+ ret = lp855x_write_byte(lp, lp->cfg->reg_devicectrl, val);
if (ret)
- return ret;
+ goto err;
if (pd->load_new_rom_data && pd->size_program) {
for (i = 0; i < pd->size_program; i++) {
@@ -106,10 +196,21 @@ static int lp855x_init_registers(struct lp855x *lp)
ret = lp855x_write_byte(lp, addr, val);
if (ret)
- return ret;
+ goto err;
+ }
+ }
+
+ if (lp->cfg->post_init_device) {
+ ret = lp->cfg->post_init_device(lp);
+ if (ret) {
+ dev_err(lp->dev, "post init device err: %d\n", ret);
+ goto err;
}
}
+ return 0;
+
+err:
return ret;
}
@@ -151,7 +252,7 @@ static int lp855x_bl_update_status(struct backlight_device *bl)
} else if (mode == REGISTER_BASED) {
u8 val = bl->props.brightness;
- lp855x_write_byte(lp, BRIGHTNESS_CTRL, val);
+ lp855x_write_byte(lp, lp->cfg->reg_brightness, val);
}
return 0;
@@ -159,16 +260,6 @@ static int lp855x_bl_update_status(struct backlight_device *bl)
static int lp855x_bl_get_brightness(struct backlight_device *bl)
{
- struct lp855x *lp = bl_get_data(bl);
- enum lp855x_brightness_ctrl_mode mode = lp->pdata->mode;
-
- if (mode == REGISTER_BASED) {
- u8 val = 0;
-
- lp855x_read_byte(lp, BRIGHTNESS_CTRL, &val);
- bl->props.brightness = val;
- }
-
return bl->props.brightness;
}
@@ -271,11 +362,10 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
lp->chip_id = id->driver_data;
i2c_set_clientdata(cl, lp);
- ret = lp855x_init_registers(lp);
+ ret = lp855x_configure(lp);
if (ret) {
- dev_err(lp->dev, "i2c communication err: %d", ret);
- if (mode == REGISTER_BASED)
- goto err_dev;
+ dev_err(lp->dev, "device config err: %d", ret);
+ goto err_dev;
}
ret = lp855x_backlight_register(lp);
@@ -318,6 +408,7 @@ static const struct i2c_device_id lp855x_ids[] = {
{"lp8552", LP8552},
{"lp8553", LP8553},
{"lp8556", LP8556},
+ {"lp8557", LP8557},
{ }
};
MODULE_DEVICE_TABLE(i2c, lp855x_ids);
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
new file mode 100644
index 000000000000..4bb8b4f140cf
--- /dev/null
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -0,0 +1,333 @@
+/*
+ * TI LP8788 MFD - backlight driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/mfd/lp8788.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+/* Register address */
+#define LP8788_BL_CONFIG 0x96
+#define LP8788_BL_EN BIT(0)
+#define LP8788_BL_PWM_INPUT_EN BIT(5)
+#define LP8788_BL_FULLSCALE_SHIFT 2
+#define LP8788_BL_DIM_MODE_SHIFT 1
+#define LP8788_BL_PWM_POLARITY_SHIFT 6
+
+#define LP8788_BL_BRIGHTNESS 0x97
+
+#define LP8788_BL_RAMP 0x98
+#define LP8788_BL_RAMP_RISE_SHIFT 4
+
+#define MAX_BRIGHTNESS 127
+#define DEFAULT_BL_NAME "lcd-backlight"
+
+struct lp8788_bl_config {
+ enum lp8788_bl_ctrl_mode bl_mode;
+ enum lp8788_bl_dim_mode dim_mode;
+ enum lp8788_bl_full_scale_current full_scale;
+ enum lp8788_bl_ramp_step rise_time;
+ enum lp8788_bl_ramp_step fall_time;
+ enum pwm_polarity pwm_pol;
+};
+
+struct lp8788_bl {
+ struct lp8788 *lp;
+ struct backlight_device *bl_dev;
+ struct lp8788_backlight_platform_data *pdata;
+ enum lp8788_bl_ctrl_mode mode;
+ struct pwm_device *pwm;
+};
+
+struct lp8788_bl_config default_bl_config = {
+ .bl_mode = LP8788_BL_REGISTER_ONLY,
+ .dim_mode = LP8788_DIM_EXPONENTIAL,
+ .full_scale = LP8788_FULLSCALE_1900uA,
+ .rise_time = LP8788_RAMP_8192us,
+ .fall_time = LP8788_RAMP_8192us,
+ .pwm_pol = PWM_POLARITY_NORMAL,
+};
+
+static inline bool is_brightness_ctrl_by_pwm(enum lp8788_bl_ctrl_mode mode)
+{
+ return (mode == LP8788_BL_COMB_PWM_BASED);
+}
+
+static inline bool is_brightness_ctrl_by_register(enum lp8788_bl_ctrl_mode mode)
+{
+ return (mode == LP8788_BL_REGISTER_ONLY ||
+ mode == LP8788_BL_COMB_REGISTER_BASED);
+}
+
+static int lp8788_backlight_configure(struct lp8788_bl *bl)
+{
+ struct lp8788_backlight_platform_data *pdata = bl->pdata;
+ struct lp8788_bl_config *cfg = &default_bl_config;
+ int ret;
+ u8 val;
+
+ /*
+ * Update chip configuration if platform data exists,
+ * otherwise use the default settings.
+ */
+ if (pdata) {
+ cfg->bl_mode = pdata->bl_mode;
+ cfg->dim_mode = pdata->dim_mode;
+ cfg->full_scale = pdata->full_scale;
+ cfg->rise_time = pdata->rise_time;
+ cfg->fall_time = pdata->fall_time;
+ cfg->pwm_pol = pdata->pwm_pol;
+ }
+
+ /* Brightness ramp up/down */
+ val = (cfg->rise_time << LP8788_BL_RAMP_RISE_SHIFT) | cfg->fall_time;
+ ret = lp8788_write_byte(bl->lp, LP8788_BL_RAMP, val);
+ if (ret)
+ return ret;
+
+ /* Fullscale current setting */
+ val = (cfg->full_scale << LP8788_BL_FULLSCALE_SHIFT) |
+ (cfg->dim_mode << LP8788_BL_DIM_MODE_SHIFT);
+
+ /* Brightness control mode */
+ switch (cfg->bl_mode) {
+ case LP8788_BL_REGISTER_ONLY:
+ val |= LP8788_BL_EN;
+ break;
+ case LP8788_BL_COMB_PWM_BASED:
+ case LP8788_BL_COMB_REGISTER_BASED:
+ val |= LP8788_BL_EN | LP8788_BL_PWM_INPUT_EN |
+ (cfg->pwm_pol << LP8788_BL_PWM_POLARITY_SHIFT);
+ break;
+ default:
+ dev_err(bl->lp->dev, "invalid mode: %d\n", cfg->bl_mode);
+ return -EINVAL;
+ }
+
+ bl->mode = cfg->bl_mode;
+
+ return lp8788_write_byte(bl->lp, LP8788_BL_CONFIG, val);
+}
+
+static void lp8788_pwm_ctrl(struct lp8788_bl *bl, int br, int max_br)
+{
+ unsigned int period;
+ unsigned int duty;
+ struct device *dev;
+ struct pwm_device *pwm;
+
+ if (!bl->pdata)
+ return;
+
+ period = bl->pdata->period_ns;
+ duty = br * period / max_br;
+ dev = bl->lp->dev;
+
+ /* request PWM device with the consumer name */
+ if (!bl->pwm) {
+ pwm = devm_pwm_get(dev, LP8788_DEV_BACKLIGHT);
+ if (IS_ERR(pwm)) {
+ dev_err(dev, "can not get PWM device\n");
+ return;
+ }
+
+ bl->pwm = pwm;
+ }
+
+ pwm_config(bl->pwm, duty, period);
+ if (duty)
+ pwm_enable(bl->pwm);
+ else
+ pwm_disable(bl->pwm);
+}
+
+static int lp8788_bl_update_status(struct backlight_device *bl_dev)
+{
+ struct lp8788_bl *bl = bl_get_data(bl_dev);
+ enum lp8788_bl_ctrl_mode mode = bl->mode;
+
+ if (bl_dev->props.state & BL_CORE_SUSPENDED)
+ bl_dev->props.brightness = 0;
+
+ if (is_brightness_ctrl_by_pwm(mode)) {
+ int brt = bl_dev->props.brightness;
+ int max = bl_dev->props.max_brightness;
+
+ lp8788_pwm_ctrl(bl, brt, max);
+ } else if (is_brightness_ctrl_by_register(mode)) {
+ u8 brt = bl_dev->props.brightness;
+
+ lp8788_write_byte(bl->lp, LP8788_BL_BRIGHTNESS, brt);
+ }
+
+ return 0;
+}
+
+static int lp8788_bl_get_brightness(struct backlight_device *bl_dev)
+{
+ return bl_dev->props.brightness;
+}
+
+static const struct backlight_ops lp8788_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = lp8788_bl_update_status,
+ .get_brightness = lp8788_bl_get_brightness,
+};
+
+static int lp8788_backlight_register(struct lp8788_bl *bl)
+{
+ struct backlight_device *bl_dev;
+ struct backlight_properties props;
+ struct lp8788_backlight_platform_data *pdata = bl->pdata;
+ int init_brt;
+ char *name;
+
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = MAX_BRIGHTNESS;
+
+ /* Initial brightness */
+ if (pdata)
+ init_brt = min_t(int, pdata->initial_brightness,
+ props.max_brightness);
+ else
+ init_brt = 0;
+
+ props.brightness = init_brt;
+
+ /* Backlight device name */
+ if (!pdata || !pdata->name)
+ name = DEFAULT_BL_NAME;
+ else
+ name = pdata->name;
+
+ bl_dev = backlight_device_register(name, bl->lp->dev, bl,
+ &lp8788_bl_ops, &props);
+ if (IS_ERR(bl_dev))
+ return PTR_ERR(bl_dev);
+
+ bl->bl_dev = bl_dev;
+
+ return 0;
+}
+
+static void lp8788_backlight_unregister(struct lp8788_bl *bl)
+{
+ struct backlight_device *bl_dev = bl->bl_dev;
+
+ if (bl_dev)
+ backlight_device_unregister(bl_dev);
+}
+
+static ssize_t lp8788_get_bl_ctl_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lp8788_bl *bl = dev_get_drvdata(dev);
+ enum lp8788_bl_ctrl_mode mode = bl->mode;
+ char *strmode;
+
+ if (is_brightness_ctrl_by_pwm(mode))
+ strmode = "PWM based";
+ else if (is_brightness_ctrl_by_register(mode))
+ strmode = "Register based";
+ else
+ strmode = "Invalid mode";
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", strmode);
+}
+
+static DEVICE_ATTR(bl_ctl_mode, S_IRUGO, lp8788_get_bl_ctl_mode, NULL);
+
+static struct attribute *lp8788_attributes[] = {
+ &dev_attr_bl_ctl_mode.attr,
+ NULL,
+};
+
+static const struct attribute_group lp8788_attr_group = {
+ .attrs = lp8788_attributes,
+};
+
+static int lp8788_backlight_probe(struct platform_device *pdev)
+{
+ struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+ struct lp8788_bl *bl;
+ int ret;
+
+ bl = devm_kzalloc(lp->dev, sizeof(struct lp8788_bl), GFP_KERNEL);
+ if (!bl)
+ return -ENOMEM;
+
+ bl->lp = lp;
+ if (lp->pdata)
+ bl->pdata = lp->pdata->bl_pdata;
+
+ platform_set_drvdata(pdev, bl);
+
+ ret = lp8788_backlight_configure(bl);
+ if (ret) {
+ dev_err(lp->dev, "backlight config err: %d\n", ret);
+ goto err_dev;
+ }
+
+ ret = lp8788_backlight_register(bl);
+ if (ret) {
+ dev_err(lp->dev, "register backlight err: %d\n", ret);
+ goto err_dev;
+ }
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &lp8788_attr_group);
+ if (ret) {
+ dev_err(lp->dev, "register sysfs err: %d\n", ret);
+ goto err_sysfs;
+ }
+
+ backlight_update_status(bl->bl_dev);
+
+ return 0;
+
+err_sysfs:
+ lp8788_backlight_unregister(bl);
+err_dev:
+ return ret;
+}
+
+static int lp8788_backlight_remove(struct platform_device *pdev)
+{
+ struct lp8788_bl *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bl_dev = bl->bl_dev;
+
+ bl_dev->props.brightness = 0;
+ backlight_update_status(bl_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &lp8788_attr_group);
+ lp8788_backlight_unregister(bl);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver lp8788_bl_driver = {
+ .probe = lp8788_backlight_probe,
+ .remove = lp8788_backlight_remove,
+ .driver = {
+ .name = LP8788_DEV_BACKLIGHT,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(lp8788_bl_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP8788 Backlight Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lp8788-backlight");
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 226d813edf01..c0b4b8f2de98 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -252,7 +252,7 @@ static int ltv350qv_probe(struct spi_device *spi)
if (ret)
goto out_unregister;
- dev_set_drvdata(&spi->dev, lcd);
+ spi_set_drvdata(spi, lcd);
return 0;
@@ -263,7 +263,7 @@ out_unregister:
static int ltv350qv_remove(struct spi_device *spi)
{
- struct ltv350qv *lcd = dev_get_drvdata(&spi->dev);
+ struct ltv350qv *lcd = spi_get_drvdata(spi);
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->ld);
@@ -274,14 +274,14 @@ static int ltv350qv_remove(struct spi_device *spi)
#ifdef CONFIG_PM
static int ltv350qv_suspend(struct spi_device *spi, pm_message_t state)
{
- struct ltv350qv *lcd = dev_get_drvdata(&spi->dev);
+ struct ltv350qv *lcd = spi_get_drvdata(spi);
return ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
}
static int ltv350qv_resume(struct spi_device *spi)
{
- struct ltv350qv *lcd = dev_get_drvdata(&spi->dev);
+ struct ltv350qv *lcd = spi_get_drvdata(spi);
return ltv350qv_power(lcd, FB_BLANK_UNBLANK);
}
@@ -293,7 +293,7 @@ static int ltv350qv_resume(struct spi_device *spi)
/* Power down all displays on reboot, poweroff or halt */
static void ltv350qv_shutdown(struct spi_device *spi)
{
- struct ltv350qv *lcd = dev_get_drvdata(&spi->dev);
+ struct ltv350qv *lcd = spi_get_drvdata(spi);
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
}
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 2c9bce050aa9..5ca11b066b7e 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -101,6 +101,29 @@ static const struct backlight_ops max8925_backlight_ops = {
.get_brightness = max8925_backlight_get_brightness,
};
+#ifdef CONFIG_OF
+static int max8925_backlight_dt_init(struct platform_device *pdev,
+ struct max8925_backlight_pdata *pdata)
+{
+ struct device_node *nproot = pdev->dev.parent->of_node, *np;
+ int dual_string;
+
+ if (!nproot)
+ return -ENODEV;
+ np = of_find_node_by_name(nproot, "backlight");
+ if (!np) {
+ dev_err(&pdev->dev, "failed to find backlight node\n");
+ return -ENODEV;
+ }
+
+ of_property_read_u32(np, "maxim,max8925-dual-string", &dual_string);
+ pdata->dual_string = dual_string;
+ return 0;
+}
+#else
+#define max8925_backlight_dt_init(x, y) (-1)
+#endif
+
static int max8925_backlight_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -147,6 +170,13 @@ static int max8925_backlight_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bl);
value = 0;
+ if (pdev->dev.parent->of_node && !pdata) {
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct max8925_backlight_pdata),
+ GFP_KERNEL);
+ max8925_backlight_dt_init(pdev, pdata);
+ }
+
if (pdata) {
if (pdata->lxw_scl)
value |= (1 << 7);
@@ -158,7 +188,6 @@ static int max8925_backlight_probe(struct platform_device *pdev)
ret = max8925_set_bits(chip->i2c, data->reg_mode_cntl, 0xfe, value);
if (ret < 0)
goto out_brt;
-
backlight_update_status(bl);
return 0;
out_brt:
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index af31c269baa6..627110163067 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -77,7 +77,7 @@ static void omapbl_blank(struct omap_backlight *bl, int mode)
static int omapbl_suspend(struct platform_device *pdev, pm_message_t state)
{
struct backlight_device *dev = platform_get_drvdata(pdev);
- struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+ struct omap_backlight *bl = bl_get_data(dev);
omapbl_blank(bl, FB_BLANK_POWERDOWN);
return 0;
@@ -86,7 +86,7 @@ static int omapbl_suspend(struct platform_device *pdev, pm_message_t state)
static int omapbl_resume(struct platform_device *pdev)
{
struct backlight_device *dev = platform_get_drvdata(pdev);
- struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+ struct omap_backlight *bl = bl_get_data(dev);
omapbl_blank(bl, bl->powermode);
return 0;
@@ -98,7 +98,7 @@ static int omapbl_resume(struct platform_device *pdev)
static int omapbl_set_power(struct backlight_device *dev, int state)
{
- struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+ struct omap_backlight *bl = bl_get_data(dev);
omapbl_blank(bl, state);
bl->powermode = state;
@@ -108,7 +108,7 @@ static int omapbl_set_power(struct backlight_device *dev, int state)
static int omapbl_update_status(struct backlight_device *dev)
{
- struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+ struct omap_backlight *bl = bl_get_data(dev);
if (bl->current_intensity != dev->props.brightness) {
if (bl->powermode == FB_BLANK_UNBLANK)
@@ -124,7 +124,7 @@ static int omapbl_update_status(struct backlight_device *dev)
static int omapbl_get_intensity(struct backlight_device *dev)
{
- struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+ struct omap_backlight *bl = bl_get_data(dev);
return bl->current_intensity;
}
diff --git a/drivers/video/backlight/ot200_bl.c b/drivers/video/backlight/ot200_bl.c
index 469cf0f109d2..fdbb6ee5027c 100644
--- a/drivers/video/backlight/ot200_bl.c
+++ b/drivers/video/backlight/ot200_bl.c
@@ -14,6 +14,7 @@
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/gpio.h>
+#include <linux/platform_device.h>
#include <linux/cs5535.h>
static struct cs5535_mfgpt_timer *pwm_timer;
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 069983ca49ff..fa00304a63d8 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -37,14 +37,13 @@ struct pwm_bl_data {
static int pwm_backlight_update_status(struct backlight_device *bl)
{
- struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
int brightness = bl->props.brightness;
int max = bl->props.max_brightness;
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & BL_CORE_FBBLANK)
brightness = 0;
if (pb->notify)
@@ -83,7 +82,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
static int pwm_backlight_check_fb(struct backlight_device *bl,
struct fb_info *info)
{
- struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
return !pb->check_fb || pb->check_fb(pb->dev, info);
}
@@ -135,12 +134,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
if (ret < 0)
return ret;
- if (value >= data->max_brightness) {
- dev_warn(dev, "invalid default brightness level: %u, using %u\n",
- value, data->max_brightness - 1);
- value = data->max_brightness - 1;
- }
-
data->dft_brightness = value;
data->max_brightness--;
}
@@ -249,6 +242,13 @@ static int pwm_backlight_probe(struct platform_device *pdev)
goto err_alloc;
}
+ if (data->dft_brightness > data->max_brightness) {
+ dev_warn(&pdev->dev,
+ "invalid default brightness level: %u, using %u\n",
+ data->dft_brightness, data->max_brightness);
+ data->dft_brightness = data->max_brightness;
+ }
+
bl->props.brightness = data->dft_brightness;
backlight_update_status(bl);
@@ -264,7 +264,7 @@ err_alloc:
static int pwm_backlight_remove(struct platform_device *pdev)
{
struct backlight_device *bl = platform_get_drvdata(pdev);
- struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
backlight_device_unregister(bl);
pwm_config(pb->pwm, 0, pb->period);
@@ -278,7 +278,7 @@ static int pwm_backlight_remove(struct platform_device *pdev)
static int pwm_backlight_suspend(struct device *dev)
{
struct backlight_device *bl = dev_get_drvdata(dev);
- struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
if (pb->notify)
pb->notify(pb->dev, 0);
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index 3e1c1135f6df..9c2677f0ef7d 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -9,28 +9,19 @@
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/wait.h>
-#include <linux/fb.h>
+#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/fb.h>
#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/lcd.h>
-#include <linux/backlight.h>
#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
#include "s6e63m0_gamma.h"
@@ -43,8 +34,6 @@
#define MIN_BRIGHTNESS 0
#define MAX_BRIGHTNESS 10
-#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
-
struct s6e63m0 {
struct device *dev;
struct spi_device *spi;
@@ -57,7 +46,7 @@ struct s6e63m0 {
struct lcd_platform_data *lcd_pd;
};
-static const unsigned short SEQ_PANEL_CONDITION_SET[] = {
+static const unsigned short seq_panel_condition_set[] = {
0xF8, 0x01,
DATA_ONLY, 0x27,
DATA_ONLY, 0x27,
@@ -76,7 +65,7 @@ static const unsigned short SEQ_PANEL_CONDITION_SET[] = {
ENDDEF, 0x0000
};
-static const unsigned short SEQ_DISPLAY_CONDITION_SET[] = {
+static const unsigned short seq_display_condition_set[] = {
0xf2, 0x02,
DATA_ONLY, 0x03,
DATA_ONLY, 0x1c,
@@ -90,7 +79,7 @@ static const unsigned short SEQ_DISPLAY_CONDITION_SET[] = {
ENDDEF, 0x0000
};
-static const unsigned short SEQ_GAMMA_SETTING[] = {
+static const unsigned short seq_gamma_setting[] = {
0xfa, 0x00,
DATA_ONLY, 0x18,
DATA_ONLY, 0x08,
@@ -119,7 +108,7 @@ static const unsigned short SEQ_GAMMA_SETTING[] = {
ENDDEF, 0x0000
};
-static const unsigned short SEQ_ETC_CONDITION_SET[] = {
+static const unsigned short seq_etc_condition_set[] = {
0xf6, 0x00,
DATA_ONLY, 0x8c,
DATA_ONLY, 0x07,
@@ -318,47 +307,47 @@ static const unsigned short SEQ_ETC_CONDITION_SET[] = {
ENDDEF, 0x0000
};
-static const unsigned short SEQ_ACL_ON[] = {
+static const unsigned short seq_acl_on[] = {
/* ACL on */
0xc0, 0x01,
ENDDEF, 0x0000
};
-static const unsigned short SEQ_ACL_OFF[] = {
+static const unsigned short seq_acl_off[] = {
/* ACL off */
0xc0, 0x00,
ENDDEF, 0x0000
};
-static const unsigned short SEQ_ELVSS_ON[] = {
+static const unsigned short seq_elvss_on[] = {
/* ELVSS on */
0xb1, 0x0b,
ENDDEF, 0x0000
};
-static const unsigned short SEQ_ELVSS_OFF[] = {
+static const unsigned short seq_elvss_off[] = {
/* ELVSS off */
0xb1, 0x0a,
ENDDEF, 0x0000
};
-static const unsigned short SEQ_STAND_BY_OFF[] = {
+static const unsigned short seq_stand_by_off[] = {
0x11, COMMAND_ONLY,
ENDDEF, 0x0000
};
-static const unsigned short SEQ_STAND_BY_ON[] = {
+static const unsigned short seq_stand_by_on[] = {
0x10, COMMAND_ONLY,
ENDDEF, 0x0000
};
-static const unsigned short SEQ_DISPLAY_ON[] = {
+static const unsigned short seq_display_on[] = {
0x29, COMMAND_ONLY,
ENDDEF, 0x0000
@@ -406,8 +395,9 @@ static int s6e63m0_panel_send_sequence(struct s6e63m0 *lcd,
ret = s6e63m0_spi_write(lcd, wbuf[i], wbuf[i+1]);
if (ret)
break;
- } else
- udelay(wbuf[i+1]*1000);
+ } else {
+ msleep(wbuf[i+1]);
+ }
i += 2;
}
@@ -457,12 +447,12 @@ static int s6e63m0_ldi_init(struct s6e63m0 *lcd)
{
int ret, i;
const unsigned short *init_seq[] = {
- SEQ_PANEL_CONDITION_SET,
- SEQ_DISPLAY_CONDITION_SET,
- SEQ_GAMMA_SETTING,
- SEQ_ETC_CONDITION_SET,
- SEQ_ACL_ON,
- SEQ_ELVSS_ON,
+ seq_panel_condition_set,
+ seq_display_condition_set,
+ seq_gamma_setting,
+ seq_etc_condition_set,
+ seq_acl_on,
+ seq_elvss_on,
};
for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
@@ -478,8 +468,8 @@ static int s6e63m0_ldi_enable(struct s6e63m0 *lcd)
{
int ret = 0, i;
const unsigned short *enable_seq[] = {
- SEQ_STAND_BY_OFF,
- SEQ_DISPLAY_ON,
+ seq_stand_by_off,
+ seq_display_on,
};
for (i = 0; i < ARRAY_SIZE(enable_seq); i++) {
@@ -495,43 +485,39 @@ static int s6e63m0_ldi_disable(struct s6e63m0 *lcd)
{
int ret;
- ret = s6e63m0_panel_send_sequence(lcd, SEQ_STAND_BY_ON);
+ ret = s6e63m0_panel_send_sequence(lcd, seq_stand_by_on);
return ret;
}
+static int s6e63m0_power_is_on(int power)
+{
+ return power <= FB_BLANK_NORMAL;
+}
+
static int s6e63m0_power_on(struct s6e63m0 *lcd)
{
int ret = 0;
- struct lcd_platform_data *pd = NULL;
- struct backlight_device *bd = NULL;
+ struct lcd_platform_data *pd;
+ struct backlight_device *bd;
pd = lcd->lcd_pd;
- if (!pd) {
- dev_err(lcd->dev, "platform data is NULL.\n");
- return -EFAULT;
- }
-
bd = lcd->bd;
- if (!bd) {
- dev_err(lcd->dev, "backlight device is NULL.\n");
- return -EFAULT;
- }
if (!pd->power_on) {
dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
} else {
pd->power_on(lcd->ld, 1);
- mdelay(pd->power_on_delay);
+ msleep(pd->power_on_delay);
}
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
} else {
pd->reset(lcd->ld);
- mdelay(pd->reset_delay);
+ msleep(pd->reset_delay);
}
ret = s6e63m0_ldi_init(lcd);
@@ -558,14 +544,10 @@ static int s6e63m0_power_on(struct s6e63m0 *lcd)
static int s6e63m0_power_off(struct s6e63m0 *lcd)
{
- int ret = 0;
- struct lcd_platform_data *pd = NULL;
+ int ret;
+ struct lcd_platform_data *pd;
pd = lcd->lcd_pd;
- if (!pd) {
- dev_err(lcd->dev, "platform data is NULL.\n");
- return -EFAULT;
- }
ret = s6e63m0_ldi_disable(lcd);
if (ret) {
@@ -573,13 +555,9 @@ static int s6e63m0_power_off(struct s6e63m0 *lcd)
return -EIO;
}
- mdelay(pd->power_off_delay);
+ msleep(pd->power_off_delay);
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
- } else
- pd->power_on(lcd->ld, 0);
+ pd->power_on(lcd->ld, 0);
return 0;
}
@@ -588,9 +566,9 @@ static int s6e63m0_power(struct s6e63m0 *lcd, int power)
{
int ret = 0;
- if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power))
+ if (s6e63m0_power_is_on(power) && !s6e63m0_power_is_on(lcd->power))
ret = s6e63m0_power_on(lcd);
- else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power))
+ else if (!s6e63m0_power_is_on(power) && s6e63m0_power_is_on(lcd->power))
ret = s6e63m0_power_off(lcd);
if (!ret)
@@ -760,7 +738,7 @@ static int s6e63m0_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
- return -EFAULT;
+ return -EINVAL;
}
ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
@@ -788,7 +766,7 @@ static int s6e63m0_probe(struct spi_device *spi)
* know that.
*/
lcd->gamma_table_count =
- sizeof(gamma_table) / (MAX_GAMMA_LEVEL * sizeof(int));
+ sizeof(gamma_table) / (MAX_GAMMA_LEVEL * sizeof(int *));
ret = device_create_file(&(spi->dev), &dev_attr_gamma_mode);
if (ret < 0)
@@ -811,10 +789,11 @@ static int s6e63m0_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
s6e63m0_power(lcd, FB_BLANK_UNBLANK);
- } else
+ } else {
lcd->power = FB_BLANK_UNBLANK;
+ }
- dev_set_drvdata(&spi->dev, lcd);
+ spi_set_drvdata(spi, lcd);
dev_info(&spi->dev, "s6e63m0 panel driver has been probed.\n");
@@ -827,7 +806,7 @@ out_lcd_unregister:
static int s6e63m0_remove(struct spi_device *spi)
{
- struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
+ struct s6e63m0 *lcd = spi_get_drvdata(spi);
s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
device_remove_file(&spi->dev, &dev_attr_gamma_table);
@@ -839,44 +818,26 @@ static int s6e63m0_remove(struct spi_device *spi)
}
#if defined(CONFIG_PM)
-static unsigned int before_power;
-
static int s6e63m0_suspend(struct spi_device *spi, pm_message_t mesg)
{
- int ret = 0;
- struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
+ struct s6e63m0 *lcd = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
- before_power = lcd->power;
-
/*
* when lcd panel is suspend, lcd panel becomes off
* regardless of status.
*/
- ret = s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
-
- return ret;
+ return s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
}
static int s6e63m0_resume(struct spi_device *spi)
{
- int ret = 0;
- struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
-
- /*
- * after suspended, if lcd panel status is FB_BLANK_UNBLANK
- * (at that time, before_power is FB_BLANK_UNBLANK) then
- * it changes that status to FB_BLANK_POWERDOWN to get lcd on.
- */
- if (before_power == FB_BLANK_UNBLANK)
- lcd->power = FB_BLANK_POWERDOWN;
+ struct s6e63m0 *lcd = spi_get_drvdata(spi);
- dev_dbg(&spi->dev, "before_power = %d\n", before_power);
+ lcd->power = FB_BLANK_POWERDOWN;
- ret = s6e63m0_power(lcd, before_power);
-
- return ret;
+ return s6e63m0_power(lcd, FB_BLANK_UNBLANK);
}
#else
#define s6e63m0_suspend NULL
@@ -886,7 +847,7 @@ static int s6e63m0_resume(struct spi_device *spi)
/* Power down all displays on reboot, poweroff or halt. */
static void s6e63m0_shutdown(struct spi_device *spi)
{
- struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
+ struct s6e63m0 *lcd = spi_get_drvdata(spi);
s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
}
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index ad2325f3d652..00162085eec0 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -390,7 +390,7 @@ static int tdo24m_probe(struct spi_device *spi)
if (IS_ERR(lcd->lcd_dev))
return PTR_ERR(lcd->lcd_dev);
- dev_set_drvdata(&spi->dev, lcd);
+ spi_set_drvdata(spi, lcd);
err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
if (err)
goto out_unregister;
@@ -404,7 +404,7 @@ out_unregister:
static int tdo24m_remove(struct spi_device *spi)
{
- struct tdo24m *lcd = dev_get_drvdata(&spi->dev);
+ struct tdo24m *lcd = spi_get_drvdata(spi);
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->lcd_dev);
@@ -415,14 +415,14 @@ static int tdo24m_remove(struct spi_device *spi)
#ifdef CONFIG_PM
static int tdo24m_suspend(struct spi_device *spi, pm_message_t state)
{
- struct tdo24m *lcd = dev_get_drvdata(&spi->dev);
+ struct tdo24m *lcd = spi_get_drvdata(spi);
return tdo24m_power(lcd, FB_BLANK_POWERDOWN);
}
static int tdo24m_resume(struct spi_device *spi)
{
- struct tdo24m *lcd = dev_get_drvdata(&spi->dev);
+ struct tdo24m *lcd = spi_get_drvdata(spi);
return tdo24m_power(lcd, FB_BLANK_UNBLANK);
}
@@ -434,7 +434,7 @@ static int tdo24m_resume(struct spi_device *spi)
/* Power down all displays on reboot, poweroff or halt */
static void tdo24m_shutdown(struct spi_device *spi)
{
- struct tdo24m *lcd = dev_get_drvdata(&spi->dev);
+ struct tdo24m *lcd = spi_get_drvdata(spi);
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
}
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 588682cc1614..2326fa810c59 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -54,7 +54,7 @@ static void tosa_bl_set_backlight(struct tosa_bl_data *data, int brightness)
static int tosa_bl_update_status(struct backlight_device *dev)
{
struct backlight_properties *props = &dev->props;
- struct tosa_bl_data *data = dev_get_drvdata(&dev->dev);
+ struct tosa_bl_data *data = bl_get_data(dev);
int power = max(props->power, props->fb_blank);
int brightness = props->brightness;
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 96bae941585a..666fe2593ea4 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -193,7 +193,7 @@ static int tosa_lcd_probe(struct spi_device *spi)
return ret;
data->spi = spi;
- dev_set_drvdata(&spi->dev, data);
+ spi_set_drvdata(spi, data);
ret = devm_gpio_request_one(&spi->dev, TOSA_GPIO_TG_ON,
GPIOF_OUT_INIT_LOW, "tg #pwr");
@@ -220,13 +220,13 @@ static int tosa_lcd_probe(struct spi_device *spi)
err_register:
tosa_lcd_tg_off(data);
err_gpio_tg:
- dev_set_drvdata(&spi->dev, NULL);
+ spi_set_drvdata(spi, NULL);
return ret;
}
static int tosa_lcd_remove(struct spi_device *spi)
{
- struct tosa_lcd_data *data = dev_get_drvdata(&spi->dev);
+ struct tosa_lcd_data *data = spi_get_drvdata(spi);
lcd_device_unregister(data->lcd);
@@ -235,7 +235,7 @@ static int tosa_lcd_remove(struct spi_device *spi)
tosa_lcd_tg_off(data);
- dev_set_drvdata(&spi->dev, NULL);
+ spi_set_drvdata(spi, NULL);
return 0;
}
@@ -243,7 +243,7 @@ static int tosa_lcd_remove(struct spi_device *spi)
#ifdef CONFIG_PM
static int tosa_lcd_suspend(struct spi_device *spi, pm_message_t state)
{
- struct tosa_lcd_data *data = dev_get_drvdata(&spi->dev);
+ struct tosa_lcd_data *data = spi_get_drvdata(spi);
tosa_lcd_tg_off(data);
@@ -252,7 +252,7 @@ static int tosa_lcd_suspend(struct spi_device *spi, pm_message_t state)
static int tosa_lcd_resume(struct spi_device *spi)
{
- struct tosa_lcd_data *data = dev_get_drvdata(&spi->dev);
+ struct tosa_lcd_data *data = spi_get_drvdata(spi);
tosa_lcd_tg_init(data);
if (POWER_IS_ON(data->lcd_power))
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c
index 45e81b4cf8b4..84d582f591dc 100644
--- a/drivers/video/backlight/vgg2432a4.c
+++ b/drivers/video/backlight/vgg2432a4.c
@@ -208,12 +208,11 @@ static int vgg2432a4_lcd_init(struct ili9320 *lcd,
#ifdef CONFIG_PM
static int vgg2432a4_suspend(struct spi_device *spi, pm_message_t state)
{
- return ili9320_suspend(dev_get_drvdata(&spi->dev), state);
+ return ili9320_suspend(spi_get_drvdata(spi), state);
}
-
static int vgg2432a4_resume(struct spi_device *spi)
{
- return ili9320_resume(dev_get_drvdata(&spi->dev));
+ return ili9320_resume(spi_get_drvdata(spi));
}
#else
#define vgg2432a4_suspend NULL
@@ -242,12 +241,12 @@ static int vgg2432a4_probe(struct spi_device *spi)
static int vgg2432a4_remove(struct spi_device *spi)
{
- return ili9320_remove(dev_get_drvdata(&spi->dev));
+ return ili9320_remove(spi_get_drvdata(spi));
}
static void vgg2432a4_shutdown(struct spi_device *spi)
{
- ili9320_shutdown(dev_get_drvdata(&spi->dev));
+ ili9320_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver vgg2432a4_driver = {
diff --git a/drivers/video/clps711xfb.c b/drivers/video/clps711xfb.c
index 5a7af0deced2..f00980607b8f 100644
--- a/drivers/video/clps711xfb.c
+++ b/drivers/video/clps711xfb.c
@@ -26,6 +26,7 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/platform_device.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index fdefa8fd72c4..3cd675927826 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -529,6 +529,33 @@ static int search_for_mapped_con(void)
return retval;
}
+static int do_fbcon_takeover(int show_logo)
+{
+ int err, i;
+
+ if (!num_registered_fb)
+ return -ENODEV;
+
+ if (!show_logo)
+ logo_shown = FBCON_LOGO_DONTSHOW;
+
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ con2fb_map[i] = info_idx;
+
+ err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc,
+ fbcon_is_default);
+
+ if (err) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ con2fb_map[i] = -1;
+ info_idx = -1;
+ } else {
+ fbcon_has_console_bind = 1;
+ }
+
+ return err;
+}
+
static int fbcon_takeover(int show_logo)
{
int err, i;
@@ -815,6 +842,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
*
* Maps a virtual console @unit to a frame buffer device
* @newidx.
+ *
+ * This should be called with the console lock held.
*/
static int set_con2fb_map(int unit, int newidx, int user)
{
@@ -832,7 +861,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
if (!search_for_mapped_con() || !con_is_bound(&fb_con)) {
info_idx = newidx;
- return fbcon_takeover(0);
+ return do_fbcon_takeover(0);
}
if (oldidx != -1)
@@ -840,7 +869,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
found = search_fb_in_map(newidx);
- console_lock();
con2fb_map[unit] = newidx;
if (!err && !found)
err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
@@ -867,7 +895,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
if (!search_fb_in_map(info_idx))
info_idx = newidx;
- console_unlock();
return err;
}
@@ -990,7 +1017,7 @@ static const char *fbcon_startup(void)
}
/* Setup default font */
- if (!p->fontdata) {
+ if (!p->fontdata && !vc->vc_font.data) {
if (!fontname[0] || !(font = find_font(fontname)))
font = get_default_font(info->var.xres,
info->var.yres,
@@ -1000,6 +1027,8 @@ static const char *fbcon_startup(void)
vc->vc_font.height = font->height;
vc->vc_font.data = (void *)(p->fontdata = font->data);
vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */
+ } else {
+ p->fontdata = vc->vc_font.data;
}
cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
@@ -1159,9 +1188,9 @@ static void fbcon_init(struct vc_data *vc, int init)
ops->p = &fb_display[fg_console];
}
-static void fbcon_free_font(struct display *p)
+static void fbcon_free_font(struct display *p, bool freefont)
{
- if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
+ if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
p->fontdata = NULL;
p->userfont = 0;
@@ -1173,8 +1202,8 @@ static void fbcon_deinit(struct vc_data *vc)
struct fb_info *info;
struct fbcon_ops *ops;
int idx;
+ bool free_font = true;
- fbcon_free_font(p);
idx = con2fb_map[vc->vc_num];
if (idx == -1)
@@ -1185,6 +1214,8 @@ static void fbcon_deinit(struct vc_data *vc)
if (!info)
goto finished;
+ if (info->flags & FBINFO_MISC_FIRMWARE)
+ free_font = false;
ops = info->fbcon_par;
if (!ops)
@@ -1196,6 +1227,8 @@ static void fbcon_deinit(struct vc_data *vc)
ops->flags &= ~FBCON_FLAGS_INIT;
finished:
+ fbcon_free_font(p, free_font);
+
if (!con_is_bound(&fb_con))
fbcon_exit();
@@ -1242,8 +1275,16 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
if (!height || !width)
return;
- if (sy < vc->vc_top && vc->vc_top == logo_lines)
+ if (sy < vc->vc_top && vc->vc_top == logo_lines) {
vc->vc_top = 0;
+ /*
+	 * If the font dimensions do not evenly divide the display
+	 * dimensions then the ops->clear below won't end up clearing
+ * the margins. Call clear_margins here in case the logo
+ * bitmap stretched into the margin area.
+ */
+ fbcon_clear_margins(vc, 0);
+ }
/* Split blits that cross physical y_wrap boundary */
@@ -2977,7 +3018,7 @@ static int fbcon_unbind(void)
{
int ret;
- ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
+ ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
fbcon_is_default);
if (!ret)
@@ -2992,6 +3033,7 @@ static inline int fbcon_unbind(void)
}
#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
+/* called with console_lock held */
static int fbcon_fb_unbind(int idx)
{
int i, new_idx = -1, ret = 0;
@@ -3018,6 +3060,7 @@ static int fbcon_fb_unbind(int idx)
return ret;
}
+/* called with console_lock held */
static int fbcon_fb_unregistered(struct fb_info *info)
{
int i, idx;
@@ -3050,11 +3093,12 @@ static int fbcon_fb_unregistered(struct fb_info *info)
primary_device = -1;
if (!num_registered_fb)
- unregister_con_driver(&fb_con);
+ do_unregister_con_driver(&fb_con);
return 0;
}
+/* called with console_lock held */
static void fbcon_remap_all(int idx)
{
int i;
@@ -3099,6 +3143,7 @@ static inline void fbcon_select_primary(struct fb_info *info)
}
#endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */
+/* called with console_lock held */
static int fbcon_fb_registered(struct fb_info *info)
{
int ret = 0, i, idx;
@@ -3115,7 +3160,7 @@ static int fbcon_fb_registered(struct fb_info *info)
}
if (info_idx != -1)
- ret = fbcon_takeover(1);
+ ret = do_fbcon_takeover(1);
} else {
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map_boot[i] == idx)
@@ -3251,6 +3296,7 @@ static int fbcon_event_notify(struct notifier_block *self,
ret = fbcon_fb_unregistered(info);
break;
case FB_EVENT_SET_CONSOLE_MAP:
+ /* called with console lock held */
con2fb = event->data;
ret = set_con2fb_map(con2fb->console - 1,
con2fb->framebuffer, 1);
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index d449a74d4a31..5855d17d19ac 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1064,7 +1064,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
unsigned short video_port_status = vga_video_port_reg + 6;
int font_select = 0x00, beg, i;
char *charmap;
-
+ bool clear_attribs = false;
if (vga_video_type != VIDEO_TYPE_EGAM) {
charmap = (char *) VGA_MAP_MEM(colourmap, 0);
beg = 0x0e;
@@ -1169,12 +1169,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
/* if 512 char mode is already enabled don't re-enable it. */
if ((set) && (ch512 != vga_512_chars)) {
- /* attribute controller */
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
- struct vc_data *c = vc_cons[i].d;
- if (c && c->vc_sw == &vga_con)
- c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
- }
vga_512_chars = ch512;
/* 256-char: enable intensity bit
512-char: disable intensity bit */
@@ -1185,8 +1179,22 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
it means, but it works, and it appears necessary */
inb_p(video_port_status);
vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
+ clear_attribs = true;
}
raw_spin_unlock_irq(&vga_lock);
+
+ if (clear_attribs) {
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ struct vc_data *c = vc_cons[i].d;
+ if (c && c->vc_sw == &vga_con) {
+ /* force hi font mask to 0, so we always clear
+ the bit on either transition */
+ c->vc_hi_font_mask = 0x00;
+ clear_buffer_attributes(c);
+ c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
+ }
+ }
+ }
return 0;
}
diff --git a/drivers/video/display_timing.c b/drivers/video/display_timing.c
new file mode 100644
index 000000000000..5e1822cef571
--- /dev/null
+++ b/drivers/video/display_timing.c
@@ -0,0 +1,24 @@
+/*
+ * generic display timing functions
+ *
+ * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * This file is released under the GPLv2
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <video/display_timing.h>
+
+void display_timings_release(struct display_timings *disp)
+{
+ if (disp->timings) {
+ unsigned int i;
+
+ for (i = 0; i < disp->num_timings; i++)
+ kfree(disp->timings[i]);
+ kfree(disp->timings);
+ }
+ kfree(disp);
+}
+EXPORT_SYMBOL_GPL(display_timings_release);
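The release helper above assumes that every entry in the timings array, the array itself, and the containing structure came from the kmalloc family. A minimal sketch of a matching allocator, under that assumption (the helper name is made up; field names follow <video/display_timing.h>):

#include <linux/slab.h>
#include <video/display_timing.h>

/* Hypothetical allocator whose failure path reuses display_timings_release(). */
static struct display_timings *example_alloc_timings(unsigned int n)
{
	struct display_timings *disp;
	unsigned int i;

	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return NULL;

	disp->timings = kcalloc(n, sizeof(*disp->timings), GFP_KERNEL);
	if (!disp->timings)
		goto err;

	for (i = 0; i < n; i++) {
		disp->timings[i] = kzalloc(sizeof(**disp->timings), GFP_KERNEL);
		if (!disp->timings[i])
			goto err;
		/* count only fully allocated entries */
		disp->num_timings++;
	}
	return disp;

err:
	/* frees the entries allocated so far, the array and disp itself */
	display_timings_release(disp);
	return NULL;
}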
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c
index 2ed97769aa6d..de9d4da0e3da 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/video/exynos/exynos_dp_core.c
@@ -965,10 +965,11 @@ static struct exynos_dp_platdata *exynos_dp_dt_parse_pdata(struct device *dev)
static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
{
- struct device_node *dp_phy_node;
+ struct device_node *dp_phy_node = of_node_get(dp->dev->of_node);
u32 phy_base;
+ int ret = 0;
- dp_phy_node = of_find_node_by_name(dp->dev->of_node, "dptx-phy");
+ dp_phy_node = of_find_node_by_name(dp_phy_node, "dptx-phy");
if (!dp_phy_node) {
dev_err(dp->dev, "could not find dptx-phy node\n");
return -ENODEV;
@@ -976,22 +977,28 @@ static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) {
dev_err(dp->dev, "faild to get reg for dptx-phy\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
if (of_property_read_u32(dp_phy_node, "samsung,enable-mask",
&dp->enable_mask)) {
dev_err(dp->dev, "faild to get enable-mask for dptx-phy\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
dp->phy_addr = ioremap(phy_base, SZ_4);
if (!dp->phy_addr) {
dev_err(dp->dev, "failed to ioremap dp-phy\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
- return 0;
+err:
+ of_node_put(dp_phy_node);
+
+ return ret;
}
static void exynos_dp_phy_init(struct exynos_dp_device *dp)
@@ -1117,8 +1124,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
struct exynos_dp_platdata *pdata = pdev->dev.platform_data;
struct exynos_dp_device *dp = platform_get_drvdata(pdev);
- disable_irq(dp->irq);
-
flush_work(&dp->hotplug_work);
if (pdev->dev.of_node) {
@@ -1141,6 +1146,8 @@ static int exynos_dp_suspend(struct device *dev)
struct exynos_dp_platdata *pdata = dev->platform_data;
struct exynos_dp_device *dp = dev_get_drvdata(dev);
+ disable_irq(dp->irq);
+
flush_work(&dp->hotplug_work);
if (dev->of_node) {
diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c
index 4a17cdccef34..fac7df6d1aba 100644
--- a/drivers/video/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/exynos/exynos_mipi_dsi.c
@@ -338,7 +338,8 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
struct mipi_dsim_ddi *dsim_ddi;
int ret = -EINVAL;
- dsim = kzalloc(sizeof(struct mipi_dsim_device), GFP_KERNEL);
+ dsim = devm_kzalloc(&pdev->dev, sizeof(struct mipi_dsim_device),
+ GFP_KERNEL);
if (!dsim) {
dev_err(&pdev->dev, "failed to allocate dsim object.\n");
return -ENOMEM;
@@ -352,13 +353,13 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
dsim_pd = (struct mipi_dsim_platform_data *)dsim->pd;
if (dsim_pd == NULL) {
dev_err(&pdev->dev, "failed to get platform data for dsim.\n");
- goto err_clock_get;
+ return -EINVAL;
}
/* get mipi_dsim_config. */
dsim_config = dsim_pd->dsim_config;
if (dsim_config == NULL) {
dev_err(&pdev->dev, "failed to get dsim config data.\n");
- goto err_clock_get;
+ return -EINVAL;
}
dsim->dsim_config = dsim_config;
@@ -366,41 +367,28 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
mutex_init(&dsim->lock);
- ret = regulator_bulk_get(&pdev->dev, ARRAY_SIZE(supplies), supplies);
+ ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(supplies),
+ supplies);
if (ret) {
dev_err(&pdev->dev, "Failed to get regulators: %d\n", ret);
- goto err_clock_get;
+ return ret;
}
- dsim->clock = clk_get(&pdev->dev, "dsim0");
+ dsim->clock = devm_clk_get(&pdev->dev, "dsim0");
if (IS_ERR(dsim->clock)) {
dev_err(&pdev->dev, "failed to get dsim clock source\n");
- ret = -ENODEV;
- goto err_clock_get;
+ return -ENODEV;
}
clk_enable(dsim->clock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get io memory region\n");
- ret = -ENODEV;
- goto err_platform_get;
- }
-
- dsim->res = request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev));
- if (!dsim->res) {
- dev_err(&pdev->dev, "failed to request io memory region\n");
- ret = -ENOMEM;
- goto err_mem_region;
- }
- dsim->reg_base = ioremap(res->start, resource_size(res));
+ dsim->reg_base = devm_request_and_ioremap(&pdev->dev, res);
if (!dsim->reg_base) {
dev_err(&pdev->dev, "failed to remap io region\n");
ret = -ENOMEM;
- goto err_ioremap;
+ goto error;
}
mutex_init(&dsim->lock);
@@ -410,26 +398,27 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
if (!dsim_ddi) {
dev_err(&pdev->dev, "mipi_dsim_ddi object not found.\n");
ret = -EINVAL;
- goto err_bind;
+ goto error;
}
dsim->irq = platform_get_irq(pdev, 0);
- if (dsim->irq < 0) {
+ if (IS_ERR_VALUE(dsim->irq)) {
dev_err(&pdev->dev, "failed to request dsim irq resource\n");
ret = -EINVAL;
- goto err_platform_get_irq;
+ goto error;
}
init_completion(&dsim_wr_comp);
init_completion(&dsim_rd_comp);
platform_set_drvdata(pdev, dsim);
- ret = request_irq(dsim->irq, exynos_mipi_dsi_interrupt_handler,
+ ret = devm_request_irq(&pdev->dev, dsim->irq,
+ exynos_mipi_dsi_interrupt_handler,
IRQF_SHARED, dev_name(&pdev->dev), dsim);
if (ret != 0) {
dev_err(&pdev->dev, "failed to request dsim irq\n");
ret = -EINVAL;
- goto err_bind;
+ goto error;
}
/* enable interrupts */
@@ -471,22 +460,8 @@ done:
return 0;
-err_bind:
- iounmap(dsim->reg_base);
-
-err_ioremap:
- release_mem_region(dsim->res->start, resource_size(dsim->res));
-
-err_mem_region:
- release_resource(dsim->res);
-
-err_platform_get:
+error:
clk_disable(dsim->clock);
- clk_put(dsim->clock);
-err_clock_get:
- kfree(dsim);
-
-err_platform_get_irq:
return ret;
}
@@ -496,13 +471,7 @@ static int exynos_mipi_dsi_remove(struct platform_device *pdev)
struct mipi_dsim_ddi *dsim_ddi, *next;
struct mipi_dsim_lcd_driver *dsim_lcd_drv;
- iounmap(dsim->reg_base);
-
clk_disable(dsim->clock);
- clk_put(dsim->clock);
-
- release_resource(dsim->res);
- release_mem_region(dsim->res->start, resource_size(dsim->res));
list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) {
if (dsim_ddi) {
@@ -518,9 +487,6 @@ static int exynos_mipi_dsi_remove(struct platform_device *pdev)
}
}
- regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
- kfree(dsim);
-
return 0;
}
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c
index 3cd29a4fc10a..c70cb8926df6 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_common.c
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <linux/memory.h>
#include <linux/delay.h>
+#include <linux/irqreturn.h>
#include <linux/kthread.h>
#include <video/mipi_display.h>
diff --git a/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c b/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
index 0ef38ce72af6..95cb99a1fe2d 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/ctype.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <video/exynos_mipi_dsim.h>
diff --git a/drivers/video/exynos/s6e8ax0.c b/drivers/video/exynos/s6e8ax0.c
index 05d080b63bc0..ca2602413aa4 100644
--- a/drivers/video/exynos/s6e8ax0.c
+++ b/drivers/video/exynos/s6e8ax0.c
@@ -776,7 +776,7 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
int ret;
u8 mtp_id[3] = {0, };
- lcd = kzalloc(sizeof(struct s6e8ax0), GFP_KERNEL);
+ lcd = devm_kzalloc(&dsim_dev->dev, sizeof(struct s6e8ax0), GFP_KERNEL);
if (!lcd) {
dev_err(&dsim_dev->dev, "failed to allocate s6e8ax0 structure.\n");
return -ENOMEM;
@@ -788,18 +788,17 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
mutex_init(&lcd->lock);
- ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
+ ret = devm_regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
if (ret) {
dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
- goto err_lcd_register;
+ return ret;
}
lcd->ld = lcd_device_register("s6e8ax0", lcd->dev, lcd,
&s6e8ax0_lcd_ops);
if (IS_ERR(lcd->ld)) {
dev_err(lcd->dev, "failed to register lcd ops.\n");
- ret = PTR_ERR(lcd->ld);
- goto err_lcd_register;
+ return PTR_ERR(lcd->ld);
}
lcd->bd = backlight_device_register("s6e8ax0-bl", lcd->dev, lcd,
@@ -838,11 +837,6 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
err_backlight_register:
lcd_device_unregister(lcd->ld);
-
-err_lcd_register:
- regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
- kfree(lcd);
-
return ret;
}
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 88cad6b8b479..900aa4ecd617 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -69,7 +69,7 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma,
int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct fb_info *info = file->private_data;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (err)
return err;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 3ff0105a496a..7c254084b6a0 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -727,7 +727,7 @@ static const struct file_operations fb_proc_fops = {
*/
static struct fb_info *file_fb_info(struct file *file)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int fbidx = iminor(inode);
struct fb_info *info = registered_fb[fbidx];
@@ -1177,8 +1177,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
event.data = &con2fb;
if (!lock_fb_info(info))
return -ENODEV;
+ console_lock();
event.info = info;
ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
+ console_unlock();
unlock_fb_info(info);
break;
case FBIOBLANK:
@@ -1650,7 +1652,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
event.info = fb_info;
if (!lock_fb_info(fb_info))
return -ENODEV;
+ console_lock();
fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
+ console_unlock();
unlock_fb_info(fb_info);
return 0;
}
@@ -1666,8 +1670,10 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
if (!lock_fb_info(fb_info))
return -ENODEV;
+ console_lock();
event.info = fb_info;
ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
+ console_unlock();
unlock_fb_info(fb_info);
if (ret)
@@ -1682,7 +1688,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
num_registered_fb--;
fb_cleanup_device(fb_info);
event.info = fb_info;
+ console_lock();
fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
+ console_unlock();
/* this may free fb info */
put_fb_info(fb_info);
@@ -1853,11 +1861,8 @@ int fb_new_modelist(struct fb_info *info)
err = 1;
if (!list_empty(&info->modelist)) {
- if (!lock_fb_info(info))
- return -ENODEV;
event.info = info;
err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
- unlock_fb_info(info);
}
return err;
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index cef65574db6c..94ad0f71383c 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -31,6 +31,8 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <video/edid.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
#ifdef CONFIG_PPC_OF
#include <asm/prom.h>
#include <asm/pci-bridge.h>
@@ -1373,6 +1375,98 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
kfree(timings);
return err;
}
+
+#if IS_ENABLED(CONFIG_VIDEOMODE)
+int fb_videomode_from_videomode(const struct videomode *vm,
+ struct fb_videomode *fbmode)
+{
+ unsigned int htotal, vtotal;
+
+ fbmode->xres = vm->hactive;
+ fbmode->left_margin = vm->hback_porch;
+ fbmode->right_margin = vm->hfront_porch;
+ fbmode->hsync_len = vm->hsync_len;
+
+ fbmode->yres = vm->vactive;
+ fbmode->upper_margin = vm->vback_porch;
+ fbmode->lower_margin = vm->vfront_porch;
+ fbmode->vsync_len = vm->vsync_len;
+
+ /* prevent division by zero in KHZ2PICOS macro */
+ fbmode->pixclock = vm->pixelclock ?
+ KHZ2PICOS(vm->pixelclock / 1000) : 0;
+
+ fbmode->sync = 0;
+ fbmode->vmode = 0;
+ if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
+ fbmode->sync |= FB_SYNC_HOR_HIGH_ACT;
+	if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
+ fbmode->sync |= FB_SYNC_VERT_HIGH_ACT;
+ if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
+ fbmode->vmode |= FB_VMODE_INTERLACED;
+ if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN)
+ fbmode->vmode |= FB_VMODE_DOUBLE;
+ fbmode->flag = 0;
+
+ htotal = vm->hactive + vm->hfront_porch + vm->hback_porch +
+ vm->hsync_len;
+ vtotal = vm->vactive + vm->vfront_porch + vm->vback_porch +
+ vm->vsync_len;
+ /* prevent division by zero */
+ if (htotal && vtotal) {
+ fbmode->refresh = vm->pixelclock / (htotal * vtotal);
+ /* a mode must have htotal and vtotal != 0 or it is invalid */
+ } else {
+ fbmode->refresh = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fb_videomode_from_videomode);
+#endif
+
+#if IS_ENABLED(CONFIG_OF_VIDEOMODE)
+static inline void dump_fb_videomode(const struct fb_videomode *m)
+{
+ pr_debug("fb_videomode = %ux%u@%uHz (%ukHz) %u %u %u %u %u %u %u %u %u\n",
+ m->xres, m->yres, m->refresh, m->pixclock, m->left_margin,
+ m->right_margin, m->upper_margin, m->lower_margin,
+ m->hsync_len, m->vsync_len, m->sync, m->vmode, m->flag);
+}
+
+/**
+ * of_get_fb_videomode - get a fb_videomode from devicetree
+ * @np: device_node with the timing specification
+ * @fb: will be set to the return value
+ * @index: index into the list of display timings in devicetree
+ *
+ * DESCRIPTION:
+ * This function is expensive and should only be used if only one mode is to be
+ * read from DT. To get multiple modes start with of_get_display_timings and
+ * work with that instead.
+ */
+int of_get_fb_videomode(struct device_node *np, struct fb_videomode *fb,
+ int index)
+{
+ struct videomode vm;
+ int ret;
+
+ ret = of_get_videomode(np, &vm, index);
+ if (ret)
+ return ret;
+
+ fb_videomode_from_videomode(&vm, fb);
+
+ pr_debug("%s: got %dx%d display mode from %s\n",
+ of_node_full_name(np), vm.hactive, vm.vactive, np->name);
+ dump_fb_videomode(fb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_get_fb_videomode);
+#endif
+
#else
int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var)
{
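As the kernel-doc above notes, of_get_fb_videomode() re-parses the devicetree timing on every call and is only meant for the single-mode case. A hedged sketch of how a driver's probe path might use it (the helper name and includes are illustrative, not taken from this patch):

#include <linux/fb.h>
#include <linux/of.h>
#include <video/of_videomode.h>

/* Illustrative: read the first DT display timing straight into an fb var. */
static int example_read_dt_mode(struct device_node *np, struct fb_info *info)
{
	struct fb_videomode fbmode;
	int ret;

	/* index 0: only a single mode is wanted, so the one-shot helper is fine */
	ret = of_get_fb_videomode(np, &fbmode, 0);
	if (ret)
		return ret;

	fb_videomode_to_var(&info->var, &fbmode);
	return 0;
}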
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index a55e3669d135..ef476b02fbe5 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -177,6 +177,8 @@ static ssize_t store_modes(struct device *device,
if (i * sizeof(struct fb_videomode) != count)
return -EINVAL;
+ if (!lock_fb_info(fb_info))
+ return -ENODEV;
console_lock();
list_splice(&fb_info->modelist, &old_list);
fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
@@ -188,6 +190,7 @@ static ssize_t store_modes(struct device *device,
fb_destroy_modelist(&old_list);
console_unlock();
+ unlock_fb_info(fb_info);
return 0;
}
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 19cfd7a92563..41fbd9453c5f 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -944,7 +944,7 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
#define PF_COMP_0_MASK 0x0000000F
#define PF_COMP_0_SHIFT 0
-#define MAKE_PF(alpha, red, blue, green, size, c0, c1, c2, c3) \
+#define MAKE_PF(alpha, red, green, blue, size, c0, c1, c2, c3) \
cpu_to_le32(PF_BYTE_F | (alpha << PF_ALPHA_C_SHIFT) | \
(blue << PF_BLUE_C_SHIFT) | (green << PF_GREEN_C_SHIFT) | \
(red << PF_RED_C_SHIFT) | (c3 << PF_COMP_3_SHIFT) | \
@@ -954,10 +954,10 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
switch (bits_per_pixel) {
case 32:
/* 0x88883316 */
- return MAKE_PF(3, 2, 0, 1, 3, 8, 8, 8, 8);
+ return MAKE_PF(3, 2, 1, 0, 3, 8, 8, 8, 8);
case 24:
/* 0x88082219 */
- return MAKE_PF(4, 0, 1, 2, 2, 0, 8, 8, 8);
+ return MAKE_PF(4, 0, 1, 2, 2, 8, 8, 8, 0);
case 16:
/* 0x65053118 */
return MAKE_PF(4, 2, 1, 0, 1, 5, 6, 5, 0);
@@ -1232,6 +1232,16 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
return 0;
}
+static inline void fsl_diu_enable_interrupts(struct fsl_diu_data *data)
+{
+ u32 int_mask = INT_UNDRUN; /* enable underrun detection */
+
+ if (IS_ENABLED(CONFIG_NOT_COHERENT_CACHE))
+ int_mask |= INT_VSYNC; /* enable vertical sync */
+
+ clrbits32(&data->diu_reg->int_mask, int_mask);
+}
+
/* turn on fb if count == 1
*/
static int fsl_diu_open(struct fb_info *info, int user)
@@ -1251,19 +1261,7 @@ static int fsl_diu_open(struct fb_info *info, int user)
if (res < 0)
mfbi->count--;
else {
- struct fsl_diu_data *data = mfbi->parent;
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
- /*
- * Enable underrun detection and vertical sync
- * interrupts.
- */
- clrbits32(&data->diu_reg->int_mask,
- INT_UNDRUN | INT_VSYNC);
-#else
- /* Enable underrun detection */
- clrbits32(&data->diu_reg->int_mask, INT_UNDRUN);
-#endif
+ fsl_diu_enable_interrupts(mfbi->parent);
fsl_diu_enable_panel(info);
}
}
@@ -1283,9 +1281,18 @@ static int fsl_diu_release(struct fb_info *info, int user)
mfbi->count--;
if (mfbi->count == 0) {
struct fsl_diu_data *data = mfbi->parent;
+ bool disable = true;
+ int i;
- /* Disable interrupts */
- out_be32(&data->diu_reg->int_mask, 0xffffffff);
+ /* Disable interrupts only if all AOIs are closed */
+ for (i = 0; i < NUM_AOIS; i++) {
+ struct mfb_info *mi = data->fsl_diu_info[i].par;
+
+ if (mi->count)
+ disable = false;
+ }
+ if (disable)
+ out_be32(&data->diu_reg->int_mask, 0xffffffff);
fsl_diu_disable_panel(info);
}
@@ -1614,14 +1621,6 @@ static int fsl_diu_probe(struct platform_device *pdev)
out_be32(&data->diu_reg->desc[1], data->dummy_ad.paddr);
out_be32(&data->diu_reg->desc[2], data->dummy_ad.paddr);
- for (i = 0; i < NUM_AOIS; i++) {
- ret = install_fb(&data->fsl_diu_info[i]);
- if (ret) {
- dev_err(&pdev->dev, "could not register fb %d\n", i);
- goto error;
- }
- }
-
/*
* Older versions of U-Boot leave interrupts enabled, so disable
* all of them and clear the status register.
@@ -1630,12 +1629,21 @@ static int fsl_diu_probe(struct platform_device *pdev)
in_be32(&data->diu_reg->int_status);
ret = request_irq(data->irq, fsl_diu_isr, 0, "fsl-diu-fb",
- &data->diu_reg);
+ data->diu_reg);
if (ret) {
dev_err(&pdev->dev, "could not claim irq\n");
goto error;
}
+ for (i = 0; i < NUM_AOIS; i++) {
+ ret = install_fb(&data->fsl_diu_info[i]);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register fb %d\n", i);
+ free_irq(data->irq, data->diu_reg);
+ goto error;
+ }
+ }
+
sysfs_attr_init(&data->dev_attr.attr);
data->dev_attr.attr.name = "monitor";
data->dev_attr.attr.mode = S_IRUGO|S_IWUSR;
@@ -1667,7 +1675,7 @@ static int fsl_diu_remove(struct platform_device *pdev)
data = dev_get_drvdata(&pdev->dev);
disable_lcdc(&data->fsl_diu_info[0]);
- free_irq(data->irq, &data->diu_reg);
+ free_irq(data->irq, data->diu_reg);
for (i = 0; i < NUM_AOIS; i++)
uninstall_fb(&data->fsl_diu_info[i]);
diff --git a/drivers/video/goldfishfb.c b/drivers/video/goldfishfb.c
new file mode 100644
index 000000000000..489abb32fc04
--- /dev/null
+++ b/drivers/video/goldfishfb.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+enum {
+ FB_GET_WIDTH = 0x00,
+ FB_GET_HEIGHT = 0x04,
+ FB_INT_STATUS = 0x08,
+ FB_INT_ENABLE = 0x0c,
+ FB_SET_BASE = 0x10,
+ FB_SET_ROTATION = 0x14,
+ FB_SET_BLANK = 0x18,
+ FB_GET_PHYS_WIDTH = 0x1c,
+ FB_GET_PHYS_HEIGHT = 0x20,
+
+ FB_INT_VSYNC = 1U << 0,
+ FB_INT_BASE_UPDATE_DONE = 1U << 1
+};
+
+struct goldfish_fb {
+ void __iomem *reg_base;
+ int irq;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ int base_update_count;
+ int rotation;
+ struct fb_info fb;
+ u32 cmap[16];
+};
+
+static irqreturn_t goldfish_fb_interrupt(int irq, void *dev_id)
+{
+ unsigned long irq_flags;
+ struct goldfish_fb *fb = dev_id;
+ u32 status;
+
+ spin_lock_irqsave(&fb->lock, irq_flags);
+ status = readl(fb->reg_base + FB_INT_STATUS);
+ if (status & FB_INT_BASE_UPDATE_DONE) {
+ fb->base_update_count++;
+ wake_up(&fb->wait);
+ }
+ spin_unlock_irqrestore(&fb->lock, irq_flags);
+ return status ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static inline u32 convert_bitfield(int val, struct fb_bitfield *bf)
+{
+ unsigned int mask = (1 << bf->length) - 1;
+
+ return (val >> (16 - bf->length) & mask) << bf->offset;
+}
+
+static int
+goldfish_fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
+ unsigned int blue, unsigned int transp, struct fb_info *info)
+{
+ struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
+
+ if (regno < 16) {
+ fb->cmap[regno] = convert_bitfield(transp, &fb->fb.var.transp) |
+ convert_bitfield(blue, &fb->fb.var.blue) |
+ convert_bitfield(green, &fb->fb.var.green) |
+ convert_bitfield(red, &fb->fb.var.red);
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static int goldfish_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ if ((var->rotate & 1) != (info->var.rotate & 1)) {
+ if ((var->xres != info->var.yres) ||
+ (var->yres != info->var.xres) ||
+ (var->xres_virtual != info->var.yres) ||
+ (var->yres_virtual > info->var.xres * 2) ||
+ (var->yres_virtual < info->var.xres)) {
+ return -EINVAL;
+ }
+ } else {
+ if ((var->xres != info->var.xres) ||
+ (var->yres != info->var.yres) ||
+ (var->xres_virtual != info->var.xres) ||
+ (var->yres_virtual > info->var.yres * 2) ||
+ (var->yres_virtual < info->var.yres)) {
+ return -EINVAL;
+ }
+ }
+ if ((var->xoffset != info->var.xoffset) ||
+ (var->bits_per_pixel != info->var.bits_per_pixel) ||
+ (var->grayscale != info->var.grayscale)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int goldfish_fb_set_par(struct fb_info *info)
+{
+ struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
+ if (fb->rotation != fb->fb.var.rotate) {
+ info->fix.line_length = info->var.xres * 2;
+ fb->rotation = fb->fb.var.rotate;
+ writel(fb->rotation, fb->reg_base + FB_SET_ROTATION);
+ }
+ return 0;
+}
+
+
+static int goldfish_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ unsigned long irq_flags;
+ int base_update_count;
+ struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
+
+ spin_lock_irqsave(&fb->lock, irq_flags);
+ base_update_count = fb->base_update_count;
+ writel(fb->fb.fix.smem_start + fb->fb.var.xres * 2 * var->yoffset,
+ fb->reg_base + FB_SET_BASE);
+ spin_unlock_irqrestore(&fb->lock, irq_flags);
+ wait_event_timeout(fb->wait,
+ fb->base_update_count != base_update_count, HZ / 15);
+ if (fb->base_update_count == base_update_count)
+ pr_err("goldfish_fb_pan_display: timeout wating for base update\n");
+ return 0;
+}
+
+static int goldfish_fb_blank(int blank, struct fb_info *info)
+{
+ struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
+ switch (blank) {
+ case FB_BLANK_NORMAL:
+ writel(1, fb->reg_base + FB_SET_BLANK);
+ break;
+ case FB_BLANK_UNBLANK:
+ writel(0, fb->reg_base + FB_SET_BLANK);
+ break;
+ }
+ return 0;
+}
+
+static struct fb_ops goldfish_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = goldfish_fb_check_var,
+ .fb_set_par = goldfish_fb_set_par,
+ .fb_setcolreg = goldfish_fb_setcolreg,
+ .fb_pan_display = goldfish_fb_pan_display,
+ .fb_blank = goldfish_fb_blank,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+
+static int goldfish_fb_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+ struct goldfish_fb *fb;
+ size_t framesize;
+ u32 width, height;
+ dma_addr_t fbpaddr;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (fb == NULL) {
+ ret = -ENOMEM;
+ goto err_fb_alloc_failed;
+ }
+ spin_lock_init(&fb->lock);
+ init_waitqueue_head(&fb->wait);
+ platform_set_drvdata(pdev, fb);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENODEV;
+ goto err_no_io_base;
+ }
+ fb->reg_base = ioremap(r->start, PAGE_SIZE);
+ if (fb->reg_base == NULL) {
+ ret = -ENOMEM;
+ goto err_no_io_base;
+ }
+
+ fb->irq = platform_get_irq(pdev, 0);
+ if (fb->irq <= 0) {
+ ret = -ENODEV;
+ goto err_no_irq;
+ }
+
+ width = readl(fb->reg_base + FB_GET_WIDTH);
+ height = readl(fb->reg_base + FB_GET_HEIGHT);
+
+ fb->fb.fbops = &goldfish_fb_ops;
+ fb->fb.flags = FBINFO_FLAG_DEFAULT;
+ fb->fb.pseudo_palette = fb->cmap;
+ fb->fb.fix.type = FB_TYPE_PACKED_PIXELS;
+ fb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
+ fb->fb.fix.line_length = width * 2;
+ fb->fb.fix.accel = FB_ACCEL_NONE;
+ fb->fb.fix.ypanstep = 1;
+
+ fb->fb.var.xres = width;
+ fb->fb.var.yres = height;
+ fb->fb.var.xres_virtual = width;
+ fb->fb.var.yres_virtual = height * 2;
+ fb->fb.var.bits_per_pixel = 16;
+ fb->fb.var.activate = FB_ACTIVATE_NOW;
+ fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT);
+ fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH);
+ fb->fb.var.pixclock = 10000;
+
+ fb->fb.var.red.offset = 11;
+ fb->fb.var.red.length = 5;
+ fb->fb.var.green.offset = 5;
+ fb->fb.var.green.length = 6;
+ fb->fb.var.blue.offset = 0;
+ fb->fb.var.blue.length = 5;
+
+ framesize = width * height * 2 * 2;
+ fb->fb.screen_base = (char __force __iomem *)dma_alloc_coherent(
+ &pdev->dev, framesize,
+ &fbpaddr, GFP_KERNEL);
+ pr_debug("allocating frame buffer %d * %d, got %p\n",
+ width, height, fb->fb.screen_base);
+ if (fb->fb.screen_base == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_screen_base_failed;
+ }
+ fb->fb.fix.smem_start = fbpaddr;
+ fb->fb.fix.smem_len = framesize;
+
+ ret = fb_set_var(&fb->fb, &fb->fb.var);
+ if (ret)
+ goto err_fb_set_var_failed;
+
+ ret = request_irq(fb->irq, goldfish_fb_interrupt, IRQF_SHARED,
+ pdev->name, fb);
+ if (ret)
+ goto err_request_irq_failed;
+
+ writel(FB_INT_BASE_UPDATE_DONE, fb->reg_base + FB_INT_ENABLE);
+ goldfish_fb_pan_display(&fb->fb.var, &fb->fb); /* updates base */
+
+ ret = register_framebuffer(&fb->fb);
+ if (ret)
+ goto err_register_framebuffer_failed;
+ return 0;
+
+err_register_framebuffer_failed:
+ free_irq(fb->irq, fb);
+err_request_irq_failed:
+err_fb_set_var_failed:
+ dma_free_coherent(&pdev->dev, framesize,
+ (void *)fb->fb.screen_base,
+ fb->fb.fix.smem_start);
+err_alloc_screen_base_failed:
+err_no_irq:
+ iounmap(fb->reg_base);
+err_no_io_base:
+ kfree(fb);
+err_fb_alloc_failed:
+ return ret;
+}
+
+static int goldfish_fb_remove(struct platform_device *pdev)
+{
+ size_t framesize;
+ struct goldfish_fb *fb = platform_get_drvdata(pdev);
+
+ framesize = fb->fb.var.xres_virtual * fb->fb.var.yres_virtual * 2;
+ unregister_framebuffer(&fb->fb);
+ free_irq(fb->irq, fb);
+
+ dma_free_coherent(&pdev->dev, framesize, (void *)fb->fb.screen_base,
+ fb->fb.fix.smem_start);
+ iounmap(fb->reg_base);
+ return 0;
+}
+
+
+static struct platform_driver goldfish_fb_driver = {
+ .probe = goldfish_fb_probe,
+ .remove = goldfish_fb_remove,
+ .driver = {
+ .name = "goldfish_fb"
+ }
+};
+
+module_platform_driver(goldfish_fb_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
new file mode 100644
index 000000000000..ab23c9b79143
--- /dev/null
+++ b/drivers/video/hdmi.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/hdmi.h>
+#include <linux/string.h>
+
+static void hdmi_infoframe_checksum(void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ u8 csum = 0;
+ size_t i;
+
+ /* compute checksum */
+ for (i = 0; i < size; i++)
+ csum += ptr[i];
+
+ ptr[3] = 256 - csum;
+}
+
+/**
+ * hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
+ * @frame: HDMI AVI infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_AVI;
+ frame->version = 2;
+ frame->length = 13;
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_init);
+
+/**
+ * hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
+ * @frame: HDMI AVI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
+ size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, length);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
+
+ if (frame->active_info_valid)
+ ptr[0] |= BIT(4);
+
+ if (frame->horizontal_bar_valid)
+ ptr[0] |= BIT(3);
+
+ if (frame->vertical_bar_valid)
+ ptr[0] |= BIT(2);
+
+ ptr[1] = ((frame->colorimetry & 0x3) << 6) |
+ ((frame->picture_aspect & 0x3) << 4) |
+ (frame->active_aspect & 0xf);
+
+ ptr[2] = ((frame->extended_colorimetry & 0x7) << 4) |
+ ((frame->quantization_range & 0x3) << 2) |
+ (frame->nups & 0x3);
+
+ if (frame->itc)
+ ptr[2] |= BIT(7);
+
+ ptr[3] = frame->video_code & 0x7f;
+
+ ptr[4] = ((frame->ycc_quantization_range & 0x3) << 6) |
+ ((frame->content_type & 0x3) << 4) |
+ (frame->pixel_repeat & 0xf);
+
+ ptr[5] = frame->top_bar & 0xff;
+ ptr[6] = (frame->top_bar >> 8) & 0xff;
+ ptr[7] = frame->bottom_bar & 0xff;
+ ptr[8] = (frame->bottom_bar >> 8) & 0xff;
+ ptr[9] = frame->left_bar & 0xff;
+ ptr[10] = (frame->left_bar >> 8) & 0xff;
+ ptr[11] = frame->right_bar & 0xff;
+ ptr[12] = (frame->right_bar >> 8) & 0xff;
+
+ hdmi_infoframe_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
+
+/**
+ * hdmi_spd_infoframe_init() - initialize an HDMI SPD infoframe
+ * @frame: HDMI SPD infoframe
+ * @vendor: vendor string
+ * @product: product string
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
+ const char *vendor, const char *product)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_SPD;
+ frame->version = 1;
+ frame->length = 25;
+
+ strncpy(frame->vendor, vendor, sizeof(frame->vendor));
+ strncpy(frame->product, product, sizeof(frame->product));
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_init);
+
+/**
+ * hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer
+ * @frame: HDMI SPD infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
+ size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, length);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ memcpy(ptr, frame->vendor, sizeof(frame->vendor));
+ memcpy(ptr + 8, frame->product, sizeof(frame->product));
+
+ ptr[24] = frame->sdi;
+
+ hdmi_infoframe_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_pack);
+
+/**
+ * hdmi_audio_infoframe_init() - initialize an HDMI audio infoframe
+ * @frame: HDMI audio infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
+ frame->version = 1;
+ frame->length = 10;
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_init);
+
+/**
+ * hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
+ * @frame: HDMI audio infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size)
+{
+ unsigned char channels;
+ u8 *ptr = buffer;
+ size_t length;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, length);
+
+ if (frame->channels >= 2)
+ channels = frame->channels - 1;
+ else
+ channels = 0;
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ ptr[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
+ ptr[1] = ((frame->sample_frequency & 0x7) << 2) |
+ (frame->sample_size & 0x3);
+ ptr[2] = frame->coding_type_ext & 0x1f;
+ ptr[3] = frame->channel_allocation;
+ ptr[4] = (frame->level_shift_value & 0xf) << 3;
+
+ if (frame->downmix_inhibit)
+ ptr[4] |= BIT(7);
+
+ hdmi_infoframe_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
+
+/**
+ * hdmi_vendor_infoframe_pack() - write an HDMI vendor infoframe to binary
+ * buffer
+ * @frame: HDMI vendor infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, length);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ memcpy(&ptr[HDMI_INFOFRAME_HEADER_SIZE], frame->data, frame->length);
+
+ hdmi_infoframe_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
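A minimal sketch of how a display driver might consume the AVI helpers added above: initialize the frame, set the fields it cares about, and pack it into a buffer destined for the encoder's infoframe registers (the field values and buffer handling are illustrative only):

#include <linux/hdmi.h>

/* Illustrative: build a packed AVI infoframe for a 1280x720 RGB mode. */
static ssize_t example_pack_avi(u8 *buf, size_t len)
{
	struct hdmi_avi_infoframe frame;
	int ret;

	ret = hdmi_avi_infoframe_init(&frame);
	if (ret)
		return ret;

	frame.colorspace = HDMI_COLORSPACE_RGB;
	frame.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
	frame.video_code = 4;	/* CEA/VIC 4: 1280x720@60 */

	/* returns the number of bytes packed, or a negative error code */
	return hdmi_avi_infoframe_pack(&frame, buf, len);
}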
diff --git a/drivers/video/mmp/Kconfig b/drivers/video/mmp/Kconfig
new file mode 100644
index 000000000000..e9ea39e13722
--- /dev/null
+++ b/drivers/video/mmp/Kconfig
@@ -0,0 +1,11 @@
+menuconfig MMP_DISP
+ tristate "Marvell MMP Display Subsystem support"
+ depends on CPU_PXA910 || CPU_MMP2 || CPU_MMP3 || CPU_PXA988
+ help
+ Marvell Display Subsystem support.
+
+if MMP_DISP
+source "drivers/video/mmp/hw/Kconfig"
+source "drivers/video/mmp/panel/Kconfig"
+source "drivers/video/mmp/fb/Kconfig"
+endif
diff --git a/drivers/video/mmp/Makefile b/drivers/video/mmp/Makefile
new file mode 100644
index 000000000000..a014cb358bf8
--- /dev/null
+++ b/drivers/video/mmp/Makefile
@@ -0,0 +1 @@
+obj-y += core.o hw/ panel/ fb/
diff --git a/drivers/video/mmp/core.c b/drivers/video/mmp/core.c
new file mode 100644
index 000000000000..9ed83419038b
--- /dev/null
+++ b/drivers/video/mmp/core.c
@@ -0,0 +1,258 @@
+/*
+ * linux/drivers/video/mmp/core.c
+ * This driver is a common framework for Marvell Display Controller
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <video/mmp_disp.h>
+
+static struct mmp_overlay *path_get_overlay(struct mmp_path *path,
+ int overlay_id)
+{
+ if (path && overlay_id < path->overlay_num)
+ return &path->overlays[overlay_id];
+	return NULL;
+}
+
+static int path_check_status(struct mmp_path *path)
+{
+ int i;
+ for (i = 0; i < path->overlay_num; i++)
+ if (path->overlays[i].status)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Get a pointer to the modelist and return the number of modes.
+ * The modelist is fetched from the phy or the panel:
+ * for HDMI/parallel or DSI-to-HDMI cases it comes from the phy,
+ * otherwise it comes from the panel.
+ */
+static int path_get_modelist(struct mmp_path *path,
+ struct mmp_mode **modelist)
+{
+ BUG_ON(!path || !modelist);
+
+ if (path->panel && path->panel->get_modelist)
+ return path->panel->get_modelist(path->panel, modelist);
+
+ return 0;
+}
+
+/*
+ * The panel list is used to pair a panel with a path when either registers.
+ * The path list is used by both the buffer driver and the plat driver:
+ * the plat driver registers/unregisters paths,
+ * the panel driver registers/unregisters panels,
+ * and the buffer driver looks up registered paths.
+ */
+static LIST_HEAD(panel_list);
+static LIST_HEAD(path_list);
+static DEFINE_MUTEX(disp_lock);
+
+/*
+ * mmp_register_panel - register a panel to panel_list and connect it to a path
+ * @panel: panel to be registered
+ *
+ * This function provides the interface for panel drivers to register a panel
+ * to panel_list and connect it to the path whose name matches
+ * panel->plat_path_name. No error is returned when no matching path is found,
+ * since registering the path after the panel is permitted.
+ */
+void mmp_register_panel(struct mmp_panel *panel)
+{
+ struct mmp_path *path;
+
+ mutex_lock(&disp_lock);
+
+ /* add */
+ list_add_tail(&panel->node, &panel_list);
+
+ /* try to register to path */
+ list_for_each_entry(path, &path_list, node) {
+ if (!strcmp(panel->plat_path_name, path->name)) {
+ dev_info(panel->dev, "connect to path %s\n",
+ path->name);
+ path->panel = panel;
+ break;
+ }
+ }
+
+ mutex_unlock(&disp_lock);
+}
+EXPORT_SYMBOL_GPL(mmp_register_panel);
+
+/*
+ * mmp_unregister_panel - unregister panel from panel_list and disconnect
+ * @panel: panel to be unregistered
+ *
+ * This function provides the interface for panel drivers to unregister a panel
+ * from panel_list and disconnect it from its path.
+ */
+void mmp_unregister_panel(struct mmp_panel *panel)
+{
+ struct mmp_path *path;
+
+ mutex_lock(&disp_lock);
+ list_del(&panel->node);
+
+ list_for_each_entry(path, &path_list, node) {
+ if (path->panel && path->panel == panel) {
+ dev_info(panel->dev, "disconnect from path %s\n",
+ path->name);
+ path->panel = NULL;
+ break;
+ }
+ }
+ mutex_unlock(&disp_lock);
+}
+EXPORT_SYMBOL_GPL(mmp_unregister_panel);
+
+/*
+ * mmp_get_path - get path by name
+ * @name: path name
+ *
+ * This function looks up the name in path_list and returns the matching path,
+ * or NULL if no match is found.
+ */
+struct mmp_path *mmp_get_path(const char *name)
+{
+ struct mmp_path *path;
+ int found = 0;
+
+ mutex_lock(&disp_lock);
+ list_for_each_entry(path, &path_list, node) {
+ if (!strcmp(name, path->name)) {
+ found = 1;
+ break;
+ }
+ }
+ mutex_unlock(&disp_lock);
+
+ return found ? path : NULL;
+}
+EXPORT_SYMBOL_GPL(mmp_get_path);
+
+/*
+ * mmp_register_path - init and register path by path_info
+ * @info: path info provided by the display controller
+ *
+ * This function initializes a path from the path info, registers it to
+ * path_list, and also tries to connect the path to a panel by name.
+ */
+struct mmp_path *mmp_register_path(struct mmp_path_info *info)
+{
+ int i;
+ size_t size;
+ struct mmp_path *path = NULL;
+ struct mmp_panel *panel;
+
+ size = sizeof(struct mmp_path)
+ + sizeof(struct mmp_overlay) * info->overlay_num;
+ path = kzalloc(size, GFP_KERNEL);
+ if (!path)
+ goto failed;
+
+ /* path set */
+ mutex_init(&path->access_ok);
+ path->dev = info->dev;
+ path->id = info->id;
+ path->name = info->name;
+ path->output_type = info->output_type;
+ path->overlay_num = info->overlay_num;
+ path->plat_data = info->plat_data;
+ path->ops.set_mode = info->set_mode;
+
+ mutex_lock(&disp_lock);
+ /* get panel */
+ list_for_each_entry(panel, &panel_list, node) {
+ if (!strcmp(info->name, panel->plat_path_name)) {
+ dev_info(path->dev, "get panel %s\n", panel->name);
+ path->panel = panel;
+ break;
+ }
+ }
+
+ dev_info(path->dev, "register %s, overlay_num %d\n",
+ path->name, path->overlay_num);
+
+ /* default op set: if already set by driver, never cover it */
+ if (!path->ops.check_status)
+ path->ops.check_status = path_check_status;
+ if (!path->ops.get_overlay)
+ path->ops.get_overlay = path_get_overlay;
+ if (!path->ops.get_modelist)
+ path->ops.get_modelist = path_get_modelist;
+
+ /* step3: init overlays */
+ for (i = 0; i < path->overlay_num; i++) {
+ path->overlays[i].path = path;
+ path->overlays[i].id = i;
+ mutex_init(&path->overlays[i].access_ok);
+ path->overlays[i].ops = info->overlay_ops;
+ }
+
+ /* add to pathlist */
+ list_add_tail(&path->node, &path_list);
+
+ mutex_unlock(&disp_lock);
+ return path;
+
+failed:
+ kfree(path);
+ mutex_unlock(&disp_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(mmp_register_path);
+
+/*
+ * mmp_unregister_path - unregister and destroy a path
+ * @path: path to be destroyed
+ *
+ * This function unregisters the path and destroys it.
+ */
+void mmp_unregister_path(struct mmp_path *path)
+{
+ int i;
+
+ if (!path)
+ return;
+
+ mutex_lock(&disp_lock);
+ /* del from pathlist */
+ list_del(&path->node);
+
+ /* deinit overlays */
+ for (i = 0; i < path->overlay_num; i++)
+ mutex_destroy(&path->overlays[i].access_ok);
+
+ mutex_destroy(&path->access_ok);
+
+ kfree(path);
+ mutex_unlock(&disp_lock);
+
+ dev_info(path->dev, "de-register %s\n", path->name);
+}
+EXPORT_SYMBOL_GPL(mmp_unregister_path);
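The comments above describe the pairing model: panels and paths register independently and are matched by plat_path_name. A hedged sketch of the panel side, assuming a matching path named "mmp_pnpath" is registered by the platform driver (the panel name and path name are made up; a real panel driver also fills in ->dev and its operations):

#include <linux/module.h>
#include <video/mmp_disp.h>

/* Illustrative panel that pairs with a path registered as "mmp_pnpath". */
static struct mmp_panel example_panel = {
	.name = "example-panel",
	.plat_path_name = "mmp_pnpath",
};

static int __init example_panel_init(void)
{
	/* Matching is by name; registering before the path is also allowed. */
	mmp_register_panel(&example_panel);
	return 0;
}

static void __exit example_panel_exit(void)
{
	mmp_unregister_panel(&example_panel);
}

module_init(example_panel_init);
module_exit(example_panel_exit);
MODULE_LICENSE("GPL");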
diff --git a/drivers/video/mmp/fb/Kconfig b/drivers/video/mmp/fb/Kconfig
new file mode 100644
index 000000000000..9b0141f105f5
--- /dev/null
+++ b/drivers/video/mmp/fb/Kconfig
@@ -0,0 +1,13 @@
+if MMP_DISP
+
+config MMP_FB
+ bool "fb driver for Marvell MMP Display Subsystem"
+ depends on FB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ default y
+ help
+ fb driver for Marvell MMP Display Subsystem
+
+endif
diff --git a/drivers/video/mmp/fb/Makefile b/drivers/video/mmp/fb/Makefile
new file mode 100644
index 000000000000..709fd1f76abe
--- /dev/null
+++ b/drivers/video/mmp/fb/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MMP_FB) += mmpfb.o
diff --git a/drivers/video/mmp/fb/mmpfb.c b/drivers/video/mmp/fb/mmpfb.c
new file mode 100644
index 000000000000..6d1fa96c5cc3
--- /dev/null
+++ b/drivers/video/mmp/fb/mmpfb.c
@@ -0,0 +1,685 @@
+/*
+ * linux/drivers/video/mmp/fb/mmpfb.c
+ * Framebuffer driver for Marvell Display controller.
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include "mmpfb.h"
+
+static int var_to_pixfmt(struct fb_var_screeninfo *var)
+{
+ /*
+ * Pseudocolor mode?
+ */
+ if (var->bits_per_pixel == 8)
+ return PIXFMT_PSEUDOCOLOR;
+
+ /*
+ * Check for YUV422PLANAR.
+ */
+ if (var->bits_per_pixel == 16 && var->red.length == 8 &&
+ var->green.length == 4 && var->blue.length == 4) {
+ if (var->green.offset >= var->blue.offset)
+ return PIXFMT_YUV422P;
+ else
+ return PIXFMT_YVU422P;
+ }
+
+ /*
+ * Check for YUV420PLANAR.
+ */
+ if (var->bits_per_pixel == 12 && var->red.length == 8 &&
+ var->green.length == 2 && var->blue.length == 2) {
+ if (var->green.offset >= var->blue.offset)
+ return PIXFMT_YUV420P;
+ else
+ return PIXFMT_YVU420P;
+ }
+
+ /*
+ * Check for YUV422PACK.
+ */
+ if (var->bits_per_pixel == 16 && var->red.length == 16 &&
+ var->green.length == 16 && var->blue.length == 16) {
+ if (var->red.offset == 0)
+ return PIXFMT_YUYV;
+ else if (var->green.offset >= var->blue.offset)
+ return PIXFMT_UYVY;
+ else
+ return PIXFMT_VYUY;
+ }
+
+ /*
+ * Check for 565/1555.
+ */
+ if (var->bits_per_pixel == 16 && var->red.length <= 5 &&
+ var->green.length <= 6 && var->blue.length <= 5) {
+ if (var->transp.length == 0) {
+ if (var->red.offset >= var->blue.offset)
+ return PIXFMT_RGB565;
+ else
+ return PIXFMT_BGR565;
+ }
+ }
+
+ /*
+ * Check for 888/A888.
+ */
+ if (var->bits_per_pixel <= 32 && var->red.length <= 8 &&
+ var->green.length <= 8 && var->blue.length <= 8) {
+ if (var->bits_per_pixel == 24 && var->transp.length == 0) {
+ if (var->red.offset >= var->blue.offset)
+ return PIXFMT_RGB888PACK;
+ else
+ return PIXFMT_BGR888PACK;
+ }
+
+ if (var->bits_per_pixel == 32 && var->transp.offset == 24) {
+ if (var->red.offset >= var->blue.offset)
+ return PIXFMT_RGBA888;
+ else
+ return PIXFMT_BGRA888;
+ } else {
+ if (var->red.offset >= var->blue.offset)
+ return PIXFMT_RGB888UNPACK;
+ else
+ return PIXFMT_BGR888UNPACK;
+ }
+
+ /* fall through */
+ }
+
+ return -EINVAL;
+}
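+
+/*
+ * Example (illustrative): a 32 bpp var with transp.offset == 24 and
+ * red.offset (16) >= blue.offset (0) is mapped to PIXFMT_RGBA888 by the
+ * checks above, while the same RGB layout with a zero-length transp field
+ * and bits_per_pixel == 24 maps to PIXFMT_RGB888PACK.
+ */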
+
+static void pixfmt_to_var(struct fb_var_screeninfo *var, int pix_fmt)
+{
+ switch (pix_fmt) {
+ case PIXFMT_RGB565:
+ var->bits_per_pixel = 16;
+ var->red.offset = 11; var->red.length = 5;
+ var->green.offset = 5; var->green.length = 6;
+ var->blue.offset = 0; var->blue.length = 5;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_BGR565:
+ var->bits_per_pixel = 16;
+ var->red.offset = 0; var->red.length = 5;
+ var->green.offset = 5; var->green.length = 6;
+ var->blue.offset = 11; var->blue.length = 5;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_RGB888UNPACK:
+ var->bits_per_pixel = 32;
+ var->red.offset = 16; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_BGR888UNPACK:
+ var->bits_per_pixel = 32;
+ var->red.offset = 0; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 16; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_RGBA888:
+ var->bits_per_pixel = 32;
+ var->red.offset = 16; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ var->transp.offset = 24; var->transp.length = 8;
+ break;
+ case PIXFMT_BGRA888:
+ var->bits_per_pixel = 32;
+ var->red.offset = 0; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 16; var->blue.length = 8;
+ var->transp.offset = 24; var->transp.length = 8;
+ break;
+ case PIXFMT_RGB888PACK:
+ var->bits_per_pixel = 24;
+ var->red.offset = 16; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_BGR888PACK:
+ var->bits_per_pixel = 24;
+ var->red.offset = 0; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 16; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_YUV420P:
+ var->bits_per_pixel = 12;
+ var->red.offset = 4; var->red.length = 8;
+ var->green.offset = 2; var->green.length = 2;
+ var->blue.offset = 0; var->blue.length = 2;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_YVU420P:
+ var->bits_per_pixel = 12;
+ var->red.offset = 4; var->red.length = 8;
+ var->green.offset = 0; var->green.length = 2;
+ var->blue.offset = 2; var->blue.length = 2;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_YUV422P:
+ var->bits_per_pixel = 16;
+ var->red.offset = 8; var->red.length = 8;
+ var->green.offset = 4; var->green.length = 4;
+ var->blue.offset = 0; var->blue.length = 4;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_YVU422P:
+ var->bits_per_pixel = 16;
+ var->red.offset = 8; var->red.length = 8;
+ var->green.offset = 0; var->green.length = 4;
+ var->blue.offset = 4; var->blue.length = 4;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_UYVY:
+ var->bits_per_pixel = 16;
+ var->red.offset = 8; var->red.length = 16;
+ var->green.offset = 4; var->green.length = 16;
+ var->blue.offset = 0; var->blue.length = 16;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_VYUY:
+ var->bits_per_pixel = 16;
+ var->red.offset = 8; var->red.length = 16;
+ var->green.offset = 0; var->green.length = 16;
+ var->blue.offset = 4; var->blue.length = 16;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_YUYV:
+ var->bits_per_pixel = 16;
+ var->red.offset = 0; var->red.length = 16;
+ var->green.offset = 4; var->green.length = 16;
+ var->blue.offset = 8; var->blue.length = 16;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIXFMT_PSEUDOCOLOR:
+ var->bits_per_pixel = 8;
+ var->red.offset = 0; var->red.length = 8;
+ var->green.offset = 0; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ }
+}
+
+/*
+ * The fb framework has its limitations:
+ * 1. input color and output color are not separated
+ * 2. fb_videomode does not include the output color format
+ * So for fb usage we keep an output format which never changes,
+ * and add it when converting to an mmp mode.
+ */
+static void fbmode_to_mmpmode(struct mmp_mode *mode,
+ struct fb_videomode *videomode, int output_fmt)
+{
+ u64 div_result = 1000000000000ll;
+ mode->name = videomode->name;
+ mode->refresh = videomode->refresh;
+ mode->xres = videomode->xres;
+ mode->yres = videomode->yres;
+
+ do_div(div_result, videomode->pixclock);
+ mode->pixclock_freq = (u32)div_result;
+
+ mode->left_margin = videomode->left_margin;
+ mode->right_margin = videomode->right_margin;
+ mode->upper_margin = videomode->upper_margin;
+ mode->lower_margin = videomode->lower_margin;
+ mode->hsync_len = videomode->hsync_len;
+ mode->vsync_len = videomode->vsync_len;
+ mode->hsync_invert = !!(videomode->sync & FB_SYNC_HOR_HIGH_ACT);
+ mode->vsync_invert = !!(videomode->sync & FB_SYNC_VERT_HIGH_ACT);
+ /* fb defines no flag for this; reuse bit 3 of vmode */
+ mode->invert_pixclock = !!(videomode->vmode & 8);
+ mode->pix_fmt_out = output_fmt;
+}
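+
+/*
+ * Worked example for the pixclock conversion above (illustrative only):
+ * fb_videomode.pixclock is a period in picoseconds while mmp_mode wants a
+ * frequency in Hz, so 10^12 is divided by the period.  For a typical
+ * 1080p60 mode with a pixclock of about 6734 ps:
+ *
+ * 10^12 / 6734 ~= 148500000 Hz (148.5 MHz)
+ *
+ * mmpmode_to_fbmode() below applies the same division in the opposite
+ * direction, since period = 10^12 / frequency.
+ */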
+
+static void mmpmode_to_fbmode(struct fb_videomode *videomode,
+ struct mmp_mode *mode)
+{
+ u64 div_result = 1000000000000ll;
+
+ videomode->name = mode->name;
+ videomode->refresh = mode->refresh;
+ videomode->xres = mode->xres;
+ videomode->yres = mode->yres;
+
+ do_div(div_result, mode->pixclock_freq);
+ videomode->pixclock = (u32)div_result;
+
+ videomode->left_margin = mode->left_margin;
+ videomode->right_margin = mode->right_margin;
+ videomode->upper_margin = mode->upper_margin;
+ videomode->lower_margin = mode->lower_margin;
+ videomode->hsync_len = mode->hsync_len;
+ videomode->vsync_len = mode->vsync_len;
+ videomode->sync = (mode->hsync_invert ? FB_SYNC_HOR_HIGH_ACT : 0)
+ | (mode->vsync_invert ? FB_SYNC_VERT_HIGH_ACT : 0);
+ videomode->vmode = mode->invert_pixclock ? 8 : 0;
+}
+
+static int mmpfb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+
+ if (var->bits_per_pixel == 8)
+ return -EINVAL;
+ /*
+ * Basic geometry sanity checks.
+ */
+ if (var->xoffset + var->xres > var->xres_virtual)
+ return -EINVAL;
+ if (var->yoffset + var->yres > var->yres_virtual)
+ return -EINVAL;
+
+ /*
+ * Check size of framebuffer.
+ */
+ if (var->xres_virtual * var->yres_virtual *
+ (var->bits_per_pixel >> 3) > fbi->fb_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf)
+{
+ return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset;
+}
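+
+/*
+ * Example (illustrative): for the RGB565 red field (offset 11, length 5),
+ * a full-scale 16-bit channel value of 0xffff becomes
+ * ((0xffff >> (16 - 5)) << 11) = 0x1f << 11 = 0xf800,
+ * which is what mmpfb_setcolreg() below stores in the pseudo palette.
+ */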
+
+static u32 to_rgb(u16 red, u16 green, u16 blue)
+{
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+
+ return (red << 16) | (green << 8) | blue;
+}
+
+static int mmpfb_setcolreg(unsigned int regno, unsigned int red,
+ unsigned int green, unsigned int blue,
+ unsigned int trans, struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+ u32 val;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16) {
+ val = chan_to_field(red, &info->var.red);
+ val |= chan_to_field(green, &info->var.green);
+ val |= chan_to_field(blue , &info->var.blue);
+ fbi->pseudo_palette[regno] = val;
+ }
+
+ if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR && regno < 256) {
+ val = to_rgb(red, green, blue);
+ /* TODO */
+ }
+
+ return 0;
+}
+
+static int mmpfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+ struct mmp_addr addr;
+
+ memset(&addr, 0, sizeof(addr));
+ addr.phys[0] = (var->yoffset * var->xres_virtual + var->xoffset)
+ * var->bits_per_pixel / 8 + fbi->fb_start_dma;
+ mmp_overlay_set_addr(fbi->overlay, &addr);
+
+ return 0;
+}
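+
+/*
+ * Example (illustrative): with the double-buffered layout this driver sets
+ * up (yres_virtual = 2 * yres), panning a 1920x1080, 32 bpp framebuffer to
+ * the second buffer means var->yoffset = 1080, so
+ * (1080 * 1920 + 0) * 32 / 8 = 8294400 bytes
+ * is added to fb_start_dma and programmed as the new DMA start address.
+ */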
+
+static int var_update(struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ struct fb_videomode *m;
+ int pix_fmt;
+
+ /* set pix_fmt */
+ pix_fmt = var_to_pixfmt(var);
+ if (pix_fmt < 0)
+ return -EINVAL;
+ pixfmt_to_var(var, pix_fmt);
+ fbi->pix_fmt = pix_fmt;
+
+ /* set var according to best video mode*/
+ m = (struct fb_videomode *)fb_match_mode(var, &info->modelist);
+ if (!m) {
+ dev_err(fbi->dev, "set par: no match mode, use best mode\n");
+ m = (struct fb_videomode *)fb_find_best_mode(var,
+ &info->modelist);
+ fb_videomode_to_var(var, m);
+ }
+ memcpy(&fbi->mode, m, sizeof(struct fb_videomode));
+
+ /* fix to 2* yres */
+ var->yres_virtual = var->yres * 2;
+ info->fix.visual = (pix_fmt == PIXFMT_PSEUDOCOLOR) ?
+ FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
+ info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8;
+ info->fix.ypanstep = var->yres;
+ return 0;
+}
+
+static int mmpfb_set_par(struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ struct mmp_addr addr;
+ struct mmp_win win;
+ struct mmp_mode mode;
+ int ret;
+
+ ret = var_update(info);
+ if (ret != 0)
+ return ret;
+
+ /* set window/path according to new videomode */
+ fbmode_to_mmpmode(&mode, &fbi->mode, fbi->output_fmt);
+ mmp_path_set_mode(fbi->path, &mode);
+
+ memset(&win, 0, sizeof(win));
+ win.xsrc = win.xdst = fbi->mode.xres;
+ win.ysrc = win.ydst = fbi->mode.yres;
+ win.pix_fmt = fbi->pix_fmt;
+ mmp_overlay_set_win(fbi->overlay, &win);
+
+ /* set address always */
+ memset(&addr, 0, sizeof(addr));
+ addr.phys[0] = (var->yoffset * var->xres_virtual + var->xoffset)
+ * var->bits_per_pixel / 8 + fbi->fb_start_dma;
+ mmp_overlay_set_addr(fbi->overlay, &addr);
+
+ return 0;
+}
+
+static void mmpfb_power(struct mmpfb_info *fbi, int power)
+{
+ struct mmp_addr addr;
+ struct mmp_win win;
+ struct fb_var_screeninfo *var = &fbi->fb_info->var;
+
+ /* for power on, always set address/window again */
+ if (power) {
+ memset(&win, 0, sizeof(win));
+ win.xsrc = win.xdst = fbi->mode.xres;
+ win.ysrc = win.ydst = fbi->mode.yres;
+ win.pix_fmt = fbi->pix_fmt;
+ mmp_overlay_set_win(fbi->overlay, &win);
+
+ /* set address always */
+ memset(&addr, 0, sizeof(addr));
+ addr.phys[0] = fbi->fb_start_dma +
+ (var->yoffset * var->xres_virtual + var->xoffset)
+ * var->bits_per_pixel / 8;
+ mmp_overlay_set_addr(fbi->overlay, &addr);
+ }
+ mmp_overlay_set_onoff(fbi->overlay, power);
+}
+
+static int mmpfb_blank(int blank, struct fb_info *info)
+{
+ struct mmpfb_info *fbi = info->par;
+
+ mmpfb_power(fbi, (blank == FB_BLANK_UNBLANK));
+
+ return 0;
+}
+
+static struct fb_ops mmpfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_blank = mmpfb_blank,
+ .fb_check_var = mmpfb_check_var,
+ .fb_set_par = mmpfb_set_par,
+ .fb_setcolreg = mmpfb_setcolreg,
+ .fb_pan_display = mmpfb_pan_display,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+static int modes_setup(struct mmpfb_info *fbi)
+{
+ struct fb_videomode *videomodes;
+ struct mmp_mode *mmp_modes;
+ struct fb_info *info = fbi->fb_info;
+ int videomode_num, i;
+
+ /* get videomodes from path */
+ videomode_num = mmp_path_get_modelist(fbi->path, &mmp_modes);
+ if (!videomode_num) {
+ dev_warn(fbi->dev, "can't get videomode num\n");
+ return 0;
+ }
+ /* put videomode list to info structure */
+ videomodes = kzalloc(sizeof(struct fb_videomode) * videomode_num,
+ GFP_KERNEL);
+ if (!videomodes) {
+ dev_err(fbi->dev, "can't malloc video modes\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < videomode_num; i++)
+ mmpmode_to_fbmode(&videomodes[i], &mmp_modes[i]);
+ fb_videomode_to_modelist(videomodes, videomode_num, &info->modelist);
+
+ /* set videomode[0] as default mode */
+ memcpy(&fbi->mode, &videomodes[0], sizeof(struct fb_videomode));
+ fbi->output_fmt = mmp_modes[0].pix_fmt_out;
+ fb_videomode_to_var(&info->var, &fbi->mode);
+ mmp_path_set_mode(fbi->path, &mmp_modes[0]);
+
+ kfree(videomodes);
+ return videomode_num;
+}
+
+static int fb_info_setup(struct fb_info *info,
+ struct mmpfb_info *fbi)
+{
+ int ret = 0;
+ /* Initialise static fb parameters.*/
+ info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
+ FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
+ info->node = -1;
+ strcpy(info->fix.id, fbi->name);
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 0;
+ info->fix.ypanstep = info->var.yres;
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->fix.smem_start = fbi->fb_start_dma;
+ info->fix.smem_len = fbi->fb_size;
+ info->fix.visual = (fbi->pix_fmt == PIXFMT_PSEUDOCOLOR) ?
+ FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
+ info->fix.line_length = info->var.xres_virtual *
+ info->var.bits_per_pixel / 8;
+ info->fbops = &mmpfb_ops;
+ info->pseudo_palette = fbi->pseudo_palette;
+ info->screen_base = fbi->fb_start;
+ info->screen_size = fbi->fb_size;
+
+ /* For the FB framework: allocate the color map */
+ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
+ ret = -ENOMEM;
+
+ return ret;
+}
+
+static void fb_info_clear(struct fb_info *info)
+{
+ fb_dealloc_cmap(&info->cmap);
+}
+
+static int mmpfb_probe(struct platform_device *pdev)
+{
+ struct mmp_buffer_driver_mach_info *mi;
+ struct fb_info *info = NULL;
+ struct mmpfb_info *fbi = NULL;
+ int ret, modes_num;
+
+ mi = pdev->dev.platform_data;
+ if (mi == NULL) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return -EINVAL;
+ }
+
+ /* initialize fb */
+ info = framebuffer_alloc(sizeof(struct mmpfb_info), &pdev->dev);
+ if (info == NULL)
+ return -ENOMEM;
+ fbi = info->par;
+ if (!fbi) {
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ /* init fb */
+ fbi->fb_info = info;
+ platform_set_drvdata(pdev, fbi);
+ fbi->dev = &pdev->dev;
+ fbi->name = mi->name;
+ fbi->pix_fmt = mi->default_pixfmt;
+ pixfmt_to_var(&info->var, fbi->pix_fmt);
+ mutex_init(&fbi->access_ok);
+
+ /* get display path by name */
+ fbi->path = mmp_get_path(mi->path_name);
+ if (!fbi->path) {
+ dev_err(&pdev->dev, "can't get the path %s\n", mi->path_name);
+ ret = -EINVAL;
+ goto failed_destroy_mutex;
+ }
+
+ dev_info(fbi->dev, "path %s get\n", fbi->path->name);
+
+ /* get overlay */
+ fbi->overlay = mmp_path_get_overlay(fbi->path, mi->overlay_id);
+ if (!fbi->overlay) {
+ ret = -EINVAL;
+ goto failed_destroy_mutex;
+ }
+ /* set fetch used */
+ mmp_overlay_set_fetch(fbi->overlay, mi->dmafetch_id);
+
+ modes_num = modes_setup(fbi);
+ if (modes_num < 0) {
+ ret = modes_num;
+ goto failed_destroy_mutex;
+ }
+
+ /*
+ * If modes were found, the panel is not hotpluggable: use the
+ * calculated buffer size. Otherwise fall back to the default size.
+ */
+ if (modes_num > 0) {
+ /* fix to 2* yres */
+ info->var.yres_virtual = info->var.yres * 2;
+
+ /* Allocate framebuffer memory: size = modes xy *4 */
+ fbi->fb_size = info->var.xres_virtual * info->var.yres_virtual
+ * info->var.bits_per_pixel / 8;
+ } else {
+ fbi->fb_size = MMPFB_DEFAULT_SIZE;
+ }
+
+ fbi->fb_start = dma_alloc_coherent(&pdev->dev, PAGE_ALIGN(fbi->fb_size),
+ &fbi->fb_start_dma, GFP_KERNEL);
+ if (fbi->fb_start == NULL) {
+ dev_err(&pdev->dev, "can't alloc framebuffer\n");
+ ret = -ENOMEM;
+ goto failed_destroy_mutex;
+ }
+ memset(fbi->fb_start, 0, fbi->fb_size);
+ dev_info(fbi->dev, "fb %dk allocated\n", fbi->fb_size/1024);
+
+ /* fb power on */
+ if (modes_num > 0)
+ mmpfb_power(fbi, 1);
+
+ ret = fb_info_setup(info, fbi);
+ if (ret < 0)
+ goto failed_free_buff;
+
+ ret = register_framebuffer(info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register fb: %d\n", ret);
+ ret = -ENXIO;
+ goto failed_clear_info;
+ }
+
+ dev_info(fbi->dev, "loaded to /dev/fb%d <%s>.\n",
+ info->node, info->fix.id);
+
+#ifdef CONFIG_LOGO
+ if (fbi->fb_start) {
+ fb_prepare_logo(info, 0);
+ fb_show_logo(info, 0);
+ }
+#endif
+
+ return 0;
+
+failed_clear_info:
+ fb_info_clear(info);
+failed_free_buff:
+ dma_free_coherent(&pdev->dev, PAGE_ALIGN(fbi->fb_size), fbi->fb_start,
+ fbi->fb_start_dma);
+failed_destroy_mutex:
+ mutex_destroy(&fbi->access_ok);
+failed:
+ /* fbi may be NULL here, so report against the platform device */
+ dev_err(&pdev->dev, "mmp-fb: frame buffer device init failed\n");
+ platform_set_drvdata(pdev, NULL);
+
+ framebuffer_release(info);
+
+ return ret;
+}
+
+static struct platform_driver mmpfb_driver = {
+ .driver = {
+ .name = "mmp-fb",
+ .owner = THIS_MODULE,
+ },
+ .probe = mmpfb_probe,
+};
+
+static int mmpfb_init(void)
+{
+ return platform_driver_register(&mmpfb_driver);
+}
+module_init(mmpfb_init);
+
+MODULE_AUTHOR("Zhou Zhu <zhou.zhu@marvell.com>");
+MODULE_DESCRIPTION("Framebuffer driver for Marvell displays");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/mmp/fb/mmpfb.h b/drivers/video/mmp/fb/mmpfb.h
new file mode 100644
index 000000000000..88c23c10a9ec
--- /dev/null
+++ b/drivers/video/mmp/fb/mmpfb.h
@@ -0,0 +1,54 @@
+/*
+ * linux/drivers/video/mmp/fb/mmpfb.h
+ * Framebuffer driver for Marvell Display controller.
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef _MMP_FB_H_
+#define _MMP_FB_H_
+
+#include <video/mmp_disp.h>
+#include <linux/fb.h>
+
+/* LCD controller private state. */
+struct mmpfb_info {
+ struct device *dev;
+ int id;
+ const char *name;
+
+ struct fb_info *fb_info;
+ /* basically, the videomode is for output */
+ struct fb_videomode mode;
+ int pix_fmt;
+
+ void *fb_start;
+ int fb_size;
+ dma_addr_t fb_start_dma;
+
+ struct mmp_overlay *overlay;
+ struct mmp_path *path;
+
+ struct mutex access_ok;
+
+ unsigned int pseudo_palette[16];
+ int output_fmt;
+};
+
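+/* default: enough for a double-buffered 1920x1080 surface at 32 bpp */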
+#define MMPFB_DEFAULT_SIZE (PAGE_ALIGN(1920 * 1080 * 4 * 2))
+#endif /* _MMP_FB_H_ */
diff --git a/drivers/video/mmp/hw/Kconfig b/drivers/video/mmp/hw/Kconfig
new file mode 100644
index 000000000000..02f109a20cd0
--- /dev/null
+++ b/drivers/video/mmp/hw/Kconfig
@@ -0,0 +1,20 @@
+if MMP_DISP
+
+config MMP_DISP_CONTROLLER
+ bool "mmp display controller hw support"
+ depends on CPU_PXA910 || CPU_MMP2 || CPU_MMP3 || CPU_PXA988
+ default n
+ help
+ Marvell MMP display hw controller support.
+ This controller is used on the Marvell PXA910,
+ MMP2, MMP3 and PXA988 chips.
+
+config MMP_DISP_SPI
+ bool "mmp display controller spi port"
+ depends on MMP_DISP_CONTROLLER && SPI_MASTER
+ default y
+ help
+ Marvell MMP display hw controller SPI port support.
+ It registers as an SPI master for panel usage.
+
+endif
diff --git a/drivers/video/mmp/hw/Makefile b/drivers/video/mmp/hw/Makefile
new file mode 100644
index 000000000000..0000a714fedf
--- /dev/null
+++ b/drivers/video/mmp/hw/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MMP_DISP_CONTROLLER) += mmp_ctrl.o
+obj-$(CONFIG_MMP_DISP_SPI) += mmp_spi.o
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c
new file mode 100644
index 000000000000..4bd31b2af398
--- /dev/null
+++ b/drivers/video/mmp/hw/mmp_ctrl.c
@@ -0,0 +1,591 @@
+/*
+ * linux/drivers/video/mmp/hw/mmp_ctrl.c
+ * Marvell MMP series Display Controller support
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Guoqing Li <ligq@marvell.com>
+ * Lisa Du <cldu@marvell.com>
+ * Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/io.h>
+
+#include "mmp_ctrl.h"
+
+static irqreturn_t ctrl_handle_irq(int irq, void *dev_id)
+{
+ struct mmphw_ctrl *ctrl = (struct mmphw_ctrl *)dev_id;
+ u32 isr, imask, tmp;
+
+ isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
+ imask = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);
+
+ do {
+ /* clear the interrupt status bits that were seen */
+ tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
+ if (tmp & isr)
+ writel_relaxed(~isr, ctrl->reg_base + SPU_IRQ_ISR);
+ } while ((isr = readl(ctrl->reg_base + SPU_IRQ_ISR)) & imask);
+
+ return IRQ_HANDLED;
+}
+
+static u32 fmt_to_reg(struct mmp_overlay *overlay, int pix_fmt)
+{
+ u32 link_config = path_to_path_plat(overlay->path)->link_config;
+ u32 rbswap, uvswap = 0, yuvswap = 0,
+ csc_en = 0, val = 0,
+ vid = overlay_is_vid(overlay);
+
+ switch (pix_fmt) {
+ case PIXFMT_RGB565:
+ case PIXFMT_RGB1555:
+ case PIXFMT_RGB888PACK:
+ case PIXFMT_RGB888UNPACK:
+ case PIXFMT_RGBA888:
+ rbswap = !(link_config & 0x1);
+ break;
+ case PIXFMT_VYUY:
+ case PIXFMT_YVU422P:
+ case PIXFMT_YVU420P:
+ rbswap = link_config & 0x1;
+ uvswap = 1;
+ break;
+ case PIXFMT_YUYV:
+ rbswap = link_config & 0x1;
+ yuvswap = 1;
+ break;
+ default:
+ rbswap = link_config & 0x1;
+ break;
+ }
+
+ switch (pix_fmt) {
+ case PIXFMT_RGB565:
+ case PIXFMT_BGR565:
+ val = 0;
+ break;
+ case PIXFMT_RGB1555:
+ case PIXFMT_BGR1555:
+ val = 0x1;
+ break;
+ case PIXFMT_RGB888PACK:
+ case PIXFMT_BGR888PACK:
+ val = 0x2;
+ break;
+ case PIXFMT_RGB888UNPACK:
+ case PIXFMT_BGR888UNPACK:
+ val = 0x3;
+ break;
+ case PIXFMT_RGBA888:
+ case PIXFMT_BGRA888:
+ val = 0x4;
+ break;
+ case PIXFMT_UYVY:
+ case PIXFMT_VYUY:
+ case PIXFMT_YUYV:
+ val = 0x5;
+ csc_en = 1;
+ break;
+ case PIXFMT_YUV422P:
+ case PIXFMT_YVU422P:
+ val = 0x6;
+ csc_en = 1;
+ break;
+ case PIXFMT_YUV420P:
+ case PIXFMT_YVU420P:
+ val = 0x7;
+ csc_en = 1;
+ break;
+ default:
+ break;
+ }
+
+ return (dma_palette(0) | dma_fmt(vid, val) |
+ dma_swaprb(vid, rbswap) | dma_swapuv(vid, uvswap) |
+ dma_swapyuv(vid, yuvswap) | dma_csc(vid, csc_en));
+}
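+
+/*
+ * Example (illustrative): for the graphics overlay (vid = 0) with
+ * PIXFMT_RGB565 on a board whose link_config has bit 0 clear, the function
+ * above returns
+ * dma_fmt(0, 0x0) | dma_swaprb(0, 1) = (0x0 << 16) | (1 << 12) = 0x1000,
+ * i.e. only the graphics R/B swap bit of dma_ctrl0 is set.
+ */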
+
+static void dmafetch_set_fmt(struct mmp_overlay *overlay)
+{
+ u32 tmp;
+ struct mmp_path *path = overlay->path;
+ tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
+ tmp &= ~dma_mask(overlay_is_vid(overlay));
+ tmp |= fmt_to_reg(overlay, overlay->win.pix_fmt);
+ writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
+}
+
+static void overlay_set_win(struct mmp_overlay *overlay, struct mmp_win *win)
+{
+ struct lcd_regs *regs = path_regs(overlay->path);
+ u32 pitch;
+
+ /* assert win supported */
+ memcpy(&overlay->win, win, sizeof(struct mmp_win));
+
+ mutex_lock(&overlay->access_ok);
+ pitch = win->xsrc * pixfmt_to_stride(win->pix_fmt);
+ writel_relaxed(pitch, &regs->g_pitch);
+ writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
+ writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
+ writel_relaxed(0, &regs->g_start);
+
+ dmafetch_set_fmt(overlay);
+ mutex_unlock(&overlay->access_ok);
+}
+
+static void dmafetch_onoff(struct mmp_overlay *overlay, int on)
+{
+ u32 mask = overlay_is_vid(overlay) ? CFG_GRA_ENA_MASK :
+ CFG_DMA_ENA_MASK;
+ u32 enable = overlay_is_vid(overlay) ? CFG_GRA_ENA(1) : CFG_DMA_ENA(1);
+ u32 tmp;
+ struct mmp_path *path = overlay->path;
+
+ mutex_lock(&overlay->access_ok);
+ tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
+ tmp &= ~mask;
+ tmp |= (on ? enable : 0);
+ writel(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
+ mutex_unlock(&overlay->access_ok);
+}
+
+static void path_enabledisable(struct mmp_path *path, int on)
+{
+ u32 tmp;
+ mutex_lock(&path->access_ok);
+ tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
+ if (on)
+ tmp &= ~SCLK_DISABLE;
+ else
+ tmp |= SCLK_DISABLE;
+ writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
+ mutex_unlock(&path->access_ok);
+}
+
+static void path_onoff(struct mmp_path *path, int on)
+{
+ if (path->status == on) {
+ dev_info(path->dev, "path %s is already %s\n",
+ path->name, stat_name(path->status));
+ return;
+ }
+
+ if (on) {
+ path_enabledisable(path, 1);
+
+ if (path->panel && path->panel->set_onoff)
+ path->panel->set_onoff(path->panel, 1);
+ } else {
+ if (path->panel && path->panel->set_onoff)
+ path->panel->set_onoff(path->panel, 0);
+
+ path_enabledisable(path, 0);
+ }
+ path->status = on;
+}
+
+static void overlay_set_onoff(struct mmp_overlay *overlay, int on)
+{
+ if (overlay->status == on) {
+ dev_info(overlay_to_ctrl(overlay)->dev, "overlay %s is already %s\n",
+ overlay->path->name, stat_name(overlay->status));
+ return;
+ }
+ overlay->status = on;
+ dmafetch_onoff(overlay, on);
+ if (overlay->path->ops.check_status(overlay->path)
+ != overlay->path->status)
+ path_onoff(overlay->path, on);
+}
+
+static void overlay_set_fetch(struct mmp_overlay *overlay, int fetch_id)
+{
+ overlay->dmafetch_id = fetch_id;
+}
+
+static int overlay_set_addr(struct mmp_overlay *overlay, struct mmp_addr *addr)
+{
+ struct lcd_regs *regs = path_regs(overlay->path);
+
+ /* FIXME: assert addr supported */
+ memcpy(&overlay->addr, addr, sizeof(struct mmp_addr));
+ writel(addr->phys[0], &regs->g_0);
+
+ return overlay->addr.phys[0];
+}
+
+static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
+{
+ struct lcd_regs *regs = path_regs(path);
+ u32 total_x, total_y, vsync_ctrl, tmp, sclk_src, sclk_div,
+ link_config = path_to_path_plat(path)->link_config;
+
+ /* FIXME: assert videomode supported */
+ memcpy(&path->mode, mode, sizeof(struct mmp_mode));
+
+ mutex_lock(&path->access_ok);
+
+ /* polarity of timing signals */
+ tmp = readl_relaxed(ctrl_regs(path) + intf_ctrl(path->id)) & 0x1;
+ tmp |= mode->vsync_invert ? 0 : 0x8;
+ tmp |= mode->hsync_invert ? 0 : 0x4;
+ tmp |= link_config & CFG_DUMBMODE_MASK;
+ tmp |= CFG_DUMB_ENA(1);
+ writel_relaxed(tmp, ctrl_regs(path) + intf_ctrl(path->id));
+
+ writel_relaxed((mode->yres << 16) | mode->xres, &regs->screen_active);
+ writel_relaxed((mode->left_margin << 16) | mode->right_margin,
+ &regs->screen_h_porch);
+ writel_relaxed((mode->upper_margin << 16) | mode->lower_margin,
+ &regs->screen_v_porch);
+ total_x = mode->xres + mode->left_margin + mode->right_margin +
+ mode->hsync_len;
+ total_y = mode->yres + mode->upper_margin + mode->lower_margin +
+ mode->vsync_len;
+ writel_relaxed((total_y << 16) | total_x, &regs->screen_size);
+
+ /* vsync ctrl */
+ if (path->output_type == PATH_OUT_DSI)
+ vsync_ctrl = 0x01330133;
+ else
+ vsync_ctrl = ((mode->xres + mode->right_margin) << 16)
+ | (mode->xres + mode->right_margin);
+ writel_relaxed(vsync_ctrl, &regs->vsync_ctrl);
+
+ /* set pixclock div */
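+ /*
+ * The divider computed below is rounded up so that the generated pixel
+ * clock never exceeds the requested rate; e.g. (made-up rates) a 312 MHz
+ * source clock with a 148.5 MHz request gives 312 / 148.5 -> 2, and since
+ * 2 * 148.5 MHz < 312 MHz the divider is bumped to 3.
+ */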
+ sclk_src = clk_get_rate(path_to_ctrl(path)->clk);
+ sclk_div = sclk_src / mode->pixclock_freq;
+ if (sclk_div * mode->pixclock_freq < sclk_src)
+ sclk_div++;
+
+ dev_info(path->dev, "%s sclk_src %d sclk_div 0x%x pclk %d\n",
+ __func__, sclk_src, sclk_div, mode->pixclock_freq);
+
+ tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
+ tmp &= ~CLK_INT_DIV_MASK;
+ tmp |= sclk_div;
+ writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
+
+ mutex_unlock(&path->access_ok);
+}
+
+static struct mmp_overlay_ops mmphw_overlay_ops = {
+ .set_fetch = overlay_set_fetch,
+ .set_onoff = overlay_set_onoff,
+ .set_win = overlay_set_win,
+ .set_addr = overlay_set_addr,
+};
+
+static void ctrl_set_default(struct mmphw_ctrl *ctrl)
+{
+ u32 tmp, irq_mask;
+
+ /*
+ * The LCD global control register (LCD_TOP_CTRL) should be configured
+ * before any other LCD register is read or written, or there may be
+ * issues.
+ */
+ tmp = readl_relaxed(ctrl->reg_base + LCD_TOP_CTRL);
+ tmp |= 0xfff0;
+ writel_relaxed(tmp, ctrl->reg_base + LCD_TOP_CTRL);
+
+
+ /* disable all interrupts */
+ irq_mask = path_imasks(0) | err_imask(0) |
+ path_imasks(1) | err_imask(1);
+ tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);
+ tmp &= ~irq_mask;
+ tmp |= irq_mask;
+ writel_relaxed(tmp, ctrl->reg_base + SPU_IRQ_ENA);
+}
+
+static void path_set_default(struct mmp_path *path)
+{
+ struct lcd_regs *regs = path_regs(path);
+ u32 dma_ctrl1, mask, tmp, path_config;
+
+ path_config = path_to_path_plat(path)->path_config;
+
+ /* Configure IOPAD: should be parallel only */
+ if (PATH_OUT_PARALLEL == path->output_type) {
+ mask = CFG_IOPADMODE_MASK | CFG_BURST_MASK | CFG_BOUNDARY_MASK;
+ tmp = readl_relaxed(ctrl_regs(path) + SPU_IOPAD_CONTROL);
+ tmp &= ~mask;
+ tmp |= path_config;
+ writel_relaxed(tmp, ctrl_regs(path) + SPU_IOPAD_CONTROL);
+ }
+
+ /* Select path clock source */
+ tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
+ tmp &= ~SCLK_SRC_SEL_MASK;
+ tmp |= path_config;
+ writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
+
+ /*
+ * Configure default bits: vsync triggers DMA,
+ * power save enable, configure alpha registers to
+ * display 100% graphics, and set pixel command.
+ */
+ dma_ctrl1 = 0x2032ff81;
+
+ dma_ctrl1 |= CFG_VSYNC_INV_MASK;
+ writel_relaxed(dma_ctrl1, ctrl_regs(path) + dma_ctrl(1, path->id));
+
+ /* Configure default register values */
+ writel_relaxed(0x00000000, &regs->blank_color);
+ writel_relaxed(0x00000000, &regs->g_1);
+ writel_relaxed(0x00000000, &regs->g_start);
+
+ /*
+ * 1. enable multiple burst requests in the DMA AXI bus arbiter
+ * for faster reads if this is not the TV path;
+ * 2. enable the horizontal smooth filter;
+ */
+ if (PATH_PN == path->id) {
+ mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
+ | CFG_ARBFAST_ENA(1);
+ tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
+ tmp |= mask;
+ writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
+ } else if (PATH_TV == path->id) {
+ mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
+ | CFG_ARBFAST_ENA(1);
+ tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
+ tmp &= ~mask;
+ tmp |= CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK;
+ writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
+ }
+}
+
+static int path_init(struct mmphw_path_plat *path_plat,
+ struct mmp_mach_path_config *config)
+{
+ struct mmphw_ctrl *ctrl = path_plat->ctrl;
+ struct mmp_path_info *path_info;
+ struct mmp_path *path = NULL;
+
+ dev_info(ctrl->dev, "%s: %s\n", __func__, config->name);
+
+ /* init driver data */
+ path_info = kzalloc(sizeof(struct mmp_path_info), GFP_KERNEL);
+ if (!path_info) {
+ dev_err(ctrl->dev, "%s: unable to alloc path_info for %s\n",
+ __func__, config->name);
+ return 0;
+ }
+ path_info->name = config->name;
+ path_info->id = path_plat->id;
+ path_info->dev = ctrl->dev;
+ path_info->overlay_num = config->overlay_num;
+ path_info->overlay_ops = &mmphw_overlay_ops;
+ path_info->set_mode = path_set_mode;
+ path_info->plat_data = path_plat;
+
+ /* create and register the path */
+ path = mmp_register_path(path_info);
+ if (!path) {
+ kfree(path_info);
+ return 0;
+ }
+ path_plat->path = path;
+ path_plat->path_config = config->path_config;
+ path_plat->link_config = config->link_config;
+ path_set_default(path);
+
+ kfree(path_info);
+ return 1;
+}
+
+static void path_deinit(struct mmphw_path_plat *path_plat)
+{
+ if (!path_plat)
+ return;
+
+ if (path_plat->path)
+ mmp_unregister_path(path_plat->path);
+}
+
+static int mmphw_probe(struct platform_device *pdev)
+{
+ struct mmp_mach_plat_info *mi;
+ struct resource *res;
+ int ret, i, size, irq;
+ struct mmphw_path_plat *path_plat;
+ struct mmphw_ctrl *ctrl = NULL;
+
+ /* get resources from platform data */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "%s: no IO memory defined\n", __func__);
+ ret = -ENOENT;
+ goto failed;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "%s: no IRQ defined\n", __func__);
+ ret = -ENOENT;
+ goto failed;
+ }
+
+ /* get configs from platform data */
+ mi = pdev->dev.platform_data;
+ if (mi == NULL || !mi->path_num || !mi->paths) {
+ dev_err(&pdev->dev, "%s: no platform data defined\n", __func__);
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ /* allocate */
+ size = sizeof(struct mmphw_ctrl) + sizeof(struct mmphw_path_plat) *
+ mi->path_num;
+ ctrl = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!ctrl) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ ctrl->name = mi->name;
+ ctrl->path_num = mi->path_num;
+ ctrl->dev = &pdev->dev;
+ ctrl->irq = irq;
+ platform_set_drvdata(pdev, ctrl);
+ mutex_init(&ctrl->access_ok);
+
+ /* map registers.*/
+ if (!devm_request_mem_region(ctrl->dev, res->start,
+ resource_size(res), ctrl->name)) {
+ dev_err(ctrl->dev,
+ "can't request region for resource %pR\n", res);
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
+ res->start, resource_size(res));
+ if (ctrl->reg_base == NULL) {
+ dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__,
+ res->start, res->end);
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ /* request irq */
+ ret = devm_request_irq(ctrl->dev, ctrl->irq, ctrl_handle_irq,
+ IRQF_SHARED, "lcd_controller", ctrl);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "%s unable to request IRQ %d\n",
+ __func__, ctrl->irq);
+ ret = -ENXIO;
+ goto failed;
+ }
+
+ /* get clock */
+ ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
+ if (IS_ERR(ctrl->clk)) {
+ dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
+ ret = -ENOENT;
+ goto failed_get_clk;
+ }
+ clk_prepare_enable(ctrl->clk);
+
+ /* init global regs */
+ ctrl_set_default(ctrl);
+
+ /* init paths from machine info and register them */
+ for (i = 0; i < ctrl->path_num; i++) {
+ /* get from config and machine info */
+ path_plat = &ctrl->path_plats[i];
+ path_plat->id = i;
+ path_plat->ctrl = ctrl;
+
+ /* path init */
+ if (!path_init(path_plat, &mi->paths[i])) {
+ ret = -EINVAL;
+ goto failed_path_init;
+ }
+ }
+
+#ifdef CONFIG_MMP_DISP_SPI
+ ret = lcd_spi_register(ctrl);
+ if (ret < 0)
+ goto failed_path_init;
+#endif
+
+ dev_info(ctrl->dev, "device init done\n");
+
+ return 0;
+
+failed_path_init:
+ for (i = 0; i < ctrl->path_num; i++) {
+ path_plat = &ctrl->path_plats[i];
+ path_deinit(path_plat);
+ }
+
+ if (ctrl->clk) {
+ clk_disable_unprepare(ctrl->clk);
+ devm_clk_put(ctrl->dev, ctrl->clk);
+ }
+failed_get_clk:
+ devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
+failed:
+ if (ctrl) {
+ if (ctrl->reg_base)
+ devm_iounmap(ctrl->dev, ctrl->reg_base);
+ devm_release_mem_region(ctrl->dev, res->start,
+ resource_size(res));
+ devm_kfree(ctrl->dev, ctrl);
+ }
+
+ platform_set_drvdata(pdev, NULL);
+ dev_err(&pdev->dev, "device init failed\n");
+
+ return ret;
+}
+
+static struct platform_driver mmphw_driver = {
+ .driver = {
+ .name = "mmp-disp",
+ .owner = THIS_MODULE,
+ },
+ .probe = mmphw_probe,
+};
+
+static int mmphw_init(void)
+{
+ return platform_driver_register(&mmphw_driver);
+}
+module_init(mmphw_init);
+
+MODULE_AUTHOR("Li Guoqing<ligq@marvell.com>");
+MODULE_DESCRIPTION("Framebuffer driver for mmp");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/mmp/hw/mmp_ctrl.h b/drivers/video/mmp/hw/mmp_ctrl.h
new file mode 100644
index 000000000000..6408d8ef3abb
--- /dev/null
+++ b/drivers/video/mmp/hw/mmp_ctrl.h
@@ -0,0 +1,1974 @@
+/*
+ * drivers/video/mmp/hw/mmp_ctrl.h
+ *
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Guoqing Li <ligq@marvell.com>
+ * Lisa Du <cldu@marvell.com>
+ * Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef _MMP_CTRL_H_
+#define _MMP_CTRL_H_
+
+#include <video/mmp_disp.h>
+
+/* ------------< LCD register >------------ */
+struct lcd_regs {
+/* TV path registers for MMP2 */
+/* 32 bit TV Video Frame0 Y Starting Address */
+#define LCD_TVD_START_ADDR_Y0 (0x0000)
+/* 32 bit TV Video Frame0 U Starting Address */
+#define LCD_TVD_START_ADDR_U0 (0x0004)
+/* 32 bit TV Video Frame0 V Starting Address */
+#define LCD_TVD_START_ADDR_V0 (0x0008)
+/* 32 bit TV Video Frame0 Command Starting Address */
+#define LCD_TVD_START_ADDR_C0 (0x000C)
+/* 32 bit TV Video Frame1 Y Starting Address Register*/
+#define LCD_TVD_START_ADDR_Y1 (0x0010)
+/* 32 bit TV Video Frame1 U Starting Address Register*/
+#define LCD_TVD_START_ADDR_U1 (0x0014)
+/* 32 bit TV Video Frame1 V Starting Address Register*/
+#define LCD_TVD_START_ADDR_V1 (0x0018)
+/* 32 bit TV Video Frame1 Command Starting Address Register*/
+#define LCD_TVD_START_ADDR_C1 (0x001C)
+/* 32 bit TV Video Y and C Line Length (Pitch) Register*/
+#define LCD_TVD_PITCH_YC (0x0020)
+/* 32 bit TV Video U and V Line Length (Pitch) Register*/
+#define LCD_TVD_PITCH_UV (0x0024)
+/* 32 bit TV Video Starting Point on Screen Register*/
+#define LCD_TVD_OVSA_HPXL_VLN (0x0028)
+/* 32 bit TV Video Source Size Register*/
+#define LCD_TVD_HPXL_VLN (0x002C)
+/* 32 bit TV Video Destination Size (After Zooming)Register*/
+#define LCD_TVDZM_HPXL_VLN (0x0030)
+ u32 v_y0;
+ u32 v_u0;
+ u32 v_v0;
+ u32 v_c0;
+ u32 v_y1;
+ u32 v_u1;
+ u32 v_v1;
+ u32 v_c1;
+ u32 v_pitch_yc; /* Video Y and C Line Length (Pitch) */
+ u32 v_pitch_uv; /* Video U and V Line Length (Pitch) */
+ u32 v_start; /* Video Starting Point on Screen */
+ u32 v_size; /* Video Source Size */
+ u32 v_size_z; /* Video Destination Size (After Zooming) */
+
+/* 32 bit TV Graphic Frame 0 Starting Address Register*/
+#define LCD_TVG_START_ADDR0 (0x0034)
+/* 32 bit TV Graphic Frame 1 Starting Address Register*/
+#define LCD_TVG_START_ADDR1 (0x0038)
+/* 32 bit TV Graphic Line Length(Pitch)Register*/
+#define LCD_TVG_PITCH (0x003C)
+/* 32 bit TV Graphic Starting Point on Screen Register*/
+#define LCD_TVG_OVSA_HPXL_VLN (0x0040)
+/* 32 bit TV Graphic Source Size Register*/
+#define LCD_TVG_HPXL_VLN (0x0044)
+/* 32 bit TV Graphic Destination size (after Zooming)Register*/
+#define LCD_TVGZM_HPXL_VLN (0x0048)
+ u32 g_0; /* Graphic Frame 0/1 Starting Address */
+ u32 g_1;
+ u32 g_pitch; /* Graphic Line Length (Pitch) */
+ u32 g_start; /* Graphic Starting Point on Screen */
+ u32 g_size; /* Graphic Source Size */
+ u32 g_size_z; /* Graphic Destination Size (After Zooming) */
+
+/* 32 bit TV Hardware Cursor Starting Point on screen Register*/
+#define LCD_TVC_OVSA_HPXL_VLN (0x004C)
+/* 32 bit TV Hardware Cursor Size Register */
+#define LCD_TVC_HPXL_VLN (0x0050)
+ u32 hc_start; /* Hardware Cursor */
+ u32 hc_size; /* Hardware Cursor */
+
+/* 32 bit TV Total Screen Size Register*/
+#define LCD_TV_V_H_TOTAL (0x0054)
+/* 32 bit TV Screen Active Size Register*/
+#define LCD_TV_V_H_ACTIVE (0x0058)
+/* 32 bit TV Screen Horizontal Porch Register*/
+#define LCD_TV_H_PORCH (0x005C)
+/* 32 bit TV Screen Vertical Porch Register*/
+#define LCD_TV_V_PORCH (0x0060)
+ u32 screen_size; /* Screen Total Size */
+ u32 screen_active; /* Screen Active Size */
+ u32 screen_h_porch; /* Screen Horizontal Porch */
+ u32 screen_v_porch; /* Screen Vertical Porch */
+
+/* 32 bit TV Screen Blank Color Register*/
+#define LCD_TV_BLANKCOLOR (0x0064)
+/* 32 bit TV Hardware Cursor Color1 Register*/
+#define LCD_TV_ALPHA_COLOR1 (0x0068)
+/* 32 bit TV Hardware Cursor Color2 Register*/
+#define LCD_TV_ALPHA_COLOR2 (0x006C)
+ u32 blank_color; /* Screen Blank Color */
+ u32 hc_Alpha_color1; /* Hardware Cursor Color1 */
+ u32 hc_Alpha_color2; /* Hardware Cursor Color2 */
+
+/* 32 bit TV Video Y Color Key Control*/
+#define LCD_TV_COLORKEY_Y (0x0070)
+/* 32 bit TV Video U Color Key Control*/
+#define LCD_TV_COLORKEY_U (0x0074)
+/* 32 bit TV Video V Color Key Control*/
+#define LCD_TV_COLORKEY_V (0x0078)
+ u32 v_colorkey_y; /* Video Y Color Key Control */
+ u32 v_colorkey_u; /* Video U Color Key Control */
+ u32 v_colorkey_v; /* Video V Color Key Control */
+
+/* 32 bit TV VSYNC PulsePixel Edge Control Register*/
+#define LCD_TV_SEPXLCNT (0x007C)
+ u32 vsync_ctrl; /* VSYNC PulsePixel Edge Control */
+};
+
+#define intf_ctrl(id) ((id) ? (((id) & 1) ? LCD_TVIF_CTRL : \
+ LCD_DUMB2_CTRL) : LCD_SPU_DUMB_CTRL)
+#define dma_ctrl0(id) ((id) ? (((id) & 1) ? LCD_TV_CTRL0 : \
+ LCD_PN2_CTRL0) : LCD_SPU_DMA_CTRL0)
+#define dma_ctrl1(id) ((id) ? (((id) & 1) ? LCD_TV_CTRL1 : \
+ LCD_PN2_CTRL1) : LCD_SPU_DMA_CTRL1)
+#define dma_ctrl(ctrl1, id) (ctrl1 ? dma_ctrl1(id) : dma_ctrl0(id))
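+
+/*
+ * Example (illustrative): dma_ctrl(0, id) resolves to LCD_SPU_DMA_CTRL0
+ * (0x0190) for path id 0, LCD_TV_CTRL0 (0x0080) for path id 1 and
+ * LCD_PN2_CTRL0 for path id 2, so the overlay code can use one accessor
+ * for every path.
+ */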
+
+/* 32 bit TV Path DMA Control 0*/
+#define LCD_TV_CTRL0 (0x0080)
+/* 32 bit TV Path DMA Control 1*/
+#define LCD_TV_CTRL1 (0x0084)
+/* 32 bit TV Path Video Contrast*/
+#define LCD_TV_CONTRAST (0x0088)
+/* 32 bit TV Path Video Saturation*/
+#define LCD_TV_SATURATION (0x008C)
+/* 32 bit TV Path Video Hue Adjust*/
+#define LCD_TV_CBSH_HUE (0x0090)
+/* 32 bit TV Path TVIF Control Register */
+#define LCD_TVIF_CTRL (0x0094)
+#define TV_VBLNK_VALID_EN (1 << 12)
+
+/* 32 bit TV Path I/O Pad Control*/
+#define LCD_TVIOPAD_CTRL (0x0098)
+/* 32 bit TV Path Clock Divider */
+#define LCD_TCLK_DIV (0x009C)
+
+#define LCD_SCLK(path) ((PATH_PN == path->id) ? LCD_CFG_SCLK_DIV :\
+ ((PATH_TV == path->id) ? LCD_TCLK_DIV : LCD_PN2_SCLK_DIV))
+
+/* dither configure */
+#ifdef CONFIG_CPU_PXA988
+#define LCD_DITHER_CTRL (0x01EC)
+#else
+#define LCD_DITHER_CTRL (0x00A0)
+#endif
+
+#define DITHER_TBL_INDEX_SEL(s) ((s) << 16)
+#define DITHER_MODE2(m) ((m) << 12)
+#define DITHER_MODE2_SHIFT (12)
+#define DITHER_4X8_EN2 (1 << 9)
+#define DITHER_4X8_EN2_SHIFT (9)
+#define DITHER_EN2 (1 << 8)
+#define DITHER_MODE1(m) ((m) << 4)
+#define DITHER_MODE1_SHIFT (4)
+#define DITHER_4X8_EN1 (1 << 1)
+#define DITHER_4X8_EN1_SHIFT (1)
+#define DITHER_EN1 (1)
+
+/* dither table data is fixed by the input and output video bpp */
+#ifdef CONFIG_CPU_PXA988
+#define DITHER_TB_4X4_INDEX0 (0x6e4ca280)
+#define DITHER_TB_4X4_INDEX1 (0x5d7f91b3)
+#define DITHER_TB_4X8_INDEX0 (0xb391a280)
+#define DITHER_TB_4X8_INDEX1 (0x7f5d6e4c)
+#define DITHER_TB_4X8_INDEX2 (0x80a291b3)
+#define DITHER_TB_4X8_INDEX3 (0x4c6e5d7f)
+#define LCD_DITHER_TBL_DATA (0x01F0)
+#else
+#define DITHER_TB_4X4_INDEX0 (0x3b19f7d5)
+#define DITHER_TB_4X4_INDEX1 (0x082ac4e6)
+#define DITHER_TB_4X8_INDEX0 (0xf7d508e6)
+#define DITHER_TB_4X8_INDEX1 (0x3b194c2a)
+#define DITHER_TB_4X8_INDEX2 (0xc4e6d5f7)
+#define DITHER_TB_4X8_INDEX3 (0x082a193b)
+#define LCD_DITHER_TBL_DATA (0x00A4)
+#endif
+
+/* Video Frame 0&1 start address registers */
+#define LCD_SPU_DMA_START_ADDR_Y0 0x00C0
+#define LCD_SPU_DMA_START_ADDR_U0 0x00C4
+#define LCD_SPU_DMA_START_ADDR_V0 0x00C8
+#define LCD_CFG_DMA_START_ADDR_0 0x00CC /* Cmd address */
+#define LCD_SPU_DMA_START_ADDR_Y1 0x00D0
+#define LCD_SPU_DMA_START_ADDR_U1 0x00D4
+#define LCD_SPU_DMA_START_ADDR_V1 0x00D8
+#define LCD_CFG_DMA_START_ADDR_1 0x00DC /* Cmd address */
+
+/* YC & UV Pitch */
+#define LCD_SPU_DMA_PITCH_YC 0x00E0
+#define SPU_DMA_PITCH_C(c) ((c)<<16)
+#define SPU_DMA_PITCH_Y(y) (y)
+#define LCD_SPU_DMA_PITCH_UV 0x00E4
+#define SPU_DMA_PITCH_V(v) ((v)<<16)
+#define SPU_DMA_PITCH_U(u) (u)
+
+/* Video Starting Point on Screen Register */
+#define LCD_SPUT_DMA_OVSA_HPXL_VLN 0x00E8
+#define CFG_DMA_OVSA_VLN(y) ((y)<<16) /* 0~0xfff */
+#define CFG_DMA_OVSA_HPXL(x) (x) /* 0~0xfff */
+
+/* Video Size Register */
+#define LCD_SPU_DMA_HPXL_VLN 0x00EC
+#define CFG_DMA_VLN(y) ((y)<<16)
+#define CFG_DMA_HPXL(x) (x)
+
+/* Video Size After zooming Register */
+#define LCD_SPU_DZM_HPXL_VLN 0x00F0
+#define CFG_DZM_VLN(y) ((y)<<16)
+#define CFG_DZM_HPXL(x) (x)
+
+/* Graphic Frame 0&1 Starting Address Register */
+#define LCD_CFG_GRA_START_ADDR0 0x00F4
+#define LCD_CFG_GRA_START_ADDR1 0x00F8
+
+/* Graphic Frame Pitch */
+#define LCD_CFG_GRA_PITCH 0x00FC
+
+/* Graphic Starting Point on Screen Register */
+#define LCD_SPU_GRA_OVSA_HPXL_VLN 0x0100
+#define CFG_GRA_OVSA_VLN(y) ((y)<<16)
+#define CFG_GRA_OVSA_HPXL(x) (x)
+
+/* Graphic Size Register */
+#define LCD_SPU_GRA_HPXL_VLN 0x0104
+#define CFG_GRA_VLN(y) ((y)<<16)
+#define CFG_GRA_HPXL(x) (x)
+
+/* Graphic Size after Zooming Register */
+#define LCD_SPU_GZM_HPXL_VLN 0x0108
+#define CFG_GZM_VLN(y) ((y)<<16)
+#define CFG_GZM_HPXL(x) (x)
+
+/* HW Cursor Starting Point on Screen Register */
+#define LCD_SPU_HWC_OVSA_HPXL_VLN 0x010C
+#define CFG_HWC_OVSA_VLN(y) ((y)<<16)
+#define CFG_HWC_OVSA_HPXL(x) (x)
+
+/* HW Cursor Size */
+#define LCD_SPU_HWC_HPXL_VLN 0x0110
+#define CFG_HWC_VLN(y) ((y)<<16)
+#define CFG_HWC_HPXL(x) (x)
+
+/* Total Screen Size Register */
+#define LCD_SPUT_V_H_TOTAL 0x0114
+#define CFG_V_TOTAL(y) ((y)<<16)
+#define CFG_H_TOTAL(x) (x)
+
+/* Total Screen Active Size Register */
+#define LCD_SPU_V_H_ACTIVE 0x0118
+#define CFG_V_ACTIVE(y) ((y)<<16)
+#define CFG_H_ACTIVE(x) (x)
+
+/* Screen H&V Porch Register */
+#define LCD_SPU_H_PORCH 0x011C
+#define CFG_H_BACK_PORCH(b) ((b)<<16)
+#define CFG_H_FRONT_PORCH(f) (f)
+#define LCD_SPU_V_PORCH 0x0120
+#define CFG_V_BACK_PORCH(b) ((b)<<16)
+#define CFG_V_FRONT_PORCH(f) (f)
+
+/* Screen Blank Color Register */
+#define LCD_SPU_BLANKCOLOR 0x0124
+#define CFG_BLANKCOLOR_MASK 0x00FFFFFF
+#define CFG_BLANKCOLOR_R_MASK 0x000000FF
+#define CFG_BLANKCOLOR_G_MASK 0x0000FF00
+#define CFG_BLANKCOLOR_B_MASK 0x00FF0000
+
+/* HW Cursor Color 1&2 Register */
+#define LCD_SPU_ALPHA_COLOR1 0x0128
+#define CFG_HWC_COLOR1 0x00FFFFFF
+#define CFG_HWC_COLOR1_R(red) ((red)<<16)
+#define CFG_HWC_COLOR1_G(green) ((green)<<8)
+#define CFG_HWC_COLOR1_B(blue) (blue)
+#define CFG_HWC_COLOR1_R_MASK 0x000000FF
+#define CFG_HWC_COLOR1_G_MASK 0x0000FF00
+#define CFG_HWC_COLOR1_B_MASK 0x00FF0000
+#define LCD_SPU_ALPHA_COLOR2 0x012C
+#define CFG_HWC_COLOR2 0x00FFFFFF
+#define CFG_HWC_COLOR2_R_MASK 0x000000FF
+#define CFG_HWC_COLOR2_G_MASK 0x0000FF00
+#define CFG_HWC_COLOR2_B_MASK 0x00FF0000
+
+/* Video YUV Color Key Control */
+#define LCD_SPU_COLORKEY_Y 0x0130
+#define CFG_CKEY_Y2(y2) ((y2)<<24)
+#define CFG_CKEY_Y2_MASK 0xFF000000
+#define CFG_CKEY_Y1(y1) ((y1)<<16)
+#define CFG_CKEY_Y1_MASK 0x00FF0000
+#define CFG_CKEY_Y(y) ((y)<<8)
+#define CFG_CKEY_Y_MASK 0x0000FF00
+#define CFG_ALPHA_Y(y) (y)
+#define CFG_ALPHA_Y_MASK 0x000000FF
+#define LCD_SPU_COLORKEY_U 0x0134
+#define CFG_CKEY_U2(u2) ((u2)<<24)
+#define CFG_CKEY_U2_MASK 0xFF000000
+#define CFG_CKEY_U1(u1) ((u1)<<16)
+#define CFG_CKEY_U1_MASK 0x00FF0000
+#define CFG_CKEY_U(u) ((u)<<8)
+#define CFG_CKEY_U_MASK 0x0000FF00
+#define CFG_ALPHA_U(u) (u)
+#define CFG_ALPHA_U_MASK 0x000000FF
+#define LCD_SPU_COLORKEY_V 0x0138
+#define CFG_CKEY_V2(v2) ((v2)<<24)
+#define CFG_CKEY_V2_MASK 0xFF000000
+#define CFG_CKEY_V1(v1) ((v1)<<16)
+#define CFG_CKEY_V1_MASK 0x00FF0000
+#define CFG_CKEY_V(v) ((v)<<8)
+#define CFG_CKEY_V_MASK 0x0000FF00
+#define CFG_ALPHA_V(v) (v)
+#define CFG_ALPHA_V_MASK 0x000000FF
+
+/* Graphics/Video DMA color key enable bits in LCD_TV_CTRL1 */
+#define CFG_CKEY_GRA 0x2
+#define CFG_CKEY_DMA 0x1
+
+/* Interlace mode enable bits in LCD_TV_CTRL1 */
+#define CFG_TV_INTERLACE_EN (1 << 22)
+#define CFG_TV_NIB (1 << 0)
+
+#define LCD_PN_SEPXLCNT 0x013c /* MMP2 */
+
+/* SPI Read Data Register */
+#define LCD_SPU_SPI_RXDATA 0x0140
+
+/* Smart Panel Read Data Register */
+#define LCD_SPU_ISA_RSDATA 0x0144
+#define ISA_RXDATA_16BIT_1_DATA_MASK 0x000000FF
+#define ISA_RXDATA_16BIT_2_DATA_MASK 0x0000FF00
+#define ISA_RXDATA_16BIT_3_DATA_MASK 0x00FF0000
+#define ISA_RXDATA_16BIT_4_DATA_MASK 0xFF000000
+#define ISA_RXDATA_32BIT_1_DATA_MASK 0x00FFFFFF
+
+#define LCD_SPU_DBG_ISA (0x0148) /* TTC */
+#define LCD_SPU_DMAVLD_YC (0x014C)
+#define LCD_SPU_DMAVLD_UV (0x0150)
+#define LCD_SPU_DMAVLD_UVSPU_GRAVLD (0x0154)
+
+#define LCD_READ_IOPAD (0x0148) /* MMP2*/
+#define LCD_DMAVLD_YC (0x014C)
+#define LCD_DMAVLD_UV (0x0150)
+#define LCD_TVGGRAVLD_HLEN (0x0154)
+
+/* HWC SRAM Read Data Register */
+#define LCD_SPU_HWC_RDDAT 0x0158
+
+/* Gamma Table SRAM Read Data Register */
+#define LCD_SPU_GAMMA_RDDAT 0x015c
+#define CFG_GAMMA_RDDAT_MASK 0x000000FF
+
+/* Palette Table SRAM Read Data Register */
+#define LCD_SPU_PALETTE_RDDAT 0x0160
+#define CFG_PALETTE_RDDAT_MASK 0x00FFFFFF
+
+#define LCD_SPU_DBG_DMATOP (0x0164) /* TTC */
+#define LCD_SPU_DBG_GRATOP (0x0168)
+#define LCD_SPU_DBG_TXCTRL (0x016C)
+#define LCD_SPU_DBG_SLVTOP (0x0170)
+#define LCD_SPU_DBG_MUXTOP (0x0174)
+
+#define LCD_SLV_DBG (0x0164) /* MMP2 */
+#define LCD_TVDVLD_YC (0x0168)
+#define LCD_TVDVLD_UV (0x016C)
+#define LCD_TVC_RDDAT (0x0170)
+#define LCD_TV_GAMMA_RDDAT (0x0174)
+
+/* I/O Pads Input Read Only Register */
+#define LCD_SPU_IOPAD_IN 0x0178
+#define CFG_IOPAD_IN_MASK 0x0FFFFFFF
+
+#define LCD_TV_PALETTE_RDDAT (0x0178) /* MMP2 */
+
+/* Reserved Read Only Registers */
+#define LCD_CFG_RDREG5F 0x017C
+#define IRE_FRAME_CNT_MASK 0x000000C0
+#define IPE_FRAME_CNT_MASK 0x00000030
+#define GRA_FRAME_CNT_MASK 0x0000000C /* Graphic */
+#define DMA_FRAME_CNT_MASK 0x00000003 /* Video */
+
+#define LCD_FRAME_CNT (0x017C) /* MMP2 */
+
+/* SPI Control Register. */
+#define LCD_SPU_SPI_CTRL 0x0180
+#define CFG_SCLKCNT(div) ((div)<<24) /* 0xFF~0x2 */
+#define CFG_SCLKCNT_MASK 0xFF000000
+#define CFG_RXBITS(rx) (((rx) - 1)<<16) /* 0x1F~0x1 */
+#define CFG_RXBITS_MASK 0x00FF0000
+#define CFG_TXBITS(tx) (((tx) - 1)<<8) /* 0x1F~0x1 */
+#define CFG_TXBITS_MASK 0x0000FF00
+#define CFG_CLKINV(clk) ((clk)<<7)
+#define CFG_CLKINV_MASK 0x00000080
+#define CFG_KEEPXFER(transfer) ((transfer)<<6)
+#define CFG_KEEPXFER_MASK 0x00000040
+#define CFG_RXBITSTO0(rx) ((rx)<<5)
+#define CFG_RXBITSTO0_MASK 0x00000020
+#define CFG_TXBITSTO0(tx) ((tx)<<4)
+#define CFG_TXBITSTO0_MASK 0x00000010
+#define CFG_SPI_ENA(spi) ((spi)<<3)
+#define CFG_SPI_ENA_MASK 0x00000008
+#define CFG_SPI_SEL(spi) ((spi)<<2)
+#define CFG_SPI_SEL_MASK 0x00000004
+#define CFG_SPI_3W4WB(wire) ((wire)<<1)
+#define CFG_SPI_3W4WB_MASK 0x00000002
+#define CFG_SPI_START(start) (start)
+#define CFG_SPI_START_MASK 0x00000001
+
+/* SPI Tx Data Register */
+#define LCD_SPU_SPI_TXDATA 0x0184
+
+/*
+ 1. Smart Panel 8-bit Bus Control Register.
+ 2. AHB Slave Path Data Port Register
+*/
+#define LCD_SPU_SMPN_CTRL 0x0188
+
+/* DMA Control 0 Register */
+#define LCD_SPU_DMA_CTRL0 0x0190
+#define CFG_NOBLENDING(nb) ((nb)<<31)
+#define CFG_NOBLENDING_MASK 0x80000000
+#define CFG_GAMMA_ENA(gn) ((gn)<<30)
+#define CFG_GAMMA_ENA_MASK 0x40000000
+#define CFG_CBSH_ENA(cn) ((cn)<<29)
+#define CFG_CBSH_ENA_MASK 0x20000000
+#define CFG_PALETTE_ENA(pn) ((pn)<<28)
+#define CFG_PALETTE_ENA_MASK 0x10000000
+#define CFG_ARBFAST_ENA(an) ((an)<<27)
+#define CFG_ARBFAST_ENA_MASK 0x08000000
+#define CFG_HWC_1BITMOD(mode) ((mode)<<26)
+#define CFG_HWC_1BITMOD_MASK 0x04000000
+#define CFG_HWC_1BITENA(mn) ((mn)<<25)
+#define CFG_HWC_1BITENA_MASK 0x02000000
+#define CFG_HWC_ENA(cn) ((cn)<<24)
+#define CFG_HWC_ENA_MASK 0x01000000
+#define CFG_DMAFORMAT(dmaformat) ((dmaformat)<<20)
+#define CFG_DMAFORMAT_MASK 0x00F00000
+#define CFG_GRAFORMAT(graformat) ((graformat)<<16)
+#define CFG_GRAFORMAT_MASK 0x000F0000
+/* for graphic part */
+#define CFG_GRA_FTOGGLE(toggle) ((toggle)<<15)
+#define CFG_GRA_FTOGGLE_MASK 0x00008000
+#define CFG_GRA_HSMOOTH(smooth) ((smooth)<<14)
+#define CFG_GRA_HSMOOTH_MASK 0x00004000
+#define CFG_GRA_TSTMODE(test) ((test)<<13)
+#define CFG_GRA_TSTMODE_MASK 0x00002000
+#define CFG_GRA_SWAPRB(swap) ((swap)<<12)
+#define CFG_GRA_SWAPRB_MASK 0x00001000
+#define CFG_GRA_SWAPUV(swap) ((swap)<<11)
+#define CFG_GRA_SWAPUV_MASK 0x00000800
+#define CFG_GRA_SWAPYU(swap) ((swap)<<10)
+#define CFG_GRA_SWAPYU_MASK 0x00000400
+#define CFG_GRA_SWAP_MASK 0x00001C00
+#define CFG_YUV2RGB_GRA(cvrt) ((cvrt)<<9)
+#define CFG_YUV2RGB_GRA_MASK 0x00000200
+#define CFG_GRA_ENA(gra) ((gra)<<8)
+#define CFG_GRA_ENA_MASK 0x00000100
+#define dma0_gfx_masks (CFG_GRAFORMAT_MASK | CFG_GRA_FTOGGLE_MASK | \
+ CFG_GRA_HSMOOTH_MASK | CFG_GRA_TSTMODE_MASK | CFG_GRA_SWAP_MASK | \
+ CFG_YUV2RGB_GRA_MASK | CFG_GRA_ENA_MASK)
+/* for video part */
+#define CFG_DMA_FTOGGLE(toggle) ((toggle)<<7)
+#define CFG_DMA_FTOGGLE_MASK 0x00000080
+#define CFG_DMA_HSMOOTH(smooth) ((smooth)<<6)
+#define CFG_DMA_HSMOOTH_MASK 0x00000040
+#define CFG_DMA_TSTMODE(test) ((test)<<5)
+#define CFG_DMA_TSTMODE_MASK 0x00000020
+#define CFG_DMA_SWAPRB(swap) ((swap)<<4)
+#define CFG_DMA_SWAPRB_MASK 0x00000010
+#define CFG_DMA_SWAPUV(swap) ((swap)<<3)
+#define CFG_DMA_SWAPUV_MASK 0x00000008
+#define CFG_DMA_SWAPYU(swap) ((swap)<<2)
+#define CFG_DMA_SWAPYU_MASK 0x00000004
+#define CFG_DMA_SWAP_MASK 0x0000001C
+#define CFG_YUV2RGB_DMA(cvrt) ((cvrt)<<1)
+#define CFG_YUV2RGB_DMA_MASK 0x00000002
+#define CFG_DMA_ENA(video) (video)
+#define CFG_DMA_ENA_MASK 0x00000001
+#define dma0_vid_masks (CFG_DMAFORMAT_MASK | CFG_DMA_FTOGGLE_MASK | \
+ CFG_DMA_HSMOOTH_MASK | CFG_DMA_TSTMODE_MASK | CFG_DMA_SWAP_MASK | \
+ CFG_YUV2RGB_DMA_MASK | CFG_DMA_ENA_MASK)
+#define dma_palette(val) (((val) ? 1 : 0) << 28)
+#define dma_fmt(vid, val) (((val) & 0xf) << ((vid) ? 20 : 16))
+#define dma_swaprb(vid, val) (((val) ? 1 : 0) << ((vid) ? 4 : 12))
+#define dma_swapuv(vid, val) (((val) ? 1 : 0) << ((vid) ? 3 : 11))
+#define dma_swapyuv(vid, val) (((val) ? 1 : 0) << ((vid) ? 2 : 10))
+#define dma_csc(vid, val) (((val) ? 1 : 0) << ((vid) ? 1 : 9))
+#define dma_hsmooth(vid, val) (((val) ? 1 : 0) << ((vid) ? 6 : 14))
+#define dma_mask(vid) (dma_palette(1) | dma_fmt(vid, 0xf) | dma_csc(vid, 1) \
+ | dma_swaprb(vid, 1) | dma_swapuv(vid, 1) | dma_swapyuv(vid, 1))
+
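+/*
+ * Usage sketch (illustrative only, not part of the register layout): the
+ * helpers above place the same logical field at the video (vid = 1) or
+ * graphics (vid = 0) bit positions. Assuming a hypothetical ioremapped
+ * register base "base", switching the graphics layer to RGB565
+ * (GMODE_RGB565, defined further below) with red/blue swapped could
+ * look like:
+ *
+ *    u32 tmp = readl_relaxed(base + LCD_SPU_DMA_CTRL0);
+ *    tmp &= ~dma_mask(0);
+ *    tmp |= dma_fmt(0, GMODE_RGB565) | dma_swaprb(0, 1);
+ *    writel_relaxed(tmp, base + LCD_SPU_DMA_CTRL0);
+ */
+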
+/* DMA Control 1 Register */
+#define LCD_SPU_DMA_CTRL1 0x0194
+#define CFG_FRAME_TRIG(trig) ((trig)<<31)
+#define CFG_FRAME_TRIG_MASK 0x80000000
+#define CFG_VSYNC_TRIG(trig) ((trig)<<28)
+#define CFG_VSYNC_TRIG_MASK 0x70000000
+#define CFG_VSYNC_INV(inv) ((inv)<<27)
+#define CFG_VSYNC_INV_MASK 0x08000000
+#define CFG_COLOR_KEY_MODE(cmode) ((cmode)<<24)
+#define CFG_COLOR_KEY_MASK 0x07000000
+#define CFG_CARRY(carry) ((carry)<<23)
+#define CFG_CARRY_MASK 0x00800000
+#define CFG_LNBUF_ENA(lnbuf) ((lnbuf)<<22)
+#define CFG_LNBUF_ENA_MASK 0x00400000
+#define CFG_GATED_ENA(gated) ((gated)<<21)
+#define CFG_GATED_ENA_MASK 0x00200000
+#define CFG_PWRDN_ENA(power) ((power)<<20)
+#define CFG_PWRDN_ENA_MASK 0x00100000
+#define CFG_DSCALE(dscale) ((dscale)<<18)
+#define CFG_DSCALE_MASK 0x000C0000
+#define CFG_ALPHA_MODE(amode) ((amode)<<16)
+#define CFG_ALPHA_MODE_MASK 0x00030000
+#define CFG_ALPHA(alpha) ((alpha)<<8)
+#define CFG_ALPHA_MASK 0x0000FF00
+#define CFG_PXLCMD(pxlcmd) (pxlcmd)
+#define CFG_PXLCMD_MASK 0x000000FF
+
+/* SRAM Control Register */
+#define LCD_SPU_SRAM_CTRL 0x0198
+#define CFG_SRAM_INIT_WR_RD(mode) ((mode)<<14)
+#define CFG_SRAM_INIT_WR_RD_MASK 0x0000C000
+#define CFG_SRAM_ADDR_LCDID(id) ((id)<<8)
+#define CFG_SRAM_ADDR_LCDID_MASK 0x00000F00
+#define CFG_SRAM_ADDR(addr) (addr)
+#define CFG_SRAM_ADDR_MASK 0x000000FF
+
+/* SRAM Write Data Register */
+#define LCD_SPU_SRAM_WRDAT 0x019C
+
+/* SRAM RTC/WTC Control Register */
+#define LCD_SPU_SRAM_PARA0 0x01A0
+
+/* SRAM Power Down Control Register */
+#define LCD_SPU_SRAM_PARA1 0x01A4
+#define CFG_CSB_256x32(hwc) ((hwc)<<15) /* HWC */
+#define CFG_CSB_256x32_MASK 0x00008000
+#define CFG_CSB_256x24(palette) ((palette)<<14) /* Palette */
+#define CFG_CSB_256x24_MASK 0x00004000
+#define CFG_CSB_256x8(gamma) ((gamma)<<13) /* Gamma */
+#define CFG_CSB_256x8_MASK 0x00002000
+#define CFG_PDWN256x32(pdwn) ((pdwn)<<7) /* HWC */
+#define CFG_PDWN256x32_MASK 0x00000080
+#define CFG_PDWN256x24(pdwn) ((pdwn)<<6) /* Palette */
+#define CFG_PDWN256x24_MASK 0x00000040
+#define CFG_PDWN256x8(pdwn) ((pdwn)<<5) /* Gamma */
+#define CFG_PDWN256x8_MASK 0x00000020
+#define CFG_PDWN32x32(pdwn) ((pdwn)<<3)
+#define CFG_PDWN32x32_MASK 0x00000008
+#define CFG_PDWN16x66(pdwn) ((pdwn)<<2)
+#define CFG_PDWN16x66_MASK 0x00000004
+#define CFG_PDWN32x66(pdwn) ((pdwn)<<1)
+#define CFG_PDWN32x66_MASK 0x00000002
+#define CFG_PDWN64x66(pdwn) (pdwn)
+#define CFG_PDWN64x66_MASK 0x00000001
+
+/* Smart or Dumb Panel Clock Divider */
+#define LCD_CFG_SCLK_DIV 0x01A8
+#define SCLK_SRC_SEL(src) ((src)<<31)
+#define SCLK_SRC_SEL_MASK 0x80000000
+#define SCLK_DISABLE (1<<28)
+#define CLK_FRACDIV(frac) ((frac)<<16)
+#define CLK_FRACDIV_MASK 0x0FFF0000
+#define DSI1_BITCLK_DIV(div) (div<<8)
+#define DSI1_BITCLK_DIV_MASK 0x00000F00
+#define CLK_INT_DIV(div) (div)
+#define CLK_INT_DIV_MASK 0x000000FF
+
+/* Video Contrast Register */
+#define LCD_SPU_CONTRAST 0x01AC
+#define CFG_BRIGHTNESS(bright) ((bright)<<16)
+#define CFG_BRIGHTNESS_MASK 0xFFFF0000
+#define CFG_CONTRAST(contrast) (contrast)
+#define CFG_CONTRAST_MASK 0x0000FFFF
+
+/* Video Saturation Register */
+#define LCD_SPU_SATURATION 0x01B0
+#define CFG_C_MULTS(mult) ((mult)<<16)
+#define CFG_C_MULTS_MASK 0xFFFF0000
+#define CFG_SATURATION(sat) (sat)
+#define CFG_SATURATION_MASK 0x0000FFFF
+
+/* Video Hue Adjust Register */
+#define LCD_SPU_CBSH_HUE 0x01B4
+#define CFG_SIN0(sin0) ((sin0)<<16)
+#define CFG_SIN0_MASK 0xFFFF0000
+#define CFG_COS0(con0) (con0)
+#define CFG_COS0_MASK 0x0000FFFF
+
+/* Dumb LCD Panel Control Register */
+#define LCD_SPU_DUMB_CTRL 0x01B8
+#define CFG_DUMBMODE(mode) ((mode)<<28)
+#define CFG_DUMBMODE_MASK 0xF0000000
+#define CFG_LCDGPIO_O(data) ((data)<<20)
+#define CFG_LCDGPIO_O_MASK 0x0FF00000
+#define CFG_LCDGPIO_ENA(gpio) ((gpio)<<12)
+#define CFG_LCDGPIO_ENA_MASK 0x000FF000
+#define CFG_BIAS_OUT(bias) ((bias)<<8)
+#define CFG_BIAS_OUT_MASK 0x00000100
+#define CFG_REVERSE_RGB(RGB) ((RGB)<<7)
+#define CFG_REVERSE_RGB_MASK 0x00000080
+#define CFG_INV_COMPBLANK(blank) ((blank)<<6)
+#define CFG_INV_COMPBLANK_MASK 0x00000040
+#define CFG_INV_COMPSYNC(sync) ((sync)<<5)
+#define CFG_INV_COMPSYNC_MASK 0x00000020
+#define CFG_INV_HENA(hena) ((hena)<<4)
+#define CFG_INV_HENA_MASK 0x00000010
+#define CFG_INV_VSYNC(vsync) ((vsync)<<3)
+#define CFG_INV_VSYNC_MASK 0x00000008
+#define CFG_INV_HSYNC(hsync) ((hsync)<<2)
+#define CFG_INV_HSYNC_MASK 0x00000004
+#define CFG_INV_PCLK(pclk) ((pclk)<<1)
+#define CFG_INV_PCLK_MASK 0x00000002
+#define CFG_DUMB_ENA(dumb) (dumb)
+#define CFG_DUMB_ENA_MASK 0x00000001
+
+/* LCD I/O Pads Control Register */
+#define SPU_IOPAD_CONTROL 0x01BC
+#define CFG_GRA_VM_ENA(vm) ((vm)<<15)
+#define CFG_GRA_VM_ENA_MASK 0x00008000
+#define CFG_DMA_VM_ENA(vm) ((vm)<<13)
+#define CFG_DMA_VM_ENA_MASK 0x00002000
+#define CFG_CMD_VM_ENA(vm) ((vm)<<12)
+#define CFG_CMD_VM_ENA_MASK 0x00001000
+#define CFG_CSC(csc) ((csc)<<8)
+#define CFG_CSC_MASK 0x00000300
+#define CFG_BOUNDARY(size) ((size)<<5)
+#define CFG_BOUNDARY_MASK 0x00000020
+#define CFG_BURST(len) ((len)<<4)
+#define CFG_BURST_MASK 0x00000010
+#define CFG_IOPADMODE(iopad) (iopad)
+#define CFG_IOPADMODE_MASK 0x0000000F
+
+/* LCD Interrupt Control Register */
+#define SPU_IRQ_ENA 0x01C0
+#define DMA_FRAME_IRQ0_ENA(irq) ((irq)<<31)
+#define DMA_FRAME_IRQ0_ENA_MASK 0x80000000
+#define DMA_FRAME_IRQ1_ENA(irq) ((irq)<<30)
+#define DMA_FRAME_IRQ1_ENA_MASK 0x40000000
+#define DMA_FF_UNDERFLOW_ENA(ff) ((ff)<<29)
+#define DMA_FF_UNDERFLOW_ENA_MASK 0x20000000
+#define AXI_BUS_ERROR_IRQ_ENA(irq) ((irq)<<28)
+#define AXI_BUS_ERROR_IRQ_ENA_MASK 0x10000000
+#define GRA_FRAME_IRQ0_ENA(irq) ((irq)<<27)
+#define GRA_FRAME_IRQ0_ENA_MASK 0x08000000
+#define GRA_FRAME_IRQ1_ENA(irq) ((irq)<<26)
+#define GRA_FRAME_IRQ1_ENA_MASK 0x04000000
+#define GRA_FF_UNDERFLOW_ENA(ff) ((ff)<<25)
+#define GRA_FF_UNDERFLOW_ENA_MASK 0x02000000
+#define VSYNC_IRQ_ENA(vsync_irq) ((vsync_irq)<<23)
+#define VSYNC_IRQ_ENA_MASK 0x00800000
+#define DUMB_FRAMEDONE_ENA(fdone) ((fdone)<<22)
+#define DUMB_FRAMEDONE_ENA_MASK 0x00400000
+#define TWC_FRAMEDONE_ENA(fdone) ((fdone)<<21)
+#define TWC_FRAMEDONE_ENA_MASK 0x00200000
+#define HWC_FRAMEDONE_ENA(fdone) ((fdone)<<20)
+#define HWC_FRAMEDONE_ENA_MASK 0x00100000
+#define SLV_IRQ_ENA(irq) ((irq)<<19)
+#define SLV_IRQ_ENA_MASK 0x00080000
+#define SPI_IRQ_ENA(irq) ((irq)<<18)
+#define SPI_IRQ_ENA_MASK 0x00040000
+#define PWRDN_IRQ_ENA(irq) ((irq)<<17)
+#define PWRDN_IRQ_ENA_MASK 0x00020000
+#define AXI_LATENCY_TOO_LONG_IRQ_ENA(irq) ((irq)<<16)
+#define AXI_LATENCY_TOO_LONG_IRQ_ENA_MASK 0x00010000
+#define CLEAN_SPU_IRQ_ISR(irq) (irq)
+#define CLEAN_SPU_IRQ_ISR_MASK 0x0000FFFF
+#define TV_DMA_FRAME_IRQ0_ENA(irq) ((irq)<<15)
+#define TV_DMA_FRAME_IRQ0_ENA_MASK 0x00008000
+#define TV_DMA_FRAME_IRQ1_ENA(irq) ((irq)<<14)
+#define TV_DMA_FRAME_IRQ1_ENA_MASK 0x00004000
+#define TV_DMA_FF_UNDERFLOW_ENA(underrun) ((underrun)<<13)
+#define TV_DMA_FF_UNDERFLOW_ENA_MASK 0x00002000
+#define TVSYNC_IRQ_ENA(irq) ((irq)<<12)
+#define TVSYNC_IRQ_ENA_MASK 0x00001000
+#define TV_FRAME_IRQ0_ENA(irq) ((irq)<<11)
+#define TV_FRAME_IRQ0_ENA_MASK 0x00000800
+#define TV_FRAME_IRQ1_ENA(irq) ((irq)<<10)
+#define TV_FRAME_IRQ1_ENA_MASK 0x00000400
+#define TV_GRA_FF_UNDERFLOW_ENA(underrun) ((underrun)<<9)
+#define TV_GRA_FF_UNDERFLOW_ENA_MASK 0x00000200
+#define TV_FRAMEDONE_ENA(irq) ((irq)<<8)
+#define TV_FRAMEDONE_ENA_MASK 0x00000100
+
+/* FIXME - JUST GUESS */
+#define PN2_DMA_FRAME_IRQ0_ENA(irq) ((irq)<<7)
+#define PN2_DMA_FRAME_IRQ0_ENA_MASK 0x00000080
+#define PN2_DMA_FRAME_IRQ1_ENA(irq) ((irq)<<6)
+#define PN2_DMA_FRAME_IRQ1_ENA_MASK 0x00000040
+#define PN2_DMA_FF_UNDERFLOW_ENA(ff) ((ff)<<5)
+#define PN2_DMA_FF_UNDERFLOW_ENA_MASK 0x00000020
+#define PN2_GRA_FRAME_IRQ0_ENA(irq) ((irq)<<3)
+#define PN2_GRA_FRAME_IRQ0_ENA_MASK 0x00000008
+#define PN2_GRA_FRAME_IRQ1_ENA(irq) ((irq)<<2)
+#define PN2_GRA_FRAME_IRQ1_ENA_MASK 0x00000004
+#define PN2_GRA_FF_UNDERFLOW_ENA(ff) ((ff)<<1)
+#define PN2_GRA_FF_UNDERFLOW_ENA_MASK 0x00000002
+#define PN2_VSYNC_IRQ_ENA(irq) ((irq)<<0)
+#define PN2_SYNC_IRQ_ENA_MASK 0x00000001
+
+#define gf0_imask(id) ((id) ? (((id) & 1) ? TV_FRAME_IRQ0_ENA_MASK \
+ : PN2_GRA_FRAME_IRQ0_ENA_MASK) : GRA_FRAME_IRQ0_ENA_MASK)
+#define gf1_imask(id) ((id) ? (((id) & 1) ? TV_FRAME_IRQ1_ENA_MASK \
+ : PN2_GRA_FRAME_IRQ1_ENA_MASK) : GRA_FRAME_IRQ1_ENA_MASK)
+#define vsync_imask(id) ((id) ? (((id) & 1) ? TVSYNC_IRQ_ENA_MASK \
+ : PN2_SYNC_IRQ_ENA_MASK) : VSYNC_IRQ_ENA_MASK)
+#define vsync_imasks (vsync_imask(0) | vsync_imask(1))
+
+#define display_done_imask(id) ((id) ? (((id) & 1) ? TV_FRAMEDONE_ENA_MASK\
+ : (PN2_DMA_FRAME_IRQ0_ENA_MASK | PN2_DMA_FRAME_IRQ1_ENA_MASK))\
+ : DUMB_FRAMEDONE_ENA_MASK)
+
+#define display_done_imasks (display_done_imask(0) | display_done_imask(1))
+
+#define vf0_imask(id) ((id) ? (((id) & 1) ? TV_DMA_FRAME_IRQ0_ENA_MASK \
+ : PN2_DMA_FRAME_IRQ0_ENA_MASK) : DMA_FRAME_IRQ0_ENA_MASK)
+#define vf1_imask(id) ((id) ? (((id) & 1) ? TV_DMA_FRAME_IRQ1_ENA_MASK \
+ : PN2_DMA_FRAME_IRQ1_ENA_MASK) : DMA_FRAME_IRQ1_ENA_MASK)
+
+#define gfx_imasks (gf0_imask(0) | gf1_imask(0) | gf0_imask(1) | \
+ gf1_imask(1))
+#define vid_imasks (vf0_imask(0) | vf1_imask(0) | vf0_imask(1) | \
+ vf1_imask(1))
+#define vid_imask(id) (display_done_imask(id))
+
+#define pn1_imasks (gf0_imask(0) | gf1_imask(0) | vsync_imask(0) | \
+ display_done_imask(0) | vf0_imask(0) | vf1_imask(0))
+#define tv_imasks (gf0_imask(1) | gf1_imask(1) | vsync_imask(1) | \
+ display_done_imask(1) | vf0_imask(1) | vf1_imask(1))
+#define path_imasks(id) ((id) ? (tv_imasks) : (pn1_imasks))
+
+/* error indications */
+#define vid_udflow_imask(id) ((id) ? (((id) & 1) ? \
+ (TV_DMA_FF_UNDERFLOW_ENA_MASK) : (PN2_DMA_FF_UNDERFLOW_ENA_MASK)) : \
+ (DMA_FF_UNDERFLOW_ENA_MASK))
+#define gfx_udflow_imask(id) ((id) ? (((id) & 1) ? \
+ (TV_GRA_FF_UNDERFLOW_ENA_MASK) : (PN2_GRA_FF_UNDERFLOW_ENA_MASK)) : \
+ (GRA_FF_UNDERFLOW_ENA_MASK))
+
+#define err_imask(id) (vid_udflow_imask(id) | gfx_udflow_imask(id) | \
+ AXI_BUS_ERROR_IRQ_ENA_MASK | AXI_LATENCY_TOO_LONG_IRQ_ENA_MASK)
+#define err_imasks (err_imask(0) | err_imask(1) | err_imask(2))
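+
+/*
+ * Note (illustrative): the id-based helpers above collect the interrupt
+ * bits of one path (0 = panel, 1 = TV, 2 = panel 2, see the PATH_*
+ * enum below). Unmasking the TV path vsync and error interrupts in a
+ * hypothetical setup, with "base" an ioremapped register base, might
+ * read:
+ *
+ *    u32 ena = readl_relaxed(base + SPU_IRQ_ENA);
+ *    ena |= vsync_imask(PATH_TV) | err_imask(PATH_TV);
+ *    writel_relaxed(ena, base + SPU_IRQ_ENA);
+ */
+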
+/* LCD Interrupt Status Register */
+#define SPU_IRQ_ISR 0x01C4
+#define DMA_FRAME_IRQ0(irq) ((irq)<<31)
+#define DMA_FRAME_IRQ0_MASK 0x80000000
+#define DMA_FRAME_IRQ1(irq) ((irq)<<30)
+#define DMA_FRAME_IRQ1_MASK 0x40000000
+#define DMA_FF_UNDERFLOW(ff) ((ff)<<29)
+#define DMA_FF_UNDERFLOW_MASK 0x20000000
+#define AXI_BUS_ERROR_IRQ(irq) ((irq)<<28)
+#define AXI_BUS_ERROR_IRQ_MASK 0x10000000
+#define GRA_FRAME_IRQ0(irq) ((irq)<<27)
+#define GRA_FRAME_IRQ0_MASK 0x08000000
+#define GRA_FRAME_IRQ1(irq) ((irq)<<26)
+#define GRA_FRAME_IRQ1_MASK 0x04000000
+#define GRA_FF_UNDERFLOW(ff) ((ff)<<25)
+#define GRA_FF_UNDERFLOW_MASK 0x02000000
+#define VSYNC_IRQ(vsync_irq) ((vsync_irq)<<23)
+#define VSYNC_IRQ_MASK 0x00800000
+#define DUMB_FRAMEDONE(fdone) ((fdone)<<22)
+#define DUMB_FRAMEDONE_MASK 0x00400000
+#define TWC_FRAMEDONE(fdone) ((fdone)<<21)
+#define TWC_FRAMEDONE_MASK 0x00200000
+#define HWC_FRAMEDONE(fdone) ((fdone)<<20)
+#define HWC_FRAMEDONE_MASK 0x00100000
+#define SLV_IRQ(irq) ((irq)<<19)
+#define SLV_IRQ_MASK 0x00080000
+#define SPI_IRQ(irq) ((irq)<<18)
+#define SPI_IRQ_MASK 0x00040000
+#define PWRDN_IRQ(irq) ((irq)<<17)
+#define PWRDN_IRQ_MASK 0x00020000
+#define AXI_LATENCY_TOO_LONGR_IRQ(irq) ((irq)<<16)
+#define AXI_LATENCY_TOO_LONGR_IRQ_MASK 0x00010000
+#define TV_DMA_FRAME_IRQ0(irq) ((irq)<<15)
+#define TV_DMA_FRAME_IRQ0_MASK 0x00008000
+#define TV_DMA_FRAME_IRQ1(irq) ((irq)<<14)
+#define TV_DMA_FRAME_IRQ1_MASK 0x00004000
+#define TV_DMA_FF_UNDERFLOW(underrun) ((underrun)<<13)
+#define TV_DMA_FF_UNDERFLOW_MASK 0x00002000
+#define TVSYNC_IRQ(irq) ((irq)<<12)
+#define TVSYNC_IRQ_MASK 0x00001000
+#define TV_FRAME_IRQ0(irq) ((irq)<<11)
+#define TV_FRAME_IRQ0_MASK 0x00000800
+#define TV_FRAME_IRQ1(irq) ((irq)<<10)
+#define TV_FRAME_IRQ1_MASK 0x00000400
+#define TV_GRA_FF_UNDERFLOW(underrun) ((underrun)<<9)
+#define TV_GRA_FF_UNDERFLOW_MASK 0x00000200
+#define PN2_DMA_FRAME_IRQ0(irq) ((irq)<<7)
+#define PN2_DMA_FRAME_IRQ0_MASK 0x00000080
+#define PN2_DMA_FRAME_IRQ1(irq) ((irq)<<6)
+#define PN2_DMA_FRAME_IRQ1_MASK 0x00000040
+#define PN2_DMA_FF_UNDERFLOW(ff) ((ff)<<5)
+#define PN2_DMA_FF_UNDERFLOW_MASK 0x00000020
+#define PN2_GRA_FRAME_IRQ0(irq) ((irq)<<3)
+#define PN2_GRA_FRAME_IRQ0_MASK 0x00000008
+#define PN2_GRA_FRAME_IRQ1(irq) ((irq)<<2)
+#define PN2_GRA_FRAME_IRQ1_MASK 0x00000004
+#define PN2_GRA_FF_UNDERFLOW(ff) ((ff)<<1)
+#define PN2_GRA_FF_UNDERFLOW_MASK 0x00000002
+#define PN2_VSYNC_IRQ(irq) ((irq)<<0)
+#define PN2_SYNC_IRQ_MASK 0x00000001
+
+/* LCD FIFO Depth register */
+#define LCD_FIFO_DEPTH 0x01c8
+#define VIDEO_FIFO(fi) ((fi) << 0)
+#define VIDEO_FIFO_MASK 0x00000003
+#define GRAPHIC_FIFO(fi) ((fi) << 2)
+#define GRAPHIC_FIFO_MASK 0x0000000c
+
+/* read-only */
+#define DMA_FRAME_IRQ0_LEVEL_MASK 0x00008000
+#define DMA_FRAME_IRQ1_LEVEL_MASK 0x00004000
+#define DMA_FRAME_CNT_ISR_MASK 0x00003000
+#define GRA_FRAME_IRQ0_LEVEL_MASK 0x00000800
+#define GRA_FRAME_IRQ1_LEVEL_MASK 0x00000400
+#define GRA_FRAME_CNT_ISR_MASK 0x00000300
+#define VSYNC_IRQ_LEVEL_MASK 0x00000080
+#define DUMB_FRAMEDONE_LEVEL_MASK 0x00000040
+#define TWC_FRAMEDONE_LEVEL_MASK 0x00000020
+#define HWC_FRAMEDONE_LEVEL_MASK 0x00000010
+#define SLV_FF_EMPTY_MASK 0x00000008
+#define DMA_FF_ALLEMPTY_MASK 0x00000004
+#define GRA_FF_ALLEMPTY_MASK 0x00000002
+#define PWRDN_IRQ_LEVEL_MASK 0x00000001
+
+/* 32 bit LCD Interrupt Reset Status*/
+#define SPU_IRQ_RSR (0x01C8)
+/* 32 bit Panel Path Graphic Partial Display Horizontal Control Register*/
+#define LCD_GRA_CUTHPXL (0x01CC)
+/* 32 bit Panel Path Graphic Partial Display Vertical Control Register*/
+#define LCD_GRA_CUTVLN (0x01D0)
+/* 32 bit TV Path Graphic Partial Display Horizontal Control Register*/
+#define LCD_TVG_CUTHPXL (0x01D4)
+/* 32 bit TV Path Graphic Partial Display Vertical Control Register*/
+#define LCD_TVG_CUTVLN (0x01D8)
+/* 32 bit LCD Global Control Register*/
+#define LCD_TOP_CTRL (0x01DC)
+/* 32 bit LCD SQU Line Buffer Control Register 1*/
+#define LCD_SQULN1_CTRL (0x01E0)
+/* 32 bit LCD SQU Line Buffer Control Register 2*/
+#define LCD_SQULN2_CTRL (0x01E4)
+#define squln_ctrl(id) ((id) ? (((id) & 1) ? LCD_SQULN2_CTRL : \
+ LCD_PN2_SQULN1_CTRL) : LCD_SQULN1_CTRL)
+
+/* 32 bit LCD Mixed Overlay Control Register */
+#define LCD_AFA_ALL2ONE (0x01E8)
+
+#define LCD_PN2_SCLK_DIV (0x01EC)
+#define LCD_PN2_TCLK_DIV (0x01F0)
+#define LCD_LVDS_SCLK_DIV_WR (0x01F4)
+#define LCD_LVDS_SCLK_DIV_RD (0x01FC)
+#define PN2_LCD_DMA_START_ADDR_Y0 (0x0200)
+#define PN2_LCD_DMA_START_ADDR_U0 (0x0204)
+#define PN2_LCD_DMA_START_ADDR_V0 (0x0208)
+#define PN2_LCD_DMA_START_ADDR_C0 (0x020C)
+#define PN2_LCD_DMA_START_ADDR_Y1 (0x0210)
+#define PN2_LCD_DMA_START_ADDR_U1 (0x0214)
+#define PN2_LCD_DMA_START_ADDR_V1 (0x0218)
+#define PN2_LCD_DMA_START_ADDR_C1 (0x021C)
+#define PN2_LCD_DMA_PITCH_YC (0x0220)
+#define PN2_LCD_DMA_PITCH_UV (0x0224)
+#define PN2_LCD_DMA_OVSA_HPXL_VLN (0x0228)
+#define PN2_LCD_DMA_HPXL_VLN (0x022C)
+#define PN2_LCD_DMAZM_HPXL_VLN (0x0230)
+#define PN2_LCD_GRA_START_ADDR0 (0x0234)
+#define PN2_LCD_GRA_START_ADDR1 (0x0238)
+#define PN2_LCD_GRA_PITCH (0x023C)
+#define PN2_LCD_GRA_OVSA_HPXL_VLN (0x0240)
+#define PN2_LCD_GRA_HPXL_VLN (0x0244)
+#define PN2_LCD_GRAZM_HPXL_VLN (0x0248)
+#define PN2_LCD_HWC_OVSA_HPXL_VLN (0x024C)
+#define PN2_LCD_HWC_HPXL_VLN (0x0250)
+#define LCD_PN2_V_H_TOTAL (0x0254)
+#define LCD_PN2_V_H_ACTIVE (0x0258)
+#define LCD_PN2_H_PORCH (0x025C)
+#define LCD_PN2_V_PORCH (0x0260)
+#define LCD_PN2_BLANKCOLOR (0x0264)
+#define LCD_PN2_ALPHA_COLOR1 (0x0268)
+#define LCD_PN2_ALPHA_COLOR2 (0x026C)
+#define LCD_PN2_COLORKEY_Y (0x0270)
+#define LCD_PN2_COLORKEY_U (0x0274)
+#define LCD_PN2_COLORKEY_V (0x0278)
+#define LCD_PN2_SEPXLCNT (0x027C)
+#define LCD_TV_V_H_TOTAL_FLD (0x0280)
+#define LCD_TV_V_PORCH_FLD (0x0284)
+#define LCD_TV_SEPXLCNT_FLD (0x0288)
+
+#define LCD_2ND_ALPHA (0x0294)
+#define LCD_PN2_CONTRAST (0x0298)
+#define LCD_PN2_SATURATION (0x029c)
+#define LCD_PN2_CBSH_HUE (0x02a0)
+#define LCD_TIMING_EXT (0x02C0)
+#define LCD_PN2_LAYER_ALPHA_SEL1 (0x02c4)
+#define LCD_PN2_CTRL0 (0x02C8)
+#define TV_LAYER_ALPHA_SEL1 (0x02cc)
+#define LCD_SMPN2_CTRL (0x02D0)
+#define LCD_IO_OVERL_MAP_CTRL (0x02D4)
+#define LCD_DUMB2_CTRL (0x02d8)
+#define LCD_PN2_CTRL1 (0x02DC)
+#define PN2_IOPAD_CONTROL (0x02E0)
+#define LCD_PN2_SQULN1_CTRL (0x02E4)
+#define PN2_LCD_GRA_CUTHPXL (0x02e8)
+#define PN2_LCD_GRA_CUTVLN (0x02ec)
+#define LCD_PN2_SQULN2_CTRL (0x02F0)
+#define ALL_LAYER_ALPHA_SEL (0x02F4)
+
+/* pxa988 has different MASTER_CTRL from MMP3/MMP2 */
+#ifdef CONFIG_CPU_PXA988
+#define TIMING_MASTER_CONTROL (0x01F4)
+#define MASTER_ENH(id) (1 << ((id) + 5))
+#define MASTER_ENV(id) (1 << ((id) + 6))
+#else
+#define TIMING_MASTER_CONTROL (0x02F8)
+#define MASTER_ENH(id) (1 << (id))
+#define MASTER_ENV(id) (1 << ((id) + 4))
+#endif
+
+#define DSI_START_SEL_SHIFT(id) (((id) << 1) + 8)
+#define timing_master_config(path, dsi_id, lcd_id) \
+ (MASTER_ENH(path) | MASTER_ENV(path) | \
+ (((lcd_id) + ((dsi_id) << 1)) << DSI_START_SEL_SHIFT(path)))
+
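+/*
+ * Worked example (illustrative only): on non-PXA988 parts,
+ * timing_master_config(PATH_PN, 0, 0) expands to
+ * MASTER_ENH(0) | MASTER_ENV(0) = (1 << 0) | (1 << 4) = 0x11, with the
+ * DSI start-select field (bits from DSI_START_SEL_SHIFT(0) = 8) left
+ * at 0 because dsi_id = 0 and lcd_id = 0.
+ */
+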
+#define LCD_2ND_BLD_CTL (0x02Fc)
+#define LVDS_SRC_MASK (3 << 30)
+#define LVDS_SRC_SHIFT (30)
+#define LVDS_FMT_MASK (1 << 28)
+#define LVDS_FMT_SHIFT (28)
+
+#define CLK_SCLK (1 << 0)
+#define CLK_LVDS_RD (1 << 1)
+#define CLK_LVDS_WR (1 << 2)
+
+#define gra_partdisp_ctrl_hor(id) ((id) ? (((id) & 1) ? \
+ LCD_TVG_CUTHPXL : PN2_LCD_GRA_CUTHPXL) : LCD_GRA_CUTHPXL)
+#define gra_partdisp_ctrl_ver(id) ((id) ? (((id) & 1) ? \
+ LCD_TVG_CUTVLN : PN2_LCD_GRA_CUTVLN) : LCD_GRA_CUTVLN)
+
+/*
+ * defined Video Memory Color format for DMA control 0 register
+ * DMA0 bit[23:20]
+ */
+#define VMODE_RGB565 0x0
+#define VMODE_RGB1555 0x1
+#define VMODE_RGB888PACKED 0x2
+#define VMODE_RGB888UNPACKED 0x3
+#define VMODE_RGBA888 0x4
+#define VMODE_YUV422PACKED 0x5
+#define VMODE_YUV422PLANAR 0x6
+#define VMODE_YUV420PLANAR 0x7
+#define VMODE_SMPNCMD 0x8
+#define VMODE_PALETTE4BIT 0x9
+#define VMODE_PALETTE8BIT 0xa
+#define VMODE_RESERVED 0xb
+
+/*
+ * defined Graphic Memory Color format for DMA control 0 register
+ * DMA0 bit[19:16]
+ */
+#define GMODE_RGB565 0x0
+#define GMODE_RGB1555 0x1
+#define GMODE_RGB888PACKED 0x2
+#define GMODE_RGB888UNPACKED 0x3
+#define GMODE_RGBA888 0x4
+#define GMODE_YUV422PACKED 0x5
+#define GMODE_YUV422PLANAR 0x6
+#define GMODE_YUV420PLANAR 0x7
+#define GMODE_SMPNCMD 0x8
+#define GMODE_PALETTE4BIT 0x9
+#define GMODE_PALETTE8BIT 0xa
+#define GMODE_RESERVED 0xb
+
+/*
+ * define for DMA control 1 register
+ */
+#define DMA1_FRAME_TRIG 31 /* bit location */
+#define DMA1_VSYNC_MODE 28
+#define DMA1_VSYNC_INV 27
+#define DMA1_CKEY 24
+#define DMA1_CARRY 23
+#define DMA1_LNBUF_ENA 22
+#define DMA1_GATED_ENA 21
+#define DMA1_PWRDN_ENA 20
+#define DMA1_DSCALE 18
+#define DMA1_ALPHA_MODE 16
+#define DMA1_ALPHA 8
+#define DMA1_PXLCMD 0
+
+/*
+ * defined for Configure Dumb Mode
+ * DUMB LCD Panel bit[31:28]
+ */
+#define DUMB16_RGB565_0 0x0
+#define DUMB16_RGB565_1 0x1
+#define DUMB18_RGB666_0 0x2
+#define DUMB18_RGB666_1 0x3
+#define DUMB12_RGB444_0 0x4
+#define DUMB12_RGB444_1 0x5
+#define DUMB24_RGB888_0 0x6
+#define DUMB_BLANK 0x7
+
+/*
+ * defined for Configure I/O Pin Allocation Mode
+ * LCD LCD I/O Pads control register bit[3:0]
+ */
+#define IOPAD_DUMB24 0x0
+#define IOPAD_DUMB18SPI 0x1
+#define IOPAD_DUMB18GPIO 0x2
+#define IOPAD_DUMB16SPI 0x3
+#define IOPAD_DUMB16GPIO 0x4
+#define IOPAD_DUMB12 0x5
+#define IOPAD_SMART18SPI 0x6
+#define IOPAD_SMART16SPI 0x7
+#define IOPAD_SMART8BOTH 0x8
+#define IOPAD_DUMB18_SMART8 0x9
+#define IOPAD_DUMB16_SMART8SPI 0xa
+#define IOPAD_DUMB16_SMART8GPIO 0xb
+#define IOPAD_DUMB16_DUMB16 0xc
+#define IOPAD_SMART8_SMART8 0xc
+
+/*
+ * defined for indicating boundary and cycle burst length
+ */
+#define CFG_BOUNDARY_1KB (1<<5)
+#define CFG_BOUNDARY_4KB (0<<5)
+#define CFG_CYC_BURST_LEN16 (1<<4)
+#define CFG_CYC_BURST_LEN8 (0<<4)
+
+/*
+ * defined for Dumb Panel Clock Divider register
+ * SCLK_Source bit[31]
+ * 0: PLL clock select
+ */
+#define AXI_BUS_SEL 0x80000000
+#define CCD_CLK_SEL 0x40000000
+#define DCON_CLK_SEL 0x20000000
+#define ENA_CLK_INT_DIV CONFIG_FB_DOVE_CLCD_SCLK_DIV
+#define IDLE_CLK_INT_DIV 0x1 /* idle Integer Divider */
+#define DIS_CLK_INT_DIV 0x0 /* Disable Integer Divider */
+
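+/*
+ * Sketch (illustrative only, values are placeholders): a pixel clock
+ * setup using the fields above, keeping the default clock source, no
+ * fractional part and an integer divide-by-4, could be written as:
+ *
+ *    writel_relaxed(SCLK_SRC_SEL(0) | CLK_FRACDIV(0) | CLK_INT_DIV(4),
+ *                   base + LCD_CFG_SCLK_DIV);
+ */
+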
+/* SRAM ID */
+#define SRAMID_GAMMA_YR 0x0
+#define SRAMID_GAMMA_UG 0x1
+#define SRAMID_GAMMA_VB 0x2
+#define SRAMID_PALATTE 0x3
+#define SRAMID_HWC 0xf
+
+/* SRAM INIT Read/Write */
+#define SRAMID_INIT_READ 0x0
+#define SRAMID_INIT_WRITE 0x2
+#define SRAMID_INIT_DEFAULT 0x3
+
+/*
+ * defined VSYNC selection mode for DMA control 1 register
+ * DMA1 bit[30:28]
+ */
+#define VMODE_SMPN 0x0
+#define VMODE_SMPNIRQ 0x1
+#define VMODE_DUMB 0x2
+#define VMODE_IPE 0x3
+#define VMODE_IRE 0x4
+
+/*
+ * defined Configure Alpha and Alpha mode for DMA control 1 register
+ * DMA1 bit[15:08](alpha) / bit[17:16](alpha mode)
+ */
+/* ALPHA mode */
+#define MODE_ALPHA_DMA 0x0
+#define MODE_ALPHA_GRA 0x1
+#define MODE_ALPHA_CFG 0x2
+
+/* alpha value */
+#define ALPHA_NOGRAPHIC 0xFF /* all video, no graphic */
+#define ALPHA_NOVIDEO 0x00 /* all graphic, no video */
+#define ALPHA_GRAPHNVIDEO 0x0F /* Selects graphic & video */
+
+/*
+ * defined Pixel Command for DMA control 1 register
+ * DMA1 bit[07:00]
+ */
+#define PIXEL_CMD 0x81
+
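+/*
+ * Sketch (illustrative only): combining the DMA control 1 fields above,
+ * a register-configured 50% blend together with the default pixel
+ * command could be programmed as:
+ *
+ *    u32 tmp = readl_relaxed(base + LCD_SPU_DMA_CTRL1);
+ *    tmp &= ~(CFG_ALPHA_MODE_MASK | CFG_ALPHA_MASK | CFG_PXLCMD_MASK);
+ *    tmp |= CFG_ALPHA_MODE(MODE_ALPHA_CFG) | CFG_ALPHA(0x80) |
+ *           CFG_PXLCMD(PIXEL_CMD);
+ *    writel_relaxed(tmp, base + LCD_SPU_DMA_CTRL1);
+ */
+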
+/* DSI */
+/* DSI1 - 4 Lane Controller base */
+#define DSI1_REGS_PHYSICAL_BASE 0xD420B800
+/* DSI2 - 3 Lane Controller base */
+#define DSI2_REGS_PHYSICAL_BASE 0xD420BA00
+
+/* DSI Controller Registers */
+struct dsi_lcd_regs {
+#define DSI_LCD1_CTRL_0 0x100 /* DSI Active Panel 1 Control register 0 */
+#define DSI_LCD1_CTRL_1 0x104 /* DSI Active Panel 1 Control register 1 */
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 reserved1[2];
+
+#define DSI_LCD1_TIMING_0 0x110 /* Timing register 0 */
+#define DSI_LCD1_TIMING_1 0x114 /* Timing register 1 */
+#define DSI_LCD1_TIMING_2 0x118 /* Timing register 2 */
+#define DSI_LCD1_TIMING_3 0x11C /* Timing register 3 */
+#define DSI_LCD1_WC_0 0x120 /* Word Count register 0 */
+#define DSI_LCD1_WC_1 0x124 /* Word Count register 1 */
+#define DSI_LCD1_WC_2 0x128 /* Word Count register 2 */
+ u32 timing0;
+ u32 timing1;
+ u32 timing2;
+ u32 timing3;
+ u32 wc0;
+ u32 wc1;
+ u32 wc2;
+ u32 reserved2[1];
+ u32 slot_cnt0;
+ u32 slot_cnt1;
+ u32 reserved3[2];
+ u32 status_0;
+ u32 status_1;
+ u32 status_2;
+ u32 status_3;
+ u32 status_4;
+};
+
+struct dsi_regs {
+#define DSI_CTRL_0 0x000 /* DSI control register 0 */
+#define DSI_CTRL_1 0x004 /* DSI control register 1 */
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 reserved1[2];
+ u32 irq_status;
+ u32 irq_mask;
+ u32 reserved2[2];
+
+#define DSI_CPU_CMD_0 0x020 /* DSI CPU packet command register 0 */
+#define DSI_CPU_CMD_1 0x024 /* DSI CPU Packet Command Register 1 */
+#define DSI_CPU_CMD_3 0x02C /* DSI CPU Packet Command Register 3 */
+#define DSI_CPU_WDAT_0 0x030 /* DSI CPU Write Data Register 0 */
+ u32 cmd0;
+ u32 cmd1;
+ u32 cmd2;
+ u32 cmd3;
+ u32 dat0;
+ u32 status0;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 reserved3[2];
+
+ u32 smt_cmd;
+ u32 smt_ctrl0;
+ u32 smt_ctrl1;
+ u32 reserved4[1];
+
+ u32 rx0_status;
+
+/* Rx Packet Header - data from slave device */
+#define DSI_RX_PKT_HDR_0 0x064
+ u32 rx0_header;
+ u32 rx1_status;
+ u32 rx1_header;
+ u32 rx_ctrl;
+ u32 rx_ctrl1;
+ u32 rx2_status;
+ u32 rx2_header;
+ u32 reserved5[1];
+
+ u32 phy_ctrl1;
+#define DSI_PHY_CTRL_2 0x088 /* DSI DPHI Control Register 2 */
+#define DSI_PHY_CTRL_3 0x08C /* DPHY Control Register 3 */
+ u32 phy_ctrl2;
+ u32 phy_ctrl3;
+ u32 phy_status0;
+ u32 phy_status1;
+ u32 reserved6[5];
+ u32 phy_status2;
+
+#define DSI_PHY_RCOMP_0 0x0B0 /* DPHY Rcomp Control Register */
+ u32 phy_rcomp0;
+ u32 reserved7[3];
+#define DSI_PHY_TIME_0 0x0C0 /* DPHY Timing Control Register 0 */
+#define DSI_PHY_TIME_1 0x0C4 /* DPHY Timing Control Register 1 */
+#define DSI_PHY_TIME_2 0x0C8 /* DPHY Timing Control Register 2 */
+#define DSI_PHY_TIME_3 0x0CC /* DPHY Timing Control Register 3 */
+#define DSI_PHY_TIME_4 0x0D0 /* DPHY Timing Control Register 4 */
+#define DSI_PHY_TIME_5 0x0D4 /* DPHY Timing Control Register 5 */
+ u32 phy_timing0;
+ u32 phy_timing1;
+ u32 phy_timing2;
+ u32 phy_timing3;
+ u32 phy_code_0;
+ u32 phy_code_1;
+ u32 reserved8[2];
+ u32 mem_ctrl;
+ u32 tx_timer;
+ u32 rx_timer;
+ u32 turn_timer;
+ u32 reserved9[4];
+
+#define DSI_LCD1_CTRL_0 0x100 /* DSI Active Panel 1 Control register 0 */
+#define DSI_LCD1_CTRL_1 0x104 /* DSI Active Panel 1 Control register 1 */
+#define DSI_LCD1_TIMING_0 0x110 /* Timing register 0 */
+#define DSI_LCD1_TIMING_1 0x114 /* Timing register 1 */
+#define DSI_LCD1_TIMING_2 0x118 /* Timing register 2 */
+#define DSI_LCD1_TIMING_3 0x11C /* Timing register 3 */
+#define DSI_LCD1_WC_0 0x120 /* Word Count register 0 */
+#define DSI_LCD1_WC_1 0x124 /* Word Count register 1 */
+#define DSI_LCD1_WC_2 0x128 /* Word Count register 2 */
+ struct dsi_lcd_regs lcd1;
+ u32 reserved10[11];
+ struct dsi_lcd_regs lcd2;
+};
+
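+/*
+ * Note (illustrative): struct dsi_regs mirrors the register map, so each
+ * field offset matches the corresponding #define, which a hypothetical
+ * sanity check could assert:
+ *
+ *    BUILD_BUG_ON(offsetof(struct dsi_regs, lcd1.ctrl0) !=
+ *                 DSI_LCD1_CTRL_0);
+ *    BUILD_BUG_ON(offsetof(struct dsi_regs, phy_timing0) !=
+ *                 DSI_PHY_TIME_0);
+ */
+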
+#define DSI_LCD2_CTRL_0 0x180 /* DSI Active Panel 2 Control register 0 */
+#define DSI_LCD2_CTRL_1 0x184 /* DSI Active Panel 2 Control register 1 */
+#define DSI_LCD2_TIMING_0 0x190 /* Timing register 0 */
+#define DSI_LCD2_TIMING_1 0x194 /* Timing register 1 */
+#define DSI_LCD2_TIMING_2 0x198 /* Timing register 2 */
+#define DSI_LCD2_TIMING_3 0x19C /* Timing register 3 */
+#define DSI_LCD2_WC_0 0x1A0 /* Word Count register 0 */
+#define DSI_LCD2_WC_1 0x1A4 /* Word Count register 1 */
+#define DSI_LCD2_WC_2 0x1A8 /* Word Count register 2 */
+
+/* DSI_CTRL_0 0x0000 DSI Control Register 0 */
+#define DSI_CTRL_0_CFG_SOFT_RST (1<<31)
+#define DSI_CTRL_0_CFG_SOFT_RST_REG (1<<30)
+#define DSI_CTRL_0_CFG_LCD1_TX_EN (1<<8)
+#define DSI_CTRL_0_CFG_LCD1_SLV (1<<4)
+#define DSI_CTRL_0_CFG_LCD1_EN (1<<0)
+
+/* DSI_CTRL_1 0x0004 DSI Control Register 1 */
+#define DSI_CTRL_1_CFG_EOTP (1<<8)
+#define DSI_CTRL_1_CFG_RSVD (2<<4)
+#define DSI_CTRL_1_CFG_LCD2_VCH_NO_MASK (3<<2)
+#define DSI_CTRL_1_CFG_LCD2_VCH_NO_SHIFT 2
+#define DSI_CTRL_1_CFG_LCD1_VCH_NO_MASK (3<<0)
+#define DSI_CTRL_1_CFG_LCD1_VCH_NO_SHIFT 0
+
+/* DSI_LCD1_CTRL_1 0x0104 DSI Active Panel 1 Control Register 1 */
+/* LCD 1 Vsync Reset Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_VSYNC_RST_EN (1<<31)
+/* LCD 1 2K Pixel Buffer Mode Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_M2K_EN (1<<30)
+/* Bit(s) DSI_LCD1_CTRL_1_RSRV_29_23 reserved */
+/* Long Blanking Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HLP_PKT_EN (1<<22)
+/* Extra Long Blanking Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HEX_PKT_EN (1<<21)
+/* Front Porch Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HFP_PKT_EN (1<<20)
+/* hact Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HACT_PKT_EN (1<<19)
+/* Back Porch Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HBP_PKT_EN (1<<18)
+/* hse Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HSE_PKT_EN (1<<17)
+/* hsa Packet Enable */
+#define DSI_LCD1_CTRL_1_CFG_L1_HSA_PKT_EN (1<<16)
+/* All Item Enable after Pixel Data */
+#define DSI_LCD1_CTRL_1_CFG_L1_ALL_SLOT_EN (1<<15)
+/* Extra Long Packet Enable after Pixel Data */
+#define DSI_LCD1_CTRL_1_CFG_L1_HEX_SLOT_EN (1<<14)
+/* Bit(s) DSI_LCD1_CTRL_1_RSRV_13_11 reserved */
+/* Turn Around Bus at Last h Line */
+#define DSI_LCD1_CTRL_1_CFG_L1_LAST_LINE_TURN (1<<10)
+/* Go to Low Power Every Frame */
+#define DSI_LCD1_CTRL_1_CFG_L1_LPM_FRAME_EN (1<<9)
+/* Go to Low Power Every Line */
+#define DSI_LCD1_CTRL_1_CFG_L1_LPM_LINE_EN (1<<8)
+/* Bit(s) DSI_LCD1_CTRL_1_RSRV_7_4 reserved */
+/* DSI Transmission Mode for LCD 1 */
+#define DSI_LCD1_CTRL_1_CFG_L1_BURST_MODE_SHIFT 2
+#define DSI_LCD1_CTRL_1_CFG_L1_BURST_MODE_MASK (3<<2)
+/* LCD 1 Input Data RGB Mode */
+#define DSI_LCD2_CTRL_1_CFG_L1_RGB_TYPE_SHIFT 0
+#define DSI_LCD2_CTRL_1_CFG_L1_RGB_TYPE_MASK (3<<0)
+
+/* DSI_PHY_CTRL_2 0x0088 DPHY Control Register 2 */
+/* Bit(s) DSI_PHY_CTRL_2_RSRV_31_12 reserved */
+/* DPHY LP Receiver Enable */
+#define DSI_PHY_CTRL_2_CFG_CSR_LANE_RESC_EN_MASK (0xf<<8)
+#define DSI_PHY_CTRL_2_CFG_CSR_LANE_RESC_EN_SHIFT 8
+/* DPHY Data Lane Enable */
+#define DSI_PHY_CTRL_2_CFG_CSR_LANE_EN_MASK (0xf<<4)
+#define DSI_PHY_CTRL_2_CFG_CSR_LANE_EN_SHIFT 4
+/* DPHY Bus Turn Around */
+#define DSI_PHY_CTRL_2_CFG_CSR_LANE_TURN_MASK (0xf)
+#define DSI_PHY_CTRL_2_CFG_CSR_LANE_TURN_SHIFT 0
+
+/* DSI_CPU_CMD_1 0x0024 DSI CPU Packet Command Register 1 */
+/* Bit(s) DSI_CPU_CMD_1_RSRV_31_24 reserved */
+/* LPDT TX Enable */
+#define DSI_CPU_CMD_1_CFG_TXLP_LPDT_MASK (0xf<<20)
+#define DSI_CPU_CMD_1_CFG_TXLP_LPDT_SHIFT 20
+/* ULPS TX Enable */
+#define DSI_CPU_CMD_1_CFG_TXLP_ULPS_MASK (0xf<<16)
+#define DSI_CPU_CMD_1_CFG_TXLP_ULPS_SHIFT 16
+/* Low Power TX Trigger Code */
+#define DSI_CPU_CMD_1_CFG_TXLP_TRIGGER_CODE_MASK (0xffff)
+#define DSI_CPU_CMD_1_CFG_TXLP_TRIGGER_CODE_SHIFT 0
+
+/* DSI_PHY_TIME_0 0x00c0 DPHY Timing Control Register 0 */
+/* Length of HS Exit Period in tx_clk_esc Cycles */
+#define DSI_PHY_TIME_0_CFG_CSR_TIME_HS_EXIT_MASK (0xff<<24)
+#define DSI_PHY_TIME_0_CFG_CSR_TIME_HS_EXIT_SHIFT 24
+/* DPHY HS Trail Period Length */
+#define DSI_PHY_TIME_0_CFG_CSR_TIME_HS_TRAIL_MASK (0xff<<16)
+#define DSI_PHY_TIME_0_CFG_CSR_TIME_HS_TRAIL_SHIFT 16
+/* DPHY HS Zero State Length */
+#define DSI_PHY_TIME_0_CDG_CSR_TIME_HS_ZERO_MASK (0xff<<8)
+#define DSI_PHY_TIME_0_CDG_CSR_TIME_HS_ZERO_SHIFT 8
+/* DPHY HS Prepare State Length */
+#define DSI_PHY_TIME_0_CFG_CSR_TIME_HS_PREP_MASK (0xff)
+#define DSI_PHY_TIME_0_CFG_CSR_TIME_HS_PREP_SHIFT 0
+
+/* DSI_PHY_TIME_1 0x00c4 DPHY Timing Control Register 1 */
+/* Time to Drive LP-00 by New Transmitter */
+#define DSI_PHY_TIME_1_CFG_CSR_TIME_TA_GET_MASK (0xff<<24)
+#define DSI_PHY_TIME_1_CFG_CSR_TIME_TA_GET_SHIFT 24
+/* Time to Drive LP-00 after Turn Request */
+#define DSI_PHY_TIME_1_CFG_CSR_TIME_TA_GO_MASK (0xff<<16)
+#define DSI_PHY_TIME_1_CFG_CSR_TIME_TA_GO_SHIFT 16
+/* DPHY HS Wakeup Period Length */
+#define DSI_PHY_TIME_1_CFG_CSR_TIME_WAKEUP_MASK (0xffff)
+#define DSI_PHY_TIME_1_CFG_CSR_TIME_WAKEUP_SHIFT 0
+
+/* DSI_PHY_TIME_2 0x00c8 DPHY Timing Control Register 2 */
+/* DPHY CLK Exit Period Length */
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_EXIT_MASK (0xff<<24)
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_EXIT_SHIFT 24
+/* DPHY CLK Trail Period Length */
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_TRAIL_MASK (0xff<<16)
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_TRAIL_SHIFT 16
+/* DPHY CLK Zero State Length */
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_ZERO_MASK (0xff<<8)
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_ZERO_SHIFT 8
+/* DPHY CLK LP Length */
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_LPX_MASK (0xff)
+#define DSI_PHY_TIME_2_CFG_CSR_TIME_CK_LPX_SHIFT 0
+
+/* DSI_PHY_TIME_3 0x00cc DPHY Timing Control Register 3 */
+/* Bit(s) DSI_PHY_TIME_3_RSRV_31_16 reserved */
+/* DPHY LP Length */
+#define DSI_PHY_TIME_3_CFG_CSR_TIME_LPX_MASK (0xff<<8)
+#define DSI_PHY_TIME_3_CFG_CSR_TIME_LPX_SHIFT 8
+/* DPHY HS req to rdy Length */
+#define DSI_PHY_TIME_3_CFG_CSR_TIME_REQRDY_MASK (0xff)
+#define DSI_PHY_TIME_3_CFG_CSR_TIME_REQRDY_SHIFT 0
+
+/*
+ * DSI timings
+ * PXA988 has a different ESC CLK from MMP2/MMP3.
+ * It is used in dsi_set_dphy() in pxa688_phy.c
+ * as the low power mode clock.
+ */
+#ifdef CONFIG_CPU_PXA988
+#define DSI_ESC_CLK 52 /* Unit: MHz */
+#define DSI_ESC_CLK_T 19 /* Unit: ns */
+#else
+#define DSI_ESC_CLK 66 /* Unit: MHz */
+#define DSI_ESC_CLK_T 15 /* Unit: ns */
+#endif
+
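+/*
+ * Sketch (illustrative only, helper name is hypothetical): DSI_ESC_CLK_T
+ * is the escape clock period in ns, so a D-PHY timing requirement given
+ * in ns could be converted to escape clock cycles with:
+ *
+ *    static inline u32 dsi_ns_to_esc_cycles(u32 ns)
+ *    {
+ *            return DIV_ROUND_UP(ns, DSI_ESC_CLK_T);
+ *    }
+ */
+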
+/* LVDS */
+/* LVDS_PHY_CTRL */
+#define LVDS_PHY_CTL 0x2A4
+#define LVDS_PLL_LOCK (1 << 31)
+#define LVDS_PHY_EXT_MASK (7 << 28)
+#define LVDS_PHY_EXT_SHIFT (28)
+#define LVDS_CLK_PHASE_MASK (0x7f << 16)
+#define LVDS_CLK_PHASE_SHIFT (16)
+#define LVDS_SSC_RESET_EXT (1 << 13)
+#define LVDS_SSC_MODE_DOWN_SPREAD (1 << 12)
+#define LVDS_SSC_EN (1 << 11)
+#define LVDS_PU_PLL (1 << 10)
+#define LVDS_PU_TX (1 << 9)
+#define LVDS_PU_IVREF (1 << 8)
+#define LVDS_CLK_SEL (1 << 7)
+#define LVDS_CLK_SEL_LVDS_PCLK (1 << 7)
+#define LVDS_PD_CH_MASK (0x3f << 1)
+#define LVDS_PD_CH(ch) ((ch) << 1)
+#define LVDS_RST (1 << 0)
+
+#define LVDS_PHY_CTL_EXT 0x2A8
+
+/* LVDS_PHY_CTRL_EXT1 */
+#define LVDS_SSC_RNGE_MASK (0x7ff << 16)
+#define LVDS_SSC_RNGE_SHIFT (16)
+#define LVDS_RESERVE_IN_MASK (0xf << 12)
+#define LVDS_RESERVE_IN_SHIFT (12)
+#define LVDS_TEST_MON_MASK (0x7 << 8)
+#define LVDS_TEST_MON_SHIFT (8)
+#define LVDS_POL_SWAP_MASK (0x3f << 0)
+#define LVDS_POL_SWAP_SHIFT (0)
+
+/* LVDS_PHY_CTRL_EXT2 */
+#define LVDS_TX_DIF_AMP_MASK (0xf << 24)
+#define LVDS_TX_DIF_AMP_SHIFT (24)
+#define LVDS_TX_DIF_CM_MASK (0x3 << 22)
+#define LVDS_TX_DIF_CM_SHIFT (22)
+#define LVDS_SELLV_TXCLK_MASK (0x1f << 16)
+#define LVDS_SELLV_TXCLK_SHIFT (16)
+#define LVDS_TX_CMFB_EN (0x1 << 15)
+#define LVDS_TX_TERM_EN (0x1 << 14)
+#define LVDS_SELLV_TXDATA_MASK (0x1f << 8)
+#define LVDS_SELLV_TXDATA_SHIFT (8)
+#define LVDS_SELLV_OP7_MASK (0x3 << 6)
+#define LVDS_SELLV_OP7_SHIFT (6)
+#define LVDS_SELLV_OP6_MASK (0x3 << 4)
+#define LVDS_SELLV_OP6_SHIFT (4)
+#define LVDS_SELLV_OP9_MASK (0x3 << 2)
+#define LVDS_SELLV_OP9_SHIFT (2)
+#define LVDS_STRESSTST_EN (0x1 << 0)
+
+/* LVDS_PHY_CTRL_EXT3 */
+#define LVDS_KVCO_MASK (0xf << 28)
+#define LVDS_KVCO_SHIFT (28)
+#define LVDS_CTUNE_MASK (0x3 << 26)
+#define LVDS_CTUNE_SHIFT (26)
+#define LVDS_VREG_IVREF_MASK (0x3 << 24)
+#define LVDS_VREG_IVREF_SHIFT (24)
+#define LVDS_VDDL_MASK (0xf << 20)
+#define LVDS_VDDL_SHIFT (20)
+#define LVDS_VDDM_MASK (0x3 << 18)
+#define LVDS_VDDM_SHIFT (18)
+#define LVDS_FBDIV_MASK (0xf << 8)
+#define LVDS_FBDIV_SHIFT (8)
+#define LVDS_REFDIV_MASK (0x7f << 0)
+#define LVDS_REFDIV_SHIFT (0)
+
+/* LVDS_PHY_CTRL_EXT4 */
+#define LVDS_SSC_FREQ_DIV_MASK (0xffff << 16)
+#define LVDS_SSC_FREQ_DIV_SHIFT (16)
+#define LVDS_INTPI_MASK (0xf << 12)
+#define LVDS_INTPI_SHIFT (12)
+#define LVDS_VCODIV_SEL_SE_MASK (0xf << 8)
+#define LVDS_VCODIV_SEL_SE_SHIFT (8)
+#define LVDS_RESET_INTP_EXT (0x1 << 7)
+#define LVDS_VCO_VRNG_MASK (0x7 << 4)
+#define LVDS_VCO_VRNG_SHIFT (4)
+#define LVDS_PI_EN (0x1 << 3)
+#define LVDS_ICP_MASK (0x7 << 0)
+#define LVDS_ICP_SHIFT (0)
+
+/* LVDS_PHY_CTRL_EXT5 */
+#define LVDS_FREQ_OFFSET_MASK (0x1ffff << 15)
+#define LVDS_FREQ_OFFSET_SHIFT (15)
+#define LVDS_FREQ_OFFSET_VALID (0x1 << 2)
+#define LVDS_FREQ_OFFSET_MODE_CK_DIV4_OUT (0x1 << 1)
+#define LVDS_FREQ_OFFSET_MODE_EN (0x1 << 0)
+
+/* VDMA */
+struct vdma_ch_regs {
+#define VDMA_DC_SADDR_1 0x320
+#define VDMA_DC_SADDR_2 0x3A0
+#define VDMA_DC_SZ_1 0x324
+#define VDMA_DC_SZ_2 0x3A4
+#define VDMA_CTRL_1 0x328
+#define VDMA_CTRL_2 0x3A8
+#define VDMA_SRC_SZ_1 0x32C
+#define VDMA_SRC_SZ_2 0x3AC
+#define VDMA_SA_1 0x330
+#define VDMA_SA_2 0x3B0
+#define VDMA_DA_1 0x334
+#define VDMA_DA_2 0x3B4
+#define VDMA_SZ_1 0x338
+#define VDMA_SZ_2 0x3B8
+ u32 dc_saddr;
+ u32 dc_size;
+ u32 ctrl;
+ u32 src_size;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 dst_size;
+#define VDMA_PITCH_1 0x33C
+#define VDMA_PITCH_2 0x3BC
+#define VDMA_ROT_CTRL_1 0x340
+#define VDMA_ROT_CTRL_2 0x3C0
+#define VDMA_RAM_CTRL0_1 0x344
+#define VDMA_RAM_CTRL0_2 0x3C4
+#define VDMA_RAM_CTRL1_1 0x348
+#define VDMA_RAM_CTRL1_2 0x3C8
+ u32 pitch;
+ u32 rot_ctrl;
+ u32 ram_ctrl0;
+ u32 ram_ctrl1;
+
+};
+struct vdma_regs {
+#define VDMA_ARBR_CTRL 0x300
+#define VDMA_IRQR 0x304
+#define VDMA_IRQM 0x308
+#define VDMA_IRQS 0x30C
+#define VDMA_MDMA_ARBR_CTRL 0x310
+ u32 arbr_ctr;
+ u32 irq_raw;
+ u32 irq_mask;
+ u32 irq_status;
+ u32 mdma_arbr_ctrl;
+ u32 reserved[3];
+
+ struct vdma_ch_regs ch1;
+ u32 reserved2[21];
+ struct vdma_ch_regs ch2;
+};
+
+/* CMU */
+#define CMU_PIP_DE_H_CFG 0x0008
+#define CMU_PRI1_H_CFG 0x000C
+#define CMU_PRI2_H_CFG 0x0010
+#define CMU_ACE_MAIN_DE1_H_CFG 0x0014
+#define CMU_ACE_MAIN_DE2_H_CFG 0x0018
+#define CMU_ACE_PIP_DE1_H_CFG 0x001C
+#define CMU_ACE_PIP_DE2_H_CFG 0x0020
+#define CMU_PIP_DE_V_CFG 0x0024
+#define CMU_PRI_V_CFG 0x0028
+#define CMU_ACE_MAIN_DE_V_CFG 0x002C
+#define CMU_ACE_PIP_DE_V_CFG 0x0030
+#define CMU_BAR_0_CFG 0x0034
+#define CMU_BAR_1_CFG 0x0038
+#define CMU_BAR_2_CFG 0x003C
+#define CMU_BAR_3_CFG 0x0040
+#define CMU_BAR_4_CFG 0x0044
+#define CMU_BAR_5_CFG 0x0048
+#define CMU_BAR_6_CFG 0x004C
+#define CMU_BAR_7_CFG 0x0050
+#define CMU_BAR_8_CFG 0x0054
+#define CMU_BAR_9_CFG 0x0058
+#define CMU_BAR_10_CFG 0x005C
+#define CMU_BAR_11_CFG 0x0060
+#define CMU_BAR_12_CFG 0x0064
+#define CMU_BAR_13_CFG 0x0068
+#define CMU_BAR_14_CFG 0x006C
+#define CMU_BAR_15_CFG 0x0070
+#define CMU_BAR_CTRL 0x0074
+#define PATTERN_TOTAL 0x0078
+#define PATTERN_ACTIVE 0x007C
+#define PATTERN_FRONT_PORCH 0x0080
+#define PATTERN_BACK_PORCH 0x0084
+#define CMU_CLK_CTRL 0x0088
+
+#define CMU_ICSC_M_C0_L 0x0900
+#define CMU_ICSC_M_C0_H 0x0901
+#define CMU_ICSC_M_C1_L 0x0902
+#define CMU_ICSC_M_C1_H 0x0903
+#define CMU_ICSC_M_C2_L 0x0904
+#define CMU_ICSC_M_C2_H 0x0905
+#define CMU_ICSC_M_C3_L 0x0906
+#define CMU_ICSC_M_C3_H 0x0907
+#define CMU_ICSC_M_C4_L 0x0908
+#define CMU_ICSC_M_C4_H 0x0909
+#define CMU_ICSC_M_C5_L 0x090A
+#define CMU_ICSC_M_C5_H 0x090B
+#define CMU_ICSC_M_C6_L 0x090C
+#define CMU_ICSC_M_C6_H 0x090D
+#define CMU_ICSC_M_C7_L 0x090E
+#define CMU_ICSC_M_C7_H 0x090F
+#define CMU_ICSC_M_C8_L 0x0910
+#define CMU_ICSC_M_C8_H 0x0911
+#define CMU_ICSC_M_O1_0 0x0914
+#define CMU_ICSC_M_O1_1 0x0915
+#define CMU_ICSC_M_O1_2 0x0916
+#define CMU_ICSC_M_O2_0 0x0918
+#define CMU_ICSC_M_O2_1 0x0919
+#define CMU_ICSC_M_O2_2 0x091A
+#define CMU_ICSC_M_O3_0 0x091C
+#define CMU_ICSC_M_O3_1 0x091D
+#define CMU_ICSC_M_O3_2 0x091E
+#define CMU_ICSC_P_C0_L 0x0920
+#define CMU_ICSC_P_C0_H 0x0921
+#define CMU_ICSC_P_C1_L 0x0922
+#define CMU_ICSC_P_C1_H 0x0923
+#define CMU_ICSC_P_C2_L 0x0924
+#define CMU_ICSC_P_C2_H 0x0925
+#define CMU_ICSC_P_C3_L 0x0926
+#define CMU_ICSC_P_C3_H 0x0927
+#define CMU_ICSC_P_C4_L 0x0928
+#define CMU_ICSC_P_C4_H 0x0929
+#define CMU_ICSC_P_C5_L 0x092A
+#define CMU_ICSC_P_C5_H 0x092B
+#define CMU_ICSC_P_C6_L 0x092C
+#define CMU_ICSC_P_C6_H 0x092D
+#define CMU_ICSC_P_C7_L 0x092E
+#define CMU_ICSC_P_C7_H 0x092F
+#define CMU_ICSC_P_C8_L 0x0930
+#define CMU_ICSC_P_C8_H 0x0931
+#define CMU_ICSC_P_O1_0 0x0934
+#define CMU_ICSC_P_O1_1 0x0935
+#define CMU_ICSC_P_O1_2 0x0936
+#define CMU_ICSC_P_O2_0 0x0938
+#define CMU_ICSC_P_O2_1 0x0939
+#define CMU_ICSC_P_O2_2 0x093A
+#define CMU_ICSC_P_O3_0 0x093C
+#define CMU_ICSC_P_O3_1 0x093D
+#define CMU_ICSC_P_O3_2 0x093E
+#define CMU_BR_M_EN 0x0940
+#define CMU_BR_M_TH1_L 0x0942
+#define CMU_BR_M_TH1_H 0x0943
+#define CMU_BR_M_TH2_L 0x0944
+#define CMU_BR_M_TH2_H 0x0945
+#define CMU_ACE_M_EN 0x0950
+#define CMU_ACE_M_WFG1 0x0951
+#define CMU_ACE_M_WFG2 0x0952
+#define CMU_ACE_M_WFG3 0x0953
+#define CMU_ACE_M_TH0 0x0954
+#define CMU_ACE_M_TH1 0x0955
+#define CMU_ACE_M_TH2 0x0956
+#define CMU_ACE_M_TH3 0x0957
+#define CMU_ACE_M_TH4 0x0958
+#define CMU_ACE_M_TH5 0x0959
+#define CMU_ACE_M_OP0_L 0x095A
+#define CMU_ACE_M_OP0_H 0x095B
+#define CMU_ACE_M_OP5_L 0x095C
+#define CMU_ACE_M_OP5_H 0x095D
+#define CMU_ACE_M_GB2 0x095E
+#define CMU_ACE_M_GB3 0x095F
+#define CMU_ACE_M_MS1 0x0960
+#define CMU_ACE_M_MS2 0x0961
+#define CMU_ACE_M_MS3 0x0962
+#define CMU_BR_P_EN 0x0970
+#define CMU_BR_P_TH1_L 0x0972
+#define CMU_BR_P_TH1_H 0x0973
+#define CMU_BR_P_TH2_L 0x0974
+#define CMU_BR_P_TH2_H 0x0975
+#define CMU_ACE_P_EN 0x0980
+#define CMU_ACE_P_WFG1 0x0981
+#define CMU_ACE_P_WFG2 0x0982
+#define CMU_ACE_P_WFG3 0x0983
+#define CMU_ACE_P_TH0 0x0984
+#define CMU_ACE_P_TH1 0x0985
+#define CMU_ACE_P_TH2 0x0986
+#define CMU_ACE_P_TH3 0x0987
+#define CMU_ACE_P_TH4 0x0988
+#define CMU_ACE_P_TH5 0x0989
+#define CMU_ACE_P_OP0_L 0x098A
+#define CMU_ACE_P_OP0_H 0x098B
+#define CMU_ACE_P_OP5_L 0x098C
+#define CMU_ACE_P_OP5_H 0x098D
+#define CMU_ACE_P_GB2 0x098E
+#define CMU_ACE_P_GB3 0x098F
+#define CMU_ACE_P_MS1 0x0990
+#define CMU_ACE_P_MS2 0x0991
+#define CMU_ACE_P_MS3 0x0992
+#define CMU_FTDC_M_EN 0x09A0
+#define CMU_FTDC_P_EN 0x09A1
+#define CMU_FTDC_INLOW_L 0x09A2
+#define CMU_FTDC_INLOW_H 0x09A3
+#define CMU_FTDC_INHIGH_L 0x09A4
+#define CMU_FTDC_INHIGH_H 0x09A5
+#define CMU_FTDC_OUTLOW_L 0x09A6
+#define CMU_FTDC_OUTLOW_H 0x09A7
+#define CMU_FTDC_OUTHIGH_L 0x09A8
+#define CMU_FTDC_OUTHIGH_H 0x09A9
+#define CMU_FTDC_YLOW 0x09AA
+#define CMU_FTDC_YHIGH 0x09AB
+#define CMU_FTDC_CH1 0x09AC
+#define CMU_FTDC_CH2_L 0x09AE
+#define CMU_FTDC_CH2_H 0x09AF
+#define CMU_FTDC_CH3_L 0x09B0
+#define CMU_FTDC_CH3_H 0x09B1
+#define CMU_FTDC_1_C00_6 0x09B2
+#define CMU_FTDC_1_C01_6 0x09B8
+#define CMU_FTDC_1_C11_6 0x09BE
+#define CMU_FTDC_1_C10_6 0x09C4
+#define CMU_FTDC_1_OFF00_6 0x09CA
+#define CMU_FTDC_1_OFF10_6 0x09D0
+#define CMU_HS_M_EN 0x0A00
+#define CMU_HS_M_AX1_L 0x0A02
+#define CMU_HS_M_AX1_H 0x0A03
+#define CMU_HS_M_AX2_L 0x0A04
+#define CMU_HS_M_AX2_H 0x0A05
+#define CMU_HS_M_AX3_L 0x0A06
+#define CMU_HS_M_AX3_H 0x0A07
+#define CMU_HS_M_AX4_L 0x0A08
+#define CMU_HS_M_AX4_H 0x0A09
+#define CMU_HS_M_AX5_L 0x0A0A
+#define CMU_HS_M_AX5_H 0x0A0B
+#define CMU_HS_M_AX6_L 0x0A0C
+#define CMU_HS_M_AX6_H 0x0A0D
+#define CMU_HS_M_AX7_L 0x0A0E
+#define CMU_HS_M_AX7_H 0x0A0F
+#define CMU_HS_M_AX8_L 0x0A10
+#define CMU_HS_M_AX8_H 0x0A11
+#define CMU_HS_M_AX9_L 0x0A12
+#define CMU_HS_M_AX9_H 0x0A13
+#define CMU_HS_M_AX10_L 0x0A14
+#define CMU_HS_M_AX10_H 0x0A15
+#define CMU_HS_M_AX11_L 0x0A16
+#define CMU_HS_M_AX11_H 0x0A17
+#define CMU_HS_M_AX12_L 0x0A18
+#define CMU_HS_M_AX12_H 0x0A19
+#define CMU_HS_M_AX13_L 0x0A1A
+#define CMU_HS_M_AX13_H 0x0A1B
+#define CMU_HS_M_AX14_L 0x0A1C
+#define CMU_HS_M_AX14_H 0x0A1D
+#define CMU_HS_M_H1_H14 0x0A1E
+#define CMU_HS_M_S1_S14 0x0A2C
+#define CMU_HS_M_GL 0x0A3A
+#define CMU_HS_M_MAXSAT_RGB_Y_L 0x0A3C
+#define CMU_HS_M_MAXSAT_RGB_Y_H 0x0A3D
+#define CMU_HS_M_MAXSAT_RCR_L 0x0A3E
+#define CMU_HS_M_MAXSAT_RCR_H 0x0A3F
+#define CMU_HS_M_MAXSAT_RCB_L 0x0A40
+#define CMU_HS_M_MAXSAT_RCB_H 0x0A41
+#define CMU_HS_M_MAXSAT_GCR_L 0x0A42
+#define CMU_HS_M_MAXSAT_GCR_H 0x0A43
+#define CMU_HS_M_MAXSAT_GCB_L 0x0A44
+#define CMU_HS_M_MAXSAT_GCB_H 0x0A45
+#define CMU_HS_M_MAXSAT_BCR_L 0x0A46
+#define CMU_HS_M_MAXSAT_BCR_H 0x0A47
+#define CMU_HS_M_MAXSAT_BCB_L 0x0A48
+#define CMU_HS_M_MAXSAT_BCB_H 0x0A49
+#define CMU_HS_M_ROFF_L 0x0A4A
+#define CMU_HS_M_ROFF_H 0x0A4B
+#define CMU_HS_M_GOFF_L 0x0A4C
+#define CMU_HS_M_GOFF_H 0x0A4D
+#define CMU_HS_M_BOFF_L 0x0A4E
+#define CMU_HS_M_BOFF_H 0x0A4F
+#define CMU_HS_P_EN 0x0A50
+#define CMU_HS_P_AX1_L 0x0A52
+#define CMU_HS_P_AX1_H 0x0A53
+#define CMU_HS_P_AX2_L 0x0A54
+#define CMU_HS_P_AX2_H 0x0A55
+#define CMU_HS_P_AX3_L 0x0A56
+#define CMU_HS_P_AX3_H 0x0A57
+#define CMU_HS_P_AX4_L 0x0A58
+#define CMU_HS_P_AX4_H 0x0A59
+#define CMU_HS_P_AX5_L 0x0A5A
+#define CMU_HS_P_AX5_H 0x0A5B
+#define CMU_HS_P_AX6_L 0x0A5C
+#define CMU_HS_P_AX6_H 0x0A5D
+#define CMU_HS_P_AX7_L 0x0A5E
+#define CMU_HS_P_AX7_H 0x0A5F
+#define CMU_HS_P_AX8_L 0x0A60
+#define CMU_HS_P_AX8_H 0x0A61
+#define CMU_HS_P_AX9_L 0x0A62
+#define CMU_HS_P_AX9_H 0x0A63
+#define CMU_HS_P_AX10_L 0x0A64
+#define CMU_HS_P_AX10_H 0x0A65
+#define CMU_HS_P_AX11_L 0x0A66
+#define CMU_HS_P_AX11_H 0x0A67
+#define CMU_HS_P_AX12_L 0x0A68
+#define CMU_HS_P_AX12_H 0x0A69
+#define CMU_HS_P_AX13_L 0x0A6A
+#define CMU_HS_P_AX13_H 0x0A6B
+#define CMU_HS_P_AX14_L 0x0A6C
+#define CMU_HS_P_AX14_H 0x0A6D
+#define CMU_HS_P_H1_H14 0x0A6E
+#define CMU_HS_P_S1_S14 0x0A7C
+#define CMU_HS_P_GL 0x0A8A
+#define CMU_HS_P_MAXSAT_RGB_Y_L 0x0A8C
+#define CMU_HS_P_MAXSAT_RGB_Y_H 0x0A8D
+#define CMU_HS_P_MAXSAT_RCR_L 0x0A8E
+#define CMU_HS_P_MAXSAT_RCR_H 0x0A8F
+#define CMU_HS_P_MAXSAT_RCB_L 0x0A90
+#define CMU_HS_P_MAXSAT_RCB_H 0x0A91
+#define CMU_HS_P_MAXSAT_GCR_L 0x0A92
+#define CMU_HS_P_MAXSAT_GCR_H 0x0A93
+#define CMU_HS_P_MAXSAT_GCB_L 0x0A94
+#define CMU_HS_P_MAXSAT_GCB_H 0x0A95
+#define CMU_HS_P_MAXSAT_BCR_L 0x0A96
+#define CMU_HS_P_MAXSAT_BCR_H 0x0A97
+#define CMU_HS_P_MAXSAT_BCB_L 0x0A98
+#define CMU_HS_P_MAXSAT_BCB_H 0x0A99
+#define CMU_HS_P_ROFF_L 0x0A9A
+#define CMU_HS_P_ROFF_H 0x0A9B
+#define CMU_HS_P_GOFF_L 0x0A9C
+#define CMU_HS_P_GOFF_H 0x0A9D
+#define CMU_HS_P_BOFF_L 0x0A9E
+#define CMU_HS_P_BOFF_H 0x0A9F
+#define CMU_GLCSC_M_C0_L 0x0AA0
+#define CMU_GLCSC_M_C0_H 0x0AA1
+#define CMU_GLCSC_M_C1_L 0x0AA2
+#define CMU_GLCSC_M_C1_H 0x0AA3
+#define CMU_GLCSC_M_C2_L 0x0AA4
+#define CMU_GLCSC_M_C2_H 0x0AA5
+#define CMU_GLCSC_M_C3_L 0x0AA6
+#define CMU_GLCSC_M_C3_H 0x0AA7
+#define CMU_GLCSC_M_C4_L 0x0AA8
+#define CMU_GLCSC_M_C4_H 0x0AA9
+#define CMU_GLCSC_M_C5_L 0x0AAA
+#define CMU_GLCSC_M_C5_H 0x0AAB
+#define CMU_GLCSC_M_C6_L 0x0AAC
+#define CMU_GLCSC_M_C6_H 0x0AAD
+#define CMU_GLCSC_M_C7_L 0x0AAE
+#define CMU_GLCSC_M_C7_H 0x0AAF
+#define CMU_GLCSC_M_C8_L 0x0AB0
+#define CMU_GLCSC_M_C8_H 0x0AB1
+#define CMU_GLCSC_M_O1_1 0x0AB4
+#define CMU_GLCSC_M_O1_2 0x0AB5
+#define CMU_GLCSC_M_O1_3 0x0AB6
+#define CMU_GLCSC_M_O2_1 0x0AB8
+#define CMU_GLCSC_M_O2_2 0x0AB9
+#define CMU_GLCSC_M_O2_3 0x0ABA
+#define CMU_GLCSC_M_O3_1 0x0ABC
+#define CMU_GLCSC_M_O3_2 0x0ABD
+#define CMU_GLCSC_M_O3_3 0x0ABE
+#define CMU_GLCSC_P_C0_L 0x0AC0
+#define CMU_GLCSC_P_C0_H 0x0AC1
+#define CMU_GLCSC_P_C1_L 0x0AC2
+#define CMU_GLCSC_P_C1_H 0x0AC3
+#define CMU_GLCSC_P_C2_L 0x0AC4
+#define CMU_GLCSC_P_C2_H 0x0AC5
+#define CMU_GLCSC_P_C3_L 0x0AC6
+#define CMU_GLCSC_P_C3_H 0x0AC7
+#define CMU_GLCSC_P_C4_L 0x0AC8
+#define CMU_GLCSC_P_C4_H 0x0AC9
+#define CMU_GLCSC_P_C5_L 0x0ACA
+#define CMU_GLCSC_P_C5_H 0x0ACB
+#define CMU_GLCSC_P_C6_L 0x0ACC
+#define CMU_GLCSC_P_C6_H 0x0ACD
+#define CMU_GLCSC_P_C7_L 0x0ACE
+#define CMU_GLCSC_P_C7_H 0x0ACF
+#define CMU_GLCSC_P_C8_L 0x0AD0
+#define CMU_GLCSC_P_C8_H 0x0AD1
+#define CMU_GLCSC_P_O1_1 0x0AD4
+#define CMU_GLCSC_P_O1_2 0x0AD5
+#define CMU_GLCSC_P_O1_3 0x0AD6
+#define CMU_GLCSC_P_O2_1 0x0AD8
+#define CMU_GLCSC_P_O2_2 0x0AD9
+#define CMU_GLCSC_P_O2_3 0x0ADA
+#define CMU_GLCSC_P_O3_1 0x0ADC
+#define CMU_GLCSC_P_O3_2 0x0ADD
+#define CMU_GLCSC_P_O3_3 0x0ADE
+#define CMU_PIXVAL_M_EN 0x0AE0
+#define CMU_PIXVAL_P_EN 0x0AE1
+
+#define CMU_CLK_CTRL_TCLK 0x0
+#define CMU_CLK_CTRL_SCLK 0x2
+#define CMU_CLK_CTRL_MSK 0x2
+#define CMU_CLK_CTRL_ENABLE 0x1
+
+#define LCD_TOP_CTRL_TV 0x2
+#define LCD_TOP_CTRL_PN 0x0
+#define LCD_TOP_CTRL_SEL_MSK 0x2
+#define LCD_IO_CMU_IN_SEL_MSK (0x3 << 20)
+#define LCD_IO_CMU_IN_SEL_TV 0
+#define LCD_IO_CMU_IN_SEL_PN 1
+#define LCD_IO_CMU_IN_SEL_PN2 2
+#define LCD_IO_TV_OUT_SEL_MSK (0x3 << 26)
+#define LCD_IO_PN_OUT_SEL_MSK (0x3 << 24)
+#define LCD_IO_PN2_OUT_SEL_MSK (0x3 << 28)
+#define LCD_IO_TV_OUT_SEL_NON 3
+#define LCD_IO_PN_OUT_SEL_NON 3
+#define LCD_IO_PN2_OUT_SEL_NON 3
+#define LCD_TOP_CTRL_CMU_ENABLE 0x1
+#define LCD_IO_OVERL_MSK 0xC00000
+#define LCD_IO_OVERL_TV 0x0
+#define LCD_IO_OVERL_LCD1 0x400000
+#define LCD_IO_OVERL_LCD2 0xC00000
+#define HINVERT_MSK 0x4
+#define VINVERT_MSK 0x8
+#define HINVERT_LEN 0x2
+#define VINVERT_LEN 0x3
+
+#define CMU_CTRL 0x88
+#define CMU_CTRL_A0_MSK 0x6
+#define CMU_CTRL_A0_TV 0x0
+#define CMU_CTRL_A0_LCD1 0x1
+#define CMU_CTRL_A0_LCD2 0x2
+#define CMU_CTRL_A0_HDMI 0x3
+
+#define ICR_DRV_ROUTE_OFF 0x0
+#define ICR_DRV_ROUTE_TV 0x1
+#define ICR_DRV_ROUTE_LCD1 0x2
+#define ICR_DRV_ROUTE_LCD2 0x3
+
+enum {
+ PATH_PN = 0,
+ PATH_TV,
+ PATH_P2,
+};
+
+/*
+ * mmphw_path_plat describes the part of the mmp path related info
+ * that is hidden in the display driver and not exported to the buffer driver
+ */
+struct mmphw_ctrl;
+struct mmphw_path_plat {
+ int id;
+ struct mmphw_ctrl *ctrl;
+ struct mmp_path *path;
+ u32 path_config;
+ u32 link_config;
+};
+
+/* mmp ctrl describes mmp controller related info */
+struct mmphw_ctrl {
+ /* platform related, get from config */
+ const char *name;
+ int irq;
+ void *reg_base;
+ struct clk *clk;
+
+ /* sys info */
+ struct device *dev;
+
+ /* state */
+ int open_count;
+ int status;
+ struct mutex access_ok;
+
+ /* paths */
+ int path_num;
+ struct mmphw_path_plat path_plats[0];
+};
+
+static inline int overlay_is_vid(struct mmp_overlay *overlay)
+{
+ return overlay->dmafetch_id & 1;
+}
+
+static inline struct mmphw_path_plat *path_to_path_plat(struct mmp_path *path)
+{
+ return (struct mmphw_path_plat *)path->plat_data;
+}
+
+static inline struct mmphw_ctrl *path_to_ctrl(struct mmp_path *path)
+{
+ return path_to_path_plat(path)->ctrl;
+}
+
+static inline struct mmphw_ctrl *overlay_to_ctrl(struct mmp_overlay *overlay)
+{
+ return path_to_ctrl(overlay->path);
+}
+
+static inline void *ctrl_regs(struct mmp_path *path)
+{
+ return path_to_ctrl(path)->reg_base;
+}
+
+/* path regs, for registers that are symmetrical across the paths */
+static inline struct lcd_regs *path_regs(struct mmp_path *path)
+{
+ if (path->id == PATH_PN)
+ return (struct lcd_regs *)(ctrl_regs(path) + 0xc0);
+ else if (path->id == PATH_TV)
+ return (struct lcd_regs *)ctrl_regs(path);
+ else if (path->id == PATH_P2)
+ return (struct lcd_regs *)(ctrl_regs(path) + 0x200);
+ else {
+ dev_err(path->dev, "path id %d invalid\n", path->id);
+ BUG_ON(1);
+ return NULL;
+ }
+}
+
+#ifdef CONFIG_MMP_DISP_SPI
+extern int lcd_spi_register(struct mmphw_ctrl *ctrl);
+#endif
+#endif /* _MMP_CTRL_H_ */
diff --git a/drivers/video/mmp/hw/mmp_spi.c b/drivers/video/mmp/hw/mmp_spi.c
new file mode 100644
index 000000000000..e62ca7bf0d5e
--- /dev/null
+++ b/drivers/video/mmp/hw/mmp_spi.c
@@ -0,0 +1,180 @@
+/*
+ * linux/drivers/video/mmp/hw/mmp_spi.c
+ * uses the SPI unit in the LCD controller to send commands
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Guoqing Li <ligq@marvell.com>
+ * Lisa Du <cldu@marvell.com>
+ * Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include "mmp_ctrl.h"
+
+/**
+ * lcd_spi_write - write a command word to the LCD controller SPI port
+ * @spi: SPI device of the panel
+ * @data: 8/16/32-bit, MSB justified data to write
+ *
+ * Busy-waits for the bus transfer complete IRQ.
+ * The caller is expected to perform the necessary locking.
+ *
+ * Returns:
+ * %-ETIMEDOUT timeout occurred
+ * 0 success
+ */
+static inline int lcd_spi_write(struct spi_device *spi, u32 data)
+{
+ int timeout = 100000, isr, ret = 0;
+ u32 tmp;
+ void *reg_base =
+ *(void **)spi_master_get_devdata(spi->master);
+
+ /* clear ISR */
+ writel_relaxed(~SPI_IRQ_MASK, reg_base + SPU_IRQ_ISR);
+
+ switch (spi->bits_per_word) {
+ case 8:
+ writel_relaxed((u8)data, reg_base + LCD_SPU_SPI_TXDATA);
+ break;
+ case 16:
+ writel_relaxed((u16)data, reg_base + LCD_SPU_SPI_TXDATA);
+ break;
+ case 32:
+ writel_relaxed((u32)data, reg_base + LCD_SPU_SPI_TXDATA);
+ break;
+ default:
+ dev_err(&spi->dev, "Wrong spi bit length\n");
+ }
+
+ /* SPI start to send command */
+ tmp = readl_relaxed(reg_base + LCD_SPU_SPI_CTRL);
+ tmp &= ~CFG_SPI_START_MASK;
+ tmp |= CFG_SPI_START(1);
+ writel(tmp, reg_base + LCD_SPU_SPI_CTRL);
+
+ isr = readl_relaxed(reg_base + SPU_IRQ_ISR);
+ while (!(isr & SPI_IRQ_ENA_MASK)) {
+ udelay(100);
+ isr = readl_relaxed(reg_base + SPU_IRQ_ISR);
+ if (!--timeout) {
+ ret = -ETIMEDOUT;
+ dev_err(&spi->dev, "spi cmd send time out\n");
+ break;
+ }
+ }
+
+ tmp = readl_relaxed(reg_base + LCD_SPU_SPI_CTRL);
+ tmp &= ~CFG_SPI_START_MASK;
+ tmp |= CFG_SPI_START(0);
+ writel_relaxed(tmp, reg_base + LCD_SPU_SPI_CTRL);
+
+ writel_relaxed(~SPI_IRQ_MASK, reg_base + SPU_IRQ_ISR);
+
+ return ret;
+}
+
+static int lcd_spi_setup(struct spi_device *spi)
+{
+ void *reg_base =
+ *(void **)spi_master_get_devdata(spi->master);
+ u32 tmp;
+
+ tmp = CFG_SCLKCNT(16) |
+ CFG_TXBITS(spi->bits_per_word) |
+ CFG_SPI_SEL(1) | CFG_SPI_ENA(1) |
+ CFG_SPI_3W4WB(1);
+ writel(tmp, reg_base + LCD_SPU_SPI_CTRL);
+
+ /*
+ * After setting the mode, the SPI signals need some time to be
+ * pulled up, otherwise the waveform of the following SPI command
+ * would be wrong, especially on pxa910h.
+ */
+ tmp = readl_relaxed(reg_base + SPU_IOPAD_CONTROL);
+ if ((tmp & CFG_IOPADMODE_MASK) != IOPAD_DUMB18SPI)
+ writel_relaxed(IOPAD_DUMB18SPI |
+ (tmp & ~CFG_IOPADMODE_MASK),
+ reg_base + SPU_IOPAD_CONTROL);
+ udelay(20);
+ return 0;
+}
+
+static int lcd_spi_one_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct spi_transfer *t;
+ int i;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ switch (spi->bits_per_word) {
+ case 8:
+ for (i = 0; i < t->len; i++)
+ lcd_spi_write(spi, ((u8 *)t->tx_buf)[i]);
+ break;
+ case 16:
+ for (i = 0; i < t->len/2; i++)
+ lcd_spi_write(spi, ((u16 *)t->tx_buf)[i]);
+ break;
+ case 32:
+ for (i = 0; i < t->len/4; i++)
+ lcd_spi_write(spi, ((u32 *)t->tx_buf)[i]);
+ break;
+ default:
+ dev_err(&spi->dev, "Wrong spi bit length\n");
+ }
+ }
+
+ m->status = 0;
+ if (m->complete)
+ m->complete(m->context);
+ return 0;
+}
+
+int lcd_spi_register(struct mmphw_ctrl *ctrl)
+{
+ struct spi_master *master;
+ void **p_regbase;
+ int err;
+
+ master = spi_alloc_master(ctrl->dev, sizeof(void *));
+ if (!master) {
+ dev_err(ctrl->dev, "unable to allocate SPI master\n");
+ return -ENOMEM;
+ }
+ p_regbase = spi_master_get_devdata(master);
+ *p_regbase = ctrl->reg_base;
+
+ /* set bus num to 5 to avoid conflict with other spi hosts */
+ master->bus_num = 5;
+ master->num_chipselect = 1;
+ master->setup = lcd_spi_setup;
+ master->transfer = lcd_spi_one_transfer;
+
+ err = spi_register_master(master);
+ if (err < 0) {
+ dev_err(ctrl->dev, "unable to register SPI master\n");
+ spi_master_put(master);
+ return err;
+ }
+
+ dev_info(&master->dev, "registered\n");
+
+ return 0;
+}
diff --git a/drivers/video/mmp/panel/Kconfig b/drivers/video/mmp/panel/Kconfig
new file mode 100644
index 000000000000..4b2c4f457b11
--- /dev/null
+++ b/drivers/video/mmp/panel/Kconfig
@@ -0,0 +1,6 @@
+config MMP_PANEL_TPOHVGA
+ bool "tpohvga panel TJ032MD01BW support"
+ depends on SPI_MASTER
+ default n
+ help
+ Say Y to enable support for the TPO TJ032MD01BW HVGA panel (SPI-initialized).
diff --git a/drivers/video/mmp/panel/Makefile b/drivers/video/mmp/panel/Makefile
new file mode 100644
index 000000000000..2f91611c7e5e
--- /dev/null
+++ b/drivers/video/mmp/panel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MMP_PANEL_TPOHVGA) += tpo_tj032md01bw.o
diff --git a/drivers/video/mmp/panel/tpo_tj032md01bw.c b/drivers/video/mmp/panel/tpo_tj032md01bw.c
new file mode 100644
index 000000000000..998978b08f5e
--- /dev/null
+++ b/drivers/video/mmp/panel/tpo_tj032md01bw.c
@@ -0,0 +1,186 @@
+/*
+ * linux/drivers/video/mmp/panel/tpo_tj032md01bw.c
+ * active panel that uses the SPI interface for initialization
+ *
+ * Copyright (C) 2012 Marvell Technology Group Ltd.
+ * Authors: Guoqing Li <ligq@marvell.com>
+ * Lisa Du <cldu@marvell.com>
+ * Zhou Zhu <zzhu3@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <video/mmp_disp.h>
+
+static u16 init[] = {
+ 0x0801,
+ 0x0800,
+ 0x0200,
+ 0x0304,
+ 0x040e,
+ 0x0903,
+ 0x0b18,
+ 0x0c53,
+ 0x0d01,
+ 0x0ee0,
+ 0x0f01,
+ 0x1058,
+ 0x201e,
+ 0x210a,
+ 0x220a,
+ 0x231e,
+ 0x2400,
+ 0x2532,
+ 0x2600,
+ 0x27ac,
+ 0x2904,
+ 0x2aa2,
+ 0x2b45,
+ 0x2c45,
+ 0x2d15,
+ 0x2e5a,
+ 0x2fff,
+ 0x306b,
+ 0x310d,
+ 0x3248,
+ 0x3382,
+ 0x34bd,
+ 0x35e7,
+ 0x3618,
+ 0x3794,
+ 0x3801,
+ 0x395d,
+ 0x3aae,
+ 0x3bff,
+ 0x07c9,
+};
+
+static u16 poweroff[] = {
+ 0x07d9,
+};
+
+struct tpohvga_plat_data {
+ void (*plat_onoff)(int status);
+ struct spi_device *spi;
+};
+
+static void tpohvga_onoff(struct mmp_panel *panel, int status)
+{
+ struct tpohvga_plat_data *plat = panel->plat_data;
+ int ret;
+
+ if (status) {
+ plat->plat_onoff(1);
+
+ ret = spi_write(plat->spi, init, sizeof(init));
+ if (ret < 0)
+ dev_warn(panel->dev, "init cmd failed(%d)\n", ret);
+ } else {
+ ret = spi_write(plat->spi, poweroff, sizeof(poweroff));
+ if (ret < 0)
+ dev_warn(panel->dev, "poweroff cmd failed(%d)\n", ret);
+
+ plat->plat_onoff(0);
+ }
+}
+
+static struct mmp_mode mmp_modes_tpohvga[] = {
+ [0] = {
+ .pixclock_freq = 10394400,
+ .refresh = 60,
+ .xres = 320,
+ .yres = 480,
+ .hsync_len = 10,
+ .left_margin = 15,
+ .right_margin = 10,
+ .vsync_len = 2,
+ .upper_margin = 4,
+ .lower_margin = 2,
+ .invert_pixclock = 1,
+ .pix_fmt_out = PIXFMT_RGB565,
+ },
+};
+
+static int tpohvga_get_modelist(struct mmp_panel *panel,
+ struct mmp_mode **modelist)
+{
+ *modelist = mmp_modes_tpohvga;
+ return 1;
+}
+
+static struct mmp_panel panel_tpohvga = {
+ .name = "tpohvga",
+ .panel_type = PANELTYPE_ACTIVE,
+ .get_modelist = tpohvga_get_modelist,
+ .set_onoff = tpohvga_onoff,
+};
+
+static int tpohvga_probe(struct spi_device *spi)
+{
+ struct mmp_mach_panel_info *mi;
+ int ret;
+ struct tpohvga_plat_data *plat_data;
+
+ /* get configs from platform data */
+ mi = spi->dev.platform_data;
+ if (mi == NULL) {
+ dev_err(&spi->dev, "%s: no platform data defined\n", __func__);
+ return -EINVAL;
+ }
+
+ /* setup spi related info */
+ spi->bits_per_word = 16;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi setup failed %d", ret);
+ return ret;
+ }
+
+ plat_data = kzalloc(sizeof(*plat_data), GFP_KERNEL);
+ if (plat_data == NULL)
+ return -ENOMEM;
+
+ plat_data->spi = spi;
+ plat_data->plat_onoff = mi->plat_set_onoff;
+ panel_tpohvga.plat_data = plat_data;
+ panel_tpohvga.plat_path_name = mi->plat_path_name;
+ panel_tpohvga.dev = &spi->dev;
+
+ mmp_register_panel(&panel_tpohvga);
+
+ return 0;
+}
+
+static struct spi_driver panel_tpohvga_driver = {
+ .driver = {
+ .name = "tpo-hvga",
+ .owner = THIS_MODULE,
+ },
+ .probe = tpohvga_probe,
+};
+module_spi_driver(panel_tpohvga_driver);
+
+MODULE_AUTHOR("Lisa Du<cldu@marvell.com>");
+MODULE_DESCRIPTION("Panel driver for tpohvga");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index f2566c19e71c..113c7876c855 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -261,7 +261,7 @@ int get_img(struct mdp_img *img, struct fb_info *info,
if (f.file == NULL)
return -1;
- if (MAJOR(f.file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+ if (MAJOR(file_inode(f.file)->i_rdev) == FB_MAJOR) {
*start = info->fix.smem_start;
*len = info->fix.smem_len;
} else
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 736887208574..cfdb380ec81e 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -1306,7 +1306,7 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
dma_free_writecombine(fbi->device, fbi->fix.smem_len,
fbi->screen_base, fbi->fix.smem_start);
- fbi->screen_base = 0;
+ fbi->screen_base = NULL;
mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = 0;
fbi->fix.smem_len = 0;
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
new file mode 100644
index 000000000000..13ecd9897010
--- /dev/null
+++ b/drivers/video/of_display_timing.c
@@ -0,0 +1,239 @@
+/*
+ * OF helpers for parsing display timings
+ *
+ * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * based on of_videomode.c by Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This file is released under the GPLv2
+ */
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+
+/**
+ * parse_timing_property - parse timing_entry from device_node
+ * @np: device_node with the property
+ * @name: name of the property
+ * @result: will be set to the return value
+ *
+ * DESCRIPTION:
+ * Every display_timing can be specified with either just the typical value or
+ * a range consisting of min/typ/max. This function handles both forms.
+ **/
+static int parse_timing_property(struct device_node *np, const char *name,
+ struct timing_entry *result)
+{
+ struct property *prop;
+ int length, cells, ret;
+
+ prop = of_find_property(np, name, &length);
+ if (!prop) {
+ pr_err("%s: could not find property %s\n",
+ of_node_full_name(np), name);
+ return -EINVAL;
+ }
+
+ cells = length / sizeof(u32);
+ if (cells == 1) {
+ ret = of_property_read_u32(np, name, &result->typ);
+ result->min = result->typ;
+ result->max = result->typ;
+ } else if (cells == 3) {
+ ret = of_property_read_u32_array(np, name, &result->min, cells);
+ } else {
+ pr_err("%s: illegal timing specification in %s\n",
+ of_node_full_name(np), name);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * of_get_display_timing - parse display_timing entry from device_node
+ * @np: device_node with the properties
+ **/
+static struct display_timing *of_get_display_timing(struct device_node *np)
+{
+ struct display_timing *dt;
+ u32 val = 0;
+ int ret = 0;
+
+ dt = kzalloc(sizeof(*dt), GFP_KERNEL);
+ if (!dt) {
+ pr_err("%s: could not allocate display_timing struct\n",
+ of_node_full_name(np));
+ return NULL;
+ }
+
+ ret |= parse_timing_property(np, "hback-porch", &dt->hback_porch);
+ ret |= parse_timing_property(np, "hfront-porch", &dt->hfront_porch);
+ ret |= parse_timing_property(np, "hactive", &dt->hactive);
+ ret |= parse_timing_property(np, "hsync-len", &dt->hsync_len);
+ ret |= parse_timing_property(np, "vback-porch", &dt->vback_porch);
+ ret |= parse_timing_property(np, "vfront-porch", &dt->vfront_porch);
+ ret |= parse_timing_property(np, "vactive", &dt->vactive);
+ ret |= parse_timing_property(np, "vsync-len", &dt->vsync_len);
+ ret |= parse_timing_property(np, "clock-frequency", &dt->pixelclock);
+
+ dt->dmt_flags = 0;
+ dt->data_flags = 0;
+ if (!of_property_read_u32(np, "vsync-active", &val))
+ dt->dmt_flags |= val ? VESA_DMT_VSYNC_HIGH :
+ VESA_DMT_VSYNC_LOW;
+ if (!of_property_read_u32(np, "hsync-active", &val))
+ dt->dmt_flags |= val ? VESA_DMT_HSYNC_HIGH :
+ VESA_DMT_HSYNC_LOW;
+ if (!of_property_read_u32(np, "de-active", &val))
+ dt->data_flags |= val ? DISPLAY_FLAGS_DE_HIGH :
+ DISPLAY_FLAGS_DE_LOW;
+ if (!of_property_read_u32(np, "pixelclk-active", &val))
+ dt->data_flags |= val ? DISPLAY_FLAGS_PIXDATA_POSEDGE :
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+
+ if (of_property_read_bool(np, "interlaced"))
+ dt->data_flags |= DISPLAY_FLAGS_INTERLACED;
+ if (of_property_read_bool(np, "doublescan"))
+ dt->data_flags |= DISPLAY_FLAGS_DOUBLESCAN;
+
+ if (ret) {
+ pr_err("%s: error reading timing properties\n",
+ of_node_full_name(np));
+ kfree(dt);
+ return NULL;
+ }
+
+ return dt;
+}
+
+/**
+ * of_get_display_timings - parse all display_timing entries from a device_node
+ * @np: device_node with the subnodes
+ **/
+struct display_timings *of_get_display_timings(struct device_node *np)
+{
+ struct device_node *timings_np;
+ struct device_node *entry;
+ struct device_node *native_mode;
+ struct display_timings *disp;
+
+ if (!np) {
+ pr_err("%s: no devicenode given\n", of_node_full_name(np));
+ return NULL;
+ }
+
+ timings_np = of_find_node_by_name(np, "display-timings");
+ if (!timings_np) {
+ pr_err("%s: could not find display-timings node\n",
+ of_node_full_name(np));
+ return NULL;
+ }
+
+ disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp) {
+ pr_err("%s: could not allocate struct disp'\n",
+ of_node_full_name(np));
+ goto dispfail;
+ }
+
+ entry = of_parse_phandle(timings_np, "native-mode", 0);
+ /* assume first child as native mode if none provided */
+ if (!entry)
+ entry = of_get_next_child(np, NULL);
+ /* if there is no child, it is useless to go on */
+ if (!entry) {
+ pr_err("%s: no timing specifications given\n",
+ of_node_full_name(np));
+ goto entryfail;
+ }
+
+ pr_debug("%s: using %s as default timing\n",
+ of_node_full_name(np), entry->name);
+
+ native_mode = entry;
+
+ disp->num_timings = of_get_child_count(timings_np);
+ if (disp->num_timings == 0) {
+ /* should never happen, as entry was already found above */
+ pr_err("%s: no timings specified\n", of_node_full_name(np));
+ goto entryfail;
+ }
+
+ disp->timings = kzalloc(sizeof(struct display_timing *) *
+ disp->num_timings, GFP_KERNEL);
+ if (!disp->timings) {
+ pr_err("%s: could not allocate timings array\n",
+ of_node_full_name(np));
+ goto entryfail;
+ }
+
+ disp->num_timings = 0;
+ disp->native_mode = 0;
+
+ for_each_child_of_node(timings_np, entry) {
+ struct display_timing *dt;
+
+ dt = of_get_display_timing(entry);
+ if (!dt) {
+ /*
+ * to not encourage wrong devicetrees, fail in case of
+ * an error
+ */
+ pr_err("%s: error in timing %d\n",
+ of_node_full_name(np), disp->num_timings + 1);
+ goto timingfail;
+ }
+
+ if (native_mode == entry)
+ disp->native_mode = disp->num_timings;
+
+ disp->timings[disp->num_timings] = dt;
+ disp->num_timings++;
+ }
+ of_node_put(timings_np);
+ /*
+ * native_mode points to the device_node returned by of_parse_phandle,
+ * so call of_node_put() on it
+ */
+ of_node_put(native_mode);
+
+ pr_debug("%s: got %d timings. Using timing #%d as default\n",
+ of_node_full_name(np), disp->num_timings,
+ disp->native_mode + 1);
+
+ return disp;
+
+timingfail:
+ if (native_mode)
+ of_node_put(native_mode);
+ display_timings_release(disp);
+entryfail:
+ kfree(disp);
+dispfail:
+ of_node_put(timings_np);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(of_get_display_timings);
+
+/**
+ * of_display_timings_exist - check if a display-timings node is provided
+ * @np: device_node with the timing
+ **/
+int of_display_timings_exist(struct device_node *np)
+{
+ struct device_node *timings_np;
+
+ if (!np)
+ return -EINVAL;
+
+ timings_np = of_parse_phandle(np, "display-timings", 0);
+ if (!timings_np)
+ return -EINVAL;
+
+ of_node_put(timings_np);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(of_display_timings_exist);
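
An illustrative device-tree fragment (not from this patch; node names and numbers are made up) for the parser above, shown as a comment. Each timing property takes either a single typical value or a <min typ max> triplet, and native-mode selects the default child:

/*
 *	display-timings {
 *		native-mode = <&timing0>;
 *		timing0: timing0 {
 *			clock-frequency = <10394400>;	// single typical value
 *			hactive = <320>;
 *			vactive = <480>;
 *			hback-porch = <10 15 20>;	// min/typ/max triplet
 *			hfront-porch = <10>;
 *			hsync-len = <10>;
 *			vback-porch = <4>;
 *			vfront-porch = <2>;
 *			vsync-len = <2>;
 *		};
 *	};
 */
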
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
new file mode 100644
index 000000000000..5b8066cd397f
--- /dev/null
+++ b/drivers/video/of_videomode.c
@@ -0,0 +1,54 @@
+/*
+ * generic videomode helper
+ *
+ * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * This file is released under the GPLv2
+ */
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+/**
+ * of_get_videomode - get the videomode #<index> from devicetree
+ * @np - devicenode with the display_timings
+ * @vm - set to return value
+ * @index - index into list of display_timings
+ * (Set this to OF_USE_NATIVE_MODE to use whatever mode is
+ * specified as native mode in the DT.)
+ *
+ * DESCRIPTION:
+ * Get a list of all display timings and put the one
+ * specified by index into *vm. This function should only be used if
+ * only one videomode is to be retrieved. A driver that needs to work
+ * with multiple/all videomodes should work with
+ * of_get_display_timings instead.
+ **/
+int of_get_videomode(struct device_node *np, struct videomode *vm,
+ int index)
+{
+ struct display_timings *disp;
+ int ret;
+
+ disp = of_get_display_timings(np);
+ if (!disp) {
+ pr_err("%s: no timings specified\n", of_node_full_name(np));
+ return -EINVAL;
+ }
+
+ if (index == OF_USE_NATIVE_MODE)
+ index = disp->native_mode;
+
+ ret = videomode_from_timing(disp, vm, index);
+ if (ret)
+ return ret;
+
+ display_timings_release(disp);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_get_videomode);
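
A consumer-side sketch (not part of this patch; the helper name and message are illustrative) of pulling the native timing with of_get_videomode() as defined above:

static int example_get_native_mode(struct device_node *np)
{
	struct videomode vm;
	int ret;

	ret = of_get_videomode(np, &vm, OF_USE_NATIVE_MODE);
	if (ret)
		return ret;

	pr_info("panel native mode: %ux%u\n", vm.hactive, vm.vactive);
	return 0;
}
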
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 54ca8ae21078..c904f42d81c1 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -291,30 +291,6 @@ static struct panel_config generic_dpi_panels[] = {
.name = "h4",
},
- /* Unknown panel used in Samsung OMAP2 Apollon */
- {
- {
- .x_res = 480,
- .y_res = 272,
-
- .pixel_clock = 6250,
-
- .hsw = 41,
- .hfp = 2,
- .hbp = 2,
-
- .vsw = 10,
- .vfp = 2,
- .vbp = 2,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .name = "apollon",
- },
/* FocalTech ETM070003DH6 */
{
{
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index 80233dae358a..22450908306c 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -1467,10 +1467,10 @@ void viafb_set_vclock(u32 clk, int set_iga)
via_write_misc_reg_mask(0x0C, 0x0C); /* select external clock */
}
-struct display_timing var_to_timing(const struct fb_var_screeninfo *var,
+struct via_display_timing var_to_timing(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres)
{
- struct display_timing timing;
+ struct via_display_timing timing;
u16 dx = (var->xres - cxres) / 2, dy = (var->yres - cyres) / 2;
timing.hor_addr = cxres;
@@ -1491,7 +1491,7 @@ struct display_timing var_to_timing(const struct fb_var_screeninfo *var,
void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres, int iga)
{
- struct display_timing crt_reg = var_to_timing(var,
+ struct via_display_timing crt_reg = var_to_timing(var,
cxres ? cxres : var->xres, cyres ? cyres : var->yres);
if (iga == IGA1)
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index a8205754c736..3be073c58b03 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -637,7 +637,7 @@ extern int viafb_LCD_ON;
extern int viafb_DVI_ON;
extern int viafb_hotplug;
-struct display_timing var_to_timing(const struct fb_var_screeninfo *var,
+struct via_display_timing var_to_timing(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres);
void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres, int iga);
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 980ee1b1dcf3..5d21ff436ec8 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -549,7 +549,7 @@ void viafb_lcd_set_mode(const struct fb_var_screeninfo *var, u16 cxres,
int panel_hres = plvds_setting_info->lcd_panel_hres;
int panel_vres = plvds_setting_info->lcd_panel_vres;
u32 clock;
- struct display_timing timing;
+ struct via_display_timing timing;
struct fb_var_screeninfo panel_var;
const struct fb_videomode *mode_crt_table, *panel_crt_table;
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 3158dfc90bed..65c65c611e0a 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -319,7 +319,7 @@ struct crt_mode_table {
int refresh_rate;
int h_sync_polarity;
int v_sync_polarity;
- struct display_timing crtc;
+ struct via_display_timing crtc;
};
struct io_reg {
diff --git a/drivers/video/via/via_modesetting.c b/drivers/video/via/via_modesetting.c
index 0e431aee17bb..0b414b09b9b4 100644
--- a/drivers/video/via/via_modesetting.c
+++ b/drivers/video/via/via_modesetting.c
@@ -30,9 +30,9 @@
#include "debug.h"
-void via_set_primary_timing(const struct display_timing *timing)
+void via_set_primary_timing(const struct via_display_timing *timing)
{
- struct display_timing raw;
+ struct via_display_timing raw;
raw.hor_total = timing->hor_total / 8 - 5;
raw.hor_addr = timing->hor_addr / 8 - 1;
@@ -88,9 +88,9 @@ void via_set_primary_timing(const struct display_timing *timing)
via_write_reg_mask(VIACR, 0x17, 0x80, 0x80);
}
-void via_set_secondary_timing(const struct display_timing *timing)
+void via_set_secondary_timing(const struct via_display_timing *timing)
{
- struct display_timing raw;
+ struct via_display_timing raw;
raw.hor_total = timing->hor_total - 1;
raw.hor_addr = timing->hor_addr - 1;
diff --git a/drivers/video/via/via_modesetting.h b/drivers/video/via/via_modesetting.h
index 06e09fe351ae..f6a6503da3b3 100644
--- a/drivers/video/via/via_modesetting.h
+++ b/drivers/video/via/via_modesetting.h
@@ -33,7 +33,7 @@
#define VIA_PITCH_MAX 0x3FF8
-struct display_timing {
+struct via_display_timing {
u16 hor_total;
u16 hor_addr;
u16 hor_blank_start;
@@ -49,8 +49,8 @@ struct display_timing {
};
-void via_set_primary_timing(const struct display_timing *timing);
-void via_set_secondary_timing(const struct display_timing *timing);
+void via_set_primary_timing(const struct via_display_timing *timing);
+void via_set_secondary_timing(const struct via_display_timing *timing);
void via_set_primary_address(u32 addr);
void via_set_secondary_address(u32 addr);
void via_set_primary_pitch(u32 pitch);
diff --git a/drivers/video/videomode.c b/drivers/video/videomode.c
new file mode 100644
index 000000000000..21c47a202afa
--- /dev/null
+++ b/drivers/video/videomode.c
@@ -0,0 +1,39 @@
+/*
+ * generic display timing functions
+ *
+ * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * This file is released under the GPLv2
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <video/display_timing.h>
+#include <video/videomode.h>
+
+int videomode_from_timing(const struct display_timings *disp,
+ struct videomode *vm, unsigned int index)
+{
+ struct display_timing *dt;
+
+ dt = display_timings_get(disp, index);
+ if (!dt)
+ return -EINVAL;
+
+ vm->pixelclock = display_timing_get_value(&dt->pixelclock, TE_TYP);
+ vm->hactive = display_timing_get_value(&dt->hactive, TE_TYP);
+ vm->hfront_porch = display_timing_get_value(&dt->hfront_porch, TE_TYP);
+ vm->hback_porch = display_timing_get_value(&dt->hback_porch, TE_TYP);
+ vm->hsync_len = display_timing_get_value(&dt->hsync_len, TE_TYP);
+
+ vm->vactive = display_timing_get_value(&dt->vactive, TE_TYP);
+ vm->vfront_porch = display_timing_get_value(&dt->vfront_porch, TE_TYP);
+ vm->vback_porch = display_timing_get_value(&dt->vback_porch, TE_TYP);
+ vm->vsync_len = display_timing_get_value(&dt->vsync_len, TE_TYP);
+
+ vm->dmt_flags = dt->dmt_flags;
+ vm->data_flags = dt->data_flags;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(videomode_from_timing);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 797e1c79a104..8dab163c5ef0 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -560,18 +560,7 @@ static struct virtio_driver virtio_balloon_driver = {
#endif
};
-static int __init init(void)
-{
- return register_virtio_driver(&virtio_balloon_driver);
-}
-
-static void __exit fini(void)
-{
- unregister_virtio_driver(&virtio_balloon_driver);
-}
-module_init(init);
-module_exit(fini);
-
+module_virtio_driver(virtio_balloon_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 31f966f4d27f..1ba0d6831015 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -75,7 +75,7 @@
*
* 0x050 W QueueNotify Queue notifier
* 0x060 R InterruptStatus Interrupt status register
- * 0x060 W InterruptACK Interrupt acknowledge register
+ * 0x064 W InterruptACK Interrupt acknowledge register
* 0x070 RW Status Device status register
*
* 0x100+ RW Device-specific configuration space
@@ -423,7 +423,7 @@ static const char *vm_bus_name(struct virtio_device *vdev)
return vm_dev->pdev->name;
}
-static struct virtio_config_ops virtio_mmio_config_ops = {
+static const struct virtio_config_ops virtio_mmio_config_ops = {
.get = vm_get,
.set = vm_set,
.get_status = vm_get_status,
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 0c142892c105..a7ce73029f59 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -91,9 +91,9 @@ struct virtio_pci_vq_info
};
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
-static struct pci_device_id virtio_pci_id_table[] = {
- { 0x1af4, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { 0 },
+static DEFINE_PCI_DEVICE_TABLE(virtio_pci_id_table) = {
+ { PCI_DEVICE(0x1af4, PCI_ANY_ID) },
+ { 0 }
};
MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
@@ -652,7 +652,7 @@ static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
return 0;
}
-static struct virtio_config_ops virtio_pci_config_ops = {
+static const struct virtio_config_ops virtio_pci_config_ops = {
.get = vp_get,
.set = vp_set,
.get_status = vp_get_status,
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 95a9f71d793e..5e6c7d74e19f 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1376,6 +1376,7 @@ static int __vme_register_driver_bus(struct vme_driver *drv,
return 0;
err_reg:
+ put_device(&vdev->dev);
kfree(vdev);
err_devalloc:
list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 7c294f4dc0ed..96cab6ac2b4e 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
@@ -459,43 +460,34 @@ static int ds1wm_probe(struct platform_device *pdev)
if (!pdev)
return -ENODEV;
- ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL);
+ ds1wm_data = devm_kzalloc(&pdev->dev, sizeof(*ds1wm_data), GFP_KERNEL);
if (!ds1wm_data)
return -ENOMEM;
platform_set_drvdata(pdev, ds1wm_data);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENXIO;
- goto err0;
- }
- ds1wm_data->map = ioremap(res->start, resource_size(res));
- if (!ds1wm_data->map) {
- ret = -ENOMEM;
- goto err0;
- }
+ if (!res)
+ return -ENXIO;
+ ds1wm_data->map = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!ds1wm_data->map)
+ return -ENOMEM;
/* calculate bus shift from mem resource */
ds1wm_data->bus_shift = resource_size(res) >> 3;
ds1wm_data->pdev = pdev;
ds1wm_data->cell = mfd_get_cell(pdev);
- if (!ds1wm_data->cell) {
- ret = -ENODEV;
- goto err1;
- }
+ if (!ds1wm_data->cell)
+ return -ENODEV;
plat = pdev->dev.platform_data;
- if (!plat) {
- ret = -ENODEV;
- goto err1;
- }
+ if (!plat)
+ return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- ret = -ENXIO;
- goto err1;
- }
+ if (!res)
+ return -ENXIO;
ds1wm_data->irq = res->start;
ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
@@ -505,10 +497,10 @@ static int ds1wm_probe(struct platform_device *pdev)
if (res->flags & IORESOURCE_IRQ_LOWEDGE)
irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
- ret = request_irq(ds1wm_data->irq, ds1wm_isr,
+ ret = devm_request_irq(&pdev->dev, ds1wm_data->irq, ds1wm_isr,
IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data);
if (ret)
- goto err1;
+ return ret;
ds1wm_up(ds1wm_data);
@@ -516,17 +508,12 @@ static int ds1wm_probe(struct platform_device *pdev)
ret = w1_add_master_device(&ds1wm_master);
if (ret)
- goto err2;
+ goto err;
return 0;
-err2:
+err:
ds1wm_down(ds1wm_data);
- free_irq(ds1wm_data->irq, ds1wm_data);
-err1:
- iounmap(ds1wm_data->map);
-err0:
- kfree(ds1wm_data);
return ret;
}
@@ -560,9 +547,6 @@ static int ds1wm_remove(struct platform_device *pdev)
w1_remove_master_device(&ds1wm_master);
ds1wm_down(ds1wm_data);
- free_irq(ds1wm_data->irq, ds1wm_data);
- iounmap(ds1wm_data->map);
- kfree(ds1wm_data);
return 0;
}
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 6429b9e9fb82..e033491fe308 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -51,10 +51,10 @@
* The top 4 bits always read 0.
* To write, the top nibble must be the 1's compl. of the low nibble.
*/
-#define DS2482_REG_CFG_1WS 0x08
-#define DS2482_REG_CFG_SPU 0x04
-#define DS2482_REG_CFG_PPM 0x02
-#define DS2482_REG_CFG_APU 0x01
+#define DS2482_REG_CFG_1WS 0x08 /* 1-wire speed */
+#define DS2482_REG_CFG_SPU 0x04 /* strong pull-up */
+#define DS2482_REG_CFG_PPM 0x02 /* presence pulse masking */
+#define DS2482_REG_CFG_APU 0x01 /* active pull-up */
/**
@@ -132,6 +132,17 @@ struct ds2482_data {
/**
+ * Helper to calculate values for configuration register
+ * @param conf the raw config value
+ * @return the value w/ complements that can be written to register
+ */
+static inline u8 ds2482_calculate_config(u8 conf)
+{
+ return conf | ((~conf & 0x0f) << 4);
+}
+
+
+/**
* Sets the read pointer.
* @param pdev The ds2482 client pointer
* @param read_ptr see DS2482_PTR_CODE_xxx above
@@ -399,7 +410,7 @@ static u8 ds2482_w1_reset_bus(void *data)
/* If the chip did reset since detect, re-config it */
if (err & DS2482_REG_STS_RST)
ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
- 0xF0);
+ ds2482_calculate_config(0x00));
}
mutex_unlock(&pdev->access_lock);
@@ -407,6 +418,32 @@ static u8 ds2482_w1_reset_bus(void *data)
return retval;
}
+static u8 ds2482_w1_set_pullup(void *data, int delay)
+{
+ struct ds2482_w1_chan *pchan = data;
+ struct ds2482_data *pdev = pchan->pdev;
+ u8 retval = 1;
+
+ /* if delay is non-zero, activate the pullup;
+ * the strong pullup will be automatically deactivated
+ * by the master, so do not explicitly deactivate it
+ */
+ if (delay) {
+ /* both waits are crucial, otherwise devices might not be
+ * powered long enough, causing e.g. a w1_therm sensor to
+ * provide wrong conversion results
+ */
+ ds2482_wait_1wire_idle(pdev);
+ /* note: it seems like both SPU and APU have to be set! */
+ retval = ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
+ ds2482_calculate_config(DS2482_REG_CFG_SPU |
+ DS2482_REG_CFG_APU));
+ ds2482_wait_1wire_idle(pdev);
+ }
+
+ return retval;
+}
+
static int ds2482_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -452,7 +489,8 @@ static int ds2482_probe(struct i2c_client *client,
data->w1_count = 8;
/* Set all config items to 0 (off) */
- ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG, 0xF0);
+ ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG,
+ ds2482_calculate_config(0x00));
mutex_init(&data->access_lock);
@@ -468,6 +506,7 @@ static int ds2482_probe(struct i2c_client *client,
data->w1_ch[idx].w1_bm.touch_bit = ds2482_w1_touch_bit;
data->w1_ch[idx].w1_bm.triplet = ds2482_w1_triplet;
data->w1_ch[idx].w1_bm.reset_bus = ds2482_w1_reset_bus;
+ data->w1_ch[idx].w1_bm.set_pullup = ds2482_w1_set_pullup;
err = w1_add_master_device(&data->w1_ch[idx].w1_bm);
if (err) {
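
A worked example (not from the patch) of the nibble-complement encoding that ds2482_calculate_config() implements for DS2482 config writes:

/*
 *   ds2482_calculate_config(0x00)
 *	= 0x00 | ((~0x00 & 0x0f) << 4) = 0xf0	(the old hard-coded value)
 *
 *   ds2482_calculate_config(DS2482_REG_CFG_SPU | DS2482_REG_CFG_APU)
 *	= 0x05 | ((~0x05 & 0x0f) << 4) = 0xa5	(strong + active pull-up)
 */
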
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 708a25fc9961..950d354d50e2 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -109,34 +109,21 @@ static int mxc_w1_probe(struct platform_device *pdev)
struct resource *res;
int err = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- mdev = kzalloc(sizeof(struct mxc_w1_device), GFP_KERNEL);
+ mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device),
+ GFP_KERNEL);
if (!mdev)
return -ENOMEM;
- mdev->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(mdev->clk)) {
- err = PTR_ERR(mdev->clk);
- goto failed_clk;
- }
+ mdev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mdev->clk))
+ return PTR_ERR(mdev->clk);
mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1;
- res = request_mem_region(res->start, resource_size(res),
- "mxc_w1");
- if (!res) {
- err = -EBUSY;
- goto failed_req;
- }
-
- mdev->regs = ioremap(res->start, resource_size(res));
- if (!mdev->regs) {
- dev_err(&pdev->dev, "Cannot map mxc_w1 registers\n");
- goto failed_ioremap;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdev->regs = devm_request_and_ioremap(&pdev->dev, res);
+ if (!mdev->regs)
+ return -EBUSY;
clk_prepare_enable(mdev->clk);
__raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
@@ -148,20 +135,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
err = w1_add_master_device(&mdev->bus_master);
if (err)
- goto failed_add;
+ return err;
platform_set_drvdata(pdev, mdev);
return 0;
-
-failed_add:
- iounmap(mdev->regs);
-failed_ioremap:
- release_mem_region(res->start, resource_size(res));
-failed_req:
- clk_put(mdev->clk);
-failed_clk:
- kfree(mdev);
- return err;
}
/*
@@ -170,25 +147,26 @@ failed_clk:
static int mxc_w1_remove(struct platform_device *pdev)
{
struct mxc_w1_device *mdev = platform_get_drvdata(pdev);
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
w1_remove_master_device(&mdev->bus_master);
- iounmap(mdev->regs);
- release_mem_region(res->start, resource_size(res));
clk_disable_unprepare(mdev->clk);
- clk_put(mdev->clk);
platform_set_drvdata(pdev, NULL);
return 0;
}
+static struct of_device_id mxc_w1_dt_ids[] = {
+ { .compatible = "fsl,imx21-owire" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxc_w1_dt_ids);
+
static struct platform_driver mxc_w1_driver = {
.driver = {
- .name = "mxc_w1",
+ .name = "mxc_w1",
+ .of_match_table = mxc_w1_dt_ids,
},
.probe = mxc_w1_probe,
.remove = mxc_w1_remove,
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 85b363a5bd0f..d39dfa4cc235 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -72,7 +72,7 @@ static int w1_gpio_probe_dt(struct platform_device *pdev)
return 0;
}
-static int __init w1_gpio_probe(struct platform_device *pdev)
+static int w1_gpio_probe(struct platform_device *pdev)
{
struct w1_bus_master *master;
struct w1_gpio_platform_data *pdata;
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 67526690acbc..762561fbabbf 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -17,11 +17,16 @@ config W1_SLAVE_SMEM
simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
config W1_SLAVE_DS2408
- tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
- help
- Say Y here if you want to use a 1-wire
+ tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
+ help
+ Say Y here if you want to use a 1-wire
+ DS2408 8-Channel Addressable Switch device support
- DS2408 8-Channel Addressable Switch device support
+config W1_SLAVE_DS2413
+ tristate "Dual Channel Addressable Switch 0x3a family support (DS2413)"
+ help
+ Say Y here if you want to use a 1-wire
+ DS2413 Dual Channel Addressable Switch device support
config W1_SLAVE_DS2423
tristate "Counter 1-wire device (DS2423)"
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 05188f6aab5a..06529f3157ab 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -4,7 +4,8 @@
obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o
obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o
-obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
+obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
+obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o
obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
new file mode 100644
index 000000000000..829786252c6b
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2413.c
@@ -0,0 +1,177 @@
+/*
+ * w1_ds2413.c - w1 family 3a (DS2413) driver
+ * based on w1_ds2408.c by Jean-Francois Dagenais <dagenaisj@sonatest.com>
+ *
+ * Copyright (c) 2013 Mariusz Bialonczyk <manio@skyboo.net>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_family.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mariusz Bialonczyk <manio@skyboo.net>");
+MODULE_DESCRIPTION("w1 family 3a driver for DS2413 2 Pin IO");
+
+#define W1_F3A_RETRIES 3
+#define W1_F3A_FUNC_PIO_ACCESS_READ 0xF5
+#define W1_F3A_FUNC_PIO_ACCESS_WRITE 0x5A
+#define W1_F3A_SUCCESS_CONFIRM_BYTE 0xAA
+
+static ssize_t w1_f3a_read_state(
+ struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ dev_dbg(&sl->dev,
+ "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
+ bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
+
+ if (off != 0)
+ return 0;
+ if (!buf)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->bus_mutex);
+ dev_dbg(&sl->dev, "mutex locked");
+
+ if (w1_reset_select_slave(sl)) {
+ mutex_unlock(&sl->master->bus_mutex);
+ return -EIO;
+ }
+
+ w1_write_8(sl->master, W1_F3A_FUNC_PIO_ACCESS_READ);
+ *buf = w1_read_8(sl->master);
+
+ mutex_unlock(&sl->master->bus_mutex);
+ dev_dbg(&sl->dev, "mutex unlocked");
+
+ /* check for correct complement */
+ if ((*buf & 0x0F) != ((~*buf >> 4) & 0x0F))
+ return -EIO;
+ else
+ return 1;
+}
+
+static ssize_t w1_f3a_write_output(
+ struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ u8 w1_buf[3];
+ unsigned int retries = W1_F3A_RETRIES;
+
+ if (count != 1 || off != 0)
+ return -EFAULT;
+
+ dev_dbg(&sl->dev, "locking mutex for write_output");
+ mutex_lock(&sl->master->bus_mutex);
+ dev_dbg(&sl->dev, "mutex locked");
+
+ if (w1_reset_select_slave(sl))
+ goto error;
+
+ /* according to the DS2413 datasheet, the most significant 6 bits
+ should be set to "1"s, so do it now */
+ *buf = *buf | 0xFC;
+
+ while (retries--) {
+ w1_buf[0] = W1_F3A_FUNC_PIO_ACCESS_WRITE;
+ w1_buf[1] = *buf;
+ w1_buf[2] = ~(*buf);
+ w1_write_block(sl->master, w1_buf, 3);
+
+ if (w1_read_8(sl->master) == W1_F3A_SUCCESS_CONFIRM_BYTE) {
+ mutex_unlock(&sl->master->bus_mutex);
+ dev_dbg(&sl->dev, "mutex unlocked, retries:%d", retries);
+ return 1;
+ }
+ if (w1_reset_resume_command(sl->master))
+ goto error;
+ }
+
+error:
+ mutex_unlock(&sl->master->bus_mutex);
+ dev_dbg(&sl->dev, "mutex unlocked in error, retries:%d", retries);
+ return -EIO;
+}
+
+#define NB_SYSFS_BIN_FILES 2
+static struct bin_attribute w1_f3a_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
+ {
+ .attr = {
+ .name = "state",
+ .mode = S_IRUGO,
+ },
+ .size = 1,
+ .read = w1_f3a_read_state,
+ },
+ {
+ .attr = {
+ .name = "output",
+ .mode = S_IRUGO | S_IWUSR | S_IWGRP,
+ },
+ .size = 1,
+ .write = w1_f3a_write_output,
+ }
+};
+
+static int w1_f3a_add_slave(struct w1_slave *sl)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
+ err = sysfs_create_bin_file(
+ &sl->dev.kobj,
+ &(w1_f3a_sysfs_bin_files[i]));
+ if (err)
+ while (--i >= 0)
+ sysfs_remove_bin_file(&sl->dev.kobj,
+ &(w1_f3a_sysfs_bin_files[i]));
+ return err;
+}
+
+static void w1_f3a_remove_slave(struct w1_slave *sl)
+{
+ int i;
+ for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
+ sysfs_remove_bin_file(&sl->dev.kobj,
+ &(w1_f3a_sysfs_bin_files[i]));
+}
+
+static struct w1_family_ops w1_f3a_fops = {
+ .add_slave = w1_f3a_add_slave,
+ .remove_slave = w1_f3a_remove_slave,
+};
+
+static struct w1_family w1_family_3a = {
+ .fid = W1_FAMILY_DS2413,
+ .fops = &w1_f3a_fops,
+};
+
+static int __init w1_f3a_init(void)
+{
+ return w1_register_family(&w1_family_3a);
+}
+
+static void __exit w1_f3a_exit(void)
+{
+ w1_unregister_family(&w1_family_3a);
+}
+
+module_init(w1_f3a_init);
+module_exit(w1_f3a_exit);
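
A worked example (not from the patch) of the complement check in w1_f3a_read_state(): the DS2413 returns the PIO status in the low nibble and its inverse in the high nibble, so a sampled status of 0xA arrives as 0x5A:

/*
 *   buf = 0x5a
 *   buf & 0x0f          = 0x0a
 *   (~buf >> 4) & 0x0f  = 0x0a		-> nibbles match, the read is valid
 */
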
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 92d08e7fcba2..c1a702f8c803 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -41,14 +41,18 @@ MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature famil
* If it was disabled a parasite powered device might not get the require
* current to do a temperature conversion. If it is enabled parasite powered
* devices have a better chance of getting the current required.
+ * In case the parasite power-detection is not working (seems to be the case
+ * for some DS18S20) the strong pullup can also be forced, regardless of the
+ * power state of the devices.
+ *
+ * Summary of options:
+ * - strong_pullup = 0 Disable strong pullup completely
+ * - strong_pullup = 1 Enable automatic strong pullup detection
+ * - strong_pullup = 2 Force strong pullup
*/
static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
-static u8 bad_roms[][9] = {
- {0xaa, 0x00, 0x4b, 0x46, 0xff, 0xff, 0x0c, 0x10, 0x87},
- {}
- };
static ssize_t w1_therm_read(struct device *device,
struct device_attribute *attr, char *buf);
@@ -168,16 +172,6 @@ static inline int w1_convert_temp(u8 rom[9], u8 fid)
return 0;
}
-static int w1_therm_check_rom(u8 rom[9])
-{
- int i;
-
- for (i=0; i<sizeof(bad_roms)/9; ++i)
- if (!memcmp(bad_roms[i], rom, 9))
- return 1;
-
- return 0;
-}
static ssize_t w1_therm_read(struct device *device,
struct device_attribute *attr, char *buf)
@@ -194,10 +188,11 @@ static ssize_t w1_therm_read(struct device *device,
memset(rom, 0, sizeof(rom));
- verdict = 0;
- crc = 0;
-
while (max_trying--) {
+
+ verdict = 0;
+ crc = 0;
+
if (!w1_reset_select_slave(sl)) {
int count = 0;
unsigned int tm = 750;
@@ -210,7 +205,8 @@ static ssize_t w1_therm_read(struct device *device,
continue;
/* 750ms strong pullup (or delay) after the convert */
- if (!external_power && w1_strong_pullup)
+ if (w1_strong_pullup == 2 ||
+ (!external_power && w1_strong_pullup))
w1_next_pullup(dev, tm);
w1_write_8(dev, W1_CONVERT_TEMP);
@@ -249,7 +245,7 @@ static ssize_t w1_therm_read(struct device *device,
}
}
- if (!w1_therm_check_rom(rom))
+ if (verdict)
break;
}
@@ -260,7 +256,7 @@ static ssize_t w1_therm_read(struct device *device,
if (verdict)
memcpy(sl->rom, rom, sizeof(sl->rom));
else
- dev_warn(device, "18S20 doesn't respond to CONVERT_TEMP.\n");
+ dev_warn(device, "Read failed CRC check\n");
for (i = 0; i < 9; ++i)
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", sl->rom[i]);
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index a1f0ce151d53..625dd08f775f 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -39,6 +39,7 @@
#define W1_EEPROM_DS2431 0x2D
#define W1_FAMILY_DS2760 0x30
#define W1_FAMILY_DS2780 0x32
+#define W1_FAMILY_DS2413 0x3A
#define W1_THERM_DS1825 0x3B
#define W1_FAMILY_DS2781 0x3D
#define W1_THERM_DS28EA00 0x42
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 7f809fd4a57f..9fcc70c11cea 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -79,6 +79,7 @@ config DA9052_WATCHDOG
config DA9055_WATCHDOG
tristate "Dialog Semiconductor DA9055 Watchdog"
depends on MFD_DA9055
+ select WATCHDOG_CORE
help
If you say yes here you get support for watchdog on the Dialog
Semiconductor DA9055 PMIC.
@@ -108,7 +109,7 @@ config WM8350_WATCHDOG
config ARM_SP805_WATCHDOG
tristate "ARM SP805 Watchdog"
- depends on ARM_AMBA
+ depends on ARM && ARM_AMBA
select WATCHDOG_CORE
help
ARM Primecell SP805 Watchdog timer. This will reboot your system when
@@ -116,7 +117,7 @@ config ARM_SP805_WATCHDOG
config AT91RM9200_WATCHDOG
tristate "AT91RM9200 watchdog"
- depends on ARCH_AT91RM9200
+ depends on ARCH_AT91
help
Watchdog timer embedded into AT91RM9200 chips. This will reboot your
system when the timeout is reached.
@@ -124,6 +125,7 @@ config AT91RM9200_WATCHDOG
config AT91SAM9X_WATCHDOG
tristate "AT91SAM9X / AT91CAP9 watchdog"
depends on ARCH_AT91 && !ARCH_AT91RM9200
+ select WATCHDOG_CORE
help
Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will
reboot your system when the timeout is reached.
@@ -316,14 +318,15 @@ config TWL4030_WATCHDOG
Support for TI TWL4030 watchdog. Say 'Y' here to enable the
watchdog timer support for TWL4030 chips.
-config STMP3XXX_WATCHDOG
- tristate "Freescale STMP3XXX watchdog"
- depends on ARCH_STMP3XXX
+config STMP3XXX_RTC_WATCHDOG
+ tristate "Freescale STMP3XXX & i.MX23/28 watchdog"
+ depends on RTC_DRV_STMP
+ select WATCHDOG_CORE
help
- Say Y here if to include support for the watchdog timer
- for the Sigmatel STMP37XX/378X SoC.
+ Say Y here to include support for the watchdog timer inside
+ the RTC for the STMP37XX/378X or i.MX23/28 SoC.
To compile this driver as a module, choose M here: the
- module will be called stmp3xxx_wdt.
+ module will be called stmp3xxx_rtc_wdt.
config NUC900_WATCHDOG
tristate "Nuvoton NUC900 watchdog"
@@ -364,6 +367,30 @@ config IMX2_WDT
To compile this driver as a module, choose M here: the
module will be called imx2_wdt.
+config UX500_WATCHDOG
+ tristate "ST-Ericsson Ux500 watchdog"
+ depends on MFD_DB8500_PRCMU
+ select WATCHDOG_CORE
+ default y
+ help
+ Say Y here to include watchdog timer support for the watchdog
+ provided by the PRCMU on ST-Ericsson Ux500 series platforms.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ux500_wdt.
+
+config RETU_WATCHDOG
+ tristate "Retu watchdog"
+ depends on MFD_RETU
+ select WATCHDOG_CORE
+ help
+ Retu watchdog driver for Nokia Internet Tablets (770, N800,
+ N810). At least on N800 the watchdog cannot be disabled, so
+ this driver is essential and you should enable it.
+
+ To compile this driver as a module, choose M here: the
+ module will be called retu_wdt.
+
# AVR32 Architecture
config AT32AP700X_WDT
@@ -581,7 +608,7 @@ config IE6XX_WDT
config INTEL_SCU_WATCHDOG
bool "Intel SCU Watchdog for Mobile Platforms"
- depends on X86_MRST
+ depends on X86_INTEL_MID
---help---
Hardware driver for the watchdog time built into the Intel SCU
for Intel Mobile Platforms.
@@ -971,6 +998,7 @@ config ATH79_WDT
config BCM47XX_WDT
tristate "Broadcom BCM47xx Watchdog Timer"
depends on BCM47XX
+ select WATCHDOG_CORE
help
Hardware driver for the Broadcom BCM47xx Watchdog Timer.
@@ -1119,6 +1147,7 @@ config PIKA_WDT
config BOOKE_WDT
tristate "PowerPC Book-E Watchdog Timer"
depends on BOOKE || 4xx
+ select WATCHDOG_CORE
---help---
Watchdog driver for PowerPC Book-E chips, such as the Freescale
MPC85xx SOCs and the IBM PowerPC 440.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 97bbdb3a4648..a300b948f254 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -48,10 +48,12 @@ obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
-obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
+obj-$(CONFIG_STMP3XXX_RTC_WATCHDOG) += stmp3xxx_rtc_wdt.o
obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
+obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o
+obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 89831ed24a4f..1c75260b987c 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -24,6 +24,8 @@
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <mach/at91_st.h>
#define WDT_DEFAULT_TIME 5 /* seconds */
@@ -252,6 +254,12 @@ static int at91wdt_resume(struct platform_device *pdev)
#define at91wdt_resume NULL
#endif
+static const struct of_device_id at91_wdt_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-wdt" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, at91_wdt_dt_ids);
+
static struct platform_driver at91wdt_driver = {
.probe = at91wdt_probe,
.remove = at91wdt_remove,
@@ -261,6 +269,7 @@ static struct platform_driver at91wdt_driver = {
.driver = {
.name = "at91_wdt",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(at91_wdt_dt_ids),
},
};
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index c08933cc565e..be37dde4f864 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -18,11 +18,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
-#include <linux/fs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
@@ -58,7 +56,7 @@
/* User land timeout */
#define WDT_HEARTBEAT 15
-static int heartbeat = WDT_HEARTBEAT;
+static int heartbeat;
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
"(default = " __MODULE_STRING(WDT_HEARTBEAT) ")");
@@ -68,19 +66,17 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+static struct watchdog_device at91_wdt_dev;
static void at91_ping(unsigned long data);
static struct {
void __iomem *base;
unsigned long next_heartbeat; /* the next_heartbeat for the timer */
- unsigned long open;
- char expect_close;
struct timer_list timer; /* The timer that pings the watchdog */
} at91wdt_private;
/* ......................................................................... */
-
/*
* Reload the watchdog timer. (ie, pat the watchdog)
*/
@@ -95,39 +91,37 @@ static inline void at91_wdt_reset(void)
static void at91_ping(unsigned long data)
{
if (time_before(jiffies, at91wdt_private.next_heartbeat) ||
- (!nowayout && !at91wdt_private.open)) {
+ (!watchdog_active(&at91_wdt_dev))) {
at91_wdt_reset();
mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
} else
pr_crit("I will reset your machine !\n");
}
-/*
- * Watchdog device is opened, and watchdog starts running.
- */
-static int at91_wdt_open(struct inode *inode, struct file *file)
+static int at91_wdt_ping(struct watchdog_device *wdd)
{
- if (test_and_set_bit(0, &at91wdt_private.open))
- return -EBUSY;
+ /* calculate when the next userspace timeout will be */
+ at91wdt_private.next_heartbeat = jiffies + wdd->timeout * HZ;
+ return 0;
+}
- at91wdt_private.next_heartbeat = jiffies + heartbeat * HZ;
+static int at91_wdt_start(struct watchdog_device *wdd)
+{
+ /* calculate the next userspace timeout and modify the timer */
+ at91_wdt_ping(wdd);
mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
-
- return nonseekable_open(inode, file);
+ return 0;
}
-/*
- * Close the watchdog device.
- */
-static int at91_wdt_close(struct inode *inode, struct file *file)
+static int at91_wdt_stop(struct watchdog_device *wdd)
{
- clear_bit(0, &at91wdt_private.open);
-
- /* stop internal ping */
- if (!at91wdt_private.expect_close)
- del_timer(&at91wdt_private.timer);
+ /* The watchdog timer hardware can not be stopped... */
+ return 0;
+}
- at91wdt_private.expect_close = 0;
+static int at91_wdt_set_timeout(struct watchdog_device *wdd, unsigned int new_timeout)
+{
+ wdd->timeout = new_timeout;
return 0;
}
@@ -163,96 +157,28 @@ static int at91_wdt_settimeout(unsigned int timeout)
return 0;
}
+/* ......................................................................... */
+
static const struct watchdog_info at91_wdt_info = {
.identity = DRV_NAME,
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
-/*
- * Handle commands from user-space.
- */
-static long at91_wdt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_value;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &at91_wdt_info,
- sizeof(at91_wdt_info)) ? -EFAULT : 0;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_KEEPALIVE:
- at91wdt_private.next_heartbeat = jiffies + heartbeat * HZ;
- return 0;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(new_value, p))
- return -EFAULT;
-
- heartbeat = new_value;
- at91wdt_private.next_heartbeat = jiffies + heartbeat * HZ;
-
- return put_user(new_value, p); /* return current value */
-
- case WDIOC_GETTIMEOUT:
- return put_user(heartbeat, p);
- }
- return -ENOTTY;
-}
-
-/*
- * Pat the watchdog whenever device is written to.
- */
-static ssize_t at91_wdt_write(struct file *file, const char *data, size_t len,
- loff_t *ppos)
-{
- if (!len)
- return 0;
-
- /* Scan for magic character */
- if (!nowayout) {
- size_t i;
-
- at91wdt_private.expect_close = 0;
-
- for (i = 0; i < len; i++) {
- char c;
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V') {
- at91wdt_private.expect_close = 42;
- break;
- }
- }
- }
-
- at91wdt_private.next_heartbeat = jiffies + heartbeat * HZ;
-
- return len;
-}
-
-/* ......................................................................... */
-
-static const struct file_operations at91wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .unlocked_ioctl = at91_wdt_ioctl,
- .open = at91_wdt_open,
- .release = at91_wdt_close,
- .write = at91_wdt_write,
+static const struct watchdog_ops at91_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = at91_wdt_start,
+ .stop = at91_wdt_stop,
+ .ping = at91_wdt_ping,
+ .set_timeout = at91_wdt_set_timeout,
};
-static struct miscdevice at91wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &at91wdt_fops,
+static struct watchdog_device at91_wdt_dev = {
+ .info = &at91_wdt_info,
+ .ops = &at91_wdt_ops,
+ .timeout = WDT_HEARTBEAT,
+ .min_timeout = 1,
+ .max_timeout = 0xFFFF,
};
static int __init at91wdt_probe(struct platform_device *pdev)
@@ -260,10 +186,6 @@ static int __init at91wdt_probe(struct platform_device *pdev)
struct resource *r;
int res;
- if (at91wdt_miscdev.parent)
- return -EBUSY;
- at91wdt_miscdev.parent = &pdev->dev;
-
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r)
return -ENODEV;
@@ -273,38 +195,41 @@ static int __init at91wdt_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ at91_wdt_dev.parent = &pdev->dev;
+ watchdog_init_timeout(&at91_wdt_dev, heartbeat, &pdev->dev);
+ watchdog_set_nowayout(&at91_wdt_dev, nowayout);
+
/* Set watchdog */
res = at91_wdt_settimeout(ms_to_ticks(WDT_HW_TIMEOUT * 1000));
if (res)
return res;
- res = misc_register(&at91wdt_miscdev);
+ res = watchdog_register_device(&at91_wdt_dev);
if (res)
return res;
- at91wdt_private.next_heartbeat = jiffies + heartbeat * HZ;
+ at91wdt_private.next_heartbeat = jiffies + at91_wdt_dev.timeout * HZ;
setup_timer(&at91wdt_private.timer, at91_ping, 0);
mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
pr_info("enabled (heartbeat=%d sec, nowayout=%d)\n",
- heartbeat, nowayout);
+ at91_wdt_dev.timeout, nowayout);
return 0;
}
static int __exit at91wdt_remove(struct platform_device *pdev)
{
- int res;
+ watchdog_unregister_device(&at91_wdt_dev);
- res = misc_deregister(&at91wdt_miscdev);
- if (!res)
- at91wdt_miscdev.parent = NULL;
+ pr_warn("I quit now, hardware will probably reboot!\n");
+ del_timer(&at91wdt_private.timer);
- return res;
+ return 0;
}
#if defined(CONFIG_OF)
-static const struct of_device_id at91_wdt_dt_ids[] __initconst = {
+static const struct of_device_id at91_wdt_dt_ids[] = {
{ .compatible = "atmel,at91sam9260-wdt" },
{ /* sentinel */ }
};
@@ -326,4 +251,3 @@ module_platform_driver_probe(at91wdt_driver, at91wdt_probe);
MODULE_AUTHOR("Renaud CERRATO <r.cerrato@til-technologies.fr>");
MODULE_DESCRIPTION("Watchdog driver for Atmel AT91SAM9x processors");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 38a999e60c0d..898799074a13 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -23,6 +23,7 @@
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
@@ -32,14 +33,16 @@
#include <linux/watchdog.h>
#include <linux/clk.h>
#include <linux/err.h>
-
-#include <asm/mach-ath79/ath79.h>
-#include <asm/mach-ath79/ar71xx_regs.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#define DRIVER_NAME "ath79-wdt"
#define WDT_TIMEOUT 15 /* seconds */
+#define WDOG_REG_CTRL 0x00
+#define WDOG_REG_TIMER 0x04
+
#define WDOG_CTRL_LAST_RESET BIT(31)
#define WDOG_CTRL_ACTION_MASK 3
#define WDOG_CTRL_ACTION_NONE 0 /* no action */
@@ -66,27 +69,38 @@ static struct clk *wdt_clk;
static unsigned long wdt_freq;
static int boot_status;
static int max_timeout;
+static void __iomem *wdt_base;
+
+static inline void ath79_wdt_wr(unsigned reg, u32 val)
+{
+ iowrite32(val, wdt_base + reg);
+}
+
+static inline u32 ath79_wdt_rr(unsigned reg)
+{
+ return ioread32(wdt_base + reg);
+}
static inline void ath79_wdt_keepalive(void)
{
- ath79_reset_wr(AR71XX_RESET_REG_WDOG, wdt_freq * timeout);
+ ath79_wdt_wr(WDOG_REG_TIMER, wdt_freq * timeout);
/* flush write */
- ath79_reset_rr(AR71XX_RESET_REG_WDOG);
+ ath79_wdt_rr(WDOG_REG_TIMER);
}
static inline void ath79_wdt_enable(void)
{
ath79_wdt_keepalive();
- ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR);
+ ath79_wdt_wr(WDOG_REG_CTRL, WDOG_CTRL_ACTION_FCR);
/* flush write */
- ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
+ ath79_wdt_rr(WDOG_REG_CTRL);
}
static inline void ath79_wdt_disable(void)
{
- ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_NONE);
+ ath79_wdt_wr(WDOG_REG_CTRL, WDOG_CTRL_ACTION_NONE);
/* flush write */
- ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
+ ath79_wdt_rr(WDOG_REG_CTRL);
}
static int ath79_wdt_set_timeout(int val)
@@ -226,16 +240,32 @@ static struct miscdevice ath79_wdt_miscdev = {
static int ath79_wdt_probe(struct platform_device *pdev)
{
+ struct resource *res;
u32 ctrl;
int err;
- wdt_clk = clk_get(&pdev->dev, "wdt");
+ if (wdt_base)
+ return -EBUSY;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no memory resource found\n");
+ return -EINVAL;
+ }
+
+ wdt_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!wdt_base) {
+ dev_err(&pdev->dev, "unable to remap memory region\n");
+ return -ENOMEM;
+ }
+
+ wdt_clk = devm_clk_get(&pdev->dev, "wdt");
if (IS_ERR(wdt_clk))
return PTR_ERR(wdt_clk);
err = clk_enable(wdt_clk);
if (err)
- goto err_clk_put;
+ return err;
wdt_freq = clk_get_rate(wdt_clk);
if (!wdt_freq) {
@@ -251,7 +281,7 @@ static int ath79_wdt_probe(struct platform_device *pdev)
max_timeout, timeout);
}
- ctrl = ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
+ ctrl = ath79_wdt_rr(WDOG_REG_CTRL);
boot_status = (ctrl & WDOG_CTRL_LAST_RESET) ? WDIOF_CARDRESET : 0;
err = misc_register(&ath79_wdt_miscdev);
@@ -265,8 +295,6 @@ static int ath79_wdt_probe(struct platform_device *pdev)
err_clk_disable:
clk_disable(wdt_clk);
-err_clk_put:
- clk_put(wdt_clk);
return err;
}
@@ -274,7 +302,6 @@ static int ath79_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&ath79_wdt_miscdev);
clk_disable(wdt_clk);
- clk_put(wdt_clk);
return 0;
}
@@ -283,6 +310,14 @@ static void ath97_wdt_shutdown(struct platform_device *pdev)
ath79_wdt_disable();
}
+#ifdef CONFIG_OF
+static const struct of_device_id ath79_wdt_match[] = {
+ { .compatible = "qca,ar7130-wdt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ath79_wdt_match);
+#endif
+
static struct platform_driver ath79_wdt_driver = {
.probe = ath79_wdt_probe,
.remove = ath79_wdt_remove,
@@ -290,6 +325,7 @@ static struct platform_driver ath79_wdt_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ath79_wdt_match),
},
};
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index bc0e91e78e86..b4021a2b459b 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2008 Aleksandar Radovanovic <biblbroks@sezampro.rs>
* Copyright (C) 2009 Matthieu CASTET <castet.matthieu@free.fr>
+ * Copyright (C) 2012-2013 Hauke Mehrtens <hauke@hauke-m.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -12,165 +13,143 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bcm47xx_wdt.h>
#include <linux/bitops.h>
#include <linux/errno.h>
-#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/watchdog.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
-#include <linux/ssb/ssb_embedded.h>
-#include <asm/mach-bcm47xx/bcm47xx.h>
#define DRV_NAME "bcm47xx_wdt"
#define WDT_DEFAULT_TIME 30 /* seconds */
-#define WDT_MAX_TIME 255 /* seconds */
+#define WDT_SOFTTIMER_MAX 255 /* seconds */
+#define WDT_SOFTTIMER_THRESHOLD 60 /* seconds */
-static int wdt_time = WDT_DEFAULT_TIME;
+static int timeout = WDT_DEFAULT_TIME;
static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(wdt_time, int, 0);
-MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog time in seconds. (default="
__MODULE_STRING(WDT_DEFAULT_TIME) ")");
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-#endif
-static unsigned long bcm47xx_wdt_busy;
-static char expect_release;
-static struct timer_list wdt_timer;
-static atomic_t ticks;
-
-static inline void bcm47xx_wdt_hw_start(void)
+static inline struct bcm47xx_wdt *bcm47xx_wdt_get(struct watchdog_device *wdd)
{
- /* this is 2,5s on 100Mhz clock and 2s on 133 Mhz */
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 0xfffffff);
- break;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc,
- 0xfffffff);
- break;
-#endif
- }
+ return container_of(wdd, struct bcm47xx_wdt, wdd);
}
-static inline int bcm47xx_wdt_hw_stop(void)
+static int bcm47xx_wdt_hard_keepalive(struct watchdog_device *wdd)
{
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- return ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 0);
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 0);
- return 0;
-#endif
- }
- return -EINVAL;
-}
+ struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
-static void bcm47xx_timer_tick(unsigned long unused)
-{
- if (!atomic_dec_and_test(&ticks)) {
- bcm47xx_wdt_hw_start();
- mod_timer(&wdt_timer, jiffies + HZ);
- } else {
- pr_crit("Watchdog will fire soon!!!\n");
- }
+ wdt->timer_set_ms(wdt, wdd->timeout * 1000);
+
+ return 0;
}
-static inline void bcm47xx_wdt_pet(void)
+static int bcm47xx_wdt_hard_start(struct watchdog_device *wdd)
{
- atomic_set(&ticks, wdt_time);
+ return 0;
}
-static void bcm47xx_wdt_start(void)
+static int bcm47xx_wdt_hard_stop(struct watchdog_device *wdd)
{
- bcm47xx_wdt_pet();
- bcm47xx_timer_tick(0);
+ struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
+
+ wdt->timer_set(wdt, 0);
+
+ return 0;
}
-static void bcm47xx_wdt_pause(void)
+static int bcm47xx_wdt_hard_set_timeout(struct watchdog_device *wdd,
+ unsigned int new_time)
{
- del_timer_sync(&wdt_timer);
- bcm47xx_wdt_hw_stop();
+ struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
+ u32 max_timer = wdt->max_timer_ms;
+
+ if (new_time < 1 || new_time > max_timer / 1000) {
+ pr_warn("timeout value must be 1<=x<=%d, using %d\n",
+ max_timer / 1000, new_time);
+ return -EINVAL;
+ }
+
+ wdd->timeout = new_time;
+ return 0;
}
-static void bcm47xx_wdt_stop(void)
+static struct watchdog_ops bcm47xx_wdt_hard_ops = {
+ .owner = THIS_MODULE,
+ .start = bcm47xx_wdt_hard_start,
+ .stop = bcm47xx_wdt_hard_stop,
+ .ping = bcm47xx_wdt_hard_keepalive,
+ .set_timeout = bcm47xx_wdt_hard_set_timeout,
+};
+
+static void bcm47xx_wdt_soft_timer_tick(unsigned long data)
{
- bcm47xx_wdt_pause();
+ struct bcm47xx_wdt *wdt = (struct bcm47xx_wdt *)data;
+ u32 next_tick = min(wdt->wdd.timeout * 1000, wdt->max_timer_ms);
+
+ if (!atomic_dec_and_test(&wdt->soft_ticks)) {
+ wdt->timer_set_ms(wdt, next_tick);
+ mod_timer(&wdt->soft_timer, jiffies + HZ);
+ } else {
+ pr_crit("Watchdog will fire soon!!!\n");
+ }
}
-static int bcm47xx_wdt_settimeout(int new_time)
+static int bcm47xx_wdt_soft_keepalive(struct watchdog_device *wdd)
{
- if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
- return -EINVAL;
+ struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
+
+ atomic_set(&wdt->soft_ticks, wdd->timeout);
- wdt_time = new_time;
return 0;
}
-static int bcm47xx_wdt_open(struct inode *inode, struct file *file)
+static int bcm47xx_wdt_soft_start(struct watchdog_device *wdd)
{
- if (test_and_set_bit(0, &bcm47xx_wdt_busy))
- return -EBUSY;
+ struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
+
+ bcm47xx_wdt_soft_keepalive(wdd);
+ bcm47xx_wdt_soft_timer_tick((unsigned long)wdt);
- bcm47xx_wdt_start();
- return nonseekable_open(inode, file);
+ return 0;
}
-static int bcm47xx_wdt_release(struct inode *inode, struct file *file)
+static int bcm47xx_wdt_soft_stop(struct watchdog_device *wdd)
{
- if (expect_release == 42) {
- bcm47xx_wdt_stop();
- } else {
- pr_crit("Unexpected close, not stopping watchdog!\n");
- bcm47xx_wdt_start();
- }
+ struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
+
+ del_timer_sync(&wdt->soft_timer);
+ wdt->timer_set(wdt, 0);
- clear_bit(0, &bcm47xx_wdt_busy);
- expect_release = 0;
return 0;
}
-static ssize_t bcm47xx_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
+static int bcm47xx_wdt_soft_set_timeout(struct watchdog_device *wdd,
+ unsigned int new_time)
{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- expect_release = 0;
-
- for (i = 0; i != len; i++) {
- char c;
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- expect_release = 42;
- }
- }
- bcm47xx_wdt_pet();
+ if (new_time < 1 || new_time > WDT_SOFTTIMER_MAX) {
+ pr_warn("timeout value must be 1<=x<=%d, using %d\n",
+ WDT_SOFTTIMER_MAX, new_time);
+ return -EINVAL;
}
- return len;
+
+ wdd->timeout = new_time;
+ return 0;
}
static const struct watchdog_info bcm47xx_wdt_info = {
@@ -180,130 +159,100 @@ static const struct watchdog_info bcm47xx_wdt_info = {
WDIOF_MAGICCLOSE,
};
-static long bcm47xx_wdt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_value, retval = -EINVAL;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &bcm47xx_wdt_info,
- sizeof(bcm47xx_wdt_info)) ? -EFAULT : 0;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_SETOPTIONS:
- if (get_user(new_value, p))
- return -EFAULT;
-
- if (new_value & WDIOS_DISABLECARD) {
- bcm47xx_wdt_stop();
- retval = 0;
- }
-
- if (new_value & WDIOS_ENABLECARD) {
- bcm47xx_wdt_start();
- retval = 0;
- }
-
- return retval;
-
- case WDIOC_KEEPALIVE:
- bcm47xx_wdt_pet();
- return 0;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(new_value, p))
- return -EFAULT;
-
- if (bcm47xx_wdt_settimeout(new_value))
- return -EINVAL;
-
- bcm47xx_wdt_pet();
-
- case WDIOC_GETTIMEOUT:
- return put_user(wdt_time, p);
-
- default:
- return -ENOTTY;
- }
-}
-
static int bcm47xx_wdt_notify_sys(struct notifier_block *this,
- unsigned long code, void *unused)
+ unsigned long code, void *unused)
{
+ struct bcm47xx_wdt *wdt;
+
+ wdt = container_of(this, struct bcm47xx_wdt, notifier);
if (code == SYS_DOWN || code == SYS_HALT)
- bcm47xx_wdt_stop();
+ wdt->wdd.ops->stop(&wdt->wdd);
return NOTIFY_DONE;
}
-static const struct file_operations bcm47xx_wdt_fops = {
+static struct watchdog_ops bcm47xx_wdt_soft_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
- .unlocked_ioctl = bcm47xx_wdt_ioctl,
- .open = bcm47xx_wdt_open,
- .release = bcm47xx_wdt_release,
- .write = bcm47xx_wdt_write,
-};
-
-static struct miscdevice bcm47xx_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &bcm47xx_wdt_fops,
-};
-
-static struct notifier_block bcm47xx_wdt_notifier = {
- .notifier_call = bcm47xx_wdt_notify_sys,
+ .start = bcm47xx_wdt_soft_start,
+ .stop = bcm47xx_wdt_soft_stop,
+ .ping = bcm47xx_wdt_soft_keepalive,
+ .set_timeout = bcm47xx_wdt_soft_set_timeout,
};
-static int __init bcm47xx_wdt_init(void)
+static int bcm47xx_wdt_probe(struct platform_device *pdev)
{
int ret;
+ bool soft;
+ struct bcm47xx_wdt *wdt = dev_get_platdata(&pdev->dev);
- if (bcm47xx_wdt_hw_stop() < 0)
- return -ENODEV;
+ if (!wdt)
+ return -ENXIO;
- setup_timer(&wdt_timer, bcm47xx_timer_tick, 0L);
+ soft = wdt->max_timer_ms < WDT_SOFTTIMER_THRESHOLD * 1000;
- if (bcm47xx_wdt_settimeout(wdt_time)) {
- bcm47xx_wdt_settimeout(WDT_DEFAULT_TIME);
- pr_info("wdt_time value must be 0 < wdt_time < %d, using %d\n",
- (WDT_MAX_TIME + 1), wdt_time);
+ if (soft) {
+ wdt->wdd.ops = &bcm47xx_wdt_soft_ops;
+ setup_timer(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick,
+ (long unsigned int)wdt);
+ } else {
+ wdt->wdd.ops = &bcm47xx_wdt_hard_ops;
}
- ret = register_reboot_notifier(&bcm47xx_wdt_notifier);
+ wdt->wdd.info = &bcm47xx_wdt_info;
+ wdt->wdd.timeout = WDT_DEFAULT_TIME;
+ ret = wdt->wdd.ops->set_timeout(&wdt->wdd, timeout);
if (ret)
- return ret;
+ goto err_timer;
+ watchdog_set_nowayout(&wdt->wdd, nowayout);
- ret = misc_register(&bcm47xx_wdt_miscdev);
- if (ret) {
- unregister_reboot_notifier(&bcm47xx_wdt_notifier);
- return ret;
- }
+ wdt->notifier.notifier_call = &bcm47xx_wdt_notify_sys;
+
+ ret = register_reboot_notifier(&wdt->notifier);
+ if (ret)
+ goto err_timer;
- pr_info("BCM47xx Watchdog Timer enabled (%d seconds%s)\n",
- wdt_time, nowayout ? ", nowayout" : "");
+ ret = watchdog_register_device(&wdt->wdd);
+ if (ret)
+ goto err_notifier;
+
+ dev_info(&pdev->dev, "BCM47xx Watchdog Timer enabled (%d seconds%s%s)\n",
+ timeout, nowayout ? ", nowayout" : "",
+ soft ? ", Software Timer" : "");
return 0;
+
+err_notifier:
+ unregister_reboot_notifier(&wdt->notifier);
+err_timer:
+ if (soft)
+ del_timer_sync(&wdt->soft_timer);
+
+ return ret;
}
-static void __exit bcm47xx_wdt_exit(void)
+static int bcm47xx_wdt_remove(struct platform_device *pdev)
{
- if (!nowayout)
- bcm47xx_wdt_stop();
+ struct bcm47xx_wdt *wdt = dev_get_platdata(&pdev->dev);
- misc_deregister(&bcm47xx_wdt_miscdev);
+ if (!wdt)
+ return -ENXIO;
- unregister_reboot_notifier(&bcm47xx_wdt_notifier);
+ watchdog_unregister_device(&wdt->wdd);
+ unregister_reboot_notifier(&wdt->notifier);
+
+ return 0;
}
-module_init(bcm47xx_wdt_init);
-module_exit(bcm47xx_wdt_exit);
+static struct platform_driver bcm47xx_wdt_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bcm47xx-wdt",
+ },
+ .probe = bcm47xx_wdt_probe,
+ .remove = bcm47xx_wdt_remove,
+};
+
+module_platform_driver(bcm47xx_wdt_driver);
MODULE_AUTHOR("Aleksandar Radovanovic");
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Watchdog driver for Broadcom BCM47xx");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
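
The bcm47xx conversion above keeps a software-timer fallback: when the hardware counter maxes out below 60 seconds, a kernel timer re-arms the hardware once per second and counts down "soft ticks" that each keepalive refills. A rough sketch of that scheme, with hw_rearm_ms() standing in for the SSB/BCMA-specific reload (the names here are illustrative, not the driver's):

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timer.h>

static struct timer_list soft_timer;
static atomic_t soft_ticks;

/* stand-in for the SSB/BCMA call that reloads the short hardware counter */
static void hw_rearm_ms(u32 ms)
{
}

static void soft_timer_tick(unsigned long data)
{
	if (!atomic_dec_and_test(&soft_ticks)) {
		hw_rearm_ms(1000);			/* keep the hardware alive */
		mod_timer(&soft_timer, jiffies + HZ);	/* tick again in one second */
	} else {
		pr_crit("Watchdog will fire soon!!!\n");	/* keepalives stopped */
	}
}

static void soft_keepalive(unsigned int timeout_s)
{
	/* each ping refills the budget of one-second soft ticks */
	atomic_set(&soft_ticks, timeout_s);
}

static void soft_start(unsigned int timeout_s)
{
	setup_timer(&soft_timer, soft_timer_tick, 0UL);
	soft_keepalive(timeout_s);
	soft_timer_tick(0);
}
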
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index c0bc92d8e438..a8dbceb32914 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -15,12 +15,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
-#include <linux/fs.h>
#include <linux/smp.h>
-#include <linux/miscdevice.h>
-#include <linux/notifier.h>
#include <linux/watchdog.h>
-#include <linux/uaccess.h>
#include <asm/reg_booke.h>
#include <asm/time.h>
@@ -45,7 +41,7 @@ u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
#define WDTP_MASK (TCR_WP_MASK)
#endif
-static DEFINE_SPINLOCK(booke_wdt_lock);
+#ifdef CONFIG_PPC_FSL_BOOK3E
/* For the specified period, determine the number of seconds
* corresponding to the reset time. There will be a watchdog
@@ -86,6 +82,24 @@ static unsigned int sec_to_period(unsigned int secs)
return 0;
}
+#define MAX_WDT_TIMEOUT period_to_sec(1)
+
+#else /* CONFIG_PPC_FSL_BOOK3E */
+
+static unsigned long long period_to_sec(unsigned int period)
+{
+ return period;
+}
+
+static unsigned int sec_to_period(unsigned int secs)
+{
+ return secs;
+}
+
+#define MAX_WDT_TIMEOUT 3 /* from Kconfig */
+
+#endif /* !CONFIG_PPC_FSL_BOOK3E */
+
static void __booke_wdt_set(void *data)
{
u32 val;
@@ -107,9 +121,11 @@ static void __booke_wdt_ping(void *data)
mtspr(SPRN_TSR, TSR_ENW|TSR_WIS);
}
-static void booke_wdt_ping(void)
+static int booke_wdt_ping(struct watchdog_device *wdog)
{
on_each_cpu(__booke_wdt_ping, NULL, 0);
+
+ return 0;
}
static void __booke_wdt_enable(void *data)
@@ -146,152 +162,81 @@ static void __booke_wdt_disable(void *data)
}
-static ssize_t booke_wdt_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static void __booke_wdt_start(struct watchdog_device *wdog)
{
- booke_wdt_ping();
- return count;
+ on_each_cpu(__booke_wdt_enable, NULL, 0);
+ pr_debug("watchdog enabled (timeout = %u sec)\n", wdog->timeout);
}
-static struct watchdog_info ident = {
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
- .identity = "PowerPC Book-E Watchdog",
-};
-
-static long booke_wdt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+static int booke_wdt_start(struct watchdog_device *wdog)
{
- u32 tmp = 0;
- u32 __user *p = (u32 __user *)arg;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(p, &ident, sizeof(ident)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- return put_user(0, p);
- case WDIOC_GETBOOTSTATUS:
- /* XXX: something is clearing TSR */
- tmp = mfspr(SPRN_TSR) & TSR_WRS(3);
- /* returns CARDRESET if last reset was caused by the WDT */
- return put_user((tmp ? WDIOF_CARDRESET : 0), p);
- case WDIOC_SETOPTIONS:
- if (get_user(tmp, p))
- return -EFAULT;
- if (tmp == WDIOS_ENABLECARD) {
- booke_wdt_ping();
- break;
- } else
- return -EINVAL;
- return 0;
- case WDIOC_KEEPALIVE:
- booke_wdt_ping();
- return 0;
- case WDIOC_SETTIMEOUT:
- if (get_user(tmp, p))
- return -EFAULT;
-#ifdef CONFIG_PPC_FSL_BOOK3E
- /* period of 1 gives the largest possible timeout */
- if (tmp > period_to_sec(1))
- return -EINVAL;
- booke_wdt_period = sec_to_period(tmp);
-#else
- booke_wdt_period = tmp;
-#endif
- booke_wdt_set();
- /* Fall */
- case WDIOC_GETTIMEOUT:
-#ifdef CONFIG_FSL_BOOKE
- return put_user(period_to_sec(booke_wdt_period), p);
-#else
- return put_user(booke_wdt_period, p);
-#endif
- default:
- return -ENOTTY;
- }
-
- return 0;
-}
-
-/* wdt_is_active stores whether or not the /dev/watchdog device is opened */
-static unsigned long wdt_is_active;
-
-static int booke_wdt_open(struct inode *inode, struct file *file)
-{
- /* /dev/watchdog can only be opened once */
- if (test_and_set_bit(0, &wdt_is_active))
- return -EBUSY;
-
- spin_lock(&booke_wdt_lock);
if (booke_wdt_enabled == 0) {
booke_wdt_enabled = 1;
- on_each_cpu(__booke_wdt_enable, NULL, 0);
- pr_debug("watchdog enabled (timeout = %llu sec)\n",
- period_to_sec(booke_wdt_period));
+ __booke_wdt_start(wdog);
}
- spin_unlock(&booke_wdt_lock);
-
- return nonseekable_open(inode, file);
+ return 0;
}
-static int booke_wdt_release(struct inode *inode, struct file *file)
+static int booke_wdt_stop(struct watchdog_device *wdog)
{
-#ifndef CONFIG_WATCHDOG_NOWAYOUT
- /* Normally, the watchdog is disabled when /dev/watchdog is closed, but
- * if CONFIG_WATCHDOG_NOWAYOUT is defined, then it means that the
- * watchdog should remain enabled. So we disable it only if
- * CONFIG_WATCHDOG_NOWAYOUT is not defined.
- */
on_each_cpu(__booke_wdt_disable, NULL, 0);
booke_wdt_enabled = 0;
pr_debug("watchdog disabled\n");
-#endif
- clear_bit(0, &wdt_is_active);
+ return 0;
+}
+
+static int booke_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ if (timeout > MAX_WDT_TIMEOUT)
+ return -EINVAL;
+ booke_wdt_period = sec_to_period(timeout);
+ wdt_dev->timeout = timeout;
+ booke_wdt_set();
return 0;
}
-static const struct file_operations booke_wdt_fops = {
+static struct watchdog_info booke_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "PowerPC Book-E Watchdog",
+};
+
+static struct watchdog_ops booke_wdt_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = booke_wdt_write,
- .unlocked_ioctl = booke_wdt_ioctl,
- .open = booke_wdt_open,
- .release = booke_wdt_release,
+ .start = booke_wdt_start,
+ .stop = booke_wdt_stop,
+ .ping = booke_wdt_ping,
+ .set_timeout = booke_wdt_set_timeout,
};
-static struct miscdevice booke_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &booke_wdt_fops,
+static struct watchdog_device booke_wdt_dev = {
+ .info = &booke_wdt_info,
+ .ops = &booke_wdt_ops,
+ .min_timeout = 1,
+ .max_timeout = 0xFFFF
};
static void __exit booke_wdt_exit(void)
{
- misc_deregister(&booke_wdt_miscdev);
+ watchdog_unregister_device(&booke_wdt_dev);
}
static int __init booke_wdt_init(void)
{
int ret = 0;
+ bool nowayout = WATCHDOG_NOWAYOUT;
pr_info("powerpc book-e watchdog driver loaded\n");
- ident.firmware_version = cur_cpu_spec->pvr_value;
-
- ret = misc_register(&booke_wdt_miscdev);
- if (ret) {
- pr_err("cannot register device (minor=%u, ret=%i)\n",
- WATCHDOG_MINOR, ret);
- return ret;
- }
-
- spin_lock(&booke_wdt_lock);
- if (booke_wdt_enabled == 1) {
- pr_info("watchdog enabled (timeout = %llu sec)\n",
- period_to_sec(booke_wdt_period));
- on_each_cpu(__booke_wdt_enable, NULL, 0);
- }
- spin_unlock(&booke_wdt_lock);
+ booke_wdt_info.firmware_version = cur_cpu_spec->pvr_value;
+ booke_wdt_set_timeout(&booke_wdt_dev,
+ period_to_sec(CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT));
+ watchdog_set_nowayout(&booke_wdt_dev, nowayout);
+ if (booke_wdt_enabled)
+ __booke_wdt_start(&booke_wdt_dev);
+
+ ret = watchdog_register_device(&booke_wdt_dev);
return ret;
}
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 11d55ce5ca81..70387582843f 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -411,7 +411,7 @@ static long cpwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
.identity = DRIVER_NAME,
};
void __user *argp = (void __user *)arg;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int index = iminor(inode) - WD0_MINOR;
struct cpwd *p = cpwd_device;
int setopt = 0;
@@ -499,7 +499,7 @@ static long cpwd_compat_ioctl(struct file *file, unsigned int cmd,
static ssize_t cpwd_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct cpwd *p = cpwd_device;
int index = iminor(inode);
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index e8e87246ea6d..7df1fdca9e78 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -69,7 +69,6 @@ static unsigned long wdt_status;
#define WDT_REGION_INITED 2
#define WDT_DEVICE_INITED 3
-static struct resource *wdt_mem;
static void __iomem *wdt_base;
struct clk *wdt_clk;
@@ -201,10 +200,11 @@ static struct miscdevice davinci_wdt_miscdev = {
static int davinci_wdt_probe(struct platform_device *pdev)
{
- int ret = 0, size;
+ int ret = 0;
struct device *dev = &pdev->dev;
+ struct resource *wdt_mem;
- wdt_clk = clk_get(dev, NULL);
+ wdt_clk = devm_clk_get(dev, NULL);
if (WARN_ON(IS_ERR(wdt_clk)))
return PTR_ERR(wdt_clk);
@@ -221,43 +221,26 @@ static int davinci_wdt_probe(struct platform_device *pdev)
return -ENOENT;
}
- size = resource_size(wdt_mem);
- if (!request_mem_region(wdt_mem->start, size, pdev->name)) {
- dev_err(dev, "failed to get memory region\n");
- return -ENOENT;
- }
-
- wdt_base = ioremap(wdt_mem->start, size);
+ wdt_base = devm_request_and_ioremap(dev, wdt_mem);
if (!wdt_base) {
- dev_err(dev, "failed to map memory region\n");
- release_mem_region(wdt_mem->start, size);
- wdt_mem = NULL;
- return -ENOMEM;
+ dev_err(dev, "ioremap failed\n");
+ return -EADDRNOTAVAIL;
}
ret = misc_register(&davinci_wdt_miscdev);
if (ret < 0) {
dev_err(dev, "cannot register misc device\n");
- release_mem_region(wdt_mem->start, size);
- wdt_mem = NULL;
} else {
set_bit(WDT_DEVICE_INITED, &wdt_status);
}
- iounmap(wdt_base);
return ret;
}
static int davinci_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&davinci_wdt_miscdev);
- if (wdt_mem) {
- release_mem_region(wdt_mem->start, resource_size(wdt_mem));
- wdt_mem = NULL;
- }
-
clk_disable_unprepare(wdt_clk);
- clk_put(wdt_clk);
return 0;
}
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index b9c5b58e59d3..257cfbad21da 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -310,6 +310,7 @@ static struct platform_driver gef_wdt_driver = {
.of_match_table = gef_wdt_ids,
},
.probe = gef_wdt_probe,
+ .remove = gef_wdt_remove,
};
static int __init gef_wdt_init(void)
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index b0e541d022e6..af88ffd1068f 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -45,6 +45,11 @@
#include "omap_wdt.h"
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
static unsigned timer_margin;
module_param(timer_margin, uint, 0);
MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)");
@@ -201,7 +206,6 @@ static const struct watchdog_ops omap_wdt_ops = {
static int omap_wdt_probe(struct platform_device *pdev)
{
struct omap_wd_timer_platform_data *pdata = pdev->dev.platform_data;
- bool nowayout = WATCHDOG_NOWAYOUT;
struct watchdog_device *omap_wdt;
struct resource *res, *mem;
struct omap_wdt_dev *wdev;
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 7c18b3bffcf7..da577980d390 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -140,6 +140,7 @@ static const struct watchdog_ops orion_wdt_ops = {
static struct watchdog_device orion_wdt = {
.info = &orion_wdt_info,
.ops = &orion_wdt_ops,
+ .min_timeout = 1,
};
static int orion_wdt_probe(struct platform_device *pdev)
@@ -164,12 +165,9 @@ static int orion_wdt_probe(struct platform_device *pdev)
wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk;
- if ((heartbeat < 1) || (heartbeat > wdt_max_duration))
- heartbeat = wdt_max_duration;
-
- orion_wdt.timeout = heartbeat;
- orion_wdt.min_timeout = 1;
+ orion_wdt.timeout = wdt_max_duration;
orion_wdt.max_timeout = wdt_max_duration;
+ watchdog_init_timeout(&orion_wdt, heartbeat, &pdev->dev);
watchdog_set_nowayout(&orion_wdt, nowayout);
ret = watchdog_register_device(&orion_wdt);
@@ -179,7 +177,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
}
pr_info("Initial timeout %d sec%s\n",
- heartbeat, nowayout ? ", nowayout" : "");
+ orion_wdt.timeout, nowayout ? ", nowayout" : "");
return 0;
}
@@ -225,4 +223,5 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:orion_wdt");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index de1f3fa1d787..a3684a30eb69 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -142,6 +142,7 @@ static const struct watchdog_ops pnx4008_wdt_ops = {
static struct watchdog_device pnx4008_wdd = {
.info = &pnx4008_wdt_ident,
.ops = &pnx4008_wdt_ops,
+ .timeout = DEFAULT_HEARTBEAT,
.min_timeout = 1,
.max_timeout = MAX_HEARTBEAT,
};
@@ -151,8 +152,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
struct resource *r;
int ret = 0;
- if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
- heartbeat = DEFAULT_HEARTBEAT;
+ watchdog_init_timeout(&pnx4008_wdd, heartbeat, &pdev->dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
wdt_base = devm_ioremap_resource(&pdev->dev, r);
@@ -167,7 +167,6 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
if (ret)
goto out;
- pnx4008_wdd.timeout = heartbeat;
pnx4008_wdd.bootstatus = (readl(WDTIM_RES(wdt_base)) & WDOG_RESET) ?
WDIOF_CARDRESET : 0;
watchdog_set_nowayout(&pnx4008_wdd, nowayout);
@@ -181,7 +180,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "PNX4008 Watchdog Timer: heartbeat %d sec\n",
- heartbeat);
+ pnx4008_wdd.timeout);
return 0;
diff --git a/drivers/watchdog/retu_wdt.c b/drivers/watchdog/retu_wdt.c
new file mode 100644
index 000000000000..f53615dc633d
--- /dev/null
+++ b/drivers/watchdog/retu_wdt.c
@@ -0,0 +1,178 @@
+/*
+ * Retu watchdog driver
+ *
+ * Copyright (C) 2004, 2005 Nokia Corporation
+ *
+ * Based on code written by Amit Kucheria and Michael Buesch.
+ * Rewritten by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/retu.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+
+/* Watchdog timer values in seconds */
+#define RETU_WDT_MAX_TIMER 63
+
+struct retu_wdt_dev {
+ struct retu_dev *rdev;
+ struct device *dev;
+ struct delayed_work ping_work;
+};
+
+/*
+ * Since the Retu watchdog cannot be disabled in hardware, we must kick it
+ * with a timer until userspace watchdog software takes over. If
+ * CONFIG_WATCHDOG_NOWAYOUT is set, we never start feeding it.
+ */
+static void retu_wdt_ping_enable(struct retu_wdt_dev *wdev)
+{
+ retu_write(wdev->rdev, RETU_REG_WATCHDOG, RETU_WDT_MAX_TIMER);
+ schedule_delayed_work(&wdev->ping_work,
+ round_jiffies_relative(RETU_WDT_MAX_TIMER * HZ / 2));
+}
+
+static void retu_wdt_ping_disable(struct retu_wdt_dev *wdev)
+{
+ retu_write(wdev->rdev, RETU_REG_WATCHDOG, RETU_WDT_MAX_TIMER);
+ cancel_delayed_work_sync(&wdev->ping_work);
+}
+
+static void retu_wdt_ping_work(struct work_struct *work)
+{
+ struct retu_wdt_dev *wdev = container_of(to_delayed_work(work),
+ struct retu_wdt_dev, ping_work);
+ retu_wdt_ping_enable(wdev);
+}
+
+static int retu_wdt_start(struct watchdog_device *wdog)
+{
+ struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+
+ retu_wdt_ping_disable(wdev);
+
+ return retu_write(wdev->rdev, RETU_REG_WATCHDOG, wdog->timeout);
+}
+
+static int retu_wdt_stop(struct watchdog_device *wdog)
+{
+ struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+
+ retu_wdt_ping_enable(wdev);
+
+ return 0;
+}
+
+static int retu_wdt_ping(struct watchdog_device *wdog)
+{
+ struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+
+ return retu_write(wdev->rdev, RETU_REG_WATCHDOG, wdog->timeout);
+}
+
+static int retu_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int timeout)
+{
+ struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+
+ wdog->timeout = timeout;
+ return retu_write(wdev->rdev, RETU_REG_WATCHDOG, wdog->timeout);
+}
+
+static const struct watchdog_info retu_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "Retu watchdog",
+};
+
+static const struct watchdog_ops retu_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = retu_wdt_start,
+ .stop = retu_wdt_stop,
+ .ping = retu_wdt_ping,
+ .set_timeout = retu_wdt_set_timeout,
+};
+
+static int retu_wdt_probe(struct platform_device *pdev)
+{
+ struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
+ bool nowayout = WATCHDOG_NOWAYOUT;
+ struct watchdog_device *retu_wdt;
+ struct retu_wdt_dev *wdev;
+ int ret;
+
+ retu_wdt = devm_kzalloc(&pdev->dev, sizeof(*retu_wdt), GFP_KERNEL);
+ if (!retu_wdt)
+ return -ENOMEM;
+
+ wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
+ if (!wdev)
+ return -ENOMEM;
+
+ retu_wdt->info = &retu_wdt_info;
+ retu_wdt->ops = &retu_wdt_ops;
+ retu_wdt->timeout = RETU_WDT_MAX_TIMER;
+ retu_wdt->min_timeout = 0;
+ retu_wdt->max_timeout = RETU_WDT_MAX_TIMER;
+
+ watchdog_set_drvdata(retu_wdt, wdev);
+ watchdog_set_nowayout(retu_wdt, nowayout);
+
+ wdev->rdev = rdev;
+ wdev->dev = &pdev->dev;
+
+ INIT_DELAYED_WORK(&wdev->ping_work, retu_wdt_ping_work);
+
+ ret = watchdog_register_device(retu_wdt);
+ if (ret < 0)
+ return ret;
+
+ if (nowayout)
+ retu_wdt_ping(retu_wdt);
+ else
+ retu_wdt_ping_enable(wdev);
+
+ platform_set_drvdata(pdev, retu_wdt);
+
+ return 0;
+}
+
+static int retu_wdt_remove(struct platform_device *pdev)
+{
+ struct watchdog_device *wdog = platform_get_drvdata(pdev);
+ struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+
+ watchdog_unregister_device(wdog);
+ cancel_delayed_work_sync(&wdev->ping_work);
+
+ return 0;
+}
+
+static struct platform_driver retu_wdt_driver = {
+ .probe = retu_wdt_probe,
+ .remove = retu_wdt_remove,
+ .driver = {
+ .name = "retu-wdt",
+ },
+};
+module_platform_driver(retu_wdt_driver);
+
+MODULE_ALIAS("platform:retu-wdt");
+MODULE_DESCRIPTION("Retu watchdog");
+MODULE_AUTHOR("Amit Kucheria");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+MODULE_LICENSE("GPL");
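
The Retu driver above cannot switch the hardware off, so until userspace opens the device it feeds the watchdog from a self-rearming delayed work scheduled at half the 63-second maximum; start() then cancels the work and hands the feeding duty to userspace. A hedged sketch of such a self-rearming delayed work, with feed_hardware() standing in for the retu_write() call:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define MAX_TIMER_S	63	/* hardware maximum, as in the driver above */

static struct delayed_work ping_work;

/* stand-in for retu_write(rdev, RETU_REG_WATCHDOG, MAX_TIMER_S) */
static void feed_hardware(void)
{
}

static void ping_work_fn(struct work_struct *work)
{
	feed_hardware();
	/* re-arm well before the 63 s budget runs out */
	schedule_delayed_work(&ping_work,
			      round_jiffies_relative(MAX_TIMER_S * HZ / 2));
}

static void autofeed_start(void)
{
	INIT_DELAYED_WORK(&ping_work, ping_work_fn);
	ping_work_fn(&ping_work.work);
}

static void autofeed_stop(void)		/* called when userspace takes over */
{
	cancel_delayed_work_sync(&ping_work);
}
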
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 27bcd4e2c4a4..c1a221cbeae4 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -53,7 +53,7 @@
#define CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME (15)
static bool nowayout = WATCHDOG_NOWAYOUT;
-static int tmr_margin = CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME;
+static int tmr_margin;
static int tmr_atboot = CONFIG_S3C2410_WATCHDOG_ATBOOT;
static int soft_noboot;
static int debug;
@@ -226,6 +226,7 @@ static struct watchdog_ops s3c2410wdt_ops = {
static struct watchdog_device s3c2410_wdd = {
.info = &s3c2410_wdt_ident,
.ops = &s3c2410wdt_ops,
+ .timeout = CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME,
};
/* interrupt handler code */
@@ -309,7 +310,6 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
unsigned int wtcon;
int started = 0;
int ret;
- int size;
DBG("%s: probe=%p\n", __func__, pdev);
@@ -330,28 +330,20 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
}
/* get the memory region for the watchdog timer */
-
- size = resource_size(wdt_mem);
- if (!request_mem_region(wdt_mem->start, size, pdev->name)) {
- dev_err(dev, "failed to get memory region\n");
- ret = -EBUSY;
- goto err;
- }
-
- wdt_base = ioremap(wdt_mem->start, size);
+ wdt_base = devm_request_and_ioremap(dev, wdt_mem);
if (wdt_base == NULL) {
- dev_err(dev, "failed to ioremap() region\n");
- ret = -EINVAL;
- goto err_req;
+ dev_err(dev, "failed to devm_request_and_ioremap() region\n");
+ ret = -ENOMEM;
+ goto err;
}
DBG("probe: mapped wdt_base=%p\n", wdt_base);
- wdt_clock = clk_get(&pdev->dev, "watchdog");
+ wdt_clock = devm_clk_get(dev, "watchdog");
if (IS_ERR(wdt_clock)) {
dev_err(dev, "failed to find watchdog clock source\n");
ret = PTR_ERR(wdt_clock);
- goto err_map;
+ goto err;
}
clk_prepare_enable(wdt_clock);
@@ -365,7 +357,8 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
/* see if we can actually set the requested timer margin, and if
* not, try the default value */
- if (s3c2410wdt_set_heartbeat(&s3c2410_wdd, tmr_margin)) {
+ watchdog_init_timeout(&s3c2410_wdd, tmr_margin, &pdev->dev);
+ if (s3c2410wdt_set_heartbeat(&s3c2410_wdd, s3c2410_wdd.timeout)) {
started = s3c2410wdt_set_heartbeat(&s3c2410_wdd,
CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
@@ -378,7 +371,8 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
"cannot start\n");
}
- ret = request_irq(wdt_irq->start, s3c2410wdt_irq, 0, pdev->name, pdev);
+ ret = devm_request_irq(dev, wdt_irq->start, s3c2410wdt_irq, 0,
+ pdev->name, pdev);
if (ret != 0) {
dev_err(dev, "failed to install irq (%d)\n", ret);
goto err_cpufreq;
@@ -389,7 +383,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
ret = watchdog_register_device(&s3c2410_wdd);
if (ret) {
dev_err(dev, "cannot register watchdog (%d)\n", ret);
- goto err_irq;
+ goto err_cpufreq;
}
if (tmr_atboot && started == 0) {
@@ -414,23 +408,13 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
return 0;
- err_irq:
- free_irq(wdt_irq->start, pdev);
-
err_cpufreq:
s3c2410wdt_cpufreq_deregister();
err_clk:
clk_disable_unprepare(wdt_clock);
- clk_put(wdt_clock);
wdt_clock = NULL;
- err_map:
- iounmap(wdt_base);
-
- err_req:
- release_mem_region(wdt_mem->start, size);
-
err:
wdt_irq = NULL;
wdt_mem = NULL;
@@ -441,17 +425,11 @@ static int s3c2410wdt_remove(struct platform_device *dev)
{
watchdog_unregister_device(&s3c2410_wdd);
- free_irq(wdt_irq->start, dev);
-
s3c2410wdt_cpufreq_deregister();
clk_disable_unprepare(wdt_clock);
- clk_put(wdt_clock);
wdt_clock = NULL;
- iounmap(wdt_base);
-
- release_mem_region(wdt_mem->start, resource_size(wdt_mem));
wdt_irq = NULL;
wdt_mem = NULL;
return 0;
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 2b0e000d4377..e3b8f757d2d3 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -361,7 +361,7 @@ static unsigned char sp5100_tco_setupdevice(void)
{
struct pci_dev *dev = NULL;
const char *dev_name = NULL;
- u32 val;
+ u32 val, tmp_val;
u32 index_reg, data_reg, base_addr;
/* Match the PCI device */
@@ -497,30 +497,19 @@ static unsigned char sp5100_tco_setupdevice(void)
pr_debug("Got 0x%04x from resource tree\n", val);
}
- /* Restore to the low three bits, if chipset is SB8x0(or later) */
- if (sp5100_tco_pci->revision >= 0x40) {
- u8 reserved_bit;
- reserved_bit = inb(base_addr) & 0x7;
- val |= (u32)reserved_bit;
- }
+ /* Restore to the low three bits */
+ outb(base_addr+0, index_reg);
+ tmp_val = val | (inb(data_reg) & 0x7);
/* Re-programming the watchdog timer base address */
outb(base_addr+0, index_reg);
- /* Low three bits of BASE are reserved */
- outb((val >> 0) & 0xf8, data_reg);
+ outb((tmp_val >> 0) & 0xff, data_reg);
outb(base_addr+1, index_reg);
- outb((val >> 8) & 0xff, data_reg);
+ outb((tmp_val >> 8) & 0xff, data_reg);
outb(base_addr+2, index_reg);
- outb((val >> 16) & 0xff, data_reg);
+ outb((tmp_val >> 16) & 0xff, data_reg);
outb(base_addr+3, index_reg);
- outb((val >> 24) & 0xff, data_reg);
-
- /*
- * Clear unnecessary the low three bits,
- * if chipset is SB8x0(or later)
- */
- if (sp5100_tco_pci->revision >= 0x40)
- val &= ~0x7;
+ outb((tmp_val >> 24) & 0xff, data_reg);
if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
dev_name)) {
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
new file mode 100644
index 000000000000..c97e98dcde62
--- /dev/null
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -0,0 +1,111 @@
+/*
+ * Watchdog driver for the RTC based watchdog in STMP3xxx and i.MX23/28
+ *
+ * Author: Wolfram Sang <w.sang@pengutronix.de>
+ *
+ * Copyright (C) 2011-12 Wolfram Sang, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/stmp3xxx_rtc_wdt.h>
+
+#define WDOG_TICK_RATE 1000 /* 1 kHz clock */
+#define STMP3XXX_DEFAULT_TIMEOUT 19
+#define STMP3XXX_MAX_TIMEOUT (UINT_MAX / WDOG_TICK_RATE)
+
+static int heartbeat = STMP3XXX_DEFAULT_TIMEOUT;
+module_param(heartbeat, uint, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat period in seconds from 1 to "
+ __MODULE_STRING(STMP3XXX_MAX_TIMEOUT) ", default "
+ __MODULE_STRING(STMP3XXX_DEFAULT_TIMEOUT));
+
+static int wdt_start(struct watchdog_device *wdd)
+{
+ struct device *dev = watchdog_get_drvdata(wdd);
+ struct stmp3xxx_wdt_pdata *pdata = dev->platform_data;
+
+ pdata->wdt_set_timeout(dev->parent, wdd->timeout * WDOG_TICK_RATE);
+ return 0;
+}
+
+static int wdt_stop(struct watchdog_device *wdd)
+{
+ struct device *dev = watchdog_get_drvdata(wdd);
+ struct stmp3xxx_wdt_pdata *pdata = dev->platform_data;
+
+ pdata->wdt_set_timeout(dev->parent, 0);
+ return 0;
+}
+
+static int wdt_set_timeout(struct watchdog_device *wdd, unsigned new_timeout)
+{
+ wdd->timeout = new_timeout;
+ return wdt_start(wdd);
+}
+
+static const struct watchdog_info stmp3xxx_wdt_ident = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "STMP3XXX RTC Watchdog",
+};
+
+static const struct watchdog_ops stmp3xxx_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wdt_start,
+ .stop = wdt_stop,
+ .set_timeout = wdt_set_timeout,
+};
+
+static struct watchdog_device stmp3xxx_wdd = {
+ .info = &stmp3xxx_wdt_ident,
+ .ops = &stmp3xxx_wdt_ops,
+ .min_timeout = 1,
+ .max_timeout = STMP3XXX_MAX_TIMEOUT,
+ .status = WATCHDOG_NOWAYOUT_INIT_STATUS,
+};
+
+static int stmp3xxx_wdt_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ watchdog_set_drvdata(&stmp3xxx_wdd, &pdev->dev);
+
+ stmp3xxx_wdd.timeout = clamp_t(unsigned, heartbeat, 1, STMP3XXX_MAX_TIMEOUT);
+
+ ret = watchdog_register_device(&stmp3xxx_wdd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot register watchdog device\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "initialized watchdog with heartbeat %ds\n",
+ stmp3xxx_wdd.timeout);
+ return 0;
+}
+
+static int stmp3xxx_wdt_remove(struct platform_device *pdev)
+{
+ watchdog_unregister_device(&stmp3xxx_wdd);
+ return 0;
+}
+
+static struct platform_driver stmp3xxx_wdt_driver = {
+ .driver = {
+ .name = "stmp3xxx_rtc_wdt",
+ },
+ .probe = stmp3xxx_wdt_probe,
+ .remove = stmp3xxx_wdt_remove,
+};
+module_platform_driver(stmp3xxx_wdt_driver);
+
+MODULE_DESCRIPTION("STMP3XXX RTC Watchdog Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
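
The new stmp3xxx_rtc_wdt driver owns no registers of its own: it only converts seconds into 1 kHz ticks and calls the wdt_set_timeout() hook that its parent (the RTC/MFD driver) supplies via platform data, with 0 ticks meaning "stop". A hedged sketch of how a parent driver might wire that up; the exact stmp3xxx_wdt_pdata prototype and the parent-side register write are assumptions inferred from the probe code above:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/stmp3xxx_rtc_wdt.h>

/* assumed parent-side accessor: writes the tick count (0 = disable) to the
 * watchdog register owned by the RTC block */
static void parent_wdt_set_timeout(struct device *dev, u32 ticks)
{
}

static struct stmp3xxx_wdt_pdata wdt_pdata = {
	.wdt_set_timeout = parent_wdt_set_timeout,
};

static int register_wdt_child(struct device *parent)
{
	struct platform_device *child;

	child = platform_device_register_data(parent, "stmp3xxx_rtc_wdt", -1,
					      &wdt_pdata, sizeof(wdt_pdata));
	return IS_ERR(child) ? PTR_ERR(child) : 0;
}
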
diff --git a/drivers/watchdog/stmp3xxx_wdt.c b/drivers/watchdog/stmp3xxx_wdt.c
deleted file mode 100644
index 1f4f69728fee..000000000000
--- a/drivers/watchdog/stmp3xxx_wdt.c
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * Watchdog driver for Freescale STMP37XX/STMP378X
- *
- * Author: Vitaly Wool <vital@embeddedalley.com>
- *
- * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-
-#include <mach/platform.h>
-#include <mach/regs-rtc.h>
-
-#define DEFAULT_HEARTBEAT 19
-#define MAX_HEARTBEAT (0x10000000 >> 6)
-
-/* missing bitmask in headers */
-#define BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER 0x80000000
-
-#define WDT_IN_USE 0
-#define WDT_OK_TO_CLOSE 1
-
-#define WDOG_COUNTER_RATE 1000 /* 1 kHz clock */
-
-static DEFINE_SPINLOCK(stmp3xxx_wdt_io_lock);
-static unsigned long wdt_status;
-static const bool nowayout = WATCHDOG_NOWAYOUT;
-static int heartbeat = DEFAULT_HEARTBEAT;
-static unsigned long boot_status;
-
-static void wdt_enable(u32 value)
-{
- spin_lock(&stmp3xxx_wdt_io_lock);
- __raw_writel(value, REGS_RTC_BASE + HW_RTC_WATCHDOG);
- stmp3xxx_setl(BM_RTC_CTRL_WATCHDOGEN, REGS_RTC_BASE + HW_RTC_CTRL);
- stmp3xxx_setl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER,
- REGS_RTC_BASE + HW_RTC_PERSISTENT1);
- spin_unlock(&stmp3xxx_wdt_io_lock);
-}
-
-static void wdt_disable(void)
-{
- spin_lock(&stmp3xxx_wdt_io_lock);
- stmp3xxx_clearl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER,
- REGS_RTC_BASE + HW_RTC_PERSISTENT1);
- stmp3xxx_clearl(BM_RTC_CTRL_WATCHDOGEN, REGS_RTC_BASE + HW_RTC_CTRL);
- spin_unlock(&stmp3xxx_wdt_io_lock);
-}
-
-static void wdt_ping(void)
-{
- wdt_enable(heartbeat * WDOG_COUNTER_RATE);
-}
-
-static int stmp3xxx_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(WDT_IN_USE, &wdt_status))
- return -EBUSY;
-
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
- wdt_ping();
-
- return nonseekable_open(inode, file);
-}
-
-static ssize_t stmp3xxx_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- set_bit(WDT_OK_TO_CLOSE, &wdt_status);
- }
- }
- wdt_ping();
- }
-
- return len;
-}
-
-static const struct watchdog_info ident = {
- .options = WDIOF_CARDRESET |
- WDIOF_MAGICCLOSE |
- WDIOF_SETTIMEOUT |
- WDIOF_KEEPALIVEPING,
- .identity = "STMP3XXX Watchdog",
-};
-
-static long stmp3xxx_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_heartbeat, opts;
- int ret = -ENOTTY;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- ret = put_user(0, p);
- break;
-
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(boot_status, p);
- break;
-
- case WDIOC_SETOPTIONS:
- if (get_user(opts, p)) {
- ret = -EFAULT;
- break;
- }
- if (opts & WDIOS_DISABLECARD)
- wdt_disable();
- else if (opts & WDIOS_ENABLECARD)
- wdt_ping();
- else {
- pr_debug("%s: unknown option 0x%x\n", __func__, opts);
- ret = -EINVAL;
- break;
- }
- ret = 0;
- break;
-
- case WDIOC_KEEPALIVE:
- wdt_ping();
- ret = 0;
- break;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(new_heartbeat, p)) {
- ret = -EFAULT;
- break;
- }
- if (new_heartbeat <= 0 || new_heartbeat > MAX_HEARTBEAT) {
- ret = -EINVAL;
- break;
- }
-
- heartbeat = new_heartbeat;
- wdt_ping();
- /* Fall through */
-
- case WDIOC_GETTIMEOUT:
- ret = put_user(heartbeat, p);
- break;
- }
- return ret;
-}
-
-static int stmp3xxx_wdt_release(struct inode *inode, struct file *file)
-{
- int ret = 0;
-
- if (!nowayout) {
- if (!test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
- wdt_ping();
- pr_debug("%s: Device closed unexpectedly\n", __func__);
- ret = -EINVAL;
- } else {
- wdt_disable();
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
- }
- }
- clear_bit(WDT_IN_USE, &wdt_status);
-
- return ret;
-}
-
-static const struct file_operations stmp3xxx_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = stmp3xxx_wdt_write,
- .unlocked_ioctl = stmp3xxx_wdt_ioctl,
- .open = stmp3xxx_wdt_open,
- .release = stmp3xxx_wdt_release,
-};
-
-static struct miscdevice stmp3xxx_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &stmp3xxx_wdt_fops,
-};
-
-static int stmp3xxx_wdt_probe(struct platform_device *pdev)
-{
- int ret = 0;
-
- if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
- heartbeat = DEFAULT_HEARTBEAT;
-
- boot_status = __raw_readl(REGS_RTC_BASE + HW_RTC_PERSISTENT1) &
- BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER;
- boot_status = !!boot_status;
- stmp3xxx_clearl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER,
- REGS_RTC_BASE + HW_RTC_PERSISTENT1);
- wdt_disable(); /* disable for now */
-
- ret = misc_register(&stmp3xxx_wdt_miscdev);
- if (ret < 0) {
- dev_err(&pdev->dev, "cannot register misc device\n");
- return ret;
- }
-
- pr_info("initialized, heartbeat %d sec\n", heartbeat);
-
- return ret;
-}
-
-static int stmp3xxx_wdt_remove(struct platform_device *pdev)
-{
- misc_deregister(&stmp3xxx_wdt_miscdev);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int wdt_suspended;
-static u32 wdt_saved_time;
-
-static int stmp3xxx_wdt_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- if (__raw_readl(REGS_RTC_BASE + HW_RTC_CTRL) &
- BM_RTC_CTRL_WATCHDOGEN) {
- wdt_suspended = 1;
- wdt_saved_time = __raw_readl(REGS_RTC_BASE + HW_RTC_WATCHDOG);
- wdt_disable();
- }
- return 0;
-}
-
-static int stmp3xxx_wdt_resume(struct platform_device *pdev)
-{
- if (wdt_suspended) {
- wdt_enable(wdt_saved_time);
- wdt_suspended = 0;
- }
- return 0;
-}
-#else
-#define stmp3xxx_wdt_suspend NULL
-#define stmp3xxx_wdt_resume NULL
-#endif
-
-static struct platform_driver platform_wdt_driver = {
- .driver = {
- .name = "stmp3xxx_wdt",
- },
- .probe = stmp3xxx_wdt_probe,
- .remove = stmp3xxx_wdt_remove,
- .suspend = stmp3xxx_wdt_suspend,
- .resume = stmp3xxx_wdt_resume,
-};
-
-module_platform_driver(platform_wdt_driver);
-
-MODULE_DESCRIPTION("STMP3XXX Watchdog Driver");
-MODULE_LICENSE("GPL");
-
-module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat,
- "Watchdog heartbeat period in seconds from 1 to "
- __MODULE_STRING(MAX_HEARTBEAT) ", default "
- __MODULE_STRING(DEFAULT_HEARTBEAT));
-
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
new file mode 100644
index 000000000000..a614d84121c3
--- /dev/null
+++ b/drivers/watchdog/ux500_wdt.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011-2013
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/miscdevice.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/ux500_wdt.h>
+
+#include <linux/mfd/dbx500-prcmu.h>
+
+#define WATCHDOG_TIMEOUT 600 /* 10 minutes */
+
+#define WATCHDOG_MIN 0
+#define WATCHDOG_MAX28 268435 /* 28 bit resolution in ms == 268435.455 s */
+#define WATCHDOG_MAX32 4294967 /* 32 bit resolution in ms == 4294967.295 s */
+
+static unsigned int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int ux500_wdt_start(struct watchdog_device *wdd)
+{
+ return prcmu_enable_a9wdog(PRCMU_WDOG_ALL);
+}
+
+static int ux500_wdt_stop(struct watchdog_device *wdd)
+{
+ return prcmu_disable_a9wdog(PRCMU_WDOG_ALL);
+}
+
+static int ux500_wdt_keepalive(struct watchdog_device *wdd)
+{
+ return prcmu_kick_a9wdog(PRCMU_WDOG_ALL);
+}
+
+static int ux500_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ ux500_wdt_stop(wdd);
+ prcmu_load_a9wdog(PRCMU_WDOG_ALL, timeout * 1000);
+ ux500_wdt_start(wdd);
+
+ return 0;
+}
+
+static const struct watchdog_info ux500_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "Ux500 WDT",
+ .firmware_version = 1,
+};
+
+static const struct watchdog_ops ux500_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = ux500_wdt_start,
+ .stop = ux500_wdt_stop,
+ .ping = ux500_wdt_keepalive,
+ .set_timeout = ux500_wdt_set_timeout,
+};
+
+static struct watchdog_device ux500_wdt = {
+ .info = &ux500_wdt_info,
+ .ops = &ux500_wdt_ops,
+ .min_timeout = WATCHDOG_MIN,
+ .max_timeout = WATCHDOG_MAX32,
+};
+
+static int ux500_wdt_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct ux500_wdt_data *pdata = pdev->dev.platform_data;
+
+ if (pdata) {
+ if (pdata->timeout > 0)
+ timeout = pdata->timeout;
+ if (pdata->has_28_bits_resolution)
+ ux500_wdt.max_timeout = WATCHDOG_MAX28;
+ }
+
+ watchdog_set_nowayout(&ux500_wdt, nowayout);
+
+ /* disable auto off on sleep */
+ prcmu_config_a9wdog(PRCMU_WDOG_CPU1, false);
+
+ /* set HW initial value */
+ prcmu_load_a9wdog(PRCMU_WDOG_ALL, timeout * 1000);
+
+ ret = watchdog_register_device(&ux500_wdt);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "initialized\n");
+
+ return 0;
+}
+
+static int ux500_wdt_remove(struct platform_device *dev)
+{
+ watchdog_unregister_device(&ux500_wdt);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ux500_wdt_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ if (watchdog_active(&ux500_wdt)) {
+ ux500_wdt_stop(&ux500_wdt);
+ prcmu_config_a9wdog(PRCMU_WDOG_CPU1, true);
+
+ prcmu_load_a9wdog(PRCMU_WDOG_ALL, timeout * 1000);
+ ux500_wdt_start(&ux500_wdt);
+ }
+ return 0;
+}
+
+static int ux500_wdt_resume(struct platform_device *pdev)
+{
+ if (watchdog_active(&ux500_wdt)) {
+ ux500_wdt_stop(&ux500_wdt);
+ prcmu_config_a9wdog(PRCMU_WDOG_CPU1, false);
+
+ prcmu_load_a9wdog(PRCMU_WDOG_ALL, timeout * 1000);
+ ux500_wdt_start(&ux500_wdt);
+ }
+ return 0;
+}
+#else
+#define ux500_wdt_suspend NULL
+#define ux500_wdt_resume NULL
+#endif
+
+static struct platform_driver ux500_wdt_driver = {
+ .probe = ux500_wdt_probe,
+ .remove = ux500_wdt_remove,
+ .suspend = ux500_wdt_suspend,
+ .resume = ux500_wdt_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ux500_wdt",
+ },
+};
+
+module_platform_driver(ux500_wdt_driver);
+
+MODULE_AUTHOR("Jonas Aaberg <jonas.aberg@stericsson.com>");
+MODULE_DESCRIPTION("Ux500 Watchdog Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:ux500_wdt");
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 3796434991fa..05d18b4c661b 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -36,12 +36,68 @@
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/idr.h> /* For ida_* macros */
#include <linux/err.h> /* For IS_ERR macros */
+#include <linux/of.h>		/* For of_property_read_u32 */
#include "watchdog_core.h" /* For watchdog_dev_register/... */
static DEFINE_IDA(watchdog_ida);
static struct class *watchdog_class;
+static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
+{
+ /*
+ * Check that we have valid min and max timeout values, if
+ * not reset them both to 0 (=not used or unknown)
+ */
+ if (wdd->min_timeout > wdd->max_timeout) {
+ pr_info("Invalid min and max timeout values, resetting to 0!\n");
+ wdd->min_timeout = 0;
+ wdd->max_timeout = 0;
+ }
+}
+
+/**
+ * watchdog_init_timeout() - initialize the timeout field
+ * @wdd: watchdog device
+ * @timeout_parm: timeout module parameter
+ * @dev: Device that stores the timeout-sec property
+ *
+ * Initialize the timeout field of the watchdog_device struct with either the
+ * timeout module parameter (if it is a valid value) or the timeout-sec property
+ * (only if it is a valid value and the timeout_parm is out of bounds).
+ * If none of them are valid then we keep the old value (which should normally
+ * be the default timeout value).
+ *
+ * A zero is returned on success and -EINVAL for failure.
+ */
+int watchdog_init_timeout(struct watchdog_device *wdd,
+ unsigned int timeout_parm, struct device *dev)
+{
+ unsigned int t = 0;
+ int ret = 0;
+
+ watchdog_check_min_max_timeout(wdd);
+
+ /* try to get the timeout module parameter first */
+ if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
+ wdd->timeout = timeout_parm;
+ return ret;
+ }
+ if (timeout_parm)
+ ret = -EINVAL;
+
+ /* try to get the timeout-sec property */
+ if (dev == NULL || dev->of_node == NULL)
+ return ret;
+ of_property_read_u32(dev->of_node, "timeout-sec", &t);
+ if (!watchdog_timeout_invalid(wdd, t))
+ wdd->timeout = t;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(watchdog_init_timeout);
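
watchdog_init_timeout() gives converted drivers one call that prefers a valid module parameter, falls back to the "timeout-sec" device-tree property, and otherwise keeps the default already stored in the watchdog_device (the orion, pnx4008 and s3c2410 hunks in this series use it this way). A minimal usage sketch with illustrative names and limits:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>

static unsigned int heartbeat;		/* 0 means "not given on the command line" */
module_param(heartbeat, uint, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog timeout in seconds");

static struct watchdog_device example_wdd = {
	/* .info and .ops omitted from this sketch */
	.timeout	= 30,	/* kept when both the parameter and DT are invalid */
	.min_timeout	= 1,
	.max_timeout	= 255,
};

static int example_probe(struct platform_device *pdev)
{
	/* module parameter first, then the "timeout-sec" property, else default */
	watchdog_init_timeout(&example_wdd, heartbeat, &pdev->dev);

	return watchdog_register_device(&example_wdd);
}
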
+
/**
* watchdog_register_device() - register a watchdog device
* @wdd: watchdog device
@@ -63,15 +119,7 @@ int watchdog_register_device(struct watchdog_device *wdd)
if (wdd->ops->start == NULL || wdd->ops->stop == NULL)
return -EINVAL;
- /*
- * Check that we have valid min and max timeout values, if
- * not reset them both to 0 (=not used or unknown)
- */
- if (wdd->min_timeout > wdd->max_timeout) {
- pr_info("Invalid min and max timeout values, resetting to 0!\n");
- wdd->min_timeout = 0;
- wdd->max_timeout = 0;
- }
+ watchdog_check_min_max_timeout(wdd);
/*
* Note: now that all watchdog_device data has been verified, we
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index ef8edecfc526..08b48bbf9f4b 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -200,8 +200,7 @@ static int watchdog_set_timeout(struct watchdog_device *wddev,
!(wddev->info->options & WDIOF_SETTIMEOUT))
return -EOPNOTSUPP;
- if ((wddev->max_timeout != 0) &&
- (timeout < wddev->min_timeout || timeout > wddev->max_timeout))
+ if (watchdog_timeout_invalid(wddev, timeout))
return -EINVAL;
mutex_lock(&wddev->lock);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index cabfa97f4674..5a32232cf7c1 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -180,6 +180,40 @@ config XEN_PRIVCMD
depends on XEN
default m
+config XEN_STUB
+ bool "Xen stub drivers"
+ depends on XEN && X86_64
+ default n
+ help
+ Allow the kernel to install stub drivers, to reserve space for Xen
+ drivers (i.e. memory hotplug and cpu hotplug), and to block the native
+ drivers from loading, so that the real Xen drivers can be modular.
+
+ To enable Xen features like cpu and memory hotplug, select Y here.
+
+config XEN_ACPI_HOTPLUG_MEMORY
+ tristate "Xen ACPI memory hotplug"
+ depends on XEN_DOM0 && XEN_STUB && ACPI
+ default n
+ help
+ This is Xen ACPI memory hotplug.
+
+ Currently Xen only supports ACPI memory hot-add. If you want
+ to hot-add memory at runtime (the hot-added memory cannot be
+ removed until the machine is stopped), select Y/M here; otherwise select N.
+
+config XEN_ACPI_HOTPLUG_CPU
+ tristate "Xen ACPI cpu hotplug"
+ depends on XEN_DOM0 && XEN_STUB && ACPI
+ select ACPI_CONTAINER
+ default n
+ help
+ Xen ACPI cpu enumeration and hotplug support.
+
+ For hotplugging, currently Xen only supports ACPI cpu hot-add.
+ If you want to hot-add cpus at runtime (a hot-added cpu cannot
+ be removed until the machine is stopped), select Y/M here.
+
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index fb213cf81a7b..eabd0ee1c2bc 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -30,6 +30,9 @@ obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
+obj-$(CONFIG_XEN_STUB) += xen-stub.o
+obj-$(CONFIG_XEN_ACPI_HOTPLUG_MEMORY) += xen-acpi-memhotplug.o
+obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU) += xen-acpi-cpuhotplug.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 22f77c5f6012..d17aa41a9041 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -120,7 +120,22 @@ static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);
-static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
+/*
+ * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
+ * careful to only use bitops which allow for this (e.g
+ * test_bit/find_first_bit and friends but not __ffs) and to pass
+ * BITS_PER_EVTCHN_WORD as the bitmask length.
+ */
+#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
+/*
+ * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
+ * array. Primarily to avoid long lines (hence the terse name).
+ */
+#define BM(x) (unsigned long *)(x)
+/* Find the first set bit in an evtchn mask */
+#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)
+
+static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
cpu_evtchn_mask);
/* Xen will never allocate port zero for any purpose. */
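
On ARM guests xen_ulong_t is 64-bit even when unsigned long is 32-bit, so the event-channel words can no longer be passed straight to the generic bitops; the BM() cast plus BITS_PER_EVTCHN_WORD let the existing test_bit/find_first_bit call sites operate on whole xen_ulong_t words. A short sketch of the pattern (the helper name is illustrative and the include path is an assumption):

#include <linux/bitops.h>
#include <xen/interface/xen.h>		/* xen_ulong_t */

#define BITS_PER_EVTCHN_WORD	(sizeof(xen_ulong_t) * 8)
#define BM(w)			((unsigned long *)(w))

/* scan one event-channel word even where unsigned long is only 32 bits */
static int first_pending_in_word(xen_ulong_t *word)
{
	return find_first_bit(BM(word), BITS_PER_EVTCHN_WORD);
}
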
@@ -294,9 +309,9 @@ static bool pirq_needs_eoi_flag(unsigned irq)
return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
-static inline unsigned long active_evtchns(unsigned int cpu,
- struct shared_info *sh,
- unsigned int idx)
+static inline xen_ulong_t active_evtchns(unsigned int cpu,
+ struct shared_info *sh,
+ unsigned int idx)
{
return sh->evtchn_pending[idx] &
per_cpu(cpu_evtchn_mask, cpu)[idx] &
@@ -312,8 +327,8 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif
- clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
- set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
+ clear_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu_from_irq(irq))));
+ set_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu)));
info_for_irq(irq)->cpu = cpu;
}
@@ -339,19 +354,19 @@ static void init_evtchn_cpu_bindings(void)
static inline void clear_evtchn(int port)
{
struct shared_info *s = HYPERVISOR_shared_info;
- sync_clear_bit(port, &s->evtchn_pending[0]);
+ sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}
static inline void set_evtchn(int port)
{
struct shared_info *s = HYPERVISOR_shared_info;
- sync_set_bit(port, &s->evtchn_pending[0]);
+ sync_set_bit(port, BM(&s->evtchn_pending[0]));
}
static inline int test_evtchn(int port)
{
struct shared_info *s = HYPERVISOR_shared_info;
- return sync_test_bit(port, &s->evtchn_pending[0]);
+ return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}
@@ -375,7 +390,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq);
static void mask_evtchn(int port)
{
struct shared_info *s = HYPERVISOR_shared_info;
- sync_set_bit(port, &s->evtchn_mask[0]);
+ sync_set_bit(port, BM(&s->evtchn_mask[0]));
}
static void unmask_evtchn(int port)
@@ -389,7 +404,7 @@ static void unmask_evtchn(int port)
if (unlikely((cpu != cpu_from_evtchn(port))))
do_hypercall = 1;
else
- evtchn_pending = sync_test_bit(port, &s->evtchn_pending[0]);
+ evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
if (unlikely(evtchn_pending && xen_hvm_domain()))
do_hypercall = 1;
@@ -403,7 +418,7 @@ static void unmask_evtchn(int port)
} else {
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
- sync_clear_bit(port, &s->evtchn_mask[0]);
+ sync_clear_bit(port, BM(&s->evtchn_mask[0]));
/*
* The following is basically the equivalent of
@@ -411,8 +426,8 @@ static void unmask_evtchn(int port)
* the interrupt edge' if the channel is masked.
*/
if (evtchn_pending &&
- !sync_test_and_set_bit(port / BITS_PER_LONG,
- &vcpu_info->evtchn_pending_sel))
+ !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
+ BM(&vcpu_info->evtchn_pending_sel)))
vcpu_info->evtchn_upcall_pending = 1;
}
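For readers tracing the two-level layout, a small worked example (illustrative, not from the patch): each event word now holds BITS_PER_EVTCHN_WORD ports, and evtchn_pending_sel carries one selector bit per word.

	/* Illustration: with 64-bit event words, port 130 lives in word   */
	/* 130 / 64 = 2, bit 130 % 64 = 2. Unmasking it while it is still  */
	/* pending therefore sets selector bit 2 and evtchn_upcall_pending. */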
@@ -1189,7 +1204,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
struct shared_info *sh = HYPERVISOR_shared_info;
int cpu = smp_processor_id();
- unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
+ xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
int i;
unsigned long flags;
static DEFINE_SPINLOCK(debug_lock);
@@ -1205,7 +1220,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
pending = (get_irq_regs() && i == cpu)
? xen_irqs_disabled(get_irq_regs())
: v->evtchn_upcall_mask;
- printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
+ printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i,
pending, v->evtchn_upcall_pending,
(int)(sizeof(v->evtchn_pending_sel)*2),
v->evtchn_pending_sel);
@@ -1214,49 +1229,52 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
printk("\npending:\n ");
for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
- printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)sizeof(sh->evtchn_pending[0])*2,
sh->evtchn_pending[i],
i % 8 == 0 ? "\n " : " ");
printk("\nglobal mask:\n ");
for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
- printk("%0*lx%s",
+ printk("%0*"PRI_xen_ulong"%s",
(int)(sizeof(sh->evtchn_mask[0])*2),
sh->evtchn_mask[i],
i % 8 == 0 ? "\n " : " ");
printk("\nglobally unmasked:\n ");
for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
- printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)(sizeof(sh->evtchn_mask[0])*2),
sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
i % 8 == 0 ? "\n " : " ");
printk("\nlocal cpu%d mask:\n ", cpu);
- for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
- printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
+ for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
+ printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
cpu_evtchn[i],
i % 8 == 0 ? "\n " : " ");
printk("\nlocally unmasked:\n ");
for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
- unsigned long pending = sh->evtchn_pending[i]
+ xen_ulong_t pending = sh->evtchn_pending[i]
& ~sh->evtchn_mask[i]
& cpu_evtchn[i];
- printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)(sizeof(sh->evtchn_mask[0])*2),
pending, i % 8 == 0 ? "\n " : " ");
}
printk("\npending list:\n");
for (i = 0; i < NR_EVENT_CHANNELS; i++) {
- if (sync_test_bit(i, sh->evtchn_pending)) {
- int word_idx = i / BITS_PER_LONG;
+ if (sync_test_bit(i, BM(sh->evtchn_pending))) {
+ int word_idx = i / BITS_PER_EVTCHN_WORD;
printk(" %d: event %d -> irq %d%s%s%s\n",
cpu_from_evtchn(i), i,
evtchn_to_irq[i],
- sync_test_bit(word_idx, &v->evtchn_pending_sel)
+ sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
? "" : " l2-clear",
- !sync_test_bit(i, sh->evtchn_mask)
+ !sync_test_bit(i, BM(sh->evtchn_mask))
? "" : " globally-masked",
- sync_test_bit(i, cpu_evtchn)
+ sync_test_bit(i, BM(cpu_evtchn))
? "" : " locally-masked");
}
}
@@ -1273,7 +1291,7 @@ static DEFINE_PER_CPU(unsigned int, current_bit_idx);
/*
* Mask out the i least significant bits of w
*/
-#define MASK_LSBS(w, i) (w & ((~0UL) << i))
+#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
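A quick worked example of the widened constant (illustration, not part of the patch): the all-ones value has to be xen_ulong_t wide, otherwise a 32-bit build would also clear the upper half of a 64-bit event word.

	/* Illustration: clear the 2 least significant bits of 0b10110. */
	xen_ulong_t w = 0x16;
	xen_ulong_t r = MASK_LSBS(w, 2);	/* r == 0x14, i.e. 0b10100 */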
/*
* Search the CPUs pending events bitmasks. For each one found, map
@@ -1295,18 +1313,19 @@ static void __xen_evtchn_do_upcall(void)
unsigned count;
do {
- unsigned long pending_words;
+ xen_ulong_t pending_words;
vcpu_info->evtchn_upcall_pending = 0;
if (__this_cpu_inc_return(xed_nesting_count) - 1)
goto out;
-#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
- /* Clear master flag /before/ clearing selector flag. */
- wmb();
-#endif
- pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
+ /*
+ * Master flag must be cleared /before/ clearing
+ * selector flag. xchg_xen_ulong must contain an
+ * appropriate barrier.
+ */
+ pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
start_word_idx = __this_cpu_read(current_word_idx);
start_bit_idx = __this_cpu_read(current_bit_idx);
@@ -1314,8 +1333,8 @@ static void __xen_evtchn_do_upcall(void)
word_idx = start_word_idx;
for (i = 0; pending_words != 0; i++) {
- unsigned long pending_bits;
- unsigned long words;
+ xen_ulong_t pending_bits;
+ xen_ulong_t words;
words = MASK_LSBS(pending_words, word_idx);
@@ -1327,7 +1346,7 @@ static void __xen_evtchn_do_upcall(void)
bit_idx = 0;
continue;
}
- word_idx = __ffs(words);
+ word_idx = EVTCHN_FIRST_BIT(words);
pending_bits = active_evtchns(cpu, s, word_idx);
bit_idx = 0; /* usually scan entire word from start */
@@ -1342,7 +1361,7 @@ static void __xen_evtchn_do_upcall(void)
}
do {
- unsigned long bits;
+ xen_ulong_t bits;
int port, irq;
struct irq_desc *desc;
@@ -1352,10 +1371,10 @@ static void __xen_evtchn_do_upcall(void)
if (bits == 0)
break;
- bit_idx = __ffs(bits);
+ bit_idx = EVTCHN_FIRST_BIT(bits);
/* Process port. */
- port = (word_idx * BITS_PER_LONG) + bit_idx;
+ port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
irq = evtchn_to_irq[port];
if (irq != -1) {
@@ -1364,12 +1383,12 @@ static void __xen_evtchn_do_upcall(void)
generic_handle_irq_desc(irq, desc);
}
- bit_idx = (bit_idx + 1) % BITS_PER_LONG;
+ bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
/* Next caller starts at last processed + 1 */
__this_cpu_write(current_word_idx,
bit_idx ? word_idx :
- (word_idx+1) % BITS_PER_LONG);
+ (word_idx+1) % BITS_PER_EVTCHN_WORD);
__this_cpu_write(current_bit_idx, bit_idx);
} while (bit_idx != 0);
@@ -1377,7 +1396,7 @@ static void __xen_evtchn_do_upcall(void)
if ((word_idx != start_word_idx) || (i != 0))
pending_words &= ~(1UL << word_idx);
- word_idx = (word_idx + 1) % BITS_PER_LONG;
+ word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
}
BUG_ON(!irqs_disabled());
@@ -1487,8 +1506,8 @@ int resend_irq_on_evtchn(unsigned int irq)
if (!VALID_EVTCHN(evtchn))
return 1;
- masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
- sync_set_bit(evtchn, s->evtchn_pending);
+ masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+ sync_set_bit(evtchn, BM(s->evtchn_pending));
if (!masked)
unmask_evtchn(evtchn);
@@ -1536,8 +1555,8 @@ static int retrigger_dynirq(struct irq_data *data)
if (VALID_EVTCHN(evtchn)) {
int masked;
- masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
- sync_set_bit(evtchn, sh->evtchn_pending);
+ masked = sync_test_and_set_bit(evtchn, BM(sh->evtchn_mask));
+ sync_set_bit(evtchn, BM(sh->evtchn_pending));
if (!masked)
unmask_evtchn(evtchn);
ret = 1;
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index b1f60a0c0bea..45c8efaa6b3e 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
u->name, (void *)(unsigned long)port);
if (rc >= 0)
rc = evtchn_make_refcounted(port);
+ else {
+		/* bind failed, so close the port now */
+ struct evtchn_close close;
+ close.port = port;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
+ set_port_user(port, NULL);
+ }
return rc;
}
@@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)
{
int irq = irq_from_evtchn(port);
+ BUG_ON(irq < 0);
+
unbind_from_irqhandler(irq, (void *)(unsigned long)port);
set_port_user(port, NULL);
@@ -534,10 +544,10 @@ static int __init evtchn_init(void)
spin_lock_init(&port_user_lock);
- /* Create '/dev/misc/evtchn'. */
+ /* Create '/dev/xen/evtchn'. */
err = misc_register(&evtchn_miscdev);
if (err != 0) {
- printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
+ printk(KERN_ERR "Could not register /dev/xen/evtchn\n");
return err;
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 157c0ccda3ef..04c1b2d9b775 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1147,7 +1147,7 @@ static int gnttab_setup(void)
return gnttab_map(0, nr_grant_frames - 1);
if (gnttab_shared.addr == NULL) {
- gnttab_shared.addr = ioremap(xen_hvm_resume_frames,
+ gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
PAGE_SIZE * max_nr_gframes);
if (gnttab_shared.addr == NULL) {
printk(KERN_WARNING
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 5a27a4599a4a..6536d5ab1697 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -332,6 +332,41 @@ static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/* Sync with the Xen hypervisor after a CPU has been hot-added */
+void xen_pcpu_hotplug_sync(void)
+{
+ schedule_work(&xen_pcpu_work);
+}
+EXPORT_SYMBOL_GPL(xen_pcpu_hotplug_sync);
+
+/*
+ * For a CPU presented by the hypervisor, return its logical CPU id;
+ * for a CPU the hypervisor does not present, return -ENODEV.
+ */
+int xen_pcpu_id(uint32_t acpi_id)
+{
+ int cpu_id = 0, max_id = 0;
+ struct xen_platform_op op;
+
+ op.cmd = XENPF_get_cpuinfo;
+ while (cpu_id <= max_id) {
+ op.u.pcpu_info.xen_cpuid = cpu_id;
+ if (HYPERVISOR_dom0_op(&op)) {
+ cpu_id++;
+ continue;
+ }
+
+ if (acpi_id == op.u.pcpu_info.acpi_id)
+ return cpu_id;
+ if (op.u.pcpu_info.max_present > max_id)
+ max_id = op.u.pcpu_info.max_present;
+ cpu_id++;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(xen_pcpu_id);
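A minimal usage sketch of the two new exports (assumption: the caller is a dom0 driver that already knows the ACPI processor id; the actual hot-add hypercall is what xen-acpi-cpuhotplug.c below performs):

	/* Illustration only: resolve an ACPI id to Xen's logical CPU id. */
	int cpu = xen_pcpu_id(acpi_id);
	if (cpu < 0) {
		/* Unknown to Xen: hot-add it (XENPF_cpu_hotadd), then ask  */
		/* this driver to refresh the /sys/.../xen_cpuX entries.    */
		xen_pcpu_hotplug_sync();
	}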
+
static int __init xen_pcpu_init(void)
{
int irq, ret;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index af47e7594460..1d94316f0ea4 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -231,7 +231,9 @@ retry:
}
start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
if (early) {
- swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
+ if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
+ verbose))
+ panic("Cannot allocate SWIOTLB buffer");
rc = 0;
} else
rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 144564e5eb29..3ee836d42581 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -385,7 +385,7 @@ static int __init xen_tmem_init(void)
if (old_ops.init != NULL)
s = " (WARNING: frontswap_ops overridden)";
printk(KERN_INFO "frontswap enabled, RAM provided by "
- "Xen Transcendent Memory\n");
+ "Xen Transcendent Memory%s\n", s);
}
#endif
#ifdef CONFIG_CLEANCACHE
diff --git a/drivers/xen/xen-acpi-cpuhotplug.c b/drivers/xen/xen-acpi-cpuhotplug.c
new file mode 100644
index 000000000000..757827966e34
--- /dev/null
+++ b/drivers/xen/xen-acpi-cpuhotplug.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Liu Jinsong <jinsong.liu@intel.com>
+ * Author: Jiang Yunhong <yunhong.jiang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/cpu.h>
+#include <linux/acpi.h>
+#include <linux/uaccess.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/processor.h>
+
+#include <xen/acpi.h>
+#include <xen/interface/platform.h>
+#include <asm/xen/hypercall.h>
+
+#define PREFIX "ACPI:xen_cpu_hotplug:"
+
+#define INSTALL_NOTIFY_HANDLER 0
+#define UNINSTALL_NOTIFY_HANDLER 1
+
+static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr);
+
+/* --------------------------------------------------------------------------
+ Driver Interface
+-------------------------------------------------------------------------- */
+
+static int xen_acpi_processor_enable(struct acpi_device *device)
+{
+ acpi_status status = 0;
+ unsigned long long value;
+ union acpi_object object = { 0 };
+ struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
+ struct acpi_processor *pr;
+
+ pr = acpi_driver_data(device);
+ if (!pr) {
+ pr_err(PREFIX "Cannot find driver data\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
+ /* Declared with "Processor" statement; match ProcessorID */
+ status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ pr_err(PREFIX "Evaluating processor object\n");
+ return -ENODEV;
+ }
+
+ pr->acpi_id = object.processor.proc_id;
+ } else {
+ /* Declared with "Device" statement; match _UID */
+ status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
+ NULL, &value);
+ if (ACPI_FAILURE(status)) {
+ pr_err(PREFIX "Evaluating processor _UID\n");
+ return -ENODEV;
+ }
+
+ pr->acpi_id = value;
+ }
+
+ pr->id = xen_pcpu_id(pr->acpi_id);
+
+ if ((int)pr->id < 0)
+		/* This CPU is not present in the hypervisor; try to hot-add it */
+ if (ACPI_FAILURE(xen_acpi_cpu_hotadd(pr))) {
+ pr_err(PREFIX "Hotadd CPU (acpi_id = %d) failed.\n",
+ pr->acpi_id);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __cpuinit xen_acpi_processor_add(struct acpi_device *device)
+{
+ int ret;
+ struct acpi_processor *pr;
+
+ if (!device)
+ return -EINVAL;
+
+ pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
+ if (!pr)
+ return -ENOMEM;
+
+ pr->handle = device->handle;
+ strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
+ device->driver_data = pr;
+
+ ret = xen_acpi_processor_enable(device);
+ if (ret)
+ pr_err(PREFIX "Error when enabling Xen processor\n");
+
+ return ret;
+}
+
+static int xen_acpi_processor_remove(struct acpi_device *device)
+{
+ struct acpi_processor *pr;
+
+ if (!device)
+ return -EINVAL;
+
+ pr = acpi_driver_data(device);
+ if (!pr)
+ return -EINVAL;
+
+ kfree(pr);
+ return 0;
+}
+
+/*--------------------------------------------------------------
+	ACPI processor hotplug support
+--------------------------------------------------------------*/
+
+static int is_processor_present(acpi_handle handle)
+{
+ acpi_status status;
+ unsigned long long sta = 0;
+
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+
+ if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
+ return 1;
+
+ /*
+ * _STA is mandatory for a processor that supports hot plug
+ */
+ if (status == AE_NOT_FOUND)
+ pr_info(PREFIX "Processor does not support hot plug\n");
+ else
+		pr_info(PREFIX "Processor Device is not present\n");
+ return 0;
+}
+
+static int xen_apic_id(acpi_handle handle)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ struct acpi_madt_local_apic *lapic;
+ int apic_id;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
+ return -EINVAL;
+
+ if (!buffer.length || !buffer.pointer)
+ return -EINVAL;
+
+ obj = buffer.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER ||
+ obj->buffer.length < sizeof(*lapic)) {
+ kfree(buffer.pointer);
+ return -EINVAL;
+ }
+
+ lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
+
+ if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
+ !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
+ kfree(buffer.pointer);
+ return -EINVAL;
+ }
+
+ apic_id = (uint32_t)lapic->id;
+ kfree(buffer.pointer);
+ buffer.length = ACPI_ALLOCATE_BUFFER;
+ buffer.pointer = NULL;
+
+ return apic_id;
+}
+
+static int xen_hotadd_cpu(struct acpi_processor *pr)
+{
+ int cpu_id, apic_id, pxm;
+ struct xen_platform_op op;
+
+ apic_id = xen_apic_id(pr->handle);
+ if (apic_id < 0) {
+ pr_err(PREFIX "Failed to get apic_id for acpi_id %d\n",
+ pr->acpi_id);
+ return -ENODEV;
+ }
+
+ pxm = xen_acpi_get_pxm(pr->handle);
+ if (pxm < 0) {
+ pr_err(PREFIX "Failed to get _PXM for acpi_id %d\n",
+ pr->acpi_id);
+ return pxm;
+ }
+
+ op.cmd = XENPF_cpu_hotadd;
+ op.u.cpu_add.apic_id = apic_id;
+ op.u.cpu_add.acpi_id = pr->acpi_id;
+ op.u.cpu_add.pxm = pxm;
+
+ cpu_id = HYPERVISOR_dom0_op(&op);
+ if (cpu_id < 0)
+ pr_err(PREFIX "Failed to hotadd CPU for acpi_id %d\n",
+ pr->acpi_id);
+
+ return cpu_id;
+}
+
+static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr)
+{
+ if (!is_processor_present(pr->handle))
+ return AE_ERROR;
+
+ pr->id = xen_hotadd_cpu(pr);
+ if ((int)pr->id < 0)
+ return AE_ERROR;
+
+ /*
+	 * Sync with the Xen hypervisor, providing the new /sys/.../xen_cpuX
+	 * interface after the CPU has been hot-added.
+ */
+ xen_pcpu_hotplug_sync();
+
+ return AE_OK;
+}
+
+static
+int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
+{
+ acpi_handle phandle;
+ struct acpi_device *pdev;
+
+ if (acpi_get_parent(handle, &phandle))
+ return -ENODEV;
+
+ if (acpi_bus_get_device(phandle, &pdev))
+ return -ENODEV;
+
+ if (acpi_bus_scan(handle))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int acpi_processor_device_remove(struct acpi_device *device)
+{
+ pr_debug(PREFIX "Xen does not support CPU hotremove\n");
+
+ return -ENOSYS;
+}
+
+static void acpi_processor_hotplug_notify(acpi_handle handle,
+ u32 event, void *data)
+{
+ struct acpi_processor *pr;
+ struct acpi_device *device = NULL;
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
+ int result;
+
+ switch (event) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Processor driver received %s event\n",
+ (event == ACPI_NOTIFY_BUS_CHECK) ?
+ "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
+
+ if (!is_processor_present(handle))
+ break;
+
+ if (!acpi_bus_get_device(handle, &device))
+ break;
+
+ result = acpi_processor_device_add(handle, &device);
+ if (result) {
+ pr_err(PREFIX "Unable to add the device\n");
+ break;
+ }
+
+ ost_code = ACPI_OST_SC_SUCCESS;
+ break;
+
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "received ACPI_NOTIFY_EJECT_REQUEST\n"));
+
+ if (acpi_bus_get_device(handle, &device)) {
+			pr_err(PREFIX "Device doesn't exist, dropping EJECT\n");
+ break;
+ }
+ pr = acpi_driver_data(device);
+ if (!pr) {
+ pr_err(PREFIX "Driver data is NULL, dropping EJECT\n");
+ break;
+ }
+
+ /*
+		 * TBD: implement acpi_processor_device_remove if Xen supports
+		 * CPU hot-remove in the future.
+ */
+ acpi_processor_device_remove(device);
+ break;
+
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+
+		/* non-hotplug event; possibly handled by another handler */
+ return;
+ }
+
+ (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
+ return;
+}
+
+static acpi_status is_processor_device(acpi_handle handle)
+{
+ struct acpi_device_info *info;
+ char *hid;
+ acpi_status status;
+
+ status = acpi_get_object_info(handle, &info);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ if (info->type == ACPI_TYPE_PROCESSOR) {
+ kfree(info);
+ return AE_OK; /* found a processor object */
+ }
+
+ if (!(info->valid & ACPI_VALID_HID)) {
+ kfree(info);
+ return AE_ERROR;
+ }
+
+ hid = info->hardware_id.string;
+ if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
+ kfree(info);
+ return AE_ERROR;
+ }
+
+ kfree(info);
+ return AE_OK; /* found a processor device object */
+}
+
+static acpi_status
+processor_walk_namespace_cb(acpi_handle handle,
+ u32 lvl, void *context, void **rv)
+{
+ acpi_status status;
+ int *action = context;
+
+ status = is_processor_device(handle);
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* not a processor; continue to walk */
+
+ switch (*action) {
+ case INSTALL_NOTIFY_HANDLER:
+ acpi_install_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ acpi_processor_hotplug_notify,
+ NULL);
+ break;
+ case UNINSTALL_NOTIFY_HANDLER:
+ acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ acpi_processor_hotplug_notify);
+ break;
+ default:
+ break;
+ }
+
+ /* found a processor; skip walking underneath */
+ return AE_CTRL_DEPTH;
+}
+
+static
+void acpi_processor_install_hotplug_notify(void)
+{
+ int action = INSTALL_NOTIFY_HANDLER;
+ acpi_walk_namespace(ACPI_TYPE_ANY,
+ ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ processor_walk_namespace_cb, NULL, &action, NULL);
+}
+
+static
+void acpi_processor_uninstall_hotplug_notify(void)
+{
+ int action = UNINSTALL_NOTIFY_HANDLER;
+ acpi_walk_namespace(ACPI_TYPE_ANY,
+ ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ processor_walk_namespace_cb, NULL, &action, NULL);
+}
+
+static const struct acpi_device_id processor_device_ids[] = {
+ {ACPI_PROCESSOR_OBJECT_HID, 0},
+ {ACPI_PROCESSOR_DEVICE_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
+static struct acpi_driver xen_acpi_processor_driver = {
+ .name = "processor",
+ .class = ACPI_PROCESSOR_CLASS,
+ .ids = processor_device_ids,
+ .ops = {
+ .add = xen_acpi_processor_add,
+ .remove = xen_acpi_processor_remove,
+ },
+};
+
+static int __init xen_acpi_processor_init(void)
+{
+ int result = 0;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+	/* unregister the stub which is only used to reserve driver space */
+ xen_stub_processor_exit();
+
+ result = acpi_bus_register_driver(&xen_acpi_processor_driver);
+ if (result < 0) {
+ xen_stub_processor_init();
+ return result;
+ }
+
+ acpi_processor_install_hotplug_notify();
+ return 0;
+}
+
+static void __exit xen_acpi_processor_exit(void)
+{
+ if (!xen_initial_domain())
+ return;
+
+ acpi_processor_uninstall_hotplug_notify();
+
+ acpi_bus_unregister_driver(&xen_acpi_processor_driver);
+
+ /*
+	 * Have the stub reserve the space again to prevent any chance of
+	 * the native driver loading.
+ */
+ xen_stub_processor_init();
+ return;
+}
+
+module_init(xen_acpi_processor_init);
+module_exit(xen_acpi_processor_exit);
+ACPI_MODULE_NAME("xen-acpi-cpuhotplug");
+MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
+MODULE_DESCRIPTION("Xen Hotplug CPU Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
new file mode 100644
index 000000000000..853b12dba5bb
--- /dev/null
+++ b/drivers/xen/xen-acpi-memhotplug.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Liu Jinsong <jinsong.liu@intel.com>
+ * Author: Jiang Yunhong <yunhong.jiang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_drivers.h>
+#include <xen/acpi.h>
+#include <xen/interface/platform.h>
+#include <asm/xen/hypercall.h>
+
+#define PREFIX "ACPI:xen_memory_hotplug:"
+
+struct acpi_memory_info {
+ struct list_head list;
+ u64 start_addr; /* Memory Range start physical addr */
+ u64 length; /* Memory Range length */
+ unsigned short caching; /* memory cache attribute */
+ unsigned short write_protect; /* memory read/write attribute */
+	/* copied from the buffer returned by _CRS */
+ unsigned int enabled:1;
+};
+
+struct acpi_memory_device {
+ struct acpi_device *device;
+ struct list_head res_list;
+};
+
+static bool acpi_hotmem_initialized __read_mostly;
+
+static int xen_hotadd_memory(int pxm, struct acpi_memory_info *info)
+{
+ int rc;
+ struct xen_platform_op op;
+
+ op.cmd = XENPF_mem_hotadd;
+ op.u.mem_add.spfn = info->start_addr >> PAGE_SHIFT;
+ op.u.mem_add.epfn = (info->start_addr + info->length) >> PAGE_SHIFT;
+ op.u.mem_add.pxm = pxm;
+
+ rc = HYPERVISOR_dom0_op(&op);
+ if (rc)
+ pr_err(PREFIX "Xen Hotplug Memory Add failed on "
+ "0x%lx -> 0x%lx, _PXM: %d, error: %d\n",
+ (unsigned long)info->start_addr,
+ (unsigned long)(info->start_addr + info->length),
+ pxm, rc);
+
+ return rc;
+}
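A worked example of the PFN conversion above (assuming the usual 4 KiB pages, i.e. PAGE_SHIFT == 12): a hot-added range starting at 4 GiB with a length of 1 GiB becomes

	/* Illustration only:                                     */
	/* spfn = 0x100000000 >> 12               = 0x100000      */
	/* epfn = (0x100000000 + 0x40000000) >> 12 = 0x140000     */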
+
+static int xen_acpi_memory_enable_device(struct acpi_memory_device *mem_device)
+{
+ int pxm, result;
+ int num_enabled = 0;
+ struct acpi_memory_info *info;
+
+ if (!mem_device)
+ return -EINVAL;
+
+ pxm = xen_acpi_get_pxm(mem_device->device->handle);
+ if (pxm < 0)
+ return pxm;
+
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ if (info->enabled) { /* just sanity check...*/
+ num_enabled++;
+ continue;
+ }
+
+ if (!info->length)
+ continue;
+
+ result = xen_hotadd_memory(pxm, info);
+ if (result)
+ continue;
+ info->enabled = 1;
+ num_enabled++;
+ }
+
+ if (!num_enabled)
+ return -ENODEV;
+
+ return 0;
+}
+
+static acpi_status
+acpi_memory_get_resource(struct acpi_resource *resource, void *context)
+{
+ struct acpi_memory_device *mem_device = context;
+ struct acpi_resource_address64 address64;
+ struct acpi_memory_info *info, *new;
+ acpi_status status;
+
+ status = acpi_resource_to_address64(resource, &address64);
+ if (ACPI_FAILURE(status) ||
+ (address64.resource_type != ACPI_MEMORY_RANGE))
+ return AE_OK;
+
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ if ((info->caching == address64.info.mem.caching) &&
+ (info->write_protect == address64.info.mem.write_protect) &&
+ (info->start_addr + info->length == address64.minimum)) {
+ info->length += address64.address_length;
+ return AE_OK;
+ }
+ }
+
+ new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
+ if (!new)
+ return AE_ERROR;
+
+ INIT_LIST_HEAD(&new->list);
+ new->caching = address64.info.mem.caching;
+ new->write_protect = address64.info.mem.write_protect;
+ new->start_addr = address64.minimum;
+ new->length = address64.address_length;
+ list_add_tail(&new->list, &mem_device->res_list);
+
+ return AE_OK;
+}
+
+static int
+acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
+{
+ acpi_status status;
+ struct acpi_memory_info *info, *n;
+
+ if (!list_empty(&mem_device->res_list))
+ return 0;
+
+ status = acpi_walk_resources(mem_device->device->handle,
+ METHOD_NAME__CRS, acpi_memory_get_resource, mem_device);
+
+ if (ACPI_FAILURE(status)) {
+ list_for_each_entry_safe(info, n, &mem_device->res_list, list)
+ kfree(info);
+ INIT_LIST_HEAD(&mem_device->res_list);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+acpi_memory_get_device(acpi_handle handle,
+ struct acpi_memory_device **mem_device)
+{
+ acpi_status status;
+ acpi_handle phandle;
+ struct acpi_device *device = NULL;
+ struct acpi_device *pdevice = NULL;
+ int result;
+
+ if (!acpi_bus_get_device(handle, &device) && device)
+ goto end;
+
+ status = acpi_get_parent(handle, &phandle);
+ if (ACPI_FAILURE(status)) {
+ pr_warn(PREFIX "Cannot find acpi parent\n");
+ return -EINVAL;
+ }
+
+ /* Get the parent device */
+ result = acpi_bus_get_device(phandle, &pdevice);
+ if (result) {
+ pr_warn(PREFIX "Cannot get acpi bus device\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now add the notified device. This creates the acpi_device
+	 * and invokes the .add() function.
+ */
+ result = acpi_bus_scan(handle);
+ if (result) {
+ pr_warn(PREFIX "Cannot add acpi bus\n");
+ return -EINVAL;
+ }
+
+end:
+ *mem_device = acpi_driver_data(device);
+ if (!(*mem_device)) {
+ pr_err(PREFIX "Driver data not found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
+{
+ unsigned long long current_status;
+
+ /* Get device present/absent information from the _STA */
+ if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
+ "_STA", NULL, &current_status)))
+ return -ENODEV;
+ /*
+ * Check for device status. Device should be
+ * present/enabled/functioning.
+ */
+ if (!((current_status & ACPI_STA_DEVICE_PRESENT)
+ && (current_status & ACPI_STA_DEVICE_ENABLED)
+ && (current_status & ACPI_STA_DEVICE_FUNCTIONING)))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
+{
+ pr_debug(PREFIX "Xen does not support memory hotremove\n");
+
+ return -ENOSYS;
+}
+
+static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_memory_device *mem_device;
+ struct acpi_device *device;
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
+
+ switch (event) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "\nReceived BUS CHECK notification for device\n"));
+ /* Fall Through */
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ if (event == ACPI_NOTIFY_DEVICE_CHECK)
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "\nReceived DEVICE CHECK notification for device\n"));
+
+ if (acpi_memory_get_device(handle, &mem_device)) {
+ pr_err(PREFIX "Cannot find driver data\n");
+ break;
+ }
+
+ ost_code = ACPI_OST_SC_SUCCESS;
+ break;
+
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "\nReceived EJECT REQUEST notification for device\n"));
+
+ if (acpi_bus_get_device(handle, &device)) {
+ pr_err(PREFIX "Device doesn't exist\n");
+ break;
+ }
+ mem_device = acpi_driver_data(device);
+ if (!mem_device) {
+ pr_err(PREFIX "Driver Data is NULL\n");
+ break;
+ }
+
+ /*
+ * TBD: implement acpi_memory_disable_device and invoke
+	 * acpi_bus_remove if Xen supports hot-remove in the future.
+ */
+ acpi_memory_disable_device(mem_device);
+ break;
+
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+		/* non-hotplug event; possibly handled by another handler */
+ return;
+ }
+
+ (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
+ return;
+}
+
+static int xen_acpi_memory_device_add(struct acpi_device *device)
+{
+ int result;
+ struct acpi_memory_device *mem_device = NULL;
+
+ if (!device)
+ return -EINVAL;
+
+ mem_device = kzalloc(sizeof(struct acpi_memory_device), GFP_KERNEL);
+ if (!mem_device)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mem_device->res_list);
+ mem_device->device = device;
+ sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
+ sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
+ device->driver_data = mem_device;
+
+ /* Get the range from the _CRS */
+ result = acpi_memory_get_device_resources(mem_device);
+ if (result) {
+ kfree(mem_device);
+ return result;
+ }
+
+ /*
+	 * For memory devices present at boot, the early boot code has already
+	 * recognized the memory area via EFI/E820. If the DSDT shows these
+	 * memory devices at boot, hotplug is not necessary for them.
+	 * Memory devices hot-added at runtime, however, need a hypercall to
+	 * the Xen hypervisor to add the memory.
+ */
+ if (!acpi_hotmem_initialized)
+ return 0;
+
+ if (!acpi_memory_check_device(mem_device))
+ result = xen_acpi_memory_enable_device(mem_device);
+
+ return result;
+}
+
+static int xen_acpi_memory_device_remove(struct acpi_device *device)
+{
+ struct acpi_memory_device *mem_device = NULL;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ mem_device = acpi_driver_data(device);
+ kfree(mem_device);
+
+ return 0;
+}
+
+/*
+ * Helper function to check for a memory device
+ */
+static acpi_status is_memory_device(acpi_handle handle)
+{
+ char *hardware_id;
+ acpi_status status;
+ struct acpi_device_info *info;
+
+ status = acpi_get_object_info(handle, &info);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ if (!(info->valid & ACPI_VALID_HID)) {
+ kfree(info);
+ return AE_ERROR;
+ }
+
+ hardware_id = info->hardware_id.string;
+ if ((hardware_id == NULL) ||
+ (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
+ status = AE_ERROR;
+
+ kfree(info);
+ return status;
+}
+
+static acpi_status
+acpi_memory_register_notify_handler(acpi_handle handle,
+ u32 level, void *ctxt, void **retv)
+{
+ acpi_status status;
+
+ status = is_memory_device(handle);
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* continue */
+
+ status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ acpi_memory_device_notify, NULL);
+ /* continue */
+ return AE_OK;
+}
+
+static acpi_status
+acpi_memory_deregister_notify_handler(acpi_handle handle,
+ u32 level, void *ctxt, void **retv)
+{
+ acpi_status status;
+
+ status = is_memory_device(handle);
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* continue */
+
+ status = acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ acpi_memory_device_notify);
+
+ return AE_OK; /* continue */
+}
+
+static const struct acpi_device_id memory_device_ids[] = {
+ {ACPI_MEMORY_DEVICE_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, memory_device_ids);
+
+static struct acpi_driver xen_acpi_memory_device_driver = {
+ .name = "acpi_memhotplug",
+ .class = ACPI_MEMORY_DEVICE_CLASS,
+ .ids = memory_device_ids,
+ .ops = {
+ .add = xen_acpi_memory_device_add,
+ .remove = xen_acpi_memory_device_remove,
+ },
+};
+
+static int __init xen_acpi_memory_device_init(void)
+{
+ int result;
+ acpi_status status;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+	/* unregister the stub which is only used to reserve driver space */
+ xen_stub_memory_device_exit();
+
+ result = acpi_bus_register_driver(&xen_acpi_memory_device_driver);
+ if (result < 0) {
+ xen_stub_memory_device_init();
+ return -ENODEV;
+ }
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ acpi_memory_register_notify_handler,
+ NULL, NULL, NULL);
+
+ if (ACPI_FAILURE(status)) {
+ pr_warn(PREFIX "walk_namespace failed\n");
+ acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);
+ xen_stub_memory_device_init();
+ return -ENODEV;
+ }
+
+ acpi_hotmem_initialized = true;
+ return 0;
+}
+
+static void __exit xen_acpi_memory_device_exit(void)
+{
+ acpi_status status;
+
+ if (!xen_initial_domain())
+ return;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ acpi_memory_deregister_notify_handler,
+ NULL, NULL, NULL);
+ if (ACPI_FAILURE(status))
+ pr_warn(PREFIX "walk_namespace failed\n");
+
+ acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);
+
+ /*
+	 * Have the stub reserve the space again to prevent any chance of
+	 * the native driver loading.
+ */
+ xen_stub_memory_device_init();
+ return;
+}
+
+module_init(xen_acpi_memory_device_init);
+module_exit(xen_acpi_memory_device_exit);
+ACPI_MODULE_NAME("xen-acpi-memhotplug");
+MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
+MODULE_DESCRIPTION("Xen Hotplug Mem Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-stub.c b/drivers/xen/xen-stub.c
new file mode 100644
index 000000000000..d85e411cbf89
--- /dev/null
+++ b/drivers/xen/xen-stub.c
@@ -0,0 +1,101 @@
+/*
+ * xen-stub.c - stub drivers to reserve space for Xen
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Liu Jinsong <jinsong.liu@intel.com>
+ * Author: Jiang Yunhong <yunhong.jiang@intel.com>
+ *
+ * Copyright (C) 2012 Oracle Inc
+ * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_drivers.h>
+#include <xen/acpi.h>
+
+#ifdef CONFIG_ACPI
+
+/*--------------------------------------------
+ stub driver for Xen memory hotplug
+--------------------------------------------*/
+
+static const struct acpi_device_id memory_device_ids[] = {
+ {ACPI_MEMORY_DEVICE_HID, 0},
+ {"", 0},
+};
+
+static struct acpi_driver xen_stub_memory_device_driver = {
+	/* same name as the native memory driver, to block it from loading */
+ .name = "acpi_memhotplug",
+ .class = ACPI_MEMORY_DEVICE_CLASS,
+ .ids = memory_device_ids,
+};
+
+int xen_stub_memory_device_init(void)
+{
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+	/* just reserve space for Xen and block the native driver from loading */
+ return acpi_bus_register_driver(&xen_stub_memory_device_driver);
+}
+EXPORT_SYMBOL_GPL(xen_stub_memory_device_init);
+subsys_initcall(xen_stub_memory_device_init);
+
+void xen_stub_memory_device_exit(void)
+{
+ acpi_bus_unregister_driver(&xen_stub_memory_device_driver);
+}
+EXPORT_SYMBOL_GPL(xen_stub_memory_device_exit);
+
+
+/*--------------------------------------------
+ stub driver for Xen cpu hotplug
+--------------------------------------------*/
+
+static const struct acpi_device_id processor_device_ids[] = {
+ {ACPI_PROCESSOR_OBJECT_HID, 0},
+ {ACPI_PROCESSOR_DEVICE_HID, 0},
+ {"", 0},
+};
+
+static struct acpi_driver xen_stub_processor_driver = {
+	/* same name as the native processor driver, to block it from loading */
+ .name = "processor",
+ .class = ACPI_PROCESSOR_CLASS,
+ .ids = processor_device_ids,
+};
+
+int xen_stub_processor_init(void)
+{
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+	/* just reserve space for Xen and block the native driver from loading */
+ return acpi_bus_register_driver(&xen_stub_processor_driver);
+}
+EXPORT_SYMBOL_GPL(xen_stub_processor_init);
+subsys_initcall(xen_stub_processor_init);
+
+void xen_stub_processor_exit(void)
+{
+ acpi_bus_unregister_driver(&xen_stub_processor_driver);
+}
+EXPORT_SYMBOL_GPL(xen_stub_processor_exit);
+
+#endif
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 038b71dbf03c..3325884c693f 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -769,7 +769,7 @@ static int __init xenbus_init(void)
goto out_error;
xen_store_mfn = (unsigned long)v;
xen_store_interface =
- ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
+ xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
break;
default:
pr_warn("Xenstore state unknown\n");
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 459b9ac45cf5..ec0abb6df3c3 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -24,47 +24,6 @@
MODULE_DESCRIPTION("Xen filesystem");
MODULE_LICENSE("GPL");
-static struct inode *xenfs_make_inode(struct super_block *sb, int mode)
-{
- struct inode *ret = new_inode(sb);
-
- if (ret) {
- ret->i_mode = mode;
- ret->i_uid = GLOBAL_ROOT_UID;
- ret->i_gid = GLOBAL_ROOT_GID;
- ret->i_blocks = 0;
- ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
- }
- return ret;
-}
-
-static struct dentry *xenfs_create_file(struct super_block *sb,
- struct dentry *parent,
- const char *name,
- const struct file_operations *fops,
- void *data,
- int mode)
-{
- struct dentry *dentry;
- struct inode *inode;
-
- dentry = d_alloc_name(parent, name);
- if (!dentry)
- return NULL;
-
- inode = xenfs_make_inode(sb, S_IFREG | mode);
- if (!inode) {
- dput(dentry);
- return NULL;
- }
-
- inode->i_fop = fops;
- inode->i_private = data;
-
- d_add(dentry, inode);
- return dentry;
-}
-
static ssize_t capabilities_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
@@ -84,26 +43,23 @@ static const struct file_operations capabilities_file_ops = {
static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
{
static struct tree_descr xenfs_files[] = {
- [1] = {},
- { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
+ [2] = { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
{ "capabilities", &capabilities_file_ops, S_IRUGO },
{ "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
{""},
};
- int rc;
- rc = simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
- if (rc < 0)
- return rc;
-
- if (xen_initial_domain()) {
- xenfs_create_file(sb, sb->s_root, "xsd_kva",
- &xsd_kva_file_ops, NULL, S_IRUSR|S_IWUSR);
- xenfs_create_file(sb, sb->s_root, "xsd_port",
- &xsd_port_file_ops, NULL, S_IRUSR|S_IWUSR);
- }
+ static struct tree_descr xenfs_init_files[] = {
+ [2] = { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
+ { "capabilities", &capabilities_file_ops, S_IRUGO },
+ { "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
+ { "xsd_kva", &xsd_kva_file_ops, S_IRUSR|S_IWUSR},
+ { "xsd_port", &xsd_port_file_ops, S_IRUSR|S_IWUSR},
+ {""},
+ };
- return rc;
+ return simple_fill_super(sb, XENFS_SUPER_MAGIC,
+ xen_initial_domain() ? xenfs_init_files : xenfs_files);
}
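Background on why the first real entry sits at index [2] (not stated in the patch, but how simple_fill_super() behaves): the array index becomes the file's inode number and inode 1 is reserved for the root directory, so a minimal table looks like this (example_fops is a hypothetical placeholder):

	static struct tree_descr files[] = {
		/* slots 0 and 1 unused: inode 1 is the root directory */
		[2] = { "example", &example_fops, S_IRUGO },
		{ "" },		/* terminator */
	};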
static struct dentry *xenfs_mount(struct file_system_type *fs_type,
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 988880dcee75..73b33837e12c 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -22,7 +22,7 @@ static loff_t
proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
{
loff_t new = -1;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
mutex_lock(&inode->i_mutex);
switch (whence) {
@@ -47,7 +47,7 @@ proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
static ssize_t
proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- struct inode *ino = file->f_path.dentry->d_inode;
+ struct inode *ino = file_inode(file);
struct proc_dir_entry *dp = PDE(ino);
struct zorro_dev *z = dp->data;
struct ConfigDev cd;